Diffstat
-rwxr-xr-xCHANGES1
-rwxr-xr-xCONTRIBUTORS5
-rwxr-xr-xLICENSE202
-rwxr-xr-xREADME.asciidoc54
-rwxr-xr-xTODO1
-rwxr-xr-xVERSION1
-rwxr-xr-xscripts/automation/config/trex-dan.cfg33
-rwxr-xr-xscripts/automation/config/trex-dev3.cfg34
-rwxr-xr-xscripts/automation/config/trex-esp80-hhaim.cfg31
-rwxr-xr-xscripts/automation/config/trex-hhaim.cfg33
-rwxr-xr-xscripts/automation/config/trex01-1g.cfg35
-rwxr-xr-xscripts/automation/graph_template.html80
-rwxr-xr-xscripts/automation/h_avc.py195
-rwxr-xr-xscripts/automation/phantom/phantomjsbin0 -> 38346752 bytes
-rwxr-xr-xscripts/automation/phantom/rasterize.js32
-rwxr-xr-xscripts/automation/readme.txt15
-rwxr-xr-xscripts/automation/report_template.html96
-rwxr-xr-xscripts/automation/sshpass.exp15
-rwxr-xr-xscripts/automation/trex_control_plane/__init__.py1
-rwxr-xr-xscripts/automation/trex_control_plane/client/__init__.py1
-rwxr-xr-xscripts/automation/trex_control_plane/client/outer_packages.py29
-rwxr-xr-xscripts/automation/trex_control_plane/client/trex_adv_client.py70
-rwxr-xr-xscripts/automation/trex_control_plane/client/trex_client.py1066
-rwxr-xr-xscripts/automation/trex_control_plane/client_utils/__init__.py1
-rwxr-xr-xscripts/automation/trex_control_plane/client_utils/general_utils.py57
-rwxr-xr-xscripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py212
-rwxr-xr-xscripts/automation/trex_control_plane/common/__init__.py1
-rwxr-xr-xscripts/automation/trex_control_plane/common/trex_exceptions.py140
-rwxr-xr-xscripts/automation/trex_control_plane/common/trex_status_e.py8
-rwxr-xr-xscripts/automation/trex_control_plane/dirtree_no_files.txt11
-rwxr-xr-xscripts/automation/trex_control_plane/dirtree_with_files.txt31
-rwxr-xr-xscripts/automation/trex_control_plane/doc/Makefile192
-rwxr-xr-xscripts/automation/trex_control_plane/doc/_static/no_scrollbars.css10
-rwxr-xr-xscripts/automation/trex_control_plane/doc/about_trex.rst16
-rwxr-xr-xscripts/automation/trex_control_plane/doc/api/client_code.rst17
-rwxr-xr-xscripts/automation/trex_control_plane/doc/api/exceptions.rst7
-rwxr-xr-xscripts/automation/trex_control_plane/doc/api/index.rst19
-rwxr-xr-xscripts/automation/trex_control_plane/doc/api/json_fields.rst233
-rwxr-xr-xscripts/automation/trex_control_plane/doc/authors.rst12
-rwxr-xr-xscripts/automation/trex_control_plane/doc/client_utils.rst14
-rwxr-xr-xscripts/automation/trex_control_plane/doc/conf.py303
-rwxr-xr-xscripts/automation/trex_control_plane/doc/docs_utilities.py37
-rwxr-xr-xscripts/automation/trex_control_plane/doc/index.rst57
-rwxr-xr-xscripts/automation/trex_control_plane/doc/installation.rst25
-rwxr-xr-xscripts/automation/trex_control_plane/doc/json_dictionary.yaml252
-rwxr-xr-xscripts/automation/trex_control_plane/doc/license.rst18
-rwxr-xr-xscripts/automation/trex_control_plane/doc/requirements.rst0
-rwxr-xr-xscripts/automation/trex_control_plane/doc/usage_examples.rst68
-rwxr-xr-xscripts/automation/trex_control_plane/examples/__init__.py1
-rwxr-xr-xscripts/automation/trex_control_plane/examples/client_interactive_example.py256
-rwxr-xr-xscripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py105
-rwxr-xr-xscripts/automation/trex_control_plane/examples/trex_root_path.py15
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/__init__.py1
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/enum34-1.0.4/PKG-INFO746
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/LICENSE32
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/README2
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/__init__.py790
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/doc/enum.rst725
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/enum.py790
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/test_enum.py1690
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/enum34-1.0.4/setup.py44
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/LICENSE.txt11
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/PKG-INFO10
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/README.txt203
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/SimpleJSONRPCServer.py229
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/__init__.py6
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/config.py38
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/history.py40
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonclass.py145
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonrpc.py556
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/setup.py28
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/LICENSE.txt11
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/MANIFEST.in2
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/PKG-INFO460
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/README.rst438
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py602
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py34
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py141
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py95
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py295
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py1192
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py490
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py122
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO460
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt17
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt1
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt1
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.cfg8
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.py74
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ACKS6
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/AUTHORS5
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ChangeLog165
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/LICENSE21
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/PKG-INFO51
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/README27
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/RELEASE-NOTES50
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/Makefile73
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/conf.py179
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/index.rst275
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/PKG-INFO51
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt26
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt1
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/not-zip-safe1
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/top_level.txt1
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/__init__.py326
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/linklockfile.py73
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/mkdirlockfile.py83
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/pidlockfile.py193
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/sqlitelockfile.py155
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/symlinklockfile.py69
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.cfg39
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.py30
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test-requirements.txt2
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/compliancetest.py261
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/test_lockfile.py36
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/tox.ini28
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/ChangeLog380
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.ASF-2202
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.GPL-3674
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/MANIFEST.in7
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/PKG-INFO38
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/__init__.py49
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/_metadata.py152
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/daemon.py926
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/pidfile.py67
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/runner.py324
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/CREDITS53
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/FAQ156
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/TODO95
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/hacking.txt180
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO38
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt30
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt1
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe1
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/requires.txt3
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt1
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/version_info.json6
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.cfg11
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.py106
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/__init__.py23
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/scaffold.py322
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_daemon.py1744
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_metadata.py380
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_pidfile.py472
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_runner.py675
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test_version.py1373
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/version.py547
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/rednose-0.4.1/rednose.py387
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/rednose-0.4.1/setup.py29
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/termstyle/MANIFEST.in1
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/termstyle/Makefile9
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/termstyle/README.rst82
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/termstyle/VERSION1
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/termstyle/python-termstyle.xml183
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/termstyle/setup.py27
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/termstyle/termstyle.py107
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/termstyle/test2.py5
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/termstyle/test3.py5
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/termstyle/test_all.sh4
-rwxr-xr-xscripts/automation/trex_control_plane/python_lib/zmq_fedora.tar.gzbin0 -> 1752871 bytes
-rwxr-xr-xscripts/automation/trex_control_plane/server/CCustomLogger.py100
-rwxr-xr-xscripts/automation/trex_control_plane/server/extended_daemon_runner.py144
-rwxr-xr-xscripts/automation/trex_control_plane/server/outer_packages.py66
-rwxr-xr-xscripts/automation/trex_control_plane/server/trex_daemon_server25
-rwxr-xr-xscripts/automation/trex_control_plane/server/trex_daemon_server.py87
-rwxr-xr-xscripts/automation/trex_control_plane/server/trex_launch_thread.py92
-rwxr-xr-xscripts/automation/trex_control_plane/server/trex_server.py465
-rwxr-xr-xscripts/automation/trex_control_plane/server/zmq_monitor_thread.py80
-rwxr-xr-xscripts/automation/trex_control_plane/unit_tests/__init__.py1
-rwxr-xr-xscripts/automation/trex_control_plane/unit_tests/client_launching_test.py31
-rwxr-xr-xscripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py72
-rwxr-xr-xscripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py73
-rwxr-xr-xscripts/automation/trex_control_plane/unit_tests/functional_test.py160
-rwxr-xr-xscripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py27
-rwxr-xr-xscripts/automation/trex_control_plane/unit_tests/sock.py552
-rwxr-xr-xscripts/automation/trex_control_plane/unit_tests/test.py36
-rwxr-xr-xscripts/automation/trex_perf.py1265
-rwxr-xr-xscripts/automation/wkhtmltopdf-amd64bin0 -> 8301444 bytes
-rwxr-xr-xscripts/avl/_tun_citrix_0_fixed.pcapbin0 -> 94477 bytes
-rwxr-xr-xscripts/avl/_tun_exchange_0_fixed.pcapbin0 -> 11946 bytes
-rwxr-xr-xscripts/avl/_tun_http_browsing_0_fixed.pcapbin0 -> 45175 bytes
-rwxr-xr-xscripts/avl/_tun_http_get_0_fixed.pcapbin0 -> 49326 bytes
-rwxr-xr-xscripts/avl/_tun_http_post_0_fixed.pcapbin0 -> 63342 bytes
-rwxr-xr-xscripts/avl/_tun_https_0_fixed.pcapbin0 -> 111955 bytes
-rwxr-xr-xscripts/avl/_tun_mail_pop_0_fixed.pcapbin0 -> 7227 bytes
-rwxr-xr-xscripts/avl/_tun_mail_pop_1_fixed.pcapbin0 -> 123955 bytes
-rwxr-xr-xscripts/avl/_tun_mail_pop_2_fixed.pcapbin0 -> 19484 bytes
-rwxr-xr-xscripts/avl/_tun_oracle_0_fixed.pcapbin0 -> 64895 bytes
-rwxr-xr-xscripts/avl/_tun_rtsp_0_fixed.pcapbin0 -> 5002 bytes
-rwxr-xr-xscripts/avl/_tun_smtp_0_fixed.pcapbin0 -> 7298 bytes
-rwxr-xr-xscripts/avl/_tun_smtp_1_fixed.pcapbin0 -> 22742 bytes
-rwxr-xr-xscripts/avl/_tun_smtp_2_fixed.pcapbin0 -> 117980 bytes
-rwxr-xr-xscripts/avl/avl.csv26
-rwxr-xr-xscripts/avl/avl_delay_10.csv27
-rwxr-xr-xscripts/avl/citrix_0.pcapbin0 -> 91105 bytes
-rwxr-xr-xscripts/avl/delay_10_citrix_0.pcapbin0 -> 90017 bytes
-rwxr-xr-xscripts/avl/delay_10_dns_0.pcapbin0 -> 226 bytes
-rwxr-xr-xscripts/avl/delay_10_exchange_0.pcapbin0 -> 10732 bytes
-rwxr-xr-xscripts/avl/delay_10_http_browsing_0.pcapbin0 -> 35189 bytes
-rwxr-xr-xscripts/avl/delay_10_http_get_0.pcapbin0 -> 38734 bytes
-rwxr-xr-xscripts/avl/delay_10_http_post_0.pcapbin0 -> 49572 bytes
-rwxr-xr-xscripts/avl/delay_10_https_0.pcapbin0 -> 93563 bytes
-rwxr-xr-xscripts/avl/delay_10_mail_pop_0.pcapbin0 -> 6027 bytes
-rwxr-xr-xscripts/avl/delay_10_mail_pop_1.pcapbin0 -> 103821 bytes
-rwxr-xr-xscripts/avl/delay_10_mail_pop_2.pcapbin0 -> 16254 bytes
-rwxr-xr-xscripts/avl/delay_10_oracle_0.pcapbin0 -> 62195 bytes
-rwxr-xr-xscripts/avl/delay_10_rtp_160k_0.pcapbin0 -> 99790 bytes
-rwxr-xr-xscripts/avl/delay_10_rtp_160k_1.pcapbin0 -> 1155285 bytes
-rwxr-xr-xscripts/avl/delay_10_rtp_160k_full.pcapbin0 -> 1259861 bytes
-rwxr-xr-xscripts/avl/delay_10_rtp_250k_0_0.pcapbin0 -> 168536 bytes
-rwxr-xr-xscripts/avl/delay_10_rtp_250k_1_0.pcapbin0 -> 1790082 bytes
-rwxr-xr-xscripts/avl/delay_10_rtp_250k_full.pcapbin0 -> 1963404 bytes
-rwxr-xr-xscripts/avl/delay_10_rtsp_0.pcapbin0 -> 4774 bytes
-rwxr-xr-xscripts/avl/delay_10_sip_0.pcapbin0 -> 2831 bytes
-rwxr-xr-xscripts/avl/delay_10_sip_video_call_full.pcapbin0 -> 153359 bytes
-rwxr-xr-xscripts/avl/delay_10_sip_video_call_short.pcapbin0 -> 3515 bytes
-rwxr-xr-xscripts/avl/delay_10_smtp_0.pcapbin0 -> 6082 bytes
-rwxr-xr-xscripts/avl/delay_10_smtp_1.pcapbin0 -> 19068 bytes
-rwxr-xr-xscripts/avl/delay_10_smtp_2.pcapbin0 -> 98768 bytes
-rwxr-xr-xscripts/avl/delay_10_video_call_0.pcapbin0 -> 2579101 bytes
-rwxr-xr-xscripts/avl/delay_10_video_call_rtp_0.pcapbin0 -> 128964 bytes
-rwxr-xr-xscripts/avl/delay_citrix_0.pcapbin0 -> 94794 bytes
-rwxr-xr-xscripts/avl/delay_dns_0.pcapbin0 -> 226 bytes
-rwxr-xr-xscripts/avl/delay_email_pop_0.pcapbin0 -> 6027 bytes
-rwxr-xr-xscripts/avl/delay_email_pop_1.pcapbin0 -> 110263 bytes
-rwxr-xr-xscripts/avl/delay_email_pop_2.pcapbin0 -> 18508 bytes
-rwxr-xr-xscripts/avl/delay_exchange_0.pcapbin0 -> 10732 bytes
-rwxr-xr-xscripts/avl/delay_http_browsing_0.pcapbin0 -> 35349 bytes
-rwxr-xr-xscripts/avl/delay_http_get_0.pcapbin0 -> 39134 bytes
-rwxr-xr-xscripts/avl/delay_http_post_0.pcapbin0 -> 50052 bytes
-rwxr-xr-xscripts/avl/delay_https_0.pcapbin0 -> 96297 bytes
-rwxr-xr-xscripts/avl/delay_oracle_0.pcapbin0 -> 62195 bytes
-rwxr-xr-xscripts/avl/delay_rtp_160k_1_1_0.pcapbin0 -> 1155285 bytes
-rwxr-xr-xscripts/avl/delay_rtp_160k_1_1_1.pcapbin0 -> 99790 bytes
-rwxr-xr-xscripts/avl/delay_rtp_250k_0_0.pcapbin0 -> 168536 bytes
-rwxr-xr-xscripts/avl/delay_rtp_250k_2_0.pcapbin0 -> 1790082 bytes
-rwxr-xr-xscripts/avl/delay_rtsp_0.pcapbin0 -> 4304 bytes
-rwxr-xr-xscripts/avl/delay_sip_0.pcapbin0 -> 2831 bytes
-rwxr-xr-xscripts/avl/delay_smtp_0.pcapbin0 -> 6242 bytes
-rwxr-xr-xscripts/avl/delay_smtp_1.pcapbin0 -> 19628 bytes
-rwxr-xr-xscripts/avl/delay_smtp_2.pcapbin0 -> 105770 bytes
-rwxr-xr-xscripts/avl/delay_video_call_0.pcapbin0 -> 2579101 bytes
-rwxr-xr-xscripts/avl/delay_video_call_rtp_0.pcapbin0 -> 150508 bytes
-rwxr-xr-xscripts/avl/dns_0.pcapbin0 -> 234 bytes
-rwxr-xr-xscripts/avl/email_pop1.pcapbin0 -> 6107 bytes
-rwxr-xr-xscripts/avl/email_pop1_1.pcapbin0 -> 6107 bytes
-rwxr-xr-xscripts/avl/email_pop2.pcapbin0 -> 104277 bytes
-rwxr-xr-xscripts/avl/email_pop2_2.pcapbin0 -> 104277 bytes
-rwxr-xr-xscripts/avl/email_pop4_29.pcapbin0 -> 16374 bytes
-rwxr-xr-xscripts/avl/exchange.pcapbin0 -> 10904 bytes
-rwxr-xr-xscripts/avl/http_browsing.pcapbin0 -> 35337 bytes
-rwxr-xr-xscripts/avl/http_get.pcapbin0 -> 38910 bytes
-rwxr-xr-xscripts/avl/http_post.pcapbin0 -> 49788 bytes
-rwxr-xr-xscripts/avl/https.pcapbin0 -> 93947 bytes
-rwxr-xr-xscripts/avl/mac_uit.yaml5
-rwxr-xr-xscripts/avl/oracle.pcapbin0 -> 63403 bytes
-rwxr-xr-xscripts/avl/rtp_160_0.pcapbin0 -> 98852 bytes
-rwxr-xr-xscripts/avl/rtp_160_1.pcapbin0 -> 1158408 bytes
-rwxr-xr-xscripts/avl/rtp_250k_1_0.pcapbin0 -> 148880 bytes
-rwxr-xr-xscripts/avl/rtp_250k_2_0.pcapbin0 -> 1797762 bytes
-rwxr-xr-xscripts/avl/rtsp_0.pcapbin0 -> 4382 bytes
-rwxr-xr-xscripts/avl/sfr_branch_profile_delay_10.yaml114
-rwxr-xr-xscripts/avl/sfr_delay_10.yaml119
-rwxr-xr-xscripts/avl/sfr_delay_10_1g.yaml118
-rwxr-xr-xscripts/avl/sfr_delay_10_1g_no_bundeling.yaml130
-rwxr-xr-xscripts/avl/sfr_delay_10_no_bundeling.yaml131
-rwxr-xr-xscripts/avl/sfr_delay_50_tunnel_no_bundeling.yaml131
-rwxr-xr-xscripts/avl/sip_0.pcapbin0 -> 2859 bytes
-rwxr-xr-xscripts/avl/smtp_1.pcapbin0 -> 6170 bytes
-rwxr-xr-xscripts/avl/smtp_2.pcapbin0 -> 19208 bytes
-rwxr-xr-xscripts/avl/smtp_3.pcapbin0 -> 99208 bytes
-rwxr-xr-xscripts/avl/test_mac.yaml29
-rwxr-xr-xscripts/avl/video_call_0.pcapbin0 -> 2588401 bytes
-rwxr-xr-xscripts/avl/video_rtp_1588_0.pcapbin0 -> 148656 bytes
-rwxr-xr-xscripts/cap2/Oracle.pcapbin0 -> 62195 bytes
-rwxr-xr-xscripts/cap2/Video_Calls.pcapbin0 -> 2579101 bytes
-rwxr-xr-xscripts/cap2/Voice_calls_rtp_only.pcapbin0 -> 124214 bytes
-rwxr-xr-xscripts/cap2/citrix.pcapbin0 -> 89937 bytes
-rwxr-xr-xscripts/cap2/delay_10_rtp_250k_short.pcapbin0 -> 19821 bytes
-rwxr-xr-xscripts/cap2/dns.pcapbin0 -> 226 bytes
-rwxr-xr-xscripts/cap2/dns.yaml23
-rwxr-xr-xscripts/cap2/dns_one_server.yaml33
-rwxr-xr-xscripts/cap2/dns_single_server.yaml32
-rwxr-xr-xscripts/cap2/dns_wlen.yaml25
-rwxr-xr-xscripts/cap2/dns_wlen1.yaml25
-rwxr-xr-xscripts/cap2/dns_wlen2.yaml32
-rwxr-xr-xscripts/cap2/dns_wlength.yaml25
-rwxr-xr-xscripts/cap2/dyn_pyld1.yaml36
-rwxr-xr-xscripts/cap2/exchange.pcapbin0 -> 10732 bytes
-rwxr-xr-xscripts/cap2/http.yaml22
-rwxr-xr-xscripts/cap2/http_browsing.pcapbin0 -> 35429 bytes
-rwxr-xr-xscripts/cap2/http_get.pcapbin0 -> 42781 bytes
-rwxr-xr-xscripts/cap2/http_plugin.yaml24
-rwxr-xr-xscripts/cap2/http_post.pcapbin0 -> 41026 bytes
-rwxr-xr-xscripts/cap2/http_simple.yaml21
-rwxr-xr-xscripts/cap2/https.pcapbin0 -> 174163 bytes
-rwxr-xr-xscripts/cap2/imix.yaml35
-rwxr-xr-xscripts/cap2/imix_1518.yaml70
-rwxr-xr-xscripts/cap2/imix_64.yaml70
-rwxr-xr-xscripts/cap2/imix_fast_1g.yaml53
-rwxr-xr-xscripts/cap2/imix_fast_1g_100k_flows.yaml53
-rwxr-xr-xscripts/cap2/ipv4_vlan.yaml21
-rwxr-xr-xscripts/cap2/ipv6.pcapbin0 -> 130 bytes
-rwxr-xr-xscripts/cap2/ipv6.yaml22
-rwxr-xr-xscripts/cap2/ipv6_vlan.yaml23
-rwxr-xr-xscripts/cap2/lb_ex1.yaml26
-rwxr-xr-xscripts/cap2/limit_multi_pkt.yaml23
-rwxr-xr-xscripts/cap2/limit_single_pkt.yaml23
-rwxr-xr-xscripts/cap2/mail_pop.pcapbin0 -> 16254 bytes
-rwxr-xr-xscripts/cap2/nat_test.yaml46
-rwxr-xr-xscripts/cap2/rtp_160k.pcapbin0 -> 1150301 bytes
-rwxr-xr-xscripts/cap2/rtp_250k_rtp_only.pcapbin0 -> 3900796 bytes
-rwxr-xr-xscripts/cap2/rtp_250k_rtp_only_1.pcapbin0 -> 168032 bytes
-rwxr-xr-xscripts/cap2/rtp_250k_rtp_only_2.pcapbin0 -> 1782402 bytes
-rwxr-xr-xscripts/cap2/rtsp.yaml24
-rwxr-xr-xscripts/cap2/rtsp_full1.yaml17
-rwxr-xr-xscripts/cap2/rtsp_full2.yaml24
-rwxr-xr-xscripts/cap2/rtsp_short.pcapbin0 -> 11692 bytes
-rwxr-xr-xscripts/cap2/rtsp_short1.yaml24
-rwxr-xr-xscripts/cap2/rtsp_short1_slow.yaml24
-rwxr-xr-xscripts/cap2/rtsp_short2.yaml24
-rwxr-xr-xscripts/cap2/rtsp_short3.yaml24
-rwxr-xr-xscripts/cap2/sfr.yaml90
-rwxr-xr-xscripts/cap2/sfr2.yaml30
-rwxr-xr-xscripts/cap2/sfr3.yaml90
-rwxr-xr-xscripts/cap2/sfr4.yaml20
-rwxr-xr-xscripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile.yaml83
-rwxr-xr-xscripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile_ipg_mix.yaml240
-rwxr-xr-xscripts/cap2/short_tcp.yaml20
-rwxr-xr-xscripts/cap2/sip_short1.yaml24
-rwxr-xr-xscripts/cap2/sip_short2.yaml24
-rwxr-xr-xscripts/cap2/smtp.pcapbin0 -> 98768 bytes
-rwxr-xr-xscripts/cap2/test_mac.yaml9
-rwxr-xr-xscripts/cap2/test_pcap_mode1.yaml24
-rwxr-xr-xscripts/cap2/test_pcap_mode2.yaml24
-rwxr-xr-xscripts/cap2/tuple_gen.yaml10
-rwxr-xr-xscripts/cap2/udp_1518B.pcapbin0 -> 1558 bytes
-rwxr-xr-xscripts/cap2/udp_594B.pcapbin0 -> 634 bytes
-rwxr-xr-xscripts/cap2/udp_64B.pcapbin0 -> 104 bytes
-rwxr-xr-xscripts/cfg/cfg_example1.yaml27
-rwxr-xr-xscripts/cfg/cfg_example2.yaml26
-rwxr-xr-xscripts/cfg/ins1.yaml25
-rwxr-xr-xscripts/cfg/ins2.yaml25
-rwxr-xr-xscripts/cfg/ins3.yaml27
-rwxr-xr-xscripts/cfg/ucs_h0.yaml9
-rwxr-xr-xscripts/cfg/ucs_h1.yaml9
-rwxr-xr-xscripts/cfg/xl710.yaml29
-rwxr-xr-xscripts/daemon_server27
-rwxr-xr-xscripts/doc_process.py109
-rwxr-xr-xscripts/dpdk_nic_bind.py539
-rwxr-xr-xscripts/dpdk_setup_ports.py232
-rwxr-xr-xscripts/exp/dns-0-ex.erfbin0 -> 2080 bytes
-rw-r--r--scripts/exp/dns-0.erfbin0 -> 2080 bytes
-rwxr-xr-xscripts/exp/dns_e-0-ex.erfbin0 -> 2080 bytes
-rw-r--r--scripts/exp/dns_e-0.erfbin0 -> 2080 bytes
-rwxr-xr-xscripts/exp/dns_flip-0-ex.erfbin0 -> 2080 bytes
-rw-r--r--scripts/exp/dns_flip-0.erfbin0 -> 2080 bytes
-rwxr-xr-xscripts/exp/dns_ipv6-0-ex.erfbin0 -> 2560 bytes
-rw-r--r--scripts/exp/dns_ipv6-0.erfbin0 -> 2560 bytes
-rwxr-xr-xscripts/exp/dns_ipv6_rxcheck-ex.erfbin0 -> 304 bytes
-rw-r--r--scripts/exp/dns_ipv6_rxcheck.erfbin0 -> 304 bytes
-rwxr-xr-xscripts/exp/dns_one_server-0-ex.erfbin0 -> 4160 bytes
-rw-r--r--scripts/exp/dns_one_server-0.erfbin0 -> 4160 bytes
-rwxr-xr-xscripts/exp/dns_p-0-ex.erfbin0 -> 2080 bytes
-rw-r--r--scripts/exp/dns_p-0.erfbin0 -> 2080 bytes
-rwxr-xr-xscripts/exp/dns_rxcheck-ex.erfbin0 -> 256 bytes
-rw-r--r--scripts/exp/dns_rxcheck.erfbin0 -> 256 bytes
-rwxr-xr-xscripts/exp/dns_single_server-0-ex.erfbin0 -> 3360 bytes
-rwxr-xr-xscripts/exp/dns_wlen-0-ex.erfbin0 -> 2240 bytes
-rwxr-xr-xscripts/exp/dns_wlen1-0-ex.erfbin0 -> 2240 bytes
-rwxr-xr-xscripts/exp/dns_wlen2-0-ex.erfbin0 -> 4480 bytes
-rwxr-xr-xscripts/exp/dyn_pyld1-0-ex.erfbin0 -> 2080 bytes
-rw-r--r--scripts/exp/dyn_pyld1-0.erfbin0 -> 2080 bytes
-rwxr-xr-xscripts/exp/http1_with_option-ex.pcapbin0 -> 35049 bytes
-rw-r--r--scripts/exp/http1_with_option.pcapbin0 -> 35049 bytes
-rwxr-xr-xscripts/exp/http1_with_option_ipv6-ex.pcapbin0 -> 35713 bytes
-rw-r--r--scripts/exp/http1_with_option_ipv6.pcapbin0 -> 35713 bytes
-rwxr-xr-xscripts/exp/http_plugin-0-ex.erfbin0 -> 35328 bytes
-rw-r--r--scripts/exp/http_plugin-0.erfbin0 -> 35328 bytes
-rwxr-xr-xscripts/exp/http_plugin_v6-0-ex.erfbin0 -> 36008 bytes
-rw-r--r--scripts/exp/http_plugin_v6-0.erfbin0 -> 36008 bytes
-rwxr-xr-xscripts/exp/imix-0-ex.erfbin0 -> 62872 bytes
-rw-r--r--scripts/exp/imix-0.erfbin0 -> 62872 bytes
-rwxr-xr-xscripts/exp/imix_v6-0-ex.erfbin0 -> 65480 bytes
-rw-r--r--scripts/exp/imix_v6-0.erfbin0 -> 65480 bytes
-rwxr-xr-xscripts/exp/ipv4_vlan-0-ex.erfbin0 -> 8800 bytes
-rw-r--r--scripts/exp/ipv4_vlan-0.erfbin0 -> 8800 bytes
-rwxr-xr-xscripts/exp/ipv6-0-ex.erfbin0 -> 11200 bytes
-rw-r--r--scripts/exp/ipv6-0.erfbin0 -> 11200 bytes
-rwxr-xr-xscripts/exp/ipv6_vlan-0-ex.erfbin0 -> 11200 bytes
-rw-r--r--scripts/exp/ipv6_vlan-0.erfbin0 -> 11200 bytes
-rwxr-xr-xscripts/exp/limit_multi_pkt-0-ex.erfbin0 -> 30368 bytes
-rw-r--r--scripts/exp/limit_multi_pkt-0.erfbin0 -> 30368 bytes
-rwxr-xr-xscripts/exp/limit_single_pkt-0-ex.erfbin0 -> 5368 bytes
-rw-r--r--scripts/exp/limit_single_pkt-0.erfbin0 -> 5368 bytes
-rwxr-xr-xscripts/exp/pcap_mode1-0-ex.erfbin0 -> 91456 bytes
-rw-r--r--scripts/exp/pcap_mode1-0.erfbin0 -> 91456 bytes
-rwxr-xr-xscripts/exp/pcap_mode2-0-ex.erfbin0 -> 914560 bytes
-rw-r--r--scripts/exp/pcap_mode2-0.erfbin0 -> 914560 bytes
-rwxr-xr-xscripts/exp/rtsp_short1-0-ex.erfbin0 -> 20024 bytes
-rw-r--r--scripts/exp/rtsp_short1-0.erfbin0 -> 20024 bytes
-rwxr-xr-xscripts/exp/rtsp_short1_ipv6_rxcheck-ex.erfbin0 -> 21560 bytes
-rw-r--r--scripts/exp/rtsp_short1_ipv6_rxcheck.erfbin0 -> 21560 bytes
-rwxr-xr-xscripts/exp/rtsp_short1_rxcheck-ex.erfbin0 -> 20912 bytes
-rw-r--r--scripts/exp/rtsp_short1_rxcheck.erfbin0 -> 20912 bytes
-rwxr-xr-xscripts/exp/rtsp_short1_v6-0-ex.erfbin0 -> 20672 bytes
-rw-r--r--scripts/exp/rtsp_short1_v6-0.erfbin0 -> 20672 bytes
-rwxr-xr-xscripts/exp/rtsp_short2-0-ex.erfbin0 -> 20024 bytes
-rw-r--r--scripts/exp/rtsp_short2-0.erfbin0 -> 20024 bytes
-rwxr-xr-xscripts/exp/rtsp_short2_v6-0-ex.erfbin0 -> 20672 bytes
-rw-r--r--scripts/exp/rtsp_short2_v6-0.erfbin0 -> 20672 bytes
-rwxr-xr-xscripts/exp/rtsp_short3-0-ex.erfbin0 -> 20032 bytes
-rw-r--r--scripts/exp/rtsp_short3-0.erfbin0 -> 20032 bytes
-rwxr-xr-xscripts/exp/rtsp_short3_v6-0-ex.erfbin0 -> 20696 bytes
-rw-r--r--scripts/exp/rtsp_short3_v6-0.erfbin0 -> 20696 bytes
-rwxr-xr-xscripts/exp/sctp-ex.erfbin0 -> 704 bytes
-rw-r--r--scripts/exp/sctp.erfbin0 -> 704 bytes
-rwxr-xr-xscripts/exp/sfr2-0-ex.erfbin0 -> 1830944 bytes
-rw-r--r--scripts/exp/sfr2-0.erfbin0 -> 1830944 bytes
-rwxr-xr-xscripts/exp/sfr3-0-ex.erfbin0 -> 10351656 bytes
-rw-r--r--scripts/exp/sfr3-0.erfbin0 -> 10351656 bytes
-rwxr-xr-xscripts/exp/sfr_4-0-ex.erfbin0 -> 42968 bytes
-rw-r--r--scripts/exp/sfr_4-0.erfbin0 -> 42968 bytes
-rwxr-xr-xscripts/exp/sip_short1-0-ex.erfbin0 -> 3576 bytes
-rw-r--r--scripts/exp/sip_short1-0.erfbin0 -> 3576 bytes
-rwxr-xr-xscripts/exp/sip_short1_v6-0-ex.erfbin0 -> 3880 bytes
-rw-r--r--scripts/exp/sip_short1_v6-0.erfbin0 -> 3880 bytes
-rwxr-xr-xscripts/exp/sip_short2-0-ex.erfbin0 -> 3576 bytes
-rw-r--r--scripts/exp/sip_short2-0.erfbin0 -> 3576 bytes
-rwxr-xr-xscripts/exp/sip_short2_v6-0-ex.erfbin0 -> 3880 bytes
-rw-r--r--scripts/exp/sip_short2_v6-0.erfbin0 -> 3880 bytes
-rwxr-xr-xscripts/exp/sip_short3-0-ex.erfbin0 -> 3584 bytes
-rw-r--r--scripts/exp/sip_short3-0.erfbin0 -> 3584 bytes
-rwxr-xr-xscripts/exp/sip_short3_v6-0-ex.erfbin0 -> 3888 bytes
-rw-r--r--scripts/exp/sip_short3_v6-0.erfbin0 -> 3888 bytes
-rwxr-xr-xscripts/ko/3.11.10-301.fc20.x86_64/igb_uio.kobin0 -> 230302 bytes
-rwxr-xr-xscripts/ko/3.13.0-32-generic/igb_uio.kobin0 -> 16907 bytes
-rwxr-xr-xscripts/ko/3.16.0-37-generic/igb_uio.kobin0 -> 16531 bytes
-rwxr-xr-xscripts/ko/3.17.4-301.fc21.x86_64/igb_uio.kobin0 -> 241815 bytes
-rwxr-xr-xscripts/ko/3.18.9-100.fc20.x86_64/igb_uio.kobin0 -> 251127 bytes
-rwxr-xr-xscripts/ko/3.19.1-201.fc21.x86_64/igb_uio.kobin0 -> 252755 bytes
-rwxr-xr-xscripts/ko/3.6.10-4.fc18.x86_64/igb_uio.kobin0 -> 199501 bytes
-rwxr-xr-xscripts/ko/src/Makefile38
-rwxr-xr-xscripts/ko/src/compat.h116
-rwxr-xr-xscripts/ko/src/igb_uio.c643
-rwxr-xr-xscripts/ko/src/readme.txt16
-rwxr-xr-xscripts/ko/src/rte_pci_dev_feature_defs.h45
-rwxr-xr-xscripts/ko/src/rte_pci_dev_features.h44
-rwxr-xr-xscripts/libzmq.so.3bin0 -> 3150071 bytes
-rwxr-xr-xscripts/libzmq.so.3.1.0bin0 -> 3150071 bytes
-rwxr-xr-xscripts/python-lib/yaml/__init__.py315
-rw-r--r--scripts/python-lib/yaml/__init__.pycbin0 -> 13543 bytes
-rwxr-xr-xscripts/python-lib/yaml/composer.py139
-rw-r--r--scripts/python-lib/yaml/composer.pycbin0 -> 5476 bytes
-rwxr-xr-xscripts/python-lib/yaml/constructor.py675
-rw-r--r--scripts/python-lib/yaml/constructor.pycbin0 -> 25778 bytes
-rwxr-xr-xscripts/python-lib/yaml/cyaml.py85
-rw-r--r--scripts/python-lib/yaml/cyaml.pycbin0 -> 4841 bytes
-rwxr-xr-xscripts/python-lib/yaml/dumper.py62
-rw-r--r--scripts/python-lib/yaml/dumper.pycbin0 -> 3120 bytes
-rwxr-xr-xscripts/python-lib/yaml/emitter.py1140
-rw-r--r--scripts/python-lib/yaml/emitter.pycbin0 -> 37006 bytes
-rwxr-xr-xscripts/python-lib/yaml/error.py75
-rw-r--r--scripts/python-lib/yaml/error.pycbin0 -> 3674 bytes
-rwxr-xr-xscripts/python-lib/yaml/events.py86
-rw-r--r--scripts/python-lib/yaml/events.pycbin0 -> 6757 bytes
-rwxr-xr-xscripts/python-lib/yaml/loader.py40
-rw-r--r--scripts/python-lib/yaml/loader.pycbin0 -> 2443 bytes
-rwxr-xr-xscripts/python-lib/yaml/nodes.py49
-rw-r--r--scripts/python-lib/yaml/nodes.pycbin0 -> 2976 bytes
-rwxr-xr-xscripts/python-lib/yaml/parser.py589
-rw-r--r--scripts/python-lib/yaml/parser.pycbin0 -> 17397 bytes
-rwxr-xr-xscripts/python-lib/yaml/reader.py190
-rw-r--r--scripts/python-lib/yaml/reader.pycbin0 -> 6696 bytes
-rwxr-xr-xscripts/python-lib/yaml/representer.py484
-rw-r--r--scripts/python-lib/yaml/representer.pycbin0 -> 17745 bytes
-rwxr-xr-xscripts/python-lib/yaml/resolver.py224
-rw-r--r--scripts/python-lib/yaml/resolver.pycbin0 -> 7503 bytes
-rwxr-xr-xscripts/python-lib/yaml/scanner.py1457
-rw-r--r--scripts/python-lib/yaml/scanner.pycbin0 -> 39060 bytes
-rwxr-xr-xscripts/python-lib/yaml/serializer.py111
-rw-r--r--scripts/python-lib/yaml/serializer.pycbin0 -> 5121 bytes
-rwxr-xr-xscripts/python-lib/yaml/tokens.py104
-rw-r--r--scripts/python-lib/yaml/tokens.pycbin0 -> 8859 bytes
-rwxr-xr-xscripts/stty_r2
-rwxr-xr-xscripts/t-rex-6425
-rwxr-xr-xscripts/t-rex-64-debug9
-rwxr-xr-xscripts/t-rex-64-debug-gdb4
-rwxr-xr-xscripts/t-rex-64-debug-o9
-rwxr-xr-xscripts/t-rex-64-debug-o-gdb4
-rwxr-xr-xscripts/t-rex-64-o9
-rwxr-xr-xscripts/trex-cfg57
-rwxr-xr-xscripts/trex_daemon_server25
-rwxr-xr-xscripts/version.txt4
-rwxr-xr-xsrc/SimpleGlob.h979
-rwxr-xr-xsrc/SimpleOpt.h1060
-rwxr-xr-xsrc/bp_gtest.cpp2792
-rwxr-xr-xsrc/bp_sim.cpp6622
-rwxr-xr-xsrc/bp_sim.h3990
-rwxr-xr-xsrc/common/BigEndianBitManipulation.h73
-rwxr-xr-xsrc/common/Env.h34
-rwxr-xr-xsrc/common/Network/Packet/CPktCmn.cpp145
-rwxr-xr-xsrc/common/Network/Packet/CPktCmn.h88
-rwxr-xr-xsrc/common/Network/Packet/EthernetHeader.cpp32
-rwxr-xr-xsrc/common/Network/Packet/EthernetHeader.h98
-rwxr-xr-xsrc/common/Network/Packet/EthernetHeader.inl39
-rwxr-xr-xsrc/common/Network/Packet/IPHeader.cpp71
-rwxr-xr-xsrc/common/Network/Packet/IPHeader.h197
-rwxr-xr-xsrc/common/Network/Packet/IPHeader.inl290
-rwxr-xr-xsrc/common/Network/Packet/IPv6Header.cpp71
-rwxr-xr-xsrc/common/Network/Packet/IPv6Header.h142
-rwxr-xr-xsrc/common/Network/Packet/IPv6Header.inl182
-rwxr-xr-xsrc/common/Network/Packet/MacAddress.cpp24
-rwxr-xr-xsrc/common/Network/Packet/MacAddress.h130
-rwxr-xr-xsrc/common/Network/Packet/PacketHeaderBase.h50
-rwxr-xr-xsrc/common/Network/Packet/TCPHeader.cpp35
-rwxr-xr-xsrc/common/Network/Packet/TCPOptions.cpp176
-rwxr-xr-xsrc/common/Network/Packet/TCPOptions.h163
-rwxr-xr-xsrc/common/Network/Packet/TcpHeader.h124
-rwxr-xr-xsrc/common/Network/Packet/TcpHeader.inl220
-rwxr-xr-xsrc/common/Network/Packet/UDPHeader.cpp28
-rwxr-xr-xsrc/common/Network/Packet/UdpHeader.h86
-rwxr-xr-xsrc/common/Network/Packet/UdpHeader.inl102
-rwxr-xr-xsrc/common/Network/Packet/VLANHeader.cpp100
-rwxr-xr-xsrc/common/Network/Packet/VLANHeader.h107
-rwxr-xr-xsrc/common/Network/Packet/VLANHeader.inl111
-rwxr-xr-xsrc/common/arg/SimpleGlob.h979
-rwxr-xr-xsrc/common/arg/SimpleOpt.h1060
-rwxr-xr-xsrc/common/basic_utils.cpp163
-rwxr-xr-xsrc/common/basic_utils.h91
-rwxr-xr-xsrc/common/bitMan.h185
-rwxr-xr-xsrc/common/c_common.h52
-rwxr-xr-xsrc/common/captureFile.cpp328
-rwxr-xr-xsrc/common/captureFile.h289
-rwxr-xr-xsrc/common/cgen_map.h96
-rwxr-xr-xsrc/common/erf.cpp454
-rwxr-xr-xsrc/common/erf.h257
-rwxr-xr-xsrc/common/erf_reader.h27
-rwxr-xr-xsrc/common/gtest-all.cc8528
-rwxr-xr-xsrc/common/gtest.h18065
-rwxr-xr-xsrc/common/gtest_main.cc51
-rwxr-xr-xsrc/common/os_types.h40
-rwxr-xr-xsrc/common/pcap.cpp299
-rwxr-xr-xsrc/common/pcap.h150
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/Makefile63
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/acl.h196
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/acl_bld.c2008
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/acl_gen.c475
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/acl_run.h268
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/acl_run_scalar.c193
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/acl_run_sse.c626
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/acl_vect.h132
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/rte_acl.c516
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/rte_acl.h485
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/rte_acl_osdep.h92
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/rte_acl_osdep_alone.h278
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/tb_mem.c104
-rwxr-xr-xsrc/dpdk_lib18/librte_acl/tb_mem.h73
-rwxr-xr-xsrc/dpdk_lib18/librte_cfgfile/Makefile53
-rwxr-xr-xsrc/dpdk_lib18/librte_cfgfile/rte_cfgfile.c356
-rwxr-xr-xsrc/dpdk_lib18/librte_cfgfile/rte_cfgfile.h195
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/Makefile63
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline.c264
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline.h91
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_cirbuf.c467
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_cirbuf.h245
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse.c564
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse.h191
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.c180
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.h94
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.c408
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.h186
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse_num.c402
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse_num.h113
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.c173
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.h101
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse_string.c253
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_parse_string.h110
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_rdline.c698
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_rdline.h254
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_socket.c119
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_socket.h76
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_vt100.c185
-rwxr-xr-xsrc/dpdk_lib18/librte_cmdline/cmdline_vt100.h151
-rwxr-xr-xsrc/dpdk_lib18/librte_distributor/Makefile50
-rwxr-xr-xsrc/dpdk_lib18/librte_distributor/rte_distributor.c488
-rwxr-xr-xsrc/dpdk_lib18/librte_distributor/rte_distributor.h248
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/Makefile39
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/Makefile38
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/contigmem/BSDmakefile36
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/contigmem/Makefile52
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/contigmem/contigmem.c233
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/Makefile97
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/eal.c563
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/eal_alarm.c60
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/eal_debug.c113
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/eal_hugepage_info.c133
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/eal_interrupts.c71
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/eal_lcore.c107
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/eal_log.c57
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/eal_memory.c224
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/eal_pci.c510
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/eal_thread.c233
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/eal_timer.c149
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h107
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h54
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/nic_uio/BSDmakefile36
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/nic_uio/Makefile52
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/bsdapp/nic_uio/nic_uio.c329
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/Makefile61
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_cpuflags.c85
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_dev.c109
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_devargs.c152
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_errno.c74
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_hexdump.c121
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_launch.c120
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_log.c320
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_memory.c121
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_memzone.c533
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_options.c611
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_pci.c207
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_string_fns.c69
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_common_tailqs.c146
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_filesystem.h118
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_hugepages.h67
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_internal_cfg.h93
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_options.h93
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_private.h206
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/eal_thread.h53
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_atomic.h426
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_byteorder.h149
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h187
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cycles.h87
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_memcpy.h225
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_prefetch.h61
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_spinlock.h73
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic.h216
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_32.h222
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_64.h191
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder.h125
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_32.h51
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_64.h52
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cpuflags.h310
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cycles.h121
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/x86/rte_memcpy.h297
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/x86/rte_prefetch.h62
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/arch/x86/rte_spinlock.h94
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/generic/rte_atomic.h918
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/generic/rte_byteorder.h217
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/generic/rte_cpuflags.h110
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/generic/rte_cycles.h205
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/generic/rte_memcpy.h144
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/generic/rte_prefetch.h71
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/generic/rte_spinlock.h226
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_alarm.h106
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_branch_prediction.h70
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_common.h389
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_common_vect.h93
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_debug.h105
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_dev.h111
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_devargs.h149
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_eal.h269
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_eal_memconfig.h112
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_errno.h96
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_hexdump.h89
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_interrupts.h121
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_launch.h177
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_lcore.h229
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_log.h308
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_malloc_heap.h56
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_memory.h218
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_memzone.h278
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_pci.h305
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_pci_dev_feature_defs.h45
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_pci_dev_features.h44
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_pci_dev_ids.h540
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_per_lcore.h79
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_random.h91
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_rwlock.h158
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_string_fns.h81
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_tailq.h215
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_tailq_elem.h90
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_version.h129
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/common/include/rte_warnings.h84
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/Makefile45
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/Makefile112
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal.c861
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_alarm.c268
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_debug.c113
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_hugepage_info.c359
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_interrupts.c826
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_ivshmem.c968
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_lcore.c191
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_log.c197
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_memory.c1564
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci.c629
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_init.h122
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_uio.c440
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio.c807
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c395
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_thread.c233
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_timer.c344
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_vfio.h55
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/eal_xen_memory.c370
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h108
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h58
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h174
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/igb_uio/Makefile53
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/igb_uio/compat.h116
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/igb_uio/igb_uio.c643
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/Makefile93
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/compat.h21
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/README100
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/COPYING339
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c3665
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h509
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c1160
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h157
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h1380
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h793
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c909
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h91
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c2096
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h80
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c556
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h89
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c526
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h87
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c967
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h75
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h136
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c3405
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h256
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h646
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb.h859
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_debugfs.c29
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c2859
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c260
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c10263
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c848
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_procfs.c363
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c944
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h251
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c437
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h46
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c1482
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h3884
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c1172
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/COPYING339
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h925
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c1296
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h44
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c2314
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h58
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c1158
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h168
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c4083
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h140
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h168
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c2901
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h91
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c2975
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h105
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h132
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c1847
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h137
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_sriov.h74
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h3254
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c938
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h58
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c1246
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h3143
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/kni_dev.h150
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/kni_ethtool.c217
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/kni_fifo.h108
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/kni_misc.c606
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/kni_net.c687
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/kni/kni_vhost.c811
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/xen_dom0/Makefile56
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/xen_dom0/compat.h15
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h107
-rwxr-xr-xsrc/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c781
-rwxr-xr-xsrc/dpdk_lib18/librte_ether/Makefile54
-rwxr-xr-xsrc/dpdk_lib18/librte_ether/rte_eth_ctrl.h458
-rwxr-xr-xsrc/dpdk_lib18/librte_ether/rte_ethdev.c3271
-rwxr-xr-xsrc/dpdk_lib18/librte_ether/rte_ethdev.h3759
-rwxr-xr-xsrc/dpdk_lib18/librte_ether/rte_ether.h340
-rwxr-xr-xsrc/dpdk_lib18/librte_hash/Makefile53
-rwxr-xr-xsrc/dpdk_lib18/librte_hash/rte_fbk_hash.c240
-rwxr-xr-xsrc/dpdk_lib18/librte_hash/rte_fbk_hash.h397
-rwxr-xr-xsrc/dpdk_lib18/librte_hash/rte_hash.c483
-rwxr-xr-xsrc/dpdk_lib18/librte_hash/rte_hash.h310
-rwxr-xr-xsrc/dpdk_lib18/librte_hash/rte_hash_crc.h110
-rwxr-xr-xsrc/dpdk_lib18/librte_hash/rte_jhash.h253
-rwxr-xr-xsrc/dpdk_lib18/librte_ip_frag/Makefile59
-rwxr-xr-xsrc/dpdk_lib18/librte_ip_frag/ip_frag_common.h192
-rwxr-xr-xsrc/dpdk_lib18/librte_ip_frag/ip_frag_internal.c418
-rwxr-xr-xsrc/dpdk_lib18/librte_ip_frag/rte_ip_frag.h353
-rwxr-xr-xsrc/dpdk_lib18/librte_ip_frag/rte_ip_frag_common.c139
-rwxr-xr-xsrc/dpdk_lib18/librte_ip_frag/rte_ipv4_fragmentation.c209
-rwxr-xr-xsrc/dpdk_lib18/librte_ip_frag/rte_ipv4_reassembly.c183
-rwxr-xr-xsrc/dpdk_lib18/librte_ip_frag/rte_ipv6_fragmentation.c215
-rwxr-xr-xsrc/dpdk_lib18/librte_ip_frag/rte_ipv6_reassembly.c222
-rwxr-xr-xsrc/dpdk_lib18/librte_ivshmem/Makefile48
-rwxr-xr-xsrc/dpdk_lib18/librte_ivshmem/rte_ivshmem.c884
-rwxr-xr-xsrc/dpdk_lib18/librte_ivshmem/rte_ivshmem.h165
-rwxr-xr-xsrc/dpdk_lib18/librte_kni/Makefile49
-rwxr-xr-xsrc/dpdk_lib18/librte_kni/rte_kni.c747
-rwxr-xr-xsrc/dpdk_lib18/librte_kni/rte_kni.h306
-rwxr-xr-xsrc/dpdk_lib18/librte_kni/rte_kni_fifo.h93
-rwxr-xr-xsrc/dpdk_lib18/librte_kvargs/Makefile51
-rwxr-xr-xsrc/dpdk_lib18/librte_kvargs/rte_kvargs.c208
-rwxr-xr-xsrc/dpdk_lib18/librte_kvargs/rte_kvargs.h155
-rwxr-xr-xsrc/dpdk_lib18/librte_lpm/Makefile49
-rwxr-xr-xsrc/dpdk_lib18/librte_lpm/rte_lpm.c1017
-rwxr-xr-xsrc/dpdk_lib18/librte_lpm/rte_lpm.h472
-rwxr-xr-xsrc/dpdk_lib18/librte_lpm/rte_lpm6.c892
-rwxr-xr-xsrc/dpdk_lib18/librte_lpm/rte_lpm6.h228
-rwxr-xr-xsrc/dpdk_lib18/librte_malloc/Makefile48
-rwxr-xr-xsrc/dpdk_lib18/librte_malloc/malloc_elem.c321
-rwxr-xr-xsrc/dpdk_lib18/librte_malloc/malloc_elem.h190
-rwxr-xr-xsrc/dpdk_lib18/librte_malloc/malloc_heap.c210
-rwxr-xr-xsrc/dpdk_lib18/librte_malloc/malloc_heap.h65
-rwxr-xr-xsrc/dpdk_lib18/librte_malloc/rte_malloc.c261
-rwxr-xr-xsrc/dpdk_lib18/librte_malloc/rte_malloc.h342
-rwxr-xr-xsrc/dpdk_lib18/librte_mbuf/Makefile48
-rwxr-xr-xsrc/dpdk_lib18/librte_mbuf/rte_mbuf.c252
-rwxr-xr-xsrc/dpdk_lib18/librte_mbuf/rte_mbuf.h1133
-rwxr-xr-xsrc/dpdk_lib18/librte_mempool/Makefile51
-rwxr-xr-xsrc/dpdk_lib18/librte_mempool/rte_dom0_mempool.c134
-rwxr-xr-xsrc/dpdk_lib18/librte_mempool/rte_mempool.c901
-rwxr-xr-xsrc/dpdk_lib18/librte_mempool/rte_mempool.h1392
-rwxr-xr-xsrc/dpdk_lib18/librte_meter/Makefile53
-rwxr-xr-xsrc/dpdk_lib18/librte_meter/rte_meter.c120
-rwxr-xr-xsrc/dpdk_lib18/librte_meter/rte_meter.h387
-rwxr-xr-xsrc/dpdk_lib18/librte_net/Makefile40
-rwxr-xr-xsrc/dpdk_lib18/librte_net/rte_arp.h84
-rwxr-xr-xsrc/dpdk_lib18/librte_net/rte_icmp.h101
-rwxr-xr-xsrc/dpdk_lib18/librte_net/rte_ip.h402
-rwxr-xr-xsrc/dpdk_lib18/librte_net/rte_sctp.h99
-rwxr-xr-xsrc/dpdk_lib18/librte_net/rte_tcp.h104
-rwxr-xr-xsrc/dpdk_lib18/librte_net/rte_udp.h99
-rwxr-xr-xsrc/dpdk_lib18/librte_pipeline/Makefile54
-rwxr-xr-xsrc/dpdk_lib18/librte_pipeline/rte_pipeline.c1373
-rwxr-xr-xsrc/dpdk_lib18/librte_pipeline/rte_pipeline.h664
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_af_packet/Makefile60
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.c846
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.h53
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_bond/Makefile67
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_bond/rte_eth_bond.h359
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.c1216
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.h214
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad_private.h308
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_bond/rte_eth_bond_api.c822
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_bond/rte_eth_bond_args.c279
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_bond/rte_eth_bond_pmd.c1881
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_bond/rte_eth_bond_private.h268
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/Makefile95
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/README39
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.c1514
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.h100
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82540.c717
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.c1268
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.h91
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82542.c588
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.c1553
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.h56
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.c2026
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.h65
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.c3639
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.h520
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.c1357
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.h167
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_defines.h1498
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_hw.h1026
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.c1000
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.h110
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.c5260
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.h313
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.c2247
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.h95
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.c573
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.h95
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.c777
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.h105
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.c1377
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.h98
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.c83
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.h182
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.c4273
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.h327
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_regs.h685
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.c586
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.h295
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000_ethdev.h248
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/e1000_logs.h77
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/em_ethdev.c1532
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/em_rxtx.c1867
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/igb_ethdev.c3164
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/igb_pf.c483
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_e1000/igb_rxtx.c2415
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/LICENSE27
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/Makefile67
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/enic.h197
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/enic_clsf.c241
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/enic_compat.h143
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/enic_ethdev.c612
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/enic_main.c1117
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/enic_res.c219
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/enic_res.h168
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/cq_desc.h126
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/cq_enet_desc.h261
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/rq_enet_desc.h76
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.c117
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.h151
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.c1054
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.h212
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_devcmd.h774
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_enet.h78
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.c78
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.h126
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_nic.h88
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_resource.h97
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.c245
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.h282
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.c85
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.h61
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_stats.h86
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.c245
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.h283
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_enic/vnic/wq_enet_desc.h114
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/Makefile101
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.c1084
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.h157
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq_cmd.h2179
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_alloc.h65
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_common.c4793
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.c479
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.h161
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.c178
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.h61
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.c373
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.h243
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.c1417
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.h200
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_nvm.c940
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_osdep.h197
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_prototype.h430
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_register.h3377
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_status.h107
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_type.h1425
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e/i40e_virtchnl.h373
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.c5456
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.h567
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e_ethdev_vf.c1897
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e_fdir.c1365
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e_logs.h77
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e_pf.c1063
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e_pf.h127
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.c2650
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.h198
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/Makefile117
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/README67
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c1436
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h53
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c2699
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h66
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.c1420
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.h203
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.c4869
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.h182
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c715
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h176
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c361
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h100
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c594
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h154
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c789
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h150
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h156
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c2425
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h176
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_type.h3765
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c725
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h145
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c1038
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h67
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c1809
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h88
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_82599_bypass.c314
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.c414
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.h68
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_api.h299
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_defines.h160
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.c4133
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.h344
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_fdir.c922
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_logs.h78
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_pf.c566
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.c4228
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.h270
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx_vec.c802
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_pcap/Makefile59
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_pcap/rte_eth_pcap.c936
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ring/Makefile57
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ring/rte_eth_ring.c530
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_ring/rte_eth_ring.h63
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_virtio/Makefile57
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.c1209
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.h132
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_virtio/virtio_logs.h70
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_virtio/virtio_pci.c129
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_virtio/virtio_pci.h266
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_virtio/virtio_ring.h163
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_virtio/virtio_rxtx.c747
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_virtio/virtqueue.c70
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_virtio/virtqueue.h283
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/Makefile80
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/README50
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/includeCheck.h40
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/upt1_defs.h117
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_begin.h32
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_end.h32
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_defs.h751
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_osdep.h48
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.c781
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.h177
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_logs.h74
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ring.h183
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_rxtx.c1096
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/Makefile58
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.c716
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.h62
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/rte_mempool_gntalloc.c298
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.c428
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.h113
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/virtio_logs.h70
-rwxr-xr-xsrc/dpdk_lib18/librte_pmd_xenvirt/virtqueue.h279
-rwxr-xr-xsrc/dpdk_lib18/librte_port/Makefile77
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port.h213
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_ethdev.c305
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_ethdev.h86
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_frag.c241
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_frag.h94
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_ras.c252
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_ras.h83
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_ring.c237
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_ring.h82
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_sched.c239
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_sched.h82
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_source_sink.c158
-rwxr-xr-xsrc/dpdk_lib18/librte_port/rte_port_source_sink.h70
-rwxr-xr-xsrc/dpdk_lib18/librte_power/Makefile49
-rwxr-xr-xsrc/dpdk_lib18/librte_power/channel_commands.h77
-rwxr-xr-xsrc/dpdk_lib18/librte_power/guest_channel.c162
-rwxr-xr-xsrc/dpdk_lib18/librte_power/guest_channel.h89
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power.c143
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power.h251
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.c545
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.h192
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power_common.h39
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power_kvm_vm.c136
-rwxr-xr-xsrc/dpdk_lib18/librte_power/rte_power_kvm_vm.h179
-rwxr-xr-xsrc/dpdk_lib18/librte_ring/Makefile48
-rwxr-xr-xsrc/dpdk_lib18/librte_ring/rte_ring.c338
-rwxr-xr-xsrc/dpdk_lib18/librte_ring/rte_ring.h1214
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/Makefile56
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_approx.c196
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_approx.h75
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_bitmap.h563
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_red.c158
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_red.h453
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_sched.c2150
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_sched.h442
-rwxr-xr-xsrc/dpdk_lib18/librte_sched/rte_sched_common.h129
-rwxr-xr-xsrc/dpdk_lib18/librte_table/Makefile82
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_lru.h213
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table.h202
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_acl.c491
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_acl.h95
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_array.c205
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_array.h76
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_hash.h350
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_hash_ext.c1122
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_hash_key16.c1101
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_hash_key32.c1121
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_hash_key8.c1399
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_hash_lru.c1065
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_lpm.c348
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_lpm.h115
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_lpm_ipv6.c362
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_lpm_ipv6.h119
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_stub.c65
-rwxr-xr-xsrc/dpdk_lib18/librte_table/rte_table_stub.h62
-rwxr-xr-xsrc/dpdk_lib18/librte_timer/Makefile48
-rwxr-xr-xsrc/dpdk_lib18/librte_timer/rte_timer.c610
-rwxr-xr-xsrc/dpdk_lib18/librte_timer/rte_timer.h335
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/Makefile50
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/eventfd_link/Makefile39
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.c195
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.h76
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/libvirt/qemu-wrap.py367
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/rte_virtio_net.h215
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/vhost-net-cdev.c389
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/vhost-net-cdev.h113
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/vhost_rxtx.c730
-rwxr-xr-xsrc/dpdk_lib18/librte_vhost/virtio-net.c1163
-rwxr-xr-xsrc/global_io_mode.cpp128
-rwxr-xr-xsrc/global_io_mode.h121
-rwxr-xr-xsrc/gtest/Makefile40
-rwxr-xr-xsrc/gtest/nat_test.cpp5
-rwxr-xr-xsrc/gtest/tuple_gen_test.cpp700
-rwxr-xr-xsrc/l2fwd/main.c715
-rwxr-xr-xsrc/main.cpp783
-rwxr-xr-xsrc/main_dpdk.cpp5041
-rwxr-xr-xsrc/msg_manager.cpp98
-rwxr-xr-xsrc/msg_manager.h109
-rwxr-xr-xsrc/nat_check.cpp197
-rwxr-xr-xsrc/nat_check.h164
-rwxr-xr-xsrc/os_time.cpp125
-rwxr-xr-xsrc/os_time.h134
-rwxr-xr-xsrc/pal/linux/CRing.h98
-rwxr-xr-xsrc/pal/linux/mbuf.cpp425
-rwxr-xr-xsrc/pal/linux/mbuf.h192
-rwxr-xr-xsrc/pal/linux/pal_utl.cpp29
-rwxr-xr-xsrc/pal/linux/pal_utl.h70
-rwxr-xr-xsrc/pal/linux/sanb_atomic.h175
-rwxr-xr-xsrc/pal/linux_dpdk/CRing.h89
-rwxr-xr-xsrc/pal/linux_dpdk/dpdk180/rte_config.h234
-rwxr-xr-xsrc/pal/linux_dpdk/mbuf.cpp75
-rwxr-xr-xsrc/pal/linux_dpdk/mbuf.h83
-rwxr-xr-xsrc/pal/linux_dpdk/pal_utl.cpp29
-rwxr-xr-xsrc/pal/linux_dpdk/pal_utl.h45
-rwxr-xr-xsrc/pal/linux_dpdk/x86_64-default-linuxapp-gcc/include/rte_config.h72
-rwxr-xr-xsrc/platform_cfg.cpp453
-rwxr-xr-xsrc/platform_cfg.h235
-rwxr-xr-xsrc/rx_check.cpp536
-rwxr-xr-xsrc/rx_check.h427
-rwxr-xr-xsrc/rx_check_header.cpp52
-rwxr-xr-xsrc/rx_check_header.h212
-rwxr-xr-xsrc/time_histogram.cpp251
-rwxr-xr-xsrc/time_histogram.h92
-rwxr-xr-xsrc/timer_wheel_pq.cpp355
-rwxr-xr-xsrc/timer_wheel_pq.h134
-rwxr-xr-xsrc/tuple_gen.cpp307
-rwxr-xr-xsrc/tuple_gen.h620
-rwxr-xr-xsrc/utl_cpuu.cpp57
-rwxr-xr-xsrc/utl_cpuu.h76
-rwxr-xr-xsrc/utl_jitter.h86
-rwxr-xr-xsrc/utl_json.cpp59
-rwxr-xr-xsrc/utl_json.h35
-rwxr-xr-xsrc/utl_term_io.cpp98
-rwxr-xr-xsrc/utl_term_io.h33
-rwxr-xr-xsrc/utl_yaml.cpp124
-rwxr-xr-xsrc/utl_yaml.h46
-rwxr-xr-xsrc/zmq/include/zmq.h416
-rwxr-xr-xsrc/zmq/include/zmq_utils.h105
-rwxr-xr-xsrc/zmq/libzmq.abin0 -> 7932556 bytes
-rwxr-xr-xsrc/zmq/libzmq.la41
-rwxr-xr-xsrc/zmq/libzmq.lai41
-rwxr-xr-xsrc/zmq/libzmq.sobin0 -> 3150071 bytes
-rwxr-xr-xsrc/zmq/libzmq.so.3bin0 -> 3150071 bytes
-rwxr-xr-xsrc/zmq/libzmq.so.3.1.0bin0 -> 3150071 bytes
-rwxr-xr-xyaml-cpp/CMakeLists.txt282
-rwxr-xr-xyaml-cpp/include/yaml-cpp/aliasmanager.h34
-rwxr-xr-xyaml-cpp/include/yaml-cpp/anchor.h16
-rwxr-xr-xyaml-cpp/include/yaml-cpp/binary.h66
-rwxr-xr-xyaml-cpp/include/yaml-cpp/contrib/anchordict.h42
-rwxr-xr-xyaml-cpp/include/yaml-cpp/contrib/graphbuilder.h133
-rwxr-xr-xyaml-cpp/include/yaml-cpp/conversion.h75
-rwxr-xr-xyaml-cpp/include/yaml-cpp/dll.h28
-rwxr-xr-xyaml-cpp/include/yaml-cpp/emitfromevents.h45
-rwxr-xr-xyaml-cpp/include/yaml-cpp/emitter.h186
-rwxr-xr-xyaml-cpp/include/yaml-cpp/emittermanip.h149
-rwxr-xr-xyaml-cpp/include/yaml-cpp/eventhandler.h36
-rwxr-xr-xyaml-cpp/include/yaml-cpp/exceptions.h164
-rwxr-xr-xyaml-cpp/include/yaml-cpp/iterator.h40
-rwxr-xr-xyaml-cpp/include/yaml-cpp/ltnode.h18
-rwxr-xr-xyaml-cpp/include/yaml-cpp/mark.h26
-rwxr-xr-xyaml-cpp/include/yaml-cpp/node.h135
-rwxr-xr-xyaml-cpp/include/yaml-cpp/nodeimpl.h85
-rwxr-xr-xyaml-cpp/include/yaml-cpp/nodereadimpl.h86
-rwxr-xr-xyaml-cpp/include/yaml-cpp/nodeutil.h62
-rwxr-xr-xyaml-cpp/include/yaml-cpp/noncopyable.h25
-rwxr-xr-xyaml-cpp/include/yaml-cpp/null.h25
-rwxr-xr-xyaml-cpp/include/yaml-cpp/ostream.h40
-rwxr-xr-xyaml-cpp/include/yaml-cpp/parser.h51
-rwxr-xr-xyaml-cpp/include/yaml-cpp/stlemitter.h51
-rwxr-xr-xyaml-cpp/include/yaml-cpp/stlnode.h38
-rwxr-xr-xyaml-cpp/include/yaml-cpp/traits.h57
-rwxr-xr-xyaml-cpp/include/yaml-cpp/yaml.h23
-rwxr-xr-xyaml-cpp/install.txt24
-rwxr-xr-xyaml-cpp/license.txt19
-rwxr-xr-xyaml-cpp/src/aliasmanager.cpp29
-rwxr-xr-xyaml-cpp/src/binary.cpp102
-rwxr-xr-xyaml-cpp/src/collectionstack.h35
-rwxr-xr-xyaml-cpp/src/contrib/graphbuilder.cpp16
-rwxr-xr-xyaml-cpp/src/contrib/graphbuilderadapter.cpp96
-rwxr-xr-xyaml-cpp/src/contrib/graphbuilderadapter.h73
-rwxr-xr-xyaml-cpp/src/conversion.cpp89
-rwxr-xr-xyaml-cpp/src/directives.cpp24
-rwxr-xr-xyaml-cpp/src/directives.h29
-rwxr-xr-xyaml-cpp/src/emitfromevents.cpp105
-rwxr-xr-xyaml-cpp/src/emitter.cpp882
-rwxr-xr-xyaml-cpp/src/emitterstate.cpp284
-rwxr-xr-xyaml-cpp/src/emitterstate.h217
-rwxr-xr-xyaml-cpp/src/emitterutils.cpp378
-rwxr-xr-xyaml-cpp/src/emitterutils.h32
-rwxr-xr-xyaml-cpp/src/exp.cpp113
-rwxr-xr-xyaml-cpp/src/exp.h196
-rwxr-xr-xyaml-cpp/src/indentation.h38
-rwxr-xr-xyaml-cpp/src/iterator.cpp103
-rwxr-xr-xyaml-cpp/src/iterpriv.h33
-rwxr-xr-xyaml-cpp/src/node.cpp269
-rwxr-xr-xyaml-cpp/src/nodebuilder.cpp145
-rwxr-xr-xyaml-cpp/src/nodebuilder.h61
-rwxr-xr-xyaml-cpp/src/nodeownership.cpp31
-rwxr-xr-xyaml-cpp/src/nodeownership.h39
-rwxr-xr-xyaml-cpp/src/null.cpp12
-rwxr-xr-xyaml-cpp/src/ostream.cpp63
-rwxr-xr-xyaml-cpp/src/parser.cpp152
-rwxr-xr-xyaml-cpp/src/ptr_stack.h46
-rwxr-xr-xyaml-cpp/src/ptr_vector.h47
-rwxr-xr-xyaml-cpp/src/regex.cpp60
-rwxr-xr-xyaml-cpp/src/regex.h67
-rwxr-xr-xyaml-cpp/src/regeximpl.h186
-rwxr-xr-xyaml-cpp/src/scanner.cpp387
-rwxr-xr-xyaml-cpp/src/scanner.h132
-rwxr-xr-xyaml-cpp/src/scanscalar.cpp214
-rwxr-xr-xyaml-cpp/src/scanscalar.h45
-rwxr-xr-xyaml-cpp/src/scantag.cpp84
-rwxr-xr-xyaml-cpp/src/scantag.h20
-rwxr-xr-xyaml-cpp/src/scantoken.cpp439
-rwxr-xr-xyaml-cpp/src/setting.h105
-rwxr-xr-xyaml-cpp/src/simplekey.cpp139
-rwxr-xr-xyaml-cpp/src/singledocparser.cpp381
-rwxr-xr-xyaml-cpp/src/singledocparser.h65
-rwxr-xr-xyaml-cpp/src/stream.cpp448
-rwxr-xr-xyaml-cpp/src/stream.h79
-rwxr-xr-xyaml-cpp/src/streamcharsource.h48
-rwxr-xr-xyaml-cpp/src/stringsource.h47
-rwxr-xr-xyaml-cpp/src/tag.cpp52
-rwxr-xr-xyaml-cpp/src/tag.h28
-rwxr-xr-xyaml-cpp/src/token.h85
-rwxr-xr-xyaml-cpp/test/CMakeLists.txt15
-rwxr-xr-xyaml-cpp/test/emittertests.cpp1148
-rwxr-xr-xyaml-cpp/test/emittertests.h13
-rwxr-xr-xyaml-cpp/test/main.cpp7
-rwxr-xr-xyaml-cpp/test/nodetests.h13
-rwxr-xr-xyaml-cpp/test/old-api/parsertests.cpp1237
-rwxr-xr-xyaml-cpp/test/old-api/spectests.cpp1456
-rwxr-xr-xyaml-cpp/test/parsertests.h13
-rwxr-xr-xyaml-cpp/test/specexamples.h850
-rwxr-xr-xyaml-cpp/test/spectests.cpp149
-rwxr-xr-xyaml-cpp/test/spectests.h360
-rwxr-xr-xyaml-cpp/test/tests.cpp30
-rwxr-xr-xyaml-cpp/test/tests.h53
-rwxr-xr-xyaml-cpp/util/CMakeLists.txt2
-rwxr-xr-xyaml-cpp/util/api.cpp129
-rwxr-xr-xyaml-cpp/util/parse.cpp65
-rwxr-xr-xyaml-cpp/yaml-cpp.pc.cmake11
1261 files changed, 417257 insertions, 0 deletions
diff --git a/CHANGES b/CHANGES
new file mode 100755
index 00000000..cfedc005
--- /dev/null
+++ b/CHANGES
@@ -0,0 +1 @@
+1.72 Initial release
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
new file mode 100755
index 00000000..06929302
--- /dev/null
+++ b/CONTRIBUTORS
@@ -0,0 +1,5 @@
+Hanoh Haim
+Dave Johnson
+Wenxian Li
+Dan Klein
+
diff --git a/LICENSE b/LICENSE
new file mode 100755
index 00000000..d6456956
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.asciidoc b/README.asciidoc
new file mode 100755
index 00000000..a5e819ff
--- /dev/null
+++ b/README.asciidoc
@@ -0,0 +1,54 @@
+
+== TRex Low-Cost, High-Speed Stateful, Traffic Generator
+
+Traditionally, network infrastructure devices have been tested using commercial traffic generators, while performance was measured using metrics like packets per second (PPS) and No Drop Rate (NDR). As network infrastructure functionality has become more complex, stateful traffic generators have become necessary in order to test with more realistic application traffic scenarios.
+Realistic, stateful traffic generators are needed in order to:
+
+* Test and provide more realistic performance numbers
+* Design and architect SW and HW based on realistic use cases
+
+=== Current Challenges
+
+* *Cost* : Commercial stateful traffic generators are expensive
+* *Scale* : Bandwidth does not scale up well with feature complexity
+* *Standardization* : Lack of standardization of traffic patterns and methodologies
+* *Flexibility* : Commercial tools do not provide the agility needed when flexibility and frequent changes are required
+
+=== Implications
+
+* High capital expenditure (capex) spent by different teams
+* Testing at low scale and extrapolating became common practice; it is not accurate and hides real-life bottlenecks and quality issues
+* Different benchmark and results methodologies across feature / platform teams
+* Delays in development and testing due to dependency on test-tool features
+* Resource and effort investment in developing different ad hoc tools and test methodologies
+
+TRex addresses these problems through an innovative and extendable software implementation and by leveraging standard and open SW and x86/UCS HW.
+
+=== TRex in a Nutshell
+
+* Generates and analyzes L4-7 traffic, providing in one tool capabilities offered by commercial L7 tools.
+* Stateful traffic generator based on pre-processing and smart replay of real traffic templates.
+* Generates and *amplifies* both client and server side traffic.
+* Customized functionality can be added.
+* Scales to 200Gb/sec for one UCS (using Intel 40Gb/sec NICs)
+* Low cost
+* Virtual interface support enables TRex to be used in a fully virtual environment without physical NICs, for example in the following use cases:
+** Amazon AWS
+** Cisco LaaS
+** TRex on your laptop
+** Self-contained packaging that can be easily installed and deployed
+
+=== Current TRex Feature sets
+
+* Supports Intel DPDK 1/10/40Gbps interfaces
+* High scale of realistic traffic (number of clients, number of servers, bandwidth) - up to 200Gb/sec in one UCS
+* Virtualization interface support (VMXNET3/E1000)
+* Some stateless support, for example IMIX traffic generation
+* Latency/Jitter measurements
+* Flow ordering checks
+* NAT, PAT dynamic translation learning
+* IPv6 inline replacement
+* Some cross-flow support (e.g. RTSP/SIP)
+
+*Cisco Systems, Inc.*
+
diff --git a/TODO b/TODO
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/TODO
@@ -0,0 +1 @@
+
diff --git a/VERSION b/VERSION
new file mode 100755
index 00000000..3566bb74
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+v1.72
diff --git a/scripts/automation/config/trex-dan.cfg b/scripts/automation/config/trex-dan.cfg
new file mode 100755
index 00000000..110f22e9
--- /dev/null
+++ b/scripts/automation/config/trex-dan.cfg
@@ -0,0 +1,33 @@
+# Configuration for the trex-dan TRex machine
+#
+# machine_name - can be a DNS name or IP of the TRex machine, used to ssh to the box
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should the TRex inject with -p ?
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by the TRex
+# misc_params - [Optional] misc parameters to be passed to the TRex
+
+[trex]
+machine_name=trex-dan
+machine_port=8090
+history_size=100
+machine_type=1G
+config_file=
+is_dual=yes
+cores=2
+limit_ports=2
+latency=1000
+latency_condition=10000
+misc_params=
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface=10.56.199.247
+password=lab
+
diff --git a/scripts/automation/config/trex-dev3.cfg b/scripts/automation/config/trex-dev3.cfg
new file mode 100755
index 00000000..0d0801e9
--- /dev/null
+++ b/scripts/automation/config/trex-dev3.cfg
@@ -0,0 +1,34 @@
+# Configuration for the trex-dev3 TRex machine
+#
+# machine_name - can be a DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the t-rex version and executable
+# exec - executable name (which will be under the version_path)
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by the TRex
+# misc_params - [Optional] misc parameters to be passed to the TRex
+
+[trex]
+machine_name=trex-dev3
+machine_type=10G
+config_file=
+is_dual=yes
+version_path=/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.52
+exec=t-rex-64
+cores=2
+limit_ports=2
+latency=1000
+# misc_params="--nc"
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface= 10.56.128.198
+password=lab
diff --git a/scripts/automation/config/trex-esp80-hhaim.cfg b/scripts/automation/config/trex-esp80-hhaim.cfg
new file mode 100755
index 00000000..fa5414d4
--- /dev/null
+++ b/scripts/automation/config/trex-esp80-hhaim.cfg
@@ -0,0 +1,31 @@
+# Configuration for the trex-esp80-hhaim TRex machine
+#
+# machine_name - can be a DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# machine_type - 1G or 10G TRex machine
+# config_file - configuration file for TRex, can be "" if default
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the t-rex version and executable
+# exec - executable name (which will be under the version_path)
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by the TRex
+
+[trex]
+machine_name=csi-kiwi-02
+machine_type=10G
+config_file=
+is_dual=yes
+version_path=/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.32/
+exec=t-rex-64
+limit_ports=4
+latency=1000
+misc_params=
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+interface=10.56.192.57
+password=cisco
diff --git a/scripts/automation/config/trex-hhaim.cfg b/scripts/automation/config/trex-hhaim.cfg
new file mode 100755
index 00000000..44eba6f2
--- /dev/null
+++ b/scripts/automation/config/trex-hhaim.cfg
@@ -0,0 +1,33 @@
+# Configuration for the trex-hhaim TRex machine
+#
+# machine_name - can be a DNS name or IP of the TRex machine, used to ssh to the box
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should the TRex inject with -p ?
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by the TRex
+# misc_params - [Optional] misc parameters to be passed to the TRex
+
+[trex]
+machine_name=10.56.217.210
+machine_port=8090
+history_size=100
+machine_type=10G
+config_file=
+is_dual=yes
+cores=4
+limit_ports=4
+latency=1000
+latency_condition=1000
+misc_params=
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface=10.56.192.57
+password=cisco
+
diff --git a/scripts/automation/config/trex01-1g.cfg b/scripts/automation/config/trex01-1g.cfg
new file mode 100755
index 00000000..98953cae
--- /dev/null
+++ b/scripts/automation/config/trex01-1g.cfg
@@ -0,0 +1,35 @@
+# Configuration for the trex01-1g TRex machine
+#
+# machine_name - can be a DNS name or IP of the TRex machine, used to ssh to the box
+# password - root password for the TRex machine
+# machine_type - 1G or 10G TRex machine
+# config_file - [Optional] configuration file for TRex if needed
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the t-rex version and executable
+# exec - executable name (which will be under the version_path)
+# cores - how many cores should be used
+# limit_ports - how many ports should be used
+# latency - rate of latency packets injected by the TRex
+# misc_params - [Optional] misc parameters to be passed to the TRex
+
+[trex]
+machine_name=trex01-1g
+password=password
+machine_type=1G
+config_file=
+is_dual=yes
+version_path=/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.35
+exec=t-rex-64
+cores=2
+limit_ports=2
+latency=1000
+misc_params="--nc"
+
+# Configuration for the router connected to the TRex
+#
+# interface - interface that can be used to communicate with the router
+
+[router]
+type=ASR
+interface=10.56.30.49
+password=cisco
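The comment headers above document the key set shared by all of these automation configs (machine_name, machine_type, cores, limit_ports, latency, and so on). As a hedged illustration only - the loader actually used by the automation scripts is not shown in this section - the sketch below reads such a file with the standard-library ConfigParser; the file path, the selected keys, and the type conversions are assumptions made for the example.

# Illustrative only: parse a TRex automation .cfg file in the format shown above.
# This is not the parser used by trex_perf.py; path and conversions are assumed.
try:
    import configparser                  # Python 3
except ImportError:
    import ConfigParser as configparser  # Python 2

def load_trex_config(path):
    """Return the [trex] and [router] sections of a TRex automation config as dicts."""
    cfg = configparser.ConfigParser()
    cfg.read(path)
    trex = dict(cfg.items('trex'))
    router = dict(cfg.items('router'))
    # All values come back as strings; convert the few fields the example relies on.
    trex['cores'] = int(trex.get('cores', 1))
    trex['limit_ports'] = int(trex.get('limit_ports', 2))
    trex['is_dual'] = trex.get('is_dual', 'no').lower() == 'yes'
    return trex, router

if __name__ == '__main__':
    trex, router = load_trex_config('config/trex01-1g.cfg')
    print(trex['machine_name'] + ' -> router ' + router['interface'])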
diff --git a/scripts/automation/graph_template.html b/scripts/automation/graph_template.html
new file mode 100755
index 00000000..984fbc49
--- /dev/null
+++ b/scripts/automation/graph_template.html
@@ -0,0 +1,80 @@
+
+<html>
+ <head>
+ <script type="text/javascript" src="https://www.google.com/jsapi"></script>
+ <script type="text/javascript">
+ google.load("visualization", "1", {packages:["corechart"]});
+ google.load("visualization", "1", {packages:["table"]});
+ google.setOnLoadCallback(drawChart);
+ function drawChart() {
+
+ var cpu_data = google.visualization.arrayToDataTable([
+ ['Bandwidth [Mbps]', 'CPU [%]', 'Max. Latency [usec]', 'Avg. Latency [usec]'],
+ !@#$template_fill_graph!@#$
+ ])
+
+ var cpu_options = {
+ title: '!@#$template_fill_head!@#$',
+ hAxis: { title: 'Bandwidth [Mbps]', format:'#.##'},
+ vAxes:[
+ {title: 'CPU Util [%]',format:'#%', minValue:0, maxValue: 1}, // Left axis
+ {title: 'Latency [usec]'}, // Right axis
+ ],
+ series: {0: {targetAxisIndex:0},
+ 1: {targetAxisIndex:1},
+ 2: {targetAxisIndex:1},
+ },
+ colors: ["green", "red", "blue"],
+ };
+
+ var chart = new google.visualization.LineChart(document.getElementById('chart_div'));
+
+ chart.draw(cpu_data, cpu_options);
+
+ var plot_data = new google.visualization.DataTable();
+ plot_data.addColumn('number', 'BW [Mbps]');
+ plot_data.addColumn('number', 'PPS [Kpps]');
+ plot_data.addColumn('number', 'CPU Util. [%]');
+ plot_data.addColumn('number', 'BW / CPU');
+ plot_data.addColumn('number', 'Max. Latency [usec]');
+ plot_data.addColumn('number', 'Avg. Latency [usec]');
+ plot_data.addColumn('number', 'Pkt Drop [pkts]');
+ plot_data.addRows([
+ !@#$template_fill_table!@#$
+ ]);
+
+ var formatter = new google.visualization.NumberFormat(
+ {fractionDigits:2});
+ formatter.format(plot_data, 0); // Apply formatter to Bandwidth util column
+
+ var formatter = new google.visualization.NumberFormat(
+ {fractionDigits: 0});
+ formatter.format(plot_data, 1); // Apply formatter to PPS column
+
+ formatter = new google.visualization.NumberFormat(
+ {pattern:'#,###%'});
+ formatter.format(plot_data, 2); // Apply formatter to CPU util column
+
+ formatter = new google.visualization.NumberFormat(
+ {fractionDigits: 2});
+ formatter.format(plot_data, 3); // Apply formatter to BW / CPU column
+
+ formatter = new google.visualization.NumberFormat(
+ {fractionDigits: 0});
+        formatter.format(plot_data, 4); // Apply formatter to Max Latency column
+        formatter.format(plot_data, 5); // Apply formatter to Avg Latency column
+ formatter.format(plot_data, 6); // Apply formatter to Pkt Drop
+
+ var table = new google.visualization.Table(document.getElementById('table_div'));
+
+ table.draw(plot_data, {showRowNumber: true});
+ }
+
+ </script>
+ </head>
+ <body>
+ <div id="chart_div" style="width: 900px; height: 500px; position: relative;"></div>
+ <div id="table_div" style="display: table"></div>
+ </body>
+</html>
+
diff --git a/scripts/automation/h_avc.py b/scripts/automation/h_avc.py
new file mode 100755
index 00000000..75548d92
--- /dev/null
+++ b/scripts/automation/h_avc.py
@@ -0,0 +1,195 @@
+#!/router/bin/python-2.4.3
+import time,os, sys, string
+from os.path import exists
+from os import system, remove, chdir
+import re
+import time
+import random
+import copy
+import telnetlib
+import datetime
+import collections
+from trex_perf import TrexRunException
+
+
+import random
+import time
+
+class RouterIOException(Exception):
+ def __init__ (self, reason):
+ # generate the error message
+ #self.message = "\nSummary of error:\n\n %s\n" % (reason)
+ self.message = reason
+
+ def __str__(self):
+ return self.message
+
+# basic router class
+class Router:
+ def __init__ (self, host, port, password, str_wait = "#"):
+ self.host = host
+ self.port = port;
+ self.pr = str_wait;
+ self.password = password
+ self.to = 60
+ self.cpu_util_histo = []
+
+ # private function - command send
+ def _command (self, command, match = None, timeout = None):
+ to = timeout if (timeout != None) else self.to
+ m = match if (match != None) else [self.pr]
+
+ if not isinstance(m, list):
+ m = [m]
+
+ total_to = 0
+ while True:
+ self.tn.write(command + "\n")
+ ret = self.tn.expect(m, timeout = 2)
+ total_to += 2
+
+ if ret[0] != -1:
+ result = {}
+ result['match_index'] = ret[0]
+ result['output'] = ret[2]
+ return (result)
+
+ if total_to >= self.to:
+ raise RouterIOException("Failed to process command to router %s" % command)
+
+ # connect to router by telnet
+ def connect (self):
+ # create telnet session
+ self.tn = telnetlib.Telnet ()
+
+ try:
+ self.tn.open(self.host, self.port)
+ except IOError:
+ raise RouterIOException("Failed To Connect To Router interface at '%s' : '%s'" % (self.host, self.port))
+
+ # get a ready console and decides if you need password
+ ret = self._command("", ["Password", ">", "#"])
+ if ret['match_index'] == 0:
+ self._command(self.password, [">", "#"])
+
+ # can't hurt to call enable even if on enable
+ ret = self._command("enable 15", ["Password", "#"])
+ if (ret['match_index'] == 0):
+ self._command(self.password, "#")
+
+ self._command("terminal length 0")
+
+ def close (self):
+ self.tn.close ()
+ self.tn = None
+
+ # implemented through derived classes
+ def sample_cpu (self):
+ raise Exception("abstract method called")
+
+ def get_last_cpu_util (self):
+ if not self.cpu_util_histo:
+ return (0)
+ else:
+ return self.cpu_util_histo[len(self.cpu_util_histo) - 1]
+
+ def get_cpu_util_histo (self):
+ return self.cpu_util_histo
+
+ def get_filtered_cpu_util_histo (self):
+ trim_start = int(0.15 * len(self.cpu_util_histo))
+
+ filtered = self.cpu_util_histo[trim_start:]
+ if not filtered:
+ return [0]
+
+ m = collections.Counter(filtered).most_common(n = 1)[0][0]
+ #m = max(self.cpu_util_histo)
+ filtered = [x for x in filtered if (x > (0.9*m))]
+ return filtered
+
+ def clear_sampling_stats (self):
+ self.cpu_util_histo = []
+
+
+ # add a sample to the database
+ def sample_stats (self):
+ # sample CPU util
+ cpu_util = self.sample_cpu()
+ self.cpu_util_histo.append(cpu_util)
+
+ def get_stats (self):
+ stats = {}
+
+ filtered_cpu_util = self.get_filtered_cpu_util_histo()
+
+ if not filtered_cpu_util:
+ stats['cpu_util'] = 0
+ else:
+ stats['cpu_util'] = sum(filtered_cpu_util)/len(filtered_cpu_util)
+
+ stats['cpu_histo'] = self.get_cpu_util_histo()
+
+ return stats
+
+
+class ASR1k(Router):
+ def __init__ (self, host, password, port, str_wait = "#"):
+        Router.__init__(self, host, port, password, str_wait)
+
+ def sample_cpu (self):
+ cpu_show_cmd = "show platform hardware qfp active datapath utilization | inc Load"
+ output = self._command(cpu_show_cmd)['output']
+ lines = output.split('\n');
+
+ cpu_util = -1.0
+ # search for the line
+ for l in lines:
+ m = re.match("\W*Processing: Load\D*(\d+)\D*(\d+)\D*(\d+)\D*(\d+)\D*", l)
+ if m:
+ cpu_util = float(m.group(1))
+
+ if (cpu_util == -1.0):
+ raise Exception("cannot determine CPU util. for asr1k")
+
+ return cpu_util
+
+
+class ISR(Router):
+ def __init__ (self, host, password, port, str_wait = "#"):
+        Router.__init__(self, host, port, password, str_wait)
+
+ def sample_cpu (self):
+ cpu_show_cmd = "show processes cpu sorted | inc CPU utilization"
+ output = self._command(cpu_show_cmd)['output']
+ lines = output.split('\n');
+
+ cpu_util = -1.0
+
+ # search for the line
+ for l in lines:
+ m = re.match("\W*CPU utilization for five seconds: (\d+)%/(\d+)%", l)
+ if m:
+ max_cpu_util = float(m.group(1))
+ min_cpu_util = float(m.group(2))
+ cpu_util = (min_cpu_util + max_cpu_util)/2
+
+ if (cpu_util == -1.0):
+ raise Exception("cannot determine CPU util. for ISR")
+
+ return cpu_util
+
+
+
+if __name__ == "__main__":
+ #router = ASR1k("pqemb19ts", "cisco", port=2052)
+    router = ISR("10.56.198.7", "lab", port=23)  # port assumed: standard telnet
+ router.connect()
+ for i in range(1, 10):
+ router.sample_stats()
+ time.sleep(1)
+
+
+
+
+
diff --git a/scripts/automation/phantom/phantomjs b/scripts/automation/phantom/phantomjs
new file mode 100755
index 00000000..af9e4ab1
--- /dev/null
+++ b/scripts/automation/phantom/phantomjs
Binary files differ
diff --git a/scripts/automation/phantom/rasterize.js b/scripts/automation/phantom/rasterize.js
new file mode 100755
index 00000000..165bcfa7
--- /dev/null
+++ b/scripts/automation/phantom/rasterize.js
@@ -0,0 +1,32 @@
+var page = require('webpage').create(),
+ system = require('system'),
+ address, output, size;
+
+if (system.args.length < 3 || system.args.length > 5) {
+ console.log('Usage: rasterize.js URL filename [paperwidth*paperheight|paperformat] [zoom]');
+ console.log(' paper (pdf output) examples: "5in*7.5in", "10cm*20cm", "A4", "Letter"');
+ phantom.exit(1);
+} else {
+ address = system.args[1];
+ output = system.args[2];
+ page.viewportSize = { width: 600, height: 600 };
+ if (system.args.length > 3 && system.args[2].substr(-4) === ".pdf") {
+ size = system.args[3].split('*');
+ page.paperSize = size.length === 2 ? { width: size[0], height: size[1], margin: '0px' }
+ : { format: system.args[3], orientation: 'portrait', margin: '1cm' };
+ }
+ if (system.args.length > 4) {
+ page.zoomFactor = system.args[4];
+ }
+ page.open(address, function (status) {
+ if (status !== 'success') {
+ console.log('Unable to load the address!');
+ phantom.exit();
+ } else {
+ window.setTimeout(function () {
+ page.render(output);
+ phantom.exit();
+ }, 200);
+ }
+ });
+}
diff --git a/scripts/automation/readme.txt b/scripts/automation/readme.txt
new file mode 100755
index 00000000..2541a1a3
--- /dev/null
+++ b/scripts/automation/readme.txt
@@ -0,0 +1,15 @@
+README - trex_perf.py
+=====================
+
+This script uses the T-Rex RESTful client-server control plane architecture and tries to find the maximum M (platform factor) for TRex before hitting one of two stopping conditions:
+(*) Packet drops
+(*) High latency.
+    Since high latency can vary from one platform to another, and might suffer from a kickoff peak (especially on VMs), it is the user's responsibility to provide the latency condition.
+    A common value for non-VM machines is 1000, while on VM machines values around 5000 are more common.
+
+Please note that the '-f' and '-c' options are mandatory.
+
+Also, it is the user's responsibility to make sure a T-Rex server is running and listening for the client requests coming from this script.
+
+Example of finding the max M (between 10 and 100) with the imix_fast_1g.yaml traffic profile:
+./trex_perf.py -m 10 100 -c config/trex-hhaim.cfg all drop -f cap2/imix_fast_1g.yaml
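For orientation, the sketch below shows one way such a search could be structured: find the largest M between the given bounds that neither drops packets nor exceeds the latency condition. The run_trial callback, the binary-search strategy, and the step count are assumptions made for illustration; the actual procedure is implemented in trex_perf.py and is not reproduced here.

# Illustrative sketch only: search for the maximum stable M in a range, as
# described in the readme above. run_trial() is a hypothetical callback that
# runs one TRex iteration at the given multiplier and reports drops/latency.
def find_max_m(run_trial, m_low, m_high, latency_condition, steps=10):
    """Binary-search the largest multiplier that stays below both stop conditions."""
    best = None
    for _ in range(steps):
        m = (m_low + m_high) / 2.0
        drops, max_latency = run_trial(m)
        if drops == 0 and max_latency < latency_condition:
            best = m      # passed: try a higher multiplier
            m_low = m
        else:
            m_high = m    # failed: back off
    return best

# Hypothetical usage, mirroring the bounds and latency condition in the example:
# best_m = find_max_m(run_trial, m_low=10, m_high=100, latency_condition=1000)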
diff --git a/scripts/automation/report_template.html b/scripts/automation/report_template.html
new file mode 100755
index 00000000..779d5429
--- /dev/null
+++ b/scripts/automation/report_template.html
@@ -0,0 +1,96 @@
+<!DOCTYPE html>
+<html>
+
+<head>
+
+<style>
+ html{overflow-y:scroll;}
+body
+{
+font-size:12px;
+color:#000000;
+background-color:#ffffff;
+margin:0px;
+background-image:url('/images/gradientfromtop.gif');
+background-repeat:repeat-x;
+}
+body,p,h1,h2,h3,h4,table,td,th,ul,ol,textarea,input
+{
+font-family:verdana,helvetica,arial,sans-serif;
+}
+h1 {font-size:190%;margin-top:0px;font-weight:normal}
+h2 {font-size:160%;margin-top:10px;margin-bottom:10px;font-weight:normal}
+h3 {font-size:120%;font-weight:normal}
+h4 {font-size:100%;}
+h5 {font-size:90%;}
+h6 {font-size:80%;}
+h1,h2,h3,h4,h5,h6
+{
+background-color:transparent;
+color:#000000;
+}
+table.myWideTable
+{
+background-color:#ffffff;
+border:1px solid #c3c3c3;
+border-collapse:collapse;
+width:100%;
+}
+table.myWideTable th
+{
+background-color:#e5eecc;
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+text-align:left;
+}table.myWideTable td
+{
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+}table.myTable
+{
+background-color:#ffffff;
+border:1px solid #c3c3c3;
+border-collapse:collapse;
+width:50%;
+}
+table.myTable th
+{
+background-color:#e5eecc;
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+text-align:left;
+}table.myTable td
+{
+border:1px solid #c3c3c3;
+padding:3px;
+vertical-align:top;
+}
+ </style>
+
+
+</head>
+
+<body>
+
+<H1>
+T-Rex Performance Report
+</H1>
+
+<H2>
+Job Setup
+</H2>
+
+!@#$template_fill_job_setup_table!@#$
+
+<H2>
+Job Summary
+</H2>
+
+!@#$template_fill_job_summary_table!@#$
+
+</body>
+</html>
+
diff --git a/scripts/automation/sshpass.exp b/scripts/automation/sshpass.exp
new file mode 100755
index 00000000..f27210c8
--- /dev/null
+++ b/scripts/automation/sshpass.exp
@@ -0,0 +1,15 @@
+#!/usr/cisco/bin/expect -f
+# ./ssh.exp password 192.168.1.11 id *
+set pass [lrange $argv 0 0]
+set server [lrange $argv 1 1]
+set name [lrange $argv 2 2]
+set cmd [lrange $argv 3 10]
+
+set cmd_str [join $cmd]
+
+spawn ssh $name@$server $cmd_str
+match_max 100000
+expect "*?assword:*"
+send -- "$pass\r"
+send -- "\r"
+interact
diff --git a/scripts/automation/trex_control_plane/__init__.py b/scripts/automation/trex_control_plane/__init__.py
new file mode 100755
index 00000000..d3f5a12f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/__init__.py
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/client/__init__.py b/scripts/automation/trex_control_plane/client/__init__.py
new file mode 100755
index 00000000..e1d24710
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client/__init__.py
@@ -0,0 +1 @@
+__all__ = ["trex_client"]
diff --git a/scripts/automation/trex_control_plane/client/outer_packages.py b/scripts/automation/trex_control_plane/client/outer_packages.py
new file mode 100755
index 00000000..a7c34e48
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client/outer_packages.py
@@ -0,0 +1,29 @@
+#!/router/bin/python
+
+import sys,site
+import platform,os
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, 'python_lib'))
+
+
+CLIENT_MODULES = ['enum34-1.0.4',
+ # 'jsonrpclib-0.1.3',
+ 'jsonrpclib-pelix-0.2.5',
+ 'termstyle',
+ 'rpc_exceptions-0.1'
+ ]
+
+def import_client_modules ():
+ sys.path.append(ROOT_PATH)
+ import_module_list(CLIENT_MODULES)
+
+def import_module_list (modules_list):
+ assert(isinstance(modules_list, list))
+ for p in modules_list:
+ full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
+ fix_path = os.path.normcase(full_path) #CURRENT_PATH+p)
+ site.addsitedir(full_path)
+
+import_client_modules()
diff --git a/scripts/automation/trex_control_plane/client/trex_adv_client.py b/scripts/automation/trex_control_plane/client/trex_adv_client.py
new file mode 100755
index 00000000..b3fe3dad
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client/trex_adv_client.py
@@ -0,0 +1,70 @@
+#!/router/bin/python
+
+import trex_client
+from jsonrpclib import ProtocolError, AppError
+
+class CTRexAdvClient(trex_client.CTRexClient):
+ def __init__ (self, trex_host, max_history_size = 100, trex_daemon_port = 8090, trex_zmq_port = 4500, verbose = False):
+ super(CTRexAdvClient, self).__init__(trex_host, max_history_size, trex_daemon_port, trex_zmq_port, verbose)
+ pass
+
+ # T-REX KIWI advanced methods
+ def start_quick_trex(self, pcap_file, d, delay, dual, ipv6, times, interfaces):
+ try:
+ return self.server.start_quick_trex(pcap_file = pcap_file, duration = d, dual = dual, delay = delay, ipv6 = ipv6, times = times, interfaces = interfaces)
+ except AppError as err:
+ self.__handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def stop_quick_trex(self):
+ try:
+ return self.server.stop_quick_trex()
+ except AppError as err:
+ self.__handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+# def is_running(self):
+# pass
+
+ def get_running_stats(self):
+ try:
+ return self.server.get_running_stats()
+ except AppError as err:
+ self.__handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def clear_counters(self):
+ try:
+ return self.server.clear_counters()
+ except AppError as err:
+ self.__handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+
+if __name__ == "__main__":
+ trex = CTRexAdvClient('trex-dan', trex_daemon_port = 8383, verbose = True)
+ print trex.start_quick_trex(delay = 10,
+ dual = True,
+ d = 20,
+ interfaces = ["gig0/0/1", "gig0/0/2"],
+ ipv6 = False,
+ pcap_file="avl/http_browsing.pcap",
+ times=3)
+ print trex.stop_quick_trex()
+ print trex.get_running_stats()
+ print trex.clear_counters()
+ pass
+
+
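The base client defined in the next file (trex_client.py) is what scripts such as trex_perf.py drive. Below is a minimal, hypothetical usage sketch that uses only the constructor and start_trex(), whose signatures appear in that file; the host name, port, and traffic profile path are placeholders taken from the example configs and readme in this commit, and the result-polling methods of the full client are intentionally not shown.

# Illustrative only: driving the TRex daemon with the base client from trex_client.py.
from client.trex_client import CTRexClient

client = CTRexClient('trex-dan', trex_daemon_port=8090, verbose=True)
# Ask the daemon to launch a 60-second run with a traffic profile present on the server.
started = client.start_trex(f='cap2/imix_fast_1g.yaml', d=60)
print('run started: %s' % started)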
diff --git a/scripts/automation/trex_control_plane/client/trex_client.py b/scripts/automation/trex_control_plane/client/trex_client.py
new file mode 100755
index 00000000..1f297538
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client/trex_client.py
@@ -0,0 +1,1066 @@
+#!/router/bin/python
+
+import sys
+import os
+
+try:
+ # support import for Python 2
+ import outer_packages
+except ImportError:
+ # support import for Python 3
+ import client.outer_packages
+import jsonrpclib
+from jsonrpclib import ProtocolError, AppError
+from common.trex_status_e import TRexStatus
+from common.trex_exceptions import *
+from common.trex_exceptions import exception_handler
+from client_utils.general_utils import *
+from enum import Enum
+import socket
+import errno
+import time
+import re
+import copy
+import binascii
+from collections import deque
+from json import JSONDecoder
+from distutils.util import strtobool
+
+
+
+class CTRexClient(object):
+ """
+    This class defines the client side of the RESTful interaction with T-Rex
+ """
+
+ def __init__(self, trex_host, max_history_size = 100, trex_daemon_port = 8090, trex_zmq_port = 4500, verbose = False):
+ """
+        Instantiate a T-Rex client object and connect it to the listening daemon server
+
+ :parameters:
+ trex_host : str
+                a string of the T-Rex IP address or hostname.
+ max_history_size : int
+ a number to set the maximum history size of a single T-Rex run. Each sampling adds a new item to history.
+
+ default value : **100**
+ trex_daemon_port : int
+                the port number on which the trex-daemon server can be reached
+
+ default value: **8090**
+ trex_zmq_port : int
+                the port number on which trex's zmq module will interact with the daemon server
+
+ default value: **4500**
+ verbose : bool
+                sets verbose output on supported class methods.
+
+ default value : **False**
+
+ :raises:
+ socket errors, in case server could not be reached.
+
+ """
+ self.trex_host = trex_host
+ self.trex_daemon_port = trex_daemon_port
+ self.trex_zmq_port = trex_zmq_port
+ self.seq = None
+ self.verbose = verbose
+ self.result_obj = CTRexResult(max_history_size)
+ self.decoder = JSONDecoder()
+ self.trex_server_path = "http://{hostname}:{port}/".format( hostname = trex_host, port = trex_daemon_port )
+ self.__verbose_print("Connecting to T-Rex @ {trex_path} ...".format( trex_path = self.trex_server_path ) )
+ self.history = jsonrpclib.history.History()
+ self.server = jsonrpclib.Server(self.trex_server_path, history = self.history)
+ self.check_server_connectivity()
+ self.__verbose_print("Connection established successfully!")
+ self._last_sample = time.time()
+ self.__default_user = get_current_user()
+
+
+ def add (self, x, y):
+ try:
+ return self.server.add(x,y)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def start_trex (self, f, d, block_to_success = True, timeout = 30, user = None, **trex_cmd_options):
+ """
+ Request to start a T-Rex run on server.
+
+ :parameters:
+ f : str
+ a path (on server) for the injected traffic data (.yaml file)
+ d : int
+                the desired duration of the test, in seconds. Must be at least 30 seconds long.
+ block_to_success : bool
+ determine if this method blocks until T-Rex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ default value : **True**
+ timeout : int
+ maximum time (in seconds) to wait in blocking state until T-Rex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ default value: **30**
+ user : str
+                the identity of the run issuer.
+ trex_cmd_options : key, val
+                sets desired T-Rex options using key=val syntax, separated by commas.
+ for keys with no value, state key=True
+
+ :return:
+ **True** on success
+
+ :raises:
+            + :exc:`ValueError`, in case the 'd' parameter was provided with an invalid value.
+ + :exc:`trex_exceptions.TRexError`, in case one of the trex_cmd_options raised an exception at server.
+ + :exc:`trex_exceptions.TRexInUseError`, in case T-Rex is already taken.
+            + :exc:`trex_exceptions.TRexRequestDenied`, in case T-Rex is reserved for a user other than the one trying to start T-Rex.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ user = user or self.__default_user
+ try:
+ d = int(d)
+            if d < 30:   # a test must run for at least 30 seconds
+ raise ValueError
+ except ValueError:
+            raise ValueError('d parameter must be an integer specifying how long T-Rex should run, and must be at least 30 secs.')
+
+ trex_cmd_options.update( {'f' : f, 'd' : d} )
+
+ self.result_obj.clear_results()
+ try:
+ issue_time = time.time()
+ retval = self.server.start_trex(trex_cmd_options, user, block_to_success, timeout)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ if retval!=0:
+ self.seq = retval # update seq num only on successful submission
+ return True
+        else: # T-Rex has already been started by another user
+ raise TRexInUseError('T-Rex is already being used by another user or process. Try again once T-Rex is back in IDLE state.')
+
+ def stop_trex (self):
+ """
+ Request to stop a T-Rex run on server.
+
+        The request is only valid if the stop initiator is the same client as the T-Rex run initiator.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** on successful termination
+ + **False** if request issued but T-Rex wasn't running.
+
+ :raises:
+            + :exc:`trex_exceptions.TRexRequestDenied`, in case T-Rex is running but was started by another user.
+            + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ return self.server.stop_trex(self.seq)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def force_kill (self, confirm = True):
+ """
+ Force killing of running T-Rex process (if exists) on the server.
+
+ .. tip:: This method is a safety method and **overrides any running or reserved resources**, and as such isn't designed to be used on a regular basis.
+ Always consider using :func:`trex_client.CTRexClient.stop_trex` instead.
+
+        At the end of this method, T-Rex will return to IDLE state with no reservation.
+
+ :parameters:
+ confirm : bool
+                Prompt for user confirmation before continuing to terminate the T-Rex session
+
+ :return:
+ + **True** on successful termination
+ + **False** otherwise.
+
+ :raises:
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ if confirm:
+ prompt = "WARNING: This will terminate active T-Rex session indiscriminately.\nAre you sure? "
+ sys.stdout.write('%s [y/n]\n' % prompt)
+ while True:
+ try:
+ if strtobool(user_input().lower()):
+ break
+ else:
+ return
+ except ValueError:
+ sys.stdout.write('Please respond with \'y\' or \'n\'.\n')
+ try:
+ return self.server.force_trex_kill()
+ except AppError as err:
+ # Silence any kind of application errors- by design
+ return False
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def wait_until_kickoff_finish(self, timeout = 40):
+ """
+ Block the client application until T-Rex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ :parameters:
+ timeout : int
+ maximum time (in seconds) to wait in blocking state until T-Rex changes state from 'Starting' to either 'Idle' or 'Running'
+
+ :return:
+ + **True** on successful termination
+ + **False** if request issued but T-Rex wasn't running.
+
+ :raises:
+            + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+        .. note:: Exceptions are thrown only when start_trex did not block in the first place, i.e. the `block_to_success` parameter was set to `False`
+
+ """
+
+ try:
+ return self.server.wait_until_kickoff_finish(timeout)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def is_running (self, dump_out = False):
+ """
+ Poll for T-Rex running status.
+
+ If T-Rex is running, a history item will be added into result_obj and processed.
+
+ .. tip:: This method is especially useful for iterating until T-Rex run is finished.
+
+ :parameters:
+ dump_out : dict
+ if passed, the pointer object is cleared and the latest dump stored in it.
+
+ :return:
+ + **True** if T-Rex is running.
+ + **False** if T-Rex is not running.
+
+ :raises:
+            + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + :exc:`TypeError`, in case JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ res = self.get_running_info()
+ if res == {}:
+ return False
+ if (dump_out != False) and (isinstance(dump_out, dict)): # save received dump to given 'dump_out' pointer
+ dump_out.clear()
+ dump_out.update(res)
+ return True
+ except TRexWarning as err:
+            if err.code == -12:    # T-Rex is either still in 'Starting' state or in Idle state, however NO error occurred
+ return False
+ except TRexException:
+ raise
+ except ProtocolError as err:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_trex_files_path (self):
+ """
+        Fetches the local path in which files are stored when pushed to the T-Rex server from the client.
+
+ :parameters:
+ None
+
+ :return:
+ string representation of the desired path
+
+ .. note:: The returned path represents a path on the T-Rex server **local machine**
+
+ :raises:
+ ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ return (self.server.get_files_path() + '/')
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_running_status (self):
+ """
+ Fetches the current T-Rex status.
+
+        If available, verbose data will accompany the state itself.
+
+ :parameters:
+ None
+
+ :return:
+ dictionary with 'state' and 'verbose' keys.
+
+ :raises:
+ ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ res = self.server.get_running_status()
+ res['state'] = TRexStatus(res['state'])
+ return res
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def get_running_info (self):
+ """
+        Performs a single poll of T-Rex running data and processes it into the result object (named `result_obj`).
+
+        .. tip:: This method will throw an exception if T-Rex isn't running. Always consider using :func:`trex_client.CTRexClient.is_running` which handles a single poll operation in a safer manner.
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the most updated data dump from T-Rex.
+
+ :raises:
+            + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + :exc:`TypeError`, in case JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ if not self.is_query_relevance():
+            # if requested within a timeframe smaller than the minimum sampling interval, return the last known data without querying the server
+ return self.result_obj.get_latest_dump()
+ else:
+ try:
+ latest_dump = self.decoder.decode( self.server.get_running_info() ) # latest dump is not a dict, but json string. decode it.
+ self.result_obj.update_result_data(latest_dump)
+ return latest_dump
+ except TypeError as inst:
+ raise TypeError('JSON-RPC data decoding failed. Check out incoming JSON stream.')
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def sample_until_condition (self, condition_func, time_between_samples = 5):
+ """
+        Continuously samples T-Rex data, with the sampling rate described by time_between_samples.
+
+        On each fetched dump, condition_func is applied to the result object, and if it returns True, the sampling stops.
+
+ :parameters:
+ condition_func : function
+ function that operates on result_obj and checks if a condition has been met
+
+                .. note:: `condition_func` is applied to the `CTRexResult` object. Make sure to design a relevant method.
+ time_between_samples : int
+ determines the time between each sample of the server
+
+ default value : **5**
+
+ :return:
+ the first result object (see :class:`CTRexResult` for further details) of the T-Rex run on which the condition has been met.
+
+ :raises:
+ + :exc:`UserWarning`, in case the condition_func method condition hasn't been met
+            + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + :exc:`TypeError`, in case JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+ + :exc:`Exception`, in case the condition_func suffered from any kind of exception
+
+ """
+ # make sure T-Rex is running. raise exceptions here if any
+ self.wait_until_kickoff_finish()
+ try:
+ while self.is_running():
+ results = self.get_result_obj()
+ if condition_func(results):
+ # if condition satisfied, stop T-Rex and return result object
+ self.stop_trex()
+ return results
+ time.sleep(time_between_samples)
+ except TRexWarning:
+ # means we're back to Idle state, and didn't meet our condition
+ raise UserWarning("T-Rex results condition wasn't met during T-Rex run.")
+ except Exception:
+ # this could come from provided method 'condition_func'
+ raise
+
+ def sample_to_run_finish (self, time_between_samples = 5):
+ """
+        Automatically samples T-Rex data, with the sampling rate described by time_between_samples, until the T-Rex run finishes.
+
+ :parameters:
+ time_between_samples : int
+ determines the time between each sample of the server
+
+ default value : **5**
+
+ :return:
+ the latest result object (see :class:`CTRexResult` for further details) with sampled data.
+
+ :raises:
+            + :exc:`trex_exceptions.TRexIncompleteRunError`, in case of a failed T-Rex run (unexpected termination).
+ + :exc:`TypeError`, in case JSON stream decoding error.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ self.wait_until_kickoff_finish()
+
+ try:
+ while self.is_running():
+ time.sleep(time_between_samples)
+ except TRexWarning:
+ pass
+ results = self.get_result_obj()
+ return results
+
+
+ def get_result_obj (self, copy_obj = True):
+ """
+ Returns the result object of the trex_client's instance.
+
+        By default, returns a **copy** of the object (so that changes to the original object are masked).
+
+ :parameters:
+ copy_obj : bool
+                False means that a reference to the original (possibly changing) object is passed
+
+                default value : **True**
+
+ :return:
+ the latest result object (see :class:`CTRexResult` for further details) with sampled data.
+
+ """
+ if copy_obj:
+ return copy.deepcopy(self.result_obj)
+ else:
+ return self.result_obj
+
+ def is_reserved (self):
+ """
+ Checks if T-Rex is currently reserved to any user or not.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if T-Rex is reserved.
+ + **False** otherwise.
+
+ :raises:
+ ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ try:
+ return self.server.is_reserved()
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def reserve_trex (self, user = None):
+ """
+ Reserves the usage of T-Rex to a certain user.
+
+        When T-Rex is reserved, no other user can reserve it or start new T-Rex runs.
+
+ :parameters:
+ user : str
+ a username of the desired owner of T-Rex
+
+ default: current logged user
+
+ :return:
+ **True** if reservation made successfully
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case T-Rex is reserved for another user than the one trying to make the reservation.
+ + :exc:`trex_exceptions.TRexInUseError`, in case T-Rex is currently running.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ username = user or self.__default_user
+ try:
+ return self.server.reserve_trex(user = username)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def cancel_reservation (self, user = None):
+ """
+ Cancels a current reservation of T-Rex to a certain user.
+
+ When T-Rex is reserved, no other user can start new T-Rex runs.
+
+
+ :parameters:
+ user : str
+ a username of the desired owner of T-Rex
+
+ default: current logged user
+
+ :return:
+ + **True** if reservation canceled successfully,
+ + **False** if there was no reservation at all.
+
+ :raises:
+ + :exc:`trex_exceptions.TRexRequestDenied`, in case T-Rex is reserved for another user than the one trying to cancel the reservation.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+
+ username = user or self.__default_user
+ try:
+ return self.server.cancel_reservation(user = username)
+ except AppError as err:
+ self._handle_AppError_exception(err.args[0])
+ except ProtocolError:
+ raise
+ finally:
+ self.prompt_verbose_data()
+
+ def push_files (self, filepaths):
+ """
+        Pushes a file (or a list of files) to be stored locally on the server.
+
+ :parameters:
+ filepaths : str or list
+ a path to a file to be pushed to server.
+ if a list of paths is passed, all of those will be pushed to server
+
+ :return:
+ + **True** if file(s) copied successfully.
+ + **False** otherwise.
+
+ :raises:
+ + :exc:`IOError`, in case specified file wasn't found or could not be accessed.
+ + ProtocolError, in case of error in JSON-RPC protocol.
+
+ """
+ paths_list = None
+ if isinstance(filepaths, str):
+ paths_list = [filepaths]
+ elif isinstance(filepaths, list):
+ paths_list = filepaths
+ else:
+ raise TypeError("filepaths argument must be of type str or list")
+
+ for filepath in paths_list:
+ try:
+ if not os.path.exists(filepath):
+ raise IOError(errno.ENOENT, "The requested `{fname}` file wasn't found. Operation aborted.".format(
+ fname = filepath) )
+ else:
+ filename = os.path.basename(filepath)
+ with open(filepath, 'rb') as f:
+ file_content = f.read()
+ self.server.push_file(filename, binascii.b2a_base64(file_content))
+ finally:
+ self.prompt_verbose_data()
+ return True
+
+ def is_query_relevance(self):
+ """
+        Checks whether the minimum time between two consecutive server queries (asking for live running data) has passed.
+
+ .. note:: The allowed minimum time between each two consecutive samples is 0.5 seconds.
+
+ :parameters:
+ None
+
+ :return:
+            + **True** if more than 0.5 seconds have passed since the last server query.
+ + **False** otherwise.
+
+ """
+ cur_time = time.time()
+ if cur_time-self._last_sample < 0.5:
+ return False
+ else:
+ self._last_sample = cur_time
+ return True
+
+ def call_server_mathod_safely (self, method_to_call):
+ try:
+ return method_to_call()
+ except socket.error as e:
+ if e.errno == errno.ECONNREFUSED:
+                raise socket.error(errno.ECONNREFUSED, "Connection to the T-Rex server was refused. Please make sure the server is up.")
+
+ def check_server_connectivity (self):
+ """
+ Checks for server valid connectivity.
+ """
+ try:
+ socket.gethostbyname(self.trex_host)
+ return self.server.connectivity_check()
+ except socket.gaierror as e:
+            raise socket.gaierror(e.errno, "Could not resolve server hostname. Please make sure the hostname was entered correctly.")
+ except socket.error as e:
+ if e.errno == errno.ECONNREFUSED:
+                raise socket.error(errno.ECONNREFUSED, "Connection to the T-Rex server was refused. Please make sure the server is up.")
+ finally:
+ self.prompt_verbose_data()
+
+ def prompt_verbose_data(self):
+ """
+        This method prints any available verbose data, but only if the `verbose` option has been turned on.
+ """
+ if self.verbose:
+ print ('\n')
+ print ("(*) JSON-RPC request: "+ self.history.request)
+ print ("(*) JSON-RPC response: "+ self.history.response)
+
+ def __verbose_print(self, print_str):
+ """
+ This private method prints the `print_str` string only in case self.verbose flag is turned on.
+
+ :parameters:
+ print_str : str
+ a string to be printed
+
+ :returns:
+ None
+ """
+ if self.verbose:
+ print (print_str)
+
+
+
+ def _handle_AppError_exception(self, err):
+ """
+        This private method triggers the generation of a dedicated T-Rex exception in case a general application error (AppError) has been raised.
+ """
+ # handle known exceptions based on known error codes.
+ # if error code is not known, raise ProtocolError
+ raise exception_handler.gen_exception(err)
+
+
+class CTRexResult(object):
+ """
+ A class containing all results received from T-Rex.
+
+    On top of containing the results, this class offers easier data access and extended result-processing options
+ """
+ def __init__(self, max_history_size):
+ """
+        Instantiate a T-Rex result object
+
+ :parameters:
+ max_history_size : int
+ a number to set the maximum history size of a single T-Rex run. Each sampling adds a new item to history.
+
+ """
+ self._history = deque(maxlen = max_history_size)
+ self.clear_results()
+
+ def __repr__(self):
+ return ("Is valid history? {arg}\n".format( arg = self.is_valid_hist() ) +
+ "Done warmup? {arg}\n".format( arg = self.is_done_warmup() ) +
+ "Expected tx rate: {arg}\n".format( arg = self.get_expected_tx_rate() ) +
+ "Current tx rate: {arg}\n".format( arg = self.get_current_tx_rate() ) +
+ "Maximum latency: {arg}\n".format( arg = self.get_max_latency() ) +
+ "Average latency: {arg}\n".format( arg = self.get_avg_latency() ) +
+ "Average window latency: {arg}\n".format( arg = self.get_avg_window_latency() ) +
+ "Total drops: {arg}\n".format( arg = self.get_total_drops() ) +
+ "Drop rate: {arg}\n".format( arg = self.get_drop_rate() ) +
+ "History size so far: {arg}\n".format( arg = len(self._history) ) )
+
+ def get_expected_tx_rate (self):
+ """
+ Fetches the expected TX rate in various units representation
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the expected TX rate, where the key is the measurement units, and the value is the measurement value.
+
+ """
+ return self._expected_tx_rate
+
+ def get_current_tx_rate (self):
+ """
+ Fetches the current TX rate in various units representation
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the current TX rate, where the key is the measurement units, and the value is the measurement value.
+
+ """
+ return self._current_tx_rate
+
+ def get_max_latency (self):
+ """
+ Fetches the maximum latency measured on each of the interfaces
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the maximum latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
+
+ """
+ return self._max_latency
+
+ def get_avg_latency (self):
+ """
+ Fetches the average latency measured on each of the interfaces from the start of T-Rex run
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the average latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
+
+ The `all` key represents the average of all interfaces' average
+
+ """
+ return self._avg_latency
+
+ def get_avg_window_latency (self):
+ """
+        Fetches the average latency measured on each of the interfaces from all the samples currently stored in the history window.
+
+ :parameters:
+ None
+
+ :return:
+ dictionary containing the average latency, where the key is the measurement interface (`c` indicates client), and the value is the measurement value.
+
+ The `all` key represents the average of all interfaces' average
+
+ """
+ return self._avg_window_latency
+
+ def get_total_drops (self):
+ """
+ Fetches the total number of drops identified from the moment T-Rex run began.
+
+ :parameters:
+ None
+
+ :return:
+ total drops count (as int)
+
+ """
+ return self._total_drops
+
+ def get_drop_rate (self):
+ """
+ Fetches the most recent drop rate in pkts/sec units.
+
+ :parameters:
+ None
+
+ :return:
+ current drop rate (as float)
+
+ """
+ return self._drop_rate
+
+ def is_valid_hist (self):
+ """
+        Checks if the result object contains valid data.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if history is valid.
+ + **False** otherwise.
+
+ """
+ return self.valid
+
+ def set_valid_hist (self, valid_stat = True):
+ """
+        Sets the result object's validity status.
+
+ :parameters:
+ valid_stat : bool
+ defines the validity status
+
+                default value : **True**
+
+ :return:
+ None
+
+ """
+ self.valid = valid_stat
+
+ def is_done_warmup (self):
+ """
+        Checks if the latest T-Rex results' TX rate indicates that T-Rex has reached its expected TX rate.
+
+ :parameters:
+ None
+
+ :return:
+ + **True** if expected TX-rate has been reached.
+ + **False** otherwise.
+
+ """
+ return self._done_warmup
+
+ def get_last_value (self, tree_path_to_key, regex = None):
+ """
+ A dynamic getter from the latest sampled data item stored in the result object.
+
+ :parameters:
+ tree_path_to_key : str
+ defines a path to desired data.
+
+                .. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
+                         | Use '[i]' to access the i'th indexed object of an array.
+
+            regex : str
+                apply a regex to filter results out of a multiple-results set.
+
+                Filter applies only on keys of dictionary type.
+
+                default value : **None**
+
+ :return:
+ + a list of values relevant to the specified path
+ + None if no results were fetched or the history isn't valid.
+
+ """
+ if not self.is_valid_hist():
+ return None
+ else:
+ return CTRexResult.__get_value_by_path(self._history[len(self._history)-1], tree_path_to_key, regex)
+
+ def get_value_list (self, tree_path_to_key, regex = None, filter_none = True):
+ """
+ A dynamic getter from all sampled data items stored in the result object.
+
+ :parameters:
+ tree_path_to_key : str
+ defines a path to desired data.
+
+ .. tip:: | Use '.' to enter one level deeper in dictionary hierarchy.
+ | Use '[i]' to access the i'th indexed object of an array.
+
+            regex : str
+                apply a regex to filter results out of a multiple-results set.
+
+                Filter applies only on keys of dictionary type.
+
+                default value : **None**
+
+            filter_none : bool
+                specify if None results should be filtered out or not.
+
+                default value : **True**
+
+ :return:
+ + a list of values relevant to the specified path. Each item on the list refers to a single server sample.
+ + None if no results were fetched or the history isn't valid.
+ """
+
+ if not self.is_valid_hist():
+ return None
+ else:
+ raw_list = list( map(lambda x: CTRexResult.__get_value_by_path(x, tree_path_to_key, regex), self._history) )
+ if filter_none:
+ return list (filter(lambda x: x!=None, raw_list) )
+ else:
+ return raw_list
+
+ def get_latest_dump(self):
+ """
+ A getter to the latest sampled data item stored in the result object.
+
+ :parameters:
+ None
+
+ :return:
+ + a dictionary of the latest data item
+ + an empty dictionary if history is empty.
+
+ """
+ history_size = len(self._history)
+ if history_size != 0:
+ return self._history[len(self._history) - 1]
+ else:
+ return {}
+
+ def update_result_data (self, latest_dump):
+ """
+ Integrates a `latest_dump` dictionary into the CTRexResult object.
+
+ :parameters:
+ latest_dump : dict
+ a dictionary with the items desired to be integrated into the object history and stats
+
+ :return:
+ None
+
+ """
+ # add latest dump to history
+ if latest_dump != {}:
+ self._history.append(latest_dump)
+ if not self.valid:
+ self.valid = True
+
+ # parse important fields and calculate averages and others
+ if self._expected_tx_rate is None:
+ # get the expected data only once since it doesn't change
+ self._expected_tx_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data", "m_tx_expected_\w+")
+
+ self._current_tx_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data", "m_tx_(?!expected_)\w+")
+ if not self._done_warmup and self._expected_tx_rate is not None:
+ # check for up to 2% change between expected and actual
+ if (self._current_tx_rate['m_tx_bps']/self._expected_tx_rate['m_tx_expected_bps'] > 0.98):
+ self._done_warmup = True
+
+ # handle latency data
+ latency_pre = "trex-latency"
+ self._max_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), ".*max-")#None # TBC
+ # support old typo
+ if self._max_latency is None:
+ latency_pre = "trex-latecny"
+ self._max_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), ".*max-")
+
+ self._avg_latency = self.get_last_value("{latency}.data".format(latency = latency_pre), "avg-")#None # TBC
+ self._avg_latency = CTRexResult.__avg_all_and_rename_keys(self._avg_latency)
+
+ avg_win_latency_list = self.get_value_list("{latency}.data".format(latency = latency_pre), "avg-")
+ self._avg_window_latency = CTRexResult.__calc_latency_win_stats(avg_win_latency_list)
+
+ tx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_tx_pkts")
+ rx_pkts = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_total_rx_pkts")
+ if tx_pkts is not None and rx_pkts is not None:
+ self._total_drops = tx_pkts - rx_pkts
+ self._drop_rate = CTRexResult.__get_value_by_path(latest_dump, "trex-global.data.m_rx_drop_bps")
+
+ def clear_results (self):
+ """
+ Clears all results and sets the history's validity to `False`
+
+ :parameters:
+ None
+
+ :return:
+ None
+
+ """
+ self.valid = False
+ self._done_warmup = False
+ self._expected_tx_rate = None
+ self._current_tx_rate = None
+ self._max_latency = None
+ self._avg_latency = None
+ self._avg_window_latency = None
+ self._total_drops = None
+ self._drop_rate = None
+ self._history.clear()
+
+ @staticmethod
+ def __get_value_by_path (dct, tree_path, regex = None):
+ try:
+ for i, p in re.findall(r'(\d+)|([\w|-]+)', tree_path):
+ dct = dct[p or int(i)]
+ if regex is not None and isinstance(dct, dict):
+ res = {}
+ for key,val in dct.items():
+ match = re.match(regex, key)
+ if match:
+ res[key]=val
+ return res
+ else:
+ return dct
+ except (KeyError, TypeError):
+ return None
+
+ @staticmethod
+ def __calc_latency_win_stats (latency_win_list):
+ res = {'all' : None }
+ port_dict = {'all' : []}
+ list( map(lambda x: CTRexResult.__update_port_dict(x, port_dict), latency_win_list) )
+
+        # finally, calculate averages for each list
+ res['all'] = float("%.3f" % (sum(port_dict['all'])/float(len(port_dict['all']))) )
+ port_dict.pop('all')
+ for port, avg_list in port_dict.items():
+ res[port] = float("%.3f" % (sum(avg_list)/float(len(avg_list))) )
+
+ return res
+
+ @staticmethod
+ def __update_port_dict (src_avg_dict, dest_port_dict):
+ all_list = src_avg_dict.values()
+ dest_port_dict['all'].extend(all_list)
+ for key, val in src_avg_dict.items():
+ reg_res = re.match("avg-(\d+)", key)
+ if reg_res:
+ tmp_key = "port"+reg_res.group(1)
+ if tmp_key in dest_port_dict:
+ dest_port_dict[tmp_key].append(val)
+ else:
+ dest_port_dict[tmp_key] = [val]
+
+ @staticmethod
+ def __avg_all_and_rename_keys (src_dict):
+ res = {}
+ all_list = src_dict.values()
+ res['all'] = float("%.3f" % (sum(all_list)/float(len(all_list))) )
+ for key, val in src_dict.items():
+ reg_res = re.match("avg-(\d+)", key)
+ if reg_res:
+ tmp_key = "port"+reg_res.group(1)
+ res[tmp_key] = val # don't touch original fields values
+ return res
+
+
+
+if __name__ == "__main__":
+ pass
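+    # A minimal usage sketch (illustrative only): the host name and the traffic
+    # profile path below are placeholders and assume a reachable trex daemon
+    # server; adjust them to a real setup before running.
+    trex = CTRexClient('trex-host-example', verbose = True)
+    trex.start_trex(f = 'cap2/dns.yaml', d = 60)
+
+    # block until the run finishes, then inspect the aggregated results
+    results = trex.sample_to_run_finish(time_between_samples = 5)
+    print (results.get_current_tx_rate())
+    print (results.get_avg_latency())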
+
+
+
diff --git a/scripts/automation/trex_control_plane/client_utils/__init__.py b/scripts/automation/trex_control_plane/client_utils/__init__.py
new file mode 100755
index 00000000..c38c2cca
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client_utils/__init__.py
@@ -0,0 +1 @@
+__all__ = ["general_utils", "trex_yaml_gen"]
diff --git a/scripts/automation/trex_control_plane/client_utils/general_utils.py b/scripts/automation/trex_control_plane/client_utils/general_utils.py
new file mode 100755
index 00000000..5544eabc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client_utils/general_utils.py
@@ -0,0 +1,57 @@
+#!/router/bin/python
+
+import sys,site
+import os
+
+try:
+ import pwd
+except ImportError:
+ import getpass
+ pwd = None
+
+using_python_3 = True if sys.version_info.major == 3 else False
+
+
+def user_input():
+ if using_python_3:
+ return input()
+ else:
+ # using python version 2
+ return raw_input()
+
+def get_current_user():
+ if pwd:
+ return pwd.getpwuid( os.geteuid() ).pw_name
+ else:
+ return getpass.getuser()
+
+def import_module_list_by_path (modules_list):
+ assert(isinstance(modules_list, list))
+ for full_path in modules_list:
+ site.addsitedir(full_path)
+
+def find_path_to_pardir (pardir, base_path = os.getcwd() ):
+ """
+ Finds the absolute path for some parent dir `pardir`, starting from base_path
+
+ :parameters:
+ pardir : str
+            name of the upper-level directory for which we want to find an absolute path
+ base_path : str
+ a full (usually nested) path from which we want to find a parent folder.
+
+ default value : **current working dir**
+
+ :return:
+        string representation of the full path to the requested parent directory
+
+ """
+ components = base_path.split(os.sep)
+ return str.join(os.sep, components[:components.index(pardir)+1])
+
+
+
+if __name__ == "__main__":
+ pass
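+    # A small illustrative sketch (the nested path below is an assumed example):
+    # print the current user name and resolve the absolute path of a parent directory.
+    print (get_current_user())
+    nested_path = os.sep.join(['home', 'user', 'trex_control_plane', 'client_utils'])
+    print (find_path_to_pardir('trex_control_plane', base_path = nested_path))
+    # expected output: home/user/trex_control_plane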
diff --git a/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py b/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
new file mode 100755
index 00000000..755674ea
--- /dev/null
+++ b/scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
@@ -0,0 +1,212 @@
+#!/router/bin/python
+
+import pprint
+import yaml
+import os
+# import bisect
+
+class CTRexYaml(object):
+ """
+ This class functions as a YAML generator according to T-Rex YAML format.
+
+ CTRexYaml is compatible with both Python 2 and Python 3.
+ """
+ YAML_TEMPLATE = [{'cap_info': [],
+ 'duration': 10.0,
+ 'generator': {'clients_end': '16.0.1.255',
+ 'clients_per_gb': 201,
+ 'clients_start': '16.0.0.1',
+ 'distribution': 'seq',
+ 'dual_port_mask': '1.0.0.0',
+ 'min_clients': 101,
+ 'servers_end': '48.0.0.255',
+ 'servers_start': '48.0.0.1',
+ 'tcp_aging': 1,
+ 'udp_aging': 1},
+ 'mac' : [0x00,0x00,0x00,0x01,0x00,0x00]}]
+ PCAP_TEMPLATE = {'cps': 1.0,
+ 'ipg': 10000,
+ 'name': '',
+ 'rtt': 10000,
+ 'w': 1}
+
+ def __init__ (self, trex_files_path):
+ """
+ The initialization of this class creates a CTRexYaml object with **empty** 'cap-info', and with default client-server configuration.
+
+ Use class methods to add and assign pcap files and export the data to a YAML file.
+
+ :parameters:
+ trex_files_path : str
+                a path (on the T-Rex server side) from which T-Rex can access the pcap files.
+
+ """
+ self.yaml_obj = list(CTRexYaml.YAML_TEMPLATE)
+ self.empty_cap = True
+ self.file_list = []
+ self.yaml_dumped = False
+ self.trex_files_path = trex_files_path
+
+ def add_pcap_file (self, local_pcap_path):
+ """
+ Adds a .pcap file with recorded traffic to the yaml object by linking the file with 'cap-info' template key fields.
+
+ :parameters:
+ local_pcap_path : str
+ a path (on client side) for the pcap file to be added.
+
+ :return:
+ + The index of the inserted item (as int) if item added successfully
+ + -1 if pcap file already exists in 'cap_info'.
+
+ """
+ new_pcap = dict(CTRexYaml.PCAP_TEMPLATE)
+ new_pcap['name'] = self.trex_files_path + os.path.basename(local_pcap_path)
+ if self.get_pcap_idx(new_pcap['name']) != -1:
+ # pcap already exists in 'cap_info'
+ return -1
+ else:
+ self.yaml_obj[0]['cap_info'].append(new_pcap)
+ if self.empty_cap:
+ self.empty_cap = False
+ self.file_list.append(local_pcap_path)
+ return ( len(self.yaml_obj[0]['cap_info']) - 1)
+
+
+ def get_pcap_idx (self, pcap_name):
+ """
+ Checks if a certain .pcap file has been added into the yaml object.
+
+ :parameters:
+ pcap_name : str
+ the name of the pcap file to be searched
+
+ :return:
+ + The index of the pcap file (as int) if exists
+ + -1 if not exists.
+
+ """
+ comp_pcap = pcap_name if pcap_name.startswith(self.trex_files_path) else (self.trex_files_path + pcap_name)
+ for idx, pcap in enumerate(self.yaml_obj[0]['cap_info']):
+ if pcap['name'] == comp_pcap:
+ return idx
+ # pcap file wasn't found
+ return -1
+
+ def dump_as_python_obj (self):
+ """
+ dumps with nice indentation the pythonic format (dictionaries and lists) of the currently built yaml object.
+
+ :parameters:
+ None
+
+ :return:
+ None
+
+ """
+ pprint.pprint(self.yaml_obj)
+
+ def dump(self):
+ """
+ dumps with nice indentation the YAML format of the currently built yaml object.
+
+ :parameters:
+ None
+
+        :return:
+ None
+
+ """
+ print (yaml.safe_dump(self.yaml_obj, default_flow_style = False))
+
+ def to_yaml(self, filename):
+ """
+        Exports the built configuration into an actual YAML file.
+
+ :parameters:
+ filename : str
+ a path (on client side, including filename) to store the generated yaml file.
+
+ :return:
+ None
+
+ :raises:
+            + :exc:`ValueError`, in case no pcap files have been added to the object.
+            + :exc:`EnvironmentError`, in case of any IO error while writing to the file, or an OSError when trying to open it for writing.
+
+ """
+ if self.empty_cap:
+ raise ValueError("No .pcap file has been assigned to yaml object. Must add at least one")
+ else:
+ try:
+ with open(filename, 'w') as yaml_file:
+ yaml_file.write( yaml.safe_dump(self.yaml_obj, default_flow_style = False) )
+ self.yaml_dumped = True
+ self.file_list.append(filename)
+ except EnvironmentError as inst:
+ raise
+
+ def set_cap_info_param (self, param, value, seq):
+ """
+        Sets the value of a cap-info parameter for a specific pcap file.
+
+ :parameters:
+ param : str
+                the name of the parameter to be set.
+ value : int/float
+ the desired value to be set to `param` key.
+ seq : int
+ an index to the relevant caps array to be changed (index supplied when adding new pcap file, see :func:`add_pcap_file`).
+
+        :return:
+            + **True** on success
+            + **False** in case an out-of-range index was given.
+
+ """
+ try:
+ self.yaml_obj[0]['cap_info'][seq][param] = value
+
+ return True
+ except IndexError:
+ return False
+
+ def set_generator_param (self, param, value):
+ """
+        Sets the value of a generator parameter of the yaml object.
+
+ :parameters:
+ param : str
+                the name of the parameter to be set.
+ value : int/float/str
+ the desired value to be set to `param` key.
+
+ :return:
+ None
+
+ """
+ self.yaml_obj[0]['generator'][param] = value
+
+ def get_file_list(self):
+ """
+ Returns a list of all files related to the YAML object, including the YAML filename itself.
+
+ .. tip:: This method is especially useful for listing all the files that should be pushed to T-Rex server as part of the same yaml selection.
+
+ :parameters:
+ None
+
+ :return:
+ a list of filepaths, each is a local client-machine file path.
+
+ """
+ if not self.yaml_dumped:
+ print ("WARNING: .yaml file wasn't dumped yet. Files list contains only .pcap files")
+ return self.file_list
+
+
+
+if __name__ == "__main__":
+ pass
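+    # A minimal usage sketch (illustrative only): the server-side files path,
+    # the local pcap path and the output yaml name below are placeholders.
+    yaml_obj = CTRexYaml('/tmp/trex_files/')
+    cap_idx = yaml_obj.add_pcap_file('avl/http_browsing.pcap')
+    yaml_obj.set_cap_info_param('cps', 500.0, cap_idx)        # 500 connections/sec for this cap
+    yaml_obj.set_generator_param('clients_end', '16.0.1.255')
+    yaml_obj.dump()                                           # print the generated YAML
+    yaml_obj.to_yaml('generated_traffic.yaml')
+    print (yaml_obj.get_file_list())                          # files to push to the T-Rex server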
diff --git a/scripts/automation/trex_control_plane/common/__init__.py b/scripts/automation/trex_control_plane/common/__init__.py
new file mode 100755
index 00000000..5a1da046
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/__init__.py
@@ -0,0 +1 @@
+__all__ = ["trex_status_e", "trex_exceptions"]
diff --git a/scripts/automation/trex_control_plane/common/trex_exceptions.py b/scripts/automation/trex_control_plane/common/trex_exceptions.py
new file mode 100755
index 00000000..1353fd00
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/trex_exceptions.py
@@ -0,0 +1,140 @@
+#!/router/bin/python
+
+#from rpc_exceptions import RPCExceptionHandler, WrappedRPCError
+
+from jsonrpclib import Fault, ProtocolError, AppError
+
+class RPCError(Exception):
+ """
+ This is the general RPC error exception class from which :exc:`trex_exceptions.TRexException` inherits.
+
+    Every exception in this class has an error format according to the JSON-RPC convention: code, message and data.
+
+ """
+ def __init__(self, code, message, remote_data = None):
+ self.code = code
+ self.msg = message or self._default_message
+ self.data = remote_data
+ self.args = (code, self.msg, remote_data)
+
+ def __str__(self):
+ return self.__repr__()
+ def __repr__(self):
+ if self.args[2] is not None:
+ return u"[errcode:%r] %r. Extended data: %r" % (self.args[0], self.args[1], self.args[2])
+ else:
+ return u"[errcode:%r] %r" % (self.args[0], self.args[1])
+
+class TRexException(RPCError):
+ """
+ This is the most general T-Rex exception.
+
+    All exceptions inheriting from this class have an error code and a default message which describes the most common use case of the error.
+
+    This exception isn't used by default; it is raised only when an error unrelated to ProtocolError occurs and can't be resolved to any of the derived exceptions.
+
+ """
+ code = -10
+ _default_message = 'T-Rex encountered an unexpected error. please contact T-Rex dev team.'
+ # api_name = 'TRex'
+
+class TRexError(TRexException):
+ """
+    Indicates a general error in a T-Rex run, e.g. due to wrong input parameters or reachability issues.
+
+    This exception isn't used by default; it is raised only when an error unrelated to ProtocolError occurs and can't be resolved to any of the derived exceptions.
+ """
+ code = -11
+ _default_message = 'T-Rex run failed due to wrong input parameters, or due to reachability issues.'
+
+class TRexWarning(TRexException):
+ """ Indicates a warning from T-Rex server. When this exception raises it normally used to indicate required data isn't ready yet """
+ code = -12
+ _default_message = 'T-Rex is starting (data is not available yet).'
+
+class TRexRequestDenied(TRexException):
+ """ Indicates the desired reques was denied by the server """
+ code = -33
+ _default_message = 'T-Rex desired request denied because the requested resource is already taken. Try again once T-Rex is back in IDLE state.'
+
+class TRexInUseError(TRexException):
+ """
+ Indicates that T-Rex is currently in use
+
+ """
+ code = -13
+ _default_message = 'T-Rex is already being used by another user or process. Try again once T-Rex is back in IDLE state.'
+
+class TRexRunFailedError(TRexException):
+ """ Indicates that T-Rex has failed due to some reason. This Exception is used when T-Rex process itself terminates due to unknown reason """
+ code = -14
+ _default_message = ''
+
+class TRexIncompleteRunError(TRexException):
+ """
+    Indicates that T-Rex has failed for some reason.
+    This exception is used when the T-Rex process itself terminated with an error fault or was terminated by external intervention in the OS.
+
+ """
+ code = -15
+ _default_message = 'T-Rex run was terminated unexpectedly by outer process or by the hosting OS'
+
+EXCEPTIONS = [TRexException, TRexError, TRexWarning, TRexInUseError, TRexRequestDenied, TRexRunFailedError, TRexIncompleteRunError]
+
+class CExceptionHandler(object):
+ """
+ CExceptionHandler is responsible for generating T-Rex API related exceptions in client side.
+ """
+ def __init__(self, exceptions):
+ """
+        Instantiate a CExceptionHandler object
+
+ :parameters:
+
+ exceptions : list
+ a list of all T-Rex acceptable exception objects.
+
+ default list:
+ - :exc:`trex_exceptions.TRexException`
+ - :exc:`trex_exceptions.TRexError`
+ - :exc:`trex_exceptions.TRexWarning`
+ - :exc:`trex_exceptions.TRexInUseError`
+ - :exc:`trex_exceptions.TRexRequestDenied`
+ - :exc:`trex_exceptions.TRexRunFailedError`
+ - :exc:`trex_exceptions.TRexIncompleteRunError`
+
+ """
+ if isinstance(exceptions, type):
+ exceptions = [ exceptions, ]
+ self.exceptions = exceptions
+ self.exceptions_dict = dict((e.code, e) for e in self.exceptions)
+
+ def gen_exception (self, err):
+ """
+        Generates a T-Rex exception based on a general application error `err`.
+
+ :parameters:
+
+ err : exception
+                an application error (the payload of an AppError) received by the :class:`trex_client.CTRexClient` class
+
+ :return:
+ A T-Rex exception from the exception list defined in class creation.
+
+            If no such exception was found, a generic TRexException is returned.
+
+ """
+ code, message, data = err
+ try:
+ exp = self.exceptions_dict[code]
+ return exp(exp.code, message, data)
+ except KeyError:
+            # fall back to TRexException when an unknown application error is raised
+ return TRexException(err)
+
+
+exception_handler = CExceptionHandler( EXCEPTIONS )
+
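+
+if __name__ == "__main__":
+    # A minimal sketch (illustrative only): map a raw JSON-RPC error tuple, as
+    # carried by AppError.args[0] on the client side, to its dedicated T-Rex exception.
+    raw_error = (-13, "T-Rex is already being used by another user or process.", None)
+    exc = exception_handler.gen_exception(raw_error)
+    print (type(exc).__name__)   # TRexInUseError
+    print (exc)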
diff --git a/scripts/automation/trex_control_plane/common/trex_status_e.py b/scripts/automation/trex_control_plane/common/trex_status_e.py
new file mode 100755
index 00000000..34db9b39
--- /dev/null
+++ b/scripts/automation/trex_control_plane/common/trex_status_e.py
@@ -0,0 +1,8 @@
+#!/router/bin/python
+
+# import outer_packages
+from enum import Enum
+
+
+# define the states in which a T-Rex can hold during its lifetime
+TRexStatus = Enum('TRexStatus', 'Idle Starting Running')
diff --git a/scripts/automation/trex_control_plane/dirtree_no_files.txt b/scripts/automation/trex_control_plane/dirtree_no_files.txt
new file mode 100755
index 00000000..b87c4167
--- /dev/null
+++ b/scripts/automation/trex_control_plane/dirtree_no_files.txt
@@ -0,0 +1,11 @@
+trex_control_plane/
+|-- Client
+|-- Server
+|-- common
+`-- python_lib
+ |-- enum34-1.0.4
+ |-- jsonrpclib-0.1.3
+ |-- lockfile-0.10.2
+ |-- python-daemon-2.0.5
+ `-- zmq
+
diff --git a/scripts/automation/trex_control_plane/dirtree_with_files.txt b/scripts/automation/trex_control_plane/dirtree_with_files.txt
new file mode 100755
index 00000000..5ce7cdfc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/dirtree_with_files.txt
@@ -0,0 +1,31 @@
+trex_control_plane/
+|-- Client
+| |-- __init__.py
+| |-- outer_packages.py
+| |-- python_lib
+| `-- trex_client.py
+|-- Server
+| |-- CCustomLogger.py
+| |-- outer_packages.py
+| |-- trex_daemon_server
+| |-- trex_daemon_server.py
+| |-- trex_launch_thread.py
+| |-- trex_server.py
+| `-- zmq_monitor_thread.py
+|-- __init__.py
+|-- common
+| |-- __init__.py
+| |-- __init__.pyc
+| |-- trex_status_e.py
+| `-- trex_status_e.pyc
+|-- dirtree_no_files.txt
+|-- dirtree_with_files.txt
+`-- python_lib
+ |-- __init__.py
+ |-- enum34-1.0.4
+ |-- jsonrpclib-0.1.3
+ |-- lockfile-0.10.2
+ |-- python-daemon-2.0.5
+ |-- zmq
+ `-- zmq_fedora.tar.gz
+
diff --git a/scripts/automation/trex_control_plane/doc/Makefile b/scripts/automation/trex_control_plane/doc/Makefile
new file mode 100755
index 00000000..37a28d0f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/Makefile
@@ -0,0 +1,192 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " applehelp to make an Apple Help Book"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage to run coverage check of the documentation (if enabled)"
+
+clean:
+ rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/T-RexProjectControlPlain.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/T-RexProjectControlPlain.qhc"
+
+applehelp:
+ $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+ @echo
+ @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+ @echo "N.B. You won't be able to view it unless you put it in" \
+ "~/Library/Documentation/Help or install it in your application" \
+ "bundle."
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/T-RexProjectControlPlain"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/T-RexProjectControlPlain"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+coverage:
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+ @echo "Testing of coverage in the sources finished, look at the " \
+ "results in $(BUILDDIR)/coverage/python.txt."
+
+xml:
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/scripts/automation/trex_control_plane/doc/_static/no_scrollbars.css b/scripts/automation/trex_control_plane/doc/_static/no_scrollbars.css
new file mode 100755
index 00000000..f86e823a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/_static/no_scrollbars.css
@@ -0,0 +1,10 @@
+/* override table width restrictions */
+.wy-table-responsive table td, .wy-table-responsive table th {
+ /* !important prevents the common CSS stylesheets from
+ overriding this as on RTD they are loaded after this stylesheet */
+ white-space: normal !important;
+}
+
+.wy-table-responsive {
+ overflow: visible !important;
+} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/about_trex.rst b/scripts/automation/trex_control_plane/doc/about_trex.rst
new file mode 100755
index 00000000..97cad97d
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/about_trex.rst
@@ -0,0 +1,16 @@
+===================
+About T-Rex project
+===================
+
+Full project's official site
+----------------------------
+
+To learn all about T-Rex project, visit Cisco's internal `official site <http://csi-wiki-01:8080/display/bpsim/Home>`_
+
+Even more
+---------
+
+.. toctree::
+ :maxdepth: 2
+
+ authors \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/api/client_code.rst b/scripts/automation/trex_control_plane/doc/api/client_code.rst
new file mode 100755
index 00000000..0cda3451
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/client_code.rst
@@ -0,0 +1,17 @@
+
+trex_client Module documentation
+================================
+
+
+CTRexClient class
+-----------------
+
+.. autoclass:: trex_client.CTRexClient
+ :members:
+ :member-order: alphabetical
+
+CTRexResult class
+-----------------
+
+.. autoclass:: trex_client.CTRexResult
+ :members:
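+
+Usage example
+-------------
+
+A minimal, illustrative sketch (the host name and traffic profile path are
+placeholders, and the imports assume the client package directory is on the
+Python path) of the typical interaction between :class:`trex_client.CTRexClient`
+and :class:`trex_client.CTRexResult`::
+
+    from trex_client import CTRexClient
+
+    trex = CTRexClient('trex-host-example')
+    trex.start_trex(f = 'cap2/dns.yaml', d = 120)
+
+    # stop the run as soon as the expected TX rate has been reached
+    results = trex.sample_until_condition(lambda res: res.is_done_warmup())
+    print(results.get_current_tx_rate())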
diff --git a/scripts/automation/trex_control_plane/doc/api/exceptions.rst b/scripts/automation/trex_control_plane/doc/api/exceptions.rst
new file mode 100755
index 00000000..d9df6484
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/exceptions.rst
@@ -0,0 +1,7 @@
+
+
+trex_exceptions Exceptions module
+=================================
+
+.. automodule:: trex_exceptions
+ :members:
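+
+Handling exceptions on the client side
+--------------------------------------
+
+An illustrative sketch (the host name and traffic profile path are placeholders,
+and the imports assume the relevant package directories are on the Python path)
+of catching a specific T-Rex exception raised by :class:`trex_client.CTRexClient`::
+
+    from trex_client import CTRexClient
+    from trex_exceptions import TRexInUseError
+
+    trex = CTRexClient('trex-host-example')
+    try:
+        trex.start_trex(f = 'cap2/dns.yaml', d = 60)
+    except TRexInUseError:
+        print('T-Rex is taken by another user or process, try again later')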
diff --git a/scripts/automation/trex_control_plane/doc/api/index.rst b/scripts/automation/trex_control_plane/doc/api/index.rst
new file mode 100755
index 00000000..8233a634
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/index.rst
@@ -0,0 +1,19 @@
+
+API Reference
+=============
+The T-Rex API reference section is currently a work in progress.
+
+**T-Rex Modules**
+
+.. toctree::
+ :maxdepth: 4
+
+ client_code
+ exceptions
+
+**T-Rex JSON Template**
+
+.. toctree::
+ :maxdepth: 4
+
+ json_fields
diff --git a/scripts/automation/trex_control_plane/doc/api/json_fields.rst b/scripts/automation/trex_control_plane/doc/api/json_fields.rst
new file mode 100755
index 00000000..b1a2af7c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/api/json_fields.rst
@@ -0,0 +1,233 @@
+
+T-Rex JSON Template
+===================
+
+Whenever T-Rex publishes live data, it uses JSON notation to describe the data object.
+
+Each client may parse it differently; this page describes the meaning of the values as published by the T-Rex server.
+
+
+Main Fields
+-----------
+
+Each JSON object published by the T-Rex server is divided into main fields, under which the actual data lies.
+
+These main fields are:
+
++-----------------------------+----------------------------------------------------+---------------------------+
+| Main field | Contains | Comments |
++=============================+====================================================+===========================+
+| :ref:`trex-global-field` | Must-have data on T-Rex run, | |
+| | mainly regarding Tx/Rx and packet drops | |
++-----------------------------+----------------------------------------------------+---------------------------+
+| :ref:`tx-gen-field` | Data indicate the quality of the transmit process. | |
+| | In case histogram is zero it means that all packets| |
+| | were injected in the right time. | |
++-----------------------------+----------------------------------------------------+---------------------------+
+| :ref:`trex-latecny-field` | Latency reports, containing latency data on | - Generated when latency |
+| | generated data and on response traffic | test is enabled (``l`` |
+| | | param) |
+| | | - *typo* on field key: |
++-----------------------------+----------------------------------------------------+ will be fixed on next |
+| :ref:`trex-latecny-v2-field`| Extended latency information | release |
++-----------------------------+----------------------------------------------------+---------------------------+
+
+
+Each of these fields contains keys for general field data (such as its name) and for its actual data, which is always stored under the **"data"** key.
+
+For example, in order to access some trex-global data, the access path would look like::
+
+ AllData -> trex-global -> data -> desired_info
+
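+In Python terms, assuming the published JSON object has already been parsed into a dictionary (for example with ``json.loads``), this roughly corresponds to::
+
+    desired_info = all_data['trex-global']['data']['desired_info']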
+
+
+
+Detailed explanation
+--------------------
+
+.. _trex-global-field:
+
+trex-global field
+~~~~~~~~~~~~~~~~~
+
+
++--------------------------------+-------+-----------------------------------------------------------+
+| Sub-key | Type | Meaning |
++================================+=======+===========================================================+
+| m_cpu_util | float | CPU utilization (0-100) |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_platform_factor | float | multiplier factor |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_bps | float | total tx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_rx_bps | float | total rx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_pps | float | total tx packet per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_cps | float | total tx connection per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_cps | float | expected tx connection per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_pps | float | expected tx packet per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_tx_expected_bps | float | expected tx bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_rx_drop_bps | float | drop rate in bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_active_flows | float | active trex flows |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_open_flows | float | open trex flows from startup (monotonically incrementing) |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_pkts | int | total tx in packets |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_rx_pkts | int | total rx in packets |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_bytes | int | total tx in bytes |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_rx_bytes | int | total rx in bytes |
++--------------------------------+-------+-----------------------------------------------------------+
+| opackets-# | int | output packets (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| obytes-# | int | output bytes (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| ipackets-# | int | input packet (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| ibytes-# | int | input bytes (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| ierrors-# | int | input errors (per interface) |
++--------------------------------+-------+-----------------------------------------------------------+
+| oerrors-#                      | int   | output errors (per interface)                             |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_tx_bps-# | float | total transmitted data in bit per second |
++--------------------------------+-------+-----------------------------------------------------------+
+| unknown | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_learn_error [#f1]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_active [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_no_fid [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_time_out [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+| m_total_nat_open [#f2]_ | int | |
++--------------------------------+-------+-----------------------------------------------------------+
+
+
+.. _tx-gen-field:
+
+tx-gen field
+~~~~~~~~~~~~~~
+
++-------------------+-------+-----------------------------------------------------------+
+| Sub-key | Type | Meaning |
++===================+=======+===========================================================+
+| realtime-hist | dict | histogram of transmission. See extended information about |
+| | | histogram object under :ref:`histogram-object-fields`. |
+| | | The attribute analyzed is time packet has been sent |
+| | | before/after it was intended to be |
++-------------------+-------+-----------------------------------------------------------+
+| unknown | int | |
++-------------------+-------+-----------------------------------------------------------+
+
+.. _trex-latecny-field:
+
+trex-latecny field
+~~~~~~~~~~~~~~~~~~
+
++---------+-------+---------------------------------------------------------+
+| Sub-key | Type | Meaning |
++=========+=======+=========================================================+
+| avg-# | float | average latency in usec (per interface) |
++---------+-------+---------------------------------------------------------+
+| max-# | float | max latency in usec from the test start (per interface) |
++---------+-------+---------------------------------------------------------+
+| c-max-# | float | max in the last 1 sec window (per interface) |
++---------+-------+---------------------------------------------------------+
+| error-# | float | errors in latency packets (per interface) |
++---------+-------+---------------------------------------------------------+
+| unknown | int | |
++---------+-------+---------------------------------------------------------+
+
+.. _trex-latecny-v2-field:
+
+trex-latecny-v2 field
+~~~~~~~~~~~~~~~~~~~~~
+
++--------------------------------------+-------+--------------------------------------+
+| Sub-key | Type | Meaning |
++======================================+=======+======================================+
+| cpu_util | float | rx thread cpu % (this is not trex DP |
+| | | threads cpu%%) |
++--------------------------------------+-------+--------------------------------------+
+| port-# | | Containing per interface |
+| | dict | information. See extended |
+| | | information under ``port-# -> |
+| | | key_name -> sub_key`` |
++--------------------------------------+-------+--------------------------------------+
+| port-#->hist | dict | histogram of latency. See extended |
+| | | information about histogram object |
+| | | under :ref:`histogram-object-fields`.|
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats | | Containing per interface |
+| | dict | information. See extended |
+| | | information under ``port-# -> |
+| | | key_name -> sub_key`` |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_tx_pkt_ok | int | total of try sent packets |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_pkt_ok | int | total of packets sent from hardware |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_no_magic | int | rx error with no magic |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_no_id | int | rx errors with no id |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_seq_error | int | error in seq number |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_length_error | int | |
++--------------------------------------+-------+--------------------------------------+
+| port-#->stats->m_rx_check | int | packets tested in rx |
++--------------------------------------+-------+--------------------------------------+
+| unknown | int | |
++--------------------------------------+-------+--------------------------------------+
+
+
+
+.. _histogram-object-fields:
+
+Histogram object fields
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The histogram object is used in a number of places throughout the JSON object.
+The following section describes its fields in detail.
+
+
++-----------+-------+-----------------------------------------------------------------------------------+
+| Sub-key | Type | Meaning |
++===========+=======+===================================================================================+
+| min_usec | int | min attribute value in usec. pkt with latency less than this value is not counted |
++-----------+-------+-----------------------------------------------------------------------------------+
+| max_usec | int | max attribute value in usec |
++-----------+-------+-----------------------------------------------------------------------------------+
+| high_cnt | int | how many packets on which its attribute > min_usec |
++-----------+-------+-----------------------------------------------------------------------------------+
+| cnt | int | total packets from test startup |
++-----------+-------+-----------------------------------------------------------------------------------+
+| s_avg | float | average value from test startup |
++-----------+-------+-----------------------------------------------------------------------------------+
+| histogram | | histogram of relevant object by the following keys: |
+| | array | - key: value in usec |
+| | | - val: number of packets |
++-----------+-------+-----------------------------------------------------------------------------------+
+
+
+Access Examples
+---------------
+
+
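+The sketch below is illustrative only: it assumes the server-published JSON object has already been parsed into a Python dictionary named ``all_data`` (for example with ``json.loads``), and that per-interface keys are indexed as ``opackets-0``, ``avg-0`` and so on:
+
+.. code-block:: python
+
+    # global counters
+    global_data = all_data['trex-global']['data']
+    print global_data['m_tx_bps']         # total tx bit per second
+    print global_data['m_active_flows']   # active trex flows
+    print global_data['opackets-0']       # output packets on interface 0 (assumed key naming)
+
+    # transmission quality histogram (see the histogram object fields above)
+    hist = all_data['tx-gen']['data']['realtime-hist']
+    print hist['cnt'], hist['s_avg']      # total packets and average value since test start
+    for entry in hist['histogram']:
+        print entry['key'], entry['val']  # value in usec, number of packets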
+
+.. rubric:: Footnotes
+
+.. [#f1] Available only in NAT and NAT learning operation (``learn`` and ``learn-verify`` flags)
+
+.. [#f2] Available only in NAT operation (``learn`` flag) \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/authors.rst b/scripts/automation/trex_control_plane/doc/authors.rst
new file mode 100755
index 00000000..3b85f020
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/authors.rst
@@ -0,0 +1,12 @@
+=======
+Authors
+=======
+
+T-Rex is developed at Cisco Systems Inc. as a next-generation traffic generator.
+
+T-Rex core-team developers are:
+
+ - Hanoch Haim
+ - Dave Johnson
+ - Wenxian Li
+ - Dan Klein \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/client_utils.rst b/scripts/automation/trex_control_plane/doc/client_utils.rst
new file mode 100755
index 00000000..224dfe19
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/client_utils.rst
@@ -0,0 +1,14 @@
+
+Client Utilities
+================
+
+T-Rex YAML generator
+--------------------
+
+.. automodule:: trex_yaml_gen
+ :members:
+
+General Utilities
+-----------------
+.. automodule:: general_utils
+ :members: \ No newline at end of file
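+
+Usage sketch
+------------
+
+A short, illustrative sketch of building a T-Rex YAML file with :class:`trex_yaml_gen.CTRexYaml`, mirroring the end-to-end example under :doc:`usage_examples` (the path below is a placeholder):
+
+.. code-block:: python
+
+    from trex_yaml_gen import CTRexYaml   # import path may vary with your setup
+
+    # the directory in which pushed files are stored on the T-Rex server
+    yaml_obj = CTRexYaml('/path/on/trex/server/')    # placeholder path
+
+    # register a pcap file and tune its cap-info parameters
+    idx = yaml_obj.add_pcap_file('dns_traffic.pcap')
+    yaml_obj.set_cap_info_param('cps', 1.1, idx)
+
+    # export the configuration and print it for inspection
+    yaml_obj.to_yaml('dns_traffic.yaml')
+    yaml_obj.dump()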
diff --git a/scripts/automation/trex_control_plane/doc/conf.py b/scripts/automation/trex_control_plane/doc/conf.py
new file mode 100755
index 00000000..fb9ea83c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/conf.py
@@ -0,0 +1,303 @@
+# -*- coding: utf-8 -*-
+#
+# T-Rex Control Plane documentation build configuration file, created by
+# sphinx-quickstart on Tue Jun 2 07:48:10 2015.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../client'))
+sys.path.insert(0, os.path.abspath('../client_utils'))
+sys.path.insert(0, os.path.abspath('../examples'))
+sys.path.insert(0, os.path.abspath('../common'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.viewcode',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'T-Rex Control Plane'
+copyright = u'2015, Cisco Systems Inc.'
+author = u'Dan Klein for Cisco Systems Inc.'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '1.7'
+# The full version, including alpha/beta/rc tags.
+release = '1.7.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'sphinx_rtd_theme'
+html_theme_options = {
+# "rightsidebar": "true"
+ }
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'T-RexControlPlanedoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'T-RexControlPlane.tex', u'T-Rex Control Plane Documentation',
+ u'Dan Klein for Cisco Systems Inc', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 't-rexcontrolplane', u'T-Rex Control Plane Documentation',
+ [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'T-RexControlPlane', u'T-Rex Control Plane Documentation',
+ author, 'T-RexControlPlane', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# show documentation for both __init__ methods and class methods
+autoclass_content = "both"
+
+# A workaround for the responsive tables always having annoying scrollbars.
+def setup(app):
+ app.add_stylesheet("no_scrollbars.css") \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/docs_utilities.py b/scripts/automation/trex_control_plane/doc/docs_utilities.py
new file mode 100755
index 00000000..e80d765f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/docs_utilities.py
@@ -0,0 +1,37 @@
+#!/router/bin/python
+
+from texttable import Texttable
+import yaml
+
+
+def handle_data_items(field_yaml_dict):
+ data_items = field_yaml_dict['data']
+ return [ [json_key, meaning['type'], meaning['exp'] ]
+ for json_key,meaning in data_items.items() ]
+
+
+def json_dict_to_txt_table(dict_yaml_file):
+
+ # table = Texttable(max_width=120)
+ with open(dict_yaml_file, 'r') as f:
+ yaml_stream = yaml.load(f)
+
+ for main_field, sub_key in yaml_stream.items():
+ print main_field + ' field' '\n' + '~'*len(main_field+' field') + '\n'
+
+ field_data_rows = handle_data_items(sub_key)
+ table = Texttable(max_width=120)
+ table.set_cols_align(["l", "c", "l"])
+ table.set_cols_valign(["t", "m", "m"])
+ # create table's header
+ table.add_rows([["Sub-key", "Type", "Meaning"]])
+ table.add_rows(field_data_rows, header=False)
+
+
+ print table.draw() + "\n"
+
+
+
+
+
+json_dict_to_txt_table("json_dictionary.yaml") \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/index.rst b/scripts/automation/trex_control_plane/doc/index.rst
new file mode 100755
index 00000000..e7a619d8
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/index.rst
@@ -0,0 +1,57 @@
+.. T-Rex Control Plane documentation master file, created by
+ sphinx-quickstart on Tue Jun 2 07:48:10 2015.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to T-Rex Control Plane's documentation!
+===============================================
+
+T-Rex is a **realistic traffic generator** that enables you to learn more about your devices under development.
+
+This site covers the Python API of the T-Rex control plane and explains how to utilize it for your needs.
+However, since the entire API is JSON-RPC [#f1]_ based, you may want to check out other implementations that could suit you.
+
+
+To fully understand how the API works and how to set up the server side, check out the `API documentation <http://csi-wiki-01:8080/display/bpsim/Documentation>`_ under the documentation section of the T-Rex website.
+
+
+**Use the table of contents below or the menu to your left to navigate through the site**
+
+
+Getting Started
+===============
+.. toctree::
+ :maxdepth: 2
+
+ installation
+ client_utils
+ usage_examples
+
+API Reference
+=============
+.. toctree::
+ :maxdepth: 2
+
+ api/index
+
+About T-Rex
+===========
+.. toctree::
+ :maxdepth: 2
+
+ All about T-Rex <about_trex>
+ license
+
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
+
+.. rubric:: Footnotes
+
+.. [#f1] For more information on JSON-RPC, check out the `official site <http://www.jsonrpc.org/>`_ \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/installation.rst b/scripts/automation/trex_control_plane/doc/installation.rst
new file mode 100755
index 00000000..dda32f56
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/installation.rst
@@ -0,0 +1,25 @@
+============
+Installation
+============
+
+Prerequisites
+-------------
+The T-Rex control plane is based on a client-server model that interacts using JSON-RPC.
+
+In order to use the client-side API documented here, a T-Rex server daemon must be up and listening on the host and port that the client tries to connect to.
+
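+As a quick sanity check, once the daemon is up, connectivity can be verified from Python. The sketch below is illustrative only (the hostname is a placeholder, the import path may vary with your setup, and the daemon is assumed to listen on its default port, 8090):
+
+.. code-block:: python
+
+    from trex_client import CTRexClient
+
+    trex = CTRexClient('trex-host-example')
+    print trex.get_running_status()   # prompts the current T-Rex server status
+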
+Compatibility
+-------------
+Both the client and server side were developed for the Linux platform.
+The client-side module is also compatible with Python on Windows.
+
+The client side can be used with both Python 2 and Python 3.
+However, the server side was designed for, and best fits, Python 2.7.6 and later (all 2.x series, assuming > 2.6.9).
+
+
+Installation manual
+-------------------
+
+T-Rex Control Plane is a cross-platform, cross-operating-system API to control and run T-Rex.
+
+The full, most up-to-date manual (which covers all programming languages) can be found under the `Automation API documentation <http://csi-wiki-01:8080/display/bpsim/Documentation>`_ . \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/doc/json_dictionary.yaml b/scripts/automation/trex_control_plane/doc/json_dictionary.yaml
new file mode 100755
index 00000000..853ded65
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/json_dictionary.yaml
@@ -0,0 +1,252 @@
+################################################################
+#### T-Rex JSON Dictionary definitions ####
+################################################################
+
+
+trex-global :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "trex-global"
+ type :
+ type : int
+ val : 0
+ data :
+ m_cpu_util :
+ type : float
+ exp : "CPU utilization (0-100)"
+ val : 0.0
+ m_platform_factor :
+ type : float
+ exp : "multiplier factor"
+ val : 1.0
+ m_tx_bps :
+ type : float
+ exp : "total tx bit per second"
+ val : 0.0
+ m_rx_bps :
+ type : float
+ exp : "total rx bit per second"
+ val : 0.0
+ m_tx_pps :
+ type : float
+ exp : "total tx packet per second"
+ val : 0.0
+ m_tx_cps :
+ type : float
+ exp : "total tx connection per second"
+ val : 0.0
+ m_tx_expected_cps :
+ type : float
+ exp : "expected tx connection per second"
+ val : 0.0
+ m_tx_expected_pps :
+ type : float
+ exp : "expected tx packet per second"
+ val : 0.0
+ m_tx_expected_bps :
+ type : float
+ exp : "expected tx bit per second"
+ val : 0.0
+ m_rx_drop_bps :
+ type : float
+ exp : "drop rate in bit per second"
+ val : 0.0
+ m_active_flows :
+ type : float
+ exp : "active trex flows"
+ val : 0.0
+ m_open_flows :
+ type : float
+ exp : "open trex flows from startup (monotonically incrementing)"
+ val : 0.0
+ m_total_tx_pkts :
+ type : int
+ exp : "total tx in packets"
+ val : 0
+ m_total_rx_pkts :
+ type : int
+ exp : "total rx in packets"
+ val : 0
+ m_total_tx_bytes :
+ type : int
+ exp : "total tx in bytes"
+ val : 0
+ m_total_rx_bytes :
+ type : int
+ exp : "total rx in bytes"
+ val : 0
+ opackets-# :
+ type : int
+ exp : "output packets (per interface)"
+ val : 0
+ obytes-# :
+ type : int
+ exp : "output bytes (per interface)"
+ val : 0
+ ipackets-# :
+ type : int
+ exp : "input packet (per interface)"
+ val : 0
+ ibytes-# :
+ type : int
+ exp : "input bytes (per interface)"
+ val : 0
+ ierrors-# :
+ type : int
+ exp : "input errors (per interface)"
+ val : 0
+ oerrors-# :
+ type : int
+ exp : "input errors (per interface)"
+ val : 0
+ m_total_tx_bps-# :
+ type : float
+ exp : "total transmitted data in bit per second"
+ val : 0.0
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+
+tx-gen :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "tx-gen"
+ type :
+ type : int
+ val : 0
+ data :
+ realtime-hist :
+ type : dict
+ #exp : "Containing TX history data, by the following keys:\n - min_usec (max_usec): min (max) time packet sent before (after) it was intended to be sent\n - cnt (high_cnt): how many packet were lower than min_usec (higher than max_usec) relative to the time these packets were intended to be injected"
+ exp : "histogram of transmission. See extended information about histogram object under :ref:`histogram-object-fields`. The attribute analyzed is time packet has been sent before/after it was intended to be"
+ val : '{ "min_usec":10, "max_usec":0, "high_cnt":0, "cnt":667866, "s_avg":0.0, "histogram":[] }'
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+
+trex-latecny :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "trex-latecny"
+ type :
+ type : int
+ val : 0
+ data :
+ avg-# :
+ type : float
+ exp : "average latency in usec (per interface)"
+ val : 75.0
+ max-# :
+ type : float
+ exp : "max latency in usec from the test start (per interface)"
+ val : 75.0
+ c-max-# :
+ type : float
+ exp : "max in the last 1 sec window (per interface)"
+ val : 75.0
+ error-# :
+ type : float
+ exp : "errors in latency packets (per interface)"
+ val : 75.0
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+
+
+trex-latecny-v2 :
+ name :
+ type : string
+ exp : "this is a name representation of the main field"
+ val : "trex-latecny-v2"
+ type :
+ type : int
+ val : 0
+ data :
+ cpu_util :
+ type : float
+ exp : "rx thread cpu % (this is not trex DP threads cpu%%)"
+ val : 75.0
+ port-# :
+ type : dict
+ exp : "Containing per interface information. See extended information under ``port-# -> key_name -> sub_key``"
+ val : ''
+ port-#->hist :
+ type : dict
+ exp : "histogram of latency. See extended information about histogram object under :ref:`histogram-object-fields`"
+ val : ''
+ port-#->stats :
+ type : dict
+ exp : "Containing per interface information. See extended information under ``port-# -> key_name -> sub_key``"
+ val : ''
+ port-#->stats->m_tx_pkt_ok :
+ type : int
+ exp : "total of try sent packets"
+ val : 60110
+ port-#->stats->m_pkt_ok :
+ type : int
+ exp : "total of packets sent from hardware"
+ val : 60065
+ port-#->stats->m_no_magic :
+ type : int
+ exp : "rx error with no magic"
+ val : 0
+ port-#->stats->m_no_id :
+ type : int
+ exp : "rx errors with no id"
+ val : 0
+ port-#->stats->m_seq_error :
+ type : int
+ exp : "error in seq number"
+ val : 18
+ port-#->stats->m_length_error :
+ type : int
+ exp : ""
+ val : 0
+ port-#->stats->m_rx_check :
+ type : int
+ exp : "packets tested in rx"
+ val : 407495
+ unknown :
+ type : int
+ exp : ""
+ val : 0
+histogram-obj :
+ name :
+ type : string
+ exp : "this is description of a histogram object being used in number of place throughout the JSON object"
+ val : "histogram-obj"
+ data :
+ min_usec :
+ type : int
+ exp : "min attribute value in usec. pkt with latency less than this value is not counted"
+ val : 10
+ max_usec :
+ type : int
+ exp : "max attribute value in usec"
+ val : 83819
+ high_cnt :
+ type : int
+ exp : "how many packets on which its attribute > min_usec"
+ val : 83819
+ cnt :
+ type : int
+ exp : "total packets from test startup"
+ val : 83819
+ s_avg :
+ type : float
+ exp : "average value from test startup"
+ val : 39.3
+ histogram :
+ type : array
+ exp : "histogram of relevant object by the following keys:\n - key: value in usec \n - val: number of packets"
+ val : '[{"key": 20, "val": 5048}, {"key": 30, "val": 6092}, {"key": 40, "val": 2092}]'
+
+
+
+
diff --git a/scripts/automation/trex_control_plane/doc/license.rst b/scripts/automation/trex_control_plane/doc/license.rst
new file mode 100755
index 00000000..b83dd4b3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/license.rst
@@ -0,0 +1,18 @@
+=======
+License
+=======
+
+
+Copyright 2015 Cisco Systems Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/scripts/automation/trex_control_plane/doc/requirements.rst b/scripts/automation/trex_control_plane/doc/requirements.rst
new file mode 100755
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/requirements.rst
diff --git a/scripts/automation/trex_control_plane/doc/usage_examples.rst b/scripts/automation/trex_control_plane/doc/usage_examples.rst
new file mode 100755
index 00000000..7116f28c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/doc/usage_examples.rst
@@ -0,0 +1,68 @@
+
+Usage Examples
+==============
+
+
+Full-featured interactive shell
+-------------------------------
+
+The `client_interactive_example.py` script extends the built-in Python `Cmd <https://docs.python.org/2/library/cmd.html>`_ class to create a full-featured shell with which one can interact with the T-Rex server and get instant results.
+
+The help menu of this application is:
+
+.. code-block:: text
+
+ usage: client_interactive_example [options]
+
+ Run T-Rex client API demos and scenarios.
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -v, --version show program's version number and exit
+ -t HOST, --trex-host HOST
+ Specify the hostname or ip to connect with T-Rex
+ server.
+ -p PORT, --trex-port PORT
+ Select port on which the T-Rex server listens. Default
+ port is 8090.
+ -m SIZE, --maxhist SIZE
+ Specify maximum history size saved at client side.
+ Default size is 100.
+ --verbose Switch ON verbose option at T-Rex client. Default is:
+ OFF.
+
+**Code Excerpt**
+
+.. literalinclude:: ../examples/client_interactive_example.py
+ :language: python
+ :emphasize-lines: 0
+ :linenos:
+
+
+End-to-End cycle
+----------------
+
+This example (``pkt_generation_for_trex.py``) demonstrates a full cycle of using the API.
+
+.. note:: This module uses `Scapy <http://www.secdev.org/projects/scapy/doc/usage.html>`_ in order to generate packets to be used as the basis of the traffic injection. It is recommended to *install* this module in order to run the example.
+
+The demo takes the user full circle:
+ 1. Generating packets (using Scapy).
+ 2. Exporting the generated packets into a .pcap file named `dns_traffic.pcap`.
+ 3. Using the :class:`trex_yaml_gen.CTRexYaml` generator to include that pcap file in the YAML object.
+ 4. Exporting the YAML object to a YAML file named `dns_traffic.yaml`.
+ 5. Pushing the generated files to the T-Rex server.
+ 6. Running T-Rex based on the generated (and pushed) files.
+
+**Code Excerpt** [#f1]_
+
+.. literalinclude:: ../examples/pkt_generation_for_trex.py
+ :language: python
+ :lines: 10-
+ :emphasize-lines: 32,36,42,46,51,60,63-69,76-80
+ :linenos:
+
+
+.. rubric:: Footnotes
+
+.. [#f1] The marked code lines correspond to the steps mentioned above. \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/examples/__init__.py b/scripts/automation/trex_control_plane/examples/__init__.py
new file mode 100755
index 00000000..d3f5a12f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/__init__.py
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/examples/client_interactive_example.py b/scripts/automation/trex_control_plane/examples/client_interactive_example.py
new file mode 100755
index 00000000..e8d358a9
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/client_interactive_example.py
@@ -0,0 +1,256 @@
+#!/router/bin/python-2.7.4
+
+import trex_root_path
+from client.trex_client import *
+from common.trex_exceptions import *
+import cmd
+from python_lib.termstyle import termstyle
+import os
+from argparse import ArgumentParser
+from pprint import pprint
+import json
+import time
+import socket
+import errno
+
+
+class InteractiveTRexClient(cmd.Cmd):
+
+ intro = termstyle.green("\nInteractive shell to play with Cisco's T-Rex API.\nType help to view available pre-defined scenarios\n(c) All rights reserved.\n")
+ prompt = '> '
+
+ def __init__(self, trex_host, max_history_size = 100, trex_port = 8090, verbose_mode = False ):
+ cmd.Cmd.__init__(self)
+ self.verbose = verbose_mode
+ self.trex = CTRexClient(trex_host, max_history_size, trex_daemon_port = trex_port, verbose = verbose_mode)
+ self.DEFAULT_RUN_PARAMS = dict(c = 4,
+ m = 1.5,
+ nc = True,
+ p = True,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ l = 1000)
+ self.run_params = self.DEFAULT_RUN_PARAMS
+ self.decoder = json.JSONDecoder()
+
+
+ def do_push_files (self, filepaths):
+ """Pushes a custom file to be stored locally on T-Rex server.\nPush multiple files by spefiying their path separated by ' ' (space)."""
+ try:
+ filepaths = filepaths.split(' ')
+ print termstyle.green("*** Starting pushing files ({trex_files}) to T-Rex. ***".format (trex_files = ', '.join(filepaths)) )
+ ret_val = self.trex.push_files(filepaths)
+ if ret_val:
+ print termstyle.green("*** End of T-Rex push_files method (success) ***")
+ else:
+ print termstyle.magenta("*** End of T-Rex push_files method (failed) ***")
+
+ except IOError as inst:
+ print termstyle.magenta(inst)
+
+ def do_show_default_run_params(self,line):
+ """Outputs the default T-Rex running parameters"""
+ pprint(self.DEFAULT_RUN_PARAMS)
+ print termstyle.green("*** End of default T-Rex running parameters ***")
+
+ def do_show_run_params(self,line):
+ """Outputs the currently configured T-Rex running parameters"""
+ pprint(self.run_params)
+ print termstyle.green("*** End of T-Rex running parameters ***")
+
+ def do_update_run_params(self, json_str):
+ """Updates provided parameters on T-Rex running configuration. Provide using JSON string"""
+ if json_str:
+ try:
+ upd_params = self.decoder.decode(json_str)
+ self.run_params.update(upd_params)
+ print termstyle.green("*** End of T-Rex parameters update ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal JSON string. Please try again.\n[", inst,"]")
+ else:
+ print termstyle.magenta("JSON configuration string is missing. Please try again.")
+
+ def do_show_status (self, line):
+ """Prompts T-Rex current status"""
+ print self.trex.get_running_status()
+ print termstyle.green("*** End of T-Rex status prompt ***")
+
+ def do_show_trex_files_path (self, line):
+ """Prompts the local path in which files are stored when pushed to t-rex server from client"""
+ print self.trex.get_trex_files_path()
+ print termstyle.green("*** End of trex_files_path prompt ***")
+
+ def do_show_reservation_status (self, line):
+ """Prompts if T-Rex is currently reserved or not"""
+ if self.trex.is_reserved():
+ print "T-Rex is reserved"
+ else:
+ print "T-Rex is NOT reserved"
+ print termstyle.green("*** End of reservation status prompt ***")
+
+ def do_reserve_trex (self, user):
+ """Reserves the usage of T-Rex to a certain user"""
+ try:
+ if not user:
+ ret = self.trex.reserve_trex()
+ else:
+ ret = self.trex.reserve_trex(user.split(' ')[0])
+ print termstyle.green("*** T-Rex reserved successfully ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_cancel_reservation (self, user):
+ """Cancels a current reservation of T-Rex to a certain user"""
+ try:
+ if not user:
+ ret = self.trex.cancel_reservation()
+ else:
+ ret = self.trex.cancel_reservation(user.split(' ')[0])
+ print termstyle.green("*** T-Rex reservation canceled successfully ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_restore_run_default (self, line):
+ """Restores original T-Rex running configuration"""
+ self.run_params = self.DEFAULT_RUN_PARAMS
+ print termstyle.green("*** End of restoring default run parameters ***")
+
+ def do_run_until_finish (self, sample_rate):
+ """Starts T-Rex and sample server until run is done."""
+ print termstyle.green("*** Starting T-Rex run_until_finish scenario ***")
+
+ if not sample_rate: # use default sample rate if not passed
+ sample_rate = 5
+ try:
+ sample_rate = int(sample_rate)
+ ret = self.trex.start_trex(**self.run_params)
+ self.trex.sample_to_run_finish(sample_rate)
+ print termstyle.green("*** End of T-Rex run ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_run_and_poll (self, sample_rate):
+ """Starts T-Rex and sample server manually until run is done."""
+ print termstyle.green("*** Starting T-Rex run and manually poll scenario ***")
+ if not sample_rate: # use default sample rate if not passed
+ sample_rate = 5
+ try:
+ sample_rate = int(sample_rate)
+ ret = self.trex.start_trex(**self.run_params)
+ last_res = dict()
+ while self.trex.is_running(dump_out = last_res):
+ obj = self.trex.get_result_obj()
+ if (self.verbose):
+ print obj
+ # do WHATEVER here
+ time.sleep(sample_rate)
+
+ print termstyle.green("*** End of T-Rex run ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+
+ def do_run_until_condition (self, sample_rate):
+ """Starts T-Rex and sample server until condition is satisfied."""
+ print termstyle.green("*** Starting T-Rex run until condition is satisfied scenario ***")
+
+ def condition (result_obj):
+ return result_obj.get_current_tx_rate()['m_tx_pps'] > 200000
+
+ if not sample_rate: # use default sample rate if not passed
+ sample_rate = 5
+ try:
+ sample_rate = int(sample_rate)
+ ret = self.trex.start_trex(**self.run_params)
+ ret_val = self.trex.sample_until_condition(condition, sample_rate)
+ print ret_val
+ print termstyle.green("*** End of T-Rex run ***")
+ except ValueError as inst:
+ print termstyle.magenta("Provided illegal sample rate value. Please try again.\n[", inst,"]")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_start_and_return (self, line):
+ """Start T-Rex run and once in 'Running' mode, return to cmd prompt"""
+ print termstyle.green("*** Starting T-Rex run, wait until in 'Running' state ***")
+ try:
+ ret = self.trex.start_trex(**self.run_params)
+ print termstyle.green("*** End of scenario (T-Rex is probably still running!) ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_poll_once (self, line):
+ """Performs a single poll of T-Rex current data dump (if T-Rex is running) and prompts and short version of latest result_obj"""
+ print termstyle.green("*** Trying T-Rex single poll ***")
+ try:
+ last_res = dict()
+ if self.trex.is_running(dump_out = last_res):
+ obj = self.trex.get_result_obj()
+ print obj
+ else:
+ print termstyle.magenta("T-Rex isn't currently running.")
+ print termstyle.green("*** End of scenario (T-Rex is posssibly still running!) ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+
+ def do_stop_trex (self, line):
+ """Try to stop T-Rex run (if T-Rex is currently running)"""
+ print termstyle.green("*** Starting T-Rex termination ***")
+ try:
+ ret = self.trex.stop_trex()
+ print termstyle.green("*** End of scenario (T-Rex is not running now) ***")
+ except TRexException as inst:
+ print termstyle.red(inst)
+
+ def do_kill_indiscriminately (self, line):
+ """Force killing of running T-Rex process (if exists) on the server."""
+ print termstyle.green("*** Starting T-Rex termination ***")
+ ret = self.trex.force_kill()
+ if ret:
+ print termstyle.green("*** End of scenario (T-Rex is not running now) ***")
+ elif ret is None:
+ print termstyle.magenta("*** End of scenario (T-Rex termination aborted) ***")
+ else:
+ print termstyle.red("*** End of scenario (T-Rex termination failed) ***")
+
+ def do_exit(self, arg):
+ """Quits the application"""
+ print termstyle.cyan('Bye Bye!')
+ return True
+
+
+if __name__ == "__main__":
+ parser = ArgumentParser(description = termstyle.cyan('Run T-Rex client API demos and scenarios.'),
+ usage = """client_interactive_example [options]""" )
+
+ parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0 \t (C) Cisco Systems Inc.\n')
+
+ parser.add_argument("-t", "--trex-host", required = True, dest="trex_host",
+ action="store", help="Specify the hostname or ip to connect with T-Rex server.",
+ metavar="HOST" )
+ parser.add_argument("-p", "--trex-port", type=int, default = 8090, metavar="PORT", dest="trex_port",
+ help="Select port on which the T-Rex server listens. Default port is 8090.", action="store")
+ parser.add_argument("-m", "--maxhist", type=int, default = 100, metavar="SIZE", dest="hist_size",
+ help="Specify maximum history size saved at client side. Default size is 100.", action="store")
+ parser.add_argument("--verbose", dest="verbose",
+ action="store_true", help="Switch ON verbose option at T-Rex client. Default is: OFF.",
+ default = False )
+ args = parser.parse_args()
+
+ try:
+ InteractiveTRexClient(args.trex_host, args.hist_size, args.trex_port, args.verbose).cmdloop()
+
+ except KeyboardInterrupt:
+ print termstyle.cyan('Bye Bye!')
+ exit(-1)
+ except socket.error, e:
+ if e.errno == errno.ECONNREFUSED:
+ raise socket.error(errno.ECONNREFUSED, "Connection from T-Rex server was terminated. Please make sure the server is up.")
+
+
+
diff --git a/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py b/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
new file mode 100755
index 00000000..7e7f6139
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/pkt_generation_for_trex.py
@@ -0,0 +1,105 @@
+#!/router/bin/python
+
+######################################################################################
+### ###
+### T-Rex end-to-end demo script, written by T-Rex dev-team ###
+### THIS SCRIPT ASSUMES PyYaml and Scapy INSTALLED ON PYTHON'S RUNNING MACHINE ###
+### (for any question please contact trex-dev team @ trex-dev@cisco.com) ###
+### ###
+######################################################################################
+
+
+import logging
+import time
+import trex_root_path
+from client.trex_client import *
+from client_utils.general_utils import *
+from client_utils.trex_yaml_gen import *
+from pprint import pprint
+from argparse import ArgumentParser
+
+# import scapy package
+logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # supress scapy import warnings from being displayed
+from scapy.all import *
+
+
+def generate_dns_packets (src_ip, dst_ip):
+ dns_rqst = Ether(src='00:15:17:a7:75:a3', dst='e0:5f:b9:69:e9:22')/IP(src=src_ip,dst=dst_ip,version=4L)/UDP(dport=53, sport=1030)/DNS(rd=1,qd=DNSQR(qname="www.cisco.com"))
+ dns_resp = Ether(src='e0:5f:b9:69:e9:22', dst='00:15:17:a7:75:a3')/IP(src=dst_ip,dst=src_ip,version=4L)/UDP(dport=1030, sport=53)/DNS(aa=1L, qr=1L, an=DNSRR(rclass=1, rrname='www.cisco.com.', rdata='100.100.100.100', type=1), ad=0L, qdcount=1, ns=None, tc=0L, rd=0L, ar=None, opcode=0L, ra=1L, cd=0L, z=0L, rcode=0L, qd=DNSQR(qclass=1, qtype=1, qname='www.cisco.com.'))
+ return [dns_rqst, dns_resp]
+
+def pkts_to_pcap (pcap_filename, packets):
+ wrpcap(pcap_filename, packets)
+
+
+def main (args):
+ # instantiate T-Rex client
+ trex = CTRexClient('trex-dan', verbose = args.verbose)
+
+ if args.steps:
+ print "\nNext step: .pcap generation."
+ raw_input("Press Enter to continue...")
+ # generate T-Rex traffic.
+ pkts = generate_dns_packets('21.0.0.2', '22.0.0.12') # In this case - DNS traffic (request-response)
+ print "\ngenerated traffic:"
+ print "=================="
+ map(lambda x: pprint(x.summary()) , pkts)
+ pkts_to_pcap("dns_traffic.pcap", pkts) # Export the generated to a .pcap file
+
+ if args.steps:
+ print "\nNext step: .yaml generation."
+ raw_input("Press Enter to continue...")
+ # Generate .yaml file that uses the generated .pcap file
+ trex_files_path = trex.get_trex_files_path() # fetch the path in which packets are saved on T-Rex server
+ yaml_obj = CTRexYaml(trex_files_path) # instantiate CTRexYaml obj
+
+ # set .yaml file parameters according to need and use
+ ret_idx = yaml_obj.add_pcap_file("dns_traffic.pcap")
+ yaml_obj.set_cap_info_param('cps', 1.1, ret_idx)
+
+ # export yaml_ob to .yaml file
+ yaml_file_path = trex_files_path + 'dns_traffic.yaml'
+ yaml_obj.to_yaml('dns_traffic.yaml')
+ print "\ngenerated .yaml file:"
+ print "===================="
+ yaml_obj.dump()
+
+ if args.steps:
+ print "\nNext step: run T-Rex with provided files."
+ raw_input("Press Enter to continue...")
+ # push all relevant files to server
+ trex.push_files( yaml_obj.get_file_list() )
+
+ print "\nStarting T-Rex..."
+ trex.start_trex(c = 2,
+ m = 1.5,
+ nc = True,
+ p = True,
+ d = 30,
+ f = yaml_file_path, # <-- we use our generated .yaml file here
+ l = 1000)
+
+ if args.verbose:
+ print "T-Rex state changed to 'Running'."
+ print "Sampling T-Rex in 0.2 samples/sec (single sample every 5 secs)"
+
+ last_res = dict()
+ while trex.is_running(dump_out = last_res):
+ print "CURRENT RESULT OBJECT:"
+ obj = trex.get_result_obj()
+ print obj
+ time.sleep(5)
+
+
+if __name__ == "__main__":
+ parser = ArgumentParser(description = 'Run T-Rex client API end-to-end example.',
+ usage = """pkt_generation_for_trex [options]""" )
+
+ parser.add_argument("-s", "--step-by-step", dest="steps",
+ action="store_false", help="Switch OFF step-by-step script overview. Default is: ON.",
+ default = True )
+ parser.add_argument("--verbose", dest="verbose",
+ action="store_true", help="Switch ON verbose option at T-Rex client. Default is: OFF.",
+ default = False )
+ args = parser.parse_args()
+ main(args) \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/examples/trex_root_path.py b/scripts/automation/trex_control_plane/examples/trex_root_path.py
new file mode 100755
index 00000000..3aefd1d2
--- /dev/null
+++ b/scripts/automation/trex_control_plane/examples/trex_root_path.py
@@ -0,0 +1,15 @@
+#!/router/bin/python
+
+import os
+import sys
+
+def add_root_to_path ():
+ """adds trex_control_plane root dir to script path, up to `depth` parent dirs"""
+ root_dirname = 'trex_control_plane'
+ file_path = os.path.dirname(os.path.realpath(__file__))
+
+ components = file_path.split(os.sep)
+ sys.path.append( str.join(os.sep, components[:components.index(root_dirname)+1]) )
+ return
+
+add_root_to_path()
diff --git a/scripts/automation/trex_control_plane/python_lib/__init__.py b/scripts/automation/trex_control_plane/python_lib/__init__.py
new file mode 100755
index 00000000..d3f5a12f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/__init__.py
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/PKG-INFO
new file mode 100755
index 00000000..428ce0e3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/PKG-INFO
@@ -0,0 +1,746 @@
+Metadata-Version: 1.1
+Name: enum34
+Version: 1.0.4
+Summary: Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4
+Home-page: https://pypi.python.org/pypi/enum34
+Author: Ethan Furman
+Author-email: ethan@stoneleaf.us
+License: BSD License
+Description: ``enum`` --- support for enumerations
+ ========================================
+
+ .. :synopsis: enumerations are sets of symbolic names bound to unique, constant
+ values.
+ .. :moduleauthor:: Ethan Furman <ethan@stoneleaf.us>
+ .. :sectionauthor:: Barry Warsaw <barry@python.org>,
+ .. :sectionauthor:: Eli Bendersky <eliben@gmail.com>,
+ .. :sectionauthor:: Ethan Furman <ethan@stoneleaf.us>
+
+ ----------------
+
+ An enumeration is a set of symbolic names (members) bound to unique, constant
+ values. Within an enumeration, the members can be compared by identity, and
+ the enumeration itself can be iterated over.
+
+
+ Module Contents
+ ---------------
+
+ This module defines two enumeration classes that can be used to define unique
+ sets of names and values: ``Enum`` and ``IntEnum``. It also defines
+ one decorator, ``unique``.
+
+ ``Enum``
+
+ Base class for creating enumerated constants. See section `Functional API`_
+ for an alternate construction syntax.
+
+ ``IntEnum``
+
+ Base class for creating enumerated constants that are also subclasses of ``int``.
+
+ ``unique``
+
+ Enum class decorator that ensures only one name is bound to any one value.
+
+
+ Creating an Enum
+ ----------------
+
+ Enumerations are created using the ``class`` syntax, which makes them
+ easy to read and write. An alternative creation method is described in
+ `Functional API`_. To define an enumeration, subclass ``Enum`` as
+ follows::
+
+ >>> from enum import Enum
+ >>> class Color(Enum):
+ ... red = 1
+ ... green = 2
+ ... blue = 3
+
+ Note: Nomenclature
+
+ - The class ``Color`` is an *enumeration* (or *enum*)
+ - The attributes ``Color.red``, ``Color.green``, etc., are
+ *enumeration members* (or *enum members*).
+ - The enum members have *names* and *values* (the name of
+ ``Color.red`` is ``red``, the value of ``Color.blue`` is
+ ``3``, etc.)
+
+ Note:
+
+ Even though we use the ``class`` syntax to create Enums, Enums
+ are not normal Python classes. See `How are Enums different?`_ for
+ more details.
+
+ Enumeration members have human readable string representations::
+
+ >>> print(Color.red)
+ Color.red
+
+ ...while their ``repr`` has more information::
+
+ >>> print(repr(Color.red))
+ <Color.red: 1>
+
+ The *type* of an enumeration member is the enumeration it belongs to::
+
+ >>> type(Color.red)
+ <enum 'Color'>
+ >>> isinstance(Color.green, Color)
+ True
+ >>>
+
+ Enum members also have a property that contains just their item name::
+
+ >>> print(Color.red.name)
+ red
+
+ Enumerations support iteration. In Python 3.x definition order is used; in
+ Python 2.x the definition order is not available, but class attribute
+ ``__order__`` is supported; otherwise, value order is used::
+
+ >>> class Shake(Enum):
+ ... __order__ = 'vanilla chocolate cookies mint' # only needed in 2.x
+ ... vanilla = 7
+ ... chocolate = 4
+ ... cookies = 9
+ ... mint = 3
+ ...
+ >>> for shake in Shake:
+ ... print(shake)
+ ...
+ Shake.vanilla
+ Shake.chocolate
+ Shake.cookies
+ Shake.mint
+
+ The ``__order__`` attribute is always removed, and in 3.x it is also ignored
+ (order is definition order); however, in the stdlib version it will be ignored
+ but not removed.
+
+ Enumeration members are hashable, so they can be used in dictionaries and sets::
+
+ >>> apples = {}
+ >>> apples[Color.red] = 'red delicious'
+ >>> apples[Color.green] = 'granny smith'
+ >>> apples == {Color.red: 'red delicious', Color.green: 'granny smith'}
+ True
+
+
+ Programmatic access to enumeration members and their attributes
+ ---------------------------------------------------------------
+
+ Sometimes it's useful to access members in enumerations programmatically (i.e.
+ situations where ``Color.red`` won't do because the exact color is not known
+ at program-writing time). ``Enum`` allows such access::
+
+ >>> Color(1)
+ <Color.red: 1>
+ >>> Color(3)
+ <Color.blue: 3>
+
+ If you want to access enum members by *name*, use item access::
+
+ >>> Color['red']
+ <Color.red: 1>
+ >>> Color['green']
+ <Color.green: 2>
+
+ If you have an enum member and need its ``name`` or ``value``::
+
+ >>> member = Color.red
+ >>> member.name
+ 'red'
+ >>> member.value
+ 1
+
+
+ Duplicating enum members and values
+ -----------------------------------
+
+ Having two enum members (or any other attribute) with the same name is invalid;
+ in Python 3.x this would raise an error, but in Python 2.x the second member
+ simply overwrites the first::
+
+ >>> # python 2.x
+ >>> class Shape(Enum):
+ ... square = 2
+ ... square = 3
+ ...
+ >>> Shape.square
+ <Shape.square: 3>
+
+ >>> # python 3.x
+ >>> class Shape(Enum):
+ ... square = 2
+ ... square = 3
+ Traceback (most recent call last):
+ ...
+ TypeError: Attempted to reuse key: 'square'
+
+ However, two enum members are allowed to have the same value. Given two members
+ A and B with the same value (and A defined first), B is an alias to A. By-value
+ lookup of the value of A and B will return A. By-name lookup of B will also
+ return A::
+
+ >>> class Shape(Enum):
+ ... __order__ = 'square diamond circle alias_for_square' # only needed in 2.x
+ ... square = 2
+ ... diamond = 1
+ ... circle = 3
+ ... alias_for_square = 2
+ ...
+ >>> Shape.square
+ <Shape.square: 2>
+ >>> Shape.alias_for_square
+ <Shape.square: 2>
+ >>> Shape(2)
+ <Shape.square: 2>
+
+
+ Allowing aliases is not always desirable. ``unique`` can be used to ensure
+ that none exist in a particular enumeration::
+
+ >>> from enum import unique
+ >>> @unique
+ ... class Mistake(Enum):
+ ... __order__ = 'one two three four' # only needed in 2.x
+ ... one = 1
+ ... two = 2
+ ... three = 3
+ ... four = 3
+ Traceback (most recent call last):
+ ...
+ ValueError: duplicate names found in <enum 'Mistake'>: four -> three
+
+ Iterating over the members of an enum does not provide the aliases::
+
+ >>> list(Shape)
+ [<Shape.square: 2>, <Shape.diamond: 1>, <Shape.circle: 3>]
+
+ The special attribute ``__members__`` is a dictionary mapping names to members.
+ It includes all names defined in the enumeration, including the aliases::
+
+ >>> for name, member in sorted(Shape.__members__.items()):
+ ... name, member
+ ...
+ ('alias_for_square', <Shape.square: 2>)
+ ('circle', <Shape.circle: 3>)
+ ('diamond', <Shape.diamond: 1>)
+ ('square', <Shape.square: 2>)
+
+ The ``__members__`` attribute can be used for detailed programmatic access to
+ the enumeration members. For example, finding all the aliases::
+
+ >>> [name for name, member in Shape.__members__.items() if member.name != name]
+ ['alias_for_square']
+
+ Comparisons
+ -----------
+
+ Enumeration members are compared by identity::
+
+ >>> Color.red is Color.red
+ True
+ >>> Color.red is Color.blue
+ False
+ >>> Color.red is not Color.blue
+ True
+
+ Ordered comparisons between enumeration values are *not* supported. Enum
+ members are not integers (but see `IntEnum`_ below)::
+
+ >>> Color.red < Color.blue
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: unorderable types: Color() < Color()
+
+ .. warning::
+
+ In Python 2 *everything* is ordered, even though the ordering may not
+ make sense. If you want your enumerations to have a sensible ordering
+ check out the `OrderedEnum`_ recipe below.
+
+
+ Equality comparisons are defined though::
+
+ >>> Color.blue == Color.red
+ False
+ >>> Color.blue != Color.red
+ True
+ >>> Color.blue == Color.blue
+ True
+
+ Comparisons against non-enumeration values will always compare not equal
+ (again, ``IntEnum`` was explicitly designed to behave differently, see
+ below)::
+
+ >>> Color.blue == 2
+ False
+
+
+ Allowed members and attributes of enumerations
+ ----------------------------------------------
+
+ The examples above use integers for enumeration values. Using integers is
+ short and handy (and provided by default by the `Functional API`_), but not
+ strictly enforced. In the vast majority of use-cases, one doesn't care what
+ the actual value of an enumeration is. But if the value *is* important,
+ enumerations can have arbitrary values.
+
+ Enumerations are Python classes, and can have methods and special methods as
+ usual. If we have this enumeration::
+
+ >>> class Mood(Enum):
+ ... funky = 1
+ ... happy = 3
+ ...
+ ... def describe(self):
+ ... # self is the member here
+ ... return self.name, self.value
+ ...
+ ... def __str__(self):
+ ... return 'my custom str! {0}'.format(self.value)
+ ...
+ ... @classmethod
+ ... def favorite_mood(cls):
+ ... # cls here is the enumeration
+ ... return cls.happy
+
+ Then::
+
+ >>> Mood.favorite_mood()
+ <Mood.happy: 3>
+ >>> Mood.happy.describe()
+ ('happy', 3)
+ >>> str(Mood.funky)
+ 'my custom str! 1'
+
+ The rules for what is allowed are as follows: _sunder_ names (starting and
+ ending with a single underscore) are reserved by enum and cannot be used;
+ all other attributes defined within an enumeration will become members of this
+ enumeration, with the exception of *__dunder__* names and descriptors (methods
+ are also descriptors).
+
+ Note:
+
+ If your enumeration defines ``__new__`` and/or ``__init__`` then
+ whatever value(s) were given to the enum member will be passed into
+ those methods. See `Planet`_ for an example.
+
+
+ Restricted subclassing of enumerations
+ --------------------------------------
+
+ Subclassing an enumeration is allowed only if the enumeration does not define
+ any members. So this is forbidden::
+
+ >>> class MoreColor(Color):
+ ... pink = 17
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot extend enumerations
+
+ But this is allowed::
+
+ >>> class Foo(Enum):
+ ... def some_behavior(self):
+ ... pass
+ ...
+ >>> class Bar(Foo):
+ ... happy = 1
+ ... sad = 2
+ ...
+
+ Allowing subclassing of enums that define members would lead to a violation of
+ some important invariants of types and instances. On the other hand, it makes
+ sense to allow sharing some common behavior between a group of enumerations.
+ (See `OrderedEnum`_ for an example.)
+
+
+ Pickling
+ --------
+
+ Enumerations can be pickled and unpickled::
+
+ >>> from enum.test_enum import Fruit
+ >>> from pickle import dumps, loads
+ >>> Fruit.tomato is loads(dumps(Fruit.tomato, 2))
+ True
+
+ The usual restrictions for pickling apply: picklable enums must be defined in
+ the top level of a module, since unpickling requires them to be importable
+ from that module.
+
+ Note:
+
+ With pickle protocol version 4 (introduced in Python 3.4) it is possible
+ to easily pickle enums nested in other classes.
+
+
+
+ Functional API
+ --------------
+
+ The ``Enum`` class is callable, providing the following functional API::
+
+ >>> Animal = Enum('Animal', 'ant bee cat dog')
+ >>> Animal
+ <enum 'Animal'>
+ >>> Animal.ant
+ <Animal.ant: 1>
+ >>> Animal.ant.value
+ 1
+ >>> list(Animal)
+ [<Animal.ant: 1>, <Animal.bee: 2>, <Animal.cat: 3>, <Animal.dog: 4>]
+
+ The semantics of this API resemble ``namedtuple``. The first argument
+ of the call to ``Enum`` is the name of the enumeration.
+
+ The second argument is the *source* of enumeration member names. It can be a
+ whitespace-separated string of names, a sequence of names, a sequence of
+ 2-tuples with key/value pairs, or a mapping (e.g. dictionary) of names to
+ values. The last two options enable assigning arbitrary values to
+ enumerations; the others auto-assign increasing integers starting with 1. A
+ new class derived from ``Enum`` is returned. In other words, the above
+ assignment to ``Animal`` is equivalent to::
+
+ >>> class Animals(Enum):
+ ... ant = 1
+ ... bee = 2
+ ... cat = 3
+ ... dog = 4
+
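+ The two value-assigning forms (a sequence of pairs or a mapping) make the
+ intended values explicit; a brief illustrative sketch::
+
+ >>> Animal = Enum('Animal', [('ant', 1), ('bee', 2), ('cat', 3), ('dog', 4)])
+ >>> Animal.cat
+ <Animal.cat: 3>
+ >>> Animal = Enum('Animal', {'ant': 1, 'bee': 2})
+ >>> Animal.bee.value
+ 2
+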
+ Pickling enums created with the functional API can be tricky as frame stack
+ implementation details are used to try and figure out which module the
+ enumeration is being created in (e.g. it will fail if you use a utility
+ function in a separate module, and also may not work on IronPython or Jython).
+ The solution is to specify the module name explicitly as follows::
+
+ >>> Animals = Enum('Animals', 'ant bee cat dog', module=__name__)
+
+ Derived Enumerations
+ --------------------
+
+ IntEnum
+ ^^^^^^^
+
+ A variation of ``Enum`` is provided which is also a subclass of
+ ``int``. Members of an ``IntEnum`` can be compared to integers;
+ by extension, integer enumerations of different types can also be compared
+ to each other::
+
+ >>> from enum import IntEnum
+ >>> class Shape(IntEnum):
+ ... circle = 1
+ ... square = 2
+ ...
+ >>> class Request(IntEnum):
+ ... post = 1
+ ... get = 2
+ ...
+ >>> Shape == 1
+ False
+ >>> Shape.circle == 1
+ True
+ >>> Shape.circle == Request.post
+ True
+
+ However, they still can't be compared to standard ``Enum`` enumerations::
+
+ >>> class Shape(IntEnum):
+ ... circle = 1
+ ... square = 2
+ ...
+ >>> class Color(Enum):
+ ... red = 1
+ ... green = 2
+ ...
+ >>> Shape.circle == Color.red
+ False
+
+ ``IntEnum`` values behave like integers in other ways you'd expect::
+
+ >>> int(Shape.circle)
+ 1
+ >>> ['a', 'b', 'c'][Shape.circle]
+ 'b'
+ >>> [i for i in range(Shape.square)]
+ [0, 1]
+
+ For the vast majority of code, ``Enum`` is strongly recommended,
+ since ``IntEnum`` breaks some semantic promises of an enumeration (by
+ being comparable to integers, and thus by transitivity to other
+ unrelated enumerations). It should be used only in special cases where
+ there's no other choice; for example, when integer constants are
+ replaced with enumerations and backwards compatibility is required with code
+ that still expects integers.
+
+
+ Others
+ ^^^^^^
+
+ While ``IntEnum`` is part of the ``enum`` module, it would be very
+ simple to implement independently::
+
+ class IntEnum(int, Enum):
+ pass
+
+ This demonstrates how similar derived enumerations can be defined; for example
+ a ``StrEnum`` that mixes in ``str`` instead of ``int``.
+
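+ A minimal sketch of such a ``StrEnum`` (the member names here are purely
+ illustrative)::
+
+ >>> class StrEnum(str, Enum):
+ ...     pass
+ ...
+ >>> class Vowel(StrEnum):
+ ...     a = 'a'
+ ...     e = 'e'
+ ...
+ >>> Vowel.a.value
+ 'a'
+ >>> isinstance(Vowel.e, str)
+ True
+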
+ Some rules:
+
+ 1. When subclassing ``Enum``, mix-in types must appear before
+ ``Enum`` itself in the sequence of bases, as in the ``IntEnum``
+ example above.
+ 2. While ``Enum`` can have members of any type, once you mix in an
+ additional type, all the members must have values of that type, e.g.
+ ``int`` above. This restriction does not apply to mix-ins which only
+ add methods and don't specify another data type such as ``int`` or
+ ``str``.
+ 3. When another data type is mixed in, the ``value`` attribute is *not the
+ same* as the enum member itself, although it is equivalent and will compare
+ equal.
+ 4. %-style formatting: ``%s`` and ``%r`` call ``Enum``'s ``__str__`` and
+ ``__repr__`` respectively; other codes (such as ``%i`` or ``%h`` for
+ IntEnum) treat the enum member as its mixed-in type.
+
+ Note: Prior to Python 3.4 there is a bug in ``str``'s %-formatting: ``int``
+ subclasses are printed as strings and not numbers when the ``%d``, ``%i``,
+ or ``%u`` codes are used.
+ 5. ``str.__format__`` (or ``format``) will use the mixed-in
+ type's ``__format__``. If the ``Enum``'s ``str`` or
+ ``repr`` is desired use the ``!s`` or ``!r`` ``str`` format codes.
+
+
+ Decorators
+ ----------
+
+ unique
+ ^^^^^^
+
+ A ``class`` decorator specifically for enumerations. It searches an
+ enumeration's ``__members__`` gathering any aliases it finds; if any are
+ found ``ValueError`` is raised with the details::
+
+ >>> @unique
+ ... class NoDupes(Enum):
+ ... first = 'one'
+ ... second = 'two'
+ ... third = 'two'
+ Traceback (most recent call last):
+ ...
+ ValueError: duplicate names found in <enum 'NoDupes'>: third -> second
+
+
+ Interesting examples
+ --------------------
+
+ While ``Enum`` and ``IntEnum`` are expected to cover the majority of
+ use-cases, they cannot cover them all. Here are recipes for some different
+ types of enumerations that can be used directly, or as examples for creating
+ one's own.
+
+
+ AutoNumber
+ ^^^^^^^^^^
+
+ Avoids having to specify the value for each enumeration member::
+
+ >>> class AutoNumber(Enum):
+ ... def __new__(cls):
+ ... value = len(cls.__members__) + 1
+ ... obj = object.__new__(cls)
+ ... obj._value_ = value
+ ... return obj
+ ...
+ >>> class Color(AutoNumber):
+ ... __order__ = "red green blue" # only needed in 2.x
+ ... red = ()
+ ... green = ()
+ ... blue = ()
+ ...
+ >>> Color.green.value == 2
+ True
+
+ Note:
+
+ The `__new__` method, if defined, is used during creation of the Enum
+ members; it is then replaced by Enum's `__new__` which is used after
+ class creation for lookup of existing members. Due to the way Enums are
+ supposed to behave, there is no way to customize Enum's `__new__`.
+
+
+ UniqueEnum
+ ^^^^^^^^^^
+
+ Raises an error if a duplicate member name is found instead of creating an
+ alias::
+
+ >>> class UniqueEnum(Enum):
+ ... def __init__(self, *args):
+ ... cls = self.__class__
+ ... if any(self.value == e.value for e in cls):
+ ... a = self.name
+ ... e = cls(self.value).name
+ ... raise ValueError(
+ ... "aliases not allowed in UniqueEnum: %r --> %r"
+ ... % (a, e))
+ ...
+ >>> class Color(UniqueEnum):
+ ... red = 1
+ ... green = 2
+ ... blue = 3
+ ... grene = 2
+ Traceback (most recent call last):
+ ...
+ ValueError: aliases not allowed in UniqueEnum: 'grene' --> 'green'
+
+
+ OrderedEnum
+ ^^^^^^^^^^^
+
+ An ordered enumeration that is not based on ``IntEnum`` and so maintains
+ the normal ``Enum`` invariants (such as not being comparable to other
+ enumerations)::
+
+ >>> class OrderedEnum(Enum):
+ ... def __ge__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ >= other._value_
+ ... return NotImplemented
+ ... def __gt__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ > other._value_
+ ... return NotImplemented
+ ... def __le__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ <= other._value_
+ ... return NotImplemented
+ ... def __lt__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ < other._value_
+ ... return NotImplemented
+ ...
+ >>> class Grade(OrderedEnum):
+ ... __order__ = 'A B C D F'
+ ... A = 5
+ ... B = 4
+ ... C = 3
+ ... D = 2
+ ... F = 1
+ ...
+ >>> Grade.C < Grade.A
+ True
+
+
+ Planet
+ ^^^^^^
+
+ If ``__new__`` or ``__init__`` is defined the value of the enum member
+ will be passed to those methods::
+
+ >>> class Planet(Enum):
+ ... MERCURY = (3.303e+23, 2.4397e6)
+ ... VENUS = (4.869e+24, 6.0518e6)
+ ... EARTH = (5.976e+24, 6.37814e6)
+ ... MARS = (6.421e+23, 3.3972e6)
+ ... JUPITER = (1.9e+27, 7.1492e7)
+ ... SATURN = (5.688e+26, 6.0268e7)
+ ... URANUS = (8.686e+25, 2.5559e7)
+ ... NEPTUNE = (1.024e+26, 2.4746e7)
+ ... def __init__(self, mass, radius):
+ ... self.mass = mass # in kilograms
+ ... self.radius = radius # in meters
+ ... @property
+ ... def surface_gravity(self):
+ ... # universal gravitational constant (m3 kg-1 s-2)
+ ... G = 6.67300E-11
+ ... return G * self.mass / (self.radius * self.radius)
+ ...
+ >>> Planet.EARTH.value
+ (5.976e+24, 6378140.0)
+ >>> Planet.EARTH.surface_gravity
+ 9.802652743337129
+
+
+ How are Enums different?
+ ------------------------
+
+ Enums have a custom metaclass that affects many aspects of both derived Enum
+ classes and their instances (members).
+
+
+ Enum Classes
+ ^^^^^^^^^^^^
+
+ The ``EnumMeta`` metaclass is responsible for providing the
+ ``__contains__``, ``__dir__``, ``__iter__`` and other methods that
+ allow one to do things with an ``Enum`` class that fail on a typical
+ class, such as ``list(Color)`` or ``some_var in Color``. ``EnumMeta`` is
+ responsible for ensuring that various other methods on the final ``Enum``
+ class are correct (such as ``__new__``, ``__getnewargs__``,
+ ``__str__`` and ``__repr__``)
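+
+ With the ``Color`` enumeration from the recipes above, for example, these
+ metaclass-provided methods are what make the following work::
+
+ >>> len(Color)
+ 3
+ >>> Color.red in Color
+ True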
+
+
+ Enum Members (aka instances)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The most interesting thing about Enum members is that they are singletons.
+ ``EnumMeta`` creates them all while it is creating the ``Enum``
+ class itself, and then puts a custom ``__new__`` in place to ensure
+ that no new ones are ever instantiated by returning only the existing
+ member instances.
+
+
+ Finer Points
+ ^^^^^^^^^^^^
+
+ Enum members are instances of an Enum class, and even though they are
+ accessible as ``EnumClass.member``, they are not accessible directly from
+ the member::
+
+ >>> Color.red
+ <Color.red: 1>
+ >>> Color.red.blue
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'Color' object has no attribute 'blue'
+
+ Likewise, ``__members__`` is only available on the class.
+
+ In Python 3.x ``__members__`` is always an ``OrderedDict``, with the order being
+ the definition order. In Python 2.7 ``__members__`` is an ``OrderedDict`` if
+ ``__order__`` was specified, and a plain ``dict`` otherwise. In all other Python
+ 2.x versions ``__members__`` is a plain ``dict`` even if ``__order__`` was specified
+ as the ``OrderedDict`` type didn't exist yet.
+
+ If you give your ``Enum`` subclass extra methods, like the `Planet`_
+ class above, those methods will show up in a `dir` of the member,
+ but not of the class::
+
+ >>> dir(Planet)
+ ['EARTH', 'JUPITER', 'MARS', 'MERCURY', 'NEPTUNE', 'SATURN', 'URANUS',
+ 'VENUS', '__class__', '__doc__', '__members__', '__module__']
+ >>> dir(Planet.EARTH)
+ ['__class__', '__doc__', '__module__', 'name', 'surface_gravity', 'value']
+
+ A ``__new__`` method will only be used for the creation of the
+ ``Enum`` members -- after that it is replaced. This means if you wish to
+ change how ``Enum`` members are looked up you either have to write a
+ helper function or a ``classmethod``.
+
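+ As an illustrative (non-normative) sketch, such a lookup helper could be a
+ ``classmethod`` on the enumeration itself; the helper name below is made up::
+
+ >>> class Status(Enum):
+ ...     ok = 0
+ ...     error = 1
+ ...     @classmethod
+ ...     def from_int(cls, value, default=None):
+ ...         # fall back to a default instead of raising ValueError
+ ...         try:
+ ...             return cls(value)
+ ...         except ValueError:
+ ...             return default
+ ...
+ >>> Status.from_int(1)
+ <Status.error: 1>
+ >>> Status.from_int(42) is None
+ True
+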
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Provides: enum
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/LICENSE b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/LICENSE
new file mode 100755
index 00000000..9003b885
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/LICENSE
@@ -0,0 +1,32 @@
+Copyright (c) 2013, Ethan Furman.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ Redistributions of source code must retain the above
+ copyright notice, this list of conditions and the
+ following disclaimer.
+
+ Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+ Neither the name Ethan Furman nor the names of any
+ contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/README b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/README
new file mode 100755
index 00000000..511af984
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/README
@@ -0,0 +1,2 @@
+enum34 is the new Python stdlib enum module available in Python 3.4
+backported for previous versions of Python from 2.4 to 3.3.
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/__init__.py b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/__init__.py
new file mode 100755
index 00000000..6a327a8a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/__init__.py
@@ -0,0 +1,790 @@
+"""Python Enumerations"""
+
+import sys as _sys
+
+__all__ = ['Enum', 'IntEnum', 'unique']
+
+version = 1, 0, 4
+
+pyver = float('%s.%s' % _sys.version_info[:2])
+
+try:
+ any
+except NameError:
+ def any(iterable):
+ for element in iterable:
+ if element:
+ return True
+ return False
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ OrderedDict = None
+
+try:
+ basestring
+except NameError:
+ # In Python 2 basestring is the ancestor of both str and unicode
+ # in Python 3 it's just str, but was missing in 3.1
+ basestring = str
+
+try:
+ unicode
+except NameError:
+ # In Python 3 unicode no longer exists (it's just str)
+ unicode = str
+
+class _RouteClassAttributeToGetattr(object):
+ """Route attribute access on a class to __getattr__.
+
+ This is a descriptor, used to define attributes that act differently when
+ accessed through an instance and through a class. Instance access remains
+ normal, but access to an attribute through a class will be routed to the
+ class's __getattr__ method; this is done by raising AttributeError.
+
+ """
+ def __init__(self, fget=None):
+ self.fget = fget
+
+ def __get__(self, instance, ownerclass=None):
+ if instance is None:
+ raise AttributeError()
+ return self.fget(instance)
+
+ def __set__(self, instance, value):
+ raise AttributeError("can't set attribute")
+
+ def __delete__(self, instance):
+ raise AttributeError("can't delete attribute")
+
+
+def _is_descriptor(obj):
+ """Returns True if obj is a descriptor, False otherwise."""
+ return (
+ hasattr(obj, '__get__') or
+ hasattr(obj, '__set__') or
+ hasattr(obj, '__delete__'))
+
+
+def _is_dunder(name):
+ """Returns True if a __dunder__ name, False otherwise."""
+ return (name[:2] == name[-2:] == '__' and
+ name[2:3] != '_' and
+ name[-3:-2] != '_' and
+ len(name) > 4)
+
+
+def _is_sunder(name):
+ """Returns True if a _sunder_ name, False otherwise."""
+ return (name[0] == name[-1] == '_' and
+ name[1:2] != '_' and
+ name[-2:-1] != '_' and
+ len(name) > 2)
+
+
+def _make_class_unpicklable(cls):
+ """Make the given class un-picklable."""
+ def _break_on_call_reduce(self, protocol=None):
+ raise TypeError('%r cannot be pickled' % self)
+ cls.__reduce_ex__ = _break_on_call_reduce
+ cls.__module__ = '<unknown>'
+
+
+class _EnumDict(dict):
+ """Track enum member order and ensure member names are not reused.
+
+ EnumMeta will use the names found in self._member_names as the
+ enumeration member names.
+
+ """
+ def __init__(self):
+ super(_EnumDict, self).__init__()
+ self._member_names = []
+
+ def __setitem__(self, key, value):
+ """Changes anything not dundered or not a descriptor.
+
+ If a descriptor is added with the same name as an enum member, the name
+ is removed from _member_names (this may leave a hole in the numerical
+ sequence of values).
+
+ If an enum member name is used twice, an error is raised; duplicate
+ values are not checked for.
+
+ Single underscore (sunder) names are reserved.
+
+ Note: in 3.x __order__ is simply discarded as a not necessary piece
+ leftover from 2.x
+
+ """
+ if pyver >= 3.0 and key == '__order__':
+ return
+ if _is_sunder(key):
+ raise ValueError('_names_ are reserved for future Enum use')
+ elif _is_dunder(key):
+ pass
+ elif key in self._member_names:
+ # descriptor overwriting an enum?
+ raise TypeError('Attempted to reuse key: %r' % key)
+ elif not _is_descriptor(value):
+ if key in self:
+ # enum overwriting a descriptor?
+ raise TypeError('Key already defined as: %r' % self[key])
+ self._member_names.append(key)
+ super(_EnumDict, self).__setitem__(key, value)
+
+
+# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
+# EnumMeta finishes running the first time the Enum class doesn't exist. This
+# is also why there are checks in EnumMeta like `if Enum is not None`
+Enum = None
+
+
+class EnumMeta(type):
+ """Metaclass for Enum"""
+ @classmethod
+ def __prepare__(metacls, cls, bases):
+ return _EnumDict()
+
+ def __new__(metacls, cls, bases, classdict):
+ # an Enum class is final once enumeration items have been defined; it
+ # cannot be mixed with other types (int, float, etc.) if it has an
+ # inherited __new__ unless a new __new__ is defined (or the resulting
+ # class will fail).
+ if type(classdict) is dict:
+ original_dict = classdict
+ classdict = _EnumDict()
+ for k, v in original_dict.items():
+ classdict[k] = v
+
+ member_type, first_enum = metacls._get_mixins_(bases)
+ __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
+ first_enum)
+ # save enum items into separate mapping so they don't get baked into
+ # the new class
+ members = dict((k, classdict[k]) for k in classdict._member_names)
+ for name in classdict._member_names:
+ del classdict[name]
+
+ # py2 support for definition order
+ __order__ = classdict.get('__order__')
+ if __order__ is None:
+ if pyver < 3.0:
+ try:
+ __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
+ except TypeError:
+ __order__ = [name for name in sorted(members.keys())]
+ else:
+ __order__ = classdict._member_names
+ else:
+ del classdict['__order__']
+ if pyver < 3.0:
+ __order__ = __order__.replace(',', ' ').split()
+ aliases = [name for name in members if name not in __order__]
+ __order__ += aliases
+
+ # check for illegal enum names (any others?)
+ invalid_names = set(members) & set(['mro'])
+ if invalid_names:
+ raise ValueError('Invalid enum member name(s): %s' % (
+ ', '.join(invalid_names), ))
+
+ # create our new Enum type
+ enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
+ enum_class._member_names_ = [] # names in random order
+ if OrderedDict is not None:
+ enum_class._member_map_ = OrderedDict()
+ else:
+ enum_class._member_map_ = {} # name->value map
+ enum_class._member_type_ = member_type
+
+ # Reverse value->name map for hashable values.
+ enum_class._value2member_map_ = {}
+
+ # instantiate them, checking for duplicates as we go
+ # we instantiate first instead of checking for duplicates first in case
+ # a custom __new__ is doing something funky with the values -- such as
+ # auto-numbering ;)
+ if __new__ is None:
+ __new__ = enum_class.__new__
+ for member_name in __order__:
+ value = members[member_name]
+ if not isinstance(value, tuple):
+ args = (value, )
+ else:
+ args = value
+ if member_type is tuple: # special case for tuple enums
+ args = (args, ) # wrap it one more time
+ if not use_args or not args:
+ enum_member = __new__(enum_class)
+ if not hasattr(enum_member, '_value_'):
+ enum_member._value_ = value
+ else:
+ enum_member = __new__(enum_class, *args)
+ if not hasattr(enum_member, '_value_'):
+ enum_member._value_ = member_type(*args)
+ value = enum_member._value_
+ enum_member._name_ = member_name
+ enum_member.__objclass__ = enum_class
+ enum_member.__init__(*args)
+ # If another member with the same value was already defined, the
+ # new member becomes an alias to the existing one.
+ for name, canonical_member in enum_class._member_map_.items():
+ if canonical_member.value == enum_member._value_:
+ enum_member = canonical_member
+ break
+ else:
+ # Aliases don't appear in member names (only in __members__).
+ enum_class._member_names_.append(member_name)
+ enum_class._member_map_[member_name] = enum_member
+ try:
+ # This may fail if value is not hashable. We can't add the value
+ # to the map, and by-value lookups for this value will be
+ # linear.
+ enum_class._value2member_map_[value] = enum_member
+ except TypeError:
+ pass
+
+
+ # If a custom type is mixed into the Enum, and it does not know how
+ # to pickle itself, pickle.dumps will succeed but pickle.loads will
+ # fail. Rather than have the error show up later and possibly far
+ # from the source, sabotage the pickle protocol for this class so
+ # that pickle.dumps also fails.
+ #
+ # However, if the new class implements its own __reduce_ex__, do not
+ # sabotage -- it's on them to make sure it works correctly. We use
+ # __reduce_ex__ instead of any of the others as it is preferred by
+ # pickle over __reduce__, and it handles all pickle protocols.
+ unpicklable = False
+ if '__reduce_ex__' not in classdict:
+ if member_type is not object:
+ methods = ('__getnewargs_ex__', '__getnewargs__',
+ '__reduce_ex__', '__reduce__')
+ if not any(m in member_type.__dict__ for m in methods):
+ _make_class_unpicklable(enum_class)
+ unpicklable = True
+
+
+ # double check that repr and friends are not the mixin's or various
+ # things break (such as pickle)
+ for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
+ class_method = getattr(enum_class, name)
+ obj_method = getattr(member_type, name, None)
+ enum_method = getattr(first_enum, name, None)
+ if name not in classdict and class_method is not enum_method:
+ if name == '__reduce_ex__' and unpicklable:
+ continue
+ setattr(enum_class, name, enum_method)
+
+ # method resolution and ints are not playing nicely:
+ # Python versions before 2.6 use __cmp__
+
+ if pyver < 2.6:
+
+ if issubclass(enum_class, int):
+ setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
+
+ elif pyver < 3.0:
+
+ if issubclass(enum_class, int):
+ for method in (
+ '__le__',
+ '__lt__',
+ '__gt__',
+ '__ge__',
+ '__eq__',
+ '__ne__',
+ '__hash__',
+ ):
+ setattr(enum_class, method, getattr(int, method))
+
+ # replace any other __new__ with our own (as long as Enum is not None,
+ # anyway) -- again, this is to support pickle
+ if Enum is not None:
+ # if the user defined their own __new__, save it before it gets
+ # clobbered in case they subclass later
+ if save_new:
+ setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
+ setattr(enum_class, '__new__', Enum.__dict__['__new__'])
+ return enum_class
+
+ def __call__(cls, value, names=None, module=None, type=None):
+ """Either returns an existing member, or creates a new enum class.
+
+ This method is used both when an enum class is given a value to match
+ to an enumeration member (i.e. Color(3)) and for the functional API
+ (i.e. Color = Enum('Color', names='red green blue')).
+
+ When used for the functional API: `module`, if set, will be stored in
+ the new class' __module__ attribute; `type`, if set, will be mixed in
+ as the first base class.
+
+ Note: if `module` is not set this routine will attempt to discover the
+ calling module by walking the frame stack; if this is unsuccessful
+ the resulting class will not be pickleable.
+
+ """
+ if names is None: # simple value lookup
+ return cls.__new__(cls, value)
+ # otherwise, functional API: we're creating a new Enum type
+ return cls._create_(value, names, module=module, type=type)
+
+ def __contains__(cls, member):
+ return isinstance(member, cls) and member.name in cls._member_map_
+
+ def __delattr__(cls, attr):
+ # nicer error message when someone tries to delete an attribute
+ # (see issue19025).
+ if attr in cls._member_map_:
+ raise AttributeError(
+ "%s: cannot delete Enum member." % cls.__name__)
+ super(EnumMeta, cls).__delattr__(attr)
+
+ def __dir__(self):
+ return (['__class__', '__doc__', '__members__', '__module__'] +
+ self._member_names_)
+
+ @property
+ def __members__(cls):
+ """Returns a mapping of member name->value.
+
+ This mapping lists all enum members, including aliases. Note that this
+ is a copy of the internal mapping.
+
+ """
+ return cls._member_map_.copy()
+
+ def __getattr__(cls, name):
+ """Return the enum member matching `name`
+
+ We use __getattr__ instead of descriptors or inserting into the enum
+ class' __dict__ in order to support `name` and `value` being both
+ properties for enum members (which live in the class' __dict__) and
+ enum members themselves.
+
+ """
+ if _is_dunder(name):
+ raise AttributeError(name)
+ try:
+ return cls._member_map_[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ def __getitem__(cls, name):
+ return cls._member_map_[name]
+
+ def __iter__(cls):
+ return (cls._member_map_[name] for name in cls._member_names_)
+
+ def __reversed__(cls):
+ return (cls._member_map_[name] for name in reversed(cls._member_names_))
+
+ def __len__(cls):
+ return len(cls._member_names_)
+
+ def __repr__(cls):
+ return "<enum %r>" % cls.__name__
+
+ def __setattr__(cls, name, value):
+ """Block attempts to reassign Enum members.
+
+ A simple assignment to the class namespace only changes one of the
+ several possible ways to get an Enum member from the Enum class,
+ resulting in an inconsistent Enumeration.
+
+ """
+ member_map = cls.__dict__.get('_member_map_', {})
+ if name in member_map:
+ raise AttributeError('Cannot reassign members.')
+ super(EnumMeta, cls).__setattr__(name, value)
+
+ def _create_(cls, class_name, names=None, module=None, type=None):
+ """Convenience method to create a new Enum class.
+
+ `names` can be:
+
+ * A string containing member names, separated either with spaces or
+ commas. Values are auto-numbered from 1.
+ * An iterable of member names. Values are auto-numbered from 1.
+ * An iterable of (member name, value) pairs.
+ * A mapping of member name -> value.
+
+ """
+ if pyver < 3.0:
+ # if class_name is unicode, attempt a conversion to ASCII
+ if isinstance(class_name, unicode):
+ try:
+ class_name = class_name.encode('ascii')
+ except UnicodeEncodeError:
+ raise TypeError('%r is not representable in ASCII' % class_name)
+ metacls = cls.__class__
+ if type is None:
+ bases = (cls, )
+ else:
+ bases = (type, cls)
+ classdict = metacls.__prepare__(class_name, bases)
+ __order__ = []
+
+ # special processing needed for names?
+ if isinstance(names, basestring):
+ names = names.replace(',', ' ').split()
+ if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
+ names = [(e, i+1) for (i, e) in enumerate(names)]
+
+ # Here, names is either an iterable of (name, value) or a mapping.
+ for item in names:
+ if isinstance(item, basestring):
+ member_name, member_value = item, names[item]
+ else:
+ member_name, member_value = item
+ classdict[member_name] = member_value
+ __order__.append(member_name)
+ # only set __order__ in classdict if name/value was not from a mapping
+ if not isinstance(item, basestring):
+ classdict['__order__'] = ' '.join(__order__)
+ enum_class = metacls.__new__(metacls, class_name, bases, classdict)
+
+ # TODO: replace the frame hack if a blessed way to know the calling
+ # module is ever developed
+ if module is None:
+ try:
+ module = _sys._getframe(2).f_globals['__name__']
+ except (AttributeError, ValueError):
+ pass
+ if module is None:
+ _make_class_unpicklable(enum_class)
+ else:
+ enum_class.__module__ = module
+
+ return enum_class
+
+ @staticmethod
+ def _get_mixins_(bases):
+ """Returns the type for creating enum members, and the first inherited
+ enum class.
+
+ bases: the tuple of bases that was given to __new__
+
+ """
+ if not bases or Enum is None:
+ return object, Enum
+
+
+ # double check that we are not subclassing a class with existing
+ # enumeration members; while we're at it, see if any other data
+ # type has been mixed in so we can use the correct __new__
+ member_type = first_enum = None
+ for base in bases:
+ if (base is not Enum and
+ issubclass(base, Enum) and
+ base._member_names_):
+ raise TypeError("Cannot extend enumerations")
+ # base is now the last base in bases
+ if not issubclass(base, Enum):
+ raise TypeError("new enumerations must be created as "
+ "`ClassName([mixin_type,] enum_type)`")
+
+ # get correct mix-in type (either mix-in type of Enum subclass, or
+ # first base if last base is Enum)
+ if not issubclass(bases[0], Enum):
+ member_type = bases[0] # first data type
+ first_enum = bases[-1] # enum type
+ else:
+ for base in bases[0].__mro__:
+ # most common: (IntEnum, int, Enum, object)
+ # possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
+ # <class 'int'>, <Enum 'Enum'>,
+ # <class 'object'>)
+ if issubclass(base, Enum):
+ if first_enum is None:
+ first_enum = base
+ else:
+ if member_type is None:
+ member_type = base
+
+ return member_type, first_enum
+
+ if pyver < 3.0:
+ @staticmethod
+ def _find_new_(classdict, member_type, first_enum):
+ """Returns the __new__ to be used for creating the enum members.
+
+ classdict: the class dictionary given to __new__
+ member_type: the data type whose __new__ will be used by default
+ first_enum: enumeration to check for an overriding __new__
+
+ """
+ # now find the correct __new__, checking to see if one was defined
+ # by the user; also check earlier enum classes in case a __new__ was
+ # saved as __member_new__
+ __new__ = classdict.get('__new__', None)
+ if __new__:
+ return None, True, True # __new__, save_new, use_args
+
+ N__new__ = getattr(None, '__new__')
+ O__new__ = getattr(object, '__new__')
+ if Enum is None:
+ E__new__ = N__new__
+ else:
+ E__new__ = Enum.__dict__['__new__']
+ # check all possibles for __member_new__ before falling back to
+ # __new__
+ for method in ('__member_new__', '__new__'):
+ for possible in (member_type, first_enum):
+ try:
+ target = possible.__dict__[method]
+ except (AttributeError, KeyError):
+ target = getattr(possible, method, None)
+ if target not in [
+ None,
+ N__new__,
+ O__new__,
+ E__new__,
+ ]:
+ if method == '__member_new__':
+ classdict['__new__'] = target
+ return None, False, True
+ if isinstance(target, staticmethod):
+ target = target.__get__(member_type)
+ __new__ = target
+ break
+ if __new__ is not None:
+ break
+ else:
+ __new__ = object.__new__
+
+ # if a non-object.__new__ is used then whatever value/tuple was
+ # assigned to the enum member name will be passed to __new__ and to the
+ # new enum member's __init__
+ if __new__ is object.__new__:
+ use_args = False
+ else:
+ use_args = True
+
+ return __new__, False, use_args
+ else:
+ @staticmethod
+ def _find_new_(classdict, member_type, first_enum):
+ """Returns the __new__ to be used for creating the enum members.
+
+ classdict: the class dictionary given to __new__
+ member_type: the data type whose __new__ will be used by default
+ first_enum: enumeration to check for an overriding __new__
+
+ """
+ # now find the correct __new__, checking to see if one was defined
+ # by the user; also check earlier enum classes in case a __new__ was
+ # saved as __member_new__
+ __new__ = classdict.get('__new__', None)
+
+ # should __new__ be saved as __member_new__ later?
+ save_new = __new__ is not None
+
+ if __new__ is None:
+ # check all possibles for __member_new__ before falling back to
+ # __new__
+ for method in ('__member_new__', '__new__'):
+ for possible in (member_type, first_enum):
+ target = getattr(possible, method, None)
+ if target not in (
+ None,
+ None.__new__,
+ object.__new__,
+ Enum.__new__,
+ ):
+ __new__ = target
+ break
+ if __new__ is not None:
+ break
+ else:
+ __new__ = object.__new__
+
+ # if a non-object.__new__ is used then whatever value/tuple was
+ # assigned to the enum member name will be passed to __new__ and to the
+ # new enum member's __init__
+ if __new__ is object.__new__:
+ use_args = False
+ else:
+ use_args = True
+
+ return __new__, save_new, use_args
+
+
+########################################################
+# In order to support Python 2 and 3 with a single
+# codebase we have to create the Enum methods separately
+# and then use the `type(name, bases, dict)` method to
+# create the class.
+########################################################
+temp_enum_dict = {}
+temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
+
+def __new__(cls, value):
+ # all enum instances are actually created during class construction
+ # without calling this method; this method is called by the metaclass'
+ # __call__ (i.e. Color(3) ), and by pickle
+ if type(value) is cls:
+ # For lookups like Color(Color.red)
+ value = value.value
+ #return value
+ # by-value search for a matching enum member
+ # see if it's in the reverse mapping (for hashable values)
+ try:
+ if value in cls._value2member_map_:
+ return cls._value2member_map_[value]
+ except TypeError:
+ # not there, now do long search -- O(n) behavior
+ for member in cls._member_map_.values():
+ if member.value == value:
+ return member
+ raise ValueError("%s is not a valid %s" % (value, cls.__name__))
+temp_enum_dict['__new__'] = __new__
+del __new__
+
+def __repr__(self):
+ return "<%s.%s: %r>" % (
+ self.__class__.__name__, self._name_, self._value_)
+temp_enum_dict['__repr__'] = __repr__
+del __repr__
+
+def __str__(self):
+ return "%s.%s" % (self.__class__.__name__, self._name_)
+temp_enum_dict['__str__'] = __str__
+del __str__
+
+def __dir__(self):
+ added_behavior = [
+ m
+ for cls in self.__class__.mro()
+ for m in cls.__dict__
+ if m[0] != '_'
+ ]
+ return (['__class__', '__doc__', '__module__', ] + added_behavior)
+temp_enum_dict['__dir__'] = __dir__
+del __dir__
+
+def __format__(self, format_spec):
+ # mixed-in Enums should use the mixed-in type's __format__, otherwise
+ # we can get strange results with the Enum name showing up instead of
+ # the value
+
+ # pure Enum branch
+ if self._member_type_ is object:
+ cls = str
+ val = str(self)
+ # mix-in branch
+ else:
+ cls = self._member_type_
+ val = self.value
+ return cls.__format__(val, format_spec)
+temp_enum_dict['__format__'] = __format__
+del __format__
+
+
+####################################
+# Python versions before 2.6 use __cmp__
+
+if pyver < 2.6:
+
+ def __cmp__(self, other):
+ if type(other) is self.__class__:
+ if self is other:
+ return 0
+ return -1
+ return NotImplemented
+ raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__cmp__'] = __cmp__
+ del __cmp__
+
+else:
+
+ def __le__(self, other):
+ raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__le__'] = __le__
+ del __le__
+
+ def __lt__(self, other):
+ raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__lt__'] = __lt__
+ del __lt__
+
+ def __ge__(self, other):
+ raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__ge__'] = __ge__
+ del __ge__
+
+ def __gt__(self, other):
+ raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__gt__'] = __gt__
+ del __gt__
+
+
+def __eq__(self, other):
+ if type(other) is self.__class__:
+ return self is other
+ return NotImplemented
+temp_enum_dict['__eq__'] = __eq__
+del __eq__
+
+def __ne__(self, other):
+ if type(other) is self.__class__:
+ return self is not other
+ return NotImplemented
+temp_enum_dict['__ne__'] = __ne__
+del __ne__
+
+def __hash__(self):
+ return hash(self._name_)
+temp_enum_dict['__hash__'] = __hash__
+del __hash__
+
+def __reduce_ex__(self, proto):
+ return self.__class__, (self._value_, )
+temp_enum_dict['__reduce_ex__'] = __reduce_ex__
+del __reduce_ex__
+
+# _RouteClassAttributeToGetattr is used to provide access to the `name`
+# and `value` properties of enum members while keeping some measure of
+# protection from modification, while still allowing for an enumeration
+# to have members named `name` and `value`. This works because enumeration
+# members are not set directly on the enum class -- __getattr__ is
+# used to look them up.
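+# Illustrative note: this is why an enumeration may itself define members
+# called `name` or `value` -- instance access (member.name, member.value)
+# still goes through these properties, while class access (SomeEnum.name)
+# falls through to EnumMeta.__getattr__ and returns the member of that name,
+# if one was defined.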
+
+@_RouteClassAttributeToGetattr
+def name(self):
+ return self._name_
+temp_enum_dict['name'] = name
+del name
+
+@_RouteClassAttributeToGetattr
+def value(self):
+ return self._value_
+temp_enum_dict['value'] = value
+del value
+
+Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
+del temp_enum_dict
+
+# Enum has now been created
+###########################
+
+class IntEnum(int, Enum):
+ """Enum where members are also (and must be) ints"""
+
+
+def unique(enumeration):
+ """Class decorator that ensures only unique members exist in an enumeration."""
+ duplicates = []
+ for name, member in enumeration.__members__.items():
+ if name != member.name:
+ duplicates.append((name, member.name))
+ if duplicates:
+ duplicate_names = ', '.join(
+ ["%s -> %s" % (alias, name) for (alias, name) in duplicates]
+ )
+ raise ValueError('duplicate names found in %r: %s' %
+ (enumeration, duplicate_names)
+ )
+ return enumeration
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/doc/enum.rst b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/doc/enum.rst
new file mode 100755
index 00000000..0d429bfc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/doc/enum.rst
@@ -0,0 +1,725 @@
+``enum`` --- support for enumerations
+========================================
+
+.. :synopsis: enumerations are sets of symbolic names bound to unique, constant
+ values.
+.. :moduleauthor:: Ethan Furman <ethan@stoneleaf.us>
+.. :sectionauthor:: Barry Warsaw <barry@python.org>,
+.. :sectionauthor:: Eli Bendersky <eliben@gmail.com>,
+.. :sectionauthor:: Ethan Furman <ethan@stoneleaf.us>
+
+----------------
+
+An enumeration is a set of symbolic names (members) bound to unique, constant
+values. Within an enumeration, the members can be compared by identity, and
+the enumeration itself can be iterated over.
+
+
+Module Contents
+---------------
+
+This module defines two enumeration classes that can be used to define unique
+sets of names and values: ``Enum`` and ``IntEnum``. It also defines
+one decorator, ``unique``.
+
+``Enum``
+
+Base class for creating enumerated constants. See section `Functional API`_
+for an alternate construction syntax.
+
+``IntEnum``
+
+Base class for creating enumerated constants that are also subclasses of ``int``.
+
+``unique``
+
+Enum class decorator that ensures only one name is bound to any one value.
+
+
+Creating an Enum
+----------------
+
+Enumerations are created using the ``class`` syntax, which makes them
+easy to read and write. An alternative creation method is described in
+`Functional API`_. To define an enumeration, subclass ``Enum`` as
+follows::
+
+ >>> from enum import Enum
+ >>> class Color(Enum):
+ ... red = 1
+ ... green = 2
+ ... blue = 3
+
+Note: Nomenclature
+
+ - The class ``Color`` is an *enumeration* (or *enum*)
+ - The attributes ``Color.red``, ``Color.green``, etc., are
+ *enumeration members* (or *enum members*).
+ - The enum members have *names* and *values* (the name of
+ ``Color.red`` is ``red``, the value of ``Color.blue`` is
+ ``3``, etc.)
+
+Note:
+
+ Even though we use the ``class`` syntax to create Enums, Enums
+ are not normal Python classes. See `How are Enums different?`_ for
+ more details.
+
+Enumeration members have human readable string representations::
+
+ >>> print(Color.red)
+ Color.red
+
+...while their ``repr`` has more information::
+
+ >>> print(repr(Color.red))
+ <Color.red: 1>
+
+The *type* of an enumeration member is the enumeration it belongs to::
+
+ >>> type(Color.red)
+ <enum 'Color'>
+ >>> isinstance(Color.green, Color)
+ True
+ >>>
+
+Enum members also have a property that contains just their item name::
+
+ >>> print(Color.red.name)
+ red
+
+Enumerations support iteration. In Python 3.x definition order is used; in
+Python 2.x the definition order is not available, but class attribute
+``__order__`` is supported; otherwise, value order is used::
+
+ >>> class Shake(Enum):
+ ... __order__ = 'vanilla chocolate cookies mint' # only needed in 2.x
+ ... vanilla = 7
+ ... chocolate = 4
+ ... cookies = 9
+ ... mint = 3
+ ...
+ >>> for shake in Shake:
+ ... print(shake)
+ ...
+ Shake.vanilla
+ Shake.chocolate
+ Shake.cookies
+ Shake.mint
+
+The ``__order__`` attribute is always removed, and in 3.x it is also ignored
+(order is definition order); however, in the stdlib version it will be ignored
+but not removed.
+
+Enumeration members are hashable, so they can be used in dictionaries and sets::
+
+ >>> apples = {}
+ >>> apples[Color.red] = 'red delicious'
+ >>> apples[Color.green] = 'granny smith'
+ >>> apples == {Color.red: 'red delicious', Color.green: 'granny smith'}
+ True
+
+
+Programmatic access to enumeration members and their attributes
+---------------------------------------------------------------
+
+Sometimes it's useful to access members in enumerations programmatically (i.e.
+situations where ``Color.red`` won't do because the exact color is not known
+at program-writing time). ``Enum`` allows such access::
+
+ >>> Color(1)
+ <Color.red: 1>
+ >>> Color(3)
+ <Color.blue: 3>
+
+If you want to access enum members by *name*, use item access::
+
+ >>> Color['red']
+ <Color.red: 1>
+ >>> Color['green']
+ <Color.green: 2>
+
+If you have an enum member and need its ``name`` or ``value``::
+
+ >>> member = Color.red
+ >>> member.name
+ 'red'
+ >>> member.value
+ 1
+
+
+Duplicating enum members and values
+-----------------------------------
+
+Having two enum members (or any other attribute) with the same name is invalid;
+in Python 3.x this would raise an error, but in Python 2.x the second member
+simply overwrites the first::
+
+ >>> # python 2.x
+ >>> class Shape(Enum):
+ ... square = 2
+ ... square = 3
+ ...
+ >>> Shape.square
+ <Shape.square: 3>
+
+ >>> # python 3.x
+ >>> class Shape(Enum):
+ ... square = 2
+ ... square = 3
+ Traceback (most recent call last):
+ ...
+ TypeError: Attempted to reuse key: 'square'
+
+However, two enum members are allowed to have the same value. Given two members
+A and B with the same value (and A defined first), B is an alias to A. By-value
+lookup of the value of A and B will return A. By-name lookup of B will also
+return A::
+
+ >>> class Shape(Enum):
+ ... __order__ = 'square diamond circle alias_for_square' # only needed in 2.x
+ ... square = 2
+ ... diamond = 1
+ ... circle = 3
+ ... alias_for_square = 2
+ ...
+ >>> Shape.square
+ <Shape.square: 2>
+ >>> Shape.alias_for_square
+ <Shape.square: 2>
+ >>> Shape(2)
+ <Shape.square: 2>
+
+
+Allowing aliases is not always desirable. ``unique`` can be used to ensure
+that none exist in a particular enumeration::
+
+ >>> from enum import unique
+ >>> @unique
+ ... class Mistake(Enum):
+ ... __order__ = 'one two three four' # only needed in 2.x
+ ... one = 1
+ ... two = 2
+ ... three = 3
+ ... four = 3
+ Traceback (most recent call last):
+ ...
+ ValueError: duplicate names found in <enum 'Mistake'>: four -> three
+
+Iterating over the members of an enum does not provide the aliases::
+
+ >>> list(Shape)
+ [<Shape.square: 2>, <Shape.diamond: 1>, <Shape.circle: 3>]
+
+The special attribute ``__members__`` is a dictionary mapping names to members.
+It includes all names defined in the enumeration, including the aliases::
+
+ >>> for name, member in sorted(Shape.__members__.items()):
+ ... name, member
+ ...
+ ('alias_for_square', <Shape.square: 2>)
+ ('circle', <Shape.circle: 3>)
+ ('diamond', <Shape.diamond: 1>)
+ ('square', <Shape.square: 2>)
+
+The ``__members__`` attribute can be used for detailed programmatic access to
+the enumeration members. For example, finding all the aliases::
+
+ >>> [name for name, member in Shape.__members__.items() if member.name != name]
+ ['alias_for_square']
+
+Comparisons
+-----------
+
+Enumeration members are compared by identity::
+
+ >>> Color.red is Color.red
+ True
+ >>> Color.red is Color.blue
+ False
+ >>> Color.red is not Color.blue
+ True
+
+Ordered comparisons between enumeration values are *not* supported. Enum
+members are not integers (but see `IntEnum`_ below)::
+
+ >>> Color.red < Color.blue
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: unorderable types: Color() < Color()
+
+.. warning::
+
+ In Python 2 *everything* is ordered, even though the ordering may not
+ make sense. If you want your enumerations to have a sensible ordering
+ check out the `OrderedEnum`_ recipe below.
+
+
+Equality comparisons are defined though::
+
+ >>> Color.blue == Color.red
+ False
+ >>> Color.blue != Color.red
+ True
+ >>> Color.blue == Color.blue
+ True
+
+Comparisons against non-enumeration values will always compare not equal
+(again, ``IntEnum`` was explicitly designed to behave differently, see
+below)::
+
+ >>> Color.blue == 2
+ False
+
+
+Allowed members and attributes of enumerations
+----------------------------------------------
+
+The examples above use integers for enumeration values. Using integers is
+short and handy (and provided by default by the `Functional API`_), but not
+strictly enforced. In the vast majority of use-cases, one doesn't care what
+the actual value of an enumeration is. But if the value *is* important,
+enumerations can have arbitrary values.
+
+Enumerations are Python classes, and can have methods and special methods as
+usual. If we have this enumeration::
+
+ >>> class Mood(Enum):
+ ... funky = 1
+ ... happy = 3
+ ...
+ ... def describe(self):
+ ... # self is the member here
+ ... return self.name, self.value
+ ...
+ ... def __str__(self):
+ ... return 'my custom str! {0}'.format(self.value)
+ ...
+ ... @classmethod
+ ... def favorite_mood(cls):
+ ... # cls here is the enumeration
+ ... return cls.happy
+
+Then::
+
+ >>> Mood.favorite_mood()
+ <Mood.happy: 3>
+ >>> Mood.happy.describe()
+ ('happy', 3)
+ >>> str(Mood.funky)
+ 'my custom str! 1'
+
+The rules for what is allowed are as follows: _sunder_ names (starting and
+ending with a single underscore) are reserved by enum and cannot be used;
+all other attributes defined within an enumeration will become members of this
+enumeration, with the exception of *__dunder__* names and descriptors (methods
+are also descriptors).
+
+Note:
+
+ If your enumeration defines ``__new__`` and/or ``__init__`` then
+ whatever value(s) were given to the enum member will be passed into
+ those methods. See `Planet`_ for an example.
+
+
+Restricted subclassing of enumerations
+--------------------------------------
+
+Subclassing an enumeration is allowed only if the enumeration does not define
+any members. So this is forbidden::
+
+ >>> class MoreColor(Color):
+ ... pink = 17
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot extend enumerations
+
+But this is allowed::
+
+ >>> class Foo(Enum):
+ ... def some_behavior(self):
+ ... pass
+ ...
+ >>> class Bar(Foo):
+ ... happy = 1
+ ... sad = 2
+ ...
+
+Allowing subclassing of enums that define members would lead to a violation of
+some important invariants of types and instances. On the other hand, it makes
+sense to allow sharing some common behavior between a group of enumerations.
+(See `OrderedEnum`_ for an example.)
+
+
+Pickling
+--------
+
+Enumerations can be pickled and unpickled::
+
+ >>> from enum.test_enum import Fruit
+ >>> from pickle import dumps, loads
+ >>> Fruit.tomato is loads(dumps(Fruit.tomato, 2))
+ True
+
+The usual restrictions for pickling apply: picklable enums must be defined in
+the top level of a module, since unpickling requires them to be importable
+from that module.
+
+Note:
+
+ With pickle protocol version 4 (introduced in Python 3.4) it is possible
+ to easily pickle enums nested in other classes.
+
+
+
+Functional API
+--------------
+
+The ``Enum`` class is callable, providing the following functional API::
+
+ >>> Animal = Enum('Animal', 'ant bee cat dog')
+ >>> Animal
+ <enum 'Animal'>
+ >>> Animal.ant
+ <Animal.ant: 1>
+ >>> Animal.ant.value
+ 1
+ >>> list(Animal)
+ [<Animal.ant: 1>, <Animal.bee: 2>, <Animal.cat: 3>, <Animal.dog: 4>]
+
+The semantics of this API resemble ``namedtuple``. The first argument
+of the call to ``Enum`` is the name of the enumeration.
+
+The second argument is the *source* of enumeration member names. It can be a
+whitespace-separated string of names, a sequence of names, a sequence of
+2-tuples with key/value pairs, or a mapping (e.g. dictionary) of names to
+values. The last two options enable assigning arbitrary values to
+enumerations; the others auto-assign increasing integers starting with 1. A
+new class derived from ``Enum`` is returned. In other words, the above
+assignment to ``Animal`` is equivalent to::
+
+ >>> class Animals(Enum):
+ ... ant = 1
+ ... bee = 2
+ ... cat = 3
+ ... dog = 4
+
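+To choose the values yourself, pass 2-tuples or a mapping instead; a brief
+sketch (the names are only illustrative)::
+
+ >>> Creature = Enum('Creature', [('worm', 0), ('spider', 8)])
+ >>> Creature.spider
+ <Creature.spider: 8>
+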
+Pickling enums created with the functional API can be tricky as frame stack
+implementation details are used to try and figure out which module the
+enumeration is being created in (e.g. it will fail if you use a utility
+function in a separate module, and also may not work on IronPython or Jython).
+The solution is to specify the module name explicitly as follows::
+
+ >>> Animals = Enum('Animals', 'ant bee cat dog', module=__name__)
+
+Derived Enumerations
+--------------------
+
+IntEnum
+^^^^^^^
+
+A variation of ``Enum`` is provided which is also a subclass of
+``int``. Members of an ``IntEnum`` can be compared to integers;
+by extension, integer enumerations of different types can also be compared
+to each other::
+
+ >>> from enum import IntEnum
+ >>> class Shape(IntEnum):
+ ... circle = 1
+ ... square = 2
+ ...
+ >>> class Request(IntEnum):
+ ... post = 1
+ ... get = 2
+ ...
+ >>> Shape == 1
+ False
+ >>> Shape.circle == 1
+ True
+ >>> Shape.circle == Request.post
+ True
+
+However, they still can't be compared to standard ``Enum`` enumerations::
+
+ >>> class Shape(IntEnum):
+ ... circle = 1
+ ... square = 2
+ ...
+ >>> class Color(Enum):
+ ... red = 1
+ ... green = 2
+ ...
+ >>> Shape.circle == Color.red
+ False
+
+``IntEnum`` values behave like integers in other ways you'd expect::
+
+ >>> int(Shape.circle)
+ 1
+ >>> ['a', 'b', 'c'][Shape.circle]
+ 'b'
+ >>> [i for i in range(Shape.square)]
+ [0, 1]
+
+For the vast majority of code, ``Enum`` is strongly recommended,
+since ``IntEnum`` breaks some semantic promises of an enumeration (by
+being comparable to integers, and thus by transitivity to other
+unrelated enumerations). It should be used only in special cases where
+there's no other choice; for example, when integer constants are
+replaced with enumerations and backwards compatibility is required with code
+that still expects integers.
+
+
+Others
+^^^^^^
+
+While ``IntEnum`` is part of the ``enum`` module, it would be very
+simple to implement independently::
+
+ class IntEnum(int, Enum):
+ pass
+
+This demonstrates how similar derived enumerations can be defined; for example
+a ``StrEnum`` that mixes in ``str`` instead of ``int``.
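+
+A minimal sketch of such a ``StrEnum`` (not shipped with the module; the
+names below are only illustrative)::
+
+ class StrEnum(str, Enum):
+ """Enum where members are also (and must be) strings"""
+
+ class Vowel(StrEnum):
+ a = 'a'
+ e = 'e'
+
+Members of ``Vowel`` then compare equal to plain strings (``Vowel.a == 'a'``
+is ``True``), just as ``IntEnum`` members compare equal to plain integers.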
+
+Some rules:
+
+1. When subclassing ``Enum``, mix-in types must appear before
+ ``Enum`` itself in the sequence of bases, as in the ``IntEnum``
+ example above.
+2. While ``Enum`` can have members of any type, once you mix in an
+ additional type, all the members must have values of that type, e.g.
+ ``int`` above. This restriction does not apply to mix-ins which only
+ add methods and don't specify another data type such as ``int`` or
+ ``str``.
+3. When another data type is mixed in, the ``value`` attribute is *not the
+ same* as the enum member itself, although it is equivalent and will compare
+ equal (see the short sketch after this list).
+4. %-style formatting: ``%s`` and ``%r`` call ``Enum``'s ``__str__`` and
+ ``__repr__`` respectively; other codes (such as ``%i`` or ``%h`` for
+ IntEnum) treat the enum member as its mixed-in type.
+
+ Note: Prior to Python 3.4 there is a bug in ``str``'s %-formatting: ``int``
+ subclasses are printed as strings and not numbers when the ``%d``, ``%i``,
+ or ``%u`` codes are used.
+5. ``str.__format__`` (or ``format``) will use the mixed-in
+ type's ``__format__``. If the ``Enum``'s ``str`` or ``repr`` is
+ desired, use the ``!s`` or ``!r`` ``str`` format codes.
+
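+A short sketch of rules 3 and 5, reusing the ``Shape`` ``IntEnum`` from above::
+
+ >>> Shape.circle.value is Shape.circle
+ False
+ >>> Shape.circle.value == Shape.circle
+ True
+ >>> '{0}'.format(Shape.circle)
+ '1'
+ >>> '{0!s}'.format(Shape.circle)
+ 'Shape.circle'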
+
+Decorators
+----------
+
+unique
+^^^^^^
+
+A ``class`` decorator specifically for enumerations. It searches an
+enumeration's ``__members__``, gathering any aliases it finds; if any are
+found, ``ValueError`` is raised with the details::
+
+ >>> @unique
+ ... class NoDupes(Enum):
+ ... first = 'one'
+ ... second = 'two'
+ ... third = 'two'
+ Traceback (most recent call last):
+ ...
+ ValueError: duplicate names found in <enum 'NoDupes'>: third -> second
+
+
+Interesting examples
+--------------------
+
+While ``Enum`` and ``IntEnum`` are expected to cover the majority of
+use-cases, they cannot cover them all. Here are recipes for some different
+types of enumerations that can be used directly, or as examples for creating
+one's own.
+
+
+AutoNumber
+^^^^^^^^^^
+
+Avoids having to specify the value for each enumeration member::
+
+ >>> class AutoNumber(Enum):
+ ... def __new__(cls):
+ ... value = len(cls.__members__) + 1
+ ... obj = object.__new__(cls)
+ ... obj._value_ = value
+ ... return obj
+ ...
+ >>> class Color(AutoNumber):
+ ... __order__ = "red green blue" # only needed in 2.x
+ ... red = ()
+ ... green = ()
+ ... blue = ()
+ ...
+ >>> Color.green.value == 2
+ True
+
+Note:
+
+ The `__new__` method, if defined, is used during creation of the Enum
+ members; it is then replaced by Enum's `__new__` which is used after
+ class creation for lookup of existing members. Due to the way Enums are
+ supposed to behave, there is no way to customize Enum's `__new__`.
+
+
+UniqueEnum
+^^^^^^^^^^
+
+Raises an error if a duplicate member value is found, instead of silently
+creating an alias::
+
+ >>> class UniqueEnum(Enum):
+ ... def __init__(self, *args):
+ ... cls = self.__class__
+ ... if any(self.value == e.value for e in cls):
+ ... a = self.name
+ ... e = cls(self.value).name
+ ... raise ValueError(
+ ... "aliases not allowed in UniqueEnum: %r --> %r"
+ ... % (a, e))
+ ...
+ >>> class Color(UniqueEnum):
+ ... red = 1
+ ... green = 2
+ ... blue = 3
+ ... grene = 2
+ Traceback (most recent call last):
+ ...
+ ValueError: aliases not allowed in UniqueEnum: 'grene' --> 'green'
+
+
+OrderedEnum
+^^^^^^^^^^^
+
+An ordered enumeration that is not based on ``IntEnum`` and so maintains
+the normal ``Enum`` invariants (such as not being comparable to other
+enumerations)::
+
+ >>> class OrderedEnum(Enum):
+ ... def __ge__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ >= other._value_
+ ... return NotImplemented
+ ... def __gt__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ > other._value_
+ ... return NotImplemented
+ ... def __le__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ <= other._value_
+ ... return NotImplemented
+ ... def __lt__(self, other):
+ ... if self.__class__ is other.__class__:
+ ... return self._value_ < other._value_
+ ... return NotImplemented
+ ...
+ >>> class Grade(OrderedEnum):
+ ... __order__ = 'A B C D F' # only needed in 2.x
+ ... A = 5
+ ... B = 4
+ ... C = 3
+ ... D = 2
+ ... F = 1
+ ...
+ >>> Grade.C < Grade.A
+ True
+
+
+Planet
+^^^^^^
+
+If ``__new__`` or ``__init__`` is defined, the value of the enum member
+will be passed to those methods::
+
+ >>> class Planet(Enum):
+ ... MERCURY = (3.303e+23, 2.4397e6)
+ ... VENUS = (4.869e+24, 6.0518e6)
+ ... EARTH = (5.976e+24, 6.37814e6)
+ ... MARS = (6.421e+23, 3.3972e6)
+ ... JUPITER = (1.9e+27, 7.1492e7)
+ ... SATURN = (5.688e+26, 6.0268e7)
+ ... URANUS = (8.686e+25, 2.5559e7)
+ ... NEPTUNE = (1.024e+26, 2.4746e7)
+ ... def __init__(self, mass, radius):
+ ... self.mass = mass # in kilograms
+ ... self.radius = radius # in meters
+ ... @property
+ ... def surface_gravity(self):
+ ... # universal gravitational constant (m3 kg-1 s-2)
+ ... G = 6.67300E-11
+ ... return G * self.mass / (self.radius * self.radius)
+ ...
+ >>> Planet.EARTH.value
+ (5.976e+24, 6378140.0)
+ >>> Planet.EARTH.surface_gravity
+ 9.802652743337129
+
+
+How are Enums different?
+------------------------
+
+Enums have a custom metaclass that affects many aspects of both derived Enum
+classes and their instances (members).
+
+
+Enum Classes
+^^^^^^^^^^^^
+
+The ``EnumMeta`` metaclass is responsible for providing the
+``__contains__``, ``__dir__``, ``__iter__`` and other methods that
+allow one to do things with an ``Enum`` class that fail on a typical
+class, such as ``list(Color)`` or ``some_var in Color``. ``EnumMeta`` is
+responsible for ensuring that various other methods on the final ``Enum``
+class are correct (such as ``__new__``, ``__getnewargs__``,
+``__str__`` and ``__repr__``).
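+
+For instance, with the three-member ``Color`` enumeration used earlier, both
+of the following work even though they would fail on an ordinary class::
+
+ >>> list(Color)
+ [<Color.red: 1>, <Color.green: 2>, <Color.blue: 3>]
+ >>> Color.red in Color
+ True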
+
+
+Enum Members (aka instances)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The most interesting thing about Enum members is that they are singletons.
+``EnumMeta`` creates them all while it is creating the ``Enum``
+class itself, and then puts a custom ``__new__`` in place to ensure
+that no new ones are ever instantiated by returning only the existing
+member instances.
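+
+So asking for a member a second time simply returns the object created at
+class definition time; roughly::
+
+ >>> Color(1) is Color.red
+ True
+ >>> Color['red'] is Color.red
+ True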
+
+
+Finer Points
+^^^^^^^^^^^^
+
+Enum members are instances of an Enum class, and even though they are
+accessible as ``EnumClass.member``, they are not accessible directly from
+the member::
+
+ >>> Color.red
+ <Color.red: 1>
+ >>> Color.red.blue
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'Color' object has no attribute 'blue'
+
+Likewise, ``__members__`` is only available on the class.
+
+In Python 3.x ``__members__`` is always an ``OrderedDict``, with the order being
+the definition order. In Python 2.7 ``__members__`` is also an ``OrderedDict``,
+but the definition order is only preserved if ``__order__`` was specified
+(otherwise members are ordered by value). In all other Python 2.x versions
+``__members__`` is a plain ``dict``, even if ``__order__`` was specified, as the
+``OrderedDict`` type didn't exist yet.
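+
+A short sketch of specifying ``__order__`` (silently discarded in 3.x, needed
+for a deterministic definition order in 2.x)::
+
+ >>> class Weekend(Enum):
+ ... __order__ = 'saturday sunday' # only needed in 2.x
+ ... saturday = 1
+ ... sunday = 2
+ ...
+ >>> list(Weekend)
+ [<Weekend.saturday: 1>, <Weekend.sunday: 2>]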
+
+If you give your ``Enum`` subclass extra methods, like the `Planet`_
+class above, those methods will show up in a `dir` of the member,
+but not of the class::
+
+ >>> dir(Planet)
+ ['EARTH', 'JUPITER', 'MARS', 'MERCURY', 'NEPTUNE', 'SATURN', 'URANUS',
+ 'VENUS', '__class__', '__doc__', '__members__', '__module__']
+ >>> dir(Planet.EARTH)
+ ['__class__', '__doc__', '__module__', 'name', 'surface_gravity', 'value']
+
+A ``__new__`` method will only be used for the creation of the
+``Enum`` members -- after that it is replaced. This means that if you wish to
+change how ``Enum`` members are looked up you have to write either a
+helper function or a ``classmethod``.
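+
+For example, a case-insensitive lookup could be provided as a ``classmethod``
+(a small sketch; the names are only illustrative)::
+
+ >>> class Shade(Enum):
+ ... light = 1
+ ... dark = 2
+ ... @classmethod
+ ... def from_name(cls, name):
+ ... # classmethods are descriptors, so this is not a member
+ ... return cls[name.lower()]
+ ...
+ >>> Shade.from_name('DARK')
+ <Shade.dark: 2>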
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/enum.py b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/enum.py
new file mode 100755
index 00000000..6a327a8a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/enum.py
@@ -0,0 +1,790 @@
+"""Python Enumerations"""
+
+import sys as _sys
+
+__all__ = ['Enum', 'IntEnum', 'unique']
+
+version = 1, 0, 4
+
+pyver = float('%s.%s' % _sys.version_info[:2])
+
+try:
+ any
+except NameError:
+ def any(iterable):
+ for element in iterable:
+ if element:
+ return True
+ return False
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ OrderedDict = None
+
+try:
+ basestring
+except NameError:
+ # In Python 2 basestring is the ancestor of both str and unicode
+ # in Python 3 it's just str, but was missing in 3.1
+ basestring = str
+
+try:
+ unicode
+except NameError:
+ # In Python 3 unicode no longer exists (it's just str)
+ unicode = str
+
+class _RouteClassAttributeToGetattr(object):
+ """Route attribute access on a class to __getattr__.
+
+ This is a descriptor, used to define attributes that act differently when
+ accessed through an instance and through a class. Instance access remains
+ normal, but access to an attribute through a class will be routed to the
+ class's __getattr__ method; this is done by raising AttributeError.
+
+ """
+ def __init__(self, fget=None):
+ self.fget = fget
+
+ def __get__(self, instance, ownerclass=None):
+ if instance is None:
+ raise AttributeError()
+ return self.fget(instance)
+
+ def __set__(self, instance, value):
+ raise AttributeError("can't set attribute")
+
+ def __delete__(self, instance):
+ raise AttributeError("can't delete attribute")
+
+
+def _is_descriptor(obj):
+ """Returns True if obj is a descriptor, False otherwise."""
+ return (
+ hasattr(obj, '__get__') or
+ hasattr(obj, '__set__') or
+ hasattr(obj, '__delete__'))
+
+
+def _is_dunder(name):
+ """Returns True if a __dunder__ name, False otherwise."""
+ return (name[:2] == name[-2:] == '__' and
+ name[2:3] != '_' and
+ name[-3:-2] != '_' and
+ len(name) > 4)
+
+
+def _is_sunder(name):
+ """Returns True if a _sunder_ name, False otherwise."""
+ return (name[0] == name[-1] == '_' and
+ name[1:2] != '_' and
+ name[-2:-1] != '_' and
+ len(name) > 2)
+
+
+def _make_class_unpicklable(cls):
+ """Make the given class un-picklable."""
+ def _break_on_call_reduce(self, protocol=None):
+ raise TypeError('%r cannot be pickled' % self)
+ cls.__reduce_ex__ = _break_on_call_reduce
+ cls.__module__ = '<unknown>'
+
+
+class _EnumDict(dict):
+ """Track enum member order and ensure member names are not reused.
+
+ EnumMeta will use the names found in self._member_names as the
+ enumeration member names.
+
+ """
+ def __init__(self):
+ super(_EnumDict, self).__init__()
+ self._member_names = []
+
+ def __setitem__(self, key, value):
+ """Changes anything not dundered or not a descriptor.
+
+ If a descriptor is added with the same name as an enum member, the name
+ is removed from _member_names (this may leave a hole in the numerical
+ sequence of values).
+
+ If an enum member name is used twice, an error is raised; duplicate
+ values are not checked for.
+
+ Single underscore (sunder) names are reserved.
+
+ Note: in 3.x __order__ is simply discarded as an unnecessary leftover
+ from 2.x
+
+ """
+ if pyver >= 3.0 and key == '__order__':
+ return
+ if _is_sunder(key):
+ raise ValueError('_names_ are reserved for future Enum use')
+ elif _is_dunder(key):
+ pass
+ elif key in self._member_names:
+ # descriptor overwriting an enum?
+ raise TypeError('Attempted to reuse key: %r' % key)
+ elif not _is_descriptor(value):
+ if key in self:
+ # enum overwriting a descriptor?
+ raise TypeError('Key already defined as: %r' % self[key])
+ self._member_names.append(key)
+ super(_EnumDict, self).__setitem__(key, value)
+
+
+# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
+# EnumMeta finishes running the first time the Enum class doesn't exist. This
+# is also why there are checks in EnumMeta like `if Enum is not None`
+Enum = None
+
+
+class EnumMeta(type):
+ """Metaclass for Enum"""
+ @classmethod
+ def __prepare__(metacls, cls, bases):
+ return _EnumDict()
+
+ def __new__(metacls, cls, bases, classdict):
+ # an Enum class is final once enumeration items have been defined; it
+ # cannot be mixed with other types (int, float, etc.) if it has an
+ # inherited __new__ unless a new __new__ is defined (or the resulting
+ # class will fail).
+ if type(classdict) is dict:
+ original_dict = classdict
+ classdict = _EnumDict()
+ for k, v in original_dict.items():
+ classdict[k] = v
+
+ member_type, first_enum = metacls._get_mixins_(bases)
+ __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
+ first_enum)
+ # save enum items into separate mapping so they don't get baked into
+ # the new class
+ members = dict((k, classdict[k]) for k in classdict._member_names)
+ for name in classdict._member_names:
+ del classdict[name]
+
+ # py2 support for definition order
+ __order__ = classdict.get('__order__')
+ if __order__ is None:
+ if pyver < 3.0:
+ try:
+ __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
+ except TypeError:
+ __order__ = [name for name in sorted(members.keys())]
+ else:
+ __order__ = classdict._member_names
+ else:
+ del classdict['__order__']
+ if pyver < 3.0:
+ __order__ = __order__.replace(',', ' ').split()
+ aliases = [name for name in members if name not in __order__]
+ __order__ += aliases
+
+ # check for illegal enum names (any others?)
+ invalid_names = set(members) & set(['mro'])
+ if invalid_names:
+ raise ValueError('Invalid enum member name(s): %s' % (
+ ', '.join(invalid_names), ))
+
+ # create our new Enum type
+ enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
+ enum_class._member_names_ = [] # names in random order
+ if OrderedDict is not None:
+ enum_class._member_map_ = OrderedDict()
+ else:
+ enum_class._member_map_ = {} # name->value map
+ enum_class._member_type_ = member_type
+
+ # Reverse value->name map for hashable values.
+ enum_class._value2member_map_ = {}
+
+ # instantiate them, checking for duplicates as we go
+ # we instantiate first instead of checking for duplicates first in case
+ # a custom __new__ is doing something funky with the values -- such as
+ # auto-numbering ;)
+ if __new__ is None:
+ __new__ = enum_class.__new__
+ for member_name in __order__:
+ value = members[member_name]
+ if not isinstance(value, tuple):
+ args = (value, )
+ else:
+ args = value
+ if member_type is tuple: # special case for tuple enums
+ args = (args, ) # wrap it one more time
+ if not use_args or not args:
+ enum_member = __new__(enum_class)
+ if not hasattr(enum_member, '_value_'):
+ enum_member._value_ = value
+ else:
+ enum_member = __new__(enum_class, *args)
+ if not hasattr(enum_member, '_value_'):
+ enum_member._value_ = member_type(*args)
+ value = enum_member._value_
+ enum_member._name_ = member_name
+ enum_member.__objclass__ = enum_class
+ enum_member.__init__(*args)
+ # If another member with the same value was already defined, the
+ # new member becomes an alias to the existing one.
+ for name, canonical_member in enum_class._member_map_.items():
+ if canonical_member.value == enum_member._value_:
+ enum_member = canonical_member
+ break
+ else:
+ # Aliases don't appear in member names (only in __members__).
+ enum_class._member_names_.append(member_name)
+ enum_class._member_map_[member_name] = enum_member
+ try:
+ # This may fail if value is not hashable. We can't add the value
+ # to the map, and by-value lookups for this value will be
+ # linear.
+ enum_class._value2member_map_[value] = enum_member
+ except TypeError:
+ pass
+
+
+ # If a custom type is mixed into the Enum, and it does not know how
+ # to pickle itself, pickle.dumps will succeed but pickle.loads will
+ # fail. Rather than have the error show up later and possibly far
+ # from the source, sabotage the pickle protocol for this class so
+ # that pickle.dumps also fails.
+ #
+ # However, if the new class implements its own __reduce_ex__, do not
+ # sabotage -- it's on them to make sure it works correctly. We use
+ # __reduce_ex__ instead of any of the others as it is preferred by
+ # pickle over __reduce__, and it handles all pickle protocols.
+ unpicklable = False
+ if '__reduce_ex__' not in classdict:
+ if member_type is not object:
+ methods = ('__getnewargs_ex__', '__getnewargs__',
+ '__reduce_ex__', '__reduce__')
+ if not any(m in member_type.__dict__ for m in methods):
+ _make_class_unpicklable(enum_class)
+ unpicklable = True
+
+
+ # double check that repr and friends are not the mixin's or various
+ # things break (such as pickle)
+ for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
+ class_method = getattr(enum_class, name)
+ obj_method = getattr(member_type, name, None)
+ enum_method = getattr(first_enum, name, None)
+ if name not in classdict and class_method is not enum_method:
+ if name == '__reduce_ex__' and unpicklable:
+ continue
+ setattr(enum_class, name, enum_method)
+
+ # method resolution and ints are not playing nicely;
+ # Pythons older than 2.6 use __cmp__
+
+ if pyver < 2.6:
+
+ if issubclass(enum_class, int):
+ setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
+
+ elif pyver < 3.0:
+
+ if issubclass(enum_class, int):
+ for method in (
+ '__le__',
+ '__lt__',
+ '__gt__',
+ '__ge__',
+ '__eq__',
+ '__ne__',
+ '__hash__',
+ ):
+ setattr(enum_class, method, getattr(int, method))
+
+ # replace any other __new__ with our own (as long as Enum is not None,
+ # anyway) -- again, this is to support pickle
+ if Enum is not None:
+ # if the user defined their own __new__, save it before it gets
+ # clobbered in case they subclass later
+ if save_new:
+ setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
+ setattr(enum_class, '__new__', Enum.__dict__['__new__'])
+ return enum_class
+
+ def __call__(cls, value, names=None, module=None, type=None):
+ """Either returns an existing member, or creates a new enum class.
+
+ This method is used both when an enum class is given a value to match
+ to an enumeration member (i.e. Color(3)) and for the functional API
+ (i.e. Color = Enum('Color', names='red green blue')).
+
+ When used for the functional API: `module`, if set, will be stored in
+ the new class' __module__ attribute; `type`, if set, will be mixed in
+ as the first base class.
+
+ Note: if `module` is not set this routine will attempt to discover the
+ calling module by walking the frame stack; if this is unsuccessful
+ the resulting class will not be pickleable.
+
+ """
+ if names is None: # simple value lookup
+ return cls.__new__(cls, value)
+ # otherwise, functional API: we're creating a new Enum type
+ return cls._create_(value, names, module=module, type=type)
+
+ def __contains__(cls, member):
+ return isinstance(member, cls) and member.name in cls._member_map_
+
+ def __delattr__(cls, attr):
+ # nicer error message when someone tries to delete an attribute
+ # (see issue19025).
+ if attr in cls._member_map_:
+ raise AttributeError(
+ "%s: cannot delete Enum member." % cls.__name__)
+ super(EnumMeta, cls).__delattr__(attr)
+
+ def __dir__(self):
+ return (['__class__', '__doc__', '__members__', '__module__'] +
+ self._member_names_)
+
+ @property
+ def __members__(cls):
+ """Returns a mapping of member name->value.
+
+ This mapping lists all enum members, including aliases. Note that this
+ is a copy of the internal mapping.
+
+ """
+ return cls._member_map_.copy()
+
+ def __getattr__(cls, name):
+ """Return the enum member matching `name`
+
+ We use __getattr__ instead of descriptors or inserting into the enum
+ class' __dict__ in order to support `name` and `value` being both
+ properties for enum members (which live in the class' __dict__) and
+ enum members themselves.
+
+ """
+ if _is_dunder(name):
+ raise AttributeError(name)
+ try:
+ return cls._member_map_[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ def __getitem__(cls, name):
+ return cls._member_map_[name]
+
+ def __iter__(cls):
+ return (cls._member_map_[name] for name in cls._member_names_)
+
+ def __reversed__(cls):
+ return (cls._member_map_[name] for name in reversed(cls._member_names_))
+
+ def __len__(cls):
+ return len(cls._member_names_)
+
+ def __repr__(cls):
+ return "<enum %r>" % cls.__name__
+
+ def __setattr__(cls, name, value):
+ """Block attempts to reassign Enum members.
+
+ A simple assignment to the class namespace only changes one of the
+ several possible ways to get an Enum member from the Enum class,
+ resulting in an inconsistent Enumeration.
+
+ """
+ member_map = cls.__dict__.get('_member_map_', {})
+ if name in member_map:
+ raise AttributeError('Cannot reassign members.')
+ super(EnumMeta, cls).__setattr__(name, value)
+
+ def _create_(cls, class_name, names=None, module=None, type=None):
+ """Convenience method to create a new Enum class.
+
+ `names` can be:
+
+ * A string containing member names, separated either with spaces or
+ commas. Values are auto-numbered from 1.
+ * An iterable of member names. Values are auto-numbered from 1.
+ * An iterable of (member name, value) pairs.
+ * A mapping of member name -> value.
+
+ """
+ if pyver < 3.0:
+ # if class_name is unicode, attempt a conversion to ASCII
+ if isinstance(class_name, unicode):
+ try:
+ class_name = class_name.encode('ascii')
+ except UnicodeEncodeError:
+ raise TypeError('%r is not representable in ASCII' % class_name)
+ metacls = cls.__class__
+ if type is None:
+ bases = (cls, )
+ else:
+ bases = (type, cls)
+ classdict = metacls.__prepare__(class_name, bases)
+ __order__ = []
+
+ # special processing needed for names?
+ if isinstance(names, basestring):
+ names = names.replace(',', ' ').split()
+ if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
+ names = [(e, i+1) for (i, e) in enumerate(names)]
+
+ # Here, names is either an iterable of (name, value) or a mapping.
+ for item in names:
+ if isinstance(item, basestring):
+ member_name, member_value = item, names[item]
+ else:
+ member_name, member_value = item
+ classdict[member_name] = member_value
+ __order__.append(member_name)
+ # only set __order__ in classdict if name/value was not from a mapping
+ if not isinstance(item, basestring):
+ classdict['__order__'] = ' '.join(__order__)
+ enum_class = metacls.__new__(metacls, class_name, bases, classdict)
+
+ # TODO: replace the frame hack if a blessed way to know the calling
+ # module is ever developed
+ if module is None:
+ try:
+ module = _sys._getframe(2).f_globals['__name__']
+ except (AttributeError, ValueError):
+ pass
+ if module is None:
+ _make_class_unpicklable(enum_class)
+ else:
+ enum_class.__module__ = module
+
+ return enum_class
+
+ @staticmethod
+ def _get_mixins_(bases):
+ """Returns the type for creating enum members, and the first inherited
+ enum class.
+
+ bases: the tuple of bases that was given to __new__
+
+ """
+ if not bases or Enum is None:
+ return object, Enum
+
+
+ # double check that we are not subclassing a class with existing
+ # enumeration members; while we're at it, see if any other data
+ # type has been mixed in so we can use the correct __new__
+ member_type = first_enum = None
+ for base in bases:
+ if (base is not Enum and
+ issubclass(base, Enum) and
+ base._member_names_):
+ raise TypeError("Cannot extend enumerations")
+ # base is now the last base in bases
+ if not issubclass(base, Enum):
+ raise TypeError("new enumerations must be created as "
+ "`ClassName([mixin_type,] enum_type)`")
+
+ # get correct mix-in type (either mix-in type of Enum subclass, or
+ # first base if last base is Enum)
+ if not issubclass(bases[0], Enum):
+ member_type = bases[0] # first data type
+ first_enum = bases[-1] # enum type
+ else:
+ for base in bases[0].__mro__:
+ # most common: (IntEnum, int, Enum, object)
+ # possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
+ # <class 'int'>, <Enum 'Enum'>,
+ # <class 'object'>)
+ if issubclass(base, Enum):
+ if first_enum is None:
+ first_enum = base
+ else:
+ if member_type is None:
+ member_type = base
+
+ return member_type, first_enum
+
+ if pyver < 3.0:
+ @staticmethod
+ def _find_new_(classdict, member_type, first_enum):
+ """Returns the __new__ to be used for creating the enum members.
+
+ classdict: the class dictionary given to __new__
+ member_type: the data type whose __new__ will be used by default
+ first_enum: enumeration to check for an overriding __new__
+
+ """
+ # now find the correct __new__, checking to see if one was defined
+ # by the user; also check earlier enum classes in case a __new__ was
+ # saved as __member_new__
+ __new__ = classdict.get('__new__', None)
+ if __new__:
+ return None, True, True # __new__, save_new, use_args
+
+ N__new__ = getattr(None, '__new__')
+ O__new__ = getattr(object, '__new__')
+ if Enum is None:
+ E__new__ = N__new__
+ else:
+ E__new__ = Enum.__dict__['__new__']
+ # check all possibles for __member_new__ before falling back to
+ # __new__
+ for method in ('__member_new__', '__new__'):
+ for possible in (member_type, first_enum):
+ try:
+ target = possible.__dict__[method]
+ except (AttributeError, KeyError):
+ target = getattr(possible, method, None)
+ if target not in [
+ None,
+ N__new__,
+ O__new__,
+ E__new__,
+ ]:
+ if method == '__member_new__':
+ classdict['__new__'] = target
+ return None, False, True
+ if isinstance(target, staticmethod):
+ target = target.__get__(member_type)
+ __new__ = target
+ break
+ if __new__ is not None:
+ break
+ else:
+ __new__ = object.__new__
+
+ # if a non-object.__new__ is used then whatever value/tuple was
+ # assigned to the enum member name will be passed to __new__ and to the
+ # new enum member's __init__
+ if __new__ is object.__new__:
+ use_args = False
+ else:
+ use_args = True
+
+ return __new__, False, use_args
+ else:
+ @staticmethod
+ def _find_new_(classdict, member_type, first_enum):
+ """Returns the __new__ to be used for creating the enum members.
+
+ classdict: the class dictionary given to __new__
+ member_type: the data type whose __new__ will be used by default
+ first_enum: enumeration to check for an overriding __new__
+
+ """
+ # now find the correct __new__, checking to see if one was defined
+ # by the user; also check earlier enum classes in case a __new__ was
+ # saved as __member_new__
+ __new__ = classdict.get('__new__', None)
+
+ # should __new__ be saved as __member_new__ later?
+ save_new = __new__ is not None
+
+ if __new__ is None:
+ # check all possibles for __member_new__ before falling back to
+ # __new__
+ for method in ('__member_new__', '__new__'):
+ for possible in (member_type, first_enum):
+ target = getattr(possible, method, None)
+ if target not in (
+ None,
+ None.__new__,
+ object.__new__,
+ Enum.__new__,
+ ):
+ __new__ = target
+ break
+ if __new__ is not None:
+ break
+ else:
+ __new__ = object.__new__
+
+ # if a non-object.__new__ is used then whatever value/tuple was
+ # assigned to the enum member name will be passed to __new__ and to the
+ # new enum member's __init__
+ if __new__ is object.__new__:
+ use_args = False
+ else:
+ use_args = True
+
+ return __new__, save_new, use_args
+
+
+########################################################
+# In order to support Python 2 and 3 with a single
+# codebase we have to create the Enum methods separately
+# and then use the `type(name, bases, dict)` method to
+# create the class.
+########################################################
+temp_enum_dict = {}
+temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
+
+def __new__(cls, value):
+ # all enum instances are actually created during class construction
+ # without calling this method; this method is called by the metaclass'
+ # __call__ (i.e. Color(3) ), and by pickle
+ if type(value) is cls:
+ # For lookups like Color(Color.red)
+ value = value.value
+ #return value
+ # by-value search for a matching enum member
+ # see if it's in the reverse mapping (for hashable values)
+ try:
+ if value in cls._value2member_map_:
+ return cls._value2member_map_[value]
+ except TypeError:
+ # not there, now do long search -- O(n) behavior
+ for member in cls._member_map_.values():
+ if member.value == value:
+ return member
+ raise ValueError("%s is not a valid %s" % (value, cls.__name__))
+temp_enum_dict['__new__'] = __new__
+del __new__
+
+def __repr__(self):
+ return "<%s.%s: %r>" % (
+ self.__class__.__name__, self._name_, self._value_)
+temp_enum_dict['__repr__'] = __repr__
+del __repr__
+
+def __str__(self):
+ return "%s.%s" % (self.__class__.__name__, self._name_)
+temp_enum_dict['__str__'] = __str__
+del __str__
+
+def __dir__(self):
+ added_behavior = [
+ m
+ for cls in self.__class__.mro()
+ for m in cls.__dict__
+ if m[0] != '_'
+ ]
+ return (['__class__', '__doc__', '__module__', ] + added_behavior)
+temp_enum_dict['__dir__'] = __dir__
+del __dir__
+
+def __format__(self, format_spec):
+ # mixed-in Enums should use the mixed-in type's __format__, otherwise
+ # we can get strange results with the Enum name showing up instead of
+ # the value
+
+ # pure Enum branch
+ if self._member_type_ is object:
+ cls = str
+ val = str(self)
+ # mix-in branch
+ else:
+ cls = self._member_type_
+ val = self.value
+ return cls.__format__(val, format_spec)
+temp_enum_dict['__format__'] = __format__
+del __format__
+
+
+####################################
+# Pythons older than 2.6 use __cmp__
+
+if pyver < 2.6:
+
+ def __cmp__(self, other):
+ if type(other) is self.__class__:
+ if self is other:
+ return 0
+ return -1
+ return NotImplemented
+ raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__cmp__'] = __cmp__
+ del __cmp__
+
+else:
+
+ def __le__(self, other):
+ raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__le__'] = __le__
+ del __le__
+
+ def __lt__(self, other):
+ raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__lt__'] = __lt__
+ del __lt__
+
+ def __ge__(self, other):
+ raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__ge__'] = __ge__
+ del __ge__
+
+ def __gt__(self, other):
+ raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
+ temp_enum_dict['__gt__'] = __gt__
+ del __gt__
+
+
+def __eq__(self, other):
+ if type(other) is self.__class__:
+ return self is other
+ return NotImplemented
+temp_enum_dict['__eq__'] = __eq__
+del __eq__
+
+def __ne__(self, other):
+ if type(other) is self.__class__:
+ return self is not other
+ return NotImplemented
+temp_enum_dict['__ne__'] = __ne__
+del __ne__
+
+def __hash__(self):
+ return hash(self._name_)
+temp_enum_dict['__hash__'] = __hash__
+del __hash__
+
+def __reduce_ex__(self, proto):
+ return self.__class__, (self._value_, )
+temp_enum_dict['__reduce_ex__'] = __reduce_ex__
+del __reduce_ex__
+
+# _RouteClassAttributeToGetattr is used to provide access to the `name`
+# and `value` properties of enum members while keeping some measure of
+# protection from modification, while still allowing for an enumeration
+# to have members named `name` and `value`. This works because enumeration
+# members are not set directly on the enum class -- __getattr__ is
+# used to look them up.
+
+@_RouteClassAttributeToGetattr
+def name(self):
+ return self._name_
+temp_enum_dict['name'] = name
+del name
+
+@_RouteClassAttributeToGetattr
+def value(self):
+ return self._value_
+temp_enum_dict['value'] = value
+del value
+
+Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
+del temp_enum_dict
+
+# Enum has now been created
+###########################
+
+class IntEnum(int, Enum):
+ """Enum where members are also (and must be) ints"""
+
+
+def unique(enumeration):
+ """Class decorator that ensures only unique members exist in an enumeration."""
+ duplicates = []
+ for name, member in enumeration.__members__.items():
+ if name != member.name:
+ duplicates.append((name, member.name))
+ if duplicates:
+ duplicate_names = ', '.join(
+ ["%s -> %s" % (alias, name) for (alias, name) in duplicates]
+ )
+ raise ValueError('duplicate names found in %r: %s' %
+ (enumeration, duplicate_names)
+ )
+ return enumeration
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/test_enum.py b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/test_enum.py
new file mode 100755
index 00000000..d7a97942
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/test_enum.py
@@ -0,0 +1,1690 @@
+import enum
+import sys
+import unittest
+from enum import Enum, IntEnum, unique, EnumMeta
+from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
+
+pyver = float('%s.%s' % sys.version_info[:2])
+
+try:
+ any
+except NameError:
+ def any(iterable):
+ for element in iterable:
+ if element:
+ return True
+ return False
+
+try:
+ unicode
+except NameError:
+ unicode = str
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ OrderedDict = None
+
+# for pickle tests
+try:
+ class Stooges(Enum):
+ LARRY = 1
+ CURLY = 2
+ MOE = 3
+except Exception:
+ Stooges = sys.exc_info()[1]
+
+try:
+ class IntStooges(int, Enum):
+ LARRY = 1
+ CURLY = 2
+ MOE = 3
+except Exception:
+ IntStooges = sys.exc_info()[1]
+
+try:
+ class FloatStooges(float, Enum):
+ LARRY = 1.39
+ CURLY = 2.72
+ MOE = 3.142596
+except Exception:
+ FloatStooges = sys.exc_info()[1]
+
+# for pickle test and subclass tests
+try:
+ class StrEnum(str, Enum):
+ 'accepts only string values'
+ class Name(StrEnum):
+ BDFL = 'Guido van Rossum'
+ FLUFL = 'Barry Warsaw'
+except Exception:
+ Name = sys.exc_info()[1]
+
+try:
+ Question = Enum('Question', 'who what when where why', module=__name__)
+except Exception:
+ Question = sys.exc_info()[1]
+
+try:
+ Answer = Enum('Answer', 'him this then there because')
+except Exception:
+ Answer = sys.exc_info()[1]
+
+try:
+ Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
+except Exception:
+ Theory = sys.exc_info()[1]
+
+# for doctests
+try:
+ class Fruit(Enum):
+ tomato = 1
+ banana = 2
+ cherry = 3
+except Exception:
+ pass
+
+def test_pickle_dump_load(assertion, source, target=None,
+ protocol=(0, HIGHEST_PROTOCOL)):
+ start, stop = protocol
+ failures = []
+ for protocol in range(start, stop+1):
+ try:
+ if target is None:
+ assertion(loads(dumps(source, protocol=protocol)) is source)
+ else:
+ assertion(loads(dumps(source, protocol=protocol)), target)
+ except Exception:
+ exc, tb = sys.exc_info()[1:]
+ failures.append('%2d: %s' %(protocol, exc))
+ if failures:
+ raise ValueError('Failed with protocols: %s' % ', '.join(failures))
+
+def test_pickle_exception(assertion, exception, obj,
+ protocol=(0, HIGHEST_PROTOCOL)):
+ start, stop = protocol
+ failures = []
+ for protocol in range(start, stop+1):
+ try:
+ assertion(exception, dumps, obj, protocol=protocol)
+ except Exception:
+ exc = sys.exc_info()[1]
+ failures.append('%d: %s %s' % (protocol, exc.__class__.__name__, exc))
+ if failures:
+ raise ValueError('Failed with protocols: %s' % ', '.join(failures))
+
+
+class TestHelpers(unittest.TestCase):
+ # _is_descriptor, _is_sunder, _is_dunder
+
+ def test_is_descriptor(self):
+ class foo:
+ pass
+ for attr in ('__get__','__set__','__delete__'):
+ obj = foo()
+ self.assertFalse(enum._is_descriptor(obj))
+ setattr(obj, attr, 1)
+ self.assertTrue(enum._is_descriptor(obj))
+
+ def test_is_sunder(self):
+ for s in ('_a_', '_aa_'):
+ self.assertTrue(enum._is_sunder(s))
+
+ for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
+ '__', '___', '____', '_____',):
+ self.assertFalse(enum._is_sunder(s))
+
+ def test_is_dunder(self):
+ for s in ('__a__', '__aa__'):
+ self.assertTrue(enum._is_dunder(s))
+ for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
+ '__', '___', '____', '_____',):
+ self.assertFalse(enum._is_dunder(s))
+
+
+class TestEnum(unittest.TestCase):
+ def setUp(self):
+ class Season(Enum):
+ SPRING = 1
+ SUMMER = 2
+ AUTUMN = 3
+ WINTER = 4
+ self.Season = Season
+
+ class Konstants(float, Enum):
+ E = 2.7182818
+ PI = 3.1415926
+ TAU = 2 * PI
+ self.Konstants = Konstants
+
+ class Grades(IntEnum):
+ A = 5
+ B = 4
+ C = 3
+ D = 2
+ F = 0
+ self.Grades = Grades
+
+ class Directional(str, Enum):
+ EAST = 'east'
+ WEST = 'west'
+ NORTH = 'north'
+ SOUTH = 'south'
+ self.Directional = Directional
+
+ from datetime import date
+ class Holiday(date, Enum):
+ NEW_YEAR = 2013, 1, 1
+ IDES_OF_MARCH = 2013, 3, 15
+ self.Holiday = Holiday
+
+ if pyver >= 2.6: # cannot specify custom `dir` on previous versions
+ def test_dir_on_class(self):
+ Season = self.Season
+ self.assertEqual(
+ set(dir(Season)),
+ set(['__class__', '__doc__', '__members__', '__module__',
+ 'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
+ )
+
+ def test_dir_on_item(self):
+ Season = self.Season
+ self.assertEqual(
+ set(dir(Season.WINTER)),
+ set(['__class__', '__doc__', '__module__', 'name', 'value']),
+ )
+
+ def test_dir_on_sub_with_behavior_on_super(self):
+ # see issue22506
+ class SuperEnum(Enum):
+ def invisible(self):
+ return "did you see me?"
+ class SubEnum(SuperEnum):
+ sample = 5
+ self.assertEqual(
+ set(dir(SubEnum.sample)),
+ set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
+ )
+
+ if pyver >= 2.7: # OrderedDict first available here
+ def test_members_is_ordereddict_if_ordered(self):
+ class Ordered(Enum):
+ __order__ = 'first second third'
+ first = 'bippity'
+ second = 'boppity'
+ third = 'boo'
+ self.assertTrue(type(Ordered.__members__) is OrderedDict)
+
+ def test_members_is_ordereddict_if_not_ordered(self):
+ class Unordered(Enum):
+ this = 'that'
+ these = 'those'
+ self.assertTrue(type(Unordered.__members__) is OrderedDict)
+
+ if pyver >= 3.0: # all objects are ordered in Python 2.x
+ def test_members_is_always_ordered(self):
+ class AlwaysOrdered(Enum):
+ first = 1
+ second = 2
+ third = 3
+ self.assertTrue(type(AlwaysOrdered.__members__) is OrderedDict)
+
+ def test_comparisons(self):
+ def bad_compare():
+ Season.SPRING > 4
+ Season = self.Season
+ self.assertNotEqual(Season.SPRING, 1)
+ self.assertRaises(TypeError, bad_compare)
+
+ class Part(Enum):
+ SPRING = 1
+ CLIP = 2
+ BARREL = 3
+
+ self.assertNotEqual(Season.SPRING, Part.SPRING)
+ def bad_compare():
+ Season.SPRING < Part.CLIP
+ self.assertRaises(TypeError, bad_compare)
+
+ def test_enum_in_enum_out(self):
+ Season = self.Season
+ self.assertTrue(Season(Season.WINTER) is Season.WINTER)
+
+ def test_enum_value(self):
+ Season = self.Season
+ self.assertEqual(Season.SPRING.value, 1)
+
+ def test_intenum_value(self):
+ self.assertEqual(IntStooges.CURLY.value, 2)
+
+ def test_enum(self):
+ Season = self.Season
+ lst = list(Season)
+ self.assertEqual(len(lst), len(Season))
+ self.assertEqual(len(Season), 4, Season)
+ self.assertEqual(
+ [Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
+
+ for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split()):
+ i += 1
+ e = Season(i)
+ self.assertEqual(e, getattr(Season, season))
+ self.assertEqual(e.value, i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, season)
+ self.assertTrue(e in Season)
+ self.assertTrue(type(e) is Season)
+ self.assertTrue(isinstance(e, Season))
+ self.assertEqual(str(e), 'Season.' + season)
+ self.assertEqual(
+ repr(e),
+ '<Season.%s: %s>' % (season, i),
+ )
+
+ def test_value_name(self):
+ Season = self.Season
+ self.assertEqual(Season.SPRING.name, 'SPRING')
+ self.assertEqual(Season.SPRING.value, 1)
+ def set_name(obj, new_value):
+ obj.name = new_value
+ def set_value(obj, new_value):
+ obj.value = new_value
+ self.assertRaises(AttributeError, set_name, Season.SPRING, 'invierno', )
+ self.assertRaises(AttributeError, set_value, Season.SPRING, 2)
+
+ def test_attribute_deletion(self):
+ class Season(Enum):
+ SPRING = 1
+ SUMMER = 2
+ AUTUMN = 3
+ WINTER = 4
+
+ def spam(cls):
+ pass
+
+ self.assertTrue(hasattr(Season, 'spam'))
+ del Season.spam
+ self.assertFalse(hasattr(Season, 'spam'))
+
+ self.assertRaises(AttributeError, delattr, Season, 'SPRING')
+ self.assertRaises(AttributeError, delattr, Season, 'DRY')
+ self.assertRaises(AttributeError, delattr, Season.SPRING, 'name')
+
+ def test_invalid_names(self):
+ def create_bad_class_1():
+ class Wrong(Enum):
+ mro = 9
+ def create_bad_class_2():
+ class Wrong(Enum):
+ _reserved_ = 3
+ self.assertRaises(ValueError, create_bad_class_1)
+ self.assertRaises(ValueError, create_bad_class_2)
+
+ def test_contains(self):
+ Season = self.Season
+ self.assertTrue(Season.AUTUMN in Season)
+ self.assertTrue(3 not in Season)
+
+ val = Season(3)
+ self.assertTrue(val in Season)
+
+ class OtherEnum(Enum):
+ one = 1; two = 2
+ self.assertTrue(OtherEnum.two not in Season)
+
+ if pyver >= 2.6: # when `format` came into being
+
+ def test_format_enum(self):
+ Season = self.Season
+ self.assertEqual('{0}'.format(Season.SPRING),
+ '{0}'.format(str(Season.SPRING)))
+ self.assertEqual( '{0:}'.format(Season.SPRING),
+ '{0:}'.format(str(Season.SPRING)))
+ self.assertEqual('{0:20}'.format(Season.SPRING),
+ '{0:20}'.format(str(Season.SPRING)))
+ self.assertEqual('{0:^20}'.format(Season.SPRING),
+ '{0:^20}'.format(str(Season.SPRING)))
+ self.assertEqual('{0:>20}'.format(Season.SPRING),
+ '{0:>20}'.format(str(Season.SPRING)))
+ self.assertEqual('{0:<20}'.format(Season.SPRING),
+ '{0:<20}'.format(str(Season.SPRING)))
+
+ def test_format_enum_custom(self):
+ class TestFloat(float, Enum):
+ one = 1.0
+ two = 2.0
+ def __format__(self, spec):
+ return 'TestFloat success!'
+ self.assertEqual('{0}'.format(TestFloat.one), 'TestFloat success!')
+
+ def assertFormatIsValue(self, spec, member):
+ self.assertEqual(spec.format(member), spec.format(member.value))
+
+ def test_format_enum_date(self):
+ Holiday = self.Holiday
+ self.assertFormatIsValue('{0}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:20}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:^20}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:>20}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:<20}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:%Y %m}', Holiday.IDES_OF_MARCH)
+ self.assertFormatIsValue('{0:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
+
+ def test_format_enum_float(self):
+ Konstants = self.Konstants
+ self.assertFormatIsValue('{0}', Konstants.TAU)
+ self.assertFormatIsValue('{0:}', Konstants.TAU)
+ self.assertFormatIsValue('{0:20}', Konstants.TAU)
+ self.assertFormatIsValue('{0:^20}', Konstants.TAU)
+ self.assertFormatIsValue('{0:>20}', Konstants.TAU)
+ self.assertFormatIsValue('{0:<20}', Konstants.TAU)
+ self.assertFormatIsValue('{0:n}', Konstants.TAU)
+ self.assertFormatIsValue('{0:5.2}', Konstants.TAU)
+ self.assertFormatIsValue('{0:f}', Konstants.TAU)
+
+ def test_format_enum_int(self):
+ Grades = self.Grades
+ self.assertFormatIsValue('{0}', Grades.C)
+ self.assertFormatIsValue('{0:}', Grades.C)
+ self.assertFormatIsValue('{0:20}', Grades.C)
+ self.assertFormatIsValue('{0:^20}', Grades.C)
+ self.assertFormatIsValue('{0:>20}', Grades.C)
+ self.assertFormatIsValue('{0:<20}', Grades.C)
+ self.assertFormatIsValue('{0:+}', Grades.C)
+ self.assertFormatIsValue('{0:08X}', Grades.C)
+ self.assertFormatIsValue('{0:b}', Grades.C)
+
+ def test_format_enum_str(self):
+ Directional = self.Directional
+ self.assertFormatIsValue('{0}', Directional.WEST)
+ self.assertFormatIsValue('{0:}', Directional.WEST)
+ self.assertFormatIsValue('{0:20}', Directional.WEST)
+ self.assertFormatIsValue('{0:^20}', Directional.WEST)
+ self.assertFormatIsValue('{0:>20}', Directional.WEST)
+ self.assertFormatIsValue('{0:<20}', Directional.WEST)
+
+ def test_hash(self):
+ Season = self.Season
+ dates = {}
+ dates[Season.WINTER] = '1225'
+ dates[Season.SPRING] = '0315'
+ dates[Season.SUMMER] = '0704'
+ dates[Season.AUTUMN] = '1031'
+ self.assertEqual(dates[Season.AUTUMN], '1031')
+
+ def test_enum_duplicates(self):
+ __order__ = "SPRING SUMMER AUTUMN WINTER"
+ class Season(Enum):
+ SPRING = 1
+ SUMMER = 2
+ AUTUMN = FALL = 3
+ WINTER = 4
+ ANOTHER_SPRING = 1
+ lst = list(Season)
+ self.assertEqual(
+ lst,
+ [Season.SPRING, Season.SUMMER,
+ Season.AUTUMN, Season.WINTER,
+ ])
+ self.assertTrue(Season.FALL is Season.AUTUMN)
+ self.assertEqual(Season.FALL.value, 3)
+ self.assertEqual(Season.AUTUMN.value, 3)
+ self.assertTrue(Season(3) is Season.AUTUMN)
+ self.assertTrue(Season(1) is Season.SPRING)
+ self.assertEqual(Season.FALL.name, 'AUTUMN')
+ self.assertEqual(
+ set([k for k,v in Season.__members__.items() if v.name != k]),
+ set(['FALL', 'ANOTHER_SPRING']),
+ )
+
+ if pyver >= 3.0:
+ cls = vars()
+ result = {'Enum':Enum}
+ exec("""def test_duplicate_name(self):
+ with self.assertRaises(TypeError):
+ class Color(Enum):
+ red = 1
+ green = 2
+ blue = 3
+ red = 4
+
+ with self.assertRaises(TypeError):
+ class Color(Enum):
+ red = 1
+ green = 2
+ blue = 3
+ def red(self):
+ return 'red'
+
+ with self.assertRaises(TypeError):
+ class Color(Enum):
+ @property
+
+ def red(self):
+ return 'redder'
+ red = 1
+ green = 2
+ blue = 3""",
+ result)
+ cls['test_duplicate_name'] = result['test_duplicate_name']
+
+ def test_enum_with_value_name(self):
+ class Huh(Enum):
+ name = 1
+ value = 2
+ self.assertEqual(
+ list(Huh),
+ [Huh.name, Huh.value],
+ )
+ self.assertTrue(type(Huh.name) is Huh)
+ self.assertEqual(Huh.name.name, 'name')
+ self.assertEqual(Huh.name.value, 1)
+
+ def test_intenum_from_scratch(self):
+ class phy(int, Enum):
+ pi = 3
+ tau = 2 * pi
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_intenum_inherited(self):
+ class IntEnum(int, Enum):
+ pass
+ class phy(IntEnum):
+ pi = 3
+ tau = 2 * pi
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_floatenum_from_scratch(self):
+ class phy(float, Enum):
+ pi = 3.1415926
+ tau = 2 * pi
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_floatenum_inherited(self):
+ class FloatEnum(float, Enum):
+ pass
+ class phy(FloatEnum):
+ pi = 3.1415926
+ tau = 2 * pi
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_strenum_from_scratch(self):
+ class phy(str, Enum):
+ pi = 'Pi'
+ tau = 'Tau'
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_strenum_inherited(self):
+ class StrEnum(str, Enum):
+ pass
+ class phy(StrEnum):
+ pi = 'Pi'
+ tau = 'Tau'
+ self.assertTrue(phy.pi < phy.tau)
+
+ def test_intenum(self):
+ class WeekDay(IntEnum):
+ SUNDAY = 1
+ MONDAY = 2
+ TUESDAY = 3
+ WEDNESDAY = 4
+ THURSDAY = 5
+ FRIDAY = 6
+ SATURDAY = 7
+
+ self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
+ self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
+
+ lst = list(WeekDay)
+ self.assertEqual(len(lst), len(WeekDay))
+ self.assertEqual(len(WeekDay), 7)
+ target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
+ target = target.split()
+ for i, weekday in enumerate(target):
+ i += 1
+ e = WeekDay(i)
+ self.assertEqual(e, i)
+ self.assertEqual(int(e), i)
+ self.assertEqual(e.name, weekday)
+ self.assertTrue(e in WeekDay)
+ self.assertEqual(lst.index(e)+1, i)
+ self.assertTrue(0 < e < 8)
+ self.assertTrue(type(e) is WeekDay)
+ self.assertTrue(isinstance(e, int))
+ self.assertTrue(isinstance(e, Enum))
+
+ def test_intenum_duplicates(self):
+ class WeekDay(IntEnum):
+ __order__ = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
+ SUNDAY = 1
+ MONDAY = 2
+ TUESDAY = TEUSDAY = 3
+ WEDNESDAY = 4
+ THURSDAY = 5
+ FRIDAY = 6
+ SATURDAY = 7
+ self.assertTrue(WeekDay.TEUSDAY is WeekDay.TUESDAY)
+ self.assertEqual(WeekDay(3).name, 'TUESDAY')
+ self.assertEqual([k for k,v in WeekDay.__members__.items()
+ if v.name != k], ['TEUSDAY', ])
+
+ def test_pickle_enum(self):
+ if isinstance(Stooges, Exception):
+ raise Stooges
+ test_pickle_dump_load(self.assertTrue, Stooges.CURLY)
+ test_pickle_dump_load(self.assertTrue, Stooges)
+
+ def test_pickle_int(self):
+ if isinstance(IntStooges, Exception):
+ raise IntStooges
+ test_pickle_dump_load(self.assertTrue, IntStooges.CURLY)
+ test_pickle_dump_load(self.assertTrue, IntStooges)
+
+ def test_pickle_float(self):
+ if isinstance(FloatStooges, Exception):
+ raise FloatStooges
+ test_pickle_dump_load(self.assertTrue, FloatStooges.CURLY)
+ test_pickle_dump_load(self.assertTrue, FloatStooges)
+
+ def test_pickle_enum_function(self):
+ if isinstance(Answer, Exception):
+ raise Answer
+ test_pickle_dump_load(self.assertTrue, Answer.him)
+ test_pickle_dump_load(self.assertTrue, Answer)
+
+ def test_pickle_enum_function_with_module(self):
+ if isinstance(Question, Exception):
+ raise Question
+ test_pickle_dump_load(self.assertTrue, Question.who)
+ test_pickle_dump_load(self.assertTrue, Question)
+
+ if pyver >= 3.4:
+ def test_class_nested_enum_and_pickle_protocol_four(self):
+ # would normally just have this directly in the class namespace
+ class NestedEnum(Enum):
+ twigs = 'common'
+ shiny = 'rare'
+
+ self.__class__.NestedEnum = NestedEnum
+ self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
+ test_pickle_exception(
+ self.assertRaises, PicklingError, self.NestedEnum.twigs,
+ protocol=(0, 3))
+ test_pickle_dump_load(self.assertTrue, self.NestedEnum.twigs,
+ protocol=(4, HIGHEST_PROTOCOL))
+
+ def test_exploding_pickle(self):
+ BadPickle = Enum('BadPickle', 'dill sweet bread-n-butter')
+ enum._make_class_unpicklable(BadPickle)
+ globals()['BadPickle'] = BadPickle
+ test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
+ test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
+
+ def test_string_enum(self):
+ class SkillLevel(str, Enum):
+ master = 'what is the sound of one hand clapping?'
+ journeyman = 'why did the chicken cross the road?'
+ apprentice = 'knock, knock!'
+ self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
+
+ def test_getattr_getitem(self):
+ class Period(Enum):
+ morning = 1
+ noon = 2
+ evening = 3
+ night = 4
+ self.assertTrue(Period(2) is Period.noon)
+ self.assertTrue(getattr(Period, 'night') is Period.night)
+ self.assertTrue(Period['morning'] is Period.morning)
+
+ def test_getattr_dunder(self):
+ Season = self.Season
+ self.assertTrue(getattr(Season, '__hash__'))
+
+ def test_iteration_order(self):
+ class Season(Enum):
+ __order__ = 'SUMMER WINTER AUTUMN SPRING'
+ SUMMER = 2
+ WINTER = 4
+ AUTUMN = 3
+ SPRING = 1
+ self.assertEqual(
+ list(Season),
+ [Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
+ )
+
+ def test_iteration_order_with_unorderable_values(self):
+ class Complex(Enum):
+ a = complex(7, 9)
+ b = complex(3.14, 2)
+ c = complex(1, -1)
+ d = complex(-77, 32)
+ self.assertEqual(
+ list(Complex),
+ [Complex.a, Complex.b, Complex.c, Complex.d],
+ )
+
+ def test_programatic_function_string(self):
+ SummerMonth = Enum('SummerMonth', 'june july august')
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_string_list(self):
+ SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_iterable(self):
+ SummerMonth = Enum(
+ 'SummerMonth',
+ (('june', 1), ('july', 2), ('august', 3))
+ )
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_from_dict(self):
+ SummerMonth = Enum(
+ 'SummerMonth',
+ dict((('june', 1), ('july', 2), ('august', 3)))
+ )
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ if pyver < 3.0:
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_type(self):
+ SummerMonth = Enum('SummerMonth', 'june july august', type=int)
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_type_from_subclass(self):
+ SummerMonth = IntEnum('SummerMonth', 'june july august')
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate('june july august'.split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_unicode(self):
+ SummerMonth = Enum('SummerMonth', unicode('june july august'))
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_unicode_list(self):
+ SummerMonth = Enum('SummerMonth', [unicode('june'), unicode('july'), unicode('august')])
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_unicode_iterable(self):
+ SummerMonth = Enum(
+ 'SummerMonth',
+ ((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3))
+ )
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_from_unicode_dict(self):
+ SummerMonth = Enum(
+ 'SummerMonth',
+ dict(((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3)))
+ )
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ if pyver < 3.0:
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(int(e.value), i)
+ self.assertNotEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_unicode_type(self):
+ SummerMonth = Enum('SummerMonth', unicode('june july august'), type=int)
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programatic_function_unicode_type_from_subclass(self):
+ SummerMonth = IntEnum('SummerMonth', unicode('june july august'))
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(e, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_programmatic_function_unicode_class(self):
+ if pyver < 3.0:
+ class_names = unicode('SummerMonth'), 'S\xfcmm\xe9rM\xf6nth'.decode('latin1')
+ else:
+ class_names = 'SummerMonth', 'S\xfcmm\xe9rM\xf6nth'
+ for i, class_name in enumerate(class_names):
+ if pyver < 3.0 and i == 1:
+ self.assertRaises(TypeError, Enum, class_name, unicode('june july august'))
+ else:
+ SummerMonth = Enum(class_name, unicode('june july august'))
+ lst = list(SummerMonth)
+ self.assertEqual(len(lst), len(SummerMonth))
+ self.assertEqual(len(SummerMonth), 3, SummerMonth)
+ self.assertEqual(
+ [SummerMonth.june, SummerMonth.july, SummerMonth.august],
+ lst,
+ )
+ for i, month in enumerate(unicode('june july august').split()):
+ i += 1
+ e = SummerMonth(i)
+ self.assertEqual(e.value, i)
+ self.assertEqual(e.name, month)
+ self.assertTrue(e in SummerMonth)
+ self.assertTrue(type(e) is SummerMonth)
+
+ def test_subclassing(self):
+ if isinstance(Name, Exception):
+ raise Name
+ self.assertEqual(Name.BDFL, 'Guido van Rossum')
+ self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
+ self.assertTrue(Name.BDFL is getattr(Name, 'BDFL'))
+ test_pickle_dump_load(self.assertTrue, Name.BDFL)
+
+ def test_extending(self):
+ def bad_extension():
+ class Color(Enum):
+ red = 1
+ green = 2
+ blue = 3
+ class MoreColor(Color):
+ cyan = 4
+ magenta = 5
+ yellow = 6
+ self.assertRaises(TypeError, bad_extension)
+
+ def test_exclude_methods(self):
+ class whatever(Enum):
+ this = 'that'
+ these = 'those'
+ def really(self):
+ return 'no, not %s' % self.value
+ self.assertFalse(type(whatever.really) is whatever)
+ self.assertEqual(whatever.this.really(), 'no, not that')
+
+ def test_wrong_inheritance_order(self):
+ def wrong_inherit():
+ class Wrong(Enum, str):
+ NotHere = 'error before this point'
+ self.assertRaises(TypeError, wrong_inherit)
+
+ def test_intenum_transitivity(self):
+ class number(IntEnum):
+ one = 1
+ two = 2
+ three = 3
+ class numero(IntEnum):
+ uno = 1
+ dos = 2
+ tres = 3
+ self.assertEqual(number.one, numero.uno)
+ self.assertEqual(number.two, numero.dos)
+ self.assertEqual(number.three, numero.tres)
+
+ def test_introspection(self):
+ class Number(IntEnum):
+ one = 100
+ two = 200
+ self.assertTrue(Number.one._member_type_ is int)
+ self.assertTrue(Number._member_type_ is int)
+ class String(str, Enum):
+ yarn = 'soft'
+ rope = 'rough'
+ wire = 'hard'
+ self.assertTrue(String.yarn._member_type_ is str)
+ self.assertTrue(String._member_type_ is str)
+ class Plain(Enum):
+ vanilla = 'white'
+ one = 1
+ self.assertTrue(Plain.vanilla._member_type_ is object)
+ self.assertTrue(Plain._member_type_ is object)
+
+ def test_wrong_enum_in_call(self):
+ class Monochrome(Enum):
+ black = 0
+ white = 1
+ class Gender(Enum):
+ male = 0
+ female = 1
+ self.assertRaises(ValueError, Monochrome, Gender.male)
+
+ def test_wrong_enum_in_mixed_call(self):
+ class Monochrome(IntEnum):
+ black = 0
+ white = 1
+ class Gender(Enum):
+ male = 0
+ female = 1
+ self.assertRaises(ValueError, Monochrome, Gender.male)
+
+ def test_mixed_enum_in_call_1(self):
+ class Monochrome(IntEnum):
+ black = 0
+ white = 1
+ class Gender(IntEnum):
+ male = 0
+ female = 1
+ self.assertTrue(Monochrome(Gender.female) is Monochrome.white)
+
+ def test_mixed_enum_in_call_2(self):
+ class Monochrome(Enum):
+ black = 0
+ white = 1
+ class Gender(IntEnum):
+ male = 0
+ female = 1
+ self.assertTrue(Monochrome(Gender.male) is Monochrome.black)
+
+ def test_flufl_enum(self):
+ class Fluflnum(Enum):
+ def __int__(self):
+ return int(self.value)
+ class MailManOptions(Fluflnum):
+ option1 = 1
+ option2 = 2
+ option3 = 3
+ self.assertEqual(int(MailManOptions.option1), 1)
+
+ def test_no_such_enum_member(self):
+ class Color(Enum):
+ red = 1
+ green = 2
+ blue = 3
+ self.assertRaises(ValueError, Color, 4)
+ self.assertRaises(KeyError, Color.__getitem__, 'chartreuse')
+
+ def test_new_repr(self):
+ class Color(Enum):
+ red = 1
+ green = 2
+ blue = 3
+ def __repr__(self):
+ return "don't you just love shades of %s?" % self.name
+ self.assertEqual(
+ repr(Color.blue),
+ "don't you just love shades of blue?",
+ )
+
+ def test_inherited_repr(self):
+ class MyEnum(Enum):
+ def __repr__(self):
+ return "My name is %s." % self.name
+ class MyIntEnum(int, MyEnum):
+ this = 1
+ that = 2
+ theother = 3
+ self.assertEqual(repr(MyIntEnum.that), "My name is that.")
+
+ def test_multiple_mixin_mro(self):
+ class auto_enum(EnumMeta):
+ def __new__(metacls, cls, bases, classdict):
+ original_dict = classdict
+ classdict = enum._EnumDict()
+ for k, v in original_dict.items():
+ classdict[k] = v
+ temp = type(classdict)()
+ names = set(classdict._member_names)
+ i = 0
+ for k in classdict._member_names:
+ v = classdict[k]
+ if v == ():
+ v = i
+ else:
+ i = v
+ i += 1
+ temp[k] = v
+ for k, v in classdict.items():
+ if k not in names:
+ temp[k] = v
+ return super(auto_enum, metacls).__new__(
+ metacls, cls, bases, temp)
+
+ AutoNumberedEnum = auto_enum('AutoNumberedEnum', (Enum,), {})
+
+ AutoIntEnum = auto_enum('AutoIntEnum', (IntEnum,), {})
+
+ class TestAutoNumber(AutoNumberedEnum):
+ a = ()
+ b = 3
+ c = ()
+
+ class TestAutoInt(AutoIntEnum):
+ a = ()
+ b = 3
+ c = ()
+
+ def test_subclasses_with_getnewargs(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt' # needed for pickle protocol 4
+ def __new__(cls, *args):
+ _args = args
+ if len(args) < 1:
+ raise TypeError("name and value must be specified")
+ name, args = args[0], args[1:]
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ def __getnewargs__(self):
+ return self._args
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "%s(%r, %s)" % (type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int( other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '(%s + %s)' % (self.__name__, other.__name__),
+ temp )
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI' # needed for pickle protocol 4
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+
+ self.assertTrue(NEI.__new__ is Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ test_pickle_dump_load(self.assertTrue, NI5, 5)
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_dump_load(self.assertTrue, NEI.y)
+
+ if pyver >= 3.4:
+ def test_subclasses_with_getnewargs_ex(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt' # needed for pickle protocol 4
+ def __new__(cls, *args):
+ _args = args
+ if len(args) < 2:
+ raise TypeError("name and value must be specified")
+ name, args = args[0], args[1:]
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ def __getnewargs_ex__(self):
+ return self._args, {}
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "{}({!r}, {})".format(type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int( other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '({0} + {1})'.format(self.__name__, other.__name__),
+ temp )
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI' # needed for pickle protocol 4
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+
+
+ self.assertIs(NEI.__new__, Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ test_pickle_dump_load(self.assertEqual, NI5, 5, protocol=(4, HIGHEST_PROTOCOL))
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_dump_load(self.assertTrue, NEI.y, protocol=(4, HIGHEST_PROTOCOL))
+
+ def test_subclasses_with_reduce(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt' # needed for pickle protocol 4
+ def __new__(cls, *args):
+ _args = args
+ if len(args) < 1:
+ raise TypeError("name and value must be specified")
+ name, args = args[0], args[1:]
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ def __reduce__(self):
+ return self.__class__, self._args
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "%s(%r, %s)" % (type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int( other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '(%s + %s)' % (self.__name__, other.__name__),
+ temp )
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI' # needed for pickle protocol 4
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+
+
+ self.assertTrue(NEI.__new__ is Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ test_pickle_dump_load(self.assertEqual, NI5, 5)
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_dump_load(self.assertTrue, NEI.y)
+
+ def test_subclasses_with_reduce_ex(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt' # needed for pickle protocol 4
+ def __new__(cls, *args):
+ _args = args
+ if len(args) < 1:
+ raise TypeError("name and value must be specified")
+ name, args = args[0], args[1:]
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ def __reduce_ex__(self, proto):
+ return self.__class__, self._args
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "%s(%r, %s)" % (type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int( other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '(%s + %s)' % (self.__name__, other.__name__),
+ temp )
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI' # needed for pickle protocol 4
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+
+
+ self.assertTrue(NEI.__new__ is Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ test_pickle_dump_load(self.assertEqual, NI5, 5)
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_dump_load(self.assertTrue, NEI.y)
+
+ def test_subclasses_without_direct_pickle_support(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt'
+ def __new__(cls, *args):
+ _args = args
+ name, args = args[0], args[1:]
+ if len(args) == 0:
+ raise TypeError("name and value must be specified")
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "%s(%r, %s)" % (type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int( other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '(%s + %s)' % (self.__name__, other.__name__),
+ temp )
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI'
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+
+ self.assertTrue(NEI.__new__ is Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_exception(self.assertRaises, TypeError, NEI.x)
+ test_pickle_exception(self.assertRaises, PicklingError, NEI)
+
+ def test_subclasses_without_direct_pickle_support_using_name(self):
+ class NamedInt(int):
+ __qualname__ = 'NamedInt'
+ def __new__(cls, *args):
+ _args = args
+ name, args = args[0], args[1:]
+ if len(args) == 0:
+ raise TypeError("name and value must be specified")
+ self = int.__new__(cls, *args)
+ self._intname = name
+ self._args = _args
+ return self
+ @property
+ def __name__(self):
+ return self._intname
+ def __repr__(self):
+ # repr() is updated to include the name and type info
+ return "%s(%r, %s)" % (type(self).__name__,
+ self.__name__,
+ int.__repr__(self))
+ def __str__(self):
+ # str() is unchanged, even if it relies on the repr() fallback
+ base = int
+ base_str = base.__str__
+ if base_str.__objclass__ is object:
+ return base.__repr__(self)
+ return base_str(self)
+ # for simplicity, we only define one operator that
+ # propagates expressions
+ def __add__(self, other):
+ temp = int(self) + int( other)
+ if isinstance(self, NamedInt) and isinstance(other, NamedInt):
+ return NamedInt(
+ '(%s + %s)' % (self.__name__, other.__name__),
+ temp )
+ else:
+ return temp
+
+ class NEI(NamedInt, Enum):
+ __qualname__ = 'NEI'
+ x = ('the-x', 1)
+ y = ('the-y', 2)
+ def __reduce_ex__(self, proto):
+ return getattr, (self.__class__, self._name_)
+
+ self.assertTrue(NEI.__new__ is Enum.__new__)
+ self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
+ globals()['NamedInt'] = NamedInt
+ globals()['NEI'] = NEI
+ NI5 = NamedInt('test', 5)
+ self.assertEqual(NI5, 5)
+ self.assertEqual(NEI.y.value, 2)
+ test_pickle_dump_load(self.assertTrue, NEI.y)
+ test_pickle_dump_load(self.assertTrue, NEI)
+
+ def test_tuple_subclass(self):
+ class SomeTuple(tuple, Enum):
+ __qualname__ = 'SomeTuple'
+ first = (1, 'for the money')
+ second = (2, 'for the show')
+ third = (3, 'for the music')
+ self.assertTrue(type(SomeTuple.first) is SomeTuple)
+ self.assertTrue(isinstance(SomeTuple.second, tuple))
+ self.assertEqual(SomeTuple.third, (3, 'for the music'))
+ globals()['SomeTuple'] = SomeTuple
+ test_pickle_dump_load(self.assertTrue, SomeTuple.first)
+
+ def test_duplicate_values_give_unique_enum_items(self):
+ class AutoNumber(Enum):
+ __order__ = 'enum_m enum_d enum_y'
+ enum_m = ()
+ enum_d = ()
+ enum_y = ()
+ def __new__(cls):
+ value = len(cls.__members__) + 1
+ obj = object.__new__(cls)
+ obj._value_ = value
+ return obj
+ def __int__(self):
+ return int(self._value_)
+ self.assertEqual(int(AutoNumber.enum_d), 2)
+ self.assertEqual(AutoNumber.enum_y.value, 3)
+ self.assertTrue(AutoNumber(1) is AutoNumber.enum_m)
+ self.assertEqual(
+ list(AutoNumber),
+ [AutoNumber.enum_m, AutoNumber.enum_d, AutoNumber.enum_y],
+ )
+
+ def test_inherited_new_from_enhanced_enum(self):
+ class AutoNumber2(Enum):
+ def __new__(cls):
+ value = len(cls.__members__) + 1
+ obj = object.__new__(cls)
+ obj._value_ = value
+ return obj
+ def __int__(self):
+ return int(self._value_)
+ class Color(AutoNumber2):
+ __order__ = 'red green blue'
+ red = ()
+ green = ()
+ blue = ()
+ self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
+ self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
+ if pyver >= 3.0:
+ self.assertEqual(list(map(int, Color)), [1, 2, 3])
+
+ def test_inherited_new_from_mixed_enum(self):
+ class AutoNumber3(IntEnum):
+ def __new__(cls):
+ value = len(cls.__members__) + 1
+ obj = int.__new__(cls, value)
+ obj._value_ = value
+ return obj
+ class Color(AutoNumber3):
+ red = ()
+ green = ()
+ blue = ()
+ self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
+ Color.red
+ Color.green
+ Color.blue
+
+ def test_ordered_mixin(self):
+ class OrderedEnum(Enum):
+ def __ge__(self, other):
+ if self.__class__ is other.__class__:
+ return self._value_ >= other._value_
+ return NotImplemented
+ def __gt__(self, other):
+ if self.__class__ is other.__class__:
+ return self._value_ > other._value_
+ return NotImplemented
+ def __le__(self, other):
+ if self.__class__ is other.__class__:
+ return self._value_ <= other._value_
+ return NotImplemented
+ def __lt__(self, other):
+ if self.__class__ is other.__class__:
+ return self._value_ < other._value_
+ return NotImplemented
+ class Grade(OrderedEnum):
+ __order__ = 'A B C D F'
+ A = 5
+ B = 4
+ C = 3
+ D = 2
+ F = 1
+ self.assertEqual(list(Grade), [Grade.A, Grade.B, Grade.C, Grade.D, Grade.F])
+ self.assertTrue(Grade.A > Grade.B)
+ self.assertTrue(Grade.F <= Grade.C)
+ self.assertTrue(Grade.D < Grade.A)
+ self.assertTrue(Grade.B >= Grade.B)
+
+ def test_extending2(self):
+ def bad_extension():
+ class Shade(Enum):
+ def shade(self):
+ print(self.name)
+ class Color(Shade):
+ red = 1
+ green = 2
+ blue = 3
+ class MoreColor(Color):
+ cyan = 4
+ magenta = 5
+ yellow = 6
+ self.assertRaises(TypeError, bad_extension)
+
+ def test_extending3(self):
+ class Shade(Enum):
+ def shade(self):
+ return self.name
+ class Color(Shade):
+ def hex(self):
+ return '%s hexlified!' % self.value
+ class MoreColor(Color):
+ cyan = 4
+ magenta = 5
+ yellow = 6
+ self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
+
+ def test_no_duplicates(self):
+ def bad_duplicates():
+ class UniqueEnum(Enum):
+ def __init__(self, *args):
+ cls = self.__class__
+ if any(self.value == e.value for e in cls):
+ a = self.name
+ e = cls(self.value).name
+ raise ValueError(
+ "aliases not allowed in UniqueEnum: %r --> %r"
+ % (a, e)
+ )
+ class Color(UniqueEnum):
+ red = 1
+ green = 2
+ blue = 3
+ class Color(UniqueEnum):
+ red = 1
+ green = 2
+ blue = 3
+ grene = 2
+ self.assertRaises(ValueError, bad_duplicates)
+
+ def test_reversed(self):
+ self.assertEqual(
+ list(reversed(self.Season)),
+ [self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
+ self.Season.SPRING]
+ )
+
+ def test_init(self):
+ class Planet(Enum):
+ MERCURY = (3.303e+23, 2.4397e6)
+ VENUS = (4.869e+24, 6.0518e6)
+ EARTH = (5.976e+24, 6.37814e6)
+ MARS = (6.421e+23, 3.3972e6)
+ JUPITER = (1.9e+27, 7.1492e7)
+ SATURN = (5.688e+26, 6.0268e7)
+ URANUS = (8.686e+25, 2.5559e7)
+ NEPTUNE = (1.024e+26, 2.4746e7)
+ def __init__(self, mass, radius):
+ self.mass = mass # in kilograms
+ self.radius = radius # in meters
+ @property
+ def surface_gravity(self):
+ # universal gravitational constant (m3 kg-1 s-2)
+ G = 6.67300E-11
+ return G * self.mass / (self.radius * self.radius)
+ self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
+ self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
+
+ def test_nonhash_value(self):
+ class AutoNumberInAList(Enum):
+ def __new__(cls):
+ value = [len(cls.__members__) + 1]
+ obj = object.__new__(cls)
+ obj._value_ = value
+ return obj
+ class ColorInAList(AutoNumberInAList):
+ __order__ = 'red green blue'
+ red = ()
+ green = ()
+ blue = ()
+ self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
+ self.assertEqual(ColorInAList.red.value, [1])
+ self.assertEqual(ColorInAList([1]), ColorInAList.red)
+
+ def test_conflicting_types_resolved_in_new(self):
+ class LabelledIntEnum(int, Enum):
+ def __new__(cls, *args):
+ value, label = args
+ obj = int.__new__(cls, value)
+ obj.label = label
+ obj._value_ = value
+ return obj
+
+ class LabelledList(LabelledIntEnum):
+ unprocessed = (1, "Unprocessed")
+ payment_complete = (2, "Payment Complete")
+
+ self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
+ self.assertEqual(LabelledList.unprocessed, 1)
+ self.assertEqual(LabelledList(1), LabelledList.unprocessed)
+
+class TestUnique(unittest.TestCase):
+ """2.4 doesn't allow class decorators, use function syntax."""
+
+ def test_unique_clean(self):
+ class Clean(Enum):
+ one = 1
+ two = 'dos'
+ tres = 4.0
+ unique(Clean)
+ class Cleaner(IntEnum):
+ single = 1
+ double = 2
+ triple = 3
+ unique(Cleaner)
+
+ def test_unique_dirty(self):
+ try:
+ class Dirty(Enum):
+ __order__ = 'one two tres'
+ one = 1
+ two = 'dos'
+ tres = 1
+ unique(Dirty)
+ except ValueError:
+ exc = sys.exc_info()[1]
+ message = exc.args[0]
+ self.assertTrue('tres -> one' in message)
+
+ try:
+ class Dirtier(IntEnum):
+ __order__ = 'single double triple turkey'
+ single = 1
+ double = 1
+ triple = 3
+ turkey = 3
+ unique(Dirtier)
+ except ValueError:
+ exc = sys.exc_info()[1]
+ message = exc.args[0]
+ self.assertTrue('double -> single' in message)
+ self.assertTrue('turkey -> triple' in message)
+
+
+class TestMe(unittest.TestCase):
+
+ pass
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/setup.py b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/setup.py
new file mode 100755
index 00000000..ecb4944f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/setup.py
@@ -0,0 +1,44 @@
+import os
+import sys
+from distutils.core import setup
+
+if sys.version_info[:2] < (2, 7):
+ required = ['ordereddict']
+else:
+ required = []
+
+long_desc = open('enum/doc/enum.rst').read()
+
+setup( name='enum34',
+ version='1.0.4',
+ url='https://pypi.python.org/pypi/enum34',
+ packages=['enum'],
+ package_data={
+ 'enum' : [
+ 'LICENSE',
+ 'README',
+ 'doc/enum.rst',
+ 'doc/enum.pdf',
+ 'test_enum.py',
+ ]
+ },
+ license='BSD License',
+ description='Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4',
+ long_description=long_desc,
+ provides=['enum'],
+ install_requires=required,
+ author='Ethan Furman',
+ author_email='ethan@stoneleaf.us',
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: BSD License',
+ 'Programming Language :: Python',
+ 'Topic :: Software Development',
+ 'Programming Language :: Python :: 2.4',
+ 'Programming Language :: Python :: 2.5',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ ],
+ )
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/LICENSE.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/LICENSE.txt
new file mode 100755
index 00000000..51fca54c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/LICENSE.txt
@@ -0,0 +1,11 @@
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/PKG-INFO
new file mode 100755
index 00000000..7082747b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: jsonrpclib
+Version: 0.1.3
+Summary: This project is an implementation of the JSON-RPC v2.0 specification (backwards-compatible) as a client library.
+Home-page: http://github.com/joshmarshall/jsonrpclib/
+Author: Josh Marshall
+Author-email: catchjosh@gmail.com
+License: http://www.apache.org/licenses/LICENSE-2.0
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/README.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/README.txt
new file mode 100755
index 00000000..9d431a48
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/README.txt
@@ -0,0 +1,203 @@
+JSONRPClib
+==========
+This library is an implementation of the JSON-RPC specification.
+It supports both the original 1.0 specification and the new (proposed)
+2.0 spec, which includes batch submission, keyword arguments, and more.
+
+It is licensed under the Apache License, Version 2.0
+(http://www.apache.org/licenses/LICENSE-2.0.html).
+
+Communication
+-------------
+Feel free to send any questions, comments, or patches to our Google Group
+mailing list (you'll need to join to send a message):
+http://groups.google.com/group/jsonrpclib
+
+Summary
+-------
+This library implements the JSON-RPC 2.0 proposed specification in pure Python.
+It is designed to be as compatible with the syntax of xmlrpclib as possible
+(it extends where possible), so that projects using xmlrpclib could easily be
+modified to use JSON and experiment with the differences.
+
+It is backwards-compatible with the 1.0 specification, and supports all of the
+new proposed features of 2.0, including:
+
+* Batch submission (via MultiCall)
+* Keyword arguments
+* Notifications (both in a batch and 'normal')
+* Class translation using the 'jsonclass' key.
+
+I've added a "SimpleJSONRPCServer", which is intended to emulate the
+"SimpleXMLRPCServer" from the default Python distribution.
+
+Requirements
+------------
+It supports cjson and simplejson, and looks for the parsers in that order
+(searching first for cjson, then for the "built-in" simplejson as json in 2.6+,
+and then the simplejson external library). One of these must be installed to
+use this library, although if you have a standard distribution of 2.6+, you
+should already have one. cjson is reportedly the fastest of the three, so
+install it if parsing performance matters to you.
+
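+In sketch form, the lookup is the following try/except chain (the actual
+logic lives in jsonrpclib/jsonrpc.py, which raises an ImportError if none of
+the three parsers is available):
+
+    cjson = None
+    json = None
+    try:
+        import cjson                    # fastest, preferred when installed
+    except ImportError:
+        try:
+            import json                 # stdlib json, Python 2.6+
+        except ImportError:
+            import simplejson as json   # external fallback
+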
+Client Usage
+------------
+
+This is (obviously) taken from a console session.
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.Server('http://localhost:8080')
+ >>> server.add(5,6)
+ 11
+ >>> print jsonrpclib.history.request
+ {"jsonrpc": "2.0", "params": [5, 6], "id": "gb3c9g37", "method": "add"}
+ >>> print jsonrpclib.history.response
+ {'jsonrpc': '2.0', 'result': 11, 'id': 'gb3c9g37'}
+ >>> server.add(x=5, y=10)
+ 15
+ >>> server._notify.add(5,6)
+ # No result returned...
+ >>> batch = jsonrpclib.MultiCall(server)
+ >>> batch.add(5, 6)
+ >>> batch.ping({'key':'value'})
+ >>> batch._notify.add(4, 30)
+ >>> results = batch()
+ >>> for result in results:
+ ...     print result
+ 11
+ {'key': 'value'}
+ # Note that there are only two responses -- this is according to spec.
+
+If you need 1.0 functionality, there are several places you can pass that
+in, although the simplest is to change the value of
+jsonrpclib.config.version:
+
+ >>> import jsonrpclib
+ >>> jsonrpclib.config.version
+ 2.0
+ >>> jsonrpclib.config.version = 1.0
+ >>> server = jsonrpclib.Server('http://localhost:8080')
+ >>> server.add(7, 10)
+ 17
+ >>> print jsonrpclib.history.request
+ {"params": [7, 10], "id": "thes7tl2", "method": "add"}
+ >>> print jsonrpclib.history.response
+ {'id': 'thes7tl2', 'result': 17, 'error': None}
+ >>>
+
+The equivalent loads and dumps functions also exist, although with minor
+modifications. The dumps arguments are almost identical, but it adds three
+arguments: rpcid for the 'id' key, version to specify the JSON-RPC
+compatibility, and notify if it's a request that you want to be a
+notification.
+
+Additionally, the loads method does not return the params and method like
+xmlrpclib, but instead a.) parses for errors, raising ProtocolErrors, and
+b.) returns the entire structure of the request / response for manual parsing.
+
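+A rough sketch of the difference, using the argument names described above
+(the exact key order of the emitted JSON may vary):
+
+    >>> import jsonrpclib
+    >>> request = jsonrpclib.dumps((5, 6), 'add', rpcid='abc123', version=2.0)
+    >>> parsed = jsonrpclib.loads(request)
+    >>> parsed['method'], parsed['params'], parsed['id']
+    ('add', [5, 6], 'abc123')
+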
+SimpleJSONRPCServer
+-------------------
+This is identical in usage (or should be) to the SimpleXMLRPCServer in the
+default Python install. It additionally supports notifications, batch calls,
+class translation (if left on), and so on. Note that the import line differs
+slightly from the regular SimpleXMLRPCServer, since SimpleJSONRPCServer is
+distributed within the jsonrpclib library.
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ server = SimpleJSONRPCServer(('localhost', 8080))
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+ server.serve_forever()
+
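+With that server running, the client usage shown earlier works against it
+unchanged, for example:
+
+    >>> import jsonrpclib
+    >>> server = jsonrpclib.Server('http://localhost:8080')
+    >>> server.pow(2, 8)
+    256
+    >>> server.add(3, 4)
+    7
+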
+Class Translation
+-----------------
+I've recently added "automatic" class translation support, although it is
+turned off by default. This can be devastatingly slow if improperly used, so
+the following is just a short list of things to keep in mind when using it.
+
+* Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
+* Do not require init params (for exceptions, keep reading)
+* Getter properties without setters could be dangerous (read: not tested)
+
+If any of the above are issues, use the _serialize method (see usage below).
+For this to work, the server and the client must both have the use_jsonclass
+configuration item turned on, and both must have access to the same libraries
+used by the objects.
+
+If you have excessively nested arguments, it would be better to turn off the
+translation and manually invoke it on specific objects using
+jsonrpclib.jsonclass.dump / jsonrpclib.jsonclass.load (since the default
+behavior recursively goes through attributes and lists / dicts / tuples).
+
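+For a manual call on a single object, the invocation looks roughly like this
+(a sketch; my_obj stands in for any instance of your own class):
+
+    from jsonrpclib import jsonclass
+
+    payload = jsonclass.dump(my_obj)    # dict carrying a '__jsonclass__' entry
+    restored = jsonclass.load(payload)  # rebuilds the object; its class must be
+                                        # importable or present in config.classes
+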
+[test_obj.py]
+
+ # This object is /very/ simple, and the system will look through the
+ # attributes and serialize what it can.
+ class TestObj(object):
+ foo = 'bar'
+
+ # This object requires __init__ params, so it uses the _serialize method
+ # and returns a tuple of init params and attribute values (the init params
+ # can be a dict or a list, but the attribute values must be a dict.)
+ class TestSerial(object):
+ foo = 'bar'
+ def __init__(self, *args):
+ self.args = args
+ def _serialize(self):
+ return (self.args, {'foo':self.foo,})
+
+[usage]
+
+ import jsonrpclib
+ import test_obj
+
+ jsonrpclib.config.use_jsonclass = True
+
+ testobj1 = test_obj.TestObj()
+ testobj2 = test_obj.TestSerial()
+ server = jsonrpclib.Server('http://localhost:8080')
+ # The 'ping' just returns whatever is sent
+ ping1 = server.ping(testobj1)
+ ping2 = server.ping(testobj2)
+ print jsonrpclib.history.request
+ # {"jsonrpc": "2.0", "params": [{"__jsonclass__": ["test_obj.TestSerial", ["foo"]]}], "id": "a0l976iv", "method": "ping"}
+ print jsonrpclib.history.response
+ # {'jsonrpc': '2.0', 'result': <test_obj.TestSerial object at 0x2744590>, 'id': 'a0l976iv'}
+
+To turn on this behaviour, just set jsonrpclib.config.use_jsonclass to True.
+If you want to use a different method for serialization, just set
+jsonrpclib.config.serialize_method to the method name. Finally, if you are
+using classes that you have defined in the implementation (as in, not a
+separate library), you'll need to add those (on BOTH the server and the
+client) using the jsonrpclib.config.classes.add() method.
+(Examples forthcoming.)
+
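+In the meantime, a short sketch of that setup (Payment is a hypothetical
+local class that must be importable on both sides):
+
+    import jsonrpclib
+    from my_models import Payment               # hypothetical local class
+
+    jsonrpclib.config.use_jsonclass = True
+    jsonrpclib.config.serialize_method = '_serialize'   # this is the default name
+    jsonrpclib.config.classes.add(Payment)      # register on BOTH server and client
+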
+Feedback on this "feature" is very, VERY much appreciated.
+
+Why JSON-RPC?
+-------------
+In my opinion, there are several reasons to choose JSON over XML for RPC:
+
+* Much simpler to read (I suppose this is opinion, but I know I'm right. :)
+* Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
+* Parsing - JSON should be much quicker to parse than XML.
+* Easy class passing with jsonclass (when enabled)
+
+In the interest of being fair, there are also a few reasons to choose XML
+over JSON:
+
+* Your server doesn't do JSON (rather obvious)
+* Wider XML-RPC support across APIs (can we change this? :))
+* Libraries are more established, i.e. more stable (Let's change this too.)
+
+TESTS
+-----
+I've dropped almost-verbatim tests from the JSON-RPC spec 2.0 page.
+You can run them with:
+
+ python tests.py
+
+TODO
+----
+* Use HTTP error codes on SimpleJSONRPCServer
+* Test, test, test and optimize
\ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/SimpleJSONRPCServer.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/SimpleJSONRPCServer.py
new file mode 100755
index 00000000..d76da73e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/SimpleJSONRPCServer.py
@@ -0,0 +1,229 @@
+import jsonrpclib
+from jsonrpclib import Fault
+from jsonrpclib.jsonrpc import USE_UNIX_SOCKETS
+import SimpleXMLRPCServer
+import SocketServer
+import socket
+import logging
+import os
+import types
+import traceback
+import sys
+try:
+ import fcntl
+except ImportError:
+ # For Windows
+ fcntl = None
+
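+# JSON-RPC 2.0 pre-defined error codes, as used throughout this module:
+#   -32700 parse error, -32600 invalid request, -32601 method not found,
+#   -32602 invalid params, -32603 internal/server error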
+def get_version(request):
+ # must be a dict
+ if 'jsonrpc' in request.keys():
+ return 2.0
+ if 'id' in request.keys():
+ return 1.0
+ return None
+
+def validate_request(request):
+ if type(request) is not types.DictType:
+ fault = Fault(
+ -32600, 'Request must be {}, not %s.' % type(request)
+ )
+ return fault
+ rpcid = request.get('id', None)
+ version = get_version(request)
+ if not version:
+ fault = Fault(-32600, 'Request %s invalid.' % request, rpcid=rpcid)
+ return fault
+ request.setdefault('params', [])
+ method = request.get('method', None)
+ params = request.get('params')
+ param_types = (types.ListType, types.DictType, types.TupleType)
+ if not method or type(method) not in types.StringTypes or \
+ type(params) not in param_types:
+ fault = Fault(
+ -32600, 'Invalid request parameters or method.', rpcid=rpcid
+ )
+ return fault
+ return True
+
+class SimpleJSONRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
+
+ def __init__(self, encoding=None):
+ SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self,
+ allow_none=True,
+ encoding=encoding)
+
+ def _marshaled_dispatch(self, data, dispatch_method = None):
+ response = None
+ try:
+ request = jsonrpclib.loads(data)
+ except Exception, e:
+ fault = Fault(-32700, 'Request %s invalid. (%s)' % (data, e))
+ response = fault.response()
+ return response
+ if not request:
+ fault = Fault(-32600, 'Request invalid -- no request data.')
+ return fault.response()
+ if type(request) is types.ListType:
+ # This SHOULD be a batch, by spec
+ responses = []
+ for req_entry in request:
+ result = validate_request(req_entry)
+ if type(result) is Fault:
+ responses.append(result.response())
+ continue
+ resp_entry = self._marshaled_single_dispatch(req_entry)
+ if resp_entry is not None:
+ responses.append(resp_entry)
+ if len(responses) > 0:
+ response = '[%s]' % ','.join(responses)
+ else:
+ response = ''
+ else:
+ result = validate_request(request)
+ if type(result) is Fault:
+ return result.response()
+ response = self._marshaled_single_dispatch(request)
+ return response
+
+ def _marshaled_single_dispatch(self, request):
+ # TODO - Use the multiprocessing and skip the response if
+ # it is a notification
+ # Put in support for custom dispatcher here
+ # (See SimpleXMLRPCServer._marshaled_dispatch)
+ method = request.get('method')
+ params = request.get('params')
+ try:
+ response = self._dispatch(method, params)
+ except:
+ exc_type, exc_value, exc_tb = sys.exc_info()
+ fault = Fault(-32603, '%s:%s' % (exc_type, exc_value))
+ return fault.response()
+ if 'id' not in request.keys() or request['id'] == None:
+ # It's a notification
+ return None
+ try:
+ response = jsonrpclib.dumps(response,
+ methodresponse=True,
+ rpcid=request['id']
+ )
+ return response
+ except:
+ exc_type, exc_value, exc_tb = sys.exc_info()
+ fault = Fault(-32603, '%s:%s' % (exc_type, exc_value))
+ return fault.response()
+
+ def _dispatch(self, method, params):
+ func = None
+ try:
+ func = self.funcs[method]
+ except KeyError:
+ if self.instance is not None:
+ if hasattr(self.instance, '_dispatch'):
+ return self.instance._dispatch(method, params)
+ else:
+ try:
+ func = SimpleXMLRPCServer.resolve_dotted_attribute(
+ self.instance,
+ method,
+ True
+ )
+ except AttributeError:
+ pass
+ if func is not None:
+ try:
+ if type(params) is types.ListType:
+ response = func(*params)
+ else:
+ response = func(**params)
+ return response
+ except TypeError:
+ return Fault(-32602, 'Invalid parameters.')
+ except:
+ err_lines = traceback.format_exc().splitlines()
+ trace_string = '%s | %s' % (err_lines[-3], err_lines[-1])
+ fault = jsonrpclib.Fault(-32603, 'Server error: %s' %
+ trace_string)
+ return fault
+ else:
+ return Fault(-32601, 'Method %s not supported.' % method)
+
+class SimpleJSONRPCRequestHandler(
+ SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
+
+ def do_POST(self):
+ if not self.is_rpc_path_valid():
+ self.report_404()
+ return
+ try:
+ max_chunk_size = 10*1024*1024
+ size_remaining = int(self.headers["content-length"])
+ L = []
+ while size_remaining:
+ chunk_size = min(size_remaining, max_chunk_size)
+ L.append(self.rfile.read(chunk_size))
+ size_remaining -= len(L[-1])
+ data = ''.join(L)
+ response = self.server._marshaled_dispatch(data)
+ self.send_response(200)
+ except Exception, e:
+ self.send_response(500)
+ err_lines = traceback.format_exc().splitlines()
+ trace_string = '%s | %s' % (err_lines[-3], err_lines[-1])
+ fault = jsonrpclib.Fault(-32603, 'Server error: %s' % trace_string)
+ response = fault.response()
+ if response == None:
+ response = ''
+ self.send_header("Content-type", "application/json-rpc")
+ self.send_header("Content-length", str(len(response)))
+ self.end_headers()
+ self.wfile.write(response)
+ self.wfile.flush()
+ self.connection.shutdown(1)
+
+class SimpleJSONRPCServer(SocketServer.TCPServer, SimpleJSONRPCDispatcher):
+
+ allow_reuse_address = True
+
+ def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
+ logRequests=True, encoding=None, bind_and_activate=True,
+ address_family=socket.AF_INET):
+ self.logRequests = logRequests
+ SimpleJSONRPCDispatcher.__init__(self, encoding)
+ # TCPServer.__init__ has an extra parameter on 2.6+, so
+ # check Python version and decide on how to call it
+ vi = sys.version_info
+ self.address_family = address_family
+ if USE_UNIX_SOCKETS and address_family == socket.AF_UNIX:
+ # Unix sockets can't be bound if they already exist in the
+ # filesystem. The convention of e.g. X11 is to unlink
+ # before binding again.
+ if os.path.exists(addr):
+ try:
+ os.unlink(addr)
+ except OSError:
+ logging.warning("Could not unlink socket %s", addr)
+ # if python 2.5 and lower
+ if vi[0] < 3 and vi[1] < 6:
+ SocketServer.TCPServer.__init__(self, addr, requestHandler)
+ else:
+ SocketServer.TCPServer.__init__(self, addr, requestHandler,
+ bind_and_activate)
+ if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
+ flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
+ flags |= fcntl.FD_CLOEXEC
+ fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
+
+class CGIJSONRPCRequestHandler(SimpleJSONRPCDispatcher):
+
+ def __init__(self, encoding=None):
+ SimpleJSONRPCDispatcher.__init__(self, encoding)
+
+ def handle_jsonrpc(self, request_text):
+ response = self._marshaled_dispatch(request_text)
+ print 'Content-Type: application/json-rpc'
+ print 'Content-Length: %d' % len(response)
+ print
+ sys.stdout.write(response)
+
+ handle_xmlrpc = handle_jsonrpc
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/__init__.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/__init__.py
new file mode 100755
index 00000000..6e884b83
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/__init__.py
@@ -0,0 +1,6 @@
+from jsonrpclib.config import Config
+config = Config.instance()
+from jsonrpclib.history import History
+history = History.instance()
+from jsonrpclib.jsonrpc import Server, MultiCall, Fault
+from jsonrpclib.jsonrpc import ProtocolError, loads, dumps
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/config.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/config.py
new file mode 100755
index 00000000..4d28f1b1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/config.py
@@ -0,0 +1,38 @@
+import sys
+
+class LocalClasses(dict):
+ def add(self, cls):
+ self[cls.__name__] = cls
+
+class Config(object):
+ """
+ This is pretty much used exclusively for the 'jsonclass'
+ functionality... set use_jsonclass to False to turn it off.
+ You can change serialize_method and ignore_attribute, or use
+ the classes.add() method to include "local" classes.
+ """
+ use_jsonclass = True
+ # Change to False to keep __jsonclass__ entries raw.
+ serialize_method = '_serialize'
+ # The serialize_method should be a string that references the
+ # method on a custom class object which is responsible for
+ # returning a tuple of the constructor arguments and a dict of
+ # attributes.
+ ignore_attribute = '_ignore'
+ # The ignore attribute should be a string that references the
+ # attribute on a custom class object which holds strings and / or
+ # references of the attributes the class translator should ignore.
+ classes = LocalClasses()
+ # The list of classes to use for jsonclass translation.
+ version = 2.0
+ # Version of the JSON-RPC spec to support
+ user_agent = 'jsonrpclib/0.1 (Python %s)' % \
+ '.'.join([str(ver) for ver in sys.version_info[0:3]])
+ # User agent to use for calls.
+ _instance = None
+
+ @classmethod
+ def instance(cls):
+ if not cls._instance:
+ cls._instance = cls()
+ return cls._instance
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/history.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/history.py
new file mode 100755
index 00000000..d11863dc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/history.py
@@ -0,0 +1,40 @@
+class History(object):
+ """
+ This holds all the response and request objects for a
+ session. A server using this should call "clear" after
+ each request cycle in order to keep it from clogging
+ memory.
+ """
+ requests = []
+ responses = []
+ _instance = None
+
+ @classmethod
+ def instance(cls):
+ if not cls._instance:
+ cls._instance = cls()
+ return cls._instance
+
+ def add_response(self, response_obj):
+ self.responses.append(response_obj)
+
+ def add_request(self, request_obj):
+ self.requests.append(request_obj)
+
+ @property
+ def request(self):
+ if len(self.requests) == 0:
+ return None
+ else:
+ return self.requests[-1]
+
+ @property
+ def response(self):
+ if len(self.responses) == 0:
+ return None
+ else:
+ return self.responses[-1]
+
+ def clear(self):
+ del self.requests[:]
+ del self.responses[:]
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonclass.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonclass.py
new file mode 100755
index 00000000..298c3da3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonclass.py
@@ -0,0 +1,145 @@
+import types
+import inspect
+import re
+import traceback
+
+from jsonrpclib import config
+
+iter_types = [
+ types.DictType,
+ types.ListType,
+ types.TupleType
+]
+
+string_types = [
+ types.StringType,
+ types.UnicodeType
+]
+
+numeric_types = [
+ types.IntType,
+ types.LongType,
+ types.FloatType
+]
+
+value_types = [
+ types.BooleanType,
+ types.NoneType
+]
+
+supported_types = iter_types+string_types+numeric_types+value_types
+invalid_module_chars = r'[^a-zA-Z0-9\_\.]'
+
+class TranslationError(Exception):
+ pass
+
+def dump(obj, serialize_method=None, ignore_attribute=None, ignore=[]):
+ if not serialize_method:
+ serialize_method = config.serialize_method
+ if not ignore_attribute:
+ ignore_attribute = config.ignore_attribute
+ obj_type = type(obj)
+ # Parse / return default "types"...
+ if obj_type in numeric_types+string_types+value_types:
+ return obj
+ if obj_type in iter_types:
+ if obj_type in (types.ListType, types.TupleType):
+ new_obj = []
+ for item in obj:
+ new_obj.append(dump(item, serialize_method,
+ ignore_attribute, ignore))
+ if obj_type is types.TupleType:
+ new_obj = tuple(new_obj)
+ return new_obj
+ # It's a dict...
+ else:
+ new_obj = {}
+ for key, value in obj.iteritems():
+ new_obj[key] = dump(value, serialize_method,
+ ignore_attribute, ignore)
+ return new_obj
+ # It's not a standard type, so it needs __jsonclass__
+ module_name = inspect.getmodule(obj).__name__
+ class_name = obj.__class__.__name__
+ json_class = class_name
+ if module_name not in ['', '__main__']:
+ json_class = '%s.%s' % (module_name, json_class)
+ return_obj = {"__jsonclass__":[json_class,]}
+ # If a serialization method is defined..
+ if serialize_method in dir(obj):
+ # Params can be a dict (keyword) or list (positional)
+ # Attrs MUST be a dict.
+ serialize = getattr(obj, serialize_method)
+ params, attrs = serialize()
+ return_obj['__jsonclass__'].append(params)
+ return_obj.update(attrs)
+ return return_obj
+ # Otherwise, try to figure it out
+ # Obviously, we can't assume to know anything about the
+ # parameters passed to __init__
+ return_obj['__jsonclass__'].append([])
+ attrs = {}
+ ignore_list = getattr(obj, ignore_attribute, [])+ignore
+ for attr_name, attr_value in obj.__dict__.iteritems():
+ if type(attr_value) in supported_types and \
+ attr_name not in ignore_list and \
+ attr_value not in ignore_list:
+ attrs[attr_name] = dump(attr_value, serialize_method,
+ ignore_attribute, ignore)
+ return_obj.update(attrs)
+ return return_obj
+
+def load(obj):
+ if type(obj) in string_types+numeric_types+value_types:
+ return obj
+ if type(obj) is types.ListType:
+ return_list = []
+ for entry in obj:
+ return_list.append(load(entry))
+ return return_list
+ # Otherwise, it's a dict type
+ if '__jsonclass__' not in obj.keys():
+ return_dict = {}
+ for key, value in obj.iteritems():
+ new_value = load(value)
+ return_dict[key] = new_value
+ return return_dict
+ # It's a dict, and it's a __jsonclass__
+ orig_module_name = obj['__jsonclass__'][0]
+ params = obj['__jsonclass__'][1]
+ if orig_module_name == '':
+ raise TranslationError('Module name empty.')
+ json_module_clean = re.sub(invalid_module_chars, '', orig_module_name)
+ if json_module_clean != orig_module_name:
+ raise TranslationError('Module name %s has invalid characters.' %
+ orig_module_name)
+ json_module_parts = json_module_clean.split('.')
+ json_class = None
+ if len(json_module_parts) == 1:
+ # Local class name -- probably means it won't work
+ if json_module_parts[0] not in config.classes.keys():
+ raise TranslationError('Unknown class or module %s.' %
+ json_module_parts[0])
+ json_class = config.classes[json_module_parts[0]]
+ else:
+ json_class_name = json_module_parts.pop()
+ json_module_tree = '.'.join(json_module_parts)
+ try:
+ temp_module = __import__(json_module_tree)
+ except ImportError:
+ raise TranslationError('Could not import %s from module %s.' %
+ (json_class_name, json_module_tree))
+ json_class = getattr(temp_module, json_class_name)
+ # Creating the object...
+ new_obj = None
+ if type(params) is types.ListType:
+ new_obj = json_class(*params)
+ elif type(params) is types.DictType:
+ new_obj = json_class(**params)
+ else:
+ raise TranslationError('Constructor args must be a dict or list.')
+ for key, value in obj.iteritems():
+ if key == '__jsonclass__':
+ continue
+ setattr(new_obj, key, value)
+ return new_obj
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonrpc.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonrpc.py
new file mode 100755
index 00000000..e11939ae
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonrpc.py
@@ -0,0 +1,556 @@
+"""
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+============================
+JSONRPC Library (jsonrpclib)
+============================
+
+This library is a JSON-RPC v.2 (proposed) implementation which
+follows the xmlrpclib API for portability between clients. It
+uses the same Server / ServerProxy, loads, dumps, etc. syntax,
+while providing features not present in XML-RPC like:
+
+* Keyword arguments
+* Notifications
+* Versioning
+* Batches and batch notifications
+
+Eventually, I'll add a SimpleXMLRPCServer compatible library,
+and other things to tie the thing off nicely. :)
+
+For a quick-start, just open a console and type the following,
+replacing the server address, method, and parameters
+appropriately.
+>>> import jsonrpclib
+>>> server = jsonrpclib.Server('http://localhost:8181')
+>>> server.add(5, 6)
+11
+>>> server._notify.add(5, 6)
+>>> batch = jsonrpclib.MultiCall(server)
+>>> batch.add(3, 50)
+>>> batch.add(2, 3)
+>>> batch._notify.add(3, 5)
+>>> batch()
+[53, 5]
+
+See http://code.google.com/p/jsonrpclib/ for more info.
+"""
+
+import types
+import sys
+from xmlrpclib import Transport as XMLTransport
+from xmlrpclib import SafeTransport as XMLSafeTransport
+from xmlrpclib import ServerProxy as XMLServerProxy
+from xmlrpclib import _Method as XML_Method
+import time
+import string
+import random
+
+# Library includes
+import jsonrpclib
+from jsonrpclib import config
+from jsonrpclib import history
+
+# JSON library importing
+cjson = None
+json = None
+try:
+ import cjson
+except ImportError:
+ try:
+ import json
+ except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ raise ImportError(
+ 'You must have the cjson, json, or simplejson ' +
+ 'module(s) available.'
+ )
+
+IDCHARS = string.ascii_lowercase+string.digits
+
+class UnixSocketMissing(Exception):
+ """
+ Just a properly named Exception if Unix Sockets usage is
+ attempted on a platform that doesn't support them (Windows)
+ """
+ pass
+
+#JSON Abstractions
+
+def jdumps(obj, encoding='utf-8'):
+ # Do 'serialize' test at some point for other classes
+ global cjson
+ if cjson:
+ return cjson.encode(obj)
+ else:
+ return json.dumps(obj, encoding=encoding)
+
+def jloads(json_string):
+ global cjson
+ if cjson:
+ return cjson.decode(json_string)
+ else:
+ return json.loads(json_string)
+
+
+# XMLRPClib re-implementations
+
+class ProtocolError(Exception):
+ pass
+
+class TransportMixIn(object):
+ """ Just extends the XMLRPC transport where necessary. """
+ user_agent = config.user_agent
+ # for Python 2.7 support
+ _connection = None
+
+ def send_content(self, connection, request_body):
+ connection.putheader("Content-Type", "application/json-rpc")
+ connection.putheader("Content-Length", str(len(request_body)))
+ connection.endheaders()
+ if request_body:
+ connection.send(request_body)
+
+ def getparser(self):
+ target = JSONTarget()
+ return JSONParser(target), target
+
+class JSONParser(object):
+ def __init__(self, target):
+ self.target = target
+
+ def feed(self, data):
+ self.target.feed(data)
+
+ def close(self):
+ pass
+
+class JSONTarget(object):
+ def __init__(self):
+ self.data = []
+
+ def feed(self, data):
+ self.data.append(data)
+
+ def close(self):
+ return ''.join(self.data)
+
+class Transport(TransportMixIn, XMLTransport):
+ pass
+
+class SafeTransport(TransportMixIn, XMLSafeTransport):
+ pass
+from httplib import HTTP, HTTPConnection
+from socket import socket
+
+USE_UNIX_SOCKETS = False
+
+try:
+ from socket import AF_UNIX, SOCK_STREAM
+ USE_UNIX_SOCKETS = True
+except ImportError:
+ pass
+
+if (USE_UNIX_SOCKETS):
+
+ class UnixHTTPConnection(HTTPConnection):
+ def connect(self):
+ self.sock = socket(AF_UNIX, SOCK_STREAM)
+ self.sock.connect(self.host)
+
+ class UnixHTTP(HTTP):
+ _connection_class = UnixHTTPConnection
+
+ class UnixTransport(TransportMixIn, XMLTransport):
+ def make_connection(self, host):
+ import httplib
+ host, extra_headers, x509 = self.get_host_info(host)
+ return UnixHTTP(host)
+
+
+class ServerProxy(XMLServerProxy):
+ """
+ Unfortunately, much more of this class has to be copied since
+ so much of it does the serialization.
+ """
+
+ def __init__(self, uri, transport=None, encoding=None,
+ verbose=0, version=None):
+ import urllib
+ if not version:
+ version = config.version
+ self.__version = version
+ schema, uri = urllib.splittype(uri)
+ if schema not in ('http', 'https', 'unix'):
+ raise IOError('Unsupported JSON-RPC protocol.')
+ if schema == 'unix':
+ if not USE_UNIX_SOCKETS:
+ # Don't like the "generic" Exception...
+ raise UnixSocketMissing("Unix sockets not available.")
+ self.__host = uri
+ self.__handler = '/'
+ else:
+ self.__host, self.__handler = urllib.splithost(uri)
+ if not self.__handler:
+ # Not sure if this is in the JSON spec, but fall back to the root path.
+ self.__handler = '/'
+ if transport is None:
+ if schema == 'unix':
+ transport = UnixTransport()
+ elif schema == 'https':
+ transport = SafeTransport()
+ else:
+ transport = Transport()
+ self.__transport = transport
+ self.__encoding = encoding
+ self.__verbose = verbose
+
+ def _request(self, methodname, params, rpcid=None):
+ request = dumps(params, methodname, encoding=self.__encoding,
+ rpcid=rpcid, version=self.__version)
+ response = self._run_request(request)
+ check_for_errors(response)
+ return response['result']
+
+ def _request_notify(self, methodname, params, rpcid=None):
+ request = dumps(params, methodname, encoding=self.__encoding,
+ rpcid=rpcid, version=self.__version, notify=True)
+ response = self._run_request(request, notify=True)
+ check_for_errors(response)
+ return
+
+ def _run_request(self, request, notify=None):
+ history.add_request(request)
+
+ response = self.__transport.request(
+ self.__host,
+ self.__handler,
+ request,
+ verbose=self.__verbose
+ )
+
+ # Here, the XMLRPC library translates a single list
+ # response to the single value -- should we do the
+ # same, and require a tuple / list to be passed to
+ # the response object, or expect the Server to be
+ # outputting the response appropriately?
+
+ history.add_response(response)
+ if not response:
+ return None
+ return_obj = loads(response)
+ return return_obj
+
+ def __getattr__(self, name):
+ # Same as original, just with new _Method reference
+ return _Method(self._request, name)
+
+ @property
+ def _notify(self):
+ # Just like __getattr__, but with notify namespace.
+ return _Notify(self._request_notify)
+
+
+class _Method(XML_Method):
+
+ def __call__(self, *args, **kwargs):
+ if len(args) > 0 and len(kwargs) > 0:
+ raise ProtocolError('Cannot use both positional ' +
+ 'and keyword arguments (according to JSON-RPC spec.)')
+ if len(args) > 0:
+ return self.__send(self.__name, args)
+ else:
+ return self.__send(self.__name, kwargs)
+
+ def __getattr__(self, name):
+ self.__name = '%s.%s' % (self.__name, name)
+ return self
+ # The old method returned a new instance, but this seemed wasteful.
+ # The only thing that changes is the name.
+ #return _Method(self.__send, "%s.%s" % (self.__name, name))
+
+class _Notify(object):
+ def __init__(self, request):
+ self._request = request
+
+ def __getattr__(self, name):
+ return _Method(self._request, name)
+
+# Batch implementation
+
+class MultiCallMethod(object):
+
+ def __init__(self, method, notify=False):
+ self.method = method
+ self.params = []
+ self.notify = notify
+
+ def __call__(self, *args, **kwargs):
+ if len(kwargs) > 0 and len(args) > 0:
+ raise ProtocolError('JSON-RPC does not support both ' +
+ 'positional and keyword arguments.')
+ if len(kwargs) > 0:
+ self.params = kwargs
+ else:
+ self.params = args
+
+ def request(self, encoding=None, rpcid=None):
+ return dumps(self.params, self.method, version=2.0,
+ encoding=encoding, rpcid=rpcid, notify=self.notify)
+
+ def __repr__(self):
+ return '%s' % self.request()
+
+ def __getattr__(self, method):
+ new_method = '%s.%s' % (self.method, method)
+ self.method = new_method
+ return self
+
+class MultiCallNotify(object):
+
+ def __init__(self, multicall):
+ self.multicall = multicall
+
+ def __getattr__(self, name):
+ new_job = MultiCallMethod(name, notify=True)
+ self.multicall._job_list.append(new_job)
+ return new_job
+
+class MultiCallIterator(object):
+
+ def __init__(self, results):
+ self.results = results
+
+ def __iter__(self):
+ for i in range(0, len(self.results)):
+ yield self[i]
+ raise StopIteration
+
+ def __getitem__(self, i):
+ item = self.results[i]
+ check_for_errors(item)
+ return item['result']
+
+ def __len__(self):
+ return len(self.results)
+
+class MultiCall(object):
+
+ def __init__(self, server):
+ self._server = server
+ self._job_list = []
+
+ def _request(self):
+ if len(self._job_list) < 1:
+ # Should we alert? This /is/ pretty obvious.
+ return
+ request_body = '[ %s ]' % ','.join([job.request() for
+ job in self._job_list])
+ responses = self._server._run_request(request_body)
+ del self._job_list[:]
+ if not responses:
+ responses = []
+ return MultiCallIterator(responses)
+
+ @property
+ def _notify(self):
+ return MultiCallNotify(self)
+
+ def __getattr__(self, name):
+ new_job = MultiCallMethod(name)
+ self._job_list.append(new_job)
+ return new_job
+
+ __call__ = _request
+
+# These lines conform to xmlrpclib's "compatibility" line.
+# Not really sure if we should include these, but oh well.
+Server = ServerProxy
+
+class Fault(object):
+ # JSON-RPC error class
+ def __init__(self, code=-32000, message='Server error', rpcid=None):
+ self.faultCode = code
+ self.faultString = message
+ self.rpcid = rpcid
+
+ def error(self):
+ return {'code':self.faultCode, 'message':self.faultString}
+
+ def response(self, rpcid=None, version=None):
+ if not version:
+ version = config.version
+ if rpcid:
+ self.rpcid = rpcid
+ return dumps(
+ self, methodresponse=True, rpcid=self.rpcid, version=version
+ )
+
+ def __repr__(self):
+ return '<Fault %s: %s>' % (self.faultCode, self.faultString)
+
+def random_id(length=8):
+ return_id = ''
+ for i in range(length):
+ return_id += random.choice(IDCHARS)
+ return return_id
+
+class Payload(dict):
+ def __init__(self, rpcid=None, version=None):
+ if not version:
+ version = config.version
+ self.id = rpcid
+ self.version = float(version)
+
+ def request(self, method, params=[]):
+ if type(method) not in types.StringTypes:
+ raise ValueError('Method name must be a string.')
+ if not self.id:
+ self.id = random_id()
+ request = { 'id':self.id, 'method':method }
+ if params:
+ request['params'] = params
+ if self.version >= 2:
+ request['jsonrpc'] = str(self.version)
+ return request
+
+ def notify(self, method, params=[]):
+ request = self.request(method, params)
+ if self.version >= 2:
+ del request['id']
+ else:
+ request['id'] = None
+ return request
+
+ def response(self, result=None):
+ response = {'result':result, 'id':self.id}
+ if self.version >= 2:
+ response['jsonrpc'] = str(self.version)
+ else:
+ response['error'] = None
+ return response
+
+ def error(self, code=-32000, message='Server error.'):
+ error = self.response()
+ if self.version >= 2:
+ del error['result']
+ else:
+ error['result'] = None
+ error['error'] = {'code':code, 'message':message}
+ return error
+
+def dumps(params=[], methodname=None, methodresponse=None,
+ encoding=None, rpcid=None, version=None, notify=None):
+ """
+ This differs from the Python implementation in that it implements
+ the rpcid argument since the 2.0 spec requires it for responses.
+ """
+ if not version:
+ version = config.version
+ valid_params = (types.TupleType, types.ListType, types.DictType)
+ if type(methodname) in types.StringTypes and \
+ type(params) not in valid_params and \
+ not isinstance(params, Fault):
+ # If there is a method name but params is neither list-like, dict-like
+ # nor a Fault instance, error out.
+ raise TypeError('Params must be a dict, list, tuple or Fault ' +
+ 'instance.')
+ # Begin parsing object
+ payload = Payload(rpcid=rpcid, version=version)
+ if not encoding:
+ encoding = 'utf-8'
+ if type(params) is Fault:
+ response = payload.error(params.faultCode, params.faultString)
+ return jdumps(response, encoding=encoding)
+ if type(methodname) not in types.StringTypes and methodresponse != True:
+ raise ValueError('Method name must be a string, or methodresponse '+
+ 'must be set to True.')
+ if config.use_jsonclass == True:
+ from jsonrpclib import jsonclass
+ params = jsonclass.dump(params)
+ if methodresponse is True:
+ if rpcid is None:
+ raise ValueError('A method response must have an rpcid.')
+ response = payload.response(params)
+ return jdumps(response, encoding=encoding)
+ request = None
+ if notify == True:
+ request = payload.notify(methodname, params)
+ else:
+ request = payload.request(methodname, params)
+ return jdumps(request, encoding=encoding)
+
+def loads(data):
+ """
+ This differs from the Python implementation, in that it returns
+ the request structure in Dict format instead of the method, params.
+ It will return a list in the case of a batch request / response.
+ """
+ if data == '':
+ # notification
+ return None
+ result = jloads(data)
+ # if the above raises an error, the implementing server code
+ # should return something like the following:
+ # { 'jsonrpc':'2.0', 'error': fault.error(), id: None }
+ if config.use_jsonclass == True:
+ from jsonrpclib import jsonclass
+ result = jsonclass.load(result)
+ return result
+
+def check_for_errors(result):
+ if not result:
+ # Notification
+ return result
+ if type(result) is not types.DictType:
+ raise TypeError('Response is not a dict.')
+ if 'jsonrpc' in result.keys() and float(result['jsonrpc']) > 2.0:
+ raise NotImplementedError('JSON-RPC version not yet supported.')
+ if 'result' not in result.keys() and 'error' not in result.keys():
+ raise ValueError('Response does not have a result or error key.')
+ if 'error' in result.keys() and result['error'] != None:
+ code = result['error']['code']
+ message = result['error']['message']
+ raise ProtocolError((code, message))
+ return result
+
+def isbatch(result):
+ if type(result) not in (types.ListType, types.TupleType):
+ return False
+ if len(result) < 1:
+ return False
+ if type(result[0]) is not types.DictType:
+ return False
+ if 'jsonrpc' not in result[0].keys():
+ return False
+ try:
+ version = float(result[0]['jsonrpc'])
+ except ValueError:
+ raise ProtocolError('"jsonrpc" key must be a float(able) value.')
+ if version < 2:
+ return False
+ return True
+
+def isnotification(request):
+ if 'id' not in request.keys():
+ # 2.0 notification
+ return True
+ if request['id'] == None:
+ # 1.0 notification
+ return True
+ return False
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/setup.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/setup.py
new file mode 100755
index 00000000..569b6367
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/setup.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+"""
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import distutils.core
+
+distutils.core.setup(
+ name = "jsonrpclib",
+ version = "0.1.3",
+ packages = ["jsonrpclib"],
+ author = "Josh Marshall",
+ author_email = "catchjosh@gmail.com",
+ url = "http://github.com/joshmarshall/jsonrpclib/",
+ license = "http://www.apache.org/licenses/LICENSE-2.0",
+ description = "This project is an implementation of the JSON-RPC v2.0 " +
+ "specification (backwards-compatible) as a client library.",
+)
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/LICENSE.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/LICENSE.txt
new file mode 100755
index 00000000..eb0864bd
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/LICENSE.txt
@@ -0,0 +1,11 @@
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/MANIFEST.in b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/MANIFEST.in
new file mode 100755
index 00000000..42f4acf5
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/MANIFEST.in
@@ -0,0 +1,2 @@
+include *.txt
+include README.rst
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/PKG-INFO
new file mode 100755
index 00000000..9d0f3fca
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/PKG-INFO
@@ -0,0 +1,460 @@
+Metadata-Version: 1.1
+Name: jsonrpclib-pelix
+Version: 0.2.5
+Summary: This project is an implementation of the JSON-RPC v2.0 specification (backwards-compatible) as a client library, for Python 2.6+ and Python 3. This version is a fork of jsonrpclib by Josh Marshall, usable with Pelix remote services.
+Home-page: http://github.com/tcalmant/jsonrpclib/
+Author: Thomas Calmant
+Author-email: thomas.calmant+github@gmail.com
+License: Apache License 2.0
+Description: JSONRPClib (patched for Pelix)
+ ##############################
+
+ .. image:: https://pypip.in/license/jsonrpclib-pelix/badge.svg
+ :target: https://pypi.python.org/pypi/jsonrpclib-pelix/
+
+ .. image:: https://travis-ci.org/tcalmant/jsonrpclib.svg?branch=master
+ :target: https://travis-ci.org/tcalmant/jsonrpclib
+
+ .. image:: https://coveralls.io/repos/tcalmant/jsonrpclib/badge.svg?branch=master
+ :target: https://coveralls.io/r/tcalmant/jsonrpclib?branch=master
+
+
+ This library is an implementation of the JSON-RPC specification.
+ It supports both the original 1.0 specification, as well as the
+ new (proposed) 2.0 specification, which includes batch submission, keyword
+ arguments, etc.
+
+ It is licensed under the Apache License, Version 2.0
+ (http://www.apache.org/licenses/LICENSE-2.0.html).
+
+
+ About this version
+ ******************
+
+ This is a patched version of the original ``jsonrpclib`` project by
+ Josh Marshall, available at https://github.com/joshmarshall/jsonrpclib.
+
+ The suffix *-pelix* only indicates that this version works with Pelix Remote
+ Services, but it is **not** a Pelix specific implementation.
+
+ * This version adds support for Python 3, staying compatible with Python 2.
+ * It is now possible to use the dispatch_method argument while extending
+ the SimpleJSONRPCDispatcher, in order to use a custom dispatcher.
+ This allows Pelix Remote Services to use this package.
+ * It can use thread pools to control the number of threads spawned to handle
+ notification requests and client connections.
+ * The modifications added in other forks of this project have been added:
+
+ * From https://github.com/drdaeman/jsonrpclib:
+
+ * Improved JSON-RPC 1.0 support
+ * Less strict error response handling
+
+ * From https://github.com/tuomassalo/jsonrpclib:
+
+ * In case of a non-pre-defined error, raise an AppError and give access to
+ *error.data*
+
+ * From https://github.com/dejw/jsonrpclib:
+
+ * Custom headers can be sent with request and associated tests
+
+ * The support for Unix sockets has been removed, as it is not trivial to convert
+ to Python 3 (and I don't use them)
+ * This version cannot be installed with the original ``jsonrpclib``, as it uses
+ the same package name.
+
+
+ Summary
+ *******
+
+ This library implements the JSON-RPC 2.0 proposed specification in pure Python.
+ It is designed to be as compatible with the syntax of ``xmlrpclib`` as possible
+ (it extends where possible), so that projects using ``xmlrpclib`` could easily
+ be modified to use JSON and experiment with the differences.
+
+ It is backwards-compatible with the 1.0 specification, and supports all of the
+ new proposed features of 2.0, including:
+
+ * Batch submission (via MultiCall)
+ * Keyword arguments
+ * Notifications (both in a batch and 'normal')
+ * Class translation using the ``__jsonclass__`` key.
+
+ I've added a "SimpleJSONRPCServer", which is intended to emulate the
+ "SimpleXMLRPCServer" from the default Python distribution.
+
+
+ Requirements
+ ************
+
+ It supports ``cjson`` and ``simplejson``, and looks for the parsers in that
+ order (searching first for ``cjson``, then for the *built-in* ``json`` in 2.6+,
+ and then the ``simplejson`` external library).
+ One of these must be installed to use this library, although if you have a
+ standard distribution of 2.6+, you should already have one.
+ Keep in mind that ``cjson`` is supposed to be the quickest, I believe, so if
+ you are going for full-on optimization you may want to pick it up.
+
+ Since the library uses the ``contextlib`` module, you should have at least
+ Python 2.5 installed.
+
+
+ Installation
+ ************
+
+ You can install this from PyPI with one of the following commands (sudo
+ may be required):
+
+ .. code-block:: console
+
+ easy_install jsonrpclib-pelix
+ pip install jsonrpclib-pelix
+
+ Alternatively, you can download the source from the GitHub repository
+ at http://github.com/tcalmant/jsonrpclib and manually install it
+ with the following commands:
+
+ .. code-block:: console
+
+ git clone git://github.com/tcalmant/jsonrpclib.git
+ cd jsonrpclib
+ python setup.py install
+
+
+ SimpleJSONRPCServer
+ *******************
+
+ This is identical in usage (or should be) to the SimpleXMLRPCServer in the
+ Python standard library. The main differences in features are that it also
+ supports notifications, batch calls and class translation (if left on).
+ Note: The import line is slightly different from the regular SimpleXMLRPCServer,
+ since the SimpleJSONRPCServer is distributed within the ``jsonrpclib`` library.
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ server = SimpleJSONRPCServer(('localhost', 8080))
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+ server.serve_forever()
+
+ To protect the server with SSL, use the following snippet:
+
+ .. code-block:: python
+
+ import ssl
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ # Setup the SSL socket
+ server = SimpleJSONRPCServer(('localhost', 8080), bind_and_activate=False)
+ server.socket = ssl.wrap_socket(server.socket, certfile='server.pem',
+ server_side=True)
+ server.server_bind()
+ server.server_activate()
+
+ # ... register functions
+ # Start the server
+ server.serve_forever()
+
+
+ Notification Thread Pool
+ ========================
+
+ By default, notification calls are handled in the request handling thread.
+ It is possible to use a thread pool to handle them, by giving it to the server
+ using the ``set_notification_pool()`` method:
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+ # Setup the thread pool: between 0 and 10 threads
+ pool = ThreadPool(max_threads=10, min_threads=0)
+
+ # Don't forget to start it
+ pool.start()
+
+ # Setup the server
+ server = SimpleJSONRPCServer(('localhost', 8080), config)
+ server.set_notification_pool(pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pool (let threads finish their current task)
+ pool.stop()
+ server.set_notification_pool(None)
+
+
+ Threaded server
+ ===============
+
+ It is also possible to use a thread pool to handle client requests, using the
+ ``PooledJSONRPCServer`` class.
+ By default, this class uses a pool of 0 to 30 threads. A custom pool can be given
+ with the ``thread_pool`` parameter of the class constructor.
+
+ The notification pool and the request pool are different: by default, a server
+ with a request pool doesn't have a notification pool.
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+ # Setup the notification and request pools
+ notif_pool = ThreadPool(max_threads=10, min_threads=0)
+ request_pool = ThreadPool(max_threads=50, min_threads=10)
+
+ # Don't forget to start them
+ notif_pool.start()
+ request_pool.start()
+
+ # Setup the server
+ server = PooledJSONRPCServer(('localhost', 8080), config,
+ thread_pool=request_pool)
+ server.set_notification_pool(notif_pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pools (let threads finish their current task)
+ request_pool.stop()
+ notif_pool.stop()
+ server.set_notification_pool(None)
+
+ Client Usage
+ ************
+
+ This is (obviously) taken from a console session.
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080')
+ >>> server.add(5,6)
+ 11
+ >>> server.add(x=5, y=10)
+ 15
+ >>> server._notify.add(5,6)
+ # No result returned...
+ >>> batch = jsonrpclib.MultiCall(server)
+ >>> batch.add(5, 6)
+ >>> batch.ping({'key':'value'})
+ >>> batch._notify.add(4, 30)
+ >>> results = batch()
+ >>> for result in results:
+ ... print(result)
+ 11
+ {'key': 'value'}
+ # Note that there are only two responses -- this is according to spec.
+
+ # Clean up
+ >>> server('close')()
+
+ # Using client history
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', history=history)
+ >>> server.add(5,6)
+ 11
+ >>> print(history.request)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "method": "add", "params": [5, 6]}
+ >>> print(history.response)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "result": 11}
+
+ # Clean up
+ >>> server('close')()
+
+ If you need 1.0 functionality, there are a bunch of places you can pass that in,
+ although the best is just to give a specific configuration to
+ ``jsonrpclib.ServerProxy``:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> jsonrpclib.config.DEFAULT.version
+ 2.0
+ >>> config = jsonrpclib.config.Config(version=1.0)
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config,
+ history=history)
+ >>> server.add(7, 10)
+ 17
+ >>> print(history.request)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32",
+ "method": "add", "params": [7, 10]}
+ >>> print(history.response)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", "error": null, "result": 17}
+ >>> server('close')()
+
+ The equivalent ``loads`` and ``dumps`` functions also exist, although with minor
+ modifications. The ``dumps`` arguments are almost identical, but it adds three
+ arguments: ``rpcid`` for the 'id' key, ``version`` to specify the JSON-RPC
+ compatibility, and ``notify`` if it's a request that you want to be a
+ notification.
+
+ Additionally, the ``loads`` method does not return the params and method like
+ ``xmlrpclib``, but instead a.) parses for errors, raising ProtocolErrors, and
+ b.) returns the entire structure of the request / response for manual parsing.
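+
+ For instance, a minimal round trip with these functions might look like the
+ following sketch (the method name, id and argument values are only
+ illustrative):
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> request = jsonrpclib.dumps([5, 6], 'add', rpcid='req-1', version=2.0)
+ >>> parsed = jsonrpclib.loads(request)
+ >>> parsed['method']
+ 'add'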
+
+
+ Additional headers
+ ******************
+
+ If your remote service requires custom headers in requests, you can pass them
+ as a ``headers`` keyword argument when creating the ``ServerProxy``:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy("http://localhost:8080",
+ headers={'X-Test' : 'Test'})
+
+ You can also send additional request headers only for certain method invocations:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.Server("http://localhost:8080")
+ >>> with server._additional_headers({'X-Test' : 'Test'}) as test_server:
+ ... test_server.ping(42)
+ ...
+ >>> # The X-Test header will no longer be sent in requests
+
+ Of course ``_additional_headers`` contexts can be nested as well.
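+
+ For example, nesting might look like the following sketch (the header names
+ and the ``ping`` method are only illustrative, and it assumes each context
+ yields the proxy itself, as in the example above):
+
+ .. code-block:: python
+
+ >>> with server._additional_headers({'X-Outer': '1'}) as outer:
+ ...     with outer._additional_headers({'X-Inner': '2'}) as inner:
+ ...         inner.ping(42)  # sent with both X-Outer and X-Inner
+ ...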
+
+
+ Class Translation
+ *****************
+
+ I've recently added "automatic" class translation support, although it is
+ turned off by default. This can be devastatingly slow if improperly used, so
+ the following is just a short list of things to keep in mind when using it.
+
+ * Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
+ * Do not require init params (for exceptions, keep reading)
+ * Getter properties without setters could be dangerous (read: not tested)
+
+ If any of the above are issues, use the ``_serialize`` method (see usage below).
+ The server and client must BOTH have the ``use_jsonclass`` configuration item
+ on, and they must both have access to the same libraries used by the objects
+ for this to work.
+
+ If you have excessively nested arguments, it would be better to turn off the
+ translation and manually invoke it on specific objects using
+ ``jsonrpclib.jsonclass.dump`` / ``jsonrpclib.jsonclass.load`` (since the default
+ behavior recursively goes through attributes and lists / dicts / tuples).
+
+ Sample file: *test_obj.py*
+
+ .. code-block:: python
+
+ # This object is /very/ simple, and the system will look through the
+ # attributes and serialize what it can.
+ class TestObj(object):
+ foo = 'bar'
+
+ # This object requires __init__ params, so it uses the _serialize method
+ # and returns a tuple of init params and attribute values (the init params
+ # can be a dict or a list, but the attribute values must be a dict.)
+ class TestSerial(object):
+ foo = 'bar'
+ def __init__(self, *args):
+ self.args = args
+ def _serialize(self):
+ return (self.args, {'foo':self.foo,})
+
+ * Sample usage
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> import test_obj
+
+ # History is used only to print the serialized form of beans
+ >>> history = jsonrpclib.history.History()
+ >>> testobj1 = test_obj.TestObj()
+ >>> testobj2 = test_obj.TestSerial()
+ >>> server = jsonrpclib.Server('http://localhost:8080', history=history)
+
+ # The 'ping' just returns whatever is sent
+ >>> ping1 = server.ping(testobj1)
+ >>> ping2 = server.ping(testobj2)
+
+ >>> print(history.request)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "method": "ping", "params": [{"__jsonclass__":
+ ["test_obj.TestSerial", []], "foo": "bar"}
+ ]}
+ >>> print(history.response)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "result": {"__jsonclass__": ["test_obj.TestSerial", []], "foo": "bar"}}
+
+ This behavior is turned on by default. To deactivate it, just set the
+ ``use_jsonclass`` member of a server ``Config`` to False.
+ If you want to use a per-class serialization method, set its name in the
+ ``serialize_method`` member of a server ``Config``.
+ Finally, if you are using classes that you have defined in the implementation
+ (as in, not a separate library), you'll need to add those (on BOTH the server
+ and the client) using the ``config.classes.add()`` method.
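+
+ A rough sketch of wiring these options together (reusing the *test_obj.py*
+ classes defined above; the exact arguments to ``classes.add()`` and the
+ server URL are only illustrative):
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> import test_obj
+
+ >>> config = jsonrpclib.config.Config(version=2.0)
+ >>> config.use_jsonclass = True
+ >>> config.serialize_method = '_serialize'
+ >>> config.classes.add(test_obj.TestSerial)
+ >>> server = jsonrpclib.Server('http://localhost:8080', config=config)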
+
+ Feedback on this "feature" is very, VERY much appreciated.
+
+ Why JSON-RPC?
+ *************
+
+ In my opinion, there are several reasons to choose JSON over XML for RPC:
+
+ * Much simpler to read (I suppose this is opinion, but I know I'm right. :)
+ * Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
+ * Parsing - JSON should be much quicker to parse than XML.
+ * Easy class passing with ``jsonclass`` (when enabled)
+
+ In the interest of being fair, there are also a few reasons to choose XML
+ over JSON:
+
+ * Your server doesn't do JSON (rather obvious)
+ * Wider XML-RPC support across APIs (can we change this? :))
+ * Libraries are more established, i.e. more stable (Let's change this too.)
+
+ Tests
+ *****
+
+ Tests are an almost-verbatim drop from the JSON-RPC specification 2.0 page.
+ They can be run using *unittest* or *nosetests*:
+
+ .. code-block:: console
+
+ python -m unittest discover tests
+ python3 -m unittest discover tests
+ nosetests tests
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.0
+Classifier: Programming Language :: Python :: 3.1
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/README.rst b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/README.rst
new file mode 100755
index 00000000..29da2708
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/README.rst
@@ -0,0 +1,438 @@
+JSONRPClib (patched for Pelix)
+##############################
+
+.. image:: https://pypip.in/license/jsonrpclib-pelix/badge.svg
+ :target: https://pypi.python.org/pypi/jsonrpclib-pelix/
+
+.. image:: https://travis-ci.org/tcalmant/jsonrpclib.svg?branch=master
+ :target: https://travis-ci.org/tcalmant/jsonrpclib
+
+.. image:: https://coveralls.io/repos/tcalmant/jsonrpclib/badge.svg?branch=master
+ :target: https://coveralls.io/r/tcalmant/jsonrpclib?branch=master
+
+
+This library is an implementation of the JSON-RPC specification.
+It supports both the original 1.0 specification, as well as the
+new (proposed) 2.0 specification, which includes batch submission, keyword
+arguments, etc.
+
+It is licensed under the Apache License, Version 2.0
+(http://www.apache.org/licenses/LICENSE-2.0.html).
+
+
+About this version
+******************
+
+This is a patched version of the original ``jsonrpclib`` project by
+Josh Marshall, available at https://github.com/joshmarshall/jsonrpclib.
+
+The suffix *-pelix* only indicates that this version works with Pelix Remote
+Services, but it is **not** a Pelix specific implementation.
+
+* This version adds support for Python 3, staying compatible with Python 2.
+* It is now possible to use the dispatch_method argument while extending
+ the SimpleJSONRPCDispatcher, in order to use a custom dispatcher.
+ This allows Pelix Remote Services to use this package.
+* It can use thread pools to control the number of threads spawned to handle
+ notification requests and client connections.
+* The modifications added in other forks of this project have been added:
+
+ * From https://github.com/drdaeman/jsonrpclib:
+
+ * Improved JSON-RPC 1.0 support
+ * Less strict error response handling
+
+ * From https://github.com/tuomassalo/jsonrpclib:
+
+ * In case of a non-pre-defined error, raise an AppError and give access to
+ *error.data*
+
+ * From https://github.com/dejw/jsonrpclib:
+
+ * Custom headers can be sent with request and associated tests
+
+* The support for Unix sockets has been removed, as it is not trivial to convert
+ to Python 3 (and I don't use them)
+* This version cannot be installed with the original ``jsonrpclib``, as it uses
+ the same package name.
+
+
+Summary
+*******
+
+This library implements the JSON-RPC 2.0 proposed specification in pure Python.
+It is designed to be as compatible with the syntax of ``xmlrpclib`` as possible
+(it extends where possible), so that projects using ``xmlrpclib`` could easily
+be modified to use JSON and experiment with the differences.
+
+It is backwards-compatible with the 1.0 specification, and supports all of the
+new proposed features of 2.0, including:
+
+* Batch submission (via MultiCall)
+* Keyword arguments
+* Notifications (both in a batch and 'normal')
+* Class translation using the ``__jsonclass__`` key.
+
+I've added a "SimpleJSONRPCServer", which is intended to emulate the
+"SimpleXMLRPCServer" from the default Python distribution.
+
+
+Requirements
+************
+
+It supports ``cjson`` and ``simplejson``, and looks for the parsers in that
+order (searching first for ``cjson``, then for the *built-in* ``json`` in 2.6+,
+and then the ``simplejson`` external library).
+One of these must be installed to use this library, although if you have a
+standard distribution of 2.6+, you should already have one.
+Keep in mind that ``cjson`` is supposed to be the quickest, I believe, so if
+you are going for full-on optimization you may want to pick it up.
+
+Since the library uses the ``contextlib`` module, you should have at least
+Python 2.5 installed.
+
+
+Installation
+************
+
+You can install this from PyPI with one of the following commands (sudo
+may be required):
+
+.. code-block:: console
+
+ easy_install jsonrpclib-pelix
+ pip install jsonrpclib-pelix
+
+Alternatively, you can download the source from the GitHub repository
+at http://github.com/tcalmant/jsonrpclib and manually install it
+with the following commands:
+
+.. code-block:: console
+
+ git clone git://github.com/tcalmant/jsonrpclib.git
+ cd jsonrpclib
+ python setup.py install
+
+
+SimpleJSONRPCServer
+*******************
+
+This is identical in usage (or should be) to the SimpleXMLRPCServer in the
+Python standard library. The main differences in features are that it also
+supports notifications, batch calls and class translation (if left on).
+Note: The import line is slightly different from the regular SimpleXMLRPCServer,
+since the SimpleJSONRPCServer is distributed within the ``jsonrpclib`` library.
+
+.. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ server = SimpleJSONRPCServer(('localhost', 8080))
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+ server.serve_forever()
+
+To protect the server with SSL, use the following snippet:
+
+.. code-block:: python
+
+ import ssl
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ # Setup the SSL socket
+ server = SimpleJSONRPCServer(('localhost', 8080), bind_and_activate=False)
+ server.socket = ssl.wrap_socket(server.socket, certfile='server.pem',
+ server_side=True)
+ server.server_bind()
+ server.server_activate()
+
+ # ... register functions
+ # Start the server
+ server.serve_forever()
+
+
+Notification Thread Pool
+========================
+
+By default, notification calls are handled in the request handling thread.
+It is possible to use a thread pool to handle them, by giving it to the server
+using the ``set_notification_pool()`` method:
+
+.. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+ # Setup the thread pool: between 0 and 10 threads
+ pool = ThreadPool(max_threads=10, min_threads=0)
+
+ # Don't forget to start it
+ pool.start()
+
+ # Setup the server
+ server = SimpleJSONRPCServer(('localhost', 8080), config)
+ server.set_notification_pool(pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pool (let threads finish their current task)
+ pool.stop()
+ server.set_notification_pool(None)
+
+
+Threaded server
+===============
+
+It is also possible to use a thread pool to handle client requests, using the
+``PooledJSONRPCServer`` class.
+By default, this class uses a pool of 0 to 30 threads. A custom pool can be given
+with the ``thread_pool`` parameter of the class constructor.
+
+The notification pool and the request pool are different: by default, a server
+with a request pool doesn't have a notification pool.
+
+.. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+ # Setup the notification and request pools
+ notif_pool = ThreadPool(max_threads=10, min_threads=0)
+ request_pool = ThreadPool(max_threads=50, min_threads=10)
+
+ # Don't forget to start them
+ notif_pool.start()
+ request_pool.start()
+
+ # Setup the server
+ server = PooledJSONRPCServer(('localhost', 8080), config,
+ thread_pool=request_pool)
+ server.set_notification_pool(notif_pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pools (let threads finish their current task)
+ request_pool.stop()
+ notif_pool.stop()
+ server.set_notification_pool(None)
+
+Client Usage
+************
+
+This is (obviously) taken from a console session.
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080')
+ >>> server.add(5,6)
+ 11
+ >>> server.add(x=5, y=10)
+ 15
+ >>> server._notify.add(5,6)
+ # No result returned...
+ >>> batch = jsonrpclib.MultiCall(server)
+ >>> batch.add(5, 6)
+ >>> batch.ping({'key':'value'})
+ >>> batch._notify.add(4, 30)
+ >>> results = batch()
+ >>> for result in results:
+ ... print(result)
+ 11
+ {'key': 'value'}
+ # Note that there are only two responses -- this is according to spec.
+
+ # Clean up
+ >>> server('close')()
+
+ # Using client history
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', history=history)
+ >>> server.add(5,6)
+ 11
+ >>> print(history.request)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "method": "add", "params": [5, 6]}
+ >>> print(history.response)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "result": 11}
+
+ # Clean up
+ >>> server('close')()
+
+If you need 1.0 functionality, there are a bunch of places you can pass that in,
+although the best is just to give a specific configuration to
+``jsonrpclib.ServerProxy``:
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> jsonrpclib.config.DEFAULT.version
+ 2.0
+ >>> config = jsonrpclib.config.Config(version=1.0)
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config,
+ history=history)
+ >>> server.add(7, 10)
+ 17
+ >>> print(history.request)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32",
+ "method": "add", "params": [7, 10]}
+ >>> print(history.response)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", "error": null, "result": 17}
+ >>> server('close')()
+
+The equivalent ``loads`` and ``dumps`` functions also exist, although with minor
+modifications. The ``dumps`` arguments are almost identical, but it adds three
+arguments: ``rpcid`` for the 'id' key, ``version`` to specify the JSON-RPC
+compatibility, and ``notify`` if it's a request that you want to be a
+notification.
+
+Additionally, the ``loads`` method does not return the params and method like
+``xmlrpclib``, but instead a.) parses for errors, raising ProtocolErrors, and
+b.) returns the entire structure of the request / response for manual parsing.
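+
+For instance, a minimal round trip with these functions might look like the
+following sketch (the method name, id and argument values are only
+illustrative):
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> request = jsonrpclib.dumps([5, 6], 'add', rpcid='req-1', version=2.0)
+ >>> parsed = jsonrpclib.loads(request)
+ >>> parsed['method']
+ 'add'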
+
+
+Additional headers
+******************
+
+If your remote service requires custom headers in requests, you can pass them
+as a ``headers`` keyword argument when creating the ``ServerProxy``:
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy("http://localhost:8080",
+ headers={'X-Test' : 'Test'})
+
+You can also send additional request headers only for certain method invocations:
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.Server("http://localhost:8080")
+ >>> with server._additional_headers({'X-Test' : 'Test'}) as test_server:
+ ... test_server.ping(42)
+ ...
+ >>> # The X-Test header will no longer be sent in requests
+
+Of course ``_additional_headers`` contexts can be nested as well.
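+
+For example, nesting might look like the following sketch (the header names and
+the ``ping`` method are only illustrative, and it assumes each context yields
+the proxy itself, as in the example above):
+
+.. code-block:: python
+
+ >>> with server._additional_headers({'X-Outer': '1'}) as outer:
+ ...     with outer._additional_headers({'X-Inner': '2'}) as inner:
+ ...         inner.ping(42)  # sent with both X-Outer and X-Inner
+ ...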
+
+
+Class Translation
+*****************
+
+I've recently added "automatic" class translation support, although it is
+turned off by default. This can be devastatingly slow if improperly used, so
+the following is just a short list of things to keep in mind when using it.
+
+* Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
+* Do not require init params (for exceptions, keep reading)
+* Getter properties without setters could be dangerous (read: not tested)
+
+If any of the above are issues, use the ``_serialize`` method (see usage below).
+The server and client must BOTH have the ``use_jsonclass`` configuration item
+on, and they must both have access to the same libraries used by the objects
+for this to work.
+
+If you have excessively nested arguments, it would be better to turn off the
+translation and manually invoke it on specific objects using
+``jsonrpclib.jsonclass.dump`` / ``jsonrpclib.jsonclass.load`` (since the default
+behavior recursively goes through attributes and lists / dicts / tuples).
+
+Sample file: *test_obj.py*
+
+.. code-block:: python
+
+ # This object is /very/ simple, and the system will look through the
+ # attributes and serialize what it can.
+ class TestObj(object):
+ foo = 'bar'
+
+ # This object requires __init__ params, so it uses the _serialize method
+ # and returns a tuple of init params and attribute values (the init params
+ # can be a dict or a list, but the attribute values must be a dict.)
+ class TestSerial(object):
+ foo = 'bar'
+ def __init__(self, *args):
+ self.args = args
+ def _serialize(self):
+ return (self.args, {'foo':self.foo,})
+
+* Sample usage
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> import test_obj
+
+ # History is used only to print the serialized form of beans
+ >>> history = jsonrpclib.history.History()
+ >>> testobj1 = test_obj.TestObj()
+ >>> testobj2 = test_obj.TestSerial()
+ >>> server = jsonrpclib.Server('http://localhost:8080', history=history)
+
+ # The 'ping' just returns whatever is sent
+ >>> ping1 = server.ping(testobj1)
+ >>> ping2 = server.ping(testobj2)
+
+ >>> print(history.request)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "method": "ping", "params": [{"__jsonclass__":
+ ["test_obj.TestSerial", []], "foo": "bar"}
+ ]}
+ >>> print(history.response)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "result": {"__jsonclass__": ["test_obj.TestSerial", []], "foo": "bar"}}
+
+This behavior is turned on by default. To deactivate it, just set the
+``use_jsonclass`` member of a server ``Config`` to False.
+If you want to use a per-class serialization method, set its name in the
+``serialize_method`` member of a server ``Config``.
+Finally, if you are using classes that you have defined in the implementation
+(as in, not a separate library), you'll need to add those (on BOTH the server
+and the client) using the ``config.classes.add()`` method.
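+
+A rough sketch of wiring these options together (reusing the *test_obj.py*
+classes defined above; the exact arguments to ``classes.add()`` and the server
+URL are only illustrative):
+
+.. code-block:: python
+
+ >>> import jsonrpclib
+ >>> import test_obj
+
+ >>> config = jsonrpclib.config.Config(version=2.0)
+ >>> config.use_jsonclass = True
+ >>> config.serialize_method = '_serialize'
+ >>> config.classes.add(test_obj.TestSerial)
+ >>> server = jsonrpclib.Server('http://localhost:8080', config=config)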
+
+Feedback on this "feature" is very, VERY much appreciated.
+
+Why JSON-RPC?
+*************
+
+In my opinion, there are several reasons to choose JSON over XML for RPC:
+
+* Much simpler to read (I suppose this is opinion, but I know I'm right. :)
+* Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
+* Parsing - JSON should be much quicker to parse than XML.
+* Easy class passing with ``jsonclass`` (when enabled)
+
+In the interest of being fair, there are also a few reasons to choose XML
+over JSON:
+
+* Your server doesn't do JSON (rather obvious)
+* Wider XML-RPC support across APIs (can we change this? :))
+* Libraries are more established, i.e. more stable (Let's change this too.)
+
+Tests
+*****
+
+Tests are an almost-verbatim drop from the JSON-RPC specification 2.0 page.
+They can be run using *unittest* or *nosetests*:
+
+.. code-block:: console
+
+ python -m unittest discover tests
+ python3 -m unittest discover tests
+ nosetests tests
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py
new file mode 100755
index 00000000..f7a7b652
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py
@@ -0,0 +1,602 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+Defines a request dispatcher, an HTTP request handler, an HTTP server and a
+CGI request handler.
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+# Local modules
+from jsonrpclib import Fault
+import jsonrpclib.config
+import jsonrpclib.utils as utils
+import jsonrpclib.threadpool
+
+# Standard library
+import logging
+import socket
+import sys
+import traceback
+
+# Prepare the logger
+_logger = logging.getLogger(__name__)
+
+try:
+ # Python 3
+ # pylint: disable=F0401,E0611
+ import xmlrpc.server as xmlrpcserver
+ import socketserver
+except (ImportError, AttributeError):
+ # Python 2 or IronPython
+ # pylint: disable=F0401,E0611
+ import SimpleXMLRPCServer as xmlrpcserver
+ import SocketServer as socketserver
+
+try:
+ # POSIX systems
+ import fcntl
+except ImportError:
+ # Windows and other systems without fcntl
+ # pylint: disable=C0103
+ fcntl = None
+
+# ------------------------------------------------------------------------------
+
+
+def get_version(request):
+ """
+ Computes the JSON-RPC version
+
+ :param request: A request dictionary
+ :return: The JSON-RPC version or None
+ """
+ if 'jsonrpc' in request:
+ return 2.0
+ elif 'id' in request:
+ return 1.0
+
+ return None
+
+
+def validate_request(request, json_config):
+ """
+ Validates the format of a request dictionary
+
+ :param request: A request dictionary
+ :param json_config: A JSONRPClib Config instance
+ :return: True if the dictionary is valid, else a Fault object
+ """
+ if not isinstance(request, utils.DictType):
+ # Invalid request type
+ fault = Fault(-32600, 'Request must be a dict, not {0}'
+ .format(type(request).__name__),
+ config=json_config)
+ _logger.warning("Invalid request content: %s", fault)
+ return fault
+
+ # Get the request ID
+ rpcid = request.get('id', None)
+
+ # Check request version
+ version = get_version(request)
+ if not version:
+ fault = Fault(-32600, 'Request {0} invalid.'.format(request),
+ rpcid=rpcid, config=json_config)
+ _logger.warning("No version in request: %s", fault)
+ return fault
+
+ # Default parameters: empty list
+ request.setdefault('params', [])
+
+ # Check parameters
+ method = request.get('method', None)
+ params = request.get('params')
+ param_types = (utils.ListType, utils.DictType, utils.TupleType)
+
+ if not method or not isinstance(method, utils.string_types) or \
+ not isinstance(params, param_types):
+ # Invalid type of method name or parameters
+ fault = Fault(-32600, 'Invalid request parameters or method.',
+ rpcid=rpcid, config=json_config)
+ _logger.warning("Invalid request content: %s", fault)
+ return fault
+
+ # Valid request
+ return True
+
+# ------------------------------------------------------------------------------
+
+
+class NoMulticallResult(Exception):
+ """
+ No result in multicall
+ """
+ pass
+
+
+class SimpleJSONRPCDispatcher(xmlrpcserver.SimpleXMLRPCDispatcher, object):
+ """
+ Mix-in class that dispatches JSON-RPC requests.
+
+ This class is used to register JSON-RPC method handlers
+ and then to dispatch them. This class doesn't need to be
+ instantiated directly when used by SimpleJSONRPCServer.
+ """
+ def __init__(self, encoding=None, config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the dispatcher with the given encoding.
+ None values are allowed.
+ """
+ xmlrpcserver.SimpleXMLRPCDispatcher.__init__(
+ self, allow_none=True, encoding=encoding or "UTF-8")
+ self.json_config = config
+
+ # Notification thread pool
+ self.__notification_pool = None
+
+ def set_notification_pool(self, thread_pool):
+ """
+ Sets the thread pool to use to handle notifications
+ """
+ self.__notification_pool = thread_pool
+
+ def _unmarshaled_dispatch(self, request, dispatch_method=None):
+ """
+ Loads the request dictionary (unmarshaled), calls the method(s)
+ accordingly and returns a JSON-RPC dictionary (not marshaled)
+
+ :param request: JSON-RPC request dictionary (or list of)
+ :param dispatch_method: Custom dispatch method (for method resolution)
+ :return: A JSON-RPC dictionary (or an array of) or None if the request
+ was a notification
+ :raise NoMulticallResult: No result in batch
+ """
+ if not request:
+ # Invalid request dictionary
+ fault = Fault(-32600, 'Request invalid -- no request data.',
+ config=self.json_config)
+ _logger.warning("Invalid request: %s", fault)
+ return fault.dump()
+
+ if isinstance(request, utils.ListType):
+ # This SHOULD be a batch, by spec
+ responses = []
+ for req_entry in request:
+ # Validate the request
+ result = validate_request(req_entry, self.json_config)
+ if isinstance(result, Fault):
+ responses.append(result.dump())
+ continue
+
+ # Call the method
+ resp_entry = self._marshaled_single_dispatch(req_entry,
+ dispatch_method)
+
+ # Store its result
+ if isinstance(resp_entry, Fault):
+ # pylint: disable=E1103
+ responses.append(resp_entry.dump())
+ elif resp_entry is not None:
+ responses.append(resp_entry)
+
+ if not responses:
+ # No non-None result
+ _logger.error("No result in Multicall")
+ raise NoMulticallResult("No result")
+
+ return responses
+
+ else:
+ # Single call
+ result = validate_request(request, self.json_config)
+ if isinstance(result, Fault):
+ return result.dump()
+
+ # Call the method
+ response = self._marshaled_single_dispatch(request,
+ dispatch_method)
+ if isinstance(response, Fault):
+ # pylint: disable=E1103
+ return response.dump()
+
+ return response
+
+ def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
+ """
+ Parses the request data (marshaled), calls method(s) and returns a
+ JSON string (marshaled)
+
+ :param data: A JSON request string
+ :param dispatch_method: Custom dispatch method (for method resolution)
+ :param path: Unused parameter, to keep compatibility with xmlrpclib
+ :return: A JSON-RPC response string (marshaled)
+ """
+ # Parse the request
+ try:
+ request = jsonrpclib.loads(data, self.json_config)
+ except Exception as ex:
+ # Parsing/loading error
+ fault = Fault(-32700, 'Request {0} invalid. ({1}:{2})'
+ .format(data, type(ex).__name__, ex),
+ config=self.json_config)
+ _logger.warning("Error parsing request: %s", fault)
+ return fault.response()
+
+ # Get the response dictionary
+ try:
+ response = self._unmarshaled_dispatch(request, dispatch_method)
+ if response is not None:
+ # Compute the string representation of the dictionary/list
+ return jsonrpclib.jdumps(response, self.encoding)
+ else:
+ # No result (notification)
+ return ''
+ except NoMulticallResult:
+ # Return an empty string (jsonrpclib internal behaviour)
+ return ''
+
+ def _marshaled_single_dispatch(self, request, dispatch_method=None):
+ """
+ Dispatches a single method call
+
+ :param request: A validated request dictionary
+ :param dispatch_method: Custom dispatch method (for method resolution)
+ :return: A JSON-RPC response dictionary, or None if it was a
+ notification request
+ """
+ method = request.get('method')
+ params = request.get('params')
+
+ # Prepare a request-specific configuration
+ if 'jsonrpc' not in request and self.json_config.version >= 2:
+ # JSON-RPC 1.0 request on a JSON-RPC 2.0 server
+ # => compatibility needed
+ config = self.json_config.copy()
+ config.version = 1.0
+ else:
+ # Keep server configuration as is
+ config = self.json_config
+
+ # Test if this is a notification request
+ is_notification = 'id' not in request or request['id'] in (None, '')
+ if is_notification and self.__notification_pool is not None:
+ # Use the thread pool for notifications
+ if dispatch_method is not None:
+ self.__notification_pool.enqueue(dispatch_method,
+ method, params)
+ else:
+ self.__notification_pool.enqueue(self._dispatch,
+ method, params, config)
+
+ # Return immediately
+ return None
+ else:
+ # Synchronous call
+ try:
+ # Call the method
+ if dispatch_method is not None:
+ response = dispatch_method(method, params)
+ else:
+ response = self._dispatch(method, params, config)
+ except Exception as ex:
+ # Return a fault
+ fault = Fault(-32603, '{0}:{1}'.format(type(ex).__name__, ex),
+ config=config)
+ _logger.error("Error calling method %s: %s", method, fault)
+ return fault.dump()
+
+ if is_notification:
+ # It's a notification, no result needed
+ # Do not use 'not id' as it might be the integer 0
+ return None
+
+ # Prepare a JSON-RPC dictionary
+ try:
+ return jsonrpclib.dump(response, rpcid=request['id'],
+ is_response=True, config=config)
+ except Exception as ex:
+ # JSON conversion exception
+ fault = Fault(-32603, '{0}:{1}'.format(type(ex).__name__, ex),
+ config=config)
+ _logger.error("Error preparing JSON-RPC result: %s", fault)
+ return fault.dump()
+
+ def _dispatch(self, method, params, config=None):
+ """
+ Default method resolver and caller
+
+ :param method: Name of the method to call
+ :param params: List of arguments to give to the method
+ :param config: Request-specific configuration
+ :return: The result of the method
+ """
+ config = config or self.json_config
+
+ func = None
+ try:
+ # Look into registered methods
+ func = self.funcs[method]
+ except KeyError:
+ if self.instance is not None:
+ # Try with the registered instance
+ try:
+ # Instance has a custom dispatcher
+ return getattr(self.instance, '_dispatch')(method, params)
+ except AttributeError:
+ # Resolve the method name in the instance
+ try:
+ func = xmlrpcserver.resolve_dotted_attribute(
+ self.instance, method, True)
+ except AttributeError:
+ # Unknown method
+ pass
+
+ if func is not None:
+ try:
+ # Call the method
+ if isinstance(params, utils.ListType):
+ return func(*params)
+ else:
+ return func(**params)
+ except TypeError as ex:
+ # Maybe the parameters are wrong
+ fault = Fault(-32602, 'Invalid parameters: {0}'.format(ex),
+ config=config)
+ _logger.warning("Invalid call parameters: %s", fault)
+ return fault
+ except:
+ # Method exception
+ err_lines = traceback.format_exc().splitlines()
+ trace_string = '{0} | {1}'.format(err_lines[-3], err_lines[-1])
+ fault = Fault(-32603, 'Server error: {0}'.format(trace_string),
+ config=config)
+ _logger.exception("Server-side exception: %s", fault)
+ return fault
+ else:
+ # Unknown method
+ fault = Fault(-32601, 'Method {0} not supported.'.format(method),
+ config=config)
+ _logger.warning("Unknown method: %s", fault)
+ return fault
+
+# ------------------------------------------------------------------------------
+
+
+class SimpleJSONRPCRequestHandler(xmlrpcserver.SimpleXMLRPCRequestHandler):
+ """
+ HTTP request handler.
+
+ The server that receives the requests must have a json_config member,
+ containing a JSONRPClib Config instance
+ """
+ def do_POST(self):
+ """
+ Handles POST requests
+ """
+ if not self.is_rpc_path_valid():
+ self.report_404()
+ return
+
+ # Retrieve the configuration
+ config = getattr(self.server, 'json_config', jsonrpclib.config.DEFAULT)
+
+ try:
+ # Read the request body
+ max_chunk_size = 10 * 1024 * 1024
+ size_remaining = int(self.headers["content-length"])
+ chunks = []
+ while size_remaining:
+ chunk_size = min(size_remaining, max_chunk_size)
+ raw_chunk = self.rfile.read(chunk_size)
+ if not raw_chunk:
+ break
+ chunks.append(utils.from_bytes(raw_chunk))
+                # Decrement by the raw byte count, not the decoded length
+                size_remaining -= len(raw_chunk)
+ data = ''.join(chunks)
+
+ try:
+ # Decode content
+ data = self.decode_request_content(data)
+ if data is None:
+ # Unknown encoding, response has been sent
+ return
+ except AttributeError:
+ # Available since Python 2.7
+ pass
+
+ # Execute the method
+ response = self.server._marshaled_dispatch(
+ data, getattr(self, '_dispatch', None), self.path)
+
+ # No exception: send a 200 OK
+ self.send_response(200)
+ except:
+ # Exception: send 500 Server Error
+ self.send_response(500)
+ err_lines = traceback.format_exc().splitlines()
+ trace_string = '{0} | {1}'.format(err_lines[-3], err_lines[-1])
+ fault = jsonrpclib.Fault(-32603, 'Server error: {0}'
+ .format(trace_string), config=config)
+ _logger.exception("Server-side error: %s", fault)
+ response = fault.response()
+
+ if response is None:
+            # Avoid sending None
+ response = ''
+
+ # Convert the response to the valid string format
+ response = utils.to_bytes(response)
+
+ # Send it
+ self.send_header("Content-type", config.content_type)
+ self.send_header("Content-length", str(len(response)))
+ self.end_headers()
+ if response:
+ self.wfile.write(response)
+
+# ------------------------------------------------------------------------------
+
+
+class SimpleJSONRPCServer(socketserver.TCPServer, SimpleJSONRPCDispatcher):
+ """
+ JSON-RPC server (and dispatcher)
+ """
+ # This simplifies server restart after error
+ allow_reuse_address = True
+
+ # pylint: disable=C0103
+ def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
+ logRequests=True, encoding=None, bind_and_activate=True,
+ address_family=socket.AF_INET,
+ config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the server and the dispatcher
+
+ :param addr: The server listening address
+ :param requestHandler: Custom request handler
+        :param logRequests: Flag to (de)activate request logging
+ :param encoding: The dispatcher request encoding
+ :param bind_and_activate: If True, starts the server immediately
+ :param address_family: The server listening address family
+ :param config: A JSONRPClib Config instance
+ """
+ # Set up the dispatcher fields
+ SimpleJSONRPCDispatcher.__init__(self, encoding, config)
+
+ # Prepare the server configuration
+ # logRequests is used by SimpleXMLRPCRequestHandler
+ self.logRequests = logRequests
+ self.address_family = address_family
+ self.json_config = config
+
+ # Work on the request handler
+ class RequestHandlerWrapper(requestHandler, object):
+ """
+            Wraps the request handler to give it access to the configuration
+ """
+ def __init__(self, *args, **kwargs):
+ """
+ Constructs the wrapper after having stored the configuration
+ """
+ self.config = config
+ super(RequestHandlerWrapper, self).__init__(*args, **kwargs)
+
+ # Set up the server
+ socketserver.TCPServer.__init__(self, addr, requestHandler,
+ bind_and_activate)
+
+        # Set the close-on-exec flag when fcntl is available (not on Windows)
+ if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
+ flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
+ flags |= fcntl.FD_CLOEXEC
+ fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
+
+# ------------------------------------------------------------------------------
+
+
+class PooledJSONRPCServer(SimpleJSONRPCServer, socketserver.ThreadingMixIn):
+ """
+ JSON-RPC server based on a thread pool
+ """
+ def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
+ logRequests=True, encoding=None, bind_and_activate=True,
+ address_family=socket.AF_INET,
+ config=jsonrpclib.config.DEFAULT, thread_pool=None):
+ """
+ Sets up the server and the dispatcher
+
+ :param addr: The server listening address
+ :param requestHandler: Custom request handler
+        :param logRequests: Flag to (de)activate request logging
+ :param encoding: The dispatcher request encoding
+ :param bind_and_activate: If True, starts the server immediately
+ :param address_family: The server listening address family
+ :param config: A JSONRPClib Config instance
+ :param thread_pool: A ThreadPool object. The pool must be started.
+ """
+ # Normalize the thread pool
+ if thread_pool is None:
+            # Start a thread pool with 30 threads max, 0 threads min
+ thread_pool = jsonrpclib.threadpool.ThreadPool(
+ 30, 0, logname="PooledJSONRPCServer")
+ thread_pool.start()
+
+ # Store the thread pool
+ self.__request_pool = thread_pool
+
+ # Prepare the server
+ SimpleJSONRPCServer.__init__(self, addr, requestHandler, logRequests,
+ encoding, bind_and_activate,
+ address_family, config)
+
+ def process_request(self, request, client_address):
+ """
+ Handle a client request: queue it in the thread pool
+ """
+ self.__request_pool.enqueue(self.process_request_thread,
+ request, client_address)
+
+ def server_close(self):
+ """
+ Clean up the server
+ """
+ SimpleJSONRPCServer.server_close(self)
+ self.__request_pool.stop()
+
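+# A minimal usage sketch for the pooled server, kept as a comment (assumption:
+# the address, pool sizes and registered function below are only illustrative):
+#
+#     from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
+#     from jsonrpclib.threadpool import ThreadPool
+#
+#     pool = ThreadPool(10, 2)
+#     pool.start()  # the pool must be started before being handed over
+#     server = PooledJSONRPCServer(('localhost', 8181), thread_pool=pool)
+#     server.register_function(pow)
+#     server.serve_forever()
+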
+# ------------------------------------------------------------------------------
+
+
+class CGIJSONRPCRequestHandler(SimpleJSONRPCDispatcher):
+ """
+ JSON-RPC CGI handler (and dispatcher)
+ """
+ def __init__(self, encoding=None, config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the dispatcher
+
+ :param encoding: Dispatcher encoding
+ :param config: A JSONRPClib Config instance
+ """
+ SimpleJSONRPCDispatcher.__init__(self, encoding, config)
+
+ def handle_jsonrpc(self, request_text):
+ """
+ Handle a JSON-RPC request
+ """
+ response = self._marshaled_dispatch(request_text)
+ sys.stdout.write('Content-Type: {0}\r\n'
+ .format(self.json_config.content_type))
+ sys.stdout.write('Content-Length: {0:d}\r\n'.format(len(response)))
+ sys.stdout.write('\r\n')
+ sys.stdout.write(response)
+
+ # XML-RPC alias
+ handle_xmlrpc = handle_jsonrpc
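+
+# A minimal server usage sketch, kept as a comment (assumptions: the address
+# and the registered 'add' function are only illustrative):
+#
+#     from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+#
+#     def add(a, b):
+#         return a + b
+#
+#     server = SimpleJSONRPCServer(('localhost', 8181))
+#     server.register_function(add)
+#     server.serve_forever()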
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py
new file mode 100755
index 00000000..2c7dc1c5
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py
@@ -0,0 +1,34 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+Aliases to ease access to jsonrpclib classes
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Easy access to utility methods and classes
+from jsonrpclib.jsonrpc import Server, ServerProxy
+from jsonrpclib.jsonrpc import MultiCall, Fault, ProtocolError, AppError
+from jsonrpclib.jsonrpc import loads, dumps, load, dump
+from jsonrpclib.jsonrpc import jloads, jdumps
+import jsonrpclib.history as history
+import jsonrpclib.utils as utils
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py
new file mode 100755
index 00000000..d2c5a811
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+The configuration module.
+
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+import sys
+
+# ------------------------------------------------------------------------------
+
+
+class LocalClasses(dict):
+ """
+ Associates local classes with their names (used in the jsonclass module)
+ """
+ def add(self, cls, name=None):
+ """
+ Stores a local class
+
+ :param cls: A class
+ :param name: Custom name used in the __jsonclass__ attribute
+ """
+ if not name:
+ name = cls.__name__
+ self[name] = cls
+
+# ------------------------------------------------------------------------------
+
+
+class Config(object):
+ """
+ This is pretty much used exclusively for the 'jsonclass'
+ functionality... set use_jsonclass to False to turn it off.
+    You can change serialize_method and ignore_attribute, or use
+    classes.add(cls) to register "local" classes.
+ """
+ def __init__(self, version=2.0, content_type="application/json-rpc",
+ user_agent=None, use_jsonclass=True,
+ serialize_method='_serialize',
+ ignore_attribute='_ignore',
+ serialize_handlers=None):
+ """
+ Sets up a configuration of JSONRPClib
+
+ :param version: JSON-RPC specification version
+ :param content_type: HTTP content type header value
+ :param user_agent: The HTTP request user agent
+ :param use_jsonclass: Allow bean marshalling
+ :param serialize_method: A string that references the method on a
+ custom class object which is responsible for
+ returning a tuple of the arguments and a dict
+ of attributes.
+ :param ignore_attribute: A string that references the attribute on a
+ custom class object which holds strings and/or
+ references of the attributes the class
+ translator should ignore.
+ :param serialize_handlers: A dictionary of dump handler functions by
+ type for additional type support and for
+ overriding dump of built-in types in utils
+ """
+ # JSON-RPC specification
+ self.version = version
+
+ # Change to False to keep __jsonclass__ entries raw.
+ self.use_jsonclass = use_jsonclass
+
+ # it SHOULD be 'application/json-rpc'
+ # but MAY be 'application/json' or 'application/jsonrequest'
+ self.content_type = content_type
+
+ # Default user agent
+ if user_agent is None:
+ user_agent = 'jsonrpclib/{0} (Python {1})'.format(
+ __version__, '.'.join(str(ver)
+ for ver in sys.version_info[0:3]))
+ self.user_agent = user_agent
+
+ # The list of classes to use for jsonclass translation.
+ self.classes = LocalClasses()
+
+ # The serialize_method should be a string that references the
+ # method on a custom class object which is responsible for
+ # returning a tuple of the constructor arguments and a dict of
+ # attributes.
+ self.serialize_method = serialize_method
+
+ # The ignore attribute should be a string that references the
+ # attribute on a custom class object which holds strings and / or
+ # references of the attributes the class translator should ignore.
+ self.ignore_attribute = ignore_attribute
+
+ # The list of serialize handler functions for jsonclass dump.
+ # Used for handling additional types and overriding built-in types.
+ # Functions are expected to have the same parameters as jsonclass dump
+ # (possibility to call standard jsonclass dump function within).
+ self.serialize_handlers = serialize_handlers or {}
+
+ def copy(self):
+ """
+ Returns a shallow copy of this configuration bean
+
+ :return: A shallow copy of this configuration
+ """
+ new_config = Config(self.version, self.content_type, self.user_agent,
+ self.use_jsonclass, self.serialize_method,
+ self.ignore_attribute, None)
+ new_config.classes = self.classes.copy()
+ new_config.serialize_handlers = self.serialize_handlers.copy()
+ return new_config
+
+# Default configuration
+DEFAULT = Config()
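+
+# A usage sketch, kept as a comment (the Bean class and the address are
+# hypothetical, purely for illustration):
+#
+#     import jsonrpclib
+#     from jsonrpclib.config import Config
+#
+#     class Bean(object):
+#         def __init__(self, value=None):
+#             self.value = value
+#
+#     config = Config(version=2.0)
+#     config.classes.add(Bean)   # register a "local" class for jsonclass
+#     proxy = jsonrpclib.ServerProxy('http://localhost:8181', config=config)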
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py
new file mode 100755
index 00000000..7062ab66
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+The history module.
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+
+class History(object):
+ """
+ This holds all the response and request objects for a
+ session. A server using this should call "clear" after
+ each request cycle in order to keep it from clogging
+ memory.
+ """
+ def __init__(self):
+ """
+ Sets up members
+ """
+ self.requests = []
+ self.responses = []
+
+ def add_response(self, response_obj):
+ """
+ Adds a response to the history
+
+ :param response_obj: Response content
+ """
+ self.responses.append(response_obj)
+
+ def add_request(self, request_obj):
+ """
+ Adds a request to the history
+
+ :param request_obj: A request object
+ """
+ self.requests.append(request_obj)
+
+ @property
+ def request(self):
+ """
+ Returns the latest stored request or None
+ """
+ try:
+ return self.requests[-1]
+
+ except IndexError:
+ return None
+
+ @property
+ def response(self):
+ """
+ Returns the latest stored response or None
+ """
+ try:
+ return self.responses[-1]
+
+ except IndexError:
+ return None
+
+ def clear(self):
+ """
+ Clears the history lists
+ """
+ del self.requests[:]
+ del self.responses[:]
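+
+# A usage sketch, kept as a comment (the address and the remote 'add' method
+# are only illustrative):
+#
+#     import jsonrpclib
+#     from jsonrpclib.history import History
+#
+#     history = History()
+#     proxy = jsonrpclib.ServerProxy('http://localhost:8181', history=history)
+#     proxy.add(5, 6)
+#     print(history.request)    # last JSON-RPC request string sent
+#     print(history.response)   # last JSON-RPC response string received
+#     history.clear()           # avoid unbounded growth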
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py
new file mode 100755
index 00000000..c7cc4c35
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+The serialization module
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+# Local package
+import jsonrpclib.config
+import jsonrpclib.utils as utils
+
+# Standard library
+import inspect
+import re
+
+# ------------------------------------------------------------------------------
+
+# Supported transmitted code
+SUPPORTED_TYPES = (utils.DictType,) + utils.iterable_types \
+ + utils.primitive_types
+
+# Regex of invalid module characters
+INVALID_MODULE_CHARS = r'[^a-zA-Z0-9\_\.]'
+
+# ------------------------------------------------------------------------------
+
+
+class TranslationError(Exception):
+ """
+ Unmarshaling exception
+ """
+ pass
+
+
+def _slots_finder(clazz, fields_set):
+ """
+ Recursively visits the class hierarchy to find all slots
+
+ :param clazz: Class to analyze
+    :param fields_set: Set where to store the __slots__ content
+ """
+ # ... class level
+ try:
+ fields_set.update(clazz.__slots__)
+ except AttributeError:
+ pass
+
+ # ... parent classes level
+ for base_class in clazz.__bases__:
+ _slots_finder(base_class, fields_set)
+
+
+def _find_fields(obj):
+ """
+ Returns the names of the fields of the given object
+
+ :param obj: An object to analyze
+ :return: A set of field names
+ """
+ # Find fields...
+ fields = set()
+
+ # ... using __dict__
+ try:
+ fields.update(obj.__dict__)
+ except AttributeError:
+ pass
+
+ # ... using __slots__
+ _slots_finder(obj.__class__, fields)
+ return fields
+
+
+def dump(obj, serialize_method=None, ignore_attribute=None, ignore=None,
+ config=jsonrpclib.config.DEFAULT):
+ """
+ Transforms the given object into a JSON-RPC compliant form.
+ Converts beans into dictionaries with a __jsonclass__ entry.
+ Doesn't change primitive types.
+
+ :param obj: An object to convert
+ :param serialize_method: Custom serialization method
+ :param ignore_attribute: Name of the object attribute containing the names
+ of members to ignore
+ :param ignore: A list of members to ignore
+ :param config: A JSONRPClib Config instance
+ :return: A JSON-RPC compliant object
+ """
+ # Normalize arguments
+ serialize_method = serialize_method or config.serialize_method
+ ignore_attribute = ignore_attribute or config.ignore_attribute
+ ignore = ignore or []
+
+ # Parse / return default "types"...
+ # Apply additional types, override built-in types
+ # (reminder: config.serialize_handlers is a dict)
+ try:
+ serializer = config.serialize_handlers[type(obj)]
+ except KeyError:
+        # No custom serializer for this type
+ pass
+ else:
+ if serializer is not None:
+ return serializer(obj, serialize_method, ignore_attribute,
+ ignore, config)
+
+ # Primitive
+ if isinstance(obj, utils.primitive_types):
+ return obj
+
+ # Iterative
+ elif isinstance(obj, utils.iterable_types):
+ # List, set or tuple
+ return [dump(item, serialize_method, ignore_attribute, ignore, config)
+ for item in obj]
+
+ elif isinstance(obj, utils.DictType):
+ # Dictionary
+ return dict((key, dump(value, serialize_method,
+ ignore_attribute, ignore, config))
+ for key, value in obj.items())
+
+ # It's not a standard type, so it needs __jsonclass__
+ module_name = inspect.getmodule(type(obj)).__name__
+ json_class = obj.__class__.__name__
+
+ if module_name not in ('', '__main__'):
+ json_class = '{0}.{1}'.format(module_name, json_class)
+
+ # Keep the class name in the returned object
+ return_obj = {"__jsonclass__": [json_class]}
+
+ # If a serialization method is defined..
+ if hasattr(obj, serialize_method):
+ # Params can be a dict (keyword) or list (positional)
+ # Attrs MUST be a dict.
+ serialize = getattr(obj, serialize_method)
+ params, attrs = serialize()
+ return_obj['__jsonclass__'].append(params)
+ return_obj.update(attrs)
+ return return_obj
+
+ else:
+ # Otherwise, try to figure it out
+ # Obviously, we can't assume to know anything about the
+ # parameters passed to __init__
+ return_obj['__jsonclass__'].append([])
+
+ # Prepare filtering lists
+ known_types = SUPPORTED_TYPES + tuple(config.serialize_handlers)
+ ignore_list = getattr(obj, ignore_attribute, []) + ignore
+
+ # Find fields and filter them by name
+ fields = _find_fields(obj)
+ fields.difference_update(ignore_list)
+
+ # Dump field values
+ attrs = {}
+ for attr_name in fields:
+ attr_value = getattr(obj, attr_name)
+ if isinstance(attr_value, known_types) and \
+ attr_value not in ignore_list:
+ attrs[attr_name] = dump(attr_value, serialize_method,
+ ignore_attribute, ignore, config)
+ return_obj.update(attrs)
+ return return_obj
+
+# ------------------------------------------------------------------------------
+
+
+def load(obj, classes=None):
+ """
+ If 'obj' is a dictionary containing a __jsonclass__ entry, converts the
+ dictionary item into a bean of this class.
+
+ :param obj: An object from a JSON-RPC dictionary
+ :param classes: A custom {name: class} dictionary
+ :return: The loaded object
+ """
+ # Primitive
+ if isinstance(obj, utils.primitive_types):
+ return obj
+
+ # List, set or tuple
+ elif isinstance(obj, utils.iterable_types):
+ # This comes from a JSON parser, so it can only be a list...
+ return [load(entry) for entry in obj]
+
+ # Otherwise, it's a dict type
+ elif '__jsonclass__' not in obj:
+ return dict((key, load(value)) for key, value in obj.items())
+
+ # It's a dictionary, and it has a __jsonclass__
+ orig_module_name = obj['__jsonclass__'][0]
+ params = obj['__jsonclass__'][1]
+
+ # Validate the module name
+ if not orig_module_name:
+ raise TranslationError('Module name empty.')
+
+ json_module_clean = re.sub(INVALID_MODULE_CHARS, '', orig_module_name)
+ if json_module_clean != orig_module_name:
+ raise TranslationError('Module name {0} has invalid characters.'
+ .format(orig_module_name))
+
+ # Load the class
+ json_module_parts = json_module_clean.split('.')
+ json_class = None
+ if classes and len(json_module_parts) == 1:
+        # Bare class name: look it up in the given local classes
+ try:
+ json_class = classes[json_module_parts[0]]
+ except KeyError:
+ raise TranslationError('Unknown class or module {0}.'
+ .format(json_module_parts[0]))
+
+ else:
+ # Module + class
+ json_class_name = json_module_parts.pop()
+ json_module_tree = '.'.join(json_module_parts)
+ try:
+ # Use fromlist to load the module itself, not the package
+ temp_module = __import__(json_module_tree,
+ fromlist=[json_class_name])
+ except ImportError:
+ raise TranslationError('Could not import {0} from module {1}.'
+ .format(json_class_name, json_module_tree))
+
+ try:
+ json_class = getattr(temp_module, json_class_name)
+ except AttributeError:
+ raise TranslationError("Unknown class {0}.{1}."
+ .format(json_module_tree, json_class_name))
+
+ # Create the object
+ new_obj = None
+ if isinstance(params, utils.ListType):
+ try:
+ new_obj = json_class(*params)
+ except TypeError as ex:
+ raise TranslationError("Error instantiating {0}: {1}"
+ .format(json_class.__name__, ex))
+
+ elif isinstance(params, utils.DictType):
+ try:
+ new_obj = json_class(**params)
+ except TypeError as ex:
+ raise TranslationError("Error instantiating {0}: {1}"
+ .format(json_class.__name__, ex))
+
+ else:
+ raise TranslationError("Constructor args must be a dict or a list, "
+ "not {0}".format(type(params).__name__))
+
+ # Remove the class information, as it must be ignored during the
+ # reconstruction of the object
+ raw_jsonclass = obj.pop('__jsonclass__')
+
+ for key, value in obj.items():
+ # Recursive loading
+ setattr(new_obj, key, load(value, classes))
+
+ # Restore the class information for further usage
+ obj['__jsonclass__'] = raw_jsonclass
+
+ return new_obj
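+
+# A round-trip sketch, kept as a comment (the Point class is hypothetical;
+# classes defined in __main__ keep a bare class name in __jsonclass__):
+#
+#     class Point(object):
+#         def __init__(self, x=0, y=0):
+#             self.x = x
+#             self.y = y
+#
+#     serialized = dump(Point(1, 2))
+#     # -> {'__jsonclass__': ['Point', []], 'x': 1, 'y': 2}
+#     restored = load(serialized, classes={'Point': Point})
+#     # restored is a Point with x == 1 and y == 2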
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py
new file mode 100755
index 00000000..8ee902b0
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py
@@ -0,0 +1,1192 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+============================
+JSONRPC Library (jsonrpclib)
+============================
+
+This library is a JSON-RPC v.2 (proposed) implementation which
+follows the xmlrpclib API for portability between clients. It
+uses the same Server / ServerProxy, loads, dumps, etc. syntax,
+while providing features not present in XML-RPC like:
+
+* Keyword arguments
+* Notifications
+* Versioning
+* Batches and batch notifications
+
+Eventually, I'll add a SimpleXMLRPCServer compatible library,
+and other things to tie the thing off nicely. :)
+
+For a quick-start, just open a console and type the following,
+replacing the server address, method, and parameters
+appropriately.
+>>> import jsonrpclib
+>>> server = jsonrpclib.Server('http://localhost:8181')
+>>> server.add(5, 6)
+11
+>>> server._notify.add(5, 6)
+>>> batch = jsonrpclib.MultiCall(server)
+>>> batch.add(3, 50)
+>>> batch.add(2, 3)
+>>> batch._notify.add(3, 5)
+>>> batch()
+[53, 5]
+
+See https://github.com/tcalmant/jsonrpclib for more info.
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+# Library includes
+import jsonrpclib.config
+import jsonrpclib.utils as utils
+
+# Standard library
+import contextlib
+import logging
+import sys
+import uuid
+
+# Create the logger
+_logger = logging.getLogger(__name__)
+
+try:
+ # Python 3
+ # pylint: disable=F0401,E0611
+ from urllib.parse import splittype
+ from urllib.parse import splithost
+ from xmlrpc.client import Transport as XMLTransport
+ from xmlrpc.client import SafeTransport as XMLSafeTransport
+ from xmlrpc.client import ServerProxy as XMLServerProxy
+ from xmlrpc.client import _Method as XML_Method
+
+except ImportError:
+ # Python 2
+ # pylint: disable=F0401,E0611
+ from urllib import splittype
+ from urllib import splithost
+ from xmlrpclib import Transport as XMLTransport
+ from xmlrpclib import SafeTransport as XMLSafeTransport
+ from xmlrpclib import ServerProxy as XMLServerProxy
+ from xmlrpclib import _Method as XML_Method
+
+# ------------------------------------------------------------------------------
+# JSON library import
+
+# JSON class serialization
+from jsonrpclib import jsonclass
+
+try:
+ # pylint: disable=F0401,E0611
+ # Using cjson
+ import cjson
+ _logger.debug("Using cjson as JSON library")
+
+ # Declare cjson methods
+ def jdumps(obj, encoding='utf-8'):
+ """
+ Serializes ``obj`` to a JSON formatted string, using cjson.
+ """
+ return cjson.encode(obj)
+
+ def jloads(json_string):
+ """
+ Deserializes ``json_string`` (a string containing a JSON document)
+ to a Python object, using cjson.
+ """
+ return cjson.decode(json_string)
+
+except ImportError:
+ # pylint: disable=F0401,E0611
+ # Use json or simplejson
+ try:
+ import json
+ _logger.debug("Using json as JSON library")
+
+ except ImportError:
+ try:
+ import simplejson as json
+ _logger.debug("Using simplejson as JSON library")
+ except ImportError:
+ _logger.error("No supported JSON library found")
+ raise ImportError('You must have the cjson, json, or simplejson '
+ 'module(s) available.')
+
+ # Declare json methods
+ if sys.version_info[0] < 3:
+ def jdumps(obj, encoding='utf-8'):
+ """
+ Serializes ``obj`` to a JSON formatted string.
+ """
+ # Python 2 (explicit encoding)
+ return json.dumps(obj, encoding=encoding)
+
+ else:
+ # Python 3
+ def jdumps(obj, encoding='utf-8'):
+ """
+ Serializes ``obj`` to a JSON formatted string.
+ """
+ # Python 3 (the encoding parameter has been removed)
+ return json.dumps(obj)
+
+ def jloads(json_string):
+ """
+ Deserializes ``json_string`` (a string containing a JSON document)
+ to a Python object.
+ """
+ return json.loads(json_string)
+
+# ------------------------------------------------------------------------------
+# XMLRPClib re-implementations
+
+
+class ProtocolError(Exception):
+ """
+ JSON-RPC error
+
+ ProtocolError.args[0] can be:
+ * an error message (string)
+ * a (code, message) tuple
+ """
+ pass
+
+
+class AppError(ProtocolError):
+ """
+ Application error: the error code is not in the pre-defined ones
+
+ AppError.args[0][0]: Error code
+ AppError.args[0][1]: Error message or trace
+ AppError.args[0][2]: Associated data
+ """
+ def data(self):
+ """
+ Retrieves the value found in the 'data' entry of the error, or None
+
+ :return: The data associated to the error, or None
+ """
+ return self.args[0][2]
+
+
+class JSONParser(object):
+ """
+ Default JSON parser
+ """
+ def __init__(self, target):
+ """
+ Associates the target loader to the parser
+
+ :param target: a JSONTarget instance
+ """
+ self.target = target
+
+ def feed(self, data):
+ """
+ Feeds the associated target with the given data
+ """
+ self.target.feed(data)
+
+ def close(self):
+ """
+ Does nothing
+ """
+ pass
+
+
+class JSONTarget(object):
+ """
+ Unmarshalls stream data to a string
+ """
+ def __init__(self):
+ """
+ Sets up the unmarshaller
+ """
+ self.data = []
+
+ def feed(self, data):
+ """
+ Stores the given raw data into a buffer
+ """
+        # Store raw data, as a chunk may end in the middle of a
+        # multi-byte character
+ self.data.append(data)
+
+ def close(self):
+ """
+ Unmarshalls the buffered data
+ """
+ if not self.data:
+ return ''
+ else:
+ # Use type to have a valid join (str vs. bytes)
+ data = type(self.data[0])().join(self.data)
+ try:
+ # Convert the whole final string
+ data = utils.from_bytes(data)
+ except:
+ # Try a pass-through
+ pass
+
+ return data
+
+
+class TransportMixIn(object):
+ """ Just extends the XMLRPC transport where necessary. """
+ # for Python 2.7 support
+ _connection = None
+
+ # List of non-overridable headers
+ # Use the configuration to change the content-type
+ readonly_headers = ('content-length', 'content-type')
+
+ def __init__(self, config=jsonrpclib.config.DEFAULT, context=None):
+ """
+ Sets up the transport
+
+ :param config: A JSONRPClib Config instance
+ """
+ # Store the configuration
+ self._config = config
+
+ # Store the SSL context
+ self.context = context
+
+ # Set up the user agent
+ self.user_agent = config.user_agent
+
+ # Additional headers: list of dictionaries
+ self.additional_headers = []
+
+ def push_headers(self, headers):
+ """
+ Adds a dictionary of headers to the additional headers list
+
+ :param headers: A dictionary
+ """
+ self.additional_headers.append(headers)
+
+ def pop_headers(self, headers):
+ """
+ Removes the given dictionary from the additional headers list.
+ Also validates that given headers are on top of the stack
+
+ :param headers: Headers to remove
+        :raise AssertionError: The given dictionary is not the latest one
+                               stored in the additional headers list
+ """
+ assert self.additional_headers[-1] == headers
+ self.additional_headers.pop()
+
+ def emit_additional_headers(self, connection):
+ """
+        Puts the headers as-is in the request, filtering out read-only headers
+
+ :param connection: The request connection
+ """
+ additional_headers = {}
+
+ # Prepare the merged dictionary
+ for headers in self.additional_headers:
+ additional_headers.update(headers)
+
+ # Remove forbidden keys
+ for forbidden in self.readonly_headers:
+ additional_headers.pop(forbidden, None)
+
+ # Reversed order: in the case of multiple headers value definition,
+ # the latest pushed has priority
+ for key, value in additional_headers.items():
+ key = str(key)
+ if key.lower() not in self.readonly_headers:
+ # Only accept replaceable headers
+ connection.putheader(str(key), str(value))
+
+ def send_content(self, connection, request_body):
+ """
+ Completes the request headers and sends the request body of a JSON-RPC
+ request over a HTTPConnection
+
+ :param connection: An HTTPConnection object
+ :param request_body: JSON-RPC request body
+ """
+ # Convert the body first
+ request_body = utils.to_bytes(request_body)
+
+ # "static" headers
+ connection.putheader("Content-Type", self._config.content_type)
+ connection.putheader("Content-Length", str(len(request_body)))
+
+ # Emit additional headers here in order not to override content-length
+ self.emit_additional_headers(connection)
+
+ connection.endheaders()
+ if request_body:
+ connection.send(request_body)
+
+ def getparser(self):
+ """
+ Create an instance of the parser, and attach it to an unmarshalling
+ object. Return both objects.
+
+ :return: The parser and unmarshaller instances
+ """
+ target = JSONTarget()
+ return JSONParser(target), target
+
+
+class Transport(TransportMixIn, XMLTransport):
+ """
+ Mixed-in HTTP transport
+ """
+ pass
+
+
+class SafeTransport(TransportMixIn, XMLSafeTransport):
+ """
+ Mixed-in HTTPS transport
+ """
+ pass
+
+# ------------------------------------------------------------------------------
+
+
+class ServerProxy(XMLServerProxy):
+ """
+ Unfortunately, much more of this class has to be copied since
+ so much of it does the serialization.
+ """
+ def __init__(self, uri, transport=None, encoding=None,
+ verbose=0, version=None, headers=None, history=None,
+ config=jsonrpclib.config.DEFAULT, context=None):
+ """
+ Sets up the server proxy
+
+ :param uri: Request URI
+ :param transport: Custom transport handler
+ :param encoding: Specified encoding
+ :param verbose: Log verbosity level
+ :param version: JSON-RPC specification version
+ :param headers: Custom additional headers for each request
+ :param history: History object (for tests)
+ :param config: A JSONRPClib Config instance
+ :param context: The optional SSLContext to use
+ """
+ # Store the configuration
+ self._config = config
+ self.__version = version or config.version
+
+ schema, uri = splittype(uri)
+ if schema not in ('http', 'https'):
+            _logger.error("jsonrpclib only supports http(s) URIs, not %s",
+ schema)
+ raise IOError('Unsupported JSON-RPC protocol.')
+
+ self.__host, self.__handler = splithost(uri)
+ if not self.__handler:
+ # Not sure if this is in the JSON spec?
+ self.__handler = '/'
+
+ if transport is None:
+ if schema == 'https':
+ transport = SafeTransport(config=config, context=context)
+ else:
+ transport = Transport(config=config)
+ self.__transport = transport
+
+ self.__encoding = encoding
+ self.__verbose = verbose
+ self.__history = history
+
+ # Global custom headers are injected into Transport
+ self.__transport.push_headers(headers or {})
+
+ def _request(self, methodname, params, rpcid=None):
+ """
+ Calls a method on the remote server
+
+ :param methodname: Name of the method to call
+ :param params: Method parameters
+ :param rpcid: ID of the remote call
+ :return: The parsed result of the call
+ """
+ request = dumps(params, methodname, encoding=self.__encoding,
+ rpcid=rpcid, version=self.__version,
+ config=self._config)
+ response = self._run_request(request)
+ check_for_errors(response)
+ return response['result']
+
+ def _request_notify(self, methodname, params, rpcid=None):
+ """
+ Calls a method as a notification
+
+ :param methodname: Name of the method to call
+ :param params: Method parameters
+ :param rpcid: ID of the remote call
+ """
+ request = dumps(params, methodname, encoding=self.__encoding,
+ rpcid=rpcid, version=self.__version, notify=True,
+ config=self._config)
+ response = self._run_request(request, notify=True)
+ check_for_errors(response)
+
+ def _run_request(self, request, notify=False):
+ """
+ Sends the given request to the remote server
+
+ :param request: The request to send
+ :param notify: Notification request flag (unused)
+ :return: The response as a parsed JSON object
+ """
+ if self.__history is not None:
+ self.__history.add_request(request)
+
+ response = self.__transport.request(
+ self.__host,
+ self.__handler,
+ request,
+ verbose=self.__verbose
+ )
+
+ # Here, the XMLRPC library translates a single list
+ # response to the single value -- should we do the
+ # same, and require a tuple / list to be passed to
+ # the response object, or expect the Server to be
+ # outputting the response appropriately?
+
+ if self.__history is not None:
+ self.__history.add_response(response)
+
+ if not response:
+ return None
+ else:
+ return_obj = loads(response, self._config)
+ return return_obj
+
+ def __getattr__(self, name):
+ """
+ Returns a callable object to call the remote service
+ """
+ # Same as original, just with new _Method reference
+ return _Method(self._request, name)
+
+ def __close(self):
+ """
+ Closes the transport layer
+ """
+ try:
+ self.__transport.close()
+ except AttributeError:
+ # Not available in Python 2.6
+ pass
+
+ def __call__(self, attr):
+ """
+ A workaround to get special attributes on the ServerProxy
+ without interfering with the magic __getattr__
+
+ (code from xmlrpclib in Python 2.7)
+ """
+ if attr == "close":
+ return self.__close
+
+ elif attr == "transport":
+ return self.__transport
+
+ raise AttributeError("Attribute {0} not found".format(attr))
+
+ @property
+ def _notify(self):
+ """
+ Like __getattr__, but sending a notification request instead of a call
+ """
+ return _Notify(self._request_notify)
+
+ @contextlib.contextmanager
+ def _additional_headers(self, headers):
+ """
+        Allows specifying additional headers to be added inside the with
+        block.
+        Example of usage:
+
+ >>> with client._additional_headers({'X-Test' : 'Test'}) as new_client:
+ ... new_client.method()
+ ...
+ >>> # Here old headers are restored
+ """
+ self.__transport.push_headers(headers)
+ yield self
+ self.__transport.pop_headers(headers)
+
+# ------------------------------------------------------------------------------
+
+
+class _Method(XML_Method):
+ """
+    Some magic to bind a JSON-RPC method to an RPC server.
+ """
+ def __call__(self, *args, **kwargs):
+ """
+ Sends an RPC request and returns the unmarshalled result
+ """
+ if args and kwargs:
+ raise ProtocolError("Cannot use both positional and keyword "
+ "arguments (according to JSON-RPC spec.)")
+ if args:
+ return self.__send(self.__name, args)
+ else:
+ return self.__send(self.__name, kwargs)
+
+ def __getattr__(self, name):
+ """
+ Returns a Method object for nested calls
+ """
+ if name == "__name__":
+ return self.__name
+ return _Method(self.__send, "{0}.{1}".format(self.__name, name))
+
+
+class _Notify(object):
+ """
+ Same as _Method, but to send notifications
+ """
+ def __init__(self, request):
+ """
+ Sets the method to call to send a request to the server
+ """
+ self._request = request
+
+ def __getattr__(self, name):
+ """
+ Returns a Method object, to be called as a notification
+ """
+ return _Method(self._request, name)
+
+# ------------------------------------------------------------------------------
+# Batch implementation
+
+
+class MultiCallMethod(object):
+ """
+ Stores calls made to a MultiCall object for batch execution
+ """
+ def __init__(self, method, notify=False, config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the store
+
+ :param method: Name of the method to call
+ :param notify: Notification flag
+ :param config: Request configuration
+ """
+ self.method = method
+ self.params = []
+ self.notify = notify
+ self._config = config
+
+ def __call__(self, *args, **kwargs):
+ """
+ Normalizes call parameters
+ """
+ if kwargs and args:
+ raise ProtocolError('JSON-RPC does not support both ' +
+ 'positional and keyword arguments.')
+ if kwargs:
+ self.params = kwargs
+ else:
+ self.params = args
+
+ def request(self, encoding=None, rpcid=None):
+ """
+        Returns the request object as a JSON-formatted string
+ """
+ return dumps(self.params, self.method, version=2.0,
+ encoding=encoding, rpcid=rpcid, notify=self.notify,
+ config=self._config)
+
+ def __repr__(self):
+ """
+ String representation
+ """
+ return str(self.request())
+
+ def __getattr__(self, method):
+ """
+ Updates the object for a nested call
+ """
+ self.method = "{0}.{1}".format(self.method, method)
+ return self
+
+
+class MultiCallNotify(object):
+ """
+ Same as MultiCallMethod but for notifications
+ """
+ def __init__(self, multicall, config=jsonrpclib.config.DEFAULT):
+ """
+        Sets up the store
+
+ :param multicall: The parent MultiCall instance
+ :param config: Request configuration
+ """
+ self.multicall = multicall
+ self._config = config
+
+ def __getattr__(self, name):
+ """
+ Returns the MultiCallMethod to use as a notification
+ """
+ new_job = MultiCallMethod(name, notify=True, config=self._config)
+ self.multicall._job_list.append(new_job)
+ return new_job
+
+
+class MultiCallIterator(object):
+ """
+ Iterates over the results of a MultiCall.
+ Exceptions are raised in response to JSON-RPC faults
+ """
+ def __init__(self, results):
+ """
+ Sets up the results store
+ """
+ self.results = results
+
+ def __get_result(self, item):
+ """
+ Checks for error and returns the "real" result stored in a MultiCall
+ result.
+ """
+ check_for_errors(item)
+ return item['result']
+
+ def __iter__(self):
+ """
+ Iterates over all results
+ """
+ for item in self.results:
+ yield self.__get_result(item)
+
+ def __getitem__(self, i):
+ """
+ Returns the i-th object of the results
+ """
+ return self.__get_result(self.results[i])
+
+ def __len__(self):
+ """
+ Returns the number of results stored
+ """
+ return len(self.results)
+
+
+class MultiCall(object):
+ """
+    server -> an object used to boxcar method calls, where server should be a
+ ServerProxy object.
+
+ Methods can be added to the MultiCall using normal
+ method call syntax e.g.:
+
+ multicall = MultiCall(server_proxy)
+ multicall.add(2,3)
+ multicall.get_address("Guido")
+
+ To execute the multicall, call the MultiCall object e.g.:
+
+ add_result, address = multicall()
+ """
+ def __init__(self, server, config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the multicall
+
+ :param server: A ServerProxy object
+ :param config: Request configuration
+ """
+ self._server = server
+ self._job_list = []
+ self._config = config
+
+ def _request(self):
+ """
+ Sends the request to the server and returns the responses
+
+ :return: A MultiCallIterator object
+ """
+ if len(self._job_list) < 1:
+ # Should we alert? This /is/ pretty obvious.
+ return
+ request_body = "[ {0} ]".format(
+ ','.join(job.request() for job in self._job_list))
+ responses = self._server._run_request(request_body)
+ del self._job_list[:]
+ if not responses:
+ responses = []
+ return MultiCallIterator(responses)
+
+ @property
+ def _notify(self):
+ """
+ Prepares a notification call
+ """
+ return MultiCallNotify(self, self._config)
+
+ def __getattr__(self, name):
+ """
+ Registers a method call
+ """
+ new_job = MultiCallMethod(name, config=self._config)
+ self._job_list.append(new_job)
+ return new_job
+
+ __call__ = _request
+
+# These lines conform to xmlrpclib's "compatibility" line.
+# Not really sure if we should include these, but oh well.
+Server = ServerProxy
+
+# ------------------------------------------------------------------------------
+
+
+class Fault(object):
+ """
+ JSON-RPC error class
+ """
+ def __init__(self, code=-32000, message='Server error', rpcid=None,
+ config=jsonrpclib.config.DEFAULT, data=None):
+ """
+ Sets up the error description
+
+ :param code: Fault code
+ :param message: Associated message
+ :param rpcid: Request ID
+ :param config: A JSONRPClib Config instance
+ :param data: Extra information added to an error description
+ """
+ self.faultCode = code
+ self.faultString = message
+ self.rpcid = rpcid
+ self.config = config
+ self.data = data
+
+ def error(self):
+ """
+ Returns the error as a dictionary
+
+        :return: A {'code', 'message', 'data'} dictionary
+ """
+ return {'code': self.faultCode, 'message': self.faultString,
+ 'data': self.data}
+
+ def response(self, rpcid=None, version=None):
+ """
+ Returns the error as a JSON-RPC response string
+
+ :param rpcid: Forced request ID
+ :param version: JSON-RPC version
+ :return: A JSON-RPC response string
+ """
+ if not version:
+ version = self.config.version
+
+ if rpcid:
+ self.rpcid = rpcid
+
+ return dumps(self, methodresponse=True, rpcid=self.rpcid,
+ version=version, config=self.config)
+
+ def dump(self, rpcid=None, version=None):
+ """
+ Returns the error as a JSON-RPC response dictionary
+
+ :param rpcid: Forced request ID
+ :param version: JSON-RPC version
+ :return: A JSON-RPC response dictionary
+ """
+ if not version:
+ version = self.config.version
+
+ if rpcid:
+ self.rpcid = rpcid
+
+ return dump(self, is_response=True, rpcid=self.rpcid,
+ version=version, config=self.config)
+
+ def __repr__(self):
+ """
+ String representation
+ """
+ return '<Fault {0}: {1}>'.format(self.faultCode, self.faultString)
+
+
+class Payload(object):
+ """
+ JSON-RPC content handler
+ """
+ def __init__(self, rpcid=None, version=None,
+ config=jsonrpclib.config.DEFAULT):
+ """
+ Sets up the JSON-RPC handler
+
+ :param rpcid: Request ID
+ :param version: JSON-RPC version
+ :param config: A JSONRPClib Config instance
+ """
+ if not version:
+ version = config.version
+
+ self.id = rpcid
+ self.version = float(version)
+
+ def request(self, method, params=None):
+ """
+ Prepares a method call request
+
+ :param method: Method name
+ :param params: Method parameters
+ :return: A JSON-RPC request dictionary
+ """
+ if not isinstance(method, utils.string_types):
+ raise ValueError('Method name must be a string.')
+
+ if not self.id:
+ # Generate a request ID
+ self.id = str(uuid.uuid4())
+
+ request = {'id': self.id, 'method': method}
+ if params or self.version < 1.1:
+ request['params'] = params or []
+
+ if self.version >= 2:
+ request['jsonrpc'] = str(self.version)
+
+ return request
+
+ def notify(self, method, params=None):
+ """
+ Prepares a notification request
+
+ :param method: Notification name
+ :param params: Notification parameters
+ :return: A JSON-RPC notification dictionary
+ """
+ # Prepare the request dictionary
+ request = self.request(method, params)
+
+ # Remove the request ID, as it's a notification
+ if self.version >= 2:
+ del request['id']
+ else:
+ request['id'] = None
+
+ return request
+
+ def response(self, result=None):
+ """
+ Prepares a response dictionary
+
+ :param result: The result of method call
+ :return: A JSON-RPC response dictionary
+ """
+ response = {'result': result, 'id': self.id}
+
+ if self.version >= 2:
+ response['jsonrpc'] = str(self.version)
+ else:
+ response['error'] = None
+
+ return response
+
+ def error(self, code=-32000, message='Server error.', data=None):
+ """
+ Prepares an error dictionary
+
+ :param code: Error code
+        :param message: Error message
+        :param data: Extra information added to the error (optional)
+        :return: A JSON-RPC error dictionary
+ """
+ error = self.response()
+ if self.version >= 2:
+ del error['result']
+ else:
+ error['result'] = None
+ error['error'] = {'code': code, 'message': message}
+ if data is not None:
+ error['error']['data'] = data
+ return error
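+
+# A sketch of the dictionaries produced by Payload, kept as a comment (the
+# request ID shown is illustrative; a random UUID is generated when no rpcid
+# is given):
+#
+#     payload = Payload(rpcid='1', version=2.0)
+#     payload.request('add', [5, 6])
+#     # -> {'id': '1', 'method': 'add', 'params': [5, 6], 'jsonrpc': '2.0'}
+#     payload.notify('log', ['hello'])
+#     # -> {'method': 'log', 'params': ['hello'], 'jsonrpc': '2.0'}
+#     payload.response(11)
+#     # -> {'result': 11, 'id': '1', 'jsonrpc': '2.0'}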
+
+# ------------------------------------------------------------------------------
+
+
+def dump(params=None, methodname=None, rpcid=None, version=None,
+ is_response=None, is_notify=None, config=jsonrpclib.config.DEFAULT):
+ """
+ Prepares a JSON-RPC dictionary (request, notification, response or error)
+
+ :param params: Method parameters (if a method name is given) or a Fault
+ :param methodname: Method name
+ :param rpcid: Request ID
+ :param version: JSON-RPC version
+ :param is_response: If True, this is a response dictionary
+ :param is_notify: If True, this is a notification request
+ :param config: A JSONRPClib Config instance
+ :return: A JSON-RPC dictionary
+ """
+ # Default version
+ if not version:
+ version = config.version
+
+ if not is_response and params is None:
+ params = []
+
+ # Validate method name and parameters
+ valid_params = [utils.TupleType, utils.ListType, utils.DictType, Fault]
+ if is_response:
+ valid_params.append(type(None))
+
+ if isinstance(methodname, utils.string_types) and \
+ not isinstance(params, tuple(valid_params)):
+        # There is a method name, but params are neither list-like, a dict,
+        # nor a Fault: error out
+ raise TypeError("Params must be a dict, list, tuple "
+ "or Fault instance.")
+
+ # Prepares the JSON-RPC content
+ payload = Payload(rpcid=rpcid, version=version)
+
+ if isinstance(params, Fault):
+ # Prepare an error dictionary
+ # pylint: disable=E1103
+ return payload.error(params.faultCode, params.faultString, params.data)
+
+ if not isinstance(methodname, utils.string_types) and not is_response:
+ # Neither a request nor a response
+ raise ValueError('Method name must be a string, or is_response '
+ 'must be set to True.')
+
+ if config.use_jsonclass:
+ # Use jsonclass to convert the parameters
+ params = jsonclass.dump(params, config=config)
+
+ if is_response:
+ # Prepare a response dictionary
+ if rpcid is None:
+ # A response must have a request ID
+ raise ValueError('A method response must have an rpcid.')
+ return payload.response(params)
+
+ if is_notify:
+ # Prepare a notification dictionary
+ return payload.notify(methodname, params)
+ else:
+ # Prepare a method call dictionary
+ return payload.request(methodname, params)
+
+
+def dumps(params=None, methodname=None, methodresponse=None,
+ encoding=None, rpcid=None, version=None, notify=None,
+ config=jsonrpclib.config.DEFAULT):
+ """
+ Prepares a JSON-RPC request/response string
+
+ :param params: Method parameters (if a method name is given) or a Fault
+ :param methodname: Method name
+ :param methodresponse: If True, this is a response dictionary
+ :param encoding: Result string encoding
+ :param rpcid: Request ID
+ :param version: JSON-RPC version
+ :param notify: If True, this is a notification request
+ :param config: A JSONRPClib Config instance
+ :return: A JSON-RPC dictionary
+ """
+ # Prepare the dictionary
+ request = dump(params, methodname, rpcid, version, methodresponse, notify,
+ config)
+
+ # Returns it as a JSON string
+ return jdumps(request, encoding=encoding or "UTF-8")
+
+
+def load(data, config=jsonrpclib.config.DEFAULT):
+ """
+ Loads a JSON-RPC request/response dictionary. Calls jsonclass to load beans
+
+ :param data: A JSON-RPC dictionary
+ :param config: A JSONRPClib Config instance (or None for default values)
+ :return: A parsed dictionary or None
+ """
+ if data is None:
+ # Notification
+ return None
+
+ # if the above raises an error, the implementing server code
+ # should return something like the following:
+ # { 'jsonrpc':'2.0', 'error': fault.error(), id: None }
+ if config.use_jsonclass:
+ # Convert beans
+ data = jsonclass.load(data, config.classes)
+
+ return data
+
+
+def loads(data, config=jsonrpclib.config.DEFAULT):
+ """
+ Loads a JSON-RPC request/response string. Calls jsonclass to load beans
+
+ :param data: A JSON-RPC string
+ :param config: A JSONRPClib Config instance (or None for default values)
+ :return: A parsed dictionary or None
+ """
+ if data == '':
+ # Notification
+ return None
+
+ # Parse the JSON dictionary
+ result = jloads(data)
+
+ # Load the beans
+ return load(result, config)
+
+# ------------------------------------------------------------------------------
+
+
+def check_for_errors(result):
+ """
+ Checks if a result dictionary signals an error
+
+ :param result: A result dictionary
+ :raise TypeError: Invalid parameter
+ :raise NotImplementedError: Unknown JSON-RPC version
+ :raise ValueError: Invalid dictionary content
+ :raise ProtocolError: An error occurred on the server side
+ :return: The result parameter
+ """
+ if not result:
+ # Notification
+ return result
+
+ if not isinstance(result, utils.DictType):
+ # Invalid argument
+ raise TypeError('Response is not a dict.')
+
+ if 'jsonrpc' in result and float(result['jsonrpc']) > 2.0:
+ # Unknown JSON-RPC version
+ raise NotImplementedError('JSON-RPC version not yet supported.')
+
+ if 'result' not in result and 'error' not in result:
+ # Invalid dictionary content
+ raise ValueError('Response does not have a result or error key.')
+
+ if 'error' in result and result['error']:
+ # Server-side error
+ if 'code' in result['error']:
+ # Code + Message
+ code = result['error']['code']
+ try:
+ # Get the message (jsonrpclib)
+ message = result['error']['message']
+ except KeyError:
+ # Get the trace (jabsorb)
+ message = result['error'].get('trace', '<no error message>')
+
+ if -32700 <= code <= -32000:
+ # Pre-defined errors
+ # See http://www.jsonrpc.org/specification#error_object
+ raise ProtocolError((code, message))
+ else:
+ # Application error
+ data = result['error'].get('data', None)
+ raise AppError((code, message, data))
+
+ elif isinstance(result['error'], dict) and len(result['error']) == 1:
+ # Error with a single entry ('reason', ...): use its content
+            # next(iter(...)) also works on Python 3, where keys() is a view
+            error_key = next(iter(result['error']))
+ raise ProtocolError(result['error'][error_key])
+
+ else:
+ # Use the raw error content
+ raise ProtocolError(result['error'])
+
+ return result
+
+
+def isbatch(request):
+ """
+ Tests if the given request is a batch call, i.e. a list of multiple calls
+ :param request: a JSON-RPC request object
+ :return: True if the request is a batch call
+ """
+ if not isinstance(request, (utils.ListType, utils.TupleType)):
+ # Not a list: not a batch call
+ return False
+ elif len(request) < 1:
+        # An empty list: not a batch call
+ return False
+ elif not isinstance(request[0], utils.DictType):
+        # The first entry is not a dictionary (JSON object),
+        # therefore this is not a valid JSON-RPC request
+ return False
+ elif 'jsonrpc' not in request[0].keys():
+ # No "jsonrpc" version in the JSON object: not a request
+ return False
+
+ try:
+ version = float(request[0]['jsonrpc'])
+ except ValueError:
+ # Bad version of JSON-RPC
+ raise ProtocolError('"jsonrpc" key must be a float(able) value.')
+
+ if version < 2:
+        # Batch calls were not supported before JSON-RPC 2.0
+ return False
+
+ return True
+
+
+def isnotification(request):
+ """
+ Tests if the given request is a notification
+
+ :param request: A request dictionary
+ :return: True if the request is a notification
+ """
+ if 'id' not in request:
+ # 2.0 notification
+ return True
+
+ if request['id'] is None:
+ # 1.0 notification
+ return True
+
+ return False
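+
+# A string-level sketch, kept as a comment (the method name and values are
+# only illustrative):
+#
+#     request_str = dumps([5, 6], 'add', rpcid='1', version=2.0)
+#     # the server would answer with something like:
+#     response_str = '{"jsonrpc": "2.0", "result": 11, "id": "1"}'
+#     result = check_for_errors(loads(response_str))['result']
+#     # result == 11; a response carrying an 'error' member would raise
+#     # ProtocolError or AppError instead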
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py
new file mode 100755
index 00000000..3919c105
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py
@@ -0,0 +1,490 @@
+#!/usr/bin/env python
+# -- Content-Encoding: UTF-8 --
+"""
+Cached thread pool, inspired from Pelix/iPOPO Thread Pool
+
+:author: Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# ------------------------------------------------------------------------------
+
+# Standard library
+import logging
+import threading
+
+try:
+ # Python 3
+ # pylint: disable=F0401
+ import queue
+except ImportError:
+ # Python 2
+ # pylint: disable=F0401
+ import Queue as queue
+
+# ------------------------------------------------------------------------------
+
+
+class EventData(object):
+ """
+ A threading event with some associated data
+ """
+ def __init__(self):
+ """
+ Sets up the event
+ """
+ self.__event = threading.Event()
+ self.__data = None
+ self.__exception = None
+
+ @property
+ def data(self):
+ """
+ Returns the associated value
+ """
+ return self.__data
+
+ @property
+ def exception(self):
+ """
+ Returns the exception used to stop the wait() method
+ """
+ return self.__exception
+
+ def clear(self):
+ """
+ Clears the event
+ """
+ self.__event.clear()
+ self.__data = None
+ self.__exception = None
+
+ def is_set(self):
+ """
+ Checks if the event is set
+ """
+ return self.__event.is_set()
+
+ def set(self, data=None):
+ """
+ Sets the event
+ """
+ self.__data = data
+ self.__exception = None
+ self.__event.set()
+
+ def raise_exception(self, exception):
+ """
+ Raises an exception in wait()
+
+ :param exception: An Exception object
+ """
+ self.__data = None
+ self.__exception = exception
+ self.__event.set()
+
+ def wait(self, timeout=None):
+ """
+ Waits for the event or for the timeout
+
+ :param timeout: Wait timeout (in seconds)
+ :return: True if the event has been set, else False
+ """
+ # The 'or' part is for Python 2.6
+ result = self.__event.wait(timeout) or self.__event.is_set()
+ # pylint: disable=E0702
+ # Pylint seems to miss the "is None" check below
+ if self.__exception is None:
+ return result
+ else:
+ raise self.__exception
+
+
+class FutureResult(object):
+ """
+ An object to wait for the result of a threaded execution
+ """
+ def __init__(self, logger=None):
+ """
+ Sets up the FutureResult object
+
+ :param logger: The Logger to use in case of error (optional)
+ """
+ self._logger = logger or logging.getLogger(__name__)
+ self._done_event = EventData()
+ self.__callback = None
+ self.__extra = None
+
+ def __notify(self):
+ """
+ Notify the given callback about the result of the execution
+ """
+ if self.__callback is not None:
+ try:
+ self.__callback(self._done_event.data,
+ self._done_event.exception,
+ self.__extra)
+ except Exception as ex:
+ self._logger.exception("Error calling back method: %s", ex)
+
+ def set_callback(self, method, extra=None):
+ """
+ Sets a callback method, called once the result has been computed or in
+ case of exception.
+
+ The callback method must have the following signature:
+ ``callback(result, exception, extra)``.
+
+ :param method: The method to call back in the end of the execution
+ :param extra: Extra parameter to be given to the callback method
+ """
+ self.__callback = method
+ self.__extra = extra
+ if self._done_event.is_set():
+ # The execution has already finished
+ self.__notify()
+
+ def execute(self, method, args, kwargs):
+ """
+ Executes the given method and stores its result.
+ The result is considered "done" even if the method raises an exception.
+
+ :param method: The method to execute
+ :param args: Method positional arguments
+ :param kwargs: Method keyword arguments
+ :raise Exception: The exception raised by the method
+ """
+ # Normalize arguments
+ if args is None:
+ args = []
+
+ if kwargs is None:
+ kwargs = {}
+
+ try:
+ # Call the method
+ result = method(*args, **kwargs)
+ except Exception as ex:
+ # Something went wrong: propagate to the event and to the caller
+ self._done_event.raise_exception(ex)
+ raise
+ else:
+ # Store the result
+ self._done_event.set(result)
+ finally:
+ # In any case: notify the callback (if any)
+ self.__notify()
+
+ def done(self):
+ """
+ Returns True if the job has finished, else False
+ """
+ return self._done_event.is_set()
+
+ def result(self, timeout=None):
+ """
+ Waits up to timeout for the result of the threaded job.
+ Returns the result immediately if the job has already finished.
+
+ :param timeout: The maximum time to wait for a result (in seconds)
+ :raise OSError: The timeout was reached before the job finished
+ :raise Exception: The exception encountered during the call, if any
+ """
+ if self._done_event.wait(timeout):
+ return self._done_event.data
+ else:
+ raise OSError("Timeout raised")
+
+# ------------------------------------------------------------------------------
+
+
+class ThreadPool(object):
+ """
+ Executes the tasks stored in a FIFO in a thread pool
+ """
+ def __init__(self, max_threads, min_threads=1, queue_size=0, timeout=60,
+ logname=None):
+ """
+ Sets up the thread pool.
+
+ Threads are kept alive 60 seconds (timeout argument).
+
+ :param max_threads: Maximum size of the thread pool
+ :param min_threads: Minimum size of the thread pool
+ :param queue_size: Size of the task queue (0 for infinite)
+ :param timeout: Queue timeout (in seconds, 60s by default)
+ :param logname: Name of the logger
+ :raise ValueError: Invalid number of threads
+ """
+ # Validate parameters
+ try:
+ max_threads = int(max_threads)
+ if max_threads < 1:
+ raise ValueError("Pool size must be greater than 0")
+ except (TypeError, ValueError) as ex:
+ raise ValueError("Invalid pool size: {0}".format(ex))
+
+ try:
+ min_threads = int(min_threads)
+ if min_threads < 0:
+ min_threads = 0
+ elif min_threads > max_threads:
+ min_threads = max_threads
+ except (TypeError, ValueError) as ex:
+ raise ValueError("Invalid pool size: {0}".format(ex))
+
+ # The logger
+ self._logger = logging.getLogger(logname or __name__)
+
+ # The loop control event
+ self._done_event = threading.Event()
+ self._done_event.set()
+
+ # The task queue
+ try:
+ queue_size = int(queue_size)
+ except (TypeError, ValueError):
+ # Not a valid integer
+ queue_size = 0
+
+ self._queue = queue.Queue(queue_size)
+ self._timeout = timeout
+ self.__lock = threading.RLock()
+
+ # The thread pool
+ self._min_threads = min_threads
+ self._max_threads = max_threads
+ self._threads = []
+
+ # Thread count
+ self._thread_id = 0
+
+ # Current number of threads, active and alive
+ self.__nb_threads = 0
+ self.__nb_active_threads = 0
+
+ def start(self):
+ """
+ Starts the thread pool. Does nothing if the pool is already started.
+ """
+ if not self._done_event.is_set():
+ # Stop event not set: we're running
+ return
+
+ # Clear the stop event
+ self._done_event.clear()
+
+ # Compute the number of threads to start to handle pending tasks
+ nb_pending_tasks = self._queue.qsize()
+ if nb_pending_tasks > self._max_threads:
+ nb_threads = self._max_threads
+ elif nb_pending_tasks < self._min_threads:
+ nb_threads = self._min_threads
+ else:
+ nb_threads = nb_pending_tasks
+
+ # Create the threads
+ for _ in range(nb_threads):
+ self.__start_thread()
+
+ def __start_thread(self):
+ """
+ Starts a new thread, if possible
+ """
+ with self.__lock:
+ if self.__nb_threads >= self._max_threads:
+ # Can't create more threads
+ return False
+
+ if self._done_event.is_set():
+ # We're stopped: do nothing
+ return False
+
+ # Prepare thread and start it
+ name = "{0}-{1}".format(self._logger.name, self._thread_id)
+ self._thread_id += 1
+
+ thread = threading.Thread(target=self.__run, name=name)
+ thread.daemon = True
+ self._threads.append(thread)
+ thread.start()
+ return True
+
+ def stop(self):
+ """
+ Stops the thread pool. Does nothing if the pool is already stopped.
+ """
+ if self._done_event.is_set():
+ # Stop event set: we're stopped
+ return
+
+ # Set the stop event
+ self._done_event.set()
+
+ with self.__lock:
+ # Add one stop marker per thread, so blocked get() calls return and join() can complete
+ try:
+ for _ in self._threads:
+ self._queue.put(self._done_event, True, self._timeout)
+ except queue.Full:
+ # There is already something in the queue
+ pass
+
+ # Copy the list of threads to wait for
+ threads = self._threads[:]
+
+ # Join threads outside the lock
+ for thread in threads:
+ while thread.is_alive():
+ # Wait 3 seconds
+ thread.join(3)
+ if thread.is_alive():
+ # Thread is still alive: something might be wrong
+ self._logger.warning("Thread %s is still alive...",
+ thread.name)
+
+ # Clear storage
+ del self._threads[:]
+ self.clear()
+
+ def enqueue(self, method, *args, **kwargs):
+ """
+ Queues a task in the pool
+
+ :param method: Method to call
+ :return: A FutureResult object, to get the result of the task
+ :raise ValueError: Invalid method
+ :raise Full: The task queue is full
+ """
+ if not hasattr(method, '__call__'):
+ raise ValueError("{0} has no __call__ member."
+ .format(method.__name__))
+
+ # Prepare the future result object
+ future = FutureResult(self._logger)
+
+ # Use a lock, as we might be "resetting" the queue
+ with self.__lock:
+ # Add the task to the queue
+ self._queue.put((method, args, kwargs, future), True,
+ self._timeout)
+
+ if self.__nb_active_threads == self.__nb_threads:
+ # All threads are taken: start a new one
+ self.__start_thread()
+
+ return future
+
+ def clear(self):
+ """
+ Empties the current queue content.
+ Returns once the queue has been emptied.
+ """
+ with self.__lock:
+ # Empty the current queue
+ try:
+ while True:
+ self._queue.get_nowait()
+ self._queue.task_done()
+ except queue.Empty:
+ # Queue is now empty
+ pass
+
+ # Wait for the tasks currently executed
+ self.join()
+
+ def join(self, timeout=None):
+ """
+ Waits for all the tasks to be executed
+
+ :param timeout: Maximum time to wait (in seconds)
+ :return: True if the queue has been emptied, else False
+ """
+ if self._queue.empty():
+ # Nothing to wait for...
+ return True
+ elif timeout is None:
+ # Use the original join
+ self._queue.join()
+ return True
+ else:
+ # Wait for the condition
+ with self._queue.all_tasks_done:
+ self._queue.all_tasks_done.wait(timeout)
+ return not bool(self._queue.unfinished_tasks)
+
+ def __run(self):
+ """
+ The main loop
+ """
+ with self.__lock:
+ self.__nb_threads += 1
+
+ while not self._done_event.is_set():
+ try:
+ # Wait for an action (blocking)
+ task = self._queue.get(True, self._timeout)
+ if task is self._done_event:
+ # Stop event in the queue: get out
+ self._queue.task_done()
+ with self.__lock:
+ self.__nb_threads -= 1
+ return
+ except queue.Empty:
+ # Nothing to do yet
+ pass
+ else:
+ with self.__lock:
+ self.__nb_active_threads += 1
+
+ # Extract elements
+ method, args, kwargs, future = task
+ try:
+ # Call the method
+ future.execute(method, args, kwargs)
+ except Exception as ex:
+ self._logger.exception("Error executing %s: %s",
+ method.__name__, ex)
+ finally:
+ # Mark the action as executed
+ self._queue.task_done()
+
+ # Thread is not active anymore
+ self.__nb_active_threads -= 1
+
+ # Clean up thread if necessary
+ with self.__lock:
+ if self.__nb_threads > self._min_threads:
+ # No more work for this thread, and we're above the
+ # minimum number of threads: stop this one
+ self.__nb_threads -= 1
+ return
+
+ with self.__lock:
+ # Thread stops
+ self.__nb_threads -= 1
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py
new file mode 100755
index 00000000..01b71fce
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+# -- Content-Encoding: UTF-8 --
+"""
+Utility methods, for compatibility between Python versions
+
+:author: Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+import sys
+
+# ------------------------------------------------------------------------------
+
+if sys.version_info[0] < 3:
+ # Python 2
+ import types
+ try:
+ string_types = (
+ types.StringType,
+ types.UnicodeType
+ )
+ except AttributeError:
+ # Python built without unicode support
+ string_types = (types.StringType,)
+
+ numeric_types = (
+ types.IntType,
+ types.LongType,
+ types.FloatType
+ )
+
+ def to_bytes(string):
+ """
+ Converts the given string into bytes
+ """
+ if type(string) is unicode:
+ return str(string)
+ return string
+
+ def from_bytes(data):
+ """
+ Converts the given bytes into a string
+ """
+ if type(data) is str:
+ return data
+ return str(data)
+
+else:
+ # Python 3
+ string_types = (
+ bytes,
+ str
+ )
+
+ numeric_types = (
+ int,
+ float
+ )
+
+ def to_bytes(string):
+ """
+ Converts the given string into bytes
+ """
+ if type(string) is bytes:
+ return string
+ return bytes(string, "UTF-8")
+
+ def from_bytes(data):
+ """
+ Converts the given bytes into a string
+ """
+ if type(data) is str:
+ return data
+ return str(data, "UTF-8")
+
+# ------------------------------------------------------------------------------
+# Common
+
+DictType = dict
+
+ListType = list
+TupleType = tuple
+
+iterable_types = (
+ list,
+ set, frozenset,
+ tuple
+)
+
+value_types = (
+ bool,
+ type(None)
+)
+
+primitive_types = string_types + numeric_types + value_types
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO
new file mode 100755
index 00000000..9d0f3fca
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO
@@ -0,0 +1,460 @@
+Metadata-Version: 1.1
+Name: jsonrpclib-pelix
+Version: 0.2.5
+Summary: This project is an implementation of the JSON-RPC v2.0 specification (backwards-compatible) as a client library, for Python 2.6+ and Python 3. This version is a fork of jsonrpclib by Josh Marshall, usable with Pelix remote services.
+Home-page: http://github.com/tcalmant/jsonrpclib/
+Author: Thomas Calmant
+Author-email: thomas.calmant+github@gmail.com
+License: Apache License 2.0
+Description: JSONRPClib (patched for Pelix)
+ ##############################
+
+ .. image:: https://pypip.in/license/jsonrpclib-pelix/badge.svg
+ :target: https://pypi.python.org/pypi/jsonrpclib-pelix/
+
+ .. image:: https://travis-ci.org/tcalmant/jsonrpclib.svg?branch=master
+ :target: https://travis-ci.org/tcalmant/jsonrpclib
+
+ .. image:: https://coveralls.io/repos/tcalmant/jsonrpclib/badge.svg?branch=master
+ :target: https://coveralls.io/r/tcalmant/jsonrpclib?branch=master
+
+
+ This library is an implementation of the JSON-RPC specification.
+ It supports both the original 1.0 specification, as well as the
+ new (proposed) 2.0 specification, which includes batch submission, keyword
+ arguments, etc.
+
+ It is licensed under the Apache License, Version 2.0
+ (http://www.apache.org/licenses/LICENSE-2.0.html).
+
+
+ About this version
+ ******************
+
+ This is a patched version of the original ``jsonrpclib`` project by
+ Josh Marshall, available at https://github.com/joshmarshall/jsonrpclib.
+
+ The suffix *-pelix* only indicates that this version works with Pelix Remote
+ Services, but it is **not** a Pelix specific implementation.
+
+ * This version adds support for Python 3, staying compatible with Python 2.
+ * It is now possible to use the dispatch_method argument while extending
+ the SimpleJSONRPCDispatcher, to use a custom dispatcher.
+ This allows this package to be used by Pelix Remote Services.
+ * It can use thread pools to control the number of threads spawned to handle
+ notification requests and client connections.
+ * The modifications added in other forks of this project have been added:
+
+ * From https://github.com/drdaeman/jsonrpclib:
+
+ * Improved JSON-RPC 1.0 support
+ * Less strict error response handling
+
+ * From https://github.com/tuomassalo/jsonrpclib:
+
+ * In case of a non-pre-defined error, raise an AppError and give access to
+ *error.data*
+
+ * From https://github.com/dejw/jsonrpclib:
+
+ * Custom headers can be sent with request and associated tests
+
+ * The support for Unix sockets has been removed, as it is not trivial to convert
+ to Python 3 (and I don't use them)
+ * This version cannot be installed with the original ``jsonrpclib``, as it uses
+ the same package name.
+
+
+ Summary
+ *******
+
+ This library implements the JSON-RPC 2.0 proposed specification in pure Python.
+ It is designed to be as compatible with the syntax of ``xmlrpclib`` as possible
+ (it extends where possible), so that projects using ``xmlrpclib`` could easily
+ be modified to use JSON and experiment with the differences.
+
+ It is backwards-compatible with the 1.0 specification, and supports all of the
+ new proposed features of 2.0, including:
+
+ * Batch submission (via MultiCall)
+ * Keyword arguments
+ * Notifications (both in a batch and 'normal')
+ * Class translation using the ``__jsonclass__`` key.
+
+ I've added a "SimpleJSONRPCServer", which is intended to emulate the
+ "SimpleXMLRPCServer" from the default Python distribution.
+
+
+ Requirements
+ ************
+
+ It supports ``cjson`` and ``simplejson``, and looks for the parsers in that
+ order (searching first for ``cjson``, then for the *built-in* ``json`` in 2.6+,
+ and then the ``simplejson`` external library).
+ One of these must be installed to use this library, although if you have a
+ standard distribution of 2.6+, you should already have one.
+ Keep in mind that ``cjson`` is supposed to be the quickest, I believe, so if
+ you are going for full-on optimization you may want to pick it up.
+
+ Since the library uses the ``contextlib`` module, you should have at least
+ Python 2.5 installed.
+
+
+ Installation
+ ************
+
+ You can install this from PyPI with one of the following commands (sudo
+ may be required):
+
+ .. code-block:: console
+
+ easy_install jsonrpclib-pelix
+ pip install jsonrpclib-pelix
+
+ Alternatively, you can download the source from the GitHub repository
+ at http://github.com/tcalmant/jsonrpclib and manually install it
+ with the following commands:
+
+ .. code-block:: console
+
+ git clone git://github.com/tcalmant/jsonrpclib.git
+ cd jsonrpclib
+ python setup.py install
+
+
+ SimpleJSONRPCServer
+ *******************
+
+ This is identical in usage (or should be) to the SimpleXMLRPCServer in the
+ Python standard library. The main differences are that it also supports
+ notifications, batch calls, class translation (if left on), etc.
+ Note: The import line is slightly different from the regular SimpleXMLRPCServer,
+ since the SimpleJSONRPCServer is distributed within the ``jsonrpclib`` library.
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ server = SimpleJSONRPCServer(('localhost', 8080))
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+ server.serve_forever()
+
+ To protect the server with SSL, use the following snippet:
+
+ .. code-block:: python
+
+ import ssl
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+
+ # Setup the SSL socket
+ server = SimpleJSONRPCServer(('localhost', 8080), bind_and_activate=False)
+ server.socket = ssl.wrap_socket(server.socket, certfile='server.pem',
+ server_side=True)
+ server.server_bind()
+ server.server_activate()
+
+ # ... register functions
+ # Start the server
+ server.serve_forever()
+
+
+ Notification Thread Pool
+ ========================
+
+ By default, notification calls are handled in the request handling thread.
+ It is possible to use a thread pool to handle them, by giving it to the server
+ using the ``set_notification_pool()`` method:
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+ # Setup the thread pool: between 0 and 10 threads
+ pool = ThreadPool(max_threads=10, min_threads=0)
+
+ # Don't forget to start it
+ pool.start()
+
+ # Setup the server
+ server = SimpleJSONRPCServer(('localhost', 8080), config)
+ server.set_notification_pool(pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pool (let threads finish their current task)
+ pool.stop()
+ server.set_notification_pool(None)
+
+
+ Threaded server
+ ===============
+
+ It is also possible to use a thread pool to handle clients requests, using the
+ ``PooledJSONRPCServer`` class.
+ By default, this class uses a pool of 0 to 30 threads. A custom pool can be given
+ with the ``thread_pool`` parameter of the class constructor.
+
+ The notification pool and the request pool are different: by default, a server
+ with a request pool doesn't have a notification pool.
+
+ .. code-block:: python
+
+ from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
+ from jsonrpclib.threadpool import ThreadPool
+
+ # Setup the notification and request pools
+ notif_pool = ThreadPool(max_threads=10, min_threads=0)
+ request_pool = ThreadPool(max_threads=50, min_threads=10)
+
+ # Don't forget to start them
+ notif_pool.start()
+ request_pool.start()
+
+ # Setup the server
+ server = PooledJSONRPCServer(('localhost', 8080), config,
+ thread_pool=request_pool)
+ server.set_notification_pool(notif_pool)
+
+ # Register methods
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_function(lambda x: x, 'ping')
+
+ try:
+ server.serve_forever()
+ finally:
+ # Stop the thread pools (let threads finish their current task)
+ request_pool.stop()
+ notif_pool.stop()
+ server.set_notification_pool(None)
+
+ Client Usage
+ ************
+
+ This is (obviously) taken from a console session.
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080')
+ >>> server.add(5,6)
+ 11
+ >>> server.add(x=5, y=10)
+ 15
+ >>> server._notify.add(5,6)
+ # No result returned...
+ >>> batch = jsonrpclib.MultiCall(server)
+ >>> batch.add(5, 6)
+ >>> batch.ping({'key':'value'})
+ >>> batch._notify.add(4, 30)
+ >>> results = batch()
+ >>> for result in results:
+ ... print(result)
+ 11
+ {'key': 'value'}
+ # Note that there are only two responses -- this is according to spec.
+
+ # Clean up
+ >>> server('close')()
+
+ # Using client history
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', history=history)
+ >>> server.add(5,6)
+ 11
+ >>> print(history.request)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "method": "add", "params": [5, 6]}
+ >>> print(history.response)
+ {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+ "result": 11}
+
+ # Clean up
+ >>> server('close')()
+
+ If you need 1.0 functionality, there are a bunch of places you can pass that
+ in, although the best way is just to give a specific configuration to
+ ``jsonrpclib.ServerProxy``:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> jsonrpclib.config.DEFAULT.version
+ 2.0
+ >>> config = jsonrpclib.config.Config(version=1.0)
+ >>> history = jsonrpclib.history.History()
+ >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config,
+ history=history)
+ >>> server.add(7, 10)
+ 17
+ >>> print(history.request)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32",
+ "method": "add", "params": [7, 10]}
+ >>> print(history.response)
+ {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", "error": null, "result": 17}
+ >>> server('close')()
+
+ The equivalent ``loads`` and ``dumps`` functions also exist, although with minor
+ modifications. ``dumps`` takes almost the same arguments, but adds three of
+ its own: ``rpcid`` for the 'id' key, ``version`` to specify the JSON-RPC
+ compatibility, and ``notify`` if it's a request that you want to be a
+ notification.
+
+ Additionally, the ``loads`` method does not return the params and method like
+ ``xmlrpclib``, but instead a.) parses for errors, raising ProtocolErrors, and
+ b.) returns the entire structure of the request / response for manual parsing.
+
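+ A rough sketch of how these arguments fit together (the parameter names other
+ than ``rpcid``, ``version`` and ``notify`` mirror the ``xmlrpclib`` conventions
+ and are assumptions here, not a reference):
+
+ .. code-block:: python
+
+ import jsonrpclib
+
+ # A regular 2.0 request for add(5, 6), with an explicit 'id'
+ request = jsonrpclib.dumps((5, 6), methodname='add', rpcid='req-1', version=2.0)
+
+ # The same call as a notification: no 'id' key is expected in the payload
+ notification = jsonrpclib.dumps((5, 6), methodname='add', version=2.0, notify=True)
+
+ # loads() parses a raw response; it raises ProtocolError on error responses
+ # and returns the whole structure for manual inspection
+ parsed = jsonrpclib.loads('{"jsonrpc": "2.0", "id": "req-1", "result": 11}')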
+
+ Additional headers
+ ******************
+
+ If your remote service requires custom headers in requests, you can pass them
+ as a ``headers`` keyword argument when creating the ``ServerProxy``:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.ServerProxy("http://localhost:8080",
+ headers={'X-Test' : 'Test'})
+
+ You can also send additional request headers only for certain method invocations:
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> server = jsonrpclib.Server("http://localhost:8080")
+ >>> with server._additional_headers({'X-Test' : 'Test'}) as test_server:
+ ... test_server.ping(42)
+ ...
+ >>> # The X-Test header will no longer be sent in requests
+
+ Of course ``_additional_headers`` contexts can be nested as well.
+
+
+ Class Translation
+ *****************
+
+ I've recently added "automatic" class translation support, although it is
+ turned off by default. This can be devastatingly slow if improperly used, so
+ the following is just a short list of things to keep in mind when using it.
+
+ * Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
+ * Do not require init params (for exceptions, keep reading)
+ * Getter properties without setters could be dangerous (read: not tested)
+
+ If any of the above are issues, use the _serialize method. (see usage below)
+ The server and client must BOTH have the use_jsonclass configuration item on and
+ they must both have access to the same libraries used by the objects for
+ this to work.
+
+ If you have excessively nested arguments, it would be better to turn off the
+ translation and manually invoke it on specific objects using
+ ``jsonrpclib.jsonclass.dump`` / ``jsonrpclib.jsonclass.load`` (since the default
+ behavior recursively goes through attributes and lists / dicts / tuples).
+
+ Sample file: *test_obj.py*
+
+ .. code-block:: python
+
+ # This object is /very/ simple, and the system will look through the
+ # attributes and serialize what it can.
+ class TestObj(object):
+ foo = 'bar'
+
+ # This object requires __init__ params, so it uses the _serialize method
+ # and returns a tuple of init params and attribute values (the init params
+ # can be a dict or a list, but the attribute values must be a dict.)
+ class TestSerial(object):
+ foo = 'bar'
+ def __init__(self, *args):
+ self.args = args
+ def _serialize(self):
+ return (self.args, {'foo':self.foo,})
+
+ * Sample usage
+
+ .. code-block:: python
+
+ >>> import jsonrpclib
+ >>> import test_obj
+
+ # History is used only to print the serialized form of beans
+ >>> history = jsonrpclib.history.History()
+ >>> testobj1 = test_obj.TestObj()
+ >>> testobj2 = test_obj.TestSerial()
+ >>> server = jsonrpclib.Server('http://localhost:8080', history=history)
+
+ # The 'ping' just returns whatever is sent
+ >>> ping1 = server.ping(testobj1)
+ >>> ping2 = server.ping(testobj2)
+
+ >>> print(history.request)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "method": "ping", "params": [{"__jsonclass__":
+ ["test_obj.TestSerial", []], "foo": "bar"}
+ ]}
+ >>> print(history.response)
+ {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+ "result": {"__jsonclass__": ["test_obj.TestSerial", []], "foo": "bar"}}
+
+ This behavior is turned on by default. To deactivate it, just set the
+ ``use_jsonclass`` member of a server ``Config`` to False.
+ If you want to use a per-class serialization method, set its name in the
+ ``serialize_method`` member of a server ``Config``.
+ Finally, if you are using classes that you have defined in the implementation
+ (as in, not a separate library), you'll need to add those (on BOTH the server
+ and the client) using the ``config.classes.add()`` method.
+
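+ A minimal sketch of the configuration steps described above (the attributes
+ are set on the ``Config`` object after construction purely for illustration;
+ see ``jsonrpclib/config.py`` for the authoritative fields):
+
+ .. code-block:: python
+
+ import jsonrpclib
+ import test_obj
+
+ config = jsonrpclib.config.Config(version=2.0)
+ config.use_jsonclass = True               # keep class translation enabled
+ config.serialize_method = '_serialize'    # per-class serialization hook
+ config.classes.add(test_obj.TestSerial)   # register a locally-defined class
+
+ # The server side needs an equivalent configuration for this to work
+ server = jsonrpclib.ServerProxy('http://localhost:8080', config=config)
+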
+ Feedback on this "feature" is very, VERY much appreciated.
+
+ Why JSON-RPC?
+ *************
+
+ In my opinion, there are several reasons to choose JSON over XML for RPC:
+
+ * Much simpler to read (I suppose this is opinion, but I know I'm right. :)
+ * Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
+ * Parsing - JSON should be much quicker to parse than XML.
+ * Easy class passing with ``jsonclass`` (when enabled)
+
+ In the interest of being fair, there are also a few reasons to choose XML
+ over JSON:
+
+ * Your server doesn't do JSON (rather obvious)
+ * Wider XML-RPC support across APIs (can we change this? :))
+ * Libraries are more established, i.e. more stable (Let's change this too.)
+
+ Tests
+ *****
+
+ Tests are an almost-verbatim drop from the JSON-RPC specification 2.0 page.
+ They can be run using *unittest* or *nosetests*:
+
+ .. code-block:: console
+
+ python -m unittest discover tests
+ python3 -m unittest discover tests
+ nosetests tests
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.0
+Classifier: Programming Language :: Python :: 3.1
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt
new file mode 100755
index 00000000..f5714032
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt
@@ -0,0 +1,17 @@
+LICENSE.txt
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+jsonrpclib/SimpleJSONRPCServer.py
+jsonrpclib/__init__.py
+jsonrpclib/config.py
+jsonrpclib/history.py
+jsonrpclib/jsonclass.py
+jsonrpclib/jsonrpc.py
+jsonrpclib/threadpool.py
+jsonrpclib/utils.py
+jsonrpclib_pelix.egg-info/PKG-INFO
+jsonrpclib_pelix.egg-info/SOURCES.txt
+jsonrpclib_pelix.egg-info/dependency_links.txt
+jsonrpclib_pelix.egg-info/top_level.txt \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt
new file mode 100755
index 00000000..1410b2ff
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt
@@ -0,0 +1 @@
+jsonrpclib
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.cfg b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.cfg
new file mode 100755
index 00000000..26c67942
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.cfg
@@ -0,0 +1,8 @@
+[bdist_wheel]
+universal = 1
+
+[egg_info]
+tag_date = 0
+tag_svn_revision = 0
+tag_build =
+
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.py
new file mode 100755
index 00000000..a64f2fb0
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+# -- Content-Encoding: UTF-8 --
+"""
+Installation script
+
+:authors: Josh Marshall, Thomas Calmant
+:copyright: Copyright 2015, isandlaTech
+:license: Apache License 2.0
+:version: 0.2.5
+
+..
+
+ Copyright 2015 isandlaTech
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+# Module version
+__version_info__ = (0, 2, 5)
+__version__ = ".".join(str(x) for x in __version_info__)
+
+# Documentation strings format
+__docformat__ = "restructuredtext en"
+
+# ------------------------------------------------------------------------------
+
+import sys
+
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+
+# ------------------------------------------------------------------------------
+
+setup(
+ name="jsonrpclib-pelix",
+ version=__version__,
+ license="Apache License 2.0",
+ author="Thomas Calmant",
+ author_email="thomas.calmant+github@gmail.com",
+ url="http://github.com/tcalmant/jsonrpclib/",
+ description=
+ "This project is an implementation of the JSON-RPC v2.0 specification "
+ "(backwards-compatible) as a client library, for Python 2.6+ and Python 3."
+ "This version is a fork of jsonrpclib by Josh Marshall, "
+ "usable with Pelix remote services.",
+ long_description=open("README.rst").read(),
+ packages=["jsonrpclib"],
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.0',
+ 'Programming Language :: Python :: 3.1',
+ 'Programming Language :: Python :: 3.2',
+ 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4'],
+ tests_require=['unittest2'] if sys.version_info < (2, 7) else []
+)
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ACKS b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ACKS
new file mode 100755
index 00000000..44519d17
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ACKS
@@ -0,0 +1,6 @@
+Thanks to the following people for help with lockfile.
+
+ Scott Dial
+ Ben Finney
+ Frank Niessink
+ Konstantin Veretennicov
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/AUTHORS b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/AUTHORS
new file mode 100755
index 00000000..fda721cd
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/AUTHORS
@@ -0,0 +1,5 @@
+(no author) <(no author)>
+Elmo Todurov <elmo.todurov@skype.net>
+Julien Danjou <julien@danjou.info>
+Skip Montanaro <skip@pobox.com>
+skip.montanaro <skip.montanaro@gmail.com>
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ChangeLog b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ChangeLog
new file mode 100755
index 00000000..3ba36a7d
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ChangeLog
@@ -0,0 +1,165 @@
+CHANGES
+=======
+
+0.10.2
+------
+
+* Fix package name
+
+0.10.1
+------
+
+* Add missing cover env in tox
+
+0.10.0
+------
+
+* Fix documentation bug report address
+* Add py34 in tox
+* Remove old diff file
+* Add .gitreview, tox targets and use pbr
+* fix for timeout=0
+* remove 2.5, 3.1 and 3.4 from the list for the time being - may get added back later
+* Bugfix: locking two different files in the same directory caused an error during unlocking the last unlocking
+* typo
+
+0.9.1
+-----
+
+* ignore dist dir
+* update to python 3 imports
+* python 3 tweaks
+* python 3 tweaks
+* ignore Emacs backups
+* note nose as a dependency
+* remove this test file - way incompatible with current code
+* stuff to ignore
+* Add py33, py34, delete py24, py25
+* Update source location
+* merge delete
+* merge delete
+* more merge stuff
+* this didn't come across with svn merge
+* all screwed up now
+* merge
+* merge
+* Make it clear that the path and threaded attributes to SymlinkLockFile and MkdirLockFile have the same constraints as for LinkLockFile. In particular, the directory which will contain path must be writable
+* add pidlockfile test stuff from Ben Finney - still a few problems - maybe I can get him to solve them :-)
+* ignore Sphinx build directory
+* Catch up on a little documentation
+* adapt decorator patch from issue 5
+* Allow timeout in constructor - resolves issue 3
+* add info to raise statements - from issue 6, yyurevich@jellycrystal.com
+* add useful repr() - from issue 6, yyurevich@jellycrystal.com
+* add symlinklockfile module
+* + py24
+* good for the branch? must be good for the trunk
+* add tox stuff, ignore dist dir
+* new version, move to Google Code
+*
+*
+* * Thread support is currently broken. This is more likely because of problems in this module, but suppress those tests for now just the same
+* By the nature of what it's trying to do PIDLockFile doesn't support threaded operation
+* defer creating testdb until we've instantiated a SQLiteLockFile instance
+* tweak unique_name slightly
+* Specify mode in octal
+* update to match pidlockfile change
+* missing import
+* I think I finally have this correct
+* patch pidlockfile module too
+* use abs import here as well
+* *argh*
+* Update to elide new import syntax
+* * Move future import for division where it's used. * Use __absolute_import__ to spell relative imports
+* Some PIDLockFile tests are failing. Check in anyway so others can consider the problems
+* Account for fact that Thread objects in Python 2.4 and earlier do not have an ident attribute
+* Make this a daemon thread so if things go awry the test run won't hang
+* * Add pidlockfile (not quite working properly) * Rearrange MANIFEST.in slightly to include test directory
+* Split those test methods which try both threaded and non-threaded naming schemes. More to do. Obviously you need to have test cases when using the non-threaded naming scheme from multiple threads
+* acknowledge Ben and Frank, alphabetize list
+* I don't think these are needed any longer - they came back during the hg->svn conversion
+* grand renaming: "filelock" -> "lockfile" & "FileLock" -> "LockFile"
+* Update for packages
+* Avoid using the backwards compatibility functions for FileLock. That object is not deprecated
+* how does the test dir keep sneaking into MANIFEST? also, include 2.4.diff in dist
+* update for new structure, use of ident attr
+* adjust build setup
+* move test helpers into test dir
+* first cut at packagized lockfile
+* Protect some more complex locking stuff so if they fail we don't deadlock
+* merge r75 from head
+* * One implementation of tname, not two - make it an instance attribute as a result
+* beginnings of a packagized lockfile
+* get the structure right
+* start over with the branches..
+* hmmm
+* hmmm
+* get us back to lockfile 0.8
+* r72 from hg
+* r70 from hg
+* r69 from hg
+* r68 from hg
+* r67 from hg
+* r66 from hg
+* r65 from hg
+* r64 from hg
+* r64 from hg
+* r63 from hg
+* r62 from hg
+* r61 from hg
+* r60 from hg
+* r59 from hg
+* r58 from hg
+* r57 from hg
+* r56 from hg
+* r55 from hg
+* r54 from hg
+* r53 from hg
+* r52 from hg
+* r51 from hg
+* r50 from hg
+* r49 from hg
+* r47 from hg
+* r46 from hg
+* r45 from hg
+* r44 from hg
+* r43 from hg
+* r42 from hg
+* r41 from hg
+* r38 from hg
+* r37 from hg
+* r36 from hg
+* r35 from hg
+* r34 from hg
+* r33 from hg
+* r32 from hg
+* r31 from hg
+* r29 from hg
+* r28 from hg
+* r27 from hg
+* r26 from hg
+* r25 from hg
+* r24 from hg
+* r23 from hg
+* r22 from hg
+* r21 from hg
+* r20 from hg
+* r19 from hg
+* r18 from hg
+* r16 from hg
+* r14 from hg
+* r13 from hg
+* r12 from hg
+* r11 from hg
+* r10 from hg
+* r9 from hg
+* r8 from hg
+* r7 from hg
+* r6 from hg
+* r5 from hg
+* r4 from hg
+* r3 from hg
+* r2 from hg
+* r1 from hg
+* r0 from hg
+* Initial directory structure
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/LICENSE b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/LICENSE
new file mode 100755
index 00000000..610c0793
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/LICENSE
@@ -0,0 +1,21 @@
+This is the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+Copyright (c) 2007 Skip Montanaro.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/PKG-INFO
new file mode 100755
index 00000000..9f72376f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/PKG-INFO
@@ -0,0 +1,51 @@
+Metadata-Version: 1.1
+Name: lockfile
+Version: 0.10.2
+Summary: Platform-independent file locking module
+Home-page: http://launchpad.net/pylockfile
+Author: OpenStack
+Author-email: openstack-dev@lists.openstack.org
+License: UNKNOWN
+Description: The lockfile package exports a LockFile class which provides a simple API for
+ locking files. Unlike the Windows msvcrt.locking function, the fcntl.lockf
+ and flock functions, and the deprecated posixfile module, the API is
+ identical across both Unix (including Linux and Mac) and Windows platforms.
+ The lock mechanism relies on the atomic nature of the link (on Unix) and
+ mkdir (on Windows) system calls. An implementation based on SQLite is also
+ provided, more as a demonstration of the possibilities it provides than as
+ production-quality code.
+
+ Note: In version 0.9 the API changed in two significant ways:
+
+ * It changed from a module defining several classes to a package containing
+ several modules, each defining a single class.
+
+ * Where classes had been named SomethingFileLock before, the last two words
+ have been reversed, so that class is now SomethingLockFile.
+
+ The previous module-level definitions of LinkFileLock, MkdirFileLock and
+ SQLiteFileLock will be retained until the 1.0 release.
+
+ Available on GitHub from:
+
+ git://github.com/smontanaro/pylockfile.git
+
+ To install:
+
+ python setup.py install
+
+
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: Microsoft :: Windows :: Windows NT/2000
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/README b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/README
new file mode 100755
index 00000000..5f7acbc4
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/README
@@ -0,0 +1,27 @@
+The lockfile package exports a LockFile class which provides a simple API for
+locking files. Unlike the Windows msvcrt.locking function, the fcntl.lockf
+and flock functions, and the deprecated posixfile module, the API is
+identical across both Unix (including Linux and Mac) and Windows platforms.
+The lock mechanism relies on the atomic nature of the link (on Unix) and
+mkdir (on Windows) system calls. An implementation based on SQLite is also
+provided, more as a demonstration of the possibilities it provides than as
+production-quality code.
+
+Note: In version 0.9 the API changed in two significant ways:
+
+ * It changed from a module defining several classes to a package containing
+ several modules, each defining a single class.
+
+ * Where classes had been named SomethingFileLock before, the last two words
+ have been reversed, so that class is now SomethingLockFile.
+
+The previous module-level definitions of LinkFileLock, MkdirFileLock and
+SQLiteFileLock will be retained until the 1.0 release.
+
+Available on GitHub from:
+
+ git://github.com/smontanaro/pylockfile.git
+
+To install:
+
+ python setup.py install
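+
+A minimal usage sketch (illustrative only; the acquire()/release() calls,
+with-statement support and the path attribute shown here follow the LockFile
+API described in the package documentation under doc/source, and should be
+checked there for the authoritative reference):
+
+    from lockfile import LockFile
+
+    lock = LockFile("somefile.txt")
+
+    # LockFile objects can be used as context managers...
+    with lock:
+        print(lock.path, "is locked")
+
+    # ...or acquired and released explicitly, with an optional timeout
+    lock.acquire(timeout=30)
+    try:
+        print("doing work while holding the lock")
+    finally:
+        lock.release()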
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/RELEASE-NOTES b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/RELEASE-NOTES
new file mode 100755
index 00000000..8b452ed1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/RELEASE-NOTES
@@ -0,0 +1,50 @@
+Version 0.9.1
+=============
+
+* This release moves the source location to Google Code.
+
+* Threaded support is currently broken. (It might not actually be broken.
+ It might just be the tests which are broken.)
+
+Version 0.9
+===========
+
+* The lockfile module was reorganized into a package.
+
+* The names of the three main classes have changed as follows:
+
+ LinkFileLock -> LinkLockFile
+ MkdirFileLock -> MkdirLockFile
+ SQLiteFileLock -> SQLiteLockFile
+
+* A PIDLockFile class was added.
+
+Version 0.3
+===========
+
+* Fix 2.4.diff file error.
+
+* More documentation updates.
+
+Version 0.2
+===========
+
+* Added 2.4.diff file to patch lockfile to work with Python 2.4 (removes use
+ of with statement).
+
+* Renamed _FileLock base class to LockBase to expose it (and its docstrings)
+ to pydoc.
+
+* Got rid of time.sleep() calls in tests (thanks to Konstantin
+ Veretennicov).
+
+* Use thread.get_ident() as the thread discriminator.
+
+* Updated documentation a bit.
+
+* Added RELEASE-NOTES.
+
+Version 0.1
+===========
+
+* First release - All basic functionality there.
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/Makefile b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/Makefile
new file mode 100755
index 00000000..1b1e8d28
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/Makefile
@@ -0,0 +1,73 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d .build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html web pickle htmlhelp latex changes linkcheck
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " pickle to make pickle files (usable by e.g. sphinx-web)"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " changes to make an overview over all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+
+clean:
+ -rm -rf .build/*
+
+html:
+ mkdir -p .build/html .build/doctrees
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) .build/html
+ @echo
+ @echo "Build finished. The HTML pages are in .build/html."
+
+pickle:
+ mkdir -p .build/pickle .build/doctrees
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) .build/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files or run"
+ @echo " sphinx-web .build/pickle"
+ @echo "to start the sphinx-web server."
+
+web: pickle
+
+htmlhelp:
+ mkdir -p .build/htmlhelp .build/doctrees
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) .build/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in .build/htmlhelp."
+
+latex:
+ mkdir -p .build/latex .build/doctrees
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) .build/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in .build/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+
+changes:
+ mkdir -p .build/changes .build/doctrees
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) .build/changes
+ @echo
+ @echo "The overview file is in .build/changes."
+
+linkcheck:
+ mkdir -p .build/linkcheck .build/doctrees
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) .build/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in .build/linkcheck/output.txt."
+
+html.zip: html
+ (cd .build/html ; zip -r ../../$@ *)
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/conf.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/conf.py
new file mode 100755
index 00000000..623edcb5
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/conf.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+#
+# lockfile documentation build configuration file, created by
+# sphinx-quickstart on Sat Sep 13 17:54:17 2008.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# The contents of this file are pickled, so don't put values in the namespace
+# that aren't pickleable (module imports are okay, they're removed automatically).
+#
+# All configuration values have a default value; values that are commented out
+# serve to show the default value.
+
+import sys, os
+
+# If your extensions are in another directory, add it here. If the directory
+# is relative to the documentation root, use os.path.abspath to make it
+# absolute, like shown here.
+#sys.path.append(os.path.abspath('some/directory'))
+
+# General configuration
+# ---------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['.templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General substitutions.
+project = 'lockfile'
+copyright = '2008, Skip Montanaro'
+
+# The default replacements for |version| and |release|, also used in various
+# other places throughout the built documents.
+#
+# The short X.Y version.
+version = '0.3'
+# The full version, including alpha/beta/rc tags.
+release = '0.3'
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directories, that shouldn't be searched
+# for source files.
+#exclude_dirs = []
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+
+# Options for HTML output
+# -----------------------
+
+# The style sheet to use for HTML and HTML Help pages. A file of that name
+# must exist either in Sphinx' static/ path, or in one of the custom paths
+# given in html_static_path.
+html_style = 'default.css'
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (within the static path) to place at the top of
+# the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+#html_static_path = ['.static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, the reST sources are included in the HTML build as _sources/<name>.
+#html_copy_source = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'lockfiledoc'
+
+
+# Options for LaTeX output
+# ------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, document class [howto/manual]).
+latex_documents = [
+ ('lockfile', 'lockfile.tex', 'lockfile Documentation',
+ 'Skip Montanaro', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/index.rst b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/index.rst
new file mode 100755
index 00000000..f76173dc
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/index.rst
@@ -0,0 +1,275 @@
+
+:mod:`lockfile` --- Platform-independent file locking
+=====================================================
+
+.. module:: lockfile
+ :synopsis: Platform-independent file locking
+.. moduleauthor:: Skip Montanaro <skip@pobox.com>
+.. sectionauthor:: Skip Montanaro <skip@pobox.com>
+
+
+.. note::
+
+ This package is pre-release software. Between versions 0.8 and 0.9 it
+ was changed from a module to a package. It is quite possible that the
+ API and implementation will change again in important ways as people test
+ it and provide feedback and bug fixes. In particular, if the mkdir-based
+ locking scheme is sufficient for both Windows and Unix platforms, the
+ link-based scheme may be deleted so that only a single locking scheme is
+ used, providing cross-platform lockfile cooperation.
+
+.. note::
+
+ The implementation uses the `with` statement, both in the tests and in the
+ main code, so will only work out-of-the-box with Python 2.5 or later.
+ However, the use of the `with` statement is minimal, so if you apply the
+ patch in the included 2.4.diff file you can use it with Python 2.4. It's
+ possible that it will work in Python 2.3 with that patch applied as well,
+ though the doctest code relies on APIs new in 2.4, so will have to be
+ rewritten somewhat to allow testing on 2.3. As they say, patches welcome.
+ ``;-)``
+
+The :mod:`lockfile` package exports a :class:`LockFile` class which provides
+a simple API for locking files. Unlike the Windows :func:`msvcrt.locking`
+function, the Unix :func:`fcntl.flock` and :func:`fcntl.lockf` functions, and
+the deprecated :mod:`posixfile` module, the API is identical across both Unix
+(including Linux and Mac) and Windows platforms. The lock mechanism relies
+on the atomic nature of the :func:`link` (on Unix) and :func:`mkdir` (on
+Windows) system calls. It also contains several lock-method-specific
+modules: :mod:`lockfile.linklockfile`, :mod:`lockfile.mkdirlockfile`, and
+:mod:`lockfile.sqlitelockfile`, each one exporting a single class. For
+backwards compatibility with versions before 0.9 the :class:`LinkFileLock`,
+:class:`MkdirFileLock` and :class:`SQLiteFileLock` objects are exposed as
+attributes of the top-level lockfile package, though this use was deprecated
+starting with version 0.9 and will be removed in version 1.0.
+
+.. note::
+
+ The current implementation uses :func:`os.link` on Unix, but since that
+ function is unavailable on Windows it uses :func:`os.mkdir` there. At
+ this point it's not clear whether using the :func:`os.mkdir` method would be
+ sufficient on Unix systems. If it proves to be adequate on Unix then
+ the implementation could be simplified and truly cross-platform locking
+ would be possible.
+
+.. note::
+
+ The current implementation doesn't provide for shared vs. exclusive
+ locks. Ideally, it should be possible for multiple reader processes to
+ hold the lock at the same time.
+
+The module defines the following exceptions:
+
+.. exception:: Error
+
+ This is the base class for all exceptions raised by the :class:`LockFile`
+ class.
+
+.. exception:: LockError
+
+ This is the base class for all exceptions raised when attempting to lock
+ a file.
+
+.. exception:: UnlockError
+
+ This is the base class for all exceptions raised when attempting to
+ unlock a file.
+
+.. exception:: LockTimeout
+
+ This exception is raised if the :func:`LockFile.acquire` method is
+ called with a timeout which expires before an existing lock is released.
+
+.. exception:: AlreadyLocked
+
+ This exception is raised if the :func:`LockFile.acquire` detects a
+ file is already locked when in non-blocking mode.
+
+.. exception:: LockFailed
+
+ This exception is raised if the :func:`LockFile.acquire` detects some
+ other condition (such as a non-writable directory) which prevents it from
+ creating its lock file.
+
+.. exception:: NotLocked
+
+ This exception is raised if the file is not locked when
+ :func:`LockFile.release` is called.
+
+.. exception:: NotMyLock
+
+ This exception is raised if the file is locked by another thread or
+ process when :func:`LockFile.release` is called.
+
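+Because the acquisition-related exceptions derive from :exc:`LockError` and
+the release-related ones from :exc:`UnlockError` (both, in turn, derive from
+:exc:`Error`), callers can catch a base class rather than each concrete
+exception.  A minimal sketch (the path and ``do_work`` are only
+illustrative)::
+
+    from lockfile import LockFile, LockError, UnlockError
+
+    lock = LockFile("/tmp/example")
+    try:
+        lock.acquire(timeout=5)
+    except LockError:        # LockTimeout, AlreadyLocked, LockFailed
+        print "could not acquire the lock"
+    else:
+        try:
+            do_work()        # hypothetical critical section
+        finally:
+            lock.release()   # may raise UnlockError (NotLocked, NotMyLock)
+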
+The following classes are provided:
+
+.. class:: linklockfile.LinkLockFile(path, threaded=True)
+
+ This class uses the :func:`link(2)` system call as the basic lock
+ mechanism. *path* is an object in the file system to be locked. It need
+ not exist, but its directory must exist and be writable at the time the
+ :func:`acquire` and :func:`release` methods are called. *threaded* is
+ optional; when set to :const:`True`, locks will be distinguished
+ between threads in the same process.
+
+.. class:: symlinklockfile.SymlinkLockFile(path, threaded=True)
+
+ This class uses the :func:`symlink(2)` system call as the basic lock
+ mechanism. The parameters have the same meaning and constraints as for
+ the :class:`LinkLockFile` class.
+
+.. class:: mkdirlockfile.MkdirLockFile(path, threaded=True)
+
+ This class uses the :func:`mkdir(2)` system call as the basic lock
+ mechanism. The parameters have the same meaning and constraints as for
+ the :class:`LinkLockFile` class.
+
+.. class:: sqlitelockfile.SQLiteLockFile(path, threaded=True)
+
+ This class uses the :mod:`sqlite3` module to implement the lock
+ mechanism. The parameters have the same meaning as for the
+ :class:`LinkLockFile` class.
+
+.. class:: LockBase(path, threaded=True)
+
+ This is the base class for all concrete implementations and is available
+ at the lockfile package level so programmers can implement other locking
+ schemes.
+
+.. function:: locked(path, timeout=None)
+
+ This function provides a decorator which ensures the decorated function
+ is always called with the lock held.
+
+By default, the :const:`LockFile` object refers to the
+:class:`mkdirlockfile.MkdirLockFile` class on Windows. On all other
+platforms it refers to the :class:`linklockfile.LinkLockFile` class.
+
+When locking a file the :class:`linklockfile.LinkLockFile` class creates a
+uniquely named hard link to an empty lock file. The name of that hard link
+encodes the hostname, the process id, and, if locks between threads are distinguished, the
+thread identifier. For example, if you want to lock access to a file named
+"README", the lock file is named "README.lock". With per-thread locks
+enabled the hard link is named HOSTNAME-THREADID-PID. With only per-process
+locks enabled the hard link is named HOSTNAME--PID.
+
+When using the :class:`mkdirlockfile.MkdirLockFile` class the lock file is a
+directory. Referring to the example above, README.lock will be a directory
+and HOSTNAME-THREADID-PID will be an empty file within that directory.
+
+.. seealso::
+
+ Module :mod:`msvcrt`
+ Provides the :func:`locking` function, the standard Windows way of
+ locking (parts of) a file.
+
+ Module :mod:`posixfile`
+ The deprecated (since Python 1.5) way of locking files on Posix systems.
+
+ Module :mod:`fcntl`
+ Provides the current best way to lock files on Unix systems
+ (:func:`lockf` and :func:`flock`).
+
+LockFile Objects
+----------------
+
+:class:`LockFile` objects support the `context manager` protocol used by the
+:keyword:`with` statement. The timeout option is not supported when used in
+this fashion. While support for timeouts could be implemented, there is no
+support for handling the eventual :exc:`LockTimeout` exceptions raised by the
+:func:`__enter__` method, so you would have to protect the `with` statement with
+a `try` statement. The resulting construct would not be any simpler than just
+using a `try` statement in the first place.
+
+:class:`LockFile` has the following user-visible methods:
+
+.. method:: LockFile.acquire(timeout=None)
+
+ Lock the file associated with the :class:`LockFile` object. If the
+ *timeout* is omitted or :const:`None` the caller will block until the
+ file is unlocked by the object currently holding the lock. If the
+ *timeout* is zero or a negative number the :exc:`AlreadyLocked` exception
+ will be raised if the file is currently locked by another process or
+ thread. If the *timeout* is positive, the caller will block for that
+ many seconds waiting for the lock to be released. If the lock is not
+ released within that period the :exc:`LockTimeout` exception will be
+ raised.
+
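+   As an illustration of the three timeout modes (a minimal sketch; the path
+   and timeout values are only examples)::
+
+      from lockfile import LockFile, LockTimeout
+
+      lock = LockFile("/tmp/example")
+      lock.acquire()               # no timeout: block until the lock is free
+      lock.release()
+
+      lock.acquire(timeout=0)      # raises AlreadyLocked if another holder exists
+      lock.release()
+
+      try:
+          lock.acquire(timeout=30)     # wait at most 30 seconds
+      except LockTimeout:
+          pass                         # the lock was not released in time
+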
+.. method:: LockFile.release()
+
+ Unlock the file associated with the :class:`LockFile` object. If the
+ file is not currently locked, the :exc:`NotLocked` exception is raised.
+ If the file is locked by another thread or process the :exc:`NotMyLock`
+ exception is raised.
+
+.. method:: LockFile.is_locked()
+
+ Return the status of the lock on the current file. If any process or
+ thread (including the current one) is locking the file, :const:`True` is
+ returned, otherwise :const:`False` is returned.
+
+.. method:: LockFile.break_lock()
+
+ If the file is currently locked, break it.
+
+.. method:: LockFile.i_am_locking()
+
+ Return :const:`True` if the caller holds the lock, :const:`False` otherwise.
+
+Examples
+--------
+
+This example is the "hello world" for the :mod:`lockfile` package::
+
+ from lockfile import LockFile
+ lock = LockFile("/some/file/or/other")
+ with lock:
+ print lock.path, 'is locked.'
+
+To use this with Python 2.4, you can execute::
+
+ from lockfile import LockFile
+ lock = LockFile("/some/file/or/other")
+ lock.acquire()
+ print lock.path, 'is locked.'
+ lock.release()
+
+If you don't want to wait forever, you might try::
+
+ from lockfile import LockFile
+ lock = LockFile("/some/file/or/other")
+ while not lock.i_am_locking():
+ try:
+ lock.acquire(timeout=60) # wait up to 60 seconds
+ except LockTimeout:
+ lock.break_lock()
+ lock.acquire()
+ print "I locked", lock.path
+ lock.release()
+
+You can also ensure that a lock is always held when appropriately decorated
+functions are called::
+
+ from lockfile import locked
+ @locked("/tmp/mylock")
+ def func(a, b):
+ return a + b
+
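+Each call to the decorated function acquires the named lock on entry and
+releases it on return, so concurrent callers are serialised.  Reusing the
+illustrative ``func`` above::
+
+    print func(4, 3)   # waits for "/tmp/mylock" if needed, then prints 7
+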
+Other Libraries
+---------------
+
+The idea of implementing advisory locking with a standard API is not new
+with :mod:`lockfile`. There are a number of other libraries available:
+
+* locknix - http://pypi.python.org/pypi/locknix - Unix only
+* mx.MiscLockFile - from Marc André Lemburg, part of the mx.Base
+ distribution - cross-platform.
+* Twisted - http://twistedmatrix.com/trac/browser/trunk/twisted/python/lockfile.py
+* zc.lockfile - http://pypi.python.org/pypi/zc.lockfile
+
+
+Contacting the Author
+---------------------
+
+If you encounter any problems with ``lockfile``, would like help or want to
+submit a patch, check http://launchpad.net/pylockfile
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/PKG-INFO
new file mode 100755
index 00000000..9f72376f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/PKG-INFO
@@ -0,0 +1,51 @@
+Metadata-Version: 1.1
+Name: lockfile
+Version: 0.10.2
+Summary: Platform-independent file locking module
+Home-page: http://launchpad.net/pylockfile
+Author: OpenStack
+Author-email: openstack-dev@lists.openstack.org
+License: UNKNOWN
+Description: The lockfile package exports a LockFile class which provides a simple API for
+ locking files. Unlike the Windows msvcrt.locking function, the fcntl.lockf
+ and flock functions, and the deprecated posixfile module, the API is
+ identical across both Unix (including Linux and Mac) and Windows platforms.
+ The lock mechanism relies on the atomic nature of the link (on Unix) and
+ mkdir (on Windows) system calls. An implementation based on SQLite is also
+ provided, more as a demonstration of the possibilities it provides than as
+ production-quality code.
+
+ Note: In version 0.9 the API changed in two significant ways:
+
+ * It changed from a module defining several classes to a package containing
+ several modules, each defining a single class.
+
+ * Where classes had been named SomethingFileLock before, the last two words
+ have been reversed, so that class is now SomethingLockFile.
+
+ The previous module-level definitions of LinkFileLock, MkdirFileLock and
+ SQLiteFileLock will be retained until the 1.0 release.
+
+ Available on GitHub from:
+
+ git://github.com/smontanaro/pylockfile.git
+
+ To install:
+
+ python setup.py install
+
+
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: Microsoft :: Windows :: Windows NT/2000
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt
new file mode 100755
index 00000000..4b289f3a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt
@@ -0,0 +1,26 @@
+ACKS
+AUTHORS
+ChangeLog
+LICENSE
+README
+RELEASE-NOTES
+setup.cfg
+setup.py
+test-requirements.txt
+tox.ini
+doc/source/Makefile
+doc/source/conf.py
+doc/source/index.rst
+lockfile/__init__.py
+lockfile/linklockfile.py
+lockfile/mkdirlockfile.py
+lockfile/pidlockfile.py
+lockfile/sqlitelockfile.py
+lockfile/symlinklockfile.py
+lockfile.egg-info/PKG-INFO
+lockfile.egg-info/SOURCES.txt
+lockfile.egg-info/dependency_links.txt
+lockfile.egg-info/not-zip-safe
+lockfile.egg-info/top_level.txt
+test/compliancetest.py
+test/test_lockfile.py
\ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/not-zip-safe b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/not-zip-safe
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/top_level.txt b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/top_level.txt
new file mode 100755
index 00000000..5a13159a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/top_level.txt
@@ -0,0 +1 @@
+lockfile
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/__init__.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/__init__.py
new file mode 100755
index 00000000..d905af96
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/__init__.py
@@ -0,0 +1,326 @@
+"""
+lockfile.py - Platform-independent advisory file locks.
+
+Requires Python 2.5 unless you apply 2.4.diff
+Locking is done on a per-thread basis instead of a per-process basis.
+
+Usage:
+
+>>> lock = LockFile('somefile')
+>>> try:
+... lock.acquire()
+... except AlreadyLocked:
+... print 'somefile', 'is locked already.'
+... except LockFailed:
+... print 'somefile', 'can\\'t be locked.'
+... else:
+... print 'got lock'
+got lock
+>>> print lock.is_locked()
+True
+>>> lock.release()
+
+>>> lock = LockFile('somefile')
+>>> print lock.is_locked()
+False
+>>> with lock:
+... print lock.is_locked()
+True
+>>> print lock.is_locked()
+False
+
+>>> lock = LockFile('somefile')
+>>> # It is okay to lock twice from the same thread...
+>>> with lock:
+... lock.acquire()
+...
+>>> # No lock counter is kept, though, so you can't unlock multiple times...
+>>> print lock.is_locked()
+False
+
+Exceptions:
+
+ Error - base class for other exceptions
+ LockError - base class for all locking exceptions
+ AlreadyLocked - Another thread or process already holds the lock
+ LockFailed - Lock failed for some other reason
+ UnlockError - base class for all unlocking exceptions
+ NotLocked - File was not locked.
+ NotMyLock - File was locked but not by the current thread/process
+"""
+
+from __future__ import absolute_import
+
+import sys
+import socket
+import os
+import threading
+import time
+import urllib
+import warnings
+import functools
+
+# Work with PEP8 and non-PEP8 versions of threading module.
+if not hasattr(threading, "current_thread"):
+ threading.current_thread = threading.currentThread
+if not hasattr(threading.Thread, "get_name"):
+ threading.Thread.get_name = threading.Thread.getName
+
+__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
+ 'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
+ 'LinkLockFile', 'MkdirLockFile', 'SQLiteLockFile',
+ 'LockBase', 'locked']
+
+class Error(Exception):
+ """
+ Base class for other exceptions.
+
+ >>> try:
+ ... raise Error
+ ... except Exception:
+ ... pass
+ """
+ pass
+
+class LockError(Error):
+ """
+ Base class for errors arising from attempts to acquire the lock.
+
+ >>> try:
+ ... raise LockError
+ ... except Error:
+ ... pass
+ """
+ pass
+
+class LockTimeout(LockError):
+ """Raised when the lock cannot be acquired within a user-defined period of time.
+
+ >>> try:
+ ... raise LockTimeout
+ ... except LockError:
+ ... pass
+ """
+ pass
+
+class AlreadyLocked(LockError):
+ """Some other thread/process is locking the file.
+
+ >>> try:
+ ... raise AlreadyLocked
+ ... except LockError:
+ ... pass
+ """
+ pass
+
+class LockFailed(LockError):
+ """Lock file creation failed for some other reason.
+
+ >>> try:
+ ... raise LockFailed
+ ... except LockError:
+ ... pass
+ """
+ pass
+
+class UnlockError(Error):
+ """
+ Base class for errors arising from attempts to release the lock.
+
+ >>> try:
+ ... raise UnlockError
+ ... except Error:
+ ... pass
+ """
+ pass
+
+class NotLocked(UnlockError):
+ """Raised when an attempt is made to unlock an unlocked file.
+
+ >>> try:
+ ... raise NotLocked
+ ... except UnlockError:
+ ... pass
+ """
+ pass
+
+class NotMyLock(UnlockError):
+ """Raised when an attempt is made to unlock a file someone else locked.
+
+ >>> try:
+ ... raise NotMyLock
+ ... except UnlockError:
+ ... pass
+ """
+ pass
+
+class LockBase:
+ """Base class for platform-specific lock classes."""
+ def __init__(self, path, threaded=True, timeout=None):
+ """
+ >>> lock = LockBase('somefile')
+ >>> lock = LockBase('somefile', threaded=False)
+ """
+ self.path = path
+ self.lock_file = os.path.abspath(path) + ".lock"
+ self.hostname = socket.gethostname()
+ self.pid = os.getpid()
+ if threaded:
+ t = threading.current_thread()
+ # Thread objects in Python 2.4 and earlier do not have ident
+ # attrs. Work around that.
+ ident = getattr(t, "ident", hash(t))
+ self.tname = "-%x" % (ident & 0xffffffff)
+ else:
+ self.tname = ""
+ dirname = os.path.dirname(self.lock_file)
+
+ # unique name is mostly about the current process, but must
+ # also contain the path -- otherwise, two adjacent locked
+ # files conflict (one file gets locked, creating lock-file and
+ # unique file, the other one gets locked, creating lock-file
+ # and overwriting the already existing lock-file, then one
+ # gets unlocked, deleting both lock-file and unique file,
+ # finally the last lock errors out upon releasing).
+ self.unique_name = os.path.join(dirname,
+ "%s%s.%s%s" % (self.hostname,
+ self.tname,
+ self.pid,
+ hash(self.path)))
+ self.timeout = timeout
+
+ def acquire(self, timeout=None):
+ """
+ Acquire the lock.
+
+ * If timeout is omitted (or None), wait forever trying to lock the
+ file.
+
+ * If timeout > 0, try to acquire the lock for that many seconds. If
+ the lock period expires and the file is still locked, raise
+ LockTimeout.
+
+ * If timeout <= 0, raise AlreadyLocked immediately if the file is
+ already locked.
+ """
+ raise NotImplementedError("implement in subclass")
+
+ def release(self):
+ """
+ Release the lock.
+
+ If the file is not locked, raise NotLocked.
+ """
+ raise NotImplementedError("implement in subclass")
+
+ def is_locked(self):
+ """
+ Tell whether or not the file is locked.
+ """
+ raise NotImplementedError("implement in subclass")
+
+ def i_am_locking(self):
+ """
+ Return True if this object is locking the file.
+ """
+ raise NotImplementedError("implement in subclass")
+
+ def break_lock(self):
+ """
+ Remove a lock. Useful if a locking thread failed to unlock.
+ """
+ raise NotImplementedError("implement in subclass")
+
+ def __enter__(self):
+ """
+ Context manager support.
+ """
+ self.acquire()
+ return self
+
+ def __exit__(self, *_exc):
+ """
+ Context manager support.
+ """
+ self.release()
+
+ def __repr__(self):
+ return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name,
+ self.path)
+
+def _fl_helper(cls, mod, *args, **kwds):
+ warnings.warn("Import from %s module instead of lockfile package" % mod,
+ DeprecationWarning, stacklevel=2)
+ # This is a bit funky, but it's only for a while. The way the unit tests
+ # are constructed this function winds up as an unbound method, so it
+ # actually takes three args, not two. We want to toss out self.
+ if not isinstance(args[0], str):
+ # We are testing, avoid the first arg
+ args = args[1:]
+ if len(args) == 1 and not kwds:
+ kwds["threaded"] = True
+ return cls(*args, **kwds)
+
+def LinkFileLock(*args, **kwds):
+ """Factory function provided for backwards compatibility.
+
+ Do not use in new code. Instead, import LinkLockFile from the
+ lockfile.linklockfile module.
+ """
+ from . import linklockfile
+ return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile",
+ *args, **kwds)
+
+def MkdirFileLock(*args, **kwds):
+ """Factory function provided for backwards compatibility.
+
+ Do not use in new code. Instead, import MkdirLockFile from the
+ lockfile.mkdirlockfile module.
+ """
+ from . import mkdirlockfile
+ return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile",
+ *args, **kwds)
+
+def SQLiteFileLock(*args, **kwds):
+ """Factory function provided for backwards compatibility.
+
+ Do not use in new code. Instead, import SQLiteLockFile from the
+ lockfile.sqlitelockfile module.
+ """
+ from . import sqlitelockfile
+ return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile",
+ *args, **kwds)
+
+def locked(path, timeout=None):
+ """Decorator which enables locks for decorated function.
+
+ Arguments:
+ - path: path for lockfile.
+ - timeout (optional): Timeout for acquiring lock.
+
+ Usage:
+ @locked('/var/run/myname', timeout=0)
+ def myname(...):
+ ...
+ """
+ def decor(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ lock = FileLock(path, timeout=timeout)
+ lock.acquire()
+ try:
+ return func(*args, **kwargs)
+ finally:
+ lock.release()
+ return wrapper
+ return decor
+
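+# Pick the default LockFile implementation for this platform: hard-link
+# based locking where os.link is available, mkdir-based locking otherwise.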
+if hasattr(os, "link"):
+ from . import linklockfile as _llf
+ LockFile = _llf.LinkLockFile
+else:
+ from . import mkdirlockfile as _mlf
+ LockFile = _mlf.MkdirLockFile
+
+FileLock = LockFile
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/linklockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/linklockfile.py
new file mode 100755
index 00000000..9c506734
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/linklockfile.py
@@ -0,0 +1,73 @@
+from __future__ import absolute_import
+
+import time
+import os
+
+from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
+ AlreadyLocked)
+
+class LinkLockFile(LockBase):
+ """Lock access to a file using the atomic property of link(2).
+
+ >>> lock = LinkLockFile('somefile')
+ >>> lock = LinkLockFile('somefile', threaded=False)
+ """
+
+ def acquire(self, timeout=None):
+ try:
+ open(self.unique_name, "wb").close()
+ except IOError:
+ raise LockFailed("failed to create %s" % self.unique_name)
+
+ timeout = timeout if timeout is not None else self.timeout
+ end_time = time.time()
+ if timeout is not None and timeout > 0:
+ end_time += timeout
+
+ while True:
+ # Try and create a hard link to it.
+ try:
+ os.link(self.unique_name, self.lock_file)
+ except OSError:
+ # Link creation failed. Maybe we've double-locked?
+ nlinks = os.stat(self.unique_name).st_nlink
+ if nlinks == 2:
+ # The original link plus the one I created == 2. We're
+ # good to go.
+ return
+ else:
+ # Otherwise the lock creation failed.
+ if timeout is not None and time.time() > end_time:
+ os.unlink(self.unique_name)
+ if timeout > 0:
+ raise LockTimeout("Timeout waiting to acquire"
+ " lock for %s" %
+ self.path)
+ else:
+ raise AlreadyLocked("%s is already locked" %
+ self.path)
+ time.sleep(timeout is not None and timeout/10 or 0.1)
+ else:
+ # Link creation succeeded. We're good to go.
+ return
+
+ def release(self):
+ if not self.is_locked():
+ raise NotLocked("%s is not locked" % self.path)
+ elif not os.path.exists(self.unique_name):
+ raise NotMyLock("%s is locked, but not by me" % self.path)
+ os.unlink(self.unique_name)
+ os.unlink(self.lock_file)
+
+ def is_locked(self):
+ return os.path.exists(self.lock_file)
+
+ def i_am_locking(self):
+ return (self.is_locked() and
+ os.path.exists(self.unique_name) and
+ os.stat(self.unique_name).st_nlink == 2)
+
+ def break_lock(self):
+ if os.path.exists(self.lock_file):
+ os.unlink(self.lock_file)
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/mkdirlockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/mkdirlockfile.py
new file mode 100755
index 00000000..8d2c801f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/mkdirlockfile.py
@@ -0,0 +1,83 @@
+from __future__ import absolute_import, division
+
+import time
+import os
+import sys
+import errno
+
+from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
+ AlreadyLocked)
+
+class MkdirLockFile(LockBase):
+ """Lock file by creating a directory."""
+ def __init__(self, path, threaded=True, timeout=None):
+ """
+ >>> lock = MkdirLockFile('somefile')
+ >>> lock = MkdirLockFile('somefile', threaded=False)
+ """
+ LockBase.__init__(self, path, threaded, timeout)
+ # Lock file itself is a directory. Place the unique file name into
+ # it.
+ self.unique_name = os.path.join(self.lock_file,
+ "%s.%s%s" % (self.hostname,
+ self.tname,
+ self.pid))
+
+ def acquire(self, timeout=None):
+ timeout = timeout if timeout is not None else self.timeout
+ end_time = time.time()
+ if timeout is not None and timeout > 0:
+ end_time += timeout
+
+ if timeout is None:
+ wait = 0.1
+ else:
+ wait = max(0, timeout / 10)
+
+ while True:
+ try:
+ os.mkdir(self.lock_file)
+ except OSError:
+ err = sys.exc_info()[1]
+ if err.errno == errno.EEXIST:
+ # Already locked.
+ if os.path.exists(self.unique_name):
+ # Already locked by me.
+ return
+ if timeout is not None and time.time() > end_time:
+ if timeout > 0:
+ raise LockTimeout("Timeout waiting to acquire"
+ " lock for %s" %
+ self.path)
+ else:
+ # Someone else has the lock.
+ raise AlreadyLocked("%s is already locked" %
+ self.path)
+ time.sleep(wait)
+ else:
+ # Couldn't create the lock for some other reason
+ raise LockFailed("failed to create %s" % self.lock_file)
+ else:
+ open(self.unique_name, "wb").close()
+ return
+
+ def release(self):
+ if not self.is_locked():
+ raise NotLocked("%s is not locked" % self.path)
+ elif not os.path.exists(self.unique_name):
+ raise NotMyLock("%s is locked, but not by me" % self.path)
+ os.unlink(self.unique_name)
+ os.rmdir(self.lock_file)
+
+ def is_locked(self):
+ return os.path.exists(self.lock_file)
+
+ def i_am_locking(self):
+ return (self.is_locked() and
+ os.path.exists(self.unique_name))
+
+ def break_lock(self):
+ if os.path.exists(self.lock_file):
+ for name in os.listdir(self.lock_file):
+ os.unlink(os.path.join(self.lock_file, name))
+ os.rmdir(self.lock_file)
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/pidlockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/pidlockfile.py
new file mode 100755
index 00000000..e92f9ead
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/pidlockfile.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+
+# pidlockfile.py
+#
+# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Lockfile behaviour implemented via Unix PID files.
+ """
+
+from __future__ import absolute_import
+
+import os
+import sys
+import errno
+import time
+
+from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
+ LockTimeout)
+
+
+class PIDLockFile(LockBase):
+ """ Lockfile implemented as a Unix PID file.
+
+ The lock file is a normal file named by the attribute `path`.
+ A lock's PID file contains a single line of text, containing
+ the process ID (PID) of the process that acquired the lock.
+
+ >>> lock = PIDLockFile('somefile')
+ >>> lock = PIDLockFile('somefile')
+ """
+
+ def __init__(self, path, threaded=False, timeout=None):
+ # pid lockfiles don't support threaded operation, so always force
+ # False as the threaded arg.
+ LockBase.__init__(self, path, False, timeout)
+ dirname = os.path.dirname(self.lock_file)
+ basename = os.path.split(self.path)[-1]
+ self.unique_name = self.path
+
+ def read_pid(self):
+ """ Get the PID from the lock file.
+ """
+ return read_pid_from_pidfile(self.path)
+
+ def is_locked(self):
+ """ Test if the lock is currently held.
+
+ The lock is held if the PID file for this lock exists.
+
+ """
+ return os.path.exists(self.path)
+
+ def i_am_locking(self):
+ """ Test if the lock is held by the current process.
+
+ Returns ``True`` if the current process ID matches the
+ number stored in the PID file.
+ """
+ return self.is_locked() and os.getpid() == self.read_pid()
+
+ def acquire(self, timeout=None):
+ """ Acquire the lock.
+
+ Creates the PID file for this lock, or raises an error if
+ the lock could not be acquired.
+ """
+
+ timeout = timeout if timeout is not None else self.timeout
+ end_time = time.time()
+ if timeout is not None and timeout > 0:
+ end_time += timeout
+
+ while True:
+ try:
+ write_pid_to_pidfile(self.path)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST:
+ # The lock creation failed. Maybe sleep a bit.
+ if timeout is not None and time.time() > end_time:
+ if timeout > 0:
+ raise LockTimeout("Timeout waiting to acquire"
+ " lock for %s" %
+ self.path)
+ else:
+ raise AlreadyLocked("%s is already locked" %
+ self.path)
+ time.sleep(timeout is not None and timeout/10 or 0.1)
+ else:
+ raise LockFailed("failed to create %s" % self.path)
+ else:
+ return
+
+ def release(self):
+ """ Release the lock.
+
+ Removes the PID file to release the lock, or raises an
+ error if the current process does not hold the lock.
+
+ """
+ if not self.is_locked():
+ raise NotLocked("%s is not locked" % self.path)
+ if not self.i_am_locking():
+ raise NotMyLock("%s is locked, but not by me" % self.path)
+ remove_existing_pidfile(self.path)
+
+ def break_lock(self):
+ """ Break an existing lock.
+
+ Removes the PID file if it already exists, otherwise does
+ nothing.
+
+ """
+ remove_existing_pidfile(self.path)
+
+def read_pid_from_pidfile(pidfile_path):
+ """ Read the PID recorded in the named PID file.
+
+ Read and return the numeric PID recorded as text in the named
+ PID file. If the PID file cannot be read, or if the content is
+ not a valid PID, return ``None``.
+
+ """
+ pid = None
+ try:
+ pidfile = open(pidfile_path, 'r')
+ except IOError:
+ pass
+ else:
+ # According to the FHS 2.3 section on PID files in /var/run:
+ #
+ # The file must consist of the process identifier in
+ # ASCII-encoded decimal, followed by a newline character.
+ #
+ # Programs that read PID files should be somewhat flexible
+ # in what they accept; i.e., they should ignore extra
+ # whitespace, leading zeroes, absence of the trailing
+ # newline, or additional lines in the PID file.
+
+ line = pidfile.readline().strip()
+ try:
+ pid = int(line)
+ except ValueError:
+ pass
+ pidfile.close()
+
+ return pid
+
+
+def write_pid_to_pidfile(pidfile_path):
+ """ Write the PID in the named PID file.
+
+ Get the numeric process ID (“PID”) of the current process
+ and write it to the named file as a line of text.
+
+ """
+ open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
+ open_mode = 0o644
+ pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
+ pidfile = os.fdopen(pidfile_fd, 'w')
+
+ # According to the FHS 2.3 section on PID files in /var/run:
+ #
+ # The file must consist of the process identifier in
+ # ASCII-encoded decimal, followed by a newline character. For
+ # example, if crond was process number 25, /var/run/crond.pid
+ # would contain three characters: two, five, and newline.
+
+ pid = os.getpid()
+ line = "%(pid)d\n" % vars()
+ pidfile.write(line)
+ pidfile.close()
+
+
+def remove_existing_pidfile(pidfile_path):
+ """ Remove the named PID file if it exists.
+
+ Removing a PID file that doesn't already exist puts us in the
+ desired state, so we ignore the condition if the file does not
+ exist.
+
+ """
+ try:
+ os.remove(pidfile_path)
+ except OSError as exc:
+ if exc.errno == errno.ENOENT:
+ pass
+ else:
+ raise
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/sqlitelockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/sqlitelockfile.py
new file mode 100755
index 00000000..7dee4a85
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/sqlitelockfile.py
@@ -0,0 +1,155 @@
+from __future__ import absolute_import, division
+
+import time
+import os
+
+try:
+ unicode
+except NameError:
+ unicode = str
+
+from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
+
+class SQLiteLockFile(LockBase):
+ "Demonstrate SQL-based locking."
+
+ testdb = None
+
+ def __init__(self, path, threaded=True, timeout=None):
+ """
+ >>> lock = SQLiteLockFile('somefile')
+ >>> lock = SQLiteLockFile('somefile', threaded=False)
+ """
+ LockBase.__init__(self, path, threaded, timeout)
+ self.lock_file = unicode(self.lock_file)
+ self.unique_name = unicode(self.unique_name)
+
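+        # All SQLiteLockFile instances in this process share one throw-away
+        # SQLite database, created on first use and removed at interpreter
+        # exit; every held lock is a row in its "locks" table.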
+ if SQLiteLockFile.testdb is None:
+ import tempfile
+ _fd, testdb = tempfile.mkstemp()
+ os.close(_fd)
+ os.unlink(testdb)
+ del _fd, tempfile
+ SQLiteLockFile.testdb = testdb
+
+ import sqlite3
+ self.connection = sqlite3.connect(SQLiteLockFile.testdb)
+
+ c = self.connection.cursor()
+ try:
+ c.execute("create table locks"
+ "("
+ " lock_file varchar(32),"
+ " unique_name varchar(32)"
+ ")")
+ except sqlite3.OperationalError:
+ pass
+ else:
+ self.connection.commit()
+ import atexit
+ atexit.register(os.unlink, SQLiteLockFile.testdb)
+
+ def acquire(self, timeout=None):
+ timeout = timeout if timeout is not None else self.timeout
+ end_time = time.time()
+ if timeout is not None and timeout > 0:
+ end_time += timeout
+
+ if timeout is None:
+ wait = 0.1
+ elif timeout <= 0:
+ wait = 0
+ else:
+ wait = timeout / 10
+
+ cursor = self.connection.cursor()
+
+ while True:
+ if not self.is_locked():
+ # Not locked. Try to lock it.
+ cursor.execute("insert into locks"
+ " (lock_file, unique_name)"
+ " values"
+ " (?, ?)",
+ (self.lock_file, self.unique_name))
+ self.connection.commit()
+
+ # Check to see if we are the only lock holder.
+ cursor.execute("select * from locks"
+ " where unique_name = ?",
+ (self.unique_name,))
+ rows = cursor.fetchall()
+ if len(rows) > 1:
+ # Nope. Someone else got there. Remove our lock.
+ cursor.execute("delete from locks"
+ " where unique_name = ?",
+ (self.unique_name,))
+ self.connection.commit()
+ else:
+ # Yup. We're done, so go home.
+ return
+ else:
+ # Check to see if we are the only lock holder.
+ cursor.execute("select * from locks"
+ " where unique_name = ?",
+ (self.unique_name,))
+ rows = cursor.fetchall()
+ if len(rows) == 1:
+ # We're the locker, so go home.
+ return
+
+ # Maybe we should wait a bit longer.
+ if timeout is not None and time.time() > end_time:
+ if timeout > 0:
+ # No more waiting.
+ raise LockTimeout("Timeout waiting to acquire"
+ " lock for %s" %
+ self.path)
+ else:
+ # Someone else has the lock and we are impatient.
+ raise AlreadyLocked("%s is already locked" % self.path)
+
+ # Well, okay. We'll give it a bit longer.
+ time.sleep(wait)
+
+ def release(self):
+ if not self.is_locked():
+ raise NotLocked("%s is not locked" % self.path)
+ if not self.i_am_locking():
+ raise NotMyLock("%s is locked, but not by me (by %s)" %
+ (self.unique_name, self._who_is_locking()))
+ cursor = self.connection.cursor()
+ cursor.execute("delete from locks"
+ " where unique_name = ?",
+ (self.unique_name,))
+ self.connection.commit()
+
+ def _who_is_locking(self):
+ cursor = self.connection.cursor()
+ cursor.execute("select unique_name from locks"
+ " where lock_file = ?",
+ (self.lock_file,))
+ return cursor.fetchone()[0]
+
+ def is_locked(self):
+ cursor = self.connection.cursor()
+ cursor.execute("select * from locks"
+ " where lock_file = ?",
+ (self.lock_file,))
+ rows = cursor.fetchall()
+ return bool(rows)
+
+ def i_am_locking(self):
+ cursor = self.connection.cursor()
+ cursor.execute("select * from locks"
+ " where lock_file = ?"
+ " and unique_name = ?",
+ (self.lock_file, self.unique_name))
+ return bool(cursor.fetchall())
+
+ def break_lock(self):
+ cursor = self.connection.cursor()
+ cursor.execute("delete from locks"
+ " where lock_file = ?",
+ (self.lock_file,))
+ self.connection.commit()
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/symlinklockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/symlinklockfile.py
new file mode 100755
index 00000000..57551a36
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/symlinklockfile.py
@@ -0,0 +1,69 @@
+from __future__ import absolute_import
+
+import time
+import os
+
+from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
+ AlreadyLocked)
+
+class SymlinkLockFile(LockBase):
+ """Lock access to a file using symlink(2)."""
+
+ def __init__(self, path, threaded=True, timeout=None):
+ # super(SymlinkLockFile).__init(...)
+ LockBase.__init__(self, path, threaded, timeout)
+ # split it back!
+ self.unique_name = os.path.split(self.unique_name)[1]
+
+ def acquire(self, timeout=None):
+ # Hopefully unnecessary for symlink.
+ #try:
+ # open(self.unique_name, "wb").close()
+ #except IOError:
+ # raise LockFailed("failed to create %s" % self.unique_name)
+ timeout = timeout if timeout is not None else self.timeout
+ end_time = time.time()
+ if timeout is not None and timeout > 0:
+ end_time += timeout
+
+ while True:
+ # Try and create a symbolic link to it.
+ try:
+ os.symlink(self.unique_name, self.lock_file)
+ except OSError:
+ # Link creation failed. Maybe we've double-locked?
+ if self.i_am_locking():
+ # Linked to our unique name. Proceed.
+ return
+ else:
+ # Otherwise the lock creation failed.
+ if timeout is not None and time.time() > end_time:
+ if timeout > 0:
+ raise LockTimeout("Timeout waiting to acquire"
+ " lock for %s" %
+ self.path)
+ else:
+ raise AlreadyLocked("%s is already locked" %
+ self.path)
+ time.sleep(timeout/10 if timeout is not None else 0.1)
+ else:
+ # Link creation succeeded. We're good to go.
+ return
+
+ def release(self):
+ if not self.is_locked():
+ raise NotLocked("%s is not locked" % self.path)
+ elif not self.i_am_locking():
+ raise NotMyLock("%s is locked, but not by me" % self.path)
+ os.unlink(self.lock_file)
+
+ def is_locked(self):
+ return os.path.islink(self.lock_file)
+
+ def i_am_locking(self):
+ return os.path.islink(self.lock_file) and \
+ os.readlink(self.lock_file) == self.unique_name
+
+ def break_lock(self):
+ if os.path.islink(self.lock_file): # exists && link
+ os.unlink(self.lock_file)
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.cfg b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.cfg
new file mode 100755
index 00000000..c1fb3984
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.cfg
@@ -0,0 +1,39 @@
+[metadata]
+name = lockfile
+summary = Platform-independent file locking module
+description-file =
+ README
+author = OpenStack
+author-email = openstack-dev@lists.openstack.org
+home-page = http://launchpad.net/pylockfile
+classifier =
+ Intended Audience :: Developers
+ License :: OSI Approved :: MIT License
+ Operating System :: POSIX :: Linux
+ Operating System :: MacOS
+ Operating System :: Microsoft :: Windows :: Windows NT/2000
+ Operating System :: POSIX
+ Programming Language :: Python
+ Programming Language :: Python :: 2
+ Programming Language :: Python :: 2.7
+ Programming Language :: Python :: 2.6
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3.3
+ Topic :: Software Development :: Libraries :: Python Modules
+
+[files]
+packages = lockfile
+
+[pbr]
+warnerrors = true
+
+[build_sphinx]
+source-dir = doc/source
+build-dir = doc/build
+all_files = 1
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.py
new file mode 100755
index 00000000..73637574
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# Solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa
+except ImportError:
+ pass
+
+setuptools.setup(
+ setup_requires=['pbr'],
+ pbr=True)
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test-requirements.txt b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test-requirements.txt
new file mode 100755
index 00000000..2e087ff1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test-requirements.txt
@@ -0,0 +1,2 @@
+nose
+sphinx>=1.1.2,!=1.2.0,<1.3
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/compliancetest.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/compliancetest.py
new file mode 100755
index 00000000..e0258b11
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/compliancetest.py
@@ -0,0 +1,261 @@
+import os
+import threading
+import shutil
+
+import lockfile
+
+class ComplianceTest(object):
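+    # Shared compliance mixin: the concrete Test* classes in
+    # test_lockfile.py set ``class_to_test`` to one lock implementation, and
+    # setup() temporarily installs it as lockfile.LockFile for each test.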
+ def __init__(self):
+ self.saved_class = lockfile.LockFile
+
+ def _testfile(self):
+ """Return platform-appropriate file. Helper for tests."""
+ import tempfile
+ return os.path.join(tempfile.gettempdir(), 'trash-%s' % os.getpid())
+
+ def setup(self):
+ lockfile.LockFile = self.class_to_test
+
+ def teardown(self):
+ try:
+ tf = self._testfile()
+ if os.path.isdir(tf):
+ shutil.rmtree(tf)
+ elif os.path.isfile(tf):
+ os.unlink(tf)
+ elif not os.path.exists(tf):
+ pass
+ else:
+ raise SystemError("unrecognized file: %s" % tf)
+ finally:
+ lockfile.LockFile = self.saved_class
+
+ def _test_acquire_helper(self, tbool):
+ # As simple as it gets.
+ lock = lockfile.LockFile(self._testfile(), threaded=tbool)
+ lock.acquire()
+ assert lock.i_am_locking()
+ lock.release()
+ assert not lock.is_locked()
+
+## def test_acquire_basic_threaded(self):
+## self._test_acquire_helper(True)
+
+ def test_acquire_basic_unthreaded(self):
+ self._test_acquire_helper(False)
+
+ def _test_acquire_no_timeout_helper(self, tbool):
+ # No timeout test
+ e1, e2 = threading.Event(), threading.Event()
+ t = _in_thread(self._lock_wait_unlock, e1, e2)
+ e1.wait() # wait for thread t to acquire lock
+ lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
+ assert lock2.is_locked()
+ if tbool:
+ assert not lock2.i_am_locking()
+ else:
+ assert lock2.i_am_locking()
+
+ try:
+ lock2.acquire(timeout=-1)
+ except lockfile.AlreadyLocked:
+ pass
+ else:
+ lock2.release()
+ raise AssertionError("did not raise AlreadyLocked in"
+ " thread %s" %
+ threading.current_thread().get_name())
+
+ try:
+ lock2.acquire(timeout=0)
+ except lockfile.AlreadyLocked:
+ pass
+ else:
+ lock2.release()
+ raise AssertionError("did not raise AlreadyLocked in"
+ " thread %s" %
+ threading.current_thread().get_name())
+
+ e2.set() # tell thread t to release lock
+ t.join()
+
+## def test_acquire_no_timeout_threaded(self):
+## self._test_acquire_no_timeout_helper(True)
+
+## def test_acquire_no_timeout_unthreaded(self):
+## self._test_acquire_no_timeout_helper(False)
+
+ def _test_acquire_timeout_helper(self, tbool):
+ # Timeout test
+ e1, e2 = threading.Event(), threading.Event()
+ t = _in_thread(self._lock_wait_unlock, e1, e2)
+ e1.wait() # wait for thread t to acquire lock
+ lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
+ assert lock2.is_locked()
+ try:
+ lock2.acquire(timeout=0.1)
+ except lockfile.LockTimeout:
+ pass
+ else:
+ lock2.release()
+ raise AssertionError("did not raise LockTimeout in thread %s" %
+ threading.current_thread().get_name())
+
+ e2.set()
+ t.join()
+
+ def test_acquire_timeout_threaded(self):
+ self._test_acquire_timeout_helper(True)
+
+ def test_acquire_timeout_unthreaded(self):
+ self._test_acquire_timeout_helper(False)
+
+ def _test_context_timeout_helper(self, tbool):
+ # Timeout test
+ e1, e2 = threading.Event(), threading.Event()
+ t = _in_thread(self._lock_wait_unlock, e1, e2)
+ e1.wait() # wait for thread t to acquire lock
+ lock2 = lockfile.LockFile(self._testfile(), threaded=tbool,
+ timeout=0.2)
+ assert lock2.is_locked()
+ try:
+ lock2.acquire()
+ except lockfile.LockTimeout:
+ pass
+ else:
+ lock2.release()
+ raise AssertionError("did not raise LockTimeout in thread %s" %
+ threading.current_thread().get_name())
+
+ e2.set()
+ t.join()
+
+ def test_context_timeout_unthreaded(self):
+ self._test_context_timeout_helper(False)
+
+ def _test_release_basic_helper(self, tbool):
+ lock = lockfile.LockFile(self._testfile(), threaded=tbool)
+ lock.acquire()
+ assert lock.is_locked()
+ lock.release()
+ assert not lock.is_locked()
+ assert not lock.i_am_locking()
+ try:
+ lock.release()
+ except lockfile.NotLocked:
+ pass
+ except lockfile.NotMyLock:
+ raise AssertionError('unexpected exception: %s' %
+ lockfile.NotMyLock)
+ else:
+ raise AssertionError('erroneously unlocked file')
+
+## def test_release_basic_threaded(self):
+## self._test_release_basic_helper(True)
+
+ def test_release_basic_unthreaded(self):
+ self._test_release_basic_helper(False)
+
+## def test_release_from_thread(self):
+## e1, e2 = threading.Event(), threading.Event()
+## t = _in_thread(self._lock_wait_unlock, e1, e2)
+## e1.wait()
+## lock2 = lockfile.LockFile(self._testfile(), threaded=False)
+## assert not lock2.i_am_locking()
+## try:
+## lock2.release()
+## except lockfile.NotMyLock:
+## pass
+## else:
+## raise AssertionError('erroneously unlocked a file locked'
+## ' by another thread.')
+## e2.set()
+## t.join()
+
+ def _test_is_locked_helper(self, tbool):
+ lock = lockfile.LockFile(self._testfile(), threaded=tbool)
+ lock.acquire(timeout=2)
+ assert lock.is_locked()
+ lock.release()
+ assert not lock.is_locked(), "still locked after release!"
+
+## def test_is_locked_threaded(self):
+## self._test_is_locked_helper(True)
+
+ def test_is_locked_unthreaded(self):
+ self._test_is_locked_helper(False)
+
+## def test_i_am_locking_threaded(self):
+## self._test_i_am_locking_helper(True)
+
+ def test_i_am_locking_unthreaded(self):
+ self._test_i_am_locking_helper(False)
+
+ def _test_i_am_locking_helper(self, tbool):
+ lock1 = lockfile.LockFile(self._testfile(), threaded=tbool)
+ assert not lock1.is_locked()
+ lock1.acquire()
+ try:
+ assert lock1.i_am_locking()
+ lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
+ assert lock2.is_locked()
+ if tbool:
+ assert not lock2.i_am_locking()
+ finally:
+ lock1.release()
+
+ def _test_break_lock_helper(self, tbool):
+ lock = lockfile.LockFile(self._testfile(), threaded=tbool)
+ lock.acquire()
+ assert lock.is_locked()
+ lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
+ assert lock2.is_locked()
+ lock2.break_lock()
+ assert not lock2.is_locked()
+ try:
+ lock.release()
+ except lockfile.NotLocked:
+ pass
+ else:
+ raise AssertionError('break lock failed')
+
+## def test_break_lock_threaded(self):
+## self._test_break_lock_helper(True)
+
+ def test_break_lock_unthreaded(self):
+ self._test_break_lock_helper(False)
+
+ def _lock_wait_unlock(self, event1, event2):
+ """Lock from another thread. Helper for tests."""
+ l = lockfile.LockFile(self._testfile())
+ l.acquire()
+ try:
+ event1.set() # we're in,
+ event2.wait() # wait for boss's permission to leave
+ finally:
+ l.release()
+
+ def test_enter(self):
+ lock = lockfile.LockFile(self._testfile())
+ lock.acquire()
+ try:
+ assert lock.is_locked(), "Not locked after acquire!"
+ finally:
+ lock.release()
+ assert not lock.is_locked(), "still locked after release!"
+
+ def test_decorator(self):
+ @lockfile.locked(self._testfile())
+ def func(a, b):
+ return a + b
+ assert func(4, 3) == 7
+
+def _in_thread(func, *args, **kwargs):
+ """Execute func(*args, **kwargs) after dt seconds. Helper for tests."""
+ """Execute func(*args, **kwargs) in a daemon thread. Helper for tests."""
+ func(*args, **kwargs)
+ t = threading.Thread(target=_f, name='/*/*')
+ t.setDaemon(True)
+ t.start()
+ return t
+
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/test_lockfile.py b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/test_lockfile.py
new file mode 100755
index 00000000..e1f4f72f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/test_lockfile.py
@@ -0,0 +1,36 @@
+import sys
+
+import lockfile.linklockfile
+import lockfile.mkdirlockfile
+import lockfile.pidlockfile
+import lockfile.symlinklockfile
+
+from compliancetest import ComplianceTest
+
+class TestLinkLockFile(ComplianceTest):
+ class_to_test = lockfile.linklockfile.LinkLockFile
+
+class TestSymlinkLockFile(ComplianceTest):
+ class_to_test = lockfile.symlinklockfile.SymlinkLockFile
+
+class TestMkdirLockFile(ComplianceTest):
+ class_to_test = lockfile.mkdirlockfile.MkdirLockFile
+
+class TestPIDLockFile(ComplianceTest):
+ class_to_test = lockfile.pidlockfile.PIDLockFile
+
+# Check backwards compatibility
+class TestLinkFileLock(ComplianceTest):
+ class_to_test = lockfile.LinkFileLock
+
+class TestMkdirFileLock(ComplianceTest):
+ class_to_test = lockfile.MkdirFileLock
+
+try:
+ import sqlite3
+except ImportError:
+ pass
+else:
+ import lockfile.sqlitelockfile
+ class TestSQLiteLockFile(ComplianceTest):
+ class_to_test = lockfile.sqlitelockfile.SQLiteLockFile
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/tox.ini b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/tox.ini
new file mode 100755
index 00000000..b0a868a3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/tox.ini
@@ -0,0 +1,28 @@
+# content of: tox.ini, put in same dir as setup.py
+[tox]
+envlist = py26,py27,py32,py33,py34
+
+[testenv]
+deps = -r{toxinidir}/test-requirements.txt
+commands=nosetests
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:pep8]
+deps = flake8
+commands = flake8
+
+[testenv:docs]
+commands = python setup.py build_sphinx
+
+[testenv:cover]
+deps = {[testenv]deps}
+ coverage
+commands =
+ nosetests --with-coverage --cover-erase --cover-package=lockfile --cover-inclusive []
+
+[flake8]
+ignore = E121,E123,E128,E221,E226,E261,E265,E301,E302,E713,F401,F841,W291,W293,W391
+exclude=.venv,.git,.tox,dist,doc
+show-source = True
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/ChangeLog b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/ChangeLog
new file mode 100755
index 00000000..4975f781
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/ChangeLog
@@ -0,0 +1,380 @@
+Version 2.0.5
+=============
+
+:Released: 2015-02-02
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Refine compatibility of exceptions for file operations.
+* Specify the text encoding when opening the changelog file.
+
+
+Version 2.0.4
+=============
+
+:Released: 2015-01-23
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Record version info via Setuptools commands.
+* Remove the custom Setuptools entry points.
+ This closes Alioth bug#314948.
+
+
+Version 2.0.3
+=============
+
+:Released: 2015-01-14
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Break circular import dependency for ‘setup.py’.
+* Refactor all initial metadata functionality to ‘daemon._metadata’.
+* Distribute ‘version’ (and its tests) only in source, not install.
+* Build a “universal” (Python 2 and Python 3) wheel.
+
+
+Version 2.0.2
+=============
+
+:Released: 2015-01-13
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Declare test-time dependency on recent ‘unittest2’.
+* Declare packaging-time dependency on ‘docutils’ library.
+* Include unit tests for ‘version’ module with source distribution.
+* Record version info consistent with distribution metadata.
+
+
+Version 2.0.1
+=============
+
+:Released: 2015-01-11
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Include the ‘version’ module with source distribution.
+
+
+Version 2.0
+===========
+
+:Released: 2015-01-10
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Support both Python 3 (version 3.2 or later) and Python 2 (version
+ 2.7 or later).
+* Document the API of all functions comprehensively in docstrings.
+* Add a hacking guide for developers.
+* Add explicit credit for contributors.
+* Document the security impact of the default umask.
+
+* Specify explicit text or binary mode when opening files.
+* Preserve exception context in custom exceptions.
+
+* Declare compatibility with current Python versions.
+* Depend on Python 3 compatible libraries.
+* Update package homepage to Alioth hosted project page.
+* Use ‘pydoc.splitdoc’ to get package description text.
+* Remove ASCII translation of package description, not needed now the
+ docstring is a proper Unicode text value.
+* Include test suite with source distribution.
+* Move package metadata to ‘daemon/_metadata.py’.
+* Migrate to JSON (instead of Python) for serialised version info.
+* Add unit tests for metadata.
+* Store and retrieve version info in Setuptools metadata.
+
+* Migrate to ‘str.format’ for interpolation of values into text.
+* Migrate to ‘mock’ library for mock objects in tests.
+* Migrate to ‘testscenarios’ library for unit test scenarios.
+* Migrate to ‘unittest2’ library for back-ported improvements.
+ Remove custom test suite creation.
+* Discriminate Python 2-and-3 compatible usage of dict methods.
+* Discriminate Python 2-and-3 compatible bytes versus text.
+* Declare explicit absolute and relative imports.
+* Discriminate between different ‘fileno’ method behaviours.
+ In Python 3, ‘StringIO.fileno’ is callable but raises an exception.
+* Migrate to built-in ‘next’ function.
+* Wrap the ‘fromlist’ parameter of ‘__import__’ for Python 3
+ compatibility.
+* Wrap function introspection for Python 3 compatibility.
+* Wrap standard library imports where names changed in Python 3.
+
+
+Version 1.6.1
+=============
+
+:Released: 2014-08-04
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Use unambiguous “except FooType as foo” syntax.
+ This is to ease the port to Python 3, where the ambiguous comma
+ usage is an error.
+* Ensure a ‘basestring’ name bound to the base type for strings.
+ This is to allow checks to work on Python 2 and 3.
+* Specify versions of Python supported, as trove classifiers.
+
+* Update copyright notices.
+* Add editor hints for most files.
+* Distinguish continuation-line indentation versus block indentation.
+
+* Use unicode literals by default, specifying bytes where necessary.
+ This is to ease the port to Python 3, where the default string type
+ is unicode.
+* Update copyright notices.
+* Update the GPL license file to version 3, as declared in our
+ copyright notices.
+
+* Change license of library code to Apache License 2.0. Rationale at
+ <URL:http://wiki.python.org/moin/PythonSoftwareFoundationLicenseFaq#Contributing_Code_to_Python>.
+
+
+Version 1.6
+===========
+
+:Released: 2010-05-10
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Use absolute imports to disambiguate provenance of names.
+* setup.py: Require ‘lockfile >=0.9’.
+* daemon/pidfile.py: Renamed from ‘daemon/pidlockfile.py’. Change
+ references elsewhere to use this new name.
+* test/test_pidfile.py: Renamed from ‘test/test_pidlockfile.py’.
+ Change references elsewhere to use this new name.
+* daemon/pidfile.py: Remove functionality now migrated to ‘lockfile’
+ library.
+
+* FAQ: Add some entries and re-structure the document.
+
+* Use ‘unicode’ data type for all text values.
+* Prepare for Python 3 upgrade by tweaking some names and imports.
+
+* MANIFEST.in: Include the documentation in the distribution.
+
+
+Version 1.5.5
+=============
+
+:Released: 2010-03-02
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Stop using ‘pkg_resources’ and revert to pre-1.5.3 version-string
+ handling, until a better way that doesn't break everyone else's
+ installation can be found.
+
+
+Version 1.5.4
+=============
+
+:Released: 2010-02-27
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* MANIFEST.in: Explicitly include version data file, otherwise
+ everything breaks for users of the sdist.
+
+
+Version 1.5.3
+=============
+
+:Released: 2010-02-26
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* daemon/daemon.py: Invoke the pidfile context manager's ‘__exit__’
+ method with the correct arguments (as per
+ <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>).
+ Thanks to Ludvig Ericson for the bug report.
+* version: New plain-text data file to store project version string.
+* setup.py: Read version string from data file.
+* daemon/version/__init__.py: Query version string with ‘pkg_resources’.
+
+* Add ‘pylint’ configuration for this project.
+* Update copyright notices.
+
+
+Version 1.5.2
+=============
+
+:Released: 2009-10-24
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Ensure we only prevent core dumps if ‘prevent_core’ is true.
+ Thanks to Denis Bilenko for reporting the lacking implementation of
+ this documented option.
+
+* Add initial Frequently Asked Questions document.
+
+
+Version 1.5.1
+=============
+
+:Released: 2009-09-26
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Make a separate collection of DaemonRunner test scenarios.
+* Handle a start request with a timeout on the PID file lock acquire.
+
+* Implement ‘TimeoutPIDLockFile’ to specify a timeout in advance of
+ lock acquisition.
+* Use lock with timeout for ‘DaemonRunner’.
+
+
+Version 1.5
+===========
+
+:Released: 2009-09-24
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Make a separate collection of PIDLockFile test scenarios.
+
+* Raise specific errors on ‘DaemonRunner’ failures.
+* Distinguish different conditions on reading and parsing PID file.
+* Refactor code to ‘_terminate_daemon_process’ method.
+* Improve explanations in comments and docstrings.
+* Don't set pidfile at all if no path specified to constructor.
+* Write the PID file using correct OS locking and permissions.
+* Close the PID file after writing.
+* Implement ‘PIDLockFile’ as subclass of ‘lockfile.LinkFileLock’.
+* Remove redundant checks for file existence.
+
+* Manage the excluded file descriptors as a set (not a list).
+* Only inspect the file descriptor of streams if they actually have
+ one (via a ‘fileno’ method) when determining which file descriptors
+ to close. Thanks to Ask Solem for revealing this bug.
+
+
+Version 1.4.8
+=============
+
+:Released: 2009-09-17
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Remove child-exit signal (‘SIGCLD’, ‘SIGCHLD’) from default signal
+ map. Thanks to Joel Martin for pinpointing this issue.
+* Document requirement for ensuring any operating-system specific
+ signal handlers are considered.
+* Refactor ‘fork_then_exit_parent’ functionality to avoid duplicate
+ code.
+* Remove redundant imports.
+* Remove unused code from unit test suite scaffold.
+* Add specific license terms for unit test suite scaffold.
+
+
+Version 1.4.7
+=============
+
+:Released: 2009-09-03
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Fix keywords argument for distribution setup.
+* Exclude ‘test’ package from distribution installation.
+
+
+Version 1.4.6
+=============
+
+:Released: 2009-06-21
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Update documentation for changes from latest PEP 3143 revision.
+* Implement DaemonContext.is_open method.
+
+
+Version 1.4.5
+=============
+
+:Released: 2009-05-17
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Register DaemonContext.close method for atexit processing.
+* Move PID file cleanup to close method.
+* Improve docstrings by reference to, and copy from, PEP 3143.
+* Use mock checking capabilities of newer ‘MiniMock’ library.
+* Automate building a versioned distribution tarball.
+* Include developer documentation files in source distribution.
+
+
+Version 1.4.4
+=============
+
+:Released: 2009-03-26
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Conform to current PEP version, now released as PEP 3143 “Standard
+ daemon process library”.
+* Ensure UID and GID are set in correct order.
+* Delay closing all open files until just before re-binding standard
+ streams.
+* Redirect standard streams to null device by default.
+
+
+Version 1.4.3
+=============
+
+:Released: 2009-03-19
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Close the PID file context on exit.
+
+
+Version 1.4.2
+=============
+
+:Released: 2009-03-18
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Context manager methods for DaemonContext.
+
+
+Version 1.4.1
+=============
+
+:Released: 2009-03-18
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Improvements to docstrings.
+* Further conformance with draft PEP.
+
+
+Version 1.4
+===========
+
+:Released: 2009-03-17
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Implement the interface from a draft PEP for process daemonisation.
+* Complete statement coverage from unit test suite.
+
+
+Version 1.3
+===========
+
+:Released: 2009-03-12
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Separate controller (now ‘DaemonRunner’) from daemon process
+ context (now ‘DaemonContext’).
+* Fix many corner cases and bugs.
+* Huge increase in unit test suite.
+
+
+Version 1.2
+===========
+
+:Released: 2009-01-27
+:Maintainer: Ben Finney <ben+python@benfinney.id.au>
+
+* Initial release of this project forked from ‘bda.daemon’. Thanks,
+ Robert Niederreiter.
+* Refactor some functionality out to helper functions.
+* Begin unit test suite.
+
+
+..
+ This is free software: you may copy, modify, and/or distribute this work
+ under the terms of the Apache License version 2.0 as published by the
+ Apache Software Foundation.
+ No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+..
+ Local variables:
+ coding: utf-8
+ mode: text
+ mode: rst
+ End:
+ vim: fileencoding=utf-8 filetype=rst :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.ASF-2 b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.ASF-2
new file mode 100755
index 00000000..d6456956
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.ASF-2
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.GPL-3 b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.GPL-3
new file mode 100755
index 00000000..94a9ed02
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.GPL-3
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/MANIFEST.in b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/MANIFEST.in
new file mode 100755
index 00000000..d3d4341e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/MANIFEST.in
@@ -0,0 +1,7 @@
+include MANIFEST.in
+include LICENSE.*
+include ChangeLog
+recursive-include doc *
+include version.py
+include test_version.py
+recursive-include test *.py
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/PKG-INFO
new file mode 100755
index 00000000..fd81f509
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/PKG-INFO
@@ -0,0 +1,38 @@
+Metadata-Version: 1.1
+Name: python-daemon
+Version: 2.0.5
+Summary: Library to implement a well-behaved Unix daemon process.
+Home-page: https://alioth.debian.org/projects/python-daemon/
+Author: Ben Finney
+Author-email: ben+python@benfinney.id.au
+License: Apache-2
+Description: This library implements the well-behaved daemon specification of
+ :pep:`3143`, “Standard daemon process library”.
+
+ A well-behaved Unix daemon process is tricky to get right, but the
+ required steps are much the same for every daemon program. A
+ `DaemonContext` instance holds the behaviour and configured
+ process environment for the program; use the instance as a context
+ manager to enter a daemon state.
+
+ Simple example of usage::
+
+ import daemon
+
+ from spam import do_main_program
+
+ with daemon.DaemonContext():
+ do_main_program()
+
+ Customisation of the steps to become a daemon is available by
+ setting options on the `DaemonContext` instance; see the
+ documentation for that class for each option.
+Keywords: daemon,fork,unix
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/__init__.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/__init__.py
new file mode 100755
index 00000000..4731a6ef
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/__init__.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+# daemon/__init__.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2009–2015 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2006 Robert Niederreiter
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Library to implement a well-behaved Unix daemon process.
+
+ This library implements the well-behaved daemon specification of
+ :pep:`3143`, “Standard daemon process library”.
+
+ A well-behaved Unix daemon process is tricky to get right, but the
+ required steps are much the same for every daemon program. A
+ `DaemonContext` instance holds the behaviour and configured
+ process environment for the program; use the instance as a context
+ manager to enter a daemon state.
+
+ Simple example of usage::
+
+ import daemon
+
+ from spam import do_main_program
+
+ with daemon.DaemonContext():
+ do_main_program()
+
+ Customisation of the steps to become a daemon is available by
+ setting options on the `DaemonContext` instance; see the
+ documentation for that class for each option.
+
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+from .daemon import DaemonContext
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
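
The module docstring above repeats the minimal usage. As a slightly fuller, hypothetical sketch (the paths and `do_main_program` are placeholders; the options are documented on the `DaemonContext` class introduced later in this change)::

    import daemon

    from spam import do_main_program

    # Hypothetical log file; file objects passed as stdout/stderr are
    # preserved across the daemonisation step.
    log_file = open("/var/log/spam.log", "w+")

    context = daemon.DaemonContext(
            working_directory="/var/lib/spam",   # hypothetical directory
            umask=0o022,
            stdout=log_file,
            stderr=log_file,
            )

    with context:
        do_main_program()
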
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/_metadata.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/_metadata.py
new file mode 100755
index 00000000..6d22a2b7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/_metadata.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+
+# daemon/_metadata.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Package metadata for the ‘python-daemon’ distribution. """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import json
+import re
+import collections
+import datetime
+
+import pkg_resources
+
+
+distribution_name = "python-daemon"
+version_info_filename = "version_info.json"
+
+def get_distribution_version_info(filename=version_info_filename):
+ """ Get the version info from the installed distribution.
+
+ :param filename: Base filename of the version info resource.
+    :return: The version info as a mapping of fields. If the
+        distribution is not available, the default "UNKNOWN" values remain.
+
+ The version info is stored as a metadata file in the
+ distribution.
+
+ """
+ version_info = {
+ 'release_date': "UNKNOWN",
+ 'version': "UNKNOWN",
+ 'maintainer': "UNKNOWN",
+ }
+
+ try:
+ distribution = pkg_resources.get_distribution(distribution_name)
+ except pkg_resources.DistributionNotFound:
+ distribution = None
+
+ if distribution is not None:
+ if distribution.has_metadata(version_info_filename):
+ content = distribution.get_metadata(version_info_filename)
+ version_info = json.loads(content)
+
+ return version_info
+
+version_info = get_distribution_version_info()
+
+version_installed = version_info['version']
+
+
+rfc822_person_regex = re.compile(
+ "^(?P<name>[^<]+) <(?P<email>[^>]+)>$")
+
+ParsedPerson = collections.namedtuple('ParsedPerson', ['name', 'email'])
+
+def parse_person_field(value):
+ """ Parse a person field into name and email address.
+
+ :param value: The text value specifying a person.
+ :return: A 2-tuple (name, email) for the person's details.
+
+ If the `value` does not match a standard person with email
+ address, the `email` item is ``None``.
+
+ """
+ result = (None, None)
+
+ match = rfc822_person_regex.match(value)
+ if len(value):
+ if match is not None:
+ result = ParsedPerson(
+ name=match.group('name'),
+ email=match.group('email'))
+ else:
+ result = ParsedPerson(name=value, email=None)
+
+ return result
+
+author_name = "Ben Finney"
+author_email = "ben+python@benfinney.id.au"
+author = "{name} <{email}>".format(name=author_name, email=author_email)
+
+
+class YearRange:
+ """ A range of years spanning a period. """
+
+ def __init__(self, begin, end=None):
+ self.begin = begin
+ self.end = end
+
+ def __unicode__(self):
+ text = "{range.begin:04d}".format(range=self)
+ if self.end is not None:
+ if self.end > self.begin:
+ text = "{range.begin:04d}–{range.end:04d}".format(range=self)
+ return text
+
+ __str__ = __unicode__
+
+
+def make_year_range(begin_year, end_date=None):
+ """ Construct the year range given a start and possible end date.
+
+    :param begin_year: The beginning year (text) for the range.
+ :param end_date: The end date (text, ISO-8601 format) for the
+ range, or a non-date token string.
+ :return: The range of years as a `YearRange` instance.
+
+ If the `end_date` is not a valid ISO-8601 date string, the
+ range has ``None`` for the end year.
+
+ """
+ begin_year = int(begin_year)
+
+ try:
+ end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
+ except (TypeError, ValueError):
+ # Specified end_date value is not a valid date.
+ end_year = None
+ else:
+ end_year = end_date.year
+
+ year_range = YearRange(begin=begin_year, end=end_year)
+
+ return year_range
+
+copyright_year_begin = "2001"
+build_date = version_info['release_date']
+copyright_year_range = make_year_range(copyright_year_begin, build_date)
+
+copyright = "Copyright © {year_range} {author} and others".format(
+ year_range=copyright_year_range, author=author)
+license = "Apache-2"
+url = "https://alioth.debian.org/projects/python-daemon/"
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
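
For orientation, a hypothetical session with the helpers defined above (the import assumes the distribution is installed; the values are the ones hard-coded in this module)::

    from daemon import _metadata

    person = _metadata.parse_person_field(
            "Ben Finney <ben+python@benfinney.id.au>")
    # person.name == "Ben Finney"
    # person.email == "ben+python@benfinney.id.au"

    year_range = _metadata.make_year_range("2001", "2015-01-10")
    # year_range.begin == 2001 and year_range.end == 2015;
    # its text form is "2001–2015" (an en dash joins the years).
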
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/daemon.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/daemon.py
new file mode 100755
index 00000000..07810cf1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/daemon.py
@@ -0,0 +1,926 @@
+# -*- coding: utf-8 -*-
+
+# daemon/daemon.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
+# Copyright © 2004–2005 Chad J. Schroeder
+# Copyright © 2003 Clark Evans
+# Copyright © 2002 Noah Spurrier
+# Copyright © 2001 Jürgen Hermann
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Daemon process behaviour.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import os
+import sys
+import resource
+import errno
+import signal
+import socket
+import atexit
+try:
+ # Python 2 has both ‘str’ (bytes) and ‘unicode’ (text).
+ basestring = basestring
+ unicode = unicode
+except NameError:
+ # Python 3 names the Unicode data type ‘str’.
+ basestring = str
+ unicode = str
+
+
+class DaemonError(Exception):
+ """ Base exception class for errors from this module. """
+
+ def __init__(self, *args, **kwargs):
+ self._chain_from_context()
+
+ super(DaemonError, self).__init__(*args, **kwargs)
+
+ def _chain_from_context(self):
+ _chain_exception_from_existing_exception_context(self, as_cause=True)
+
+
+class DaemonOSEnvironmentError(DaemonError, OSError):
+ """ Exception raised when daemon OS environment setup receives error. """
+
+
+class DaemonProcessDetachError(DaemonError, OSError):
+ """ Exception raised when process detach fails. """
+
+
+class DaemonContext:
+ """ Context for turning the current program into a daemon process.
+
+ A `DaemonContext` instance represents the behaviour settings and
+ process context for the program when it becomes a daemon. The
+ behaviour and environment is customised by setting options on the
+ instance, before calling the `open` method.
+
+ Each option can be passed as a keyword argument to the `DaemonContext`
+ constructor, or subsequently altered by assigning to an attribute on
+ the instance at any time prior to calling `open`. That is, for
+ options named `wibble` and `wubble`, the following invocation::
+
+ foo = daemon.DaemonContext(wibble=bar, wubble=baz)
+ foo.open()
+
+ is equivalent to::
+
+ foo = daemon.DaemonContext()
+ foo.wibble = bar
+ foo.wubble = baz
+ foo.open()
+
+ The following options are defined.
+
+ `files_preserve`
+ :Default: ``None``
+
+ List of files that should *not* be closed when starting the
+ daemon. If ``None``, all open file descriptors will be closed.
+
+ Elements of the list are file descriptors (as returned by a file
+ object's `fileno()` method) or Python `file` objects. Each
+ specifies a file that is not to be closed during daemon start.
+
+ `chroot_directory`
+ :Default: ``None``
+
+ Full path to a directory to set as the effective root directory of
+ the process. If ``None``, specifies that the root directory is not
+ to be changed.
+
+ `working_directory`
+ :Default: ``'/'``
+
+ Full path of the working directory to which the process should
+ change on daemon start.
+
+ Since a filesystem cannot be unmounted if a process has its
+ current working directory on that filesystem, this should either
+ be left at default or set to a directory that is a sensible “home
+ directory” for the daemon while it is running.
+
+ `umask`
+ :Default: ``0``
+
+ File access creation mask (“umask”) to set for the process on
+ daemon start.
+
+ A daemon should not rely on the parent process's umask value,
+ which is beyond its control and may prevent creating a file with
+ the required access mode. So when the daemon context opens, the
+ umask is set to an explicit known value.
+
+ If the conventional value of 0 is too open, consider setting a
+ value such as 0o022, 0o027, 0o077, or another specific value.
+ Otherwise, ensure the daemon creates every file with an
+ explicit access mode for the purpose.
+
+ `pidfile`
+ :Default: ``None``
+
+ Context manager for a PID lock file. When the daemon context opens
+ and closes, it enters and exits the `pidfile` context manager.
+
+ `detach_process`
+ :Default: ``None``
+
+ If ``True``, detach the process context when opening the daemon
+ context; if ``False``, do not detach.
+
+ If unspecified (``None``) during initialisation of the instance,
+ this will be set to ``True`` by default, and ``False`` only if
+ detaching the process is determined to be redundant; for example,
+ in the case when the process was started by `init`, by `initd`, or
+ by `inetd`.
+
+ `signal_map`
+ :Default: system-dependent
+
+ Mapping from operating system signals to callback actions.
+
+ The mapping is used when the daemon context opens, and determines
+ the action for each signal's signal handler:
+
+ * A value of ``None`` will ignore the signal (by setting the
+ signal action to ``signal.SIG_IGN``).
+
+ * A string value will be used as the name of an attribute on the
+ ``DaemonContext`` instance. The attribute's value will be used
+ as the action for the signal handler.
+
+ * Any other value will be used as the action for the
+ signal handler. See the ``signal.signal`` documentation
+ for details of the signal handler interface.
+
+ The default value depends on which signals are defined on the
+ running system. Each item from the list below whose signal is
+ actually defined in the ``signal`` module will appear in the
+ default map:
+
+ * ``signal.SIGTTIN``: ``None``
+
+ * ``signal.SIGTTOU``: ``None``
+
+ * ``signal.SIGTSTP``: ``None``
+
+ * ``signal.SIGTERM``: ``'terminate'``
+
+ Depending on how the program will interact with its child
+ processes, it may need to specify a signal map that
+ includes the ``signal.SIGCHLD`` signal (received when a
+ child process exits). See the specific operating system's
+ documentation for more detail on how to determine what
+ circumstances dictate the need for signal handlers.
+
+ `uid`
+ :Default: ``os.getuid()``
+
+ `gid`
+ :Default: ``os.getgid()``
+
+ The user ID (“UID”) value and group ID (“GID”) value to switch
+ the process to on daemon start.
+
+ The default values, the real UID and GID of the process, will
+ relinquish any effective privilege elevation inherited by the
+ process.
+
+ `prevent_core`
+ :Default: ``True``
+
+ If true, prevents the generation of core files, in order to avoid
+ leaking sensitive information from daemons run as `root`.
+
+ `stdin`
+ :Default: ``None``
+
+ `stdout`
+ :Default: ``None``
+
+ `stderr`
+ :Default: ``None``
+
+ Each of `stdin`, `stdout`, and `stderr` is a file-like object
+ which will be used as the new file for the standard I/O stream
+ `sys.stdin`, `sys.stdout`, and `sys.stderr` respectively. The file
+ should therefore be open, with a minimum of mode 'r' in the case
+        of `stdin`, and a minimum of mode 'w+' in the case of `stdout` and
+ `stderr`.
+
+ If the object has a `fileno()` method that returns a file
+ descriptor, the corresponding file will be excluded from being
+ closed during daemon start (that is, it will be treated as though
+ it were listed in `files_preserve`).
+
+ If ``None``, the corresponding system stream is re-bound to the
+ file named by `os.devnull`.
+
+ """
+
+ __metaclass__ = type
+
+ def __init__(
+ self,
+ chroot_directory=None,
+ working_directory="/",
+ umask=0,
+ uid=None,
+ gid=None,
+ prevent_core=True,
+ detach_process=None,
+ files_preserve=None,
+ pidfile=None,
+ stdin=None,
+ stdout=None,
+ stderr=None,
+ signal_map=None,
+ ):
+ """ Set up a new instance. """
+ self.chroot_directory = chroot_directory
+ self.working_directory = working_directory
+ self.umask = umask
+ self.prevent_core = prevent_core
+ self.files_preserve = files_preserve
+ self.pidfile = pidfile
+ self.stdin = stdin
+ self.stdout = stdout
+ self.stderr = stderr
+
+ if uid is None:
+ uid = os.getuid()
+ self.uid = uid
+ if gid is None:
+ gid = os.getgid()
+ self.gid = gid
+
+ if detach_process is None:
+ detach_process = is_detach_process_context_required()
+ self.detach_process = detach_process
+
+ if signal_map is None:
+ signal_map = make_default_signal_map()
+ self.signal_map = signal_map
+
+ self._is_open = False
+
+ @property
+ def is_open(self):
+ """ ``True`` if the instance is currently open. """
+ return self._is_open
+
+ def open(self):
+ """ Become a daemon process.
+
+ :return: ``None``.
+
+ Open the daemon context, turning the current program into a daemon
+ process. This performs the following steps:
+
+ * If this instance's `is_open` property is true, return
+ immediately. This makes it safe to call `open` multiple times on
+ an instance.
+
+ * If the `prevent_core` attribute is true, set the resource limits
+ for the process to prevent any core dump from the process.
+
+ * If the `chroot_directory` attribute is not ``None``, set the
+ effective root directory of the process to that directory (via
+ `os.chroot`).
+
+ This allows running the daemon process inside a “chroot gaol”
+ as a means of limiting the system's exposure to rogue behaviour
+ by the process. Note that the specified directory needs to
+ already be set up for this purpose.
+
+ * Set the process UID and GID to the `uid` and `gid` attribute
+ values.
+
+ * Close all open file descriptors. This excludes those listed in
+ the `files_preserve` attribute, and those that correspond to the
+ `stdin`, `stdout`, or `stderr` attributes.
+
+ * Change current working directory to the path specified by the
+ `working_directory` attribute.
+
+ * Reset the file access creation mask to the value specified by
+ the `umask` attribute.
+
+ * If the `detach_process` option is true, detach the current
+ process into its own process group, and disassociate from any
+ controlling terminal.
+
+ * Set signal handlers as specified by the `signal_map` attribute.
+
+ * If any of the attributes `stdin`, `stdout`, `stderr` are not
+ ``None``, bind the system streams `sys.stdin`, `sys.stdout`,
+ and/or `sys.stderr` to the files represented by the
+ corresponding attributes. Where the attribute has a file
+ descriptor, the descriptor is duplicated (instead of re-binding
+ the name).
+
+ * If the `pidfile` attribute is not ``None``, enter its context
+ manager.
+
+ * Mark this instance as open (for the purpose of future `open` and
+ `close` calls).
+
+ * Register the `close` method to be called during Python's exit
+ processing.
+
+ When the function returns, the running program is a daemon
+ process.
+
+ """
+ if self.is_open:
+ return
+
+ if self.chroot_directory is not None:
+ change_root_directory(self.chroot_directory)
+
+ if self.prevent_core:
+ prevent_core_dump()
+
+ change_file_creation_mask(self.umask)
+ change_working_directory(self.working_directory)
+ change_process_owner(self.uid, self.gid)
+
+ if self.detach_process:
+ detach_process_context()
+
+ signal_handler_map = self._make_signal_handler_map()
+ set_signal_handlers(signal_handler_map)
+
+ exclude_fds = self._get_exclude_file_descriptors()
+ close_all_open_files(exclude=exclude_fds)
+
+ redirect_stream(sys.stdin, self.stdin)
+ redirect_stream(sys.stdout, self.stdout)
+ redirect_stream(sys.stderr, self.stderr)
+
+ if self.pidfile is not None:
+ self.pidfile.__enter__()
+
+ self._is_open = True
+
+ register_atexit_function(self.close)
+
+ def __enter__(self):
+ """ Context manager entry point. """
+ self.open()
+ return self
+
+ def close(self):
+ """ Exit the daemon process context.
+
+ :return: ``None``.
+
+ Close the daemon context. This performs the following steps:
+
+ * If this instance's `is_open` property is false, return
+ immediately. This makes it safe to call `close` multiple times
+ on an instance.
+
+ * If the `pidfile` attribute is not ``None``, exit its context
+ manager.
+
+ * Mark this instance as closed (for the purpose of future `open`
+ and `close` calls).
+
+ """
+ if not self.is_open:
+ return
+
+ if self.pidfile is not None:
+ # Follow the interface for telling a context manager to exit,
+ # <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>.
+ self.pidfile.__exit__(None, None, None)
+
+ self._is_open = False
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """ Context manager exit point. """
+ self.close()
+
+ def terminate(self, signal_number, stack_frame):
+ """ Signal handler for end-process signals.
+
+ :param signal_number: The OS signal number received.
+ :param stack_frame: The frame object at the point the
+ signal was received.
+ :return: ``None``.
+
+ Signal handler for the ``signal.SIGTERM`` signal. Performs the
+ following step:
+
+ * Raise a ``SystemExit`` exception explaining the signal.
+
+ """
+ exception = SystemExit(
+ "Terminating on signal {signal_number!r}".format(
+ signal_number=signal_number))
+ raise exception
+
+ def _get_exclude_file_descriptors(self):
+ """ Get the set of file descriptors to exclude closing.
+
+ :return: A set containing the file descriptors for the
+ files to be preserved.
+
+ The file descriptors to be preserved are those from the
+ items in `files_preserve`, and also each of `stdin`,
+ `stdout`, and `stderr`. For each item:
+
+ * If the item is ``None``, it is omitted from the return
+ set.
+
+ * If the item's ``fileno()`` method returns a value, that
+ value is in the return set.
+
+ * Otherwise, the item is in the return set verbatim.
+
+ """
+ files_preserve = self.files_preserve
+ if files_preserve is None:
+ files_preserve = []
+ files_preserve.extend(
+ item for item in [self.stdin, self.stdout, self.stderr]
+ if hasattr(item, 'fileno'))
+
+ exclude_descriptors = set()
+ for item in files_preserve:
+ if item is None:
+ continue
+ file_descriptor = _get_file_descriptor(item)
+ if file_descriptor is not None:
+ exclude_descriptors.add(file_descriptor)
+ else:
+ exclude_descriptors.add(item)
+
+ return exclude_descriptors
+
+ def _make_signal_handler(self, target):
+ """ Make the signal handler for a specified target object.
+
+ :param target: A specification of the target for the
+ handler; see below.
+ :return: The value for use by `signal.signal()`.
+
+ If `target` is ``None``, return ``signal.SIG_IGN``. If `target`
+ is a text string, return the attribute of this instance named
+ by that string. Otherwise, return `target` itself.
+
+ """
+ if target is None:
+ result = signal.SIG_IGN
+ elif isinstance(target, unicode):
+ name = target
+ result = getattr(self, name)
+ else:
+ result = target
+
+ return result
+
+ def _make_signal_handler_map(self):
+ """ Make the map from signals to handlers for this instance.
+
+ :return: The constructed signal map for this instance.
+
+ Construct a map from signal numbers to handlers for this
+ context instance, suitable for passing to
+ `set_signal_handlers`.
+
+ """
+ signal_handler_map = dict(
+ (signal_number, self._make_signal_handler(target))
+ for (signal_number, target) in self.signal_map.items())
+ return signal_handler_map
+
+
+def _get_file_descriptor(obj):
+ """ Get the file descriptor, if the object has one.
+
+ :param obj: The object expected to be a file-like object.
+ :return: The file descriptor iff the file supports it; otherwise
+ ``None``.
+
+ The object may be a non-file object. It may also be a
+ file-like object with no support for a file descriptor. In
+ either case, return ``None``.
+
+ """
+ file_descriptor = None
+ if hasattr(obj, 'fileno'):
+ try:
+ file_descriptor = obj.fileno()
+ except ValueError:
+ # The item doesn't support a file descriptor.
+ pass
+
+ return file_descriptor
+
+
+def change_working_directory(directory):
+ """ Change the working directory of this process.
+
+ :param directory: The target directory path.
+ :return: ``None``.
+
+ """
+ try:
+ os.chdir(directory)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change working directory ({exc})".format(exc=exc))
+ raise error
+
+
+def change_root_directory(directory):
+ """ Change the root directory of this process.
+
+ :param directory: The target directory path.
+ :return: ``None``.
+
+ Set the current working directory, then the process root directory,
+ to the specified `directory`. Requires appropriate OS privileges
+ for this process.
+
+ """
+ try:
+ os.chdir(directory)
+ os.chroot(directory)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change root directory ({exc})".format(exc=exc))
+ raise error
+
+
+def change_file_creation_mask(mask):
+ """ Change the file creation mask for this process.
+
+ :param mask: The numeric file creation mask to set.
+ :return: ``None``.
+
+ """
+ try:
+ os.umask(mask)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change file creation mask ({exc})".format(exc=exc))
+ raise error
+
+
+def change_process_owner(uid, gid):
+ """ Change the owning UID and GID of this process.
+
+ :param uid: The target UID for the daemon process.
+ :param gid: The target GID for the daemon process.
+ :return: ``None``.
+
+ Set the GID then the UID of the process (in that order, to avoid
+ permission errors) to the specified `gid` and `uid` values.
+ Requires appropriate OS privileges for this process.
+
+ """
+ try:
+ os.setgid(gid)
+ os.setuid(uid)
+ except Exception as exc:
+ error = DaemonOSEnvironmentError(
+ "Unable to change process owner ({exc})".format(exc=exc))
+ raise error
+
+
+def prevent_core_dump():
+ """ Prevent this process from generating a core dump.
+
+ :return: ``None``.
+
+ Set the soft and hard limits for core dump size to zero. On Unix,
+    this entirely prevents the process from creating a core dump.
+
+ """
+ core_resource = resource.RLIMIT_CORE
+
+ try:
+ # Ensure the resource limit exists on this platform, by requesting
+ # its current value.
+ core_limit_prev = resource.getrlimit(core_resource)
+ except ValueError as exc:
+ error = DaemonOSEnvironmentError(
+ "System does not support RLIMIT_CORE resource limit"
+ " ({exc})".format(exc=exc))
+ raise error
+
+ # Set hard and soft limits to zero, i.e. no core dump at all.
+ core_limit = (0, 0)
+ resource.setrlimit(core_resource, core_limit)
+
+
+def detach_process_context():
+ """ Detach the process context from parent and session.
+
+ :return: ``None``.
+
+ Detach from the parent process and session group, allowing the
+ parent to exit while this process continues running.
+
+ Reference: “Advanced Programming in the Unix Environment”,
+ section 13.3, by W. Richard Stevens, published 1993 by
+ Addison-Wesley.
+
+ """
+
+ def fork_then_exit_parent(error_message):
+ """ Fork a child process, then exit the parent process.
+
+ :param error_message: Message for the exception in case of a
+ detach failure.
+ :return: ``None``.
+ :raise DaemonProcessDetachError: If the fork fails.
+
+ """
+ try:
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+ except OSError as exc:
+ error = DaemonProcessDetachError(
+ "{message}: [{exc.errno:d}] {exc.strerror}".format(
+ message=error_message, exc=exc))
+ raise error
+
+ fork_then_exit_parent(error_message="Failed first fork")
+ os.setsid()
+ fork_then_exit_parent(error_message="Failed second fork")
+
+
+def is_process_started_by_init():
+ """ Determine whether the current process is started by `init`.
+
+ :return: ``True`` iff the parent process is `init`; otherwise
+ ``False``.
+
+ The `init` process is the one with process ID of 1.
+
+ """
+ result = False
+
+ init_pid = 1
+ if os.getppid() == init_pid:
+ result = True
+
+ return result
+
+
+def is_socket(fd):
+ """ Determine whether the file descriptor is a socket.
+
+ :param fd: The file descriptor to interrogate.
+ :return: ``True`` iff the file descriptor is a socket; otherwise
+ ``False``.
+
+ Query the socket type of `fd`. If there is no error, the file is a
+ socket.
+
+ """
+ result = False
+
+ file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
+
+ try:
+ socket_type = file_socket.getsockopt(
+ socket.SOL_SOCKET, socket.SO_TYPE)
+ except socket.error as exc:
+ exc_errno = exc.args[0]
+ if exc_errno == errno.ENOTSOCK:
+ # Socket operation on non-socket.
+ pass
+ else:
+ # Some other socket error.
+ result = True
+ else:
+ # No error getting socket type.
+ result = True
+
+ return result
+
+
+def is_process_started_by_superserver():
+ """ Determine whether the current process is started by the superserver.
+
+ :return: ``True`` if this process was started by the internet
+ superserver; otherwise ``False``.
+
+ The internet superserver creates a network socket, and
+ attaches it to the standard streams of the child process. If
+ that is the case for this process, return ``True``, otherwise
+ ``False``.
+
+ """
+ result = False
+
+ stdin_fd = sys.__stdin__.fileno()
+ if is_socket(stdin_fd):
+ result = True
+
+ return result
+
+
+def is_detach_process_context_required():
+ """ Determine whether detaching the process context is required.
+
+    :return: ``True`` iff detaching the process context is required;
+        ``False`` if the process is deemed to be already detached.
+
+ The process environment is interrogated for the following:
+
+ * Process was started by `init`; or
+
+ * Process was started by `inetd`.
+
+ If any of the above are true, the process is deemed to be already
+ detached.
+
+ """
+ result = True
+ if is_process_started_by_init() or is_process_started_by_superserver():
+ result = False
+
+ return result
+
+
+def close_file_descriptor_if_open(fd):
+ """ Close a file descriptor if already open.
+
+ :param fd: The file descriptor to close.
+ :return: ``None``.
+
+ Close the file descriptor `fd`, suppressing an error in the
+ case the file was not open.
+
+ """
+ try:
+ os.close(fd)
+ except EnvironmentError as exc:
+ if exc.errno == errno.EBADF:
+ # File descriptor was not open.
+ pass
+ else:
+ error = DaemonOSEnvironmentError(
+ "Failed to close file descriptor {fd:d} ({exc})".format(
+ fd=fd, exc=exc))
+ raise error
+
+
+MAXFD = 2048
+
+def get_maximum_file_descriptors():
+ """ Get the maximum number of open file descriptors for this process.
+
+ :return: The number (integer) to use as the maximum number of open
+ files for this process.
+
+    The maximum is the process's hard resource limit on the number of
+    open file descriptors. If the limit is “infinity”, a default value
+ of ``MAXFD`` is returned.
+
+ """
+ limits = resource.getrlimit(resource.RLIMIT_NOFILE)
+ result = limits[1]
+ if result == resource.RLIM_INFINITY:
+ result = MAXFD
+ return result
+
+
+def close_all_open_files(exclude=set()):
+ """ Close all open file descriptors.
+
+ :param exclude: Collection of file descriptors to skip when closing
+ files.
+ :return: ``None``.
+
+ Closes every file descriptor (if open) of this process. If
+ specified, `exclude` is a set of file descriptors to *not*
+ close.
+
+ """
+ maxfd = get_maximum_file_descriptors()
+ for fd in reversed(range(maxfd)):
+ if fd not in exclude:
+ close_file_descriptor_if_open(fd)
+
+
+def redirect_stream(system_stream, target_stream):
+ """ Redirect a system stream to a specified file.
+
+    :param system_stream: A file object representing a standard I/O
+ stream.
+ :param target_stream: The target file object for the redirected
+ stream, or ``None`` to specify the null device.
+ :return: ``None``.
+
+ `system_stream` is a standard system stream such as
+ ``sys.stdout``. `target_stream` is an open file object that
+ should replace the corresponding system stream object.
+
+ If `target_stream` is ``None``, defaults to opening the
+ operating system's null device and using its file descriptor.
+
+ """
+ if target_stream is None:
+ target_fd = os.open(os.devnull, os.O_RDWR)
+ else:
+ target_fd = target_stream.fileno()
+ os.dup2(target_fd, system_stream.fileno())
+
+
+def make_default_signal_map():
+ """ Make the default signal map for this system.
+
+ :return: A mapping from signal number to handler object.
+
+ The signals available differ by system. The map will not contain
+ any signals not defined on the running system.
+
+ """
+ name_map = {
+ 'SIGTSTP': None,
+ 'SIGTTIN': None,
+ 'SIGTTOU': None,
+ 'SIGTERM': 'terminate',
+ }
+ signal_map = dict(
+ (getattr(signal, name), target)
+ for (name, target) in name_map.items()
+ if hasattr(signal, name))
+
+ return signal_map
+
+
+def set_signal_handlers(signal_handler_map):
+ """ Set the signal handlers as specified.
+
+ :param signal_handler_map: A map from signal number to handler
+ object.
+ :return: ``None``.
+
+ See the `signal` module for details on signal numbers and signal
+ handlers.
+
+ """
+ for (signal_number, handler) in signal_handler_map.items():
+ signal.signal(signal_number, handler)
+
+
+def register_atexit_function(func):
+ """ Register a function for processing at program exit.
+
+ :param func: A callable function expecting no arguments.
+ :return: ``None``.
+
+ The function `func` is registered for a call with no arguments
+ at program exit.
+
+ """
+ atexit.register(func)
+
+
+def _chain_exception_from_existing_exception_context(exc, as_cause=False):
+ """ Decorate the specified exception with the existing exception context.
+
+ :param exc: The exception instance to decorate.
+ :param as_cause: If true, the existing context is declared to be
+ the cause of the exception.
+ :return: ``None``.
+
+ :PEP:`344` describes syntax and attributes (`__traceback__`,
+ `__context__`, `__cause__`) for use in exception chaining.
+
+ Python 2 does not have that syntax, so this function decorates
+ the exception with values from the current exception context.
+
+ """
+ (existing_exc_type, existing_exc, existing_traceback) = sys.exc_info()
+ if as_cause:
+ exc.__cause__ = existing_exc
+ else:
+ exc.__context__ = existing_exc
+ exc.__traceback__ = existing_traceback
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
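
A minimal sketch of the `signal_map` behaviour documented above (the handler and `run_the_service` are hypothetical; a string value names a `DaemonContext` attribute, ``None`` ignores the signal, and any other value is installed as the handler)::

    import signal

    import daemon

    def reload_config(signal_number, stack_frame):
        # Hypothetical handler: re-read configuration on SIGHUP.
        pass

    context = daemon.DaemonContext()
    context.signal_map = {
            signal.SIGHUP: reload_config,
            signal.SIGTERM: 'terminate',   # name of a DaemonContext method
            signal.SIGUSR1: None,          # ignored (signal.SIG_IGN)
            }

    with context:
        run_the_service()
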
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/pidfile.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/pidfile.py
new file mode 100755
index 00000000..4517ee0e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/pidfile.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+
+# daemon/pidfile.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Lockfile behaviour implemented via Unix PID files.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+from lockfile.pidlockfile import PIDLockFile
+
+
+class TimeoutPIDLockFile(PIDLockFile, object):
+ """ Lockfile with default timeout, implemented as a Unix PID file.
+
+ This uses the ``PIDLockFile`` implementation, with the
+ following changes:
+
+ * The `acquire_timeout` parameter to the initialiser will be
+ used as the default `timeout` parameter for the `acquire`
+ method.
+
+ """
+
+ def __init__(self, path, acquire_timeout=None, *args, **kwargs):
+ """ Set up the parameters of a TimeoutPIDLockFile.
+
+ :param path: Filesystem path to the PID file.
+ :param acquire_timeout: Value to use by default for the
+ `acquire` call.
+ :return: ``None``.
+
+ """
+ self.acquire_timeout = acquire_timeout
+ super(TimeoutPIDLockFile, self).__init__(path, *args, **kwargs)
+
+ def acquire(self, timeout=None, *args, **kwargs):
+ """ Acquire the lock.
+
+ :param timeout: Specifies the timeout; see below for valid
+ values.
+ :return: ``None``.
+
+ The `timeout` defaults to the value set during
+ initialisation with the `acquire_timeout` parameter. It is
+ passed to `PIDLockFile.acquire`; see that method for
+ details.
+
+ """
+ if timeout is None:
+ timeout = self.acquire_timeout
+ super(TimeoutPIDLockFile, self).acquire(timeout, *args, **kwargs)
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
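
A minimal sketch of handing the lock file above to a daemon context (the PID file path is hypothetical; this relies on the `lockfile` package bundled alongside this library)::

    import daemon
    from daemon import pidfile

    context = daemon.DaemonContext(
            pidfile=pidfile.TimeoutPIDLockFile(
                    "/var/run/spam.pid", acquire_timeout=5),
            )

    with context:
        do_main_program()   # hypothetical main entry point
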
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/runner.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/runner.py
new file mode 100755
index 00000000..6973cf1c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/runner.py
@@ -0,0 +1,324 @@
+# -*- coding: utf-8 -*-
+
+# daemon/runner.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2009–2015 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
+# Copyright © 2003 Clark Evans
+# Copyright © 2002 Noah Spurrier
+# Copyright © 2001 Jürgen Hermann
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Daemon runner library.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import sys
+import os
+import signal
+import errno
+try:
+ # Python 3 standard library.
+ ProcessLookupError
+except NameError:
+ # No such class in Python 2.
+ ProcessLookupError = NotImplemented
+
+import lockfile
+
+from . import pidfile
+from .daemon import (basestring, unicode)
+from .daemon import DaemonContext
+from .daemon import _chain_exception_from_existing_exception_context
+
+
+class DaemonRunnerError(Exception):
+ """ Abstract base class for errors from DaemonRunner. """
+
+ def __init__(self, *args, **kwargs):
+ self._chain_from_context()
+
+ super(DaemonRunnerError, self).__init__(*args, **kwargs)
+
+ def _chain_from_context(self):
+ _chain_exception_from_existing_exception_context(self, as_cause=True)
+
+
+class DaemonRunnerInvalidActionError(DaemonRunnerError, ValueError):
+ """ Raised when specified action for DaemonRunner is invalid. """
+
+ def _chain_from_context(self):
+ # This exception is normally not caused by another.
+ _chain_exception_from_existing_exception_context(self, as_cause=False)
+
+
+class DaemonRunnerStartFailureError(DaemonRunnerError, RuntimeError):
+ """ Raised when failure starting DaemonRunner. """
+
+
+class DaemonRunnerStopFailureError(DaemonRunnerError, RuntimeError):
+ """ Raised when failure stopping DaemonRunner. """
+
+
+class DaemonRunner:
+ """ Controller for a callable running in a separate background process.
+
+ The first command-line argument is the action to take:
+
+ * 'start': Become a daemon and call `app.run()`.
+ * 'stop': Exit the daemon process specified in the PID file.
+ * 'restart': Stop, then start.
+
+ """
+
+ __metaclass__ = type
+
+ start_message = "started with pid {pid:d}"
+
+ def __init__(self, app):
+ """ Set up the parameters of a new runner.
+
+ :param app: The application instance; see below.
+ :return: ``None``.
+
+ The `app` argument must have the following attributes:
+
+ * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem paths
+ to open and replace the existing `sys.stdin`, `sys.stdout`,
+ `sys.stderr`.
+
+ * `pidfile_path`: Absolute filesystem path to a file that will
+ be used as the PID file for the daemon. If ``None``, no PID
+ file will be used.
+
+ * `pidfile_timeout`: Used as the default acquisition timeout
+ value supplied to the runner's PID lock file.
+
+ * `run`: Callable that will be invoked when the daemon is
+ started.
+
+ """
+ self.parse_args()
+ self.app = app
+ self.daemon_context = DaemonContext()
+ self.daemon_context.stdin = open(app.stdin_path, 'rt')
+ self.daemon_context.stdout = open(app.stdout_path, 'w+t')
+ self.daemon_context.stderr = open(
+ app.stderr_path, 'w+t', buffering=0)
+
+ self.pidfile = None
+ if app.pidfile_path is not None:
+ self.pidfile = make_pidlockfile(
+ app.pidfile_path, app.pidfile_timeout)
+ self.daemon_context.pidfile = self.pidfile
+
+ def _usage_exit(self, argv):
+ """ Emit a usage message, then exit.
+
+ :param argv: The command-line arguments used to invoke the
+ program, as a sequence of strings.
+ :return: ``None``.
+
+ """
+ progname = os.path.basename(argv[0])
+ usage_exit_code = 2
+ action_usage = "|".join(self.action_funcs.keys())
+ message = "usage: {progname} {usage}".format(
+ progname=progname, usage=action_usage)
+ emit_message(message)
+ sys.exit(usage_exit_code)
+
+ def parse_args(self, argv=None):
+ """ Parse command-line arguments.
+
+ :param argv: The command-line arguments used to invoke the
+ program, as a sequence of strings.
+
+ :return: ``None``.
+
+ The parser expects the first argument as the program name, the
+ second argument as the action to perform.
+
+ If the parser fails to parse the arguments, emit a usage
+ message and exit the program.
+
+ """
+ if argv is None:
+ argv = sys.argv
+
+ min_args = 2
+ if len(argv) < min_args:
+ self._usage_exit(argv)
+
+ self.action = unicode(argv[1])
+ if self.action not in self.action_funcs:
+ self._usage_exit(argv)
+
+ def _start(self):
+ """ Open the daemon context and run the application.
+
+ :return: ``None``.
+ :raises DaemonRunnerStartFailureError: If the PID file cannot
+ be locked by this process.
+
+ """
+ if is_pidfile_stale(self.pidfile):
+ self.pidfile.break_lock()
+
+ try:
+ self.daemon_context.open()
+ except lockfile.AlreadyLocked:
+ error = DaemonRunnerStartFailureError(
+ "PID file {pidfile.path!r} already locked".format(
+ pidfile=self.pidfile))
+ raise error
+
+ pid = os.getpid()
+ message = self.start_message.format(pid=pid)
+ emit_message(message)
+
+ self.app.run()
+
+ def _terminate_daemon_process(self):
+ """ Terminate the daemon process specified in the current PID file.
+
+ :return: ``None``.
+ :raises DaemonRunnerStopFailureError: If terminating the daemon
+ fails with an OS error.
+
+ """
+ pid = self.pidfile.read_pid()
+ try:
+ os.kill(pid, signal.SIGTERM)
+ except OSError as exc:
+ error = DaemonRunnerStopFailureError(
+ "Failed to terminate {pid:d}: {exc}".format(
+ pid=pid, exc=exc))
+ raise error
+
+ def _stop(self):
+ """ Exit the daemon process specified in the current PID file.
+
+ :return: ``None``.
+ :raises DaemonRunnerStopFailureError: If the PID file is not
+ already locked.
+
+ """
+ if not self.pidfile.is_locked():
+ error = DaemonRunnerStopFailureError(
+ "PID file {pidfile.path!r} not locked".format(
+ pidfile=self.pidfile))
+ raise error
+
+ if is_pidfile_stale(self.pidfile):
+ self.pidfile.break_lock()
+ else:
+ self._terminate_daemon_process()
+
+ def _restart(self):
+ """ Stop, then start.
+ """
+ self._stop()
+ self._start()
+
+ action_funcs = {
+ 'start': _start,
+ 'stop': _stop,
+ 'restart': _restart,
+ }
+
+ def _get_action_func(self):
+ """ Get the function for the specified action.
+
+ :return: The function object corresponding to the specified
+ action.
+ :raises DaemonRunnerInvalidActionError: if the action is
+ unknown.
+
+ The action is specified by the `action` attribute, which is set
+ during `parse_args`.
+
+ """
+ try:
+ func = self.action_funcs[self.action]
+ except KeyError:
+ error = DaemonRunnerInvalidActionError(
+ "Unknown action: {action!r}".format(
+ action=self.action))
+ raise error
+ return func
+
+ def do_action(self):
+ """ Perform the requested action.
+
+ :return: ``None``.
+
+ The action is specified by the `action` attribute, which is set
+ during `parse_args`.
+
+ """
+ func = self._get_action_func()
+ func(self)
+
+
+def emit_message(message, stream=None):
+ """ Emit a message to the specified stream (default `sys.stderr`). """
+ if stream is None:
+ stream = sys.stderr
+ stream.write("{message}\n".format(message=message))
+ stream.flush()
+
+
+def make_pidlockfile(path, acquire_timeout):
+ """ Make a PIDLockFile instance with the given filesystem path. """
+ if not isinstance(path, basestring):
+ error = ValueError("Not a filesystem path: {path!r}".format(
+ path=path))
+ raise error
+ if not os.path.isabs(path):
+ error = ValueError("Not an absolute path: {path!r}".format(
+ path=path))
+ raise error
+ lockfile = pidfile.TimeoutPIDLockFile(path, acquire_timeout)
+
+ return lockfile
+
+
+def is_pidfile_stale(pidfile):
+ """ Determine whether a PID file is stale.
+
+ :return: ``True`` iff the PID file is stale; otherwise ``False``.
+
+ The PID file is “stale” if its contents are valid but do not
+ match the PID of a currently-running process.
+
+ """
+ result = False
+
+ pidfile_pid = pidfile.read_pid()
+ if pidfile_pid is not None:
+ try:
+ os.kill(pidfile_pid, signal.SIG_DFL)
+ except ProcessLookupError:
+ # The specified PID does not exist.
+ result = True
+ except OSError as exc:
+ if exc.errno == errno.ESRCH:
+ # Under Python 2, process lookup error is an OSError.
+ # The specified PID does not exist.
+ result = True
+
+ return result
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
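
A sketch of driving the runner above from a control script (the `App` class and every path are hypothetical; the attributes are the ones required by the `DaemonRunner.__init__` docstring)::

    import time

    from daemon import runner

    class App(object):
        stdin_path = "/dev/null"
        stdout_path = "/var/log/spam.out"     # hypothetical
        stderr_path = "/var/log/spam.err"     # hypothetical
        pidfile_path = "/var/run/spam.pid"    # hypothetical
        pidfile_timeout = 5

        def run(self):
            while True:
                time.sleep(60)   # placeholder work loop

    if __name__ == "__main__":
        # Invoked as, for example: python spam_daemon.py start|stop|restart
        daemon_runner = runner.DaemonRunner(App())
        daemon_runner.do_action()
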
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/CREDITS b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/CREDITS
new file mode 100755
index 00000000..feb65d5e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/CREDITS
@@ -0,0 +1,53 @@
+Credits for contributors to ‘python-daemon’
+###########################################
+
+:Updated: 2014-12-23
+
+The ‘python-daemon’ library is the work of many contributors.
+
+
+Primary developers
+==================
+
+The library has been maintained over the years by:
+
+* Ben Finney <ben+python@benfinney.id.au>
+* Robert Niederreiter
+* Jens Klein
+
+
+Precursors
+==========
+
+The library code base is inherited from prior work by:
+
+* Chad J. Schroeder
+* Clark Evans
+* Noah Spurrier
+* Jürgen Hermann
+
+
+Additional contributors
+=======================
+
+People who have also contributed substantial improvements:
+
+
+
+..
+ This is free software: you may copy, modify, and/or distribute this work
+ under the terms of the Apache License version 2.0 as published by the
+ Apache Software Foundation.
+ No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+..
+ Local variables:
+ coding: utf-8
+ mode: text
+ mode: rst
+ time-stamp-format: "%:y-%02m-%02d"
+ time-stamp-start: "^:Updated:[ ]+"
+ time-stamp-end: "$"
+ time-stamp-line-limit: 20
+ End:
+ vim: fileencoding=utf-8 filetype=rst :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/FAQ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/FAQ
new file mode 100755
index 00000000..1fcc4658
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/FAQ
@@ -0,0 +1,156 @@
+‘python-daemon’ Frequently Asked Questions
+##########################################
+
+:Author: Ben Finney <ben+python@benfinney.id.au>
+:Updated: 2015-01-10
+
+.. contents::
+..
+ 1 General
+ 1.1 What is the purpose of the ‘python-daemon’ library?
+ 1.2 How can I run a service communicating with a separate daemon process?
+ 2 Security
+ 2.1 Why is the umask set to 0 by default?
+ 3 File descriptors
+ 3.1 Why does the output stop after opening the daemon context?
+ 3.2 How can I preserve a ‘logging’ handler's file descriptor?
+
+General
+=======
+
+What is the purpose of the ‘python-daemon’ library?
+---------------------------------------------------
+
+The ‘python-daemon’ library has a deliberately narrow focus: that of
+being a reference implementation for `PEP 3143`_, “Standard daemon
+process library”.
+
+.. _`PEP 3143`: http://www.python.org/dev/peps/pep-3143
+
+How can I run a service communicating with a separate daemon process?
+---------------------------------------------------------------------
+
+As specified in `PEP 3143`_, the ‘python-daemon’ library is
+specifically focussed on the goal of having the *current running
+program* become a well-behaved Unix daemon process. This leaves open
+the question of how this program is started, or about multiple
+programs interacting. As detailed in PEP 3143:
+
+ A daemon is not a service
+
+ There is a related concept in many systems, called a “service”. A
+ service differs from the model in this PEP, in that rather than
+ having the *current* program continue to run as a daemon process,
+ a service starts an *additional* process to run in the background,
+ and the current process communicates with that additional process
+ via some defined channels.
+
+ The Unix-style daemon model in this PEP can be used, among other
+ things, to implement the background-process part of a service; but
+ this PEP does not address the other aspects of setting up and
+ managing a service.
+
+A possible starting point for such a “service” model of execution is
+in a `message from 2009-01-30`_ to the ``python-ideas`` forum.
+
+.. _`message from 2009-01-30`: http://mail.python.org/pipermail/python-ideas/2009-January/002606.html
+
+
+Security
+========
+
+Why is the umask set to 0 by default?
+-------------------------------------
+
+A daemon should not rely on the parent process's umask value, which is
+beyond its control and may prevent creating a file with the required
+access mode. So when the daemon context opens, the umask is set to an
+explicit known value.
+
+If the conventional value of 0 is too open, consider setting a value
+such as 0o022, 0o027, 0o077, or another specific value. Otherwise,
+ensure the daemon creates every file with an explicit access mode for
+the purpose.
+
+
+File descriptors
+================
+
+Why does the output stop after opening the daemon context?
+----------------------------------------------------------
+
+The specified behaviour in `PEP 3143`_ includes the requirement to
+detach the process from the controlling terminal (to allow the process
+to continue to run as a daemon), and to close all file descriptors not
+known to be safe once detached (to ensure any files that continue to
+be used are under the control of the daemon process).
+
+If you want the process to generate output via the system streams
+‘sys.stdout’ and ‘sys.stderr’, set the ‘DaemonContext’'s ‘stdout’
+and/or ‘stderr’ options to a file-like object (e.g. the ‘stream’
+attribute of a ‘logging.Handler’ instance). If these objects have file
+descriptors, they will be preserved when the daemon context opens.
+
+How can I preserve a ‘logging’ handler's file descriptor?
+---------------------------------------------------------
+
+The ‘DaemonContext.open’ method conforms to `PEP 3143`_ by closing all
+open file descriptors, but excluding those files specified in the
+‘files_preserve’ option. This option is a list of files or file
+descriptors.
+
+The Python standard library ‘logging’ module provides log handlers
+that write to streams, including to files via the ‘StreamHandler’
+class and its sub-classes. The documentation (both the online `logging
+module documentation`_ and the docstrings for the code) makes no
+mention of a way to get at the stream associated with a handler
+object.
+
+However, looking at the source code for ‘StreamHandler’, in Python 2.5
+as ``/usr/lib/python2.5/logging/__init__.py``, shows a ‘stream’
+attribute that is bound to the stream object. The attribute is not
+marked private (i.e. it is not named with a leading underscore), so we
+can presume it is part of the public API.
+
+That attribute can then be used to specify that a logging handler's
+file descriptor should, when the ‘DaemonContext’ opens, be excluded
+from closure::
+
+ import logging
+ import daemon
+
+ # any subclass of StreamHandler should provide the ‘stream’ attribute.
+ lh = logging.handlers.TimedRotatingFileHandler(
+ "/var/log/foo.log",
+ # …
+ )
+
+ # … do some logging and other activity …
+
+ daemon_context = daemon.DaemonContext()
+ daemon_context.files_preserve = [lh.stream]
+
+ daemon_context.open()
+
+ # … continue as a daemon process …
+
+.. _`logging module documentation`: http://docs.python.org/library/logging
+
+
+..
+ This is free software: you may copy, modify, and/or distribute this work
+ under the terms of the Apache License version 2.0 as published by the
+ Apache Software Foundation.
+ No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+..
+ Local variables:
+ coding: utf-8
+ mode: text
+ mode: rst
+ time-stamp-format: "%:y-%02m-%02d"
+ time-stamp-start: "^:Updated:[ ]+"
+ time-stamp-end: "$"
+ time-stamp-line-limit: 20
+ End:
+ vim: fileencoding=utf-8 filetype=rst :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/TODO b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/TODO
new file mode 100755
index 00000000..81b41481
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/TODO
@@ -0,0 +1,95 @@
+TODO for ‘python-daemon’ library
+################################
+
+:Updated: 2015-01-10
+
+=======
+PENDING
+=======
+
+Tests
+=====
+
+Libraries
+=========
+
+* Evaluate switching to ‘flufl.lock’ library for PID lockfile behaviour
+ <http://pypi.python.org/pypi/flufl.lock>_.
+
+Features
+========
+
+Important
+---------
+
+Wishlist
+--------
+
+* Allow specification of a syslog service name to log as (default:
+ output to stdout and stderr, not syslog).
+
+Documentation
+=============
+
+Standard library inclusion
+==========================
+
+
+====
+DONE
+====
+
+* Convert to Python 2 and Python 3 compatible code base.
+
+* Work correctly with current ‘lockfile’ library (0.10 or later).
+
+* Write full unit tests for every new or changed behaviour at time of
+ commit.
+
+* Detect whether started by another process that handles
+ daemonisation, such as ‘inetd’, and behave appropriately.
+
+* Detach to new process and session group.
+
+* Allow specification of working directory (default: '/').
+
+* Allow specification of umask (default: 0o000).
+
+* Drop ‘suid’ and ‘sgid’ privileges if set.
+
+* Close all open file handles.
+
+* Re-open stdin, stdout, stderr to user-specified files.
+
+* Default re-open stdin, stdout, stderr to ‘/dev/null’.
+
+* Allow specification of a non-root user and group to drop to, if
+ started as ‘root’ (default: no change of user or group).
+
+* Implement context manager protocol for daemon context.
+
+* Allow specification of PID file with its own context manager
+ (default: no PID file).
+
+* Full docstrings for functions, classes, and modules.
+
+* PEP 3143 for adding this library to the Python standard library.
+
+
+..
+ This is free software: you may copy, modify, and/or distribute this work
+ under the terms of the Apache License version 2.0 as published by the
+ Apache Software Foundation.
+ No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+..
+ Local variables:
+ coding: utf-8
+ mode: text
+ mode: rst
+ time-stamp-format: "%:y-%02m-%02d"
+ time-stamp-start: "^:Updated:[ ]+"
+ time-stamp-end: "$"
+ time-stamp-line-limit: 20
+ End:
+ vim: fileencoding=utf-8 filetype=rst :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/hacking.txt b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/hacking.txt
new file mode 100755
index 00000000..9484dbd0
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/hacking.txt
@@ -0,0 +1,180 @@
+Developer's guide
+#################
+
+:Author: Ben Finney <ben+python@benfinney.id.au>
+:Updated: 2014-11-28
+
+
+Project layout
+==============
+
+::
+
+ ./ Top level of source tree
+ doc/ Project documentation
+ bin/ Executable programs
+ daemon/ Main ‘daemon’ library
+ test/ Unit tests
+
+
+Code style
+==========
+
+Python
+------
+
+All Python code should conform to the guidelines in PEP8_. In
+particular:
+
+* Indent each level using 4 spaces (``U+0020 SPACE``), and no TABs
+  (``U+0009 CHARACTER TABULATION``).
+
+* Name modules in lower case, ``multiplewordslikethis``.
+
+* Name classes in title case, ``MultipleWordsLikeThis``.
+
+* Name functions, instances and other variables in lower case,
+ ``multiple_words_like_this``.
+
+* Every module, class, and function has a Python doc string explaining
+ its purpose and API.
+
+ *Exception*: Functions whose purpose and API are mandated by Python
+ itself (dunder-named methods) do not need a doc string.
+
+* Doc strings are written as triple-quoted strings.
+
+ * The text of the doc string is marked up with reStructuredText.
+
+ * The first line is a one-line synopsis of the object. This summary
+ line appears on the same line as the opening triple-quote,
+ separated by a single space.
+
+ * Further lines, if needed, are separated from the first by one
+ blank line.
+
+ * The synopsis is separated by one space from the opening
+ triple-quote; this causes it to appear four columns past the
+ beginning of the line. All subsequent lines are indented at least
+ four columns also.
+
+ * The synopsis is followed by a reStructuredText field list. The
+ field names are: “param foo” for each parameter (where “foo” is
+ the parameter name), and “return” for the return value. The field
+ values describe the purpose of each.
+
+ * The closing triple-quote appears on a separate line.
+
+ Example::
+
+ def frobnicate(spam, algorithm="dv"):
+ """ Perform frobnication on ``spam``.
+
+ :param spam: A travortionate (as a sequence of strings).
+ :param algorithm: The name of the algorithm to use for
+ frobnicating the travortionate.
+ :return: The frobnicated travortionate, if it is
+ non-empty; otherwise None.
+
+ The frobnication is done by the Dietzel-Venkman algorithm,
+ and optimises for the case where ``spam`` is freebled and
+ agglutinative.
+
+ """
+ spagnify(spam)
+ # …
+
+* All ``import`` statements appear at the top of the module.
+
+* Each ``import`` statement imports a single module, or multiple names
+ from a single module.
+
+ Example::
+
+ import sys
+ import os
+ from spam import foo, bar, baz
+
+.. _PEP8: http://www.python.org/dev/peps/pep-0008/
+
+Additional style guidelines:
+
+* All text files (including program code) are encoded in UTF-8.
+
+* A page break (``U+000C FORM FEED``) whitespace character is used
+ within a module to break up semantically separate areas of the
+ module.
+
+* Editor hints for Emacs and Vim appear in a comment block at the
+ file's end::
+
+
+ # Local variables:
+ # coding: utf-8
+ # mode: python
+ # End:
+ # vim: fileencoding=utf-8 filetype=python :
+
+
+Unit tests
+==========
+
+All code should aim for 100% coverage by unit tests. New code, or
+changes to existing code, will only be considered for inclusion in the
+development tree when accompanied by corresponding additions or
+changes to the unit tests.
+
+Test-driven development
+-----------------------
+
+Where possible, practice test-driven development to implement program
+code.
+
+* During a development session, maintain a separate window or terminal
+ with the unit test suite for the project running continuously, or
+ automatically every few seconds.
+
+* Any time a test is failing, the only valid change is to make all
+ tests pass.
+
+* Develop new interface features (changes to the program unit's
+ behaviour) only when all current tests pass.
+
+* Refactor as needed, but only when all tests pass.
+
+ * Refactoring is any change to the code which does not alter its
+ interface or expected behaviour, such as performance
+ optimisations, readability improvements, modularisation
+ improvements etc.
+
+* Develop new or changed program behaviour by:
+
+ * *First* write a single, specific test case for that new behaviour,
+ then watch the test fail in the absence of the desired behaviour.
+
+ * Implement the minimum necessary change to satisfy the failing
+ test. Continue until all tests pass again, then stop making
+ functional changes.
+
+ * Once all tests (including the new test) pass, consider refactoring
+ the code and the tests immediately, then ensure all the tests pass
+ again after any changes.
+
+ * Iterate for each incremental change in interface or behaviour.
+
+Test-driven development is not absolutely necessary, but is the
+simplest, most direct way to generate the kind of program changes
+accompanied by unit tests that are necessary for inclusion in the
+project.
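+
+For illustration only (the ``frobnicate`` function and its test below
+are hypothetical, and not part of this code base), one cycle of that
+workflow might look like::
+
+    import unittest
+
+    class frobnicate_TestCase(unittest.TestCase):
+        """ Test cases for the hypothetical ``frobnicate`` function. """
+
+        def test_returns_none_for_empty_input(self):
+            """ Should return None when ``spam`` is empty. """
+            # Written first; it fails until ``frobnicate`` below
+            # gains the behaviour it describes.
+            self.assertIs(frobnicate([]), None)
+
+    def frobnicate(spam):
+        """ Perform frobnication on ``spam``.
+
+            :param spam: A sequence of strings.
+            :return: The frobnicated sequence, or None if ``spam``
+                is empty.
+
+            """
+        # The minimum change needed to make the failing test pass;
+        # further behaviour is added only alongside further tests.
+        if not spam:
+            return None
+        return list(spam)
+
+    if __name__ == '__main__':
+        unittest.main()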
+
+
+..
+ Local variables:
+ coding: utf-8
+ mode: rst
+ time-stamp-format: "%:y-%02m-%02d"
+ time-stamp-start: "^:Updated:[ ]+"
+ time-stamp-end: "$"
+ time-stamp-line-limit: 20
+ End:
+ vim: fileencoding=utf-8 filetype=rst :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO
new file mode 100755
index 00000000..fd81f509
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO
@@ -0,0 +1,38 @@
+Metadata-Version: 1.1
+Name: python-daemon
+Version: 2.0.5
+Summary: Library to implement a well-behaved Unix daemon process.
+Home-page: https://alioth.debian.org/projects/python-daemon/
+Author: Ben Finney
+Author-email: ben+python@benfinney.id.au
+License: Apache-2
+Description: This library implements the well-behaved daemon specification of
+ :pep:`3143`, “Standard daemon process library”.
+
+ A well-behaved Unix daemon process is tricky to get right, but the
+ required steps are much the same for every daemon program. A
+ `DaemonContext` instance holds the behaviour and configured
+ process environment for the program; use the instance as a context
+ manager to enter a daemon state.
+
+ Simple example of usage::
+
+ import daemon
+
+ from spam import do_main_program
+
+ with daemon.DaemonContext():
+ do_main_program()
+
+ Customisation of the steps to become a daemon is available by
+ setting options on the `DaemonContext` instance; see the
+ documentation for that class for each option.
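+
+ As an illustrative sketch (the values shown are placeholders, and
+ `do_main_program` is the same hypothetical function as above), the
+ options may be set when constructing the instance or afterwards as
+ attributes::
+
+     import signal
+
+     import daemon
+
+     from spam import do_main_program
+
+     context = daemon.DaemonContext(
+         working_directory='/var/lib/spam',
+         umask=0o002,
+         )
+
+     # A signal map entry may name a `DaemonContext` method, such
+     # as 'terminate', to use as the handler for that signal.
+     context.signal_map = {
+         signal.SIGTERM: 'terminate',
+         }
+
+     with context:
+         do_main_program()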
+Keywords: daemon,fork,unix
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt
new file mode 100755
index 00000000..6e176719
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt
@@ -0,0 +1,30 @@
+ChangeLog
+LICENSE.ASF-2
+LICENSE.GPL-3
+MANIFEST.in
+setup.cfg
+setup.py
+test_version.py
+version.py
+daemon/__init__.py
+daemon/_metadata.py
+daemon/daemon.py
+daemon/pidfile.py
+daemon/runner.py
+doc/CREDITS
+doc/FAQ
+doc/TODO
+doc/hacking.txt
+python_daemon.egg-info/PKG-INFO
+python_daemon.egg-info/SOURCES.txt
+python_daemon.egg-info/dependency_links.txt
+python_daemon.egg-info/not-zip-safe
+python_daemon.egg-info/requires.txt
+python_daemon.egg-info/top_level.txt
+python_daemon.egg-info/version_info.json
+test/__init__.py
+test/scaffold.py
+test/test_daemon.py
+test/test_metadata.py
+test/test_pidfile.py
+test/test_runner.py \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe
new file mode 100755
index 00000000..8b137891
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/requires.txt b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/requires.txt
new file mode 100755
index 00000000..d1496b02
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/requires.txt
@@ -0,0 +1,3 @@
+setuptools
+docutils
+lockfile >=0.10
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt
new file mode 100755
index 00000000..28e3ee0c
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt
@@ -0,0 +1 @@
+daemon
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/version_info.json b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/version_info.json
new file mode 100755
index 00000000..bac1b84f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/version_info.json
@@ -0,0 +1,6 @@
+{
+ "release_date": "2015-02-02",
+ "version": "2.0.5",
+ "maintainer": "Ben Finney <ben+python@benfinney.id.au>",
+ "body": "* Refine compatibility of exceptions for file operations.\n* Specify the text encoding when opening the changelog file.\n"
+} \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.cfg b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.cfg
new file mode 100755
index 00000000..9d3d2c02
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.cfg
@@ -0,0 +1,11 @@
+[aliases]
+distribute = register sdist bdist_wheel upload
+
+[bdist_wheel]
+universal = true
+
+[egg_info]
+tag_svn_revision = 0
+tag_date = 0
+tag_build =
+
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.py
new file mode 100755
index 00000000..16a6a6a6
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+
+# setup.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+# Copyright © 2008 Robert Niederreiter, Jens Klein
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; version 3 of that license or any later version.
+# No warranty expressed or implied. See the file ‘LICENSE.GPL-3’ for details.
+
+""" Distribution setup for ‘python-daemon’ library. """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import sys
+import os
+import os.path
+import pydoc
+import distutils.util
+
+from setuptools import (setup, find_packages)
+
+import version
+
+
+fromlist_expects_type = str
+if sys.version_info < (3, 0):
+ fromlist_expects_type = bytes
+
+
+main_module_name = 'daemon'
+main_module_fromlist = list(map(fromlist_expects_type, [
+ '_metadata']))
+main_module = __import__(
+ main_module_name,
+ level=0, fromlist=main_module_fromlist)
+metadata = main_module._metadata
+
+(synopsis, long_description) = pydoc.splitdoc(pydoc.getdoc(main_module))
+
+version_info = metadata.get_distribution_version_info()
+version_string = version_info['version']
+
+(maintainer_name, maintainer_email) = metadata.parse_person_field(
+ version_info['maintainer'])
+
+
+setup(
+ name=metadata.distribution_name,
+ version=version_string,
+ packages=find_packages(exclude=["test"]),
+ cmdclass={
+ "write_version_info": version.WriteVersionInfoCommand,
+ "egg_info": version.EggInfoCommand,
+ },
+
+ # Setuptools metadata.
+ maintainer=maintainer_name,
+ maintainer_email=maintainer_email,
+ zip_safe=False,
+ setup_requires=[
+ "docutils",
+ ],
+ test_suite="unittest2.collector",
+ tests_require=[
+ "unittest2 >=0.6",
+ "testtools",
+ "testscenarios >=0.4",
+ "mock >=1.0",
+ "docutils",
+ ],
+ install_requires=[
+ "setuptools",
+ "docutils",
+ "lockfile >=0.10",
+ ],
+
+ # PyPI metadata.
+ author=metadata.author_name,
+ author_email=metadata.author_email,
+ description=synopsis,
+ license=metadata.license,
+ keywords="daemon fork unix".split(),
+ url=metadata.url,
+ long_description=long_description,
+ classifiers=[
+ # Reference: http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ "Development Status :: 4 - Beta",
+ "License :: OSI Approved :: Apache Software License",
+ "Operating System :: POSIX",
+ "Programming Language :: Python :: 2.7",
+ "Programming Language :: Python :: 3",
+ "Intended Audience :: Developers",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ )
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/__init__.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/__init__.py
new file mode 100755
index 00000000..398519f1
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/__init__.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+#
+# test/__init__.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Unit test suite for ‘daemon’ package.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/scaffold.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/scaffold.py
new file mode 100755
index 00000000..9a4f1150
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/scaffold.py
@@ -0,0 +1,322 @@
+# -*- coding: utf-8 -*-
+
+# test/scaffold.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2007–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Scaffolding for unit test modules.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import unittest
+import doctest
+import logging
+import os
+import sys
+import operator
+import textwrap
+from copy import deepcopy
+import functools
+
+try:
+ # Python 2 has both ‘str’ (bytes) and ‘unicode’ (text).
+ basestring = basestring
+ unicode = unicode
+except NameError:
+ # Python 3 names the Unicode data type ‘str’.
+ basestring = str
+ unicode = str
+
+import testscenarios
+import testtools.testcase
+
+
+test_dir = os.path.dirname(os.path.abspath(__file__))
+parent_dir = os.path.dirname(test_dir)
+if test_dir not in sys.path:
+ sys.path.insert(1, test_dir)
+if parent_dir not in sys.path:
+ sys.path.insert(1, parent_dir)
+
+# Disable all but the most critical logging messages.
+logging.disable(logging.CRITICAL)
+
+
+def get_function_signature(func):
+ """ Get the function signature as a mapping of attributes.
+
+ :param func: The function object to interrogate.
+ :return: A mapping of the components of a function signature.
+
+ The signature is constructed as a mapping:
+
+ * 'name': The function's defined name.
+ * 'arg_count': The number of arguments expected by the function.
+ * 'arg_names': A sequence of the argument names, as strings.
+ * 'arg_defaults': A sequence of the default values for the arguments.
+ * 'var_args': The name bound to remaining positional arguments.
+ * 'var_kw_args': The name bound to remaining keyword arguments.
+
+ """
+ try:
+ # Python 3 function attributes.
+ func_code = func.__code__
+ func_defaults = func.__defaults__
+ except AttributeError:
+ # Python 2 function attributes.
+ func_code = func.func_code
+ func_defaults = func.func_defaults
+
+ arg_count = func_code.co_argcount
+ arg_names = func_code.co_varnames[:arg_count]
+
+ arg_defaults = {}
+ if func_defaults is not None:
+ arg_defaults = dict(
+ (name, value)
+ for (name, value) in
+ zip(arg_names[::-1], func_defaults[::-1]))
+
+ signature = {
+ 'name': func.__name__,
+ 'arg_count': arg_count,
+ 'arg_names': arg_names,
+ 'arg_defaults': arg_defaults,
+ }
+
+ non_pos_names = list(func_code.co_varnames[arg_count:])
+ COLLECTS_ARBITRARY_POSITIONAL_ARGS = 0x04
+ if func_code.co_flags & COLLECTS_ARBITRARY_POSITIONAL_ARGS:
+ signature['var_args'] = non_pos_names.pop(0)
+ COLLECTS_ARBITRARY_KEYWORD_ARGS = 0x08
+ if func_code.co_flags & COLLECTS_ARBITRARY_KEYWORD_ARGS:
+ signature['var_kw_args'] = non_pos_names.pop(0)
+
+ return signature
+
+
+def format_function_signature(func):
+ """ Format the function signature as printable text.
+
+ :param func: The function object to interrogate.
+ :return: A formatted text representation of the function signature.
+
+ The signature is rendered as text; for example::
+
+ foo(spam, eggs, ham=True, beans=None, *args, **kwargs)
+
+ """
+ signature = get_function_signature(func)
+
+ args_text = []
+ for arg_name in signature['arg_names']:
+ if arg_name in signature['arg_defaults']:
+ arg_text = "{name}={value!r}".format(
+ name=arg_name, value=signature['arg_defaults'][arg_name])
+ else:
+ arg_text = "{name}".format(
+ name=arg_name)
+ args_text.append(arg_text)
+ if 'var_args' in signature:
+ args_text.append("*{var_args}".format(**signature))
+ if 'var_kw_args' in signature:
+ args_text.append("**{var_kw_args}".format(**signature))
+ signature_args_text = ", ".join(args_text)
+
+ func_name = signature['name']
+ signature_text = "{name}({args})".format(
+ name=func_name, args=signature_args_text)
+
+ return signature_text
+
+
+class TestCase(testtools.testcase.TestCase):
+ """ Test case behaviour. """
+
+ def failUnlessOutputCheckerMatch(self, want, got, msg=None):
+ """ Fail unless the specified string matches the expected.
+
+ :param want: The desired output pattern.
+ :param got: The actual text to match.
+ :param msg: A message to prefix on the failure message.
+ :return: ``None``.
+ :raises self.failureException: If the text does not match.
+
+ Fail the test unless ``want`` matches ``got``, as determined by
+ a ``doctest.OutputChecker`` instance. This is not an equality
+ check, but a pattern match according to the ``OutputChecker``
+ rules.
+
+ """
+ checker = doctest.OutputChecker()
+ want = textwrap.dedent(want)
+ source = ""
+ example = doctest.Example(source, want)
+ got = textwrap.dedent(got)
+ checker_optionflags = functools.reduce(operator.or_, [
+ doctest.ELLIPSIS,
+ ])
+ if not checker.check_output(want, got, checker_optionflags):
+ if msg is None:
+ diff = checker.output_difference(
+ example, got, checker_optionflags)
+ msg = "\n".join([
+ "Output received did not match expected output",
+ "{diff}",
+ ]).format(
+ diff=diff)
+ raise self.failureException(msg)
+
+ assertOutputCheckerMatch = failUnlessOutputCheckerMatch
+
+ def failUnlessFunctionInTraceback(self, traceback, function, msg=None):
+ """ Fail if the function is not in the traceback.
+
+ :param traceback: The traceback object to interrogate.
+ :param function: The function object to match.
+ :param msg: A message to prefix on the failure message.
+ :return: ``None``.
+
+ :raises self.failureException: If the function is not in the
+ traceback.
+
+ Fail the test if the function ``function`` is not at any of the
+ levels in the traceback object ``traceback``.
+
+ """
+ func_in_traceback = False
+ # ‘__code__’ is present on functions in both Python 2 and Python 3.
+ expected_code = function.__code__
+ current_traceback = traceback
+ while current_traceback is not None:
+ if expected_code is current_traceback.tb_frame.f_code:
+ func_in_traceback = True
+ break
+ current_traceback = current_traceback.tb_next
+
+ if not func_in_traceback:
+ if msg is None:
+ msg = (
+ "Traceback did not lead to original function"
+ " {function}"
+ ).format(
+ function=function)
+ raise self.failureException(msg)
+
+ assertFunctionInTraceback = failUnlessFunctionInTraceback
+
+ def failUnlessFunctionSignatureMatch(self, first, second, msg=None):
+ """ Fail if the function signatures do not match.
+
+ :param first: The first function to compare.
+ :param second: The second function to compare.
+ :param msg: A message to prefix to the failure message.
+ :return: ``None``.
+
+ :raises self.failureException: If the function signatures do
+ not match.
+
+ Fail the test if the function signature does not match between
+ the ``first`` function and the ``second`` function.
+
+ The function signature includes:
+
+ * function name,
+
+ * count of named parameters,
+
+ * sequence of named parameters,
+
+ * default values of named parameters,
+
+ * collector for arbitrary positional arguments,
+
+ * collector for arbitrary keyword arguments.
+
+ """
+ first_signature = get_function_signature(first)
+ second_signature = get_function_signature(second)
+
+ if first_signature != second_signature:
+ if msg is None:
+ first_signature_text = format_function_signature(first)
+ second_signature_text = format_function_signature(second)
+ msg = (textwrap.dedent("""\
+ Function signatures do not match:
+ {first!r} != {second!r}
+ Expected:
+ {first_text}
+ Got:
+ {second_text}""")
+ ).format(
+ first=first_signature,
+ first_text=first_signature_text,
+ second=second_signature,
+ second_text=second_signature_text,
+ )
+ raise self.failureException(msg)
+
+ assertFunctionSignatureMatch = failUnlessFunctionSignatureMatch
+
+
+class TestCaseWithScenarios(testscenarios.WithScenarios, TestCase):
+ """ Test cases run per scenario. """
+
+
+class Exception_TestCase(TestCaseWithScenarios):
+ """ Test cases for exception classes. """
+
+ def test_exception_instance(self):
+ """ Exception instance should be created. """
+ self.assertIsNot(self.instance, None)
+
+ def test_exception_types(self):
+ """ Exception instance should match expected types. """
+ for match_type in self.types:
+ self.assertIsInstance(self.instance, match_type)
+
+
+def make_exception_scenarios(scenarios):
+ """ Make test scenarios for exception classes.
+
+ :param scenarios: Sequence of scenarios.
+ :return: List of scenarios with additional mapping entries.
+
+ Use this with `testscenarios` to adapt `Exception_TestCase`_ for
+ any exceptions that need testing.
+
+ Each scenario is a tuple (`name`, `map`) where `map` is a mapping
+ of attributes to be applied to each test case. The attributes map
+ must contain items for:
+
+ :key exc_type:
+ The exception type to be tested.
+ :key min_args:
+ The minimum argument count for the exception instance
+ initialiser.
+ :key types:
+ Sequence of types that should be superclasses of each
+ instance of the exception type.
+
+ """
+ updated_scenarios = deepcopy(scenarios)
+ for (name, scenario) in updated_scenarios:
+ args = (None,) * scenario['min_args']
+ scenario['args'] = args
+ instance = scenario['exc_type'](*args)
+ scenario['instance'] = instance
+
+ return updated_scenarios
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_daemon.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_daemon.py
new file mode 100755
index 00000000..a911858a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_daemon.py
@@ -0,0 +1,1744 @@
+# -*- coding: utf-8 -*-
+#
+# test/test_daemon.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Unit test for ‘daemon’ module.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import os
+import sys
+import tempfile
+import resource
+import errno
+import signal
+import socket
+from types import ModuleType
+import collections
+import functools
+try:
+ # Standard library of Python 2.7 and later.
+ from io import StringIO
+except ImportError:
+ # Standard library of Python 2.6 and earlier.
+ from StringIO import StringIO
+
+import mock
+
+from . import scaffold
+from .scaffold import (basestring, unicode)
+from .test_pidfile import (
+ FakeFileDescriptorStringIO,
+ setup_pidfile_fixtures,
+ )
+
+import daemon
+
+
+class ModuleExceptions_TestCase(scaffold.Exception_TestCase):
+ """ Test cases for module exception classes. """
+
+ scenarios = scaffold.make_exception_scenarios([
+ ('daemon.daemon.DaemonError', dict(
+ exc_type = daemon.daemon.DaemonError,
+ min_args = 1,
+ types = [Exception],
+ )),
+ ('daemon.daemon.DaemonOSEnvironmentError', dict(
+ exc_type = daemon.daemon.DaemonOSEnvironmentError,
+ min_args = 1,
+ types = [daemon.daemon.DaemonError, OSError],
+ )),
+ ('daemon.daemon.DaemonProcessDetachError', dict(
+ exc_type = daemon.daemon.DaemonProcessDetachError,
+ min_args = 1,
+ types = [daemon.daemon.DaemonError, OSError],
+ )),
+ ])
+
+
+def setup_daemon_context_fixtures(testcase):
+ """ Set up common test fixtures for DaemonContext test case.
+
+ :param testcase: A ``TestCase`` instance to decorate.
+ :return: ``None``.
+
+ Decorate the `testcase` with fixtures for tests involving
+ `DaemonContext`.
+
+ """
+ setup_streams_fixtures(testcase)
+
+ setup_pidfile_fixtures(testcase)
+
+ testcase.fake_pidfile_path = tempfile.mktemp()
+ testcase.mock_pidlockfile = mock.MagicMock()
+ testcase.mock_pidlockfile.path = testcase.fake_pidfile_path
+
+ testcase.daemon_context_args = dict(
+ stdin=testcase.stream_files_by_name['stdin'],
+ stdout=testcase.stream_files_by_name['stdout'],
+ stderr=testcase.stream_files_by_name['stderr'],
+ )
+ testcase.test_instance = daemon.DaemonContext(
+ **testcase.daemon_context_args)
+
+fake_default_signal_map = object()
+
+@mock.patch.object(
+ daemon.daemon, "is_detach_process_context_required",
+ new=(lambda: True))
+@mock.patch.object(
+ daemon.daemon, "make_default_signal_map",
+ new=(lambda: fake_default_signal_map))
+@mock.patch.object(os, "setgid", new=(lambda x: object()))
+@mock.patch.object(os, "setuid", new=(lambda x: object()))
+class DaemonContext_BaseTestCase(scaffold.TestCase):
+ """ Base class for DaemonContext test case classes. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_BaseTestCase, self).setUp()
+
+ setup_daemon_context_fixtures(self)
+
+
+class DaemonContext_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext class. """
+
+ def test_instantiate(self):
+ """ New instance of DaemonContext should be created. """
+ self.assertIsInstance(
+ self.test_instance, daemon.daemon.DaemonContext)
+
+ def test_minimum_zero_arguments(self):
+ """ Initialiser should not require any arguments. """
+ instance = daemon.daemon.DaemonContext()
+ self.assertIsNot(instance, None)
+
+ def test_has_specified_chroot_directory(self):
+ """ Should have specified chroot_directory option. """
+ args = dict(
+ chroot_directory=object(),
+ )
+ expected_directory = args['chroot_directory']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_directory, instance.chroot_directory)
+
+ def test_has_specified_working_directory(self):
+ """ Should have specified working_directory option. """
+ args = dict(
+ working_directory=object(),
+ )
+ expected_directory = args['working_directory']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_directory, instance.working_directory)
+
+ def test_has_default_working_directory(self):
+ """ Should have default working_directory option. """
+ args = dict()
+ expected_directory = "/"
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_directory, instance.working_directory)
+
+ def test_has_specified_creation_mask(self):
+ """ Should have specified umask option. """
+ args = dict(
+ umask=object(),
+ )
+ expected_mask = args['umask']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_mask, instance.umask)
+
+ def test_has_default_creation_mask(self):
+ """ Should have default umask option. """
+ args = dict()
+ expected_mask = 0
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_mask, instance.umask)
+
+ def test_has_specified_uid(self):
+ """ Should have specified uid option. """
+ args = dict(
+ uid=object(),
+ )
+ expected_id = args['uid']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_id, instance.uid)
+
+ def test_has_derived_uid(self):
+ """ Should have uid option derived from process. """
+ args = dict()
+ expected_id = os.getuid()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_id, instance.uid)
+
+ def test_has_specified_gid(self):
+ """ Should have specified gid option. """
+ args = dict(
+ gid=object(),
+ )
+ expected_id = args['gid']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_id, instance.gid)
+
+ def test_has_derived_gid(self):
+ """ Should have gid option derived from process. """
+ args = dict()
+ expected_id = os.getgid()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_id, instance.gid)
+
+ def test_has_specified_detach_process(self):
+ """ Should have specified detach_process option. """
+ args = dict(
+ detach_process=object(),
+ )
+ expected_value = args['detach_process']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_value, instance.detach_process)
+
+ def test_has_derived_detach_process(self):
+ """ Should have detach_process option derived from environment. """
+ args = dict()
+ func = daemon.daemon.is_detach_process_context_required
+ expected_value = func()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_value, instance.detach_process)
+
+ def test_has_specified_files_preserve(self):
+ """ Should have specified files_preserve option. """
+ args = dict(
+ files_preserve=object(),
+ )
+ expected_files_preserve = args['files_preserve']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_files_preserve, instance.files_preserve)
+
+ def test_has_specified_pidfile(self):
+ """ Should have the specified pidfile. """
+ args = dict(
+ pidfile=object(),
+ )
+ expected_pidfile = args['pidfile']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_pidfile, instance.pidfile)
+
+ def test_has_specified_stdin(self):
+ """ Should have specified stdin option. """
+ args = dict(
+ stdin=object(),
+ )
+ expected_file = args['stdin']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_file, instance.stdin)
+
+ def test_has_specified_stdout(self):
+ """ Should have specified stdout option. """
+ args = dict(
+ stdout=object(),
+ )
+ expected_file = args['stdout']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_file, instance.stdout)
+
+ def test_has_specified_stderr(self):
+ """ Should have specified stderr option. """
+ args = dict(
+ stderr=object(),
+ )
+ expected_file = args['stderr']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_file, instance.stderr)
+
+ def test_has_specified_signal_map(self):
+ """ Should have specified signal_map option. """
+ args = dict(
+ signal_map=object(),
+ )
+ expected_signal_map = args['signal_map']
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_signal_map, instance.signal_map)
+
+ def test_has_derived_signal_map(self):
+ """ Should have signal_map option derived from system. """
+ args = dict()
+ expected_signal_map = daemon.daemon.make_default_signal_map()
+ instance = daemon.daemon.DaemonContext(**args)
+ self.assertEqual(expected_signal_map, instance.signal_map)
+
+
+class DaemonContext_is_open_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.is_open property. """
+
+ def test_begin_false(self):
+ """ Initial value of is_open should be False. """
+ instance = self.test_instance
+ self.assertEqual(False, instance.is_open)
+
+ def test_write_fails(self):
+ """ Writing to is_open should fail. """
+ instance = self.test_instance
+ self.assertRaises(
+ AttributeError,
+ setattr, instance, 'is_open', object())
+
+
+class DaemonContext_open_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.open method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_open_TestCase, self).setUp()
+
+ self.test_instance._is_open = False
+
+ self.mock_module_daemon = mock.MagicMock()
+ daemon_func_patchers = dict(
+ (func_name, mock.patch.object(
+ daemon.daemon, func_name))
+ for func_name in [
+ "detach_process_context",
+ "change_working_directory",
+ "change_root_directory",
+ "change_file_creation_mask",
+ "change_process_owner",
+ "prevent_core_dump",
+ "close_all_open_files",
+ "redirect_stream",
+ "set_signal_handlers",
+ "register_atexit_function",
+ ])
+ for (func_name, patcher) in daemon_func_patchers.items():
+ mock_func = patcher.start()
+ self.addCleanup(patcher.stop)
+ self.mock_module_daemon.attach_mock(mock_func, func_name)
+
+ self.mock_module_daemon.attach_mock(mock.Mock(), 'DaemonContext')
+
+ self.test_files_preserve_fds = object()
+ self.test_signal_handler_map = object()
+ daemoncontext_method_return_values = {
+ '_get_exclude_file_descriptors':
+ self.test_files_preserve_fds,
+ '_make_signal_handler_map':
+ self.test_signal_handler_map,
+ }
+ daemoncontext_func_patchers = dict(
+ (func_name, mock.patch.object(
+ daemon.daemon.DaemonContext,
+ func_name,
+ return_value=return_value))
+ for (func_name, return_value) in
+ daemoncontext_method_return_values.items())
+ for (func_name, patcher) in daemoncontext_func_patchers.items():
+ mock_func = patcher.start()
+ self.addCleanup(patcher.stop)
+ self.mock_module_daemon.DaemonContext.attach_mock(
+ mock_func, func_name)
+
+ def test_performs_steps_in_expected_sequence(self):
+ """ Should perform daemonisation steps in expected sequence. """
+ instance = self.test_instance
+ instance.chroot_directory = object()
+ instance.detach_process = True
+ instance.pidfile = self.mock_pidlockfile
+ self.mock_module_daemon.attach_mock(
+ self.mock_pidlockfile, 'pidlockfile')
+ expected_calls = [
+ mock.call.change_root_directory(mock.ANY),
+ mock.call.prevent_core_dump(),
+ mock.call.change_file_creation_mask(mock.ANY),
+ mock.call.change_working_directory(mock.ANY),
+ mock.call.change_process_owner(mock.ANY, mock.ANY),
+ mock.call.detach_process_context(),
+ mock.call.DaemonContext._make_signal_handler_map(),
+ mock.call.set_signal_handlers(mock.ANY),
+ mock.call.DaemonContext._get_exclude_file_descriptors(),
+ mock.call.close_all_open_files(exclude=mock.ANY),
+ mock.call.redirect_stream(mock.ANY, mock.ANY),
+ mock.call.redirect_stream(mock.ANY, mock.ANY),
+ mock.call.redirect_stream(mock.ANY, mock.ANY),
+ mock.call.pidlockfile.__enter__(),
+ mock.call.register_atexit_function(mock.ANY),
+ ]
+ instance.open()
+ self.mock_module_daemon.assert_has_calls(expected_calls)
+
+ def test_returns_immediately_if_is_open(self):
+ """ Should return immediately if is_open property is true. """
+ instance = self.test_instance
+ instance._is_open = True
+ instance.open()
+ self.assertEqual(0, len(self.mock_module_daemon.mock_calls))
+
+ def test_changes_root_directory_to_chroot_directory(self):
+ """ Should change root directory to `chroot_directory` option. """
+ instance = self.test_instance
+ chroot_directory = object()
+ instance.chroot_directory = chroot_directory
+ instance.open()
+ self.mock_module_daemon.change_root_directory.assert_called_with(
+ chroot_directory)
+
+ def test_omits_chroot_if_no_chroot_directory(self):
+ """ Should omit changing root directory if no `chroot_directory`. """
+ instance = self.test_instance
+ instance.chroot_directory = None
+ instance.open()
+ self.assertFalse(self.mock_module_daemon.change_root_directory.called)
+
+ def test_prevents_core_dump(self):
+ """ Should request prevention of core dumps. """
+ instance = self.test_instance
+ instance.open()
+ self.mock_module_daemon.prevent_core_dump.assert_called_with()
+
+ def test_omits_prevent_core_dump_if_prevent_core_false(self):
+ """ Should omit preventing core dumps if `prevent_core` is false. """
+ instance = self.test_instance
+ instance.prevent_core = False
+ instance.open()
+ self.assertFalse(self.mock_module_daemon.prevent_core_dump.called)
+
+ def test_closes_open_files(self):
+ """ Should close all open files, excluding `files_preserve`. """
+ instance = self.test_instance
+ expected_exclude = self.test_files_preserve_fds
+ instance.open()
+ self.mock_module_daemon.close_all_open_files.assert_called_with(
+ exclude=expected_exclude)
+
+ def test_changes_directory_to_working_directory(self):
+ """ Should change current directory to `working_directory` option. """
+ instance = self.test_instance
+ working_directory = object()
+ instance.working_directory = working_directory
+ instance.open()
+ self.mock_module_daemon.change_working_directory.assert_called_with(
+ working_directory)
+
+ def test_changes_creation_mask_to_umask(self):
+ """ Should change file creation mask to `umask` option. """
+ instance = self.test_instance
+ umask = object()
+ instance.umask = umask
+ instance.open()
+ self.mock_module_daemon.change_file_creation_mask.assert_called_with(
+ umask)
+
+ def test_changes_owner_to_specified_uid_and_gid(self):
+ """ Should change process UID and GID to `uid` and `gid` options. """
+ instance = self.test_instance
+ uid = object()
+ gid = object()
+ instance.uid = uid
+ instance.gid = gid
+ instance.open()
+ self.mock_module_daemon.change_process_owner.assert_called_with(
+ uid, gid)
+
+ def test_detaches_process_context(self):
+ """ Should request detach of process context. """
+ instance = self.test_instance
+ instance.open()
+ self.mock_module_daemon.detach_process_context.assert_called_with()
+
+ def test_omits_process_detach_if_not_required(self):
+ """ Should omit detach of process context if not required. """
+ instance = self.test_instance
+ instance.detach_process = False
+ instance.open()
+ self.assertFalse(self.mock_module_daemon.detach_process_context.called)
+
+ def test_sets_signal_handlers_from_signal_map(self):
+ """ Should set signal handlers according to `signal_map`. """
+ instance = self.test_instance
+ instance.signal_map = object()
+ expected_signal_handler_map = self.test_signal_handler_map
+ instance.open()
+ self.mock_module_daemon.set_signal_handlers.assert_called_with(
+ expected_signal_handler_map)
+
+ def test_redirects_standard_streams(self):
+ """ Should request redirection of standard stream files. """
+ instance = self.test_instance
+ (system_stdin, system_stdout, system_stderr) = (
+ sys.stdin, sys.stdout, sys.stderr)
+ (target_stdin, target_stdout, target_stderr) = (
+ self.stream_files_by_name[name]
+ for name in ['stdin', 'stdout', 'stderr'])
+ expected_calls = [
+ mock.call(system_stdin, target_stdin),
+ mock.call(system_stdout, target_stdout),
+ mock.call(system_stderr, target_stderr),
+ ]
+ instance.open()
+ self.mock_module_daemon.redirect_stream.assert_has_calls(
+ expected_calls, any_order=True)
+
+ def test_enters_pidfile_context(self):
+ """ Should enter the PID file context manager. """
+ instance = self.test_instance
+ instance.pidfile = self.mock_pidlockfile
+ instance.open()
+ self.mock_pidlockfile.__enter__.assert_called_with()
+
+ def test_sets_is_open_true(self):
+ """ Should set the `is_open` property to True. """
+ instance = self.test_instance
+ instance.open()
+ self.assertEqual(True, instance.is_open)
+
+ def test_registers_close_method_for_atexit(self):
+ """ Should register the `close` method for atexit processing. """
+ instance = self.test_instance
+ close_method = instance.close
+ instance.open()
+ self.mock_module_daemon.register_atexit_function.assert_called_with(
+ close_method)
+
+
+class DaemonContext_close_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.close method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_close_TestCase, self).setUp()
+
+ self.test_instance._is_open = True
+
+ def test_returns_immediately_if_not_is_open(self):
+ """ Should return immediately if is_open property is false. """
+ instance = self.test_instance
+ instance._is_open = False
+ instance.pidfile = object()
+ instance.close()
+ self.assertFalse(self.mock_pidlockfile.__exit__.called)
+
+ def test_exits_pidfile_context(self):
+ """ Should exit the PID file context manager. """
+ instance = self.test_instance
+ instance.pidfile = self.mock_pidlockfile
+ instance.close()
+ self.mock_pidlockfile.__exit__.assert_called_with(None, None, None)
+
+ def test_returns_none(self):
+ """ Should return None. """
+ instance = self.test_instance
+ expected_result = None
+ result = instance.close()
+ self.assertIs(result, expected_result)
+
+ def test_sets_is_open_false(self):
+ """ Should set the `is_open` property to False. """
+ instance = self.test_instance
+ instance.close()
+ self.assertEqual(False, instance.is_open)
+
+
+@mock.patch.object(daemon.daemon.DaemonContext, "open")
+class DaemonContext_context_manager_enter_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.__enter__ method. """
+
+ def test_opens_daemon_context(self, mock_func_daemoncontext_open):
+ """ Should open the DaemonContext. """
+ instance = self.test_instance
+ instance.__enter__()
+ mock_func_daemoncontext_open.assert_called_with()
+
+ def test_returns_self_instance(self, mock_func_daemoncontext_open):
+ """ Should return DaemonContext instance. """
+ instance = self.test_instance
+ expected_result = instance
+ result = instance.__enter__()
+ self.assertIs(result, expected_result)
+
+
+@mock.patch.object(daemon.daemon.DaemonContext, "close")
+class DaemonContext_context_manager_exit_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.__exit__ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_context_manager_exit_TestCase, self).setUp()
+
+ self.test_args = dict(
+ exc_type=object(),
+ exc_value=object(),
+ traceback=object(),
+ )
+
+ def test_closes_daemon_context(self, mock_func_daemoncontext_close):
+ """ Should close the DaemonContext. """
+ instance = self.test_instance
+ args = self.test_args
+ instance.__exit__(**args)
+ mock_func_daemoncontext_close.assert_called_with()
+
+ def test_returns_none(self, mock_func_daemoncontext_close):
+ """ Should return None, indicating exception was not handled. """
+ instance = self.test_instance
+ args = self.test_args
+ expected_result = None
+ result = instance.__exit__(**args)
+ self.assertIs(result, expected_result)
+
+
+class DaemonContext_terminate_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext.terminate method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_terminate_TestCase, self).setUp()
+
+ self.test_signal = signal.SIGTERM
+ self.test_frame = None
+ self.test_args = (self.test_signal, self.test_frame)
+
+ def test_raises_system_exit(self):
+ """ Should raise SystemExit. """
+ instance = self.test_instance
+ args = self.test_args
+ expected_exception = SystemExit
+ self.assertRaises(
+ expected_exception,
+ instance.terminate, *args)
+
+ def test_exception_message_contains_signal_number(self):
+ """ Should raise exception with a message containing signal number. """
+ instance = self.test_instance
+ args = self.test_args
+ signal_number = self.test_signal
+ expected_exception = SystemExit
+ exc = self.assertRaises(
+ expected_exception,
+ instance.terminate, *args)
+ self.assertIn(unicode(signal_number), unicode(exc))
+
+
+class DaemonContext_get_exclude_file_descriptors_TestCase(
+ DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext._get_exclude_file_descriptors function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(
+ DaemonContext_get_exclude_file_descriptors_TestCase,
+ self).setUp()
+
+ self.test_files = {
+ 2: FakeFileDescriptorStringIO(),
+ 5: 5,
+ 11: FakeFileDescriptorStringIO(),
+ 17: None,
+ 23: FakeFileDescriptorStringIO(),
+ 37: 37,
+ 42: FakeFileDescriptorStringIO(),
+ }
+ for (fileno, item) in self.test_files.items():
+ if hasattr(item, '_fileno'):
+ item._fileno = fileno
+ self.test_file_descriptors = set(
+ fd for (fd, item) in self.test_files.items()
+ if item is not None)
+ self.test_file_descriptors.update(
+ self.stream_files_by_name[name].fileno()
+ for name in ['stdin', 'stdout', 'stderr']
+ )
+
+ def test_returns_expected_file_descriptors(self):
+ """ Should return expected set of file descriptors. """
+ instance = self.test_instance
+ instance.files_preserve = list(self.test_files.values())
+ expected_result = self.test_file_descriptors
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_returns_stream_redirects_if_no_files_preserve(self):
+ """ Should return only stream redirects if no files_preserve. """
+ instance = self.test_instance
+ instance.files_preserve = None
+ expected_result = set(
+ stream.fileno()
+ for stream in self.stream_files_by_name.values())
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_returns_empty_set_if_no_files(self):
+ """ Should return empty set if no file options. """
+ instance = self.test_instance
+ for name in ['files_preserve', 'stdin', 'stdout', 'stderr']:
+ setattr(instance, name, None)
+ expected_result = set()
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_omits_non_file_streams(self):
+ """ Should omit non-file stream attributes. """
+ instance = self.test_instance
+ instance.files_preserve = list(self.test_files.values())
+ stream_files = self.stream_files_by_name
+ expected_result = self.test_file_descriptors.copy()
+ for (pseudo_stream_name, pseudo_stream) in stream_files.items():
+ test_non_file_object = object()
+ setattr(instance, pseudo_stream_name, test_non_file_object)
+ stream_fd = pseudo_stream.fileno()
+ expected_result.discard(stream_fd)
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_includes_verbatim_streams_without_file_descriptor(self):
+ """ Should include verbatim any stream without a file descriptor. """
+ instance = self.test_instance
+ instance.files_preserve = list(self.test_files.values())
+ stream_files = self.stream_files_by_name
+ mock_fileno_method = mock.MagicMock(
+ spec=sys.__stdin__.fileno,
+ side_effect=ValueError)
+ expected_result = self.test_file_descriptors.copy()
+ for (pseudo_stream_name, pseudo_stream) in stream_files.items():
+ test_non_fd_stream = StringIO()
+ if not hasattr(test_non_fd_stream, 'fileno'):
+ # Python < 3 StringIO doesn't have ‘fileno’ at all.
+ # Add a method which raises an exception.
+ test_non_fd_stream.fileno = mock_fileno_method
+ setattr(instance, pseudo_stream_name, test_non_fd_stream)
+ stream_fd = pseudo_stream.fileno()
+ expected_result.discard(stream_fd)
+ expected_result.add(test_non_fd_stream)
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_omits_none_streams(self):
+ """ Should omit any stream attribute which is None. """
+ instance = self.test_instance
+ instance.files_preserve = list(self.test_files.values())
+ stream_files = self.stream_files_by_name
+ expected_result = self.test_file_descriptors.copy()
+ for (pseudo_stream_name, pseudo_stream) in stream_files.items():
+ setattr(instance, pseudo_stream_name, None)
+ stream_fd = pseudo_stream.fileno()
+ expected_result.discard(stream_fd)
+ result = instance._get_exclude_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+
+class DaemonContext_make_signal_handler_TestCase(DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext._make_signal_handler function. """
+
+ def test_returns_ignore_for_none(self):
+ """ Should return SIG_IGN when None handler specified. """
+ instance = self.test_instance
+ target = None
+ expected_result = signal.SIG_IGN
+ result = instance._make_signal_handler(target)
+ self.assertEqual(expected_result, result)
+
+ def test_returns_method_for_name(self):
+ """ Should return method of DaemonContext when name specified. """
+ instance = self.test_instance
+ target = 'terminate'
+ expected_result = instance.terminate
+ result = instance._make_signal_handler(target)
+ self.assertEqual(expected_result, result)
+
+ def test_raises_error_for_unknown_name(self):
+ """ Should raise AttributeError for unknown method name. """
+ instance = self.test_instance
+ target = 'b0gUs'
+ expected_error = AttributeError
+ self.assertRaises(
+ expected_error,
+ instance._make_signal_handler, target)
+
+ def test_returns_object_for_object(self):
+ """ Should return same object for any other object. """
+ instance = self.test_instance
+ target = object()
+ expected_result = target
+ result = instance._make_signal_handler(target)
+ self.assertEqual(expected_result, result)
+
+
+class DaemonContext_make_signal_handler_map_TestCase(
+ DaemonContext_BaseTestCase):
+ """ Test cases for DaemonContext._make_signal_handler_map function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonContext_make_signal_handler_map_TestCase, self).setUp()
+
+ self.test_instance.signal_map = {
+ object(): object(),
+ object(): object(),
+ object(): object(),
+ }
+
+ self.test_signal_handlers = dict(
+ (key, object())
+ for key in self.test_instance.signal_map.values())
+ self.test_signal_handler_map = dict(
+ (key, self.test_signal_handlers[target])
+ for (key, target) in self.test_instance.signal_map.items())
+
+ def fake_make_signal_handler(target):
+ return self.test_signal_handlers[target]
+
+ func_patcher_make_signal_handler = mock.patch.object(
+ daemon.daemon.DaemonContext, "_make_signal_handler",
+ side_effect=fake_make_signal_handler)
+ self.mock_func_make_signal_handler = (
+ func_patcher_make_signal_handler.start())
+ self.addCleanup(func_patcher_make_signal_handler.stop)
+
+ def test_returns_constructed_signal_handler_items(self):
+ """ Should return items as constructed via make_signal_handler. """
+ instance = self.test_instance
+ expected_result = self.test_signal_handler_map
+ result = instance._make_signal_handler_map()
+ self.assertEqual(expected_result, result)
+
+
+try:
+ FileNotFoundError
+except NameError:
+ # Python 2 uses IOError.
+ FileNotFoundError = functools.partial(IOError, errno.ENOENT)
+
+
+@mock.patch.object(os, "chdir")
+class change_working_directory_TestCase(scaffold.TestCase):
+ """ Test cases for change_working_directory function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(change_working_directory_TestCase, self).setUp()
+
+ self.test_directory = object()
+ self.test_args = dict(
+ directory=self.test_directory,
+ )
+
+ def test_changes_working_directory_to_specified_directory(
+ self,
+ mock_func_os_chdir):
+ """ Should change working directory to specified directory. """
+ args = self.test_args
+ directory = self.test_directory
+ daemon.daemon.change_working_directory(**args)
+ mock_func_os_chdir.assert_called_with(directory)
+
+ def test_raises_daemon_error_on_os_error(
+ self,
+ mock_func_os_chdir):
+ """ Should raise a DaemonError on receiving an IOError. """
+ args = self.test_args
+ test_error = FileNotFoundError("No such directory")
+ mock_func_os_chdir.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_working_directory, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_error_message_contains_original_error_message(
+ self,
+ mock_func_os_chdir):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = FileNotFoundError("No such directory")
+ mock_func_os_chdir.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_working_directory, **args)
+ self.assertIn(unicode(test_error), unicode(exc))
+
+
+@mock.patch.object(os, "chroot")
+@mock.patch.object(os, "chdir")
+class change_root_directory_TestCase(scaffold.TestCase):
+ """ Test cases for change_root_directory function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(change_root_directory_TestCase, self).setUp()
+
+ self.test_directory = object()
+ self.test_args = dict(
+ directory=self.test_directory,
+ )
+
+ def test_changes_working_directory_to_specified_directory(
+ self,
+ mock_func_os_chdir, mock_func_os_chroot):
+ """ Should change working directory to specified directory. """
+ args = self.test_args
+ directory = self.test_directory
+ daemon.daemon.change_root_directory(**args)
+ mock_func_os_chdir.assert_called_with(directory)
+
+ def test_changes_root_directory_to_specified_directory(
+ self,
+ mock_func_os_chdir, mock_func_os_chroot):
+ """ Should change root directory to specified directory. """
+ args = self.test_args
+ directory = self.test_directory
+ daemon.daemon.change_root_directory(**args)
+ mock_func_os_chroot.assert_called_with(directory)
+
+ def test_raises_daemon_error_on_os_error_from_chdir(
+ self,
+ mock_func_os_chdir, mock_func_os_chroot):
+ """ Should raise a DaemonError on receiving an IOError from chdir. """
+ args = self.test_args
+ test_error = FileNotFoundError("No such directory")
+ mock_func_os_chdir.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_root_directory, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_raises_daemon_error_on_os_error_from_chroot(
+ self,
+ mock_func_os_chdir, mock_func_os_chroot):
+ """ Should raise a DaemonError on receiving an OSError from chroot. """
+ args = self.test_args
+ test_error = OSError(errno.EPERM, "No chroot for you!")
+ mock_func_os_chroot.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_root_directory, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_error_message_contains_original_error_message(
+ self,
+ mock_func_os_chdir, mock_func_os_chroot):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = FileNotFoundError("No such directory")
+ mock_func_os_chdir.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_root_directory, **args)
+ self.assertIn(unicode(test_error), unicode(exc))
+
+
+@mock.patch.object(os, "umask")
+class change_file_creation_mask_TestCase(scaffold.TestCase):
+ """ Test cases for change_file_creation_mask function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(change_file_creation_mask_TestCase, self).setUp()
+
+ self.test_mask = object()
+ self.test_args = dict(
+ mask=self.test_mask,
+ )
+
+ def test_changes_umask_to_specified_mask(self, mock_func_os_umask):
+ """ Should change working directory to specified directory. """
+ args = self.test_args
+ mask = self.test_mask
+ daemon.daemon.change_file_creation_mask(**args)
+ mock_func_os_umask.assert_called_with(mask)
+
+ def test_raises_daemon_error_on_os_error_from_umask(
+ self,
+ mock_func_os_umask):
+ """ Should raise a DaemonError on receiving an OSError from umask. """
+ args = self.test_args
+ test_error = OSError(errno.EINVAL, "Whatchoo talkin' 'bout?")
+ mock_func_os_umask.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_file_creation_mask, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_error_message_contains_original_error_message(
+ self,
+ mock_func_os_umask):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = FileNotFoundError("No such directory")
+ mock_func_os_umask.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_file_creation_mask, **args)
+ self.assertIn(unicode(test_error), unicode(exc))
+
+
+@mock.patch.object(os, "setgid")
+@mock.patch.object(os, "setuid")
+class change_process_owner_TestCase(scaffold.TestCase):
+ """ Test cases for change_process_owner function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(change_process_owner_TestCase, self).setUp()
+
+ self.test_uid = object()
+ self.test_gid = object()
+ self.test_args = dict(
+ uid=self.test_uid,
+ gid=self.test_gid,
+ )
+
+    def test_changes_gid_and_uid_in_order(
+            self,
+            mock_func_os_setuid, mock_func_os_setgid):
+        """ Should change process GID and UID in correct order.
+
+            Since the process requires appropriate privilege to use
+            either of `setuid` or `setgid`, changing the UID must be
+            done last.
+
+            """
+        args = self.test_args
+        mock_os_module = mock.MagicMock()
+        mock_os_module.attach_mock(mock_func_os_setgid, "setgid")
+        mock_os_module.attach_mock(mock_func_os_setuid, "setuid")
+        daemon.daemon.change_process_owner(**args)
+        expected_calls = [
+            mock.call.setgid(self.test_gid),
+            mock.call.setuid(self.test_uid),
+            ]
+        mock_os_module.assert_has_calls(expected_calls)
+
+ def test_changes_group_id_to_gid(
+ self,
+ mock_func_os_setuid, mock_func_os_setgid):
+ """ Should change process GID to specified value. """
+ args = self.test_args
+ gid = self.test_gid
+ daemon.daemon.change_process_owner(**args)
+        mock_func_os_setgid.assert_called_with(gid)
+
+ def test_changes_user_id_to_uid(
+ self,
+ mock_func_os_setuid, mock_func_os_setgid):
+ """ Should change process UID to specified value. """
+ args = self.test_args
+ uid = self.test_uid
+ daemon.daemon.change_process_owner(**args)
+        mock_func_os_setuid.assert_called_with(uid)
+
+ def test_raises_daemon_error_on_os_error_from_setgid(
+ self,
+ mock_func_os_setuid, mock_func_os_setgid):
+ """ Should raise a DaemonError on receiving an OSError from setgid. """
+ args = self.test_args
+ test_error = OSError(errno.EPERM, "No switching for you!")
+ mock_func_os_setgid.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_process_owner, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_raises_daemon_error_on_os_error_from_setuid(
+ self,
+ mock_func_os_setuid, mock_func_os_setgid):
+ """ Should raise a DaemonError on receiving an OSError from setuid. """
+ args = self.test_args
+ test_error = OSError(errno.EPERM, "No switching for you!")
+ mock_func_os_setuid.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_process_owner, **args)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_error_message_contains_original_error_message(
+ self,
+ mock_func_os_setuid, mock_func_os_setgid):
+ """ Should raise a DaemonError with original message. """
+ args = self.test_args
+ test_error = OSError(errno.EINVAL, "Whatchoo talkin' 'bout?")
+ mock_func_os_setuid.side_effect = test_error
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.change_process_owner, **args)
+ self.assertIn(unicode(test_error), unicode(exc))
+
+
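+# An illustrative sketch (an assumption drawn from the docstring of
+# ‘test_changes_gid_and_uid_in_order’ above, not a copy of the library code):
+# the privilege-drop ordering that ‘change_process_owner’ is expected to use.
+def _sketch_change_process_owner(uid, gid):
+    """ Drop group privilege before user privilege. """
+    # The GID must change first: once the UID has been dropped, the process
+    # may no longer hold the privilege needed to call ‘os.setgid’.
+    os.setgid(gid)
+    os.setuid(uid)
+
+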
+RLimitResult = collections.namedtuple('RLimitResult', ['soft', 'hard'])
+
+fake_RLIMIT_CORE = object()
+
+@mock.patch.object(resource, "RLIMIT_CORE", new=fake_RLIMIT_CORE)
+@mock.patch.object(resource, "setrlimit", side_effect=(lambda x, y: None))
+@mock.patch.object(resource, "getrlimit", side_effect=(lambda x: None))
+class prevent_core_dump_TestCase(scaffold.TestCase):
+ """ Test cases for prevent_core_dump function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(prevent_core_dump_TestCase, self).setUp()
+
+ def test_sets_core_limit_to_zero(
+ self,
+ mock_func_resource_getrlimit, mock_func_resource_setrlimit):
+ """ Should set the RLIMIT_CORE resource to zero. """
+ expected_resource = fake_RLIMIT_CORE
+ expected_limit = tuple(RLimitResult(soft=0, hard=0))
+ daemon.daemon.prevent_core_dump()
+ mock_func_resource_getrlimit.assert_called_with(expected_resource)
+ mock_func_resource_setrlimit.assert_called_with(
+ expected_resource, expected_limit)
+
+ def test_raises_error_when_no_core_resource(
+ self,
+ mock_func_resource_getrlimit, mock_func_resource_setrlimit):
+ """ Should raise DaemonError if no RLIMIT_CORE resource. """
+ test_error = ValueError("Bogus platform doesn't have RLIMIT_CORE")
+ def fake_getrlimit(res):
+ if res == resource.RLIMIT_CORE:
+ raise test_error
+ else:
+ return None
+ mock_func_resource_getrlimit.side_effect = fake_getrlimit
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.prevent_core_dump)
+ self.assertEqual(test_error, exc.__cause__)
+
+
+@mock.patch.object(os, "close")
+class close_file_descriptor_if_open_TestCase(scaffold.TestCase):
+ """ Test cases for close_file_descriptor_if_open function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(close_file_descriptor_if_open_TestCase, self).setUp()
+
+ self.fake_fd = 274
+
+ def test_requests_file_descriptor_close(self, mock_func_os_close):
+ """ Should request close of file descriptor. """
+ fd = self.fake_fd
+ daemon.daemon.close_file_descriptor_if_open(fd)
+ mock_func_os_close.assert_called_with(fd)
+
+ def test_ignores_badfd_error_on_close(self, mock_func_os_close):
+ """ Should ignore OSError EBADF when closing. """
+ fd = self.fake_fd
+ test_error = OSError(errno.EBADF, "Bad file descriptor")
+ def fake_os_close(fd):
+ raise test_error
+ mock_func_os_close.side_effect = fake_os_close
+ daemon.daemon.close_file_descriptor_if_open(fd)
+ mock_func_os_close.assert_called_with(fd)
+
+ def test_raises_error_if_oserror_on_close(self, mock_func_os_close):
+ """ Should raise DaemonError if an OSError occurs when closing. """
+ fd = self.fake_fd
+ test_error = OSError(object(), "Unexpected error")
+ def fake_os_close(fd):
+ raise test_error
+ mock_func_os_close.side_effect = fake_os_close
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.close_file_descriptor_if_open, fd)
+ self.assertEqual(test_error, exc.__cause__)
+
+ def test_raises_error_if_ioerror_on_close(self, mock_func_os_close):
+ """ Should raise DaemonError if an IOError occurs when closing. """
+ fd = self.fake_fd
+ test_error = IOError(object(), "Unexpected error")
+ def fake_os_close(fd):
+ raise test_error
+ mock_func_os_close.side_effect = fake_os_close
+ expected_error = daemon.daemon.DaemonOSEnvironmentError
+ exc = self.assertRaises(
+ expected_error,
+ daemon.daemon.close_file_descriptor_if_open, fd)
+ self.assertEqual(test_error, exc.__cause__)
+
+
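+# An illustrative sketch (an assumption based on the test cases above, not a
+# copy of the library code): close the descriptor and treat only EBADF
+# ("not open") as ignorable. The real function also records the original
+# error as ‘__cause__’, which the tests above verify.
+def _sketch_close_file_descriptor_if_open(fd):
+    """ Close ‘fd’ if it is currently open, ignoring "not open" errors. """
+    try:
+        os.close(fd)
+    except EnvironmentError as exc:
+        # EnvironmentError covers both OSError and IOError.
+        if exc.errno != errno.EBADF:
+            raise daemon.daemon.DaemonOSEnvironmentError(
+                "Failed to close file descriptor {fd}: {exc}".format(
+                    fd=fd, exc=exc))
+
+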
+class maxfd_TestCase(scaffold.TestCase):
+ """ Test cases for module MAXFD constant. """
+
+ def test_positive(self):
+ """ Should be a positive number. """
+ maxfd = daemon.daemon.MAXFD
+ self.assertTrue(maxfd > 0)
+
+ def test_integer(self):
+ """ Should be an integer. """
+ maxfd = daemon.daemon.MAXFD
+ self.assertEqual(int(maxfd), maxfd)
+
+ def test_reasonably_high(self):
+ """ Should be reasonably high for default open files limit.
+
+ If the system reports a limit of “infinity” on maximum
+ file descriptors, we still need a finite number in order
+ to close “all” of them. Ensure this is reasonably high
+ to catch most use cases.
+
+ """
+ expected_minimum = 2048
+ maxfd = daemon.daemon.MAXFD
+ self.assertTrue(
+ expected_minimum <= maxfd,
+ msg=(
+ "MAXFD should be at least {minimum!r}"
+ " (got {maxfd!r})".format(
+ minimum=expected_minimum, maxfd=maxfd)))
+
+
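+# An illustrative sketch (an assumption based on the MAXFD rationale above and
+# the ‘get_maximum_file_descriptors’ test cases below, not a copy of the
+# library code): take the hard RLIMIT_NOFILE limit, but fall back to the
+# finite MAXFD constant when the system reports "infinity".
+def _sketch_get_maximum_file_descriptors():
+    """ Return a finite upper bound on open file descriptors. """
+    (softlimit, hardlimit) = resource.getrlimit(resource.RLIMIT_NOFILE)
+    if hardlimit == resource.RLIM_INFINITY:
+        return daemon.daemon.MAXFD
+    return hardlimit
+
+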
+fake_default_maxfd = 8
+fake_RLIMIT_NOFILE = object()
+fake_RLIM_INFINITY = object()
+fake_rlimit_nofile_large = 2468
+
+def fake_getrlimit_nofile_soft_infinity(resource):
+ result = RLimitResult(soft=fake_RLIM_INFINITY, hard=object())
+ if resource != fake_RLIMIT_NOFILE:
+ result = NotImplemented
+ return result
+
+def fake_getrlimit_nofile_hard_infinity(resource):
+ result = RLimitResult(soft=object(), hard=fake_RLIM_INFINITY)
+ if resource != fake_RLIMIT_NOFILE:
+ result = NotImplemented
+ return result
+
+def fake_getrlimit_nofile_hard_large(resource):
+ result = RLimitResult(soft=object(), hard=fake_rlimit_nofile_large)
+ if resource != fake_RLIMIT_NOFILE:
+ result = NotImplemented
+ return result
+
+@mock.patch.object(daemon.daemon, "MAXFD", new=fake_default_maxfd)
+@mock.patch.object(resource, "RLIMIT_NOFILE", new=fake_RLIMIT_NOFILE)
+@mock.patch.object(resource, "RLIM_INFINITY", new=fake_RLIM_INFINITY)
+@mock.patch.object(
+ resource, "getrlimit",
+ side_effect=fake_getrlimit_nofile_hard_large)
+class get_maximum_file_descriptors_TestCase(scaffold.TestCase):
+ """ Test cases for get_maximum_file_descriptors function. """
+
+ def test_returns_system_hard_limit(self, mock_func_resource_getrlimit):
+ """ Should return process hard limit on number of files. """
+ expected_result = fake_rlimit_nofile_large
+ result = daemon.daemon.get_maximum_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+ def test_returns_module_default_if_hard_limit_infinity(
+ self, mock_func_resource_getrlimit):
+ """ Should return module MAXFD if hard limit is infinity. """
+ mock_func_resource_getrlimit.side_effect = (
+ fake_getrlimit_nofile_hard_infinity)
+ expected_result = fake_default_maxfd
+ result = daemon.daemon.get_maximum_file_descriptors()
+ self.assertEqual(expected_result, result)
+
+
+def fake_get_maximum_file_descriptors():
+ return fake_default_maxfd
+
+@mock.patch.object(resource, "RLIMIT_NOFILE", new=fake_RLIMIT_NOFILE)
+@mock.patch.object(resource, "RLIM_INFINITY", new=fake_RLIM_INFINITY)
+@mock.patch.object(
+ resource, "getrlimit",
+ new=fake_getrlimit_nofile_soft_infinity)
+@mock.patch.object(
+ daemon.daemon, "get_maximum_file_descriptors",
+ new=fake_get_maximum_file_descriptors)
+@mock.patch.object(daemon.daemon, "close_file_descriptor_if_open")
+class close_all_open_files_TestCase(scaffold.TestCase):
+ """ Test cases for close_all_open_files function. """
+
+ def test_requests_all_open_files_to_close(
+ self, mock_func_close_file_descriptor_if_open):
+ """ Should request close of all open files. """
+ expected_file_descriptors = range(fake_default_maxfd)
+ expected_calls = [
+ mock.call(fd) for fd in expected_file_descriptors]
+ daemon.daemon.close_all_open_files()
+ mock_func_close_file_descriptor_if_open.assert_has_calls(
+ expected_calls, any_order=True)
+
+ def test_requests_all_but_excluded_files_to_close(
+ self, mock_func_close_file_descriptor_if_open):
+ """ Should request close of all open files but those excluded. """
+ test_exclude = set([3, 7])
+ args = dict(
+ exclude=test_exclude,
+ )
+ expected_file_descriptors = set(
+ fd for fd in range(fake_default_maxfd)
+ if fd not in test_exclude)
+ expected_calls = [
+ mock.call(fd) for fd in expected_file_descriptors]
+ daemon.daemon.close_all_open_files(**args)
+ mock_func_close_file_descriptor_if_open.assert_has_calls(
+ expected_calls, any_order=True)
+
+
+class detach_process_context_TestCase(scaffold.TestCase):
+ """ Test cases for detach_process_context function. """
+
+ class FakeOSExit(SystemExit):
+ """ Fake exception raised for os._exit(). """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(detach_process_context_TestCase, self).setUp()
+
+ self.mock_module_os = mock.MagicMock(wraps=os)
+
+ fake_pids = [0, 0]
+ func_patcher_os_fork = mock.patch.object(
+ os, "fork",
+ side_effect=iter(fake_pids))
+ self.mock_func_os_fork = func_patcher_os_fork.start()
+ self.addCleanup(func_patcher_os_fork.stop)
+ self.mock_module_os.attach_mock(self.mock_func_os_fork, "fork")
+
+ func_patcher_os_setsid = mock.patch.object(os, "setsid")
+ self.mock_func_os_setsid = func_patcher_os_setsid.start()
+ self.addCleanup(func_patcher_os_setsid.stop)
+ self.mock_module_os.attach_mock(self.mock_func_os_setsid, "setsid")
+
+ def raise_os_exit(status=None):
+ raise self.FakeOSExit(status)
+
+ func_patcher_os_force_exit = mock.patch.object(
+ os, "_exit",
+ side_effect=raise_os_exit)
+ self.mock_func_os_force_exit = func_patcher_os_force_exit.start()
+ self.addCleanup(func_patcher_os_force_exit.stop)
+ self.mock_module_os.attach_mock(self.mock_func_os_force_exit, "_exit")
+
+ def test_parent_exits(self):
+ """ Parent process should exit. """
+ parent_pid = 23
+ self.mock_func_os_fork.side_effect = iter([parent_pid])
+ self.assertRaises(
+ self.FakeOSExit,
+ daemon.daemon.detach_process_context)
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ mock.call._exit(0),
+ ])
+
+ def test_first_fork_error_raises_error(self):
+ """ Error on first fork should raise DaemonProcessDetachError. """
+ fork_errno = 13
+ fork_strerror = "Bad stuff happened"
+ test_error = OSError(fork_errno, fork_strerror)
+ test_pids_iter = iter([test_error])
+
+ def fake_fork():
+ next_item = next(test_pids_iter)
+ if isinstance(next_item, Exception):
+ raise next_item
+ else:
+ return next_item
+
+ self.mock_func_os_fork.side_effect = fake_fork
+ exc = self.assertRaises(
+ daemon.daemon.DaemonProcessDetachError,
+ daemon.daemon.detach_process_context)
+ self.assertEqual(test_error, exc.__cause__)
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ ])
+
+ def test_child_starts_new_process_group(self):
+ """ Child should start new process group. """
+ daemon.daemon.detach_process_context()
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ mock.call.setsid(),
+ ])
+
+ def test_child_forks_next_parent_exits(self):
+ """ Child should fork, then exit if parent. """
+ fake_pids = [0, 42]
+ self.mock_func_os_fork.side_effect = iter(fake_pids)
+ self.assertRaises(
+ self.FakeOSExit,
+ daemon.daemon.detach_process_context)
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ mock.call.setsid(),
+ mock.call.fork(),
+ mock.call._exit(0),
+ ])
+
+ def test_second_fork_error_reports_to_stderr(self):
+ """ Error on second fork should cause report to stderr. """
+ fork_errno = 17
+ fork_strerror = "Nasty stuff happened"
+ test_error = OSError(fork_errno, fork_strerror)
+ test_pids_iter = iter([0, test_error])
+
+ def fake_fork():
+ next_item = next(test_pids_iter)
+ if isinstance(next_item, Exception):
+ raise next_item
+ else:
+ return next_item
+
+ self.mock_func_os_fork.side_effect = fake_fork
+ exc = self.assertRaises(
+ daemon.daemon.DaemonProcessDetachError,
+ daemon.daemon.detach_process_context)
+ self.assertEqual(test_error, exc.__cause__)
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ mock.call.setsid(),
+ mock.call.fork(),
+ ])
+
+ def test_child_forks_next_child_continues(self):
+ """ Child should fork, then continue if child. """
+ daemon.daemon.detach_process_context()
+ self.mock_module_os.assert_has_calls([
+ mock.call.fork(),
+ mock.call.setsid(),
+ mock.call.fork(),
+ ])
+
+
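+# An illustrative sketch (an assumption based on the test cases above, not a
+# copy of the library code): the fork, setsid, fork sequence that
+# ‘detach_process_context’ is expected to perform. Wrapping fork failures in
+# DaemonProcessDetachError, which the tests above check, is omitted here.
+def _sketch_detach_process_context():
+    """ Detach from the controlling terminal and parent process. """
+    if os.fork() > 0:
+        # First parent exits, so the child is re-parented to init.
+        os._exit(0)
+    os.setsid()  # The child becomes a session leader with no controlling tty.
+    if os.fork() > 0:
+        # The session leader exits, so the grandchild can never re-acquire
+        # a controlling terminal.
+        os._exit(0)
+
+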
+@mock.patch("os.getppid", return_value=765)
+class is_process_started_by_init_TestCase(scaffold.TestCase):
+ """ Test cases for is_process_started_by_init function. """
+
+ def test_returns_false_by_default(self, mock_func_os_getppid):
+ """ Should return False under normal circumstances. """
+ expected_result = False
+ result = daemon.daemon.is_process_started_by_init()
+ self.assertIs(result, expected_result)
+
+ def test_returns_true_if_parent_process_is_init(
+ self, mock_func_os_getppid):
+ """ Should return True if parent process is `init`. """
+ init_pid = 1
+ mock_func_os_getppid.return_value = init_pid
+ expected_result = True
+ result = daemon.daemon.is_process_started_by_init()
+ self.assertIs(result, expected_result)
+
+
+class is_socket_TestCase(scaffold.TestCase):
+ """ Test cases for is_socket function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(is_socket_TestCase, self).setUp()
+
+ def fake_getsockopt(level, optname, buflen=None):
+ result = object()
+ if optname is socket.SO_TYPE:
+ result = socket.SOCK_RAW
+ return result
+
+ self.fake_socket_getsockopt_func = fake_getsockopt
+
+ self.fake_socket_error = socket.error(
+ errno.ENOTSOCK,
+ "Socket operation on non-socket")
+
+ self.mock_socket = mock.MagicMock(spec=socket.socket)
+ self.mock_socket.getsockopt.side_effect = self.fake_socket_error
+
+ def fake_socket_fromfd(fd, family, type, proto=None):
+ return self.mock_socket
+
+ func_patcher_socket_fromfd = mock.patch.object(
+ socket, "fromfd",
+ side_effect=fake_socket_fromfd)
+ func_patcher_socket_fromfd.start()
+ self.addCleanup(func_patcher_socket_fromfd.stop)
+
+ def test_returns_false_by_default(self):
+ """ Should return False under normal circumstances. """
+ test_fd = 23
+ expected_result = False
+ result = daemon.daemon.is_socket(test_fd)
+ self.assertIs(result, expected_result)
+
+ def test_returns_true_if_stdin_is_socket(self):
+ """ Should return True if `stdin` is a socket. """
+ test_fd = 23
+ getsockopt = self.mock_socket.getsockopt
+ getsockopt.side_effect = self.fake_socket_getsockopt_func
+ expected_result = True
+ result = daemon.daemon.is_socket(test_fd)
+ self.assertIs(result, expected_result)
+
+    def test_returns_true_if_stdin_socket_raises_error(self):
+ """ Should return True if `stdin` is a socket and raises error. """
+ test_fd = 23
+ getsockopt = self.mock_socket.getsockopt
+ getsockopt.side_effect = socket.error(
+ object(), "Weird socket stuff")
+ expected_result = True
+ result = daemon.daemon.is_socket(test_fd)
+ self.assertIs(result, expected_result)
+
+
+class is_process_started_by_superserver_TestCase(scaffold.TestCase):
+ """ Test cases for is_process_started_by_superserver function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(is_process_started_by_superserver_TestCase, self).setUp()
+
+ def fake_is_socket(fd):
+ if sys.__stdin__.fileno() == fd:
+ result = self.fake_stdin_is_socket_func()
+ else:
+ result = False
+ return result
+
+ self.fake_stdin_is_socket_func = (lambda: False)
+
+ func_patcher_is_socket = mock.patch.object(
+ daemon.daemon, "is_socket",
+ side_effect=fake_is_socket)
+ func_patcher_is_socket.start()
+ self.addCleanup(func_patcher_is_socket.stop)
+
+ def test_returns_false_by_default(self):
+ """ Should return False under normal circumstances. """
+ expected_result = False
+ result = daemon.daemon.is_process_started_by_superserver()
+ self.assertIs(result, expected_result)
+
+ def test_returns_true_if_stdin_is_socket(self):
+ """ Should return True if `stdin` is a socket. """
+ self.fake_stdin_is_socket_func = (lambda: True)
+ expected_result = True
+ result = daemon.daemon.is_process_started_by_superserver()
+ self.assertIs(result, expected_result)
+
+
+@mock.patch.object(
+ daemon.daemon, "is_process_started_by_superserver",
+ return_value=False)
+@mock.patch.object(
+ daemon.daemon, "is_process_started_by_init",
+ return_value=False)
+class is_detach_process_context_required_TestCase(scaffold.TestCase):
+ """ Test cases for is_detach_process_context_required function. """
+
+ def test_returns_true_by_default(
+ self,
+ mock_func_is_process_started_by_init,
+ mock_func_is_process_started_by_superserver):
+ """ Should return True under normal circumstances. """
+ expected_result = True
+ result = daemon.daemon.is_detach_process_context_required()
+ self.assertIs(result, expected_result)
+
+ def test_returns_false_if_started_by_init(
+ self,
+ mock_func_is_process_started_by_init,
+ mock_func_is_process_started_by_superserver):
+ """ Should return False if current process started by init. """
+ mock_func_is_process_started_by_init.return_value = True
+ expected_result = False
+ result = daemon.daemon.is_detach_process_context_required()
+ self.assertIs(result, expected_result)
+
+    def test_returns_false_if_started_by_superserver(
+ self,
+ mock_func_is_process_started_by_init,
+ mock_func_is_process_started_by_superserver):
+ """ Should return False if current process started by superserver. """
+ mock_func_is_process_started_by_superserver.return_value = True
+ expected_result = False
+ result = daemon.daemon.is_detach_process_context_required()
+ self.assertIs(result, expected_result)
+
+
+def setup_streams_fixtures(testcase):
+ """ Set up common test fixtures for standard streams. """
+ testcase.stream_file_paths = dict(
+ stdin=tempfile.mktemp(),
+ stdout=tempfile.mktemp(),
+ stderr=tempfile.mktemp(),
+ )
+
+ testcase.stream_files_by_name = dict(
+ (name, FakeFileDescriptorStringIO())
+ for name in ['stdin', 'stdout', 'stderr']
+ )
+
+ testcase.stream_files_by_path = dict(
+ (testcase.stream_file_paths[name],
+ testcase.stream_files_by_name[name])
+ for name in ['stdin', 'stdout', 'stderr']
+ )
+
+
+@mock.patch.object(os, "dup2")
+class redirect_stream_TestCase(scaffold.TestCase):
+ """ Test cases for redirect_stream function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(redirect_stream_TestCase, self).setUp()
+
+ self.test_system_stream = FakeFileDescriptorStringIO()
+ self.test_target_stream = FakeFileDescriptorStringIO()
+ self.test_null_file = FakeFileDescriptorStringIO()
+
+ def fake_os_open(path, flag, mode=None):
+ if path == os.devnull:
+ result = self.test_null_file.fileno()
+ else:
+ raise FileNotFoundError("No such file", path)
+ return result
+
+ func_patcher_os_open = mock.patch.object(
+ os, "open",
+ side_effect=fake_os_open)
+ self.mock_func_os_open = func_patcher_os_open.start()
+ self.addCleanup(func_patcher_os_open.stop)
+
+ def test_duplicates_target_file_descriptor(
+ self, mock_func_os_dup2):
+ """ Should duplicate file descriptor from target to system stream. """
+ system_stream = self.test_system_stream
+ system_fileno = system_stream.fileno()
+ target_stream = self.test_target_stream
+ target_fileno = target_stream.fileno()
+ daemon.daemon.redirect_stream(system_stream, target_stream)
+ mock_func_os_dup2.assert_called_with(target_fileno, system_fileno)
+
+ def test_duplicates_null_file_descriptor_by_default(
+ self, mock_func_os_dup2):
+ """ Should by default duplicate the null file to the system stream. """
+ system_stream = self.test_system_stream
+ system_fileno = system_stream.fileno()
+ target_stream = None
+ null_path = os.devnull
+ null_flag = os.O_RDWR
+ null_file = self.test_null_file
+ null_fileno = null_file.fileno()
+ daemon.daemon.redirect_stream(system_stream, target_stream)
+ self.mock_func_os_open.assert_called_with(null_path, null_flag)
+ mock_func_os_dup2.assert_called_with(null_fileno, system_fileno)
+
+
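+# An illustrative sketch (an assumption based on the ‘redirect_stream’ test
+# cases above, not a copy of the library code): redirection happens at the
+# file-descriptor level, duplicating either the target stream's descriptor or
+# the null device onto the system stream.
+def _sketch_redirect_stream(system_stream, target_stream):
+    """ Point ‘system_stream’ at ‘target_stream’, or at the null device. """
+    if target_stream is None:
+        target_fd = os.open(os.devnull, os.O_RDWR)
+    else:
+        target_fd = target_stream.fileno()
+    os.dup2(target_fd, system_stream.fileno())
+
+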
+class make_default_signal_map_TestCase(scaffold.TestCase):
+ """ Test cases for make_default_signal_map function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(make_default_signal_map_TestCase, self).setUp()
+
+ # Use whatever default string type this Python version needs.
+ signal_module_name = str('signal')
+ self.fake_signal_module = ModuleType(signal_module_name)
+
+ fake_signal_names = [
+ 'SIGHUP',
+ 'SIGCLD',
+ 'SIGSEGV',
+ 'SIGTSTP',
+ 'SIGTTIN',
+ 'SIGTTOU',
+ 'SIGTERM',
+ ]
+ for name in fake_signal_names:
+ setattr(self.fake_signal_module, name, object())
+
+ module_patcher_signal = mock.patch.object(
+ daemon.daemon, "signal", new=self.fake_signal_module)
+ module_patcher_signal.start()
+ self.addCleanup(module_patcher_signal.stop)
+
+ default_signal_map_by_name = {
+ 'SIGTSTP': None,
+ 'SIGTTIN': None,
+ 'SIGTTOU': None,
+ 'SIGTERM': 'terminate',
+ }
+ self.default_signal_map = dict(
+ (getattr(self.fake_signal_module, name), target)
+ for (name, target) in default_signal_map_by_name.items())
+
+ def test_returns_constructed_signal_map(self):
+        """ Should return the constructed default signal map. """
+ expected_result = self.default_signal_map
+ result = daemon.daemon.make_default_signal_map()
+ self.assertEqual(expected_result, result)
+
+ def test_returns_signal_map_with_only_ids_in_signal_module(self):
+ """ Should return map with only signals in the `signal` module.
+
+ The `signal` module is documented to only define those
+ signals which exist on the running system. Therefore the
+ default map should not contain any signals which are not
+ defined in the `signal` module.
+
+ """
+ del(self.default_signal_map[self.fake_signal_module.SIGTTOU])
+ del(self.fake_signal_module.SIGTTOU)
+ expected_result = self.default_signal_map
+ result = daemon.daemon.make_default_signal_map()
+ self.assertEqual(expected_result, result)
+
+
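+# An illustrative sketch (an assumption based on the test cases above, not a
+# copy of the library code): build the default map only from signal names
+# that the running platform's ‘signal’ module actually defines.
+def _sketch_make_default_signal_map():
+    """ Make the default signal map from the signals available here. """
+    name_map = {
+        'SIGTSTP': None,
+        'SIGTTIN': None,
+        'SIGTTOU': None,
+        'SIGTERM': 'terminate',
+        }
+    return dict(
+        (getattr(signal, name), target)
+        for (name, target) in name_map.items()
+        if hasattr(signal, name))
+
+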
+@mock.patch.object(daemon.daemon.signal, "signal")
+class set_signal_handlers_TestCase(scaffold.TestCase):
+ """ Test cases for set_signal_handlers function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(set_signal_handlers_TestCase, self).setUp()
+
+ self.signal_handler_map = {
+ signal.SIGQUIT: object(),
+ signal.SIGSEGV: object(),
+ signal.SIGINT: object(),
+ }
+
+ def test_sets_signal_handler_for_each_item(self, mock_func_signal_signal):
+ """ Should set signal handler for each item in map. """
+ signal_handler_map = self.signal_handler_map
+ expected_calls = [
+ mock.call(signal_number, handler)
+ for (signal_number, handler) in signal_handler_map.items()]
+ daemon.daemon.set_signal_handlers(signal_handler_map)
+        self.assertEqual(expected_calls, mock_func_signal_signal.mock_calls)
+
+
+@mock.patch.object(daemon.daemon.atexit, "register")
+class register_atexit_function_TestCase(scaffold.TestCase):
+ """ Test cases for register_atexit_function function. """
+
+ def test_registers_function_for_atexit_processing(
+ self, mock_func_atexit_register):
+ """ Should register specified function for atexit processing. """
+ func = object()
+ daemon.daemon.register_atexit_function(func)
+ mock_func_atexit_register.assert_called_with(func)
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_metadata.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_metadata.py
new file mode 100755
index 00000000..692753f4
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_metadata.py
@@ -0,0 +1,380 @@
+# -*- coding: utf-8 -*-
+#
+# test/test_metadata.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Unit test for ‘_metadata’ private module.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import sys
+import errno
+import re
+try:
+ # Python 3 standard library.
+ import urllib.parse as urlparse
+except ImportError:
+ # Python 2 standard library.
+ import urlparse
+import functools
+import collections
+import json
+
+import pkg_resources
+import mock
+import testtools.helpers
+import testtools.matchers
+import testscenarios
+
+from . import scaffold
+from .scaffold import (basestring, unicode)
+
+import daemon._metadata as metadata
+
+
+class HasAttribute(testtools.matchers.Matcher):
+ """ A matcher to assert an object has a named attribute. """
+
+ def __init__(self, name):
+ self.attribute_name = name
+
+ def match(self, instance):
+ """ Assert the object `instance` has an attribute named `name`. """
+ result = None
+ if not testtools.helpers.safe_hasattr(instance, self.attribute_name):
+ result = AttributeNotFoundMismatch(instance, self.attribute_name)
+ return result
+
+
+class AttributeNotFoundMismatch(testtools.matchers.Mismatch):
+ """ The specified instance does not have the named attribute. """
+
+ def __init__(self, instance, name):
+ self.instance = instance
+ self.attribute_name = name
+
+ def describe(self):
+ """ Emit a text description of this mismatch. """
+ text = (
+ "{instance!r}"
+ " has no attribute named {name!r}").format(
+ instance=self.instance, name=self.attribute_name)
+ return text
+
+
+class metadata_value_TestCase(scaffold.TestCaseWithScenarios):
+ """ Test cases for metadata module values. """
+
+ expected_str_attributes = set([
+ 'version_installed',
+ 'author',
+ 'copyright',
+ 'license',
+ 'url',
+ ])
+
+ scenarios = [
+ (name, {'attribute_name': name})
+ for name in expected_str_attributes]
+ for (name, params) in scenarios:
+ if name == 'version_installed':
+ # No duck typing, this attribute might be None.
+ params['ducktype_attribute_name'] = NotImplemented
+ continue
+ # Expect an attribute of ‘str’ to test this value.
+ params['ducktype_attribute_name'] = 'isdigit'
+
+ def test_module_has_attribute(self):
+ """ Metadata should have expected value as a module attribute. """
+ self.assertThat(
+ metadata, HasAttribute(self.attribute_name))
+
+ def test_module_attribute_has_duck_type(self):
+ """ Metadata value should have expected duck-typing attribute. """
+ if self.ducktype_attribute_name == NotImplemented:
+ self.skipTest("Can't assert this attribute's type")
+ instance = getattr(metadata, self.attribute_name)
+ self.assertThat(
+ instance, HasAttribute(self.ducktype_attribute_name))
+
+
+class parse_person_field_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+    """ Test cases for ‘parse_person_field’ function. """
+
+ scenarios = [
+ ('simple', {
+ 'test_person': "Foo Bar <foo.bar@example.com>",
+ 'expected_result': ("Foo Bar", "foo.bar@example.com"),
+ }),
+ ('empty', {
+ 'test_person': "",
+ 'expected_result': (None, None),
+ }),
+ ('none', {
+ 'test_person': None,
+ 'expected_error': TypeError,
+ }),
+ ('no email', {
+ 'test_person': "Foo Bar",
+ 'expected_result': ("Foo Bar", None),
+ }),
+ ]
+
+ def test_returns_expected_result(self):
+ """ Should return expected result. """
+ if hasattr(self, 'expected_error'):
+ self.assertRaises(
+ self.expected_error,
+ metadata.parse_person_field, self.test_person)
+ else:
+ result = metadata.parse_person_field(self.test_person)
+ self.assertEqual(self.expected_result, result)
+
+
+class YearRange_TestCase(scaffold.TestCaseWithScenarios):
+ """ Test cases for ‘YearRange’ class. """
+
+ scenarios = [
+ ('simple', {
+ 'begin_year': 1970,
+ 'end_year': 1979,
+ 'expected_text': "1970–1979",
+ }),
+ ('same year', {
+ 'begin_year': 1970,
+ 'end_year': 1970,
+ 'expected_text': "1970",
+ }),
+ ('no end year', {
+ 'begin_year': 1970,
+ 'end_year': None,
+ 'expected_text': "1970",
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(YearRange_TestCase, self).setUp()
+
+ self.test_instance = metadata.YearRange(
+ self.begin_year, self.end_year)
+
+ def test_text_representation_as_expected(self):
+ """ Text representation should be as expected. """
+ result = unicode(self.test_instance)
+ self.assertEqual(result, self.expected_text)
+
+
+FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end'])
+
+@mock.patch.object(metadata, 'YearRange', new=FakeYearRange)
+class make_year_range_TestCase(scaffold.TestCaseWithScenarios):
+ """ Test cases for ‘make_year_range’ function. """
+
+ scenarios = [
+ ('simple', {
+ 'begin_year': "1970",
+ 'end_date': "1979-01-01",
+ 'expected_range': FakeYearRange(begin=1970, end=1979),
+ }),
+ ('same year', {
+ 'begin_year': "1970",
+ 'end_date': "1970-01-01",
+ 'expected_range': FakeYearRange(begin=1970, end=1970),
+ }),
+ ('no end year', {
+ 'begin_year': "1970",
+ 'end_date': None,
+ 'expected_range': FakeYearRange(begin=1970, end=None),
+ }),
+ ('end date UNKNOWN token', {
+ 'begin_year': "1970",
+ 'end_date': "UNKNOWN",
+ 'expected_range': FakeYearRange(begin=1970, end=None),
+ }),
+ ('end date FUTURE token', {
+ 'begin_year': "1970",
+ 'end_date': "FUTURE",
+ 'expected_range': FakeYearRange(begin=1970, end=None),
+ }),
+ ]
+
+ def test_result_matches_expected_range(self):
+ """ Result should match expected YearRange. """
+ result = metadata.make_year_range(self.begin_year, self.end_date)
+ self.assertEqual(result, self.expected_range)
+
+
+class metadata_content_TestCase(scaffold.TestCase):
+ """ Test cases for content of metadata. """
+
+ def test_copyright_formatted_correctly(self):
+ """ Copyright statement should be formatted correctly. """
+ regex_pattern = (
+ "Copyright © "
+ "\d{4}" # four-digit year
+ "(?:–\d{4})?" # optional range dash and ending four-digit year
+ )
+ regex_flags = re.UNICODE
+ self.assertThat(
+ metadata.copyright,
+ testtools.matchers.MatchesRegex(regex_pattern, regex_flags))
+
+ def test_author_formatted_correctly(self):
+ """ Author information should be formatted correctly. """
+ regex_pattern = (
+ ".+ " # name
+ "<[^>]+>" # email address, in angle brackets
+ )
+ regex_flags = re.UNICODE
+ self.assertThat(
+ metadata.author,
+ testtools.matchers.MatchesRegex(regex_pattern, regex_flags))
+
+ def test_copyright_contains_author(self):
+ """ Copyright information should contain author information. """
+ self.assertThat(
+ metadata.copyright,
+ testtools.matchers.Contains(metadata.author))
+
+ def test_url_parses_correctly(self):
+ """ Homepage URL should parse correctly. """
+ result = urlparse.urlparse(metadata.url)
+ self.assertIsInstance(
+ result, urlparse.ParseResult,
+ "URL value {url!r} did not parse correctly".format(
+ url=metadata.url))
+
+
+try:
+ FileNotFoundError
+except NameError:
+ # Python 2 uses IOError.
+ FileNotFoundError = functools.partial(IOError, errno.ENOENT)
+
+version_info_filename = "version_info.json"
+
+def fake_func_has_metadata(testcase, resource_name):
+ """ Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’. """
+ if (
+ resource_name != testcase.expected_resource_name
+ or not hasattr(testcase, 'test_version_info')):
+ return False
+ return True
+
+
+def fake_func_get_metadata(testcase, resource_name):
+ """ Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’. """
+ if not fake_func_has_metadata(testcase, resource_name):
+ error = FileNotFoundError(resource_name)
+ raise error
+ content = testcase.test_version_info
+ return content
+
+
+def fake_func_get_distribution(testcase, distribution_name):
+ """ Fake the behaviour of ‘pkg_resources.get_distribution’. """
+ if distribution_name != metadata.distribution_name:
+ raise pkg_resources.DistributionNotFound
+ if hasattr(testcase, 'get_distribution_error'):
+ raise testcase.get_distribution_error
+ mock_distribution = testcase.mock_distribution
+ mock_distribution.has_metadata.side_effect = functools.partial(
+ fake_func_has_metadata, testcase)
+ mock_distribution.get_metadata.side_effect = functools.partial(
+ fake_func_get_metadata, testcase)
+ return mock_distribution
+
+
+@mock.patch.object(metadata, 'distribution_name', new="mock-dist")
+class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios):
+ """ Test cases for ‘get_distribution_version_info’ function. """
+
+ default_version_info = {
+ 'release_date': "UNKNOWN",
+ 'version': "UNKNOWN",
+ 'maintainer': "UNKNOWN",
+ }
+
+ scenarios = [
+ ('version 0.0', {
+ 'test_version_info': json.dumps({
+ 'version': "0.0",
+ }),
+ 'expected_version_info': {'version': "0.0"},
+ }),
+ ('version 1.0', {
+ 'test_version_info': json.dumps({
+ 'version': "1.0",
+ }),
+ 'expected_version_info': {'version': "1.0"},
+ }),
+ ('file lorem_ipsum.json', {
+ 'version_info_filename': "lorem_ipsum.json",
+ 'test_version_info': json.dumps({
+ 'version': "1.0",
+ }),
+ 'expected_version_info': {'version': "1.0"},
+ }),
+ ('not installed', {
+ 'get_distribution_error': pkg_resources.DistributionNotFound(),
+ 'expected_version_info': default_version_info,
+ }),
+ ('no version_info', {
+ 'expected_version_info': default_version_info,
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(get_distribution_version_info_TestCase, self).setUp()
+
+        if hasattr(self, 'version_info_filename'):
+            self.expected_resource_name = self.version_info_filename
+            self.test_args = {'filename': self.expected_resource_name}
+        else:
+            self.test_args = {}
+            self.expected_resource_name = version_info_filename
+
+ self.mock_distribution = mock.MagicMock()
+ func_patcher_get_distribution = mock.patch.object(
+ pkg_resources, 'get_distribution')
+ func_patcher_get_distribution.start()
+ self.addCleanup(func_patcher_get_distribution.stop)
+ pkg_resources.get_distribution.side_effect = functools.partial(
+ fake_func_get_distribution, self)
+
+ def test_requests_installed_distribution(self):
+ """ The package distribution should be retrieved. """
+ expected_distribution_name = metadata.distribution_name
+ version_info = metadata.get_distribution_version_info(**self.test_args)
+ pkg_resources.get_distribution.assert_called_with(
+ expected_distribution_name)
+
+ def test_requests_specified_filename(self):
+ """ The specified metadata resource name should be requested. """
+ if hasattr(self, 'get_distribution_error'):
+ self.skipTest("No access to distribution")
+ version_info = metadata.get_distribution_version_info(**self.test_args)
+ self.mock_distribution.has_metadata.assert_called_with(
+ self.expected_resource_name)
+
+ def test_result_matches_expected_items(self):
+ """ The result should match the expected items. """
+ version_info = metadata.get_distribution_version_info(**self.test_args)
+ self.assertEqual(self.expected_version_info, version_info)
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_pidfile.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_pidfile.py
new file mode 100755
index 00000000..9b636ec8
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_pidfile.py
@@ -0,0 +1,472 @@
+# -*- coding: utf-8 -*-
+#
+# test/test_pidfile.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Unit test for ‘pidfile’ module.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+try:
+ # Python 3 standard library.
+ import builtins
+except ImportError:
+ # Python 2 standard library.
+ import __builtin__ as builtins
+import os
+import itertools
+import tempfile
+import errno
+import functools
+try:
+ # Standard library of Python 2.7 and later.
+ from io import StringIO
+except ImportError:
+ # Standard library of Python 2.6 and earlier.
+ from StringIO import StringIO
+
+import mock
+import lockfile
+
+from . import scaffold
+
+import daemon.pidfile
+
+
+class FakeFileDescriptorStringIO(StringIO, object):
+ """ A StringIO class that fakes a file descriptor. """
+
+ _fileno_generator = itertools.count()
+
+ def __init__(self, *args, **kwargs):
+ self._fileno = next(self._fileno_generator)
+ super(FakeFileDescriptorStringIO, self).__init__(*args, **kwargs)
+
+ def fileno(self):
+ return self._fileno
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ pass
+
+
+try:
+ FileNotFoundError
+ PermissionError
+except NameError:
+ # Python 2 uses IOError.
+ FileNotFoundError = functools.partial(IOError, errno.ENOENT)
+ PermissionError = functools.partial(IOError, errno.EPERM)
+
+
+def make_pidlockfile_scenarios():
+ """ Make a collection of scenarios for testing `PIDLockFile` instances.
+
+ :return: A collection of scenarios for tests involving
+            `PIDLockFile` instances.
+
+ The collection is a mapping from scenario name to a dictionary of
+ scenario attributes.
+
+ """
+
+ fake_current_pid = 235
+ fake_other_pid = 8642
+ fake_pidfile_path = tempfile.mktemp()
+
+ fake_pidfile_empty = FakeFileDescriptorStringIO()
+ fake_pidfile_current_pid = FakeFileDescriptorStringIO(
+ "{pid:d}\n".format(pid=fake_current_pid))
+ fake_pidfile_other_pid = FakeFileDescriptorStringIO(
+ "{pid:d}\n".format(pid=fake_other_pid))
+ fake_pidfile_bogus = FakeFileDescriptorStringIO(
+ "b0gUs")
+
+ scenarios = {
+ 'simple': {},
+ 'not-exist': {
+ 'open_func_name': 'fake_open_nonexist',
+ 'os_open_func_name': 'fake_os_open_nonexist',
+ },
+ 'not-exist-write-denied': {
+ 'open_func_name': 'fake_open_nonexist',
+ 'os_open_func_name': 'fake_os_open_nonexist',
+ },
+ 'not-exist-write-busy': {
+ 'open_func_name': 'fake_open_nonexist',
+ 'os_open_func_name': 'fake_os_open_nonexist',
+ },
+ 'exist-read-denied': {
+ 'open_func_name': 'fake_open_read_denied',
+ 'os_open_func_name': 'fake_os_open_read_denied',
+ },
+ 'exist-locked-read-denied': {
+ 'locking_pid': fake_other_pid,
+ 'open_func_name': 'fake_open_read_denied',
+ 'os_open_func_name': 'fake_os_open_read_denied',
+ },
+ 'exist-empty': {},
+ 'exist-invalid': {
+ 'pidfile': fake_pidfile_bogus,
+ },
+ 'exist-current-pid': {
+ 'pidfile': fake_pidfile_current_pid,
+ 'pidfile_pid': fake_current_pid,
+ },
+ 'exist-current-pid-locked': {
+ 'pidfile': fake_pidfile_current_pid,
+ 'pidfile_pid': fake_current_pid,
+ 'locking_pid': fake_current_pid,
+ },
+ 'exist-other-pid': {
+ 'pidfile': fake_pidfile_other_pid,
+ 'pidfile_pid': fake_other_pid,
+ },
+ 'exist-other-pid-locked': {
+ 'pidfile': fake_pidfile_other_pid,
+ 'pidfile_pid': fake_other_pid,
+ 'locking_pid': fake_other_pid,
+ },
+ }
+
+ for scenario in scenarios.values():
+ scenario['pid'] = fake_current_pid
+ scenario['pidfile_path'] = fake_pidfile_path
+ if 'pidfile' not in scenario:
+ scenario['pidfile'] = fake_pidfile_empty
+ if 'pidfile_pid' not in scenario:
+ scenario['pidfile_pid'] = None
+ if 'locking_pid' not in scenario:
+ scenario['locking_pid'] = None
+ if 'open_func_name' not in scenario:
+ scenario['open_func_name'] = 'fake_open_okay'
+ if 'os_open_func_name' not in scenario:
+ scenario['os_open_func_name'] = 'fake_os_open_okay'
+
+ return scenarios
+
+
+def setup_pidfile_fixtures(testcase):
+ """ Set up common fixtures for PID file test cases.
+
+ :param testcase: A `TestCase` instance to decorate.
+
+ Decorate the `testcase` with attributes to be fixtures for tests
+ involving `PIDLockFile` instances.
+
+ """
+ scenarios = make_pidlockfile_scenarios()
+ testcase.pidlockfile_scenarios = scenarios
+
+ def get_scenario_option(testcase, key, default=None):
+ value = default
+ try:
+ value = testcase.scenario[key]
+ except (NameError, TypeError, AttributeError, KeyError):
+ pass
+ return value
+
+ func_patcher_os_getpid = mock.patch.object(
+ os, "getpid",
+ return_value=scenarios['simple']['pid'])
+ func_patcher_os_getpid.start()
+ testcase.addCleanup(func_patcher_os_getpid.stop)
+
+ def make_fake_open_funcs(testcase):
+
+ def fake_open_nonexist(filename, mode, buffering):
+ if mode.startswith('r'):
+ error = FileNotFoundError(
+ "No such file {filename!r}".format(
+ filename=filename))
+ raise error
+ else:
+ result = testcase.scenario['pidfile']
+ return result
+
+ def fake_open_read_denied(filename, mode, buffering):
+ if mode.startswith('r'):
+ error = PermissionError(
+ "Read denied on {filename!r}".format(
+ filename=filename))
+ raise error
+ else:
+ result = testcase.scenario['pidfile']
+ return result
+
+ def fake_open_okay(filename, mode, buffering):
+ result = testcase.scenario['pidfile']
+ return result
+
+ def fake_os_open_nonexist(filename, flags, mode):
+ if (flags & os.O_CREAT):
+ result = testcase.scenario['pidfile'].fileno()
+ else:
+ error = FileNotFoundError(
+ "No such file {filename!r}".format(
+ filename=filename))
+ raise error
+ return result
+
+ def fake_os_open_read_denied(filename, flags, mode):
+ if (flags & os.O_CREAT):
+ result = testcase.scenario['pidfile'].fileno()
+ else:
+ error = PermissionError(
+ "Read denied on {filename!r}".format(
+ filename=filename))
+ raise error
+ return result
+
+ def fake_os_open_okay(filename, flags, mode):
+ result = testcase.scenario['pidfile'].fileno()
+ return result
+
+ funcs = dict(
+ (name, obj) for (name, obj) in vars().items()
+ if callable(obj))
+
+ return funcs
+
+ testcase.fake_pidfile_open_funcs = make_fake_open_funcs(testcase)
+
+ def fake_open(filename, mode='rt', buffering=None):
+ scenario_path = get_scenario_option(testcase, 'pidfile_path')
+ if filename == scenario_path:
+ func_name = testcase.scenario['open_func_name']
+ fake_open_func = testcase.fake_pidfile_open_funcs[func_name]
+ result = fake_open_func(filename, mode, buffering)
+ else:
+ result = FakeFileDescriptorStringIO()
+ return result
+
+ mock_open = mock.mock_open()
+ mock_open.side_effect = fake_open
+
+ func_patcher_builtin_open = mock.patch.object(
+ builtins, "open",
+ new=mock_open)
+ func_patcher_builtin_open.start()
+ testcase.addCleanup(func_patcher_builtin_open.stop)
+
+ def fake_os_open(filename, flags, mode=None):
+ scenario_path = get_scenario_option(testcase, 'pidfile_path')
+ if filename == scenario_path:
+ func_name = testcase.scenario['os_open_func_name']
+ fake_os_open_func = testcase.fake_pidfile_open_funcs[func_name]
+ result = fake_os_open_func(filename, flags, mode)
+ else:
+ result = FakeFileDescriptorStringIO().fileno()
+ return result
+
+ mock_os_open = mock.MagicMock(side_effect=fake_os_open)
+
+ func_patcher_os_open = mock.patch.object(
+ os, "open",
+ new=mock_os_open)
+ func_patcher_os_open.start()
+ testcase.addCleanup(func_patcher_os_open.stop)
+
+ def fake_os_fdopen(fd, mode='rt', buffering=None):
+ scenario_pidfile = get_scenario_option(
+ testcase, 'pidfile', FakeFileDescriptorStringIO())
+ if fd == testcase.scenario['pidfile'].fileno():
+ result = testcase.scenario['pidfile']
+ else:
+ raise OSError(errno.EBADF, "Bad file descriptor")
+ return result
+
+ mock_os_fdopen = mock.MagicMock(side_effect=fake_os_fdopen)
+
+ func_patcher_os_fdopen = mock.patch.object(
+ os, "fdopen",
+ new=mock_os_fdopen)
+ func_patcher_os_fdopen.start()
+ testcase.addCleanup(func_patcher_os_fdopen.stop)
+
+
+def make_lockfile_method_fakes(scenario):
+ """ Make common fake methods for lockfile class.
+
+ :param scenario: A scenario for testing with PIDLockFile.
+ :return: A mapping from normal function name to the corresponding
+ fake function.
+
+ Each fake function behaves appropriately for the specified `scenario`.
+
+ """
+
+ def fake_func_read_pid():
+ return scenario['pidfile_pid']
+ def fake_func_is_locked():
+ return (scenario['locking_pid'] is not None)
+ def fake_func_i_am_locking():
+ return (
+ scenario['locking_pid'] == scenario['pid'])
+ def fake_func_acquire(timeout=None):
+ if scenario['locking_pid'] is not None:
+ raise lockfile.AlreadyLocked()
+ scenario['locking_pid'] = scenario['pid']
+ def fake_func_release():
+ if scenario['locking_pid'] is None:
+ raise lockfile.NotLocked()
+ if scenario['locking_pid'] != scenario['pid']:
+ raise lockfile.NotMyLock()
+ scenario['locking_pid'] = None
+ def fake_func_break_lock():
+ scenario['locking_pid'] = None
+
+ fake_methods = dict(
+ (
+ func_name.replace('fake_func_', ''),
+ mock.MagicMock(side_effect=fake_func))
+ for (func_name, fake_func) in vars().items()
+ if func_name.startswith('fake_func_'))
+
+ return fake_methods
+
+
+def apply_lockfile_method_mocks(mock_lockfile, testcase, scenario):
+ """ Apply common fake methods to mock lockfile class.
+
+ :param mock_lockfile: An object providing the `LockFile` interface.
+ :param testcase: The `TestCase` instance providing the context for
+ the patch.
+ :param scenario: The `PIDLockFile` test scenario to use.
+
+ Mock the `LockFile` methods of `mock_lockfile`, by applying fake
+    methods customised for `scenario`. The mocking is done by a patch
+ within the context of `testcase`.
+
+ """
+ fake_methods = dict(
+ (func_name, fake_func)
+ for (func_name, fake_func) in
+ make_lockfile_method_fakes(scenario).items()
+ if func_name not in ['read_pid'])
+
+ for (func_name, fake_func) in fake_methods.items():
+ func_patcher = mock.patch.object(
+ mock_lockfile, func_name,
+ new=fake_func)
+ func_patcher.start()
+ testcase.addCleanup(func_patcher.stop)
+
+
+def setup_pidlockfile_fixtures(testcase, scenario_name=None):
+ """ Set up common fixtures for PIDLockFile test cases.
+
+ :param testcase: The `TestCase` instance to decorate.
+ :param scenario_name: The name of the `PIDLockFile` scenario to use.
+
+ Decorate the `testcase` with attributes that are fixtures for test
+    cases involving `PIDLockFile` instances.
+
+ """
+
+ setup_pidfile_fixtures(testcase)
+
+ for func_name in [
+ 'write_pid_to_pidfile',
+ 'remove_existing_pidfile',
+ ]:
+ func_patcher = mock.patch.object(lockfile.pidlockfile, func_name)
+ func_patcher.start()
+ testcase.addCleanup(func_patcher.stop)
+
+
+class TimeoutPIDLockFile_TestCase(scaffold.TestCase):
+ """ Test cases for ‘TimeoutPIDLockFile’ class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(TimeoutPIDLockFile_TestCase, self).setUp()
+
+ pidlockfile_scenarios = make_pidlockfile_scenarios()
+ self.pidlockfile_scenario = pidlockfile_scenarios['simple']
+ pidfile_path = self.pidlockfile_scenario['pidfile_path']
+
+ for func_name in ['__init__', 'acquire']:
+ func_patcher = mock.patch.object(
+ lockfile.pidlockfile.PIDLockFile, func_name)
+ func_patcher.start()
+ self.addCleanup(func_patcher.stop)
+
+ self.scenario = {
+ 'pidfile_path': self.pidlockfile_scenario['pidfile_path'],
+ 'acquire_timeout': self.getUniqueInteger(),
+ }
+
+ self.test_kwargs = dict(
+ path=self.scenario['pidfile_path'],
+ acquire_timeout=self.scenario['acquire_timeout'],
+ )
+ self.test_instance = daemon.pidfile.TimeoutPIDLockFile(
+ **self.test_kwargs)
+
+ def test_inherits_from_pidlockfile(self):
+ """ Should inherit from PIDLockFile. """
+ instance = self.test_instance
+ self.assertIsInstance(instance, lockfile.pidlockfile.PIDLockFile)
+
+ def test_init_has_expected_signature(self):
+ """ Should have expected signature for ‘__init__’. """
+ def test_func(self, path, acquire_timeout=None, *args, **kwargs): pass
+ test_func.__name__ = str('__init__')
+ self.assertFunctionSignatureMatch(
+ test_func,
+ daemon.pidfile.TimeoutPIDLockFile.__init__)
+
+ def test_has_specified_acquire_timeout(self):
+ """ Should have specified ‘acquire_timeout’ value. """
+ instance = self.test_instance
+ expected_timeout = self.test_kwargs['acquire_timeout']
+ self.assertEqual(expected_timeout, instance.acquire_timeout)
+
+ @mock.patch.object(
+ lockfile.pidlockfile.PIDLockFile, "__init__",
+ autospec=True)
+ def test_calls_superclass_init(self, mock_init):
+ """ Should call the superclass ‘__init__’. """
+ expected_path = self.test_kwargs['path']
+ instance = daemon.pidfile.TimeoutPIDLockFile(**self.test_kwargs)
+ mock_init.assert_called_with(instance, expected_path)
+
+ @mock.patch.object(
+ lockfile.pidlockfile.PIDLockFile, "acquire",
+ autospec=True)
+ def test_acquire_uses_specified_timeout(self, mock_func_acquire):
+ """ Should call the superclass ‘acquire’ with specified timeout. """
+ instance = self.test_instance
+ test_timeout = self.getUniqueInteger()
+ expected_timeout = test_timeout
+ instance.acquire(test_timeout)
+ mock_func_acquire.assert_called_with(instance, expected_timeout)
+
+ @mock.patch.object(
+ lockfile.pidlockfile.PIDLockFile, "acquire",
+ autospec=True)
+ def test_acquire_uses_stored_timeout_by_default(self, mock_func_acquire):
+ """ Should call superclass ‘acquire’ with stored timeout by default. """
+ instance = self.test_instance
+ test_timeout = self.test_kwargs['acquire_timeout']
+ expected_timeout = test_timeout
+ instance.acquire()
+ mock_func_acquire.assert_called_with(instance, expected_timeout)
+
+
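+# An illustrative sketch (an assumption based on the test cases above, not the
+# library source): ‘TimeoutPIDLockFile’ stores a default acquire timeout and
+# forwards it to the superclass ‘acquire’ when no explicit timeout is given.
+class _SketchTimeoutPIDLockFile(lockfile.pidlockfile.PIDLockFile):
+    """ A PIDLockFile with a stored default ‘acquire’ timeout. """
+
+    def __init__(self, path, acquire_timeout=None, *args, **kwargs):
+        self.acquire_timeout = acquire_timeout
+        super(_SketchTimeoutPIDLockFile, self).__init__(
+            path, *args, **kwargs)
+
+    def acquire(self, timeout=None, *args, **kwargs):
+        if timeout is None:
+            timeout = self.acquire_timeout
+        super(_SketchTimeoutPIDLockFile, self).acquire(
+            timeout, *args, **kwargs)
+
+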
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_runner.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_runner.py
new file mode 100755
index 00000000..4c0c714a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_runner.py
@@ -0,0 +1,675 @@
+# -*- coding: utf-8 -*-
+#
+# test/test_runner.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2009–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Apache License, version 2.0 as published by the
+# Apache Software Foundation.
+# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
+
+""" Unit test for ‘runner’ module.
+ """
+
+from __future__ import (absolute_import, unicode_literals)
+
+try:
+ # Python 3 standard library.
+ import builtins
+except ImportError:
+ # Python 2 standard library.
+ import __builtin__ as builtins
+import os
+import os.path
+import sys
+import tempfile
+import errno
+import signal
+import functools
+
+import lockfile
+import mock
+import testtools
+
+from . import scaffold
+from .scaffold import (basestring, unicode)
+from .test_pidfile import (
+ FakeFileDescriptorStringIO,
+ setup_pidfile_fixtures,
+ make_pidlockfile_scenarios,
+ apply_lockfile_method_mocks,
+ )
+from .test_daemon import (
+ setup_streams_fixtures,
+ )
+
+import daemon.daemon
+import daemon.runner
+import daemon.pidfile
+
+
+class ModuleExceptions_TestCase(scaffold.Exception_TestCase):
+ """ Test cases for module exception classes. """
+
+ scenarios = scaffold.make_exception_scenarios([
+ ('daemon.runner.DaemonRunnerError', dict(
+ exc_type = daemon.runner.DaemonRunnerError,
+ min_args = 1,
+ types = [Exception],
+ )),
+ ('daemon.runner.DaemonRunnerInvalidActionError', dict(
+ exc_type = daemon.runner.DaemonRunnerInvalidActionError,
+ min_args = 1,
+ types = [daemon.runner.DaemonRunnerError, ValueError],
+ )),
+ ('daemon.runner.DaemonRunnerStartFailureError', dict(
+ exc_type = daemon.runner.DaemonRunnerStartFailureError,
+ min_args = 1,
+ types = [daemon.runner.DaemonRunnerError, RuntimeError],
+ )),
+ ('daemon.runner.DaemonRunnerStopFailureError', dict(
+ exc_type = daemon.runner.DaemonRunnerStopFailureError,
+ min_args = 1,
+ types = [daemon.runner.DaemonRunnerError, RuntimeError],
+ )),
+ ])
+
+
+def make_runner_scenarios():
+ """ Make a collection of scenarios for testing `DaemonRunner` instances.
+
+ :return: A collection of scenarios for tests involving
+ `DaemonRunner` instances.
+
+ The collection is a mapping from scenario name to a dictionary of
+ scenario attributes.
+
+ """
+
+ pidlockfile_scenarios = make_pidlockfile_scenarios()
+
+ scenarios = {
+ 'simple': {
+ 'pidlockfile_scenario_name': 'simple',
+ },
+ 'pidfile-locked': {
+ 'pidlockfile_scenario_name': 'exist-other-pid-locked',
+ },
+ }
+
+ for scenario in scenarios.values():
+ if 'pidlockfile_scenario_name' in scenario:
+ pidlockfile_scenario = pidlockfile_scenarios.pop(
+ scenario['pidlockfile_scenario_name'])
+ scenario['pid'] = pidlockfile_scenario['pid']
+ scenario['pidfile_path'] = pidlockfile_scenario['pidfile_path']
+ scenario['pidfile_timeout'] = 23
+ scenario['pidlockfile_scenario'] = pidlockfile_scenario
+
+ return scenarios
+
+
+def set_runner_scenario(testcase, scenario_name):
+ """ Set the DaemonRunner test scenario for the test case.
+
+ :param testcase: The `TestCase` instance to decorate.
+ :param scenario_name: The name of the scenario to use.
+
+ Set the `DaemonRunner` test scenario name and decorate the
+ `testcase` with the corresponding scenario fixtures.
+
+ """
+ scenarios = testcase.runner_scenarios
+ testcase.scenario = scenarios[scenario_name]
+ apply_lockfile_method_mocks(
+ testcase.mock_runner_lockfile,
+ testcase,
+ testcase.scenario['pidlockfile_scenario'])
+
+
+def setup_runner_fixtures(testcase):
+ """ Set up common fixtures for `DaemonRunner` test cases.
+
+ :param testcase: A `TestCase` instance to decorate.
+
+ Decorate the `testcase` with attributes to be fixtures for tests
+ involving `DaemonRunner` instances.
+
+ """
+ setup_pidfile_fixtures(testcase)
+ setup_streams_fixtures(testcase)
+
+ testcase.runner_scenarios = make_runner_scenarios()
+
+ patcher_stderr = mock.patch.object(
+ sys, "stderr",
+ new=FakeFileDescriptorStringIO())
+ testcase.fake_stderr = patcher_stderr.start()
+ testcase.addCleanup(patcher_stderr.stop)
+
+ simple_scenario = testcase.runner_scenarios['simple']
+
+ testcase.mock_runner_lockfile = mock.MagicMock(
+ spec=daemon.pidfile.TimeoutPIDLockFile)
+ apply_lockfile_method_mocks(
+ testcase.mock_runner_lockfile,
+ testcase,
+ simple_scenario['pidlockfile_scenario'])
+ testcase.mock_runner_lockfile.path = simple_scenario['pidfile_path']
+
+ patcher_lockfile_class = mock.patch.object(
+ daemon.pidfile, "TimeoutPIDLockFile",
+ return_value=testcase.mock_runner_lockfile)
+ patcher_lockfile_class.start()
+ testcase.addCleanup(patcher_lockfile_class.stop)
+
+ class TestApp(object):
+
+ def __init__(self):
+ self.stdin_path = testcase.stream_file_paths['stdin']
+ self.stdout_path = testcase.stream_file_paths['stdout']
+ self.stderr_path = testcase.stream_file_paths['stderr']
+ self.pidfile_path = simple_scenario['pidfile_path']
+ self.pidfile_timeout = simple_scenario['pidfile_timeout']
+
+ run = mock.MagicMock(name="TestApp.run")
+
+ testcase.TestApp = TestApp
+
+ patcher_runner_daemoncontext = mock.patch.object(
+ daemon.runner, "DaemonContext", autospec=True)
+ patcher_runner_daemoncontext.start()
+ testcase.addCleanup(patcher_runner_daemoncontext.stop)
+
+ testcase.test_app = testcase.TestApp()
+
+ testcase.test_program_name = "bazprog"
+ testcase.test_program_path = os.path.join(
+ "/foo/bar", testcase.test_program_name)
+ testcase.valid_argv_params = {
+ 'start': [testcase.test_program_path, 'start'],
+ 'stop': [testcase.test_program_path, 'stop'],
+ 'restart': [testcase.test_program_path, 'restart'],
+ }
+
+ def fake_open(filename, mode=None, buffering=None):
+ if filename in testcase.stream_files_by_path:
+ result = testcase.stream_files_by_path[filename]
+ else:
+ result = FakeFileDescriptorStringIO()
+ result.mode = mode
+ result.buffering = buffering
+ return result
+
+ mock_open = mock.mock_open()
+ mock_open.side_effect = fake_open
+
+ func_patcher_builtin_open = mock.patch.object(
+ builtins, "open",
+ new=mock_open)
+ func_patcher_builtin_open.start()
+ testcase.addCleanup(func_patcher_builtin_open.stop)
+
+ func_patcher_os_kill = mock.patch.object(os, "kill")
+ func_patcher_os_kill.start()
+ testcase.addCleanup(func_patcher_os_kill.stop)
+
+ patcher_sys_argv = mock.patch.object(
+ sys, "argv",
+ new=testcase.valid_argv_params['start'])
+ patcher_sys_argv.start()
+ testcase.addCleanup(patcher_sys_argv.stop)
+
+ testcase.test_instance = daemon.runner.DaemonRunner(testcase.test_app)
+
+ testcase.scenario = NotImplemented
+
+
+class DaemonRunner_BaseTestCase(scaffold.TestCase):
+ """ Base class for DaemonRunner test case classes. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_BaseTestCase, self).setUp()
+
+ setup_runner_fixtures(self)
+ set_runner_scenario(self, 'simple')
+
+
+class DaemonRunner_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_TestCase, self).setUp()
+
+ func_patcher_parse_args = mock.patch.object(
+ daemon.runner.DaemonRunner, "parse_args")
+ func_patcher_parse_args.start()
+ self.addCleanup(func_patcher_parse_args.stop)
+
+ # Create a new instance now with our custom patches.
+ self.test_instance = daemon.runner.DaemonRunner(self.test_app)
+
+ def test_instantiate(self):
+ """ New instance of DaemonRunner should be created. """
+ self.assertIsInstance(self.test_instance, daemon.runner.DaemonRunner)
+
+ def test_parses_commandline_args(self):
+ """ Should parse commandline arguments. """
+ self.test_instance.parse_args.assert_called_with()
+
+ def test_has_specified_app(self):
+ """ Should have specified application object. """
+ self.assertIs(self.test_app, self.test_instance.app)
+
+ def test_sets_pidfile_none_when_pidfile_path_is_none(self):
+ """ Should set ‘pidfile’ to ‘None’ when ‘pidfile_path’ is ‘None’. """
+ pidfile_path = None
+ self.test_app.pidfile_path = pidfile_path
+ expected_pidfile = None
+ instance = daemon.runner.DaemonRunner(self.test_app)
+ self.assertIs(expected_pidfile, instance.pidfile)
+
+ def test_error_when_pidfile_path_not_string(self):
+ """ Should raise ValueError when PID file path not a string. """
+ pidfile_path = object()
+ self.test_app.pidfile_path = pidfile_path
+ expected_error = ValueError
+ self.assertRaises(
+ expected_error,
+ daemon.runner.DaemonRunner, self.test_app)
+
+ def test_error_when_pidfile_path_not_absolute(self):
+ """ Should raise ValueError when PID file path not absolute. """
+ pidfile_path = "foo/bar.pid"
+ self.test_app.pidfile_path = pidfile_path
+ expected_error = ValueError
+ self.assertRaises(
+ expected_error,
+ daemon.runner.DaemonRunner, self.test_app)
+
+ def test_creates_lock_with_specified_parameters(self):
+ """ Should create a TimeoutPIDLockFile with specified params. """
+ pidfile_path = self.scenario['pidfile_path']
+ pidfile_timeout = self.scenario['pidfile_timeout']
+ daemon.pidfile.TimeoutPIDLockFile.assert_called_with(
+ pidfile_path, pidfile_timeout)
+
+ def test_has_created_pidfile(self):
+ """ Should have new PID lock file as `pidfile` attribute. """
+ expected_pidfile = self.mock_runner_lockfile
+ instance = self.test_instance
+ self.assertIs(
+ expected_pidfile, instance.pidfile)
+
+ def test_daemon_context_has_created_pidfile(self):
+ """ DaemonContext component should have new PID lock file. """
+ expected_pidfile = self.mock_runner_lockfile
+ daemon_context = self.test_instance.daemon_context
+ self.assertIs(
+ expected_pidfile, daemon_context.pidfile)
+
+ def test_daemon_context_has_specified_stdin_stream(self):
+ """ DaemonContext component should have specified stdin file. """
+ test_app = self.test_app
+ expected_file = self.stream_files_by_name['stdin']
+ daemon_context = self.test_instance.daemon_context
+ self.assertEqual(expected_file, daemon_context.stdin)
+
+ def test_daemon_context_has_stdin_in_read_mode(self):
+ """ DaemonContext component should open stdin file for read. """
+ expected_mode = 'rt'
+ daemon_context = self.test_instance.daemon_context
+ self.assertIn(expected_mode, daemon_context.stdin.mode)
+
+ def test_daemon_context_has_specified_stdout_stream(self):
+ """ DaemonContext component should have specified stdout file. """
+ test_app = self.test_app
+ expected_file = self.stream_files_by_name['stdout']
+ daemon_context = self.test_instance.daemon_context
+ self.assertEqual(expected_file, daemon_context.stdout)
+
+ def test_daemon_context_has_stdout_in_append_mode(self):
+ """ DaemonContext component should open stdout file for append. """
+ expected_mode = 'w+t'
+ daemon_context = self.test_instance.daemon_context
+ self.assertIn(expected_mode, daemon_context.stdout.mode)
+
+ def test_daemon_context_has_specified_stderr_stream(self):
+ """ DaemonContext component should have specified stderr file. """
+ test_app = self.test_app
+ expected_file = self.stream_files_by_name['stderr']
+ daemon_context = self.test_instance.daemon_context
+ self.assertEqual(expected_file, daemon_context.stderr)
+
+ def test_daemon_context_has_stderr_in_append_mode(self):
+ """ DaemonContext component should open stderr file for append. """
+ expected_mode = 'w+t'
+ daemon_context = self.test_instance.daemon_context
+ self.assertIn(expected_mode, daemon_context.stderr.mode)
+
+ def test_daemon_context_has_stderr_with_no_buffering(self):
+ """ DaemonContext component should open stderr file unbuffered. """
+ expected_buffering = 0
+ daemon_context = self.test_instance.daemon_context
+ self.assertEqual(
+ expected_buffering, daemon_context.stderr.buffering)
+
+
+class DaemonRunner_usage_exit_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.usage_exit method. """
+
+ def test_raises_system_exit(self):
+ """ Should raise SystemExit exception. """
+ instance = self.test_instance
+ argv = [self.test_program_path]
+ self.assertRaises(
+ SystemExit,
+ instance._usage_exit, argv)
+
+ def test_message_follows_conventional_format(self):
+ """ Should emit a conventional usage message. """
+ instance = self.test_instance
+ argv = [self.test_program_path]
+ expected_stderr_output = """\
+ usage: {progname} ...
+ """.format(
+ progname=self.test_program_name)
+ self.assertRaises(
+ SystemExit,
+ instance._usage_exit, argv)
+ self.assertOutputCheckerMatch(
+ expected_stderr_output, self.fake_stderr.getvalue())
+
+
+class DaemonRunner_parse_args_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.parse_args method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_parse_args_TestCase, self).setUp()
+
+ func_patcher_usage_exit = mock.patch.object(
+ daemon.runner.DaemonRunner, "_usage_exit",
+ side_effect=NotImplementedError)
+ func_patcher_usage_exit.start()
+ self.addCleanup(func_patcher_usage_exit.stop)
+
+ def test_emits_usage_message_if_insufficient_args(self):
+ """ Should emit a usage message and exit if too few arguments. """
+ instance = self.test_instance
+ argv = [self.test_program_path]
+ exc = self.assertRaises(
+ NotImplementedError,
+ instance.parse_args, argv)
+ daemon.runner.DaemonRunner._usage_exit.assert_called_with(argv)
+
+ def test_emits_usage_message_if_unknown_action_arg(self):
+ """ Should emit a usage message and exit if unknown action. """
+ instance = self.test_instance
+ progname = self.test_program_name
+ argv = [self.test_program_path, 'bogus']
+ exc = self.assertRaises(
+ NotImplementedError,
+ instance.parse_args, argv)
+ daemon.runner.DaemonRunner._usage_exit.assert_called_with(argv)
+
+ def test_should_parse_system_argv_by_default(self):
+ """ Should parse sys.argv by default. """
+ instance = self.test_instance
+ expected_action = 'start'
+ argv = self.valid_argv_params['start']
+ with mock.patch.object(sys, "argv", new=argv):
+ instance.parse_args()
+ self.assertEqual(expected_action, instance.action)
+
+ def test_sets_action_from_first_argument(self):
+ """ Should set action from first commandline argument. """
+ instance = self.test_instance
+ for name, argv in self.valid_argv_params.items():
+ expected_action = name
+ instance.parse_args(argv)
+ self.assertEqual(expected_action, instance.action)
+
+
+try:
+ ProcessLookupError
+except NameError:
+ # Python 2 uses OSError.
+ ProcessLookupError = functools.partial(OSError, errno.ESRCH)
+
+class DaemonRunner_do_action_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.do_action method. """
+
+ def test_raises_error_if_unknown_action(self):
+ """ Should emit a usage message and exit if action is unknown. """
+ instance = self.test_instance
+ instance.action = 'bogus'
+ expected_error = daemon.runner.DaemonRunnerInvalidActionError
+ self.assertRaises(
+ expected_error,
+ instance.do_action)
+
+
+class DaemonRunner_do_action_start_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.do_action method, action 'start'. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_do_action_start_TestCase, self).setUp()
+
+ self.test_instance.action = 'start'
+
+ def test_raises_error_if_pidfile_locked(self):
+ """ Should raise error if PID file is locked. """
+
+ instance = self.test_instance
+ instance.daemon_context.open.side_effect = lockfile.AlreadyLocked
+ pidfile_path = self.scenario['pidfile_path']
+ expected_error = daemon.runner.DaemonRunnerStartFailureError
+ expected_message_content = pidfile_path
+ exc = self.assertRaises(
+ expected_error,
+ instance.do_action)
+ self.assertIn(expected_message_content, unicode(exc))
+
+ def test_breaks_lock_if_no_such_process(self):
+ """ Should request breaking lock if PID file process is not running. """
+ set_runner_scenario(self, 'pidfile-locked')
+ instance = self.test_instance
+ self.mock_runner_lockfile.read_pid.return_value = (
+ self.scenario['pidlockfile_scenario']['pidfile_pid'])
+ pidfile_path = self.scenario['pidfile_path']
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ expected_signal = signal.SIG_DFL
+ test_error = ProcessLookupError("Not running")
+ os.kill.side_effect = test_error
+ instance.do_action()
+ os.kill.assert_called_with(test_pid, expected_signal)
+ self.mock_runner_lockfile.break_lock.assert_called_with()
+
+ def test_requests_daemon_context_open(self):
+ """ Should request the daemon context to open. """
+ instance = self.test_instance
+ instance.do_action()
+ instance.daemon_context.open.assert_called_with()
+
+ def test_emits_start_message_to_stderr(self):
+ """ Should emit start message to stderr. """
+ instance = self.test_instance
+ expected_stderr = """\
+ started with pid {pid:d}
+ """.format(
+ pid=self.scenario['pid'])
+ instance.do_action()
+ self.assertOutputCheckerMatch(
+ expected_stderr, self.fake_stderr.getvalue())
+
+ def test_requests_app_run(self):
+ """ Should request the application to run. """
+ instance = self.test_instance
+ instance.do_action()
+ self.test_app.run.assert_called_with()
+
+
+class DaemonRunner_do_action_stop_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.do_action method, action 'stop'. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_do_action_stop_TestCase, self).setUp()
+
+ set_runner_scenario(self, 'pidfile-locked')
+
+ self.test_instance.action = 'stop'
+
+ self.mock_runner_lockfile.is_locked.return_value = True
+ self.mock_runner_lockfile.i_am_locking.return_value = False
+ self.mock_runner_lockfile.read_pid.return_value = (
+ self.scenario['pidlockfile_scenario']['pidfile_pid'])
+
+ def test_raises_error_if_pidfile_not_locked(self):
+ """ Should raise error if PID file is not locked. """
+ set_runner_scenario(self, 'simple')
+ instance = self.test_instance
+ self.mock_runner_lockfile.is_locked.return_value = False
+ self.mock_runner_lockfile.i_am_locking.return_value = False
+ self.mock_runner_lockfile.read_pid.return_value = (
+ self.scenario['pidlockfile_scenario']['pidfile_pid'])
+ pidfile_path = self.scenario['pidfile_path']
+ expected_error = daemon.runner.DaemonRunnerStopFailureError
+ expected_message_content = pidfile_path
+ exc = self.assertRaises(
+ expected_error,
+ instance.do_action)
+ self.assertIn(expected_message_content, unicode(exc))
+
+ def test_breaks_lock_if_pidfile_stale(self):
+ """ Should break lock if PID file is stale. """
+ instance = self.test_instance
+ pidfile_path = self.scenario['pidfile_path']
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ expected_signal = signal.SIG_DFL
+ test_error = OSError(errno.ESRCH, "Not running")
+ os.kill.side_effect = test_error
+ instance.do_action()
+ self.mock_runner_lockfile.break_lock.assert_called_with()
+
+ def test_sends_terminate_signal_to_process_from_pidfile(self):
+ """ Should send SIGTERM to the daemon process. """
+ instance = self.test_instance
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ expected_signal = signal.SIGTERM
+ instance.do_action()
+ os.kill.assert_called_with(test_pid, expected_signal)
+
+ def test_raises_error_if_cannot_send_signal_to_process(self):
+ """ Should raise error if cannot send signal to daemon process. """
+ instance = self.test_instance
+ test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
+ pidfile_path = self.scenario['pidfile_path']
+ test_error = OSError(errno.EPERM, "Nice try")
+ os.kill.side_effect = test_error
+ expected_error = daemon.runner.DaemonRunnerStopFailureError
+ expected_message_content = unicode(test_pid)
+ exc = self.assertRaises(
+ expected_error,
+ instance.do_action)
+ self.assertIn(expected_message_content, unicode(exc))
+
+
+@mock.patch.object(daemon.runner.DaemonRunner, "_start")
+@mock.patch.object(daemon.runner.DaemonRunner, "_stop")
+class DaemonRunner_do_action_restart_TestCase(DaemonRunner_BaseTestCase):
+ """ Test cases for DaemonRunner.do_action method, action 'restart'. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(DaemonRunner_do_action_restart_TestCase, self).setUp()
+
+ set_runner_scenario(self, 'pidfile-locked')
+
+ self.test_instance.action = 'restart'
+
+ def test_requests_stop_then_start(
+ self,
+ mock_func_daemonrunner_start, mock_func_daemonrunner_stop):
+ """ Should request stop, then start. """
+ instance = self.test_instance
+ instance.do_action()
+ mock_func_daemonrunner_start.assert_called_with()
+ mock_func_daemonrunner_stop.assert_called_with()
+
+
+@mock.patch.object(sys, "stderr")
+class emit_message_TestCase(scaffold.TestCase):
+ """ Test cases for ‘emit_message’ function. """
+
+ def test_writes_specified_message_to_stream(self, mock_stderr):
+ """ Should write specified message to stream. """
+ test_message = self.getUniqueString()
+ expected_content = "{message}\n".format(message=test_message)
+ daemon.runner.emit_message(test_message, stream=mock_stderr)
+ mock_stderr.write.assert_called_with(expected_content)
+
+ def test_writes_to_specified_stream(self, mock_stderr):
+ """ Should write message to specified stream. """
+ test_message = self.getUniqueString()
+ mock_stream = mock.MagicMock()
+ daemon.runner.emit_message(test_message, stream=mock_stream)
+ mock_stream.write.assert_called_with(mock.ANY)
+
+ def test_writes_to_stderr_by_default(self, mock_stderr):
+ """ Should write message to ‘sys.stderr’ by default. """
+ test_message = self.getUniqueString()
+ daemon.runner.emit_message(test_message)
+ mock_stderr.write.assert_called_with(mock.ANY)
+
+
+class is_pidfile_stale_TestCase(scaffold.TestCase):
+ """ Test cases for ‘is_pidfile_stale’ function. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(is_pidfile_stale_TestCase, self).setUp()
+
+ func_patcher_os_kill = mock.patch.object(os, "kill")
+ func_patcher_os_kill.start()
+ self.addCleanup(func_patcher_os_kill.stop)
+ os.kill.return_value = None
+
+ self.test_pid = self.getUniqueInteger()
+ self.test_pidfile = mock.MagicMock(daemon.pidfile.TimeoutPIDLockFile)
+ self.test_pidfile.read_pid.return_value = self.test_pid
+
+ def test_returns_false_if_no_pid_in_file(self):
+ """ Should return False if the pidfile contains no PID. """
+ self.test_pidfile.read_pid.return_value = None
+ expected_result = False
+ result = daemon.runner.is_pidfile_stale(self.test_pidfile)
+ self.assertEqual(expected_result, result)
+
+ def test_returns_false_if_process_exists(self):
+ """ Should return False if the process with its PID exists. """
+ expected_result = False
+ result = daemon.runner.is_pidfile_stale(self.test_pidfile)
+ self.assertEqual(expected_result, result)
+
+ def test_returns_true_if_process_does_not_exist(self):
+ """ Should return True if the process does not exist. """
+ test_error = ProcessLookupError("No such process")
+ del os.kill.return_value
+ os.kill.side_effect = test_error
+ expected_result = True
+ result = daemon.runner.is_pidfile_stale(self.test_pidfile)
+ self.assertEqual(expected_result, result)
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
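For orientation: the fixtures above model the documented ‘python-daemon’ runner pattern, in which an application object exposes stream paths, an absolute PID-file path with a timeout, and a run() callable, and DaemonRunner dispatches on the action parsed from sys.argv. A minimal sketch of that pattern, assuming illustrative names and paths (MyApp and the /tmp/... paths are not part of this change):

import daemon.runner

class MyApp(object):
    """ Application object in the shape DaemonRunner expects. """
    stdin_path = "/dev/null"
    stdout_path = "/tmp/myapp.log"    # illustrative path
    stderr_path = "/tmp/myapp.err"    # illustrative path
    pidfile_path = "/tmp/myapp.pid"   # must be an absolute path string
    pidfile_timeout = 5

    def run(self):
        # long-running daemon body goes here
        pass

if __name__ == "__main__":
    # parse_args() runs inside __init__ and reads 'start' | 'stop' | 'restart'
    runner = daemon.runner.DaemonRunner(MyApp())
    runner.do_action()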
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test_version.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test_version.py
new file mode 100755
index 00000000..b52f521d
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test_version.py
@@ -0,0 +1,1373 @@
+# -*- coding: utf-8 -*-
+#
+# test_version.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; version 3 of that license or any later version.
+# No warranty expressed or implied. See the file ‘LICENSE.GPL-3’ for details.
+
+""" Unit test for ‘version’ packaging module. """
+
+from __future__ import (absolute_import, unicode_literals)
+
+import os
+import os.path
+import io
+import errno
+import functools
+import collections
+import textwrap
+import json
+import tempfile
+import distutils.dist
+import distutils.cmd
+import distutils.errors
+import distutils.fancy_getopt
+try:
+ # Standard library of Python 2.7 and later.
+ from io import StringIO
+except ImportError:
+ # Standard library of Python 2.6 and earlier.
+ from StringIO import StringIO
+
+import mock
+import testtools
+import testscenarios
+import docutils
+import docutils.writers
+import docutils.nodes
+import setuptools
+import setuptools.command
+
+import version
+
+version.ensure_class_bases_begin_with(
+ version.__dict__, str('VersionInfoWriter'), docutils.writers.Writer)
+version.ensure_class_bases_begin_with(
+ version.__dict__, str('VersionInfoTranslator'),
+ docutils.nodes.SparseNodeVisitor)
+
+
+def make_test_classes_for_ensure_class_bases_begin_with():
+ """ Make test classes for use with ‘ensure_class_bases_begin_with’.
+
+ :return: Mapping {`name`: `type`} of the custom types created.
+
+ """
+
+ class quux_metaclass(type):
+ def __new__(metaclass, name, bases, namespace):
+ return super(quux_metaclass, metaclass).__new__(
+ metaclass, name, bases, namespace)
+
+ class Foo(object):
+ __metaclass__ = type
+
+ class Bar(object):
+ pass
+
+ class FooInheritingBar(Bar):
+ __metaclass__ = type
+
+ class FooWithCustomMetaclass(object):
+ __metaclass__ = quux_metaclass
+
+ result = dict(
+ (name, value) for (name, value) in locals().items()
+ if isinstance(value, type))
+
+ return result
+
+class ensure_class_bases_begin_with_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ensure_class_bases_begin_with’ function. """
+
+ test_classes = make_test_classes_for_ensure_class_bases_begin_with()
+
+ scenarios = [
+ ('simple', {
+ 'test_class': test_classes['Foo'],
+ 'base_class': test_classes['Bar'],
+ }),
+ ('custom metaclass', {
+ 'test_class': test_classes['FooWithCustomMetaclass'],
+ 'base_class': test_classes['Bar'],
+ 'expected_metaclass': test_classes['quux_metaclass'],
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(ensure_class_bases_begin_with_TestCase, self).setUp()
+
+ self.class_name = self.test_class.__name__
+ self.test_module_namespace = {self.class_name: self.test_class}
+
+ if not hasattr(self, 'expected_metaclass'):
+ self.expected_metaclass = type
+
+ patcher_metaclass = mock.patch.object(
+ self.test_class, '__metaclass__')
+ patcher_metaclass.start()
+ self.addCleanup(patcher_metaclass.stop)
+
+ self.fake_new_class = type(object)
+ self.test_class.__metaclass__.return_value = (
+ self.fake_new_class)
+
+ def test_module_namespace_contains_new_class(self):
+ """ Specified module namespace should have new class. """
+ version.ensure_class_bases_begin_with(
+ self.test_module_namespace, self.class_name, self.base_class)
+ self.assertIn(self.fake_new_class, self.test_module_namespace.values())
+
+ def test_calls_metaclass_with_expected_class_name(self):
+ """ Should call the metaclass with the expected class name. """
+ version.ensure_class_bases_begin_with(
+ self.test_module_namespace, self.class_name, self.base_class)
+ expected_class_name = self.class_name
+ self.test_class.__metaclass__.assert_called_with(
+ expected_class_name, mock.ANY, mock.ANY)
+
+ def test_calls_metaclass_with_expected_bases(self):
+ """ Should call the metaclass with the expected bases. """
+ version.ensure_class_bases_begin_with(
+ self.test_module_namespace, self.class_name, self.base_class)
+ expected_bases = tuple(
+ [self.base_class]
+ + list(self.test_class.__bases__))
+ self.test_class.__metaclass__.assert_called_with(
+ mock.ANY, expected_bases, mock.ANY)
+
+ def test_calls_metaclass_with_expected_namespace(self):
+ """ Should call the metaclass with the expected class namespace. """
+ version.ensure_class_bases_begin_with(
+ self.test_module_namespace, self.class_name, self.base_class)
+ expected_namespace = self.test_class.__dict__.copy()
+ del expected_namespace['__dict__']
+ self.test_class.__metaclass__.assert_called_with(
+ mock.ANY, mock.ANY, expected_namespace)
+
+
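The test case classes in this module follow the ‘testscenarios’ idiom used above: a class-level scenarios list of (name, attributes) pairs, with testscenarios.WithScenarios running each test method once per scenario and injecting that scenario's attributes onto the test instance. A self-contained sketch of the idiom (the adder example is hypothetical, not taken from this change):

import testscenarios
import testtools

class adder_TestCase(testscenarios.WithScenarios, testtools.TestCase):
    """ Each scenario's mapping becomes attributes on the test instance. """

    scenarios = [
        ('positive', {'operands': (2, 3), 'expected_sum': 5}),
        ('negative', {'operands': (-2, -3), 'expected_sum': -5}),
    ]

    def test_returns_expected_sum(self):
        # Runs once per scenario, e.g. as test_returns_expected_sum(positive).
        self.assertEqual(self.expected_sum, sum(self.operands))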
+class ensure_class_bases_begin_with_AlreadyHasBase_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ensure_class_bases_begin_with’ function.
+
+ These test cases test the conditions where the class's base is
+ already the specified base class.
+
+ """
+
+ test_classes = make_test_classes_for_ensure_class_bases_begin_with()
+
+ scenarios = [
+ ('already Bar subclass', {
+ 'test_class': test_classes['FooInheritingBar'],
+ 'base_class': test_classes['Bar'],
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(
+ ensure_class_bases_begin_with_AlreadyHasBase_TestCase,
+ self).setUp()
+
+ self.class_name = self.test_class.__name__
+ self.test_module_namespace = {self.class_name: self.test_class}
+
+ patcher_metaclass = mock.patch.object(
+ self.test_class, '__metaclass__')
+ patcher_metaclass.start()
+ self.addCleanup(patcher_metaclass.stop)
+
+ def test_metaclass_not_called(self):
+ """ Should not call metaclass to create a new type. """
+ version.ensure_class_bases_begin_with(
+ self.test_module_namespace, self.class_name, self.base_class)
+ self.assertFalse(self.test_class.__metaclass__.called)
+
+
+class VersionInfoWriter_TestCase(testtools.TestCase):
+ """ Test cases for ‘VersionInfoWriter’ class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(VersionInfoWriter_TestCase, self).setUp()
+
+ self.test_instance = version.VersionInfoWriter()
+
+ def test_declares_version_info_support(self):
+ """ Should declare support for ‘version_info’. """
+ instance = self.test_instance
+ expected_support = "version_info"
+ result = instance.supports(expected_support)
+ self.assertTrue(result)
+
+
+class VersionInfoWriter_translate_TestCase(testtools.TestCase):
+ """ Test cases for ‘VersionInfoWriter.translate’ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(VersionInfoWriter_translate_TestCase, self).setUp()
+
+ patcher_translator = mock.patch.object(
+ version, 'VersionInfoTranslator')
+ self.mock_class_translator = patcher_translator.start()
+ self.addCleanup(patcher_translator.stop)
+ self.mock_translator = self.mock_class_translator.return_value
+
+ self.test_instance = version.VersionInfoWriter()
+ patcher_document = mock.patch.object(
+ self.test_instance, 'document')
+ patcher_document.start()
+ self.addCleanup(patcher_document.stop)
+
+ def test_creates_translator_with_document(self):
+ """ Should create a translator with the writer's document. """
+ instance = self.test_instance
+ expected_document = self.test_instance.document
+ instance.translate()
+ self.mock_class_translator.assert_called_with(expected_document)
+
+ def test_calls_document_walkabout_with_translator(self):
+ """ Should call document.walkabout with the translator. """
+ instance = self.test_instance
+ instance.translate()
+ instance.document.walkabout.assert_called_with(self.mock_translator)
+
+ def test_output_from_translator_astext(self):
+ """ Should have output from translator.astext(). """
+ instance = self.test_instance
+ instance.translate()
+ expected_output = self.mock_translator.astext.return_value
+ self.assertEqual(expected_output, instance.output)
+
+
+class ChangeLogEntry_TestCase(testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry’ class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(ChangeLogEntry_TestCase, self).setUp()
+
+ self.test_instance = version.ChangeLogEntry()
+
+ def test_instantiate(self):
+ """ New instance of ‘ChangeLogEntry’ should be created. """
+ self.assertIsInstance(
+ self.test_instance, version.ChangeLogEntry)
+
+ def test_minimum_zero_arguments(self):
+ """ Initialiser should not require any arguments. """
+ instance = version.ChangeLogEntry()
+ self.assertIsNot(instance, None)
+
+
+class ChangeLogEntry_release_date_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry.release_date’ attribute. """
+
+ scenarios = [
+ ('default', {
+ 'test_args': {},
+ 'expected_release_date':
+ version.ChangeLogEntry.default_release_date,
+ }),
+ ('unknown token', {
+ 'test_args': {'release_date': "UNKNOWN"},
+ 'expected_release_date': "UNKNOWN",
+ }),
+ ('future token', {
+ 'test_args': {'release_date': "FUTURE"},
+ 'expected_release_date': "FUTURE",
+ }),
+ ('2001-01-01', {
+ 'test_args': {'release_date': "2001-01-01"},
+ 'expected_release_date': "2001-01-01",
+ }),
+ ('bogus', {
+ 'test_args': {'release_date': "b0gUs"},
+ 'expected_error': ValueError,
+ }),
+ ]
+
+ def test_has_expected_release_date(self):
+ """ Should have default `release_date` attribute. """
+ if hasattr(self, 'expected_error'):
+ self.assertRaises(
+ self.expected_error,
+ version.ChangeLogEntry, **self.test_args)
+ else:
+ instance = version.ChangeLogEntry(**self.test_args)
+ self.assertEqual(self.expected_release_date, instance.release_date)
+
+
+class ChangeLogEntry_version_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry.version’ attribute. """
+
+ scenarios = [
+ ('default', {
+ 'test_args': {},
+ 'expected_version':
+ version.ChangeLogEntry.default_version,
+ }),
+ ('unknown token', {
+ 'test_args': {'version': "UNKNOWN"},
+ 'expected_version': "UNKNOWN",
+ }),
+ ('0.0', {
+ 'test_args': {'version': "0.0"},
+ 'expected_version': "0.0",
+ }),
+ ]
+
+ def test_has_expected_version(self):
+ """ Should have default `version` attribute. """
+ instance = version.ChangeLogEntry(**self.test_args)
+ self.assertEqual(self.expected_version, instance.version)
+
+
+class ChangeLogEntry_maintainer_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry.maintainer’ attribute. """
+
+ scenarios = [
+ ('default', {
+ 'test_args': {},
+ 'expected_maintainer': None,
+ }),
+ ('person', {
+ 'test_args': {'maintainer': "Foo Bar <foo.bar@example.org>"},
+ 'expected_maintainer': "Foo Bar <foo.bar@example.org>",
+ }),
+ ('bogus', {
+ 'test_args': {'maintainer': "b0gUs"},
+ 'expected_error': ValueError,
+ }),
+ ]
+
+ def test_has_expected_maintainer(self):
+ """ Should have default `maintainer` attribute. """
+ if hasattr(self, 'expected_error'):
+ self.assertRaises(
+ self.expected_error,
+ version.ChangeLogEntry, **self.test_args)
+ else:
+ instance = version.ChangeLogEntry(**self.test_args)
+ self.assertEqual(self.expected_maintainer, instance.maintainer)
+
+
+class ChangeLogEntry_body_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry.body’ attribute. """
+
+ scenarios = [
+ ('default', {
+ 'test_args': {},
+ 'expected_body': None,
+ }),
+ ('simple', {
+ 'test_args': {'body': "Foo bar baz."},
+ 'expected_body': "Foo bar baz.",
+ }),
+ ]
+
+ def test_has_expected_body(self):
+ """ Should have default `body` attribute. """
+ instance = version.ChangeLogEntry(**self.test_args)
+ self.assertEqual(self.expected_body, instance.body)
+
+
+class ChangeLogEntry_as_version_info_entry_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘ChangeLogEntry.as_version_info_entry’ attribute. """
+
+ scenarios = [
+ ('default', {
+ 'test_args': {},
+ 'expected_result': collections.OrderedDict([
+ ('release_date', version.ChangeLogEntry.default_release_date),
+ ('version', version.ChangeLogEntry.default_version),
+ ('maintainer', None),
+ ('body', None),
+ ]),
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(ChangeLogEntry_as_version_info_entry_TestCase, self).setUp()
+
+ self.test_instance = version.ChangeLogEntry(**self.test_args)
+
+ def test_returns_result(self):
+ """ Should return expected result. """
+ result = self.test_instance.as_version_info_entry()
+ self.assertEqual(self.expected_result, result)
+
+
+def make_mock_field_node(field_name, field_body):
+ """ Make a mock Docutils field node for tests. """
+
+ mock_field_node = mock.MagicMock(
+ name='field', spec=docutils.nodes.field)
+
+ mock_field_name_node = mock.MagicMock(
+ name='field_name', spec=docutils.nodes.field_name)
+ mock_field_name_node.parent = mock_field_node
+ mock_field_name_node.children = [field_name]
+
+ mock_field_body_node = mock.MagicMock(
+ name='field_body', spec=docutils.nodes.field_body)
+ mock_field_body_node.parent = mock_field_node
+ mock_field_body_node.children = [field_body]
+
+ mock_field_node.children = [mock_field_name_node, mock_field_body_node]
+
+ def fake_func_first_child_matching_class(node_class):
+ result = None
+ node_class_name = node_class.__name__
+ for (index, node) in enumerate(mock_field_node.children):
+ if node._mock_name == node_class_name:
+ result = index
+ break
+ return result
+
+ mock_field_node.first_child_matching_class.side_effect = (
+ fake_func_first_child_matching_class)
+
+ return mock_field_node
+
+
+class JsonEqual(testtools.matchers.Matcher):
+ """ A matcher to compare the value of JSON streams. """
+
+ def __init__(self, expected):
+ self.expected_value = expected
+
+ def match(self, content):
+ """ Assert the JSON `content` matches the `expected_content`. """
+ result = None
+ actual_value = json.loads(content.decode('utf-8'))
+ if actual_value != self.expected_value:
+ result = JsonValueMismatch(self.expected_value, actual_value)
+ return result
+
+
+class JsonValueMismatch(testtools.matchers.Mismatch):
+ """ The specified JSON stream does not evaluate to the expected value. """
+
+ def __init__(self, expected, actual):
+ self.expected_value = expected
+ self.actual_value = actual
+
+ def describe(self):
+ """ Emit a text description of this mismatch. """
+ expected_json_text = json.dumps(self.expected_value, indent=4)
+ actual_json_text = json.dumps(self.actual_value, indent=4)
+ text = (
+ "\n"
+ "reference: {expected}\n"
+ "actual: {actual}\n").format(
+ expected=expected_json_text, actual=actual_json_text)
+ return text
+
+
+class changelog_to_version_info_collection_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘changelog_to_version_info_collection’ function. """
+
+ scenarios = [
+ ('single entry', {
+ 'test_input': textwrap.dedent("""\
+ Version 1.0
+ ===========
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_version_info': [
+ {
+ 'release_date': "2009-01-01",
+ 'version': "1.0",
+ 'maintainer': "Foo Bar <foo.bar@example.org>",
+ 'body': "* Lorem ipsum dolor sit amet.\n",
+ },
+ ],
+ }),
+ ('multiple entries', {
+ 'test_input': textwrap.dedent("""\
+ Version 1.0
+ ===========
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+
+
+ Version 0.8
+ ===========
+
+ :Released: 2004-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Donec venenatis nisl aliquam ipsum.
+
+
+ Version 0.7.2
+ =============
+
+ :Released: 2001-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Pellentesque elementum mollis finibus.
+ """),
+ 'expected_version_info': [
+ {
+ 'release_date': "2009-01-01",
+ 'version': "1.0",
+ 'maintainer': "Foo Bar <foo.bar@example.org>",
+ 'body': "* Lorem ipsum dolor sit amet.\n",
+ },
+ {
+ 'release_date': "2004-01-01",
+ 'version': "0.8",
+ 'maintainer': "Foo Bar <foo.bar@example.org>",
+ 'body': "* Donec venenatis nisl aliquam ipsum.\n",
+ },
+ {
+ 'release_date': "2001-01-01",
+ 'version': "0.7.2",
+ 'maintainer': "Foo Bar <foo.bar@example.org>",
+ 'body': "* Pellentesque elementum mollis finibus.\n",
+ },
+ ],
+ }),
+ ('trailing comment', {
+ 'test_input': textwrap.dedent("""\
+ Version NEXT
+ ============
+
+ :Released: FUTURE
+ :Maintainer:
+
+ * Lorem ipsum dolor sit amet.
+
+ ..
+ Vivamus aliquam felis rutrum rutrum dictum.
+ """),
+ 'expected_version_info': [
+ {
+ 'release_date': "FUTURE",
+ 'version': "NEXT",
+ 'maintainer': "",
+ 'body': "* Lorem ipsum dolor sit amet.\n",
+ },
+ ],
+ }),
+ ('inline comment', {
+ 'test_input': textwrap.dedent("""\
+ Version NEXT
+ ============
+
+ :Released: FUTURE
+ :Maintainer:
+
+ ..
+ Vivamus aliquam felis rutrum rutrum dictum.
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_version_info': [
+ {
+ 'release_date': "FUTURE",
+ 'version': "NEXT",
+ 'maintainer': "",
+ 'body': "* Lorem ipsum dolor sit amet.\n",
+ },
+ ],
+ }),
+ ('unreleased entry', {
+ 'test_input': textwrap.dedent("""\
+ Version NEXT
+ ============
+
+ :Released: FUTURE
+ :Maintainer:
+
+ * Lorem ipsum dolor sit amet.
+
+
+ Version 0.8
+ ===========
+
+ :Released: 2001-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Donec venenatis nisl aliquam ipsum.
+ """),
+ 'expected_version_info': [
+ {
+ 'release_date': "FUTURE",
+ 'version': "NEXT",
+ 'maintainer': "",
+ 'body': "* Lorem ipsum dolor sit amet.\n",
+ },
+ {
+ 'release_date': "2001-01-01",
+ 'version': "0.8",
+ 'maintainer': "Foo Bar <foo.bar@example.org>",
+ 'body': "* Donec venenatis nisl aliquam ipsum.\n",
+ },
+ ],
+ }),
+ ('no section', {
+ 'test_input': textwrap.dedent("""\
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_error': version.InvalidFormatError,
+ }),
+ ('subsection', {
+ 'test_input': textwrap.dedent("""\
+ Version 1.0
+ ===========
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+
+ Ut ultricies fermentum quam
+ ---------------------------
+
+ * In commodo magna facilisis in.
+ """),
+ 'expected_error': version.InvalidFormatError,
+ 'subsection': True,
+ }),
+ ('unknown field', {
+ 'test_input': textwrap.dedent("""\
+ Version 1.0
+ ===========
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+ :Favourite: Spam
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_error': version.InvalidFormatError,
+ }),
+ ('invalid version word', {
+ 'test_input': textwrap.dedent("""\
+ BoGuS 1.0
+ =========
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_error': version.InvalidFormatError,
+ }),
+ ('invalid section title', {
+ 'test_input': textwrap.dedent("""\
+ Lorem Ipsum 1.0
+ ===============
+
+ :Released: 2009-01-01
+ :Maintainer: Foo Bar <foo.bar@example.org>
+
+ * Lorem ipsum dolor sit amet.
+ """),
+ 'expected_error': version.InvalidFormatError,
+ }),
+ ]
+
+ def test_returns_expected_version_info(self):
+ """ Should return expected version info mapping. """
+ infile = StringIO(self.test_input)
+ if hasattr(self, 'expected_error'):
+ self.assertRaises(
+ self.expected_error,
+ version.changelog_to_version_info_collection, infile)
+ else:
+ result = version.changelog_to_version_info_collection(infile)
+ self.assertThat(result, JsonEqual(self.expected_version_info))
+
+
+try:
+ FileNotFoundError
+ PermissionError
+except NameError:
+ # Python 2 uses OSError.
+ FileNotFoundError = functools.partial(IOError, errno.ENOENT)
+ PermissionError = functools.partial(IOError, errno.EPERM)
+
+fake_version_info = {
+ 'release_date': "2001-01-01", 'version': "2.0",
+ 'maintainer': None, 'body': None,
+ }
+
+@mock.patch.object(
+ version, "get_latest_version", return_value=fake_version_info)
+class generate_version_info_from_changelog_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘generate_version_info_from_changelog’ function. """
+
+ fake_open_side_effects = {
+ 'success': (
+ lambda *args, **kwargs: StringIO()),
+ 'file not found': FileNotFoundError(),
+ 'permission denied': PermissionError(),
+ }
+
+ scenarios = [
+ ('simple', {
+ 'open_scenario': 'success',
+ 'fake_versions_json': json.dumps([fake_version_info]),
+ 'expected_result': fake_version_info,
+ }),
+ ('file not found', {
+ 'open_scenario': 'file not found',
+ 'expected_result': {},
+ }),
+ ('permission denied', {
+ 'open_scenario': 'permission denied',
+ 'expected_result': {},
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(generate_version_info_from_changelog_TestCase, self).setUp()
+
+ self.fake_changelog_file_path = tempfile.mktemp()
+
+ def fake_open(filespec, *args, **kwargs):
+ if filespec == self.fake_changelog_file_path:
+ side_effect = self.fake_open_side_effects[self.open_scenario]
+ if callable(side_effect):
+ result = side_effect()
+ else:
+ raise side_effect
+ else:
+ result = StringIO()
+ return result
+
+ func_patcher_io_open = mock.patch.object(
+ io, "open")
+ func_patcher_io_open.start()
+ self.addCleanup(func_patcher_io_open.stop)
+ io.open.side_effect = fake_open
+
+ self.file_encoding = "utf-8"
+
+ func_patcher_changelog_to_version_info_collection = mock.patch.object(
+ version, "changelog_to_version_info_collection")
+ func_patcher_changelog_to_version_info_collection.start()
+ self.addCleanup(func_patcher_changelog_to_version_info_collection.stop)
+ if hasattr(self, 'fake_versions_json'):
+ version.changelog_to_version_info_collection.return_value = (
+ self.fake_versions_json.encode(self.file_encoding))
+
+ def test_returns_empty_collection_on_read_error(
+ self,
+ mock_func_get_latest_version):
+ """ Should return empty collection on error reading changelog. """
+ test_error = PermissionError("Not for you")
+ version.changelog_to_version_info_collection.side_effect = test_error
+ result = version.generate_version_info_from_changelog(
+ self.fake_changelog_file_path)
+ expected_result = {}
+ self.assertDictEqual(expected_result, result)
+
+ def test_opens_file_with_expected_encoding(
+ self,
+ mock_func_get_latest_version):
+ """ Should open changelog file in text mode with expected encoding. """
+ result = version.generate_version_info_from_changelog(
+ self.fake_changelog_file_path)
+ expected_file_path = self.fake_changelog_file_path
+ expected_open_mode = 'rt'
+ expected_encoding = self.file_encoding
+ (open_args_positional, open_args_kwargs) = io.open.call_args
+ (open_args_filespec, open_args_mode) = open_args_positional[:2]
+ open_args_encoding = open_args_kwargs['encoding']
+ self.assertEqual(expected_file_path, open_args_filespec)
+ self.assertEqual(expected_open_mode, open_args_mode)
+ self.assertEqual(expected_encoding, open_args_encoding)
+
+ def test_returns_expected_result(
+ self,
+ mock_func_get_latest_version):
+ """ Should return expected result. """
+ result = version.generate_version_info_from_changelog(
+ self.fake_changelog_file_path)
+ self.assertEqual(self.expected_result, result)
+
+
+DefaultNoneDict = functools.partial(collections.defaultdict, lambda: None)
+
+class get_latest_version_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘get_latest_version’ function. """
+
+ scenarios = [
+ ('simple', {
+ 'test_versions': [
+ DefaultNoneDict({'release_date': "LATEST"}),
+ ],
+ 'expected_result': version.ChangeLogEntry.make_ordered_dict(
+ DefaultNoneDict({'release_date': "LATEST"})),
+ }),
+ ('no versions', {
+ 'test_versions': [],
+ 'expected_result': collections.OrderedDict(),
+ }),
+ ('ordered versions', {
+ 'test_versions': [
+ DefaultNoneDict({'release_date': "1"}),
+ DefaultNoneDict({'release_date': "2"}),
+ DefaultNoneDict({'release_date': "LATEST"}),
+ ],
+ 'expected_result': version.ChangeLogEntry.make_ordered_dict(
+ DefaultNoneDict({'release_date': "LATEST"})),
+ }),
+ ('un-ordered versions', {
+ 'test_versions': [
+ DefaultNoneDict({'release_date': "2"}),
+ DefaultNoneDict({'release_date': "LATEST"}),
+ DefaultNoneDict({'release_date': "1"}),
+ ],
+ 'expected_result': version.ChangeLogEntry.make_ordered_dict(
+ DefaultNoneDict({'release_date': "LATEST"})),
+ }),
+ ]
+
+ def test_returns_expected_result(self):
+ """ Should return expected result. """
+ result = version.get_latest_version(self.test_versions)
+ self.assertDictEqual(self.expected_result, result)
+
+
+@mock.patch.object(json, "dumps", side_effect=json.dumps)
+class serialise_version_info_from_mapping_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘get_latest_version’ function. """
+
+ scenarios = [
+ ('simple', {
+ 'test_version_info': {'foo': "spam"},
+ }),
+ ]
+
+ for (name, scenario) in scenarios:
+ scenario['fake_json_dump'] = json.dumps(scenario['test_version_info'])
+ scenario['expected_value'] = scenario['test_version_info']
+
+ def test_passes_specified_object(self, mock_func_json_dumps):
+ """ Should pass the specified object to `json.dumps`. """
+ result = version.serialise_version_info_from_mapping(
+ self.test_version_info)
+ mock_func_json_dumps.assert_called_with(
+ self.test_version_info, indent=mock.ANY)
+
+ def test_returns_expected_result(self, mock_func_json_dumps):
+ """ Should return expected result. """
+ mock_func_json_dumps.return_value = self.fake_json_dump
+ result = version.serialise_version_info_from_mapping(
+ self.test_version_info)
+ value = json.loads(result)
+ self.assertEqual(self.expected_value, value)
+
+
+DistributionMetadata_defaults = {
+ name: None
+ for name in list(collections.OrderedDict.fromkeys(
+ distutils.dist.DistributionMetadata._METHOD_BASENAMES))}
+FakeDistributionMetadata = collections.namedtuple(
+ 'FakeDistributionMetadata', DistributionMetadata_defaults.keys())
+
+Distribution_defaults = {
+ 'metadata': None,
+ 'version': None,
+ 'release_date': None,
+ 'maintainer': None,
+ 'maintainer_email': None,
+ }
+FakeDistribution = collections.namedtuple(
+ 'FakeDistribution', Distribution_defaults.keys())
+
+def make_fake_distribution(
+ fields_override=None, metadata_fields_override=None):
+ metadata_fields = DistributionMetadata_defaults.copy()
+ if metadata_fields_override is not None:
+ metadata_fields.update(metadata_fields_override)
+ metadata = FakeDistributionMetadata(**metadata_fields)
+
+ fields = Distribution_defaults.copy()
+ fields['metadata'] = metadata
+ if fields_override is not None:
+ fields.update(fields_override)
+ distribution = FakeDistribution(**fields)
+
+ return distribution
+
+
+class get_changelog_path_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘get_changelog_path’ function. """
+
+ default_path = "."
+ default_script_filename = "setup.py"
+
+ scenarios = [
+ ('simple', {}),
+ ('unusual script name', {
+ 'script_filename': "lorem_ipsum",
+ }),
+ ('relative script path', {
+ 'script_directory': "dolor/sit/amet",
+ }),
+ ('absolute script path', {
+ 'script_directory': "/dolor/sit/amet",
+ }),
+ ('specify filename', {
+ 'changelog_filename': "adipiscing",
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(get_changelog_path_TestCase, self).setUp()
+
+ self.test_distribution = mock.MagicMock(distutils.dist.Distribution)
+
+ if not hasattr(self, 'script_directory'):
+ self.script_directory = self.default_path
+ if not hasattr(self, 'script_filename'):
+ self.script_filename = self.default_script_filename
+ self.test_distribution.script_name = os.path.join(
+ self.script_directory, self.script_filename)
+
+ changelog_filename = version.changelog_filename
+ if hasattr(self, 'changelog_filename'):
+ changelog_filename = self.changelog_filename
+
+ self.expected_result = os.path.join(
+ self.script_directory, changelog_filename)
+
+ def test_returns_expected_result(self):
+ """ Should return expected result. """
+ args = {
+ 'distribution': self.test_distribution,
+ }
+ if hasattr(self, 'changelog_filename'):
+ args.update({'filename': self.changelog_filename})
+ result = version.get_changelog_path(**args)
+ self.assertEqual(self.expected_result, result)
+
+
+class WriteVersionInfoCommand_BaseTestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Base class for ‘WriteVersionInfoCommand’ test case classes. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(WriteVersionInfoCommand_BaseTestCase, self).setUp()
+
+ fake_distribution_name = self.getUniqueString()
+
+ self.test_distribution = distutils.dist.Distribution()
+ self.test_distribution.metadata.name = fake_distribution_name
+
+
+class WriteVersionInfoCommand_TestCase(WriteVersionInfoCommand_BaseTestCase):
+ """ Test cases for ‘WriteVersionInfoCommand’ class. """
+
+ def test_subclass_of_distutils_command(self):
+ """ Should be a subclass of ‘distutils.cmd.Command’. """
+ instance = version.WriteVersionInfoCommand(self.test_distribution)
+ self.assertIsInstance(instance, distutils.cmd.Command)
+
+
+class WriteVersionInfoCommand_user_options_TestCase(
+ WriteVersionInfoCommand_BaseTestCase):
+ """ Test cases for ‘WriteVersionInfoCommand.user_options’ attribute. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(WriteVersionInfoCommand_user_options_TestCase, self).setUp()
+
+ self.test_instance = version.WriteVersionInfoCommand(
+ self.test_distribution)
+ self.commandline_parser = distutils.fancy_getopt.FancyGetopt(
+ self.test_instance.user_options)
+
+ def test_parses_correctly_as_fancy_getopt(self):
+ """ Should parse correctly in ‘FancyGetopt’. """
+ self.assertIsInstance(
+ self.commandline_parser, distutils.fancy_getopt.FancyGetopt)
+
+ def test_includes_base_class_user_options(self):
+ """ Should include base class's user_options. """
+ base_command = setuptools.command.egg_info.egg_info
+ expected_user_options = base_command.user_options
+ self.assertThat(
+ set(expected_user_options),
+ IsSubset(set(self.test_instance.user_options)))
+
+ def test_has_option_changelog_path(self):
+ """ Should have a ‘changelog-path’ option. """
+ expected_option_name = "changelog-path="
+ result = self.commandline_parser.has_option(expected_option_name)
+ self.assertTrue(result)
+
+ def test_has_option_outfile_path(self):
+ """ Should have a ‘outfile-path’ option. """
+ expected_option_name = "outfile-path="
+ result = self.commandline_parser.has_option(expected_option_name)
+ self.assertTrue(result)
+
+
+class WriteVersionInfoCommand_initialize_options_TestCase(
+ WriteVersionInfoCommand_BaseTestCase):
+ """ Test cases for ‘WriteVersionInfoCommand.initialize_options’ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(
+ WriteVersionInfoCommand_initialize_options_TestCase, self
+ ).setUp()
+
+ patcher_func_egg_info_initialize_options = mock.patch.object(
+ setuptools.command.egg_info.egg_info, "initialize_options")
+ patcher_func_egg_info_initialize_options.start()
+ self.addCleanup(patcher_func_egg_info_initialize_options.stop)
+
+ def test_calls_base_class_method(self):
+ """ Should call base class's ‘initialize_options’ method. """
+ instance = version.WriteVersionInfoCommand(self.test_distribution)
+ base_command_class = setuptools.command.egg_info.egg_info
+ base_command_class.initialize_options.assert_called_with()
+
+ def test_sets_changelog_path_to_none(self):
+ """ Should set ‘changelog_path’ attribute to ``None``. """
+ instance = version.WriteVersionInfoCommand(self.test_distribution)
+ self.assertIs(instance.changelog_path, None)
+
+ def test_sets_outfile_path_to_none(self):
+ """ Should set ‘outfile_path’ attribute to ``None``. """
+ instance = version.WriteVersionInfoCommand(self.test_distribution)
+ self.assertIs(instance.outfile_path, None)
+
+
+class WriteVersionInfoCommand_finalize_options_TestCase(
+ WriteVersionInfoCommand_BaseTestCase):
+ """ Test cases for ‘WriteVersionInfoCommand.finalize_options’ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(WriteVersionInfoCommand_finalize_options_TestCase, self).setUp()
+
+ self.test_instance = version.WriteVersionInfoCommand(self.test_distribution)
+
+ patcher_func_egg_info_finalize_options = mock.patch.object(
+ setuptools.command.egg_info.egg_info, "finalize_options")
+ patcher_func_egg_info_finalize_options.start()
+ self.addCleanup(patcher_func_egg_info_finalize_options.stop)
+
+ self.fake_script_dir = self.getUniqueString()
+ self.test_distribution.script_name = os.path.join(
+ self.fake_script_dir, self.getUniqueString())
+
+ self.fake_egg_dir = self.getUniqueString()
+ self.test_instance.egg_info = self.fake_egg_dir
+
+ patcher_func_get_changelog_path = mock.patch.object(
+ version, "get_changelog_path")
+ patcher_func_get_changelog_path.start()
+ self.addCleanup(patcher_func_get_changelog_path.stop)
+
+ self.fake_changelog_path = self.getUniqueString()
+ version.get_changelog_path.return_value = self.fake_changelog_path
+
+ def test_calls_base_class_method(self):
+ """ Should call base class's ‘finalize_options’ method. """
+ base_command_class = setuptools.command.egg_info.egg_info
+ self.test_instance.finalize_options()
+ base_command_class.finalize_options.assert_called_with()
+
+ def test_sets_force_to_none(self):
+ """ Should set ‘force’ attribute to ``None``. """
+ self.test_instance.finalize_options()
+ self.assertIs(self.test_instance.force, None)
+
+ def test_sets_changelog_path_using_get_changelog_path(self):
+ """ Should set ‘changelog_path’ attribute if it was ``None``. """
+ self.test_instance.changelog_path = None
+ self.test_instance.finalize_options()
+ expected_changelog_path = self.fake_changelog_path
+ self.assertEqual(expected_changelog_path, self.test_instance.changelog_path)
+
+ def test_leaves_changelog_path_if_already_set(self):
+ """ Should leave ‘changelog_path’ attribute set. """
+ prior_changelog_path = self.getUniqueString()
+ self.test_instance.changelog_path = prior_changelog_path
+ self.test_instance.finalize_options()
+ expected_changelog_path = prior_changelog_path
+ self.assertEqual(expected_changelog_path, self.test_instance.changelog_path)
+
+ def test_sets_outfile_path_to_default(self):
+ """ Should set ‘outfile_path’ attribute to default value. """
+ fake_version_info_filename = self.getUniqueString()
+ with mock.patch.object(
+ version, "version_info_filename",
+ new=fake_version_info_filename):
+ self.test_instance.finalize_options()
+ expected_outfile_path = os.path.join(
+ self.fake_egg_dir, fake_version_info_filename)
+ self.assertEqual(expected_outfile_path, self.test_instance.outfile_path)
+
+ def test_leaves_outfile_path_if_already_set(self):
+ """ Should leave ‘outfile_path’ attribute set. """
+ prior_outfile_path = self.getUniqueString()
+ self.test_instance.outfile_path = prior_outfile_path
+ self.test_instance.finalize_options()
+ expected_outfile_path = prior_outfile_path
+ self.assertEqual(expected_outfile_path, self.test_instance.outfile_path)
+
+
+class has_changelog_TestCase(
+ testscenarios.WithScenarios, testtools.TestCase):
+ """ Test cases for ‘has_changelog’ function. """
+
+ fake_os_path_exists_side_effects = {
+ 'true': (lambda path: True),
+ 'false': (lambda path: False),
+ }
+
+ scenarios = [
+ ('no changelog path', {
+ 'changelog_path': None,
+ 'expected_result': False,
+ }),
+ ('changelog exists', {
+ 'os_path_exists_scenario': 'true',
+ 'expected_result': True,
+ }),
+ ('changelog not found', {
+ 'os_path_exists_scenario': 'false',
+ 'expected_result': False,
+ }),
+ ]
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(has_changelog_TestCase, self).setUp()
+
+ self.test_distribution = distutils.dist.Distribution()
+ self.test_command = version.EggInfoCommand(
+ self.test_distribution)
+
+ patcher_func_get_changelog_path = mock.patch.object(
+ version, "get_changelog_path")
+ patcher_func_get_changelog_path.start()
+ self.addCleanup(patcher_func_get_changelog_path.stop)
+
+ self.fake_changelog_file_path = self.getUniqueString()
+ if hasattr(self, 'changelog_path'):
+ self.fake_changelog_file_path = self.changelog_path
+ version.get_changelog_path.return_value = self.fake_changelog_file_path
+ self.fake_changelog_file = StringIO()
+
+ def fake_os_path_exists(path):
+ if path == self.fake_changelog_file_path:
+ side_effect = self.fake_os_path_exists_side_effects[
+ self.os_path_exists_scenario]
+ if callable(side_effect):
+ result = side_effect(path)
+ else:
+ raise side_effect
+ else:
+ result = False
+ return result
+
+ func_patcher_os_path_exists = mock.patch.object(
+ os.path, "exists")
+ func_patcher_os_path_exists.start()
+ self.addCleanup(func_patcher_os_path_exists.stop)
+ os.path.exists.side_effect = fake_os_path_exists
+
+ def test_gets_changelog_path_from_distribution(self):
+ """ Should call ‘get_changelog_path’ with distribution. """
+ result = version.has_changelog(self.test_command)
+ version.get_changelog_path.assert_called_with(
+ self.test_distribution)
+
+ def test_returns_expected_result(self):
+ """ Should be a subclass of ‘distutils.cmd.Command’. """
+ result = version.has_changelog(self.test_command)
+ self.assertEqual(self.expected_result, result)
+
+
+@mock.patch.object(version, 'generate_version_info_from_changelog')
+@mock.patch.object(version, 'serialise_version_info_from_mapping')
+@mock.patch.object(version.EggInfoCommand, "write_file")
+class WriteVersionInfoCommand_run_TestCase(
+ WriteVersionInfoCommand_BaseTestCase):
+ """ Test cases for ‘WriteVersionInfoCommand.run’ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(WriteVersionInfoCommand_run_TestCase, self).setUp()
+
+ self.test_instance = version.WriteVersionInfoCommand(
+ self.test_distribution)
+
+ self.fake_changelog_path = self.getUniqueString()
+ self.test_instance.changelog_path = self.fake_changelog_path
+
+ self.fake_outfile_path = self.getUniqueString()
+ self.test_instance.outfile_path = self.fake_outfile_path
+
+ def test_returns_none(
+ self,
+ mock_func_egg_info_write_file,
+ mock_func_serialise_version_info,
+ mock_func_generate_version_info):
+ """ Should return ``None``. """
+ result = self.test_instance.run()
+ self.assertIs(result, None)
+
+ def test_generates_version_info_from_changelog(
+ self,
+ mock_func_egg_info_write_file,
+ mock_func_serialise_version_info,
+ mock_func_generate_version_info):
+ """ Should generate version info from specified changelog. """
+ self.test_instance.run()
+ expected_changelog_path = self.test_instance.changelog_path
+ mock_func_generate_version_info.assert_called_with(
+ expected_changelog_path)
+
+ def test_serialises_version_info_from_mapping(
+ self,
+ mock_func_egg_info_write_file,
+ mock_func_serialise_version_info,
+ mock_func_generate_version_info):
+ """ Should serialise version info from specified mapping. """
+ self.test_instance.run()
+ expected_version_info = mock_func_generate_version_info.return_value
+ mock_func_serialise_version_info.assert_called_with(
+ expected_version_info)
+
+ def test_writes_file_using_command_context(
+ self,
+ mock_func_egg_info_write_file,
+ mock_func_serialise_version_info,
+ mock_func_generate_version_info):
+ """ Should write the metadata file using the command context. """
+ self.test_instance.run()
+ expected_content = mock_func_serialise_version_info.return_value
+ mock_func_egg_info_write_file.assert_called_with(
+ "version info", self.fake_outfile_path, expected_content)
+
+
+IsSubset = testtools.matchers.MatchesPredicateWithParams(
+ set.issubset, "{0} should be a subset of {1}")
+
+class EggInfoCommand_TestCase(testtools.TestCase):
+ """ Test cases for ‘EggInfoCommand’ class. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(EggInfoCommand_TestCase, self).setUp()
+
+ self.test_distribution = distutils.dist.Distribution()
+ self.test_instance = version.EggInfoCommand(self.test_distribution)
+
+ def test_subclass_of_setuptools_egg_info(self):
+ """ Should be a subclass of Setuptools ‘egg_info’. """
+ self.assertIsInstance(
+ self.test_instance, setuptools.command.egg_info.egg_info)
+
+ def test_sub_commands_include_base_class_sub_commands(self):
+        """ Should include the base class's sub-commands in ‘sub_commands’. """
+ base_command = setuptools.command.egg_info.egg_info
+ expected_sub_commands = base_command.sub_commands
+ self.assertThat(
+ set(expected_sub_commands),
+ IsSubset(set(self.test_instance.sub_commands)))
+
+ def test_sub_commands_includes_write_version_info_command(self):
+ """ Should include sub-command named ‘write_version_info’. """
+ commands_by_name = dict(self.test_instance.sub_commands)
+ expected_predicate = version.has_changelog
+ expected_item = ('write_version_info', expected_predicate)
+ self.assertIn(expected_item, commands_by_name.items())
+
+
+@mock.patch.object(setuptools.command.egg_info.egg_info, "run")
+class EggInfoCommand_run_TestCase(testtools.TestCase):
+ """ Test cases for ‘EggInfoCommand.run’ method. """
+
+ def setUp(self):
+ """ Set up test fixtures. """
+ super(EggInfoCommand_run_TestCase, self).setUp()
+
+ self.test_distribution = distutils.dist.Distribution()
+ self.test_instance = version.EggInfoCommand(self.test_distribution)
+
+ base_command = setuptools.command.egg_info.egg_info
+ patcher_func_egg_info_get_sub_commands = mock.patch.object(
+ base_command, "get_sub_commands")
+ patcher_func_egg_info_get_sub_commands.start()
+ self.addCleanup(patcher_func_egg_info_get_sub_commands.stop)
+
+ patcher_func_egg_info_run_command = mock.patch.object(
+ base_command, "run_command")
+ patcher_func_egg_info_run_command.start()
+ self.addCleanup(patcher_func_egg_info_run_command.stop)
+
+ self.fake_sub_commands = ["spam", "eggs", "beans"]
+ base_command.get_sub_commands.return_value = self.fake_sub_commands
+
+ def test_returns_none(self, mock_func_egg_info_run):
+ """ Should return ``None``. """
+ result = self.test_instance.run()
+ self.assertIs(result, None)
+
+ def test_runs_each_command_in_sub_commands(
+ self, mock_func_egg_info_run):
+ """ Should run each command in ‘self.get_sub_commands()’. """
+ base_command = setuptools.command.egg_info.egg_info
+ self.test_instance.run()
+ expected_calls = [mock.call(name) for name in self.fake_sub_commands]
+ base_command.run_command.assert_has_calls(expected_calls)
+
+ def test_calls_base_class_run(self, mock_func_egg_info_run):
+ """ Should call base class's ‘run’ method. """
+ result = self.test_instance.run()
+ mock_func_egg_info_run.assert_called_with()
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/version.py b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/version.py
new file mode 100755
index 00000000..7e4c4202
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/version.py
@@ -0,0 +1,547 @@
+# -*- coding: utf-8 -*-
+
+# version.py
+# Part of ‘python-daemon’, an implementation of PEP 3143.
+#
+# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; version 3 of that license or any later version.
+# No warranty expressed or implied. See the file ‘LICENSE.GPL-3’ for details.
+
+""" Version information unified for human- and machine-readable formats.
+
+ The project ‘ChangeLog’ file is a reStructuredText document, with
+ each section describing a version of the project. The document is
+ intended to be readable as-is by end users.
+
+ This module handles transformation from the ‘ChangeLog’ to a
+ mapping of version information, serialised as JSON. It also
+ provides functionality for Distutils to use this information.
+
+ Requires:
+
+ * Docutils <http://docutils.sourceforge.net/>
+    * JSON <https://docs.python.org/3/library/json.html>
+
+ """
+
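+# Illustrative sketch (not part of the upstream module): a ‘ChangeLog’
+# section of the form this module expects, i.e. a title “Version N”, a
+# field list with ‘Released’ and ‘Maintainer’, then a bullet list:
+#
+#     Version 2.0.5
+#     =============
+#
+#     :Released: 2015-02-02
+#     :Maintainer: Ben Finney <ben+python@benfinney.id.au>
+#
+#     * Example change description.
+#
+# is rendered to a mapping and serialised as JSON, roughly:
+#
+#     {
+#         "release_date": "2015-02-02",
+#         "version": "2.0.5",
+#         "maintainer": "Ben Finney <ben+python@benfinney.id.au>",
+#         "body": "* Example change description.\n"
+#     }
+#
+# The concrete date and change text above are invented for illustration.
+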
+from __future__ import (absolute_import, unicode_literals)
+
+import sys
+import os
+import io
+import errno
+import json
+import datetime
+import textwrap
+import re
+import functools
+import collections
+import distutils
+import distutils.errors
+import distutils.cmd
+try:
+ # Python 2 has both ‘str’ (bytes) and ‘unicode’ (text).
+ basestring = basestring
+ unicode = unicode
+except NameError:
+ # Python 3 names the Unicode data type ‘str’.
+ basestring = str
+ unicode = str
+
+import setuptools
+import setuptools.command.egg_info
+
+
+def ensure_class_bases_begin_with(namespace, class_name, base_class):
+ """ Ensure the named class's bases start with the base class.
+
+ :param namespace: The namespace containing the class name.
+ :param class_name: The name of the class to alter.
+ :param base_class: The type to be the first base class for the
+ newly created type.
+ :return: ``None``.
+
+ This function is a hack to circumvent a circular dependency:
+ using classes from a module which is not installed at the time
+ this module is imported.
+
+ Call this function after ensuring `base_class` is available,
+ before using the class named by `class_name`.
+
+ """
+ existing_class = namespace[class_name]
+ assert isinstance(existing_class, type)
+
+ bases = list(existing_class.__bases__)
+ if base_class is bases[0]:
+ # Already bound to a type with the right bases.
+ return
+ bases.insert(0, base_class)
+
+ new_class_namespace = existing_class.__dict__.copy()
+ # Type creation will assign the correct ‘__dict__’ attribute.
+ del new_class_namespace['__dict__']
+
+ metaclass = existing_class.__metaclass__
+ new_class = metaclass(class_name, tuple(bases), new_class_namespace)
+
+ namespace[class_name] = new_class
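+
+# Example of the intended call pattern (the same call is made further below
+# once ‘docutils’ is importable; repeated here only to make the hack
+# concrete):
+#
+#     import docutils.writers
+#     ensure_class_bases_begin_with(
+#         globals(), str('VersionInfoWriter'), docutils.writers.Writer)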
+
+
+class VersionInfoWriter(object):
+ """ Docutils writer to produce a version info JSON data stream. """
+
+ # This class needs its base class to be a class from `docutils`.
+ # But that would create a circular dependency: Setuptools cannot
+ # ensure `docutils` is available before importing this module.
+ #
+ # Use `ensure_class_bases_begin_with` after importing `docutils`, to
+ # re-bind the `VersionInfoWriter` name to a new type that inherits
+ # from `docutils.writers.Writer`.
+
+ __metaclass__ = type
+
+ supported = ['version_info']
+ """ Formats this writer supports. """
+
+ def __init__(self):
+ super(VersionInfoWriter, self).__init__()
+ self.translator_class = VersionInfoTranslator
+
+ def translate(self):
+ visitor = self.translator_class(self.document)
+ self.document.walkabout(visitor)
+ self.output = visitor.astext()
+
+
+rfc822_person_regex = re.compile(
+ "^(?P<name>[^<]+) <(?P<email>[^>]+)>$")
+
+class ChangeLogEntry:
+ """ An individual entry from the ‘ChangeLog’ document. """
+
+ __metaclass__ = type
+
+ field_names = [
+ 'release_date',
+ 'version',
+ 'maintainer',
+ 'body',
+ ]
+
+ date_format = "%Y-%m-%d"
+ default_version = "UNKNOWN"
+ default_release_date = "UNKNOWN"
+
+ def __init__(
+ self,
+ release_date=default_release_date, version=default_version,
+ maintainer=None, body=None):
+ self.validate_release_date(release_date)
+ self.release_date = release_date
+
+ self.version = version
+
+ self.validate_maintainer(maintainer)
+ self.maintainer = maintainer
+ self.body = body
+
+ @classmethod
+ def validate_release_date(cls, value):
+ """ Validate the `release_date` value.
+
+ :param value: The prospective `release_date` value.
+ :return: ``None`` if the value is valid.
+ :raises ValueError: If the value is invalid.
+
+ """
+ if value in ["UNKNOWN", "FUTURE"]:
+ # A valid non-date value.
+ return None
+
+ # Raises `ValueError` if parse fails.
+ datetime.datetime.strptime(value, ChangeLogEntry.date_format)
+
+ @classmethod
+ def validate_maintainer(cls, value):
+ """ Validate the `maintainer` value.
+
+ :param value: The prospective `maintainer` value.
+ :return: ``None`` if the value is valid.
+ :raises ValueError: If the value is invalid.
+
+ """
+ valid = False
+
+ if value is None:
+ valid = True
+ elif rfc822_person_regex.search(value):
+ valid = True
+
+ if not valid:
+            raise ValueError(
+                "Not a valid person specification {value!r}".format(
+                    value=value))
+ else:
+ return None
+
+ @classmethod
+ def make_ordered_dict(cls, fields):
+ """ Make an ordered dict of the fields. """
+ result = collections.OrderedDict(
+ (name, fields[name])
+ for name in cls.field_names)
+ return result
+
+ def as_version_info_entry(self):
+ """ Format the changelog entry as a version info entry. """
+ fields = vars(self)
+ entry = self.make_ordered_dict(fields)
+
+ return entry
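+
+# Small illustration (not in the upstream source; values invented) of how an
+# entry behaves.  The maintainer must match ‘rfc822_person_regex’ above,
+# i.e. “Name <email>”:
+#
+#     entry = ChangeLogEntry(
+#             release_date="2015-02-02", version="2.0.5",
+#             maintainer="Ben Finney <ben+python@benfinney.id.au>", body="")
+#     entry.as_version_info_entry()
+#     # OrderedDict([('release_date', '2015-02-02'), ('version', '2.0.5'),
+#     #         ('maintainer', 'Ben Finney <ben+python@benfinney.id.au>'),
+#     #         ('body', '')])
+#
+#     ChangeLogEntry(maintainer="not a person spec")    # raises ValueError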
+
+
+class InvalidFormatError(ValueError):
+ """ Raised when the document is not a valid ‘ChangeLog’ document. """
+
+
+class VersionInfoTranslator(object):
+ """ Translator from document nodes to a version info stream. """
+
+ # This class needs its base class to be a class from `docutils`.
+ # But that would create a circular dependency: Setuptools cannot
+ # ensure `docutils` is available before importing this module.
+ #
+ # Use `ensure_class_bases_begin_with` after importing `docutils`,
+ # to re-bind the `VersionInfoTranslator` name to a new type that
+ # inherits from `docutils.nodes.SparseNodeVisitor`.
+
+ __metaclass__ = type
+
+ wrap_width = 78
+ bullet_text = "* "
+
+ attr_convert_funcs_by_attr_name = {
+ 'released': ('release_date', unicode),
+ 'version': ('version', unicode),
+ 'maintainer': ('maintainer', unicode),
+ }
+
+ def __init__(self, document):
+ super(VersionInfoTranslator, self).__init__(document)
+ self.settings = document.settings
+ self.current_section_level = 0
+ self.current_field_name = None
+ self.content = []
+ self.indent_width = 0
+ self.initial_indent = ""
+ self.subsequent_indent = ""
+ self.current_entry = None
+
+ # Docutils is not available when this class is defined.
+ # Get the `docutils` module dynamically.
+ self._docutils = sys.modules['docutils']
+
+ def astext(self):
+ """ Return the translated document as text. """
+ text = json.dumps(self.content, indent=4)
+ return text
+
+ def append_to_current_entry(self, text):
+ if self.current_entry is not None:
+ if self.current_entry.body is not None:
+ self.current_entry.body += text
+
+ def visit_Text(self, node):
+ raw_text = node.astext()
+ text = textwrap.fill(
+ raw_text,
+ width=self.wrap_width,
+ initial_indent=self.initial_indent,
+ subsequent_indent=self.subsequent_indent)
+ self.append_to_current_entry(text)
+
+ def depart_Text(self, node):
+ pass
+
+ def visit_comment(self, node):
+ raise self._docutils.nodes.SkipNode
+
+ def visit_field_body(self, node):
+ field_list_node = node.parent.parent
+ if not isinstance(field_list_node, self._docutils.nodes.field_list):
+ raise InvalidFormatError(
+ "Unexpected field within {node!r}".format(
+ node=field_list_node))
+ (attr_name, convert_func) = self.attr_convert_funcs_by_attr_name[
+ self.current_field_name]
+ attr_value = convert_func(node.astext())
+ setattr(self.current_entry, attr_name, attr_value)
+
+ def depart_field_body(self, node):
+ pass
+
+ def visit_field_list(self, node):
+ pass
+
+ def depart_field_list(self, node):
+ self.current_field_name = None
+ self.current_entry.body = ""
+
+ def visit_field_name(self, node):
+ field_name = node.astext()
+ if self.current_section_level == 1:
+ # At a top-level section.
+ if field_name.lower() not in ["released", "maintainer"]:
+ raise InvalidFormatError(
+ "Unexpected field name {name!r}".format(name=field_name))
+ self.current_field_name = field_name.lower()
+
+ def depart_field_name(self, node):
+ pass
+
+ def visit_bullet_list(self, node):
+ self.current_context = []
+
+ def depart_bullet_list(self, node):
+ self.current_entry.changes = self.current_context
+ self.current_context = None
+
+ def adjust_indent_width(self, delta):
+ self.indent_width += delta
+ self.subsequent_indent = " " * self.indent_width
+ self.initial_indent = self.subsequent_indent
+
+ def visit_list_item(self, node):
+ indent_delta = +len(self.bullet_text)
+ self.adjust_indent_width(indent_delta)
+ self.initial_indent = self.subsequent_indent[:-indent_delta]
+ self.append_to_current_entry(self.initial_indent + self.bullet_text)
+
+ def depart_list_item(self, node):
+ indent_delta = +len(self.bullet_text)
+ self.adjust_indent_width(-indent_delta)
+ self.append_to_current_entry("\n")
+
+ def visit_section(self, node):
+ self.current_section_level += 1
+ if self.current_section_level == 1:
+ # At a top-level section.
+ self.current_entry = ChangeLogEntry()
+ else:
+ raise InvalidFormatError(
+ "Subsections not implemented for this writer")
+
+ def depart_section(self, node):
+ self.current_section_level -= 1
+ self.content.append(
+ self.current_entry.as_version_info_entry())
+ self.current_entry = None
+
+ _expected_title_word_length = len("Version FOO".split(" "))
+
+ def depart_title(self, node):
+ title_text = node.astext()
+ # At a top-level section.
+ words = title_text.split(" ")
+ version = None
+ if len(words) != self._expected_title_word_length:
+ raise InvalidFormatError(
+ "Unexpected title text {text!r}".format(text=title_text))
+ if words[0].lower() not in ["version"]:
+ raise InvalidFormatError(
+ "Unexpected title text {text!r}".format(text=title_text))
+ version = words[-1]
+ self.current_entry.version = version
+
+
+def changelog_to_version_info_collection(infile):
+ """ Render the ‘ChangeLog’ document to a version info collection.
+
+ :param infile: A file-like object containing the changelog.
+ :return: The serialised JSON data of the version info collection.
+
+ """
+
+ # Docutils is not available when Setuptools needs this module, so
+ # delay the imports to this function instead.
+ import docutils.core
+ import docutils.nodes
+ import docutils.writers
+
+ ensure_class_bases_begin_with(
+ globals(), str('VersionInfoWriter'), docutils.writers.Writer)
+ ensure_class_bases_begin_with(
+ globals(), str('VersionInfoTranslator'),
+ docutils.nodes.SparseNodeVisitor)
+
+ writer = VersionInfoWriter()
+ settings_overrides = {
+ 'doctitle_xform': False,
+ }
+ version_info_json = docutils.core.publish_string(
+ infile.read(), writer=writer,
+ settings_overrides=settings_overrides)
+
+ return version_info_json
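+
+# Usage sketch (the file name is illustrative); this mirrors what
+# ‘generate_version_info_from_changelog’ below does with the result:
+#
+#     with io.open("ChangeLog", 'rt', encoding="utf-8") as infile:
+#         versions_json = changelog_to_version_info_collection(infile)
+#     versions = json.loads(versions_json.decode('utf-8'))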
+
+
+try:
+ lru_cache = functools.lru_cache
+except AttributeError:
+ # Python < 3.2 does not have the `functools.lru_cache` function.
+ # Not essential, so replace it with a no-op.
+ lru_cache = lambda maxsize=None, typed=False: lambda func: func
+
+
+@lru_cache(maxsize=128)
+def generate_version_info_from_changelog(infile_path):
+ """ Get the version info for the latest version in the changelog.
+
+ :param infile_path: Filesystem path to the input changelog file.
+ :return: The generated version info mapping; or ``None`` if the
+ file cannot be read.
+
+ The document is explicitly opened as UTF-8 encoded text.
+
+ """
+ version_info = collections.OrderedDict()
+
+ versions_all_json = None
+ try:
+ with io.open(infile_path, 'rt', encoding="utf-8") as infile:
+ versions_all_json = changelog_to_version_info_collection(infile)
+ except EnvironmentError:
+ # If we can't read the input file, leave the collection empty.
+ pass
+
+ if versions_all_json is not None:
+ versions_all = json.loads(versions_all_json.decode('utf-8'))
+ version_info = get_latest_version(versions_all)
+
+ return version_info
+
+
+def get_latest_version(versions):
+ """ Get the latest version from a collection of changelog entries.
+
+ :param versions: A collection of mappings for changelog entries.
+ :return: An ordered mapping of fields for the latest version,
+ if `versions` is non-empty; otherwise, an empty mapping.
+
+ """
+ version_info = collections.OrderedDict()
+
+ versions_by_release_date = {
+ item['release_date']: item
+ for item in versions}
+ if versions_by_release_date:
+ latest_release_date = max(versions_by_release_date.keys())
+ version_info = ChangeLogEntry.make_ordered_dict(
+ versions_by_release_date[latest_release_date])
+
+ return version_info
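+
+# Note: the latest entry is chosen by ‘max()’ over the ‘release_date’
+# strings, which is correct because ISO 8601 dates (YYYY-MM-DD) sort
+# lexicographically in chronological order.  For example (invented data):
+#
+#     get_latest_version([
+#             {'release_date': '2014-12-23', 'version': '2.0.1',
+#                 'maintainer': None, 'body': None},
+#             {'release_date': '2015-02-02', 'version': '2.0.5',
+#                 'maintainer': None, 'body': None},
+#             ])['version']
+#     # -> '2.0.5'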
+
+
+def serialise_version_info_from_mapping(version_info):
+ """ Generate the version info serialised data.
+
+ :param version_info: Mapping of version info items.
+ :return: The version info serialised to JSON.
+
+ """
+ content = json.dumps(version_info, indent=4)
+
+ return content
+
+
+changelog_filename = "ChangeLog"
+
+def get_changelog_path(distribution, filename=changelog_filename):
+ """ Get the changelog file path for the distribution.
+
+ :param distribution: The distutils.dist.Distribution instance.
+ :param filename: The base filename of the changelog document.
+ :return: Filesystem path of the changelog document, or ``None``
+ if not discoverable.
+
+ """
+ setup_dirname = os.path.dirname(distribution.script_name)
+ filepath = os.path.join(setup_dirname, filename)
+
+ return filepath
+
+
+def has_changelog(command):
+ """ Return ``True`` iff the distribution's changelog file exists. """
+ result = False
+
+ changelog_path = get_changelog_path(command.distribution)
+ if changelog_path is not None:
+ if os.path.exists(changelog_path):
+ result = True
+
+ return result
+
+
+class EggInfoCommand(setuptools.command.egg_info.egg_info, object):
+ """ Custom ‘egg_info’ command for this distribution. """
+
+ sub_commands = ([
+ ('write_version_info', has_changelog),
+ ] + setuptools.command.egg_info.egg_info.sub_commands)
+
+ def run(self):
+ """ Execute this command. """
+ super(EggInfoCommand, self).run()
+
+ for command_name in self.get_sub_commands():
+ self.run_command(command_name)
+
+
+version_info_filename = "version_info.json"
+
+class WriteVersionInfoCommand(EggInfoCommand, object):
+ """ Setuptools command to serialise version info metadata. """
+
+ user_options = ([
+ ("changelog-path=", None,
+ "Filesystem path to the changelog document."),
+ ("outfile-path=", None,
+ "Filesystem path to the version info file."),
+ ] + EggInfoCommand.user_options)
+
+ def initialize_options(self):
+ """ Initialise command options to defaults. """
+ super(WriteVersionInfoCommand, self).initialize_options()
+ self.changelog_path = None
+ self.outfile_path = None
+
+ def finalize_options(self):
+ """ Finalise command options before execution. """
+ self.set_undefined_options(
+ 'build',
+ ('force', 'force'))
+
+ super(WriteVersionInfoCommand, self).finalize_options()
+
+ if self.changelog_path is None:
+ self.changelog_path = get_changelog_path(self.distribution)
+
+ if self.outfile_path is None:
+ egg_dir = self.egg_info
+ self.outfile_path = os.path.join(egg_dir, version_info_filename)
+
+ def run(self):
+ """ Execute this command. """
+ version_info = generate_version_info_from_changelog(self.changelog_path)
+ content = serialise_version_info_from_mapping(version_info)
+ self.write_file("version info", self.outfile_path, content)
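+
+# Sketch of how a ‘setup.py’ could wire these commands in; the ‘cmdclass’
+# mapping below is shown for illustration and is not taken from this
+# distribution's own setup script:
+#
+#     import version
+#
+#     setup(
+#             ...,
+#             cmdclass={
+#                 'egg_info': version.EggInfoCommand,
+#                 'write_version_info': version.WriteVersionInfoCommand,
+#                 },
+#             )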
+
+
+# Local variables:
+# coding: utf-8
+# mode: python
+# End:
+# vim: fileencoding=utf-8 filetype=python :
diff --git a/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/rednose.py b/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/rednose.py
new file mode 100755
index 00000000..1ff892ad
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/rednose.py
@@ -0,0 +1,387 @@
+# Copyright (c) 2009, Tim Cuthbertson
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the organisation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
+# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import print_function
+import os
+import sys
+import linecache
+import re
+import time
+
+import nose
+
+import termstyle
+
+failure = 'FAILED'
+error = 'ERROR'
+success = 'passed'
+skip = 'skipped'
+line_length = 77
+
+PY3 = sys.version_info[0] >= 3
+if PY3:
+ to_unicode = str
+else:
+ def to_unicode(s):
+ try:
+ return unicode(s)
+ except UnicodeDecodeError:
+ return unicode(repr(str(s)))
+
+BLACKLISTED_WRITERS = [
+ 'nose[\\/]result\\.pyc?$',
+ 'unittest[\\/]runner\\.pyc?$'
+]
+REDNOSE_DEBUG = False
+
+
+class RedNose(nose.plugins.Plugin):
+ env_opt = 'NOSE_REDNOSE'
+ env_opt_color = 'NOSE_REDNOSE_COLOR'
+ score = 199 # just under the `coverage` module
+
+ def __init__(self, *args):
+ super(RedNose, self).__init__(*args)
+ self.reports = []
+ self.error = self.success = self.failure = self.skip = 0
+ self.total = 0
+ self.stream = None
+ self.verbose = False
+ self.enabled = False
+ self.tree = False
+
+ def options(self, parser, env=os.environ):
+ global REDNOSE_DEBUG
+ rednose_on = bool(env.get(self.env_opt, False))
+ rednose_color = env.get(self.env_opt_color, 'auto')
+ REDNOSE_DEBUG = bool(env.get('REDNOSE_DEBUG', False))
+
+ parser.add_option(
+ "--rednose",
+ action="store_true",
+ default=rednose_on,
+ dest="rednose",
+ help="enable colour output (alternatively, set $%s=1)" % (self.env_opt,)
+ )
+ parser.add_option(
+ "--no-color",
+ action="store_false",
+ dest="rednose",
+ help="disable colour output"
+ )
+ parser.add_option(
+ "--force-color",
+ action="store_const",
+ dest='rednose_color',
+ default=rednose_color,
+ const='force',
+ help="force colour output when not using a TTY (alternatively, set $%s=force)" % (self.env_opt_color,)
+ )
+ parser.add_option(
+ "--immediate",
+ action="store_true",
+ default=False,
+ help="print errors and failures as they happen, as well as at the end"
+ )
+
+ def configure(self, options, conf):
+ if options.rednose:
+ self.enabled = True
+ termstyle_init = {
+ 'force': termstyle.enable,
+ 'off': termstyle.disable
+ }.get(options.rednose_color, termstyle.auto)
+ termstyle_init()
+
+ self.immediate = options.immediate
+ self.verbose = options.verbosity >= 2
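+
+	# Typical ways to enable the plugin, per the option help above:
+	#
+	#     nosetests --rednose
+	#     nosetests --rednose --force-color    # force colour when piped
+	#     NOSE_REDNOSE=1 nosetests             # via the environment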
+
+ def begin(self):
+ self.start_time = time.time()
+ self._in_test = False
+
+ def _format_test_name(self, test):
+ return test.shortDescription() or to_unicode(test)
+
+ def prepareTestResult(self, result):
+ result.stream = FilteringStream(self.stream, BLACKLISTED_WRITERS)
+
+ def beforeTest(self, test):
+ self._in_test = True
+ if self.verbose:
+ self._out(self._format_test_name(test) + ' ... ')
+
+ def afterTest(self, test):
+ if self._in_test:
+ self.addSkip()
+
+ def _print_test(self, type_, color):
+ self.total += 1
+ if self.verbose:
+ self._outln(color(type_))
+ else:
+ if type_ == failure:
+ short_ = 'F'
+ elif type_ == error:
+ short_ = 'X'
+ elif type_ == skip:
+ short_ = '-'
+ else:
+ short_ = '.'
+ self._out(color(short_))
+ if self.total % line_length == 0:
+ self._outln()
+ self._in_test = False
+
+ def _add_report(self, report):
+ failure_type, test, err = report
+ self.reports.append(report)
+ if self.immediate:
+ self._outln()
+ self._report_test(len(self.reports), *report)
+
+ def addFailure(self, test, err):
+ self.failure += 1
+ self._add_report((failure, test, err))
+ self._print_test(failure, termstyle.red)
+
+ def addError(self, test, err):
+ if err[0].__name__ == 'SkipTest':
+ self.addSkip(test, err)
+ return
+ self.error += 1
+ self._add_report((error, test, err))
+ self._print_test(error, termstyle.yellow)
+
+ def addSuccess(self, test):
+ self.success += 1
+ self._print_test(success, termstyle.green)
+
+ def addSkip(self, test=None, err=None):
+ self.skip += 1
+ self._print_test(skip, termstyle.blue)
+
+ def setOutputStream(self, stream):
+ self.stream = stream
+
+ def report(self, stream):
+ """report on all registered failures and errors"""
+ self._outln()
+ if self.immediate:
+ for x in range(0, 5):
+ self._outln()
+ report_num = 0
+ if len(self.reports) > 0:
+ for report_num, report in enumerate(self.reports):
+ self._report_test(report_num + 1, *report)
+ self._outln()
+
+ self._summarize()
+
+ def _summarize(self):
+ """summarize all tests - the number of failures, errors and successes"""
+ self._line(termstyle.black)
+ self._out("%s test%s run in %0.1f seconds" % (
+ self.total,
+ self._plural(self.total),
+ time.time() - self.start_time))
+ if self.total > self.success:
+ self._outln(". ")
+ additionals = []
+ if self.failure > 0:
+ additionals.append(termstyle.red("%s FAILED" % (
+ self.failure,)))
+ if self.error > 0:
+ additionals.append(termstyle.yellow("%s error%s" % (
+ self.error,
+ self._plural(self.error) )))
+ if self.skip > 0:
+ additionals.append(termstyle.blue("%s skipped" % (
+ self.skip)))
+ self._out(', '.join(additionals))
+
+ self._out(termstyle.green(" (%s test%s passed)" % (
+ self.success,
+ self._plural(self.success) )))
+ self._outln()
+
+ def _report_test(self, report_num, type_, test, err):
+ """report the results of a single (failing or errored) test"""
+ self._line(termstyle.black)
+ self._out("%s) " % (report_num))
+ if type_ == failure:
+ color = termstyle.red
+ self._outln(color('FAIL: %s' % (self._format_test_name(test),)))
+ else:
+ color = termstyle.yellow
+ self._outln(color('ERROR: %s' % (self._format_test_name(test),)))
+
+ exc_type, exc_instance, exc_trace = err
+
+ self._outln()
+ self._outln(self._fmt_traceback(exc_trace))
+ self._out(color(' ', termstyle.bold(color(exc_type.__name__)), ": "))
+ self._outln(self._fmt_message(exc_instance, color))
+ self._outln()
+
+ def _relative_path(self, path):
+ """
+ If path is a child of the current working directory, the relative
+ path is returned surrounded by bold xterm escape sequences.
+ If path is not a child of the working directory, path is returned
+ """
+ try:
+ here = os.path.abspath(os.path.realpath(os.getcwd()))
+ fullpath = os.path.abspath(os.path.realpath(path))
+ except OSError:
+ return path
+ if fullpath.startswith(here):
+ return termstyle.bold(fullpath[len(here)+1:])
+ return path
+
+ def _file_line(self, tb):
+ """formats the file / lineno / function line of a traceback element"""
+ prefix = "file://"
+ prefix = ""
+
+ f = tb.tb_frame
+ if '__unittest' in f.f_globals:
+ # this is the magical flag that prevents unittest internal
+ # code from junking up the stacktrace
+ return None
+
+ filename = f.f_code.co_filename
+ lineno = tb.tb_lineno
+ linecache.checkcache(filename)
+ function_name = f.f_code.co_name
+
+ line_contents = linecache.getline(filename, lineno, f.f_globals).strip()
+
+ return " %s line %s in %s\n %s" % (
+ termstyle.blue(prefix, self._relative_path(filename)),
+ lineno,
+ termstyle.cyan(function_name),
+ line_contents)
+
+ def _fmt_traceback(self, trace):
+ """format a traceback"""
+ ret = []
+ ret.append(termstyle.default(" Traceback (most recent call last):"))
+ current_trace = trace
+ while current_trace is not None:
+ line = self._file_line(current_trace)
+ if line is not None:
+ ret.append(line)
+ current_trace = current_trace.tb_next
+ return '\n'.join(ret)
+
+ def _fmt_message(self, exception, color):
+ orig_message_lines = to_unicode(exception).splitlines()
+
+ if len(orig_message_lines) == 0:
+ return ''
+ message_lines = [color(orig_message_lines[0])]
+ for line in orig_message_lines[1:]:
+ match = re.match('^---.* begin captured stdout.*----$', line)
+ if match:
+ color = None
+ message_lines.append('')
+ line = ' ' + line
+ message_lines.append(color(line) if color is not None else line)
+ return '\n'.join(message_lines)
+
+ def _out(self, msg='', newline=False):
+ self.stream.write(msg)
+ if newline:
+ self.stream.write('\n')
+
+ def _outln(self, msg=''):
+ self._out(msg, True)
+
+ def _plural(self, num):
+ return '' if num == 1 else 's'
+
+ def _line(self, color=termstyle.reset, char='-'):
+ """
+ print a line of separator characters (default '-')
+ in the given colour (default black)
+ """
+ self._outln(color(char * line_length))
+
+
+import traceback
+import sys
+
+
+class FilteringStream(object):
+ """
+ A wrapper for a stream that will filter
+ calls to `write` and `writeln` to ignore calls
+ from blacklisted callers
+ (implemented as a regex on their filename, according
+ to traceback.extract_stack())
+
+ It's super hacky, but there seems to be no other way
+ to suppress nose's default output
+ """
+ def __init__(self, stream, excludes):
+ self.__stream = stream
+ self.__excludes = list(map(re.compile, excludes))
+
+ def __should_filter(self):
+ try:
+ stack = traceback.extract_stack(limit=3)[0]
+ filename = stack[0]
+ pattern_matches_filename = lambda pattern: pattern.search(filename)
+ should_filter = any(map(pattern_matches_filename, self.__excludes))
+ if REDNOSE_DEBUG:
+				print("REDNOSE_DEBUG: got write call from %s, should_filter = %s" % (
+					filename, should_filter), file=sys.stderr)
+ return should_filter
+		except Exception as e:
+ if REDNOSE_DEBUG:
+ print("\nError in rednose filtering: %s" % (e,), file=sys.stderr)
+ traceback.print_exc(sys.stderr)
+ return False
+
+ def write(self, *a):
+ if self.__should_filter():
+ return
+ return self.__stream.write(*a)
+
+ def writeln(self, *a):
+ if self.__should_filter():
+ return
+ return self.__stream.writeln(*a)
+
+ # pass non-known methods through to self.__stream
+ def __getattr__(self, name):
+ if REDNOSE_DEBUG:
+ print("REDNOSE_DEBUG: getting attr %s" % (name,), file=sys.stderr)
+ return getattr(self.__stream, name)
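+
+# Illustrative note: RedNose installs this wrapper from ‘prepareTestResult’
+# above, effectively:
+#
+#     result.stream = FilteringStream(self.stream, BLACKLISTED_WRITERS)
+#
+# so writes originating from the blacklisted nose/unittest modules are
+# dropped while all other writes pass through to the wrapped stream.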
diff --git a/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/setup.py b/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/setup.py
new file mode 100755
index 00000000..34cded4b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/setup.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+## NOTE: ##
+## this setup.py was generated by zero2pypi:
+## http://gfxmonk.net/dist/0install/zero2pypi.xml
+
+from setuptools import *
+setup(
+ packages = find_packages(exclude=['test', 'test.*']),
+ description='coloured output for nosetests',
+ entry_points={'nose.plugins.0.10': ['NOSETESTS_PLUGINS = rednose:RedNose']},
+ install_requires=['setuptools', 'python-termstyle >=0.1.7'],
+ long_description="\n**Note**: This package has been built automatically by\n`zero2pypi <http://gfxmonk.net/dist/0install/zero2pypi.xml>`_.\nIf possible, you should use the zero-install feed instead:\nhttp://gfxmonk.net/dist/0install/rednose.xml\n\n----------------\n\n=========\nrednose\n=========\n\nrednose is a `nosetests`_\nplugin for adding colour (and readability) to nosetest console results.\n\nInstallation:\n-------------\n::\n\n\teasy_install rednose\n\t\nor from the source::\n\n\t./setup.py develop\n\nUsage:\n------\n::\n\n\tnosetests --rednose\n\nor::\n\n\texport NOSE_REDNOSE=1\n\tnosetests\n\nRednose by default uses auto-colouring, which will only use\ncolour if you're running it on a terminal (i.e not piping it\nto a file). To control colouring, use one of::\n\n\tnosetests --rednose --force-color\n\tnosetests --no-color\n\n(you can also control this by setting the environment variable NOSE_REDNOSE_COLOR to 'force' or 'no')\n\n.. _nosetests: http://somethingaboutorange.com/mrl/projects/nose/\n",
+ name='rednose',
+ py_modules=['rednose'],
+ url='http://gfxmonk.net/dist/0install/rednose.xml',
+ version='0.4.1',
+classifiers=[
+ "License :: OSI Approved :: BSD License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Software Development :: Testing",
+ ],
+ keywords='test nosetests nose nosetest output colour console',
+ license='BSD',
+)
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/MANIFEST.in b/scripts/automation/trex_control_plane/python_lib/termstyle/MANIFEST.in
new file mode 100755
index 00000000..14dafaf3
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/MANIFEST.in
@@ -0,0 +1 @@
+include test*
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/Makefile b/scripts/automation/trex_control_plane/python_lib/termstyle/Makefile
new file mode 100755
index 00000000..02151dca
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/Makefile
@@ -0,0 +1,9 @@
+0:
+ mkzero-gfxmonk \
+ -v `cat VERSION` \
+ -p termstyle.py \
+ -p setup.py \
+ python-termstyle.xml
+
+pypi:
+ ./setup.py register sdist upload
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/README.rst b/scripts/automation/trex_control_plane/python_lib/termstyle/README.rst
new file mode 100755
index 00000000..f3dfa0ab
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/README.rst
@@ -0,0 +1,82 @@
+=========
+termstyle
+=========
+
+termstyle is a simple python library for adding coloured output to
+terminal (console) programs. The definitions come from ECMA-048_, the
+"Control Functions for Coded Character Sets" standard.
+
+Installation:
+-------------
+
+I thoroughly recommend using the zero-install feed (see the project homepage) to manage your dependencies if at all possible. zero-install_ provides a much better system than pip or easy_install, and works with absolutely any language and allows decentralised package management that requires no special privileges to install.
+
+Example Usage:
+--------------
+::
+
+ from termstyle import *
+ print "%s:%s" % (red('Hey'), green('how are you?'))
+ print blue('How ', bold('you'), ' doin?')
+
+or, you can use a colour just as a string::
+
+ print "%sBlue!%s" % (blue, reset)
+
+Styles:
+-------
+::
+
+ reset or default (no colour / style)
+
+colour::
+
+ black
+ red
+ green
+ yellow
+ blue
+ magenta
+ cyan
+ white
+
+background colour::
+
+ bg_black
+ bg_red
+ bg_green
+ bg_yellow
+ bg_blue
+ bg_magenta
+ bg_cyan
+ bg_white
+ bg_default
+
+In terminals supporting transparency ``bg_default`` is often used to set
+the background to transparent [#]_.
+
+weight::
+
+ bold
+ inverted
+
+style::
+
+ italic
+ underscore
+
+Controls:
+---------
+::
+
+ auto() - sets colouring on only if sys.stdout is a terminal
+	disable() - disable colours
+ enable() - enable colours
+
+.. [#] Supporting terminals include rxvt-unicode_, and Eterm_.
+
+.. _ECMA-048: http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-048.pdf
+.. _rxvt-unicode: http://software.schmorp.de/
+.. _Eterm: http://www.eterm.org/
+.. _zero-install: http://0install.net/
+
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/VERSION b/scripts/automation/trex_control_plane/python_lib/termstyle/VERSION
new file mode 100755
index 00000000..345f8cc0
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/VERSION
@@ -0,0 +1 @@
+0.1.10
\ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/python-termstyle.xml b/scripts/automation/trex_control_plane/python_lib/termstyle/python-termstyle.xml
new file mode 100755
index 00000000..b6b08bd7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/python-termstyle.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type='text/xsl' href='interface.xsl'?>
+<interface xmlns="http://zero-install.sourceforge.net/2004/injector/interface" uri="http://gfxmonk.net/dist/0install/python-termstyle.xml">
+ <name>termstyle</name>
+ <summary>console colouring for python</summary>
+ <homepage>http://github.com/gfxmonk/termstyle</homepage>
+ <description>
+=========
+termstyle
+=========
+
+termstyle is a simple python library for adding coloured output to
+terminal (console) programs. The definitions come from ECMA-048_, the
+"Control Functions for Coded Character Sets" standard.
+
+Installation:
+-------------
+
+I thoroughly recommend using the zero-install feed (see the project homepage) to manage your dependencies if at all possible. zero-install_ provides a much better system than pip or easy_install, and works with absolutely any language and allows decentralised package management that requires no special privileges to install.
+
+Example Usage:
+--------------
+::
+
+ from termstyle import *
+ print "%s:%s" % (red('Hey'), green('how are you?'))
+ print blue('How ', bold('you'), ' doin?')
+
+or, you can use a colour just as a string::
+
+ print "%sBlue!%s" % (blue, reset)
+
+Styles:
+-------
+::
+
+ reset or default (no colour / style)
+
+colour::
+
+ black
+ red
+ green
+ yellow
+ blue
+ magenta
+ cyan
+ white
+
+background colour::
+
+ bg_black
+ bg_red
+ bg_green
+ bg_yellow
+ bg_blue
+ bg_magenta
+ bg_cyan
+ bg_white
+ bg_default
+
+In terminals supporting transparency ``bg_default`` is often used to set
+the background to transparent [#]_.
+
+weight::
+
+ bold
+ inverted
+
+style::
+
+ italic
+ underscore
+
+Controls:
+---------
+::
+
+ auto() - sets colouring on only if sys.stdout is a terminal
+	disable() - disable colours
+ enable() - enable colours
+
+.. [#] Supporting terminals include rxvt-unicode_, and Eterm_.
+
+.. _ECMA-048: http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-048.pdf
+.. _rxvt-unicode: http://software.schmorp.de/
+.. _Eterm: http://www.eterm.org/
+.. _zero-install: http://0install.net/
+
+ </description>
+ <pypi-extra xmlns="http://gfxmonk.net/dist/0install"><![CDATA[
+ classifiers=[
+ "License :: OSI Approved :: BSD License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Intended Audience :: Developers",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ keywords='output colour console ansi',
+ license='BSD',
+ ]]></pypi-extra>
+ <rich-description xmlns="http://gfxmonk.net/dist/0install">
+ <div xmlns="http://www.w3.org/1999/xhtml">
+ <h1 id="termstyle">termstyle</h1>
+ <p>termstyle is a simple python library for adding coloured output to terminal (console) programs. The definitions come from <a href="http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-048.pdf">ECMA-048</a>, the "Control Functions for Coded Character Sets" standard.</p>
+ <h2 id="installation">Installation:</h2>
+ <p>I thoroughly recommend using the zero-install feed (see the project homepage) to manage your dependencies if at all possible. <a href="http://0install.net/">zero-install</a> provides a much better system than pip or easy_install, and works with absolutely any language and allows decentralised package management that requires no special privileges to install.</p>
+ <h2 id="example-usage">Example Usage:</h2>
+ <pre><code>from termstyle import *
+print "%s:%s" % (red('Hey'), green('how are you?'))
+print blue('How ', bold('you'), ' doin?')
+</code></pre>
+ <p>or, you can use a colour just as a string:</p>
+ <pre><code>print "%sBlue!%s" % (blue, reset)
+</code></pre>
+ <h2 id="styles">Styles:</h2>
+ <pre><code>reset or default (no colour / style)
+</code></pre>
+ <p>colour:</p>
+ <pre><code>black
+red
+green
+yellow
+blue
+magenta
+cyan
+white
+</code></pre>
+ <p>background colour:</p>
+ <pre><code>bg_black
+bg_red
+bg_green
+bg_yellow
+bg_blue
+bg_magenta
+bg_cyan
+bg_white
+bg_default
+</code></pre>
+ <p>In terminals supporting transparency <code>bg_default</code> is often used to set the background to transparent [#]_.</p>
+ <p>weight:</p>
+ <pre><code>bold
+inverted
+</code></pre>
+ <p>style:</p>
+ <pre><code>italic
+underscore
+</code></pre>
+ <h2 id="controls">Controls:</h2>
+ <pre><code>auto() - sets colouring on only if sys.stdout is a terminal
+disable() - disable colours
+enable() - enable colours
+</code></pre>
+ </div>
+ </rich-description>
+ <group>
+ <environment insert="" mode="prepend" name="PYTHONPATH"/>
+ <implementation id="sha1new=1f5b66dcd48fa38740aa98bb2796f0adb7eeab04" released="2010-07-18" version="0.1.5">
+ <manifest-digest sha256="9b0ff9b07c1d424484b5aa20fc2db7827a372d84515e8caf46b005f54a034e07"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.2.tgz" size="3114"/>
+ </implementation>
+ <implementation id="sha1new=9f728d8ba0eb5379c907b85a9662c54935f1e0ca" released="2010-11-05" version="0.1.6">
+ <manifest-digest sha256="a7283c6fe262bc88ed270a91304da243c474d32353c2c9d6c6f4003bacfce27c"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.6.tgz" size="3607"/>
+ </implementation>
+ <implementation id="sha1new=93d781ad4b723d154462f990254f78d3bab7456e" released="2011-01-29" version="0.1.7">
+ <manifest-digest sha256="31488cd005a3f9182e6738068a5e9ddfd6d28db370d8b0a0b16d5bbc53048665"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.7.tgz" size="4265"/>
+ </implementation>
+ <implementation id="sha1new=ada58698e40a6f11b40dbebaa11f70061b47172d" released="2011-03-15" version="0.1.8">
+ <manifest-digest sha256="cbcee0509b194eec74a5fe923999b505293c5d0a20b3d79203fa6f47a942b24e"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.8.tgz" size="4312"/>
+ </implementation>
+ <implementation id="sha1new=e718d149b78028f3ac39ee4de42e0b7644ad48fb" released="2011-03-16" version="0.1.9">
+ <manifest-digest sha256="ee3dc9b097ac0b7682fab8753a5dab45f0a9c189544c13a369ada8b9e1f4fde8"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.9.tgz" size="2901"/>
+ </implementation>
+ <implementation id="sha1new=0c4adef2eccc7ddcb48264c4bc2d84fcfa1072a4" released="2011-04-30" version="0.1.10">
+ <manifest-digest sha256="df3b9e11077995f515239eb8382acdfa1741c35e867916917cf70ce53e49bd59"/>
+ <archive href="http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.10.tgz" size="2903"/>
+ </implementation>
+ </group>
+</interface>
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/setup.py b/scripts/automation/trex_control_plane/python_lib/termstyle/setup.py
new file mode 100755
index 00000000..69b11cbb
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/setup.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+## NOTE: ##
+## this setup.py was generated by zero2pypi:
+## http://gfxmonk.net/dist/0install/zero2pypi.xml
+
+from setuptools import *
+setup(
+ packages = find_packages(exclude=['test', 'test.*']),
+ install_requires=['setuptools'],
+ version='0.1.10',
+ url='http://gfxmonk.net/dist/0install/python-termstyle.xml',
+ description='console colouring for python',
+ long_description='\n**Note**: This package has been built automatically by\n`zero2pypi <http://gfxmonk.net/dist/0install/zero2pypi.xml>`_.\nIf possible, you should use the zero-install feed instead:\nhttp://gfxmonk.net/dist/0install/python-termstyle.xml\n\n----------------\n\n=========\ntermstyle\n=========\n\ntermstyle is a simple python library for adding coloured output to\nterminal (console) programs. The definitions come from ECMA-048_, the\n"Control Functions for Coded Character Sets" standard.\n\nInstallation:\n-------------\n\nI thoroughly recommend using the zero-install feed (see the project homepage) to manage your dependencies if at all possible. zero-install_ provides a much better system than pip or easy_install, and works with absolutely any language and allows decentralised package management that requires no special privileges to install.\n\nExample Usage:\n--------------\n::\n\n\tfrom termstyle import *\n\tprint "%s:%s" % (red(\'Hey\'), green(\'how are you?\'))\n\tprint blue(\'How \', bold(\'you\'), \' doin?\')\n\nor, you can use a colour just as a string::\n\n\tprint "%sBlue!%s" % (blue, reset)\n\nStyles:\n-------\n::\n\n\treset or default (no colour / style)\n\ncolour::\n\n\tblack\n\tred\n\tgreen\n\tyellow\n\tblue\n\tmagenta\n\tcyan\n\twhite\n\nbackground colour::\n\n\tbg_black\n\tbg_red\n\tbg_green\n\tbg_yellow\n\tbg_blue\n\tbg_magenta\n\tbg_cyan\n\tbg_white\n\tbg_default\n\nIn terminals supporting transparency ``bg_default`` is often used to set\nthe background to transparent [#]_.\n\nweight::\n\n\tbold\n\tinverted\n\nstyle::\n\n\titalic\n\tunderscore\n\nControls:\n---------\n::\n\n\tauto() - sets colouring on only if sys.stdout is a terminal\n\tdisabe() - disable colours\n\tenable() - enable colours\n\n.. [#] Supporting terminals include rxvt-unicode_, and Eterm_.\n\n.. _ECMA-048: http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-048.pdf\n.. _rxvt-unicode: http://software.schmorp.de/\n.. _Eterm: http://www.eterm.org/\n.. _zero-install: http://0install.net/\n',
+ name='python-termstyle',
+ download_url='http://gfxmonk.net/dist/0install/python-termstyle/python-termstyle-0.1.10.tgz',
+ py_modules=['termstyle'],
+classifiers=[
+ "License :: OSI Approved :: BSD License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Intended Audience :: Developers",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ keywords='output colour console ansi',
+ license='BSD',
+)
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/termstyle.py b/scripts/automation/trex_control_plane/python_lib/termstyle/termstyle.py
new file mode 100755
index 00000000..62a3a920
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/termstyle.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Tim Cuthbertson
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the organisation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
+# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+class Style(object):
+ prefix='\x1b['
+ suffix='m'
+ enabled = True
+
+ def __init__(self, on_codes, off_codes = 0):
+ self._on = self.sequence(on_codes)
+ self._off = self.sequence(off_codes)
+
+ def _get_on(self): return self._on if self.enabled else ''
+ def _get_off(self): return self._off if self.enabled else ''
+ on = property(_get_on)
+ off = property(_get_off)
+
+ @classmethod
+ def sequence(cls, codes):
+ wrap_single = lambda code: "%s%s%s" % (cls.prefix, code, cls.suffix)
+ try:
+ return ''.join([wrap_single(code) for code in codes])
+ except TypeError:
+ return wrap_single(codes)
+
+ def __str__(self):
+ if not self.enabled:
+ return ''
+ return self.on
+
+ def __call__(self, *args):
+ contents = ''.join(["%s%s" % (self.on, arg) for arg in args])
+ return "%s%s" % (contents, self.off)
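+
+	# Example of the escape sequences produced when styling is enabled:
+	#
+	#     red = Style(31)
+	#     red.on           # '\x1b[31m'
+	#     red.off          # '\x1b[0m'
+	#     red('error')     # '\x1b[31merror\x1b[0m'
+	#
+	#     blue('How ', bold('you'), ' doin?')
+	#     # each argument is re-prefixed with blue's "on" code, so the text
+	#     # after the nested bold() call is rendered blue again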
+
+
+def auto():
+ """set colouring on if STDOUT is a terminal device, off otherwise"""
+ try:
+ Style.enabled = False
+ Style.enabled = sys.stdout.isatty()
+ except (AttributeError, TypeError):
+ pass
+
+def enable():
+ """force coloured output"""
+ Style.enabled = True
+
+def disable():
+ """disable coloured output"""
+ Style.enabled = False
+
+default = reset = Style(0)
+
+black = Style(30)
+red = Style(31)
+green = Style(32)
+yellow = Style(33)
+blue = Style(34)
+magenta = Style(35)
+cyan = Style(36)
+white = Style(37)
+
+bg_black = Style(40)
+bg_red = Style(41)
+bg_green = Style(42)
+bg_yellow = Style(43)
+bg_blue = Style(44)
+bg_magenta = Style(45)
+bg_cyan = Style(46)
+bg_white = Style(47)
+bg_default = Style(49)
+
+bold = Style(1)
+underscore = Style(4)
+inverted = Style(7)
+italic = Style(3)
+
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/test2.py b/scripts/automation/trex_control_plane/python_lib/termstyle/test2.py
new file mode 100755
index 00000000..2d84c375
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/test2.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python2
+
+from termstyle import *
+
+print green(u"unicod\xe9!")
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/test3.py b/scripts/automation/trex_control_plane/python_lib/termstyle/test3.py
new file mode 100755
index 00000000..861c44f9
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/test3.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python3
+
+from termstyle import *
+
+print(green("unicod\xe9!"))
diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/test_all.sh b/scripts/automation/trex_control_plane/python_lib/termstyle/test_all.sh
new file mode 100755
index 00000000..d28545a9
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/termstyle/test_all.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -ex
+./test2.py
+./test3.py
diff --git a/scripts/automation/trex_control_plane/python_lib/zmq_fedora.tar.gz b/scripts/automation/trex_control_plane/python_lib/zmq_fedora.tar.gz
new file mode 100755
index 00000000..4f36749b
--- /dev/null
+++ b/scripts/automation/trex_control_plane/python_lib/zmq_fedora.tar.gz
Binary files differ
diff --git a/scripts/automation/trex_control_plane/server/CCustomLogger.py b/scripts/automation/trex_control_plane/server/CCustomLogger.py
new file mode 100755
index 00000000..ecf7d519
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/CCustomLogger.py
@@ -0,0 +1,100 @@
+
+import sys
+import os
+import logging
+
+
+def setup_custom_logger(name, log_path = None):
+    # first make sure the path is available
+# if log_path is None:
+# log_path = os.getcwd()+'/trex_log.log'
+# else:
+# directory = os.path.dirname(log_path)
+# if not os.path.exists(directory):
+# os.makedirs(directory)
+ logging.basicConfig(level = logging.INFO,
+ format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
+ datefmt = '%m-%d %H:%M')
+# filename= log_path,
+# filemode= 'w')
+#
+# # define a Handler which writes INFO messages or higher to the sys.stderr
+# consoleLogger = logging.StreamHandler()
+# consoleLogger.setLevel(logging.ERROR)
+# # set a format which is simpler for console use
+# formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
+# # tell the handler to use this format
+# consoleLogger.setFormatter(formatter)
+#
+# # add the handler to the logger
+# logging.getLogger(name).addHandler(consoleLogger)
+
+def setup_daemon_logger (name, log_path = None):
+    # first make sure the path is available
+ logging.basicConfig(level = logging.INFO,
+ format = '%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s',
+ datefmt = '%m-%d %H:%M',
+ filename= log_path,
+ filemode= 'w')
+
+class CustomLogger(object):
+
+ def __init__(self, log_filename):
+ # Store the original stdout and stderr
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ self.stdout_fd = os.dup(sys.stdout.fileno())
+ self.devnull = os.open('/dev/null', os.O_WRONLY)
+ self.log_file = open(log_filename, 'w')
+ self.silenced = False
+ self.pending_log_file_prints = 0
+
+ # silence all prints from stdout
+ def silence(self):
+ os.dup2(self.devnull, sys.stdout.fileno())
+ self.silenced = True
+
+ # restore stdout status
+ def restore(self):
+ sys.stdout.flush()
+ sys.stderr.flush()
+ # Restore normal stdout
+ os.dup2(self.stdout_fd, sys.stdout.fileno())
+ self.silenced = False
+
+ #print a message to the log (both stdout / log file)
+ def log(self, text, force = False, newline = True):
+ self.log_file.write((text + "\n") if newline else text)
+ self.pending_log_file_prints += 1
+
+ if (self.pending_log_file_prints >= 10):
+ self.log_file.flush()
+ self.pending_log_file_prints = 0
+
+ self.console(text, force, newline)
+
+ # print a message to the console alone
+ def console(self, text, force = False, newline = True):
+ _text = (text + "\n") if newline else text
+ # if we are silenced and not forced - go home
+ if self.silenced and not force:
+ return
+
+ if self.silenced:
+ os.write(self.stdout_fd, _text)
+ else:
+ sys.stdout.write(_text)
+
+ sys.stdout.flush()
+
+ # flush
+ def flush(self):
+ sys.stdout.flush()
+ self.log_file.flush()
+
+ def __exit__(self, type, value, traceback):
+ sys.stdout.flush()
+ self.log_file.flush()
+ os.close(self.devnull)
+        self.log_file.close()
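+
+# Minimal usage sketch (the log file path is invented).  The logger keeps a
+# duplicate of the original stdout so console output can be silenced while
+# still being written to the log file:
+#
+#     logger = CustomLogger('/tmp/trex_server.log')
+#     logger.silence()                   # stdout redirected to /dev/null
+#     logger.log("handled request")      # written to the log file only
+#     logger.restore()                   # original stdout restored
+#     logger.log("server ready")         # log file and console again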
diff --git a/scripts/automation/trex_control_plane/server/extended_daemon_runner.py b/scripts/automation/trex_control_plane/server/extended_daemon_runner.py
new file mode 100755
index 00000000..07eedd9f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/extended_daemon_runner.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+
+import outer_packages
+import lockfile
+from daemon import runner,daemon
+from daemon.runner import *
+import os, sys
+from argparse import ArgumentParser
+from trex_server import trex_parser
+try:
+ from python_lib.termstyle import termstyle
+except ImportError:
+ import termstyle
+
+
+
+def daemonize_parser (parser_obj, action_funcs, help_menu):
+ """Update the regular process parser to deal with daemon process options"""
+ parser_obj.description += " (as a daemon process)"
+ parser_obj.usage = None
+ parser_obj.add_argument("action", choices = action_funcs,
+ action="store", help = help_menu )
+ return
+
+
+class ExtendedDaemonRunner(runner.DaemonRunner):
+ """ Controller for a callable running in a separate background process.
+
+ The first command-line argument is the action to take:
+
+ * 'start': Become a daemon and call `app.run()`.
+ * 'stop': Exit the daemon process specified in the PID file.
+ * 'restart': Stop, then start.
+
+ """
+
+ help_menu = """Specify action command to be applied on server.
+    (*) start      : start the application as a daemon process.
+    (*) show       : print an updated status of the daemon process (running / not running).
+    (*) stop       : exit the daemon process.
+    (*) restart    : stop, then start the application again as a daemon process.
+ (*) start-live : start the application in live mode (no daemon process).
+ """
+
+ def __init__ (self, app, parser_obj):
+ """ Set up the parameters of a new runner.
+        THIS METHOD INTENTIONALLY DOES NOT INVOKE THE SUPER __init__() METHOD.
+
+ :param app: The application instance; see below.
+ :return: ``None``.
+
+ The `app` argument must have the following attributes:
+
+ * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem paths
+ to open and replace the existing `sys.stdin`, `sys.stdout`,
+ `sys.stderr`.
+
+ * `pidfile_path`: Absolute filesystem path to a file that will
+ be used as the PID file for the daemon. If ``None``, no PID
+ file will be used.
+
+ * `pidfile_timeout`: Used as the default acquisition timeout
+ value supplied to the runner's PID lock file.
+
+ * `run`: Callable that will be invoked when the daemon is
+ started.
+
+ """
+ super(runner.DaemonRunner, self).__init__()
+ # update action_funcs to support more operations
+ self.update_action_funcs()
+
+ daemonize_parser(parser_obj, self.action_funcs, ExtendedDaemonRunner.help_menu)
+ args = parser_obj.parse_args()
+ self.action = unicode(args.action)
+
+ self.app = app
+ self.daemon_context = daemon.DaemonContext()
+ self.daemon_context.stdin = open(app.stdin_path, 'rt')
+ self.daemon_context.stdout = open(app.stdout_path, 'w+t')
+ self.daemon_context.stderr = open(
+ app.stderr_path, 'a+t', buffering=0)
+
+ self.pidfile = None
+ if app.pidfile_path is not None:
+ self.pidfile = make_pidlockfile(app.pidfile_path, app.pidfile_timeout)
+ self.daemon_context.pidfile = self.pidfile
+
+ # mask out all arguments that aren't relevant to main app script
+
+
+ def update_action_funcs (self):
+ self.action_funcs.update({u'start-live': self._start_live, u'show': self._show}) # add key (=action), value (=desired func)
+
+ @staticmethod
+ def _start_live (self):
+ self.app.run()
+
+ @staticmethod
+ def _show (self):
+ if self.pidfile.is_locked():
+ print termstyle.red("T-Rex server daemon is running")
+ else:
+ print termstyle.red("T-Rex server daemon is NOT running")
+
+ def do_action (self):
+ self.__prevent_duplicate_runs()
+ self.__prompt_init_msg()
+ try:
+ super(ExtendedDaemonRunner, self).do_action()
+ if self.action == 'stop':
+ self.__verify_termination()
+ except runner.DaemonRunnerStopFailureError:
+ if self.action == 'restart':
+ # error means server wasn't running in the first place- so start it!
+ self.action = 'start'
+ self.do_action()
+
+
+ def __prevent_duplicate_runs (self):
+ if self.action == 'start' and self.pidfile.is_locked():
+ print termstyle.green("Server daemon is already running")
+ exit(1)
+ elif self.action == 'stop' and not self.pidfile.is_locked():
+ print termstyle.green("Server daemon is not running")
+ exit(1)
+
+ def __prompt_init_msg (self):
+ if self.action == 'start':
+ print termstyle.green("Starting daemon server...")
+ elif self.action == 'stop':
+ print termstyle.green("Stopping daemon server...")
+
+ def __verify_termination (self):
+ pass
+# import time
+# while self.pidfile.is_locked():
+# time.sleep(2)
+# self._stop()
+#
+
+
+if __name__ == "__main__":
+ pass
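+
+# Usage sketch: trex_daemon_server.py builds the application object and hands it,
+# together with its argument parser, to this runner, which then dispatches the
+# requested action:
+#
+#   daemon_runner = ExtendedDaemonRunner(trex_app, trex_parser)
+#   daemon_runner.do_action()   # 'start' / 'stop' / 'restart' / 'show' / 'start-live'
+#
+# From the shell this maps to commands such as (invocation paths assumed):
+#   ./trex_daemon_server start
+#   ./trex_daemon_server show
+#   ./trex_daemon_server stop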
diff --git a/scripts/automation/trex_control_plane/server/outer_packages.py b/scripts/automation/trex_control_plane/server/outer_packages.py
new file mode 100755
index 00000000..ab25ea68
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/outer_packages.py
@@ -0,0 +1,66 @@
+#!/router/bin/python
+
+import sys,site
+import platform,os
+import tarfile
+import errno
+import pwd
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, 'python_lib'))
+
+SERVER_MODULES = ['enum34-1.0.4',
+ # 'jsonrpclib-0.1.3',
+ 'jsonrpclib-pelix-0.2.5',
+ 'zmq',
+ 'python-daemon-2.0.5',
+ 'lockfile-0.10.2',
+ 'termstyle'
+ ]
+
+def extract_zmq_package ():
+ """make sure zmq package is available"""
+
+ os.chdir(PATH_TO_PYTHON_LIB)
+ if not os.path.exists('zmq'):
+ if os.path.exists('zmq_fedora.tar.gz'): # make sure tar file is available for extraction
+ try:
+ tar = tarfile.open("zmq_fedora.tar.gz")
+ # finally, extract the tarfile locally
+ tar.extractall()
+ except OSError as err:
+ if err.errno == errno.EACCES:
+ # fall back. try extracting using currently logged in user
+ stat_info = os.stat(PATH_TO_PYTHON_LIB)
+ uid = stat_info.st_uid
+ logged_user = pwd.getpwuid(uid).pw_name
+ if logged_user != 'root':
+ try:
+ os.system("sudo -u {user} tar -zxvf zmq_fedora.tar.gz".format(user = logged_user))
+ except:
+ raise OSError(13, 'Permission denied: Please make sure the logged-in user has sudo access and write privileges to the `python_lib` directory.')
+ else:
+ raise OSError(13, 'Permission denied: Please make sure the logged-in user has sudo access and write privileges to the `python_lib` directory.')
+ finally:
+ tar.close()
+ else:
+ raise IOError("File 'zmq_fedora.tar.gz' couldn't be located at python_lib directory.")
+ os.chdir(CURRENT_PATH)
+
+def import_server_modules ():
+ # python_lib must come first so its packages take precedence on the import path
+ sys.path.insert(0, PATH_TO_PYTHON_LIB)
+ sys.path.append(ROOT_PATH)
+ extract_zmq_package()
+ import_module_list(SERVER_MODULES)
+
+def import_module_list (modules_list):
+ assert(isinstance(modules_list, list))
+ for p in modules_list:
+ full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
+ fix_path = os.path.normcase(full_path)
+ site.addsitedir(full_path)
+
+
+import_server_modules()
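+
+# Minimal sketch of the vendoring technique used above: site.addsitedir() adds a
+# directory to sys.path (and honors any .pth files inside it), after which the
+# bundled package can be imported as usual. The path below is an assumption.
+#
+#   import site
+#   site.addsitedir('/path/to/python_lib/enum34-1.0.4')   # dir containing the 'enum' package
+#   import enum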
diff --git a/scripts/automation/trex_control_plane/server/trex_daemon_server b/scripts/automation/trex_control_plane/server/trex_daemon_server
new file mode 100755
index 00000000..3494e303
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/trex_daemon_server
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+
+import os
+import sys
+
+core = 0
+
+if '--core' in sys.argv:
+ try:
+ idx = sys.argv.index('--core')
+ core = int(sys.argv[idx + 1])
+ if core > 31 or core < 0:
+ print "Error: please provide core argument between 0 to 31"
+ exit(-1)
+ del sys.argv[idx:idx+2]
+ except IndexError:
+ print "Error: please make sure core option provided with argument"
+ exit(-1)
+ except ValueError:
+ print "Error: please make sure core option provided with integer argument"
+ exit(-1)
+
+str_argv = ' '.join(sys.argv[1:])
+cmd = "taskset -c {core} python automation/trex_control_plane/server/trex_daemon_server.py {argv}".format(core = core, argv = str_argv)
+os.system(cmd)
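+
+# Example invocations (sketch): the wrapper strips --core, pins the daemon to the
+# requested core with taskset, and forwards the remaining arguments (such as the
+# 'start' action) to trex_daemon_server.py.
+#   ./trex_daemon_server start --core 5
+#   ./trex_daemon_server stop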
diff --git a/scripts/automation/trex_control_plane/server/trex_daemon_server.py b/scripts/automation/trex_control_plane/server/trex_daemon_server.py
new file mode 100755
index 00000000..5032423a
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/trex_daemon_server.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python
+
+import outer_packages
+import daemon
+from trex_server import do_main_program, trex_parser
+import CCustomLogger
+
+import logging
+import time
+import sys
+import os, errno
+import grp
+import signal
+from daemon import runner
+from extended_daemon_runner import ExtendedDaemonRunner
+import lockfile
+import errno
+
+class TRexServerApp(object):
+ def __init__(self):
+ TRexServerApp.create_working_dirs()
+ self.stdin_path = '/dev/null'
+ self.stdout_path = '/dev/tty' # All standard prints will come up from this source.
+ self.stderr_path = "/var/log/trex/trex_daemon_server.log" # All log messages will come up from this source
+ self.pidfile_path = '/var/run/trex/trex_daemon_server.pid'
+ self.pidfile_timeout = 5 # timeout in seconds
+
+ def run(self):
+ do_main_program()
+
+
+ @staticmethod
+ def create_working_dirs():
+ if not os.path.exists('/var/log/trex'):
+ os.mkdir('/var/log/trex')
+ if not os.path.exists('/var/run/trex'):
+ os.mkdir('/var/run/trex')
+
+
+
+def main ():
+
+ trex_app = TRexServerApp()
+
+ # setup the logger
+ default_log_path = '/var/log/trex/trex_daemon_server.log'
+
+ try:
+ CCustomLogger.setup_daemon_logger('TRexServer', default_log_path)
+ logger = logging.getLogger('TRexServer')
+ logger.setLevel(logging.INFO)
+ formatter = logging.Formatter("%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s")
+ handler = logging.FileHandler("/var/log/trex/trex_daemon_server.log")
+ logger.addHandler(handler)
+ except EnvironmentError, e:
+ if e.errno == errno.EACCES: # catching permission denied error
+ print "Launching user must have sudo privileges in order to run T-Rex daemon.\nTerminating daemon process."
+ exit(-1)
+
+ try:
+ daemon_runner = ExtendedDaemonRunner(trex_app, trex_parser)
+ except IOError as err:
+ # catch 'tty' error when launching server from remote location
+ if err.errno == errno.ENXIO:
+ trex_app.stdout_path = "/dev/null"
+ daemon_runner = ExtendedDaemonRunner(trex_app, trex_parser)
+ else:
+ raise
+
+ #This ensures that the logger file handle does not get closed during daemonization
+ daemon_runner.daemon_context.files_preserve=[handler.stream]
+
+ try:
+ if not set(['start', 'stop']).isdisjoint(set(sys.argv)):
+ print "Logs are saved at: {log_path}".format( log_path = default_log_path )
+ daemon_runner.do_action()
+
+ except lockfile.LockTimeout as inst:
+ logger.error(inst)
+ print inst
+ print """
+ Please try again once the timeout has been reached.
+ If this error continues, consider killing the process manually and restart the daemon."""
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/automation/trex_control_plane/server/trex_launch_thread.py b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
new file mode 100755
index 00000000..b4be60a9
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/trex_launch_thread.py
@@ -0,0 +1,92 @@
+#!/router/bin/python
+
+
+import os
+import signal
+import socket
+from common.trex_status_e import TRexStatus
+import subprocess
+import time
+import threading
+import logging
+import CCustomLogger
+
+# setup the logger
+CCustomLogger.setup_custom_logger('TRexServer')
+logger = logging.getLogger('TRexServer')
+
+
+class AsynchronousTRexSession(threading.Thread):
+ def __init__(self, trexObj , trex_launch_path, trex_cmd_data):
+ super(AsynchronousTRexSession, self).__init__()
+ self.stoprequest = threading.Event()
+ self.terminateFlag = False
+ self.launch_path = trex_launch_path
+ self.cmd, self.export_path, self.duration = trex_cmd_data
+ self.session = None
+ self.trexObj = trexObj
+ self.time_stamps = {'start' : None, 'run_time' : None}
+ self.trexObj.zmq_dump = {}
+
+ def run (self):
+
+ with open(os.devnull, 'w') as DEVNULL:
+ self.time_stamps['start'] = self.time_stamps['run_time'] = time.time()
+ self.session = subprocess.Popen("exec "+self.cmd, cwd = self.launch_path, shell=True, stdin = DEVNULL, stderr = subprocess.PIPE, preexec_fn=os.setsid)
+ logger.info("T-Rex session initialized successfully, Parent process pid is {pid}.".format( pid = self.session.pid ))
+ while self.session.poll() is None: # subprocess is NOT finished
+ time.sleep(0.5)
+ if self.stoprequest.is_set():
+ logger.debug("Abort request received by handling thread. Terminating T-Rex session." )
+ os.killpg(self.session.pid, signal.SIGUSR1)
+ self.trexObj.set_status(TRexStatus.Idle)
+ self.trexObj.set_verbose_status("T-Rex is Idle")
+ break
+
+ self.time_stamps['run_time'] = time.time() - self.time_stamps['start']
+
+ try:
+ if self.time_stamps['run_time'] < 5:
+ logger.error("T-Rex run failed due to wrong input parameters, or due to reachability issues.")
+ self.trexObj.set_verbose_status("T-Rex run failed due to wrong input parameters, or due to reachability issues.\n\nT-Rex command: {cmd}\n\nRun output:\n{output}".format(
+ cmd = self.cmd, output = self.load_trex_output(self.export_path)))
+ self.trexObj.errcode = -11
+ elif (self.session.returncode is not None and self.session.returncode < 0) or ( (self.time_stamps['run_time'] < self.duration) and (not self.stoprequest.is_set()) ):
+ if (self.session.returncode is not None and self.session.returncode < 0):
+ logger.debug("Failed T-Rex run due to session return code ({ret_code})".format( ret_code = self.session.returncode ) )
+ elif ( (self.time_stamps['run_time'] < self.duration) and not self.stoprequest.is_set()):
+ logger.debug("Failed T-Rex run due to running time ({runtime}) combined with no-stopping request.".format( runtime = self.time_stamps['run_time'] ) )
+
+ logger.warning("T-Rex run was terminated unexpectedly by outer process or by the hosting OS")
+ self.trexObj.set_verbose_status("T-Rex run was terminated unexpectedly by outer process or by the hosting OS.\n\nRun output:\n{output}".format(
+ output = self.load_trex_output(self.export_path)))
+ self.trexObj.errcode = -15
+ else:
+ logger.info("T-Rex run session finished.")
+ self.trexObj.set_verbose_status('T-Rex finished.')
+ self.trexObj.errcode = None
+
+ finally:
+ self.trexObj.set_status(TRexStatus.Idle)
+ logger.info("TRex running state changed to 'Idle'.")
+ self.trexObj.expect_trex.clear()
+ logger.debug("Finished handling a single run of T-Rex.")
+ self.trexObj.zmq_dump = None
+
+ def join (self, timeout = None):
+ self.stoprequest.set()
+ super(AsynchronousTRexSession, self).join(timeout)
+
+ def load_trex_output (self, export_path):
+ output = None
+ with open(export_path, 'r') as f:
+ output = f.read()
+ return output
+
+
+
+
+
+if __name__ == "__main__":
+ pass
+
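+
+# Self-contained sketch of the launch/poll/abort pattern used by
+# AsynchronousTRexSession; the command and sleep interval below are illustrative only.
+def _popen_poll_sketch():
+    import os, signal, subprocess, threading, time
+    stop_request = threading.Event()
+    # 'exec' replaces the shell, so the child owns the process group created by setsid
+    session = subprocess.Popen("exec sleep 30", shell = True, preexec_fn = os.setsid)
+    while session.poll() is None:                  # None means the child is still running
+        time.sleep(0.5)
+        if stop_request.is_set():
+            os.killpg(session.pid, signal.SIGUSR1) # signal the whole process group
+            break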
diff --git a/scripts/automation/trex_control_plane/server/trex_server.py b/scripts/automation/trex_control_plane/server/trex_server.py
new file mode 100755
index 00000000..992a1d5f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/trex_server.py
@@ -0,0 +1,465 @@
+#!/usr/bin/python
+
+
+import os
+import stat
+import sys
+import time
+import outer_packages
+import zmq
+from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
+import jsonrpclib
+from jsonrpclib import Fault
+import binascii
+import socket
+import errno
+import signal
+import binascii
+from common.trex_status_e import TRexStatus
+from common.trex_exceptions import *
+import subprocess
+from random import randrange
+#import shlex
+import logging
+import threading
+import CCustomLogger
+from trex_launch_thread import AsynchronousTRexSession
+from zmq_monitor_thread import ZmqMonitorSession
+from argparse import ArgumentParser, RawTextHelpFormatter
+from json import JSONEncoder
+
+
+# setup the logger
+CCustomLogger.setup_custom_logger('TRexServer')
+logger = logging.getLogger('TRexServer')
+
+class CTRexServer(object):
+ """This class defines the server side of the RESTfull interaction with T-Rex"""
+ DEFAULT_TREX_PATH = '/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.55/' #'/auto/proj-pcube-b/apps/PL-b/tools/nightly/trex_latest'
+ TREX_START_CMD = './t-rex-64'
+ DEFAULT_FILE_PATH = '/tmp/trex_files/'
+
+ def __init__(self, trex_path, trex_files_path, trex_host = socket.gethostname(), trex_daemon_port = 8090, trex_zmq_port = 4500):
+ """
+ Parameters
+ ----------
+ trex_host : str
+ a string of the t-rex ip address or hostname.
+ default value: machine hostname as fetched from socket.gethostname()
+ trex_daemon_port : int
+ the port number on which the trex-daemon server can be reached
+ default value: 8090
+ trex_zmq_port : int
+ the port number on which trex's zmq module will interact with daemon server
+ default value: 4500
+
+ Instantiates a T-Rex server object that will serve client requests over the daemon-server port.
+ """
+ self.TREX_PATH = os.path.abspath(os.path.dirname(trex_path+'/'))
+ self.trex_files_path = os.path.abspath(os.path.dirname(trex_files_path+'/'))
+ self.__check_trex_path_validity()
+ self.__check_files_path_validity()
+ self.trex = CTRex()
+ self.trex_host = trex_host
+ self.trex_daemon_port = trex_daemon_port
+ self.trex_zmq_port = trex_zmq_port
+ self.trex_server_path = "http://{hostname}:{port}".format( hostname = trex_host, port = trex_daemon_port )
+ self.start_lock = threading.Lock()
+ self.__reservation = None
+ self.zmq_monitor = ZmqMonitorSession(self.trex, self.trex_zmq_port) # initiate a single ZMQ monitor thread for server usage
+
+ def add(self, x, y):
+ print "server function add ",x,y
+ logger.info("Processing add function. Parameters are: {0}, {1} ".format( x, y ))
+ return x + y
+ # return Fault(-10, "")
+
+ def push_file (self, filename, bin_data):
+ logger.info("Processing push_file() command.")
+ try:
+ filepath = os.path.abspath(os.path.join(self.trex_files_path, filename))
+ with open(filepath, 'wb') as f:
+ f.write(binascii.a2b_base64(bin_data))
+ logger.info("push_file() command finished. `{name}` was saved at {fpath}".format( name = filename, fpath = self.trex_files_path))
+ return True
+ except IOError as inst:
+ logger.error("push_file method failed. " + str(inst))
+ return False
+
+ def connectivity_check (self):
+ logger.info("Processing connectivity_check function.")
+ return True
+
+ def start(self):
+ """This method fires up the daemon server based on initialized parameters of the class"""
+ # initialize the server instance with the given resources
+ try:
+ print "Firing up T-Rex REST daemon @ port {trex_port} ...\n".format( trex_port = self.trex_daemon_port )
+ logger.info("Firing up T-Rex REST daemon @ port {trex_port} ...".format( trex_port = self.trex_daemon_port ))
+ logger.info("current working dir is: {0}".format(self.TREX_PATH) )
+ logger.info("current files dir is : {0}".format(self.trex_files_path) )
+ logger.debug("Starting TRex server. Registering methods to process.")
+ self.server = SimpleJSONRPCServer( (self.trex_host, self.trex_daemon_port) )
+ except socket.error as e:
+ if e.errno == errno.EADDRINUSE:
+ logger.error("T-Rex server requested address already in use. Aborting server launching.")
+ print "T-Rex server requested address already in use. Aborting server launching."
+ raise socket.error(errno.EADDRINUSE, "T-Rex daemon requested address already in use. Server launch aborted. Please make sure no other process is using the desired server properties.")
+
+ # set further functionality and peripherals to server instance
+ try:
+ self.server.register_function(self.add)
+ self.server.register_function(self.connectivity_check)
+ self.server.register_function(self.start_trex)
+ self.server.register_function(self.stop_trex)
+ self.server.register_function(self.wait_until_kickoff_finish)
+ self.server.register_function(self.get_running_status)
+ self.server.register_function(self.is_running)
+ self.server.register_function(self.get_running_info)
+ self.server.register_function(self.is_reserved)
+ self.server.register_function(self.get_files_path)
+ self.server.register_function(self.push_file)
+ self.server.register_function(self.reserve_trex)
+ self.server.register_function(self.cancel_reservation)
+ self.server.register_function(self.force_trex_kill)
+ signal.signal(signal.SIGTSTP, self.stop_handler)
+ signal.signal(signal.SIGTERM, self.stop_handler)
+ self.zmq_monitor.start()
+ self.server.serve_forever()
+ except KeyboardInterrupt:
+ logger.info("Daemon shutdown request detected." )
+ finally:
+ self.zmq_monitor.join() # close ZMQ monitor thread resources
+ self.server.shutdown()
+ pass
+
+ def stop_handler (self, signum, frame):
+ logger.info("Daemon STOP request detected.")
+ if self.is_running():
+ # in case T-Rex process is currently running, stop it before terminating server process
+ self.stop_trex(self.trex.get_seq())
+ sys.exit(0)
+
+ def is_running (self):
+ run_status = self.trex.get_status()
+ logger.info("Processing is_running() command. Running status is: {stat}".format(stat = run_status) )
+ if run_status==TRexStatus.Running:
+ return True
+ else:
+ return False
+
+ def is_reserved (self):
+ logger.info("Processing is_reserved() command.")
+ return bool(self.__reservation)
+
+ def get_running_status (self):
+ run_status = self.trex.get_status()
+ logger.info("Processing get_running_status() command. Running status is: {stat}".format(stat = run_status) )
+ return { 'state' : run_status.value, 'verbose' : self.trex.get_verbose_status() }
+
+ def get_files_path (self):
+ logger.info("Processing get_files_path() command." )
+ return self.trex_files_path
+
+ def reserve_trex (self, user):
+ if user == "":
+ logger.info("T-Rex reservation cannot apply to empty string user. Request denied.")
+ return Fault(-33, "T-Rex reservation cannot apply to empty string user. Request denied.")
+
+ with self.start_lock:
+ logger.info("Processing reserve_trex() command.")
+ if self.is_reserved():
+ if user == self.__reservation['user']:
+ # return True if the same user is asking and already holds the reservation
+ logger.info("The same user is asking and already holds the reservation. Re-reserving T-Rex.")
+ return True
+
+ logger.info("T-Rex is already reserved to another user ({res_user}), cannot reserve to another user.".format( res_user = self.__reservation['user'] ))
+ return Fault(-33, "T-Rex is already reserved to another user ({res_user}). Please make sure T-Rex is free before reserving it.".format(
+ res_user = self.__reservation['user']) ) # raise at client TRexInUseError
+ elif self.trex.get_status() != TRexStatus.Idle:
+ logger.info("T-Rex is currently running, cannot reserve T-Rex unless in Idle state.")
+ return Fault(-13, 'T-Rex is currently running, cannot reserve T-Rex unless in Idle state. Please try again when T-Rex run finished.') # raise at client TRexInUseError
+ else:
+ logger.info("T-Rex is now reserved for user ({res_user}).".format( res_user = user ))
+ self.__reservation = {'user' : user, 'since' : time.ctime()}
+ logger.debug("Reservation details: "+ str(self.__reservation))
+ return True
+
+ def cancel_reservation (self, user):
+ with self.start_lock:
+ logger.info("Processing cancel_reservation() command.")
+ if self.is_reserved():
+ if self.__reservation['user'] == user:
+ logger.info("T-Rex reservation to {res_user} has been canceled successfully.".format(res_user = self.__reservation['user']))
+ self.__reservation = None
+ return True
+ else:
+ logger.warning("T-Rex is reserved to different user than the provided one. Reservation wasn't canceled.")
+ return Fault(-33, "Cancel reservation request is available to the user that holds the reservation. Request denied") # raise at client TRexRequestDenied
+
+ else:
+ logger.info("T-Rex is not reserved to anyone. No need to cancel anything")
+ assert(self.__reservation is None)
+ return False
+
+
+ def start_trex(self, trex_cmd_options, user, block_to_success = True, timeout = 30):
+ with self.start_lock:
+ logger.info("Processing start_trex() command.")
+ if self.is_reserved():
+ # check if this is not the user to which T-Rex is reserved
+ if self.__reservation['user'] != user:
+ logger.info("T-Rex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user']))
+ return Fault(-33, "T-Rex is reserved to another user ({res_user}). Only that user is allowed to initiate new runs.".format(res_user = self.__reservation['user'])) # raise at client TRexRequestDenied
+ elif self.trex.get_status() != TRexStatus.Idle:
+ logger.info("T-Rex is already taken, cannot create another run until done.")
+ return Fault(-13, '') # raise at client TRexInUseError
+
+ try:
+ server_cmd_data = self.generate_run_cmd(**trex_cmd_options)
+ self.zmq_monitor.first_dump = True
+ self.trex.start_trex(self.TREX_PATH, server_cmd_data)
+ logger.info("T-Rex session has been successfully initiated.")
+ if block_to_success:
+ # delay server response until T-Rex is at 'Running' state.
+ start_time = time.time()
+ trex_state = None
+ while (time.time() - start_time) < timeout :
+ trex_state = self.trex.get_status()
+ if trex_state != TRexStatus.Starting:
+ break
+ else:
+ time.sleep(0.5)
+
+ # check for T-Rex run started normally
+ if trex_state == TRexStatus.Starting: # reached timeout
+ logger.warning("TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.")
+ return Fault(-12, 'TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
+ elif trex_state == TRexStatus.Idle:
+ return Fault(-11, self.trex.get_verbose_status()) # raise at client TRexError
+
+ # reach here only if T-Rex is at 'Running' state
+ self.trex.gen_seq()
+ return self.trex.get_seq() # return unique seq number to client
+
+ except TypeError as e:
+ logger.error("T-Rex command generation failed, probably because either -f (traffic generation .yaml file) and -c (num of cores) was not specified correctly.\nReceived params: {params}".format( params = trex_cmd_options) )
+ raise TypeError('T-Rex -f (traffic generation .yaml file) and -c (num of cores) must be specified.')
+
+
+ def stop_trex(self, seq):
+ logger.info("Processing stop_trex() command.")
+ if self.trex.get_seq()== seq:
+ logger.debug("Abort request legit since seq# match")
+ return self.trex.stop_trex()
+ else:
+ if self.trex.get_status() != TRexStatus.Idle:
+ logger.warning("Abort request is only allowed to process initiated the run. Request denied.")
+
+ return Fault(-33, 'Abort request is only allowed to process initiated the run. Request denied.') # raise at client TRexRequestDenied
+ else:
+ return False
+
+ def force_trex_kill (self):
+ logger.info("Processing force_trex_kill() command. --> Killing T-Rex session indiscriminately.")
+ return self.trex.stop_trex()
+
+ def wait_until_kickoff_finish (self, timeout = 40):
+ # block until T-Rex exits Starting state
+ logger.info("Processing wait_until_kickoff_finish() command.")
+ trex_state = None
+ start_time = time.time()
+ while (time.time() - start_time) < timeout :
+ trex_state = self.trex.get_status()
+ if trex_state != TRexStatus.Starting:
+ return
+ return Fault(-12, 'TimeoutError: T-Rex initiation outcome could not be obtained, since T-Rex stays at Starting state beyond defined timeout.') # raise at client TRexWarning
+
+ def get_running_info (self):
+ logger.info("Processing get_running_info() command.")
+ return self.trex.get_running_info()
+
+ def generate_run_cmd (self, f, d, iom = 0, export_path="/tmp/trex.txt", **kwargs):
+ """ generate_run_cmd(self, trex_cmd_options, export_path) -> str
+
+ Generates a custom running command for the kick-off of the T-Rex traffic generator.
+ Returns a tuple of command (string) and export path (string) to be issued on the trex server
+
+ Parameters
+ ----------
+ trex_cmd_options : str
+ Defines the exact command to run on the t-rex
+ Example: "-c 2 -m 0.500000 -d 100 -f cap2/sfr.yaml --nc -p -l 1000"
+ export_path : str
+ a full system path to which the results of the trex-run will be logged.
+
+ """
+ if 'results_file_path' in kwargs:
+ export_path = kwargs['results_file_path']
+ del kwargs['results_file_path']
+
+
+ # adding additional options to the command
+ trex_cmd_options = ''
+ for key, value in kwargs.iteritems():
+ tmp_key = key.replace('_','-')
+ dash = ' -' if (len(key)==1) else ' --'
+ if (value == True) and (str(value) != '1'): # also check str(value) to avoid the case where the integer 1 is translated by Python to 'True'
+ trex_cmd_options += (dash + tmp_key)
+ else:
+ trex_cmd_options += (dash + '{k} {val}'.format( k = tmp_key, val = value ))
+
+ cmd = "{run_command} -f {gen_file} -d {duration} --iom {io} {cmd_options} --no-key > {export}".format( # -- iom 0 disables the periodic log to the screen (not needed)
+ run_command = self.TREX_START_CMD,
+ gen_file = f,
+ duration = d,
+ cmd_options = trex_cmd_options,
+ io = iom,
+ export = export_path )
+
+ logger.info("T-REX FULL COMMAND: {command}".format(command = cmd) )
+
+ return (cmd, export_path, long(d))
+
+ def __check_trex_path_validity(self):
+ # check for executable existence
+ if not os.path.exists(self.TREX_PATH+'/t-rex-64'):
+ print "The provided T-Rex path does not contain an executable T-Rex file.\nPlease check the path and retry."
+ logger.error("The provided T-Rex path does not contain an executable T-Rex file")
+ exit(-1)
+ # check for executable permissions
+ st = os.stat(self.TREX_PATH+'/t-rex-64')
+ if not bool(st.st_mode & (stat.S_IXUSR ) ):
+ print "The provided T-Rex path do not contain an T-Rex file with execution privileges.\nPlease check the files permissions and retry."
+ logger.error("The provided T-Rex path do not contain an T-Rex file with execution privileges")
+ exit(-1)
+ else:
+ return
+
+ def __check_files_path_validity(self):
+ # first, check for path existence. otherwise, try creating it with appropriate credentials
+ if not os.path.exists(self.trex_files_path):
+ try:
+ os.makedirs(self.trex_files_path, 0660)
+ return
+ except os.error as inst:
+ print "The provided files path does not exist and cannot be created with needed access credentials using root user.\nPlease check the path's permissions and retry."
+ logger.error("The provided files path does not exist and cannot be created with needed access credentials using root user.")
+ exit(-1)
+ elif os.access(self.trex_files_path, os.W_OK):
+ return
+ else:
+ print "The provided files path has insufficient access credentials for root user.\nPlease check the path's permissions and retry."
+ logger.error("The provided files path has insufficient access credentials for root user")
+ exit(-1)
+
+class CTRex(object):
+ def __init__(self):
+ self.status = TRexStatus.Idle
+ self.verbose_status = 'T-Rex is Idle'
+ self.errcode = None
+ self.session = None
+ self.zmq_monitor = None
+ self.zmq_dump = None
+ self.seq = None
+ self.expect_trex = threading.Event()
+ self.encoder = JSONEncoder()
+
+ def get_status(self):
+ return self.status
+
+ def set_status(self, new_status):
+ self.status = new_status
+
+ def get_verbose_status(self):
+ return self.verbose_status
+
+ def set_verbose_status(self, new_status):
+ self.verbose_status = new_status
+
+ def gen_seq (self):
+ self.seq = randrange(1,1000)
+
+ def get_seq (self):
+ return self.seq
+
+ def get_running_info (self):
+ if self.status == TRexStatus.Running:
+ return self.encoder.encode(self.zmq_dump)
+ else:
+ logger.info("T-Rex isn't running. Running information isn't available.")
+ if self.status == TRexStatus.Idle:
+ if self.errcode is not None: # some error occurred
+ logger.info("T-Rex is in Idle state, with errors. Returning Fault.")
+ return Fault(self.errcode, self.verbose_status) # raise at client the relevant exception, depending on the reason the error occurred
+ else:
+ logger.info("T-Rex is in Idle state, no errors. returning {}")
+ return u'{}'
+
+ return Fault(-12, self.verbose_status) # raise at client TRexWarning, indicating T-Rex is back to Idle state or still in Starting state
+
+ def stop_trex(self):
+ if self.status == TRexStatus.Idle:
+ # t-rex isn't running, nothing to abort
+ logger.info("T-Rex isn't running. No need to stop anything.")
+ if self.errcode is not None: # some error occurred, notify client despite T-Rex already being stopped
+ return Fault(self.errcode, self.verbose_status) # raise at client the relevant exception, depending on the reason the error occurred
+ return False
+ else:
+ # handle stopping t-rex's run
+ self.session.join()
+ logger.info("T-Rex session has been successfully aborted.")
+ return True
+
+ def start_trex(self, trex_launch_path, trex_cmd):
+ self.set_status(TRexStatus.Starting)
+ logger.info("TRex running state changed to 'Starting'.")
+ self.set_verbose_status('T-Rex is starting (data is not available yet)')
+
+ self.errcode = None
+ self.session = AsynchronousTRexSession(self, trex_launch_path, trex_cmd)
+ self.session.start()
+ self.expect_trex.set()
+# self.zmq_monitor= ZmqMonitorSession(self, zmq_port)
+# self.zmq_monitor.start()
+
+
+
+def generate_trex_parser ():
+ default_path = os.path.abspath(os.path.join(outer_packages.CURRENT_PATH, os.pardir, os.pardir, os.pardir))
+ default_files_path = os.path.abspath(CTRexServer.DEFAULT_FILE_PATH)
+
+ parser = ArgumentParser(description = 'Run server application for T-Rex traffic generator',
+ formatter_class = RawTextHelpFormatter,
+ usage = """
+trex_daemon_server [options]
+""" )
+
+ parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
+ parser.add_argument("-p", "--daemon-port", type=int, default = 8090, metavar="PORT", dest="daemon_port",
+ help="Select port on which the daemon runs.\nDefault port is 8090.", action="store")
+ parser.add_argument("-z", "--zmq-port", dest="zmq_port", type=int,
+ action="store", help="Select port on which the ZMQ module listens to T-Rex.\nDefault port is 4500.", metavar="PORT",
+ default = 4500)
+ parser.add_argument("-t", "--trex-path", dest="trex_path",
+ action="store", help="Specify the compiled T-Rex directory from which T-Rex would run.\nDefault path is: {def_path}.".format( def_path = default_path ),
+ metavar="PATH", default = default_path )
+ parser.add_argument("-f", "--files-path", dest="files_path",
+ action="store", help="Specify a path to directory on which pushed files will be saved at.\nDefault path is: {def_path}.".format( def_path = default_files_path ),
+ metavar="PATH", default = default_files_path )
+ return parser
+
+trex_parser = generate_trex_parser()
+
+def do_main_program ():
+
+ args = trex_parser.parse_args()
+
+ server = CTRexServer(trex_daemon_port = args.daemon_port, trex_zmq_port = args.zmq_port, trex_path = args.trex_path, trex_files_path = args.files_path)
+ server.start()
+
+
+if __name__ == "__main__":
+ do_main_program()
+
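+
+# Client-side sketch (hostname below is an assumption): every method registered in
+# CTRexServer.start() is exposed over JSON-RPC, so a plain jsonrpclib proxy can call it.
+#
+#   import jsonrpclib
+#   trex_daemon = jsonrpclib.Server("http://trex-host:8090")
+#   print trex_daemon.connectivity_check()             # -> True
+#   print trex_daemon.add(2, 3)                        # -> 5
+#   seq = trex_daemon.start_trex({'f': 'avl/sfr_delay_10_1g.yaml', 'd': 60, 'c': 2}, 'some-user')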
diff --git a/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
new file mode 100755
index 00000000..28e154ee
--- /dev/null
+++ b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py
@@ -0,0 +1,80 @@
+#!/router/bin/python
+
+import os
+import outer_packages
+import zmq
+import threading
+import logging
+import CCustomLogger
+from json import JSONDecoder
+from common.trex_status_e import TRexStatus
+
+# setup the logger
+CCustomLogger.setup_custom_logger('TRexServer')
+logger = logging.getLogger('TRexServer')
+
+class ZmqMonitorSession(threading.Thread):
+ def __init__(self, trexObj , zmq_port):
+ super(ZmqMonitorSession, self).__init__()
+ self.stoprequest = threading.Event()
+# self.terminateFlag = False
+ self.first_dump = True
+ self.zmq_port = zmq_port
+ self.zmq_publisher = "tcp://localhost:{port}".format( port = self.zmq_port )
+# self.context = zmq.Context()
+# self.socket = self.context.socket(zmq.SUB)
+ self.trexObj = trexObj
+ self.expect_trex = self.trexObj.expect_trex # used to signal if T-Rex is expected to run and if data should be considered
+ self.decoder = JSONDecoder()
+ logger.info("ZMQ monitor initialization finished")
+
+ def run (self):
+ self.context = zmq.Context()
+ self.socket = self.context.socket(zmq.SUB)
+ logger.info("ZMQ monitor started listening @ {pub}".format( pub = self.zmq_publisher ) )
+ self.socket.connect(self.zmq_publisher)
+ self.socket.setsockopt(zmq.SUBSCRIBE, '')
+
+ while not self.stoprequest.is_set():
+ try:
+ zmq_dump = self.socket.recv() # This call is BLOCKING until data received!
+ if self.expect_trex.is_set():
+ self.parse_and_update_zmq_dump(zmq_dump)
+ logger.debug("ZMQ dump received on socket, and saved to trexObject.")
+ except Exception as e:
+ if self.stoprequest.is_set():
+ # allow this exception since it comes from ZMQ monitor termination
+ pass
+ else:
+ logger.error("ZMQ monitor thrown an exception. Received exception: {ex}".format(ex = e))
+ raise
+
+ def join (self, timeout = None):
+ self.stoprequest.set()
+ logger.debug("Handling termination of ZMQ monitor thread")
+ self.socket.close()
+ self.context.term()
+ logger.info("ZMQ monitor resources has been freed.")
+ super(ZmqMonitorSession, self).join(timeout)
+
+ def parse_and_update_zmq_dump (self, zmq_dump):
+ try:
+ dict_obj = self.decoder.decode(zmq_dump)
+ except ValueError:
+ logger.error("ZMQ dump failed JSON-RPC decode. Ignoring. Bad dump was: {dump}".format(dump = zmq_dump))
+ dict_obj = None
+
+ # add to trex_obj zmq latest dump, based on its 'name' header
+ if dict_obj is not None and dict_obj!={}:
+ self.trexObj.zmq_dump[dict_obj['name']] = dict_obj
+ if self.first_dump:
+ # change TRexStatus from starting to Running once the first ZMQ dump is obtained and parsed successfully
+ self.first_dump = False
+ self.trexObj.set_status(TRexStatus.Running)
+ self.trexObj.set_verbose_status("T-Rex is Running")
+ logger.info("First ZMQ dump received and successfully parsed. TRex running state changed to 'Running'.")
+
+
+if __name__ == "__main__":
+ pass
+
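+
+# Minimal sketch of the SUB-socket pattern used above; the publisher address is an
+# assumption (4500 is the parser's default ZMQ port).
+def _zmq_sub_sketch():
+    import zmq
+    from json import JSONDecoder
+    context = zmq.Context()
+    socket = context.socket(zmq.SUB)
+    socket.connect("tcp://localhost:4500")
+    socket.setsockopt(zmq.SUBSCRIBE, '')   # subscribe to every published message
+    dump = socket.recv()                   # blocks until T-Rex publishes a dump
+    print JSONDecoder().decode(dump)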
diff --git a/scripts/automation/trex_control_plane/unit_tests/__init__.py b/scripts/automation/trex_control_plane/unit_tests/__init__.py
new file mode 100755
index 00000000..d3f5a12f
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/__init__.py
@@ -0,0 +1 @@
+
diff --git a/scripts/automation/trex_control_plane/unit_tests/client_launching_test.py b/scripts/automation/trex_control_plane/unit_tests/client_launching_test.py
new file mode 100755
index 00000000..42d79af5
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/client_launching_test.py
@@ -0,0 +1,31 @@
+#!/router/bin/python
+from control_plane_general_test import CControlPlaneGeneral_Test
+from Client.trex_client import CTRexClient
+
+import socket
+from nose.tools import assert_raises
+
+
+class CClientLaunching_Test(CControlPlaneGeneral_Test):
+ def __init__(self):
+ super(CClientLaunching_Test, self).__init__()
+ pass
+
+ def setUp(self):
+ pass
+
+ def test_wrong_hostname(self):
+ # self.tmp_server = CTRexClient('some-invalid-hostname')
+ assert_raises (socket.gaierror, CTRexClient, 'some-invalid-hostname' )
+
+ # perform this test only if server is down, but server machine is up
+ def test_refused_connection(self):
+ assert_raises (socket.error, CTRexClient, 'trex-dan') # Assuming 'trex-dan' server is down! otherwise test fails
+
+
+ def test_verbose_mode(self):
+ tmp_client = CTRexClient(self.trex_server_name, verbose = True)
+ pass
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py b/scripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py
new file mode 100755
index 00000000..95f259b8
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/control_plane_general_test.py
@@ -0,0 +1,72 @@
+#!/router/bin/python
+
+__copyright__ = "Copyright 2015"
+
+"""
+Name:
+ control_plane_general_test.py
+
+
+Description:
+
+ This script provides the functionality to test the performance of the T-Rex traffic generator control plane.
+ The scenarios assume a WORKING server is listening and processing the requests.
+
+::
+
+ Topology:
+
+ -------- --------
+ | | | |
+ | Client | <-----JSON-RPC------> | Server |
+ | | | |
+ -------- --------
+
+"""
+from nose.plugins import Plugin
+# import misc_methods
+import sys
+import os
+# from CPlatformUnderTest import *
+# from CPlatform import *
+import termstyle
+import threading
+from common.trex_exceptions import *
+from Client.trex_client import CTRexClient
+# import Client.outer_packages
+# import Client.trex_client
+
+TREX_SERVER = None
+
+class CTRexCP():
+ trex_server = None
+
+def setUpModule(module):
+ pass
+
+def tearDownModule(module):
+ pass
+
+
+class CControlPlaneGeneral_Test(object):#(unittest.TestCase):
+ """This class defines the general testcase of the control plane service"""
+ def __init__ (self):
+ self.trex_server_name = 'csi-kiwi-02'
+ self.trex = CTRexClient(self.trex_server_name)
+ pass
+
+ def setUp(self):
+ # initialize server connection for single client
+ # self.server = CTRexClient(self.trex_server)
+ pass
+
+ ########################################################################
+ #### DO NOT ADD TESTS TO THIS FILE ####
+ #### Tests added here will be run once for EVERY test sub-class ####
+ ########################################################################
+
+ def tearDown(self):
+ pass
+
+ def check_for_trex_crash(self):
+ pass
diff --git a/scripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py b/scripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py
new file mode 100755
index 00000000..37130ee4
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/control_plane_unit_test.py
@@ -0,0 +1,73 @@
+#!/router/bin/python
+
+__copyright__ = "Copyright 2014"
+
+
+
+import os
+import sys
+import nose_outer_packages
+import nose
+from nose.plugins import Plugin
+from rednose import RedNose
+import termstyle
+import control_plane_general_test
+
+class TRexCPConfiguringPlugin(Plugin):
+ def options(self, parser, env = os.environ):
+ super(TRexCPConfiguringPlugin, self).options(parser, env)
+ parser.add_option('-t', '--trex-server', action='store',
+ dest='trex_server', default='trex-dan',
+ help='Specify T-Rex server hostname. This server will be used to test control-plane functionality.')
+
+ def configure(self, options, conf):
+ if options.trex_server:
+ self.trex_server = options.trex_server
+
+ def begin (self):
+ # initialize the CTRexCP global testing class, to be used by and accessible to all tests
+ print "assigned trex_server name"
+ control_plane_general_test.CTRexCP.trex_server = self.trex_server
+
+ def finalize(self, result):
+ pass
+
+
+
+if __name__ == "__main__":
+
+ # setting defaults. By default we run all the test suite
+ specific_tests = False
+ disableLogCapture = False
+ long_test = False
+ report_dir = "reports"
+
+ nose_argv= sys.argv + ['-s', '-v', '--exe', '--rednose', '--detailed-errors']
+
+ try:
+ result = nose.run(argv = nose_argv, addplugins = [RedNose(), TRexCPConfiguringPlugin()])
+ if (result == True):
+ print termstyle.green("""
+ ..::''''::..
+ .;'' ``;.
+ :: :: :: ::
+ :: :: :: ::
+ :: :: :: ::
+ :: .:' :: :: `:. ::
+ :: : : ::
+ :: `:. .:' ::
+ `;..``::::''..;'
+ ``::,,,,::''
+
+ ___ ___ __________
+ / _ \/ _ | / __/ __/ /
+ / ___/ __ |_\ \_\ \/_/
+ /_/ /_/ |_/___/___(_)
+
+ """)
+ sys.exit(0)
+ else:
+ sys.exit(-1)
+
+ finally:
+ pass \ No newline at end of file
diff --git a/scripts/automation/trex_control_plane/unit_tests/functional_test.py b/scripts/automation/trex_control_plane/unit_tests/functional_test.py
new file mode 100755
index 00000000..f742403d
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/functional_test.py
@@ -0,0 +1,160 @@
+#!/router/bin/python
+from control_plane_general_test import CControlPlaneGeneral_Test
+from Client.trex_client import CTRexClient
+
+import socket
+from nose.tools import assert_raises, assert_equal, assert_not_equal
+from common.trex_status_e import TRexStatus
+from common.trex_exceptions import *
+from enum import Enum
+import time
+
+
+class CTRexStartStop_Test(CControlPlaneGeneral_Test):
+ def __init__(self):
+ super(CTRexStartStop_Test, self).__init__()
+ self.valid_start_params = dict( c = 4,
+ m = 1.1,
+ d = 100,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ nc = True,
+ p = True,
+ l = 1000)
+
+ def setUp(self):
+ pass
+
+ def test_mandatory_param_error(self):
+ start_params = dict( c = 4,
+ m = 1.1,
+ d = 70,
+ # f = 'avl/sfr_delay_10_1g.yaml', <-- f (mandatory) is not provided on purpose
+ nc = True,
+ p = True,
+ l = 1000)
+
+ assert_raises(TypeError, self.trex.start_trex, **start_params)
+
+ def test_parameter_name_error(self):
+ ret = self.trex.start_trex( c = 4,
+ wrong_key = 1.1, # <----- This key does not exist in the T-Rex API
+ d = 70,
+ f = 'avl/sfr_delay_10_1g.yaml',
+ nc = True,
+ p = True,
+ l = 1000)
+
+ time.sleep(5)
+
+ # check for failure status
+ run_status = self.trex.get_running_status()
+ assert isinstance(run_status, dict)
+ assert_equal (run_status['state'], TRexStatus.Idle )
+ assert_equal (run_status['verbose'], "T-Rex run failed due to wrong input parameters, or due to reachability issues.")
+ assert_raises(TRexError, self.trex.get_running_info)
+
+ def test_too_early_sample(self):
+ ret = self.trex.start_trex(**self.valid_start_params)
+
+ assert ret==True
+ # issue get_running_info() too soon, without any(!) sleep
+ run_status = self.trex.get_running_status()
+ assert isinstance(run_status, dict)
+ assert_equal (run_status['state'], TRexStatus.Starting )
+ assert_raises(TRexWarning, self.trex.get_running_info)
+
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_start_sampling_on_time(self):
+ ret = self.trex.start_trex(**self.valid_start_params)
+ assert ret==True
+ time.sleep(6)
+
+ run_status = self.trex.get_running_status()
+ assert isinstance(run_status, dict)
+ assert_equal (run_status['state'], TRexStatus.Running )
+
+ run_info = self.trex.get_running_info()
+ assert isinstance(run_info, dict)
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_start_more_than_once_same_user(self):
+ assert self.trex.is_running() == False # first, make sure T-Rex is not running
+ ret = self.trex.start_trex(**self.valid_start_params) # start 1st T-Rex run
+ assert ret == True # make sure 1st run submitted successfully
+ # time.sleep(1)
+ assert_raises(TRexInUseError, self.trex.start_trex, **self.valid_start_params) # try to start T-Rex again
+
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_start_more_than_once_different_users(self):
+ assert self.trex.is_running() == False # first, make sure T-Rex is not running
+ ret = self.trex.start_trex(**self.valid_start_params) # start 1st T-Rex run
+ assert ret == True # make sure 1st run submitted successfully
+ # time.sleep(1)
+
+ tmp_trex = CTRexClient(self.trex_server_name) # initialize another client connecting same server
+ assert_raises(TRexInUseError, tmp_trex.start_trex, **self.valid_start_params) # try to start T-Rex again
+
+ ret = self.trex.stop_trex()
+ assert ret==True # make sure stop succeeded
+ assert self.trex.is_running() == False
+
+ def test_simultaneous_sampling(self):
+ assert self.trex.is_running() == False # first, make sure T-Rex is not running
+ tmp_trex = CTRexClient(self.trex_server_name) # initialize another client connecting same server
+ ret = self.trex.start_trex(**self.valid_start_params) # start T-Rex run
+ assert ret == True # make sure 1st run submitted successfully
+
+ time.sleep(6)
+ # now, sample server from both clients
+ while (self.trex.is_running()):
+ info_1 = self.trex.get_running_info()
+ info_2 = tmp_trex.get_running_info()
+
+ # make sure samples are consistent
+ if self.trex.get_result_obj().is_valid_hist():
+ assert tmp_trex.get_result_obj().is_valid_hist() == True
+ if self.trex.get_result_obj().is_done_warmup():
+ assert tmp_trex.get_result_obj().is_done_warmup() == True
+ # except TRexError as inst: # T-Rex might have stopped between is_running result and get_running_info() call
+ # # hence, ingore that case
+ # break
+
+ assert self.trex.is_running() == False
+
+ def test_fast_toggling(self):
+ assert self.trex.is_running() == False
+ for i in range(20):
+ ret = self.trex.start_trex(**self.valid_start_params) # start T-Rex run
+ assert ret == True
+ assert self.trex.is_running() == False # we expect the status to be 'Starting'
+ ret = self.trex.stop_trex()
+ assert ret == True
+ assert self.trex.is_running() == False
+ pass
+
+
+ def tearDown(self):
+ pass
+
+class CBasicQuery_Test(CControlPlaneGeneral_Test):
+ def __init__(self):
+ super(CBasicQuery_Test, self).__init__()
+ pass
+
+ def setUp(self):
+ pass
+
+ def test_is_running(self):
+ assert self.trex.is_running() == False
+
+
+ def tearDown(self):
+ pass
diff --git a/scripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py b/scripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py
new file mode 100755
index 00000000..b5b78db7
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/nose_outer_packages.py
@@ -0,0 +1,27 @@
+#!/router/bin/python
+
+import sys,site
+import platform,os
+
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, 'python_lib'))
+
+
+TEST_MODULES = ['nose-1.3.4',
+ 'rednose-0.4.1',
+ 'termstyle'
+ ]
+
+def import_test_modules ():
+ sys.path.append(ROOT_PATH)
+ import_module_list(TEST_MODULES)
+
+def import_module_list (modules_list):
+ assert(isinstance(modules_list, list))
+ for p in modules_list:
+ full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
+ fix_path = os.path.normcase(full_path) #CURRENT_PATH+p)
+ site.addsitedir(full_path)
+
+import_test_modules()
diff --git a/scripts/automation/trex_control_plane/unit_tests/sock.py b/scripts/automation/trex_control_plane/unit_tests/sock.py
new file mode 100755
index 00000000..29248e3e
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/sock.py
@@ -0,0 +1,552 @@
+import os
+import dpkt
+import struct
+import socket
+import sys
+import argparse;
+
+
+H_SCRIPT_VER = "0.1"
+
+class sock_driver(object):
+ args=None;
+
+
+def nl (buf):
+ return ( struct.unpack('>I', buf)[0]);
+
+def dump_tuple (t):
+ for obj in t:
+ print hex(obj),",",
+
+class CFlowRec:
+ def __init__ (self):
+ self.is_init_dir=False;
+ self.bytes=0;
+ self.data=None;
+
+ def __str__ (self):
+ if self.is_init_dir :
+ s=" client "
+ else:
+ s=" server "
+ s+= " %d " %(self.bytes)
+ return (s);
+
+
+
+class CPcapFileReader:
+ def __init__ (self,file_name):
+ self.file_name=file_name;
+ self.tuple=None;
+ self.swap=False;
+ self.info=[];
+
+ def dump_info (self):
+ for obj in self.info:
+ print obj
+ #print "'",obj.data,"'"
+
+ def is_client_side (self,swap):
+ if self.swap ==swap:
+ return (True);
+ else:
+ return (False);
+
+ def add_server(self,server,data):
+ r=CFlowRec();
+ r.is_init_dir =False;
+ r.bytes = server
+ r.data=data
+ self.info.append(r);
+
+ def add_client(self,client,data):
+ r=CFlowRec();
+ r.is_init_dir =True;
+ r.bytes = client
+ r.data=data
+ self.info.append(r);
+
+ def check_tcp_flow (self):
+ f = open(self.file_name)
+ pcap = dpkt.pcap.Reader(f)
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+ if ip.p != 6 :
+ raise Exception("not a TCP flow ..");
+ if tcp.flags != dpkt.tcp.TH_SYN :
+ raise Exception("first packet should be with SYN");
+ break;
+ f.close();
+
+ def check_one_flow (self):
+ cnt=1
+ client=0;
+ server=0;
+ client_data=''
+ server_data=''
+ is_c=False # the direction
+ is_s=False
+ f = open(self.file_name)
+ pcap = dpkt.pcap.Reader(f)
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+ pld = tcp.data;
+
+ pkt_swap=False
+ if nl(ip.src) > nl(ip.dst):
+ pkt_swap=True
+ tuple= (nl(ip.dst),nl(ip.src), tcp.dport ,tcp.sport,ip.p );
+ else:
+ tuple= (nl(ip.src),nl(ip.dst) ,tcp.sport,tcp.dport,ip.p );
+
+ if self.tuple == None:
+ self.swap=pkt_swap
+ self.tuple=tuple
+ else:
+ if self.tuple != tuple:
+ raise Exception("More than one flow - can't process this flow");
+
+
+ print " %5d," % (cnt),
+ if self.is_client_side (pkt_swap):
+ print "client",
+ if len(pld) >0 :
+ if is_c==False:
+ is_c=True
+ if is_s:
+ self.add_server(server,server_data);
+ server=0;
+ server_data=''
+ is_s=False;
+
+ client+=len(pld);
+ client_data=client_data+pld;
+ else:
+ if len(pld) >0 :
+ if is_s==False:
+ is_s=True
+ if is_c:
+ self.add_client(client,client_data);
+ client=0;
+ client_data=''
+ is_c=False;
+
+ server+=len(pld)
+ server_data=server_data+pld;
+
+ print "server",
+ print " %5d" % (len(pld)),
+ dump_tuple (tuple)
+ print
+
+ cnt=cnt+1
+
+ if is_c:
+ self.add_client(client,client_data);
+ if is_s:
+ self.add_server(server,server_data);
+
+ f.close();
+
+
+class CClientServerCommon(object):
+
+ def __init__ (self):
+ pass;
+
+ def send_info (self,data):
+ print "server send %d bytes" % (len(data))
+ self.connection.sendall(data)
+
+ def rcv_info (self,msg_size):
+ print "server wait for %d bytes" % (msg_size)
+
+ bytes_recd = 0
+ while bytes_recd < msg_size:
+ chunk = self.connection.recv(min(msg_size - bytes_recd, 2048))
+ if chunk == '':
+ raise RuntimeError("socket connection broken")
+ bytes_recd = bytes_recd + len(chunk)
+
+
+ def process (self,is_server):
+ pcapinfo=self.pcapr.info
+ for obj in pcapinfo:
+ if is_server:
+ if obj.is_init_dir:
+ self.rcv_info (obj.bytes);
+ else:
+ self.send_info (obj.data);
+ else:
+ if obj.is_init_dir:
+ self.send_info (obj.data);
+ else:
+ self.rcv_info (obj.bytes);
+
+ self.connection.close();
+ self.connection = None
+
+
+class CServer(CClientServerCommon) :
+ def __init__ (self,pcapr,port):
+ super(CServer, self).__init__()
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+ server_address = ('', port)
+ print 'starting up on %s port %s' % server_address
+ sock.bind(server_address)
+ sock.listen(1)
+
+ self.pcapr=pcapr; # save the info
+
+ while True:
+ # Wait for a connection
+ print 'waiting for a connection'
+ connection, client_address = sock.accept()
+
+ try:
+ print 'connection from', client_address
+ self.connection = connection;
+
+ self.process(True);
+ finally:
+ if self.connection :
+ self.connection.close()
+ self.connection = None
+
+
+class CClient(CClientServerCommon):
+ def __init__ (self,pcapr,ip,port):
+ super(CClient, self).__init__()
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ #sock.setsockopt(socket.SOL_SOCKET,socket.TCP_MAXSEG,300)
+ server_address = (ip, port)
+ print 'connecting to %s port %s' % server_address
+
+ sock.connect(server_address)
+ self.connection=sock;
+ self.pcapr=pcapr; # save the info
+
+ try:
+
+ self.process(False);
+ finally:
+ if self.connection :
+ self.connection.close()
+ self.connection = None
+
+
+def test_file_load ():
+ pcapr= CPcapFileReader("delay_10_http_browsing_0.pcap")
+ pcapr.check_tcp_flow ()
+ pcapr.check_one_flow ()
+ pcapr.dump_info();
+
+
+def process_options ():
+ parser = argparse.ArgumentParser(usage="""
+ sock [-s|-c] -f file_name
+
+ """,
+ description="offline process a pcap file",
+ epilog=" written by hhaim");
+
+ parser.add_argument("-f", dest="file_name",
+ help=""" the file name to process """,
+ required=True)
+
+ parser.add_argument('-c', action='store_true',
+ help='client side')
+
+ parser.add_argument('-s', action='store_true',
+ help='server side ')
+
+ parser.add_argument('--fix-time', action='store_true',
+ help='fix_time ')
+
+ parser.add_argument('--port', type=int, default=1000,
+ help='server_port ')
+
+ parser.add_argument('--ip', default='127.0.0.1',
+ help='socket ip ')
+
+ parser.add_argument('--debug', action='store_true',
+ help='debug mode')
+
+ parser.add_argument('--version', action='version',
+ version=H_SCRIPT_VER )
+
+
+
+ sock_driver.args = parser.parse_args();
+
+ if sock_driver.args.fix_time :
+ return ;
+ if (sock_driver.args.c ^ sock_driver.args.s) ==0:
+ raise Exception ("you must set either client or server mode");
+
+def load_pcap_file ():
+ pcapr= CPcapFileReader(sock_driver.args.file_name)
+ pcapr.check_tcp_flow ()
+ pcapr.check_one_flow ()
+ pcapr.dump_info();
+ return pcapr
+
+def run_client_side ():
+ pcapr=load_pcap_file ()
+ socket_client = CClient(pcapr,sock_driver.args.ip,sock_driver.args.port);
+
+
+def run_server_side ():
+ pcapr=load_pcap_file ()
+ socket_server = CServer(pcapr,sock_driver.args.port);
+
+
+class CPktWithTime:
+ def __init__ (self,pkt,ts):
+ self.pkt=pkt;
+ self.ts=ts
+ def __cmp__ (self,other):
+ return cmp(self.ts,other.ts);
+
+ def __repr__ (self):
+ s=" %x:%d" %(self.pkt,self.ts)
+ return s;
+
+
+class CPcapFixTime:
+ def __init__ (self,in_file_name,
+ out_file_name):
+ self.in_file_name = in_file_name;
+ self.out_file_name = out_file_name;
+ self.tuple=None;
+ self.swap=False;
+ self.rtt =0;
+ self.rtt_syn_ack_ack =0; # ack on the syn ack
+ self.pkts=[]
+
+ def calc_rtt (self):
+ f = open(self.in_file_name)
+ pcap = dpkt.pcap.Reader(f)
+ cnt=0;
+ first_time_set=False;
+ first_time=0;
+ last_syn_time=0;
+ rtt=0;
+ rtt_syn_ack_ack=0;
+
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+
+ if first_time_set ==False:
+ first_time=ts;
+ first_time_set=True;
+ else:
+ rtt=ts-first_time;
+
+ if ip.p != 6 :
+ raise Exception("not a TCP flow ..");
+
+ if cnt==0 or cnt==1:
+ if (tcp.flags & dpkt.tcp.TH_SYN) != dpkt.tcp.TH_SYN :
+ raise Exception("first packet should be with SYN");
+
+ if cnt==1:
+ last_syn_time=ts;
+
+ if cnt==2:
+ rtt_syn_ack_ack=ts-last_syn_time;
+
+ if cnt > 1 :
+ break;
+ cnt = cnt +1;
+
+ f.close();
+ self.rtt_syn_ack_ack = rtt_syn_ack_ack;
+ return (rtt);
+
+ def is_client_side (self,swap):
+ if self.swap ==swap:
+ return (True);
+ else:
+ return (False);
+
+ def calc_timing (self):
+ self.rtt=self.calc_rtt ();
+
+ def fix_timing (self):
+
+ rtt=self.calc_rtt ();
+ print "RTT is %f msec" % (rtt*1000)
+
+ if (rtt/2)*1000<5:
+ raise Exception ("RTT is less than 5msec, you should replay it");
+
+ time_to_center=rtt/4;
+
+ f = open(self.in_file_name)
+ fo = open(self.out_file_name,"wb")
+ pcap = dpkt.pcap.Reader(f)
+ pcap_out = dpkt.pcap.Writer(fo)
+
+ for ts, buf in pcap:
+ eth = dpkt.ethernet.Ethernet(buf)
+ ip = eth.data
+ tcp = ip.data
+ pld = tcp.data;
+
+ pkt_swap=False
+ if nl(ip.src) > nl(ip.dst):
+ pkt_swap=True
+ tuple= (nl(ip.dst),nl(ip.src), tcp.dport ,tcp.sport,ip.p );
+ else:
+ tuple= (nl(ip.src),nl(ip.dst) ,tcp.sport,tcp.dport,ip.p );
+
+ if self.tuple == None:
+ self.swap=pkt_swap
+ self.tuple=tuple
+ else:
+ if self.tuple != tuple:
+ raise Exception("More than one flow - can't process this flow");
+
+ if self.is_client_side (pkt_swap):
+ self.pkts.append(CPktWithTime( buf,ts+time_to_center));
+ else:
+ self.pkts.append(CPktWithTime( buf,ts-time_to_center));
+
+ self.pkts.sort();
+ for pkt in self.pkts:
+ pcap_out.writepkt(pkt.pkt, pkt.ts)
+
+ f.close()
+ fo.close();
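+
+ # Worked example of the shift above (illustrative numbers): with an RTT of
+ # 20 msec, time_to_center = rtt/4 = 5 msec, so client-side packets are moved
+ # +5 msec and server-side packets -5 msec - roughly what the flow would look
+ # like had it been captured at the midpoint of a symmetric path.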
+
+
+
+def main ():
+ process_options ()
+
+ if sock_driver.args.fix_time:
+ pcap = CPcapFixTime(sock_driver.args.file_name ,sock_driver.args.file_name+".fix.pcap")
+ pcap.fix_timing ()
+
+ if sock_driver.args.c:
+ run_client_side ();
+
+ if sock_driver.args.s:
+ run_server_side ();
+
+
+files_to_convert=[
+'citrix_0',
+'exchange_0',
+'http_browsing_0',
+'http_get_0',
+'http_post_0',
+'https_0',
+'mail_pop_0',
+'mail_pop_1',
+'mail_pop_2',
+'oracle_0',
+'rtsp_0',
+'smtp_0',
+'smtp_1',
+'smtp_2'
+];
+
+
+#files_to_convert=[
+#'http_browsing_0',
+#];
+
+def test_pcap_file ():
+ for file in files_to_convert:
+ fn='tun_'+file+'.pcap';
+ fno='_tun_'+file+'_fixed.pcap';
+ print "convert ",fn
+ pcap = CPcapFixTime(fn,fno)
+ pcap.fix_timing ()
+
+
+
+
+class CPcapFileState:
+ def __init__ (self,file_name):
+ self.file_name = file_name
+ self.is_one_tcp_flow = False;
+ self.is_rtt_valid = False;
+ self.rtt=0;
+ self.rtt_ack=0;
+
+ def calc_stats (self):
+ file = CPcapFileReader(self.file_name);
+ try:
+ file.check_tcp_flow()
+ file.check_one_flow ()
+ self.is_one_tcp_flow = True;
+ except Exception :
+ self.is_one_tcp_flow = False;
+
+ print self.is_one_tcp_flow
+ if self.is_one_tcp_flow :
+ pcap= CPcapFixTime(self.file_name,"");
+ try:
+ pcap.calc_timing ()
+ print "rtt : %d %d \n" % (pcap.rtt*1000,pcap.rtt_syn_ack_ack*1000);
+ if (pcap.rtt*1000) > 10 and (pcap.rtt_syn_ack_ack*1000) >0.0 and (pcap.rtt_syn_ack_ack*1000) <2.0 :
+ self.is_rtt_valid = True
+ self.rtt = pcap.rtt*1000;
+ self.rtt_ack =pcap.rtt_syn_ack_ack*1000;
+ except Exception :
+ pass;
+
+
+def test_pcap_file (file_name):
+ p= CPcapFileState(file_name)
+ p.calc_stats();
+ if p.is_rtt_valid:
+ return True
+ else:
+ return False
+
+def iterate_tree_files (dirwalk,path_to):
+ fl=open("res.csv","w+");
+ cnt=0;
+ cnt_valid=0
+ for root, _, files in os.walk(dirwalk):
+ for f in files:
+ fullpath = os.path.join(root, f)
+ p= CPcapFileState(fullpath)
+ p.calc_stats();
+
+ valid=test_pcap_file (fullpath)
+ s='%s,%d,%d,%d \n' %(fullpath,p.is_rtt_valid,p.rtt,p.rtt_ack)
+ cnt = cnt +1 ;
+ if p.is_rtt_valid:
+ cnt_valid = cnt_valid +1;
+ diro=path_to+"/"+root;
+ fo = os.path.join(diro, f)
+ os.system("mkdir -p "+ diro);
+ pcap = CPcapFixTime(fullpath,fo)
+ pcap.fix_timing ()
+
+ print s
+ fl.write(s);
+ print " %d %% valid out of %d files \n" % (100*cnt_valid/cnt,cnt);
+ fl.close();
+
+path_code="/scratch/tftp/pFidelity/pcap_repository"
+
+iterate_tree_files (path_code,"output")
+#test_pcap_file ()
+#test_pcap_file ()
+#main();
+
diff --git a/scripts/automation/trex_control_plane/unit_tests/test.py b/scripts/automation/trex_control_plane/unit_tests/test.py
new file mode 100755
index 00000000..dac765d6
--- /dev/null
+++ b/scripts/automation/trex_control_plane/unit_tests/test.py
@@ -0,0 +1,36 @@
+from mininet.topo import Topo
+from mininet.link import TCLink
+from mininet.net import Mininet
+from mininet.node import CPULimitedHost
+from mininet.link import TCLink
+from mininet.util import dumpNodeConnections
+from mininet.log import setLogLevel
+
+class MyTopo( Topo ):
+ "Simple topology example."
+
+ def __init__( self ):
+ "Create custom topo."
+
+ # Initialize topology
+ Topo.__init__( self )
+
+ # Add hosts and switches
+ leftHost = self.addHost( 'h1' )
+ rightHost = self.addHost( 'h2' )
+ Switch = self.addSwitch( 's1' )
+
+ # Add links
+ self.addLink( leftHost, Switch ,bw=10, delay='5ms')
+ self.addLink( Switch, rightHost )
+
+
+topos = { 'mytopo': ( lambda: MyTopo() ) }
+
+# 1. http server example
+#
+#mininet> h1 python -m SimpleHTTPServer 80 &
+#mininet> h2 wget -O - h1
+# 2. limit mss example
+# decrease the MTU: ifconfig eth0 mtu 488
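+# 3. launching this topology (assumed invocation; requires a Mininet install):
+#    sudo mn --custom test.py --topo mytopo --link tc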
+
diff --git a/scripts/automation/trex_perf.py b/scripts/automation/trex_perf.py
new file mode 100755
index 00000000..5d11f549
--- /dev/null
+++ b/scripts/automation/trex_perf.py
@@ -0,0 +1,1265 @@
+#!/router/bin/python-2.7.4
+import h_avc
+
+
+from trex_control_plane.client.trex_client import CTRexClient
+import ConfigParser
+import threading
+import time,signal
+import argparse
+import sys
+import os
+import subprocess
+from time import sleep
+import signal
+import textwrap
+import getpass
+import random
+import datetime
+from datetime import timedelta
+import traceback
+import math
+import re
+import termios
+import errno
+import smtplib
+from email.MIMEMultipart import MIMEMultipart
+from email.MIMEBase import MIMEBase
+from email.MIMEText import MIMEText
+from email.Utils import COMMASPACE, formatdate
+from email import Encoders
+from email.mime.image import MIMEImage
+
+from distutils.version import StrictVersion
+
+class TrexRunException(Exception):
+ def __init__ (self, reason, cmd = None, std_log = None, err_log = None):
+ self.reason = reason
+ self.std_log = std_log
+ self.err_log = err_log
+ # generate the error message
+ self.message = "\nSummary of error:\n\n %s\n" % (reason)
+
+ if std_log:
+ self.message += "\nConsole Log:\n\n %s\n" % (self.std_log)
+
+ if err_log:
+ self.message += "\nStd Error Log:\n\n %s\n" % (self.err_log)
+
+ def __str__(self):
+ return self.message
+
+
+############################# utility functions start #################################
+
+def verify_glibc_version ():
+ x = subprocess.check_output("/usr/bin/ldd --version", shell=True)
+ m = re.match("ldd \(GNU libc\) (.*)", x)
+ if not m:
+ raise Exception("Cannot determine LDD version")
+ current_version = m.group(1)
+
+ if StrictVersion(current_version) < StrictVersion("2.5"):
+ raise Exception("GNU ldd version required for graph plotting is at least 2.5, system has %s - please run the simple 'find' job instead" % current_version)
+
+def get_median(numericValues):
+ theValues = sorted(numericValues)
+ if len(theValues) % 2 == 1:
+ return theValues[(len(theValues)+1)/2-1]
+ else:
+ lower = theValues[len(theValues)/2-1]
+ upper = theValues[len(theValues)/2]
+ return (float(lower + upper)) / 2
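+
+# quick sanity examples for get_median (illustrative):
+#   get_median([1, 2, 3, 4]) -> 2.5
+#   get_median([5, 1, 3])    -> 3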
+
+def list_to_clusters(l, n):
+ for i in xrange(0, len(l), n):
+ yield l[i:i+n]
+
+def cpu_histo_to_str (cpu_histo):
+ s = "\nCPU Samplings:\n\n"
+ period = 0
+
+ clusters = list(list_to_clusters(cpu_histo, 10))
+
+ for cluster in clusters:
+ period += 10
+ line = "%3s Seconds: [" % period
+
+ cluster += (10 - len(cluster)) * [None]
+
+ for x in cluster:
+ if (x != None):
+ line += "%5.1f%%, " % x
+ else:
+ line += " "
+
+ line = line[:-2] # trim the comma and space
+ line += " " # return the space
+
+ line += "]\n"
+
+ s += line
+
+ return s
+
+# Terminal Manager Class
+class TermMng:
+ def __enter__(self):
+ self.fd = sys.stdin.fileno()
+ self.old = termios.tcgetattr(self.fd)
+
+ # copy new and remove echo
+ new = self.old[:]
+ new[3] &= ~termios.ECHO
+
+ self.tcsetattr_flags = termios.TCSAFLUSH
+ if hasattr(termios, 'TCSASOFT'):
+ self.tcsetattr_flags |= termios.TCSASOFT
+
+ termios.tcsetattr(self.fd, self.tcsetattr_flags, new)
+
+ def __exit__ (self ,type, value, traceback):
+ termios.tcsetattr(self.fd, self.tcsetattr_flags, self.old)
+
+############################# utility functions stop #################################
+
+def send_mail(send_from, send_to, subject, html_text, txt_attachments=[], images=[], server="localhost"):
+ assert isinstance(send_to, list)
+ assert isinstance(txt_attachments, list)
+ assert isinstance(images, list)
+
+ # create a multi part message
+ msg = MIMEMultipart()
+ msg['From'] = send_from
+ msg['To'] = COMMASPACE.join(send_to)
+ msg['Date'] = formatdate(localtime=True)
+ msg['Subject'] = subject
+ msg['Cc'] = "imarom@cisco.com"
+
+ # add all images to the text as embedded images
+ for image in images:
+ html_text += '<br><img src="cid:{0}"><br>'.format(image)
+ fp = open(image, 'rb')
+ image_object = MIMEImage(fp.read())
+ fp.close()
+ image_object.add_header('Content-ID', image)
+ msg.attach(image_object)
+
+ # attach the main report as embedded HTML
+ msg.attach( MIMEText(html_text, 'html') )
+
+ # attach regular txt files
+ for f in txt_attachments:
+ part = MIMEBase('application', "octet-stream")
+ part.set_payload( open(f,"rb").read() )
+ Encoders.encode_base64(part)
+ part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
+ msg.attach(part)
+
+ smtp = smtplib.SMTP(server)
+ smtp.sendmail(send_from, send_to, msg.as_string())
+ smtp.close()
+
+# convert HTML to image - returning a image file as a string
+def html2image (html_filename, image_filename):
+ cmd = "./phantom/phantomjs ./phantom/rasterize.js {0} {1}".format(html_filename, image_filename)
+ subprocess.call(cmd, shell=True)
+
+ assert os.path.exists(image_filename)
+
+ return (image_filename)
+
+# convert results of run to a string
+def run_results_to_str (results, cond_type):
+ output = ""
+
+ output += "M: {0:<12.6f}\n".format(results['m'])
+ output += "BW: {0:<12,.2f} [Mbps]\n".format(results['tx'])
+ output += "PPS: {0:<12,} [pkts]\n".format(int(results['total-pps']))
+ output += "CPU: {0:.4f} %\n".format(results['cpu_util'])
+ output += "Maximum Latency: {0:<12,} [usec]\n".format(int(results['maximum-latency']))
+ output += "Average Latency: {0:<12,} [usec]\n".format(int(results['average-latency']))
+ output += "Pkt Drop: {0:<12,} [pkts]\n".format(int(results['total-pkt-drop']))
+ output += "Condition: {0:<12} ({1})\n".format("Passed" if check_condition(cond_type, results) else "Failed", cond_type_to_str(cond_type))
+
+ return (output)
+
+############################# classes #################################
+class ErrorHandler(object):
+ def __init__ (self, exception, traceback):
+
+ if isinstance(exception, TrexRunException):
+ logger.log("\n*** Script Terminated Due To Trex Failure")
+ logger.log("\n********************** TRex Error - Report **************************\n")
+ logger.log(str(exception))
+ logger.flush()
+
+ elif isinstance(exception, IOError):
+ logger.log("\n*** Script Terminated Due To IO Error")
+ logger.log("\nEither the router address or the TRex config is bad, or some file is missing - check the traceback below")
+ logger.log("\n********************** IO Error - Report **************************\n")
+ logger.log(str(exception))
+ logger.log(str(traceback))
+ logger.flush()
+
+
+ else:
+ logger.log("\n*** Script Terminated Due To Fatal Error")
+ logger.log("\n********************** Internal Error - Report **************************\n")
+ logger.log(str(exception) + "\n")
+ logger.log(str(traceback))
+ logger.flush()
+
+
+ # call the handler
+ g_kill_cause = "error"
+ os.kill(os.getpid(), signal.SIGUSR1)
+
+
+# simple HTML table
+class HTMLTable:
+ def __init__ (self):
+ self.table_rows = []
+
+ def add_row (self, param, value):
+ self.table_rows.append([param, value])
+
+ def generate_table(self):
+ txt = '<table class="myWideTable" style="width:50%">'
+ txt += "<tr><th>Parameter</th><th>Results</th></tr>"
+
+ for row in self.table_rows:
+ txt += "<tr><td>{0}</td><td>{1}</td></tr>".format(row[0], row[1])
+
+ txt += "</table>"
+
+ return txt
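+
+# minimal usage sketch for HTMLTable (values are made up):
+#   table = HTMLTable()
+#   table.add_row("Maximum Bandwidth", "950.00 [Mbps]")
+#   html  = table.generate_table()   # returns the <table>...</table> markup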
+
+# process results and dispatch it
+class JobReporter:
+ def __init__ (self, job_summary):
+ self.job_summary = job_summary
+ pass
+
+ def __plot_results_to_str (self, plot_results):
+ output = "\nPlotted Points: \n\n"
+ for p in plot_results:
+ output += "BW : {0:8.2f}, ".format(p['tx'])
+ output += "PPS : {0:8,} ".format(int(p['total-pps']))
+ output += "CPU : {0:8.2f} %, ".format(p['cpu_util'])
+ output += "Max Latency : {0:10,}, ".format(int(p['maximum-latency']))
+ output += "Avg Latency : {0:10,}, ".format(int(p['average-latency']))
+ output += "Pkt Drop : {0:12,}, \n".format(int(p['total-pkt-drop']))
+
+ return (output + "\n")
+
+ def __summary_to_string (self):
+ output = ""
+
+ output += "\n-== Job Completed Successfully ==-\n\n"
+ output += "Job Report:\n\n"
+ output += "Job Name: {0}\n".format(self.job_summary['job_name'])
+ output += "YAML file: {0}\n".format(self.job_summary['yaml'])
+ output += "Job Type: {0}\n".format(self.job_summary['job_type_str'])
+ output += "Condition: {0}\n".format(self.job_summary['cond_name'])
+ output += "Job Dir: {0}\n".format(self.job_summary['job_dir'])
+ output += "Job Log: {0}\n".format(self.job_summary['log_filename'])
+ output += "Email Report: {0}\n".format(self.job_summary['email'])
+ output += "Job Total Time: {0}\n\n".format(self.job_summary['total_run_time'])
+
+ if (self.job_summary.get('find_results') != None):
+ find_results = self.job_summary['find_results']
+ output += ("Maximum BW Point Details:\n\n")
+ output += run_results_to_str(find_results, self.job_summary['cond_type'])
+
+ if (self.job_summary.get('plot_results') != None):
+ plot_results = self.job_summary['plot_results']
+ output += self.__plot_results_to_str(plot_results)
+
+ return output
+
+
+ # simple print to screen of the job summary
+ def print_summary (self):
+ summary = self.__summary_to_string()
+ logger.log(summary)
+
+ def __generate_graph_report (self, plot_results):
+ graph_data = str( [ [x['tx'], x['cpu_util']/100, x['maximum-latency'], x['average-latency']] for x in plot_results ] )
+ table_data = str( [ [x['tx'], x['total-pps'], x['cpu_util']/100, x['norm_cpu'], x['maximum-latency'], x['average-latency'], x['total-pkt-drop']] for x in plot_results ] )
+
+ with open ("graph_template.html", "r") as myfile:
+ data = myfile.read()
+ data = data.replace("!@#$template_fill_head!@#$", self.job_summary['yaml'])
+ data = data.replace("!@#$template_fill_graph!@#$", graph_data[1:(len(graph_data) - 1)])
+ data = data.replace("!@#$template_fill_table!@#$", table_data[1:(len(table_data) - 1)])
+
+ # generate HTML report
+ graph_filename = self.job_summary['graph_filename']
+ text_file = open(graph_filename, "w")
+ text_file.write(str(data))
+ text_file.close()
+
+ return graph_filename
+
+ def __generate_body_report (self):
+ job_setup_table = HTMLTable()
+
+ job_setup_table.add_row("User Name", self.job_summary['user'])
+ job_setup_table.add_row("Job Name", self.job_summary['job_name'])
+ job_setup_table.add_row("Job Type", self.job_summary['job_type_str'])
+ job_setup_table.add_row("Test Condition", self.job_summary['cond_name'])
+ job_setup_table.add_row("YAML File", self.job_summary['yaml'])
+ job_setup_table.add_row("Job Total Time", "{0}".format(self.job_summary['total_run_time']))
+
+ job_summary_table = HTMLTable()
+
+ find_results = self.job_summary['find_results']
+
+ if find_results != None:
+ job_summary_table.add_row("Maximum Bandwidth", "{0:,.2f} [Mbps]".format(find_results['tx']))
+ job_summary_table.add_row("Maximum PPS", "{0:,} [pkts]".format(int(find_results['total-pps'])))
+ job_summary_table.add_row("CPU Util.", "{0:.2f}%".format(find_results['cpu_util']))
+ job_summary_table.add_row("Maximum Latency", "{0:,} [usec]".format(int(find_results['maximum-latency'])))
+ job_summary_table.add_row("Average Latency", "{0:,} [usec]".format(int(find_results['average-latency'])))
+ job_summary_table.add_row("Total Pkt Drop", "{0:,} [pkts]".format(int(find_results['total-pkt-drop'])))
+
+ with open ("report_template.html", "r") as myfile:
+ data = myfile.read()
+ data = data.replace("!@#$template_fill_job_setup_table!@#$", job_setup_table.generate_table())
+ data = data.replace("!@#$template_fill_job_summary_table!@#$", job_summary_table.generate_table())
+
+ return data
+
+ # create an email report and send to the user
+ def send_email_report (self):
+ images = []
+
+ logger.log("\nCreating E-Mail Report...\n")
+
+ # generate main report
+ report_str = self.__generate_body_report()
+
+ # generate graph report (if exists)
+ plot_results = self.job_summary['plot_results']
+ if plot_results:
+ logger.log("Generating Plot Results HTML ...\n")
+ graph_filename = self.__generate_graph_report(plot_results)
+ logger.log("Converting HTML to image ...\n")
+ images.append(html2image(graph_filename, graph_filename + ".png"))
+
+ else:
+ graph_filename = None
+
+ # create email
+ from_addr = 'TrexReporter@cisco.com'
+ to_addr = []
+ to_addr.append(self.job_summary['email'])
+ to_addr.append('imarom@cisco.com')
+
+ attachments = []
+ attachments.append(self.job_summary['log_filename'])
+ logger.log("Attaching log {0}...".format(self.job_summary['log_filename']))
+
+ if graph_filename:
+ attachments.append(graph_filename)
+ logger.log("Attaching plotting report {0}...".format(graph_filename))
+
+ logger.flush()
+
+ send_mail(from_addr, to_addr, "TRex Performance Report", report_str, attachments, images)
+ logger.log("\nE-mail sent successfully to: " + self.job_summary['email'])
+
+# dummy logger in case logger creation failed
+class DummyLogger(object):
+ def __init__(self):
+ pass
+
+ def log(self, text, force = False, newline = True):
+ text_out = (text + "\n") if newline else text
+ sys.stdout.write(text_out)
+
+ def console(self, text, force = False, newline = True):
+ self.log(text, force, newline)
+
+ def flush (self):
+ pass
+
+# logger object
+class MyLogger(object):
+
+ def __init__(self, log_filename):
+ # Store the original stdout and stderr
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ self.stdout_fd = os.dup(sys.stdout.fileno())
+ self.devnull = os.open('/dev/null', os.O_WRONLY)
+ self.log_file = open(log_filename, 'w')
+ self.silenced = False
+ self.pending_log_file_prints = 0
+ self.active = True
+
+ def shutdown (self):
+ self.active = False
+
+ def reactive (self):
+ self.active = True
+
+ # silence all prints from stdout
+ def silence(self):
+ os.dup2(self.devnull, sys.stdout.fileno())
+ self.silenced = True
+
+ # restore stdout status
+ def restore(self):
+ sys.stdout.flush()
+ sys.stderr.flush()
+ # Restore normal stdout
+ os.dup2(self.stdout_fd, sys.stdout.fileno())
+ self.silenced = False
+
+ #print a message to the log (both stdout / log file)
+ def log(self, text, force = False, newline = True):
+ if not self.active:
+ return
+
+ self.log_file.write((text + "\n") if newline else text)
+ self.pending_log_file_prints += 1
+
+ if (self.pending_log_file_prints >= 10):
+ self.log_file.flush()
+ self.pending_log_file_prints = 0
+
+ self.console(text, force, newline)
+
+ # print a message to the console alone
+ def console(self, text, force = False, newline = True):
+ if not self.active:
+ return
+
+ _text = (text + "\n") if newline else text
+
+ # if we are silenced and not forced - go home
+ if self.silenced and not force:
+ return
+
+ if self.silenced:
+ os.write(self.stdout_fd, _text)
+ else:
+ sys.stdout.write(_text)
+
+ sys.stdout.flush()
+
+ # flush
+ def flush(self):
+ sys.stdout.flush()
+ self.log_file.flush()
+
+ def __del__(self):
+ os.close(self.devnull)
+ if self.log_file:
+ self.log_file.flush()
+ self.log_file.close()
+
+
+# simple progress bar
+class ProgressBar(threading.Thread):
+ def __init__(self, time_sec, router):
+ super(ProgressBar, self).__init__()
+ self.active = True
+ self.time_sec = time_sec + 15
+ self.router = router
+
+ def run (self):
+ global g_stop
+
+ col = 40
+ delta_for_sec = float(col) / self.time_sec
+
+ accu = 0.0
+
+ for i in range(self.time_sec):
+ if (self.active == False):
+ # print 100% - done
+ bar = "\r[" + ('#' * col) + "] {0:.2f} %".format(100)
+ logger.console(bar, force = True, newline = False)
+ break
+
+ if (g_stop == True):
+ break
+
+ sleep(1)
+ accu += delta_for_sec
+ bar = "\r[" + ('#' * int(accu)) + (' ' * (col - int(accu))) + "] {0:.2f} %".format( (accu/col) * 100 )
+ bar += " / Router CPU: {0:.2f} %".format(self.router.get_last_cpu_util())
+ logger.console(bar, force = True, newline = False)
+
+ logger.console("\r\n", force = True, newline = False)
+ logger.flush()
+
+ def stop (self):
+ self.active = False
+ self.join()
+
+# global vars
+
+g_stop = False
+logger = DummyLogger()
+
+# cleanup list is a list of callables to be run when Ctrl+C is caught
+cleanup_list = []
+
+################ threads ########################
+
+# sampler
+class Sample_Thread (threading.Thread):
+ def __init__(self, threadID, router):
+
+ threading.Thread.__init__(self)
+ self.threadID = threadID
+ self.router = router
+ self.stop = False
+
+ def run(self):
+ self.router.clear_sampling_stats()
+
+ try:
+ while (self.stop==False) and (g_stop==False):
+ self.router.sample_stats()
+ time.sleep(1);
+ except Exception as e:
+ ErrorHandler(e, traceback.format_exc())
+
+ def do_stop(self):
+ self.stop = True
+
+
+def general_cleanup_on_error ():
+ global g_stop
+ global cleanup_list
+
+ # mark all the threads to finish
+ g_stop = True;
+
+ # shutdown and flush the logger
+ logger.shutdown()
+ if logger:
+ logger.flush()
+
+ # execute the registered callables
+ for c in cleanup_list:
+ c()
+
+ # dummy wait for threads to finish (TODO: make this smarter)
+ time.sleep(2)
+ exit(-1)
+
+# just a dummy for preventing chain calls
+def signal_handler_dummy (sig_id, frame):
+ pass
+
+def error_signal_handler (sig_id, frame):
+ # make sure no chain of calls
+ signal.signal(signal.SIGUSR1, signal_handler_dummy)
+ signal.signal(signal.SIGINT, signal_handler_dummy)
+
+ general_cleanup_on_error()
+
+def int_signal_handler(sig_id, frame):
+ # make sure no chain of calls
+ signal.signal(signal.SIGINT, signal_handler_dummy)
+ signal.signal(signal.SIGUSR1, signal_handler_dummy)
+
+ logger.log("\n\nCaught Ctrl+C... Cleaning up!\n\n")
+
+ general_cleanup_on_error()
+
+
+# Trex with sampling
+class CTRexWithRouter:
+ def __init__(self, trex, trex_params):
+ self.trex = trex;
+ self.trex_params = trex_params
+
+ if self.trex_params['router_type'] == "ASR":
+ self.router = h_avc.ASR1k(self.trex_params['router_interface'], self.trex_params['router_port'], self.trex_params['router_password'])
+ elif self.trex_params['router_type'] == "ISR":
+ self.router = h_avc.ISR(self.trex_params['router_interface'], self.trex_params['router_port'], self.trex_params['router_password'])
+ else:
+ raise Exception("unknown router type in config file")
+
+ self.router.connect()
+
+ def get_router (self):
+ return self.router
+
+ def run(self, m, duration):
+
+ self.sample_thread = Sample_Thread(1, self.router)
+ self.sample_thread.start();
+
+ # launch trex
+ try:
+# trex_res = self.trex.run(m, duration);
+ self.trex.start_trex(c = self.trex_params['trex_cores'],
+ m = m,
+ d = duration,
+ f = self.trex_params['trex_yaml_file'],
+ nc = True,
+ l = self.trex_params['trex_latency'],
+ limit_ports = self.trex_params['trex_limit_ports'])
+ self.trex.sample_to_run_finish(20) # collect trex-sample every 20 seconds.
+ except Exception:
+ self.sample_thread.do_stop() # signal to stop
+ self.sample_thread.join() # wait for it to really stop
+ raise
+
+ self.sample_thread.do_stop() # signal to stop
+ self.sample_thread.join() # wait for it to really stop
+
+ self.res = self.trex.get_result_obj()
+
+ results = {}
+ results['status'] = True
+ results['trex_results'] = self.res
+ results['avc_results'] = self.router.get_stats()
+
+ return (results)
+ #return(trex_res.get_status() == STATUS_OK);
+
+# sanity checks to verify the run really went well
+def sanity_test_run (trex_r, avc_r):
+ pass
+ #if (sum(avc_r['cpu_histo']) == 0):
+ #raise h_trex.TrexRunException("CPU utilization from router is zero, check connectivity")
+
+def _trex_run (job_summary, m, duration):
+
+ trex_thread = job_summary['trex_thread']
+
+ p = ProgressBar(duration, trex_thread.get_router())
+ p.start()
+
+ try:
+ results = trex_thread.run(m, duration)
+ except Exception,e:
+ p.stop()
+ raise
+
+ p.stop()
+
+ if (results == None):
+ raise Exception("Failed to run Trex")
+
+ # fetch values
+ trex_r = results['trex_results']
+ avc_r = results['avc_results']
+
+ sanity_test_run(trex_r, avc_r)
+
+ res_dict = {}
+
+ res_dict['m'] = m
+ total_tx_bps = trex_r.get_last_value("trex-global.data.m_tx_bps")
+ res_dict['tx'] = total_tx_bps / (1000 * 1000) # stored in Mbps (extracted in bps)
+
+ res_dict['cpu_util'] = avc_r['cpu_util']
+
+ if int(res_dict['cpu_util']) == 0:
+ res_dict['norm_cpu']=1;
+ else:
+ res_dict['norm_cpu'] = (res_dict['tx'] / res_dict['cpu_util']) * 100
+
+ res_dict['maximum-latency'] = max ( trex_r.get_max_latency().values() ) #trex_r.res['maximum-latency']
+ res_dict['average-latency'] = trex_r.get_avg_latency()['all'] #trex_r.res['average-latency']
+
+ logger.log(cpu_histo_to_str(avc_r['cpu_histo']))
+
+ res_dict['total-pkt-drop'] = trex_r.get_total_drops()
+ res_dict['expected-bps'] = trex_r.get_expected_tx_rate()['m_tx_expected_bps']
+ res_dict['total-pps'] = get_median( trex_r.get_value_list("trex-global.data.m_tx_pps") )#trex_r.res['total-pps']
+ res_dict['m_total_pkt'] = trex_r.get_last_value("trex-global.data.m_total_tx_pkts")
+
+ res_dict['latency_condition'] = job_summary['trex_params']['trex_latency_condition']
+
+ return res_dict
+
+def trex_run (job_summary, m, duration):
+ res = _trex_run (job_summary, m, duration)
+ return res
+
+
+def m_to_mbps (job_summary, m):
+ return (m * job_summary['base_m_unit'])
+
+# find the correct range of M
+def find_m_range (job_summary):
+
+ trex = job_summary['trex']
+ trex_config = job_summary['trex_params']
+
+ # if not provided - guess the correct range of bandwidth
+ if not job_summary['m_range']:
+ m_range = [0.0, 0.0]
+ # 1 Mbps -> 1 Gbps
+ LOW_TX = 1.0 * 1000 * 1000
+ MAX_TX = 1.0 * 1000 * 1000 * 1000
+
+ # for 10g go to 10g
+ if trex_config['trex_machine_type'] == "10G":
+ MAX_TX *= 10
+
+ # dual injection can potentially reach X2 speed
+ if trex_config['trex_is_dual'] == True:
+ MAX_TX *= 2
+
+ else:
+ m_range = job_summary['m_range']
+ LOW_TX = m_range[0] * 1000 * 1000
+ MAX_TX = m_range[1] * 1000 * 1000
+
+
+ logger.log("\nSystem Settings - Min: {0:,} Mbps / Max: {1:,} Mbps".format(LOW_TX / (1000 * 1000), MAX_TX / (1000 * 1000)))
+ logger.log("\nTrying to get system minimum M and maximum M...")
+
+ res_dict = trex_run(job_summary, 1, 30)
+
+ # figure out low / high M
+ m_range[0] = (LOW_TX / res_dict['expected-bps']) * 1
+ m_range[1] = (MAX_TX / res_dict['expected-bps']) * 1
+
+
+ # return both the m_range and the base m unit for future calculation
+ results = {}
+ results['m_range'] = m_range
+ results['base_m_unit'] = res_dict['expected-bps'] /(1000 * 1000)
+
+ return (results)
+
+# calculate points between m_range[0] and m_range[1]
+def calculate_plot_points (job_summary, m_range, plot_count):
+
+ cond_type = job_summary['cond_type']
+ delta_m = (m_range[1] - m_range[0]) / plot_count
+
+ m_current = m_range[0]
+ m_end = m_range[1]
+
+ logger.log("\nStarting Plot Graph Task ...\n")
+ logger.log("Plotting Range Is From: {0:.2f} [Mbps] To: {1:.2f} [Mbps] Over {2} Points".format(m_to_mbps(job_summary, m_range[0]),
+ m_to_mbps(job_summary, m_range[1]),
+ plot_count))
+ logger.log("Delta Between Points is {0:.2f} [Mbps]".format(m_to_mbps(job_summary, delta_m)))
+ plot_points = []
+
+ duration = 180
+
+ iter = 1
+
+ trex = job_summary['trex']
+ while (iter <= plot_count):
+ logger.log("\nPlotting Point [{0}/{1}]:\n".format(iter, plot_count))
+ logger.log("Estimated BW ~= {0:,.2f} [Mbps]\n".format(m_to_mbps(job_summary, m_current)))
+ logger.log("M = {0:.6f}".format(m_current))
+ logger.log("Duration = {0} seconds\n".format(duration))
+
+ res_dict = trex_run(job_summary, m_current, duration)
+ print_trex_results(res_dict, cond_type)
+
+ plot_points.append(dict(res_dict))
+
+ m_current += delta_m
+ iter = iter + 1
+
+ # last point - make sure it's the maximum point
+ if (iter == plot_count):
+ m_current = m_range[1]
+
+ #print "waiting for system to stabilize ..."
+ #time.sleep(30);
+
+ return plot_points
+
+
+def cond_type_to_str (cond_type):
+ return "Max Latency" if cond_type=='latency' else "Pkt Drop"
+
+# success condition (latency or drop)
+def check_condition (cond_type, res_dict):
+ if cond_type == 'latency':
+ if res_dict['maximum-latency'] < res_dict['latency_condition']:
+ return True
+ else:
+ return False
+
+ # drop condition is a bit more complex - it should create high latency in addition to 0.2% drop
+ elif cond_type == 'drop':
+ if (res_dict['maximum-latency'] > (res_dict['latency_condition']+2000) ) and (res_dict['total-pkt-drop'] > (0.002 * res_dict['m_total_pkt'])):
+ return False
+ else:
+ return True
+
+ assert(0)
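+
+# illustrative examples for check_condition (numbers are made up):
+#   latency: maximum-latency 2500 < latency_condition 3000              -> True  (pass)
+#   drop:    maximum-latency 6000 > 3000 + 2000 and drops above 0.2%    -> False (fail)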
+
+def print_trex_results (res_dict, cond_type):
+ logger.log("\nRun Results:\n")
+ output = run_results_to_str(res_dict, cond_type)
+ logger.log(output)
+
+
+######################## describe a find job ########################
+class FindJob:
+ # init a job object with min / max
+ def __init__ (self, min, max, job_summary):
+ self.min = float(min)
+ self.max = float(max)
+ self.job_summary = job_summary
+ self.cond_type = job_summary['cond_type']
+ self.success_points = []
+ self.iter_num = 1
+ self.found = False
+ self.iter_duration = 300
+
+ def _distance (self):
+ return ( (self.max - self.min) / min(self.max, self.min) )
+
+ def time_to_end (self):
+ time_in_sec = (self.iters_to_end() * self.iter_duration)
+ return timedelta(seconds = time_in_sec)
+
+ def iters_to_end (self):
+ # find 2% point
+ ma = self.max
+ mi = self.min
+ iter = 0
+
+ while True:
+ dist = (ma - mi) / min(ma , mi)
+ if dist < 0.02:
+ break
+ if random.choice(["up", "down"]) == "down":
+ ma = (ma + mi) / 2
+ else:
+ mi = (ma + mi) / 2
+
+ iter += 1
+
+ return (iter)
+
+ def _cur (self):
+ return ( (self.min + self.max) / 2 )
+
+ def _add_success_point (self, res_dict):
+ self.success_points.append(res_dict.copy())
+
+ def _is_found (self):
+ return (self.found)
+
+ def _next_iter_duration (self):
+ return (self.iter_duration)
+
+ # execute iteration
+ def _execute (self):
+ # reset the found var before running
+ self.found = False
+
+ # run and print results
+ res_dict = trex_run(self.job_summary, self._cur(), self.iter_duration)
+
+ self.iter_num += 1
+ cur = self._cur()
+
+ if (self._distance() < 0.02):
+ if (check_condition(self.cond_type, res_dict)):
+ # distance < 2% and success - we are done
+ self.found = True
+ else:
+ # lower to 90% of current and retry
+ self.min = cur * 0.9
+ self.max = cur
+ else:
+ # success
+ if (check_condition(self.cond_type, res_dict)):
+ self.min = cur
+ else:
+ self.max = cur
+
+ if (check_condition(self.cond_type, res_dict)):
+ self._add_success_point(res_dict)
+
+ return res_dict
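+
+ # illustrative narrowing step (made-up numbers): with min=1.0 and max=2.0,
+ # a passing run at cur=1.5 raises min to 1.5 while a failing run lowers max
+ # to 1.5; the search ends once (max - min) / min drops below 2% and the
+ # last run passed the condition.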
+
+ # find the max M before the failure condition is hit
+ def find_max_m (self):
+
+ res_dict = {}
+ while not self._is_found():
+
+ logger.log("\n-> Starting Find Iteration #{0}\n".format(self.iter_num))
+ logger.log("Estimated BW ~= {0:,.2f} [Mbps]".format(m_to_mbps(self.job_summary, self._cur())))
+ logger.log("M = {0:.6f}".format(self._cur()))
+ logger.log("Duration = {0} seconds".format(self._next_iter_duration()))
+ logger.log("Current BW Range = {0:,.2f} [Mbps] / {1:,.2f} [Mbps]".format(m_to_mbps(self.job_summary, self.min), m_to_mbps(self.job_summary, self.max)))
+ logger.log("Est. Iterations Left = {0} Iterations".format(self.iters_to_end()))
+ logger.log("Est. Time Left = {0}\n".format(self.time_to_end()))
+
+ res_dict = self._execute()
+
+ print_trex_results(res_dict, self.cond_type)
+
+ find_results = res_dict.copy()
+ find_results['max_m'] = self._cur()
+ return (find_results)
+
+######################## describe a plot job ########################
+class PlotJob:
+ def __init__(self, findjob):
+ self.job_summary = findjob.job_summary
+
+ self.plot_points = list(findjob.success_points)
+ self.plot_points.sort(key = lambda item:item['tx'])
+
+ def plot (self, duration = 300):
+ return self.plot_points
+
+ # add points if needed
+ #iter = 0
+ #for point in self.success_points:
+ #iter += 1
+ #logger.log("\nPlotting Point [{0}/{1}]:\n".format(iter, self.plot_count))
+ #logger.log("Estimated BW ~= {0:,.2f} [Mbps]\n".format(m_to_mbps(self.job_summary, point['m'])))
+ #logger.log("M = {0:.6f}".format(point['m']))
+ #logger.log("Duration = {0} seconds\n".format(duration))
+
+ #res_dict = trex_run(self.job_summary, point['m'], duration)
+ #print_trex_results(res_dict, self.job_summary['cond_type'])
+
+ #self.plot_points.append(dict(res_dict))
+
+ #self.plot_points = list(self.success_points)
+
+ #print self.plot_points
+ #self.plot_points.sort(key = lambda item:item['m'])
+ #print self.plot_points
+
+ #return self.plot_points
+
+
+def generate_job_id ():
+ return (str(int(random.getrandbits(32))))
+
+def print_header ():
+ logger.log("--== TRex Performance Tool v1.0 (2014) ==--")
+
+# print startup summary
+def log_startup_summary (job_summary):
+
+ trex = job_summary['trex']
+ trex_config = job_summary['trex_params']
+
+ logger.log("\nWork Request Details:\n")
+ logger.log("Setup Details:\n")
+ logger.log("T-Rex Config File: {0}".format(job_summary['config_file']))
+ logger.log("Machine Name: {0}".format(trex_config['trex_name']))
+ logger.log("T-Rex Type: {0}".format(trex_config['trex_machine_type']))
+ logger.log("T-Rex Dual Int. Tx: {0}".format(trex_config['trex_is_dual']))
+ logger.log("Router Interface: {0}".format(trex_config['router_interface']))
+
+ logger.log("\nJob Details:\n")
+ logger.log("Job Name: {0}".format(job_summary['job_name']))
+ logger.log("YAML file: {0}".format(job_summary['yaml']))
+ logger.log("Job Type: {0}".format(job_summary['job_type_str']))
+ logger.log("Condition Type: {0}".format(job_summary['cond_name']))
+ logger.log("Job Log: {0}".format(job_summary['log_filename']))
+ logger.log("Email Report: {0}".format(job_summary['email']))
+
+# logger.log("\nTrex Command Used:\n{0}".format(trex.build_cmd(1, 10)))
+
+def load_trex_config_params (filename, yaml_file):
+ config = {}
+
+ parser = ConfigParser.ConfigParser()
+
+ try:
+ parser.read(filename)
+
+ config['trex_name'] = parser.get("trex", "machine_name")
+ config['trex_port'] = parser.get("trex", "machine_port")
+ config['trex_history_size'] = parser.getint("trex", "history_size")
+
+ config['trex_latency_condition'] = parser.getint("trex", "latency_condition")
+ config['trex_yaml_file'] = yaml_file
+
+ # support legacy data
+ config['trex_latency'] = parser.getint("trex", "latency")
+ config['trex_limit_ports'] = parser.getint("trex", "limit_ports")
+ config['trex_cores'] = parser.getint("trex", "cores")
+ config['trex_machine_type'] = parser.get("trex", "machine_type")
+ config['trex_is_dual'] = parser.getboolean("trex", "is_dual")
+
+ # optional Trex parameters
+ if parser.has_option("trex", "config_file"):
+ config['trex_config_file'] = parser.get("trex", "config_file")
+ else:
+ config['trex_config_file'] = None
+
+ if parser.has_option("trex", "misc_params"):
+ config['trex_misc_params'] = parser.get("trex", "misc_params")
+ else:
+ config['trex_misc_params'] = None
+
+ # router section
+
+ if parser.has_option("router", "port"):
+ config['router_port'] = parser.get("router", "port")
+ else:
+ # simple telnet port
+ config['router_port'] = 23
+
+ config['router_interface'] = parser.get("router", "interface")
+ config['router_password'] = parser.get("router", "password")
+ config['router_type'] = parser.get("router", "type")
+
+ except Exception as inst:
+ raise TrexRunException("\nBad configuration file: '{0}'\n\n{1}".format(filename, inst))
+
+ return config
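+
+# a minimal configuration sketch matching the keys read above (all values are placeholders):
+#
+#   [trex]
+#   machine_name      = my-trex-host
+#   machine_port      = 8090
+#   history_size      = 100
+#   latency_condition = 3000
+#   latency           = 1000
+#   limit_ports       = 4
+#   cores             = 2
+#   machine_type      = 10G
+#   is_dual           = True
+#
+#   [router]
+#   interface         = 10.1.1.1
+#   password          = placeholder
+#   type              = ASR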
+
+def prepare_for_run (job_summary):
+ global logger
+
+ # generate unique id
+ job_summary['job_id'] = generate_job_id()
+ job_summary['job_dir'] = "trex_job_{0}".format(job_summary['job_id'])
+
+ job_summary['start_time'] = datetime.datetime.now()
+
+ if not job_summary['email']:
+ job_summary['user'] = getpass.getuser()
+ job_summary['email'] = "{0}@cisco.com".format(job_summary['user'])
+
+ # create dir for reports
+ try:
+ job_summary['job_dir'] = os.path.abspath( os.path.join(os.getcwd(), 'logs', job_summary['job_dir']) )
+ print job_summary['job_dir']
+ os.makedirs( job_summary['job_dir'] )
+
+ except OSError as err:
+ if err.errno == errno.EACCES:
+ # fall back. try creating the dir name at /tmp path
+ job_summary['job_dir'] = os.path.join("/tmp/", "trex_job_{0}".format(job_summary['job_id']) )
+ os.makedirs(job_summary['job_dir'])
+
+ job_summary['log_filename'] = os.path.join(job_summary['job_dir'], "trex_log_{0}.txt".format(job_summary['job_id']))
+ job_summary['graph_filename'] = os.path.join(job_summary['job_dir'], "trex_graph_{0}.html".format(job_summary['job_id']))
+
+ # init logger
+ logger = MyLogger(job_summary['log_filename'])
+
+ # mark those as not populated yet
+ job_summary['find_results'] = None
+ job_summary['plot_results'] = None
+
+ # create trex client instance
+ trex_params = load_trex_config_params(job_summary['config_file'],job_summary['yaml'])
+ trex = CTRexClient(trex_host = trex_params['trex_name'],
+ max_history_size = trex_params['trex_history_size'],
+ trex_daemon_port = trex_params['trex_port'])
+
+ job_summary['trex'] = trex
+ job_summary['trex_params'] = trex_params
+
+ # create trex task thread
+ job_summary['trex_thread'] = CTRexWithRouter(trex, trex_params);
+
+ # in case of an error we need to call the remote cleanup
+ cleanup_list.append(trex.stop_trex)
+
+ # signal handler
+ signal.signal(signal.SIGINT, int_signal_handler)
+ signal.signal(signal.SIGUSR1, error_signal_handler)
+
+
+def after_run (job_summary):
+
+ job_summary['total_run_time'] = datetime.datetime.now() - job_summary['start_time']
+ reporter = JobReporter(job_summary)
+ reporter.print_summary()
+ reporter.send_email_report()
+
+def launch (job_summary):
+
+ prepare_for_run(job_summary)
+
+ print_header()
+
+ log_startup_summary(job_summary)
+
+ # find the correct M range if not provided
+ range_results = find_m_range(job_summary)
+
+ job_summary['base_m_unit'] = range_results['base_m_unit']
+
+ if job_summary['m_range']:
+ m_range = job_summary['m_range']
+ else:
+ m_range = range_results['m_range']
+
+ logger.log("\nJob Bandwidth Working Range:\n")
+ logger.log("Min M = {0:.6f} / {1:,.2f} [Mbps] \nMax M = {2:.6f} / {3:,.2f} [Mbps]".format(m_range[0], m_to_mbps(job_summary, m_range[0]), m_range[1], m_to_mbps(job_summary, m_range[1])))
+
+ # job time
+ findjob = FindJob(m_range[0], m_range[1], job_summary)
+ job_summary['find_results'] = findjob.find_max_m()
+
+ if job_summary['job_type'] == "all":
+ # plot points to graph
+ plotjob = PlotJob(findjob)
+ job_summary['plot_results'] = plotjob.plot()
+
+ after_run(job_summary)
+
+
+# populate the fields for run
+def populate_fields (job_summary, args):
+ job_summary['config_file'] = args.config_file
+ job_summary['job_type'] = args.job
+ job_summary['cond_type'] = args.cond_type
+ job_summary['yaml'] = args.yaml
+
+ if args.n:
+ job_summary['job_name'] = args.n
+ else:
+ job_summary['job_name'] = "Nameless"
+
+ # did the user provide an email
+ if args.e:
+ job_summary['email'] = args.e
+ else:
+ job_summary['email'] = None
+
+ # did the user provide a range ?
+ if args.m:
+ job_summary['m_range'] = args.m
+ else:
+ job_summary['m_range'] = None
+
+ # some prettier strings for display
+ job_summary['cond_name'] = 'Drop Pkt' if (args.cond_type == 'drop') else 'High Latency'
+
+ if args.job == "find":
+ job_summary['job_type_str'] = "Find Max BW"
+ elif args.job == "plot":
+ job_summary['job_type_str'] = "Plot Graph"
+ else:
+ job_summary['job_type_str'] = "Find Max BW & Plot Graph"
+
+ if args.job != "find":
+ verify_glibc_version()
+
+
+
+# verify file exists for argparse
+def is_valid_file (parser, err_msg, filename):
+ if not os.path.exists(filename):
+ parser.error("{0}: '{1}'".format(err_msg, filename))
+ else:
+ return (filename) # return the validated filename
+
+def entry ():
+
+ job_summary = {}
+
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument("-n", help="Job Name",
+ type = str)
+
+ parser.add_argument("-m", help="M Range [default: auto calculation]",
+ nargs = 2,
+ type = float)
+
+ parser.add_argument("-e", help="E-Mail for report [default: whoami@cisco.com]",
+ type = str)
+
+ parser.add_argument("-c", "--cfg", dest = "config_file", required = True,
+ help = "Configuration File For Trex/Router Pair",
+ type = lambda x: is_valid_file(parser, "config file does not exist",x))
+
+ parser.add_argument("job", help = "Job type",
+ type = str,
+ choices = ['find', 'plot', 'all'])
+
+ parser.add_argument("cond_type", help="type of failure condition",
+ type = str,
+ choices = ['latency','drop'])
+
+ parser.add_argument("-f", "--yaml", dest = "yaml", required = True,
+ help="YAML file to use", type = str)
+
+ args = parser.parse_args()
+
+ with TermMng():
+ try:
+ populate_fields(job_summary, args)
+ launch(job_summary)
+
+ except Exception as e:
+ ErrorHandler(e, traceback.format_exc())
+
+ logger.log("\nReport bugs to imarom@cisco.com\n")
+ g_stop = True
+
+def dummy_test ():
+ job_summary = {}
+ find_results = {}
+
+ job_summary['config_file'] = 'config/trex01-1g.cfg'
+ job_summary['yaml'] = 'dummy.yaml'
+ job_summary['email'] = 'imarom@cisco.com'
+ job_summary['job_name'] = 'test'
+ job_summary['job_type_str'] = 'test'
+
+ prepare_for_run(job_summary)
+
+ time.sleep(2)
+ job_summary['yaml'] = 'dummy.yaml'
+ job_summary['job_type'] = 'find'
+ job_summary['cond_name'] = 'Drop'
+ job_summary['cond_type'] = 'drop'
+ job_summary['job_id']= 94817231
+
+
+ find_results['tx'] = 210.23
+ find_results['m'] = 1.292812
+ find_results['total-pps'] = 1000
+ find_results['cpu_util'] = 74.0
+ find_results['maximum-latency'] = 4892
+ find_results['average-latency'] = 201
+ find_results['total-pkt-drop'] = 0
+
+
+ findjob = FindJob(1,1,job_summary)
+ plotjob = PlotJob(findjob)
+ job_summary['plot_results'] = plotjob.plot()
+
+ job_summary['find_results'] = find_results
+ job_summary['plot_results'] = [{'cpu_util': 2.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 999980.0, 'average-latency': 85.0, 'tx': 0.00207*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 221.0},
+ {'cpu_util': 8.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 48500000.0, 'average-latency': 87.0, 'tx': 0.05005*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 279.0},
+ {'cpu_util': 14.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 95990000.0, 'average-latency': 92.0, 'tx': 0.09806*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 273.0},
+ {'cpu_util': 20.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 143490000.0, 'average-latency': 95.0, 'tx': 0.14613*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 271.0},
+ {'cpu_util': 25.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 190980000.0, 'average-latency': 97.0, 'tx': 0.1933*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 302.0},
+ {'cpu_util': 31.0,'norm_cpu': 1.0, 'total-pps': 1000,'expected-bps': 238480000.0, 'average-latency': 98.0, 'tx': 0.24213*1000, 'total-pkt-drop': 1.0, 'maximum-latency': 292.0},
+ {'cpu_util': 37.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 285970000.0, 'average-latency': 99.0, 'tx': 0.29011*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 344.0},
+ {'cpu_util': 43.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 333470000.0, 'average-latency': 100.0, 'tx': 0.3382*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 351.0},
+ {'cpu_util': 48.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 380970000.0, 'average-latency': 100.0, 'tx': 0.38595*1000, 'total-pkt-drop': 0.0, 'maximum-latency': 342.0},
+ {'cpu_util': 54.0,'norm_cpu': 1.0, 'total-pps': 1000, 'expected-bps': 428460000.0, 'average-latency': 19852.0, 'tx': 0.43438*1000, 'total-pkt-drop': 1826229.0, 'maximum-latency': 25344.0}]
+
+
+
+ after_run(job_summary)
+
+if __name__ == "__main__":
+ entry ()
+
diff --git a/scripts/automation/wkhtmltopdf-amd64 b/scripts/automation/wkhtmltopdf-amd64
new file mode 100755
index 00000000..a173d2cf
--- /dev/null
+++ b/scripts/automation/wkhtmltopdf-amd64
Binary files differ
diff --git a/scripts/avl/_tun_citrix_0_fixed.pcap b/scripts/avl/_tun_citrix_0_fixed.pcap
new file mode 100755
index 00000000..be1664f8
--- /dev/null
+++ b/scripts/avl/_tun_citrix_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_exchange_0_fixed.pcap b/scripts/avl/_tun_exchange_0_fixed.pcap
new file mode 100755
index 00000000..e94d88ba
--- /dev/null
+++ b/scripts/avl/_tun_exchange_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_http_browsing_0_fixed.pcap b/scripts/avl/_tun_http_browsing_0_fixed.pcap
new file mode 100755
index 00000000..4cbeba57
--- /dev/null
+++ b/scripts/avl/_tun_http_browsing_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_http_get_0_fixed.pcap b/scripts/avl/_tun_http_get_0_fixed.pcap
new file mode 100755
index 00000000..ca959c5b
--- /dev/null
+++ b/scripts/avl/_tun_http_get_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_http_post_0_fixed.pcap b/scripts/avl/_tun_http_post_0_fixed.pcap
new file mode 100755
index 00000000..aa733b27
--- /dev/null
+++ b/scripts/avl/_tun_http_post_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_https_0_fixed.pcap b/scripts/avl/_tun_https_0_fixed.pcap
new file mode 100755
index 00000000..b3b48876
--- /dev/null
+++ b/scripts/avl/_tun_https_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_mail_pop_0_fixed.pcap b/scripts/avl/_tun_mail_pop_0_fixed.pcap
new file mode 100755
index 00000000..9a0dfef0
--- /dev/null
+++ b/scripts/avl/_tun_mail_pop_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_mail_pop_1_fixed.pcap b/scripts/avl/_tun_mail_pop_1_fixed.pcap
new file mode 100755
index 00000000..80001c6e
--- /dev/null
+++ b/scripts/avl/_tun_mail_pop_1_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_mail_pop_2_fixed.pcap b/scripts/avl/_tun_mail_pop_2_fixed.pcap
new file mode 100755
index 00000000..b0cf054f
--- /dev/null
+++ b/scripts/avl/_tun_mail_pop_2_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_oracle_0_fixed.pcap b/scripts/avl/_tun_oracle_0_fixed.pcap
new file mode 100755
index 00000000..b990a1e8
--- /dev/null
+++ b/scripts/avl/_tun_oracle_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_rtsp_0_fixed.pcap b/scripts/avl/_tun_rtsp_0_fixed.pcap
new file mode 100755
index 00000000..ae4e69bd
--- /dev/null
+++ b/scripts/avl/_tun_rtsp_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_smtp_0_fixed.pcap b/scripts/avl/_tun_smtp_0_fixed.pcap
new file mode 100755
index 00000000..96015bbc
--- /dev/null
+++ b/scripts/avl/_tun_smtp_0_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_smtp_1_fixed.pcap b/scripts/avl/_tun_smtp_1_fixed.pcap
new file mode 100755
index 00000000..9478b0b5
--- /dev/null
+++ b/scripts/avl/_tun_smtp_1_fixed.pcap
Binary files differ
diff --git a/scripts/avl/_tun_smtp_2_fixed.pcap b/scripts/avl/_tun_smtp_2_fixed.pcap
new file mode 100755
index 00000000..40ea1c6e
--- /dev/null
+++ b/scripts/avl/_tun_smtp_2_fixed.pcap
Binary files differ
diff --git a/scripts/avl/avl.csv b/scripts/avl/avl.csv
new file mode 100755
index 00000000..b09f6f1d
--- /dev/null
+++ b/scripts/avl/avl.csv
@@ -0,0 +1,26 @@
+id, name , cps , f-pkts , f-bytes , Mb/sec , MB/sec, c-flows , PPS , total-Mbytes-duration , errors ,flows
+ 00, avl/http_get.pcap , 102 , 44 , 38182 , 29.71 , 3.71 , 45 , 4488 , 0 , 0 , 1
+ 01, avl/http_post.pcap , 102 , 54 , 48900 , 38.05 , 4.76 , 55 , 5508 , 0 , 0 , 1
+ 02, avl/https.pcap , 33 , 96 , 92387 , 23.26 , 2.91 , 32 , 3168 , 0 , 0 , 1
+ 03, avl/http_browsing.pcap , 179 , 37 , 34721 , 47.42 , 5.93 , 66 , 6623 , 1 , 0 , 1
+ 04, avl/exchange.pcap , 64 , 43 , 10192 , 4.98 , 0.62 , 28 , 2752 , 0 , 0 , 1
+ 05, avl/email_pop1.pcap , 1 , 20 , 5763 , 0.05 , 0.01 , 0 , 24 , 0 , 0 , 1
+ 06, avl/email_pop2.pcap , 1 , 114 , 102429 , 0.94 , 0.12 , 1 , 137 , 0 , 0 , 1
+ 07, avl/email_pop4_29.pcap , 1 , 30 , 15870 , 0.15 , 0.02 , 0 , 36 , 0 , 0 , 1
+ 08, avl/oracle.pcap , 20 , 302 , 58547 , 8.93 , 1.12 , 60 , 6040 , 0 , 0 , 1
+ 09, avl/rtp_160_0.pcap , 1 , 84 , 97484 , 0.52 , 0.07 , 1 , 59 , 0 , 0 , 1
+ 10, avl/rtp_160_1.pcap , 1 , 1244 , 1138480 , 6.08 , 0.76 , 9 , 871 , 0 , 0 , 1
+ 11, avl/rtp_250k_1_0.pcap , 0 , 108 , 147128 , 0.56 , 0.07 , 1 , 54 , 0 , 0 , 1
+ 12, avl/rtp_250k_2_0.pcap , 0 , 1920 , 1767018 , 6.74 , 0.84 , 10 , 960 , 0 , 0 , 1
+ 13, avl/smtp_1.pcap , 2 , 22 , 5794 , 0.08 , 0.01 , 0 , 41 , 0 , 0 , 1
+ 14, avl/smtp_2.pcap , 2 , 35 , 18624 , 0.26 , 0.03 , 1 , 65 , 0 , 0 , 1
+ 15, avl/smtp_3.pcap , 2 , 110 , 97424 , 1.38 , 0.17 , 2 , 204 , 0 , 0 , 1
+ 16, avl/video_call_0.pcap , 3 , 2325 , 2551177 , 58.39 , 7.30 , 70 , 6975 , 1 , 0 , 1
+ 17, avl/video_rtp_1588_0.pcap , 7 , 1558 , 123704 , 6.98 , 0.87 , 115 , 11529 , 0 , 0 , 1
+ 18, avl/citrix_0.pcap , 11 , 272 , 86729 , 7.28 , 0.91 , 30 , 2992 , 0 , 0 , 1
+ 19, avl/dns_0.pcap , 498 , 2 , 178 , 0.68 , 0.08 , 10 , 996 , 0 , 0 , 1
+ 20, avl/sip_0.pcap , 7 , 7 , 2723 , 0.15 , 0.02 , 1 , 52 , 0 , 0 , 1
+ 21, avl/rtsp_0.pcap , 1 , 20 , 4038 , 0.04 , 0.00 , 0 , 24 , 0 , 0 , 1
+
+ 00, sum , 1040 , 8447 , 6447492 , 242.63 , 30.33 , 536 , 53596 , 3 , 0 , 22
+
diff --git a/scripts/avl/avl_delay_10.csv b/scripts/avl/avl_delay_10.csv
new file mode 100755
index 00000000..bd120cd3
--- /dev/null
+++ b/scripts/avl/avl_delay_10.csv
@@ -0,0 +1,27 @@
+ id, name , cps , f-pkts , f-bytes , Mb/sec , MB/sec, c-flows , PPS , total-Mbytes-duration , errors ,flows
+ 00, avl/delay_10_http_get_0.pcap , 102 , 44 , 38006 , 29.58 , 3.70 , 45 , 4488 , 0 , 0 , 1
+ 01, avl/delay_10_http_post_0.pcap , 102 , 54 , 48684 , 37.89 , 4.74 , 55 , 5508 , 0 , 0 , 1
+ 02, avl/delay_10_https_0.pcap , 33 , 96 , 92003 , 23.16 , 2.90 , 32 , 3168 , 0 , 0 , 1
+ 03, avl/delay_10_http_browsing_0.pcap , 179 , 37 , 34573 , 47.22 , 5.90 , 66 , 6623 , 1 , 0 , 1
+ 04, avl/delay_10_exchange_0.pcap , 64 , 43 , 10020 , 4.89 , 0.61 , 28 , 2752 , 0 , 0 , 1
+ 05, avl/delay_10_mail_pop_0.pcap , 1 , 20 , 5683 , 0.05 , 0.01 , 0 , 24 , 0 , 0 , 1
+ 06, avl/delay_10_mail_pop_1.pcap , 1 , 114 , 101973 , 0.93 , 0.12 , 1 , 137 , 0 , 0 , 1
+ 07, avl/delay_10_mail_pop_2.pcap , 1 , 30 , 15750 , 0.14 , 0.02 , 0 , 36 , 0 , 0 , 1
+ 08, avl/delay_10_oracle_0.pcap , 20 , 302 , 57339 , 8.75 , 1.09 , 60 , 6040 , 0 , 0 , 1
+ 09, avl/delay_10_rtp_160k_0.pcap , 1 , 85 , 98406 , 0.53 , 0.07 , 1 , 59 , 0 , 0 , 1
+ 10, avl/delay_10_rtp_160k_1.pcap , 1 , 1246 , 1135325 , 6.06 , 0.76 , 9 , 872 , 0 , 0 , 1
+ 11, avl/delay_10_rtp_250k_0_0.pcap , 0 , 126 , 166496 , 0.64 , 0.08 , 1 , 63 , 0 , 0 , 1
+ 12, avl/delay_10_rtp_250k_1_0.pcap , 0 , 1920 , 1759338 , 6.71 , 0.84 , 10 , 960 , 0 , 0 , 1
+ 13, avl/delay_10_smtp_0.pcap , 2 , 22 , 5706 , 0.08 , 0.01 , 0 , 41 , 0 , 0 , 1
+ 14, avl/delay_10_smtp_1.pcap , 2 , 35 , 18484 , 0.26 , 0.03 , 1 , 65 , 0 , 0 , 1
+ 15, avl/delay_10_smtp_2.pcap , 2 , 110 , 96984 , 1.37 , 0.17 , 2 , 204 , 0 , 0 , 1
+ 16, avl/delay_10_video_call_0.pcap , 3 , 2325 , 2541877 , 58.18 , 7.27 , 70 , 6975 , 1 , 0 , 1
+ 17, avl/delay_10_video_call_rtp_0.pcap , 7 , 1408 , 106412 , 6.01 , 0.75 , 104 , 10419 , 0 , 0 , 1
+ 18, avl/delay_10_citrix_0.pcap , 11 , 272 , 85641 , 7.19 , 0.90 , 30 , 2992 , 0 , 0 , 1
+ 19, avl/delay_10_dns_0.pcap , 498 , 2 , 170 , 0.65 , 0.08 , 10 , 996 , 0 , 0 , 1
+ 20, avl/delay_10_sip_0.pcap , 7 , 7 , 2695 , 0.15 , 0.02 , 1 , 52 , 0 , 0 , 1
+ 21, avl/delay_10_rtsp_0.pcap , 1 , 23 , 4382 , 0.04 , 0.01 , 0 , 28 , 0 , 0 , 1
+
+ 00, sum , 1040 , 8321 , 6425947 , 240.47 , 30.06 , 525 , 52501 , 3 , 0 , 22
+ create thread 0 nodes-0 socket: 0
+
diff --git a/scripts/avl/citrix_0.pcap b/scripts/avl/citrix_0.pcap
new file mode 100755
index 00000000..72ecd43b
--- /dev/null
+++ b/scripts/avl/citrix_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_citrix_0.pcap b/scripts/avl/delay_10_citrix_0.pcap
new file mode 100755
index 00000000..e783c9b8
--- /dev/null
+++ b/scripts/avl/delay_10_citrix_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_dns_0.pcap b/scripts/avl/delay_10_dns_0.pcap
new file mode 100755
index 00000000..bfc2c66c
--- /dev/null
+++ b/scripts/avl/delay_10_dns_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_exchange_0.pcap b/scripts/avl/delay_10_exchange_0.pcap
new file mode 100755
index 00000000..439a65d2
--- /dev/null
+++ b/scripts/avl/delay_10_exchange_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_http_browsing_0.pcap b/scripts/avl/delay_10_http_browsing_0.pcap
new file mode 100755
index 00000000..3e5768f3
--- /dev/null
+++ b/scripts/avl/delay_10_http_browsing_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_http_get_0.pcap b/scripts/avl/delay_10_http_get_0.pcap
new file mode 100755
index 00000000..fd9ed76c
--- /dev/null
+++ b/scripts/avl/delay_10_http_get_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_http_post_0.pcap b/scripts/avl/delay_10_http_post_0.pcap
new file mode 100755
index 00000000..a5511793
--- /dev/null
+++ b/scripts/avl/delay_10_http_post_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_https_0.pcap b/scripts/avl/delay_10_https_0.pcap
new file mode 100755
index 00000000..e56f52ce
--- /dev/null
+++ b/scripts/avl/delay_10_https_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_mail_pop_0.pcap b/scripts/avl/delay_10_mail_pop_0.pcap
new file mode 100755
index 00000000..3616f47b
--- /dev/null
+++ b/scripts/avl/delay_10_mail_pop_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_mail_pop_1.pcap b/scripts/avl/delay_10_mail_pop_1.pcap
new file mode 100755
index 00000000..0f28443b
--- /dev/null
+++ b/scripts/avl/delay_10_mail_pop_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_mail_pop_2.pcap b/scripts/avl/delay_10_mail_pop_2.pcap
new file mode 100755
index 00000000..d6f75cb6
--- /dev/null
+++ b/scripts/avl/delay_10_mail_pop_2.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_oracle_0.pcap b/scripts/avl/delay_10_oracle_0.pcap
new file mode 100755
index 00000000..2610fa59
--- /dev/null
+++ b/scripts/avl/delay_10_oracle_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_160k_0.pcap b/scripts/avl/delay_10_rtp_160k_0.pcap
new file mode 100755
index 00000000..c1043737
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_160k_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_160k_1.pcap b/scripts/avl/delay_10_rtp_160k_1.pcap
new file mode 100755
index 00000000..8ccb79fe
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_160k_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_160k_full.pcap b/scripts/avl/delay_10_rtp_160k_full.pcap
new file mode 100755
index 00000000..b91f5e6c
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_160k_full.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_250k_0_0.pcap b/scripts/avl/delay_10_rtp_250k_0_0.pcap
new file mode 100755
index 00000000..7a6f0879
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_250k_0_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_250k_1_0.pcap b/scripts/avl/delay_10_rtp_250k_1_0.pcap
new file mode 100755
index 00000000..cf0098f1
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_250k_1_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtp_250k_full.pcap b/scripts/avl/delay_10_rtp_250k_full.pcap
new file mode 100755
index 00000000..689e2f81
--- /dev/null
+++ b/scripts/avl/delay_10_rtp_250k_full.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_rtsp_0.pcap b/scripts/avl/delay_10_rtsp_0.pcap
new file mode 100755
index 00000000..da7a9d03
--- /dev/null
+++ b/scripts/avl/delay_10_rtsp_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_sip_0.pcap b/scripts/avl/delay_10_sip_0.pcap
new file mode 100755
index 00000000..07043894
--- /dev/null
+++ b/scripts/avl/delay_10_sip_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_sip_video_call_full.pcap b/scripts/avl/delay_10_sip_video_call_full.pcap
new file mode 100755
index 00000000..f3802918
--- /dev/null
+++ b/scripts/avl/delay_10_sip_video_call_full.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_sip_video_call_short.pcap b/scripts/avl/delay_10_sip_video_call_short.pcap
new file mode 100755
index 00000000..f1bbf007
--- /dev/null
+++ b/scripts/avl/delay_10_sip_video_call_short.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_smtp_0.pcap b/scripts/avl/delay_10_smtp_0.pcap
new file mode 100755
index 00000000..37173a1c
--- /dev/null
+++ b/scripts/avl/delay_10_smtp_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_smtp_1.pcap b/scripts/avl/delay_10_smtp_1.pcap
new file mode 100755
index 00000000..620fe9b1
--- /dev/null
+++ b/scripts/avl/delay_10_smtp_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_smtp_2.pcap b/scripts/avl/delay_10_smtp_2.pcap
new file mode 100755
index 00000000..3fb3063d
--- /dev/null
+++ b/scripts/avl/delay_10_smtp_2.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_video_call_0.pcap b/scripts/avl/delay_10_video_call_0.pcap
new file mode 100755
index 00000000..bdeb082c
--- /dev/null
+++ b/scripts/avl/delay_10_video_call_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_10_video_call_rtp_0.pcap b/scripts/avl/delay_10_video_call_rtp_0.pcap
new file mode 100755
index 00000000..03375605
--- /dev/null
+++ b/scripts/avl/delay_10_video_call_rtp_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_citrix_0.pcap b/scripts/avl/delay_citrix_0.pcap
new file mode 100755
index 00000000..a53b0517
--- /dev/null
+++ b/scripts/avl/delay_citrix_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_dns_0.pcap b/scripts/avl/delay_dns_0.pcap
new file mode 100755
index 00000000..f4b96e25
--- /dev/null
+++ b/scripts/avl/delay_dns_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_email_pop_0.pcap b/scripts/avl/delay_email_pop_0.pcap
new file mode 100755
index 00000000..0975dc01
--- /dev/null
+++ b/scripts/avl/delay_email_pop_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_email_pop_1.pcap b/scripts/avl/delay_email_pop_1.pcap
new file mode 100755
index 00000000..4df4afc0
--- /dev/null
+++ b/scripts/avl/delay_email_pop_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_email_pop_2.pcap b/scripts/avl/delay_email_pop_2.pcap
new file mode 100755
index 00000000..616a21f2
--- /dev/null
+++ b/scripts/avl/delay_email_pop_2.pcap
Binary files differ
diff --git a/scripts/avl/delay_exchange_0.pcap b/scripts/avl/delay_exchange_0.pcap
new file mode 100755
index 00000000..a1366ef8
--- /dev/null
+++ b/scripts/avl/delay_exchange_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_http_browsing_0.pcap b/scripts/avl/delay_http_browsing_0.pcap
new file mode 100755
index 00000000..697b64e2
--- /dev/null
+++ b/scripts/avl/delay_http_browsing_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_http_get_0.pcap b/scripts/avl/delay_http_get_0.pcap
new file mode 100755
index 00000000..136d102d
--- /dev/null
+++ b/scripts/avl/delay_http_get_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_http_post_0.pcap b/scripts/avl/delay_http_post_0.pcap
new file mode 100755
index 00000000..46d6f8f5
--- /dev/null
+++ b/scripts/avl/delay_http_post_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_https_0.pcap b/scripts/avl/delay_https_0.pcap
new file mode 100755
index 00000000..a8a902c8
--- /dev/null
+++ b/scripts/avl/delay_https_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_oracle_0.pcap b/scripts/avl/delay_oracle_0.pcap
new file mode 100755
index 00000000..c3882f30
--- /dev/null
+++ b/scripts/avl/delay_oracle_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_rtp_160k_1_1_0.pcap b/scripts/avl/delay_rtp_160k_1_1_0.pcap
new file mode 100755
index 00000000..9efff616
--- /dev/null
+++ b/scripts/avl/delay_rtp_160k_1_1_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_rtp_160k_1_1_1.pcap b/scripts/avl/delay_rtp_160k_1_1_1.pcap
new file mode 100755
index 00000000..29aa8ae8
--- /dev/null
+++ b/scripts/avl/delay_rtp_160k_1_1_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_rtp_250k_0_0.pcap b/scripts/avl/delay_rtp_250k_0_0.pcap
new file mode 100755
index 00000000..a055b76e
--- /dev/null
+++ b/scripts/avl/delay_rtp_250k_0_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_rtp_250k_2_0.pcap b/scripts/avl/delay_rtp_250k_2_0.pcap
new file mode 100755
index 00000000..b57ea074
--- /dev/null
+++ b/scripts/avl/delay_rtp_250k_2_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_rtsp_0.pcap b/scripts/avl/delay_rtsp_0.pcap
new file mode 100755
index 00000000..73968c5a
--- /dev/null
+++ b/scripts/avl/delay_rtsp_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_sip_0.pcap b/scripts/avl/delay_sip_0.pcap
new file mode 100755
index 00000000..9e524eac
--- /dev/null
+++ b/scripts/avl/delay_sip_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_smtp_0.pcap b/scripts/avl/delay_smtp_0.pcap
new file mode 100755
index 00000000..9d004995
--- /dev/null
+++ b/scripts/avl/delay_smtp_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_smtp_1.pcap b/scripts/avl/delay_smtp_1.pcap
new file mode 100755
index 00000000..96f6b121
--- /dev/null
+++ b/scripts/avl/delay_smtp_1.pcap
Binary files differ
diff --git a/scripts/avl/delay_smtp_2.pcap b/scripts/avl/delay_smtp_2.pcap
new file mode 100755
index 00000000..a96ddc03
--- /dev/null
+++ b/scripts/avl/delay_smtp_2.pcap
Binary files differ
diff --git a/scripts/avl/delay_video_call_0.pcap b/scripts/avl/delay_video_call_0.pcap
new file mode 100755
index 00000000..0f365a1e
--- /dev/null
+++ b/scripts/avl/delay_video_call_0.pcap
Binary files differ
diff --git a/scripts/avl/delay_video_call_rtp_0.pcap b/scripts/avl/delay_video_call_rtp_0.pcap
new file mode 100755
index 00000000..1a0c5c70
--- /dev/null
+++ b/scripts/avl/delay_video_call_rtp_0.pcap
Binary files differ
diff --git a/scripts/avl/dns_0.pcap b/scripts/avl/dns_0.pcap
new file mode 100755
index 00000000..b3acca6d
--- /dev/null
+++ b/scripts/avl/dns_0.pcap
Binary files differ
diff --git a/scripts/avl/email_pop1.pcap b/scripts/avl/email_pop1.pcap
new file mode 100755
index 00000000..c4a6e7f9
--- /dev/null
+++ b/scripts/avl/email_pop1.pcap
Binary files differ
diff --git a/scripts/avl/email_pop1_1.pcap b/scripts/avl/email_pop1_1.pcap
new file mode 100755
index 00000000..479a2934
--- /dev/null
+++ b/scripts/avl/email_pop1_1.pcap
Binary files differ
diff --git a/scripts/avl/email_pop2.pcap b/scripts/avl/email_pop2.pcap
new file mode 100755
index 00000000..58e9d491
--- /dev/null
+++ b/scripts/avl/email_pop2.pcap
Binary files differ
diff --git a/scripts/avl/email_pop2_2.pcap b/scripts/avl/email_pop2_2.pcap
new file mode 100755
index 00000000..ff80d253
--- /dev/null
+++ b/scripts/avl/email_pop2_2.pcap
Binary files differ
diff --git a/scripts/avl/email_pop4_29.pcap b/scripts/avl/email_pop4_29.pcap
new file mode 100755
index 00000000..67429564
--- /dev/null
+++ b/scripts/avl/email_pop4_29.pcap
Binary files differ
diff --git a/scripts/avl/exchange.pcap b/scripts/avl/exchange.pcap
new file mode 100755
index 00000000..66d65367
--- /dev/null
+++ b/scripts/avl/exchange.pcap
Binary files differ
diff --git a/scripts/avl/http_browsing.pcap b/scripts/avl/http_browsing.pcap
new file mode 100755
index 00000000..8f051029
--- /dev/null
+++ b/scripts/avl/http_browsing.pcap
Binary files differ
diff --git a/scripts/avl/http_get.pcap b/scripts/avl/http_get.pcap
new file mode 100755
index 00000000..2f152cfb
--- /dev/null
+++ b/scripts/avl/http_get.pcap
Binary files differ
diff --git a/scripts/avl/http_post.pcap b/scripts/avl/http_post.pcap
new file mode 100755
index 00000000..6a9c3f73
--- /dev/null
+++ b/scripts/avl/http_post.pcap
Binary files differ
diff --git a/scripts/avl/https.pcap b/scripts/avl/https.pcap
new file mode 100755
index 00000000..58f872ea
--- /dev/null
+++ b/scripts/avl/https.pcap
Binary files differ
diff --git a/scripts/avl/mac_uit.yaml b/scripts/avl/mac_uit.yaml
new file mode 100755
index 00000000..d980d54d
--- /dev/null
+++ b/scripts/avl/mac_uit.yaml
@@ -0,0 +1,5 @@
+- items :
+ - ip : "16.0.0.1"
+ mac : [0x16,0x1,0x4,0x5,0x6,0x7]
+ - ip : "16.0.0.2"
+ mac : [0x16,0x2,0x0,0x1,0x0,0x0]
diff --git a/scripts/avl/oracle.pcap b/scripts/avl/oracle.pcap
new file mode 100755
index 00000000..63ffe9b2
--- /dev/null
+++ b/scripts/avl/oracle.pcap
Binary files differ
diff --git a/scripts/avl/rtp_160_0.pcap b/scripts/avl/rtp_160_0.pcap
new file mode 100755
index 00000000..fb7eb6fa
--- /dev/null
+++ b/scripts/avl/rtp_160_0.pcap
Binary files differ
diff --git a/scripts/avl/rtp_160_1.pcap b/scripts/avl/rtp_160_1.pcap
new file mode 100755
index 00000000..00cf2a20
--- /dev/null
+++ b/scripts/avl/rtp_160_1.pcap
Binary files differ
diff --git a/scripts/avl/rtp_250k_1_0.pcap b/scripts/avl/rtp_250k_1_0.pcap
new file mode 100755
index 00000000..f369c037
--- /dev/null
+++ b/scripts/avl/rtp_250k_1_0.pcap
Binary files differ
diff --git a/scripts/avl/rtp_250k_2_0.pcap b/scripts/avl/rtp_250k_2_0.pcap
new file mode 100755
index 00000000..c3fd7a5e
--- /dev/null
+++ b/scripts/avl/rtp_250k_2_0.pcap
Binary files differ
diff --git a/scripts/avl/rtsp_0.pcap b/scripts/avl/rtsp_0.pcap
new file mode 100755
index 00000000..ffbf4a7d
--- /dev/null
+++ b/scripts/avl/rtsp_0.pcap
Binary files differ
diff --git a/scripts/avl/sfr_branch_profile_delay_10.yaml b/scripts/avl/sfr_branch_profile_delay_10.yaml
new file mode 100755
index 00000000..71e69212
--- /dev/null
+++ b/scripts/avl/sfr_branch_profile_delay_10.yaml
@@ -0,0 +1,114 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.62.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 100
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_http_get_0.pcap
+ cps : 432.576
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_post_0.pcap
+ cps : 432.576
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 135.863
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 361.55
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_exchange_0.pcap
+ cps : 623.781
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : true
+ - name: avl/delay_10_mail_pop_0.pcap
+ cps : 20.253
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_1.pcap
+ cps : 20.253
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_2.pcap
+ cps : 20.253
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_oracle_0.pcap
+ cps : 61.04
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_full.pcap
+ cps : 2.388
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 1
+ - name: avl/delay_10_rtp_250k_full.pcap
+ cps : 1.705
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 1
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 20.635
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_1.pcap
+ cps : 20.635
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_2.pcap
+ cps : 20.635
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 9.835
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_sip_video_call_full.pcap
+ cps : 98.482
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 2
+ - name: avl/delay_10_citrix_0.pcap
+ cps : 43.789
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 1471.196
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
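Each cap_info entry above pairs a template pcap with its cps (new connections per second); the profile's nominal aggregate flow-setup rate is simply the sum of the cps column, about 3797 flows/sec here before any rate multiplier is applied. A minimal bookkeeping sketch, assuming PyYAML and the profile layout shown above:

    import yaml  # PyYAML

    def total_cps(path):
        # Profiles are a one-element YAML list; cap_info holds the templates.
        with open(path) as f:
            profile = yaml.safe_load(f)[0]
        return sum(t["cps"] for t in profile["cap_info"])

    print(total_cps("avl/sfr_branch_profile_delay_10.yaml"))  # ~3797.4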
diff --git a/scripts/avl/sfr_delay_10.yaml b/scripts/avl/sfr_delay_10.yaml
new file mode 100755
index 00000000..1a3f82c3
--- /dev/null
+++ b/scripts/avl/sfr_delay_10.yaml
@@ -0,0 +1,119 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.20.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ #vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 }
+ #mac_override_by_ip : true
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 107
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_http_get_0.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_post_0.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 33.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 179.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_exchange_0.pcap
+ cps : 64.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_0.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_1.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_2.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_oracle_0.pcap
+ cps : 20.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_full.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ plugin_id : 1
+ - name: avl/delay_10_rtp_250k_full.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ plugin_id : 1
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_1.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_2.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 3.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ - name: avl/delay_10_sip_video_call_full.pcap
+ cps : 7.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 2
+ one_app_server : false
+ - name: avl/delay_10_citrix_0.pcap
+ cps : 11.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 498.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/avl/sfr_delay_10_1g.yaml b/scripts/avl/sfr_delay_10_1g.yaml
new file mode 100755
index 00000000..925531fd
--- /dev/null
+++ b/scripts/avl/sfr_delay_10_1g.yaml
@@ -0,0 +1,118 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.20.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ #wlength : 107
+ #one_app_server : false
+ cap_info :
+ - name: avl/delay_10_http_get_0.pcap
+ cps : 404.52
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_post_0.pcap
+ cps : 404.52
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 130.8745
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 709.89
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_exchange_0.pcap
+ cps : 253.81
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_0.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_1.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_2.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_oracle_0.pcap
+ cps : 79.3178
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_full.pcap
+ cps : 2.776
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ plugin_id : 1
+ - name: avl/delay_10_rtp_250k_full.pcap
+ cps : 1.982
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ plugin_id : 1
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_1.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_2.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 11.8976
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : false
+ - name: avl/delay_10_sip_video_call_full.pcap
+ cps : 29.347
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 2
+ one_app_server : false
+ - name: avl/delay_10_citrix_0.pcap
+ cps : 43.6248
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 1975.015
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ wlength : 1
+
diff --git a/scripts/avl/sfr_delay_10_1g_no_bundeling.yaml b/scripts/avl/sfr_delay_10_1g_no_bundeling.yaml
new file mode 100755
index 00000000..361d32cc
--- /dev/null
+++ b/scripts/avl/sfr_delay_10_1g_no_bundeling.yaml
@@ -0,0 +1,130 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.20.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 107
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_http_get_0.pcap
+ cps : 404.52
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_post_0.pcap
+ cps : 404.52
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 130.8745
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 709.89
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_exchange_0.pcap
+ cps : 253.81
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_0.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_1.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_2.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_oracle_0.pcap
+ cps : 79.3178
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_0.pcap
+ cps : 2.776
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_1.pcap
+ cps : 2.776
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_0_0.pcap
+ cps : 1.982
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_1_0.pcap
+ cps : 1.982
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_1.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_2.pcap
+ cps : 7.3369
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 11.8976
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_rtp_0.pcap
+ cps : 29.347
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_citrix_0.pcap
+ cps : 43.6248
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 1975.015
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_sip_0.pcap
+ cps : 29.34
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtsp_0.pcap
+ cps : 4.759
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/avl/sfr_delay_10_no_bundeling.yaml b/scripts/avl/sfr_delay_10_no_bundeling.yaml
new file mode 100755
index 00000000..8374eab4
--- /dev/null
+++ b/scripts/avl/sfr_delay_10_no_bundeling.yaml
@@ -0,0 +1,131 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.20.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 107
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_http_get_0.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_post_0.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 33.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 179.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_exchange_0.pcap
+ cps : 64.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_0.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_1.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_mail_pop_2.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_oracle_0.pcap
+ cps : 20.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_0.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_1.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_0_0.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_1_0.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_1.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_smtp_2.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 3.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_rtp_0.pcap
+ cps : 7.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_citrix_0.pcap
+ cps : 11.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 498.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ wlength : 1
+ - name: avl/delay_10_sip_0.pcap
+ cps : 7.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtsp_0.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/avl/sfr_delay_50_tunnel_no_bundeling.yaml b/scripts/avl/sfr_delay_50_tunnel_no_bundeling.yaml
new file mode 100755
index 00000000..cd8e3c64
--- /dev/null
+++ b/scripts/avl/sfr_delay_50_tunnel_no_bundeling.yaml
@@ -0,0 +1,131 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.20.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 107
+ one_app_server : false
+ cap_info :
+ - name: avl/_tun_http_get_0_fixed.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_http_post_0_fixed.pcap
+ cps : 102.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_https_0_fixed.pcap
+ cps : 33.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_http_browsing_0_fixed.pcap
+ cps : 179.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_exchange_0_fixed.pcap
+ cps : 64.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_mail_pop_0_fixed.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_mail_pop_1_fixed.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_mail_pop_2_fixed.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_oracle_0_fixed.pcap
+ cps : 20.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_0.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_160k_1.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_0_0.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_1_0.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_smtp_0_fixed.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_smtp_1_fixed.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_smtp_2_fixed.pcap
+ cps : 1.85
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 3.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_rtp_0.pcap
+ cps : 7.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_citrix_0_fixed.pcap
+ cps : 11.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_dns_0.pcap
+ cps : 498.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ wlength : 1
+ - name: avl/delay_10_sip_0.pcap
+ cps : 7.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/_tun_rtsp_0_fixed.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/avl/sip_0.pcap b/scripts/avl/sip_0.pcap
new file mode 100755
index 00000000..9f6d6ac9
--- /dev/null
+++ b/scripts/avl/sip_0.pcap
Binary files differ
diff --git a/scripts/avl/smtp_1.pcap b/scripts/avl/smtp_1.pcap
new file mode 100755
index 00000000..a0638b94
--- /dev/null
+++ b/scripts/avl/smtp_1.pcap
Binary files differ
diff --git a/scripts/avl/smtp_2.pcap b/scripts/avl/smtp_2.pcap
new file mode 100755
index 00000000..cbace86b
--- /dev/null
+++ b/scripts/avl/smtp_2.pcap
Binary files differ
diff --git a/scripts/avl/smtp_3.pcap b/scripts/avl/smtp_3.pcap
new file mode 100755
index 00000000..890db92a
--- /dev/null
+++ b/scripts/avl/smtp_3.pcap
Binary files differ
diff --git a/scripts/avl/test_mac.yaml b/scripts/avl/test_mac.yaml
new file mode 100755
index 00000000..9d0ed1c4
--- /dev/null
+++ b/scripts/avl/test_mac.yaml
@@ -0,0 +1,29 @@
+- items :
+ - ip : "16.0.0.1"
+ mac : [0x16,0x1,0x4,0x5,0x6,0x7]
+ - ip : "16.0.0.2"
+ mac : [0x16,0x2,0x0,0x1,0x0,0x0]
+ - ip : "16.0.0.3"
+ mac : [0x16,0x3,0x0,0x1,0x0,0x0]
+ - ip : "16.0.0.4"
+ mac : [0x16,0x4,0x4,0x5,0x6,0x7]
+ - ip : "16.0.0.5"
+ mac : [0x16,0x5,0x4,0x5,0x6,0x7]
+ - ip : "16.0.0.6"
+ mac : [0x16,0x6,0x4,0x5,0x6,0x7]
+ - ip : "16.0.0.7"
+ mac : [0x16,0x7,0x4,0x5,0x6,0x7]
+ - ip : "17.0.0.2"
+ mac : [0x17,0x2,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.1"
+ mac : [0x17,0x1,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.3"
+ mac : [0x17,0x3,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.4"
+ mac : [0x17,0x4,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.5"
+ mac : [0x17,0x5,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.6"
+ mac : [0x17,0x6,0x0,0x1,0x0,0x0]
+ - ip : "17.0.0.8"
+ mac : [0x17,0x8,0x0,0x1,0x0,0x0]
diff --git a/scripts/avl/video_call_0.pcap b/scripts/avl/video_call_0.pcap
new file mode 100755
index 00000000..33949516
--- /dev/null
+++ b/scripts/avl/video_call_0.pcap
Binary files differ
diff --git a/scripts/avl/video_rtp_1588_0.pcap b/scripts/avl/video_rtp_1588_0.pcap
new file mode 100755
index 00000000..464bb1fe
--- /dev/null
+++ b/scripts/avl/video_rtp_1588_0.pcap
Binary files differ
diff --git a/scripts/cap2/Oracle.pcap b/scripts/cap2/Oracle.pcap
new file mode 100755
index 00000000..cc300a53
--- /dev/null
+++ b/scripts/cap2/Oracle.pcap
Binary files differ
diff --git a/scripts/cap2/Video_Calls.pcap b/scripts/cap2/Video_Calls.pcap
new file mode 100755
index 00000000..6fd4d202
--- /dev/null
+++ b/scripts/cap2/Video_Calls.pcap
Binary files differ
diff --git a/scripts/cap2/Voice_calls_rtp_only.pcap b/scripts/cap2/Voice_calls_rtp_only.pcap
new file mode 100755
index 00000000..eb26dbb9
--- /dev/null
+++ b/scripts/cap2/Voice_calls_rtp_only.pcap
Binary files differ
diff --git a/scripts/cap2/citrix.pcap b/scripts/cap2/citrix.pcap
new file mode 100755
index 00000000..f13eec55
--- /dev/null
+++ b/scripts/cap2/citrix.pcap
Binary files differ
diff --git a/scripts/cap2/delay_10_rtp_250k_short.pcap b/scripts/cap2/delay_10_rtp_250k_short.pcap
new file mode 100755
index 00000000..de8b28e7
--- /dev/null
+++ b/scripts/cap2/delay_10_rtp_250k_short.pcap
Binary files differ
diff --git a/scripts/cap2/dns.pcap b/scripts/cap2/dns.pcap
new file mode 100755
index 00000000..2bd7f460
--- /dev/null
+++ b/scripts/cap2/dns.pcap
Binary files differ
diff --git a/scripts/cap2/dns.yaml b/scripts/cap2/dns.yaml
new file mode 100755
index 00000000..dd577894
--- /dev/null
+++ b/scripts/cap2/dns.yaml
@@ -0,0 +1,23 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ #vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 }
+ #mac_override_by_ip : true
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
+
diff --git a/scripts/cap2/dns_one_server.yaml b/scripts/cap2/dns_one_server.yaml
new file mode 100755
index 00000000..bac3c1d1
--- /dev/null
+++ b/scripts/cap2/dns_one_server.yaml
@@ -0,0 +1,33 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.2"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ server_addr : "48.0.0.1"
+ one_app_server : true
+ wlength : 1
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ wlength : 1
+
+
diff --git a/scripts/cap2/dns_single_server.yaml b/scripts/cap2/dns_single_server.yaml
new file mode 100755
index 00000000..e2513484
--- /dev/null
+++ b/scripts/cap2/dns_single_server.yaml
@@ -0,0 +1,32 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ one_app_server : true
+ wlength : 7
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/dns.pcap
+ one_app_server : false
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+
+
diff --git a/scripts/cap2/dns_wlen.yaml b/scripts/cap2/dns_wlen.yaml
new file mode 100755
index 00000000..fc653d4f
--- /dev/null
+++ b/scripts/cap2/dns_wlen.yaml
@@ -0,0 +1,25 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ wlength : 2
+
+
diff --git a/scripts/cap2/dns_wlen1.yaml b/scripts/cap2/dns_wlen1.yaml
new file mode 100755
index 00000000..bf07bac3
--- /dev/null
+++ b/scripts/cap2/dns_wlen1.yaml
@@ -0,0 +1,25 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ wlength : 1
+
+
diff --git a/scripts/cap2/dns_wlen2.yaml b/scripts/cap2/dns_wlen2.yaml
new file mode 100755
index 00000000..5488ae8e
--- /dev/null
+++ b/scripts/cap2/dns_wlen2.yaml
@@ -0,0 +1,32 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 10
+ one_app_server : true
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 10
+ wlength : 1
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 10
+
+
diff --git a/scripts/cap2/dns_wlength.yaml b/scripts/cap2/dns_wlength.yaml
new file mode 100755
index 00000000..fc653d4f
--- /dev/null
+++ b/scripts/cap2/dns_wlength.yaml
@@ -0,0 +1,25 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ wlength : 2
+
+
diff --git a/scripts/cap2/dyn_pyld1.yaml b/scripts/cap2/dyn_pyld1.yaml
new file mode 100755
index 00000000..1ea09aa7
--- /dev/null
+++ b/scripts/cap2/dyn_pyld1.yaml
@@ -0,0 +1,36 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ dyn_pyload :
+ - pkt_id : 0 # 0 is first packet
+ pyld_offset : 0 # offset from payload in bytes
+ type : 0 # 0 - random, 1 - client_ip
+ len : 1 # len in uint32
+ mask : 0xffffffff #mask
+ - pkt_id : 1 # 0 is first packet
+ pyld_offset : 2 # offset from payload in bytes
+ type : 1 # 0 - random, 1 - client_ip
+ len : 2 # len in uint32
+ mask : 0xffffffff #mask
+
+
+
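The dyn_pyload block above is the interesting part of this profile: for a given pkt_id inside the template pcap it rewrites len uint32 words of payload at byte offset pyld_offset, taking the value either at random (type 0) or from the flow's client IP (type 1), ANDed with mask. A rough Python illustration of that rewrite semantics (the function name and buffer handling are hypothetical, not the TRex core implementation):

    import os, socket, struct

    def apply_dyn_payload(payload: bytearray, pyld_offset: int, typ: int,
                          length: int, mask: int, client_ip: str) -> None:
        # Overwrite `length` uint32 words starting at `pyld_offset`, masked by `mask`.
        for i in range(length):
            if typ == 0:                                   # 0 - random
                value = int.from_bytes(os.urandom(4), "big")
            else:                                          # 1 - client_ip
                value = struct.unpack("!I", socket.inet_aton(client_ip))[0]
            struct.pack_into("!I", payload, pyld_offset + 4 * i, value & mask)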
diff --git a/scripts/cap2/exchange.pcap b/scripts/cap2/exchange.pcap
new file mode 100755
index 00000000..1be671f2
--- /dev/null
+++ b/scripts/cap2/exchange.pcap
Binary files differ
diff --git a/scripts/cap2/http.yaml b/scripts/cap2/http.yaml
new file mode 100755
index 00000000..80d58369
--- /dev/null
+++ b/scripts/cap2/http.yaml
@@ -0,0 +1,22 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ cap_info :
+ - name: avl/delay_10_rtp_160k_full.pcap
+ cps : 2.776
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 1
+
diff --git a/scripts/cap2/http_browsing.pcap b/scripts/cap2/http_browsing.pcap
new file mode 100755
index 00000000..065e33ba
--- /dev/null
+++ b/scripts/cap2/http_browsing.pcap
Binary files differ
diff --git a/scripts/cap2/http_get.pcap b/scripts/cap2/http_get.pcap
new file mode 100755
index 00000000..3f1d42e4
--- /dev/null
+++ b/scripts/cap2/http_get.pcap
Binary files differ
diff --git a/scripts/cap2/http_plugin.yaml b/scripts/cap2/http_plugin.yaml
new file mode 100755
index 00000000..032409e4
--- /dev/null
+++ b/scripts/cap2/http_plugin.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ plugin_id : 4
+
diff --git a/scripts/cap2/http_post.pcap b/scripts/cap2/http_post.pcap
new file mode 100755
index 00000000..0ccb3b55
--- /dev/null
+++ b/scripts/cap2/http_post.pcap
Binary files differ
diff --git a/scripts/cap2/http_simple.yaml b/scripts/cap2/http_simple.yaml
new file mode 100755
index 00000000..ad4814f5
--- /dev/null
+++ b/scripts/cap2/http_simple.yaml
@@ -0,0 +1,21 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ cap_info :
+ - name: avl/delay_10_http_browsing_0.pcap
+ cps : 2.776
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/cap2/https.pcap b/scripts/cap2/https.pcap
new file mode 100755
index 00000000..e62fa29c
--- /dev/null
+++ b/scripts/cap2/https.pcap
Binary files differ
diff --git a/scripts/cap2/imix.yaml b/scripts/cap2/imix.yaml
new file mode 100755
index 00000000..a155e38b
--- /dev/null
+++ b/scripts/cap2/imix.yaml
@@ -0,0 +1,35 @@
+#
+# Simple IMIX test (7x64B, 5x594B, 1x1518B)
+#
+- duration : 3
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 28.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 7
+ - name: cap2/udp_594B.pcap
+ cps : 20.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 5
+ - name: cap2/udp_1518B.pcap
+ cps : 4.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 1
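For the 7:5:1 mix declared above, the average frame size is (7*64 + 5*594 + 1*1518) / 13 = 4936 / 13 ≈ 380 bytes, and the cps values 28.0 / 20.0 / 4.0 keep the same 7:5:1 ratio, so the mix is preserved regardless of the rate multiplier.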
diff --git a/scripts/cap2/imix_1518.yaml b/scripts/cap2/imix_1518.yaml
new file mode 100755
index 00000000..ddccc987
--- /dev/null
+++ b/scripts/cap2/imix_1518.yaml
@@ -0,0 +1,70 @@
+#
+# Simple IMIX test 1518B
+#
+- duration : 3
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+#
+# the templates are duplicated on purpose, to utilize all DRAM BW and get better performance; we should do this automatically,
+# but for now it is like this, and you should have at least 8
+#
+ cap_info :
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_1518B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+
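Because the eight udp_1518B.pcap entries above are identical and exist only to spread the work across memory (per the comment in the file), the cap_info list can be generated rather than hand-copied. A small sketch under that assumption, with the field values taken from the file above:

    import yaml  # PyYAML

    def duplicated_templates(pcap, copies=8):
        # Emit `copies` identical cap_info entries for the given pcap.
        entry = {"name": pcap, "cps": 1000.0, "ipg": 10000,
                 "rtt": 10000, "w": 1, "limit": 200}
        return yaml.safe_dump([dict(entry) for _ in range(copies)], sort_keys=False)

    print(duplicated_templates("cap2/udp_1518B.pcap"))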
diff --git a/scripts/cap2/imix_64.yaml b/scripts/cap2/imix_64.yaml
new file mode 100755
index 00000000..d3c92296
--- /dev/null
+++ b/scripts/cap2/imix_64.yaml
@@ -0,0 +1,70 @@
+#
+# Simple IMIX test 64B
+#
+- duration : 3
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+#
+# the templates are duplicated on purpose, to utilize all DRAM BW and get better performance; we should do this automatically,
+# but for now it is like this, and you should have at least 8
+#
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+ - name: cap2/udp_64B.pcap
+ cps : 1000.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 200
+
diff --git a/scripts/cap2/imix_fast_1g.yaml b/scripts/cap2/imix_fast_1g.yaml
new file mode 100755
index 00000000..12ce7c4d
--- /dev/null
+++ b/scripts/cap2/imix_fast_1g.yaml
@@ -0,0 +1,53 @@
+#
+# Simple IMIX faster test (7x64B, 5x594B, 1x1518B)
+# we duplicate the template to utilize the memory better
+- duration : 3
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 90615
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
+ - name: cap2/udp_594B.pcap
+ cps : 64725
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
+ - name: cap2/udp_1518B.pcap
+ cps : 12945
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
+ - name: cap2/udp_64B.pcap
+ cps : 90615
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
+ - name: cap2/udp_594B.pcap
+ cps : 64725
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
+ - name: cap2/udp_1518B.pcap
+ cps : 12945
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 199
diff --git a/scripts/cap2/imix_fast_1g_100k_flows.yaml b/scripts/cap2/imix_fast_1g_100k_flows.yaml
new file mode 100755
index 00000000..a5fb6bc0
--- /dev/null
+++ b/scripts/cap2/imix_fast_1g_100k_flows.yaml
@@ -0,0 +1,53 @@
+#
+# Simple IMIX faster test (7x64B, 5x594B, 1x1518B)
+# we duplicate the template to utilize the memory better
+- duration : 3
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 90615
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16666
+ - name: cap2/udp_594B.pcap
+ cps : 64725
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16666
+ - name: cap2/udp_1518B.pcap
+ cps : 12945
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16667
+ - name: cap2/udp_64B.pcap
+ cps : 90615
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16667
+ - name: cap2/udp_594B.pcap
+ cps : 64725
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16667
+ - name: cap2/udp_1518B.pcap
+ cps : 12945
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 16667
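The limit values above sum to exactly 100,000 flows across the six duplicated templates (16666 + 16666 + 16667 + 16667 + 16667 + 16667 = 100,000), which is presumably where the _100k_flows suffix in the filename comes from.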
diff --git a/scripts/cap2/ipv4_vlan.yaml b/scripts/cap2/ipv4_vlan.yaml
new file mode 100755
index 00000000..63f7db7d
--- /dev/null
+++ b/scripts/cap2/ipv4_vlan.yaml
@@ -0,0 +1,21 @@
+- duration : 10
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 }
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 10.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 20
diff --git a/scripts/cap2/ipv6.pcap b/scripts/cap2/ipv6.pcap
new file mode 100755
index 00000000..5a3e23be
--- /dev/null
+++ b/scripts/cap2/ipv6.pcap
Binary files differ
diff --git a/scripts/cap2/ipv6.yaml b/scripts/cap2/ipv6.yaml
new file mode 100755
index 00000000..3de11f6c
--- /dev/null
+++ b/scripts/cap2/ipv6.yaml
@@ -0,0 +1,22 @@
+- duration : 10
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ src_ipv6 : [0x2001,0x0232,0x1002,0x0051,0x0000,0x0000]
+ dst_ipv6 : [0x3001,0x0DB8,0x0003,0x0004,0x0000,0x0000]
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/ipv6.pcap
+ cps : 10.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 20
diff --git a/scripts/cap2/ipv6_vlan.yaml b/scripts/cap2/ipv6_vlan.yaml
new file mode 100755
index 00000000..bb91a4f8
--- /dev/null
+++ b/scripts/cap2/ipv6_vlan.yaml
@@ -0,0 +1,23 @@
+- duration : 10
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.1.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ src_ipv6 : [0x2001,0x0232,0x1002,0x0051,0x0000,0x0000]
+ dst_ipv6 : [0x3001,0x0DB8,0x0003,0x0004,0x0000,0x0000]
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ vlan : { enable : 1 , vlan0 : 100 , vlan1 : 200 }
+ cap_info :
+ - name: cap2/ipv6.pcap
+ cps : 10.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ limit : 20
diff --git a/scripts/cap2/lb_ex1.yaml b/scripts/cap2/lb_ex1.yaml
new file mode 100755
index 00000000..ea7c38e7
--- /dev/null
+++ b/scripts/cap2/lb_ex1.yaml
@@ -0,0 +1,26 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+ clients_start : "10.10.10.1"
+ clients_end : "10.10.10.10"
+ servers_start : "1.1.1.1"
+ servers_end : "1.1.1.8"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_https_0.pcap
+ cps : 33.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ one_app_server : true
+ server_addr : "1.1.1.1"
+
+
+
diff --git a/scripts/cap2/limit_multi_pkt.yaml b/scripts/cap2/limit_multi_pkt.yaml
new file mode 100755
index 00000000..25e32e86
--- /dev/null
+++ b/scripts/cap2/limit_multi_pkt.yaml
@@ -0,0 +1,23 @@
+#
+# Test "limit" keyword using a pcap file that contains a flow with multiple packets
+#
+- duration : 10
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/dns.pcap
+ cps : 15.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 6
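If limit caps the number of flows generated from a template, as the header comment implies, this profile starts at most 6 dns.pcap flows even though cps 15.0 over the 10-second duration would otherwise yield roughly 150.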
diff --git a/scripts/cap2/limit_single_pkt.yaml b/scripts/cap2/limit_single_pkt.yaml
new file mode 100755
index 00000000..03802fe8
--- /dev/null
+++ b/scripts/cap2/limit_single_pkt.yaml
@@ -0,0 +1,23 @@
+#
+# Test "limit" keyword using a pcap file that contains a flow with a single packet
+#
+- duration : 10
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/udp_64B.pcap
+ cps : 6.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ limit : 15
diff --git a/scripts/cap2/mail_pop.pcap b/scripts/cap2/mail_pop.pcap
new file mode 100755
index 00000000..431b05d7
--- /dev/null
+++ b/scripts/cap2/mail_pop.pcap
Binary files differ
diff --git a/scripts/cap2/nat_test.yaml b/scripts/cap2/nat_test.yaml
new file mode 100755
index 00000000..d4fdc7db
--- /dev/null
+++ b/scripts/cap2/nat_test.yaml
@@ -0,0 +1,46 @@
+- duration : 0.1
+ generator :
+ distribution : "seq"
+
+ clients_start : "16.0.0.3"
+ clients_end : "16.0.0.103"
+ servers_start : "48.0.0.3"
+ servers_end : "48.0.0.102"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ wlength : 107
+ one_app_server : false
+ cap_info :
+ - name: avl/delay_10_video_call_0.pcap
+ cps : 30
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_video_call_rtp_0.pcap
+ cps : 60
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_https_0.pcap
+ cps : 50
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_0_0.pcap
+ cps : 19.82
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: avl/delay_10_rtp_250k_1_0.pcap
+ cps : 19.82
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/cap2/rtp_160k.pcap b/scripts/cap2/rtp_160k.pcap
new file mode 100755
index 00000000..4fbd5e52
--- /dev/null
+++ b/scripts/cap2/rtp_160k.pcap
Binary files differ
diff --git a/scripts/cap2/rtp_250k_rtp_only.pcap b/scripts/cap2/rtp_250k_rtp_only.pcap
new file mode 100755
index 00000000..ec96df3a
--- /dev/null
+++ b/scripts/cap2/rtp_250k_rtp_only.pcap
Binary files differ
diff --git a/scripts/cap2/rtp_250k_rtp_only_1.pcap b/scripts/cap2/rtp_250k_rtp_only_1.pcap
new file mode 100755
index 00000000..4ac299d8
--- /dev/null
+++ b/scripts/cap2/rtp_250k_rtp_only_1.pcap
Binary files differ
diff --git a/scripts/cap2/rtp_250k_rtp_only_2.pcap b/scripts/cap2/rtp_250k_rtp_only_2.pcap
new file mode 100755
index 00000000..e587bfaa
--- /dev/null
+++ b/scripts/cap2/rtp_250k_rtp_only_2.pcap
Binary files differ
diff --git a/scripts/cap2/rtsp.yaml b/scripts/cap2/rtsp.yaml
new file mode 100755
index 00000000..7a91c4e9
--- /dev/null
+++ b/scripts/cap2/rtsp.yaml
@@ -0,0 +1,24 @@
+- duration : 2.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ #cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/rtsp_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_full1.yaml b/scripts/cap2/rtsp_full1.yaml
new file mode 100755
index 00000000..c150f54a
--- /dev/null
+++ b/scripts/cap2/rtsp_full1.yaml
@@ -0,0 +1,17 @@
+- duration : 5.0
+ min_src_ip : 0x80f1c2c3
+ max_src_ip : 0x90f1c2c3
+ min_dst_ip : 0xa0f2c2c3
+ max_dst_ip : 0xb0f2c2c3
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/delay_10_rtp_250k_full.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_full2.yaml b/scripts/cap2/rtsp_full2.yaml
new file mode 100755
index 00000000..eb75afec
--- /dev/null
+++ b/scripts/cap2/rtsp_full2.yaml
@@ -0,0 +1,24 @@
+- duration : 4.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/delay_10_rtp_160k_full.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_short.pcap b/scripts/cap2/rtsp_short.pcap
new file mode 100755
index 00000000..36d5b775
--- /dev/null
+++ b/scripts/cap2/rtsp_short.pcap
Binary files differ
diff --git a/scripts/cap2/rtsp_short1.yaml b/scripts/cap2/rtsp_short1.yaml
new file mode 100755
index 00000000..8e0fd53d
--- /dev/null
+++ b/scripts/cap2/rtsp_short1.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/delay_10_rtp_250k_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_short1_slow.yaml b/scripts/cap2/rtsp_short1_slow.yaml
new file mode 100755
index 00000000..7a3abb59
--- /dev/null
+++ b/scripts/cap2/rtsp_short1_slow.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/delay_10_rtp_250k_short.pcap
+ cps : 0.01
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_short2.yaml b/scripts/cap2/rtsp_short2.yaml
new file mode 100755
index 00000000..8e0fd53d
--- /dev/null
+++ b/scripts/cap2/rtsp_short2.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/delay_10_rtp_250k_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/rtsp_short3.yaml b/scripts/cap2/rtsp_short3.yaml
new file mode 100755
index 00000000..8e0fd53d
--- /dev/null
+++ b/scripts/cap2/rtsp_short3.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: cap2/delay_10_rtp_250k_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 1
+
diff --git a/scripts/cap2/sfr.yaml b/scripts/cap2/sfr.yaml
new file mode 100755
index 00000000..da6391d2
--- /dev/null
+++ b/scripts/cap2/sfr.yaml
@@ -0,0 +1,90 @@
+- duration : 40
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/Oracle.pcap
+ cps : 150.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Video_Calls.pcap
+ cps : 11.4
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_160k.pcap
+ cps : 3.6
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 4.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 4.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/smtp.pcap
+ cps : 34.2
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 66.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/citrix.pcap
+ cps : 105.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/dns.pcap
+ cps : 2400.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/exchange.pcap
+ cps : 630.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 267.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 345.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 345.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/https.pcap
+ cps : 111.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 34.2
+ ipg : 10000
+ rtt : 10000
+ w : 4
+
diff --git a/scripts/cap2/sfr2.yaml b/scripts/cap2/sfr2.yaml
new file mode 100755
index 00000000..910e36bf
--- /dev/null
+++ b/scripts/cap2/sfr2.yaml
@@ -0,0 +1,30 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.0.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 1
+ udp_aging : 1
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/Oracle.pcap
+ cps : 1.0
+ ipg : 10
+ rtt : 10
+ w : 4
+ - name: cap2/smtp.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/dns.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+
diff --git a/scripts/cap2/sfr3.yaml b/scripts/cap2/sfr3.yaml
new file mode 100755
index 00000000..bb60fe9d
--- /dev/null
+++ b/scripts/cap2/sfr3.yaml
@@ -0,0 +1,90 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/Oracle.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Video_Calls.pcap
+ cps : 1.4
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_160k.pcap
+ cps : 2.6
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 3.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 4.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/smtp.pcap
+ cps : 1.2
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/citrix.pcap
+ cps : 0.1
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/dns.pcap
+ cps : 0.2
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/exchange.pcap
+ cps : 0.3
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 0.4
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 0.5
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 0.6
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/https.pcap
+ cps : 0.7
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 0.8
+ ipg : 10000
+ rtt : 10000
+ w : 4
+
diff --git a/scripts/cap2/sfr4.yaml b/scripts/cap2/sfr4.yaml
new file mode 100755
index 00000000..49d7e9b3
--- /dev/null
+++ b/scripts/cap2/sfr4.yaml
@@ -0,0 +1,20 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/http_get.pcap
+ cps : 1.0
+ ipg : 100
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile.yaml b/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile.yaml
new file mode 100755
index 00000000..b36771c4
--- /dev/null
+++ b/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile.yaml
@@ -0,0 +1,83 @@
+- duration : 1800
+ min_src_ip : 0x10000001
+ max_src_ip : 0x90000001
+ min_dst_ip : 0xa0000001
+ max_dst_ip : 0xb0000001
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/Oracle.pcap
+ cps : 60
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Video_Calls.pcap
+ cps : 9.6
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/rtp_160k.pcap
+ cps : 2.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 2.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 2.2
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/smtp.pcap
+ cps : 6.25
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 35.4
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/citrix.pcap
+ cps : 42
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/dns.pcap
+ cps : 1500
+ ipg : 10000
+ rtt : 10000
+ w : 1
+ - name: cap2/exchange.pcap
+ cps : 372
+ ipg : 10000
+ rtt : 10000
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 770
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 430
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 430
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/https.pcap
+ cps : 70
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 37.5
+ ipg : 10000
+ rtt : 10000
+ w : 4
+
diff --git a/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile_ipg_mix.yaml b/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile_ipg_mix.yaml
new file mode 100755
index 00000000..2d8d9ce8
--- /dev/null
+++ b/scripts/cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile_ipg_mix.yaml
@@ -0,0 +1,240 @@
+- duration : 1800
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: cap2/Oracle.pcap
+ cps : 21.6
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/Oracle.pcap
+ cps : 4.8
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/Oracle.pcap
+ cps : 33.6
+ ipg : 1000
+ rtt : 1000
+ w : 4
+ - name: cap2/Video_Calls.pcap
+ cps : 3.456
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/Video_Calls.pcap
+ cps : 0.768
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/Video_Calls.pcap
+ cps : 5.376
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/rtp_160k.pcap
+ cps : 0.792
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/rtp_160k.pcap
+ cps : 0.176
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/rtp_160k.pcap
+ cps : 1.232
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 0.792
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 0.176
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_1.pcap
+ cps : 1.232
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 0.792
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 0.176
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/rtp_250k_rtp_only_2.pcap
+ cps : 1.232
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/smtp.pcap
+ cps : 2.25
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/smtp.pcap
+ cps : 0.5
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/smtp.pcap
+ cps : 3.5
+ ipg : 1000
+ rtt : 1000
+ w : 4
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 12.744
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 2.832
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/Voice_calls_rtp_only.pcap
+ cps : 19.824
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/citrix.pcap
+ cps : 15.12
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/citrix.pcap
+ cps : 3.36
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/citrix.pcap
+ cps : 23.52
+ ipg : 1000
+ rtt : 1000
+ w : 4
+ - name: cap2/dns.pcap
+ cps : 540
+ ipg : 12
+ rtt : 12
+ w : 1
+ - name: cap2/dns.pcap
+ cps : 120
+ ipg : 200
+ rtt : 200
+ w : 1
+ - name: cap2/dns.pcap
+ cps : 840
+ ipg : 1000
+ rtt : 1000
+ w : 1
+ - name: cap2/exchange.pcap
+ cps : 133.92
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/exchange.pcap
+ cps : 29.76
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/exchange.pcap
+ cps : 208.32
+ ipg : 1000
+ rtt : 1000
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 277.2
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 61.6
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/http_browsing.pcap
+ cps : 431.2
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 154.8
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 34.4
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/http_get.pcap
+ cps : 240.8
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 154.8
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 34.4
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/http_post.pcap
+ cps : 240.8
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/https.pcap
+ cps : 25.2
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/https.pcap
+ cps : 5.6
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/https.pcap
+ cps : 39.2
+ ipg : 200000
+ rtt : 200000
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 13.5
+ ipg : 12
+ rtt : 12
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 3
+ ipg : 200
+ rtt : 200
+ w : 4
+ - name: cap2/mail_pop.pcap
+ cps : 21
+ ipg : 1000
+ rtt : 1000
+ w : 4
+
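Note: the *_ipg_mix profile above splits every template of the plain sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile.yaml across three ipg/rtt buckets (12, 200, and 1000 or 200000) while preserving the per-pcap aggregate cps; for example cap2/Oracle.pcap appears as 21.6 + 4.8 + 33.6 = 60, matching the 60 in the base profile. A quick sketch to verify this, assuming it is run from the scripts/ directory with PyYAML available:

    import yaml
    from collections import defaultdict

    def cps_per_pcap(path):
        # Sum cps over all cap_info entries that reference the same pcap.
        totals = defaultdict(float)
        for tmpl in yaml.safe_load(open(path))[0]['cap_info']:
            totals[tmpl['name']] += float(tmpl['cps'])
        return totals

    base = cps_per_pcap('cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile.yaml')
    mix  = cps_per_pcap('cap2/sfr_agg_tcp14_udp11_http200msec_new_high_new_nir_profile_ipg_mix.yaml')
    for name in sorted(base):
        print("{0:45s} base={1:8.2f} mix={2:8.2f}".format(name, base[name], mix[name]))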
diff --git a/scripts/cap2/short_tcp.yaml b/scripts/cap2/short_tcp.yaml
new file mode 100755
index 00000000..2c58a002
--- /dev/null
+++ b/scripts/cap2/short_tcp.yaml
@@ -0,0 +1,20 @@
+- duration : 40
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+ cap_info :
+ - name: avl/delay_10_smtp_0.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 1
+
diff --git a/scripts/cap2/sip_short1.yaml b/scripts/cap2/sip_short1.yaml
new file mode 100755
index 00000000..e644bf56
--- /dev/null
+++ b/scripts/cap2/sip_short1.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/delay_10_sip_video_call_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 2
+
diff --git a/scripts/cap2/sip_short2.yaml b/scripts/cap2/sip_short2.yaml
new file mode 100755
index 00000000..e644bf56
--- /dev/null
+++ b/scripts/cap2/sip_short2.yaml
@@ -0,0 +1,24 @@
+- duration : 1.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ #cap_ipg_min : 100
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/delay_10_sip_video_call_short.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+ plugin_id : 2
+
diff --git a/scripts/cap2/smtp.pcap b/scripts/cap2/smtp.pcap
new file mode 100755
index 00000000..2cb9d893
--- /dev/null
+++ b/scripts/cap2/smtp.pcap
Binary files differ
diff --git a/scripts/cap2/test_mac.yaml b/scripts/cap2/test_mac.yaml
new file mode 100755
index 00000000..8ae7eaea
--- /dev/null
+++ b/scripts/cap2/test_mac.yaml
@@ -0,0 +1,9 @@
+- min_ip : "1.1.1.1"
+ items :
+ - ip : "1.1.1.1"
+ mac : [0x2,0x0,0x0,0x1,0x0,0x0]
+
+ - ip : "1.1.1.2"
+ mac : [0x3,0x0,0x0,0x1,0x0,0x0]
+ - ip : "1.1.1.103"
+ mac : [0x3,0x0,0x0,0x1,0x0,0x0]
diff --git a/scripts/cap2/test_pcap_mode1.yaml b/scripts/cap2/test_pcap_mode1.yaml
new file mode 100755
index 00000000..3af7b088
--- /dev/null
+++ b/scripts/cap2/test_pcap_mode1.yaml
@@ -0,0 +1,24 @@
+- duration : 0.5
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ #cap_ipg_min : 30
+ #cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/citrix_0.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+
+
diff --git a/scripts/cap2/test_pcap_mode2.yaml b/scripts/cap2/test_pcap_mode2.yaml
new file mode 100755
index 00000000..6bb6ba3a
--- /dev/null
+++ b/scripts/cap2/test_pcap_mode2.yaml
@@ -0,0 +1,24 @@
+- duration : 10.0
+ generator :
+ distribution : "seq"
+ clients_start : "16.0.0.1"
+ clients_end : "16.0.0.255"
+ servers_start : "48.0.0.1"
+ servers_end : "48.0.255.255"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+ tcp_aging : 0
+ udp_aging : 0
+ cap_ipg : true
+ cap_ipg_min : 30
+ cap_override_ipg : 200
+ mac : [0x00,0x00,0x00,0x01,0x00,0x00]
+ cap_info :
+ - name: avl/citrix_0.pcap
+ cps : 1.0
+ ipg : 10000
+ rtt : 10000
+ w : 5
+
+
diff --git a/scripts/cap2/tuple_gen.yaml b/scripts/cap2/tuple_gen.yaml
new file mode 100755
index 00000000..873d9336
--- /dev/null
+++ b/scripts/cap2/tuple_gen.yaml
@@ -0,0 +1,10 @@
+- distribution : "seq"
+ clients_start : "10.0.0.0"
+ clients_end : "20.0.0.0"
+ servers_start : "30.0.0.0"
+ servers_end : "40.0.0.0"
+ clients_per_gb : 201
+ min_clients : 101
+ dual_port_mask : "1.0.0.0"
+
+
diff --git a/scripts/cap2/udp_1518B.pcap b/scripts/cap2/udp_1518B.pcap
new file mode 100755
index 00000000..f4f7af15
--- /dev/null
+++ b/scripts/cap2/udp_1518B.pcap
Binary files differ
diff --git a/scripts/cap2/udp_594B.pcap b/scripts/cap2/udp_594B.pcap
new file mode 100755
index 00000000..6c94cbc3
--- /dev/null
+++ b/scripts/cap2/udp_594B.pcap
Binary files differ
diff --git a/scripts/cap2/udp_64B.pcap b/scripts/cap2/udp_64B.pcap
new file mode 100755
index 00000000..699b9c80
--- /dev/null
+++ b/scripts/cap2/udp_64B.pcap
Binary files differ
diff --git a/scripts/cfg/cfg_example1.yaml b/scripts/cfg/cfg_example1.yaml
new file mode 100755
index 00000000..bfd7fd88
--- /dev/null
+++ b/scripts/cfg/cfg_example1.yaml
@@ -0,0 +1,27 @@
+- port_limit : 2 # this option can limit the number of ports of the platform
+ version : 2
+ interfaces : ["03:00.0","03:00.1"] #
+ interface_mask : [ "0000:11:00.00", "0000:11:00.01" ] # deprecated
+ scan_only_1g : true
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4507
+ telnet_port : 4508 # the telnet port in case it is enabled (with interactive mode)
+ port_info : # set the MAC addresses
+ - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] # port 0
+ src_mac : [0x2,0x0,0x0,0x2,0x0,0x00]
+ - dest_mac : [0x3,0x0,0x0,0x3,0x0,0x00] # port 1
+ src_mac : [0x4,0x0,0x0,0x4,0x0,0x00]
+ - dest_mac : [0x5,0x0,0x0,0x5,0x0,0x00] # port 2
+ src_mac : [0x6,0x0,0x0,0x6,0x0,0x00]
+ - dest_mac : [0x7,0x0,0x0,0x7,0x0,0x01] # port 3
+ src_mac : [0x0,0x0,0x0,0x8,0x0,0x02]
+ - dest_mac : [0x0,0x0,0x0,0x9,0x0,0x03] # port 4
+ src_mac : [0x0,0x0,0x0,0xa,0x0,0x04]
+ - dest_mac : [0x0,0x0,0x0,0xb,0x0,0x05] # port 5
+ src_mac : [0x0,0x0,0x0,0xc,0x0,0x06]
+ - dest_mac : [0x0,0x0,0x0,0xd,0x0,0x07] # port 6
+ src_mac : [0x0,0x0,0x0,0xa,0x0,0x08]
+ - dest_mac : [0x0,0x0,0x0,0xb,0x0,0x09] # port 7
+ src_mac : [0x0,0x0,0x0,0xc,0x0,0x0a]
+
+
diff --git a/scripts/cfg/cfg_example2.yaml b/scripts/cfg/cfg_example2.yaml
new file mode 100755
index 00000000..a381fb2f
--- /dev/null
+++ b/scripts/cfg/cfg_example2.yaml
@@ -0,0 +1,26 @@
+- port_limit : 2 # this option can limit the number of ports of the platform
+ version : 2
+ interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] # list of the interfaces to bind; run ./dpdk_nic_bind.py --status to see them
+ interface_mask : [ "0000:22:00.00", "0000:22:00.01" ] # deprecated
+ enable_zmq_pub : false # enable publisher for stats data
+ zmq_pub_port : 4500
+ telnet_port : 4501 # the telnet port in case it is enabled (with interactive mode)
+ port_info : # set the MAC addresses
+ - dest_mac : [0x1,0x0,0x0,0x1,0x0,0x00] # port 0
+ src_mac : [0x2,0x0,0x0,0x2,0x0,0x00]
+ - dest_mac : [0x3,0x0,0x0,0x3,0x0,0x00] # port 1
+ src_mac : [0x4,0x0,0x0,0x4,0x0,0x00]
+ - dest_mac : [0x5,0x0,0x0,0x5,0x0,0x00] # port 2
+ src_mac : [0x6,0x0,0x0,0x6,0x0,0x00]
+ - dest_mac : [0x7,0x0,0x0,0x7,0x0,0x01] # port 3
+ src_mac : [0x0,0x0,0x0,0x8,0x0,0x02]
+ - dest_mac : [0x0,0x0,0x0,0x9,0x0,0x03] # port 4
+ src_mac : [0x0,0x0,0x0,0xa,0x0,0x04]
+ - dest_mac : [0x0,0x0,0x0,0xb,0x0,0x05] # port 5
+ src_mac : [0x0,0x0,0x0,0xc,0x0,0x06]
+ - dest_mac : [0x0,0x0,0x0,0xd,0x0,0x07] # port 6
+ src_mac : [0x0,0x0,0x0,0xa,0x0,0x08]
+ - dest_mac : [0x0,0x0,0x0,0xb,0x0,0x09] # port 7
+ src_mac : [0x0,0x0,0x0,0xc,0x0,0x0a]
+
+
diff --git a/scripts/cfg/ins1.yaml b/scripts/cfg/ins1.yaml
new file mode 100755
index 00000000..49278db4
--- /dev/null
+++ b/scripts/cfg/ins1.yaml
@@ -0,0 +1,25 @@
+- version : 2
+ interfaces : ["03:00.0","03:00.1"]
+ port_limit : 2
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4507
+ prefix : setup1
+ limit_memory : 1024
+ c : 4
+# for a system with a 1Gb/sec NIC or a VM, adjust this
+ port_bandwidth_gb : 10 # port bandwidth in Gb/sec; for a VM put 1, for XL710 put 40
+ platform :
+ master_thread_id : 0
+ latency_thread_id : 5
+ dual_if :
+ - socket : 0
+ threads : [1,2,3,4]
+
+
+
+
+
+
+
+
+
diff --git a/scripts/cfg/ins2.yaml b/scripts/cfg/ins2.yaml
new file mode 100755
index 00000000..3a010750
--- /dev/null
+++ b/scripts/cfg/ins2.yaml
@@ -0,0 +1,25 @@
+- version : 2
+ interfaces : ["82:00.0","82:00.1"]
+ port_limit : 2
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4510
+ #prefix : setup3
+ #limit_memory : 2048
+ c : 4
+# for a system with a 1Gb/sec NIC or a VM, adjust this
+ port_bandwidth_gb : 10 # port bandwidth in Gb/sec; for a VM put 1, for XL710 put 40
+ platform :
+ master_thread_id : 12
+ latency_thread_id : 13
+ dual_if :
+ - socket : 1
+ threads : [8,9,10,11]
+
+
+
+
+
+
+
+
+
diff --git a/scripts/cfg/ins3.yaml b/scripts/cfg/ins3.yaml
new file mode 100755
index 00000000..b03f7428
--- /dev/null
+++ b/scripts/cfg/ins3.yaml
@@ -0,0 +1,27 @@
+- version : 2
+ interfaces : ["03:00.0","03:00.1","82:00.0","82:00.1"]
+ port_limit : 4
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4500
+ #prefix : setup3
+ #limit_memory : 2048
+ c : 4
+# for a system with a 1Gb/sec NIC or a VM, adjust this
+ port_bandwidth_gb : 10 # port bandwidth in Gb/sec; for a VM put 1, for XL710 put 40
+ platform :
+ master_thread_id : 0
+ latency_thread_id : 5
+ dual_if :
+ - socket : 1
+ threads : [1,2,3,4]
+ - socket : 0
+ threads : [8,9,10,11]
+
+
+
+
+
+
+
+
+
diff --git a/scripts/cfg/ucs_h0.yaml b/scripts/cfg/ucs_h0.yaml
new file mode 100755
index 00000000..3b620e03
--- /dev/null
+++ b/scripts/cfg/ucs_h0.yaml
@@ -0,0 +1,9 @@
+- port_limit : 2
+ version : 2
+ interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] # list of the interfaces to bind; run ./dpdk_nic_bind.py --status to see them
+ interface_mask : [ "0000:11:00.00", "0000:11:00.01" ,"0000:0b:00.02","0000:0b:00.03"] # deprecated
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4500
+ telnet_port : 4501 # the telnet port in case it is enabled (with interactive mode)
+
+
diff --git a/scripts/cfg/ucs_h1.yaml b/scripts/cfg/ucs_h1.yaml
new file mode 100755
index 00000000..cb0caf54
--- /dev/null
+++ b/scripts/cfg/ucs_h1.yaml
@@ -0,0 +1,9 @@
+- port_limit : 2
+ version : 2
+ interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] # list of the interfaces to bind; run ./dpdk_nic_bind.py --status to see them
+ interface_mask : [ "0000:11:00.00", "0000:11:00.01" ,"0000:0b:00.00","0000:0b:00.01"] # deprecated
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4510
+ telnet_port : 4511 # the telnet port in case it is enabled (with interactive mode)
+
+
diff --git a/scripts/cfg/xl710.yaml b/scripts/cfg/xl710.yaml
new file mode 100755
index 00000000..ed66078a
--- /dev/null
+++ b/scripts/cfg/xl710.yaml
@@ -0,0 +1,29 @@
+- version : 2
+ interfaces : ["08:00.0","08:00.1"]
+ port_limit : 2
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4507
+ c : 4
+# for a system with a 1Gb/sec NIC or a VM, adjust this
+ port_bandwidth_gb : 40 # port bandwidth in Gb/sec; for a VM put 1, for XL710 put 40
+ platform :
+ master_thread_id : 0
+ latency_thread_id : 5
+ dual_if :
+ - socket : 0
+ threads : [1,2,3,4,6,7]
+ port_info : # set the MAC addresses
+ - dest_mac : [0x00,0x0c,0x29,0x55,0x37,0xc7] # port 0
+ src_mac : [0x00,0x0c,0x29,0x55,0x37,0xbd]
+ - dest_mac : [0x00,0x0c,0x29,0x55,0x37,0xbd] # port 1
+ src_mac : [0x00,0x0c,0x29,0x55,0x37,0xc7]
+
+
+
+
+
+
+
+
+
+
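Note: the platform section in the cfg/ins*.yaml and cfg/xl710.yaml files assigns cores: master_thread_id and latency_thread_id each take one core, and every dual_if entry lists the data-plane cores for a pair of ports on a given NUMA socket. Presumably these core IDs must not overlap; a small sketch that checks a config file for duplicates (the no-overlap rule is an inference, not stated in these files):

    import yaml

    def check_cores(cfg_path):
        plat = yaml.safe_load(open(cfg_path))[0]['platform']
        used = [plat['master_thread_id'], plat['latency_thread_id']]
        for dual in plat['dual_if']:
            used.extend(dual['threads'])
        dupes = sorted(set(c for c in used if used.count(c) > 1))
        print("cores used: {0}  duplicates: {1}".format(sorted(set(used)), dupes or 'none'))

    # check_cores('cfg/xl710.yaml')   # run from the scripts/ directory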
diff --git a/scripts/daemon_server b/scripts/daemon_server
new file mode 100755
index 00000000..90fc614d
--- /dev/null
+++ b/scripts/daemon_server
@@ -0,0 +1,27 @@
+#!/usr/bin/python
+
+import os
+import sys
+
+core = 0
+
+if '--core' in sys.argv:
+ try:
+ idx = sys.argv.index('--core')
+ core = int(sys.argv[idx + 1])
+ if core > 31 or core < 0:
+ print "Error: please provide core argument between 0 to 31"
+ exit(-1)
+ del sys.argv[idx:idx+2]
+ except IndexError:
+ print "Error: please make sure core option provided with argument"
+ exit(-1)
+ except ValueError:
+ print "Error: please make sure core option provided with integer argument"
+ exit(-1)
+
+str_argv = ' '.join(sys.argv[1:])
+cmd = "taskset -c {core} python automation/trex_control_plane/server/trex_daemon_server.py {argv}".format(core = core, argv = str_argv)
+os.system(cmd)
+
+
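Note: the daemon_server wrapper above strips an optional --core N argument (0 to 31), forwards the remaining arguments to trex_daemon_server.py, and pins the daemon to the chosen core with taskset. A sketch of the equivalent command construction; the 'start' sub-command in the example call is an assumption about trex_daemon_server.py, not something shown in this diff:

    def build_daemon_cmd(argv, default_core=0):
        # Replicates the wrapper: pull '--core N' out of argv, forward the rest.
        argv = list(argv)
        core = default_core
        if '--core' in argv:
            idx = argv.index('--core')
            core = int(argv[idx + 1])
            if not 0 <= core <= 31:
                raise ValueError('core must be between 0 and 31')
            del argv[idx:idx + 2]
        return "taskset -c {0} python automation/trex_control_plane/server/trex_daemon_server.py {1}".format(
            core, ' '.join(argv))

    # build_daemon_cmd(['--core', '2', 'start'])
    # -> 'taskset -c 2 python automation/trex_control_plane/server/trex_daemon_server.py start'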
diff --git a/scripts/doc_process.py b/scripts/doc_process.py
new file mode 100755
index 00000000..d371cc02
--- /dev/null
+++ b/scripts/doc_process.py
@@ -0,0 +1,109 @@
+import sys,os
+import re
+import argparse
+
+
+class doc_driver(object):
+ args=None;
+
+def str_hex_to_ip(ip_hex):
+ bytes = ["".join(x) for x in zip(*[iter(ip_hex)]*2)]
+ bytes = [int(x, 16) for x in bytes]
+ s=".".join(str(x) for x in bytes)
+ return s
+
+class CTemplateFile:
+ def __init__ (self):
+ self.l=[];
+
+ def dump (self):
+ print "pkt,time sec,template,fid,flow-pkt-id,client_ip,client_port,server_ip ,desc"
+ for obj in self.l:
+ s='';
+ if obj[7]=='1':
+ s ='->'
+ else:
+ s ='<-'
+
+ print obj[0],",",obj[1],",",obj[6],",", obj[2],",",obj[4],",",str_hex_to_ip(obj[11]),",",obj[13],",",str_hex_to_ip(obj[12]),",",s
+
+ def dump_sj (self):
+ flows=0;
+ print "["
+ for obj in self.l:
+ print "[",
+ print obj[1],",",int("0x"+obj[2],16),",",obj[6],",",obj[4],
+ print "],"
+
+ print "]"
+
+
+
+ def load_file (self,file):
+ f=open(file,'r');
+ x=f.readlines()
+ f.close();
+ found=False;
+ for line in x:
+ if found :
+ line=line.rstrip('\n');
+ info=line.split(',');
+ if len(info)>5:
+ self.l+=[info];
+ else:
+ break;
+ else:
+ if 'pkt_id' in line:
+ found=True;
+
+
+
+
+def do_main ():
+ temp = CTemplateFile();
+ temp.load_file(doc_driver.args.file)
+ temp.dump();
+ temp.dump_sj();
+
+
+def process_options ():
+ parser = argparse.ArgumentParser(usage="""
+ doc_process -f <debug_output>
+ """,
+ description="process bp-sim output for docomentation",
+ epilog=" written by hhaim");
+
+ parser.add_argument("-f", "--file", dest="file",
+ metavar="file",
+ required=True)
+
+ parser.add_argument('--version', action='version',
+ version="0.1" )
+
+ doc_driver.args = parser.parse_args();
+
+
+
+def main ():
+ try:
+ process_options ();
+ do_main ()
+ exit(0);
+ except Exception, e:
+ print str(e);
+ exit(-1);
+
+
+
+if __name__ == "__main__":
+ main()
+
+
+
+
+
+
+
+
+
+
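Note: str_hex_to_ip in doc_process.py above turns an 8-hex-digit address into dotted decimal, which is handy for reading the min_src_ip/max_src_ip values used in the profiles earlier in this commit (e.g. 0x10000001). A standalone version of the same conversion for quick checks:

    def str_hex_to_ip(ip_hex):
        # Split the hex string into byte pairs and join them as decimal octets.
        return ".".join(str(int(ip_hex[i:i + 2], 16)) for i in range(0, len(ip_hex), 2))

    print(str_hex_to_ip("10000001"))   # -> 16.0.0.1
    print(str_hex_to_ip("a0000001"))   # -> 160.0.0.1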
diff --git a/scripts/dpdk_nic_bind.py b/scripts/dpdk_nic_bind.py
new file mode 100755
index 00000000..08402227
--- /dev/null
+++ b/scripts/dpdk_nic_bind.py
@@ -0,0 +1,539 @@
+#! /usr/bin/python
+#
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+
+import sys, os, getopt, subprocess
+from os.path import exists, abspath, dirname, basename
+
+
+# The PCI device class for ETHERNET devices
+ETHERNET_CLASS = "0200"
+
+# global dict ethernet devices present. Dictionary indexed by PCI address.
+# Each device within this is itself a dictionary of device properties
+devices = {}
+# list of supported DPDK drivers
+dpdk_drivers = [ "igb_uio", "vfio-pci" ]
+
+# command-line arg flags
+b_flag = None
+status_flag = False
+force_flag = False
+args = []
+
+def usage():
+ '''Print usage information for the program'''
+ argv0 = basename(sys.argv[0])
+ print """
+Usage:
+------
+
+ %(argv0)s [options] DEVICE1 DEVICE2 ....
+
+where DEVICE1, DEVICE2 etc, are specified via PCI "domain:bus:slot.func" syntax
+or "bus:slot.func" syntax. For devices bound to Linux kernel drivers, they may
+also be referred to by Linux interface name e.g. eth0, eth1, em0, em1, etc.
+
+Options:
+ --help, --usage:
+ Display usage information and quit
+
+ --status:
+ Print the current status of all known network interfaces.
+ For each device, it displays the PCI domain, bus, slot and function,
+ along with a text description of the device. Depending upon whether the
+ device is being used by a kernel driver, the igb_uio driver, or no
+ driver, other relevant information will be displayed:
+ * the Linux interface name e.g. if=eth0
+ * the driver being used e.g. drv=igb_uio
+ * any suitable drivers not currently using that device
+ e.g. unused=igb_uio
+ NOTE: if this flag is passed along with a bind/unbind option, the status
+ display will always occur after the other operations have taken place.
+
+ -b driver, --bind=driver:
+ Select the driver to use or \"none\" to unbind the device
+
+ -u, --unbind:
+ Unbind a device (Equivalent to \"-b none\")
+
+ --force:
+ By default, devices which are used by Linux - as indicated by having
+ routes in the routing table - cannot be modified. Using the --force
+ flag overrides this behavior, allowing active links to be forcibly
+ unbound.
+ WARNING: This can lead to loss of network connection and should be used
+ with caution.
+
+Examples:
+---------
+
+To display current device status:
+ %(argv0)s --status
+
+To bind eth1 from the current driver and move to use igb_uio
+ %(argv0)s --bind=igb_uio eth1
+
+To unbind 0000:01:00.0 from using any driver
+ %(argv0)s -u 0000:01:00.0
+
+To bind 0000:02:00.0 and 0000:02:00.1 to the ixgbe kernel driver
+ %(argv0)s -b ixgbe 02:00.0 02:00.1
+
+ """ % locals() # replace items from local variables
+
+# This is roughly compatible with check_output function in subprocess module
+# which is only available in python 2.7.
+def check_output(args, stderr=None):
+ '''Run a command and capture its output'''
+ return subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=stderr).communicate()[0]
+
+def find_module(mod):
+ '''find the .ko file for kernel module named mod.
+ Searches the $RTE_SDK/$RTE_TARGET directory, the kernel
+ modules directory and finally under the parent directory of
+ the script '''
+ # check $RTE_SDK/$RTE_TARGET directory
+ if 'RTE_SDK' in os.environ and 'RTE_TARGET' in os.environ:
+ path = "%s/%s/kmod/%s.ko" % (os.environ['RTE_SDK'],\
+ os.environ['RTE_TARGET'], mod)
+ if exists(path):
+ return path
+
+ # check using depmod
+ try:
+ depmod_out = check_output(["modinfo", "-n", mod], \
+ stderr=subprocess.STDOUT).lower()
+ if "error" not in depmod_out:
+ path = depmod_out.strip()
+ if exists(path):
+ return path
+ except: # if modinfo can't find module, it fails, so continue
+ pass
+
+ # check for a copy based off current path
+ tools_dir = dirname(abspath(sys.argv[0]))
+ if (tools_dir.endswith("tools")):
+ base_dir = dirname(tools_dir)
+ find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
+ if len(find_out) > 0: #something matched
+ path = find_out.splitlines()[0]
+ if exists(path):
+ return path
+
+def check_modules():
+ '''Checks that igb_uio is loaded'''
+ global dpdk_drivers
+
+ fd = file("/proc/modules")
+ loaded_mods = fd.readlines()
+ fd.close()
+
+ # list of supported modules
+ mods = [{"Name" : driver, "Found" : False} for driver in dpdk_drivers]
+
+ # first check if module is loaded
+ for line in loaded_mods:
+ for mod in mods:
+ if line.startswith(mod["Name"]):
+ mod["Found"] = True
+ # special case for vfio_pci (module is named vfio-pci,
+ # but its .ko is named vfio_pci)
+ elif line.replace("_", "-").startswith(mod["Name"]):
+ mod["Found"] = True
+
+ # check if we have at least one loaded module
+ if True not in [mod["Found"] for mod in mods] and b_flag is not None:
+ print "Error - no supported modules are loaded"
+ sys.exit(1)
+
+ # change DPDK driver list to only contain drivers that are loaded
+ dpdk_drivers = [mod["Name"] for mod in mods if mod["Found"]]
+
+def has_driver(dev_id):
+ '''return true if a device is assigned to a driver. False otherwise'''
+ return "Driver_str" in devices[dev_id]
+
+def get_pci_device_details(dev_id):
+ '''This function gets additional details for a PCI device'''
+ device = {}
+
+ extra_info = check_output(["lspci", "-vmmks", dev_id]).splitlines()
+
+ # parse lspci details
+ for line in extra_info:
+ if len(line) == 0:
+ continue
+ name, value = line.split("\t", 1)
+ name = name.strip(":") + "_str"
+ device[name] = value
+ # check for a unix interface name
+ sys_path = "/sys/bus/pci/devices/%s/net/" % dev_id
+ if exists(sys_path):
+ device["Interface"] = ",".join(os.listdir(sys_path))
+ else:
+ device["Interface"] = ""
+ # check if a port is used for ssh connection
+ device["Ssh_if"] = False
+ device["Active"] = ""
+
+ return device
+
+def get_nic_details():
+ '''This function populates the "devices" dictionary. The keys used are
+ the pci addresses (domain:bus:slot.func). The values are themselves
+ dictionaries - one for each NIC.'''
+ global devices
+ global dpdk_drivers
+
+ # clear any old data
+ devices = {}
+ # first loop through and read details for all devices
+ # request machine readable format, with numeric IDs
+ dev = {};
+ dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
+ for dev_line in dev_lines:
+ if (len(dev_line) == 0):
+ if dev["Class"] == ETHERNET_CLASS:
+ #convert device and vendor ids to numbers, then add to global
+ dev["Vendor"] = int(dev["Vendor"],16)
+ dev["Device"] = int(dev["Device"],16)
+ devices[dev["Slot"]] = dict(dev) # use dict to make copy of dev
+ else:
+ name, value = dev_line.split("\t", 1)
+ dev[name.rstrip(":")] = value
+
+ # check what is the interface if any for an ssh connection if
+ # any to this host, so we can mark it later.
+ ssh_if = []
+ route = check_output(["ip", "-o", "route"])
+ # filter out all lines for 169.254 routes
+ route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
+ route.splitlines()))
+ rt_info = route.split()
+ for i in xrange(len(rt_info) - 1):
+ if rt_info[i] == "dev":
+ ssh_if.append(rt_info[i+1])
+
+ # based on the basic info, get extended text details
+ for d in devices.keys():
+ # get additional info and add it to existing data
+ devices[d] = dict(devices[d].items() +
+ get_pci_device_details(d).items())
+
+ for _if in ssh_if:
+ if _if in devices[d]["Interface"].split(","):
+ devices[d]["Ssh_if"] = True
+ devices[d]["Active"] = "*Active*"
+ break;
+
+ # add igb_uio to list of supporting modules if needed
+ if "Module_str" in devices[d]:
+ for driver in dpdk_drivers:
+ if driver not in devices[d]["Module_str"]:
+ devices[d]["Module_str"] = devices[d]["Module_str"] + ",%s" % driver
+ else:
+ devices[d]["Module_str"] = ",".join(dpdk_drivers)
+
+ # make sure the driver and module strings do not have any duplicates
+ if has_driver(d):
+ modules = devices[d]["Module_str"].split(",")
+ if devices[d]["Driver_str"] in modules:
+ modules.remove(devices[d]["Driver_str"])
+ devices[d]["Module_str"] = ",".join(modules)
+
+def dev_id_from_dev_name(dev_name):
+ '''Take a device "name" - a string passed in by user to identify a NIC
+ device, and determine the device id - i.e. the domain:bus:slot.func - for
+ it, which can then be used to index into the devices array'''
+ dev = None
+ # check if it's already a suitable index
+ if dev_name in devices:
+ return dev_name
+ # check if it's an index just missing the domain part
+ elif "0000:" + dev_name in devices:
+ return "0000:" + dev_name
+ else:
+ # check if it's an interface name, e.g. eth1
+ for d in devices.keys():
+ if dev_name in devices[d]["Interface"].split(","):
+ return devices[d]["Slot"]
+ # if nothing else matches - error
+ print "Unknown device: %s. " \
+ "Please specify device in \"bus:slot.func\" format" % dev_name
+ sys.exit(1)
+
+def unbind_one(dev_id, force):
+ '''Unbind the device identified by "dev_id" from its current driver'''
+ dev = devices[dev_id]
+ if not has_driver(dev_id):
+ print "%s %s %s is not currently managed by any driver\n" % \
+ (dev["Slot"], dev["Device_str"], dev["Interface"])
+ return
+
+ # prevent us disconnecting ourselves
+ if dev["Ssh_if"] and not force:
+ print "Routing table indicates that interface %s is active" \
+ ". Skipping unbind" % (dev_id)
+ return
+
+ # write to /sys to unbind
+ filename = "/sys/bus/pci/drivers/%s/unbind" % dev["Driver_str"]
+ try:
+ f = open(filename, "a")
+ except:
+ print "Error: unbind failed for %s - Cannot open %s" % (dev_id, filename)
+ sys.exit(1)
+ f.write(dev_id)
+ f.close()
+
+def bind_one(dev_id, driver, force):
+ '''Bind the device given by "dev_id" to the driver "driver". If the device
+ is already bound to a different driver, it will be unbound first'''
+ dev = devices[dev_id]
+ saved_driver = None # used to rollback any unbind in case of failure
+
+ # prevent disconnection of our ssh session
+ if dev["Ssh_if"] and not force:
+ print "Routing table indicates that interface %s is active" \
+ ". Not modifying" % (dev_id)
+ return
+
+ # unbind any existing drivers we don't want
+ if has_driver(dev_id):
+ if dev["Driver_str"] == driver:
+ print "%s already bound to driver %s, skipping\n" % (dev_id, driver)
+ return
+ else:
+ saved_driver = dev["Driver_str"]
+ unbind_one(dev_id, force)
+ dev["Driver_str"] = "" # clear driver string
+
+ # if we are binding to one of DPDK drivers, add PCI id's to that driver
+ if driver in dpdk_drivers:
+ filename = "/sys/bus/pci/drivers/%s/new_id" % driver
+ try:
+ f = open(filename, "w")
+ except:
+ print "Error: bind failed for %s - Cannot open %s" % (dev_id, filename)
+ exit(-1)
+ return
+ try:
+ f.write("%04x %04x" % (dev["Vendor"], dev["Device"]))
+ f.close()
+ except:
+ print "Error: bind failed for %s - Cannot write new PCI ID to " \
+ "driver %s" % (dev_id, driver)
+ exit(-1)
+ return
+
+ # do the bind by writing to /sys
+ filename = "/sys/bus/pci/drivers/%s/bind" % driver
+ try:
+ f = open(filename, "a")
+ except:
+ print "Error: bind failed for %s - Cannot open %s" % (dev_id, filename)
+ if saved_driver is not None: # restore any previous driver
+ bind_one(dev_id, saved_driver, force)
+ return
+ try:
+ f.write(dev_id)
+ f.close()
+ except:
+ # for some reason, closing dev_id after adding a new PCI ID to new_id
+ # results in IOError. however, if the device was successfully bound,
+ # we don't care for any errors and can safely ignore IOError
+ tmp = get_pci_device_details(dev_id)
+ if "Driver_str" in tmp and tmp["Driver_str"] == driver:
+ return
+ print "Error: bind failed for %s - Cannot bind to driver %s" % (dev_id, driver)
+ if saved_driver is not None: # restore any previous driver
+ bind_one(dev_id, saved_driver, force)
+ return
+
+
+def unbind_all(dev_list, force=False):
+ """Unbind method, takes a list of device locations"""
+ dev_list = map(dev_id_from_dev_name, dev_list)
+ for d in dev_list:
+ unbind_one(d, force)
+
+def bind_all(dev_list, driver, force=False):
+ """Unbind method, takes a list of device locations"""
+ global devices
+
+ dev_list = map(dev_id_from_dev_name, dev_list)
+
+ for d in dev_list:
+ bind_one(d, driver, force)
+
+ # when binding devices to a generic driver (i.e. one that doesn't have a
+ # PCI ID table), some devices that are not bound to any other driver could
+ # be bound even if no one has asked them to. hence, we check the list of
+ # drivers again, and see if some of the previously-unbound devices were
+ # erroneously bound.
+ for d in devices.keys():
+ # skip devices that were already bound or that we know should be bound
+ if "Driver_str" in devices[d] or d in dev_list:
+ continue
+
+ # update information about this device
+ devices[d] = dict(devices[d].items() +
+ get_pci_device_details(d).items())
+
+ # check if updated information indicates that the device was bound
+ if "Driver_str" in devices[d]:
+ unbind_one(d, force)
+
+def display_devices(title, dev_list, extra_params = None):
+ '''Displays to the user the details of a list of devices given in "dev_list"
+ The "extra_params" parameter, if given, should contain a string with
+ %()s fields in it for replacement by the named fields in each device's
+ dictionary.'''
+ strings = [] # this holds the strings to print. We sort before printing
+ print "\n%s" % title
+ print "="*len(title)
+ if len(dev_list) == 0:
+ strings.append("<none>")
+ else:
+ for dev in dev_list:
+ if extra_params is not None:
+ strings.append("%s '%s' %s" % (dev["Slot"], \
+ dev["Device_str"], extra_params % dev))
+ else:
+ strings.append("%s '%s'" % (dev["Slot"], dev["Device_str"]))
+ # sort before printing, so that the entries appear in PCI order
+ strings.sort()
+ print "\n".join(strings) # print one per line
+
+def show_status():
+ '''Function called when the script is passed the "--status" option. Displays
+ to the user what devices are bound to the igb_uio driver, the kernel driver
+ or to no driver'''
+ global dpdk_drivers
+ kernel_drv = []
+ dpdk_drv = []
+ no_drv = []
+
+ # split our list of devices into the three categories above
+ for d in devices.keys():
+ if not has_driver(d):
+ no_drv.append(devices[d])
+ continue
+ if devices[d]["Driver_str"] in dpdk_drivers:
+ dpdk_drv.append(devices[d])
+ else:
+ kernel_drv.append(devices[d])
+
+ # print each category separately, so we can clearly see what's used by DPDK
+ display_devices("Network devices using DPDK-compatible driver", dpdk_drv, \
+ "drv=%(Driver_str)s unused=%(Module_str)s")
+ display_devices("Network devices using kernel driver", kernel_drv,
+ "if=%(Interface)s drv=%(Driver_str)s unused=%(Module_str)s %(Active)s")
+ display_devices("Other network devices", no_drv,\
+ "unused=%(Module_str)s")
+
+def parse_args():
+ '''Parses the command-line arguments given by the user and takes the
+ appropriate action for each'''
+ global b_flag
+ global status_flag
+ global force_flag
+ global args
+ if len(sys.argv) <= 1:
+ usage()
+ sys.exit(0)
+
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "b:u",
+ ["help", "usage", "status", "force",
+ "bind=", "unbind"])
+ except getopt.GetoptError, error:
+ print str(error)
+ print "Run '%s --usage' for further information" % sys.argv[0]
+ sys.exit(1)
+
+ for opt, arg in opts:
+ if opt == "--help" or opt == "--usage":
+ usage()
+ sys.exit(0)
+ if opt == "--status":
+ status_flag = True
+ if opt == "--force":
+ force_flag = True
+ if opt == "-b" or opt == "-u" or opt == "--bind" or opt == "--unbind":
+ if b_flag is not None:
+ print "Error - Only one bind or unbind may be specified\n"
+ sys.exit(1)
+ if opt == "-u" or opt == "--unbind":
+ b_flag = "none"
+ else:
+ b_flag = arg
+
+def do_arg_actions():
+ '''do the actual action requested by the user'''
+ global b_flag
+ global status_flag
+ global force_flag
+ global args
+
+ if b_flag is None and not status_flag:
+ print "Error: No action specified for devices. Please give a -b or -u option"
+ print "Run '%s --usage' for further information" % sys.argv[0]
+ sys.exit(1)
+
+ if b_flag is not None and len(args) == 0:
+ print "Error: No devices specified."
+ print "Run '%s --usage' for further information" % sys.argv[0]
+ sys.exit(1)
+
+ if b_flag == "none" or b_flag == "None":
+ unbind_all(args, force_flag)
+ elif b_flag is not None:
+ bind_all(args, b_flag, force_flag)
+ if status_flag:
+ if b_flag is not None:
+ get_nic_details() # refresh if we have changed anything
+ show_status()
+
+def main():
+ '''program main function'''
+ parse_args()
+ check_modules()
+ get_nic_details()
+ do_arg_actions()
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/dpdk_setup_ports.py b/scripts/dpdk_setup_ports.py
new file mode 100755
index 00000000..8dfd742e
--- /dev/null
+++ b/scripts/dpdk_setup_ports.py
@@ -0,0 +1,232 @@
+#! /usr/bin/python
+# hhaim
+import sys,site
+site.addsitedir('python-lib')
+import yaml
+import os.path
+import os
+import dpdk_nic_bind
+import re
+import argparse;
+
+
+
+class map_driver(object):
+ args=None;
+ cfg_file='/etc/trex_cfg.yaml'
+
+class DpdkSetup(Exception):
+ pass
+
+class CIfMap:
+
+ def __init__(self, cfg_file):
+ self.m_cfg_file =cfg_file;
+ self.m_cfg_dict={};
+ self.m_devices={};
+
+ def dump_error (self,err):
+ s="""%s
+From this TRex version on, a configuration file must exist in the /etc/ folder.
+The name of the configuration file should be /etc/trex_cfg.yaml.
+A minimal configuration file should include something like this:
+- version : 2 # version 2 of the configuration file
+ interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] # list of the interfaces to bind; run ./dpdk_nic_bind.py --status to see the list
+ port_limit : 2 # number of ports to use; valid values are 2,4,6,8
+
+example of already bound devices
+
+$ ./dpdk_nic_bind.py --status
+
+Network devices using DPDK-compatible driver
+============================================
+0000:03:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
+0000:03:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
+0000:13:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
+0000:13:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
+
+Network devices using kernel driver
+===================================
+0000:02:00.0 '82545EM Gigabit Ethernet Controller (Copper)' if=eth2 drv=e1000 unused=igb_uio *Active*
+
+Other network devices
+=====================
+
+
+ """ % (err);
+ return s;
+
+
+ def raise_error (self,err):
+ s= self.dump_error (err)
+ raise DpdkSetup(s)
+
+ def load_config_file (self):
+
+ fcfg=self.m_cfg_file
+
+ if not os.path.isfile(fcfg) :
+ self.raise_error ("There is no valid configuration file %s " % fcfg)
+
+ try:
+ stream = open(fcfg, 'r')
+ self.m_cfg_dict= yaml.load(stream)
+ except Exception,e:
+ print e;
+ raise e
+
+ stream.close();
+
+ if not self.m_cfg_dict[0].has_key('version') :
+ self.raise_error ("Configuration file %s is old, should include version field" % fcfg )
+
+ if int(self.m_cfg_dict[0]['version'])<2 :
+ self.raise_error ("Configuration file %s is old, should include version field with value greater than 2" % fcfg)
+
+
+ if not self.m_cfg_dict[0].has_key('interfaces') :
+ self.raise_error ("Configuration file %s is old, should include interfaces field with 2,4,6,8 number of elemets" % fcfg)
+
+ if_list=self.m_cfg_dict[0]['interfaces']
+ if not (len(if_list) in [2,4,6,8]):
+ self.raise_error ("Configuration file %s should include interfaces field with 2,4,6,8 number of elemets" % fcfg)
+
+ def do_bind_one (self,key):
+ cmd='./dpdk_nic_bind.py --force --bind=igb_uio %s ' % ( key)
+ print cmd
+ res=os.system(cmd);
+ if res!=0:
+ raise DpdkSetup('')
+
+
+
+ def pci_name_to_full_name (self,pci_name):
+ c='[0-9A-Fa-f]';
+ sp='[:]'
+ s_short=c+c+sp+c+c+'[.]'+c;
+ s_full=c+c+c+c+sp+s_short
+ re_full = re.compile(s_full)
+ re_short = re.compile(s_short)
+
+ if re_short.match(pci_name):
+ return '0000:'+pci_name
+
+ if re_full.match(pci_name):
+ return pci_name
+
+ err=" %s is not a valid pci address \n" %pci_name;
+ raise DpdkSetup(err)
+
+
+ def run_dpdk_lspci (self):
+ dpdk_nic_bind.get_nic_details()
+ self.m_devices= dpdk_nic_bind.devices
+
+ def do_run (self):
+ self.load_config_file ()
+ self.run_dpdk_lspci ()
+
+ if_list=self.m_cfg_dict[0]['interfaces']
+
+ for obj in if_list:
+ key= self.pci_name_to_full_name (obj)
+ if not self.m_devices.has_key(key) :
+ err=" %s does not exist " %key;
+ raise DpdkSetup(err)
+
+
+ if self.m_devices[key].has_key('Driver_str'):
+ if self.m_devices[key]['Driver_str'] !='igb_uio' :
+ self.do_bind_one (key)
+ else:
+ self.do_bind_one (key)
+
+
+ def do_create (self):
+ print " not supported yet !"
+
+
+def parse_parent_cfg (parent_cfg):
+ l=parent_cfg.split(" ");
+ cfg_file='';
+ next=False;
+ for obj in l:
+ if next:
+ cfg_file=obj
+ next=False;
+ if obj == '--cfg':
+ next=True
+
+ return (cfg_file)
+
+def process_options ():
+ parser = argparse.ArgumentParser(usage="""
+
+Examples:
+---------
+
+To unbind the interfaces using the trex configuration file
+ dpdk_setup_ports.py -l
+
+To create a default file
+ dpdk_setup_ports.py -c
+
+To show status
+ dpdk_setup_ports.py -s
+
+ """,
+ description=" unbind dpdk interfaces ",
+ epilog=" written by hhaim");
+
+ parser.add_argument("-l", "--load", action='store_true',
+ help=""" unbind the interfaces using the configuration file given """,
+ )
+
+ parser.add_argument("--cfg",
+ help=""" configuration file name """,
+ )
+
+ parser.add_argument("--parent",
+ help=""" parent configuration CLI, extract --cfg from it if given """,
+ )
+
+ parser.add_argument("-c", "--create", action='store_true',
+ help=""" try to create a configuration file. It is heuristic try to look into the file before """,
+ )
+
+ parser.add_argument("-s", "--show", action='store_true',
+ help=""" show the status """,
+ )
+
+ parser.add_argument('--version', action='version',
+ version="0.1" )
+
+ map_driver.args = parser.parse_args();
+
+ if map_driver.args.parent :
+ cfg = parse_parent_cfg (map_driver.args.parent)
+ if cfg != '':
+ map_driver.cfg_file = cfg;
+ if map_driver.args.cfg :
+ map_driver.cfg_file = map_driver.args.cfg;
+
+def main ():
+ try:
+ process_options ()
+
+ if map_driver.args.show:
+ res=os.system('./dpdk_nic_bind.py --status');
+ return(res);
+
+ obj =CIfMap(map_driver.cfg_file);
+
+ if map_driver.args.create:
+ obj.do_create();
+ else:
+ obj.do_run();
+ except Exception,e:
+ print e
+ exit(-1)
+
+main();
+
diff --git a/scripts/exp/dns-0-ex.erf b/scripts/exp/dns-0-ex.erf
new file mode 100755
index 00000000..5ffffcb3
--- /dev/null
+++ b/scripts/exp/dns-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns-0.erf b/scripts/exp/dns-0.erf
new file mode 100644
index 00000000..d8d601b7
--- /dev/null
+++ b/scripts/exp/dns-0.erf
Binary files differ
diff --git a/scripts/exp/dns_e-0-ex.erf b/scripts/exp/dns_e-0-ex.erf
new file mode 100755
index 00000000..7ebfd69a
--- /dev/null
+++ b/scripts/exp/dns_e-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_e-0.erf b/scripts/exp/dns_e-0.erf
new file mode 100644
index 00000000..7ebfd69a
--- /dev/null
+++ b/scripts/exp/dns_e-0.erf
Binary files differ
diff --git a/scripts/exp/dns_flip-0-ex.erf b/scripts/exp/dns_flip-0-ex.erf
new file mode 100755
index 00000000..f6074ad7
--- /dev/null
+++ b/scripts/exp/dns_flip-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_flip-0.erf b/scripts/exp/dns_flip-0.erf
new file mode 100644
index 00000000..f6074ad7
--- /dev/null
+++ b/scripts/exp/dns_flip-0.erf
Binary files differ
diff --git a/scripts/exp/dns_ipv6-0-ex.erf b/scripts/exp/dns_ipv6-0-ex.erf
new file mode 100755
index 00000000..c47a6496
--- /dev/null
+++ b/scripts/exp/dns_ipv6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_ipv6-0.erf b/scripts/exp/dns_ipv6-0.erf
new file mode 100644
index 00000000..53dee235
--- /dev/null
+++ b/scripts/exp/dns_ipv6-0.erf
Binary files differ
diff --git a/scripts/exp/dns_ipv6_rxcheck-ex.erf b/scripts/exp/dns_ipv6_rxcheck-ex.erf
new file mode 100755
index 00000000..ebf95197
--- /dev/null
+++ b/scripts/exp/dns_ipv6_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_ipv6_rxcheck.erf b/scripts/exp/dns_ipv6_rxcheck.erf
new file mode 100644
index 00000000..ebf95197
--- /dev/null
+++ b/scripts/exp/dns_ipv6_rxcheck.erf
Binary files differ
diff --git a/scripts/exp/dns_one_server-0-ex.erf b/scripts/exp/dns_one_server-0-ex.erf
new file mode 100755
index 00000000..0d3d447b
--- /dev/null
+++ b/scripts/exp/dns_one_server-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_one_server-0.erf b/scripts/exp/dns_one_server-0.erf
new file mode 100644
index 00000000..ef76a69b
--- /dev/null
+++ b/scripts/exp/dns_one_server-0.erf
Binary files differ
diff --git a/scripts/exp/dns_p-0-ex.erf b/scripts/exp/dns_p-0-ex.erf
new file mode 100755
index 00000000..ec313584
--- /dev/null
+++ b/scripts/exp/dns_p-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_p-0.erf b/scripts/exp/dns_p-0.erf
new file mode 100644
index 00000000..ec313584
--- /dev/null
+++ b/scripts/exp/dns_p-0.erf
Binary files differ
diff --git a/scripts/exp/dns_rxcheck-ex.erf b/scripts/exp/dns_rxcheck-ex.erf
new file mode 100755
index 00000000..21a610a6
--- /dev/null
+++ b/scripts/exp/dns_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_rxcheck.erf b/scripts/exp/dns_rxcheck.erf
new file mode 100644
index 00000000..21a610a6
--- /dev/null
+++ b/scripts/exp/dns_rxcheck.erf
Binary files differ
diff --git a/scripts/exp/dns_single_server-0-ex.erf b/scripts/exp/dns_single_server-0-ex.erf
new file mode 100755
index 00000000..244211eb
--- /dev/null
+++ b/scripts/exp/dns_single_server-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_wlen-0-ex.erf b/scripts/exp/dns_wlen-0-ex.erf
new file mode 100755
index 00000000..069cd431
--- /dev/null
+++ b/scripts/exp/dns_wlen-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_wlen1-0-ex.erf b/scripts/exp/dns_wlen1-0-ex.erf
new file mode 100755
index 00000000..226a614a
--- /dev/null
+++ b/scripts/exp/dns_wlen1-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dns_wlen2-0-ex.erf b/scripts/exp/dns_wlen2-0-ex.erf
new file mode 100755
index 00000000..ae5d518e
--- /dev/null
+++ b/scripts/exp/dns_wlen2-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dyn_pyld1-0-ex.erf b/scripts/exp/dyn_pyld1-0-ex.erf
new file mode 100755
index 00000000..7d2089db
--- /dev/null
+++ b/scripts/exp/dyn_pyld1-0-ex.erf
Binary files differ
diff --git a/scripts/exp/dyn_pyld1-0.erf b/scripts/exp/dyn_pyld1-0.erf
new file mode 100644
index 00000000..175a810c
--- /dev/null
+++ b/scripts/exp/dyn_pyld1-0.erf
Binary files differ
diff --git a/scripts/exp/http1_with_option-ex.pcap b/scripts/exp/http1_with_option-ex.pcap
new file mode 100755
index 00000000..ef5bf3c4
--- /dev/null
+++ b/scripts/exp/http1_with_option-ex.pcap
Binary files differ
diff --git a/scripts/exp/http1_with_option.pcap b/scripts/exp/http1_with_option.pcap
new file mode 100644
index 00000000..ef5bf3c4
--- /dev/null
+++ b/scripts/exp/http1_with_option.pcap
Binary files differ
diff --git a/scripts/exp/http1_with_option_ipv6-ex.pcap b/scripts/exp/http1_with_option_ipv6-ex.pcap
new file mode 100755
index 00000000..f70c1114
--- /dev/null
+++ b/scripts/exp/http1_with_option_ipv6-ex.pcap
Binary files differ
diff --git a/scripts/exp/http1_with_option_ipv6.pcap b/scripts/exp/http1_with_option_ipv6.pcap
new file mode 100644
index 00000000..f70c1114
--- /dev/null
+++ b/scripts/exp/http1_with_option_ipv6.pcap
Binary files differ
diff --git a/scripts/exp/http_plugin-0-ex.erf b/scripts/exp/http_plugin-0-ex.erf
new file mode 100755
index 00000000..f195e6ae
--- /dev/null
+++ b/scripts/exp/http_plugin-0-ex.erf
Binary files differ
diff --git a/scripts/exp/http_plugin-0.erf b/scripts/exp/http_plugin-0.erf
new file mode 100644
index 00000000..e2320eeb
--- /dev/null
+++ b/scripts/exp/http_plugin-0.erf
Binary files differ
diff --git a/scripts/exp/http_plugin_v6-0-ex.erf b/scripts/exp/http_plugin_v6-0-ex.erf
new file mode 100755
index 00000000..a0b43754
--- /dev/null
+++ b/scripts/exp/http_plugin_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/http_plugin_v6-0.erf b/scripts/exp/http_plugin_v6-0.erf
new file mode 100644
index 00000000..96e79eae
--- /dev/null
+++ b/scripts/exp/http_plugin_v6-0.erf
Binary files differ
diff --git a/scripts/exp/imix-0-ex.erf b/scripts/exp/imix-0-ex.erf
new file mode 100755
index 00000000..233e6b31
--- /dev/null
+++ b/scripts/exp/imix-0-ex.erf
Binary files differ
diff --git a/scripts/exp/imix-0.erf b/scripts/exp/imix-0.erf
new file mode 100644
index 00000000..c41a3006
--- /dev/null
+++ b/scripts/exp/imix-0.erf
Binary files differ
diff --git a/scripts/exp/imix_v6-0-ex.erf b/scripts/exp/imix_v6-0-ex.erf
new file mode 100755
index 00000000..56412091
--- /dev/null
+++ b/scripts/exp/imix_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/imix_v6-0.erf b/scripts/exp/imix_v6-0.erf
new file mode 100644
index 00000000..a85ed2b9
--- /dev/null
+++ b/scripts/exp/imix_v6-0.erf
Binary files differ
diff --git a/scripts/exp/ipv4_vlan-0-ex.erf b/scripts/exp/ipv4_vlan-0-ex.erf
new file mode 100755
index 00000000..abbdb652
--- /dev/null
+++ b/scripts/exp/ipv4_vlan-0-ex.erf
Binary files differ
diff --git a/scripts/exp/ipv4_vlan-0.erf b/scripts/exp/ipv4_vlan-0.erf
new file mode 100644
index 00000000..3a9ec6d1
--- /dev/null
+++ b/scripts/exp/ipv4_vlan-0.erf
Binary files differ
diff --git a/scripts/exp/ipv6-0-ex.erf b/scripts/exp/ipv6-0-ex.erf
new file mode 100755
index 00000000..85d30377
--- /dev/null
+++ b/scripts/exp/ipv6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/ipv6-0.erf b/scripts/exp/ipv6-0.erf
new file mode 100644
index 00000000..50f89ece
--- /dev/null
+++ b/scripts/exp/ipv6-0.erf
Binary files differ
diff --git a/scripts/exp/ipv6_vlan-0-ex.erf b/scripts/exp/ipv6_vlan-0-ex.erf
new file mode 100755
index 00000000..ac7cc39b
--- /dev/null
+++ b/scripts/exp/ipv6_vlan-0-ex.erf
Binary files differ
diff --git a/scripts/exp/ipv6_vlan-0.erf b/scripts/exp/ipv6_vlan-0.erf
new file mode 100644
index 00000000..771f5c03
--- /dev/null
+++ b/scripts/exp/ipv6_vlan-0.erf
Binary files differ
diff --git a/scripts/exp/limit_multi_pkt-0-ex.erf b/scripts/exp/limit_multi_pkt-0-ex.erf
new file mode 100755
index 00000000..23d536fc
--- /dev/null
+++ b/scripts/exp/limit_multi_pkt-0-ex.erf
Binary files differ
diff --git a/scripts/exp/limit_multi_pkt-0.erf b/scripts/exp/limit_multi_pkt-0.erf
new file mode 100644
index 00000000..a2ff8815
--- /dev/null
+++ b/scripts/exp/limit_multi_pkt-0.erf
Binary files differ
diff --git a/scripts/exp/limit_single_pkt-0-ex.erf b/scripts/exp/limit_single_pkt-0-ex.erf
new file mode 100755
index 00000000..3f7f0ff2
--- /dev/null
+++ b/scripts/exp/limit_single_pkt-0-ex.erf
Binary files differ
diff --git a/scripts/exp/limit_single_pkt-0.erf b/scripts/exp/limit_single_pkt-0.erf
new file mode 100644
index 00000000..548d2e3f
--- /dev/null
+++ b/scripts/exp/limit_single_pkt-0.erf
Binary files differ
diff --git a/scripts/exp/pcap_mode1-0-ex.erf b/scripts/exp/pcap_mode1-0-ex.erf
new file mode 100755
index 00000000..245b5ffb
--- /dev/null
+++ b/scripts/exp/pcap_mode1-0-ex.erf
Binary files differ
diff --git a/scripts/exp/pcap_mode1-0.erf b/scripts/exp/pcap_mode1-0.erf
new file mode 100644
index 00000000..ad5469f8
--- /dev/null
+++ b/scripts/exp/pcap_mode1-0.erf
Binary files differ
diff --git a/scripts/exp/pcap_mode2-0-ex.erf b/scripts/exp/pcap_mode2-0-ex.erf
new file mode 100755
index 00000000..e19274f7
--- /dev/null
+++ b/scripts/exp/pcap_mode2-0-ex.erf
Binary files differ
diff --git a/scripts/exp/pcap_mode2-0.erf b/scripts/exp/pcap_mode2-0.erf
new file mode 100644
index 00000000..348d1a4c
--- /dev/null
+++ b/scripts/exp/pcap_mode2-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1-0-ex.erf b/scripts/exp/rtsp_short1-0-ex.erf
new file mode 100755
index 00000000..7a7522b2
--- /dev/null
+++ b/scripts/exp/rtsp_short1-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1-0.erf b/scripts/exp/rtsp_short1-0.erf
new file mode 100644
index 00000000..09e180c6
--- /dev/null
+++ b/scripts/exp/rtsp_short1-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf b/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf
new file mode 100755
index 00000000..06ac6484
--- /dev/null
+++ b/scripts/exp/rtsp_short1_ipv6_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_ipv6_rxcheck.erf b/scripts/exp/rtsp_short1_ipv6_rxcheck.erf
new file mode 100644
index 00000000..06ac6484
--- /dev/null
+++ b/scripts/exp/rtsp_short1_ipv6_rxcheck.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_rxcheck-ex.erf b/scripts/exp/rtsp_short1_rxcheck-ex.erf
new file mode 100755
index 00000000..cd120df9
--- /dev/null
+++ b/scripts/exp/rtsp_short1_rxcheck-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_rxcheck.erf b/scripts/exp/rtsp_short1_rxcheck.erf
new file mode 100644
index 00000000..cd120df9
--- /dev/null
+++ b/scripts/exp/rtsp_short1_rxcheck.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_v6-0-ex.erf b/scripts/exp/rtsp_short1_v6-0-ex.erf
new file mode 100755
index 00000000..1b6d6391
--- /dev/null
+++ b/scripts/exp/rtsp_short1_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short1_v6-0.erf b/scripts/exp/rtsp_short1_v6-0.erf
new file mode 100644
index 00000000..57373ba5
--- /dev/null
+++ b/scripts/exp/rtsp_short1_v6-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short2-0-ex.erf b/scripts/exp/rtsp_short2-0-ex.erf
new file mode 100755
index 00000000..7a7522b2
--- /dev/null
+++ b/scripts/exp/rtsp_short2-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short2-0.erf b/scripts/exp/rtsp_short2-0.erf
new file mode 100644
index 00000000..09e180c6
--- /dev/null
+++ b/scripts/exp/rtsp_short2-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short2_v6-0-ex.erf b/scripts/exp/rtsp_short2_v6-0-ex.erf
new file mode 100755
index 00000000..1b6d6391
--- /dev/null
+++ b/scripts/exp/rtsp_short2_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short2_v6-0.erf b/scripts/exp/rtsp_short2_v6-0.erf
new file mode 100644
index 00000000..57373ba5
--- /dev/null
+++ b/scripts/exp/rtsp_short2_v6-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short3-0-ex.erf b/scripts/exp/rtsp_short3-0-ex.erf
new file mode 100755
index 00000000..bae34983
--- /dev/null
+++ b/scripts/exp/rtsp_short3-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short3-0.erf b/scripts/exp/rtsp_short3-0.erf
new file mode 100644
index 00000000..93ad0fa4
--- /dev/null
+++ b/scripts/exp/rtsp_short3-0.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short3_v6-0-ex.erf b/scripts/exp/rtsp_short3_v6-0-ex.erf
new file mode 100755
index 00000000..e68dc787
--- /dev/null
+++ b/scripts/exp/rtsp_short3_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/rtsp_short3_v6-0.erf b/scripts/exp/rtsp_short3_v6-0.erf
new file mode 100644
index 00000000..281bd36f
--- /dev/null
+++ b/scripts/exp/rtsp_short3_v6-0.erf
Binary files differ
diff --git a/scripts/exp/sctp-ex.erf b/scripts/exp/sctp-ex.erf
new file mode 100755
index 00000000..bcbaf0c9
--- /dev/null
+++ b/scripts/exp/sctp-ex.erf
Binary files differ
diff --git a/scripts/exp/sctp.erf b/scripts/exp/sctp.erf
new file mode 100644
index 00000000..6eaac10c
--- /dev/null
+++ b/scripts/exp/sctp.erf
Binary files differ
diff --git a/scripts/exp/sfr2-0-ex.erf b/scripts/exp/sfr2-0-ex.erf
new file mode 100755
index 00000000..5e2b791f
--- /dev/null
+++ b/scripts/exp/sfr2-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sfr2-0.erf b/scripts/exp/sfr2-0.erf
new file mode 100644
index 00000000..bf5ff3ef
--- /dev/null
+++ b/scripts/exp/sfr2-0.erf
Binary files differ
diff --git a/scripts/exp/sfr3-0-ex.erf b/scripts/exp/sfr3-0-ex.erf
new file mode 100755
index 00000000..fa9b1008
--- /dev/null
+++ b/scripts/exp/sfr3-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sfr3-0.erf b/scripts/exp/sfr3-0.erf
new file mode 100644
index 00000000..bd14de4f
--- /dev/null
+++ b/scripts/exp/sfr3-0.erf
Binary files differ
diff --git a/scripts/exp/sfr_4-0-ex.erf b/scripts/exp/sfr_4-0-ex.erf
new file mode 100755
index 00000000..a0cddc92
--- /dev/null
+++ b/scripts/exp/sfr_4-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sfr_4-0.erf b/scripts/exp/sfr_4-0.erf
new file mode 100644
index 00000000..8cbf4275
--- /dev/null
+++ b/scripts/exp/sfr_4-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short1-0-ex.erf b/scripts/exp/sip_short1-0-ex.erf
new file mode 100755
index 00000000..762fc157
--- /dev/null
+++ b/scripts/exp/sip_short1-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short1-0.erf b/scripts/exp/sip_short1-0.erf
new file mode 100644
index 00000000..c78c215e
--- /dev/null
+++ b/scripts/exp/sip_short1-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short1_v6-0-ex.erf b/scripts/exp/sip_short1_v6-0-ex.erf
new file mode 100755
index 00000000..cb027045
--- /dev/null
+++ b/scripts/exp/sip_short1_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short1_v6-0.erf b/scripts/exp/sip_short1_v6-0.erf
new file mode 100644
index 00000000..a77b5c60
--- /dev/null
+++ b/scripts/exp/sip_short1_v6-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short2-0-ex.erf b/scripts/exp/sip_short2-0-ex.erf
new file mode 100755
index 00000000..762fc157
--- /dev/null
+++ b/scripts/exp/sip_short2-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short2-0.erf b/scripts/exp/sip_short2-0.erf
new file mode 100644
index 00000000..c78c215e
--- /dev/null
+++ b/scripts/exp/sip_short2-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short2_v6-0-ex.erf b/scripts/exp/sip_short2_v6-0-ex.erf
new file mode 100755
index 00000000..cb027045
--- /dev/null
+++ b/scripts/exp/sip_short2_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short2_v6-0.erf b/scripts/exp/sip_short2_v6-0.erf
new file mode 100644
index 00000000..a77b5c60
--- /dev/null
+++ b/scripts/exp/sip_short2_v6-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short3-0-ex.erf b/scripts/exp/sip_short3-0-ex.erf
new file mode 100755
index 00000000..f6b0d5bb
--- /dev/null
+++ b/scripts/exp/sip_short3-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short3-0.erf b/scripts/exp/sip_short3-0.erf
new file mode 100644
index 00000000..26f38414
--- /dev/null
+++ b/scripts/exp/sip_short3-0.erf
Binary files differ
diff --git a/scripts/exp/sip_short3_v6-0-ex.erf b/scripts/exp/sip_short3_v6-0-ex.erf
new file mode 100755
index 00000000..431fab0b
--- /dev/null
+++ b/scripts/exp/sip_short3_v6-0-ex.erf
Binary files differ
diff --git a/scripts/exp/sip_short3_v6-0.erf b/scripts/exp/sip_short3_v6-0.erf
new file mode 100644
index 00000000..1f347a29
--- /dev/null
+++ b/scripts/exp/sip_short3_v6-0.erf
Binary files differ
diff --git a/scripts/ko/3.11.10-301.fc20.x86_64/igb_uio.ko b/scripts/ko/3.11.10-301.fc20.x86_64/igb_uio.ko
new file mode 100755
index 00000000..2e9148a2
--- /dev/null
+++ b/scripts/ko/3.11.10-301.fc20.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.13.0-32-generic/igb_uio.ko b/scripts/ko/3.13.0-32-generic/igb_uio.ko
new file mode 100755
index 00000000..9e93fda2
--- /dev/null
+++ b/scripts/ko/3.13.0-32-generic/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.16.0-37-generic/igb_uio.ko b/scripts/ko/3.16.0-37-generic/igb_uio.ko
new file mode 100755
index 00000000..94eac790
--- /dev/null
+++ b/scripts/ko/3.16.0-37-generic/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.17.4-301.fc21.x86_64/igb_uio.ko b/scripts/ko/3.17.4-301.fc21.x86_64/igb_uio.ko
new file mode 100755
index 00000000..bbd87c6b
--- /dev/null
+++ b/scripts/ko/3.17.4-301.fc21.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.18.9-100.fc20.x86_64/igb_uio.ko b/scripts/ko/3.18.9-100.fc20.x86_64/igb_uio.ko
new file mode 100755
index 00000000..562a3cf5
--- /dev/null
+++ b/scripts/ko/3.18.9-100.fc20.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.19.1-201.fc21.x86_64/igb_uio.ko b/scripts/ko/3.19.1-201.fc21.x86_64/igb_uio.ko
new file mode 100755
index 00000000..0be8ce08
--- /dev/null
+++ b/scripts/ko/3.19.1-201.fc21.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/3.6.10-4.fc18.x86_64/igb_uio.ko b/scripts/ko/3.6.10-4.fc18.x86_64/igb_uio.ko
new file mode 100755
index 00000000..d711ee3e
--- /dev/null
+++ b/scripts/ko/3.6.10-4.fc18.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/ko/src/Makefile b/scripts/ko/src/Makefile
new file mode 100755
index 00000000..7966b1af
--- /dev/null
+++ b/scripts/ko/src/Makefile
@@ -0,0 +1,38 @@
+# obj-m is a list of what kernel modules to build. The .o and other
+# objects will be automatically built from the corresponding .c file -
+# no need to list the source files explicitly.
+
+obj-m := igb_uio.o
+
+# KDIR is the location of the kernel source. The current standard is
+# to link to the associated source tree from the directory containing
+# the compiled modules.
+KDIR := /lib/modules/$(shell uname -r)/build
+
+# PWD is the current working directory and the location of our module
+# source files.
+PWD := $(shell pwd)
+
+# default is the default make target. The rule here says to run make
+# with a working directory of the directory containing the kernel
+# source and compile only the modules in the PWD (local) directory.
+default:
+ $(MAKE) -C $(KDIR) M=$(PWD) modules
+
+clean:
+ -rm *.o >> test.log
+ -rm *.ko >> test.log
+ -rm *.*.cmd >> test.log
+ -rm *.mod.c >> test.log
+ -rm modules.order >> test.log
+ -rm Module.symvers >> test.log
+ -rm -rf .tmp_versions >> test.log
+ rm test.log
+
+
+
+# install the new driver
+install:
+ mkdir -p ../`uname -r`/
+ cp igb_uio.ko ../`uname -r`/
+
diff --git a/scripts/ko/src/compat.h b/scripts/ko/src/compat.h
new file mode 100755
index 00000000..c1d45a66
--- /dev/null
+++ b/scripts/ko/src/compat.h
@@ -0,0 +1,116 @@
+/*
+ * Minimal wrappers to allow compiling igb_uio on older kernels.
+ */
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+#define pci_cfg_access_lock pci_block_user_cfg_access
+#define pci_cfg_access_unlock pci_unblock_user_cfg_access
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
+#define HAVE_PTE_MASK_PAGE_IOMAP
+#endif
+
+#ifndef PCI_MSIX_ENTRY_SIZE
+#define PCI_MSIX_ENTRY_SIZE 16
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0
+#define PCI_MSIX_ENTRY_UPPER_ADDR 4
+#define PCI_MSIX_ENTRY_DATA 8
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
+#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34) && \
+ (!(defined(RHEL_RELEASE_CODE) && \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 9)))
+
+static int pci_num_vf(struct pci_dev *dev)
+{
+ struct iov {
+ int pos;
+ int nres;
+ u32 cap;
+ u16 ctrl;
+ u16 total;
+ u16 initial;
+ u16 nr_virtfn;
+ } *iov = (struct iov *)dev->sriov;
+
+ if (!dev->is_physfn)
+ return 0;
+
+ return iov->nr_virtfn;
+}
+
+#endif /* < 2.6.34 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && \
+ (!(defined(RHEL_RELEASE_CODE) && \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
+
+#define kstrtoul strict_strtoul
+
+#endif /* < 2.6.39 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0) && \
+ (!(defined(RHEL_RELEASE_CODE) && \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)))
+
+/* Check whether INTX can be used to control interrupts.
+ * Sets the INTX_DISABLE flag and reads it back.
+ */
+static bool pci_intx_mask_supported(struct pci_dev *pdev)
+{
+ bool mask_supported = false;
+ uint16_t orig, new;
+
+ pci_block_user_cfg_access(pdev);
+ pci_read_config_word(pdev, PCI_COMMAND, &orig);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ orig ^ PCI_COMMAND_INTX_DISABLE);
+ pci_read_config_word(pdev, PCI_COMMAND, &new);
+
+ if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
+ dev_err(&pdev->dev, "Command register changed from "
+ "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
+ } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
+ mask_supported = true;
+ pci_write_config_word(pdev, PCI_COMMAND, orig);
+ }
+ pci_unblock_user_cfg_access(pdev);
+
+ return mask_supported;
+}
+
+static bool pci_check_and_mask_intx(struct pci_dev *pdev)
+{
+ bool pending;
+ uint32_t status;
+
+ pci_block_user_cfg_access(pdev);
+ pci_read_config_dword(pdev, PCI_COMMAND, &status);
+
+	/* if the status bit is clear, the interrupt is not ours */
+ pending = (((status >> 16) & PCI_STATUS_INTERRUPT) != 0);
+ if (pending) {
+ uint16_t old, new;
+
+ old = status;
+ if (status != 0)
+ new = old & (~PCI_COMMAND_INTX_DISABLE);
+ else
+ new = old | PCI_COMMAND_INTX_DISABLE;
+
+ if (old != new)
+ pci_write_config_word(pdev, PCI_COMMAND, new);
+ }
+ pci_unblock_user_cfg_access(pdev);
+
+ return pending;
+}
+
+#endif /* < 3.3.0 */
diff --git a/scripts/ko/src/igb_uio.c b/scripts/ko/src/igb_uio.c
new file mode 100755
index 00000000..faeb0b68
--- /dev/null
+++ b/scripts/ko/src/igb_uio.c
@@ -0,0 +1,643 @@
+/*-
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/uio_driver.h>
+#include <linux/io.h>
+#include <linux/msi.h>
+#include <linux/version.h>
+
+#ifdef CONFIG_XEN_DOM0
+#include <xen/xen.h>
+#endif
+#include "rte_pci_dev_features.h"
+
+#include "compat.h"
+
+#ifdef RTE_PCI_CONFIG
+#define PCI_SYS_FILE_BUF_SIZE 10
+#define PCI_DEV_CAP_REG 0xA4
+#define PCI_DEV_CTRL_REG 0xA8
+#define PCI_DEV_CAP_EXT_TAG_MASK 0x20
+#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
+#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
+#endif
+
+/**
+ * A structure describing the private information for a uio device.
+ */
+struct rte_uio_pci_dev {
+ struct uio_info info;
+ struct pci_dev *pdev;
+ enum rte_intr_mode mode;
+};
+
+static char *intr_mode = NULL;
+static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
+
+static inline struct rte_uio_pci_dev *
+igbuio_get_uio_pci_dev(struct uio_info *info)
+{
+ return container_of(info, struct rte_uio_pci_dev, info);
+}
+
+/* sriov sysfs */
+static ssize_t
+show_max_vfs(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, 10, "%u\n",
+ pci_num_vf(container_of(dev, struct pci_dev, dev)));
+}
+
+static ssize_t
+store_max_vfs(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err = 0;
+ unsigned long max_vfs;
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+
+ if (0 != kstrtoul(buf, 0, &max_vfs))
+ return -EINVAL;
+
+ if (0 == max_vfs)
+ pci_disable_sriov(pdev);
+ else if (0 == pci_num_vf(pdev))
+ err = pci_enable_sriov(pdev, max_vfs);
+	else /* changing max_vfs while VFs are already enabled is not allowed */
+ err = -EINVAL;
+
+ return err ? err : count;
+}
+
+#ifdef RTE_PCI_CONFIG
+static ssize_t
+show_extended_tag(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ uint32_t val = 0;
+
+ pci_read_config_dword(pci_dev, PCI_DEV_CAP_REG, &val);
+ if (!(val & PCI_DEV_CAP_EXT_TAG_MASK)) /* Not supported */
+ return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%s\n", "invalid");
+
+ val = 0;
+ pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CTRL_REG, &val);
+
+ return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%s\n",
+ (val & PCI_DEV_CTRL_EXT_TAG_MASK) ? "on" : "off");
+}
+
+static ssize_t
+store_extended_tag(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ uint32_t val = 0, enable;
+
+ if (strncmp(buf, "on", 2) == 0)
+ enable = 1;
+ else if (strncmp(buf, "off", 3) == 0)
+ enable = 0;
+ else
+ return -EINVAL;
+
+ pci_cfg_access_lock(pci_dev);
+ pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CAP_REG, &val);
+ if (!(val & PCI_DEV_CAP_EXT_TAG_MASK)) { /* Not supported */
+ pci_cfg_access_unlock(pci_dev);
+ return -EPERM;
+ }
+
+ val = 0;
+ pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CTRL_REG, &val);
+ if (enable)
+ val |= PCI_DEV_CTRL_EXT_TAG_MASK;
+ else
+ val &= ~PCI_DEV_CTRL_EXT_TAG_MASK;
+ pci_bus_write_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CTRL_REG, val);
+ pci_cfg_access_unlock(pci_dev);
+
+ return count;
+}
+
+static ssize_t
+show_max_read_request_size(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ int val = pcie_get_readrq(pci_dev);
+
+ return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%d\n", val);
+}
+
+static ssize_t
+store_max_read_request_size(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ unsigned long size = 0;
+ int ret;
+
+ if (0 != kstrtoul(buf, 0, &size))
+ return -EINVAL;
+
+ ret = pcie_set_readrq(pci_dev, (int)size);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+#endif
+
+static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);
+#ifdef RTE_PCI_CONFIG
+static DEVICE_ATTR(extended_tag, S_IRUGO | S_IWUSR, show_extended_tag,
+ store_extended_tag);
+static DEVICE_ATTR(max_read_request_size, S_IRUGO | S_IWUSR,
+ show_max_read_request_size, store_max_read_request_size);
+#endif
+
+static struct attribute *dev_attrs[] = {
+ &dev_attr_max_vfs.attr,
+#ifdef RTE_PCI_CONFIG
+ &dev_attr_extended_tag.attr,
+ &dev_attr_max_read_request_size.attr,
+#endif
+ NULL,
+};
+
+static const struct attribute_group dev_attr_grp = {
+ .attrs = dev_attrs,
+};
+/*
+ * Mask or unmask generation of MSI-X messages for a given MSI-X vector.
+ */
+static void
+igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state)
+{
+ u32 mask_bits = desc->masked;
+ unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ if (state != 0)
+ mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ else
+ mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ if (mask_bits != desc->masked) {
+ writel(mask_bits, desc->mask_base + offset);
+ readl(desc->mask_base);
+ desc->masked = mask_bits;
+ }
+}
+
+/**
+ * This is the irqcontrol callback to be registered to uio_info.
+ * It can be used to enable/disable interrupts from user-space processes.
+ *
+ * @param info
+ * pointer to uio_info.
+ * @param irq_state
+ * state value. 1 to enable interrupt, 0 to disable interrupt.
+ *
+ * @return
+ * - On success, 0.
+ * - On failure, a negative value.
+ */
+static int
+igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
+{
+ struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
+ struct pci_dev *pdev = udev->pdev;
+
+ pci_cfg_access_lock(pdev);
+ if (udev->mode == RTE_INTR_MODE_LEGACY)
+ pci_intx(pdev, !!irq_state);
+
+ else if (udev->mode == RTE_INTR_MODE_MSIX) {
+ struct msi_desc *desc;
+
+ list_for_each_entry(desc, &pdev->msi_list, list)
+ igbuio_msix_mask_irq(desc, irq_state);
+ }
+ pci_cfg_access_unlock(pdev);
+
+ return 0;
+}
+
+/**
+ * Interrupt handler: checks whether the interrupt belongs to this device.
+ * If it does, the interrupt is masked here and re-enabled later from user space.
+ */
+static irqreturn_t
+igbuio_pci_irqhandler(int irq, struct uio_info *info)
+{
+ struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
+
+	/* Legacy mode needs to be masked in hardware */
+ if (udev->mode == RTE_INTR_MODE_LEGACY &&
+ !pci_check_and_mask_intx(udev->pdev))
+ return IRQ_NONE;
+
+	/* Message-signalled mode: the IRQ is not shared and is auto-masked */
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_XEN_DOM0
+static int
+igbuio_dom0_mmap_phys(struct uio_info *info, struct vm_area_struct *vma)
+{
+ int idx;
+
+ idx = (int)vma->vm_pgoff;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#ifdef HAVE_PTE_MASK_PAGE_IOMAP
+ vma->vm_page_prot.pgprot |= _PAGE_IOMAP;
+#endif
+
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ info->mem[idx].addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+/**
+ * uio mmap method that uses the igbuio mmap implementation when running
+ * in a Xen Dom0 environment.
+ */
+static int
+igbuio_dom0_pci_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+ int idx;
+
+ if (vma->vm_pgoff >= MAX_UIO_MAPS)
+ return -EINVAL;
+
+ if (info->mem[vma->vm_pgoff].size == 0)
+ return -EINVAL;
+
+ idx = (int)vma->vm_pgoff;
+ switch (info->mem[idx].memtype) {
+ case UIO_MEM_PHYS:
+ return igbuio_dom0_mmap_phys(info, vma);
+ case UIO_MEM_LOGICAL:
+ case UIO_MEM_VIRTUAL:
+ default:
+ return -EINVAL;
+ }
+}
+#endif
+
+/* Remap pci resources described by bar #pci_bar in uio resource n. */
+static int
+igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
+ int n, int pci_bar, const char *name)
+{
+ unsigned long addr, len;
+ void *internal_addr;
+
+ if (sizeof(info->mem) / sizeof(info->mem[0]) <= n)
+ return -EINVAL;
+
+ addr = pci_resource_start(dev, pci_bar);
+ len = pci_resource_len(dev, pci_bar);
+ if (addr == 0 || len == 0)
+ return -1;
+ internal_addr = ioremap(addr, len);
+ if (internal_addr == NULL)
+ return -1;
+ info->mem[n].name = name;
+ info->mem[n].addr = addr;
+ info->mem[n].internal_addr = internal_addr;
+ info->mem[n].size = len;
+ info->mem[n].memtype = UIO_MEM_PHYS;
+ return 0;
+}
+
+/* Get pci port io resources described by bar #pci_bar in uio resource n. */
+static int
+igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
+ int n, int pci_bar, const char *name)
+{
+ unsigned long addr, len;
+
+ if (sizeof(info->port) / sizeof(info->port[0]) <= n)
+ return -EINVAL;
+
+ addr = pci_resource_start(dev, pci_bar);
+ len = pci_resource_len(dev, pci_bar);
+ if (addr == 0 || len == 0)
+ return -EINVAL;
+
+ info->port[n].name = name;
+ info->port[n].start = addr;
+ info->port[n].size = len;
+ info->port[n].porttype = UIO_PORT_X86;
+
+ return 0;
+}
+
+/* Unmap previously ioremap'd resources */
+static void
+igbuio_pci_release_iomem(struct uio_info *info)
+{
+ int i;
+
+ for (i = 0; i < MAX_UIO_MAPS; i++) {
+ if (info->mem[i].internal_addr)
+ iounmap(info->mem[i].internal_addr);
+ }
+}
+
+static int
+igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
+{
+ int i, iom, iop, ret;
+ unsigned long flags;
+ static const char *bar_names[PCI_STD_RESOURCE_END + 1] = {
+ "BAR0",
+ "BAR1",
+ "BAR2",
+ "BAR3",
+ "BAR4",
+ "BAR5",
+ };
+
+ iom = 0;
+ iop = 0;
+
+ for (i = 0; i != sizeof(bar_names) / sizeof(bar_names[0]); i++) {
+ if (pci_resource_len(dev, i) != 0 &&
+ pci_resource_start(dev, i) != 0) {
+ flags = pci_resource_flags(dev, i);
+ if (flags & IORESOURCE_MEM) {
+ ret = igbuio_pci_setup_iomem(dev, info, iom,
+ i, bar_names[i]);
+ if (ret != 0)
+ return ret;
+ iom++;
+ } else if (flags & IORESOURCE_IO) {
+ ret = igbuio_pci_setup_ioport(dev, info, iop,
+ i, bar_names[i]);
+ if (ret != 0)
+ return ret;
+ iop++;
+ }
+ }
+ }
+
+ return (iom != 0) ? ret : -ENOENT;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
+static int __devinit
+#else
+static int
+#endif
+igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct rte_uio_pci_dev *udev;
+ struct msix_entry msix_entry;
+ int err;
+
+ udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
+ if (!udev)
+ return -ENOMEM;
+
+ /*
+ * enable device: ask low-level code to enable I/O and
+ * memory
+ */
+ err = pci_enable_device(dev);
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot enable PCI device\n");
+ goto fail_free;
+ }
+
+ /*
+ * reserve device's PCI memory regions for use by this
+ * module
+ */
+ err = pci_request_regions(dev, "igb_uio");
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot request regions\n");
+ goto fail_disable;
+ }
+
+ /* enable bus mastering on the device */
+ pci_set_master(dev);
+
+ /* remap IO memory */
+ err = igbuio_setup_bars(dev, &udev->info);
+ if (err != 0)
+ goto fail_release_iomem;
+
+ /* set 64-bit DMA mask */
+ err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot set DMA mask\n");
+ goto fail_release_iomem;
+ }
+
+ err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
+ goto fail_release_iomem;
+ }
+
+ /* fill uio infos */
+ udev->info.name = "igb_uio";
+ udev->info.version = "0.1";
+ udev->info.handler = igbuio_pci_irqhandler;
+ udev->info.irqcontrol = igbuio_pci_irqcontrol;
+#ifdef CONFIG_XEN_DOM0
+	/* check whether the driver runs on Xen Dom0 */
+ if (xen_initial_domain())
+ udev->info.mmap = igbuio_dom0_pci_mmap;
+#endif
+ udev->info.priv = udev;
+ udev->pdev = dev;
+
+ switch (igbuio_intr_mode_preferred) {
+ case RTE_INTR_MODE_MSIX:
+ /* Only 1 msi-x vector needed */
+ msix_entry.entry = 0;
+ if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
+ dev_dbg(&dev->dev, "using MSI-X");
+ udev->info.irq = msix_entry.vector;
+ udev->mode = RTE_INTR_MODE_MSIX;
+ break;
+ }
+ /* fall back to INTX */
+ case RTE_INTR_MODE_LEGACY:
+ if (pci_intx_mask_supported(dev)) {
+ dev_dbg(&dev->dev, "using INTX");
+ udev->info.irq_flags = IRQF_SHARED;
+ udev->info.irq = dev->irq;
+ udev->mode = RTE_INTR_MODE_LEGACY;
+ break;
+ }
+ dev_notice(&dev->dev, "PCI INTX mask not supported\n");
+ /* fall back to no IRQ */
+ case RTE_INTR_MODE_NONE:
+ udev->mode = RTE_INTR_MODE_NONE;
+ udev->info.irq = 0;
+ break;
+
+ default:
+ dev_err(&dev->dev, "invalid IRQ mode %u",
+ igbuio_intr_mode_preferred);
+ err = -EINVAL;
+ goto fail_release_iomem;
+ }
+
+ err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
+ if (err != 0)
+ goto fail_release_iomem;
+
+ /* register uio driver */
+ err = uio_register_device(&dev->dev, &udev->info);
+ if (err != 0)
+ goto fail_remove_group;
+
+ pci_set_drvdata(dev, udev);
+
+ dev_info(&dev->dev, "uio device registered with irq %lx\n",
+ udev->info.irq);
+
+ return 0;
+
+fail_remove_group:
+ sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
+fail_release_iomem:
+ igbuio_pci_release_iomem(&udev->info);
+ if (udev->mode == RTE_INTR_MODE_MSIX)
+ pci_disable_msix(udev->pdev);
+ pci_release_regions(dev);
+fail_disable:
+ pci_disable_device(dev);
+fail_free:
+ kfree(udev);
+
+ return err;
+}
+
+static void
+igbuio_pci_remove(struct pci_dev *dev)
+{
+ struct uio_info *info = pci_get_drvdata(dev);
+ struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
+
+ if (info->priv == NULL) {
+ pr_notice("Not igbuio device\n");
+ return;
+ }
+
+ sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
+ uio_unregister_device(info);
+ igbuio_pci_release_iomem(info);
+ if (udev->mode == RTE_INTR_MODE_MSIX)
+ pci_disable_msix(dev);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ pci_set_drvdata(dev, NULL);
+ kfree(info);
+}
+
+static int
+igbuio_config_intr_mode(char *intr_str)
+{
+ if (!intr_str) {
+ pr_info("Use MSIX interrupt by default\n");
+ return 0;
+ }
+
+ if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
+ igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
+ pr_info("Use MSIX interrupt\n");
+ } else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
+ igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
+ pr_info("Use legacy interrupt\n");
+ } else {
+ pr_info("Error: bad parameter - %s\n", intr_str);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct pci_driver igbuio_pci_driver = {
+ .name = "igb_uio",
+ .id_table = NULL,
+ .probe = igbuio_pci_probe,
+ .remove = igbuio_pci_remove,
+};
+
+static int __init
+igbuio_pci_init_module(void)
+{
+ int ret;
+
+ ret = igbuio_config_intr_mode(intr_mode);
+ if (ret < 0)
+ return ret;
+
+ return pci_register_driver(&igbuio_pci_driver);
+}
+
+static void __exit
+igbuio_pci_exit_module(void)
+{
+ pci_unregister_driver(&igbuio_pci_driver);
+}
+
+module_init(igbuio_pci_init_module);
+module_exit(igbuio_pci_exit_module);
+
+module_param(intr_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(intr_mode,
+"igb_uio interrupt mode (default=msix):\n"
+" " RTE_INTR_MODE_MSIX_NAME " Use MSIX interrupt\n"
+" " RTE_INTR_MODE_LEGACY_NAME " Use Legacy interrupt\n"
+"\n");
+
+MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
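The irqcontrol callback documented above (igbuio_pci_irqcontrol) is reached from user space through the standard UIO read/write interface. A minimal sketch, assuming the driver registered its device as /dev/uio0 (the node name is hypothetical and depends on probe order):

import os
import struct

fd = os.open("/dev/uio0", os.O_RDWR)          # hypothetical UIO node for this NIC

# a write() of a 32-bit value lands in igbuio_pci_irqcontrol(): 1 unmasks, 0 masks
os.write(fd, struct.pack("=I", 1))

# read() blocks until the next interrupt and returns the 32-bit event counter
count, = struct.unpack("=I", os.read(fd, 4))
print("interrupts seen so far: %d" % count)

os.write(fd, struct.pack("=I", 0))            # mask again before exiting
os.close(fd)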
diff --git a/scripts/ko/src/readme.txt b/scripts/ko/src/readme.txt
new file mode 100755
index 00000000..a8d028c9
--- /dev/null
+++ b/scripts/ko/src/readme.txt
@@ -0,0 +1,16 @@
+
+
+1. Make sure the kernel headers are installed:
+
+Fedora
+ $sudo yum install kernel-headers
+or
+Ubuntu
+ $sudo apt-get install linux-headers-$(uname -r)
+
+2. From this directory, run:
+
+$make
+$sudo modprobe uio
+$sudo insmod igb_uio.ko
+
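The readme stops at insmod; before traffic can flow, the NIC ports still have to be handed over to igb_uio. A hedged sketch of doing that through sysfs, with a hypothetical port at PCI address 0000:03:00.0 and vendor/device id 8086 10fb (substitute your own values):

PCI_ADDR = "0000:03:00.0"      # hypothetical port address
VENDOR_DEVICE = "8086 10fb"    # hypothetical vendor/device id

def sysfs_write(path, value):
    with open(path, "w") as f:
        f.write(value)

# release the port from whatever kernel driver currently owns it
try:
    sysfs_write("/sys/bus/pci/devices/%s/driver/unbind" % PCI_ADDR, PCI_ADDR)
except IOError:
    pass   # already unbound

# teach igb_uio about this id; unbound matching ports are claimed automatically
sysfs_write("/sys/bus/pci/drivers/igb_uio/new_id", VENDOR_DEVICE)

The module also accepts an intr_mode parameter (msix or legacy, see the MODULE_PARM_DESC at the end of igb_uio.c above), e.g. insmod igb_uio.ko intr_mode=legacy.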
diff --git a/scripts/ko/src/rte_pci_dev_feature_defs.h b/scripts/ko/src/rte_pci_dev_feature_defs.h
new file mode 100755
index 00000000..6316b6dd
--- /dev/null
+++ b/scripts/ko/src/rte_pci_dev_feature_defs.h
@@ -0,0 +1,45 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PCI_DEV_DEFS_H_
+#define _RTE_PCI_DEV_DEFS_H_
+
+/* interrupt mode */
+enum rte_intr_mode {
+ RTE_INTR_MODE_NONE = 0,
+ RTE_INTR_MODE_LEGACY,
+ RTE_INTR_MODE_MSI,
+ RTE_INTR_MODE_MSIX
+};
+
+#endif /* _RTE_PCI_DEV_DEFS_H_ */
diff --git a/scripts/ko/src/rte_pci_dev_features.h b/scripts/ko/src/rte_pci_dev_features.h
new file mode 100755
index 00000000..dc5bc34f
--- /dev/null
+++ b/scripts/ko/src/rte_pci_dev_features.h
@@ -0,0 +1,44 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PCI_DEV_FEATURES_H
+#define _RTE_PCI_DEV_FEATURES_H
+
+#include "rte_pci_dev_feature_defs.h"
+
+#define RTE_INTR_MODE_NONE_NAME "none"
+#define RTE_INTR_MODE_LEGACY_NAME "legacy"
+#define RTE_INTR_MODE_MSI_NAME "msi"
+#define RTE_INTR_MODE_MSIX_NAME "msix"
+
+#endif
diff --git a/scripts/libzmq.so.3 b/scripts/libzmq.so.3
new file mode 100755
index 00000000..16980c27
--- /dev/null
+++ b/scripts/libzmq.so.3
Binary files differ
diff --git a/scripts/libzmq.so.3.1.0 b/scripts/libzmq.so.3.1.0
new file mode 100755
index 00000000..16980c27
--- /dev/null
+++ b/scripts/libzmq.so.3.1.0
Binary files differ
diff --git a/scripts/python-lib/yaml/__init__.py b/scripts/python-lib/yaml/__init__.py
new file mode 100755
index 00000000..76e19e13
--- /dev/null
+++ b/scripts/python-lib/yaml/__init__.py
@@ -0,0 +1,315 @@
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
+__version__ = '3.11'
+
+try:
+ from cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ from StringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=Loader, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=Loader):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = Loader
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+ from_yaml = classmethod(from_yaml)
+
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+ to_yaml = classmethod(to_yaml)
+
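The bundled module is a vanilla PyYAML 3.11 (Python 2 flavour), so the entry points documented above apply unchanged. A small usage sketch; the snippet only resembles a TRex port configuration and is purely illustrative:

import yaml

doc = """
port_limit : 2
interfaces : ["03:00.0", "03:00.1"]
"""

cfg = yaml.safe_load(doc)        # -> {'port_limit': 2, 'interfaces': ['03:00.0', '03:00.1']}
print(cfg["interfaces"])

# round-trip back to YAML; safe_dump restricts output to basic tags
print(yaml.safe_dump(cfg, default_flow_style=False))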
diff --git a/scripts/python-lib/yaml/__init__.pyc b/scripts/python-lib/yaml/__init__.pyc
new file mode 100644
index 00000000..dcaf27c0
--- /dev/null
+++ b/scripts/python-lib/yaml/__init__.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/composer.py b/scripts/python-lib/yaml/composer.py
new file mode 100755
index 00000000..06e5ac78
--- /dev/null
+++ b/scripts/python-lib/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from error import MarkedYAMLError
+from events import *
+from nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer(object):
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+        # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor.encode('utf-8'), event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+                raise ComposerError("found duplicate anchor %r; first occurrence"
+ % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
+                        "second occurrence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
diff --git a/scripts/python-lib/yaml/composer.pyc b/scripts/python-lib/yaml/composer.pyc
new file mode 100644
index 00000000..7e5b71c1
--- /dev/null
+++ b/scripts/python-lib/yaml/composer.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/constructor.py b/scripts/python-lib/yaml/constructor.py
new file mode 100755
index 00000000..635faac3
--- /dev/null
+++ b/scripts/python-lib/yaml/constructor.py
@@ -0,0 +1,675 @@
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor(object):
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+        # Are there more documents available?
+ return self.check_node()
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+ add_constructor = classmethod(add_constructor)
+
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+ add_multi_constructor = classmethod(add_multi_constructor)
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError), exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class Constructor(SafeConstructor):
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node).encode('utf-8')
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_long(self, node):
+ return long(self.construct_yaml_int(node))
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ try:
+ __import__(name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if u'.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = '__builtin__'
+ object_name = name
+ try:
+ __import__(module_name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r" % (object_name.encode('utf-8'),
+ module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ class classobj: pass
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if newobj and isinstance(cls, type(self.classobj)) \
+ and not args and not kwds:
+ instance = self.classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/none',
+ Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/bool',
+ Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/str',
+ Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ Constructor.construct_python_unicode)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/int',
+ Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/long',
+ Constructor.construct_python_long)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/float',
+ Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/complex',
+ Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/list',
+ Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/tuple',
+ Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/name:',
+ Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/module:',
+ Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object:',
+ Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/apply:',
+ Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/new:',
+ Constructor.construct_python_object_new)
+
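# --- Editor's example (not part of the patch above) ---------------------
# A minimal sketch of how the constructors registered above are exercised,
# assuming the vendored package is importable as `yaml`; Python 2 syntax to
# match the library code.
import yaml

doc = """
when: 2001-12-14 21:59:43.10 -5
order: !!omap
  - first: 1
  - second: 2
"""
data = yaml.safe_load(doc)        # SafeConstructor handles the standard tags
print type(data['when'])          # <type 'datetime.datetime'> (offset folded into UTC)
print data['order']               # [('first', 1), ('second', 2)]

# The python/* tags below are only wired into the full Constructor, so they
# need yaml.load() with the default Loader and should only see trusted input.
print yaml.load("!!python/tuple [1, 2]")   # (1, 2)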
diff --git a/scripts/python-lib/yaml/constructor.pyc b/scripts/python-lib/yaml/constructor.pyc
new file mode 100644
index 00000000..2c0383fa
--- /dev/null
+++ b/scripts/python-lib/yaml/constructor.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/cyaml.py b/scripts/python-lib/yaml/cyaml.py
new file mode 100755
index 00000000..68dcd751
--- /dev/null
+++ b/scripts/python-lib/yaml/cyaml.py
@@ -0,0 +1,85 @@
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from constructor import *
+
+from serializer import *
+from representer import *
+
+from resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
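# --- Editor's example (not part of the patch above) ---------------------
# cyaml.py only imports when the libyaml-based `_yaml` extension module has
# been built; a common guarded-usage sketch (assumption, not from this repo):
import yaml
try:
    from yaml import CSafeLoader as FastLoader   # libyaml bindings available
except ImportError:
    from yaml import SafeLoader as FastLoader    # pure-Python fallback

print yaml.load("ports: [1, 2, 3]", Loader=FastLoader)   # {'ports': [1, 2, 3]}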
diff --git a/scripts/python-lib/yaml/cyaml.pyc b/scripts/python-lib/yaml/cyaml.pyc
new file mode 100644
index 00000000..feebb050
--- /dev/null
+++ b/scripts/python-lib/yaml/cyaml.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/dumper.py b/scripts/python-lib/yaml/dumper.py
new file mode 100755
index 00000000..f811d2c9
--- /dev/null
+++ b/scripts/python-lib/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from emitter import *
+from serializer import *
+from representer import *
+from resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
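# --- Editor's example (not part of the patch above) ---------------------
# The keyword arguments accepted by the Dumper classes above are normally
# passed straight through yaml.dump(); a small sketch:
import yaml

obj = {'name': 'trex', 'ports': [0, 1]}
print yaml.dump(obj, Dumper=yaml.SafeDumper, default_flow_style=False)
# name: trex
# ports:
# - 0
# - 1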
diff --git a/scripts/python-lib/yaml/dumper.pyc b/scripts/python-lib/yaml/dumper.pyc
new file mode 100644
index 00000000..dfe78317
--- /dev/null
+++ b/scripts/python-lib/yaml/dumper.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/emitter.py b/scripts/python-lib/yaml/emitter.py
new file mode 100755
index 00000000..e5bcdccc
--- /dev/null
+++ b/scripts/python-lib/yaml/emitter.py
@@ -0,0 +1,1140 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
+__all__ = ['Emitter', 'EmitterError']
+
+from error import YAMLError
+from events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis(object):
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter(object):
+
+ DEFAULT_TAG_PREFIXES = {
+ u'!' : u'!',
+ u'tag:yaml.org,2002:' : u'!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = u'\n'
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few next events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = self.event.tags.keys()
+ handles.sort()
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor(u'&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator(u'[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator(u'{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(u':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator(u'-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(u':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == u'')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r"
+ % (handle.encode('utf-8')))
+ for ch in handle[1:-1]:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch.encode('utf-8'), handle.encode('utf-8')))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return u''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == u'!' and handle != u'!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = u''.join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch.encode('utf-8'), anchor.encode('utf-8')))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceeded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear within a scalar as well.
+ if ch in u',?[]{}':
+ flow_indicators = True
+ if ch == u':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'#' and preceeded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special characters, are only
+ # allowed for double quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator(u'\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator(u'\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
+ or not (u'\x20' <= ch <= u'\x7E'
+ or (self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != u' ' \
+ and text[start] == u'\n':
+ self.write_line_break()
+ leading_space = (ch == u' ')
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = (ch == u' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
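# --- Editor's example (not part of the patch above) ---------------------
# The Emitter consumes the event grammar described at the top of emitter.py;
# a minimal sketch that drives it through the package-level yaml.emit():
import yaml
from yaml.events import (StreamStartEvent, DocumentStartEvent,
                         MappingStartEvent, ScalarEvent, MappingEndEvent,
                         DocumentEndEvent, StreamEndEvent)

events = [
    StreamStartEvent(),
    DocumentStartEvent(explicit=False),
    MappingStartEvent(anchor=None, tag=None, implicit=True),
    ScalarEvent(anchor=None, tag=None, implicit=(True, False), value=u'port'),
    ScalarEvent(anchor=None, tag=None, implicit=(True, False), value=u'0'),
    MappingEndEvent(),
    DocumentEndEvent(explicit=False),
    StreamEndEvent(),
]
print yaml.emit(events)   # "port: 0\n" - style and indicators chosen by the Emitter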
diff --git a/scripts/python-lib/yaml/emitter.pyc b/scripts/python-lib/yaml/emitter.pyc
new file mode 100644
index 00000000..5123fcd2
--- /dev/null
+++ b/scripts/python-lib/yaml/emitter.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/error.py b/scripts/python-lib/yaml/error.py
new file mode 100755
index 00000000..577686db
--- /dev/null
+++ b/scripts/python-lib/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark(object):
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end].encode('utf-8')
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
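# --- Editor's example (not part of the patch above) ---------------------
# Mark and MarkedYAMLError carry the position information shown in parse
# errors; a small sketch of catching one (Python 2 syntax, as in the library):
import yaml

try:
    yaml.safe_load("key: [unclosed")
except yaml.YAMLError, exc:
    if hasattr(exc, 'problem_mark'):
        mark = exc.problem_mark
        print "error at line %d, column %d" % (mark.line + 1, mark.column + 1)
    print exc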
diff --git a/scripts/python-lib/yaml/error.pyc b/scripts/python-lib/yaml/error.pyc
new file mode 100644
index 00000000..a1fcd243
--- /dev/null
+++ b/scripts/python-lib/yaml/error.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/events.py b/scripts/python-lib/yaml/events.py
new file mode 100755
index 00000000..f79ad389
--- /dev/null
+++ b/scripts/python-lib/yaml/events.py
@@ -0,0 +1,86 @@
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
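# --- Editor's example (not part of the patch above) ---------------------
# yaml.parse() turns a stream into exactly the event objects defined above;
# a quick way to inspect the event stream for a document:
import yaml

for event in yaml.parse("- a\n- {b: 1}\n"):
    print event
# StreamStartEvent() / DocumentStartEvent() / SequenceStartEvent(...)
# ScalarEvent(anchor=None, tag=None, implicit=(True, False), value=u'a')
# MappingStartEvent(...) / ... / SequenceEndEvent() / DocumentEndEvent()
# StreamEndEvent()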
diff --git a/scripts/python-lib/yaml/events.pyc b/scripts/python-lib/yaml/events.pyc
new file mode 100644
index 00000000..0b022f68
--- /dev/null
+++ b/scripts/python-lib/yaml/events.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/loader.py b/scripts/python-lib/yaml/loader.py
new file mode 100755
index 00000000..293ff467
--- /dev/null
+++ b/scripts/python-lib/yaml/loader.py
@@ -0,0 +1,40 @@
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
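# --- Editor's example (not part of the patch above) ---------------------
# The three loader stacks differ only in the Constructor/Resolver they mix
# in; a small comparison sketch:
import yaml

doc = "count: 3"
print yaml.load(doc, Loader=yaml.BaseLoader)   # values stay plain strings
print yaml.load(doc, Loader=yaml.SafeLoader)   # {'count': 3}, standard tags only
print yaml.load(doc, Loader=yaml.Loader)       # {'count': 3}, python/* tags allowed too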
diff --git a/scripts/python-lib/yaml/loader.pyc b/scripts/python-lib/yaml/loader.pyc
new file mode 100644
index 00000000..17e3e00e
--- /dev/null
+++ b/scripts/python-lib/yaml/loader.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/nodes.py b/scripts/python-lib/yaml/nodes.py
new file mode 100755
index 00000000..c4f070c4
--- /dev/null
+++ b/scripts/python-lib/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
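# --- Editor's example (not part of the patch above) ---------------------
# yaml.compose() stops after the composer stage and returns the node graph
# built from the classes above; a short sketch:
import yaml

node = yaml.compose("a: [1, 2]")
print node.id                                # 'mapping'
key_node, value_node = node.value[0]
print key_node.tag, key_node.value           # tag:yaml.org,2002:str a
print value_node.id, len(value_node.value)   # sequence 2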
diff --git a/scripts/python-lib/yaml/nodes.pyc b/scripts/python-lib/yaml/nodes.pyc
new file mode 100644
index 00000000..58922515
--- /dev/null
+++ b/scripts/python-lib/yaml/nodes.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/parser.py b/scripts/python-lib/yaml/parser.py
new file mode 100755
index 00000000..f9e3057f
--- /dev/null
+++ b/scripts/python-lib/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+ # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
diff --git a/scripts/python-lib/yaml/parser.pyc b/scripts/python-lib/yaml/parser.pyc
new file mode 100644
index 00000000..ad508088
--- /dev/null
+++ b/scripts/python-lib/yaml/parser.pyc
Binary files differ
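
The grammar in the header of parser.py is consumed one event at a time through check_event()/peek_event()/get_event(). A quick sketch of the event stream the parser produces, using the standard yaml.parse() helper that the vendored package's __init__.py is assumed to provide:

    import yaml

    for event in yaml.parse("- a\n- b: 1\n"):
        print(type(event).__name__)

    # Expected sequence of event classes (values shown in parentheses for readability):
    #   StreamStartEvent, DocumentStartEvent, SequenceStartEvent,
    #   ScalarEvent('a'), MappingStartEvent, ScalarEvent('b'), ScalarEvent(1),
    #   MappingEndEvent, SequenceEndEvent, DocumentEndEvent, StreamEndEvent
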
diff --git a/scripts/python-lib/yaml/reader.py b/scripts/python-lib/yaml/reader.py
new file mode 100755
index 00000000..3249e6b9
--- /dev/null
+++ b/scripts/python-lib/yaml/reader.py
@@ -0,0 +1,190 @@
+# This module contains abstractions for the input stream. You don't have to
+# look further, there is no pretty code.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position `length` characters forward.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current character.
+
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, str):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to unicode,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object,
+ # - a `unicode` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = u''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, unicode):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+u'\0'
+ elif isinstance(stream, str):
+ self.name = "<string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = ''
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in u'\n\x85\u2028\u2029' \
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and len(self.raw_buffer) < 2:
+ self.update_raw()
+ if not isinstance(self.raw_buffer, unicode):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError, exc:
+ character = exc.object[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += u'\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=1024):
+ data = self.stream.read(size)
+ if data:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ else:
+ self.eof = True
+
+#try:
+# import psyco
+# psyco.bind(Reader)
+#except ImportError:
+# pass
+
diff --git a/scripts/python-lib/yaml/reader.pyc b/scripts/python-lib/yaml/reader.pyc
new file mode 100644
index 00000000..4e917e38
--- /dev/null
+++ b/scripts/python-lib/yaml/reader.pyc
Binary files differ
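
Reader is rarely used on its own, but its small API (peek/prefix/forward/get_mark) is what the scanner further down is written against. A minimal sketch, assuming yaml/error.py (the Mark class) from the same vendored package is present:

    from yaml.reader import Reader

    r = Reader("key: value\n")   # plain strings are decoded to unicode and get a trailing '\0'
    print(r.peek())              # k      (peek never moves the pointer)
    print(r.prefix(3))           # key
    r.forward(5)                 # consume 'key: '
    print(r.peek())              # v
    print(r.get_mark())          # points at line 1, column 6 of "<string>"
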
diff --git a/scripts/python-lib/yaml/representer.py b/scripts/python-lib/yaml/representer.py
new file mode 100755
index 00000000..5f4fc70d
--- /dev/null
+++ b/scripts/python-lib/yaml/representer.py
@@ -0,0 +1,484 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from error import *
+from nodes import *
+
+import datetime
+
+import sys, copy_reg, types
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter(object):
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=None):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def get_classobj_bases(self, cls):
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(self.get_classobj_bases(base))
+ return bases
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if type(data) is types.InstanceType:
+ data_types = self.get_classobj_bases(data.__class__)+list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, unicode(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+ add_representer = classmethod(add_representer)
+
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+ add_multi_representer = classmethod(add_multi_representer)
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
+ mapping.sort()
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data in [None, ()]:
+ return True
+ if isinstance(data, (str, unicode, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:null',
+ u'null')
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data):
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ def represent_long(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(unicode,
+ SafeRepresenter.represent_unicode)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(long,
+ SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, unicode(data))
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+ def represent_instance(self, data):
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed by
+ # calling cls(**args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never be
+ # called and the class instance is created by instantiating a trivial
+ # class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary, we
+ # produce a !!python/object node. Otherwise we produce a
+ # !!python/object/new node.
+
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+class_name, state)
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ value['state'] = state
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copy_reg.dispatch_table:
+ reduce = copy_reg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent object: %r" % data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(str,
+ Representer.represent_str)
+
+Representer.add_representer(unicode,
+ Representer.represent_unicode)
+
+Representer.add_representer(long,
+ Representer.represent_long)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.ClassType,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(types.InstanceType,
+ Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/scripts/python-lib/yaml/representer.pyc b/scripts/python-lib/yaml/representer.pyc
new file mode 100644
index 00000000..f6cd56c9
--- /dev/null
+++ b/scripts/python-lib/yaml/representer.pyc
Binary files differ
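
The representer maps native Python objects to node trees on the dump side, and the usual way to extend it is add_representer(). A small sketch under the assumption that the vendored package exposes the standard yaml.add_representer()/yaml.dump() helpers; the Port class and port_representer function are made up for the example:

    import yaml

    class Port(object):
        def __init__(self, index, speed):
            self.index = index
            self.speed = speed

    def port_representer(dumper, port):
        # Emit Port objects as a plain mapping instead of a !!python/object node.
        return dumper.represent_mapping(u'tag:yaml.org,2002:map',
                                        {'index': port.index, 'speed': port.speed})

    yaml.add_representer(Port, port_representer)
    print(yaml.dump([Port(0, '10g'), Port(1, '1g')]))
    # roughly:
    # - {index: 0, speed: 10g}
    # - {index: 1, speed: 1g}
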
diff --git a/scripts/python-lib/yaml/resolver.py b/scripts/python-lib/yaml/resolver.py
new file mode 100755
index 00000000..6b5ab875
--- /dev/null
+++ b/scripts/python-lib/yaml/resolver.py
@@ -0,0 +1,224 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from error import *
+from nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ add_implicit_resolver = classmethod(add_implicit_resolver)
+
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `new_path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. `new_path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of a node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key which content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, basestring) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (basestring, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+ add_path_resolver = classmethod(add_path_resolver)
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, basestring):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, basestring):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == u'':
+ resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:bool',
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:float',
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:int',
+ re.compile(ur'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:merge',
+ re.compile(ur'^(?:<<)$'),
+ [u'<'])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:null',
+ re.compile(ur'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:timestamp',
+ re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:value',
+ re.compile(ur'^(?:=)$'),
+ [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:yaml',
+ re.compile(ur'^(?:!|&|\*)$'),
+ list(u'!&*'))
+
diff --git a/scripts/python-lib/yaml/resolver.pyc b/scripts/python-lib/yaml/resolver.pyc
new file mode 100644
index 00000000..d776f578
--- /dev/null
+++ b/scripts/python-lib/yaml/resolver.pyc
Binary files differ
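
The resolver decides which tag an untagged (plain) scalar gets, using the implicit-resolver table filled in just above. A small sketch of resolve() in isolation; the (True, False) tuple is the 'implicit' flag pair the parser attaches to plain scalars, and only this file plus nodes.py and error.py from the vendored package are assumed:

    from yaml.resolver import Resolver
    from yaml.nodes import ScalarNode

    r = Resolver()
    print(r.resolve(ScalarNode, u'true', (True, False)))    # tag:yaml.org,2002:bool
    print(r.resolve(ScalarNode, u'3.14', (True, False)))    # tag:yaml.org,2002:float
    print(r.resolve(ScalarNode, u'0x1f', (True, False)))    # tag:yaml.org,2002:int
    print(r.resolve(ScalarNode, u'hello', (True, False)))   # tag:yaml.org,2002:str (the default)
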
diff --git a/scripts/python-lib/yaml/scanner.py b/scripts/python-lib/yaml/scanner.py
new file mode 100755
index 00000000..5228fad6
--- /dev/null
+++ b/scripts/python-lib/yaml/scanner.py
@@ -0,0 +1,1457 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey(object):
+ # See the simple keys treatment below.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner(object):
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Examples of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == u'\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == u'%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == u'-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == u'.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == u'[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == u'{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == u']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == u'}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == u',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == u'-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == u'?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == u':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == u'*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == u'&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == u'!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == u'|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == u'>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == u'\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == u'\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token"
+ % ch.encode('utf-8'), self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in self.possible_simple_keys.keys():
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # A simple key is required only if it is the first token in the current
+ # line. Therefore it is always allowed.
+ assert self.allow_simple_key or not required
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there cannot be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'---' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'...' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+ and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == u'\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == u'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == u'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not (u'0' <= ch <= u'9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 0
+ while u'0' <= self.peek(length) <= u'9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == u' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != u' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == u'*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = u'!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in u' \t'
+ length = 0
+ while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != u'\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == u'\n' \
+ and leading_non_space and self.peek() not in u' \t':
+ if not breaks:
+ chunks.append(u' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == u'\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() != u' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we relax indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation because " and ' clearly
+ # mark the beginning and the end of them. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
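+ # Number of hexadecimal digits expected after the \x, \u and \U escapes.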
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(unichr(code))
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == u'#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == u':' and
+ self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in u',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == u':'
+ and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == u'#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in u' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != u'!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != u' ':
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != u'!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == u'%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return u''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029 : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/scripts/python-lib/yaml/scanner.pyc b/scripts/python-lib/yaml/scanner.pyc
new file mode 100644
index 00000000..b22af786
--- /dev/null
+++ b/scripts/python-lib/yaml/scanner.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/serializer.py b/scripts/python-lib/yaml/serializer.py
new file mode 100755
index 00000000..0bf1e96d
--- /dev/null
+++ b/scripts/python-lib/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer(object):
+
+ ANCHOR_TEMPLATE = u'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
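+ # First pass over the node graph: a node seen once is recorded with
+ # anchor None; meeting it again means it is shared, so it gets a
+ # generated anchor id and its children are not traversed twice.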
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/scripts/python-lib/yaml/serializer.pyc b/scripts/python-lib/yaml/serializer.pyc
new file mode 100644
index 00000000..da4989fa
--- /dev/null
+++ b/scripts/python-lib/yaml/serializer.pyc
Binary files differ
diff --git a/scripts/python-lib/yaml/tokens.py b/scripts/python-lib/yaml/tokens.py
new file mode 100755
index 00000000..4d0b48a3
--- /dev/null
+++ b/scripts/python-lib/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
diff --git a/scripts/python-lib/yaml/tokens.pyc b/scripts/python-lib/yaml/tokens.pyc
new file mode 100644
index 00000000..1e0f5147
--- /dev/null
+++ b/scripts/python-lib/yaml/tokens.pyc
Binary files differ
diff --git a/scripts/stty_r b/scripts/stty_r
new file mode 100755
index 00000000..fdc3366b
--- /dev/null
+++ b/scripts/stty_r
@@ -0,0 +1,2 @@
+#! /bin/bash
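+# Restore the terminal to a saved sane state; the colon-separated string below
+# is the machine-readable form produced by 'stty -g'.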
+stty 500:5:bf:8a3b:3:1c:8:15:4:0:1:0:11:13:1a:0:12:f:17:16:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0
diff --git a/scripts/t-rex-64 b/scripts/t-rex-64
new file mode 100755
index 00000000..d33cc3e8
--- /dev/null
+++ b/scripts/t-rex-64
@@ -0,0 +1,25 @@
+#! /bin/bash
+./trex-cfg $@
+RESULT=$?
+if [ $RESULT -ne 0 ]; then
+ exit $RESULT
+fi
+
+
+cd $(dirname $0)
+export LD_LIBRARY_PATH=$PWD
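+# save the terminal settings so they can be restored with 'stty $saveterm' on exit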
+saveterm="$(stty -g)"
+# if we have a new core run optimized trex
+if cat /proc/cpuinfo | grep -q avx ; then
+ ./_$(basename $0) $@
+ if [ $? -eq 132 ]; then
+ echo " WARNING this program is optimized for the new Intel processors. "
+ echo " try the ./t-rex-64-o application that should work for any Intel processor but might be slower. "
+ echo " try to run t-rex-64-o .. "
+ ./_t-rex-64-o $@
+ fi
+else
+ ./_t-rex-64-o $@
+fi
+stty $saveterm
+
diff --git a/scripts/t-rex-64-debug b/scripts/t-rex-64-debug
new file mode 100755
index 00000000..4f3761d9
--- /dev/null
+++ b/scripts/t-rex-64-debug
@@ -0,0 +1,9 @@
+#! /bin/bash
+./trex-cfg
+cd $(dirname $0)
+export LD_LIBRARY_PATH=$PWD
+saveterm="$(stty -g)"
+./_$(basename $0) $@
+stty $saveterm
+
+
diff --git a/scripts/t-rex-64-debug-gdb b/scripts/t-rex-64-debug-gdb
new file mode 100755
index 00000000..087afb71
--- /dev/null
+++ b/scripts/t-rex-64-debug-gdb
@@ -0,0 +1,4 @@
+#! /bin/bash
+export LD_LIBRARY_PATH=`pwd`
+gdb --args ./_t-rex-64-debug $@
+
diff --git a/scripts/t-rex-64-debug-o b/scripts/t-rex-64-debug-o
new file mode 100755
index 00000000..4f3761d9
--- /dev/null
+++ b/scripts/t-rex-64-debug-o
@@ -0,0 +1,9 @@
+#! /bin/bash
+./trex-cfg
+cd $(dirname $0)
+export LD_LIBRARY_PATH=$PWD
+saveterm="$(stty -g)"
+./_$(basename $0) $@
+stty $saveterm
+
+
diff --git a/scripts/t-rex-64-debug-o-gdb b/scripts/t-rex-64-debug-o-gdb
new file mode 100755
index 00000000..629530ae
--- /dev/null
+++ b/scripts/t-rex-64-debug-o-gdb
@@ -0,0 +1,4 @@
+#! /bin/bash
+export LD_LIBRARY_PATH=`pwd`
+gdb --args ./_v-avc-64-debug-o $@
+
diff --git a/scripts/t-rex-64-o b/scripts/t-rex-64-o
new file mode 100755
index 00000000..4f3761d9
--- /dev/null
+++ b/scripts/t-rex-64-o
@@ -0,0 +1,9 @@
+#! /bin/bash
+./trex-cfg
+cd $(dirname $0)
+export LD_LIBRARY_PATH=$PWD
+saveterm="$(stty -g)"
+./_$(basename $0) $@
+stty $saveterm
+
+
diff --git a/scripts/trex-cfg b/scripts/trex-cfg
new file mode 100755
index 00000000..2aebf026
--- /dev/null
+++ b/scripts/trex-cfg
@@ -0,0 +1,57 @@
+#! /bin/bash
+SYS=`uname -r`
+if [ -f /etc/debian_version ]; then
+ OS=debian
+elif [ -f /etc/redhat-release ]; then
+ OS=redhat
+ systemctl stop firewalld.service
+else
+ OS=unknown
+fi
+
+
+if [ -d /mnt/huge ]; then
+ echo >> /dev/null
+else
+ echo "Create huge node"
+ mkdir -p /mnt/huge
+fi
+
+if ! mount | grep hugetlbfs >> /dev/null ; then
+ mount -t hugetlbfs nodev /mnt/huge
+fi
+
+
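+# Reserve 2048 x 2MB hugepages per NUMA node (node0/node1) if the sysfs entry
+# exists and is not already set.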
+for file in /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages ; do
+ if [ -e $file ] ; then
+ if [ "$(cat $file)" != "2048" ] ; then
+ echo 2048 > $file
+ fi
+ fi
+done
+
+if ! lsmod | grep -q igb_uio ; then
+ echo "Load the drivers for the first time "
+ modprobe uio
+ km=ko/$SYS/igb_uio.ko
+ if [ -e $km ] ; then
+ insmod $km
+ else
+ echo "ERROR kernel module is not supported for this OS"
+ echo "Please run the following commands: "
+ echo "\$cd ko/src "
+ echo "\$make "
+ echo "\$make install "
+ echo "\$cd - "
+ echo "and try to run it again "
+ exit 1
+ fi
+fi
+
+# try to bind the ports from the configuration file (new DPDK)
+PARENT_ARGS="$0 $@"
+if ! ./dpdk_setup_ports.py --parent "$PARENT_ARGS"; then
+ exit 1
+fi
+
+
diff --git a/scripts/trex_daemon_server b/scripts/trex_daemon_server
new file mode 100755
index 00000000..3494e303
--- /dev/null
+++ b/scripts/trex_daemon_server
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+
+import os
+import sys
+
+core = 0
+
+if '--core' in sys.argv:
+ try:
+ idx = sys.argv.index('--core')
+ core = int(sys.argv[idx + 1])
+ if core > 31 or core < 0:
+ print "Error: please provide core argument between 0 to 31"
+ exit(-1)
+ del sys.argv[idx:idx+2]
+ except IndexError:
+ print "Error: please make sure core option provided with argument"
+ exit(-1)
+ except ValueError:
+ print "Error: please make sure core option provided with integer argument"
+ exit(-1)
+
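+# Pin the daemon to the selected core with taskset and forward the remaining
+# command-line arguments to the daemon server script.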
+str_argv = ' '.join(sys.argv[1:])
+cmd = "taskset -c {core} python automation/trex_control_plane/server/trex_daemon_server.py {argv}".format(core = core, argv = str_argv)
+os.system(cmd)
diff --git a/scripts/version.txt b/scripts/version.txt
new file mode 100755
index 00000000..ef163be8
--- /dev/null
+++ b/scripts/version.txt
@@ -0,0 +1,4 @@
+
+moved to doc/release_notes.html
+
+
diff --git a/src/SimpleGlob.h b/src/SimpleGlob.h
new file mode 100755
index 00000000..ac57105b
--- /dev/null
+++ b/src/SimpleGlob.h
@@ -0,0 +1,979 @@
+/*! @file SimpleGlob.h
+
+ @version 3.5
+
+ @brief A cross-platform file globbing library providing the ability to
+ expand wildcards in command-line arguments to a list of all matching
+ files. It is designed explicitly to be portable to any platform and has
+ been tested on Windows and Linux. See CSimpleGlobTempl for the class
+ definition.
+
+ @section features FEATURES
+
+ - MIT Licence allows free use in all software (including GPL and
+ commercial)
+ - multi-platform (Windows 95/98/ME/NT/2K/XP, Linux, Unix)
+ - supports most of the standard linux glob() options
+ - recognition of forward slashes as equivalent to backslashes
+ on Windows. e.g. "c:/path/foo*" is equivalent to "c:\path\foo*".
+ - implemented with only a single C++ header file
+ - char, wchar_t and Windows TCHAR in the same program
+ - complete working examples included
+ - compiles cleanly at warning level 4 (Windows/VC.NET 2003),
+ warning level 3 (Windows/VC6) and -Wall (Linux/gcc)
+
+ @section usage USAGE
+
+ The SimpleGlob class is used by following these steps:
+
+ <ol>
+ <li> Include the SimpleGlob.h header file
+
+ <pre>
+ \#include "SimpleGlob.h"
+ </pre>
+
+ <li> Instantiate a CSimpleGlob object supplying the appropriate flags.
+
+ <pre>
+ @link CSimpleGlobTempl CSimpleGlob @endlink glob(FLAGS);
+ </pre>
+
+ <li> Add all file specifications to the glob class.
+
+ <pre>
+ glob.Add("file*");
+ glob.Add(argc, argv);
+ </pre>
+
+ <li> Process all files with File(), Files() and FileCount()
+
+ <pre>
+ for (int n = 0; n < glob.FileCount(); ++n) {
+ ProcessFile(glob.File(n));
+ }
+ </pre>
+
+ </ol>
+
+ @section licence MIT LICENCE
+
+ The licence text below is the boilerplate "MIT Licence" used from:
+ http://www.opensource.org/licenses/mit-license.php
+
+ Copyright (c) 2006-2007, Brodie Thiesfield
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef INCLUDED_SimpleGlob
+#define INCLUDED_SimpleGlob
+
+/*! @brief The operation of SimpleGlob is fine-tuned via the use of a
+ combination of the following flags.
+
+ The flags may be passed at initialization of the class and used for every
+ filespec added, or alternatively they may optionally be specified in the
+ call to Add() and be different for each filespec.
+
+ @param SG_GLOB_ERR
+ Return upon read error (e.g. directory does not have read permission)
+
+ @param SG_GLOB_MARK
+ Append a slash (backslash in Windows) to every path which corresponds
+ to a directory
+
+ @param SG_GLOB_NOSORT
+ By default, files are returned sorted into string order. With this
+ flag, no sorting is done. This is not compatible with
+ SG_GLOB_FULLSORT.
+
+ @param SG_GLOB_FULLSORT
+ By default, files are sorted in groups belonging to each filespec that
+ was added. For example if the filespec "b*" was added before the
+ filespec "a*" then the argv array will contain all b* files sorted in
+ order, followed by all a* files sorted in order. If this flag is
+ specified, the entire array will be sorted ignoring the filespec
+ groups.
+
+ @param SG_GLOB_NOCHECK
+ If the pattern doesn't match anything, return the original pattern.
+
+ @param SG_GLOB_TILDE
+ Tilde expansion is carried out (on Unix platforms)
+
+ @param SG_GLOB_ONLYDIR
+ Return only directories which match (not compatible with
+ SG_GLOB_ONLYFILE)
+
+ @param SG_GLOB_ONLYFILE
+ Return only files which match (not compatible with SG_GLOB_ONLYDIR)
+
+ @param SG_GLOB_NODOT
+ Do not return the "." or ".." special directories.
+ */
+enum SG_Flags {
+ SG_GLOB_ERR = 1 << 0,
+ SG_GLOB_MARK = 1 << 1,
+ SG_GLOB_NOSORT = 1 << 2,
+ SG_GLOB_NOCHECK = 1 << 3,
+ SG_GLOB_TILDE = 1 << 4,
+ SG_GLOB_ONLYDIR = 1 << 5,
+ SG_GLOB_ONLYFILE = 1 << 6,
+ SG_GLOB_NODOT = 1 << 7,
+ SG_GLOB_FULLSORT = 1 << 8
+};
+
+/*! @brief Error return codes */
+enum SG_Error {
+ SG_SUCCESS = 0,
+ SG_ERR_NOMATCH = 1,
+ SG_ERR_MEMORY = -1,
+ SG_ERR_FAILURE = -2
+};
+#ifndef MAX_PATH
+# define MAX_PATH 4096
+#endif
+// ---------------------------------------------------------------------------
+// Platform dependent implementations
+
+// if we aren't on Windows and we have ICU available, then enable ICU
+ // by default. Define this to 0 to intentionally disable it.
+#ifndef SG_HAVE_ICU
+# if !defined(WIN32) && defined(USTRING_H)
+# define SG_HAVE_ICU 1
+# else
+# define SG_HAVE_ICU 0
+# endif
+#endif
+
+// don't include this in documentation as it isn't relevant
+#ifndef DOXYGEN
+
+// on Windows we want to use MBCS aware string functions and mimic the
+// Unix glob functionality. On Unix we just use glob.
+#ifdef WIN32
+# include <mbstring.h>
+# define sg_strchr ::_mbschr
+# define sg_strrchr ::_mbsrchr
+# define sg_strlen ::_mbslen
+# if __STDC_WANT_SECURE_LIB__
+# define sg_strcpy_s(a,n,b) ::_mbscpy_s(a,n,b)
+# else
+# define sg_strcpy_s(a,n,b) ::_mbscpy(a,b)
+# endif
+# define sg_strcmp ::_mbscmp
+# define sg_strcasecmp ::_mbsicmp
+# define SOCHAR_T unsigned char
+#else
+# include <sys/types.h>
+# include <sys/stat.h>
+# include <glob.h>
+# include <limits.h>
+# define sg_strchr ::strchr
+# define sg_strrchr ::strrchr
+# define sg_strlen ::strlen
+# define sg_strcpy_s(a,n,b) ::strcpy(a,b)
+# define sg_strcmp ::strcmp
+# define sg_strcasecmp ::strcasecmp
+# define SOCHAR_T char
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <wchar.h>
+
+// use assertions to test the input data
+#ifdef _DEBUG
+# ifdef _MSC_VER
+# include <crtdbg.h>
+# define SG_ASSERT(b) _ASSERTE(b)
+# else
+# include <assert.h>
+# define SG_ASSERT(b) assert(b)
+# endif
+#else
+# define SG_ASSERT(b)
+#endif
+
+/*! @brief String manipulation functions. */
+class SimpleGlobUtil
+{
+public:
+ static const char * strchr(const char *s, char c) {
+ return (char *) sg_strchr((const SOCHAR_T *)s, c);
+ }
+ static const wchar_t * strchr(const wchar_t *s, wchar_t c) {
+ return ::wcschr(s, c);
+ }
+#if SG_HAVE_ICU
+ static const UChar * strchr(const UChar *s, UChar c) {
+ return ::u_strchr(s, c);
+ }
+#endif
+
+ static const char * strrchr(const char *s, char c) {
+ return (char *) sg_strrchr((const SOCHAR_T *)s, c);
+ }
+ static const wchar_t * strrchr(const wchar_t *s, wchar_t c) {
+ return ::wcsrchr(s, c);
+ }
+#if SG_HAVE_ICU
+ static const UChar * strrchr(const UChar *s, UChar c) {
+ return ::u_strrchr(s, c);
+ }
+#endif
+
+ // Note: char strlen returns number of bytes, not characters
+ static size_t strlen(const char *s) { return ::strlen(s); }
+ static size_t strlen(const wchar_t *s) { return ::wcslen(s); }
+#if SG_HAVE_ICU
+ static size_t strlen(const UChar *s) { return ::u_strlen(s); }
+#endif
+
+ static void strcpy_s(char *dst, size_t n, const char *src) {
+ (void) n;
+ sg_strcpy_s((SOCHAR_T *)dst, n, (const SOCHAR_T *)src);
+ }
+ static void strcpy_s(wchar_t *dst, size_t n, const wchar_t *src) {
+# if __STDC_WANT_SECURE_LIB__
+ ::wcscpy_s(dst, n, src);
+#else
+ (void) n;
+ ::wcscpy(dst, src);
+#endif
+ }
+#if SG_HAVE_ICU
+ static void strcpy_s(UChar *dst, size_t n, const UChar *src) {
+ ::u_strncpy(dst, src, n);
+ }
+#endif
+
+ static int strcmp(const char *s1, const char *s2) {
+ return sg_strcmp((const SOCHAR_T *)s1, (const SOCHAR_T *)s2);
+ }
+ static int strcmp(const wchar_t *s1, const wchar_t *s2) {
+ return ::wcscmp(s1, s2);
+ }
+#if SG_HAVE_ICU
+ static int strcmp(const UChar *s1, const UChar *s2) {
+ return ::u_strcmp(s1, s2);
+ }
+#endif
+
+ static int strcasecmp(const char *s1, const char *s2) {
+ return sg_strcasecmp((const SOCHAR_T *)s1, (const SOCHAR_T *)s2);
+ }
+#if WIN32
+ static int strcasecmp(const wchar_t *s1, const wchar_t *s2) {
+ return ::_wcsicmp(s1, s2);
+ }
+#endif // WIN32
+#if SG_HAVE_ICU
+ static int strcasecmp(const UChar *s1, const UChar *s2) {
+ return u_strcasecmp(s1, s2, 0);
+ }
+#endif
+};
+
+enum SG_FileType {
+ SG_FILETYPE_INVALID,
+ SG_FILETYPE_FILE,
+ SG_FILETYPE_DIR
+};
+
+#ifdef WIN32
+#ifndef INVALID_FILE_ATTRIBUTES
+# define INVALID_FILE_ATTRIBUTES ((uint32_t)-1)
+#endif
+
+#define SG_PATH_CHAR '\\'
+
+/*! @brief Windows glob implementation. */
+template<class SOCHAR>
+struct SimpleGlobBase
+{
+ SimpleGlobBase() : m_hFind(INVALID_HANDLE_VALUE) { }
+
+ int FindFirstFileS(const char * a_pszFileSpec, unsigned int) {
+ m_hFind = FindFirstFileA(a_pszFileSpec, &m_oFindDataA);
+ if (m_hFind != INVALID_HANDLE_VALUE) {
+ return SG_SUCCESS;
+ }
+ uint32_t dwErr = GetLastError();
+ if (dwErr == ERROR_FILE_NOT_FOUND) {
+ return SG_ERR_NOMATCH;
+ }
+ return SG_ERR_FAILURE;
+ }
+ /* int FindFirstFileS(const wchar_t * a_pszFileSpec, unsigned int) {
+ m_hFind = FindFirstFileW(a_pszFileSpec, &m_oFindDataW);
+ if (m_hFind != INVALID_HANDLE_VALUE) {
+ return SG_SUCCESS;
+ }
+ uint32_t dwErr = GetLastError();
+ if (dwErr == ERROR_FILE_NOT_FOUND) {
+ return SG_ERR_NOMATCH;
+ }
+ return SG_ERR_FAILURE;
+ }*/
+
+ bool FindNextFileS(char) {
+ return FindNextFileA(m_hFind, &m_oFindDataA) != FALSE;
+ }
+ /* bool FindNextFileS(wchar_t) {
+ return FindNextFileW(m_hFind, &m_oFindDataW) != FALSE;
+ }*/
+
+ void FindDone() {
+ FindClose(m_hFind);
+ }
+
+ const char * GetFileNameS(char) const {
+ return m_oFindDataA.cFileName;
+ }
+ /*const wchar_t * GetFileNameS(wchar_t) const {
+ return m_oFindDataW.cFileName;
+ }*/
+
+ bool IsDirS(char) const {
+ return GetFileTypeS(m_oFindDataA.dwFileAttributes) == SG_FILETYPE_DIR;
+ }
+ /*bool IsDirS(wchar_t) const {
+ return GetFileTypeS(m_oFindDataW.dwFileAttributes) == SG_FILETYPE_DIR;
+ }*/
+
+ SG_FileType GetFileTypeS(const char * a_pszPath) {
+ return GetFileTypeS(GetFileAttributesA(a_pszPath));
+ }
+ /*SG_FileType GetFileTypeS(const wchar_t * a_pszPath) {
+ return GetFileTypeS(GetFileAttributesW(a_pszPath));
+ }*/
+ SG_FileType GetFileTypeS(uint32_t a_dwAttribs) const {
+ if (a_dwAttribs == INVALID_FILE_ATTRIBUTES) {
+ return SG_FILETYPE_INVALID;
+ }
+ if (a_dwAttribs & FILE_ATTRIBUTE_DIRECTORY) {
+ return SG_FILETYPE_DIR;
+ }
+ return SG_FILETYPE_FILE;
+ }
+typedef struct _FILETIME {
+ uint32_t dwLowDateTime;
+ uint32_t dwHighDateTime;
+} FILETIME;
+
+
+typedef struct _WIN32_FIND_DATAA {
+ uint32_t dwFileAttributes;
+ FILETIME ftCreationTime;
+ FILETIME ftLastAccessTime;
+ FILETIME ftLastWriteTime;
+ uint32_t nFileSizeHigh;
+ uint32_t nFileSizerLow;
+ uint32_t dwReserved0;
+ uint32_t dwReserved1;
+ char cFileName[MAX_PATH];
+ char cAlternateFileName[14];
+} WIN32_FIND_DATAA;
+
+private:
+ void * m_hFind;
+ WIN32_FIND_DATAA m_oFindDataA;
+ WIN32_FIND_DATAA m_oFindDataW;
+};
+
+#else // !WIN32
+
+#define SG_PATH_CHAR '/'
+
+/*! @brief Unix glob implementation. */
+template<class SOCHAR>
+struct SimpleGlobBase
+{
+ SimpleGlobBase() {
+ memset(&m_glob, 0, sizeof(m_glob));
+ m_uiCurr = (size_t)-1;
+ }
+
+ ~SimpleGlobBase() {
+ globfree(&m_glob);
+ }
+
+ void FilePrep() {
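+ // GLOB_MARK appends a trailing '/' to directory matches; strip it here and
+ // remember it so IsDirS() can report the entry as a directory.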
+ m_bIsDir = false;
+ size_t len = strlen(m_glob.gl_pathv[m_uiCurr]);
+ if (m_glob.gl_pathv[m_uiCurr][len-1] == '/') {
+ m_bIsDir = true;
+ m_glob.gl_pathv[m_uiCurr][len-1] = 0;
+ }
+ }
+
+ int FindFirstFileS(const char * a_pszFileSpec, unsigned int a_uiFlags) {
+ int nFlags = GLOB_MARK | GLOB_NOSORT;
+ if (a_uiFlags & SG_GLOB_ERR) nFlags |= GLOB_ERR;
+ if (a_uiFlags & SG_GLOB_TILDE) nFlags |= GLOB_TILDE;
+ int rc = glob(a_pszFileSpec, nFlags, NULL, &m_glob);
+ if (rc == GLOB_NOSPACE) return SG_ERR_MEMORY;
+ if (rc == GLOB_ABORTED) return SG_ERR_FAILURE;
+ if (rc == GLOB_NOMATCH) return SG_ERR_NOMATCH;
+ m_uiCurr = 0;
+ FilePrep();
+ return SG_SUCCESS;
+ }
+
+#if SG_HAVE_ICU
+ int FindFirstFileS(const UChar * a_pszFileSpec, unsigned int a_uiFlags) {
+ char buf[PATH_MAX] = { 0 };
+ UErrorCode status = U_ZERO_ERROR;
+ u_strToUTF8(buf, sizeof(buf), NULL, a_pszFileSpec, -1, &status);
+ if (U_FAILURE(status)) return SG_ERR_FAILURE;
+ return FindFirstFileS(buf, a_uiFlags);
+ }
+#endif
+
+ bool FindNextFileS(char) {
+ SG_ASSERT(m_uiCurr != (size_t)-1);
+ if (++m_uiCurr >= m_glob.gl_pathc) {
+ return false;
+ }
+ FilePrep();
+ return true;
+ }
+
+#if SG_HAVE_ICU
+ bool FindNextFileS(UChar) {
+ return FindNextFileS((char)0);
+ }
+#endif
+
+ void FindDone() {
+ globfree(&m_glob);
+ memset(&m_glob, 0, sizeof(m_glob));
+ m_uiCurr = (size_t)-1;
+ }
+
+ const char * GetFileNameS(char) const {
+ SG_ASSERT(m_uiCurr != (size_t)-1);
+ return m_glob.gl_pathv[m_uiCurr];
+ }
+
+#if SG_HAVE_ICU
+ const UChar * GetFileNameS(UChar) const {
+ const char * pszFile = GetFileNameS((char)0);
+ if (!pszFile) return NULL;
+ UErrorCode status = U_ZERO_ERROR;
+ memset(m_szBuf, 0, sizeof(m_szBuf));
+ u_strFromUTF8(m_szBuf, PATH_MAX, NULL, pszFile, -1, &status);
+ if (U_FAILURE(status)) return NULL;
+ return m_szBuf;
+ }
+#endif
+
+ bool IsDirS(char) const {
+ SG_ASSERT(m_uiCurr != (size_t)-1);
+ return m_bIsDir;
+ }
+
+#if SG_HAVE_ICU
+ bool IsDirS(UChar) const {
+ return IsDirS((char)0);
+ }
+#endif
+
+ SG_FileType GetFileTypeS(const char * a_pszPath) const {
+ struct stat sb;
+ if (0 != stat(a_pszPath, &sb)) {
+ return SG_FILETYPE_INVALID;
+ }
+ if (S_ISDIR(sb.st_mode)) {
+ return SG_FILETYPE_DIR;
+ }
+ if (S_ISREG(sb.st_mode)) {
+ return SG_FILETYPE_FILE;
+ }
+ return SG_FILETYPE_INVALID;
+ }
+
+#if SG_HAVE_ICU
+ SG_FileType GetFileTypeS(const UChar * a_pszPath) const {
+ char buf[PATH_MAX] = { 0 };
+ UErrorCode status = U_ZERO_ERROR;
+ u_strToUTF8(buf, sizeof(buf), NULL, a_pszPath, -1, &status);
+ if (U_FAILURE(status)) return SG_FILETYPE_INVALID;
+ return GetFileTypeS(buf);
+ }
+#endif
+
+private:
+ glob_t m_glob;
+ size_t m_uiCurr;
+ bool m_bIsDir;
+#if SG_HAVE_ICU
+ mutable UChar m_szBuf[PATH_MAX];
+#endif
+};
+
+#endif // WIN32
+
+#endif // DOXYGEN
+
+// ---------------------------------------------------------------------------
+// MAIN TEMPLATE CLASS
+// ---------------------------------------------------------------------------
+
+/*! @brief Implementation of the SimpleGlob class */
+template<class SOCHAR>
+class CSimpleGlobTempl : private SimpleGlobBase<SOCHAR>
+{
+public:
+ /*! @brief Initialize the class.
+
+ @param a_uiFlags Combination of SG_GLOB flags.
+ @param a_nReservedSlots Number of slots in the argv array that
+ should be reserved. In the returned array these slots
+ argv[0] ... argv[a_nReservedSlots-1] will be left empty for
+ the caller to fill in.
+ */
+ CSimpleGlobTempl(unsigned int a_uiFlags = 0, int a_nReservedSlots = 0);
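+
+ /* Illustrative sketch (not part of the API): reserved slots are typically
+    used to keep a main()-style argv layout, e.g. reserving slot 0 for the
+    program name and filling it in after globbing:
+
+        CSimpleGlobA glob(SG_GLOB_ONLYFILE, 1);  // reserve argv[0]
+        glob.Add("*.cpp");                       // expand a wildcard
+        glob.Files()[0] = argv[0];               // caller fills the slot
+ */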
+
+ /*! @brief Deallocate all memory buffers. */
+ ~CSimpleGlobTempl();
+
+ /*! @brief Initialize (or re-initialize) the class in preparation for
+ adding new filespecs.
+
+ All existing files are cleared. Note that allocated memory is only
+ deallocated at object destruction.
+
+ @param a_uiFlags Combination of SG_GLOB flags.
+ @param a_nReservedSlots Number of slots in the argv array that
+ should be reserved. In the returned array these slots
+ argv[0] ... argv[a_nReservedSlots-1] will be left empty for
+ the caller to fill in.
+ */
+ int Init(unsigned int a_uiFlags = 0, int a_nReservedSlots = 0);
+
+ /*! @brief Add a new filespec to the glob.
+
+ The filesystem will be immediately scanned for all matching files and
+ directories and they will be added to the glob.
+
+ @param a_pszFileSpec Filespec to add to the glob.
+
+ @return SG_SUCCESS Matching files were added to the glob.
+ @return SG_ERR_NOMATCH Nothing matched the pattern. To ignore this
+ error compare return value to >= SG_SUCCESS.
+ @return SG_ERR_MEMORY Out of memory failure.
+ @return SG_ERR_FAILURE General failure.
+ */
+ int Add(const SOCHAR *a_pszFileSpec);
+
+ /*! @brief Add an array of filespec to the glob.
+
+ The filesystem will be immediately scanned for all matching files and
+ directories in each filespec and they will be added to the glob.
+
+ @param a_nCount Number of filespec in the array.
+ @param a_rgpszFileSpec Array of filespec to add to the glob.
+
+ @return SG_SUCCESS Matching files were added to the glob.
+ @return SG_ERR_NOMATCH Nothing matched the pattern. To ignore this
+ error compare return value to >= SG_SUCCESS.
+ @return SG_ERR_MEMORY Out of memory failure.
+ @return SG_ERR_FAILURE General failure.
+ */
+ int Add(int a_nCount, const SOCHAR * const * a_rgpszFileSpec);
+
+ /*! @brief Return the number of files in the argv array.
+ */
+ inline int FileCount() const { return m_nArgsLen; }
+
+ /*! @brief Return the full argv array. */
+ inline SOCHAR ** Files() {
+ SetArgvArrayType(POINTERS);
+ return m_rgpArgs;
+ }
+
+ /*! @brief Return a single file. */
+ inline SOCHAR * File(int n) {
+ SG_ASSERT(n >= 0 && n < m_nArgsLen);
+ return Files()[n];
+ }
+
+private:
+ CSimpleGlobTempl(const CSimpleGlobTempl &); // disabled
+ CSimpleGlobTempl & operator=(const CSimpleGlobTempl &); // disabled
+
+ /*! @brief The argv array has its members stored as either an offset into
+ the string buffer, or as pointers to their string in the buffer. The
+ offsets are used because if the string buffer is dynamically resized,
+ all pointers into that buffer would become invalid.
+ */
+ enum ARG_ARRAY_TYPE { OFFSETS, POINTERS };
+
+ /*! @brief Change the type of data stored in the argv array. */
+ void SetArgvArrayType(ARG_ARRAY_TYPE a_nNewType);
+
+ /*! @brief Add a filename to the array if it passes all requirements. */
+ int AppendName(const SOCHAR *a_pszFileName, bool a_bIsDir);
+
+ /*! @brief Grow the argv array to the required size. */
+ bool GrowArgvArray(int a_nNewLen);
+
+ /*! @brief Grow the string buffer to the required size. */
+ bool GrowStringBuffer(size_t a_uiMinSize);
+
+ /*! @brief Compare two (possibly NULL) strings */
+ static int fileSortCompare(const void *a1, const void *a2);
+
+private:
+ unsigned int m_uiFlags;
+ ARG_ARRAY_TYPE m_nArgArrayType; //!< argv is indexes or pointers
+ SOCHAR ** m_rgpArgs; //!< argv
+ int m_nReservedSlots; //!< # client slots in argv array
+ int m_nArgsSize; //!< allocated size of array
+ int m_nArgsLen; //!< used length
+ SOCHAR * m_pBuffer; //!< argv string buffer
+ size_t m_uiBufferSize; //!< allocated size of buffer
+ size_t m_uiBufferLen; //!< used length of buffer
+ SOCHAR m_szPathPrefix[MAX_PATH]; //!< wildcard path prefix
+};
+
+// ---------------------------------------------------------------------------
+// IMPLEMENTATION
+// ---------------------------------------------------------------------------
+
+template<class SOCHAR>
+CSimpleGlobTempl<SOCHAR>::CSimpleGlobTempl(
+ unsigned int a_uiFlags,
+ int a_nReservedSlots
+ )
+{
+ m_rgpArgs = NULL;
+ m_nArgsSize = 0;
+ m_pBuffer = NULL;
+ m_uiBufferSize = 0;
+
+ Init(a_uiFlags, a_nReservedSlots);
+}
+
+template<class SOCHAR>
+CSimpleGlobTempl<SOCHAR>::~CSimpleGlobTempl()
+{
+ if (m_rgpArgs) free(m_rgpArgs);
+ if (m_pBuffer) free(m_pBuffer);
+}
+
+template<class SOCHAR>
+int
+CSimpleGlobTempl<SOCHAR>::Init(
+ unsigned int a_uiFlags,
+ int a_nReservedSlots
+ )
+{
+ m_nArgArrayType = POINTERS;
+ m_uiFlags = a_uiFlags;
+ m_nArgsLen = a_nReservedSlots;
+ m_nReservedSlots = a_nReservedSlots;
+ m_uiBufferLen = 0;
+
+ if (m_nReservedSlots > 0) {
+ if (!GrowArgvArray(m_nReservedSlots)) {
+ return SG_ERR_MEMORY;
+ }
+ for (int n = 0; n < m_nReservedSlots; ++n) {
+ m_rgpArgs[n] = NULL;
+ }
+ }
+
+ return SG_SUCCESS;
+}
+
+template<class SOCHAR>
+int
+CSimpleGlobTempl<SOCHAR>::Add(
+ const SOCHAR *a_pszFileSpec
+ )
+{
+#ifdef WIN32
+ // Windows FindFirst/FindNext recognizes forward slash as the same as
+ // backward slash and follows the directories. We need to do the same
+ // when calculating the prefix and when we have no wildcards.
+ SOCHAR szFileSpec[MAX_PATH];
+ SimpleGlobUtil::strcpy_s(szFileSpec, MAX_PATH, a_pszFileSpec);
+ const SOCHAR * pszPath = SimpleGlobUtil::strchr(szFileSpec, '/');
+ while (pszPath) {
+ szFileSpec[pszPath - szFileSpec] = SG_PATH_CHAR;
+ pszPath = SimpleGlobUtil::strchr(pszPath + 1, '/');
+ }
+ a_pszFileSpec = szFileSpec;
+#endif
+
+ // if this doesn't contain wildcards then we can just add it directly
+ m_szPathPrefix[0] = 0;
+ if (!SimpleGlobUtil::strchr(a_pszFileSpec, '*') &&
+ !SimpleGlobUtil::strchr(a_pszFileSpec, '?'))
+ {
+ SG_FileType nType = GetFileTypeS(a_pszFileSpec);
+ if (nType == SG_FILETYPE_INVALID) {
+ if (m_uiFlags & SG_GLOB_NOCHECK) {
+ return AppendName(a_pszFileSpec, false);
+ }
+ return SG_ERR_NOMATCH;
+ }
+ return AppendName(a_pszFileSpec, nType == SG_FILETYPE_DIR);
+ }
+
+#ifdef WIN32
+ // Windows doesn't return the directory with the filename, so we need to
+ // extract the path from the search string ourselves and prefix it to the
+ // filename we get back.
+ const SOCHAR * pszFilename =
+ SimpleGlobUtil::strrchr(a_pszFileSpec, SG_PATH_CHAR);
+ if (pszFilename) {
+ SimpleGlobUtil::strcpy_s(m_szPathPrefix, MAX_PATH, a_pszFileSpec);
+ m_szPathPrefix[pszFilename - a_pszFileSpec + 1] = 0;
+ }
+#endif
+
+ // search for the first match on the file
+ int rc = FindFirstFileS(a_pszFileSpec, m_uiFlags);
+ if (rc != SG_SUCCESS) {
+ if (rc == SG_ERR_NOMATCH && (m_uiFlags & SG_GLOB_NOCHECK)) {
+ int ok = AppendName(a_pszFileSpec, false);
+ if (ok != SG_SUCCESS) rc = ok;
+ }
+ return rc;
+ }
+
+ // add it and find all subsequent matches
+ int nError, nStartLen = m_nArgsLen;
+ bool bSuccess;
+ do {
+ nError = AppendName(GetFileNameS((SOCHAR)0), IsDirS((SOCHAR)0));
+ bSuccess = FindNextFileS((SOCHAR)0);
+ }
+ while (nError == SG_SUCCESS && bSuccess);
+ SimpleGlobBase<SOCHAR>::FindDone();
+
+ // sort these files if required
+ if (m_nArgsLen > nStartLen && !(m_uiFlags & SG_GLOB_NOSORT)) {
+ if (m_uiFlags & SG_GLOB_FULLSORT) {
+ nStartLen = m_nReservedSlots;
+ }
+ SetArgvArrayType(POINTERS);
+ qsort(
+ m_rgpArgs + nStartLen,
+ m_nArgsLen - nStartLen,
+ sizeof(m_rgpArgs[0]), fileSortCompare);
+ }
+
+ return nError;
+}
+
+template<class SOCHAR>
+int
+CSimpleGlobTempl<SOCHAR>::Add(
+ int a_nCount,
+ const SOCHAR * const * a_rgpszFileSpec
+ )
+{
+ int nResult;
+ for (int n = 0; n < a_nCount; ++n) {
+ nResult = Add(a_rgpszFileSpec[n]);
+ if (nResult != SG_SUCCESS) {
+ return nResult;
+ }
+ }
+ return SG_SUCCESS;
+}
+
+template<class SOCHAR>
+int
+CSimpleGlobTempl<SOCHAR>::AppendName(
+ const SOCHAR * a_pszFileName,
+ bool a_bIsDir
+ )
+{
+ // we need the argv array as offsets in case we resize it
+ SetArgvArrayType(OFFSETS);
+
+ // check for special cases which cause us to ignore this entry
+ if ((m_uiFlags & SG_GLOB_ONLYDIR) && !a_bIsDir) {
+ return SG_SUCCESS;
+ }
+ if ((m_uiFlags & SG_GLOB_ONLYFILE) && a_bIsDir) {
+ return SG_SUCCESS;
+ }
+ if ((m_uiFlags & SG_GLOB_NODOT) && a_bIsDir) {
+ if (a_pszFileName[0] == '.') {
+ if (a_pszFileName[1] == '\0') {
+ return SG_SUCCESS;
+ }
+ if (a_pszFileName[1] == '.' && a_pszFileName[2] == '\0') {
+ return SG_SUCCESS;
+ }
+ }
+ }
+
+ // ensure that we have enough room in the argv array
+ if (!GrowArgvArray(m_nArgsLen + 1)) {
+ return SG_ERR_MEMORY;
+ }
+
+ // ensure that we have enough room in the string buffer (+1 for null)
+ size_t uiPrefixLen = SimpleGlobUtil::strlen(m_szPathPrefix);
+ size_t uiLen = uiPrefixLen + SimpleGlobUtil::strlen(a_pszFileName) + 1;
+ if (a_bIsDir && (m_uiFlags & SG_GLOB_MARK) == SG_GLOB_MARK) {
+ ++uiLen; // need space for the trailing path separator
+ }
+ if (!GrowStringBuffer(m_uiBufferLen + uiLen)) {
+ return SG_ERR_MEMORY;
+ }
+
+ // add this entry. m_uiBufferLen is offset from beginning of buffer.
+ m_rgpArgs[m_nArgsLen++] = (SOCHAR*)m_uiBufferLen;
+ SimpleGlobUtil::strcpy_s(m_pBuffer + m_uiBufferLen,
+ m_uiBufferSize - m_uiBufferLen, m_szPathPrefix);
+ SimpleGlobUtil::strcpy_s(m_pBuffer + m_uiBufferLen + uiPrefixLen,
+ m_uiBufferSize - m_uiBufferLen - uiPrefixLen, a_pszFileName);
+ m_uiBufferLen += uiLen;
+
+ // add the directory slash if desired
+ if (a_bIsDir && (m_uiFlags & SG_GLOB_MARK) == SG_GLOB_MARK) {
+ const static SOCHAR szDirSlash[] = { SG_PATH_CHAR, 0 };
+ SimpleGlobUtil::strcpy_s(m_pBuffer + m_uiBufferLen - 2,
+ m_uiBufferSize - (m_uiBufferLen - 2), szDirSlash);
+ }
+
+ return SG_SUCCESS;
+}
+
+template<class SOCHAR>
+void
+CSimpleGlobTempl<SOCHAR>::SetArgvArrayType(
+ ARG_ARRAY_TYPE a_nNewType
+ )
+{
+ if (m_nArgArrayType == a_nNewType) return;
+ if (a_nNewType == POINTERS) {
+ SG_ASSERT(m_nArgArrayType == OFFSETS);
+ for (int n = 0; n < m_nArgsLen; ++n) {
+ m_rgpArgs[n] = (m_rgpArgs[n] == (SOCHAR*)-1) ?
+ NULL : m_pBuffer + (size_t) m_rgpArgs[n];
+ }
+ }
+ else {
+ SG_ASSERT(a_nNewType == OFFSETS);
+ SG_ASSERT(m_nArgArrayType == POINTERS);
+ for (int n = 0; n < m_nArgsLen; ++n) {
+ m_rgpArgs[n] = (m_rgpArgs[n] == NULL) ?
+ (SOCHAR*) -1 : (SOCHAR*) (m_rgpArgs[n] - m_pBuffer);
+ }
+ }
+ m_nArgArrayType = a_nNewType;
+}
+
+template<class SOCHAR>
+bool
+CSimpleGlobTempl<SOCHAR>::GrowArgvArray(
+ int a_nNewLen
+ )
+{
+ if (a_nNewLen >= m_nArgsSize) {
+ static const int SG_ARGV_INITIAL_SIZE = 32;
+ int nNewSize = (m_nArgsSize > 0) ?
+ m_nArgsSize * 2 : SG_ARGV_INITIAL_SIZE;
+ while (a_nNewLen >= nNewSize) {
+ nNewSize *= 2;
+ }
+ void * pNewBuffer = realloc(m_rgpArgs, nNewSize * sizeof(SOCHAR*));
+ if (!pNewBuffer) return false;
+ m_nArgsSize = nNewSize;
+ m_rgpArgs = (SOCHAR**) pNewBuffer;
+ }
+ return true;
+}
+
+template<class SOCHAR>
+bool
+CSimpleGlobTempl<SOCHAR>::GrowStringBuffer(
+ size_t a_uiMinSize
+ )
+{
+ if (a_uiMinSize >= m_uiBufferSize) {
+ static const int SG_BUFFER_INITIAL_SIZE = 1024;
+ size_t uiNewSize = (m_uiBufferSize > 0) ?
+ m_uiBufferSize * 2 : SG_BUFFER_INITIAL_SIZE;
+ while (a_uiMinSize >= uiNewSize) {
+ uiNewSize *= 2;
+ }
+ void * pNewBuffer = realloc(m_pBuffer, uiNewSize * sizeof(SOCHAR));
+ if (!pNewBuffer) return false;
+ m_uiBufferSize = uiNewSize;
+ m_pBuffer = (SOCHAR*) pNewBuffer;
+ }
+ return true;
+}
+
+template<class SOCHAR>
+int
+CSimpleGlobTempl<SOCHAR>::fileSortCompare(
+ const void *a1,
+ const void *a2
+ )
+{
+ const SOCHAR * s1 = *(const SOCHAR **)a1;
+ const SOCHAR * s2 = *(const SOCHAR **)a2;
+ if (s1 && s2) {
+ return SimpleGlobUtil::strcasecmp(s1, s2);
+ }
+ // NULL sorts first
+ return s1 == s2 ? 0 : (s1 ? 1 : -1);
+}
+
+// ---------------------------------------------------------------------------
+// TYPE DEFINITIONS
+// ---------------------------------------------------------------------------
+
+/*! @brief ASCII/MBCS version of CSimpleGlob */
+typedef CSimpleGlobTempl<char> CSimpleGlobA;
+
+/*! @brief wchar_t version of CSimpleGlob */
+typedef CSimpleGlobTempl<wchar_t> CSimpleGlobW;
+
+#if SG_HAVE_ICU
+/*! @brief UChar version of CSimpleGlob */
+typedef CSimpleGlobTempl<UChar> CSimpleGlobU;
+#endif
+
+#ifdef _UNICODE
+/*! @brief TCHAR version dependent on if _UNICODE is defined */
+# if SG_HAVE_ICU
+# define CSimpleGlob CSimpleGlobU
+# else
+# define CSimpleGlob CSimpleGlobW
+# endif
+#else
+/*! @brief TCHAR version dependent on if _UNICODE is defined */
+# define CSimpleGlob CSimpleGlobA
+#endif
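+
+/* A minimal end-to-end sketch of the API declared above (illustrative only;
+   the flag choice and the decision to treat only negative return codes as
+   fatal are assumptions based on the Add() documentation):
+
+   #include "SimpleGlob.h"
+   #include <stdio.h>
+
+   int main(int argc, char *argv[]) {
+       CSimpleGlobA glob(SG_GLOB_NOCHECK);      // keep unmatched patterns as-is
+       if (glob.Add(argc - 1, argv + 1) < SG_SUCCESS) {
+           return 1;                            // out of memory / general failure
+       }
+       for (int n = 0; n < glob.FileCount(); ++n) {
+           printf("%s\n", glob.File(n));        // expanded argument
+       }
+       return 0;
+   }
+*/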
+
+#endif // INCLUDED_SimpleGlob
diff --git a/src/SimpleOpt.h b/src/SimpleOpt.h
new file mode 100755
index 00000000..9ca16c1d
--- /dev/null
+++ b/src/SimpleOpt.h
@@ -0,0 +1,1060 @@
+/*! @file SimpleOpt.h
+
+ @version 3.5
+
+ @brief A cross-platform command line library which can parse almost any
+ of the standard command line formats in use today. It is designed
+ explicitly to be portable to any platform and has been tested on Windows
+ and Linux. See CSimpleOptTempl for the class definition.
+
+ @section features FEATURES
+
+ - MIT Licence allows free use in all software (including GPL
+ and commercial)
+ - multi-platform (Windows 95/98/ME/NT/2K/XP, Linux, Unix)
+ - supports all lengths of option names:
+ <table width="60%">
+ <tr><td width="30%"> -
+ <td>switch character only (e.g. use stdin for input)
+ <tr><td> -o
+ <td>short (single character)
+ <tr><td> -long
+ <td>long (multiple character, single switch character)
+ <tr><td> --longer
+ <td>long (multiple character, multiple switch characters)
+ </table>
+ - supports all types of arguments for options:
+ <table width="60%">
+ <tr><td width="30%"> --option
+ <td>short/long option flag (no argument)
+ <tr><td> --option ARG
+ <td>short/long option with separate required argument
+ <tr><td> --option=ARG
+ <td>short/long option with combined required argument
+ <tr><td> --option[=ARG]
+ <td>short/long option with combined optional argument
+ <tr><td> -oARG
+ <td>short option with combined required argument
+ <tr><td> -o[ARG]
+ <td>short option with combined optional argument
+ </table>
+ - supports options with multiple or variable numbers of arguments:
+ <table width="60%">
+ <tr><td width="30%"> --multi ARG1 ARG2
+ <td>Multiple arguments
+ <tr><td> --multi N ARG-1 ARG-2 ... ARG-N
+ <td>Variable number of arguments
+ </table>
+ - supports case-insensitive option matching on short, long and/or
+ word arguments.
+ - supports options which do not use a switch character. i.e. a special
+ word which is construed as an option.
+ e.g. "foo.exe open /directory/file.txt"
+ - supports clumping of multiple short options (no arguments) in a string
+ e.g. "foo.exe -abcdef file1" <==> "foo.exe -a -b -c -d -e -f file1"
+ - automatic recognition of a single slash as equivalent to a single
+ hyphen on Windows, e.g. "/f FILE" is equivalent to "-f FILE".
+ - file arguments can appear anywhere in the argument list:
+ "foo.exe file1.txt -a ARG file2.txt --flag file3.txt file4.txt"
+ files will be returned to the application in the same order they were
+ supplied on the command line
+ - short-circuit option matching: "--man" will match "--mandate"
+ - invalid options can be handled while continuing to parse the
+ command line
+ - valid options list can be changed dynamically during command line
+ processing, i.e. accept different options depending on an option
+ supplied earlier in the command line.
+ - implemented with only a single C++ header file
+ - optionally use no C runtime or OS functions
+ - char, wchar_t and Windows TCHAR in the same program
+ - complete working examples included
+ - compiles cleanly at warning level 4 (Windows/VC.NET 2003), warning
+ level 3 (Windows/VC6) and -Wall (Linux/gcc)
+
+ @section usage USAGE
+
+ The SimpleOpt class is used by following these steps:
+
+ <ol>
+ <li> Include the SimpleOpt.h header file
+
+ <pre>
+ \#include "SimpleOpt.h"
+ </pre>
+
+ <li> Define an array of valid options for your program.
+
+<pre>
+@link CSimpleOptTempl::SOption CSimpleOpt::SOption @endlink g_rgOptions[] = {
+ { OPT_FLAG, _T("-a"), SO_NONE }, // "-a"
+ { OPT_FLAG, _T("-b"), SO_NONE }, // "-b"
+ { OPT_ARG, _T("-f"), SO_REQ_SEP }, // "-f ARG"
+ { OPT_HELP, _T("-?"), SO_NONE }, // "-?"
+ { OPT_HELP, _T("--help"), SO_NONE }, // "--help"
+ SO_END_OF_OPTIONS // END
+};
+</pre>
+
+ Note that all options must start with a hyphen even if the slash will
+ be accepted. This is because the slash character is automatically
+ converted into a hyphen to test against the list of options.
+ For example, the following line matches both "-?" and "/?"
+ (on Windows).
+
+ <pre>
+ { OPT_HELP, _T("-?"), SO_NONE }, // "-?"
+ </pre>
+
+ <li> Instantiate a CSimpleOpt object supplying argc, argv and the option
+ table
+
+<pre>
+@link CSimpleOptTempl CSimpleOpt @endlink args(argc, argv, g_rgOptions);
+</pre>
+
+ <li> Process the arguments by calling Next() until it returns false.
+ On each call, first check for an error by calling LastError(), then
+ either handle the error or process the argument.
+
+<pre>
+while (args.Next()) {
+ if (args.LastError() == SO_SUCCESS) {
+ handle option: use OptionId(), OptionText() and OptionArg()
+ }
+ else {
+ handle error: see ESOError enums
+ }
+}
+</pre>
+
+ <li> Process all non-option arguments with File(), Files() and FileCount()
+
+<pre>
+ShowFiles(args.FileCount(), args.Files());
+</pre>
+
+ </ol>
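+
+ The steps above can be combined into a single sketch. This is purely
+ illustrative: the OPT_* IDs, the g_rgOptions table and the ShowFiles()
+ helper are the application-side names used in the snippets above
+ (assuming a non-Unicode, char build).
+
+<pre>
+enum { OPT_FLAG, OPT_ARG, OPT_HELP };
+
+int main(int argc, char * argv[]) {
+    CSimpleOptA args(argc, argv, g_rgOptions);
+    while (args.Next()) {
+        if (args.LastError() != SO_SUCCESS) {
+            // invalid option, missing argument, etc.
+            return 1;
+        }
+        if (args.OptionId() == OPT_HELP) {
+            // print usage text and exit
+            return 0;
+        }
+        // OPT_FLAG: use args.OptionText(); OPT_ARG: use args.OptionArg()
+    }
+    ShowFiles(args.FileCount(), args.Files());
+    return 0;
+}
+</pre>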
+
+ @section notes NOTES
+
+ - In MBCS mode, this library is guaranteed to work correctly only when
+ all option names use only ASCII characters.
+ - Note that if case-insensitive matching is being used then the first
+ matching option in the argument list will be returned.
+
+ @section licence MIT LICENCE
+
+ The licence text below is the boilerplate "MIT Licence" used from:
+ http://www.opensource.org/licenses/mit-license.php
+
+ Copyright (c) 2006-2007, Brodie Thiesfield
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/*! @mainpage
+
+ <table>
+ <tr><th>Library <td>SimpleOpt
+ <tr><th>Author <td>Brodie Thiesfield [code at jellycan dot com]
+ <tr><th>Source <td>http://code.jellycan.com/simpleopt/
+ </table>
+
+ @section SimpleOpt SimpleOpt
+
+ A cross-platform library providing a simple method to parse almost any of
+ the standard command-line formats in use today.
+
+ See the @link SimpleOpt.h SimpleOpt @endlink documentation for full
+ details.
+
+ @section SimpleGlob SimpleGlob
+
+ A cross-platform file globbing library providing the ability to
+ expand wildcards in command-line arguments to a list of all matching
+ files.
+
+ See the @link SimpleGlob.h SimpleGlob @endlink documentation for full
+ details.
+*/
+
+#ifndef INCLUDED_SimpleOpt
+#define INCLUDED_SimpleOpt
+
+// Default the max arguments to a fixed value. If you want to be able to
+// handle any number of arguments, then predefine this to 0 and it will
+// use an internal dynamically allocated buffer instead.
+#ifdef SO_MAX_ARGS
+# define SO_STATICBUF SO_MAX_ARGS
+#else
+# include <stdlib.h> // malloc, free
+# include <string.h> // memcpy
+# define SO_STATICBUF 50
+#endif
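+
+// For example, a build that never parses more than 16 arguments and wants to
+// avoid the malloc/memcpy path entirely could do this before including the
+// header (16 is an illustrative value, not a requirement):
+//
+//     #define SO_MAX_ARGS 16
+//     #include "SimpleOpt.h"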
+
+//! Error values
+typedef enum _ESOError
+{
+ //! No error
+ SO_SUCCESS = 0,
+
+ /*! It looks like an option (it starts with a switch character), but
+ it isn't registered in the option table. */
+ SO_OPT_INVALID = -1,
+
+ /*! Multiple options matched the supplied option text.
+ Only returned when NOT using SO_O_EXACT. */
+ SO_OPT_MULTIPLE = -2,
+
+ /*! Option doesn't take an argument, but a combined argument was
+ supplied. */
+ SO_ARG_INVALID = -3,
+
+ /*! An SO_REQ_CMB-style argument was supplied to an SO_REQ_SEP option.
+ Only returned when using SO_O_PEDANTIC. */
+ SO_ARG_INVALID_TYPE = -4,
+
+ //! Required argument was not supplied
+ SO_ARG_MISSING = -5,
+
+ /*! Option argument looks like another option.
+ Only returned when NOT using SO_O_NOERR. */
+ SO_ARG_INVALID_DATA = -6
+} ESOError;
+
+//! Option flags
+enum _ESOFlags
+{
+ /*! Disallow partial matching of option names */
+ SO_O_EXACT = 0x0001,
+
+ /*! Disallow use of slash as an option marker on Windows.
+ Un*x only ever recognizes a hyphen. */
+ SO_O_NOSLASH = 0x0002,
+
+ /*! Permit arguments on single letter options with no equals sign.
+ e.g. -oARG or -o[ARG] */
+ SO_O_SHORTARG = 0x0004,
+
+ /*! Permit single character options to be clumped into a single
+ option string. e.g. "-a -b -c" <==> "-abc" */
+ SO_O_CLUMP = 0x0008,
+
+ /*! Process the entire argv array for options, including the
+ argv[0] entry. */
+ SO_O_USEALL = 0x0010,
+
+ /*! Do not generate an error for invalid options. Errors for missing
+ arguments will still be generated. Invalid options will be
+ treated as files. Invalid options in clumps will be silently
+ ignored. */
+ SO_O_NOERR = 0x0020,
+
+ /*! Validate argument type pedantically. Return an error when a
+ separated argument "-opt arg" is supplied by the user as a
+ combined argument "-opt=arg". By default this is not considered
+ an error. */
+ SO_O_PEDANTIC = 0x0040,
+
+ /*! Case-insensitive comparisons for short arguments */
+ SO_O_ICASE_SHORT = 0x0100,
+
+ /*! Case-insensitive comparisons for long arguments */
+ SO_O_ICASE_LONG = 0x0200,
+
+ /*! Case-insensitive comparisons for word arguments
+ i.e. arguments without any hyphens at the start. */
+ SO_O_ICASE_WORD = 0x0400,
+
+ /*! Case-insensitive comparisons for all arg types */
+ SO_O_ICASE = 0x0700
+};
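+
+// The flags are bit values combined with OR and passed as the last argument
+// of the constructor or Init(). An illustrative (not prescriptive) mix:
+//
+//     CSimpleOptA args(argc, argv, g_rgOptions,
+//                      SO_O_EXACT | SO_O_ICASE_LONG | SO_O_SHORTARG);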
+
+/*! Types of arguments that options may have. Note that some of the _ESOFlags
+ are not compatible with all argument types. SO_O_SHORTARG requires that
+ relevant options use either SO_REQ_CMB or SO_OPT. SO_O_CLUMP requires
+ that relevant options use only SO_NONE.
+ */
+typedef enum _ESOArgType {
+ /*! No argument. Just the option flags.
+ e.g. -o --opt */
+ SO_NONE,
+
+ /*! Required separate argument.
+ e.g. -o ARG --opt ARG */
+ SO_REQ_SEP,
+
+ /*! Required combined argument.
+ e.g. -oARG -o=ARG --opt=ARG */
+ SO_REQ_CMB,
+
+ /*! Optional combined argument.
+ e.g. -o[ARG] -o[=ARG] --opt[=ARG] */
+ SO_OPT,
+
+ /*! Multiple separate arguments. The actual number of arguments is
+ determined programmatically at the time the argument is processed.
+ e.g. -o N ARG1 ARG2 ... ARGN --opt N ARG1 ARG2 ... ARGN */
+ SO_MULTI
+} ESOArgType;
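+
+// An illustrative option table mixing the argument types above; the OPT_*
+// IDs are assumed application-defined constants and the table ends with
+// SO_END_OF_OPTIONS (defined below):
+//
+//     { OPT_VERBOSE, _T("-v"),       SO_NONE    },  // flag only
+//     { OPT_OUTPUT,  _T("--output"), SO_REQ_CMB },  // --output=FILE
+//     { OPT_LEVEL,   _T("-l"),       SO_OPT     },  // -l[LEVEL], needs SO_O_SHORTARG
+//     { OPT_FILES,   _T("--files"),  SO_MULTI   },  // --files N f1 ... fN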
+
+//! this option definition must be the last entry in the table
+#define SO_END_OF_OPTIONS { -1, NULL, SO_NONE }
+
+#ifdef _DEBUG
+# ifdef _MSC_VER
+# include <crtdbg.h>
+# define SO_ASSERT(b) _ASSERTE(b)
+# else
+# include <assert.h>
+# define SO_ASSERT(b) assert(b)
+# endif
+#else
+# define SO_ASSERT(b) //!< assertion used to test input data
+#endif
+
+// ---------------------------------------------------------------------------
+// MAIN TEMPLATE CLASS
+// ---------------------------------------------------------------------------
+
+/*! @brief Implementation of the SimpleOpt class */
+template<class SOCHAR>
+class CSimpleOptTempl
+{
+public:
+ /*! @brief Structure used to define all known options. */
+ struct SOption {
+ /*! ID to return for this flag. Optional but must be >= 0 */
+ int nId;
+
+ /*! arg string to search for, e.g. "open", "-", "-f", "--file"
+ Note that on Windows the slash option marker will be converted
+ to a hyphen so that "-f" will also match "/f". */
+ const SOCHAR * pszArg;
+
+ /*! type of argument accepted by this option */
+ ESOArgType nArgType;
+ };
+
+ /*! @brief Initialize the class. Init() must be called later. */
+ CSimpleOptTempl()
+ : m_rgShuffleBuf(NULL)
+ {
+ Init(0, NULL, NULL, 0);
+ }
+
+ /*! @brief Initialize the class in preparation for use. */
+ CSimpleOptTempl(
+ int argc,
+ SOCHAR * argv[],
+ const SOption * a_rgOptions,
+ int a_nFlags = 0
+ )
+ : m_rgShuffleBuf(NULL)
+ {
+ Init(argc, argv, a_rgOptions, a_nFlags);
+ }
+
+#ifndef SO_MAX_ARGS
+ /*! @brief Deallocate any allocated memory. */
+ ~CSimpleOptTempl() { if (m_rgShuffleBuf) free(m_rgShuffleBuf); }
+#endif
+
+ /*! @brief Initialize the class in preparation for calling Next.
+
+ The table of options pointed to by a_rgOptions does not need to be
+ valid at the time that Init() is called. However on every call to
+ Next() the table pointed to must be a valid options table with the
+ last valid entry set to SO_END_OF_OPTIONS.
+
+ NOTE: the array pointed to by a_argv will be modified by this
+ class and must not be used or modified outside of member calls to
+ this class.
+
+ @param a_argc Argument array size
+ @param a_argv Argument array
+ @param a_rgOptions Valid option array
+ @param a_nFlags Optional flags to modify the processing of
+ the arguments
+
+ @return true Successful
+ @return false if SO_MAX_ARGS > 0: Too many arguments
+ if SO_MAX_ARGS == 0: Memory allocation failure
+ */
+ bool Init(
+ int a_argc,
+ SOCHAR * a_argv[],
+ const SOption * a_rgOptions,
+ int a_nFlags = 0
+ );
+
+ /*! @brief Change the current options table during option parsing.
+
+ @param a_rgOptions Valid option array
+ */
+ inline void SetOptions(const SOption * a_rgOptions) {
+ m_rgOptions = a_rgOptions;
+ }
+
+ /*! @brief Change the current flags during option parsing.
+
+ Note that changing the SO_O_USEALL flag here will have no effect.
+ It must be set using Init() or the constructor.
+
+ @param a_nFlags Flags to modify the processing of the arguments
+ */
+ inline void SetFlags(int a_nFlags) { m_nFlags = a_nFlags; }
+
+ /*! @brief Query if a particular flag is set */
+ inline bool HasFlag(int a_nFlag) const {
+ return (m_nFlags & a_nFlag) == a_nFlag;
+ }
+
+ /*! @brief Advance to the next option if available.
+
+ When all options have been processed it will return false. When true
+ has been returned, you must check for an invalid or unrecognized
+ option using the LastError() method. This will return an error
+ value other than SO_SUCCESS if an error occurred. All standard data
+ (e.g. OptionText(), OptionArg(), OptionId(), etc) will be available
+ depending on the error.
+
+ After all options have been processed, the remaining files from the
+ command line can be processed in same order as they were passed to
+ the program.
+
+ @return true option or error available for processing
+ @return false all options have been processed
+ */
+ bool Next();
+
+ /*! Stops processing of the command line and returns all remaining
+ arguments as files. The next call to Next() will return false.
+ */
+ void Stop();
+
+ /*! @brief Return the last error that occurred.
+
+ This function must always be called before processing the current
+ option. This function is available only when Next() has returned true.
+ */
+ inline ESOError LastError() const { return m_nLastError; }
+
+ /*! @brief Return the nId value from the options array for the current
+ option.
+
+ This function is available only when Next() has returned true.
+ */
+ inline int OptionId() const { return m_nOptionId; }
+
+ /*! @brief Return the pszArg from the options array for the current
+ option.
+
+ This function is available only when Next() has returned true.
+ */
+ inline const SOCHAR * OptionText() const { return m_pszOptionText; }
+
+ /*! @brief Return the argument for the current option where one exists.
+
+ If there is no argument for the option, this will return NULL.
+ This function is available only when Next() has returned true.
+ */
+ inline SOCHAR * OptionArg() const { return m_pszOptionArg; }
+
+ /*! @brief Validate and return the desired number of arguments.
+
+ This is only valid when OptionId() has returned the ID of an option
+ that is registered as SO_MULTI. It may be called multiple times,
+ each time returning the desired number of arguments. Previously
+ returned argument pointers remain valid.
+
+ If an error occurs during processing, NULL will be returned and
+ the error will be available via LastError().
+
+ @param n Number of arguments to return.
+ */
+ SOCHAR ** MultiArg(int n);
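+
+ /* Illustrative sketch for an SO_MULTI option taking exactly two arguments,
+    e.g. "--multi ARG1 ARG2"; OPT_MULTI and the enclosing Next() loop are
+    assumptions, not part of the library:
+
+        if (args.OptionId() == OPT_MULTI) {
+            SOCHAR ** ppArgs = args.MultiArg(2);
+            if (!ppArgs) {
+                return args.LastError();  // SO_ARG_MISSING / SO_ARG_INVALID_DATA
+            }
+            // ppArgs[0] and ppArgs[1] are the two arguments
+        }
+ */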
+
+ /*! @brief Return the number of entries in the Files() array.
+
+ After Next() has returned false, this will be the list of files (or
+ otherwise unprocessed arguments).
+ */
+ inline int FileCount() const { return m_argc - m_nLastArg; }
+
+ /*! @brief Return the specified file argument.
+
+ @param n Index of the file to return. This must be between 0
+ and FileCount() - 1;
+ */
+ inline SOCHAR * File(int n) const {
+ SO_ASSERT(n >= 0 && n < FileCount());
+ return m_argv[m_nLastArg + n];
+ }
+
+ /*! @brief Return the array of files. */
+ inline SOCHAR ** Files() const { return &m_argv[m_nLastArg]; }
+
+private:
+ CSimpleOptTempl(const CSimpleOptTempl &); // disabled
+ CSimpleOptTempl & operator=(const CSimpleOptTempl &); // disabled
+
+ SOCHAR PrepareArg(SOCHAR * a_pszString) const;
+ bool NextClumped();
+ void ShuffleArg(int a_nStartIdx, int a_nCount);
+ int LookupOption(const SOCHAR * a_pszOption) const;
+ int CalcMatch(const SOCHAR *a_pszSource, const SOCHAR *a_pszTest) const;
+
+ // Find the '=' character within a string.
+ inline SOCHAR * FindEquals(SOCHAR *s) const {
+ while (*s && *s != (SOCHAR)'=') ++s;
+ return *s ? s : NULL;
+ }
+ bool IsEqual(SOCHAR a_cLeft, SOCHAR a_cRight, int a_nArgType) const;
+
+ inline void Copy(SOCHAR ** ppDst, SOCHAR ** ppSrc, int nCount) const {
+#ifdef SO_MAX_ARGS
+ // keep our promise of no CLIB usage
+ while (nCount-- > 0) *ppDst++ = *ppSrc++;
+#else
+ memcpy(ppDst, ppSrc, nCount * sizeof(SOCHAR*));
+#endif
+ }
+
+private:
+ const SOption * m_rgOptions; //!< pointer to options table
+ int m_nFlags; //!< flags
+ int m_nOptionIdx; //!< current argv option index
+ int m_nOptionId; //!< id of current option (-1 = invalid)
+ int m_nNextOption; //!< index of next option
+ int m_nLastArg; //!< last argument, after this are files
+ int m_argc; //!< argc to process
+ SOCHAR ** m_argv; //!< argv
+ const SOCHAR * m_pszOptionText; //!< curr option text, e.g. "-f"
+ SOCHAR * m_pszOptionArg; //!< curr option arg, e.g. "c:\file.txt"
+ SOCHAR * m_pszClump; //!< clumped single character options
+ SOCHAR m_szShort[3]; //!< temp for clump and combined args
+ ESOError m_nLastError; //!< error status from the last call
+ SOCHAR ** m_rgShuffleBuf; //!< shuffle buffer for large argc
+};
+
+// ---------------------------------------------------------------------------
+// IMPLEMENTATION
+// ---------------------------------------------------------------------------
+
+template<class SOCHAR>
+bool
+CSimpleOptTempl<SOCHAR>::Init(
+ int a_argc,
+ SOCHAR * a_argv[],
+ const SOption * a_rgOptions,
+ int a_nFlags
+ )
+{
+ m_argc = a_argc;
+ m_nLastArg = a_argc;
+ m_argv = a_argv;
+ m_rgOptions = a_rgOptions;
+ m_nLastError = SO_SUCCESS;
+ m_nOptionIdx = 0;
+ m_nOptionId = -1;
+ m_pszOptionText = NULL;
+ m_pszOptionArg = NULL;
+ m_nNextOption = (a_nFlags & SO_O_USEALL) ? 0 : 1;
+ m_szShort[0] = (SOCHAR)'-';
+ m_szShort[2] = (SOCHAR)'\0';
+ m_nFlags = a_nFlags;
+ m_pszClump = NULL;
+
+#ifdef SO_MAX_ARGS
+ if (m_argc > SO_MAX_ARGS) {
+ m_nLastError = SO_ARG_INVALID_DATA;
+ m_nLastArg = 0;
+ return false;
+ }
+#else
+ if (m_rgShuffleBuf) {
+ free(m_rgShuffleBuf);
+ }
+ if (m_argc > SO_STATICBUF) {
+ m_rgShuffleBuf = (SOCHAR**) malloc(sizeof(SOCHAR*) * m_argc);
+ if (!m_rgShuffleBuf) {
+ return false;
+ }
+ }
+#endif
+
+ return true;
+}
+
+template<class SOCHAR>
+bool
+CSimpleOptTempl<SOCHAR>::Next()
+{
+#ifdef SO_MAX_ARGS
+ if (m_argc > SO_MAX_ARGS) {
+ SO_ASSERT(!"Too many args! Check the return value of Init()!");
+ return false;
+ }
+#endif
+
+ // process a clumped option string if appropriate
+ if (m_pszClump && *m_pszClump) {
+ // silently discard invalid clumped option
+ bool bIsValid = NextClumped();
+ while (*m_pszClump && !bIsValid && HasFlag(SO_O_NOERR)) {
+ bIsValid = NextClumped();
+ }
+
+ // return this option if valid or we are returning errors
+ if (bIsValid || !HasFlag(SO_O_NOERR)) {
+ return true;
+ }
+ }
+ SO_ASSERT(!m_pszClump || !*m_pszClump);
+ m_pszClump = NULL;
+
+ // init for the next option
+ m_nOptionIdx = m_nNextOption;
+ m_nOptionId = -1;
+ m_pszOptionText = NULL;
+ m_pszOptionArg = NULL;
+ m_nLastError = SO_SUCCESS;
+
+ // find the next option
+ SOCHAR cFirst;
+ int nTableIdx = -1;
+ int nOptIdx = m_nOptionIdx;
+ while (nTableIdx < 0 && nOptIdx < m_nLastArg) {
+ SOCHAR * pszArg = m_argv[nOptIdx];
+ m_pszOptionArg = NULL;
+
+ // find this option in the options table
+ cFirst = PrepareArg(pszArg);
+ if (pszArg[0] == (SOCHAR)'-') {
+ // find any combined argument string and remove equals sign
+ m_pszOptionArg = FindEquals(pszArg);
+ if (m_pszOptionArg) {
+ *m_pszOptionArg++ = (SOCHAR)'\0';
+ }
+ }
+ nTableIdx = LookupOption(pszArg);
+
+ // if we didn't find this option but if it is a short form
+ // option then we try the alternative forms
+ if (nTableIdx < 0
+ && !m_pszOptionArg
+ && pszArg[0] == (SOCHAR)'-'
+ && pszArg[1]
+ && pszArg[1] != (SOCHAR)'-'
+ && pszArg[2])
+ {
+ // test for a short-form with argument if appropriate
+ if (HasFlag(SO_O_SHORTARG)) {
+ m_szShort[1] = pszArg[1];
+ int nIdx = LookupOption(m_szShort);
+ if (nIdx >= 0
+ && (m_rgOptions[nIdx].nArgType == SO_REQ_CMB
+ || m_rgOptions[nIdx].nArgType == SO_OPT))
+ {
+ m_pszOptionArg = &pszArg[2];
+ pszArg = m_szShort;
+ nTableIdx = nIdx;
+ }
+ }
+
+ // test for a clumped short-form option string and we didn't
+ // match on the short-form argument above
+ if (nTableIdx < 0 && HasFlag(SO_O_CLUMP)) {
+ m_pszClump = &pszArg[1];
+ ++m_nNextOption;
+ if (nOptIdx > m_nOptionIdx) {
+ ShuffleArg(m_nOptionIdx, nOptIdx - m_nOptionIdx);
+ }
+ return Next();
+ }
+ }
+
+ // The option wasn't found. If it starts with a switch character
+ // and we are not suppressing errors for invalid options then it
+ // is reported as an error, otherwise it is data.
+ if (nTableIdx < 0) {
+ if (!HasFlag(SO_O_NOERR) && pszArg[0] == (SOCHAR)'-') {
+ m_pszOptionText = pszArg;
+ break;
+ }
+
+ pszArg[0] = cFirst;
+ ++nOptIdx;
+ if (m_pszOptionArg) {
+ *(--m_pszOptionArg) = (SOCHAR)'=';
+ }
+ }
+ }
+
+ // end of options
+ if (nOptIdx >= m_nLastArg) {
+ if (nOptIdx > m_nOptionIdx) {
+ ShuffleArg(m_nOptionIdx, nOptIdx - m_nOptionIdx);
+ }
+ return false;
+ }
+ ++m_nNextOption;
+
+ // get the option id
+ ESOArgType nArgType = SO_NONE;
+ if (nTableIdx < 0) {
+ m_nLastError = (ESOError) nTableIdx; // error code
+ }
+ else {
+ m_nOptionId = m_rgOptions[nTableIdx].nId;
+ m_pszOptionText = m_rgOptions[nTableIdx].pszArg;
+
+ // ensure that the arg type is valid
+ nArgType = m_rgOptions[nTableIdx].nArgType;
+ switch (nArgType) {
+ case SO_NONE:
+ if (m_pszOptionArg) {
+ m_nLastError = SO_ARG_INVALID;
+ }
+ break;
+
+ case SO_REQ_SEP:
+ if (m_pszOptionArg) {
+ // they wanted separate args, but we got a combined one;
+ // unless we are pedantic, just accept it.
+ if (HasFlag(SO_O_PEDANTIC)) {
+ m_nLastError = SO_ARG_INVALID_TYPE;
+ }
+ }
+ // more processing after we shuffle
+ break;
+
+ case SO_REQ_CMB:
+ if (!m_pszOptionArg) {
+ m_nLastError = SO_ARG_MISSING;
+ }
+ break;
+
+ case SO_OPT:
+ // nothing to do
+ break;
+
+ case SO_MULTI:
+ // nothing to do. Caller must now check for valid arguments
+ // using MultiArg()
+ break;
+ }
+ }
+
+ // shuffle the files out of the way
+ if (nOptIdx > m_nOptionIdx) {
+ ShuffleArg(m_nOptionIdx, nOptIdx - m_nOptionIdx);
+ }
+
+ // we need to return the separate arg if required, just re-use the
+ // multi-arg code because it all does the same thing
+ if ( nArgType == SO_REQ_SEP
+ && !m_pszOptionArg
+ && m_nLastError == SO_SUCCESS)
+ {
+ SOCHAR ** ppArgs = MultiArg(1);
+ if (ppArgs) {
+ m_pszOptionArg = *ppArgs;
+ }
+ }
+
+ return true;
+}
+
+template<class SOCHAR>
+void
+CSimpleOptTempl<SOCHAR>::Stop()
+{
+ if (m_nNextOption < m_nLastArg) {
+ ShuffleArg(m_nNextOption, m_nLastArg - m_nNextOption);
+ }
+}
+
+template<class SOCHAR>
+SOCHAR
+CSimpleOptTempl<SOCHAR>::PrepareArg(
+ SOCHAR * a_pszString
+ ) const
+{
+#ifdef _WIN32
+ // On Windows we can accept the forward slash as a single character
+ // option delimiter, but it cannot replace the '-' option used to
+ // denote stdin. On Un*x paths may start with slash so it may not
+ // be used to start an option.
+ if (!HasFlag(SO_O_NOSLASH)
+ && a_pszString[0] == (SOCHAR)'/'
+ && a_pszString[1]
+ && a_pszString[1] != (SOCHAR)'-')
+ {
+ a_pszString[0] = (SOCHAR)'-';
+ return (SOCHAR)'/';
+ }
+#endif
+ return a_pszString[0];
+}
+
+template<class SOCHAR>
+bool
+CSimpleOptTempl<SOCHAR>::NextClumped()
+{
+ // prepare for the next clumped option
+ m_szShort[1] = *m_pszClump++;
+ m_nOptionId = -1;
+ m_pszOptionText = NULL;
+ m_pszOptionArg = NULL;
+ m_nLastError = SO_SUCCESS;
+
+ // lookup this option, ensure that we are using exact matching
+ int nSavedFlags = m_nFlags;
+ m_nFlags = SO_O_EXACT;
+ int nTableIdx = LookupOption(m_szShort);
+ m_nFlags = nSavedFlags;
+
+ // unknown option
+ if (nTableIdx < 0) {
+ m_nLastError = (ESOError) nTableIdx; // error code
+ return false;
+ }
+
+ // valid option
+ m_pszOptionText = m_rgOptions[nTableIdx].pszArg;
+ ESOArgType nArgType = m_rgOptions[nTableIdx].nArgType;
+ if (nArgType == SO_NONE) {
+ m_nOptionId = m_rgOptions[nTableIdx].nId;
+ return true;
+ }
+
+ if (nArgType == SO_REQ_CMB && *m_pszClump) {
+ m_nOptionId = m_rgOptions[nTableIdx].nId;
+ m_pszOptionArg = m_pszClump;
+ while (*m_pszClump) ++m_pszClump; // must point to an empty string
+ return true;
+ }
+
+ // invalid option as it requires an argument
+ m_nLastError = SO_ARG_MISSING;
+ return true;
+}
+
+// Shuffle arguments to the end of the argv array.
+//
+// For example:
+// argv[] = { "0", "1", "2", "3", "4", "5", "6", "7", "8" };
+//
+// ShuffleArg(1, 1) = { "0", "2", "3", "4", "5", "6", "7", "8", "1" };
+// ShuffleArg(5, 2) = { "0", "1", "2", "3", "4", "7", "8", "5", "6" };
+// ShuffleArg(2, 4) = { "0", "1", "6", "7", "8", "2", "3", "4", "5" };
+template<class SOCHAR>
+void
+CSimpleOptTempl<SOCHAR>::ShuffleArg(
+ int a_nStartIdx,
+ int a_nCount
+ )
+{
+ SOCHAR * staticBuf[SO_STATICBUF];
+ SOCHAR ** buf = m_rgShuffleBuf ? m_rgShuffleBuf : staticBuf;
+ int nTail = m_argc - a_nStartIdx - a_nCount;
+
+ // make a copy of the elements to be moved
+ Copy(buf, m_argv + a_nStartIdx, a_nCount);
+
+ // move the tail down
+ Copy(m_argv + a_nStartIdx, m_argv + a_nStartIdx + a_nCount, nTail);
+
+ // append the moved elements to the tail
+ Copy(m_argv + a_nStartIdx + nTail, buf, a_nCount);
+
+ // update the index of the last unshuffled arg
+ m_nLastArg -= a_nCount;
+}
+
+// match on the long format strings. partial matches will be
+// accepted only if that feature is enabled.
+template<class SOCHAR>
+int
+CSimpleOptTempl<SOCHAR>::LookupOption(
+ const SOCHAR * a_pszOption
+ ) const
+{
+ int nBestMatch = -1; // index of best match so far
+ int nBestMatchLen = 0; // matching characters of best match
+ int nLastMatchLen = 0; // matching characters of last best match
+
+ for (int n = 0; m_rgOptions[n].nId >= 0; ++n) {
+ // the option table must use hyphens as the option character,
+ // the slash character is converted to a hyphen for testing.
+ SO_ASSERT(m_rgOptions[n].pszArg[0] != (SOCHAR)'/');
+
+ int nMatchLen = CalcMatch(m_rgOptions[n].pszArg, a_pszOption);
+ if (nMatchLen == -1) {
+ return n;
+ }
+ if (nMatchLen > 0 && nMatchLen >= nBestMatchLen) {
+ nLastMatchLen = nBestMatchLen;
+ nBestMatchLen = nMatchLen;
+ nBestMatch = n;
+ }
+ }
+
+ // only partial matches or no match gets to here, ensure that we
+ // don't return a partial match unless it is a clear winner
+ if (HasFlag(SO_O_EXACT) || nBestMatch == -1) {
+ return SO_OPT_INVALID;
+ }
+ return (nBestMatchLen > nLastMatchLen) ? nBestMatch : SO_OPT_MULTIPLE;
+}
+
+// calculate the number of characters that match (case-sensitive)
+// 0 = no match, > 0 == number of characters, -1 == perfect match
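+// For illustration (these values follow from the rules below):
+//     CalcMatch("--man",     "--man")     == -1  perfect match
+//     CalcMatch("--mandate", "--man")     ==  3  partial ("man" matched)
+//     CalcMatch("--man",     "--mandate") ==  0  test longer than source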
+template<class SOCHAR>
+int
+CSimpleOptTempl<SOCHAR>::CalcMatch(
+ const SOCHAR * a_pszSource,
+ const SOCHAR * a_pszTest
+ ) const
+{
+ if (!a_pszSource || !a_pszTest) {
+ return 0;
+ }
+
+ // determine the argument type
+ int nArgType = SO_O_ICASE_LONG;
+ if (a_pszSource[0] != '-') {
+ nArgType = SO_O_ICASE_WORD;
+ }
+ else if (a_pszSource[1] != '-' && !a_pszSource[2]) {
+ nArgType = SO_O_ICASE_SHORT;
+ }
+
+ // match and skip leading hyphens
+ while (*a_pszSource == (SOCHAR)'-' && *a_pszSource == *a_pszTest) {
+ ++a_pszSource;
+ ++a_pszTest;
+ }
+ if (*a_pszSource == (SOCHAR)'-' || *a_pszTest == (SOCHAR)'-') {
+ return 0;
+ }
+
+ // find matching number of characters in the strings
+ int nLen = 0;
+ while (*a_pszSource && IsEqual(*a_pszSource, *a_pszTest, nArgType)) {
+ ++a_pszSource;
+ ++a_pszTest;
+ ++nLen;
+ }
+
+ // if we have exhausted the source...
+ if (!*a_pszSource) {
+ // and the test strings, then it's a perfect match
+ if (!*a_pszTest) {
+ return -1;
+ }
+
+ // otherwise the match failed as the test is longer than
+ // the source. i.e. "--mant" will not match the option "--man".
+ return 0;
+ }
+
+ // if we haven't exhausted the test string then it is not a match
+ // i.e. "--mantle" will not best-fit match to "--mandate" at all.
+ if (*a_pszTest) {
+ return 0;
+ }
+
+ // partial match to the current length of the test string
+ return nLen;
+}
+
+template<class SOCHAR>
+bool
+CSimpleOptTempl<SOCHAR>::IsEqual(
+ SOCHAR a_cLeft,
+ SOCHAR a_cRight,
+ int a_nArgType
+ ) const
+{
+ // if this matches then we are doing case-insensitive matching
+ if (m_nFlags & a_nArgType) {
+ if (a_cLeft >= 'A' && a_cLeft <= 'Z') a_cLeft += 'a' - 'A';
+ if (a_cRight >= 'A' && a_cRight <= 'Z') a_cRight += 'a' - 'A';
+ }
+ return a_cLeft == a_cRight;
+}
+
+// return a pointer to the next a_nCount arguments, advancing past them.
+// NULL is returned (and LastError() is set) when there are not enough
+// arguments or one of them looks like an option.
+template<class SOCHAR>
+SOCHAR **
+CSimpleOptTempl<SOCHAR>::MultiArg(
+ int a_nCount
+ )
+{
+ // ensure we have enough arguments
+ if (m_nNextOption + a_nCount > m_nLastArg) {
+ m_nLastError = SO_ARG_MISSING;
+ return NULL;
+ }
+
+ // our argument array
+ SOCHAR ** rgpszArg = &m_argv[m_nNextOption];
+
+ // Ensure that none of the following arguments starts with a switch character.
+ // Only make this check if we are returning errors for unknown arguments.
+ if (!HasFlag(SO_O_NOERR)) {
+ for (int n = 0; n < a_nCount; ++n) {
+ SOCHAR ch = PrepareArg(rgpszArg[n]);
+ if (rgpszArg[n][0] == (SOCHAR)'-') {
+ rgpszArg[n][0] = ch;
+ m_nLastError = SO_ARG_INVALID_DATA;
+ return NULL;
+ }
+ rgpszArg[n][0] = ch;
+ }
+ }
+
+ // all good
+ m_nNextOption += a_nCount;
+ return rgpszArg;
+}
+
+
+// ---------------------------------------------------------------------------
+// TYPE DEFINITIONS
+// ---------------------------------------------------------------------------
+
+/*! @brief ASCII/MBCS version of CSimpleOpt */
+typedef CSimpleOptTempl<char> CSimpleOptA;
+
+/*! @brief wchar_t version of CSimpleOpt */
+typedef CSimpleOptTempl<wchar_t> CSimpleOptW;
+
+#if defined(_UNICODE)
+/*! @brief TCHAR version dependent on if _UNICODE is defined */
+# define CSimpleOpt CSimpleOptW
+#else
+/*! @brief TCHAR version dependent on if _UNICODE is defined */
+# define CSimpleOpt CSimpleOptA
+#endif
+
+#endif // INCLUDED_SimpleOpt
diff --git a/src/bp_gtest.cpp b/src/bp_gtest.cpp
new file mode 100755
index 00000000..020276cf
--- /dev/null
+++ b/src/bp_gtest.cpp
@@ -0,0 +1,2792 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "bp_sim.h"
+#include <common/gtest.h>
+#include <common/basic_utils.h>
+#include "utl_cpuu.h"
+#include "timer_wheel_pq.h"
+#include "rx_check.h"
+#include "time_histogram.h"
+#include "utl_jitter.h"
+#include "CRing.h"
+#include "msg_manager.h"
+#include <common/cgen_map.h>
+#include "platform_cfg.h"
+
+int test_policer(){
+ CPolicer policer;
+
+ policer.set_cir( 100.0);
+ policer.set_level(0.0);
+ policer.set_bucket_size(100.0);
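+
+ /* With a CIR of 100 tokens/sec and one 1.0-token update every 1 ms for
+    ~10 seconds, roughly 100 * 10 = 1000 updates should be accepted, hence
+    the 970..1000 window checked below. */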
+
+ double t;
+ uint32_t c=0;
+ for (t=0.001;t<10.0; t=t+0.001) {
+ if ( policer.update(1.0,t) ){
+ c++;
+ printf(" %f \n",t);
+ }
+ }
+ printf(" %u \n",c);
+ if ( ( c > 970.0) && ( c < 1000.0) ) {
+ printf(" ok \n");
+ return (0);
+ }else{
+ printf("error \n");
+ return (-1);
+ }
+}
+
+
+
+
+int test_priorty_queue(void){
+ CGenNode * node;
+ std::priority_queue<CGenNode *, std::vector<CGenNode *>,CGenNodeCompare> p_queue;
+ int i;
+ for (i=0; i<10; i++) {
+ node = new CGenNode();
+ printf(" +%x \n",node);
+ node->m_flow_id = 10-i;
+ node->m_pkt_info = (CFlowPktInfo *)(uintptr_t)i;
+ node->m_time = (double)i+0.1;
+ p_queue.push(node);
+ }
+ while (!p_queue.empty()) {
+ node = p_queue.top();
+ printf(" -->%x \n",node);
+ //node->Dump(stdout);
+ p_queue.pop();
+ //delete node;
+ }
+ return (0);
+}
+
+
+#if 0
+#ifdef WIN32
+
+int test_rate(){
+ int i;
+ CBwMeasure m;
+ uint64_t cnt=0;
+ for (i=0; i<10; i++) {
+ Sleep(100);
+ cnt+=10000;
+ printf (" %f \n",m.add(cnt));
+ }
+ return (0);
+}
+#endif
+#endif
+
+
+
+
+void histogram_test(){
+ CTimeHistogram t;
+ t.Create();
+ t.Add(0.0001);
+ t.Add(0.0002);
+ t.Add(0.0003);
+ int i;
+ for (i=0; i<100;i++) {
+ t.Add(1.0/1000000.0);
+ }
+ t.Dump(stdout);
+ t.Delete();
+}
+
+
+
+
+int test_human_p(){
+ printf ("%s \n",double_to_human_str(1024.0*1024.0,"Byte",KBYE_1024).c_str());
+ printf ("%s \n",double_to_human_str(1.0,"Byte",KBYE_1024).c_str());
+ printf ("%s \n",double_to_human_str(-3000.0,"Byte",KBYE_1024).c_str());
+ printf ("%s \n",double_to_human_str(-3000000.0,"Byte",KBYE_1024).c_str());
+ printf ("%s \n",double_to_human_str(-30000000.0,"Byte",KBYE_1024).c_str());
+ return (0);
+}
+
+
+
+static bool was_init=false;
+
+void gtest_init_once(){
+
+ if ( !was_init ){
+ CGlobalInfo::init_pools(1000);
+ time_init();
+ was_init=true;
+ }
+}
+
+
+
+#define EXPECT_EQ_UINT32(a,b) EXPECT_EQ((uint32_t)(a),(uint32_t)(b))
+
+
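+/* Test harness: init() below loads a YAML traffic profile, generates packets
+   into <out_file>-<thread>.erf and compares the result with the golden
+   <out_file>-<thread>-ex.erf file within a tolerance of m_time_diff seconds. */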
+class CTestBasic {
+
+public:
+ CTestBasic(){
+ m_threads=1;
+ m_time_diff=0.001;
+ m_req_ports=0;
+ m_dump_json=false;
+ }
+
+ bool init(void){
+
+ uint16_t * ports;
+ CTupleBase tuple;
+
+ CErfIF erf_vif;
+
+
+ fl.Create();
+ m_saved_packet_padd_offset=0;
+
+ fl.load_from_yaml(CGlobalInfo::m_options.cfg_file,m_threads);
+ fl.generate_p_thread_info(m_threads);
+
+ CFlowGenListPerThread * lpt;
+
+ fl.m_threads_info[0]->set_vif(&erf_vif);
+
+
+ CErfCmp cmp;
+ cmp.dump=1;
+
+ bool res=true;
+
+
+ int i;
+ for (i=0; i<m_threads; i++) {
+ lpt=fl.m_threads_info[i];
+
+ CFlowPktInfo * pkt=lpt->m_cap_gen[0]->m_flow_info->GetPacket(0);
+ m_saved_packet_padd_offset =pkt->m_pkt_indication.m_packet_padding;
+
+ char buf[100];
+ char buf_ex[100];
+ sprintf(buf,"%s-%d.erf",CGlobalInfo::m_options.out_file.c_str(),i);
+ sprintf(buf_ex,"%s-%d-ex.erf",CGlobalInfo::m_options.out_file.c_str(),i);
+
+ if ( m_req_ports ){
+ /* generate from first template m_req_ports ports */
+ int i;
+ CTupleTemplateGeneratorSmart * lpg=&lpt->m_cap_gen[0]->tuple_gen;
+ ports = new uint16_t[m_req_ports];
+ lpg->GenerateTuple(tuple);
+ for (i=0 ; i<m_req_ports;i++) {
+ ports[i]=lpg->GenerateOneSourcePort();
+ }
+ }
+
+ lpt->generate_erf(buf,CGlobalInfo::m_options.preview);
+ lpt->m_node_gen.DumpHist(stdout);
+
+ cmp.d_sec = m_time_diff;
+ if ( cmp.compare(std::string(buf),std::string(buf_ex)) != true ) {
+ res=false;
+ }
+
+ }
+ if ( m_dump_json ){
+ printf(" dump json ...........\n");
+ std::string s;
+ fl.m_threads_info[0]->m_node_gen.dump_json(s);
+ printf(" %s \n",s.c_str());
+ }
+
+ if ( m_req_ports ){
+ int i;
+ fl.m_threads_info[0]->m_smart_gen.FreePort(tuple.getClient(),tuple.getClientPort());
+
+ for (i=0 ; i<m_req_ports;i++) {
+ fl.m_threads_info[0]->m_smart_gen.FreePort(tuple.getClient(),ports[i]);
+ }
+ delete []ports;
+ }
+
+ printf(" active %d \n", fl.m_threads_info[0]->m_smart_gen.ActiveSockets());
+ EXPECT_EQ_UINT32(fl.m_threads_info[0]->m_smart_gen.ActiveSockets(),0);
+ fl.Delete();
+ return (res);
+ }
+
+ uint16_t get_padd_offset_first_packet(){
+ return (m_saved_packet_padd_offset);
+
+ }
+
+
+
+public:
+ int m_req_ports;
+ int m_threads;
+ double m_time_diff;
+ bool m_dump_json;
+ uint16_t m_saved_packet_padd_offset;
+ CFlowGenList fl;
+};
+
+
+
+
+class basic : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gtest_init_once();
+ }
+ virtual void TearDown() {
+ }
+public:
+};
+
+class cpu : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gtest_init_once();
+ }
+ virtual void TearDown() {
+ }
+public:
+};
+
+
+
+TEST_F(basic, limit_single_pkt) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/limit_single_pkt.yaml";
+ po->out_file ="exp/limit_single_pkt";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, limit_multi_pkt) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/limit_multi_pkt.yaml";
+ po->out_file ="exp/limit_multi_pkt";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, imix) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/imix.yaml";
+ po->out_file ="exp/imix";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+TEST_F(basic, dns) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/dns.yaml";
+ po->out_file ="exp/dns";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+/* test -p function */
+TEST_F(basic, dns_flow_flip) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->preview.setClientServerFlowFlip(true);
+
+ po->cfg_file ="cap2/dns.yaml";
+ po->out_file ="exp/dns_p";
+ bool res=t1.init();
+ po->preview.setClientServerFlowFlip(false);
+
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, dns_flip) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->preview.setClientServerFlip(true);
+
+ po->cfg_file ="cap2/dns.yaml";
+ po->out_file ="exp/dns_flip";
+ bool res=t1.init();
+ po->preview.setClientServerFlip(false);
+
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, dns_e) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->preview.setClientServerFlowFlipAddr(true);
+
+ po->cfg_file ="cap2/dns.yaml";
+ po->out_file ="exp/dns_e";
+ bool res=t1.init();
+ po->preview.setClientServerFlowFlipAddr(false);
+
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+
+/* test the packet padding , must be valid for --rx-check to work */
+TEST_F(basic, dns_packet_padding_test) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/dns.yaml";
+ po->out_file ="exp/dns";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(t1.get_padd_offset_first_packet(),0);
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+TEST_F(basic, dns_ipv6) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->preview.set_ipv6_mode_enable(true);
+ po->cfg_file ="cap2/dns.yaml";
+ po->out_file ="exp/dns_ipv6";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+ EXPECT_EQ_UINT32(t1.get_padd_offset_first_packet(),0);
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+TEST_F(basic, dns_json) {
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ t1.m_dump_json=true;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/dns.yaml";
+ po->out_file ="exp/dns";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+#if 0
+
+TEST_F(basic, dns_wlen) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/dns_wlen.yaml";
+ po->out_file ="exp/dns_wlen";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, dns_wlen1) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/dns_wlen1.yaml";
+ po->out_file ="exp/dns_wlen1";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, dns_wlen2) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/dns_wlen2.yaml";
+ po->out_file ="exp/dns_wlen2";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+
+TEST_F(basic, dns_one_server_2) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/dns_single_server.yaml";
+ po->out_file ="exp/dns_single_server";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+#endif
+
+TEST_F(basic, dns_one_server) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/dns_one_server.yaml";
+ po->out_file ="exp/dns_one_server";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+TEST_F(basic, sfr2) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+
+ po->preview.setVMode(0);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/sfr2.yaml";
+ po->out_file ="exp/sfr2";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+TEST_F(basic, sfr3) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(0);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/sfr3.yaml";
+ po->out_file ="exp/sfr3";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+TEST_F(basic, sfr4) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(0);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/sfr4.yaml";
+ po->out_file ="exp/sfr_4";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+TEST_F(basic, ipv6_convert) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.set_ipv6_mode_enable(true);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/imix.yaml";
+ po->out_file ="exp/imix_v6";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+TEST_F(basic, ipv6) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.set_ipv6_mode_enable(true);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/ipv6.yaml";
+ po->out_file ="exp/ipv6";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+TEST_F(basic, ipv4_vlan) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/ipv4_vlan.yaml";
+ po->out_file ="exp/ipv4_vlan";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, ipv6_vlan) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.set_ipv6_mode_enable(true);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/ipv6_vlan.yaml";
+ po->out_file ="exp/ipv6_vlan";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+
+/* exactly like the cap file */
+TEST_F(basic, test_pcap_mode1) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/test_pcap_mode1.yaml";
+ po->out_file ="exp/pcap_mode1";
+ t1.m_time_diff = 0.000005; // 5 usec
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+/* check overriding of a low IPG */
+TEST_F(basic, test_pcap_mode2) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/test_pcap_mode2.yaml";
+ po->out_file ="exp/pcap_mode2";
+ t1.m_time_diff = 0.000005; // 5 usec
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+
+
+
+TEST_F(basic, latency1) {
+ CLatencyPktInfo l;
+ l.Create();
+
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+
+ uint8_t mac[]={0,0,0,1,0,0};
+
+ CErfIF erf_vif;
+ erf_vif.set_review_mode(&CGlobalInfo::m_options.preview);
+
+ erf_vif.open_file("exp/sctp.erf");
+
+
+ mac[0]=0;
+ mac[1]=0;
+ mac[2]=0;
+ mac[3]=1;
+ mac[4]=0;
+ mac[5]=0;
+
+
+ l.set_ip(0x10000000,0x20000000,0x01000000);
+
+ int i;
+ /* simulate 8 ports */
+ for (i=0;i<8; i++) {
+ rte_mbuf_t * m=l.generate_pkt(i);
+ erf_vif.send_node(l.getNode());
+ rte_pktmbuf_free(m);
+ }
+
+ erf_vif.close_file();
+
+
+ l.Delete();
+
+ CErfCmp cmp;
+ cmp.dump=1;
+
+ bool res=true;
+
+ if ( cmp.compare("exp/sctp.erf","exp/sctp-ex.erf") != true ) {
+ res=false;
+ }
+ EXPECT_EQ_UINT32((uint32_t)res?1:0, (uint32_t)1)<< "pass";
+}
+
+
+
+TEST_F(basic, latency2) {
+ CLatencyPktInfo l;
+ CCPortLatency port0;
+ l.Create();
+ port0.Create(0,0,l.get_payload_offset(),l.get_pkt_size(),0);
+
+
+ uint8_t mac[]={0,0,0,1,0,0};
+
+ mac[0]=0;
+ mac[1]=0;
+ mac[2]=0;
+ mac[3]=1;
+ mac[4]=0;
+ mac[5]=0;
+
+
+ l.set_ip(0x01000000,0x02000000,0x01000000);
+
+
+ int i;
+ for (i=0; i<100; i++) {
+ uint8_t *p;
+ rte_mbuf_t * m=l.generate_pkt(0);
+ p=rte_pktmbuf_mtod(m, uint8_t*);
+ //utl_DumpBuffer(stdout,p,l.get_pkt_size(),0);
+
+ port0.update_packet(m);
+
+ p=rte_pktmbuf_mtod(m, uint8_t*);
+ //utl_DumpBuffer(stdout,p,l.get_pkt_size(),0);
+ //printf("offset is : %d \n",l.get_payload_offset());
+
+ CRx_check_header * rx_p;
+ bool res=port0.check_packet(m,rx_p);
+ EXPECT_EQ_UINT32((uint32_t)res?1:0, (uint32_t)1)<< "pass";
+ if (!res ) {
+ printf(" ERROR \n");
+ }
+ rte_pktmbuf_free(m);
+ }
+ port0.DumpCounters(stdout);
+ EXPECT_EQ_UINT32(port0.m_pkt_ok, (uint32_t)100)<< "pass";
+
+ port0.Delete();
+ l.Delete();
+}
+
+
+TEST_F(basic, latency3) {
+ CLatencyPktInfo l;
+ CCPortLatency port0;
+ l.Create();
+ port0.Create(0,0,l.get_payload_offset(),l.get_pkt_size(),0);
+
+
+ uint8_t mac[]={0,0,0,1,0,0};
+
+
+ mac[0]=0;
+ mac[1]=0;
+ mac[2]=0;
+ mac[3]=1;
+ mac[4]=0;
+ mac[5]=0;
+
+
+ l.set_ip(0x01000000,0x02000000,0x01000000);
+
+ uint8_t *p;
+ rte_mbuf_t * m=l.generate_pkt(0);
+ port0.update_packet(m);
+ p=rte_pktmbuf_mtod(m, uint8_t*);
+ memset(p,0,l.get_pkt_size());
+
+ CRx_check_header * rx_p;
+ bool res=port0.check_packet(m,rx_p);
+ EXPECT_EQ_UINT32((uint32_t)res?0:1, (uint32_t)1)<< "pass";
+ if (!res ) {
+ printf(" OK \n");
+ }
+ rte_pktmbuf_free(m);
+ EXPECT_EQ_UINT32(port0.m_unsup_prot, (uint32_t)1)<< "pass";
+
+ port0.Delete();
+ l.Delete();
+}
+
+
+
+
+class CDummyLatencyHWBase : public CPortLatencyHWBase {
+public:
+ CDummyLatencyHWBase(){
+ m_queue=0;
+ m_port_id=0;
+ }
+
+ virtual int tx(rte_mbuf_t * m){
+ assert(m_queue==0);
+ //printf(" tx on port %d \n",m_port_id);
+ // utl_DumpBuffer(stdout,rte_pktmbuf_mtod(m, uint8_t*),rte_pktmbuf_pkt_len(m),0);
+ m_queue=m;
+ return (0);
+ }
+
+ virtual rte_mbuf_t * rx(){
+ //printf(" rx on port %d \n",m_port_id);
+ rte_mbuf_t * m=0;
+ if (m_queue ) {
+ m=m_queue;
+ m_queue=0;
+ }
+
+ /*if ( m ){
+ utl_DumpBuffer(stdout,rte_pktmbuf_mtod(m , uint8_t*),rte_pktmbuf_pkt_len(m ),0);
+ } */
+ return ( m );
+ }
+
+ virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts){
+ //printf(" rx on port %d \n",m_port_id);
+ rte_mbuf_t * m=rx();
+ if (m) {
+ rx_pkts[0]=m;
+ return (1);
+ }else{
+ return (0);
+ }
+ }
+
+
+private:
+ rte_mbuf_t * m_queue;
+public:
+ uint8_t m_port_id;
+};
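+
+/* Note (inferred from the class above, a sketch of intent rather than documented
+   behavior): this is a single-slot loopback - tx() parks one mbuf in m_queue and
+   rx()/rx_burst() hand it back once - so the latency tests below can drive
+   CLatencyManager without real DPDK ports. */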
+
+
+
+TEST_F(basic, latency4) {
+
+ uint8_t mac[]={0,0,0,1,0,0};
+
+ mac[0]=0;
+ mac[1]=0;
+ mac[2]=0;
+ mac[3]=1;
+ mac[4]=0;
+ mac[5]=0;
+
+
+ CLatencyManager mg;
+ CLatencyManagerCfg cfg;
+ CDummyLatencyHWBase dports[MAX_LATENCY_PORTS];
+ cfg.m_cps =10;
+ cfg.m_max_ports=4;
+ int i;
+ for (i=0; i<MAX_LATENCY_PORTS; i++) {
+ dports[i].m_port_id=i;
+ cfg.m_ports[i] = &dports[i];
+ }
+
+ mg.Create(&cfg);
+
+ printf(" before sending \n");
+ mg.Dump(stdout);
+ std::string json;
+ mg.dump_json_v2(json);
+ printf(" %s \n",json.c_str());
+
+
+ EXPECT_EQ_UINT32(mg.is_active()?1:0, (uint32_t)0)<< "pass";
+
+ mg.start(8);
+ mg.stop();
+ mg.Dump(stdout);
+ mg.DumpShort(stdout);
+ mg.dump_json_v2(json);
+ printf(" %s \n",json.c_str());
+
+ mg.Delete();
+}
+
+
+TEST_F(basic, hist1) {
+
+ CTimeHistogram hist1;
+
+ dsec_t dusec=1.0/1000000.0;
+
+ hist1.Create();
+ hist1.Add(dusec);
+ hist1.Add(2.0*dusec);
+ hist1.Add(11.0*dusec);
+ hist1.Add(110.0*dusec);
+ hist1.Add(1110.0*dusec);
+ hist1.Add(10110.0*dusec);
+ hist1.Add(40110.0*dusec);
+ hist1.Add(400110.0*dusec);
+ hist1.Dump(stdout);
+ hist1.Delete();
+}
+
+
+
+TEST_F(basic, rtsp1) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/rtsp_short1.yaml";
+ po->out_file ="exp/rtsp_short1";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, rtsp2) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/rtsp_short2.yaml";
+ po->out_file ="exp/rtsp_short2";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, rtsp3) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/rtsp_short3.yaml";
+ po->out_file ="exp/rtsp_short3";
+ t1.m_req_ports = 32000;
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+TEST_F(basic, rtsp1_ipv6) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.set_ipv6_mode_enable(true);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/rtsp_short1.yaml";
+ po->out_file ="exp/rtsp_short1_v6";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+TEST_F(basic, rtsp2_ipv6) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.set_ipv6_mode_enable(true);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/rtsp_short2.yaml";
+ po->out_file ="exp/rtsp_short2_v6";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+TEST_F(basic, rtsp3_ipv6) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.set_ipv6_mode_enable(true);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/rtsp_short3.yaml";
+ po->out_file ="exp/rtsp_short3_v6";
+ t1.m_req_ports = 32000;
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+
+TEST_F(basic, sip1) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/sip_short1.yaml";
+ po->out_file ="exp/sip_short1";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+TEST_F(basic, sip2) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/sip_short2.yaml";
+ po->out_file ="exp/sip_short2";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, sip3) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/sip_short2.yaml";
+ po->out_file ="exp/sip_short3";
+ t1.m_req_ports = 32000;
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+
+TEST_F(basic, sip1_ipv6) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.set_ipv6_mode_enable(true);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/sip_short1.yaml";
+ po->out_file ="exp/sip_short1_v6";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+
+TEST_F(basic, sip2_ipv6) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.set_ipv6_mode_enable(true);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/sip_short2.yaml";
+ po->out_file ="exp/sip_short2_v6";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+TEST_F(basic, sip3_ipv6) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.set_ipv6_mode_enable(true);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/sip_short2.yaml";
+ po->out_file ="exp/sip_short3_v6";
+ t1.m_req_ports = 32000;
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+
+TEST_F(basic, dyn1) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/dyn_pyld1.yaml";
+ po->out_file ="exp/dyn_pyld1";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, http1) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/http_plugin.yaml";
+ po->out_file ="exp/http_plugin";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+}
+
+TEST_F(basic, http1_ipv6) {
+
+ CTestBasic t1;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(3);
+ po->preview.set_ipv6_mode_enable(true);
+ po->preview.setFileWrite(true);
+ po->cfg_file ="cap2/http_plugin.yaml";
+ po->out_file ="exp/http_plugin_v6";
+ bool res=t1.init();
+ EXPECT_EQ_UINT32(1, res?1:0)<< "pass";
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+
+
+void delay(int msec);
+
+
+TEST_F(cpu, cpu1) {
+ CCpuUtlDp cpu_dp;
+ CCpuUtlCp cpu_cp;
+ cpu_cp.Create(&cpu_dp);
+ int i;
+ for (i=0; i<5;i++) {
+ cpu_cp.Update();
+ double c=cpu_cp.GetVal();
+ printf (" %f \n",c);
+ EXPECT_EQ(c,(double)0.0);
+
+ delay(100);
+ }
+
+ cpu_cp.Delete();
+}
+
+TEST_F(cpu, cpu2) {
+ CCpuUtlDp cpu_dp;
+ CCpuUtlCp cpu_cp;
+ cpu_cp.Create(&cpu_dp);
+ int i;
+ for (i=0; i<100;i++) {
+ cpu_dp.start_work();
+ cpu_cp.Update();
+ double c1 = cpu_cp.GetVal();
+ printf(" cpu %2.0f \n",c1);
+ if (i>50) {
+ int s=( c1<80 && c1>30)?1:0;
+ EXPECT_EQ(s,1);
+ }
+ delay(1);
+ if ((i%2)==1) {
+ cpu_dp.commit();
+ }else{
+ cpu_dp.revert();
+ }
+ }
+
+ cpu_cp.Delete();
+}
+
+#if 0
+TEST_F(cpu, cpu3) {
+ CCpuUtlDp cpu_dp;
+ CCpuUtlCp cpu_cp;
+ cpu_cp.Create(&cpu_dp);
+ int i;
+ for (i=0; i<200;i++) {
+ cpu_dp.start_work();
+ if (i%10==0) {
+ cpu_cp.Update();
+ }
+ double c1 = cpu_cp.GetVal();
+ if (i>150) {
+ printf(" cpu %2.0f \n",c1);
+ int s=( c1<11 && c1>8)?1:0;
+ EXPECT_EQ(s,1);
+ }
+ delay(1);
+ if ((i%10)==1) {
+ cpu_dp.commit();
+ }else{
+ cpu_dp.revert();
+ }
+ }
+ cpu_cp.Delete();
+}
+#endif
+
+
+class timerwl : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gtest_init_once();
+ }
+ virtual void TearDown() {
+ }
+public:
+};
+
+
+
+void flow_callback(CFlowTimerHandle * timer_handle);
+
+class CTestFlow {
+public:
+ CTestFlow(){
+ flow_id = 0;
+ m_timer_handle.m_callback=flow_callback;
+ m_timer_handle.m_object = (void *)this;
+ m_timer_handle.m_id = 0x1234;
+ }
+
+ uint32_t flow_id;
+ CFlowTimerHandle m_timer_handle;
+public:
+ void OnTimeOut(){
+ printf(" timeout %d \n",flow_id);
+ }
+};
+
+void flow_callback(CFlowTimerHandle * t){
+ CTestFlow * lp=(CTestFlow *)t->m_object;
+ assert(lp);
+ assert(t->m_id==0x1234);
+ lp->OnTimeOut();
+}
+
+
+TEST_F(timerwl, tw1) {
+ CTimerWheel my_tw;
+
+ CTestFlow f1;
+ CTestFlow f2;
+ CTestFlow f3;
+ CTestFlow f4;
+
+ f1.flow_id=1;
+ f2.flow_id=2;
+ f3.flow_id=3;
+ f4.flow_id=4;
+ double time;
+ EXPECT_EQ(my_tw.peek_top_time(time),false);
+ my_tw.restart_timer(&f1.m_timer_handle,10.0);
+ my_tw.restart_timer(&f2.m_timer_handle,5.0);
+ my_tw.restart_timer(&f3.m_timer_handle,1.0);
+ EXPECT_EQ(my_tw.peek_top_time(time),true);
+ printf(" time %f \n",time);
+ EXPECT_EQ(time ,1.0);
+
+ EXPECT_EQ(my_tw.peek_top_time(time),true);
+ printf(" time %f \n",time);
+ EXPECT_EQ(time ,1.0);
+
+ EXPECT_EQ(my_tw.peek_top_time(time),true);
+ printf(" time %f \n",time);
+ EXPECT_EQ(time ,1.0);
+
+ EXPECT_EQ(my_tw.handle(),true);
+
+ EXPECT_EQ(my_tw.peek_top_time(time),true);
+ printf(" time %f \n",time);
+ EXPECT_EQ(time ,5.0);
+
+ EXPECT_EQ(my_tw.handle(),true);
+
+ EXPECT_EQ(my_tw.peek_top_time(time),true);
+ printf(" time %f \n",time);
+ EXPECT_EQ(time ,10.0);
+
+ EXPECT_EQ(my_tw.handle(),true);
+
+}
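+
+/* Note (inferred from the expectations above): peek_top_time() is non-destructive -
+   repeated peeks keep returning the earliest deadline - while handle() fires and
+   removes that earliest timer, which is why the peeked time advances
+   1.0 -> 5.0 -> 10.0 only after each handle() call. */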
+
+TEST_F(timerwl, tw2) {
+ CTimerWheel my_tw;
+
+ double mytime=0.1;
+ int i;
+ CTestFlow * af[100];
+
+ for (i=0; i<100; i++) {
+ CTestFlow * f=new CTestFlow();
+ af[i]=f;
+ f->flow_id=i;
+ my_tw.restart_timer(&f->m_timer_handle,mytime+10.0);
+ }
+ EXPECT_EQ(my_tw.m_st_alloc-my_tw.m_st_free,100);
+
+ my_tw.try_handle_events(mytime);
+
+ EXPECT_EQ(my_tw.m_st_alloc-my_tw.m_st_free,100);
+
+ for (i=0; i<100; i++) {
+ CTestFlow * f=af[i];
+ my_tw.stop_timer(&f->m_timer_handle);
+ }
+ EXPECT_EQ(my_tw.m_st_alloc-my_tw.m_st_free,100);
+
+ my_tw.try_handle_events(mytime);
+
+ /* expect all the objects to be freed */
+ EXPECT_EQ(my_tw.m_st_alloc-my_tw.m_st_free,0);
+
+}
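+
+/* Note (inferred from the expectations above, not from CTimerWheel's interface):
+   stop_timer() only marks the internal node as inactive, so m_st_alloc-m_st_free
+   stays at 100 until try_handle_events() advances the wheel and lazily frees the
+   stopped nodes, bringing the delta back to 0. */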
+
+TEST_F(timerwl, check_stop_timer) {
+
+ CTimerWheel my_tw;
+
+
+ CTestFlow f1;
+ CTestFlow f2;
+ CTestFlow f3;
+ CTestFlow f4;
+
+ f1.flow_id=1;
+ f2.flow_id=2;
+ f3.flow_id=3;
+ f4.flow_id=4;
+ double time;
+ assert(my_tw.peek_top_time(time)==false);
+ my_tw.restart_timer(&f1.m_timer_handle,10.0);
+ my_tw.restart_timer(&f2.m_timer_handle,5.0);
+ my_tw.restart_timer(&f3.m_timer_handle,1.0);
+ my_tw.stop_timer(&f1.m_timer_handle);
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ EXPECT_EQ(time ,1.0);
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ EXPECT_EQ(time ,1.0);
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ EXPECT_EQ(time ,1.0);
+
+ assert(my_tw.handle());
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ EXPECT_EQ(time ,5.0);
+
+
+ assert(my_tw.handle());
+
+ EXPECT_EQ(my_tw.peek_top_time(time) ,false);
+ my_tw.Dump(stdout);
+}
+
+
+static int many_timers_flow_id=0;
+
+void many_timers_flow_callback(CFlowTimerHandle * t){
+ CTestFlow * lp=(CTestFlow *)t->m_object;
+ assert(lp);
+ assert(t->m_id==0x1234);
+ assert(many_timers_flow_id==lp->flow_id);
+ many_timers_flow_id--;
+}
+
+
+TEST_F(timerwl, many_timers) {
+
+ CTimerWheel my_tw;
+
+ int i;
+ for (i=0; i<100; i++) {
+ CTestFlow * f= new CTestFlow();
+ f->m_timer_handle.m_callback=many_timers_flow_callback;
+ f->flow_id=(uint32_t)i;
+ my_tw.restart_timer(&f->m_timer_handle,100.0-(double)i);
+ }
+ many_timers_flow_id=99;
+
+ double time;
+ double ex_time=1.0;
+ while (true) {
+ if ( my_tw.peek_top_time(time) ){
+ assert(time==ex_time);
+ ex_time+=1.0;
+ assert(my_tw.handle());
+ }
+ else{
+ break;
+ }
+ }
+
+ my_tw.Dump(stdout);
+
+ EXPECT_EQ(my_tw.m_st_handle ,100);
+ EXPECT_EQ(my_tw.m_st_alloc ,100);
+ EXPECT_EQ(my_tw.m_st_free ,100);
+ EXPECT_EQ(my_tw.m_st_start ,100);
+
+}
+
+void many_timers_stop_flow_callback(CFlowTimerHandle * t){
+ CTestFlow * lp=(CTestFlow *)t->m_object;
+ assert(lp);
+ assert(t->m_id==0x1234);
+ assert(0);
+}
+
+TEST_F(timerwl, many_timers_with_stop) {
+ CTimerWheel my_tw;
+
+ int i;
+ for (i=0; i<100; i++) {
+ CTestFlow * f= new CTestFlow();
+ f->m_timer_handle.m_callback=many_timers_stop_flow_callback;
+ f->flow_id=(uint32_t)i;
+ my_tw.restart_timer(&f->m_timer_handle, 500.0 - (double)i);
+ my_tw.restart_timer(&f->m_timer_handle, 1000.0 - (double)i);
+ my_tw.restart_timer(&f->m_timer_handle, 100.0 - (double)i);
+ my_tw.stop_timer(&f->m_timer_handle);
+ }
+
+ double time;
+ while (true) {
+ if ( my_tw.peek_top_time(time) ){
+ assert(0);
+ assert(my_tw.handle());
+ }
+ else{
+ break;
+ }
+ }
+
+ my_tw.Dump(stdout);
+
+
+ EXPECT_EQ(my_tw.m_st_handle ,0);
+ EXPECT_EQ(my_tw.m_st_alloc-my_tw.m_st_free ,0);
+ EXPECT_EQ(my_tw.m_st_start ,300);
+}
+
+
+//////////////////////////////////////////////
+class rx_check : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gtest_init_once();
+ m_rx_check.Create();
+
+ }
+ virtual void TearDown() {
+ m_rx_check.Delete();
+ }
+public:
+ RxCheckManager m_rx_check;
+};
+
+
+TEST_F(rx_check, rx_check_normal) {
+ int i;
+
+ for (i=0; i<10; i++) {
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+ rxh.m_pkt_id=i;
+ rxh.m_flow_size=10;
+
+ rxh.m_flow_id=7;
+ rxh.set_dir(0);
+ rxh.set_both_dir(0);
+
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ EXPECT_EQ(m_rx_check.m_stats.get_total_err(),0);
+ EXPECT_EQ(m_rx_check.m_stats.m_add,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_remove,1);
+ m_rx_check.Dump(stdout);
+}
+
+
+TEST_F(rx_check, rx_check_drop) {
+
+ int i;
+
+ for (i=0; i<10; i++) {
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ if (i==4) {
+ /* drop packet 4 */
+ continue;
+ }
+ rxh.m_pkt_id=i;
+
+ rxh.m_flow_size=10;
+
+ rxh.set_dir(0);
+ rxh.set_both_dir(0);
+
+ rxh.m_flow_id=7;
+
+ rxh.m_flags=0;
+ m_rx_check.handle_packet(&rxh);
+ }
+ m_rx_check.tw_drain();
+ EXPECT_EQ(m_rx_check.m_stats.m_err_oo_late,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_add,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_remove,1);
+ m_rx_check.Dump(stdout);
+}
+
+TEST_F(rx_check, rx_check_ooo) {
+
+ int i;
+
+ for (i=0; i<10; i++) {
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ rxh.set_dir(0);
+ rxh.set_both_dir(0);
+
+
+ /* out of order */
+ if (i==4) {
+ rxh.m_pkt_id=5;
+ }else{
+ if (i==5) {
+ rxh.m_pkt_id=4;
+ }else{
+ rxh.m_pkt_id=i;
+ }
+ }
+
+ rxh.m_flow_size=10;
+
+ rxh.m_flow_id=7;
+
+ rxh.m_flags=0;
+ m_rx_check.handle_packet(&rxh);
+ }
+ m_rx_check.tw_drain();
+ EXPECT_EQ(m_rx_check.m_stats.m_err_oo_early,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_err_oo_late,2);
+
+ m_rx_check.Dump(stdout);
+}
+
+
+TEST_F(rx_check, rx_check_ooo_1) {
+ int i;
+
+ for (i=0; i<10; i++) {
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.set_dir(0);
+ rxh.set_both_dir(0);
+
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ /* out of order */
+ if (i==4) {
+ rxh.m_pkt_id=56565;
+ }else{
+ if (i==5) {
+ rxh.m_pkt_id=4;
+ }else{
+ rxh.m_pkt_id=i;
+ }
+ }
+ rxh.m_flow_size=10;
+ rxh.m_flow_id=7;
+ rxh.m_flags=0;
+ m_rx_check.handle_packet(&rxh);
+ }
+ m_rx_check.tw_drain();
+ EXPECT_EQ(m_rx_check.m_stats.m_err_wrong_pkt_id,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_err_oo_late,1);
+
+ m_rx_check.Dump(stdout);
+}
+
+/* start without the first packet (not FIF) */
+TEST_F(rx_check, rx_check_ooo_2) {
+ int i;
+
+ for (i=0; i<10; i++) {
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ /* out of order */
+ rxh.set_dir(0);
+ rxh.set_both_dir(0);
+
+
+ if (i==0) {
+ rxh.m_pkt_id=7;
+ }else{
+ if (i==7) {
+ rxh.m_pkt_id=0;
+ }else{
+ rxh.m_pkt_id=i;
+ }
+ }
+
+ rxh.m_flow_size=10;
+ rxh.m_flow_id=7;
+ rxh.m_flags=0;
+ m_rx_check.handle_packet(&rxh);
+ }
+ m_rx_check.tw_drain();
+ EXPECT_EQ(m_rx_check.m_stats.m_err_open_with_no_fif_pkt,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_err_oo_late,1);
+ m_rx_check.Dump(stdout);
+}
+
+
+TEST_F(rx_check, rx_check_normal_two_dir) {
+ int i;
+
+ for (i=0; i<10; i++) {
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ rxh.m_pkt_id=i;
+ rxh.m_flow_size=10;
+
+ rxh.m_flow_id=7;
+ rxh.set_dir(0);
+ rxh.set_both_dir(0);
+
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ EXPECT_EQ(m_rx_check.m_stats.get_total_err(),0);
+ EXPECT_EQ(m_rx_check.m_stats.m_add,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_remove,1);
+ m_rx_check.Dump(stdout);
+}
+
+
+
+TEST_F(rx_check, rx_check_normal_two_dir_fails) {
+ int i;
+
+ for (i=0; i<10; i++) {
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ rxh.m_pkt_id=i;
+ rxh.m_flow_size=10;
+
+ rxh.m_flow_id=7;
+ rxh.set_dir(0);
+ rxh.set_both_dir(1);
+
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ EXPECT_EQ(m_rx_check.m_stats.get_total_err(),0);
+ EXPECT_EQ(m_rx_check.m_stats.m_add,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_remove,0);
+ m_rx_check.Dump(stdout);
+}
+
+TEST_F(rx_check, rx_check_normal_two_dir_ok) {
+ int i;
+
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ rxh.set_both_dir(1);
+ rxh.m_flow_id=7;
+ rxh.m_flow_size=10;
+
+ for (i=0; i<10; i++) {
+ rxh.m_pkt_id=i;
+ printf(" first : %d \n",i);
+ rxh.set_dir(0);
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ for (i=0; i<10; i++) {
+ printf(" se : %d \n",i);
+ rxh.m_pkt_id=i;
+ rxh.set_dir(1);
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ EXPECT_EQ(m_rx_check.m_stats.get_total_err(),0);
+ EXPECT_EQ(m_rx_check.m_stats.m_add,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_remove,1);
+ m_rx_check.Dump(stdout);
+}
+
+TEST_F(rx_check, rx_check_normal_one_pkt_one_dir) {
+ int i;
+
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ rxh.set_both_dir(1);
+ rxh.m_flow_id=7;
+ rxh.m_flow_size=1;
+
+ for (i=0; i<1; i++) {
+ rxh.m_pkt_id=i;
+ printf(" first : %d \n",i);
+ rxh.set_dir(0);
+ m_rx_check.handle_packet(&rxh);
+ }
+ EXPECT_EQ(m_rx_check.m_stats.get_total_err(),0);
+ EXPECT_EQ(m_rx_check.m_stats.m_add,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_remove,0);
+ m_rx_check.Dump(stdout);
+}
+
+TEST_F(rx_check, rx_check_normal_one_pkt_one_dir_0) {
+ int i;
+
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ rxh.set_both_dir(0);
+ rxh.m_flow_id=7;
+ rxh.m_flow_size=1;
+
+ for (i=0; i<1; i++) {
+ rxh.m_pkt_id=i;
+ rxh.set_dir(0);
+ m_rx_check.handle_packet(&rxh);
+ }
+ EXPECT_EQ(m_rx_check.m_stats.get_total_err(),0);
+ EXPECT_EQ(m_rx_check.m_stats.m_add,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_remove,1);
+ m_rx_check.Dump(stdout);
+}
+
+TEST_F(rx_check, rx_check_normal_one_pkt_two_dir_0) {
+ int i;
+
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ rxh.set_both_dir(1);
+ rxh.m_flow_id=7;
+ rxh.m_flow_size=1;
+
+ for (i=0; i<1; i++) {
+ rxh.m_pkt_id=i;
+ rxh.set_dir(0);
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ for (i=0; i<1; i++) {
+ rxh.m_pkt_id=i;
+ rxh.set_dir(1);
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ EXPECT_EQ(m_rx_check.m_stats.get_total_err(),0);
+ EXPECT_EQ(m_rx_check.m_stats.m_add,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_remove,1);
+ m_rx_check.Dump(stdout);
+}
+
+TEST_F(rx_check, rx_check_normal_one_pkt_two_dir_err1) {
+ int i;
+
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ rxh.set_both_dir(1);
+ rxh.m_flow_id=7;
+ rxh.m_flow_size=10;
+
+ for (i=0; i<10; i++) {
+ rxh.m_pkt_id=i;
+ rxh.set_dir(0);
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ for (i=0; i<10; i++) {
+ if (i==0) {
+ rxh.m_pkt_id=7;
+ }else{
+ if (i==7) {
+ rxh.m_pkt_id=0;
+ }else{
+ rxh.m_pkt_id=i;
+ }
+ }
+
+ rxh.set_dir(1);
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ EXPECT_EQ(m_rx_check.m_stats.m_err_oo_late,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_err_fif_seen_twice,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_add,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_remove,0);
+ m_rx_check.Dump(stdout);
+}
+
+
+TEST_F(rx_check, rx_check_normal_two_dir_oo) {
+ int i;
+
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_time_stamp=0;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ rxh.set_both_dir(1);
+ rxh.m_flow_id=7;
+ rxh.m_flow_size=10;
+
+ for (i=0; i<10; i++) {
+ rxh.m_pkt_id=i;
+ rxh.set_dir(0);
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ for (i=0; i<10; i++) {
+
+ if (i==4) {
+ rxh.m_pkt_id=5;
+ }else{
+ if (i==5) {
+ rxh.m_pkt_id=4;
+ }else{
+ rxh.m_pkt_id=i;
+ }
+ }
+
+
+ rxh.set_dir(1);
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ EXPECT_EQ(m_rx_check.m_stats.m_err_oo_early,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_err_oo_late,2);
+ EXPECT_EQ(m_rx_check.m_stats.m_add,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_remove,0);
+ m_rx_check.Dump(stdout);
+}
+
+TEST_F(rx_check, rx_check_normal_aging) {
+ int i;
+
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=2;
+
+ rxh.set_both_dir(0);
+ rxh.m_flow_id=7;
+ rxh.m_flow_size=10;
+ rxh.m_template_id=13;
+
+ for (i=0; i<9; i++) {
+ rxh.m_time_stamp=(uint32_t)now_sec();
+ rxh.m_pkt_id=i;
+ rxh.set_dir(0);
+ rxh.m_pkt_id=i;
+ if (i<5) {
+ m_rx_check.m_cur_time = (now_sec()+10);
+ }
+ m_rx_check.tw_handle();
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ EXPECT_EQ(m_rx_check.m_template_info[13].get_error_counter()>0?1:0,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_err_aged,4);
+ EXPECT_EQ(m_rx_check.m_stats.m_err_open_with_no_fif_pkt,4 );
+ EXPECT_EQ(m_rx_check.m_stats.m_err_oo_late,1);
+
+ m_rx_check.Dump(stdout);
+}
+
+TEST_F(rx_check, rx_check_normal_no_aging) {
+ int i;
+
+ CRx_check_header rxh;
+ rxh.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rxh.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rxh.m_magic=RX_CHECK_MAGIC;
+ rxh.m_aging_sec=10;
+
+ rxh.set_both_dir(0);
+ rxh.m_flow_id=7;
+ rxh.m_flow_size=10;
+
+ for (i=0; i<9; i++) {
+ rxh.m_time_stamp=(uint32_t)now_sec();
+ rxh.m_pkt_id=i;
+ rxh.set_dir(0);
+ rxh.m_pkt_id=i;
+ m_rx_check.tw_handle();
+ m_rx_check.handle_packet(&rxh);
+ }
+
+ EXPECT_EQ(m_rx_check.m_stats.get_total_err(),0);
+ EXPECT_EQ(m_rx_check.m_stats.m_add,1);
+ EXPECT_EQ(m_rx_check.m_stats.m_remove,0);
+}
+
+
+///////////////////////////////////////////////////////////////
+// check the generation of the rx-check template and check a sample of it
+
+
+class CRxCheckCallbackBase {
+public:
+ virtual void handle_packet(rte_mbuf * m)=0;
+ void * obj;
+};
+
+class CRxCheckIF : public CVirtualIF {
+
+public:
+ CRxCheckIF(){
+ m_callback=NULL;
+ m_raw=NULL;
+ m_one_dir=true;
+ m_store_pcfg=false;
+ }
+public:
+
+ virtual int open_file(std::string file_name){
+ m_raw = new CCapPktRaw();
+ assert(m_raw);
+ return (0);
+ }
+
+ virtual int close_file(void){
+ assert(m_raw);
+ m_raw->raw=0;
+ delete m_raw;
+ return (0);
+ }
+
+
+ /**
+ * send one packet
+ *
+ * @param node
+ *
+ * @return
+ */
+ virtual int send_node(CGenNode * node);
+
+
+
+ /**
+ * flush all pending packets into the stream
+ *
+ * @return
+ */
+ virtual int flush_tx_queue(void){
+ return (0);
+ }
+
+
+public:
+ bool m_one_dir;
+ bool m_store_pcfg;
+ CErfIF erf_vif;
+ CCapPktRaw * m_raw;
+ CRxCheckCallbackBase * m_callback;
+};
+
+
+int CRxCheckIF::send_node(CGenNode * node){
+
+ CFlowPktInfo * lp=node->m_pkt_info;
+ rte_mbuf_t * m=lp->generate_new_mbuf(node);
+
+ /* update mac addr dest/src 12 bytes */
+ uint8_t *p=(uint8_t *)m_raw->raw;
+ uint8_t p_id = node->m_pkt_info->m_pkt_indication.m_desc.IsInitSide()?0:1;
+ memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
+
+ if ( unlikely( node->is_rx_check_enabled() ) ) {
+ lp->do_generate_new_mbuf_rxcheck(m,node,p_id,m_one_dir);
+ }
+
+ fill_pkt(m_raw,m);
+ CPktNsecTimeStamp t_c(node->m_time);
+ m_raw->time_nsec = t_c.m_time_nsec;
+ m_raw->time_sec = t_c.m_time_sec;
+ m_raw->setInterface(node->m_pkt_info->m_pkt_indication.m_desc.IsInitSide());
+
+ if (m_store_pcfg) {
+ erf_vif.write_pkt(m_raw);
+ }
+
+ if ((m_callback) && (node->is_rx_check_enabled()) ) {
+ m_callback->handle_packet(m);
+ }
+
+ // just free it
+ rte_pktmbuf_free(m);
+ return (0);
+}
+
+
+class CRxCheckBasic {
+
+public:
+ CRxCheckBasic(){
+ m_threads=1;
+ lpVf=0;
+ }
+
+ bool init(void){
+ CFlowGenList fl;
+ fl.Create();
+ fl.load_from_yaml(CGlobalInfo::m_options.cfg_file,m_threads);
+ CGlobalInfo::m_options.set_rxcheck_const_ts();
+
+ fl.generate_p_thread_info(m_threads);
+ CFlowGenListPerThread * lpt;
+ fl.m_threads_info[0]->set_vif(lpVf);
+ int i;
+ for (i=0; i<m_threads; i++) {
+ lpt=fl.m_threads_info[i];
+ lpt->generate_erf("t1",CGlobalInfo::m_options.preview);
+ }
+ fl.Delete();
+ return (true);
+ }
+
+public:
+ int m_threads;
+ CRxCheckIF * lpVf;
+};
+
+
+class CRxCheck1 : public CRxCheckCallbackBase {
+public:
+
+ virtual void handle_packet(rte_mbuf_t * m){
+ char *mp=rte_pktmbuf_mtod(m, char*);
+ CRx_check_header * rx_p;
+ rte_mbuf_t * m2 = m->next;
+ rx_p=(CRx_check_header *)rte_pktmbuf_mtod(m2, char*);
+ mg->handle_packet(rx_p);
+ //pkt->Dump(stdout,1);
+ }
+ RxCheckManager * mg;
+};
+
+
+class rx_check_system : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gtest_init_once();
+
+ m_rx_check.m_callback=&m_callback;
+ m_callback.mg =&m_mg;
+ m_mg.Create();
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(0);
+ po->preview.setFileWrite(true);
+ po->preview.set_rx_check_enable(true);
+
+ }
+
+ virtual void TearDown() {
+ m_mg.Delete();
+ }
+public:
+ CRxCheckBasic m_rxcs;
+ CRxCheckIF m_rx_check;
+ CRxCheck1 m_callback;
+ RxCheckManager m_mg;
+
+};
+
+
+// check DNS yaml with a sample rate of 1/2 and verify that there are no errors
+TEST_F(rx_check_system, rx_system1) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+
+ po->m_rx_check_sampe=2; /* sample rate */
+ po->m_duration=100;
+ po->cfg_file ="cap2/dns.yaml";
+
+ m_rxcs.init();
+ m_mg.tw_drain();
+
+ m_mg.Dump(stdout);
+
+ EXPECT_EQ(m_mg.m_stats.get_total_err(),0);
+}
+
+// check DNS with rxcheck and write the results out to a capture file
+TEST_F(rx_check_system, rx_system1_dns) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+
+ po->m_rx_check_sampe=1; /* sample rate */
+ po->m_duration=1;
+ po->cfg_file ="cap2/dns.yaml";
+ m_rx_check.m_store_pcfg=true;
+
+ m_rx_check.erf_vif.set_review_mode(&CGlobalInfo::m_options.preview);
+ m_rx_check.erf_vif.open_file("exp/dns_rxcheck.erf");
+
+ m_rxcs.init();
+ m_mg.tw_drain();
+ m_rx_check.erf_vif.close_file();
+
+ CErfCmp cmp;
+ cmp.dump=1;
+ EXPECT_EQ(cmp.compare("exp/dns_rxcheck.erf","exp/dns_rxcheck-ex.erf"),true);
+}
+
+// check DNS yaml with a sample rate of 1/4 using IPv6 packets
+TEST_F(rx_check_system, rx_system1_ipv6) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.set_ipv6_mode_enable(true);
+
+ po->m_rx_check_sampe=4; /* sample rate */
+ po->m_duration=100;
+ po->cfg_file ="cap2/dns.yaml";
+
+ m_rxcs.init();
+ m_mg.tw_drain();
+
+ m_mg.Dump(stdout);
+
+ EXPECT_EQ(m_mg.m_stats.get_total_err(),0);
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+// check DNS with rxcheck using IPv6 packets
+// and write the results out to a capture file
+TEST_F(rx_check_system, rx_system1_dns_ipv6) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+
+ po->preview.set_ipv6_mode_enable(true);
+ po->m_rx_check_sampe=1; /* sample rate */
+ po->m_duration=1;
+ po->cfg_file ="cap2/dns.yaml";
+ m_rx_check.m_store_pcfg=true;
+
+ m_rx_check.erf_vif.set_review_mode(&CGlobalInfo::m_options.preview);
+ m_rx_check.erf_vif.open_file("exp/dns_ipv6_rxcheck.erf");
+
+ m_rxcs.init();
+ m_mg.tw_drain();
+ m_rx_check.erf_vif.close_file();
+
+ CErfCmp cmp;
+ cmp.dump=1;
+ EXPECT_EQ(cmp.compare("exp/dns_ipv6_rxcheck.erf","exp/dns_ipv6_rxcheck-ex.erf"),true);
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+TEST_F(rx_check_system, rx_system2_plugin_one_dir) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+
+ po->m_rx_check_sampe=2; /* sample rate */
+ po->m_duration=100;
+ po->cfg_file ="cap2/rtsp_short1.yaml";
+
+ m_rxcs.init();
+ m_mg.tw_drain();
+
+ m_mg.Dump(stdout);
+
+ EXPECT_EQ(m_mg.m_stats.get_total_err(),0);
+}
+
+// check RTSP with rxcheck and write the results out to a capture file
+TEST_F(rx_check_system, rx_system2_plugin) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+
+ po->m_rx_check_sampe=1; /* sample rate */
+ po->m_duration=1;
+ po->cfg_file ="cap2/rtsp_short1.yaml";
+ m_rx_check.m_store_pcfg=true;
+
+ m_rx_check.erf_vif.set_review_mode(&CGlobalInfo::m_options.preview);
+ m_rx_check.erf_vif.open_file("exp/rtsp_short1_rxcheck.erf");
+
+ m_rxcs.init();
+ m_mg.tw_drain();
+ m_rx_check.erf_vif.close_file();
+
+ CErfCmp cmp;
+ cmp.dump=1;
+ EXPECT_EQ(cmp.compare("exp/rtsp_short1_rxcheck.erf","exp/rtsp_short1_rxcheck-ex.erf"),true);
+}
+
+// check RTSP with rxcheck using IPv6 packets
+// and write the results out to a capture file
+TEST_F(rx_check_system, rx_system2_plugin_ipv6) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+
+ po->preview.set_ipv6_mode_enable(true);
+ po->m_rx_check_sampe=1; /* sample rate */
+ po->m_duration=1;
+ po->cfg_file ="cap2/rtsp_short1.yaml";
+ m_rx_check.m_store_pcfg=true;
+
+ m_rx_check.erf_vif.set_review_mode(&CGlobalInfo::m_options.preview);
+ m_rx_check.erf_vif.open_file("exp/rtsp_short1_ipv6_rxcheck.erf");
+
+ m_rxcs.init();
+ m_mg.tw_drain();
+ m_rx_check.erf_vif.close_file();
+
+ CErfCmp cmp;
+ cmp.dump=1;
+ EXPECT_EQ(cmp.compare("exp/rtsp_short1_ipv6_rxcheck.erf","exp/rtsp_short1_ipv6_rxcheck-ex.erf"),true);
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+TEST_F(rx_check_system, rx_system2_plugin_two_dir) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+
+ po->m_rx_check_sampe=2; /* sample rate */
+ po->m_duration=100;
+ po->cfg_file ="cap2/rtsp_short1_slow.yaml";
+ m_rx_check.m_one_dir=false;
+
+ m_rxcs.init();
+ m_mg.tw_drain();
+
+ m_mg.Dump(stdout);
+
+ EXPECT_EQ(m_mg.m_stats.get_total_err(),0);
+}
+
+TEST_F(rx_check_system, rx_system2_plugin_two_dir_2) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+
+ po->m_rx_check_sampe=2; /* sample rate */
+ po->m_duration=100;
+ po->cfg_file ="cap2/rtsp_short1.yaml";
+ m_rx_check.m_one_dir=false;
+
+ m_rxcs.init();
+ m_mg.tw_drain();
+
+ m_mg.Dump(stdout);
+
+ EXPECT_EQ(m_mg.m_stats.get_total_err(),0);
+}
+
+TEST_F(rx_check_system, rx_system_two_dir) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+
+ po->m_rx_check_sampe=2; /* sample rate */
+ po->m_duration=100;
+ po->cfg_file ="cap2/dns.yaml";
+ m_rx_check.m_one_dir=false;
+
+ m_rxcs.init();
+ m_mg.tw_drain();
+
+ m_mg.Dump(stdout);
+
+ EXPECT_EQ(m_mg.m_stats.get_total_err(),0);
+}
+
+
+TEST_F(rx_check_system, rx_json) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+
+ po->m_rx_check_sampe=2; /* sample rate */
+ po->m_duration=100;
+ po->cfg_file ="cap2/dns.yaml";
+
+ m_rxcs.init();
+ m_mg.tw_drain();
+
+ std::string json;
+ m_mg.dump_json(json);
+ printf(" %s \n",json.c_str());
+}
+
+
+
+//////////////////////////////////////////////////////////////
+
+
+class CNatCheck1 : public CRxCheckCallbackBase {
+public:
+
+ virtual void handle_packet(rte_mbuf_t * m){
+ char *mp=rte_pktmbuf_mtod(m, char*);
+ CNatOption * option=(CNatOption *)(mp+14+20);
+ IPHeader * ipv4=(IPHeader *)(mp+14);
+ if ( ipv4->getHeaderLength()>20 ) {
+ assert(ipv4->getTimeToLive()==255);
+ /* ip option packet */
+ printf(" rx got ip option packet ! \n");
+ mg->handle_packet_ipv4(option,ipv4);
+ delay(10); // delay for queue flush
+ mg->handle_aging(); // flush the RxRing
+ }
+ }
+ CNatRxManager * mg;
+};
+
+
+
+class nat_check_system : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ gtest_init_once();
+
+ m_rx_check.m_callback=&m_callback;
+ m_callback.mg =&m_mg;
+ m_mg.Create();
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.setVMode(0);
+ po->preview.setFileWrite(true);
+ po->preview.set_lean_mode_enable(true);
+ }
+
+ virtual void TearDown() {
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.set_lean_mode_enable(false);
+ m_mg.Delete();
+ }
+public:
+ CRxCheckBasic m_rxcs;
+ CRxCheckIF m_rx_check;
+ CNatCheck1 m_callback;
+ CNatRxManager m_mg;
+
+};
+
+#if 0
+
+TEST_F(nat_check_system, nat_system1) {
+
+ m_rxcs.lpVf=&m_rx_check;
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->m_duration=2;
+ po->cfg_file ="cap2/dns.yaml";
+
+ m_rxcs.init();
+ m_mg.Dump(stdout);
+
+ //EXPECT_EQ(m_mg.m_stats.get_total_err(),0);
+}
+
+#endif
+
+//////////////////////////////////////////////////////////////
+
+class file_flow_info : public testing::Test {
+
+protected:
+ virtual void SetUp() {
+ gtest_init_once();
+ assert(m_flow_info.Create());
+ }
+
+ virtual void TearDown() {
+ m_flow_info.Delete();
+ }
+public:
+ CCapFileFlowInfo m_flow_info;
+
+};
+
+TEST_F(file_flow_info, f1) {
+ m_flow_info.load_cap_file("cap2/delay_10_rtp_250k_short.pcap",1,7) ;
+ m_flow_info.update_info();
+ //m_flow_info.Dump(stdout);
+
+ int i;
+ for (i=0; i<m_flow_info.Size(); i++) {
+ CFlowPktInfo * lp=m_flow_info.GetPacket((uint32_t)i);
+ uint16_t flow_id=lp->m_pkt_indication.m_desc.getFlowId();
+ switch (flow_id) {
+ case 0:
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.GetMaxPktsPerFlow(),23);
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.GetMaxFlowTimeout(),64);
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.IsBiDirectionalFlow(),1);
+ break;
+ case 1:
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.GetMaxPktsPerFlow(),7);
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.GetMaxFlowTimeout(),10);
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.IsBiDirectionalFlow(),0);
+
+ break;
+ case 2:
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.GetMaxPktsPerFlow(),7);
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.GetMaxFlowTimeout(),5);
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.IsBiDirectionalFlow(),0);
+
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+}
+
+TEST_F(file_flow_info, f2) {
+ m_flow_info.load_cap_file("cap2/citrix.pcap",1,0) ;
+ m_flow_info.update_info();
+
+
+ int i;
+ for (i=0; i<m_flow_info.Size(); i++) {
+ CFlowPktInfo * lp=m_flow_info.GetPacket((uint32_t)i);
+ uint16_t flow_id=lp->m_pkt_indication.m_desc.getFlowId();
+ switch (flow_id) {
+ case 0:
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.GetMaxPktsPerFlow(),271);
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.GetMaxFlowTimeout(),5);
+ break;
+ default:
+ assert(0);
+ }
+ }
+}
+
+TEST_F(file_flow_info, http_two_dir) {
+ m_flow_info.load_cap_file("avl/delay_10_http_browsing_0.pcap",1,0) ;
+ m_flow_info.update_info();
+ CFlowPktInfo * lp=m_flow_info.GetPacket((uint32_t)0);
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.IsOneDirectionalFlow(),0);
+}
+
+TEST_F(file_flow_info, one_dir) {
+
+ m_flow_info.load_cap_file("avl/delay_rtp_160k_1_1_0.pcap",1,0) ;
+ m_flow_info.update_info();
+ CFlowPktInfo * lp=m_flow_info.GetPacket((uint32_t)0);
+ EXPECT_EQ(lp->m_pkt_indication.m_desc.IsOneDirectionalFlow(),1);
+}
+
+
+
+TEST_F(file_flow_info, nat_option_check) {
+ uint8_t buffer[8];
+ CNatOption *lp=(CNatOption *)&buffer[0];
+ lp->set_init_ipv4_header();
+ lp->set_fid(0x12345678);
+ lp->set_thread_id(7);
+ lp->dump(stdout);
+ EXPECT_EQ(lp->is_valid_ipv4_magic(),true);
+
+ lp->set_init_ipv6_header();
+ lp->dump(stdout);
+ EXPECT_EQ(lp->is_valid_ipv6_magic(),true);
+}
+
+TEST_F(file_flow_info, http_add_ipv4_option) {
+ m_flow_info.load_cap_file("avl/delay_10_http_browsing_0.pcap",1,0) ;
+ m_flow_info.update_info();
+ CFlowPktInfo * lp=m_flow_info.GetPacket((uint32_t)0);
+ printf(" before the change \n");
+ //lp->Dump(stdout);
+ //lp->m_packet->Dump(stdout,1);
+ CNatOption *lpNat =(CNatOption *)lp->push_ipv4_option_offline(8);
+ lpNat->set_init_ipv4_header();
+ lpNat->set_fid(0x12345678);
+ lpNat->set_thread_id(7);
+ lp->m_pkt_indication.l3.m_ipv4->updateCheckSum();
+ m_flow_info.save_to_erf("exp/http1_with_option.pcap",true);
+
+ m_flow_info.Delete();
+ CErfCmp cmp;
+ cmp.dump=1;
+ EXPECT_EQ(cmp.compare("exp/http1_with_option.pcap","exp/http1_with_option-ex.pcap"),true);
+}
+
+TEST_F(file_flow_info, http_add_ipv6_option) {
+ /* convert it to ipv6 */
+ CParserOption * po =&CGlobalInfo::m_options;
+ po->preview.set_ipv6_mode_enable(true);
+
+ m_flow_info.load_cap_file("avl/delay_10_http_browsing_0.pcap",1,0) ;
+ m_flow_info.update_info();
+ CFlowPktInfo * lp=m_flow_info.GetPacket((uint32_t)0);
+ //lp->Dump(stdout);
+ //lp->m_packet->Dump(stdout,1);
+ CNatOption *lpNat =(CNatOption *)lp->push_ipv6_option_offline(8);
+ lpNat->set_init_ipv6_header();
+ lpNat->set_fid(0x12345678);
+ lpNat->set_thread_id(7);
+ m_flow_info.save_to_erf("exp/http1_with_option_ipv6.pcap",true);
+ m_flow_info.Delete();
+ CErfCmp cmp;
+ cmp.dump=1;
+ EXPECT_EQ(cmp.compare("exp/http1_with_option_ipv6.pcap","exp/http1_with_option_ipv6-ex.pcap"),true);
+ po->preview.set_ipv6_mode_enable(false);
+}
+
+
+
+
+
+//////////////////////////////////////////////////////////////
+
+class time_histogram : public testing::Test {
+
+protected:
+ virtual void SetUp() {
+ m_hist.Create();
+ }
+
+ virtual void TearDown() {
+ m_hist.Delete();
+ }
+public:
+ CTimeHistogram m_hist;
+};
+
+TEST_F(time_histogram, test_average) {
+ int i;
+ int j;
+ for (j=0; j<10; j++) {
+ for (i=0; i<100; i++) {
+ m_hist.Add(10e-6);
+ }
+ for (i=0; i<100; i++) {
+ m_hist.Add(10e-3);
+ }
+ m_hist.update();
+ }
+
+ EXPECT_GT(m_hist.get_average_latency(),7400.0);
+ EXPECT_LT(m_hist.get_average_latency(),7600.0);
+
+ m_hist.Dump(stdout);
+}
+
+TEST_F(time_histogram, test_json) {
+ int i;
+ int j;
+ for (j=0; j<10; j++) {
+ for (i=0; i<100; i++) {
+ m_hist.Add(10e-6);
+ }
+ for (i=0; i<100; i++) {
+ m_hist.Add(10e-3);
+ }
+ m_hist.update();
+ }
+
+ m_hist.Dump(stdout);
+ std::string json ;
+ m_hist.dump_json("myHis",json );
+ printf(" %s \n",json.c_str());
+}
+
+
+
+class gt_jitter : public testing::Test {
+
+protected:
+ virtual void SetUp() {
+ }
+
+ virtual void TearDown() {
+ }
+public:
+ CJitter m_jitter;
+};
+
+
+class gt_jitter_uint : public testing::Test {
+
+protected:
+ virtual void SetUp() {
+ }
+
+ virtual void TearDown() {
+ }
+public:
+ CJitterUint m_jitter;
+};
+
+
+
+TEST_F(gt_jitter, jitter1) {
+ int i;
+ double a=0.000030;
+ for (i=0; i<100; i++) {
+ if (i%2) {
+ a+=0.000100;
+ }else{
+ a-=0.000100;
+ }
+ m_jitter.calc(a);
+ }
+ EXPECT_EQ((uint32_t)(m_jitter.get_jitter()*1000000.0), 99);
+}
+
+TEST_F(gt_jitter_uint, jitter2) {
+ int i;
+ int32_t a=30;
+ for (i=0; i<100; i++) {
+ if (i%2) {
+ a+=20;
+ }else{
+ a-=20;
+ }
+ m_jitter.calc(a);
+ }
+ EXPECT_EQ((uint32_t)(m_jitter.get_jitter()), 19);
+}
+
+
+class gt_ring : public testing::Test {
+
+protected:
+ virtual void SetUp() {
+ }
+
+ virtual void TearDown() {
+ }
+public:
+};
+
+TEST_F(gt_ring, ring1) {
+
+ CTRingSp<uint32_t> my;
+ bool res=my.Create("a",1024,0);
+ assert(res);
+
+ int i;
+ for (i=0; i<10; i++) {
+ uint32_t *p=new uint32_t();
+ *p=i;
+ assert(my.Enqueue(p)==0);
+ }
+ for (i=0; i<10; i++) {
+ uint32_t *p;
+ assert(my.Dequeue(p)==0);
+ EXPECT_EQ_UINT32(*p, i);
+ }
+ uint32_t *p;
+ assert(my.Dequeue(p)!=0);
+
+ EXPECT_EQ(my.isEmpty(), true);
+ EXPECT_EQ(my.isFull(), false);
+
+ my.Delete();
+}
+
+
+TEST_F(gt_ring, ring2) {
+ CMessagingManager ringmg;
+ ringmg.Create(8);
+
+ int i;
+ for (i=0; i<8; i++) {
+ CNodeRing * ln=ringmg.getRingDpToCp(i);
+ assert(ln);
+ CGenNode * node=new CGenNode();
+ node->m_flow_id=i;
+ assert(ln->Enqueue(node)==0);
+ }
+
+ for (i=0; i<8; i++) {
+ CNodeRing * ln=ringmg.getRingDpToCp(i);
+ assert(ln);
+ CGenNode * node;
+ assert(ln->Dequeue(node)==0);
+ EXPECT_EQ(node->m_flow_id, i);
+ delete node;
+ }
+
+ ringmg.Delete();
+}
+
+
+
+
+
+
+
+void my_free_map_uint32_t(uint32_t *p){
+ printf("before free %d \n",*p);
+ delete p;
+}
+
+
+TEST_F(gt_ring, ring3) {
+
+ typedef CGenericMap<uint32_t,uint32_t> my_test_map;
+ my_test_map my_map;
+
+ my_map.Create();
+ int i;
+
+ uint32_t *p;
+ for (i=0; i<10;i++) {
+ p=new uint32_t(i);
+ my_map.add((uint32_t)i,p);
+ }
+
+ for (i=0; i<10;i++) {
+ p=my_map.lookup((uint32_t)i);
+ printf(" %d \n",*p);
+ }
+
+ my_map.remove_all(my_free_map_uint32_t);
+ #if 0
+ for (i=0; i<10;i++) {
+ p=my_map.remove((uint32_t)i);
+ assert(p);
+ delete p;
+ }
+ #endif
+ my_map.Delete();
+}
+
+
+class gt_conf : public testing::Test {
+
+protected:
+ virtual void SetUp() {
+ }
+
+ virtual void TearDown() {
+ }
+public:
+};
+
+
+#if 0
+TEST_F(gt_conf, t1) {
+ CPlatformYamlInfo info;
+ info.load_from_yaml_file("cfg/ex1.yaml");
+ info.Dump(stdout);
+ CPlatformSocketInfoConfig cfg;
+ cfg.Create(&info.m_platform);
+
+ cfg.set_latency_thread_is_enabled(true);
+ cfg.set_number_of_dual_ports(1);
+ cfg.set_number_of_threads_per_ports(1);
+
+
+ cfg.sanity_check();
+ cfg.dump(stdout);
+}
+
+#endif
+
+
diff --git a/src/bp_sim.cpp b/src/bp_sim.cpp
new file mode 100755
index 00000000..f81ef446
--- /dev/null
+++ b/src/bp_sim.cpp
@@ -0,0 +1,6622 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "bp_sim.h"
+#include "utl_json.h"
+#include "utl_yaml.h"
+#include "msg_manager.h"
+#include <common/basic_utils.h>
+
+
+#undef VALG
+
+#ifdef VALG
+#include <valgrind/callgrind.h>
+#endif
+
+
+CPluginCallback * CPluginCallback::callback;
+
+
+uint32_t getDualPortId(uint32_t thread_id){
+ return ( thread_id % (CGlobalInfo::m_options.get_expected_dual_ports()) );
+}
+
+
+
+CRteMemPool CGlobalInfo::m_mem_pool[MAX_SOCKETS_SUPPORTED];
+
+uint32_t CGlobalInfo::m_nodes_pool_size = 10*1024;
+CParserOption CGlobalInfo::m_options;
+CGlobalMemory CGlobalInfo::m_memory_cfg;
+CPlatformSocketInfo CGlobalInfo::m_socket;
+
+
+
+
+
+void CGlobalMemory::Dump(FILE *fd){
+ fprintf(fd," Total Memory : \n");
+
+ const std::string * names =get_mbuf_names();
+
+ uint32_t c_size=64;
+ uint32_t c_total=0;
+
+ int i=0;
+ for (i=0; i<MBUF_SIZE; i++) {
+ if ( (i>MBUF_2048) && (i<MBUF_DP_FLOWS)){
+ continue;
+ }
+ if ( i<TRAFFIC_MBUF_64 ){
+ c_total+= m_mbuf[i] *c_size;
+ c_size=c_size*2;
+ }
+
+ fprintf(fd," %-40s : %lu \n",names[i].c_str(),m_mbuf[i]);
+ }
+ c_total += (m_mbuf[MBUF_DP_FLOWS] * sizeof(CGenNode));
+
+ fprintf(fd," %-40s : %lu \n","get_each_core_dp_flows",get_each_core_dp_flows());
+ fprintf(fd," %-40s : %s \n","Total memory",double_to_human_str(c_total,"bytes",KBYE_1024).c_str() );
+}
+
+
+void CGlobalMemory::set(const CPlatformMemoryYamlInfo &info,float mul){
+ int i;
+ for (i=0; i<MBUF_SIZE; i++) {
+ m_mbuf[i]=(uint32_t)((float)info.m_mbuf[i]*mul);
+ }
+ /* no need to multiply */
+ m_mbuf[MBUF_64] += info.m_mbuf[TRAFFIC_MBUF_64];
+ m_mbuf[MBUF_128] += info.m_mbuf[TRAFFIC_MBUF_128];
+ m_mbuf[MBUF_256] += info.m_mbuf[TRAFFIC_MBUF_256];
+ m_mbuf[MBUF_512] += info.m_mbuf[TRAFFIC_MBUF_512];
+ m_mbuf[MBUF_1024] += info.m_mbuf[TRAFFIC_MBUF_1024];
+ m_mbuf[MBUF_2048] += info.m_mbuf[TRAFFIC_MBUF_2048];
+}
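+
+/* Worked example (hypothetical numbers, following the code above): with mul=2.0,
+   info.m_mbuf[MBUF_128]=16K and info.m_mbuf[TRAFFIC_MBUF_128]=8K, the resulting
+   pool is m_mbuf[MBUF_128] = 16K*2.0 + 8K = 40K; only the base pools are scaled,
+   the traffic pools are added as-is. */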
+
+
+////////////////////////////////////////
+
+
+bool CPlatformSocketInfoNoConfig::is_sockets_enable(socket_id_t socket){
+ if ( socket==0 ) {
+ return(true);
+ }
+ return (false);
+}
+
+socket_id_t CPlatformSocketInfoNoConfig::max_num_active_sockets(){
+ return (1);
+}
+
+
+socket_id_t CPlatformSocketInfoNoConfig::port_to_socket(port_id_t port){
+ return (0);
+}
+
+
+void CPlatformSocketInfoNoConfig::set_latency_thread_is_enabled(bool enable){
+ m_latency_is_enabled = enable;
+}
+
+void CPlatformSocketInfoNoConfig::set_number_of_dual_ports(uint8_t num_dual_ports){
+ m_dual_if = num_dual_ports;
+}
+
+
+void CPlatformSocketInfoNoConfig::set_number_of_threads_per_ports(uint8_t num_threads){
+ m_threads_per_dual_if = num_threads;
+}
+
+bool CPlatformSocketInfoNoConfig::sanity_check(){
+ return (true);
+}
+
+/* return the core mask */
+uint64_t CPlatformSocketInfoNoConfig::get_cores_mask(){
+
+ uint32_t cores_number = m_threads_per_dual_if*m_dual_if;
+ if ( m_latency_is_enabled ) {
+ cores_number += 2;
+ }else{
+ cores_number += 1; /* only MASTER*/
+ }
+ int i;
+ int offset=0;
+ /* master */
+ uint32_t res=1;
+ uint32_t mask=(1<<(offset+1));
+ for (i=0; i<(cores_number-1); i++) {
+ res |= mask ;
+ mask = mask <<1;
+ }
+ return (res);
+}
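+
+/* Worked example (hypothetical configuration): with 2 dual interfaces, 2 threads
+   per dual interface and the latency thread enabled, cores_number = 2*2+2 = 6,
+   so bits 0..5 are set and the mask is 0x3F (core 0 is the master, cores 1..4
+   are DP threads, core 5 is the latency thread). */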
+
+virtual_thread_id_t CPlatformSocketInfoNoConfig::thread_phy_to_virt(physical_thread_id_t phy_id){
+ return (phy_id);
+}
+
+physical_thread_id_t CPlatformSocketInfoNoConfig::thread_virt_to_phy(virtual_thread_id_t virt_id){
+ return (virt_id);
+}
+
+bool CPlatformSocketInfoNoConfig::thread_phy_is_master(physical_thread_id_t phy_id){
+ return (phy_id==0);
+}
+
+bool CPlatformSocketInfoNoConfig::thread_phy_is_latency(physical_thread_id_t phy_id){
+ return (phy_id==(m_threads_per_dual_if*m_dual_if+1));
+}
+
+
+void CPlatformSocketInfoNoConfig::dump(FILE *fd){
+ fprintf(fd," there is no configuration file given \n");
+}
+
+////////////////////////////////////////
+
+bool CPlatformSocketInfoConfig::Create(CPlatformCoresYamlInfo * platform){
+ m_platform=platform;
+ assert(m_platform);
+ assert(m_platform->m_is_exists);
+ reset();
+ return (true);
+}
+
+bool CPlatformSocketInfoConfig::init(){
+
+ /* iterate the sockets */
+ uint32_t num_threads=0;
+ uint32_t num_dual_if = m_platform->m_dual_if.size();
+
+ if ( m_num_dual_if > num_dual_if ){
+ printf("ERROR number of dual if %d is higher than defined in configuration file %d\n",
+ (int)m_num_dual_if,
+ (int)num_dual_if);
+ }
+
+ int i;
+ for (i=0; i<m_num_dual_if; i++) {
+ CPlatformDualIfYamlInfo * lp=&m_platform->m_dual_if[i];
+ if ( lp->m_socket>=MAX_SOCKETS_SUPPORTED ){
+ printf("ERROR socket %d is bigger than max %d \n",lp->m_socket,MAX_SOCKETS_SUPPORTED);
+ exit(1);
+ }
+
+ if (!m_sockets_enable[lp->m_socket] ) {
+ m_sockets_enable[lp->m_socket]=true;
+ m_sockets_enabled++;
+ }
+
+ m_socket_per_dual_if[i]=lp->m_socket;
+
+ /* learn how many threads per dual-if */
+ if (i==0) {
+ num_threads = lp->m_threads.size();
+ m_max_threads_per_dual_if = num_threads;
+ }else{
+ if (lp->m_threads.size() != num_threads) {
+ printf("ERROR number of threads per dual ports should be the same for all dual ports\n");
+ exit(1);
+ }
+ }
+
+ int j;
+
+ for (j=0; j<m_threads_per_dual_if; j++) {
+ uint8_t virt_thread = 1+ i + j*m_num_dual_if; /* virtual thread */
+ uint8_t phy_thread = lp->m_threads[j];
+
+ if (phy_thread>MAX_THREADS_SUPPORTED) {
+ printf("ERROR physical thread id is %d higher than max %d \n",phy_thread,MAX_THREADS_SUPPORTED);
+ exit(1);
+ }
+
+ if (virt_thread>MAX_THREADS_SUPPORTED) {
+ printf("ERROR virtual thread id is %d higher than max %d \n",virt_thread,MAX_THREADS_SUPPORTED);
+ exit(1);
+ }
+
+ if ( m_thread_phy_to_virtual[phy_thread] ){
+ printf("ERROR physical thread %d defined twice %d \n",phy_thread);
+ exit(1);
+ }
+ m_thread_phy_to_virtual[phy_thread]=virt_thread;
+ m_thread_virt_to_phy[virt_thread] =phy_thread;
+ }
+ }
+
+ if ( m_thread_phy_to_virtual[m_platform->m_master_thread] ){
+ printf("ERROR physical master thread %d already defined \n",m_platform->m_master_thread);
+ exit(1);
+ }
+
+ if ( m_thread_phy_to_virtual[m_platform->m_latency_thread] ){
+ printf("ERROR physical latency thread %d already defined \n",m_platform->m_latency_thread);
+ exit(1);
+ }
+
+ if (m_max_threads_per_dual_if < m_threads_per_dual_if ) {
+ printf("ERROR number of threads asked per dual if is %d lower than max %d \n",
+ (int)m_threads_per_dual_if,
+ (int)m_max_threads_per_dual_if);
+ exit(1);
+ }
+ return (true);
+}
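+
+/* Worked example (hypothetical configuration): with m_num_dual_if=2 and
+   m_threads_per_dual_if=2, virt_thread = 1 + i + j*m_num_dual_if yields
+   dual-if 0/thread 0 -> 1, dual-if 1/thread 0 -> 2, dual-if 0/thread 1 -> 3,
+   dual-if 1/thread 1 -> 4, i.e. virtual ids interleave the dual interfaces
+   per thread index. */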
+
+
+void CPlatformSocketInfoConfig::dump(FILE *fd){
+ fprintf(fd," core_mask %x \n",get_cores_mask());
+ fprintf(fd," sockets :");
+ int i;
+ for (i=0; i<MAX_SOCKETS_SUPPORTED; i++) {
+ if ( is_sockets_enable(i) ){
+ fprintf(fd," %d ",i);
+ }
+ }
+ fprintf(fd," \n");
+ fprintf(fd," active sockets : %d \n",max_num_active_sockets());
+
+ fprintf(fd," ports_sockets : \n",max_num_active_sockets());
+
+ for (i=0; i<(MAX_LATENCY_PORTS); i++) {
+ fprintf(fd,"%d,",port_to_socket(i));
+ }
+ fprintf(fd,"\n");
+
+ fprintf(fd," phy | virt \n");
+ for (i=0; i<MAX_THREADS_SUPPORTED; i++) {
+ virtual_thread_id_t virt=thread_phy_to_virt(i);
+ if ( virt ){
+ fprintf(fd," %d %d \n",i,virt);
+ }
+ }
+}
+
+
+void CPlatformSocketInfoConfig::reset(){
+ m_sockets_enabled=0;
+ int i;
+ for (i=0; i<MAX_SOCKETS_SUPPORTED; i++) {
+ m_sockets_enable[i]=false;
+ }
+
+ for (i=0; i<MAX_THREADS_SUPPORTED; i++) {
+ m_thread_virt_to_phy[i]=0;
+ }
+ for (i=0; i<MAX_THREADS_SUPPORTED; i++) {
+ m_thread_phy_to_virtual[i]=0;
+ }
+ for (i=0; i<(MAX_LATENCY_PORTS>>1); i++) {
+ m_socket_per_dual_if[i]=0;
+ }
+
+ m_num_dual_if=0;
+
+ m_threads_per_dual_if=0;
+ m_latency_is_enabled=false;
+ m_max_threads_per_dual_if=0;
+}
+
+
+void CPlatformSocketInfoConfig::Delete(){
+
+}
+
+bool CPlatformSocketInfoConfig::is_sockets_enable(socket_id_t socket){
+ assert(socket<MAX_SOCKETS_SUPPORTED);
+ return ( m_sockets_enable[socket] );
+}
+
+socket_id_t CPlatformSocketInfoConfig::max_num_active_sockets(){
+ return ((socket_id_t)m_sockets_enabled);
+}
+
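+/* ports 2k and 2k+1 form dual-if k, so a port inherits the socket of its dual-if */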
+socket_id_t CPlatformSocketInfoConfig::port_to_socket(port_id_t port){
+ return ( m_socket_per_dual_if[(port>>1)]);
+}
+
+void CPlatformSocketInfoConfig::set_latency_thread_is_enabled(bool enable){
+ m_latency_is_enabled =enable;
+}
+
+void CPlatformSocketInfoConfig::set_number_of_dual_ports(uint8_t num_dual_ports){
+ m_num_dual_if = num_dual_ports;
+}
+
+void CPlatformSocketInfoConfig::set_number_of_threads_per_ports(uint8_t num_threads){
+ m_threads_per_dual_if =num_threads;
+}
+
+bool CPlatformSocketInfoConfig::sanity_check(){
+ return (init());
+}
+
+/* return the core mask */
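+/* illustrative example: data threads {1,2} plus master thread 0 and latency thread 3 -> mask 0xF */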
+uint64_t CPlatformSocketInfoConfig::get_cores_mask(){
+ int i;
+ uint64_t mask=0;
+ for (i=0; i<MAX_THREADS_SUPPORTED; i++) {
+ if ( m_thread_phy_to_virtual[i] ) {
+
+ if (i>=64) {
+ printf(" ERROR physical thread id must be lower than 64 \n");
+ exit(1);
+ }
+ mask |=(((uint64_t)1)<<i);
+ }
+ }
+
+ mask |=(((uint64_t)1)<<m_platform->m_master_thread);
+ assert(m_platform->m_master_thread<64);
+ if (m_latency_is_enabled) {
+ mask |=(((uint64_t)1)<<m_platform->m_latency_thread);
+ assert(m_platform->m_latency_thread<64);
+ }
+ return (mask);
+}
+
+virtual_thread_id_t CPlatformSocketInfoConfig::thread_phy_to_virt(physical_thread_id_t phy_id){
+ return (m_thread_phy_to_virtual[phy_id]);
+}
+
+physical_thread_id_t CPlatformSocketInfoConfig::thread_virt_to_phy(virtual_thread_id_t virt_id){
+ return ( m_thread_virt_to_phy[virt_id]);
+}
+
+bool CPlatformSocketInfoConfig::thread_phy_is_master(physical_thread_id_t phy_id){
+ return (m_platform->m_master_thread==phy_id?true:false);
+}
+
+bool CPlatformSocketInfoConfig::thread_phy_is_latency(physical_thread_id_t phy_id){
+ return (m_platform->m_latency_thread == phy_id?true:false);
+}
+
+
+
+////////////////////////////////////////
+
+
+bool CPlatformSocketInfo::Create(CPlatformCoresYamlInfo * platform){
+ if ( (platform) && (platform->m_is_exists) ) {
+ CPlatformSocketInfoConfig * lp=new CPlatformSocketInfoConfig();
+ assert(lp);
+ lp->Create(platform);
+ m_obj= lp;
+ }else{
+ m_obj= new CPlatformSocketInfoNoConfig();
+ }
+ return(true);
+}
+
+void CPlatformSocketInfo::Delete(){
+ if ( m_obj ){
+ delete m_obj;
+ m_obj=NULL;
+ }
+}
+
+bool CPlatformSocketInfo::is_sockets_enable(socket_id_t socket){
+ return ( m_obj->is_sockets_enable(socket) );
+}
+
+socket_id_t CPlatformSocketInfo::max_num_active_sockets(){
+ return ( m_obj->max_num_active_sockets() );
+}
+
+
+socket_id_t CPlatformSocketInfo::port_to_socket(port_id_t port){
+ return ( m_obj->port_to_socket(port) );
+}
+
+
+void CPlatformSocketInfo::set_latency_thread_is_enabled(bool enable){
+ m_obj->set_latency_thread_is_enabled(enable);
+}
+
+void CPlatformSocketInfo::set_number_of_dual_ports(uint8_t num_dual_ports){
+ m_obj->set_number_of_dual_ports(num_dual_ports);
+}
+
+void CPlatformSocketInfo::set_number_of_threads_per_ports(uint8_t num_threads){
+ m_obj->set_number_of_threads_per_ports(num_threads);
+}
+
+bool CPlatformSocketInfo::sanity_check(){
+ return ( m_obj->sanity_check());
+}
+
+/* return the core mask */
+uint64_t CPlatformSocketInfo::get_cores_mask(){
+ return ( m_obj->get_cores_mask());
+}
+
+virtual_thread_id_t CPlatformSocketInfo::thread_phy_to_virt(physical_thread_id_t phy_id){
+ return ( m_obj->thread_phy_to_virt(phy_id));
+}
+
+physical_thread_id_t CPlatformSocketInfo::thread_virt_to_phy(virtual_thread_id_t virt_id){
+ return ( m_obj->thread_virt_to_phy(virt_id));
+}
+
+bool CPlatformSocketInfo::thread_phy_is_master(physical_thread_id_t phy_id){
+ return ( m_obj->thread_phy_is_master(phy_id));
+}
+
+bool CPlatformSocketInfo::thread_phy_is_latency(physical_thread_id_t phy_id){
+ return ( m_obj->thread_phy_is_latency(phy_id));
+}
+
+void CPlatformSocketInfo::dump(FILE *fd){
+ m_obj->dump(fd);
+}
+
+////////////////////////////////////////
+
+
+void CRteMemPool::dump_in_case_of_error(FILE *fd){
+ fprintf(fd," ERROR ERROR there is no enough memory in socket %d \n",m_pool_id);
+ fprintf(fd," Try to enlarge the memory values in the configuration file /etc/trex_cfg.yaml \n");
+ dump(fd);
+}
+
+
+void CRteMemPool::dump(FILE *fd){
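+ /* DUMP_MBUF prints the percentage of free entries left in each pool;
+ a pool that dropped below 5% free is flagged with "<-" */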
+ #define DUMP_MBUF(a,b) { float p=(100.0*(float)rte_mempool_count(b)/(float)b->size); fprintf(fd," %-30s : %.2f %% %s \n",a,p,(p<5.0?"<-":"OK") ); }
+
+ DUMP_MBUF("mbuf_64",m_small_mbuf_pool);
+ DUMP_MBUF("mbuf_128",m_mbuf_pool_128);
+ DUMP_MBUF("mbuf_256",m_mbuf_pool_256);
+ DUMP_MBUF("mbuf_512",m_mbuf_pool_512);
+ DUMP_MBUF("mbuf_1024",m_mbuf_pool_1024);
+ DUMP_MBUF("mbuf_2048",m_big_mbuf_pool);
+}
+////////////////////////////////////////
+
+void CGlobalInfo::init_pools(uint32_t rx_buffers){
+ /* this include the pkt from 64- */
+ CGlobalMemory * lp=&CGlobalInfo::m_memory_cfg;
+ CPlatformSocketInfo * lpSocket =&m_socket;
+
+ CRteMemPool * lpmem;
+
+ int i;
+ for (i=0; i<(int)MAX_SOCKETS_SUPPORTED; i++) {
+ if (lpSocket->is_sockets_enable((socket_id_t)i)) {
+ lpmem= &m_mem_pool[i];
+ lpmem->m_pool_id=i;
+
+ lpmem->m_big_mbuf_pool = utl_rte_mempool_create("big-pkt-const",
+ (lp->get_2k_num_blocks()+rx_buffers),
+ CONST_MBUF_SIZE,
+ 32,
+ (i<<5)+ 1,i);
+ assert(lpmem->m_big_mbuf_pool);
+
+ /* this includes packets of 0-64 bytes - the pool for small packets */
+ lpmem->m_small_mbuf_pool =utl_rte_mempool_create("small-pkt-const",
+ lp->m_mbuf[MBUF_64],
+ CONST_SMALL_MBUF_SIZE,
+ 32,(i<<5)+ 2,i);
+ assert(lpmem->m_small_mbuf_pool);
+
+
+
+
+ lpmem->m_mbuf_pool_128=utl_rte_mempool_create("_128-pkt-const",
+ lp->m_mbuf[MBUF_128],
+ CONST_128_MBUF_SIZE,
+ 32,(i<<5)+ 6,i);
+
+
+ assert(lpmem->m_mbuf_pool_128);
+
+
+ lpmem->m_mbuf_pool_256=utl_rte_mempool_create("_256-pkt-const",
+ lp->m_mbuf[MBUF_256],
+ CONST_256_MBUF_SIZE,
+ 32,(i<<5)+ 3,i);
+
+ assert(lpmem->m_mbuf_pool_256);
+
+ lpmem->m_mbuf_pool_512=utl_rte_mempool_create("_512_-pkt-const",
+ lp->m_mbuf[MBUF_512],
+ CONST_512_MBUF_SIZE,
+ 32,(i<<5)+ 4,i);
+ assert(lpmem->m_mbuf_pool_512);
+
+ lpmem->m_mbuf_pool_1024=utl_rte_mempool_create("_1024-pkt-const",
+ lp->m_mbuf[MBUF_1024],
+ CONST_1024_MBUF_SIZE,
+ 32,(i<<5)+ 5,i);
+
+ assert(lpmem->m_mbuf_pool_1024);
+
+
+ }
+ }
+
+ /* global always from socket 0 */
+ m_mem_pool[0].m_mbuf_global_nodes = utl_rte_mempool_create_non_pkt("global-nodes",
+ lp->m_mbuf[MBUF_GLOBAL_FLOWS],
+ sizeof(CGenNode),
+ 128,
+ 0 ,
+ SOCKET_ID_ANY);
+
+ assert(m_mem_pool[0].m_mbuf_global_nodes);
+
+
+}
+
+
+
+void CFlowYamlInfo::Dump(FILE *fd){
+ fprintf(fd,"name : %s \n",m_name.c_str());
+ fprintf(fd,"cps : %f \n",m_k_cps);
+ fprintf(fd,"ipg : %f \n",m_ipg_sec);
+ fprintf(fd,"rtt : %f \n",m_rtt_sec);
+ fprintf(fd,"w : %d \n",m_w);
+ fprintf(fd,"wlength : %d \n",m_wlength);
+ fprintf(fd,"limit : %d \n",m_limit);
+ fprintf(fd,"limit_was_set : %d \n",m_limit_was_set?1:0);
+ fprintf(fd,"cap_mode : %d \n",m_cap_mode?1:0);
+ fprintf(fd,"plugin_id : %d \n",m_plugin_id);
+ fprintf(fd,"one_server : %d \n",m_one_app_server?1:0);
+ fprintf(fd,"one_server_was_set : %d \n",m_one_app_server_was_set?1:0);
+ if (m_dpPkt) {
+ m_dpPkt->Dump(fd);
+ }
+}
+
+
+
+
+void dump_mac_addr(FILE* fd,uint8_t *p){
+ int i;
+ for (i=0; i<6; i++) {
+ uint8_t a=p[i];
+ if (i==5) {
+ fprintf(fd,"%02x",a);
+ }else{
+ fprintf(fd,"%02x:",a);
+ }
+ }
+
+}
+
+
+
+static uint8_t human_tbl[]={
+ ' ',
+ 'K',
+ 'M',
+ 'G',
+ 'T'
+};
+
+std::string double_to_human_str(double num,
+ std::string units,
+ human_kbyte_t etype){
+ double abs_num=num;
+ if (num<0.0) {
+ abs_num=-num;
+ }
+ int i=0;
+ int max_cnt=sizeof(human_tbl)/sizeof(human_tbl[0]);
+ double div =1.0;
+ double f=1000.0;
+ if (etype ==KBYE_1024){
+ f=1024.0;
+ }
+ while ((abs_num > f ) && (i < max_cnt-1)){
+ abs_num/=f;
+ div*=f;
+ i++;
+ }
+
+ char buf [100];
+ sprintf(buf,"%10.2f %c%s",num/div,human_tbl[i],units.c_str());
+ std::string res(buf);
+ return (res);
+}
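+/* usage sketch (illustrative values): double_to_human_str(2500000.0,"bps",KBYE_1000)
+ returns roughly " 2.50 Mbps"; with KBYE_1024 the divisor is 1024 instead of 1000 */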
+
+
+void CPreviewMode::Dump(FILE *fd){
+ fprintf(fd," flags : %x\n", m_flags);
+ fprintf(fd," write_file : %d\n", getFileWrite()?1:0);
+ fprintf(fd," verbose : %d\n", (int)getVMode() );
+ fprintf(fd," realtime : %d\n", (int)getRealTime() );
+ fprintf(fd," flip : %d\n", (int)getClientServerFlip() );
+ fprintf(fd," cores : %d\n", (int)getCores() );
+ fprintf(fd," single core : %d\n", (int)getSingleCore() );
+ fprintf(fd," flow-flip : %d\n", (int)getClientServerFlowFlip() );
+ fprintf(fd," no clean close : %d\n", (int)getNoCleanFlowClose() );
+ fprintf(fd," 1g mode : %d\n", (int)get_1g_mode() );
+ fprintf(fd," zmq_publish : %d\n", (int)get_zmq_publish_enable() );
+ fprintf(fd," vlan_enable : %d\n", (int)get_vlan_mode_enable() );
+ fprintf(fd," mbuf_cache_disable : %d\n", (int)isMbufCacheDisabled() );
+ fprintf(fd," mac_ip_features : %d\n", (int)get_mac_ip_features_enable()?1:0 );
+ fprintf(fd," mac_ip_map : %d\n", (int)get_mac_ip_mapping_enable()?1:0 );
+ fprintf(fd," vm mode : %d\n", (int)get_vm_one_queue_enable()?1:0 );
+}
+
+void CFlowGenStats::clear(){
+ m_total_bytes=0;
+ m_total_pkt=0;
+ m_total_open_flows =0;
+ m_total_close_flows =0;
+ m_nat_lookup_no_flow_id=0;
+ m_nat_lookup_remove_flow_id=0;
+ m_nat_lookup_add_flow_id=0;
+ m_nat_flow_timeout=0;
+ m_nat_flow_learn_error=0;
+}
+
+void CFlowGenStats::dump(FILE *fd){
+ std::string s_bytes=double_to_human_str((double )(m_total_bytes),
+ "bytes",
+ KBYE_1024);
+
+ std::string s_pkt=double_to_human_str((double )(m_total_pkt),
+ "pkt",
+ KBYE_1000);
+
+ std::string s_flows=double_to_human_str((double )(m_total_open_flows),
+ "flows",
+ KBYE_1000);
+
+ DP_S(m_total_bytes,s_bytes);
+ DP_S(m_total_pkt,s_pkt);
+ DP_S(m_total_open_flows,s_flows);
+ DP(m_total_pkt);
+ DP(m_total_open_flows);
+ DP(m_total_close_flows);
+ DP_name("active",(m_total_open_flows-m_total_close_flows));
+ DP(m_total_bytes);
+
+ DP(m_nat_lookup_no_flow_id);
+ DP(m_nat_lookup_remove_flow_id);
+ DP(m_nat_lookup_add_flow_id);
+ DP(m_nat_flow_timeout);
+ DP_name("active_nat",(m_nat_lookup_add_flow_id-m_nat_lookup_remove_flow_id));
+ DP(m_nat_flow_learn_error);
+}
+
+
+
+int CErfIF::open_file(std::string file_name){
+ BP_ASSERT(m_writer==0);
+
+ if ( m_preview_mode->getFileWrite() ){
+ capture_type_e file_type=ERF;
+ if ( m_preview_mode->get_pcap_mode_enable() ){
+ file_type=LIBPCAP;
+ }
+ m_writer = CCapWriterFactory::CreateWriter(file_type,(char *)file_name.c_str());
+ if (m_writer == NULL) {
+ fprintf(stderr,"ERROR can't create cap file %s ",(char *)file_name.c_str());
+ return (-1);
+ }
+ }
+ m_raw = new CCapPktRaw();
+ return (0);
+}
+
+
+int CErfIF::write_pkt(CCapPktRaw *pkt_raw){
+
+ BP_ASSERT(m_writer);
+
+ if ( m_preview_mode->getFileWrite() ){
+ BP_ASSERT(m_writer);
+ bool res=m_writer->write_packet(pkt_raw);
+ if (res != true) {
+ fprintf(stderr,"ERROR can't write to cap file");
+ return (-1);
+ }
+ }
+ return (0);
+}
+
+
+int CErfIF::close_file(void){
+
+ BP_ASSERT(m_raw);
+ m_raw->raw=0;
+ delete m_raw;
+
+ if ( m_preview_mode->getFileWrite() ){
+ BP_ASSERT(m_writer);
+ delete m_writer;
+ m_writer=0;
+ }
+ return (0);
+}
+
+
+
+void CFlowKey::Clean(){
+ m_ipaddr1=0;
+ m_ipaddr2=0;
+ m_port1=0;
+ m_port2=0;
+ m_ip_proto=0;
+ m_l2_proto=0;
+ m_vrfid=0;
+}
+
+void CFlowKey::Dump(FILE *fd){
+ fprintf(fd," %x:%x:%x:%x:%x:%x:%x\n",m_ipaddr1,m_ipaddr2,m_port1,m_port2,m_ip_proto,m_l2_proto,m_vrfid);
+}
+
+
+
+void CPacketDescriptor::Dump(FILE *fd){
+ fprintf(fd," IsSwapTuple : %d \n",IsSwapTuple()?1:0);
+ fprintf(fd," IsSInitDir : %d \n",IsInitSide()?1:0);
+ fprintf(fd," Isvalid : %d ",IsValidPkt()?1:0);
+ fprintf(fd," IsRtt : %d ",IsRtt()?1:0);
+ fprintf(fd," IsLearn : %d ",IsLearn()?1:0);
+
+ if (IsTcp() ) {
+ fprintf(fd," TCP ");
+ }else{
+ fprintf(fd," UDP ");
+ }
+ fprintf(fd," IsLast Pkt : %d ", IsLastPkt() ?1:0);
+ fprintf(fd," id : %d \n",getId() );
+
+ fprintf(fd," flow_ID : %d , max_pkts : %u, max_aging: %d sec , pkt_id : %u, init: %d ( dir:%d dir_max :%d ) bid:%d \n",getFlowId(),
+ GetMaxPktsPerFlow(),
+ GetMaxFlowTimeout() ,
+ getFlowPktNum(),
+ IsInitSide(),
+ GetDirInfo()->GetPktNum(),
+ GetDirInfo()->GetMaxPkts(),
+ IsBiDirectionalFlow()?1:0
+
+ );
+ fprintf(fd,"\n");
+}
+
+
+void CPacketIndication::UpdateOffsets(){
+ m_ether_offset = getEtherOffset();
+ m_ip_offset = getIpOffset();
+ m_udp_tcp_offset = getTcpOffset();
+ m_payload_offset = getPayloadOffset();
+}
+
+void CPacketIndication::UpdatePacketPadding(){
+ m_packet_padding = m_packet->getTotalLen() - (l3.m_ipv4->getTotalLength()+ getIpOffset());
+}
+
+
+void CPacketIndication::RefreshPointers(){
+
+ char *pobase=getBasePtr();
+ CPacketIndication * obj=this;
+
+ m_ether = (EthernetHeader *) (pobase + m_ether_offset);
+ l3.m_ipv4 = (IPHeader *) (pobase + m_ip_offset);
+ l4.m_tcp= (TCPHeader *)(pobase + m_udp_tcp_offset);
+ if ( m_payload_offset ){
+ m_payload =(uint8_t *)(pobase + m_payload_offset);
+ }else{
+ m_payload =(uint8_t *)(0);
+ }
+}
+
+// clone by reference; assumes pkt points to a fresh copy of the raw packet
+void CPacketIndication::Clone(CPacketIndication * obj,CCapPktRaw * pkt){
+ Clean();
+ m_cap_ipg = obj->m_cap_ipg;
+ m_packet = pkt;
+ char *pobase=getBasePtr();
+ m_flow = obj->m_flow;
+
+ m_ether = (EthernetHeader *) (pobase + obj->getEtherOffset());
+ l3.m_ipv4 = (IPHeader *) (pobase + obj->getIpOffset());
+ m_is_ipv6 = obj->m_is_ipv6;
+ l4.m_tcp= (TCPHeader *)(pobase + obj->getTcpOffset());
+ if ( obj->getPayloadOffset() ){
+ m_payload =(uint8_t *)(pobase + obj->getPayloadOffset());
+ }else{
+ m_payload =(uint8_t *)(0);
+ }
+ m_payload_len = obj->m_payload_len;
+ m_flow_key = obj->m_flow_key;
+ m_desc = obj->m_desc;
+
+ m_packet_padding = obj->m_packet_padding;
+ /* copy offsets*/
+ m_ether_offset = obj->m_ether_offset;
+ m_ip_offset = obj->m_ip_offset;
+ m_udp_tcp_offset = obj->m_udp_tcp_offset;
+ m_payload_offset = obj->m_payload_offset;
+}
+
+
+
+void CPacketIndication::Dump(FILE *fd,int verbose){
+ fprintf(fd," ipg : %f \n",m_cap_ipg);
+ fprintf(fd," key \n");
+ fprintf(fd," ------\n");
+ m_flow_key.Dump(fd);
+
+ fprintf(fd," L2 info \n");
+ fprintf(fd," ------\n");
+ m_packet->Dump(fd,verbose);
+
+ fprintf(fd," Descriptor \n");
+ fprintf(fd," ------\n");
+ m_desc.Dump(fd);
+
+ if ( m_desc.IsValidPkt() ) {
+ fprintf(fd," ipv4 \n");
+ l3.m_ipv4->dump(fd);
+ if ( m_desc.IsUdp() ) {
+ l4.m_udp->dump(fd);
+ }else{
+ l4.m_tcp->dump(fd);
+ }
+ fprintf(fd," payload len : %d \n",m_payload_len);
+ }else{
+ fprintf(fd," not valid packet \n");
+ }
+}
+
+void CPacketIndication::Clean(){
+ m_desc.Clear();
+ m_ether=0;
+ l3.m_ipv4=0;
+ l4.m_tcp=0;
+ m_payload=0;
+ m_payload_len=0;
+}
+
+
+
+uint64_t CCPacketParserCounters::getTotalErrors(){
+ uint64_t res=
+ m_non_ip+
+ m_arp+
+ m_mpls+
+ m_non_valid_ipv4_ver+
+ m_ip_checksum_error+
+ m_ip_length_error+
+ m_ip_not_first_fragment_error+
+ m_ip_ttl_is_zero_error+
+ m_ip_multicast_error+
+
+ m_non_tcp_udp_ah+
+ m_non_tcp_udp_esp+
+ m_non_tcp_udp_icmp+
+ m_non_tcp_udp_gre+
+ m_non_tcp_udp_ip+
+ m_tcp_udp_pkt_length_error;
+ return (res);
+}
+
+void CCPacketParserCounters::Clear(){
+ m_pkt=0;
+ m_non_ip=0;
+ m_vlan=0;
+ m_arp=0;
+ m_mpls=0;
+
+ m_non_valid_ipv4_ver=0;
+ m_ip_checksum_error=0;
+ m_ip_length_error=0;
+ m_ip_not_first_fragment_error=0;
+ m_ip_ttl_is_zero_error=0;
+ m_ip_multicast_error=0;
+ m_ip_header_options=0;
+
+ m_non_tcp_udp=0;
+ m_non_tcp_udp_ah=0;
+ m_non_tcp_udp_esp=0;
+ m_non_tcp_udp_icmp=0;
+ m_non_tcp_udp_gre=0;
+ m_non_tcp_udp_ip=0;
+ m_tcp_header_options=0;
+ m_tcp_udp_pkt_length_error=0;
+ m_tcp=0;
+ m_udp=0;
+ m_valid_udp_tcp=0;
+}
+
+
+void CCPacketParserCounters::Dump(FILE *fd){
+
+ DP (m_pkt);
+ DP (m_non_ip);
+ DP (m_vlan);
+ DP (m_arp);
+ DP (m_mpls);
+
+ DP (m_non_valid_ipv4_ver);
+ DP (m_ip_checksum_error);
+ DP (m_ip_length_error);
+ DP (m_ip_not_first_fragment_error);
+ DP (m_ip_ttl_is_zero_error);
+ DP (m_ip_multicast_error);
+ DP (m_ip_header_options);
+
+ DP (m_non_tcp_udp);
+ DP (m_non_tcp_udp_ah);
+ DP (m_non_tcp_udp_esp);
+ DP (m_non_tcp_udp_icmp);
+ DP (m_non_tcp_udp_gre);
+ DP (m_non_tcp_udp_ip);
+ DP (m_tcp_header_options);
+ DP (m_tcp_udp_pkt_length_error);
+ DP (m_tcp);
+ DP (m_udp);
+ DP (m_valid_udp_tcp);
+}
+
+
+bool CPacketParser::Create(){
+ m_counter.Clear();
+ return (true);
+}
+
+void CPacketParser::Delete(){
+}
+
+
+bool CPacketParser::ProcessPacket(CPacketIndication * pkt_indication,
+ CCapPktRaw * raw_packet){
+ BP_ASSERT(pkt_indication);
+ pkt_indication->ProcessPacket(this,raw_packet);
+ if (pkt_indication->m_desc.IsValidPkt()) {
+ return (true);
+ }
+ return (false);
+}
+
+void CPacketParser::Dump(FILE *fd){
+ fprintf(fd," parser statistic \n");
+ fprintf(fd," ===================== \n");
+ m_counter.Dump(fd);
+}
+
+
+void CPacketIndication::SetKey(void){
+ uint32_t ip_src, ip_dst;
+
+ m_desc.SetIsValidPkt(true);
+ if (is_ipv6()){
+ uint16_t ipv6_src[8];
+ uint16_t ipv6_dst[8];
+
+ l3.m_ipv6->getSourceIpv6(&ipv6_src[0]);
+ l3.m_ipv6->getDestIpv6(&ipv6_dst[0]);
+ ip_src=(ipv6_src[6] << 16) | ipv6_src[7];
+ ip_dst=(ipv6_dst[6] << 16) | ipv6_dst[7];
+ m_flow_key.m_ip_proto = l3.m_ipv6->getNextHdr();
+ }else{
+ ip_src=l3.m_ipv4->getSourceIp();
+ ip_dst=l3.m_ipv4->getDestIp();
+ m_flow_key.m_ip_proto = l3.m_ipv4->getProtocol();
+ }
+
+ /* the UDP and TCP port fields are at the same offsets */
+ uint16_t src_port = l4.m_udp->getSourcePort();
+ uint16_t dst_port = l4.m_udp->getDestPort();
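+ /* canonicalize the tuple so both directions of a flow map to the same key
+ (the larger of the two IPs ends up in m_ipaddr2); the swap flag records
+ which branch was taken so the original direction can be recovered */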
+ if (ip_src > ip_dst ) {
+ m_flow_key.m_ipaddr1 =ip_dst;
+ m_flow_key.m_ipaddr2 =ip_src;
+ m_flow_key.m_port1 = dst_port;
+ m_flow_key.m_port2 = src_port;
+ }else{
+ m_desc.SetSwapTuple(true);
+ m_flow_key.m_ipaddr1 = ip_src;
+ m_flow_key.m_ipaddr2 = ip_dst;
+ m_flow_key.m_port1 = src_port;
+ m_flow_key.m_port2 = dst_port;
+ }
+ m_flow_key.m_l2_proto = 0;
+ m_flow_key.m_vrfid = 0;
+}
+
+uint8_t CPacketIndication::ProcessIpPacketProtocol(CCPacketParserCounters *m_cnt,
+ uint8_t protocol, int *offset){
+
+ char * packetBase = m_packet->raw;
+ TCPHeader * tcp=0;
+ UDPHeader * udp=0;
+ uint16_t tcp_header_len=0;
+
+ switch (protocol) {
+ case IPHeader::Protocol::TCP :
+ m_desc.SetIsTcp(true);
+ tcp =(TCPHeader *)(packetBase +*offset);
+ l4.m_tcp = tcp;
+
+ tcp_header_len = tcp->getHeaderLength();
+ if ( tcp_header_len > (5*4) ){
+ // TCP options are present
+ m_cnt->m_tcp_header_options++;
+ }
+ *offset += tcp_header_len;
+ m_cnt->m_tcp++;
+ break;
+ case IPHeader::Protocol::UDP :
+ m_desc.SetIsUdp(true);
+ udp =(UDPHeader *)(packetBase +*offset);
+ l4.m_udp = udp;
+ *offset += 8;
+ m_cnt->m_udp++;
+ break;
+ case IPHeader::Protocol::AH:
+ m_cnt->m_non_tcp_udp_ah++;
+ return (1);
+ break;
+ case IPHeader::Protocol::ESP:
+ m_cnt->m_non_tcp_udp_esp++;
+ return (1);
+ break;
+ case IPHeader::Protocol::ICMP:
+ case IPHeader::Protocol::IPV6_ICMP:
+ m_cnt->m_non_tcp_udp_icmp++;
+ return (1);
+ break;
+ case IPHeader::Protocol::GRE:
+ m_cnt->m_non_tcp_udp_gre++;
+ return (1);
+ break;
+ case IPHeader::Protocol::IP:
+ m_cnt->m_non_ip++;
+ return (1);
+ break;
+
+ default:
+ m_cnt->m_non_tcp_udp++;
+ return (1);
+ break;
+ }
+
+ /* out of packet */
+ if ( *offset > m_packet->getTotalLen() ) {
+ m_cnt->m_tcp_udp_pkt_length_error++;
+ return (1);
+ }
+ return (0);
+}
+
+
+void CPacketIndication::ProcessIpPacket(CPacketParser *parser,
+ int offset){
+
+ char * packetBase;
+ CCPacketParserCounters * m_cnt=&parser->m_counter;
+ packetBase = m_packet->raw;
+ uint8_t protocol;
+ BP_ASSERT(l3.m_ipv4);
+
+ parser->m_counter.m_pkt++;
+
+ if ( l3.m_ipv4->getVersion() == 4 ){
+ m_cnt->m_ipv4++;
+ }else{
+ m_cnt->m_non_valid_ipv4_ver++;
+ return;
+ }
+ // check the IP Length
+ if( (uint32_t)(l3.m_ipv4->getTotalLength()+offset) > (uint32_t)( m_packet->getTotalLen()) ){
+ m_cnt->m_ip_length_error++;
+ return;
+ }
+
+ uint16_t ip_offset=offset;
+ uint16_t ip_header_length = l3.m_ipv4->getHeaderLength();
+
+ if ( ip_header_length >(5*4) ){
+ m_cnt->m_ip_header_options++;
+ }
+
+ if ( (uint32_t)(ip_header_length + offset) > (uint32_t)m_packet->getTotalLen() ) {
+ m_cnt->m_ip_length_error++;
+ return;
+ }
+ offset += ip_header_length;
+
+ if (!l3.m_ipv4->isChecksumOK() ){
+ m_cnt->m_ip_checksum_error++;
+ }
+ if( l3.m_ipv4->isMulticast() ){
+ m_cnt->m_ip_multicast_error++;
+ return;
+ }
+
+ if( l3.m_ipv4->getTimeToLive() ==0 ){
+ m_cnt->m_ip_ttl_is_zero_error++;
+ return;
+ }
+
+ if( l3.m_ipv4->isNotFirstFragment() ) {
+ m_cnt->m_ip_not_first_fragment_error++;
+ return;
+ }
+
+ protocol = l3.m_ipv4->getProtocol();
+ if (ProcessIpPacketProtocol(m_cnt,protocol,&offset) != 0) {
+ return;
+ };
+
+ uint16_t payload_offset_from_ip = offset-ip_offset;
+ if ( payload_offset_from_ip > l3.m_ipv4->getTotalLength() ) {
+ m_cnt->m_tcp_udp_pkt_length_error++;
+ return;
+ }
+
+ // Set packet length and include padding if needed
+ m_packet->pkt_len = l3.m_ipv4->getTotalLength() + getIpOffset();
+ if (m_packet->pkt_len < 60) { m_packet->pkt_len = 60; }
+
+ m_cnt->m_valid_udp_tcp++;
+ m_payload_len = l3.m_ipv4->getTotalLength() - (payload_offset_from_ip);
+ m_payload = (uint8_t *)(packetBase +offset);
+ UpdatePacketPadding();
+ SetKey();
+}
+
+
+
+void CPacketIndication::ProcessIpv6Packet(CPacketParser *parser,
+ int offset){
+
+ char * packetBase = m_packet->raw;
+ CCPacketParserCounters * m_cnt=&parser->m_counter;
+ uint16_t src_ipv6[6];
+ uint16_t dst_ipv6[6];
+ uint16_t idx;
+ uint8_t protocol;
+ BP_ASSERT(l3.m_ipv6);
+
+ parser->m_counter.m_pkt++;
+
+ if ( l3.m_ipv6->getVersion() == 6 ){
+ m_cnt->m_ipv6++;
+ }else{
+ m_cnt->m_non_valid_ipv6_ver++;
+ return;
+ }
+
+ // Check length
+ if ((uint32_t)(l3.m_ipv6->getPayloadLen()+offset+l3.m_ipv6->getHeaderLength()) >
+ (uint32_t)( m_packet->getTotalLen()) ){
+ m_cnt->m_ipv6_length_error++;
+ return;
+ }
+
+ for (idx=0; idx<6; idx++){
+ src_ipv6[idx] = CGlobalInfo::m_options.m_src_ipv6[idx];
+ dst_ipv6[idx] = CGlobalInfo::m_options.m_dst_ipv6[idx];
+ }
+ l3.m_ipv6->updateMSBIpv6Src(&src_ipv6[0]);
+ l3.m_ipv6->updateMSBIpv6Dst(&dst_ipv6[0]);
+
+ offset += l3.m_ipv6->getHeaderLength();
+ protocol = l3.m_ipv6->getNextHdr();
+ if (ProcessIpPacketProtocol(m_cnt,protocol,&offset) != 0) {
+ return;
+ };
+
+ // Set packet length and include padding if needed
+ uint16_t real_pkt_size = l3.m_ipv6->getPayloadLen()+ getIpOffset() + l3.m_ipv6->getHeaderLength();
+ m_packet->pkt_len = real_pkt_size;
+ if (m_packet->pkt_len < 60) { m_packet->pkt_len = 60; }
+
+ m_cnt->m_valid_udp_tcp++;
+ m_payload_len = l3.m_ipv6->getPayloadLen();
+ m_payload = (uint8_t *)(packetBase +offset);
+
+ m_packet_padding = m_packet->getTotalLen() - real_pkt_size;
+ assert( m_packet->getTotalLen()>= real_pkt_size );
+ SetKey();
+}
+
+
+static uint8_t cbuff[MAX_PKT_SIZE];
+
+bool CPacketIndication::ConvertPacketToIpv6InPlace(CCapPktRaw * pkt,
+ int offset){
+
+ // Copy l2 data and set l2 type to ipv6
+ memcpy(cbuff, pkt->raw, offset);
+ *(uint16_t*)(cbuff+12) = PKT_HTONS(EthernetHeader::Protocol::IPv6);
+
+ // Create the ipv6 header
+ IPHeader *ipv4 = (IPHeader *) (pkt->raw+offset);
+ IPv6Header *ipv6 = (IPv6Header *) (cbuff+offset);
+ uint8_t ipv6_hdrlen = ipv6->getHeaderLength();
+ memset(ipv6,0,ipv6_hdrlen);
+ ipv6->setVersion(6);
+ if (ipv4->getTotalLength() < ipv4->getHeaderLength()) {
+ return(false);
+ }
+ // Calculate the payload length
+ uint16_t p_len = ipv4->getTotalLength() - ipv4->getHeaderLength();
+ ipv6->setPayloadLen(p_len);
+ uint8_t l4_proto = ipv4->getProtocol();
+ ipv6->setNextHdr(l4_proto);
+ ipv6->setHopLimit(64);
+
+ // Update the least significant 32-bits of ipv6 address
+ // using the ipv4 address
+ ipv6->updateLSBIpv6Src(ipv4->getSourceIp());
+ ipv6->updateLSBIpv6Dst(ipv4->getDestIp());
+
+ // Copy rest of packet
+ uint16_t ipv4_offset = offset + ipv4->getHeaderLength();
+ uint16_t ipv6_offset = offset + ipv6_hdrlen;
+ memcpy(cbuff+ipv6_offset,pkt->raw+ipv4_offset,p_len);
+
+ ipv6_offset+=p_len;
+ memcpy(pkt->raw,cbuff,ipv6_offset);
+
+ // Set packet length
+ pkt->pkt_len = ipv6_offset;
+ m_is_ipv6 = true;
+
+ return (true);
+}
+
+
+void CPacketIndication::ProcessPacket(CPacketParser *parser,
+ CCapPktRaw * pkt){
+ _ProcessPacket(parser,pkt);
+ UpdateOffsets(); /* update fast offsets */
+}
+
+
+
+/* process packet */
+void CPacketIndication::_ProcessPacket(CPacketParser *parser,
+ CCapPktRaw * pkt){
+
+ BP_ASSERT(pkt);
+ m_packet =pkt;
+ Clean();
+ CCPacketParserCounters * m_cnt=&parser->m_counter;
+
+ int offset = 0;
+ char * packetBase;
+ packetBase = m_packet->raw;
+ BP_ASSERT(packetBase);
+ m_ether = (EthernetHeader *)packetBase;
+ m_is_ipv6 = false;
+
+ // IP
+ switch( m_ether->getNextProtocol() ) {
+ case EthernetHeader::Protocol::IP :
+ offset = 14;
+ l3.m_ipv4 =(IPHeader *)(packetBase+offset);
+ break;
+ case EthernetHeader::Protocol::IPv6 :
+ offset = 14;
+ l3.m_ipv6 =(IPv6Header *)(packetBase+offset);
+ m_is_ipv6 = true;
+ break;
+ case EthernetHeader::Protocol::VLAN :
+ m_cnt->m_vlan++;
+ switch ( m_ether->getVlanProtocol() ){
+ case EthernetHeader::Protocol::IP:
+ offset = 18;
+ l3.m_ipv4 =(IPHeader *)(packetBase+offset);
+ break;
+ case EthernetHeader::Protocol::IPv6 :
+ offset = 18;
+ l3.m_ipv6 =(IPv6Header *)(packetBase+offset);
+ m_is_ipv6 = true;
+ break;
+ case EthernetHeader::Protocol::MPLS_Multicast :
+ case EthernetHeader::Protocol::MPLS_Unicast :
+ m_cnt->m_mpls++;
+ return;
+
+ case EthernetHeader::Protocol::ARP :
+ m_cnt->m_arp++;
+ return;
+
+ default:
+ m_cnt->m_non_ip++;
+ return ; /* Non IP */
+ }
+ break;
+ case EthernetHeader::Protocol::ARP :
+ m_cnt->m_arp++;
+ return; /* Non IP */
+ break;
+
+ case EthernetHeader::Protocol::MPLS_Multicast :
+ case EthernetHeader::Protocol::MPLS_Unicast :
+ m_cnt->m_mpls++;
+ return; /* Non IP */
+ break;
+
+ default:
+ m_cnt->m_non_ip++;
+ return; /* Non IP */
+ }
+
+ if (is_ipv6() == false) {
+ if( (14+20) > (uint32_t)( m_packet->getTotalLen()) ){
+ m_cnt->m_ip_length_error++;
+ return;
+ }
+ }
+
+ // For now, we cannot mix ipv4 and ipv6 packets,
+ // so we require the --ipv6 option to be set for ipv6 packets
+ if ((m_is_ipv6) && (CGlobalInfo::is_ipv6_enable() == false)){
+ fprintf(stderr,"ERROR --ipv6 must be set to process ipv6 packets\n");
+ exit(-1);
+ }
+
+ // Convert to Ipv6 if requested and not already Ipv6
+ if ((CGlobalInfo::is_ipv6_enable()) && (is_ipv6() == false )) {
+ if (ConvertPacketToIpv6InPlace(pkt, offset) == false){
+ /* Move to next packet as this was erroneous */
+ printf(" unable to convert packet to IPv6, skipping...\n");
+ return;
+ }
+ }
+
+ if (is_ipv6()){
+ ProcessIpv6Packet(parser,offset);
+ }else{
+ ProcessIpPacket(parser,offset);
+ }
+}
+
+
+
+void CFlowTableStats::Clear(){
+ m_lookup=0;
+ m_found=0;
+ m_fif=0;
+ m_add=0;
+ m_remove=0;
+ m_fif_err=0;
+ m_active=0;
+}
+
+void CFlowTableStats::Dump(FILE *fd){
+ DP (m_lookup);
+ DP (m_found);
+ DP (m_fif);
+ DP (m_add);
+ DP (m_remove);
+ DP (m_fif_err);
+ DP (m_active);
+}
+
+
+void CFlow::Dump(FILE *fd){
+ fprintf(fd," fif is swap : %d \n",is_fif_swap);
+}
+
+
+void CFlowTableManagerBase::Dump(FILE *fd){
+ m_stats.Dump(fd);
+}
+
+CFlow * CFlowTableManagerBase::process(CFlowKey & key,bool &is_fif ){
+ m_stats.m_lookup++;
+ is_fif=false;
+ CFlow * lp=lookup(key);
+ if ( lp ) {
+ m_stats.m_found++;
+ return (lp);
+ }else{
+ m_stats.m_fif++;
+ m_stats.m_active++;
+ m_stats.m_add++;
+ is_fif=true;
+ lp= add(key );
+ if (lp) {
+ }else{
+ m_stats.m_fif_err++;
+ }
+ }
+ return (lp);
+}
+
+
+bool CFlowTableMap::Create(int max_size){
+ m_stats.Clear();
+ return (true);
+}
+
+void CFlowTableMap::Delete(){
+ remove_all();
+}
+
+void CFlowTableMap::remove(CFlowKey & key ){
+ CFlow *lp=lookup(key);
+ if ( lp ) {
+ delete lp;
+ m_stats.m_remove++;
+ m_stats.m_active--;
+ m_map.erase(key);
+ }else{
+ BP_ASSERT(0);
+ }
+}
+
+
+CFlow * CFlowTableMap::lookup(CFlowKey & key ){
+ flow_map_t::iterator iter;
+ iter = m_map.find(key);
+ if (iter != m_map.end() ) {
+ return ( (*iter).second );
+ }else{
+ return (( CFlow*)0);
+ }
+}
+
+CFlow * CFlowTableMap::add(CFlowKey & key ){
+ CFlow * flow = new CFlow();
+ m_map.insert(flow_map_t::value_type(key,flow));
+ return (flow);
+}
+
+void CFlowTableMap::remove_all(){
+ if ( m_map.empty() )
+ return;
+ flow_map_iter_t it;
+ for (it= m_map.begin(); it != m_map.end(); ++it) {
+ CFlow *lp = it->second;
+ delete lp;
+ }
+ m_map.clear();
+}
+
+uint64_t CFlowTableMap::count(){
+ return ( m_map.size());
+}
+
+
+/*
+ * This function will insert an IP option header containing metadata for the
+ * rx-check feature.
+ *
+ * An mbuf is created to hold the new option header plus the portion of the
+ * packet after the base IP header (includes any IP options header that might
+ * exist). This mbuf is then linked into the existing mbufs (becoming the
+ * second mbuf).
+ *
+ * Note that the rxcheck option header is inserted as the first option header,
+ * and any existing IP option headers are placed after it.
+ */
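+/*
+ * Rough layout after the insertion (illustrative sketch):
+ *
+ * first mbuf : [ L2 header | base IP header ] (trimmed to mp1_offset bytes)
+ * new mbuf : [ rx-check option header (opt_len) | rest of the original first mbuf (move_len) ]
+ *
+ * Any further mbufs of the original chain stay linked after the new mbuf.
+ */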
+void CFlowPktInfo::do_generate_new_mbuf_rxcheck(rte_mbuf_t * m,
+ CGenNode * node,
+ pkt_dir_t dir,
+ bool single_port){
+
+ /* retrieve size of rx-check header, must be multiple of 8 */
+ uint16_t opt_len = RX_CHECK_LEN;
+ uint16_t current_opt_len = 0;
+ assert( (opt_len % 8) == 0 );
+
+ /* determine starting move location */
+ char *mp1 = rte_pktmbuf_mtod(m, char*);
+ uint16_t mp1_offset = m_pkt_indication.getFastIpOffsetFast();
+ if (unlikely (m_pkt_indication.is_ipv6()) ) {
+ mp1_offset += IPv6Header::DefaultSize;
+ }else{
+ mp1_offset += IPHeader::DefaultSize;
+ }
+ char *move_from = mp1 + mp1_offset;
+
+ /* determine size of new mbuf required */
+ uint16_t move_len = m->data_len - mp1_offset;
+ uint16_t new_mbuf_size = move_len + opt_len;
+ uint16_t mp2_offset = opt_len;
+
+ /* obtain a new mbuf */
+ rte_mbuf_t * new_mbuf = CGlobalInfo::pktmbuf_alloc(node->get_socket_id(), new_mbuf_size);
+ assert(new_mbuf);
+ char * mp2 = rte_pktmbuf_append(new_mbuf, new_mbuf_size);
+ char * move_to = mp2 + mp2_offset;
+
+ /* move part of packet from first mbuf to new mbuf */
+ memmove(move_to, move_from, move_len);
+
+ /* trim first mbuf and set pointer to option header*/
+ CRx_check_header *rxhdr;
+ uint16_t buf_adjust = move_len;
+ rxhdr = (CRx_check_header *)mp2;
+ m->data_len -= buf_adjust;
+
+ /* insert rx-check data as an IPv4 option header or IPv6 extension header*/
+ CFlowPktInfo * lp=node->m_pkt_info;
+ CPacketDescriptor * desc=&lp->m_pkt_indication.m_desc;
+
+ /* set option type and update ip header length */
+ IPHeader * ipv4=(IPHeader *)(mp1 + 14);
+ if (unlikely (m_pkt_indication.is_ipv6()) ) {
+ IPv6Header * ipv6=(IPv6Header *)(mp1 + 14);
+ uint8_t save_header= ipv6->getNextHdr();
+ ipv6->setNextHdr(RX_CHECK_V6_OPT_TYPE);
+ ipv6->setHopLimit(TTL_RESERVE_DUPLICATE);
+ ipv6->setPayloadLen( ipv6->getPayloadLen() +
+ sizeof(CRx_check_header));
+ rxhdr->m_option_type = save_header;
+ rxhdr->m_option_len = RX_CHECK_V6_OPT_LEN;
+ }else{
+ current_opt_len = ipv4->getHeaderLength();
+ ipv4->setHeaderLength(current_opt_len+opt_len);
+ ipv4->setTotalLength(ipv4->getTotalLength()+opt_len);
+ ipv4->setTimeToLive(TTL_RESERVE_DUPLICATE);
+ rxhdr->m_option_type = RX_CHECK_V4_OPT_TYPE;
+ rxhdr->m_option_len = RX_CHECK_V4_OPT_LEN;
+ }
+
+ /* fill in the rx-check metadata in the options header */
+ if ( CGlobalInfo::m_options.is_rxcheck_const_ts() ){
+ /* Runtime flag to use a constant value for the timestamp field. */
+ /* This is used by simulation to provide consistency across runs. */
+ rxhdr->m_time_stamp = 0xB3B2B1B0;
+ }else{
+ rxhdr->m_time_stamp = os_get_hr_tick_32();
+ }
+ rxhdr->m_magic = RX_CHECK_MAGIC;
+ rxhdr->m_flow_id = node->m_flow_id | ( ( (uint64_t)(desc->getFlowId() & 0xf))<<52 ) ; // include thread_id, node->flow_id, sub_flow in case of multi-flow template
+ rxhdr->m_flags = 0;
+ rxhdr->m_aging_sec = desc->GetMaxFlowTimeout();
+ rxhdr->m_template_id = (uint8_t)desc->getId();
+
+ /* all packets of the flow go to the same port */
+ if (single_port) {
+ rxhdr->m_pkt_id = desc->getFlowPktNum();
+ rxhdr->m_flow_size = desc->GetMaxPktsPerFlow();
+
+ }else{
+ rxhdr->m_pkt_id = desc->GetDirInfo()->GetPktNum();
+ rxhdr->m_flow_size = desc->GetDirInfo()->GetMaxPkts();
+ /* set dir */
+ rxhdr->set_dir(desc->IsInitSide()?1:0);
+ rxhdr->set_both_dir(desc->IsBiDirectionalFlow()?1:0);
+ }
+
+ /* update checksum for IPv4, split across 2 mbufs */
+ if (likely ( ! m_pkt_indication.is_ipv6()) ) {
+ ipv4->updateCheckSum2((uint8_t *)ipv4, current_opt_len, (uint8_t *)rxhdr, opt_len);
+ }
+
+ /* link new mbuf */
+ new_mbuf->next = m->next;
+ new_mbuf->nb_segs++;
+ m->next = new_mbuf;
+ m->nb_segs++;
+ m->pkt_len += opt_len;
+}
+
+
+char * CFlowPktInfo::push_ipv4_option_offline(uint8_t bytes){
+ /* must be aligned to 4 bytes */
+ assert( (bytes % 4)== 0 );
+ assert(m_pkt_indication.is_ipv6()==false);
+ if ( m_pkt_indication.l3.m_ipv4->getHeaderLength()+bytes>60 ){
+ printf(" ERROR ipv4 options size is too big, should be able to add %d bytes for internal info \n",bytes);
+ return((char *)0);
+ }
+ /* now we can do that !*/
+
+ /* add more bytes to the packet */
+ m_packet->append(bytes);
+ uint8_t ip_offset_to_move= m_pkt_indication.getFastIpOffsetFast()+IPHeader::DefaultSize;
+ char *p=m_packet->raw+ip_offset_to_move;
+ uint16_t bytes_to_move= m_packet->pkt_len - ip_offset_to_move -bytes;
+
+ /* move the start of ipv4 options */
+ memmove(p+bytes ,p, bytes_to_move);
+
+ /* fix all other stuff */
+ if ( m_pkt_indication.m_udp_tcp_offset ){
+ m_pkt_indication.m_udp_tcp_offset+=bytes;
+ }
+ if ( m_pkt_indication.m_payload_offset ) {
+ m_pkt_indication.m_payload_offset+=bytes;
+ }
+
+ m_pkt_indication.RefreshPointers();
+ /* now that the pointers are updated we can manipulate the ipv4 header */
+ IPHeader * ipv4=m_pkt_indication.l3.m_ipv4;
+
+ ipv4->setTotalLength(ipv4->getTotalLength()+bytes);
+ ipv4->setHeaderLength(ipv4->getHeaderLength()+(bytes));
+
+ m_pkt_indication.UpdatePacketPadding();
+
+ /* refresh the global mbuf */
+ free_const_mbuf();
+ alloc_const_mbuf();
+ return (p);
+}
+
+
+void CFlowPktInfo::mask_as_learn(){
+ char *p;
+ CNatOption *lpNat;
+ if ( m_pkt_indication.is_ipv6() ){
+ lpNat=(CNatOption *)push_ipv6_option_offline(CNatOption::noOPTION_LEN);
+ lpNat->set_init_ipv6_header();
+ lpNat->set_fid(0);
+ lpNat->set_thread_id(0);
+ }else{
+ lpNat=(CNatOption *)push_ipv4_option_offline(CNatOption::noOPTION_LEN);
+ lpNat->set_init_ipv4_header();
+ lpNat->set_fid(0);
+ lpNat->set_thread_id(0);
+ m_pkt_indication.l3.m_ipv4->updateCheckSum();
+ }
+ /* learn is true */
+ m_pkt_indication.m_desc.SetLearn(true);
+
+}
+
+
+char * CFlowPktInfo::push_ipv6_option_offline(uint8_t bytes){
+
+ /* must be aligned to 8 bytes */
+ assert( (bytes % 8)== 0 );
+ assert(m_pkt_indication.is_ipv6()==true);
+
+ /* add more bytes to the packet */
+ m_packet->append(bytes);
+ uint8_t ip_offset_to_move= m_pkt_indication.getFastIpOffsetFast()+IPv6Header::DefaultSize;
+ char *p=m_packet->raw+ip_offset_to_move;
+ uint16_t bytes_to_move= m_packet->pkt_len - ip_offset_to_move -bytes;
+
+ /* move the payload to make room for the ipv6 option header */
+ memmove(p+bytes ,p, bytes_to_move);
+
+ /* fix all other stuff */
+ if ( m_pkt_indication.m_udp_tcp_offset ){
+ m_pkt_indication.m_udp_tcp_offset+=bytes;
+ }
+ if ( m_pkt_indication.m_payload_offset ) {
+ m_pkt_indication.m_payload_offset+=bytes;
+ }
+
+ m_pkt_indication.RefreshPointers();
+ /* now that the pointers are updated we can manipulate the ipv6 header */
+ IPv6Header * ipv6=m_pkt_indication.l3.m_ipv6;
+
+ ipv6->setPayloadLen(ipv6->getPayloadLen()+bytes);
+ uint8_t save_header= ipv6->getNextHdr();
+ *p=save_header; /* copy next header */
+ ipv6->setNextHdr(CNatOption::noIPV6_OPTION);
+
+ m_pkt_indication.UpdatePacketPadding();
+
+ /* refresh the global mbuf */
+ free_const_mbuf();
+ alloc_const_mbuf();
+ return (p);
+}
+
+
+void CFlowPktInfo::alloc_const_mbuf(){
+
+ if ( m_packet->pkt_len > FIRST_PKT_SIZE ) {
+ /* pkt size is bigger than FIRST_PKT_SIZE, so create an offline constant buffer */
+ int i;
+ for (i=0; i<MAX_SOCKETS_SUPPORTED; i++) {
+ if ( CGlobalInfo::m_socket.is_sockets_enable(i) ){
+
+ rte_mbuf_t * m;
+ uint16_t pkt_s=(m_packet->pkt_len - FIRST_PKT_SIZE);
+
+ m = CGlobalInfo::pktmbuf_alloc(i,pkt_s);
+ BP_ASSERT(m);
+ char *p=rte_pktmbuf_append(m, pkt_s);
+ rte_memcpy(p,(m_packet->raw+FIRST_PKT_SIZE),pkt_s);
+
+ assert(m_big_mbuf[i]==NULL);
+ m_big_mbuf[i]=m;
+ }
+ }
+ }
+}
+
+void CFlowPktInfo::free_const_mbuf(){
+ int i;
+ for (i=0; i<MAX_SOCKETS_SUPPORTED; i++) {
+ rte_mbuf_t * m=m_big_mbuf[i];
+ if (m) {
+ rte_pktmbuf_free(m );
+ m_big_mbuf[i]=NULL;
+ }
+ }
+}
+
+
+bool CFlowPktInfo::Create(CPacketIndication * pkt_ind){
+ /* clone the packet*/
+ m_packet = new CCapPktRaw(pkt_ind->m_packet);
+ /* clone of the offsets */
+ m_pkt_indication.Clone(pkt_ind,m_packet);
+
+ int i;
+ for (i=0; i<MAX_SOCKETS_SUPPORTED; i++) {
+ m_big_mbuf[i] = NULL;
+ }
+ alloc_const_mbuf();
+ return (true);
+}
+
+void CFlowPktInfo::Delete(){
+ free_const_mbuf();
+ delete m_packet;
+}
+
+void CFlowPktInfo::Dump(FILE *fd){
+ m_pkt_indication.Dump(fd,0);
+}
+
+
+
+
+void CCapFileFlowInfo::save_to_erf(std::string cap_file_name,int pcap){
+ if (Size() ==0) {
+ fprintf(stderr,"ERROR no info for this flow ");
+ return ;
+ }
+ capture_type_e file_type=ERF;
+ if ( pcap ){
+ file_type=LIBPCAP;
+ }
+
+
+ CFileWriterBase * lpWriter=CCapWriterFactory::CreateWriter(file_type,(char *)cap_file_name.c_str());
+ if (lpWriter == NULL) {
+ fprintf(stderr,"ERROR can't create cap file %s ",(char *)cap_file_name.c_str());
+ return ;
+ }
+ int i;
+
+ for (i=0; i<(int)Size(); i++) {
+ CFlowPktInfo * lp=GetPacket((uint32_t)i);
+ bool res=lpWriter->write_packet(lp->m_packet);
+ BP_ASSERT(res);
+ }
+ delete lpWriter;
+}
+
+
+
+struct CTmpFlowPerDirInfo {
+ CTmpFlowPerDirInfo(){
+ m_pkt_id=0;
+ }
+
+ uint16_t m_pkt_id;
+};
+
+class CTmpFlowInfo {
+public:
+ CTmpFlowInfo(){
+ m_max_pkts=0;
+ m_max_aging_sec=0.0;
+ m_last_pkt=0.0;
+
+ }
+ ~CTmpFlowInfo(){
+ }
+public:
+ uint32_t m_max_pkts;
+ dsec_t m_max_aging_sec;
+ dsec_t m_last_pkt;
+
+ CTmpFlowPerDirInfo m_per_dir[CS_NUM];
+};
+
+typedef CTmpFlowInfo * flow_tmp_t;
+typedef std::map<uint16_t, flow_tmp_t> flow_tmp_map_t;
+typedef flow_tmp_map_t::iterator flow_tmp_map_iter_t;
+
+
+
+bool CCapFileFlowInfo::is_valid_template_load_time(std::string & err){
+ err="";
+ int i;
+ for (i=0; i<Size(); i++) {
+ CFlowPktInfo * lp= GetPacket((uint32_t)i);
+ CPacketIndication * lpd=&lp->m_pkt_indication;
+ if ( lpd->getEtherOffset() !=0 ){
+ err=" supported template Ether offset start is 0 \n";
+ return (false);
+ }
+ if ( lpd->getIpOffset() !=14 ){
+ err=" supported template ip offset is 14 \n";
+ return (false);
+ }
+ if ( lpd->is_ipv6() ){
+ if ( lpd->getTcpOffset() != (14+40) ){
+ err=" supported template tcp/udp offset is 54, no ipv6 option header is supported \n";
+ return (false);
+ }
+ }else{
+ if ( lpd->getTcpOffset() != (14+20) ){
+ err=" supported template tcp/udp offset is 34, no ipv4 option is allowed in this version \n";
+ return (false);
+ }
+ }
+ }
+
+ if ( CGlobalInfo::is_learn_mode() ) {
+ if ( GetPacket(0)->m_pkt_indication.m_desc.IsPluginEnable() ) {
+ err="plugins are not supported with --learn mode \n";
+ return(false);
+ }
+ }
+ return(true);
+}
+
+
+/**
+ * update global info
+ * 1. maximum aging
+ * 2. per sub-flow pkt_num/max-pkt per dir and per global
+ */
+void CCapFileFlowInfo::update_info(){
+ flow_tmp_map_iter_t iter;
+ flow_tmp_map_t ft;
+ CTmpFlowInfo * lpFlow;
+ int i;
+ dsec_t ctime=0.0;
+
+ // first iteration: learn all the info into a temp flow table
+ for (i=0; i<Size(); i++) {
+ CFlowPktInfo * lp= GetPacket((uint32_t)i);
+ // extract flow_id
+ CPacketDescriptor * desc=&lp->m_pkt_indication.m_desc;
+ uint16_t flow_id = desc->getFlowId();
+ CPacketDescriptorPerDir * lpCurPacket = desc->GetDirInfo();
+ pkt_dir_t dir=desc->IsInitSide()?CLIENT_SIDE:SERVER_SIDE; // with respect to the first sub-flow in the template
+
+ //update lpFlow
+ iter = ft.find(flow_id);
+ if (iter != ft.end() ) {
+ lpFlow=(*iter).second;
+ }else{
+ lpFlow = new CTmpFlowInfo();
+ assert(lpFlow);
+ ft.insert(flow_tmp_map_t::value_type(flow_id,lpFlow));
+ //add it
+
+ }
+
+ // main info
+ lpCurPacket->SetPktNum(lpFlow->m_per_dir[dir].m_pkt_id);
+ lpFlow->m_max_pkts++;
+ lpFlow->m_per_dir[dir].m_pkt_id++;
+
+ dsec_t delta = ctime - lpFlow->m_last_pkt ;
+ lpFlow->m_last_pkt = ctime;
+ if (delta > lpFlow->m_max_aging_sec) {
+ lpFlow->m_max_aging_sec = delta;
+ }
+ // per direction info
+
+ if (i<Size()) {
+ ctime += lp->m_pkt_indication.m_cap_ipg;
+ }
+ }
+
+
+ for (i=0; i<Size(); i++) {
+ CFlowPktInfo * lp= GetPacket((uint32_t)i);
+
+ CPacketDescriptor * desc=&lp->m_pkt_indication.m_desc;
+ uint16_t flow_id = desc->getFlowId();
+ CPacketDescriptorPerDir * lpCurPacket = desc->GetDirInfo();
+ pkt_dir_t dir=desc->IsInitSide()?CLIENT_SIDE:SERVER_SIDE; // with respect to the first sub-flow in the template
+
+ iter = ft.find(flow_id);
+ assert( iter != ft.end() );
+ lpFlow=(*iter).second;
+
+ if ( (lpFlow->m_per_dir[0].m_pkt_id >0) &&
+ (lpFlow->m_per_dir[1].m_pkt_id >0) ) {
+ /* we have both dir */
+ lp->m_pkt_indication.m_desc.SetBiPluginEnable(true);
+ }
+
+
+ lpCurPacket->SetMaxPkts(lpFlow->m_per_dir[dir].m_pkt_id);
+ lp->m_pkt_indication.m_desc.SetMaxPktsPerFlow(lpFlow->m_max_pkts);
+ lp->m_pkt_indication.m_desc.SetMaxFlowTimeout(lpFlow->m_max_aging_sec);
+ }
+
+
+ /* in case of learn mode , we need to mark the first packet */
+ if ( CGlobalInfo::is_learn_mode() ) {
+ CFlowPktInfo * lp= GetPacket(0);
+ assert(lp);
+ /* only for bi-directional traffic, and only for the first packet, set the learn flag */
+ if ( lp->m_pkt_indication.m_desc.IsBiDirectionalFlow() ){
+ lp->mask_as_learn();
+ }
+ }
+
+ if ( ft.empty() )
+ return;
+
+ flow_tmp_map_iter_t it;
+ for (it= ft.begin(); it != ft.end(); ++it) {
+ CTmpFlowInfo *lp = it->second;
+ assert(lp);
+ delete lp;
+ }
+ ft.clear();
+}
+
+
+int CCapFileFlowInfo::load_cap_file(std::string cap_file,uint16_t _id,uint8_t plugin_id){
+ RemoveAll();
+
+ fprintf(stdout," -- loading cap file %s \n",cap_file.c_str());
+ CPacketParser parser;
+ CPacketIndication pkt_indication;
+ CCapReaderBase * lp=CCapReaderFactory::CreateReader((char *)cap_file.c_str(),0);
+
+ if (lp == 0) {
+ printf(" ERROR file %s does not exist or not supported \n",(char *)cap_file.c_str());
+ return (-1);
+ }
+ bool multi_flow_enable =( (plugin_id!=0)?true:false);
+
+
+ CFlowTableMap flow;
+
+ parser.Create();
+ flow.Create(0);
+ m_total_bytes=0;
+ m_total_flows=0;
+ m_total_errors=0;
+ CFlow * first_flow=0;
+ bool first_flow_fif_is_swap=false;
+
+ bool time_was_set=false;
+ double last_time=0.0;
+ CCapPktRaw raw_packet;
+ int cnt=0;
+ while ( true ) {
+ /* read packet */
+ if ( lp->ReadPacket(&raw_packet) ==false ){
+ break;
+ }
+ cnt++;
+
+ if ( !time_was_set ){
+ last_time=raw_packet.get_time();
+ time_was_set=true;
+ }else{
+ if (raw_packet.get_time()<last_time) {
+ printf(" ERROR not valid pcap file,timestamp is negative at packet %d \n",cnt);
+ exit(-1);
+ }
+ last_time=raw_packet.get_time();
+ }
+
+ if ( parser.ProcessPacket(&pkt_indication, &raw_packet) ){
+
+ if ( pkt_indication.m_desc.IsValidPkt() ) {
+ pkt_indication.m_desc.SetPluginEnable(multi_flow_enable);
+ pkt_indication.m_desc.SetPluginId(plugin_id);
+
+ pkt_indication.m_desc.SetId(_id);
+ bool is_fif;
+ CFlow * lpflow=flow.process(pkt_indication.m_flow_key,is_fif);
+ m_total_bytes += pkt_indication.m_packet->pkt_len;
+ pkt_indication.m_cap_ipg = raw_packet.get_time();
+
+ pkt_indication.m_flow =lpflow;
+ pkt_indication.m_desc.SetFlowPktNum(lpflow->pkt_id);
+ /* inc pkt_id inside the flow */
+ lpflow->pkt_id++;
+
+ /* make sure the packet does not already use the TTL values reserved for duplication detection */
+ uint8_t ttl = pkt_indication.getTTL();
+ if ( (ttl == TTL_RESERVE_DUPLICATE) ||
+ (ttl == (TTL_RESERVE_DUPLICATE-1)) ) {
+ pkt_indication.setTTL(TTL_RESERVE_DUPLICATE-4);
+ }
+
+ if (is_fif) {
+
+ lpflow->flow_id = m_total_flows;
+
+ pkt_indication.m_desc.SetFlowId(lpflow->flow_id);
+
+ if (m_total_flows == 0) {
+ /* first flow */
+ first_flow =lpflow;/* save it for single flow support , to signal error */
+ lpflow->is_fif_swap =pkt_indication.m_desc.IsSwapTuple();
+ first_flow_fif_is_swap = pkt_indication.m_desc.IsSwapTuple();
+ pkt_indication.m_desc.SetInitSide(true);
+ Append(&pkt_indication);
+ m_total_flows++;
+
+ }else{
+ if ( multi_flow_enable ){
+
+ lpflow->is_fif_swap = pkt_indication.m_desc.IsSwapTuple();
+ /* with respect to the first flow */
+
+ bool init_side_with_respect_to_first_flow =
+ ((first_flow_fif_is_swap?true:false) == lpflow->is_fif_swap)?true:false;
+
+ pkt_indication.m_desc.SetInitSide(init_side_with_respect_to_first_flow);
+ Append(&pkt_indication);
+ m_total_flows++;
+
+ }else{
+ printf(" more than one flow in this cap ignore it !! \n");
+ pkt_indication.m_flow_key.Dump(stdout);
+ m_total_errors++;
+ }
+ }
+
+
+ }else{ /* no FIF */
+
+ pkt_indication.m_desc.SetFlowId(lpflow->flow_id);
+
+ if ( multi_flow_enable ==false ){
+
+ if (lpflow == first_flow) {
+ // add the packet to the first (single) flow
+ bool init_side=
+ ((lpflow->is_fif_swap?true:false) == pkt_indication.m_desc.IsSwapTuple())?true:false;
+ pkt_indication.m_desc.SetInitSide( init_side );
+ Append(&pkt_indication);
+ }else{
+ //printf(" more than one flow in this cap ignot it !! \n");
+ m_total_errors++;
+ }
+ }else{
+ /* support multi-flow, */
+
+ /* work with respect to the first flow */
+ bool init_side=
+ ((first_flow_fif_is_swap?true:false) == pkt_indication.m_desc.IsSwapTuple())?true:false;
+ pkt_indication.m_desc.SetInitSide( init_side );
+ Append(&pkt_indication);
+
+ }
+ }
+ }
+ }
+ }
+
+
+ /* set the last */
+ CFlowPktInfo * last_pkt =GetPacket((uint32_t)(Size()-1));
+ last_pkt->m_pkt_indication.m_desc.SetIsLastPkt(true);
+
+ int i;
+
+ for (i=1; i<Size(); i++) {
+ CFlowPktInfo * lp_prev= GetPacket((uint32_t)i-1);
+ CFlowPktInfo * lp= GetPacket((uint32_t)i);
+
+ lp_prev->m_pkt_indication.m_cap_ipg = lp->m_pkt_indication.m_cap_ipg-
+ lp_prev->m_pkt_indication.m_cap_ipg;
+
+
+
+ if ( lp->m_pkt_indication.m_desc.IsInitSide() !=
+ lp_prev->m_pkt_indication.m_desc.IsInitSide()) {
+ lp_prev->m_pkt_indication.m_desc.SetRtt(true);
+ }
+ }
+
+ GetPacket((uint32_t)Size()-1)->m_pkt_indication.m_cap_ipg=0.0;
+ m_total_errors += parser.m_counter.getTotalErrors();
+
+
+ /* dump the flow */
+ //Dump(stdout);
+
+ //flow.Dump(stdout);
+ flow.Delete();
+ //parser.Dump(stdout);
+ parser.Delete();
+ //fprintf(stdout," -- finish loading cap file \n");
+ //fprintf(stdout,"\n");
+ delete lp;
+ if ( m_total_errors > 0 ) {
+ parser.m_counter.Dump(stdout);
+ printf(" ERORR in one of the cap file, you should have one flow per cap file or valid plugin \n");
+ return(-1);
+ }
+ return (0);
+}
+
+void CCapFileFlowInfo::update_pcap_mode(){
+ int i;
+ for (i=0; i<(int)Size(); i++) {
+ CFlowPktInfo * lp=GetPacket((uint32_t)i);
+ lp->m_pkt_indication.m_desc.SetPcapTiming(true);
+ }
+}
+
+void CCapFileFlowInfo::get_total_memory(CCCapFileMemoryUsage & memory){
+ memory.clear();
+ int i;
+ for (i=0; i<(int)Size(); i++) {
+ CFlowPktInfo * lp=GetPacket((uint32_t)i);
+ if ( lp->m_packet->pkt_len > FIRST_PKT_SIZE ) {
+ memory.add_size(lp->m_packet->pkt_len - FIRST_PKT_SIZE);
+ }
+ }
+}
+
+
+double CCapFileFlowInfo::get_cap_file_length_sec(){
+ dsec_t sum=0.0;
+ int i;
+ for (i=0; i<(int)Size(); i++) {
+ CFlowPktInfo * lp=GetPacket((uint32_t)i);
+ sum+=lp->m_pkt_indication.m_cap_ipg;
+ }
+ return (sum);
+}
+
+
+void CCapFileFlowInfo::update_min_ipg(dsec_t min_ipg,
+ dsec_t override_ipg){
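+ /* clamp: any per-packet IPG below min_ipg, or below the override value itself, is replaced by override_ipg */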
+
+ int i;
+ for (i=0; i<(int)Size(); i++) {
+ CFlowPktInfo * lp=GetPacket((uint32_t)i);
+ if ( lp->m_pkt_indication.m_cap_ipg < min_ipg ){
+ lp->m_pkt_indication.m_cap_ipg=override_ipg;
+ }
+ if ( lp->m_pkt_indication.m_cap_ipg < override_ipg ){
+ lp->m_pkt_indication.m_cap_ipg=override_ipg;
+ }
+ }
+}
+
+
+void CCapFileFlowInfo::Dump(FILE *fd){
+
+
+ int i;
+ //CCapPacket::DumpHeader(fd);
+ for (i=0; i<(int)Size(); i++) {
+ fprintf(fd,"pkt_id : %d \n",i+1);
+ fprintf(fd,"-----------\n");
+ CFlowPktInfo * lp=GetPacket((uint32_t)i);
+ lp->Dump(fd);
+ }
+}
+
+// add pkt indication
+void CCapFileFlowInfo::Append(CPacketIndication * pkt_indication){
+
+ CFlowPktInfo * lp;
+ lp = new CFlowPktInfo();
+ lp->Create( pkt_indication );
+ m_flow_pkts.push_back(lp);
+}
+
+
+
+void CCCapFileMemoryUsage::Add(const CCCapFileMemoryUsage & obj){
+ int i;
+ for (i=0; i<CCCapFileMemoryUsage::MASK_SIZE; i++) {
+ m_buf[i] += obj.m_buf[i];
+ }
+ m_total_bytes +=obj.m_total_bytes;
+
+}
+
+
+void CCCapFileMemoryUsage::dump(FILE *fd){
+ fprintf(fd, " Memory usage \n");
+ int i;
+ int c_size=CCCapFileMemoryUsage::SIZE_MIN;
+ int c_total=0;
+
+ for (i=0; i<CCCapFileMemoryUsage::MASK_SIZE; i++) {
+ fprintf(fd," size_%-7d : %lu \n",c_size,m_buf[i]);
+ c_total +=m_buf[i]*c_size;
+ c_size = c_size*2;
+ }
+ fprintf(fd," Total : %s %.0f%% util due to buckets \n",double_to_human_str(c_total,"bytes",KBYE_1024).c_str(),100.0*float(c_total)/float(m_total_bytes) );
+}
+
+
+bool CCapFileFlowInfo::Create(){
+ m_total_bytes=0;
+ m_total_errors = 0;
+ m_total_flows = 0;
+ return (true);
+}
+
+
+void CCapFileFlowInfo::dump_pkt_sizes(void){
+ int i;
+ for (i=0; i<(int)Size(); i++) {
+ flow_pkt_info_t lp=GetPacket((uint32_t)i);
+ CGenNode node;
+ node.m_dest_ip = 0x10000110;
+ node.m_src_ip = 0x20000110;
+ node.m_src_port = 12;
+ rte_mbuf_t * buf=lp->generate_new_mbuf(&node);
+ //rte_pktmbuf_dump(buf, buf->pkt_len);
+ rte_pktmbuf_free(buf);
+ }
+}
+
+void CCapFileFlowInfo::RemoveAll(){
+ int i;
+ m_total_bytes=0;
+ m_total_errors = 0;
+ m_total_flows = 0;
+ for (i=0; i<(int)Size(); i++) {
+ flow_pkt_info_t lp=GetPacket((uint32_t)i);
+ lp->Delete();
+ delete lp;
+ }
+ // free all the pointers
+ m_flow_pkts.clear();
+}
+
+void CCapFileFlowInfo::Delete(){
+ RemoveAll();
+}
+
+void operator >> (const YAML::Node& node, mac_mapping_t &fi) {
+ utl_yaml_read_ip_addr(node,"ip", fi.ip);
+ const YAML::Node& mac_info = node["mac"];
+ for(unsigned i=0;i<mac_info.size();i++) {
+ const YAML::Node & node_2 =mac_info;
+ uint32_t value;
+ node_2[i] >> value;
+ fi.mac.mac[i] = value;
+ }
+}
+
+void operator >> (const YAML::Node& node, std::map<uint32_t, mac_addr_align_t> &mac_info) {
+ const YAML::Node& mac_node = node["items"];
+ mac_mapping_t mac_mapping;
+ for (unsigned i=0;i<mac_node.size();i++) {
+ mac_node[i] >> mac_mapping;
+ mac_info[mac_mapping.ip] = mac_mapping.mac;
+ }
+}
+
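+// Illustrative "dyn_pyload" entry as consumed by the parser below (example values only):
+// dyn_pyload :
+// - pkt_id : 1
+// pyld_offset : 6
+// type : 0
+// len : 4
+// mask : 65535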
+void operator >> (const YAML::Node& node, CFlowYamlDpPkt & fi) {
+ uint32_t val;
+ node["pkt_id"] >> val;
+ fi.m_pkt_id =(uint8_t)val;
+ node["pyld_offset"] >> val;
+ fi.m_pyld_offset =(uint8_t)val;
+ node["type"] >> val;
+ fi.m_type =(uint8_t)val;
+ node["len"] >> val;
+ fi.m_len =(uint8_t)val;
+ node["mask"] >> val;
+ fi.m_pkt_mask =val;
+}
+
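+// Illustrative "vlan" section as consumed by the parser below (example values, not defaults):
+// vlan :
+// enable : 1
+// vlan0 : 100
+// vlan1 : 200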
+void operator >> (const YAML::Node& node, CVlanYamlInfo & fi) {
+
+ uint32_t tmp;
+ try {
+ node["enable"] >> tmp ;
+ fi.m_enable=tmp;
+ }catch ( const std::exception& e ) {
+
+ }
+
+ try {
+ node["vlan0"] >> tmp;
+ fi.m_vlan_per_port[0] = tmp;
+ node["vlan1"] >> tmp;
+ fi.m_vlan_per_port[1] = tmp;
+ }catch ( const std::exception& e ) {
+ // there is a default
+
+ }
+}
+
+
+
+void operator >> (const YAML::Node& node, CFlowYamlInfo & fi) {
+ node["name"] >> fi.m_name;
+ node["cps"] >> fi.m_k_cps;
+ fi.m_k_cps = fi.m_k_cps/1000.0;
+ double t;
+ node["ipg"] >> t;
+ fi.m_ipg_sec =t/1000000.0;
+ node["rtt"] >> t;
+ fi.m_rtt_sec = t/1000000.0;
+ node["w"] >> fi.m_w;
+
+ try {
+ node["cap_ipg"] >> fi.m_cap_mode;
+ fi.m_cap_mode_was_set =true;
+ } catch ( const std::exception& e ) {
+ fi.m_cap_mode_was_set =false;
+ }
+
+ try {
+ node["wlength"] >> fi.m_wlength;
+ fi.m_wlength_set=true;
+ } catch ( const std::exception& e ) {
+ fi.m_wlength_set=false;
+ fi.m_wlength =500;
+ }
+
+ try {
+ node["limit"] >> fi.m_limit;
+ fi.m_limit_was_set = true;
+ } catch ( const std::exception& e ) {
+ fi.m_limit_was_set = false;
+ fi.m_limit = 0;
+ }
+
+ try {
+ uint32_t plugin_val;
+ node["plugin_id"] >> plugin_val;
+ fi.m_plugin_id=plugin_val;
+ } catch ( const std::exception& e ) {
+ fi.m_plugin_id=0;
+ }
+
+ fi.m_one_app_server_was_set = false;
+ fi.m_one_app_server = false;
+ if ( utl_yaml_read_ip_addr(node,
+ "server_addr",
+ fi.m_server_addr) ){
+ try {
+ node["one_app_server"] >> fi.m_one_app_server;
+ fi.m_one_app_server_was_set=true;
+ } catch ( const std::exception& e ) {
+ fi.m_one_app_server_was_set = false;
+ fi.m_one_app_server = false;
+ }
+ }
+
+
+
+ if ( ( fi.m_limit_was_set ) && (fi.m_plugin_id !=0) ){
+ fprintf(stderr," limit can't be non zero when plugin is set, you must have only one of the options set");
+ exit(-1);
+ }
+
+
+ try {
+ int i;
+ const YAML::Node& dyn_pyload = node["dyn_pyload"];
+ for(unsigned i=0;i<dyn_pyload.size();i++) {
+ CFlowYamlDpPkt fd;
+ dyn_pyload[i] >> fd;
+ if ( fi.m_dpPkt == 0 ){
+ fi.m_dpPkt = new CFlowYamlDynamicPyloadPlugin();
+ if (fi.m_plugin_id == 0) {
+ fi.m_plugin_id = mpDYN_PYLOAD;
+ }else{
+ fprintf(stderr," plugin should be zero with dynamic pyload program");
+ exit(-1);
+ }
+ }
+
+ fd.Dump(stdout);
+
+ fi.m_dpPkt->Add(fd);
+ printf(" here ");
+ }
+ } catch ( const std::exception& e ) {
+ fi.m_dpPkt=0;
+ }
+}
+
+
+
+void operator >> (const YAML::Node& node, CFlowsYamlInfo & flows_info) {
+
+ node["duration"] >> flows_info.m_duration_sec;
+
+ try {
+ node["generator"] >> flows_info.m_tuple_gen;
+ flows_info.m_tuple_gen_was_set =true;
+ } catch ( const std::exception& e ) {
+ flows_info.m_tuple_gen_was_set =false;
+ }
+
+
+ // m_ipv6_set will be true if and only if both src_ipv6
+ // and dst_ipv6 are provided. These are used to set
+ // the most significant 96-bits of the IPv6 address; the
+ // least significant 32-bits come from the ipv4 address
+ // (what is set above).
+ //
+ // If the IPv6 src/dst is not provided in the yaml file,
+ // then the most significant 96-bits will be set to 0
+ // which represents an IPv4-compatible IPv6 address.
+ //
+ // If desired, an IPv4-mapped IPv6 address can be
+ // formed by providing src_ipv6,dst_ipv6 and specifying
+ // {0,0,0,0,0,0xffff}
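+ // For example (illustrative), an IPv4-mapped prefix would list the six
+ // most-significant 16-bit words as:
+ // src_ipv6 : [0,0,0,0,0,0xffff]
+ // dst_ipv6 : [0,0,0,0,0,0xffff]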
+ flows_info.m_ipv6_set=true;
+ try {
+ const YAML::Node& src_ipv6_info = node["src_ipv6"];
+ if (src_ipv6_info.size() == 6 ){
+ for(unsigned i=0;i<src_ipv6_info.size();i++) {
+ uint32_t fi;
+ const YAML::Node & node =src_ipv6_info;
+ node[i] >> fi;
+ flows_info.m_src_ipv6.push_back(fi);
+ }
+ }else{
+ flows_info.m_ipv6_set=false;
+ }
+ } catch ( const std::exception& e ) {
+ flows_info.m_ipv6_set=false;
+ }
+
+ try {
+ const YAML::Node& dst_ipv6_info = node["dst_ipv6"];
+ if (dst_ipv6_info.size() == 6 ){
+ for(unsigned i=0;i<dst_ipv6_info.size();i++) {
+ uint32_t fi;
+ const YAML::Node & node =dst_ipv6_info;
+ node[i] >> fi;
+ flows_info.m_dst_ipv6.push_back(fi);
+ }
+ }else{
+ flows_info.m_ipv6_set=false;
+ }
+ } catch ( const std::exception& e ) {
+ flows_info.m_ipv6_set=false;
+ }
+
+ try {
+ node["cap_ipg"] >> flows_info.m_cap_mode;
+ flows_info.m_cap_mode_set=true;
+ } catch ( const std::exception& e ) {
+ flows_info.m_cap_mode=false;
+ flows_info.m_cap_mode_set=false;
+ }
+ double t;
+
+ try {
+ node["cap_ipg_min"] >> t ;
+ flows_info.m_cap_ipg_min = t/1000000.0;
+ flows_info.m_cap_ipg_min_set=true;
+ } catch ( const std::exception& e ) {
+ flows_info.m_cap_ipg_min_set=false;
+ flows_info.m_cap_ipg_min = 20;
+ }
+
+ try {
+ node["cap_override_ipg"] >> t;
+ flows_info.m_cap_overide_ipg = t/1000000.0;
+ flows_info.m_cap_overide_ipg_set = true;
+ } catch ( const std::exception& e ) {
+ flows_info.m_cap_overide_ipg_set = false;
+ flows_info.m_cap_overide_ipg = 0;
+ }
+
+ try {
+ node["wlength"] >> flows_info.m_wlength;
+ flows_info.m_wlength_set=true;
+ } catch ( const std::exception& e ) {
+ flows_info.m_wlength_set=false;
+ flows_info.m_wlength =100;
+ }
+
+ try {
+ node["one_app_server"] >> flows_info.m_one_app_server;
+ flows_info.m_one_app_server_was_set=true;
+ } catch ( const std::exception& e ) {
+ flows_info.m_one_app_server =false;
+ flows_info.m_one_app_server_was_set=false;
+ }
+
+ try {
+ node["vlan"] >> flows_info.m_vlan_info;
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["mac_override_by_ip"] >> flows_info.m_mac_replace_by_ip;
+ } catch ( const std::exception& e ) {
+ flows_info.m_mac_replace_by_ip =false;
+ }
+
+
+ const YAML::Node& mac_info = node["mac"];
+ for(unsigned i=0;i<mac_info.size();i++) {
+ uint32_t fi;
+ const YAML::Node & node =mac_info;
+ node[i] >> fi;
+ flows_info.m_mac_base.push_back(fi);
+ }
+
+ const YAML::Node& cap_info = node["cap_info"];
+ for(unsigned i=0;i<cap_info.size();i++) {
+ CFlowYamlInfo fi;
+ cap_info[i] >> fi;
+ flows_info.m_vec.push_back(fi);
+ }
+}
+
+void CVlanYamlInfo::Dump(FILE *fd){
+ fprintf(fd," vlan enable : %d \n",m_enable);
+ fprintf(fd," vlan val : %d ,%d \n",m_vlan_per_port[0],m_vlan_per_port[1]);
+}
+
+
+void CFlowsYamlInfo::Dump(FILE *fd){
+ fprintf(fd," duration : %f sec \n",m_duration_sec);
+ m_tuple_gen.Dump(fd);
+
+ fprintf(fd,"\n");
+ if (CGlobalInfo::is_ipv6_enable()) {
+ int idx;
+ fprintf(fd," src_ipv6 : ");
+ for (idx=0; idx<5; idx++){
+ fprintf(fd,"%04x:", CGlobalInfo::m_options.m_src_ipv6[idx]);
+ }
+ fprintf(fd,"%04x\n", CGlobalInfo::m_options.m_src_ipv6[5]);
+ fprintf(fd," dst_ipv6 : ");
+ for (idx=0; idx<5; idx++){
+ fprintf(fd,"%04x:", CGlobalInfo::m_options.m_dst_ipv6[idx]);
+ }
+ fprintf(fd,"%04x\n", CGlobalInfo::m_options.m_dst_ipv6[5]);
+ }
+ if ( !m_cap_mode_set ) {
+ fprintf(fd," cap_ipg : wasn't set \n");
+ }else{
+ fprintf(fd," cap_ipg : %d \n",m_cap_mode?1:0);
+ }
+
+ if ( !m_cap_ipg_min_set ){
+ fprintf(fd," cap_ipg_min : wasn't set \n");
+ }else{
+ fprintf(fd," cap_ipg_min : %f \n",m_cap_ipg_min);
+ }
+
+ if ( !m_cap_overide_ipg_set ){
+ fprintf(fd," cap_override_ipg : wasn't set \n");
+ }else{
+ fprintf(fd," cap_override_ipg : %f \n",m_cap_overide_ipg);
+ }
+
+ if ( !m_wlength_set ){
+ fprintf(fd," wlength : wasn't set \n");
+ }else{
+        fprintf(fd," wlength : %d \n",m_wlength);
+ }
+ fprintf(fd," one_server_for_application : %d \n",m_one_app_server?1:0);
+ fprintf(fd," one_server_for_application_was_set : %d \n",m_one_app_server_was_set?1:0);
+
+ m_vlan_info.Dump(fd);
+
+ fprintf(fd," mac base : ");
+ int i;
+ for (i=0; i<(int)m_mac_base.size(); i++) {
+ if (i< (int)(m_mac_base.size()-1) ) {
+ fprintf(fd,"0x%02x,",m_mac_base[i]);
+ }else{
+ fprintf(fd,"0x%02x",m_mac_base[i]);
+ }
+ }
+ fprintf(fd,"\n");
+
+ fprintf(fd," cap file info \n");
+ fprintf(fd," ------------- \n");
+ for (i=0; i<(int)m_vec.size(); i++) {
+ m_vec[i].Dump(fd);
+ }
+}
+
+
+/*
+
+example for YAML file
+
+- duration : 10.0
+ cap_info :
+ - name: hey1.pcap
+ cps : 12.0
+ ipg : 0.0001
+ - name: hey2.pcap
+ cps : 11.0
+ ipg : 0.0001
+
+
+*/
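+
+/*
+
+ illustrative example of the newer-format top-level keys parsed above
+ (hypothetical values; the generator section is required, see
+ verify_correctness below):
+
+- duration : 10.0
+  generator :
+    distribution : "seq"
+    clients_start : "16.0.0.1"
+    clients_end   : "16.0.1.255"
+    servers_start : "48.0.0.1"
+    servers_end   : "48.0.0.255"
+    dual_port_mask : "1.0.0.0"
+  cap_ipg : true
+  mac : [0x0,0x0,0x0,0x1,0x0,0x0]
+  cap_info :
+    - name: hey1.pcap
+      cps : 12.0
+      ipg : 0.0001
+
+*/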
+
+bool CFlowsYamlInfo::verify_correctness(uint32_t num_threads) {
+ if ( m_tuple_gen_was_set ==false ){
+ printf(" ERROR there must be a generator field in YAML , the old format is deprecated \n");
+ printf(" This is not supported : \n");
+ printf(" min_src_ip : 0x10000001 \n");
+ printf(" max_src_ip : 0x50000001 \n");
+ printf(" min_dst_ip : 0x60000001 \n");
+ printf(" max_dst_ip : 0x60000010 \n");
+ printf(" This is supported : \n");
+ printf("generator : \n");
+ printf(" distribution : \"seq\" \n");
+ printf(" clients_start : \"16.0.0.1\" \n");
+ printf(" clients_end : \"16.0.1.255\" \n");
+ printf(" servers_start : \"48.0.0.1\" \n");
+ printf(" servers_end : \"48.0.0.255\" \n");
+ printf(" clients_per_gb : 201 \n");
+ printf(" min_clients : 101 \n");
+ printf(" dual_port_mask : \"1.0.0.0\" \n");
+ printf(" tcp_aging : 1 \n");
+ printf(" udp_aging : 1 \n");
+ return(false);
+ }
+ if ( !m_tuple_gen.is_valid(num_threads,is_any_plugin_configured()) ){
+ return (false);
+ }
+
+ return(true);
+}
+
+
+
+int CFlowsYamlInfo::load_from_yaml_file(std::string file_name){
+ m_vec.clear();
+
+ if ( !utl_is_file_exists (file_name) ){
+ printf(" ERROR file %s does not exist \n",file_name.c_str());
+ exit(-1);
+ }
+
+ try {
+ std::ifstream fin((char *)file_name.c_str());
+ YAML::Parser parser(fin);
+ YAML::Node doc;
+
+ parser.GetNextDocument(doc);
+ for(unsigned i=0;i<doc.size();i++) {
+ doc[i] >> *this;
+ break;
+ }
+ } catch ( const std::exception& e ) {
+ std::cout << e.what() << "\n";
+ exit(-1);
+ }
+
+ /* update from user input */
+ if (CGlobalInfo::m_options.m_duration > 0.1) {
+ m_duration_sec = CGlobalInfo::m_options.m_duration;
+ }
+ int i;
+ m_is_plugin_configured=false;
+ for (i=0; i<(int)m_vec.size(); i++) {
+ m_vec[i].m_k_cps =m_vec[i].m_k_cps*CGlobalInfo::m_options.m_factor;
+ if (( ! m_vec[i].m_cap_mode_was_set ) && (m_cap_mode_set ) ){
+ m_vec[i].m_cap_mode = m_cap_mode;
+ }
+ if (( ! m_vec[i].m_wlength_set ) && (m_wlength_set ) ){
+ m_vec[i].m_wlength = m_wlength;
+ }
+
+ if (( ! m_vec[i].m_one_app_server_was_set ) && (m_one_app_server_was_set ) ){
+ m_vec[i].m_one_app_server = m_one_app_server;
+ }
+
+ if ( m_cap_overide_ipg_set ){
+ m_vec[i].m_ipg_sec = m_cap_overide_ipg;
+ m_vec[i].m_rtt_sec = m_cap_overide_ipg;
+ }
+
+ if ( m_vec[i].m_plugin_id ){
+ m_is_plugin_configured=true;
+ }
+ }
+ return 0;
+}
+
+
+
+void CFlowStats::Clear(){
+
+ m_id=0;
+ m_name="";
+ m_pkt=0.0;
+ m_bytes=0.0;
+ m_cps=0.0;
+ m_mb_sec=0.0;
+ m_mB_sec=0.0;
+ m_c_flows=0.0;
+ m_pps =0.0;
+    m_total_Mbytes=0;
+ m_errors =0;
+ m_flows =0 ;
+ m_memory.clear();
+}
+
+void CFlowStats::Add(const CFlowStats & obj){
+
+ m_pkt += obj.m_pkt ;
+ m_bytes += obj.m_bytes ;
+ m_cps += obj.m_cps ;
+ m_mb_sec += obj.m_mb_sec ;
+ m_mB_sec += obj.m_mB_sec ;
+ m_c_flows += obj.m_c_flows ;
+ m_pps += obj.m_pps ;
+ m_total_Mbytes +=obj.m_total_Mbytes ;
+ m_errors +=obj.m_errors;
+ m_flows +=obj.m_flows ;
+
+ m_memory.Add(obj.m_memory);
+}
+
+
+void CFlowStats::DumpHeader(FILE *fd){
+ fprintf(fd," %2s,%-40s,%4s,%4s,%5s,%7s,%9s,%9s,%9s,%10s,%5s,%7s,%4s,%4s \n",
+ "id","name","tps","cps","f-pkts","f-bytes","duration","Mb/sec","MB/sec","c-flows","PPS","total-Mbytes-duration","errors","flows");
+}
+void CFlowStats::Dump(FILE *fd){
+ //"name","cps","f-pkts","f-bytes","Mb/sec","MB/sec","c-flows","PPS","total-Mbytes-duration","errors","flows"
+ fprintf(fd," %02d, %-40s ,%4.2f,%4.2f, %5.0f , %7.0f ,%7.2f ,%7.2f , %7.2f , %10.0f , %5.0f , %7.0f , %llu , %llu \n",
+ m_id,m_name.c_str(),m_cps,get_normal_cps(),
+ m_pkt,m_bytes,duration_sec,m_mb_sec,m_mB_sec,m_c_flows,m_pps,m_total_Mbytes,m_errors,m_flows);
+}
+
+bool CFlowGeneratorRecPerThread::Create(CTupleGeneratorSmart * global_gen,
+ CFlowYamlInfo * info,
+ CFlowsYamlInfo * yaml_flow_info,
+ CCapFileFlowInfo * flow_info,
+ uint16_t _id,
+ uint32_t thread_id ){
+
+ BP_ASSERT(info);
+ m_thread_id =thread_id ;
+
+ tuple_gen.Create(global_gen);
+ CTupleGenYamlInfo * lpt=&yaml_flow_info->m_tuple_gen;
+
+ tuple_gen.SetSingleServer(info->m_one_app_server,
+ info->m_server_addr,
+ getDualPortId(thread_id),
+ lpt->m_dual_interface_mask
+ );
+
+ tuple_gen.SetW(info->m_w);
+
+
+
+ m_id =_id;
+ m_info =info;
+ m_flows_info = yaml_flow_info;
+    // set up the policer; the bucket size allows short bursts
+ m_policer.set_cir(info->m_k_cps*1000.0);
+ m_policer.set_level(0.0);
+ m_policer.set_bucket_size(100.0);
+ /* pointer to global */
+ m_flow_info = flow_info;
+ return (true);
+}
+
+
+void CFlowGeneratorRecPerThread::Delete(){
+ tuple_gen.Delete();
+}
+
+
+
+
+void CFlowGeneratorRecPerThread::Dump(FILE *fd){
+ fprintf(fd," configuration info ");
+ fprintf(fd," -----------------");
+ m_info->Dump(fd);
+ fprintf(fd," -----------------");
+ m_flow_info->Dump(fd);
+}
+
+
+void CFlowGeneratorRecPerThread::getFlowStats(CFlowStats * stats){
+
+ double t_pkt=(double)m_flow_info->Size();
+ double t_bytes=(double)m_flow_info->get_total_bytes();
+ double cps=m_info->m_k_cps *1000.0;
+ double mb_sec = (cps*t_bytes*8.0)/(_1Mb_DOUBLE);
+ double mB_sec = (cps*t_bytes)/(_1Mb_DOUBLE);
+
+ double c_flow_windows_sec=0.0;
+
+ if (m_info->m_cap_mode) {
+ c_flow_windows_sec = m_flow_info->get_cap_file_length_sec();
+ }else{
+ c_flow_windows_sec = t_pkt * m_info->m_ipg_sec;
+ }
+
+
+ double c_flows = cps*c_flow_windows_sec*m_flow_info->get_total_flows();
+ double pps =cps*t_pkt;
+ double total_Mbytes = mB_sec * m_flows_info->m_duration_sec;
+ uint64_t errors = m_flow_info->get_total_errors();
+ uint64_t flows = m_flow_info->get_total_flows();
+
+
+ stats->m_id = m_id;
+ stats->m_pkt = t_pkt;
+ stats->m_bytes = t_bytes;
+ stats->duration_sec = c_flow_windows_sec;
+ stats->m_name = m_info->m_name.c_str();
+ stats->m_cps = cps;
+ stats->m_mb_sec = mb_sec;
+ stats->m_mB_sec = mB_sec;
+ stats->m_c_flows = c_flows;
+ stats->m_pps = pps;
+ stats->m_total_Mbytes = total_Mbytes;
+ stats->m_errors = errors;
+ stats->m_flows = flows;
+}
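+
+/*
+ worked example of the math above (hypothetical numbers, assuming _1Mb_DOUBLE is 1e6):
+ a template with t_pkt=100 packets and t_bytes=50000 bytes at m_k_cps=1 (1000 flows/sec)
+ gives mb_sec = 1000*50000*8/1e6 = 400 Mbit/sec, mB_sec = 50 MByte/sec and
+ pps = 1000*100 = 100,000 packets/sec
+*/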
+
+
+
+void CFlowGeneratorRec::Dump(FILE *fd){
+ fprintf(fd," configuration info ");
+ fprintf(fd," -----------------");
+ m_info->Dump(fd);
+ fprintf(fd," -----------------");
+ m_flow_info.Dump(fd);
+}
+
+
+void CFlowGeneratorRec::getFlowStats(CFlowStats * stats){
+
+ double t_pkt=(double)m_flow_info.Size();
+ double t_bytes=(double)m_flow_info.get_total_bytes();
+ double cps=m_info->m_k_cps *1000.0;
+ double mb_sec = (cps*t_bytes*8.0)/(_1Mb_DOUBLE);
+ double mB_sec = (cps*t_bytes)/(_1Mb_DOUBLE);
+
+ double c_flow_windows_sec=0.0;
+
+ if (m_info->m_cap_mode) {
+ c_flow_windows_sec = m_flow_info.get_cap_file_length_sec();
+ }else{
+ c_flow_windows_sec = t_pkt * m_info->m_ipg_sec;
+ }
+
+ m_flow_info.get_total_memory(stats->m_memory);
+
+
+ double c_flows = cps*c_flow_windows_sec;
+ double pps =cps*t_pkt;
+ double total_Mbytes = mB_sec * m_flows_info->m_duration_sec;
+ uint64_t errors = m_flow_info.get_total_errors();
+ uint64_t flows = m_flow_info.get_total_flows();
+
+
+ stats->m_id = m_id;
+ stats->m_pkt = t_pkt;
+ stats->m_bytes = t_bytes;
+ stats->duration_sec = c_flow_windows_sec;
+ stats->m_name = m_info->m_name.c_str();
+ stats->m_cps = cps;
+ stats->m_mb_sec = mb_sec;
+ stats->m_mB_sec = mB_sec;
+ stats->m_c_flows = c_flows;
+ stats->m_pps = pps;
+ stats->m_total_Mbytes = total_Mbytes;
+ stats->m_errors = errors;
+ stats->m_flows = flows;
+}
+
+
+void CFlowGeneratorRec::fixup_ipg_if_needed(void){
+ if ( m_flows_info->m_cap_mode ) {
+ m_flow_info.update_pcap_mode();
+ }
+
+ if ( (m_flows_info->m_cap_mode) &&
+ (m_flows_info->m_cap_ipg_min_set) &&
+ (m_flows_info->m_cap_overide_ipg_set)
+ ){
+ m_flow_info.update_min_ipg(m_flows_info->m_cap_ipg_min,
+ m_flows_info->m_cap_overide_ipg);
+ }
+}
+
+
+bool CFlowGeneratorRec::Create(CFlowYamlInfo * info,
+ CFlowsYamlInfo * flows_info,
+ uint16_t _id){
+ BP_ASSERT(info);
+ m_id=_id;
+ m_info=info;
+ m_flows_info=flows_info;
+ m_flow_info.Create();
+
+    // set up the policer; the bucket size allows short bursts
+ m_policer.set_cir(info->m_k_cps*1000.0);
+ m_policer.set_level(0.0);
+ m_policer.set_bucket_size(100.0);
+
+ int res=m_flow_info.load_cap_file(info->m_name.c_str(),_id,m_info->m_plugin_id);
+ if ( res==0 ) {
+ fixup_ipg_if_needed();
+ std::string err;
+        /* verify that the templates are valid */
+ bool is_valid=m_flow_info.is_valid_template_load_time(err);
+ if (!is_valid) {
+ printf("\n ERROR template file is not valid '%s' \n",err.c_str());
+ return (false);
+ }
+ m_flow_info.update_info();
+ return (true);
+ }else{
+ return (false);
+ }
+}
+
+void CFlowGeneratorRec::Delete(){
+ m_flow_info.Delete();
+}
+
+
+void CGenNode::DumpHeader(FILE *fd){
+ fprintf(fd," pkt_id,time,fid,pkt_info,pkt,len,type,is_init,is_last,type,thread_id,src_ip,dest_ip,src_port \n");
+}
+
+void CGenNode::Dump(FILE *fd){
+ fprintf(fd,"%.6f,%llx,%p,%llu,%d,%d,%d,%d,%d,%d,%x,%x,%d\n",m_time,m_flow_id,m_pkt_info,
+ m_pkt_info->m_pkt_indication.m_packet->pkt_cnt,
+ m_pkt_info->m_pkt_indication.m_packet->pkt_len,
+ m_pkt_info->m_pkt_indication.m_desc.getId(),
+ (m_pkt_info->m_pkt_indication.m_desc.IsInitSide()?1:0),
+ m_pkt_info->m_pkt_indication.m_desc.IsLastPkt(),
+ m_type,
+ m_thread_id,
+ m_src_ip,
+ m_dest_ip,
+ m_src_port
+
+
+
+ );
+
+}
+
+void CNodeGenerator::set_vif(CVirtualIF * v_if){
+ m_v_if = v_if;
+}
+
+bool CNodeGenerator::Create(CFlowGenListPerThread * parent){
+ m_v_if =0;
+ m_parent=parent;
+ m_socket_id =0;
+ m_is_realtime =CGlobalInfo::is_realtime();
+ m_realtime_his.Create();
+ return(true);
+}
+
+void CNodeGenerator::Delete(){
+ m_realtime_his.Delete();
+}
+
+
+void CNodeGenerator::add_node(CGenNode * mynode){
+ m_p_queue.push(mynode);
+}
+
+
+
+void CNodeGenerator::remove_all(CFlowGenListPerThread * thread){
+ CGenNode *node;
+ while (!m_p_queue.empty()) {
+ node = m_p_queue.top();
+ m_p_queue.pop();
+ thread->free_node( node);
+ }
+}
+
+int CNodeGenerator::open_file(std::string file_name,
+ CPreviewMode * preview_mode){
+ BP_ASSERT(m_v_if);
+ m_preview_mode =*preview_mode;
+    /* set preview mode */
+ m_v_if->set_review_mode(preview_mode);
+ m_v_if->open_file(file_name);
+ m_cnt = 1;
+ return (0);
+}
+
+int CNodeGenerator::close_file(CFlowGenListPerThread * thread){
+ remove_all(thread);
+ BP_ASSERT(m_v_if);
+ m_v_if->close_file();
+ return (0);
+}
+
+int CNodeGenerator::flush_one_node_to_file(CGenNode * node){
+ BP_ASSERT(m_v_if);
+ return (m_v_if->send_node(node));
+}
+
+int CNodeGenerator::update_stats(CGenNode * node){
+ if ( m_preview_mode.getVMode() >2 ){
+ fprintf(stdout," %llu ,",m_cnt);
+ node->Dump(stdout);
+ m_cnt++;
+ }
+ return (0);
+}
+
+
+bool CFlowGenListPerThread::Create(uint32_t thread_id,
+ uint32_t core_id,
+ CFlowGenList * flow_list,
+ uint32_t max_threads){
+
+
+ m_flow_list =flow_list;
+ m_core_id= core_id;
+ m_tcp_dpc= 0;
+ m_udp_dpc=0;
+ m_max_threads=max_threads;
+ m_thread_id=thread_id;
+
+ m_cpu_cp_u.Create(&m_cpu_dp_u);
+
+ uint32_t socket_id=rte_lcore_to_socket_id(m_core_id);
+
+ char name[100];
+ sprintf(name,"nodes-%d",m_core_id);
+ printf(" create thread %d %s socket: %d \n",m_core_id,name,socket_id);
+ m_node_pool = utl_rte_mempool_create_non_pkt(name,
+ CGlobalInfo::m_memory_cfg.get_each_core_dp_flows(),
+ sizeof(CGenNode),
+ 128,
+ 0 ,
+ socket_id);
+
+ printf(" pool %p \n",m_node_pool);
+ m_node_gen.Create(this);
+ m_flow_id_to_node_lookup.Create();
+
+ /* split the clients to threads */
+ CTupleGenYamlInfo * tuple_gen = &m_flow_list->m_yaml_info.m_tuple_gen;
+
+ /* split the clients to threads using the mask */
+ CClientPortion portion;
+ split_clients(m_thread_id,
+ m_max_threads,
+ getDualPortId(),
+ *tuple_gen,
+ portion);
+
+ init_from_global(portion);
+ m_smart_gen.Create(0,m_thread_id,
+ cdSEQ_DIST,
+ portion.m_client_start,
+ portion.m_client_end,
+ portion.m_server_start,
+ portion.m_server_end,
+ get_longest_flow(),
+ get_total_kcps()*1000,
+ m_flow_list);
+
+
+ CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
+
+ m_ring_from_rx = rx_dp->getRingCpToDp(thread_id);
+ m_ring_to_rx =rx_dp->getRingDpToCp(thread_id);
+
+ assert(m_ring_from_rx);
+ assert(m_ring_to_rx);
+ return (true);
+}
+
+/* return the client ip , port */
+FORCE_NO_INLINE void CFlowGenListPerThread::handler_defer_job(CGenNode *p){
+ CGenNodeDeferPort * defer=(CGenNodeDeferPort *)p;
+ int i;
+ for (i=0; i<defer->m_cnt; i++) {
+ m_smart_gen.FreePort(defer->m_clients[i],defer->m_ports[i]);
+ }
+}
+
+FORCE_NO_INLINE void CFlowGenListPerThread::handler_defer_job_flush(void){
+ /* flush the pending job of free ports */
+ if (m_tcp_dpc) {
+ handler_defer_job((CGenNode *)m_tcp_dpc);
+ free_node((CGenNode *)m_tcp_dpc);
+ m_tcp_dpc=0;
+ }
+ if (m_udp_dpc) {
+ handler_defer_job((CGenNode *)m_udp_dpc);
+ free_node((CGenNode *)m_udp_dpc);
+ m_udp_dpc=0;
+ }
+}
+
+
+void CFlowGenListPerThread::defer_client_port_free(bool is_tcp,
+ uint32_t c_ip,
+ uint16_t port){
+ /* free is not required in this case */
+ if (!m_smart_gen.IsFreePortRequired() ){
+ return;
+ }
+ CGenNodeDeferPort * defer;
+ if (is_tcp) {
+ if (CGlobalInfo::m_options.m_tcp_aging==0) {
+ m_smart_gen.FreePort(c_ip,port);
+ return;
+ }
+ defer=get_tcp_defer();
+ }else{
+ if (CGlobalInfo::m_options.m_udp_aging==0) {
+ m_smart_gen.FreePort(c_ip,port);
+ return;
+ }
+ defer=get_udp_defer();
+ }
+ if ( defer->add_client(c_ip,port) ){
+ if (is_tcp) {
+ m_node_gen.schedule_node((CGenNode *)defer,CGlobalInfo::m_options.m_tcp_aging);
+ m_tcp_dpc=0;
+ }else{
+ m_node_gen.schedule_node((CGenNode *)defer,CGlobalInfo::m_options.m_udp_aging);
+ m_udp_dpc=0;
+ }
+ }
+}
+
+
+void CFlowGenListPerThread::defer_client_port_free(CGenNode *p){
+ defer_client_port_free(p->m_pkt_info->m_pkt_indication.m_desc.IsTcp(),p->m_src_ip,p->m_src_port);
+}
+
+
+
+/* copy all info from global and div by num of threads */
+void CFlowGenListPerThread::init_from_global(CClientPortion& portion){
+ /* copy generator , it is the same */
+ m_yaml_info =m_flow_list->m_yaml_info;
+
+ /* copy first the flow info */
+ int i;
+ for (i=0; i<(int)m_flow_list->m_cap_gen.size(); i++) {
+ CFlowGeneratorRec * lp=m_flow_list->m_cap_gen[i];
+ CFlowGeneratorRecPerThread * lp_thread=new CFlowGeneratorRecPerThread();
+ /* TBD leak of memory */
+ CFlowYamlInfo * yaml_info =new CFlowYamlInfo();
+
+ yaml_info->m_name = lp->m_info->m_name;
+ yaml_info->m_k_cps = lp->m_info->m_k_cps/(double)m_max_threads;
+ yaml_info->m_ipg_sec = lp->m_info->m_ipg_sec;
+ yaml_info->m_rtt_sec = lp->m_info->m_rtt_sec;
+ yaml_info->m_w = lp->m_info->m_w;
+ yaml_info->m_cap_mode =lp->m_info->m_cap_mode;
+ yaml_info->m_wlength =lp->m_info->m_wlength;
+ yaml_info->m_plugin_id = lp->m_info->m_plugin_id;
+ yaml_info->m_one_app_server = lp->m_info->m_one_app_server;
+ yaml_info->m_server_addr = lp->m_info->m_server_addr;
+ yaml_info->m_dpPkt =lp->m_info->m_dpPkt;
+
+ /* fix this */
+ assert(m_max_threads>0);
+ if ( m_max_threads == 1 ) {
+            /* single thread - it gets the whole limit */
+ yaml_info->m_limit = lp->m_info->m_limit;
+ }else{
+ yaml_info->m_limit = lp->m_info->m_limit/m_max_threads;
+            /* thread id is zero based */
+ if ( m_thread_id == 0){
+ yaml_info->m_limit += lp->m_info->m_limit % m_max_threads;
+ }
+ if (yaml_info->m_limit==0) {
+ yaml_info->m_limit=1;
+ }
+ }
+
+ yaml_info->m_limit_was_set = lp->m_info->m_limit_was_set;
+ yaml_info->m_flowcnt = 0;
+ yaml_info->m_restart_time = ( yaml_info->m_limit_was_set ) ?
+ (yaml_info->m_limit / (yaml_info->m_k_cps * 1000.0)) : 0;
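+        /* e.g. (hypothetical) m_limit=1000 flows at m_k_cps=1 (1000 cps) gives a
+           restart time of 1.0 sec before the template repeats */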
+
+
+ lp_thread->Create( &m_smart_gen,
+ yaml_info,
+ lp->m_flows_info,
+ &lp->m_flow_info,
+ lp->m_id,
+ m_thread_id);
+
+ m_cap_gen.push_back(lp_thread);
+ }
+}
+
+static void free_map_flow_id_to_node(CGenNode *p){
+ CGlobalInfo::free_node(p);
+}
+
+
+void CFlowGenListPerThread::Delete(){
+
+ // free all current maps
+ m_flow_id_to_node_lookup.remove_all(free_map_flow_id_to_node);
+ // free object
+ m_flow_id_to_node_lookup.Delete();
+
+ m_smart_gen.Delete();
+ m_node_gen.Delete();
+ Clean();
+ m_cpu_cp_u.Delete();
+}
+
+
+
+void CFlowGenListPerThread::Clean(){
+ int i;
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRecPerThread * lp=m_cap_gen[i];
+ lp->Delete();
+ delete lp;
+ }
+ m_cap_gen.clear();
+}
+
+//uint64_t _start_time;
+
+void CNodeGenerator::dump_json(std::string & json){
+
+ json="{\"name\":\"tx-gen\",\"type\":0,\"data\":{";
+ m_realtime_his.dump_json("realtime-hist",json);
+ json+="\"unknown\":0}}" ;
+}
+
+
+int CNodeGenerator::flush_file(dsec_t max_time,
+ dsec_t d_time,
+ bool always,
+ CFlowGenListPerThread * thread,
+ double &old_offset){
+ CGenNode * node;
+ dsec_t flush_time=now_sec();
+ dsec_t offset=0.0;
+ dsec_t n_time;
+ if (always) {
+ offset=old_offset;
+ }
+ uint32_t events=0;
+ bool done=false;
+
+ thread->m_cpu_dp_u.start_work();
+ while (!m_p_queue.empty()) {
+ node = m_p_queue.top();
+ n_time = node->m_time+ offset;
+
+ if (( (n_time) > max_time ) &&
+ (always==false) ) {
+ /* nothing to do */
+ break;
+ }
+ events++;
+/*#ifdef VALG
+ if (events > 1 ) {
+ CALLGRIND_START_INSTRUMENTATION;
+ }
+#endif*/
+
+ if ( likely ( m_is_realtime ) ){
+ dsec_t dt ;
+ thread->m_cpu_dp_u.commit();
+ bool once=false;
+
+ while ( true ) {
+ dt = now_sec() - n_time ;
+
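+                /* stop busy-waiting once we are within ~30 usec of (or past) the scheduled send time */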
+ if (dt> (-0.00003)) {
+ break;
+ }
+
+ if (!once) {
+ /* check the msg queue once */
+ thread->check_msgs();
+ once=true;
+ }
+
+ rte_pause();
+ }
+ thread->m_cpu_dp_u.start_work();
+
+            /* add offset in case of failures of more than 100 usec */
+ if ( unlikely( dt > 0.000100 ) ) {
+ offset += dt;
+ }
+ /* update histogram */
+ if ( unlikely( events % 16 ) ==0 ) {
+ m_realtime_his.Add(dt);
+ }
+            /* flush every 10 usec */
+ if ( now_sec() - flush_time > 0.00001 ){
+ m_v_if->flush_tx_queue();
+ flush_time=now_sec();
+ }
+ }
+ #ifndef RTE_DPDK
+ thread->check_msgs();
+ #endif
+
+ uint8_t type=node->m_type;
+
+ if ( likely( type == CGenNode::FLOW_PKT ) ) {
+ /* PKT */
+ if ( !(node->is_repeat_flow()) || (always==false)) {
+ flush_one_node_to_file(node);
+ #ifdef _DEBUG
+ update_stats(node);
+ #endif
+ }
+ m_p_queue.pop();
+ if ( node->is_last_in_flow() ) {
+ if ((node->is_repeat_flow()) && (always==false)) {
+ /* Flow is repeated, reschedule it */
+ thread->reschedule_flow( node);
+ }else{
+ /* Flow will not be repeated, so free node */
+ thread->free_last_flow_node( node);
+ }
+ }else{
+ node->update_next_pkt_in_flow();
+ m_p_queue.push(node);
+ }
+ }else{
+ if ((type == CGenNode::FLOW_FIF)) {
+ /* callback to our method */
+ m_p_queue.pop();
+ if ( always == false) {
+ thread->m_cur_time_sec = node->m_time ;
+
+ if ( thread->generate_flows_roundrobin(&done) <0){
+ break;
+ }
+ if (!done) {
+ node->m_time +=d_time;
+ m_p_queue.push(node);
+ }else{
+ thread->free_node(node);
+ }
+ }else{
+ thread->free_node(node);
+ }
+
+ }else{
+ handle_slow_messages(type,node,thread,always);
+ }
+ }
+ }
+
+
+ if (!always) {
+ old_offset =offset;
+ }else{
+ // free the left other
+ thread->handler_defer_job_flush();
+ }
+ return (0);
+}
+
+void CNodeGenerator::handle_slow_messages(uint8_t type,
+ CGenNode * node,
+ CFlowGenListPerThread * thread,
+ bool always){
+
+ if (unlikely (type == CGenNode::FLOW_DEFER_PORT_RELEASE) ) {
+ m_p_queue.pop();
+ thread->handler_defer_job(node);
+ thread->free_node(node);
+ }else{
+ if (type == CGenNode::FLOW_PKT_NAT) {
+            /* flow repeat together with NAT is not supported */
+ if ( node->is_nat_first_state() ){
+ node->set_nat_wait_state();
+ flush_one_node_to_file(node);
+ #ifdef _DEBUG
+ update_stats(node);
+ #endif
+ }else{
+ if ( node->is_nat_wait_state() ) {
+ if (node->is_responder_pkt()) {
+ m_p_queue.pop();
+                        /* timeout - need to free the flow and remove the association; we didn't get the conversion yet */
+ thread->terminate_nat_flows(node);
+ return;
+
+ }else{
+ flush_one_node_to_file(node);
+ #ifdef _DEBUG
+ update_stats(node);
+ #endif
+ }
+ }else{
+ assert(0);
+ }
+ }
+ m_p_queue.pop();
+ if ( node->is_last_in_flow() ) {
+ thread->free_last_flow_node( node);
+ }else{
+ node->update_next_pkt_in_flow();
+ m_p_queue.push(node);
+ }
+
+ }else{
+ if ( type == CGenNode::FLOW_SYNC ){
+ thread->check_msgs(); /* check messages */
+ m_v_if->flush_tx_queue(); /* flush pkt each timeout */
+ m_p_queue.pop();
+ if ( always == false) {
+ node->m_time += SYNC_TIME_OUT;
+ m_p_queue.push(node);
+ }else{
+ thread->free_node(node);
+ }
+
+ }else{
+ printf(" ERROR type is not valid %d \n",type);
+ assert(0);
+ }
+ }
+ }
+}
+
+
+
+
+void CFlowGenListPerThread::Dump(FILE *fd){
+ fprintf(fd,"yaml info ");
+ m_yaml_info.Dump(fd);
+
+ fprintf(fd,"\n");
+ fprintf(fd,"cap file info");
+ int i;
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRecPerThread * lp=m_cap_gen[i];
+ lp->Dump(stdout);
+ }
+}
+
+
+void CFlowGenListPerThread::DumpStats(FILE *fd){
+ m_stats.dump(fd);
+}
+
+
+void CFlowGenListPerThread::DumpCsv(FILE *fd){
+ CFlowStats::DumpHeader(fd);
+
+ CFlowStats stats;
+ CFlowStats sum;
+ int i;
+
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRecPerThread * lp=m_cap_gen[i];
+ lp->getFlowStats(&stats);
+ stats.Dump(fd);
+ sum.Add(stats);
+ }
+ fprintf(fd,"\n");
+ sum.m_name= "sum";
+ sum.Dump(fd);
+}
+
+
+uint32_t CFlowGenListPerThread::getDualPortId(){
+ return ( ::getDualPortId(m_thread_id) );
+}
+
+double CFlowGenListPerThread::get_longest_flow(){
+ int i;
+ double longest_flow = 0.0;
+ for (i=0;i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRecPerThread * lp=m_cap_gen[i];
+ double tmp_len;
+ tmp_len = lp->m_flow_info->get_cap_file_length_sec();
+ if (longest_flow < tmp_len ) {
+ longest_flow = tmp_len;
+ }
+ }
+ return longest_flow;
+}
+
+double CFlowGenListPerThread::get_total_kcps(){
+ int i;
+ double total=0.0;
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRecPerThread * lp=m_cap_gen[i];
+ total +=lp->m_info->m_k_cps;
+ }
+ return (total);
+}
+
+double CFlowGenListPerThread::get_delta_flow_is_sec(){
+ return (1.0/(1000.0*get_total_kcps()));
+}
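+
+/* e.g. (hypothetical) a total of 2 kcps over all templates gives a new-flow
+   inter-arrival time of 1/(1000*2) = 0.5 msec */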
+
+
+void CFlowGenListPerThread::inc_current_template(void){
+ m_cur_template++;
+ if (m_cur_template == m_cap_gen.size()) {
+ m_cur_template=0;
+ }
+}
+
+
+int CFlowGenListPerThread::generate_flows_roundrobin(bool *done){
+ // round robin
+
+ CFlowGeneratorRecPerThread * cur;
+ bool found=false;
+ // try current
+ int i;
+ *done = true;
+ for (i=0;i<(int)m_cap_gen.size();i++ ) {
+ cur=m_cap_gen[m_cur_template];
+ if (!(cur->m_info->m_limit_was_set) ||
+ (cur->m_info->m_flowcnt < cur->m_info->m_limit)) {
+ *done = false;
+ if ( cur->m_policer.update(1.0,m_cur_time_sec) ){
+ cur->m_info->m_flowcnt++;
+ found=true;
+ break;
+ }
+ }
+ inc_current_template();
+ }
+
+ if (found) {
+ /* generate the flow into the generator*/
+ CGenNode * node= create_node() ;
+
+ cur->generate_flow(&m_node_gen,m_cur_time_sec,m_cur_flow_id,node);
+ m_cur_flow_id++;
+
+ /* this is estimation */
+ m_stats.m_total_open_flows += cur->m_flow_info->get_total_flows();
+ m_stats.m_total_bytes += cur->m_flow_info->get_total_bytes();
+ m_stats.m_total_pkt += cur->m_flow_info->Size();
+ inc_current_template();
+ }
+ return (0);
+}
+
+
+int CFlowGenListPerThread::reschedule_flow(CGenNode *node){
+
+ // Re-schedule the node
+ node->reset_pkt_in_flow();
+ node->m_time += node->m_template_info->m_restart_time;
+ m_node_gen.add_node(node);
+
+ m_stats.m_total_bytes += node->m_flow_info->get_total_bytes();
+ m_stats.m_total_pkt += node->m_flow_info->Size();
+
+ return (0);
+}
+
+void CFlowGenListPerThread::terminate_nat_flows(CGenNode *p){
+ m_stats.m_nat_flow_timeout++;
+ m_stats.m_nat_lookup_remove_flow_id++;
+ m_flow_id_to_node_lookup.remove_no_lookup(p->get_short_fid());
+ free_last_flow_node( p);
+}
+
+
+void CFlowGenListPerThread::handel_latecy_pkt_msg(CGenNodeLatencyPktInfo * msg){
+ /* send the packet */
+ #ifdef LATENCY_QUEUE_TRACE_
+ printf(" latency msg dir %d\n",msg->m_dir);
+ struct rte_mbuf * m;
+ m=msg->m_pkt;
+ rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));
+ #endif
+
+ /* update timestamp */
+ struct rte_mbuf * m;
+ m=msg->m_pkt;
+ uint8_t *p=rte_pktmbuf_mtod(m, uint8_t*);
+ latency_header * h=(latency_header *)(p+msg->m_latency_offset);
+ h->time_stamp = os_get_hr_tick_64();
+
+ m_node_gen.m_v_if->send_one_pkt((pkt_dir_t)msg->m_dir,msg->m_pkt);
+}
+
+
+void CFlowGenListPerThread::handel_nat_msg(CGenNodeNatInfo * msg){
+ int i;
+ for (i=0; i<msg->m_cnt; i++) {
+ CNatFlowInfo * nat_msg=&msg->m_data[i];
+ CGenNode * node=m_flow_id_to_node_lookup.lookup(nat_msg->m_fid);
+ if (!node) {
+            /* this should be moved to a notification module */
+ #ifdef NAT_TRACE_
+            printf(" ERROR invalid flow_id %d, probably the flow was already aged \n",nat_msg->m_fid);
+ #endif
+ m_stats.m_nat_lookup_no_flow_id++;
+ continue;
+ }
+ #ifdef NAT_TRACE_
+ printf(" %.03f RX :set node %p:%x %x:%x:%x \n",now_sec() ,node,nat_msg->m_fid,nat_msg->m_external_ip,nat_msg->m_external_ip_server,nat_msg->m_external_port);
+ #endif
+ node->set_nat_ipv4_addr(nat_msg->m_external_ip);
+ node->set_nat_ipv4_port(nat_msg->m_external_port);
+ node->set_nat_ipv4_addr_server(nat_msg->m_external_ip_server);
+
+ assert(node->is_nat_wait_state());
+ if ( CGlobalInfo::is_learn_verify_mode() ){
+ if (!node->is_external_is_eq_to_internal_ip() ){
+ m_stats.m_nat_flow_learn_error++;
+ }
+ }
+ node->set_nat_learn_state();
+ /* remove from the hash */
+ m_flow_id_to_node_lookup.remove_no_lookup(nat_msg->m_fid);
+ m_stats.m_nat_lookup_remove_flow_id++;
+
+ }
+}
+
+
+void CFlowGenListPerThread::check_msgs(void){
+ if ( likely ( m_ring_from_rx->isEmpty() ) ){
+ return;
+ }
+ #ifdef NAT_TRACE_
+ printf(" %.03f got message from RX \n",now_sec());
+ #endif
+ while ( true ) {
+ CGenNode * node;
+ if ( m_ring_from_rx->Dequeue(node)!=0 ){
+ break;
+ }
+ assert(node);
+ //printf ( " message: thread %d, node->m_flow_id : %d \n", m_thread_id,node->m_flow_id);
+ /* only one message is supported right now */
+
+ CGenNodeMsgBase * msg=(CGenNodeMsgBase *)node;
+
+ uint8_t msg_type = msg->m_msg_type;
+ switch (msg_type ) {
+ case CGenNodeMsgBase::NAT_FIRST:
+ handel_nat_msg((CGenNodeNatInfo * )msg);
+ break;
+ case CGenNodeMsgBase::LATENCY_PKT:
+ handel_latecy_pkt_msg((CGenNodeLatencyPktInfo *) msg);
+ break;
+ default:
+ printf("ERROR pkt-thread message type is not valid %d \n",msg_type);
+ assert(0);
+ }
+
+ CGlobalInfo::free_node(node);
+ }
+}
+
+
+void CFlowGenListPerThread::generate_erf(std::string erf_file_name,
+ CPreviewMode & preview){
+ /* now we are ready to generate*/
+ if ( m_cap_gen.size()==0 ){
+        fprintf(stderr," nothing to generate, no template was loaded \n");
+ return;
+ }
+ m_preview_mode = preview;
+ m_node_gen.open_file(erf_file_name,&m_preview_mode);
+ dsec_t d_time_flow=get_delta_flow_is_sec();
+ m_cur_time_sec = 0.01+m_thread_id*m_flow_list->get_delta_flow_is_sec();
+ if ( CGlobalInfo::is_realtime() ){
+ m_cur_time_sec += now_sec() + 0.5 ;
+ }
+ dsec_t c_stop_sec = m_cur_time_sec + m_yaml_info.m_duration_sec;
+ m_stop_time_sec =c_stop_sec;
+ m_cur_flow_id =1;
+ m_cur_template =(m_thread_id % m_cap_gen.size());
+ m_stats.clear();
+
+ fprintf(stdout," Generating erf file ... \n");
+ CGenNode * node= create_node() ;
+ /* add periodic */
+ node->m_type = CGenNode::FLOW_FIF;
+ node->m_time = m_cur_time_sec;
+ m_node_gen.add_node(node);
+
+ double old_offset=0.0;
+
+ node= create_node() ;
+ node->m_type = CGenNode::FLOW_SYNC;
+ node->m_time = m_cur_time_sec + SYNC_TIME_OUT ;
+ m_node_gen.add_node(node);
+
+ #ifdef _DEBUG
+ if ( m_preview_mode.getVMode() >2 ){
+
+ CGenNode::DumpHeader(stdout);
+ }
+ #endif
+
+ m_node_gen.flush_file(c_stop_sec,d_time_flow, false,this,old_offset);
+#ifdef VALG
+ CALLGRIND_STOP_INSTRUMENTATION;
+ printf (" %llu \n",os_get_hr_tick_64()-_start_time);
+#endif
+ if ( !CGlobalInfo::m_options.preview.getNoCleanFlowClose() ){
+ /* clean close */
+ m_node_gen.flush_file(m_cur_time_sec, d_time_flow, true,this,old_offset);
+ }
+
+ if (m_preview_mode.getVMode() > 1 ) {
+ fprintf(stdout,"\n\n");
+ fprintf(stdout,"\n\n");
+ fprintf(stdout,"file stats \n");
+ fprintf(stdout,"=================\n");
+ m_stats.dump(stdout);
+ }
+ m_node_gen.close_file(this);
+}
+
+
+bool CFlowGenList::Create(){
+ check_objects_sizes();
+ CPluginCallback::callback= new CPluginCallbackSimple();
+ return (true);
+}
+
+
+void CFlowGenList::generate_p_thread_info(uint32_t num_threads){
+ clean_p_thread_info();
+ BP_ASSERT(num_threads < 64);
+ int i;
+ for (i=0; i<(int)num_threads; i++) {
+ CFlowGenListPerThread * lp= new CFlowGenListPerThread();
+ lp->Create(i,i,this,num_threads);
+ m_threads_info.push_back(lp);
+ }
+}
+
+
+void CFlowGenList::clean_p_thread_info(void){
+ int i;
+ for (i=0; i<(int)m_threads_info.size(); i++) {
+ CFlowGenListPerThread * lp=m_threads_info[i];
+ lp->Delete();
+ delete lp;
+ }
+ m_threads_info.clear();
+}
+
+
+void CFlowGenList::Delete(){
+ clean_p_thread_info();
+ Clean();
+}
+
+int CFlowGenList::load_from_mac_file(std::string file_name) {
+ if ( !utl_is_file_exists (file_name) ){
+        printf(" ERROR mac_file is configured but file %s does not exist \n",file_name.c_str());
+ exit(-1);
+ }
+ is_mac_info_configured = true;
+
+ try {
+ std::ifstream fin((char *)file_name.c_str());
+ YAML::Parser parser(fin);
+ YAML::Node doc;
+
+ parser.GetNextDocument(doc);
+ doc[0] >> m_mac_info;
+ } catch ( const std::exception& e ) {
+ std::cout << e.what() << "\n";
+ m_mac_info.clear();
+ exit(-1);
+ }
+
+    return (0);
+}
+
+
+int CFlowGenList::load_from_yaml(std::string file_name,
+ uint32_t num_threads){
+ is_mac_info_configured = false;
+ uint8_t idx;
+ m_yaml_info.load_from_yaml_file(file_name);
+ if (m_yaml_info.verify_correctness(num_threads) ==false){
+ exit(0);
+ }
+
+    /* copy into global info for better CPU D-cache usage */
+ CGlobalInfo::m_options.preview.set_vlan_mode_enable(m_yaml_info.m_vlan_info.m_enable);
+ CGlobalInfo::m_options.m_vlan_port[0] = m_yaml_info.m_vlan_info.m_vlan_per_port[0];
+ CGlobalInfo::m_options.m_vlan_port[1] = m_yaml_info.m_vlan_info.m_vlan_per_port[1];
+ CGlobalInfo::m_options.preview.set_mac_ip_overide_enable(m_yaml_info.m_mac_replace_by_ip);
+ CGlobalInfo::m_options.m_tcp_aging = m_yaml_info.m_tuple_gen.m_tcp_aging_sec;
+ CGlobalInfo::m_options.m_udp_aging = m_yaml_info.m_tuple_gen.m_udp_aging_sec;
+
+
+ if ( m_yaml_info.m_mac_base.size() != 6 ){
+        printf(" ERROR mac addr in the YAML is not valid, expected 6 bytes \n");
+ exit(0);
+ }
+
+ if (m_yaml_info.m_ipv6_set == true) {
+ // Copy the most significant 96-bits from yaml data
+ for (idx=0; idx<6; idx++){
+ CGlobalInfo::m_options.m_src_ipv6[idx] = m_yaml_info.m_src_ipv6[idx];
+ CGlobalInfo::m_options.m_dst_ipv6[idx] = m_yaml_info.m_dst_ipv6[idx];
+ }
+ }else{
+        // Set the most significant 96 bits to zero, which represents an
+        // IPv4-compatible IPv6 address
+ for (idx=0; idx<6; idx++){
+ CGlobalInfo::m_options.m_src_ipv6[idx] = 0;
+ CGlobalInfo::m_options.m_dst_ipv6[idx] = 0;
+ }
+ }
+
+ int i=0;
+ Clean();
+ bool all_template_has_one_direction=true;
+ for (i=0; i<(int)m_yaml_info.m_vec.size(); i++) {
+ CFlowGeneratorRec * lp=new CFlowGeneratorRec();
+ if ( lp->Create(&m_yaml_info.m_vec[i],&m_yaml_info,i) == false){
+ fprintf(stdout,"\n ERROR reading YAML template files, please verify that they are valid \n\n");
+ exit(-1);
+ return (-1);
+ }
+ m_cap_gen.push_back(lp);
+
+ if (lp->m_flow_info.GetPacket(0)->m_pkt_indication.m_desc.IsBiDirectionalFlow() ) {
+ all_template_has_one_direction=false;
+ }
+ }
+
+ if ( CGlobalInfo::is_learn_mode() && all_template_has_one_direction ) {
+        fprintf(stdout,"\n Warning: --learn mode has no effect when all templates are unidirectional, please remove it \n");
+ }
+ return (0);
+}
+
+
+
+void CFlowGenList::Clean(){
+ int i;
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRec * lp=m_cap_gen[i];
+ lp->Delete();
+ delete lp;
+ }
+ m_cap_gen.clear();
+}
+
+double CFlowGenList::GetCpuUtil(){
+ int i;
+ double c=0.0;
+ for (i=0; i<(int)m_threads_info.size(); i++) {
+ CFlowGenListPerThread * lp=m_threads_info[i];
+ c+=lp->m_cpu_cp_u.GetVal();
+ }
+ return (c/m_threads_info.size());
+}
+
+
+void CFlowGenList::Update(){
+
+ int i;
+ for (i=0; i<(int)m_threads_info.size(); i++) {
+ CFlowGenListPerThread * lp=m_threads_info[i];
+ lp->Update();
+ }
+}
+
+
+
+void CFlowGenList::Dump(FILE *fd){
+ fprintf(fd,"yaml info \n");
+ fprintf(fd,"--------------\n");
+ m_yaml_info.Dump(fd);
+
+ fprintf(fd,"\n");
+ fprintf(fd,"cap file info \n");
+ fprintf(fd,"----------------------\n");
+ int i;
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRec * lp=m_cap_gen[i];
+ lp->Dump(stdout);
+ }
+}
+
+
+void CFlowGenList::DumpPktSize(){
+
+ int i;
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRec * lp=m_cap_gen[i];
+ lp->m_flow_info.dump_pkt_sizes();
+ }
+}
+
+
+void CFlowGenList::DumpCsv(FILE *fd){
+ CFlowStats::DumpHeader(fd);
+
+ CFlowStats stats;
+ CFlowStats sum;
+ int i;
+
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRec * lp=m_cap_gen[i];
+ lp->getFlowStats(&stats);
+ stats.Dump(fd);
+ sum.Add(stats);
+ }
+ fprintf(fd,"\n");
+ sum.m_name= "sum";
+ sum.Dump(fd);
+ sum.m_memory.dump(fd);
+}
+
+
+uint32_t CFlowGenList::get_total_repeat_flows(){
+ uint32_t flows=0;
+ int i;
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRec * lp=m_cap_gen[i];
+ flows+=lp->m_info->m_limit ;
+ }
+ return (flows);
+}
+
+
+double CFlowGenList::get_total_tx_bps(){
+ CFlowStats stats;
+ double total=0.0;
+ int i;
+
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRec * lp=m_cap_gen[i];
+ lp->getFlowStats(&stats);
+ total+=(stats.m_mb_sec);
+ }
+ return (_1Mb_DOUBLE*total);
+}
+
+double CFlowGenList::get_total_pps(){
+
+ CFlowStats stats;
+ double total=0.0;
+ int i;
+
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRec * lp=m_cap_gen[i];
+ lp->getFlowStats(&stats);
+ total+=stats.m_pps;
+ }
+ return (total);
+}
+
+
+double CFlowGenList::get_total_kcps(){
+
+ CFlowStats stats;
+ double total=0.0;
+ int i;
+
+ for (i=0; i<(int)m_cap_gen.size(); i++) {
+ CFlowGeneratorRec * lp=m_cap_gen[i];
+ lp->getFlowStats(&stats);
+ total+= stats.get_normal_cps();
+ }
+ return ((total/1000.0));
+}
+
+double CFlowGenList::get_delta_flow_is_sec(){
+ return (1.0/(1000.0*get_total_kcps()));
+}
+
+
+
+bool CPolicer::update(double dsize,double now_sec){
+ if ( m_last_time ==0.0 ) {
+ /* first time */
+ m_last_time = now_sec;
+ return (true);
+ }
+ if (m_cir == 0.0) {
+ return (false);
+ }
+
+ // check if there is a need to add tokens
+ if(now_sec > m_last_time) {
+ dsec_t dtime=(now_sec - m_last_time);
+        dsec_t dtokens = dtime*m_cir; /* tokens accumulated since the last update */
+        m_level += dtokens;
+ if (m_level > m_bucket_size) {
+ m_level = m_bucket_size;
+ }
+ m_last_time = now_sec;
+ }
+
+ if (m_level > dsize) {
+ m_level -= dsize;
+ return (true);
+ }else{
+ return (false);
+ }
+}
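+
+/*
+ the policer above is a plain token bucket: tokens accumulate at m_cir per second
+ up to m_bucket_size, and each successful update(dsize,..) consumes dsize of them.
+ hypothetical example: with cir=1000 and bucket_size=100, 10 msec of idle time adds
+ 0.010*1000 = 10 tokens, enough for roughly 10 update(1.0,..) calls to succeed
+ before the bucket is drained again
+*/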
+
+
+float CPPSMeasure::add(uint64_t pkts){
+ if ( false == m_start ){
+ m_start=true;
+ m_last_time_msec = os_get_time_msec() ;
+ m_last_pkts=pkts;
+ return (0.0);
+ }
+
+ uint32_t ctime=os_get_time_msec();
+ if ((ctime - m_last_time_msec) <os_get_time_freq() ) {
+ return (m_last_result);
+ }
+
+ uint32_t dtime_msec = ctime-m_last_time_msec;
+ uint32_t dpkts = (pkts - m_last_pkts);
+
+ m_last_time_msec = ctime;
+ m_last_pkts = pkts;
+
+ m_last_result= 0.5*calc_pps(dtime_msec,dpkts) +0.5*(m_last_result);
+ return ( m_last_result );
+}
+
+
+
+CBwMeasure::CBwMeasure() {
+ reset();
+}
+
+void CBwMeasure::reset(void) {
+ m_start=false;
+ m_last_time_msec=0;
+ m_last_bytes=0;
+ m_last_result=0.0;
+};
+
+double CBwMeasure::calc_MBsec(uint32_t dtime_msec,
+ uint64_t dbytes){
+ double rate=0.000008*( ( (double)dbytes*(double)os_get_time_freq())/((double)dtime_msec) );
+ return (rate);
+}
+
+double CBwMeasure::add(uint64_t size) {
+ if ( false == m_start ){
+ m_start=true;
+ m_last_time_msec = os_get_time_msec() ;
+ m_last_bytes=size;
+ return (0.0);
+ }
+
+ uint32_t ctime=os_get_time_msec();
+ if ((ctime - m_last_time_msec) <os_get_time_freq() ) {
+ return (m_last_result);
+ }
+
+ uint32_t dtime_msec = ctime-m_last_time_msec;
+ uint64_t dbytes = size - m_last_bytes;
+
+ m_last_time_msec = ctime;
+ m_last_bytes = size;
+
+ m_last_result= 0.5*calc_MBsec(dtime_msec,dbytes) +0.5*(m_last_result);
+ return ( m_last_result );
+}
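+
+/*
+ both CPPSMeasure::add and CBwMeasure::add above smooth their result with a simple
+ exponential moving average (alpha = 0.5): each new sample and the previous smoothed
+ value contribute half each, so short bursts are damped while a sustained rate change
+ converges within a few samples
+*/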
+
+
+
+
+
+
+void CParserOption::dump(FILE *fd){
+ preview.Dump(fd);
+ fprintf(fd," cfg file : %s \n",cfg_file.c_str());
+ fprintf(fd," mac file : %s \n",mac_file.c_str());
+ fprintf(fd," out file : %s \n",out_file.c_str());
+ fprintf(fd," duration : %.0f \n",m_duration);
+ fprintf(fd," factor : %.0f \n",m_factor);
+ fprintf(fd," latency : %d pkt/sec \n",m_latency_rate);
+ fprintf(fd," zmq_port : %d \n",m_zmq_port);
+ fprintf(fd," telnet_port : %d \n",m_telnet_port);
+ fprintf(fd," expected_ports : %d \n",m_expected_portd);
+ if (preview.get_vlan_mode_enable() ) {
+ fprintf(fd," vlans : [%d,%d] \n",m_vlan_port[0],m_vlan_port[1]);
+ }
+ fprintf(fd," mac spreading: %d \n",(int)m_mac_splitter);
+
+
+ int i;
+ for (i=0; i<MAX_LATENCY_PORTS; i++) {
+ fprintf(fd," port : %d dst:",i);
+ CMacAddrCfg * lp=&m_mac_addr[i];
+ dump_mac_addr(fd,lp->u.m_mac.dest);
+ fprintf(fd," src:");
+ dump_mac_addr(fd,lp->u.m_mac.src);
+ fprintf(fd,"\n");
+ }
+}
+
+#if 0
+
+void CTupleGlobalGenerator::Dump(FILE *fd){
+ fprintf(fd," src:%x dest: %x \n",m_result_src_ip,m_result_dest_ip);
+}
+
+bool CTupleGlobalGenerator::Create(){
+ was_generated=false;
+ return (true);
+}
+
+
+void CTupleGlobalGenerator::Copy(CTupleGlobalGenerator * gen){
+ was_generated=false;
+ m_min_src_ip = gen->m_min_src_ip;
+ m_max_src_ip = gen->m_max_src_ip;
+ m_min_dest_ip = gen->m_min_dest_ip;
+ m_max_dest_ip = gen->m_max_dest_ip;
+}
+
+
+void CTupleGlobalGenerator::Delete(){
+ was_generated=false;
+}
+
+#endif
+
+static uint32_t get_rand_32(uint32_t MinimumRange ,
+ uint32_t MaximumRange );
+
+
+#if 0
+void CTupleGlobalGenerator::Generate(uint32_t thread_id,
+ uint32_t num_addr ){
+ if ( was_generated == false) {
+ /* first time */
+ was_generated = true;
+ cur_src_ip = m_min_src_ip;
+ cur_dst_ip = m_min_dest_ip;
+ }
+
+ if ( ( cur_src_ip + num_addr ) > m_max_src_ip ) {
+ cur_src_ip = m_min_src_ip;
+ }
+
+ /* copy the results */
+ m_result_src_ip = cur_src_ip;
+ m_result_dest_ip = cur_dst_ip;
+ cur_src_ip += num_addr;
+ cur_dst_ip += 1;
+ if (cur_dst_ip > m_max_dest_ip ) {
+ cur_dst_ip = m_min_dest_ip;
+ }
+}
+
+
+
+
+void CTupleTemplateGenerator::Dump(FILE *fd){
+ fprintf(fd," id: %x, %x:%x - %x \n",m_id,m_result_src_ip,m_result_dest_ip,m_result_src_port);
+}
+
+
+bool CTupleTemplateGenerator::Create(CTupleGlobalGenerator * global_gen,
+ uint16_t w,
+ uint16_t wlength,
+ uint32_t _id,
+ uint32_t thread_id){
+ m_was_generated = false;
+ m_thread_id = thread_id;
+ m_lp_global_gen = global_gen;
+ BP_ASSERT(m_lp_global_gen);
+ m_cur_src_port = 1;
+ m_cur_src_port_cnt=0;
+
+ m_w = w;
+ m_wlength = wlength;
+
+ m_id = _id;
+ m_was_init=true;
+ return(true);
+}
+
+void CTupleTemplateGenerator::Delete(){
+ m_was_generated = false;
+ m_was_init=false;
+}
+
+void CTupleTemplateGenerator::Generate_src_dest(){
+ /* TBD need to fix the 100*/
+ m_lp_global_gen->Generate(m_thread_id,m_wlength);
+ m_result_src_ip = m_lp_global_gen->m_result_src_ip;
+
+ m_dest_ip = m_lp_global_gen->m_result_dest_ip;
+ m_result_dest_ip = update_dest_ip(m_dest_ip );
+ m_cnt=0;
+}
+
+uint16_t CTupleTemplateGenerator::GenerateOneSourcePort(){
+ /* handle port */
+ m_cur_src_port++;
+ /* do not use port zero */
+ if (m_cur_src_port == 0) {
+ m_cur_src_port=1;
+ }
+ m_result_src_port=m_cur_src_port;
+ return (m_cur_src_port);
+}
+
+void CTupleTemplateGenerator::Generate(){
+ BP_ASSERT(m_was_init);
+ if ( m_was_generated == false ) {
+ /* first time */
+ Generate_src_dest();
+ m_was_generated = true;
+ }else{
+ /* ip+cnt,dest+cnt*/
+ m_cnt++;
+ if ( m_cnt >= m_wlength ) {
+ m_cnt =0;
+ m_result_src_ip -=m_wlength;
+ m_result_dest_ip = m_dest_ip;
+ m_cur_src_port_cnt++;
+ if (m_cur_src_port_cnt >= m_w ) {
+ Generate_src_dest();
+ m_cur_src_port_cnt=0;
+ }
+ }
+ m_result_src_ip += 1;
+ m_result_dest_ip = update_dest_ip(m_dest_ip +m_cnt );
+ }
+
+
+ /* handle port */
+ m_cur_src_port++;
+ /* do not use port zero */
+ if (m_cur_src_port == 0) {
+ m_cur_src_port=1;
+ }
+ m_result_src_ip =update_src_ip( m_result_src_ip );
+ m_result_src_port=m_cur_src_port;
+}
+
+#endif
+
+
+static uint32_t get_rand_32(uint32_t MinimumRange ,
+ uint32_t MaximumRange ){
+ enum {RANDS_NUM = 2 , RAND_MAX_BITS = 0xf , UNSIGNED_INT_BITS = 0x20 , TWO_BITS_MASK = 0x3};
+
+    const double TWO_POWER_32_BITS = 4294967296.0; /* 2^32 */
+
+ uint32_t RandomNumber = 0;
+ for (int i = 0 ; i < RANDS_NUM;i++) {
+ RandomNumber = (RandomNumber<<RAND_MAX_BITS) + rand();
+ }
+
+ RandomNumber = (RandomNumber<<(UNSIGNED_INT_BITS - RAND_MAX_BITS * RANDS_NUM)) + (rand() | TWO_BITS_MASK);
+
+ uint32_t Range;
+ if ((Range = MaximumRange - MinimumRange) == 0xffffffff) {
+ return RandomNumber;
+ }
+ return (uint32_t)(((Range + 1) / TWO_POWER_32_BITS * RandomNumber) + MinimumRange );
+}
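+
+/*
+ get_rand_32 above composes several 15-bit rand() results into a 32-bit value and then
+ scales it linearly into [MinimumRange, MaximumRange]; it is only meant for spreading
+ tuples, not for cryptographic use
+*/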
+
+
+
+int CNullIF::send_node(CGenNode * node){
+ #if 0
+ CFlowPktInfo * lp=node->m_pkt_info;
+ rte_mbuf_t * buf=lp->generate_new_mbuf(node);
+ //rte_pktmbuf_dump(buf, buf->pkt_len);
+ //sending it ??
+ // free it here as if driver does
+ rte_pktmbuf_free(buf);
+ #endif
+ return (0);
+}
+
+
+int CErfIF::send_node(CGenNode * node){
+ if ( m_preview_mode->getFileWrite() ){
+
+ CFlowPktInfo * lp=node->m_pkt_info;
+ rte_mbuf_t * m=lp->generate_new_mbuf(node);
+ fill_pkt(m_raw,m);
+ CPktNsecTimeStamp t_c(node->m_time);
+ m_raw->time_nsec = t_c.m_time_nsec;
+ m_raw->time_sec = t_c.m_time_sec;
+
+ pkt_dir_t dir=node->cur_interface_dir();
+ uint8_t p_id = (uint8_t)dir;
+
+ m_raw->setInterface(p_id);
+
+ /* update mac addr dest/src 12 bytes */
+ uint8_t *p=(uint8_t *)m_raw->raw;
+ memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
+
+ /* If vlan is enabled, add vlan header */
+ if ( unlikely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
+ /* retrieve vlan ID and form vlan tag */
+ uint8_t vlan_port = (node->m_src_ip &1);
+ uint16_t vlan_protocol = EthernetHeader::Protocol::VLAN;
+ uint16_t vlan_id = CGlobalInfo::m_options.m_vlan_port[vlan_port];
+ uint32_t vlan_tag = (vlan_protocol << 16) | vlan_id;
+ vlan_tag = PKT_HTONL(vlan_tag);
+
+ /* insert vlan tag and adjust packet size */
+ memcpy(cbuff+4, p+12, m_raw->pkt_len-12);
+ memcpy(cbuff, &vlan_tag, 4);
+ memcpy(p+12, cbuff, m_raw->pkt_len-8);
+ m_raw->pkt_len += 4;
+ }
+
+ //utl_DumpBuffer(stdout,p, 12,0);
+
+ BP_ASSERT(m_writer);
+
+ bool res=m_writer->write_packet(m_raw);
+
+ //utl_DumpBuffer(stdout,m_raw->raw,m_raw->pkt_len,0);
+
+ BP_ASSERT(res);
+ rte_pktmbuf_free(m);
+ }
+ return (0);
+}
+
+
+int CErfIF::flush_tx_queue(void){
+ return (0);
+}
+
+
+
+const uint8_t sctp_pkt[]={
+
+ 0x00,0x04,0x96,0x08,0xe0,0x40,
+ 0x00,0x0e,0x2e,0x24,0x37,0x5f,
+ 0x08,0x00,
+
+ 0x45,0x02,0x00,0x30,
+ 0x00,0x00,0x40,0x00,
+ 0x40,0x84,0xbd,0x04,
+ 0x9b,0xe6,0x18,0x9b, //sIP
+ 0xcb,0xff,0xfc,0xc2, //DIP
+
+ 0x80,0x44,//SPORT
+ 0x00,0x50,//DPORT
+
+ 0x00,0x00,0x00,0x00, //checksum
+
+ 0x11,0x22,0x33,0x44, // magic
+ 0x00,0x00,0x00,0x00, //64 bit counter
+ 0x00,0x00,0x00,0x00,
+ 0x00,0x01,0xa0,0x00, //seq
+ 0x00,0x00,0x00,0x00,
+
+};
+
+// 20+8+20
+
+void CLatencyPktInfo::Create(){
+ m_packet = new CCapPktRaw( sizeof(sctp_pkt) );
+ m_packet->pkt_cnt=0;
+ m_packet->time_sec=0;
+ m_packet->time_nsec=0;
+ memcpy(m_packet->raw,sctp_pkt,sizeof(sctp_pkt));
+ m_packet->pkt_len=sizeof(sctp_pkt);
+
+ m_pkt_indication.m_packet =m_packet;
+
+ m_pkt_indication.m_ether = (EthernetHeader *)m_packet->raw;
+ m_pkt_indication.l3.m_ipv4=(IPHeader *)(m_packet->raw+14);
+ m_pkt_indication.m_is_ipv6 = false;
+    m_pkt_indication.l4.m_udp=(UDPHeader *)(m_packet->raw+14+20);
+    m_pkt_indication.m_payload=(uint8_t *)(m_packet->raw+14+20+16);
+ m_pkt_indication.m_payload_len=0;
+ m_pkt_indication.m_packet_padding=4;
+
+
+ m_pkt_indication.m_ether_offset =0;
+ m_pkt_indication.m_ip_offset =14;
+ m_pkt_indication.m_udp_tcp_offset = 34;
+ m_pkt_indication.m_payload_offset = 34+8;
+
+ CPacketDescriptor * lpd=&m_pkt_indication.m_desc;
+ lpd->Clear();
+ lpd->SetInitSide(true);
+ lpd->SetSwapTuple(false);
+ lpd->SetIsValidPkt(true);
+ lpd->SetIsUdp(true);
+ lpd->SetIsLastPkt(true);
+ m_pkt_info.Create(&m_pkt_indication);
+
+ memset(&m_dummy_node,0,sizeof(m_dummy_node));
+
+ m_dummy_node.set_socket_id( CGlobalInfo::m_socket.port_to_socket(0) );
+
+ m_dummy_node.m_time =0.1;
+ m_dummy_node.m_pkt_info = &m_pkt_info;
+ m_dummy_node.m_dest_ip = 0;
+ m_dummy_node.m_src_ip = 0;
+ m_dummy_node.m_src_port = 0x11;
+ m_dummy_node.m_flow_id =0;
+ m_dummy_node.m_flags =CGenNode::NODE_FLAGS_LATENCY;
+
+}
+
+
+rte_mbuf_t * CLatencyPktInfo::generate_pkt(int port_id,uint32_t extern_ip){
+
+    bool is_client_to_server = ((port_id % 2) == 0);
+
+ int dual_port_index=(port_id>>1);
+ uint32_t c=m_client_ip.v4;
+ uint32_t s=m_server_ip.v4;
+ if ( extern_ip ){
+ c=extern_ip;
+ }
+
+    if (!is_client_to_server) {
+ /*swap */
+ uint32_t t=c;
+ c=s;
+ s=t;
+ }
+ uint32_t mask=dual_port_index*m_dual_port_mask;
+ if ( extern_ip==0 ){
+ c+=mask;
+ }
+ s+=mask;
+ m_dummy_node.m_src_ip = c;
+ m_dummy_node.m_dest_ip = s;
+
+ rte_mbuf_t * m=m_pkt_info.generate_new_mbuf(&m_dummy_node);
+ return (m);
+
+
+}
+
+
+void CLatencyPktInfo::set_ip(uint32_t src,
+ uint32_t dst,
+ uint32_t dual_port_mask){
+
+ m_client_ip.v4=src;
+ m_server_ip.v4=dst;
+ m_dual_port_mask=dual_port_mask;
+
+}
+
+
+void CLatencyPktInfo::Delete(){
+ m_pkt_info.Delete();
+ delete m_packet;
+}
+
+void CCPortLatency::reset(){
+ m_rx_seq =m_tx_seq;
+ m_pad = 0;
+
+ m_tx_pkt_err=0;
+ m_tx_pkt_ok =0;
+ m_pkt_ok=0;
+ m_no_magic=0;
+ m_unsup_prot=0;
+ m_no_id=0;
+ m_seq_error=0;
+ m_length_error=0;
+ m_no_ipv4_option=0;
+ m_hist.Reset();
+}
+
+
+static uint8_t nat_is_port_can_send(uint8_t port_id){
+ uint8_t offset= ((port_id>>1)<<1);
+ uint8_t client_index = (port_id %2);
+ return (client_index ==0 ?1:0);
+}
+
+
+bool CCPortLatency::Create(CLatencyManager * parent,
+ uint8_t id,
+ uint16_t offset,
+ uint16_t pkt_size,
+ CCPortLatency * rx_port){
+ m_parent = parent;
+ m_id = id;
+ m_tx_seq =0x12345678;
+ m_offset = offset;
+ m_pkt_size = pkt_size;
+ m_rx_port = rx_port;
+ m_nat_can_send = nat_is_port_can_send(m_id);
+ m_nat_learn = m_nat_can_send;
+ m_nat_external_ip=0;
+
+ m_hist.Create();
+ reset();
+ return (true);
+}
+
+void CCPortLatency::Delete(){
+ m_hist.Delete();
+}
+
+void CCPortLatency::update_packet(rte_mbuf_t * m){
+ uint8_t *p=rte_pktmbuf_mtod(m, uint8_t*);
+ /* update mac addr dest/src 12 bytes */
+ memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(m_id),12);
+
+ latency_header * h=(latency_header *)(p+m_offset);
+ h->magic = LATENCY_MAGIC | m_id ;
+ h->time_stamp = os_get_hr_tick_64();
+ h->seq = m_tx_seq;
+ m_tx_seq++;
+}
+
+
+void CCPortLatency::DumpShortHeader(FILE *fd){
+
+
+ fprintf(fd," if| tx_ok , rx_ok , rx ,error, average , max , Jitter , max window \n");
+ fprintf(fd," | , , check, , latency(usec),latency (usec) ,(usec) , \n");
+ fprintf(fd," ---------------------------------------------------------------------------------------------------------------- \n");
+}
+
+
+
+std::string CCPortLatency::get_field(std::string name,float f){
+ char buff[200];
+ sprintf(buff,"\"%s-%d\":%.1f,",name.c_str(),m_id,f);
+ return (std::string(buff));
+}
+
+
+void CCPortLatency::dump_json_v2(std::string & json ){
+ char buff[200];
+ sprintf(buff,"\"port-%d\": {",m_id);
+ json+=std::string(buff);
+ m_hist.dump_json("hist",json);
+ dump_counters_json(json);
+ json+="},";
+}
+
+void CCPortLatency::dump_json(std::string & json ){
+ json += get_field("avg",m_hist.get_average_latency() );
+ json += get_field("max",m_hist.get_max_latency() );
+ json += get_field("c-max",m_hist.get_max_latency_last_update() );
+ json += get_field("error",(float)(m_unsup_prot+m_no_magic+m_no_id+m_seq_error+m_length_error) );
+ json += get_field("jitter",(float)get_jitter_usec() );
+}
+
+
+void CCPortLatency::DumpShort(FILE *fd){
+
+ m_hist.update();
+ fprintf(fd,"%8lu,%8lu,%10lu,%4lu,",
+ m_tx_pkt_ok,
+ m_pkt_ok,
+ m_rx_check,
+ m_unsup_prot+m_no_magic+m_no_id+m_seq_error+m_length_error+m_no_ipv4_option+m_tx_pkt_err
+ );
+
+ fprintf(fd," %8.0f ,%8.0f,%8d ",
+ m_hist.get_average_latency(),
+ m_hist.get_max_latency(),
+ get_jitter_usec()
+ );
+ fprintf(fd," | ");
+ m_hist.DumpWinMax(fd);
+
+}
+
+#define DPL_J(f) json+=add_json(#f,f);
+#define DPL_J_LAST(f) json+=add_json(#f,f,true);
+
+void CCPortLatency::dump_counters_json(std::string & json ){
+
+ json+="\"stats\" : {";
+ DPL_J(m_tx_pkt_ok);
+ DPL_J(m_tx_pkt_err);
+ DPL_J(m_pkt_ok);
+ DPL_J(m_unsup_prot);
+ DPL_J(m_no_magic);
+ DPL_J(m_no_id);
+ DPL_J(m_seq_error);
+ DPL_J(m_length_error);
+ DPL_J(m_no_ipv4_option);
+ json+=add_json("m_jitter",get_jitter_usec());
+ /* must be last */
+ DPL_J_LAST(m_rx_check);
+ json+="}";
+
+
+}
+
+void CCPortLatency::DumpCounters(FILE *fd){
+ #define DP_A1(f) if (f) fprintf(fd," %-40s : %llu \n",#f,f)
+
+ fprintf(fd," counter \n");
+ fprintf(fd," -----------\n");
+
+ DP_A1(m_tx_pkt_err);
+ DP_A1(m_tx_pkt_ok);
+ DP_A1(m_pkt_ok);
+ DP_A1(m_unsup_prot);
+ DP_A1(m_no_magic);
+ DP_A1(m_no_id);
+ DP_A1(m_seq_error);
+ DP_A1(m_length_error);
+ DP_A1(m_rx_check);
+ DP_A1(m_no_ipv4_option);
+
+
+ fprintf(fd," -----------\n");
+ m_hist.Dump(fd);
+ fprintf(fd," %-40s : %llu \n","jitter",get_jitter_usec());
+}
+
+bool CCPortLatency::dump_packet(rte_mbuf_t * m){
+    fprintf(stdout," %.03f dump packet ..\n",now_sec());
+ uint8_t *p=rte_pktmbuf_mtod(m, uint8_t*);
+ uint16_t pkt_size=rte_pktmbuf_pkt_len(m);
+ utl_DumpBuffer(stdout,p,pkt_size,0);
+ return (0);
+
+
+
+ if (pkt_size < ( sizeof(CRx_check_header)+14+20) ) {
+ assert(0);
+ }
+ CRx_check_header * lp=(CRx_check_header *)(p+pkt_size-sizeof(CRx_check_header));
+
+ lp->dump(stdout);
+
+
+ uint16_t vlan_offset=0;
+ if ( unlikely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
+ vlan_offset=4;
+ }
+// utl_DumpBuffer(stdout,p,pkt_size,0);
+ return (0);
+
+}
+
+bool CCPortLatency::check_rx_check(rte_mbuf_t * m){
+ m_rx_check++;
+ return (true);
+}
+
+bool CCPortLatency::do_learn(uint32_t external_ip){
+ m_nat_learn=true;
+ m_nat_can_send=true;
+ m_nat_external_ip=external_ip;
+ return (true);
+}
+
+bool CCPortLatency::check_packet(rte_mbuf_t * m,CRx_check_header * & rx_p){
+
+ CSimplePacketParser parser(m);
+ if ( !parser.Parse() ){
+ m_unsup_prot++; // Unsupported protocol
+ return (false);
+ }
+
+ uint16_t pkt_size=rte_pktmbuf_pkt_len(m);
+ /* check if CRC was extracted */
+ if ( parser.getPktSize() == pkt_size-4) {
+        // CRC was not stripped by the driver (VM E1000 driver issue), strip it here
+ pkt_size=pkt_size-4;
+ }
+
+ uint16_t vlan_offset=parser.m_vlan_offset;
+ uint8_t *p=rte_pktmbuf_mtod(m, uint8_t*);
+
+ rx_p=(CRx_check_header *)0;
+ bool managed_by_ip_options=false;
+ bool is_rx_check=true;
+
+ if ( !parser.IsLatencyPkt() ){
+
+ #ifdef NAT_TRACE_
+ printf(" %.3f RX : got packet !!! \n",now_sec() );
+ #endif
+
+ /* ipv6+rx-check */
+ if ( parser.m_ipv6 ) {
+ /* if we have ipv6 packet */
+ if (parser.m_protocol == RX_CHECK_V6_OPT_TYPE) {
+ if ( get_is_rx_check_mode() ){
+ m_rx_check++;
+ rx_p=(CRx_check_header *)((uint8_t*)parser.m_ipv6 +IPv6Header::DefaultSize);
+ return (true);
+ }
+
+ }
+ m_seq_error++;
+ return (false);
+ }
+
+ uint8_t opt_len = parser.m_ipv4->getOptionLen();
+ uint8_t *opt_ptr = parser.m_ipv4->getOption();
+ /* Process IP option header(s) */
+ while ( opt_len != 0 ) {
+ switch (*opt_ptr) {
+ case RX_CHECK_V4_OPT_TYPE:
+ /* rx-check option header */
+ if ( ( !get_is_rx_check_mode() ) ||
+ (opt_len < RX_CHECK_LEN) ) {
+ m_seq_error++;
+ return (false);
+ }
+ m_rx_check++;
+ rx_p=(CRx_check_header *)opt_ptr;
+ opt_len -= RX_CHECK_LEN;
+ opt_ptr += RX_CHECK_LEN;
+ break;
+ case CNatOption::noIPV4_OPTION:
+ /* NAT learn option header */
+ CNatOption *lp;
+ if ( ( !CGlobalInfo::is_learn_mode() ) ||
+ (opt_len < CNatOption::noOPTION_LEN) ) {
+ m_seq_error++;
+ return (false);
+ }
+ lp = (CNatOption *)opt_ptr;
+ if ( !lp->is_valid_ipv4_magic() ) {
+ m_no_ipv4_option++;
+ return (false);
+ }
+ m_parent->get_nat_manager()->handle_packet_ipv4(lp,parser.m_ipv4);
+ opt_len -= CNatOption::noOPTION_LEN;
+ opt_ptr += CNatOption::noOPTION_LEN;
+ break;
+ default:
+ m_seq_error++;
+ return (false);
+ } // End of switch
+ } // End of while
+
+ return (true);
+ } // End of check for non-latency packet
+
+ if ( CGlobalInfo::is_learn_mode() && (m_nat_learn ==false) ) {
+ do_learn(parser.m_ipv4->getSourceIp());
+ }
+
+ if ( (pkt_size-vlan_offset) != m_pkt_size ) {
+ m_length_error++;
+ return (false);
+ }
+
+ latency_header * h=(latency_header *)(p+m_offset+vlan_offset);
+
+ if ( (h->magic & 0xffffff00) != LATENCY_MAGIC ){
+ m_no_magic++;
+ return (false);
+ }
+
+ if ( h->seq != m_rx_seq ){
+ m_seq_error++;
+ m_rx_seq =h->seq +1;
+ return (false);
+ }else{
+ m_rx_seq++;
+ }
+ m_pkt_ok++;
+ uint64_t d = (os_get_hr_tick_64() - h->time_stamp );
+ dsec_t ctime=ptime_convert_hr_dsec(d);
+ m_hist.Add(ctime);
+ m_jitter.calc(ctime);
+ return (true);
+}
+
+void CLatencyManager::Delete(){
+ m_pkt_gen.Delete();
+
+ if ( get_is_rx_check_mode() ) {
+ m_rx_check_manager.Delete();
+ }
+ if ( CGlobalInfo::is_learn_mode() ){
+ m_nat_check_manager.Delete();
+ }
+ m_cpu_cp_u.Delete();
+}
+
+/* 0->1
+ 1->0
+ 2->3
+ 3->2
+*/
+static uint8_t swap_port(uint8_t port_id){
+ uint8_t offset= ((port_id>>1)<<1);
+ uint8_t client_index = (port_id %2);
+    return (offset + (client_index ^ 1));
+}
+
+
+
+bool CLatencyManager::Create(CLatencyManagerCfg * cfg){
+ m_max_ports=cfg->m_max_ports;
+ assert (m_max_ports<=MAX_LATENCY_PORTS);
+ assert ((m_max_ports%2)==0);
+ m_port_mask =0xffffffff;
+ m_do_stop =false;
+ m_is_active =false;
+ m_pkt_gen.Create();
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ CCPortLatency * lpo=&m_ports[swap_port(i)].m_port;
+
+ lp->m_io=cfg->m_ports[i];
+ lp->m_port.Create(this,
+ i,
+ m_pkt_gen.get_payload_offset(),
+ m_pkt_gen.get_pkt_size(),lpo );
+ }
+ m_cps= cfg->m_cps;
+ m_d_time =ptime_convert_dsec_hr((1.0/m_cps));
+ m_delta_sec =(1.0/m_cps);
+
+
+ if ( get_is_rx_check_mode() ) {
+ assert(m_rx_check_manager.Create());
+ m_rx_check_manager.m_cur_time= now_sec();
+ }
+
+
+ m_pkt_gen.set_ip(cfg->m_client_ip.v4,cfg->m_server_ip.v4,cfg->m_dual_port_mask);
+ m_cpu_cp_u.Create(&m_cpu_dp_u);
+ if ( CGlobalInfo::is_learn_mode() ){
+ m_nat_check_manager.Create();
+ }
+ return (true);
+}
+
+
+void CLatencyManager::send_pkt_all_ports(){
+ m_start_time = os_get_hr_tick_64();
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ if ( m_port_mask & (1<<i) ){
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ if (lp->m_port.can_send_packet() ){
+ rte_mbuf_t * m=m_pkt_gen.generate_pkt(i,lp->m_port.external_nat_ip());
+ lp->m_port.update_packet(m);
+ if ( lp->m_io->tx(m) == 0 ){
+ lp->m_port.m_tx_pkt_ok++;
+ }else{
+ lp->m_port.m_tx_pkt_err++;
+ }
+
+ }
+ }
+ }
+}
+
+
+void CLatencyManager::wait_for_rx_dump(){
+ rte_mbuf_t * rx_pkts[64];
+ int i;
+ while ( true ) {
+ rte_pause();
+ rte_pause();
+ rte_pause();
+ for (i=0; i<m_max_ports; i++) {
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ rte_mbuf_t * m;
+ uint16_t cnt_p = lp->m_io->rx_burst(rx_pkts, 64);
+ if (cnt_p) {
+ int j;
+ for (j=0; j<cnt_p; j++) {
+ m=rx_pkts[j] ;
+ lp->m_port.dump_packet( m);
+ rte_pktmbuf_free(m);
+ }
+ } /*cnt_p*/
+ }/* for*/
+ }
+}
+
+
+void CLatencyManager::handle_rx_pkt(CLatencyManagerPerPort * lp,
+ rte_mbuf_t * m){
+ CRx_check_header *rxc;
+ lp->m_port.check_packet(m,rxc);
+ if ( unlikely(rxc!=NULL) ){
+ m_rx_check_manager.handle_packet(rxc);
+ }
+ rte_pktmbuf_free(m);
+}
+
+void CLatencyManager::handle_latecy_pkt_msg(uint8_t thread_id,
+ CGenNodeLatencyPktInfo * msg){
+
+ assert(msg->m_latency_offset==0xdead);
+
+ uint8_t rx_port_index=(thread_id<<1)+(msg->m_dir&1);
+ assert( rx_port_index <m_max_ports ) ;
+ CLatencyManagerPerPort * lp=&m_ports[rx_port_index];
+ handle_rx_pkt(lp,(rte_mbuf_t *)msg->m_pkt);
+}
+
+
+void CLatencyManager::run_rx_queue_msgs(uint8_t thread_id,
+ CNodeRing * r){
+
+ while ( true ) {
+ CGenNode * node;
+ if ( r->Dequeue(node)!=0 ){
+ break;
+ }
+ assert(node);
+
+ CGenNodeMsgBase * msg=(CGenNodeMsgBase *)node;
+
+ CGenNodeLatencyPktInfo * msg1=(CGenNodeLatencyPktInfo *)msg;
+
+ uint8_t msg_type = msg->m_msg_type;
+ switch (msg_type ) {
+ case CGenNodeMsgBase::LATENCY_PKT:
+ handle_latecy_pkt_msg(thread_id,(CGenNodeLatencyPktInfo *) msg);
+ break;
+ default:
+ printf("ERROR latency-thread message type is not valid %d \n",msg_type);
+ assert(0);
+ }
+
+ CGlobalInfo::free_node(node);
+ }
+}
+
+void CLatencyManager::try_rx_queues(){
+
+ CMessagingManager * rx_dp = CMsgIns::Ins()->getRxDp();
+ uint8_t threads=CMsgIns::Ins()->get_num_threads();
+ int ti;
+ for (ti=0; ti<(int)threads; ti++) {
+ CNodeRing * r = rx_dp->getRingDpToCp(ti);
+ if ( !r->isEmpty() ){
+ run_rx_queue_msgs((uint8_t)ti,r);
+ }
+ }
+}
+
+
+void CLatencyManager::try_rx(){
+ rte_mbuf_t * rx_pkts[64];
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ rte_mbuf_t * m;
+ m_cpu_dp_u.start_work();
+ /* try to read 64 packets to clean up the queue */
+ uint16_t cnt_p = lp->m_io->rx_burst(rx_pkts, 64);
+ if (cnt_p) {
+ int j;
+ for (j=0; j<cnt_p; j++) {
+ m=rx_pkts[j] ;
+ handle_rx_pkt(lp,m);
+ }
+ /* commit only if there was work to do ! */
+ m_cpu_dp_u.commit();
+ }/* if work */
+ }// all ports
+}
+
+
+void CLatencyManager::reset(){
+
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ lp->m_port.reset();
+ }
+
+}
+
+void CLatencyManager::start(int iter){
+ m_do_stop =false;
+ m_is_active =false;
+ int cnt=0;
+
+ double n_time;
+ CGenNode * node = new CGenNode();
+ node->m_type = CGenNode::FLOW_SYNC; /* general stuff */
+ node->m_time = now_sec()+0.007;
+ m_p_queue.push(node);
+
+ node = new CGenNode();
+ node->m_type = CGenNode::FLOW_PKT; /* latency */
+ node->m_time = now_sec(); /* 1/cps rate */
+ m_p_queue.push(node);
+ bool do_try_rx_queue =CGlobalInfo::m_options.preview.get_vm_one_queue_enable()?true:false;
+
+
+ while ( !m_p_queue.empty() ) {
+ node = m_p_queue.top();
+ n_time = node->m_time;
+
+ /* wait for event */
+ while ( true ) {
+ double dt = now_sec() - n_time ;
+ if (dt> (0.0)) {
+ break;
+ }
+ if (do_try_rx_queue){
+ try_rx_queues();
+ }
+ try_rx();
+ rte_pause();
+ }
+
+ switch (node->m_type) {
+ case CGenNode::FLOW_SYNC:
+ if ( CGlobalInfo::is_learn_mode() ) {
+ m_nat_check_manager.handle_aging();
+ }
+
+ m_p_queue.pop();
+ node->m_time += SYNC_TIME_OUT;
+ m_p_queue.push(node);
+
+ break;
+ case CGenNode::FLOW_PKT:
+ m_cpu_dp_u.start_work();
+ send_pkt_all_ports();
+ m_p_queue.pop();
+ node->m_time += m_delta_sec;
+ m_p_queue.push(node);
+ m_cpu_dp_u.commit();
+ break;
+ }
+
+ /* this will be called every sync which is 1msec */
+ if ( m_do_stop ) {
+ break;
+ }
+ if ( iter>0 ){
+ if ( ( cnt>iter) ){
+ printf("stop due iter %d %d \n",iter);
+ break;
+ }
+ }
+ cnt++;
+ }
+
+ /* free all nodes in the queue */
+ while (!m_p_queue.empty()) {
+ node = m_p_queue.top();
+ m_p_queue.pop();
+ delete node;
+ }
+
+ printf(" latency daemon has stopped\n");
+ if ( get_is_rx_check_mode() ) {
+ m_rx_check_manager.tw_drain();
+ }
+
+}
+
+void CLatencyManager::stop(){
+ m_do_stop =true;
+}
+
+bool CLatencyManager::is_active(){
+ return (m_is_active);
+}
+
+
+double CLatencyManager::get_max_latency(){
+ double l=0.0;
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ if ( l <lp->m_port.m_hist.get_max_latency() ){
+ l=lp->m_port.m_hist.get_max_latency();
+ }
+ }
+ return (l);
+}
+
+double CLatencyManager::get_avr_latency(){
+ double l=0.0;
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ if ( l <lp->m_port.m_hist.get_average_latency() ){
+ l=lp->m_port.m_hist.get_average_latency();
+ }
+ }
+ return (l);
+}
+
+uint64_t CLatencyManager::get_total_pkt(){
+ int i;
+ uint64_t t=0;
+ for (i=0; i<m_max_ports; i++) {
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ t+=lp->m_port.m_tx_pkt_ok ;
+ }
+ return t;
+}
+
+uint64_t CLatencyManager::get_total_bytes(){
+ int i;
+ uint64_t t=0;
+ for (i=0; i<m_max_ports; i++) {
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ t+=lp->m_port.m_tx_pkt_ok* (m_pkt_gen.get_pkt_size()+4);
+ }
+ return t;
+
+}
+
+
+bool CLatencyManager::is_any_error(){
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ if ( lp->m_port.is_any_err() ){
+ return (true);
+ }
+ }
+ return (false);
+}
+
+
+void CLatencyManager::dump_json(std::string & json ){
+ json="{\"name\":\"trex-latecny\",\"type\":0,\"data\":{";
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ lp->m_port.dump_json(json);
+ }
+
+ json+="\"unknown\":0}}" ;
+
+}
+
+void CLatencyManager::dump_json_v2(std::string & json ){
+ json="{\"name\":\"trex-latecny-v2\",\"type\":0,\"data\":{";
+ json+=add_json("cpu_util",m_cpu_cp_u.GetVal());
+
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ lp->m_port.dump_json_v2(json);
+ }
+
+ json+="\"unknown\":0}}" ;
+
+}
+
+void CLatencyManager::DumpRxCheck(FILE *fd){
+ if ( get_is_rx_check_mode() ) {
+ fprintf(fd," rx checker : \n");
+ m_rx_check_manager.DumpShort(fd);
+ m_rx_check_manager.Dump(fd);
+ }
+}
+
+void CLatencyManager::DumpShortRxCheck(FILE *fd){
+ if ( get_is_rx_check_mode() ) {
+ m_rx_check_manager.DumpShort(fd);
+ }
+}
+
+void CLatencyManager::rx_check_dump_json(std::string & json){
+ if ( get_is_rx_check_mode() ) {
+ m_rx_check_manager.dump_json(json );
+ }
+}
+
+void CLatencyManager::update(){
+ m_cpu_cp_u.Update() ;
+}
+
+void CLatencyManager::DumpShort(FILE *fd){
+ int i;
+ fprintf(fd," Cpu Utilization : %2.1f %% \n",m_cpu_cp_u.GetVal());
+ CCPortLatency::DumpShortHeader(fd);
+ for (i=0; i<m_max_ports; i++) {
+ fprintf(fd," %d | ",i);
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ lp->m_port.DumpShort(fd);
+ fprintf(fd,"\n");
+ }
+
+
+}
+
+void CLatencyManager::Dump(FILE *fd){
+ int i;
+ fprintf(fd," cpu : %2.1f %% \n",m_cpu_cp_u.GetVal());
+ for (i=0; i<m_max_ports; i++) {
+ fprintf(fd," port %d \n",i);
+ fprintf(fd," -----------------\n");
+ CLatencyManagerPerPort * lp=&m_ports[i];
+ lp->m_port.DumpCounters(fd);
+ }
+}
+
+void CLatencyManager::DumpRxCheckVerification(FILE *fd,
+ uint64_t total_tx_rx_check){
+ if ( !get_is_rx_check_mode() ) {
+ fprintf(fd," rx_checker is disabled \n");
+ return;
+ }
+ fprintf(fd," rx_check Tx : %u \n",total_tx_rx_check);
+ fprintf(fd," rx_check Rx : %u \n",m_rx_check_manager.getTotalRx() );
+ fprintf(fd," rx_check verification :" );
+ if (m_rx_check_manager.getTotalRx() == total_tx_rx_check) {
+ fprintf(fd," OK \n" );
+ }else{
+ fprintf(fd," FAIL \n" );
+ }
+}
+
+
+
+void CTcpSeq::update(uint8_t *p, CFlowPktInfo *pkt_info, int16_t s_size){
+ TCPHeader *tcp= (TCPHeader *)(p+pkt_info->m_pkt_indication.getFastTcpOffset());
+ uint32_t seqnum, acknum;
+
+ // This routine will adjust the TCP segment size for packets
+ // based on the modifications made by the plugins.
+ // Basically it will keep track of the size changes
+ // and adjust the TCP sequence numbers accordingly.
+
+ bool is_init=pkt_info->m_pkt_indication.m_desc.IsInitSide();
+
+ // Update TCP seq number
+ seqnum = tcp->getSeqNumber();
+ acknum = tcp->getAckNumber();
+ if (is_init) {
+ // Packet is from client
+ seqnum += client_seq_delta;
+ acknum += server_seq_delta;
+ } else {
+ // Packet is from server
+ seqnum += server_seq_delta;
+ acknum += client_seq_delta;
+ }
+ tcp->setSeqNumber(seqnum);
+ tcp->setAckNumber(acknum);
+
+ // Adjust delta being tracked
+ if (is_init) {
+ client_seq_delta += s_size;
+ } else {
+ server_seq_delta += s_size;
+ }
+}
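+
+/* Worked example (illustrative): assume a plugin grew one client packet by
+   s_size = +6 bytes. update() first applies the current deltas (still 0) and
+   then records client_seq_delta += 6, so every later client packet gets its
+   SEQ shifted by +6 and every later server packet gets its ACK shifted by +6,
+   which keeps the rewritten TCP stream self-consistent. */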
+
+
+void on_node_first(uint8_t plugin_id,CGenNode * node,
+ CFlowYamlInfo * template_info,
+ CTupleTemplateGeneratorSmart * tuple_gen,
+ CFlowGenListPerThread * flow_gen){
+
+ if (CPluginCallback::callback) {
+ CPluginCallback::callback->on_node_first(plugin_id,node,template_info, tuple_gen,flow_gen);
+ }
+}
+
+void on_node_last(uint8_t plugin_id,CGenNode * node){
+ if (CPluginCallback::callback) {
+ CPluginCallback::callback->on_node_last(plugin_id,node);
+ }
+
+}
+
+rte_mbuf_t * on_node_generate_mbuf(uint8_t plugin_id,CGenNode * node,CFlowPktInfo * pkt_info){
+ if (CPluginCallback::callback) {
+ return ( CPluginCallback::callback->on_node_generate_mbuf(plugin_id,node,pkt_info) );
+ }
+ return (NULL);
+}
+
+
+class CPlugin_rtsp : public CTcpSeq {
+public:
+ void * m_gen;
+ uint16_t rtp_client_0;
+ uint16_t rtp_client_1;
+};
+
+
+void CPluginCallbackSimple::on_node_first(uint8_t plugin_id,
+ CGenNode * node,
+ CFlowYamlInfo * template_info,
+ CTupleTemplateGeneratorSmart * tuple_gen,
+ CFlowGenListPerThread * flow_gen ){
+ //printf(" on on_node_first callback %d node %x! \n",(int)plugin_id,node);
+ /* generate 2 ports from client side */
+
+ if ( (plugin_id == mpRTSP) || (plugin_id == mpSIP_VOICE) ) {
+ CPlugin_rtsp * lpP=new CPlugin_rtsp();
+ assert(lpP);
+
+ /* TBD need to be fixed using new API */
+ lpP->rtp_client_0 = tuple_gen->GenerateOneSourcePort();
+ lpP->rtp_client_1 = tuple_gen->GenerateOneSourcePort();
+ lpP->m_gen=flow_gen;
+ node->m_plugin_info = (void *)lpP;
+ }else{
+ if (plugin_id ==mpDYN_PYLOAD) {
+ /* nothing to do */
+ }else{
+ if (plugin_id ==mpAVL_HTTP_BROWSIN) {
+ CTcpSeq * lpP=new CTcpSeq();
+ assert(lpP);
+ node->m_plugin_info = (void *)lpP;
+ }else{
+ /* do not support this */
+ assert(0);
+ }
+ }
+ }
+}
+
+void CPluginCallbackSimple::on_node_last(uint8_t plugin_id,CGenNode * node){
+ //printf(" on on_node_last callback %d %x! \n",(int)plugin_id,node);
+ if ( (plugin_id == mpRTSP) || (plugin_id == mpSIP_VOICE) ) {
+ CPlugin_rtsp * lpP=(CPlugin_rtsp * )node->m_plugin_info;
+ assert(lpP);
+ /* free the ports */
+ CFlowGenListPerThread * flow_gen=(CFlowGenListPerThread *) lpP->m_gen;
+ bool is_tcp=node->m_pkt_info->m_pkt_indication.m_desc.IsTcp();
+ flow_gen->defer_client_port_free(is_tcp,node->m_src_ip,lpP->rtp_client_0);
+ flow_gen->defer_client_port_free(is_tcp,node->m_src_ip,lpP->rtp_client_1);
+ delete lpP;
+ node->m_plugin_info=0;
+ }else{
+ if (plugin_id ==mpDYN_PYLOAD) {
+ /* nothing to do */
+ }else{
+ if (plugin_id ==mpAVL_HTTP_BROWSIN) {
+ /* nothing to do */
+ CTcpSeq * lpP=(CTcpSeq * )node->m_plugin_info;
+ delete lpP;
+ node->m_plugin_info=0;
+ }else{
+ /* do not support this */
+ assert(0);
+ }
+ }
+ }
+}
+
+rte_mbuf_t * CPluginCallbackSimple::http_plugin(uint8_t plugin_id,
+ CGenNode * node,
+ CFlowPktInfo * pkt_info){
+ CPacketDescriptor * lpd=&pkt_info->m_pkt_indication.m_desc;
+ assert(lpd->getFlowId()==0); /* only one flow */
+ CMiniVMCmdBase * program[2];
+ CMiniVMReplaceIP replace_cmd;
+ CMiniVMCmdBase eop_cmd;
+ CTcpSeq * lpP=(CTcpSeq * )node->m_plugin_info;
+ assert(lpP);
+ rte_mbuf_t *mbuf;
+ int16_t s_size=0;
+
+ if ( likely (lpd->getFlowPktNum() != 3) ){
+ if (unlikely (CGlobalInfo::is_ipv6_enable()) ) {
+ // Request a larger initial segment for IPv6
+ mbuf = pkt_info->do_generate_new_mbuf_big(node);
+ }else{
+ mbuf = pkt_info->do_generate_new_mbuf(node);
+ }
+
+ }else{
+ CFlowInfo flow_info;
+ flow_info.vm_program=0;
+
+ flow_info.client_ip = node->m_src_ip;
+ flow_info.server_ip = node->m_dest_ip;
+ flow_info.client_port = node->m_src_port;
+ flow_info.server_port = 0;
+ flow_info.replace_server_port =false;
+ flow_info.is_init_ip_dir = (node->cur_pkt_ip_addr_dir() == CLIENT_SIDE?true:false);
+ flow_info.is_init_port_dir = (node->cur_pkt_port_addr_dir() ==CLIENT_SIDE?true:false);
+
+
+ replace_cmd.m_cmd = VM_REPLACE_IP_OFFSET;
+ replace_cmd.m_flags = 0;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest IP address. There is a single address
+ // string of 8 bytes that needs to be replaced.
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ // For IPv6, accommodate use of brackets (+2 bytes)
+ replace_cmd.m_add_pkt_len = (INET6_ADDRSTRLEN + 2) - 8;
+
+ // Mark as IPv6 and set the upper 96-bits
+ replace_cmd.m_flags |= CMiniVMCmdBase::MIN_VM_V6;
+ for (uint8_t idx=0; idx<6; idx++){
+ replace_cmd.m_server_ip.v6[idx] = CGlobalInfo::m_options.m_dst_ipv6[idx];
+ }
+ } else {
+ replace_cmd.m_add_pkt_len = INET_ADDRSTRLEN - 8;
+ }
+
+ // Set m_start_0/m_stop_1 at start/end of IP address to be replaced.
+ // For this packet we know the IP addr string length is 8 bytes.
+ replace_cmd.m_start_0 = 10+16;
+ replace_cmd.m_stop_1 = replace_cmd.m_start_0 + 8;
+
+ replace_cmd.m_server_ip.v4 = flow_info.server_ip;
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &replace_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+
+ mbuf = pkt_info->do_generate_new_mbuf_ex_vm(node,&flow_info, &s_size);
+ }
+
+ // Fixup the TCP sequence numbers
+ uint8_t *p=rte_pktmbuf_mtod(mbuf, uint8_t*);
+
+ // Update TCP sequence numbers
+ lpP->update(p, pkt_info, s_size);
+
+ return(mbuf);
+}
+
+rte_mbuf_t * CPluginCallbackSimple::dyn_pyload_plugin(uint8_t plugin_id,
+ CGenNode * node,
+ CFlowPktInfo * pkt_info){
+
+ CMiniVMCmdBase * program[2];
+
+ CMiniVMDynPyload dyn_cmd;
+ CMiniVMCmdBase eop_cmd;
+
+ CPacketDescriptor * lpd=&pkt_info->m_pkt_indication.m_desc;
+ CFlowYamlDynamicPyloadPlugin * lpt = node->m_template_info->m_dpPkt;
+ assert(lpt);
+ CFlowInfo flow_info;
+ flow_info.vm_program=0;
+ int16_t s_size=0;
+
+ // IPv6 packets are not supported
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ fprintf (stderr," IPv6 is not supported for dynamic pyload change\n");
+ exit(-1);
+ }
+
+ if ( lpd->getFlowId() == 0 ) {
+
+ flow_info.client_ip = node->m_src_ip;
+ flow_info.server_ip = node->m_dest_ip;
+ flow_info.client_port = node->m_src_port;
+ flow_info.server_port = 0;
+ flow_info.replace_server_port =false;
+ flow_info.is_init_ip_dir = (node->cur_pkt_ip_addr_dir() == CLIENT_SIDE?true:false);
+ flow_info.is_init_port_dir = (node->cur_pkt_port_addr_dir() ==CLIENT_SIDE?true:false);
+
+ uint32_t pkt_num = lpd->getFlowPktNum();
+ if (pkt_num < 253) {
+ int i;
+ /* fast filter */
+ for (i=0; i<lpt->m_num; i++) {
+ if (lpt->m_pkt_ids[i] == pkt_num ) {
+ //add a program here
+ dyn_cmd.m_cmd = VM_DYN_PYLOAD;
+ dyn_cmd.m_ptr= &lpt->m_program[i];
+ dyn_cmd.m_flags = 0;
+ dyn_cmd.m_add_pkt_len = INET_ADDRSTRLEN - 8;
+ dyn_cmd.m_ip.v4=node->m_src_ip;
+
+ eop_cmd.m_cmd = VM_EOP;
+ program[0] = &dyn_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ }
+ }
+ // only for the first flow
+ }else{
+ fprintf (stderr," only one flow is allowed for dynamic pyload change \n");
+ exit(-1);
+ }/* only for the first flow */
+
+ if ( unlikely( flow_info.vm_program != 0 ) ) {
+
+ return ( pkt_info->do_generate_new_mbuf_ex_vm(node,&flow_info, &s_size) );
+ }else{
+ return ( pkt_info->do_generate_new_mbuf_ex(node,&flow_info) );
+ }
+}
+
+rte_mbuf_t * CPluginCallbackSimple::sip_voice_plugin(uint8_t plugin_id,CGenNode * node,CFlowPktInfo * pkt_info){
+ CMiniVMCmdBase * program[2];
+
+ CMiniVMReplaceIP_PORT_IP_IP_Port via_replace_cmd;
+ CMiniVMCmdBase eop_cmd;
+
+ CPacketDescriptor * lpd=&pkt_info->m_pkt_indication.m_desc;
+ CPlugin_rtsp * lpP=(CPlugin_rtsp * )node->m_plugin_info;
+ assert(lpP);
+ // printf(" %d %d \n",lpd->getFlowId(),lpd->getFlowPktNum());
+ CFlowInfo flow_info;
+ flow_info.vm_program=0;
+ int16_t s_size=0;
+
+ switch ( lpd->getFlowId() ) {
+ /* flow - SIP , packet #0,#1 control */
+ case 0:
+ flow_info.client_ip = node->m_src_ip;
+ flow_info.server_ip = node->m_dest_ip;
+ flow_info.client_port = node->m_src_port;
+ flow_info.server_port = 0;
+ flow_info.replace_server_port =false;
+ flow_info.is_init_ip_dir = (node->cur_pkt_ip_addr_dir() == CLIENT_SIDE?true:false);
+ flow_info.is_init_port_dir = (node->cur_pkt_port_addr_dir() ==CLIENT_SIDE?true:false);
+
+
+ /* program to replace ip server */
+ switch ( lpd->getFlowPktNum() ) {
+ case 0:
+ {
+ via_replace_cmd.m_cmd = VM_REPLACE_IPVIA_IP_IP_PORT;
+ via_replace_cmd.m_flags = 0;
+ via_replace_cmd.m_start_0 = 0;
+ via_replace_cmd.m_stop_1 = 0;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest IP address. There are 3 address
+ // strings (each 9 bytes) that need to be replaced.
+ // We also need to accommodate IPv6 use of brackets
+ // (+2 bytes) in a URI.
+ // There are also 2 port strings that need to be
+ // replaced (one is 4 bytes, the other is 5 bytes).
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ via_replace_cmd.m_add_pkt_len = (((INET6_ADDRSTRLEN + 2) - 9) * 3) +
+ ((INET_PORTSTRLEN * 2) - 9);
+
+ // Mark as IPv6 and set the upper 96-bits
+ via_replace_cmd.m_flags |= CMiniVMCmdBase::MIN_VM_V6;
+ for (uint8_t idx=0; idx<6; idx++){
+ via_replace_cmd.m_ip.v6[idx] = CGlobalInfo::m_options.m_src_ipv6[idx];
+ via_replace_cmd.m_ip_via.v6[idx] = CGlobalInfo::m_options.m_src_ipv6[idx];
+ }
+ } else {
+ via_replace_cmd.m_add_pkt_len = ((INET_ADDRSTRLEN - 9) * 3) +
+ ((INET_PORTSTRLEN * 2) - 9);
+ }
+ via_replace_cmd.m_ip.v4 =node->m_src_ip;
+ via_replace_cmd.m_ip0_start = 377;
+ via_replace_cmd.m_ip0_stop = 377+9;
+
+ via_replace_cmd.m_ip1_start = 409;
+ via_replace_cmd.m_ip1_stop = 409+9;
+
+
+ via_replace_cmd.m_port =lpP->rtp_client_0;
+ via_replace_cmd.m_port_start = 435;
+ via_replace_cmd.m_port_stop = 435+5;
+
+ via_replace_cmd.m_ip_via.v4 = node->m_src_ip;
+ via_replace_cmd.m_port_via = node->m_src_port;
+
+ via_replace_cmd.m_ip_via_start = 208;
+ via_replace_cmd.m_ip_via_stop = 208+9+5;
+
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &via_replace_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+ case 1:
+ {
+ via_replace_cmd.m_cmd = VM_REPLACE_IPVIA_IP_IP_PORT;
+ via_replace_cmd.m_flags = 0;
+ via_replace_cmd.m_start_0 = 0;
+ via_replace_cmd.m_stop_1 = 0;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest IP address. There are 3 address
+ // strings (each 9 bytes) that need to be replaced.
+ // We also need to accommodate IPv6 use of brackets
+ // (+2 bytes) in a URI.
+ // There are also 2 port strings that need to be
+ // replaced (one is 4 bytes, the other is 5 bytes).
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ via_replace_cmd.m_add_pkt_len = (((INET6_ADDRSTRLEN + 2) - 9) * 3) +
+ ((INET_PORTSTRLEN * 2) - 9);
+
+ // Mark as IPv6 and set the upper 96-bits
+ via_replace_cmd.m_flags |= CMiniVMCmdBase::MIN_VM_V6;
+ for (uint8_t idx=0; idx<6; idx++){
+ via_replace_cmd.m_ip.v6[idx] = CGlobalInfo::m_options.m_dst_ipv6[idx];
+ via_replace_cmd.m_ip_via.v6[idx] = CGlobalInfo::m_options.m_src_ipv6[idx];
+ }
+ } else {
+ via_replace_cmd.m_add_pkt_len = ((INET_ADDRSTRLEN - 9) * 3) +
+ ((INET_PORTSTRLEN * 2) - 9);
+ }
+
+ via_replace_cmd.m_ip.v4 =node->m_dest_ip;
+ via_replace_cmd.m_ip0_start = 370;
+ via_replace_cmd.m_ip0_stop = 370+8;
+
+ via_replace_cmd.m_ip1_start = 401;
+ via_replace_cmd.m_ip1_stop = 401+8;
+
+
+ via_replace_cmd.m_port =lpP->rtp_client_0;
+ via_replace_cmd.m_port_start = 426;
+ via_replace_cmd.m_port_stop = 426+5;
+
+
+ via_replace_cmd.m_ip_via.v4 = node->m_src_ip;
+ via_replace_cmd.m_port_via = node->m_src_port;
+
+ via_replace_cmd.m_ip_via_start = 207;
+ via_replace_cmd.m_ip_via_stop = 207+9+5;
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &via_replace_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+
+
+ }/* end of big switch on packet */
+ break;
+
+ case 1:
+ flow_info.client_ip = node->m_src_ip ;
+ flow_info.server_ip = node->m_dest_ip;
+ flow_info.client_port = lpP->rtp_client_0;
+ /* this is tricky ..*/
+ flow_info.server_port = lpP->rtp_client_0;
+ flow_info.replace_server_port = true;
+ flow_info.is_init_ip_dir = (node->cur_pkt_ip_addr_dir() == CLIENT_SIDE?true:false);
+ flow_info.is_init_port_dir = (node->cur_pkt_port_addr_dir() ==CLIENT_SIDE?true:false);
+
+ break;
+ default:
+ assert(0);
+ break;
+ };
+
+ //printf(" c_ip:%x s_ip:%x c_po:%x s_po:%x init:%x replace:%x \n",flow_info.client_ip,flow_info.server_ip,flow_info.client_port,flow_info.server_port,flow_info.is_init_dir,flow_info.replace_server_port);
+
+ //printf(" program %p \n",flow_info.vm_program);
+ if ( unlikely( flow_info.vm_program != 0 ) ) {
+
+ return ( pkt_info->do_generate_new_mbuf_ex_vm(node,&flow_info, &s_size) );
+ }else{
+ return ( pkt_info->do_generate_new_mbuf_ex(node,&flow_info) );
+ }
+}
+
+rte_mbuf_t * CPluginCallbackSimple::rtsp_plugin(uint8_t plugin_id,CGenNode * node,CFlowPktInfo * pkt_info){
+
+ CMiniVMCmdBase * program[2];
+
+ CMiniVMReplaceIP replace_cmd;
+ CMiniVMCmdBase eop_cmd;
+ CMiniVMReplaceIPWithPort replace_port_cmd;
+ rte_mbuf_t *mbuf;
+
+ CPacketDescriptor * lpd=&pkt_info->m_pkt_indication.m_desc;
+ CPlugin_rtsp * lpP=(CPlugin_rtsp * )node->m_plugin_info;
+
+ assert(lpP);
+ // printf(" %d %d \n",lpd->getFlowId(),lpd->getFlowPktNum());
+ CFlowInfo flow_info;
+ flow_info.vm_program=0;
+ int16_t s_size=0;
+
+ switch ( lpd->getFlowId() ) {
+ /* flow - control */
+ case 0:
+ flow_info.client_ip = node->m_src_ip;
+ flow_info.server_ip = node->m_dest_ip;
+ flow_info.client_port = node->m_src_port;
+ flow_info.server_port = 0;
+ flow_info.replace_server_port =false;
+ flow_info.is_init_ip_dir = (node->cur_pkt_ip_addr_dir() == CLIENT_SIDE?true:false);
+ flow_info.is_init_port_dir = (node->cur_pkt_port_addr_dir() ==CLIENT_SIDE?true:false);
+
+
+ /* program to replace ip server */
+ switch ( lpd->getFlowPktNum() ) {
+ case 3:
+ {
+ replace_cmd.m_cmd = VM_REPLACE_IP_OFFSET;
+ replace_cmd.m_flags = 0;
+ replace_cmd.m_start_0 = 16;
+ replace_cmd.m_stop_1 = 16+9;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest IP address. There is a single address
+ // string of 9 bytes that needs to be replaced.
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ // For IPv6, accommodate use of brackets (+2 bytes)
+ replace_cmd.m_add_pkt_len = (INET6_ADDRSTRLEN + 2) - 9;
+
+ // Mark as IPv6 and set the upper 96-bits
+ replace_cmd.m_flags |= CMiniVMCmdBase::MIN_VM_V6;
+ for (uint8_t idx=0; idx<6; idx++){
+ replace_cmd.m_server_ip.v6[idx] = CGlobalInfo::m_options.m_dst_ipv6[idx];
+ }
+ } else {
+ replace_cmd.m_add_pkt_len = INET_ADDRSTRLEN - 9;
+ }
+ replace_cmd.m_server_ip.v4 = flow_info.server_ip;
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &replace_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+ case 4:
+ {
+ replace_cmd.m_cmd = VM_REPLACE_IP_OFFSET;
+ replace_cmd.m_flags = 0;
+ replace_cmd.m_start_0 = 46;
+ replace_cmd.m_stop_1 = 46+9;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest IP address. There is a single address
+ // string of 9 bytes that needs to be replaced.
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ // For IPv6, accommodate use of brackets (+2 bytes)
+ replace_cmd.m_add_pkt_len = (INET6_ADDRSTRLEN + 2) - 9;
+
+ // Mark as IPv6 and set the upper 96-bits
+ replace_cmd.m_flags |= CMiniVMCmdBase::MIN_VM_V6;
+ for (uint8_t idx=0; idx<6; idx++){
+ replace_cmd.m_server_ip.v6[idx] = CGlobalInfo::m_options.m_dst_ipv6[idx];
+ }
+ } else {
+ replace_cmd.m_add_pkt_len = INET_ADDRSTRLEN - 9;
+ }
+ replace_cmd.m_server_ip.v4 = flow_info.server_ip;
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &replace_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+
+ case 5:
+ {
+
+ replace_port_cmd.m_cmd = VM_REPLACE_IP_PORT_OFFSET;
+ replace_port_cmd.m_flags = 0;
+ replace_port_cmd.m_start_0 = 13;
+ replace_port_cmd.m_stop_1 = 13+9;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest IP address. There is a single address
+ // string of 9 bytes that needs to be replaced.
+ // There are also 2 port strings (8 bytes) that need to be
+ // replaced.
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ // For IPv6, accommodate use of brackets (+2 bytes)
+ replace_port_cmd.m_add_pkt_len = ((INET6_ADDRSTRLEN + 2) - 9) +
+ ((INET_PORTSTRLEN * 2) - 8);
+
+ // Mark as IPv6 and set the upper 96-bits
+ replace_port_cmd.m_flags |= CMiniVMCmdBase::MIN_VM_V6;
+ for (uint8_t idx=0; idx<6; idx++){
+ replace_port_cmd.m_server_ip.v6[idx] = CGlobalInfo::m_options.m_dst_ipv6[idx];
+ }
+ } else {
+ replace_port_cmd.m_add_pkt_len = (INET_ADDRSTRLEN - 9) +
+ ((INET_PORTSTRLEN * 2) - 8);
+ }
+ replace_port_cmd.m_server_ip.v4 = flow_info.server_ip;
+ replace_port_cmd.m_start_port = 164;
+ replace_port_cmd.m_stop_port = 164+(4*2)+1;
+ replace_port_cmd.m_client_port = lpP->rtp_client_0;
+ replace_port_cmd.m_server_port =0;
+
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &replace_port_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+
+ case 6:
+ {
+
+ replace_port_cmd.m_cmd = VM_REPLACE_IP_PORT_RESPONSE_OFFSET;
+ replace_port_cmd.m_flags = 0;
+ replace_port_cmd.m_start_0 = 0;
+ replace_port_cmd.m_stop_1 = 0;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest port addresses. There are 4 port address
+ // strings (16 bytes) that need to be replaced.
+ replace_port_cmd.m_add_pkt_len = ((INET_PORTSTRLEN * 4) - 16);
+
+ replace_port_cmd.m_server_ip.v4 = flow_info.server_ip;
+ replace_port_cmd.m_start_port = 247;
+ replace_port_cmd.m_stop_port = 247+(4*4)+2+13;
+ replace_port_cmd.m_client_port = lpP->rtp_client_0;
+ replace_port_cmd.m_server_port = lpP->rtp_client_0;
+
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &replace_port_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+
+
+ case 7:
+ {
+
+ replace_port_cmd.m_cmd = VM_REPLACE_IP_PORT_OFFSET;
+ replace_port_cmd.m_flags = 0;
+ replace_port_cmd.m_start_0 = 13;
+ replace_port_cmd.m_stop_1 = 13+9;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest IP address. There is a single address
+ // string of 9 bytes that needs to be replaced.
+ // There are also 2 port strings (8 bytes) that need to be
+ // replaced.
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ // For IPv6, accommodate use of brackets (+2 bytes)
+ replace_port_cmd.m_add_pkt_len = ((INET6_ADDRSTRLEN + 2) - 9) +
+ ((INET_PORTSTRLEN * 2) - 8);
+
+ // Mark as IPv6 and set the upper 96-bits
+ replace_port_cmd.m_flags |= CMiniVMCmdBase::MIN_VM_V6;
+ for (uint8_t idx=0; idx<6; idx++){
+ replace_port_cmd.m_server_ip.v6[idx] = CGlobalInfo::m_options.m_dst_ipv6[idx];
+ }
+ } else {
+ replace_port_cmd.m_add_pkt_len = (INET_ADDRSTRLEN - 9) +
+ ((INET_PORTSTRLEN * 2) - 8);
+ }
+ replace_port_cmd.m_server_ip.v4 = flow_info.server_ip;
+ replace_port_cmd.m_start_port = 164;
+ replace_port_cmd.m_stop_port = 164+(4*2)+1;
+ replace_port_cmd.m_client_port = lpP->rtp_client_1;
+ replace_port_cmd.m_server_port =0;
+
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &replace_port_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+
+ case 8:
+
+ {
+
+ replace_port_cmd.m_cmd = VM_REPLACE_IP_PORT_RESPONSE_OFFSET;
+ replace_port_cmd.m_flags = 0;
+ replace_port_cmd.m_start_0 = 0;
+ replace_port_cmd.m_stop_1 = 0;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest port addresses. There are 4 port address
+ // strings (16 bytes) that need to be replaced.
+ replace_port_cmd.m_add_pkt_len = ((INET_PORTSTRLEN * 4) - 16);
+
+ replace_port_cmd.m_server_ip.v4 = flow_info.server_ip;
+ replace_port_cmd.m_start_port = 247;
+ replace_port_cmd.m_stop_port = 247+(4*4)+2+13;
+ replace_port_cmd.m_client_port = lpP->rtp_client_1;
+ replace_port_cmd.m_server_port = lpP->rtp_client_1;
+
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &replace_port_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+
+ /* PLAY */
+ case 9:
+ {
+
+ replace_cmd.m_cmd = VM_REPLACE_IP_OFFSET;
+ replace_cmd.m_flags = 0;
+ replace_cmd.m_start_0 = 12;
+ replace_cmd.m_stop_1 = 12+9;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest IP address. There is a single address
+ // string of 9 bytes that needs to be replaced.
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ // For IPv6, accommodate use of brackets (+2 bytes)
+ replace_cmd.m_add_pkt_len = (INET6_ADDRSTRLEN + 2) - 9;
+
+ // Mark as IPv6 and set the upper 96-bits
+ replace_cmd.m_flags |= CMiniVMCmdBase::MIN_VM_V6;
+ for (uint8_t idx=0; idx<6; idx++){
+ replace_cmd.m_server_ip.v6[idx] = CGlobalInfo::m_options.m_dst_ipv6[idx];
+ }
+ } else {
+ replace_cmd.m_add_pkt_len = INET_ADDRSTRLEN - 9;
+ }
+ replace_cmd.m_server_ip.v4 = flow_info.server_ip;
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &replace_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+
+ /*OPTION 0*/
+ case 12:
+ {
+
+ replace_cmd.m_cmd = VM_REPLACE_IP_OFFSET;
+ replace_cmd.m_flags = 0;
+ replace_cmd.m_start_0 = 15;
+ replace_cmd.m_stop_1 = 15+9;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest IP address. There is a single address
+ // string of 9 bytes that needs to be replaced.
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ // For IPv6, accommodate use of brackets (+2 bytes)
+ replace_cmd.m_add_pkt_len = (INET6_ADDRSTRLEN + 2) - 9;
+
+ // Mark as IPv6 and set the upper 96-bits
+ replace_cmd.m_flags |= CMiniVMCmdBase::MIN_VM_V6;
+ for (uint8_t idx=0; idx<6; idx++){
+ replace_cmd.m_server_ip.v6[idx] = CGlobalInfo::m_options.m_dst_ipv6[idx];
+ }
+ } else {
+ replace_cmd.m_add_pkt_len = INET_ADDRSTRLEN - 9;
+ }
+ replace_cmd.m_server_ip.v4 = flow_info.server_ip;
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &replace_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+
+ /* option #2*/
+ case 15:
+ {
+
+ replace_cmd.m_cmd = VM_REPLACE_IP_OFFSET;
+ replace_cmd.m_flags = 0;
+ replace_cmd.m_start_0 = 15;
+ replace_cmd.m_stop_1 = 15+9;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest IP address. There is a single address
+ // string of 9 bytes that needs to be replaced.
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ // For IPv6, accommodate use of brackets (+2 bytes)
+ replace_cmd.m_add_pkt_len = (INET6_ADDRSTRLEN + 2) - 9;
+
+ // Mark as IPv6 and set the upper 96-bits
+ replace_cmd.m_flags |= CMiniVMCmdBase::MIN_VM_V6;
+ for (uint8_t idx=0; idx<6; idx++){
+ replace_cmd.m_server_ip.v6[idx] = CGlobalInfo::m_options.m_dst_ipv6[idx];
+ }
+ } else {
+ replace_cmd.m_add_pkt_len = INET_ADDRSTRLEN - 9;
+ }
+ replace_cmd.m_server_ip.v4 = flow_info.server_ip;
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &replace_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+
+ case 18:
+ {
+
+ replace_cmd.m_cmd = VM_REPLACE_IP_OFFSET;
+ replace_cmd.m_flags = 0;
+ replace_cmd.m_start_0 = 16;
+ replace_cmd.m_stop_1 = 16+9;
+
+ // Determine how much larger the packet needs to be to
+ // handle the largest IP address. There is a single address
+ // string of 9 bytes that needs to be replaced.
+ if (CGlobalInfo::is_ipv6_enable() ) {
+ // For IPv6, accommodate use of brackets (+2 bytes)
+ replace_cmd.m_add_pkt_len = (INET6_ADDRSTRLEN + 2) - 9;
+
+ // Mark as IPv6 and set the upper 96-bits
+ replace_cmd.m_flags |= CMiniVMCmdBase::MIN_VM_V6;
+ for (uint8_t idx=0; idx<6; idx++){
+ replace_cmd.m_server_ip.v6[idx] = CGlobalInfo::m_options.m_dst_ipv6[idx];
+ }
+ } else {
+ replace_cmd.m_add_pkt_len = INET_ADDRSTRLEN - 9;
+ }
+ replace_cmd.m_server_ip.v4 = flow_info.server_ip;
+
+ eop_cmd.m_cmd = VM_EOP;
+
+ program[0] = &replace_cmd;
+ program[1] = &eop_cmd;
+
+ flow_info.vm_program = program;
+ }
+ break;
+
+
+ }/* end of big switch on packet */
+ break;
+
+ case 1:
+ flow_info.client_ip = node->m_src_ip ;
+ flow_info.server_ip = node->m_dest_ip;
+ flow_info.client_port = lpP->rtp_client_0;
+ /* this is tricky ..*/
+ flow_info.server_port = lpP->rtp_client_0;
+ flow_info.replace_server_port = true;
+ flow_info.is_init_ip_dir = (node->cur_pkt_ip_addr_dir() == CLIENT_SIDE?true:false);
+ flow_info.is_init_port_dir = (node->cur_pkt_port_addr_dir() ==CLIENT_SIDE?true:false);
+
+ break;
+ case 2:
+ flow_info.client_ip = node->m_src_ip ;
+ flow_info.server_ip = node->m_dest_ip;
+ flow_info.client_port = lpP->rtp_client_1;
+ /* this is tricky ..*/
+ flow_info.server_port = lpP->rtp_client_1;
+ flow_info.replace_server_port =true;
+ flow_info.is_init_ip_dir = (node->cur_pkt_ip_addr_dir() == CLIENT_SIDE?true:false);
+ flow_info.is_init_port_dir = (node->cur_pkt_port_addr_dir() ==CLIENT_SIDE?true:false);
+
+ break;
+ default:
+ assert(0);
+ break;
+ };
+
+ //printf(" c_ip:%x s_ip:%x c_po:%x s_po:%x init:%x replace:%x \n",flow_info.client_ip,flow_info.server_ip,flow_info.client_port,flow_info.server_port,flow_info.is_init_dir,flow_info.replace_server_port);
+
+ //printf(" program %p \n",flow_info.vm_program);
+ if ( unlikely( flow_info.vm_program != 0 ) ) {
+
+ mbuf = pkt_info->do_generate_new_mbuf_ex_vm(node,&flow_info, &s_size);
+ }else{
+ if (unlikely (CGlobalInfo::is_ipv6_enable()) ) {
+ // Request a larger initial segment for IPv6
+ mbuf = pkt_info->do_generate_new_mbuf_ex_big(node,&flow_info);
+ }else{
+ mbuf = pkt_info->do_generate_new_mbuf_ex(node,&flow_info);
+ }
+ }
+
+ // Fixup the TCP sequence numbers for the TCP flow
+ if ( lpd->getFlowId() == 0 ) {
+ uint8_t *p=rte_pktmbuf_mtod(mbuf, uint8_t*);
+
+ // Update TCP sequence numbers
+ lpP->update(p, pkt_info, s_size);
+ }
+
+ return(mbuf);
+}
+
+
+/* replace the tuples */
+rte_mbuf_t * CPluginCallbackSimple::on_node_generate_mbuf(uint8_t plugin_id,CGenNode * node,CFlowPktInfo * pkt_info){
+ switch (plugin_id) {
+ case mpRTSP:
+ return ( rtsp_plugin(plugin_id,node,pkt_info) );
+ case mpSIP_VOICE:
+ return ( sip_voice_plugin(plugin_id,node,pkt_info) );
+ case mpDYN_PYLOAD:
+ return ( dyn_pyload_plugin(plugin_id,node,pkt_info) );
+ case mpAVL_HTTP_BROWSIN:
+ return ( http_plugin(plugin_id,node,pkt_info) );
+ default:
+ assert(0);
+ }
+ return (NULL);
+}
+
+
+int CMiniVM::mini_vm_run(CMiniVMCmdBase * cmds[]){
+
+ m_new_pkt_size=0;
+ bool need_to_stop=false;
+ int cnt=0;
+ CMiniVMCmdBase * cmd=cmds[cnt];
+ while (! need_to_stop) {
+ switch (cmd->m_cmd) {
+ case VM_REPLACE_IP_OFFSET:
+ mini_vm_replace_ip((CMiniVMReplaceIP *)cmd);
+ break;
+ case VM_REPLACE_IP_PORT_OFFSET:
+ mini_vm_replace_port_ip((CMiniVMReplaceIPWithPort *)cmd);
+ break;
+ case VM_REPLACE_IP_PORT_RESPONSE_OFFSET:
+ mini_vm_replace_ports((CMiniVMReplaceIPWithPort *)cmd);
+ break;
+
+ case VM_REPLACE_IP_IP_PORT:
+ mini_vm_replace_ip_ip_ports((CMiniVMReplaceIP_IP_Port * )cmd);
+ break;
+
+ case VM_REPLACE_IPVIA_IP_IP_PORT:
+ mini_vm_replace_ip_via_ip_ip_ports((CMiniVMReplaceIP_PORT_IP_IP_Port *)cmd);
+ break;
+
+ case VM_DYN_PYLOAD:
+ mini_vm_dyn_payload((CMiniVMDynPyload *)cmd);
+ break;
+
+ case VM_EOP:
+ need_to_stop=true;
+ break;
+ default:
+ printf(" vm cmd %d does not exist \n",cmd->m_cmd);
+ assert(0);
+ }
+ cnt++;
+ cmd=cmds[cnt];
+ }
+ return (0);
+}
+
+inline int cp_pkt_len(char *to,char *from,uint16_t from_offset,uint16_t len){
+ memcpy(to, from+from_offset , len);
+ return (len);
+}
+
+/* copy the bytes [from_offset, to_offset) from 'from' into 'to';
+ the byte at to_offset itself is not copied */
+inline int cp_pkt_to_from(char *to,char *from,uint16_t from_offset,uint16_t to_offset){
+ memcpy(to, from+from_offset , to_offset-from_offset) ;
+ return (to_offset-from_offset);
+}
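+
+/* Usage sketch for the copy helpers above (illustrative; 'p', 'l7', 'len',
+   'field_start', 'field_stop' and 'new_value' are placeholder names): a
+   typical replacement copies the bytes before the field, writes the new
+   text, then copies the tail:
+
+   p += cp_pkt_to_from(p, l7, 0, field_start);    // bytes [0, field_start)
+   p += sprintf(p, "%u", new_value);              // the replacement text
+   p += cp_pkt_to_from(p, l7, field_stop, len);   // bytes [field_stop, len)
+*/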
+
+
+int CMiniVM::mini_vm_dyn_payload( CMiniVMDynPyload * cmd){
+ /* copy all the packet */
+ CFlowYamlDpPkt * dyn=(CFlowYamlDpPkt *)cmd->m_ptr;
+ uint16_t l7_offset = m_pkt_info->m_pkt_indication.getFastPayloadOffset();
+ uint16_t len = m_pkt_info->m_packet->pkt_len - l7_offset ;
+ char * original_l7_ptr=m_pkt_info->m_packet->raw+l7_offset;
+ char * p=m_pyload_mbuf_ptr;
+ /* copy payload */
+ memcpy(p,original_l7_ptr,len);
+ if ( ( dyn->m_pyld_offset+ (dyn->m_len*4)) < ( len-4) ){
+ // we can change the packet
+ int i;
+ uint32_t *l=(uint32_t *)(p+dyn->m_pyld_offset);
+ for (i=0; i<dyn->m_len; i++) {
+ if ( dyn->m_type==0 ) {
+ *l=(rand() & dyn->m_pkt_mask);
+ }else if (dyn->m_type==1){
+ *l=(PKT_NTOHL(cmd->m_ip.v4) & dyn->m_pkt_mask);
+ }
+ l++;
+ }
+
+ }
+
+ // Return packet size which hasn't changed
+ m_new_pkt_size = m_pkt_info->m_packet->pkt_len;
+
+ return (0);
+}
+
+
+int CMiniVM::mini_vm_replace_ip_via_ip_ip_ports(CMiniVMReplaceIP_PORT_IP_IP_Port * cmd){
+ uint16_t l7_offset = m_pkt_info->m_pkt_indication.getFastPayloadOffset();
+ uint16_t len = m_pkt_info->m_packet->pkt_len - l7_offset;
+ char * original_l7_ptr=m_pkt_info->m_packet->raw+l7_offset;
+ char * p=m_pyload_mbuf_ptr;
+
+ p+=cp_pkt_to_from(p,original_l7_ptr,
+ 0,
+ cmd->m_ip_via_start);
+
+ if (cmd->m_flags & CMiniVMCmdBase::MIN_VM_V6) {
+ p+=ipv6_to_str(&cmd->m_ip_via,p);
+ } else {
+ p+=ip_to_str(cmd->m_ip_via.v4,p);
+ }
+ p+=sprintf(p,":%u",cmd->m_port_via);
+
+ /* up to the IP */
+ p+=cp_pkt_to_from(p,original_l7_ptr,
+ cmd->m_ip_via_stop,
+ cmd->m_ip0_start);
+ /*IP */
+ if (cmd->m_flags & CMiniVMCmdBase::MIN_VM_V6) {
+ p[-2] = '6';
+ p+=ipv6_to_str(&cmd->m_ip,p);
+ } else {
+ p+=ip_to_str(cmd->m_ip.v4,p);
+ }
+ /* up to IP 2 */
+ p+=cp_pkt_to_from(p, original_l7_ptr ,
+ cmd->m_ip0_stop,
+ cmd->m_ip1_start);
+ /* IP2 */
+ if (cmd->m_flags & CMiniVMCmdBase::MIN_VM_V6) {
+ p[-2] = '6';
+ p+=ipv6_to_str(&cmd->m_ip,p);
+ } else {
+ p+=ip_to_str(cmd->m_ip.v4,p);
+ }
+
+ /* up to port */
+ p+=cp_pkt_to_from(p, original_l7_ptr ,
+ cmd->m_ip1_stop,
+ cmd->m_port_start);
+ /* port */
+ p+=sprintf(p,"%u",cmd->m_port);
+
+ /* up to end */
+ p+=cp_pkt_to_from(p, original_l7_ptr ,
+ cmd->m_port_stop,
+ len);
+
+ // Determine new packet size
+ m_new_pkt_size= ((p+l7_offset) - m_pyload_mbuf_ptr);
+
+ return (0);
+}
+
+
+int CMiniVM::mini_vm_replace_ip_ip_ports(CMiniVMReplaceIP_IP_Port * cmd){
+ uint16_t l7_offset = m_pkt_info->m_pkt_indication.getFastPayloadOffset();
+ uint16_t len = m_pkt_info->m_packet->pkt_len - l7_offset;
+ char * original_l7_ptr=m_pkt_info->m_packet->raw+l7_offset;
+ char * p=m_pyload_mbuf_ptr;
+
+ /* up to the IP */
+ p+=cp_pkt_to_from(p,original_l7_ptr,
+ 0,
+ cmd->m_ip0_start);
+ /*IP */
+ if (cmd->m_flags & CMiniVMCmdBase::MIN_VM_V6) {
+ p+=ipv6_to_str(&cmd->m_ip,p);
+ } else {
+ p+=ip_to_str(cmd->m_ip.v4,p);
+ }
+ /* up to IP 2 */
+ p+=cp_pkt_to_from(p, original_l7_ptr ,
+ cmd->m_ip0_stop,
+ cmd->m_ip1_start);
+ /* IP2 */
+ if (cmd->m_flags & CMiniVMCmdBase::MIN_VM_V6) {
+ p+=ipv6_to_str(&cmd->m_ip,p);
+ } else {
+ p+=ip_to_str(cmd->m_ip.v4,p);
+ }
+
+ /* up to port */
+ p+=cp_pkt_to_from(p, original_l7_ptr ,
+ cmd->m_ip1_stop,
+ cmd->m_port_start);
+ /* port */
+ p+=sprintf(p,"%u",cmd->m_port);
+
+ /* up to end */
+ p+=cp_pkt_to_from(p, original_l7_ptr ,
+ cmd->m_port_stop,
+ len);
+
+ // Determine new packet size
+ m_new_pkt_size= ((p+l7_offset) - m_pyload_mbuf_ptr);
+
+ return (0);
+}
+
+int CMiniVM::mini_vm_replace_ports(CMiniVMReplaceIPWithPort * cmd){
+ uint16_t l7_offset = m_pkt_info->m_pkt_indication.getFastPayloadOffset();
+ uint16_t len = m_pkt_info->m_packet->pkt_len - l7_offset;
+ char * original_l7_ptr=m_pkt_info->m_packet->raw+l7_offset;
+
+ memcpy(m_pyload_mbuf_ptr, original_l7_ptr,cmd->m_start_port);
+ char * p=m_pyload_mbuf_ptr+cmd->m_start_port;
+ p+=sprintf(p,"%u-%u;server_port=%u-%u",cmd->m_client_port,cmd->m_client_port+1,cmd->m_server_port,cmd->m_server_port+1);
+ memcpy(p, original_l7_ptr+cmd->m_stop_port,len-cmd->m_stop_port);
+ p+=(len-cmd->m_stop_port);
+
+ // Determine new packet size
+ m_new_pkt_size= ((p+l7_offset) - m_pyload_mbuf_ptr);
+
+ return (0);
+}
+
+
+int CMiniVM::mini_vm_replace_port_ip(CMiniVMReplaceIPWithPort * cmd){
+ uint16_t l7_offset=m_pkt_info->m_pkt_indication.getFastPayloadOffset();
+ uint16_t len = m_pkt_info->m_packet->pkt_len - l7_offset - 0;
+ char * original_l7_ptr=m_pkt_info->m_packet->raw+l7_offset;
+
+ memcpy(m_pyload_mbuf_ptr, original_l7_ptr,cmd->m_start_0);
+ char *p=m_pyload_mbuf_ptr+cmd->m_start_0;
+ if (cmd->m_flags & CMiniVMCmdBase::MIN_VM_V6) {
+ p+=ipv6_to_str(&cmd->m_server_ip,p);
+ } else {
+ p+=ip_to_str(cmd->m_server_ip.v4,p);
+ }
+ /* copy until the port start offset */
+ int len1=cmd->m_start_port-cmd->m_stop_1 ;
+ memcpy(p, original_l7_ptr+cmd->m_stop_1,len1);
+ p+=len1;
+ p+=sprintf(p,"%u-%u",cmd->m_client_port,cmd->m_client_port+1);
+ memcpy(p, original_l7_ptr+cmd->m_stop_port,len-cmd->m_stop_port);
+ p+=len-cmd->m_stop_port;
+
+ // Determine new packet size
+ m_new_pkt_size= ((p+l7_offset) - m_pyload_mbuf_ptr);
+
+ return (0);
+}
+
+int CMiniVM::mini_vm_replace_ip(CMiniVMReplaceIP * cmd){
+ uint16_t l7_offset = m_pkt_info->m_pkt_indication.getFastPayloadOffset();
+ uint16_t len = m_pkt_info->m_packet->pkt_len - l7_offset;
+ char * original_l7_ptr = m_pkt_info->m_packet->raw+l7_offset;
+
+ memcpy(m_pyload_mbuf_ptr, original_l7_ptr,cmd->m_start_0);
+ char *p=m_pyload_mbuf_ptr+cmd->m_start_0;
+
+ int n_size=0;
+ if (cmd->m_flags & CMiniVMCmdBase::MIN_VM_V6) {
+ n_size=ipv6_to_str(&cmd->m_server_ip,p);
+ } else {
+ n_size=ip_to_str(cmd->m_server_ip.v4,p);
+ }
+ p+=n_size;
+ memcpy(p, original_l7_ptr+cmd->m_stop_1,len-cmd->m_stop_1);
+
+ // Determine new packet size
+ m_new_pkt_size= ((p+l7_offset+(len-cmd->m_stop_1)) - m_pyload_mbuf_ptr);
+
+ return (0);
+}
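+
+/* Size sketch for mini_vm_replace_ip (illustrative numbers): if the template
+   payload holds the 8-byte literal "10.1.1.2" at [m_start_0, m_stop_1) and
+   the generated address renders as "192.168.100.7" (13 bytes), the rebuilt
+   L7 payload is len - 8 + 13 bytes long, and m_new_pkt_size reports that
+   plus the original l7_offset. */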
+
+
+void CFlowYamlDpPkt::Dump(FILE *fd){
+ fprintf(fd," pkt_id : %d \n",(int)m_pkt_id);
+ fprintf(fd," offset : %d \n",(int)m_pyld_offset);
+ fprintf(fd," offset : %d \n",(int)m_type);
+ fprintf(fd," len : %d \n",(int)m_len);
+ fprintf(fd," mask : 0x%x \n",(int)m_pkt_mask);
+}
+
+
+void CFlowYamlDynamicPyloadPlugin::Add(CFlowYamlDpPkt & fd){
+ if (m_num ==MAX_PYLOAD_PKT_CHANGE) {
+ fprintf (stderr,"ERROR can set only %d rules \n",MAX_PYLOAD_PKT_CHANGE);
+ exit(-1);
+ }
+ m_pkt_ids[m_num]=fd.m_pkt_id;
+ m_program[m_num]=fd;
+ m_num+=1;
+}
+
+void CFlowYamlDynamicPyloadPlugin::Dump(FILE *fd){
+ int i;
+ fprintf(fd," pkts :");
+ for (i=0; i<m_num; i++) {
+ fprintf(fd," %d ",m_pkt_ids[i]);
+ }
+ fprintf(fd,"\n");
+ for (i=0; i<m_num; i++) {
+ fprintf(fd," program : %d \n",i);
+ fprintf(fd,"---------------- \n");
+ m_program[i].Dump(fd);
+ }
+}
+
+bool is_mac_info_conf(CFlowGenList *fl_list) {
+ if (fl_list) {
+ return fl_list->is_mac_info_configured;
+ }
+ return false;
+}
+
+mac_addr_align_t * get_mac_addr_by_ip(CFlowGenList *fl_list,
+ uint32_t ip) {
+ if (fl_list &&
+ fl_list->is_mac_info_configured &&
+ fl_list->m_mac_info.count(ip)>0) {
+ return &fl_list->m_mac_info[ip];
+ }
+ return NULL;
+}
+
+
+
+uint16_t CSimplePacketParser::getPktSize(){
+ uint16_t ip_len=0;
+ if (m_ipv4) {
+ ip_len=m_ipv4->getTotalLength();
+ }
+ if (m_ipv6) {
+ ip_len=m_ipv6->getSize()+m_ipv6->getPayloadLen();
+ }
+ return ( ip_len +m_vlan_offset+14);
+}
+
+
+
+
+uint8_t CSimplePacketParser::getTTl(){
+ if (m_ipv4) {
+ return ( m_ipv4->getTimeToLive() );
+ }
+ if (m_ipv6) {
+ return ( m_ipv6->getHopLimit() );
+ }
+ return (0);
+}
+
+bool CSimplePacketParser::Parse(){
+
+ rte_mbuf_t * m=m_m;
+ uint8_t *p=rte_pktmbuf_mtod(m, uint8_t*);
+ EthernetHeader *m_ether = (EthernetHeader *)p;
+ IPHeader * ipv4=0;
+ IPv6Header * ipv6=0;
+ uint16_t pkt_size=rte_pktmbuf_pkt_len(m);
+ m_vlan_offset=0;
+ m_option_offset=0;
+
+ uint8_t protocol = 0;
+
+ // Retrieve the protocol type from the packet
+ switch( m_ether->getNextProtocol() ) {
+ case EthernetHeader::Protocol::IP :
+ // IPv4 packet
+ ipv4=(IPHeader *)(p+14);
+ protocol = ipv4->getProtocol();
+ m_option_offset = 14 + IPV4_HDR_LEN;
+ break;
+ case EthernetHeader::Protocol::IPv6 :
+ // IPv6 packet
+ ipv6=(IPv6Header *)(p+14);
+ protocol = ipv6->getNextHdr();
+ m_option_offset = 14 +IPV6_HDR_LEN;
+ break;
+ case EthernetHeader::Protocol::VLAN :
+ m_vlan_offset = 4;
+ switch ( m_ether->getVlanProtocol() ){
+ case EthernetHeader::Protocol::IP:
+ // IPv4 packet
+ ipv4=(IPHeader *)(p+18);
+ protocol = ipv4->getProtocol();
+ m_option_offset = 18+ IPV4_HDR_LEN;
+ break;
+ case EthernetHeader::Protocol::IPv6 :
+ // IPv6 packet
+ ipv6=(IPv6Header *)(p+18);
+ protocol = ipv6->getNextHdr();
+ m_option_offset = 18 + IPV6_HDR_LEN;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ m_protocol =protocol;
+ m_ipv4=ipv4;
+ m_ipv6=ipv6;
+
+ if ( protocol == 0 ){
+ return (false);
+ }
+ return (true);
+}
+
+
+
+
+
+
diff --git a/src/bp_sim.h b/src/bp_sim.h
new file mode 100755
index 00000000..002c9d0a
--- /dev/null
+++ b/src/bp_sim.h
@@ -0,0 +1,3990 @@
+#ifndef BP_SIM_H
+#define BP_SIM_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#include <stddef.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <vector>
+#include <algorithm>
+#include <map>
+#include <string>
+#include <iostream>
+#include <fstream>
+#include <string>
+#include <queue>
+#include "mbuf.h"
+#include <common/c_common.h>
+#include <common/captureFile.h>
+#include <string>
+#include <common/Network/Packet/TcpHeader.h>
+#include <common/Network/Packet/UdpHeader.h>
+#include <common/Network/Packet/IPHeader.h>
+#include <common/Network/Packet/IPv6Header.h>
+#include <common/Network/Packet/EthernetHeader.h>
+#include <math.h>
+#include <common/bitMan.h>
+#include <yaml-cpp/yaml.h>
+#include "os_time.h"
+#include "pal_utl.h"
+#include "rx_check_header.h"
+#include "rx_check.h"
+#include "time_histogram.h"
+#include "utl_cpuu.h"
+#include "tuple_gen.h"
+#include "utl_jitter.h"
+#include "msg_manager.h"
+#include "nat_check.h"
+#include <common/cgen_map.h>
+#include <arpa/inet.h>
+#include "platform_cfg.h"
+
+#undef NAT_TRACE_
+
+
+#define FORCE_NO_INLINE __attribute__ ((noinline))
+
+#define MAX_LATENCY_PORTS 12
+
+/* IP address; the last 32 bits of the IPv6 address overlay the IPv4 address */
+typedef struct {
+ uint16_t v6[6]; /* first 96 bits of IPv6 */
+ uint32_t v4; /* last 32 bits of IPv6, shared with the IPv4 address */
+} ipaddr_t;
+
+/* reserve both 0xFF and 0xFE; a router will decrement 0xFF by one */
+#define TTL_RESERVE_DUPLICATE 0xff
+
+/*
+ * Length of string needed to hold the largest port (16-bit) address
+ */
+#define INET_PORTSTRLEN 5
+
+
+
+
+
+/* VM commands */
+
+class CMiniVMCmdBase {
+public:
+ enum MV_FLAGS {
+ MIN_VM_V6=1 // IPv6 addressing
+ };
+ uint8_t m_cmd;
+ uint8_t m_flags;
+ uint16_t m_start_0;
+ uint16_t m_stop_1;
+ uint16_t m_add_pkt_len; /* extra length to request for the mbuf packet */
+};
+
+class CMiniVMReplaceIP : public CMiniVMCmdBase {
+public:
+ ipaddr_t m_server_ip;
+};
+
+class CMiniVMReplaceIPWithPort : public CMiniVMReplaceIP {
+public:
+ uint16_t m_start_port;
+ uint16_t m_stop_port;
+ uint16_t m_client_port;
+ uint16_t m_server_port;
+};
+
+/* this command replaces an IP address in 2 different locations, and a port
+
+c = 10.1.1.2
+o = 10.1.1.2
+m = audio 102000
+
+==>
+
+c = xx.xx.xx.xx
+o = xx.xx.xx.xx
+m = audio yyyy
+
+*/
+
+class CMiniVMReplaceIP_IP_Port : public CMiniVMCmdBase {
+public:
+ ipaddr_t m_ip;
+ uint16_t m_ip0_start;
+ uint16_t m_ip0_stop;
+
+ uint16_t m_ip1_start;
+ uint16_t m_ip1_stop;
+
+
+ uint16_t m_port;
+ uint16_t m_port_start;
+ uint16_t m_port_stop;
+};
+
+class CMiniVMReplaceIP_PORT_IP_IP_Port : public CMiniVMReplaceIP_IP_Port {
+public:
+ ipaddr_t m_ip_via;
+ uint16_t m_port_via;
+
+ uint16_t m_ip_via_start;
+ uint16_t m_ip_via_stop;
+};
+
+class CMiniVMDynPyload : public CMiniVMCmdBase {
+public:
+ void * m_ptr;
+ ipaddr_t m_ip;
+} ;
+
+/* VM with SIMD commands for RTSP; SIP/FTP commands can be added too */
+
+typedef enum { VM_REPLACE_IP_OFFSET =0x12, /* fix ip at offset */
+ VM_REPLACE_IP_PORT_OFFSET, /* fix ip at offset and client port*/
+ VM_REPLACE_IP_PORT_RESPONSE_OFFSET, /* fix client port and server port */
+ VM_REPLACE_IP_IP_PORT,/* SIMD command to replace IPV4, IPV4, PORT in 3 different locations, see CMiniVMReplaceIP_IP_Port */
+ VM_REPLACE_IPVIA_IP_IP_PORT,/* SIMD command to replace ip:port, IPV4, IPV4, PORT in 3 different locations, see CMiniVMReplaceIP_PORT_IP_IP_Port */
+ VM_DYN_PYLOAD,
+
+
+ VM_EOP /* end of program */
+ } mini_vm_op_code_t;
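+
+/* Program sketch (illustrative, mirrors how the plugins build programs): a
+   mini-VM program is an array of command pointers terminated by a VM_EOP
+   command, e.g.
+
+   CMiniVMReplaceIP  rep;  rep.m_cmd = VM_REPLACE_IP_OFFSET;  // plus offsets/ip
+   CMiniVMCmdBase    eop;  eop.m_cmd = VM_EOP;
+   CMiniVMCmdBase  * prog[2] = { &rep, &eop };
+   // then handed to CMiniVM::mini_vm_run(prog) with m_pkt_info and
+   // m_pyload_mbuf_ptr already set up by the caller
+*/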
+
+
+/* works only on little-endian x86 */
+#define MY_B(b) (((int)b)&0xff)
+
+// Routine to create IPv4 address string
+inline int ip_to_str(uint32_t ip,char * str){
+ uint32_t ipv4 = PKT_HTONL(ip);
+ inet_ntop(AF_INET, (const char *)&ipv4, str, INET_ADDRSTRLEN);
+ return(strlen(str));
+}
+
+// Routine to create IPv6 address string
+inline int ipv6_to_str(ipaddr_t *ip,char * str){
+ int idx=0;
+ uint16_t ipv6[8];
+ for (uint8_t i=0; i<6; i++) {
+ ipv6[i] = PKT_HTONS(ip->v6[i]);
+ }
+ uint32_t ipv4 = PKT_HTONL(ip->v4);
+ ipv6[6] = ipv4 & 0xffff;
+ ipv6[7] = ipv4 >> 16;
+
+ str[idx++] = '[';
+ inet_ntop(AF_INET6, (const char *)&ipv6, &str[1], INET6_ADDRSTRLEN);
+ idx = strlen(str);
+ str[idx++] = ']';
+ str[idx] = 0;
+ return(idx);
+}
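+
+/* Formatting sketch (illustrative): ip_to_str(0x0a010102, buf) writes
+   "10.1.1.2" into buf and returns 8 (its length); ipv6_to_str() additionally
+   wraps the textual address in '[' ']' so the result can be spliced directly
+   into a URI. Both return the number of characters written. */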
+
+class CFlowPktInfo ;
+
+class CMiniVM {
+
+public:
+ CMiniVM(){
+ m_new_pkt_size=0;
+ }
+
+ int mini_vm_run(CMiniVMCmdBase * cmds[]);
+ int mini_vm_replace_ip(CMiniVMReplaceIP * cmd);
+ int mini_vm_replace_port_ip(CMiniVMReplaceIPWithPort * cmd);
+ int mini_vm_replace_ports(CMiniVMReplaceIPWithPort * cmd);
+ int mini_vm_replace_ip_ip_ports(CMiniVMReplaceIP_IP_Port * cmd);
+ int mini_vm_replace_ip_via_ip_ip_ports(CMiniVMReplaceIP_PORT_IP_IP_Port * cmd);
+ int mini_vm_dyn_payload( CMiniVMDynPyload * cmd);
+
+
+private:
+ int append_with_end_of_line(uint16_t len){
+ //assert(m_new_pkt_size<=0);
+ if (m_new_pkt_size <0 ) {
+ /* pad the shrunken tail with '\n' (0x0a) characters */
+ memset(m_pyload_mbuf_ptr+len+m_new_pkt_size,0xa,(-m_new_pkt_size));
+ }
+ return (0);
+ }
+public:
+ int16_t m_new_pkt_size; /* New packet size after transform by plugin */
+ CFlowPktInfo * m_pkt_info;
+ char * m_pyload_mbuf_ptr; /* pointer to the payload of the newly allocated mbuf packet */
+};
+
+
+
+
+
+class CGenNode;
+class CFlowYamlInfo;
+class CFlowGenListPerThread ;
+
+
+/* callback */
+void on_node_first(uint8_t plugin_id,CGenNode * node,
+ CFlowYamlInfo * template_info,
+ CTupleTemplateGeneratorSmart * tuple_gen,
+ CFlowGenListPerThread * flow_gen
+ );
+
+void on_node_last(uint8_t plugin_id,CGenNode * node);
+
+rte_mbuf_t * on_node_generate_mbuf(uint8_t plugin_id,CGenNode * node,CFlowPktInfo * pkt_info);
+
+
+
+
+class CPreviewMode ;
+struct CGenNode;
+/* represent the virtual interface
+*/
+
+/* counters per side */
+class CVirtualIFPerSideStats {
+public:
+ CVirtualIFPerSideStats(){
+ Clear();
+ m_template.Clear();
+ }
+
+ uint64_t m_tx_pkt;
+ uint64_t m_tx_rx_check_pkt;
+ uint64_t m_tx_bytes;
+ uint64_t m_tx_drop;
+ uint64_t m_tx_queue_full;
+ uint64_t m_tx_alloc_error;
+
+ CPerTxthreadTemplateInfo m_template;
+
+public:
+
+ void Add(CVirtualIFPerSideStats * obj){
+ m_tx_pkt += obj->m_tx_pkt;
+ m_tx_rx_check_pkt +=obj->m_tx_rx_check_pkt;
+ m_tx_bytes += obj->m_tx_bytes;
+ m_tx_drop += obj->m_tx_drop;
+ m_tx_alloc_error += obj->m_tx_alloc_error;
+ m_tx_queue_full +=obj->m_tx_queue_full;
+ m_template.Add(&obj->m_template);
+ }
+
+ void Clear(){
+ m_tx_pkt=0;
+ m_tx_rx_check_pkt=0;
+ m_tx_bytes=0;
+ m_tx_drop=0;
+ m_tx_alloc_error=0;
+ m_tx_queue_full=0;
+ m_template.Clear();
+ }
+
+ inline void Dump(FILE *fd);
+
+};
+
+
+void CVirtualIFPerSideStats::Dump(FILE *fd){
+
+ #define DP_B(f) if (f) fprintf(fd," %-40s : %llu \n",#f,(unsigned long long)(f))
+ DP_B(m_tx_pkt);
+ DP_B(m_tx_rx_check_pkt);
+ DP_B(m_tx_bytes);
+ DP_B(m_tx_drop);
+ DP_B(m_tx_alloc_error);
+ DP_B(m_tx_queue_full);
+ m_template.Dump(fd);
+}
+
+
+
+
+
+
+class CVirtualIF {
+public:
+
+
+ CVirtualIF (){
+ m_preview_mode =NULL;
+ }
+public:
+
+ virtual int open_file(std::string file_name)=0;
+
+ virtual int close_file(void)=0;
+
+
+ /**
+ * send one packet
+ *
+ * @param node
+ *
+ * @return
+ */
+ virtual int send_node(CGenNode * node) =0;
+
+
+ /**
+ * send one packet to a specific dir. flush all packets
+ *
+ * @param dir
+ * @param m
+ */
+ virtual void send_one_pkt(pkt_dir_t dir, rte_mbuf_t *m){
+ }
+
+
+ /**
+ * flush all pending packets into the stream
+ *
+ * @return
+ */
+ virtual int flush_tx_queue(void)=0;
+
+public:
+
+
+ void set_review_mode(CPreviewMode * preview_mode){
+ m_preview_mode =preview_mode;
+ }
+
+protected :
+ CPreviewMode * m_preview_mode;
+
+public:
+ CVirtualIFPerSideStats m_stats[CS_NUM];
+};
+
+
+
+/* global info */
+
+#define CONST_NB_MBUF 16380
+
+
+#define MAX_BUF_SIZE (2048)
+#define CONST_MBUF_SIZE (MAX_BUF_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+
+/* this is the first small part of the packet that we manipulate */
+#define FIRST_PKT_SIZE 64
+#define CONST_SMALL_MBUF_SIZE (FIRST_PKT_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+
+
+#define _128_MBUF_SIZE 128
+#define _256_MBUF_SIZE 256
+#define _512_MBUF_SIZE 512
+#define _1024_MBUF_SIZE 1024
+
+
+
+#define CONST_128_MBUF_SIZE (128 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define CONST_256_MBUF_SIZE (256 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define CONST_512_MBUF_SIZE (512 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define CONST_1024_MBUF_SIZE (1024 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+
+
+class CPreviewMode {
+public:
+ CPreviewMode(){
+ clean();
+ }
+ void clean(){
+ m_flags = 0;
+ m_flags1=0;
+ setCores(1);
+ set_zmq_publish_enable(true);
+ }
+
+ void setFileWrite(bool enable){
+ btSetMaskBit32(m_flags,0,0,enable?1:0);
+ }
+
+ bool getFileWrite(){
+ return (btGetMaskBit32(m_flags,0,0) ? true:false);
+ }
+
+ void setDisableMbufCache(bool enable){
+ btSetMaskBit32(m_flags,2,2,enable?1:0);
+ }
+
+ bool isMbufCacheDisabled(){
+ return (btGetMaskBit32(m_flags,2,2) ? true:false);
+ }
+
+ void set_disable_flow_control_setting(bool enable){
+ btSetMaskBit32(m_flags,4,4,enable?1:0);
+ }
+
+ bool get_is_disable_flow_control_setting(){
+ return (btGetMaskBit32(m_flags,4,4) ? true:false);
+ }
+
+
+ /* learn & verify mode */
+ void set_lean_and_verify_mode_enable(bool enable){
+ btSetMaskBit32(m_flags,5,5,enable?1:0);
+ }
+
+ bool get_learn_and_verify_mode_enable(){
+ return (btGetMaskBit32(m_flags,5,5) ? true:false);
+ }
+
+
+ /* learn mode */
+ void set_lean_mode_enable(bool enable){
+ btSetMaskBit32(m_flags,6,6,enable?1:0);
+ }
+
+ bool get_learn_mode_enable(){
+ return (btGetMaskBit32(m_flags,6,6) ? true:false);
+ }
+
+
+ /* IPv6 enable/disable */
+ void set_ipv6_mode_enable(bool enable){
+ btSetMaskBit32(m_flags,7,7,enable?1:0);
+ }
+
+ bool get_ipv6_mode_enable(){
+ return (btGetMaskBit32(m_flags,7,7) ? true:false);
+ }
+
+ void setVMode(uint8_t vmode){
+ btSetMaskBit32(m_flags,10,8,vmode);
+ }
+ uint8_t getVMode(){
+ return (btGetMaskBit32(m_flags,10,8) );
+ }
+
+
+ void setRealTime(bool enable){
+ btSetMaskBit32(m_flags,11,11,enable?1:0);
+ }
+
+ bool getRealTime(){
+ return (btGetMaskBit32(m_flags,11,11) ? true:false);
+ }
+
+ void setClientServerFlip(bool enable){
+ btSetMaskBit32(m_flags,12,12,enable?1:0);
+ }
+
+ bool getClientServerFlip(){
+ return (btGetMaskBit32(m_flags,12,12) ? true:false);
+ }
+
+ void setSingleCore(bool enable){
+ btSetMaskBit32(m_flags,13,13,enable?1:0);
+ }
+
+ bool getSingleCore(){
+ return (btGetMaskBit32(m_flags,13,13) ? true:false);
+ }
+
+ /* -p */
+ void setClientServerFlowFlip(bool enable){
+ btSetMaskBit32(m_flags,14,14,enable?1:0);
+ }
+
+ bool getClientServerFlowFlip(){
+ return (btGetMaskBit32(m_flags,14,14) ? true:false);
+ }
+
+
+
+ void setNoCleanFlowClose(bool enable){
+ btSetMaskBit32(m_flags,15,15,enable?1:0);
+ }
+
+ bool getNoCleanFlowClose(){
+ return (btGetMaskBit32(m_flags,15,15) ? true:false);
+ }
+
+ void setCores(uint8_t cores){
+ btSetMaskBit32(m_flags,24,16,cores);
+ }
+
+ uint8_t getCores(){
+ return (btGetMaskBit32(m_flags,24,16) );
+ }
+
+ bool getIsOneCore(){
+ return (getCores()==1?true:false);
+ }
+
+ void setOnlyLatency(bool enable){
+ btSetMaskBit32(m_flags,25,25,enable?1:0);
+ }
+
+ bool getOnlyLatency(){
+ return (btGetMaskBit32(m_flags,25,25) ? true:false);
+ }
+
+
+ void set_1g_mode(bool enable){
+ btSetMaskBit32(m_flags,26,26,enable?1:0);
+ }
+
+ bool get_1g_mode(){
+ return (btGetMaskBit32(m_flags,26,26) ? true:false);
+ }
+
+
+ void set_zmq_publish_enable(bool enable){
+ btSetMaskBit32(m_flags,27,27,enable?1:0);
+ }
+
+ bool get_zmq_publish_enable(){
+ return (btGetMaskBit32(m_flags,27,27) ? true:false);
+ }
+
+ void set_pcap_mode_enable(bool enable){
+ btSetMaskBit32(m_flags,28,28,enable?1:0);
+ }
+
+ bool get_pcap_mode_enable(){
+ return (btGetMaskBit32(m_flags,28,28) ? true:false);
+ }
+
+ /* VLAN enable/disable */
+ bool get_vlan_mode_enable(){
+ return (btGetMaskBit32(m_flags,29,29) ? true:false);
+ }
+
+ void set_vlan_mode_enable(bool enable){
+ btSetMaskBit32(m_flags,29,29,enable?1:0);
+ }
+
+ bool get_mac_ip_overide_enable(){
+ return (btGetMaskBit32(m_flags,30,30) ? true:false);
+ }
+
+ void set_mac_ip_overide_enable(bool enable){
+ btSetMaskBit32(m_flags,30,30,enable?1:0);
+ if (enable) {
+ set_mac_ip_features_enable(enable);
+ }
+ }
+
+ bool get_is_rx_check_enable(){
+ return (btGetMaskBit32(m_flags,31,31) ? true:false);
+ }
+
+ void set_rx_check_enable(bool enable){
+ btSetMaskBit32(m_flags,31,31,enable?1:0);
+ }
+
+
+ bool get_mac_ip_features_enable(){
+ return (btGetMaskBit32(m_flags1,0,0) ? true:false);
+ }
+
+ void set_mac_ip_features_enable(bool enable){
+ btSetMaskBit32(m_flags1,0,0,enable?1:0);
+ }
+
+ bool get_mac_ip_mapping_enable(){
+ return (btGetMaskBit32(m_flags1,1,1) ? true:false);
+ }
+
+ void set_mac_ip_mapping_enable(bool enable){
+ btSetMaskBit32(m_flags1,1,1,enable?1:0);
+ if (enable) {
+ set_mac_ip_features_enable(enable);
+ }
+ }
+
+ bool get_vm_one_queue_enable(){
+ return (btGetMaskBit32(m_flags1,2,2) ? true:false);
+ }
+
+ void set_no_keyboard(bool enable){
+ btSetMaskBit32(m_flags1,5,5,enable?1:0);
+ }
+
+ bool get_no_keyboard(){
+ return (btGetMaskBit32(m_flags1,5,5) ? true:false);
+ }
+
+ void set_vm_one_queue_enable(bool enable){
+ btSetMaskBit32(m_flags1,2,2,enable?1:0);
+ }
+
+ /* -e */
+ void setClientServerFlowFlipAddr(bool enable){
+ btSetMaskBit32(m_flags1,3,3,enable?1:0);
+ }
+
+ bool getClientServerFlowFlipAddr(){
+ return (btGetMaskBit32(m_flags1,3,3) ? true:false);
+ }
+
+
+ /* split mac is enabled */
+ void setDestMacSplit(bool enable){
+ btSetMaskBit32(m_flags1,4,4,enable?1:0);
+ }
+
+ bool getDestMacSplit(){
+ return (btGetMaskBit32(m_flags1,4,4) ? true:false);
+ }
+
+
+
+
+public:
+ void Dump(FILE *fd);
+
+private:
+ uint32_t m_flags;
+ uint32_t m_flags1;
+
+
+};
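+
+/* Bit positions used by the accessors above (lower bits are handled earlier in this class):
+   m_flags : bit 6 learn mode, bit 7 IPv6, bits 8-10 vmode, bit 11 realtime,
+             bit 12 client/server flip, bit 13 single core, bit 14 flow flip (-p),
+             bit 15 no clean flow close, bits 16-24 cores, bit 25 latency only,
+             bit 26 1G mode, bit 27 ZMQ publish, bit 28 pcap mode, bit 29 VLAN,
+             bit 30 MAC/IP override, bit 31 rx check.
+   m_flags1: bit 0 MAC/IP features, bit 1 MAC/IP mapping, bit 2 VM one queue,
+             bit 3 flow flip addr (-e), bit 4 dest MAC split, bit 5 no keyboard. */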
+
+
+
+typedef struct mac_align_t_ {
+ uint8_t dest[6];
+ uint8_t src[6];
+ uint8_t pad[4];
+} mac_align_t ;
+
+struct CMacAddrCfg {
+public:
+ CMacAddrCfg (){
+ memset(u.m_data,0,sizeof(u.m_data));
+ u.m_mac.dest[3]=1;
+ u.m_mac.src[3]=1;
+ }
+ union {
+ mac_align_t m_mac;
+ uint8_t m_data[16];
+ } u;
+} __rte_cache_aligned;
+
+struct CParserOption {
+
+public:
+ /* Runtime flags */
+ enum {
+ RUN_FLAGS_RXCHECK_CONST_TS =1,
+ };
+
+public:
+ CParserOption(){
+ m_factor=1.0;
+ m_duration=0.0;
+ m_latency_rate =0;
+ m_latency_mask =0xffffffff;
+ m_latency_prev=0;
+ m_zmq_port=4500;
+ m_telnet_port =4501;
+ m_platform_factor=1.0;
+ m_expected_portd = 4; /* number of ports the user expects to use; may be fewer than the number found in the system */
+ m_vlan_port[0]=100;
+ m_vlan_port[1]=100;
+ m_rx_check_sampe=0;
+ m_rx_check_hops = 0;
+ m_io_mode=1;
+ m_run_flags=0;
+ prefix="";
+ m_mac_splitter=0;
+ }
+
+ CPreviewMode preview;
+ float m_factor;
+ float m_duration;
+ float m_platform_factor;
+ uint16_t m_vlan_port[2]; /* vlan value */
+ uint16_t m_src_ipv6[6]; /* Most significant 96 bits */
+ uint16_t m_dst_ipv6[6]; /* Most significant 96 bits */
+ uint16_t m_tcp_aging;
+ uint16_t m_udp_aging;
+
+ uint32_t m_latency_rate; /* pkts/sec for each thread/port; zero disables latency */
+ uint32_t m_latency_mask;
+ uint32_t m_latency_prev;
+ uint16_t m_rx_check_sampe; /* the sample rate of flows */
+ uint16_t m_rx_check_hops;
+ uint16_t m_zmq_port;
+ uint16_t m_telnet_port;
+ uint16_t m_expected_portd;
+ uint16_t m_io_mode; /* 0 - disable, 1 - normal, 2 - short */
+ uint16_t m_run_flags;
+ uint8_t m_mac_splitter;
+ uint8_t m_pad;
+
+
+ std::string cfg_file;
+ std::string mac_file;
+ std::string platform_cfg_file;
+
+ std::string out_file;
+ std::string prefix;
+
+
+ CMacAddrCfg m_mac_addr[MAX_LATENCY_PORTS];
+
+ uint8_t * get_src_mac_addr(int if_index){
+ return (m_mac_addr[if_index].u.m_mac.src);
+ }
+ uint8_t * get_dst_src_mac_addr(int if_index){
+ return (m_mac_addr[if_index].u.m_mac.dest);
+ }
+
+public:
+ uint32_t get_expected_ports(){
+ return (m_expected_portd);
+ }
+
+ /* how many dual ports supported */
+ uint32_t get_expected_dual_ports(void){
+ return (m_expected_portd>>1);
+ }
+
+ uint32_t get_number_of_dp_cores_needed() {
+ return ( (m_expected_portd>>1) * preview.getCores());
+ }
+ bool is_latency_disabled(){
+ return ( m_latency_rate == 0 ?true:false);
+ }
+
+ bool is_latency_enabled(){
+ return ( !is_latency_disabled() );
+ }
+ inline void set_rxcheck_const_ts(){
+ m_run_flags |= RUN_FLAGS_RXCHECK_CONST_TS;
+ }
+ inline void clear_rxcheck_const_ts(){
+ m_run_flags &=~ RUN_FLAGS_RXCHECK_CONST_TS;
+ }
+
+ inline bool is_rxcheck_const_ts(){
+ return ( (m_run_flags &RUN_FLAGS_RXCHECK_CONST_TS)?true:false );
+ }
+
+ void dump(FILE *fd);
+};
+
+
+class CGlobalMemory {
+
+public:
+ CGlobalMemory(){
+ CPlatformMemoryYamlInfo info;
+ set(info,1.0);
+ m_num_cores=1;
+ }
+ void set(const CPlatformMemoryYamlInfo &info,float mul);
+
+ uint32_t get_2k_num_blocks(){
+ return ( m_mbuf[MBUF_2048]);
+ }
+
+ uint32_t get_each_core_dp_flows(){
+ return ( m_mbuf[MBUF_DP_FLOWS]/m_num_cores );
+ }
+ void set_number_of_dp_cors(uint32_t cores){
+ m_num_cores = cores;
+ }
+
+ void Dump(FILE *fd);
+
+public:
+ uint32_t m_mbuf[MBUF_SIZE]; // relative to traffic, normalized to 2x10G ports
+ uint32_t m_num_cores;
+
+};
+
+typedef uint8_t socket_id_t;
+typedef uint8_t port_id_t;
+/* the real physical thread id */
+typedef uint8_t physical_thread_id_t;
+
+
+typedef uint8_t virtual_thread_id_t;
+/*
+
+ virtual thread 0 (v0) is always the master
+
+for 2 dual ports (2x2 = 4 ports) the virtual threads look like this:
+-----------------
+DEFAULT:
+-----------------
+ (0,1) (2,3)
+ dual-if0 dual-if-1
+ v1 v2
+ v3 v4
+ v5 v6
+ v7 v8
+
+ latency is v9
+
+ */
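+
+/* With the default layout above (2 dual-ifs, 4 DP threads per dual-if) virtual id 0
+   is the master, ids 1..8 are the data-plane threads and the latency thread takes
+   the last id (9); thread_phy_to_virt()/thread_virt_to_phy() below translate between
+   this virtual numbering and the physical core ids. */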
+
+#define MAX_SOCKETS_SUPPORTED (4)
+#define MAX_THREADS_SUPPORTED (120)
+
+
+class CPlatformSocketInfoBase {
+
+
+public:
+ /* sockets API */
+
+ /* is socket enabled */
+ virtual bool is_sockets_enable(socket_id_t socket)=0;
+
+ /* number of main active sockets. socket #0 is always used */
+ virtual socket_id_t max_num_active_sockets()=0;
+
+public:
+ /* which socket to allocate memory to each port */
+ virtual socket_id_t port_to_socket(port_id_t port)=0;
+
+public:
+ /* this is from the CLI, number of threads per dual port */
+ virtual void set_number_of_threads_per_ports(uint8_t num_threads)=0;
+ virtual void set_latency_thread_is_enabled(bool enable)=0;
+ virtual void set_number_of_dual_ports(uint8_t num_dual_ports)=0;
+
+
+ virtual bool sanity_check()=0;
+
+ /* return the core mask */
+ virtual uint64_t get_cores_mask()=0;
+
+ /* virtual thread_id always runs from 1..number of virtual threads */
+ virtual virtual_thread_id_t thread_phy_to_virt(physical_thread_id_t phy_id)=0;
+
+ /* return the mapping between virtual and physical id */
+ virtual physical_thread_id_t thread_virt_to_phy(virtual_thread_id_t virt_id)=0;
+
+ virtual bool thread_phy_is_master(physical_thread_id_t phy_id)=0;
+ virtual bool thread_phy_is_latency(physical_thread_id_t phy_id)=0;
+
+ virtual void dump(FILE *fd)=0;
+};
+
+class CPlatformSocketInfoNoConfig : public CPlatformSocketInfoBase {
+
+public:
+ CPlatformSocketInfoNoConfig(){
+ m_dual_if=0;
+ m_threads_per_dual_if=0;
+ m_latency_is_enabled=false;
+ }
+
+ /* is socket enabled */
+ bool is_sockets_enable(socket_id_t socket);
+
+ /* number of main active sockets. socket #0 is always used */
+ socket_id_t max_num_active_sockets();
+
+public:
+ /* which socket to allocate memory to each port */
+ socket_id_t port_to_socket(port_id_t port);
+
+public:
+ /* this is from the CLI, number of threads per dual port */
+ void set_number_of_threads_per_ports(uint8_t num_threads);
+ void set_latency_thread_is_enabled(bool enable);
+ void set_number_of_dual_ports(uint8_t num_dual_ports);
+
+ bool sanity_check();
+
+ /* return the core mask */
+ uint64_t get_cores_mask();
+
+ /* virtual thread_id always runs from 1..number of virtual threads */
+ virtual_thread_id_t thread_phy_to_virt(physical_thread_id_t phy_id);
+
+ /* return the mapping between virtual and physical id */
+ physical_thread_id_t thread_virt_to_phy(virtual_thread_id_t virt_id);
+
+ bool thread_phy_is_master(physical_thread_id_t phy_id);
+ bool thread_phy_is_latency(physical_thread_id_t phy_id);
+
+ virtual void dump(FILE *fd);
+
+private:
+ uint32_t m_dual_if;
+ uint32_t m_threads_per_dual_if;
+ bool m_latency_is_enabled;
+};
+
+
+
+/* there is a configuration file */
+class CPlatformSocketInfoConfig : public CPlatformSocketInfoBase {
+public:
+ bool Create(CPlatformCoresYamlInfo * platform);
+ void Delete();
+
+ /* is socket enabled */
+ bool is_sockets_enable(socket_id_t socket);
+
+ /* number of main active sockets. socket #0 is always used */
+ socket_id_t max_num_active_sockets();
+
+public:
+ /* which socket to allocate memory to each port */
+ socket_id_t port_to_socket(port_id_t port);
+
+public:
+ /* this is from the CLI, number of threads per dual port */
+ void set_number_of_threads_per_ports(uint8_t num_threads);
+ void set_latency_thread_is_enabled(bool enable);
+ void set_number_of_dual_ports(uint8_t num_dual_ports);
+
+ bool sanity_check();
+
+ /* return the core mask */
+ uint64_t get_cores_mask();
+
+ /* virtual thread_id always runs from 1..number of virtual threads */
+ virtual_thread_id_t thread_phy_to_virt(physical_thread_id_t phy_id);
+
+ /* return the mapping between virtual and physical id */
+ physical_thread_id_t thread_virt_to_phy(virtual_thread_id_t virt_id);
+
+ bool thread_phy_is_master(physical_thread_id_t phy_id);
+ bool thread_phy_is_latency(physical_thread_id_t phy_id);
+
+public:
+ virtual void dump(FILE *fd);
+private:
+ void reset();
+ bool init();
+
+private:
+ bool m_sockets_enable[MAX_SOCKETS_SUPPORTED];
+ uint32_t m_sockets_enabled;
+ socket_id_t m_socket_per_dual_if[(MAX_LATENCY_PORTS>>1)];
+
+ uint32_t m_max_threads_per_dual_if;
+
+ uint32_t m_num_dual_if;
+ uint32_t m_threads_per_dual_if;
+ bool m_latency_is_enabled;
+ uint8_t m_thread_virt_to_phy[MAX_THREADS_SUPPORTED];
+ uint8_t m_thread_phy_to_virtual[MAX_THREADS_SUPPORTED];
+
+ CPlatformCoresYamlInfo * m_platform;
+};
+
+
+
+class CPlatformSocketInfo {
+
+public:
+ bool Create(CPlatformCoresYamlInfo * platform);
+ void Delete();
+
+public:
+ /* sockets API */
+
+ /* is socket enabled */
+ bool is_sockets_enable(socket_id_t socket);
+
+ /* number of main active sockets. socket #0 is always used */
+ socket_id_t max_num_active_sockets();
+
+public:
+ /* which socket to allocate memory to each port */
+ socket_id_t port_to_socket(port_id_t port);
+
+public:
+ /* this is from the CLI, number of threads per dual port */
+ void set_number_of_threads_per_ports(uint8_t num_threads);
+ void set_latency_thread_is_enabled(bool enable);
+ void set_number_of_dual_ports(uint8_t num_dual_ports);
+
+
+ bool sanity_check();
+
+ /* return the core mask */
+ uint64_t get_cores_mask();
+
+ /* virtual thread_id always runs from 1..number of virtual threads */
+ virtual_thread_id_t thread_phy_to_virt(physical_thread_id_t phy_id);
+
+ /* return the mapping between virtual and physical id */
+ physical_thread_id_t thread_virt_to_phy(virtual_thread_id_t virt_id);
+
+ bool thread_phy_is_master(physical_thread_id_t phy_id);
+ bool thread_phy_is_latency(physical_thread_id_t phy_id);
+
+ void dump(FILE *fd);
+
+
+private:
+ CPlatformSocketInfoBase * m_obj;
+ CPlatformCoresYamlInfo * m_platform;
+};
+
+class CRteMemPool {
+
+public:
+ inline rte_mbuf_t * _rte_pktmbuf_alloc(rte_mempool_t * mp ){
+ rte_mbuf_t * m=rte_pktmbuf_alloc(mp);
+ if ( likely(m != NULL) ) {
+ return (m);
+ }
+ dump_in_case_of_error(stderr);
+ assert(0);
+ }
+
+ inline rte_mbuf_t * pktmbuf_alloc(uint16_t size){
+
+ rte_mbuf_t * m;
+ if ( size < _128_MBUF_SIZE) {
+ m = _rte_pktmbuf_alloc(m_mbuf_pool_128);
+ }else if ( size < _256_MBUF_SIZE) {
+ m = _rte_pktmbuf_alloc(m_mbuf_pool_256);
+ }else if (size < _512_MBUF_SIZE) {
+ m = _rte_pktmbuf_alloc(m_mbuf_pool_512);
+ }else if (size < _1024_MBUF_SIZE) {
+ m = _rte_pktmbuf_alloc(m_mbuf_pool_1024);
+ }else{
+ assert(size<MAX_BUF_SIZE);
+ m = _rte_pktmbuf_alloc(m_big_mbuf_pool);
+ }
+ return (m);
+ }
+
+ inline rte_mbuf_t * pktmbuf_alloc_small(){
+ return ( _rte_pktmbuf_alloc(m_small_mbuf_pool) );
+ }
+
+ inline rte_mbuf_t * pktmbuf_alloc_big(){
+ return ( _rte_pktmbuf_alloc(m_big_mbuf_pool) );
+ }
+
+ void dump(FILE *fd);
+
+ void dump_in_case_of_error(FILE *fd);
+
+public:
+ rte_mempool_t * m_big_mbuf_pool; /* pool for const packets */
+ rte_mempool_t * m_small_mbuf_pool; /* pool for start packets */
+ rte_mempool_t * m_mbuf_pool_128;
+ rte_mempool_t * m_mbuf_pool_256;
+ rte_mempool_t * m_mbuf_pool_512;
+ rte_mempool_t * m_mbuf_pool_1024;
+ rte_mempool_t * m_mbuf_global_nodes;
+ uint32_t m_pool_id;
+};
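+
+/* pktmbuf_alloc() above dispatches on the requested size: <128 bytes uses the 128-byte
+   pool, <256 the 256-byte pool and so on up to 1024; anything larger (asserted to be
+   below MAX_BUF_SIZE) comes from the big-mbuf pool. On allocation failure
+   _rte_pktmbuf_alloc() dumps the pool state to stderr and asserts. */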
+
+
+
+
+class CGlobalInfo {
+public:
+ static void init_pools(uint32_t rx_buffers);
+
+ static inline rte_mbuf_t * pktmbuf_alloc_small(socket_id_t socket){
+ return ( m_mem_pool[socket].pktmbuf_alloc_small() );
+ }
+
+ static inline rte_mbuf_t * pktmbuf_alloc_big(socket_id_t socket){
+ return ( m_mem_pool[socket].pktmbuf_alloc_big() );
+ }
+
+
+
+ /**
+ * allocate an mbuf for the given size; small requests (size < FIRST_PKT_SIZE)
+ * are served from the small-buffer pool, larger ones from the per-size pools
+ *
+ * @param socket
+ * @param size
+ *
+ * @return
+ */
+ static inline rte_mbuf_t * pktmbuf_alloc(socket_id_t socket,uint16_t size){
+ if (size<FIRST_PKT_SIZE) {
+ return ( pktmbuf_alloc_small(socket));
+ }
+ return (m_mem_pool[socket].pktmbuf_alloc(size));
+ }
+
+
+ static inline bool is_learn_verify_mode(){
+ return ( m_options.preview.get_learn_mode_enable() && m_options.preview.get_learn_and_verify_mode_enable());
+ }
+
+ static inline bool is_learn_mode(){
+ return ( m_options.preview.get_learn_mode_enable() );
+ }
+
+ static inline bool is_ipv6_enable(void){
+ return ( m_options.preview.get_ipv6_mode_enable() );
+ }
+
+ static inline bool is_realtime(void){
+ //return (false);
+ return ( m_options.preview.getRealTime() );
+ }
+
+ static inline void set_realtime(bool enable){
+ m_options.preview.setRealTime(enable);
+ }
+
+ static uint32_t get_node_pool_size(){
+ return (m_nodes_pool_size);
+ }
+
+ static inline CGenNode * create_node(void){
+ CGenNode * res;
+ if ( unlikely (rte_mempool_get(m_mem_pool[0].m_mbuf_global_nodes, (void **)&res) <0) ){
+ rte_exit(EXIT_FAILURE, "can't allocate m_mbuf_global_nodes objects try to tune the configuration file \n");
+ return (0);
+ }
+ return (res);
+ }
+
+ static inline void free_node(CGenNode *p){
+ rte_mempool_put(m_mem_pool[0].m_mbuf_global_nodes, p);
+ }
+
+
+public:
+ static CRteMemPool m_mem_pool[MAX_SOCKETS_SUPPORTED];
+
+ static uint32_t m_nodes_pool_size;
+ static CParserOption m_options;
+ static CGlobalMemory m_memory_cfg;
+ static CPlatformSocketInfo m_socket;
+};
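+
+// Usage sketch (illustrative only, assuming 64 < FIRST_PKT_SIZE):
+//   rte_mbuf_t *m  = CGlobalInfo::pktmbuf_alloc(0, 64);  // small pool of socket 0
+//   CGenNode *node = CGlobalInfo::create_node();         // from the global nodes mempool
+//   ...
+//   CGlobalInfo::free_node(node);
+//   rte_pktmbuf_free(m);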
+
+
+static inline int get_is_rx_check_mode(){
+ return (CGlobalInfo::m_options.preview.get_is_rx_check_enable() ?1:0);
+}
+
+static inline bool get_is_rx_filter_enable(){
+ return ( ( get_is_rx_check_mode() || CGlobalInfo::is_learn_mode()) ?true:false );
+}
+static inline uint16_t get_rx_check_hops() {
+ return (CGlobalInfo::m_options.m_rx_check_hops);
+}
+
+#define MAX_PYLOAD_PKT_CHANGE 4
+/* info for the dynamic plugin */
+
+
+struct CFlowYamlDpPkt {
+ CFlowYamlDpPkt(){
+ m_pkt_id=0xff;
+ m_pyld_offset=0;
+ m_type=0;
+ m_len=0;
+ m_pkt_mask=0xffffffff;
+ }
+
+ uint8_t m_pkt_id; /* packet number */
+ uint8_t m_pyld_offset; /* 0-10 */
+ uint8_t m_type; /* 0 - random , 1 - inc */
+ uint8_t m_len; /* number of 32-bit data words: 1,2,3,.. */
+
+ uint32_t m_pkt_mask; /* 0xffffffff take all the packet */
+public:
+ void Dump(FILE *fd);
+};
+
+struct CFlowYamlDynamicPyloadPlugin {
+
+ CFlowYamlDynamicPyloadPlugin(){
+ m_num=0;
+ int i;
+ for (i=0;i<MAX_PYLOAD_PKT_CHANGE;i++ ) {
+ m_pkt_ids[i]=0xff;
+ }
+ }
+
+ uint8_t m_num; /* number of pkt ids */
+ uint8_t m_pkt_ids[MAX_PYLOAD_PKT_CHANGE]; /* -1 for not valid - fast mask */
+ CFlowYamlDpPkt m_program[MAX_PYLOAD_PKT_CHANGE];
+public:
+ void Add(CFlowYamlDpPkt & fd);
+ void Dump(FILE *fd);
+};
+
+struct CVlanYamlInfo {
+ CVlanYamlInfo(){
+ m_enable=0;
+ m_vlan_per_port[0]=100;
+ m_vlan_per_port[1]=200;
+ }
+ bool m_enable;
+ uint16_t m_vlan_per_port[2];
+
+public:
+ void Dump(FILE *fd);
+
+};
+
+
+
+struct CFlowYamlInfo {
+ CFlowYamlInfo(){
+ m_dpPkt=0;
+ m_server_addr=0;
+ }
+
+ std::string m_name;
+ double m_k_cps; //k CPS
+ double m_restart_time; /* restart time of this template */
+ dsec_t m_ipg_sec; // ipg in sec
+ dsec_t m_rtt_sec; // rtt in sec
+ uint32_t m_w;
+ uint32_t m_wlength;
+ uint32_t m_limit;
+ uint32_t m_flowcnt;
+ uint8_t m_plugin_id; /* 0 - default , 1 - RTSP160 , 2- RTSP250 */
+ bool m_one_app_server;
+ uint32_t m_server_addr;
+ bool m_one_app_server_was_set;
+ bool m_cap_mode;
+ bool m_cap_mode_was_set;
+ bool m_wlength_set;
+ bool m_limit_was_set;
+ CFlowYamlDynamicPyloadPlugin * m_dpPkt; /* plugin */
+
+public:
+ void Dump(FILE *fd);
+};
+
+
+
+
+#define _1MB_DOUBLE ((double)(1024.0*1024.0))
+#define _1GB_DOUBLE ((double)(1024.0*1024.0*1024.0))
+
+#define _1Mb_DOUBLE ((double)(1000.0*1000.0))
+
+
+#define _1MB (1024ULL*1024ULL)
+#define _1GB 1000000000ULL
+#define _500GB (_1GB*500)
+
+
+
+#define DP(f) if (f) printf(" %-40s: %llu \n",#f,f)
+#define DP_name(n,f) if (f) printf(" %-40s: %llu \n",n,f)
+
+#define DP_S(f,f_s) if (f) printf(" %-40s: %s \n",#f,f_s.c_str())
+
+class CFlowPktInfo;
+
+
+
+typedef enum {
+ KBYE_1024,
+ KBYE_1000
+} human_kbyte_t;
+
+std::string double_to_human_str(double num,
+ std::string units,
+ human_kbyte_t etype);
+
+
+
+class CCapFileFlowInfo ;
+
+#define SYNC_TIME_OUT ( 1.0/1000)
+/* this is a simple struct, do not add a constructor or destructor here!
+ we are optimizing the allocation/deallocation !!!
+ */
+struct CGenNode {
+public:
+
+ enum {
+ FLOW_PKT=0,
+ FLOW_FIF=1,
+ FLOW_DEFER_PORT_RELEASE=2,
+ FLOW_PKT_NAT=3,
+ FLOW_SYNC=4 /* called every 1 msec */
+
+ };
+
+ /* flags MASKS*/
+ enum {
+ NODE_FLAGS_DIR =1,
+ NODE_FLAGS_MBUF_CACHE =2,
+ NODE_FLAGS_SAMPLE_RX_CHECK =4,
+
+ NODE_FLAGS_LEARN_MODE =8, /* bits 3,4 MASK 0x18 wait for second direction packet */
+ NODE_FLAGS_LEARN_MSG_PROCESSED =0x10, /* got NAT msg */
+
+ NODE_FLAGS_LATENCY =0x20, /* this node belongs to the latency path */
+ NODE_FLAGS_INIT_START_FROM_SERVER_SIDE = 0x40,
+ NODE_FLAGS_ALL_FLOW_SAME_PORT_SIDE = 0x80,
+ NODE_FLAGS_INIT_START_FROM_SERVER_SIDE_SERVER_ADDR = 0x100 /* init packet start from server side with server addr */
+ };
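+
+ /* bits 3..4 of m_flags hold the NAT learn state used by set_nat_first_state()/
+ set_nat_wait_state() below: value 1 marks the first (learn) packet state and
+ value 2 the wait state. */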
+
+public:
+ /* C1 */
+ uint8_t m_type;
+ uint8_t m_thread_id; /* zero base */
+ uint8_t m_socket_id;
+ uint8_t m_pad2;
+
+ uint16_t m_src_port;
+ uint16_t m_flags; /* BIT 0 - DIR ,
+ BIT 1 - mbuf_cache
+ BIT 2 - SAMPLE DUPLICATE */
+
+ double m_time;
+
+ uint32_t m_src_ip; /* client ip */
+ uint32_t m_dest_ip; /* server ip */
+
+ uint64_t m_flow_id; /* id that goes up for each flow */
+
+ /*c2*/
+ CFlowPktInfo * m_pkt_info;
+
+ CCapFileFlowInfo * m_flow_info;
+ CFlowYamlInfo * m_template_info;
+
+ void * m_plugin_info;
+
+//private:
+
+ // cache line 1 - 64 bytes, a waste of space!
+ uint32_t m_nat_external_ipv4; /* client */
+ uint32_t m_nat_external_ipv4_server;
+ uint16_t m_nat_external_port;
+
+ uint16_t m_nat_pad;
+ mac_addr_align_t m_src_mac;
+ uint32_t m_end_of_cache_line[11];
+
+public:
+ bool operator <(const CGenNode * rsh ) const {
+ return (m_time<rsh->m_time);
+ }
+ bool operator ==(const CGenNode * rsh ) const {
+ return (m_time==rsh->m_time);
+ }
+ bool operator >(const CGenNode * rsh ) const {
+ return (m_time>rsh->m_time);
+ }
+public:
+ void Dump(FILE *fd);
+
+ void set_socket_id(socket_id_t socket){
+ m_socket_id=socket;
+ }
+
+ socket_id_t get_socket_id(){
+ return ( m_socket_id );
+ }
+
+
+ static void DumpHeader(FILE *fd);
+ inline bool is_last_in_flow();
+ inline uint16_t get_template_id();
+ inline bool is_repeat_flow();
+ inline bool can_cache_mbuf(void);
+
+ /* is it possible to cache MBUF */
+
+ inline void update_next_pkt_in_flow(void);
+ inline void reset_pkt_in_flow(void);
+ inline uint8_t get_plugin_id(void){
+ return ( m_template_info->m_plugin_id);
+ }
+
+ inline bool is_responder_pkt();
+ inline bool is_initiator_pkt();
+
+
+ inline bool is_eligible_from_server_side(){
+ return ( ((m_src_ip & 1) == 1) ?true:false);
+ }
+
+
+ inline void set_initiator_start_from_server_side_with_server_addr(bool enable){
+ if (enable) {
+ m_flags |= NODE_FLAGS_INIT_START_FROM_SERVER_SIDE_SERVER_ADDR;
+ }else{
+ m_flags &=~ NODE_FLAGS_INIT_START_FROM_SERVER_SIDE_SERVER_ADDR;
+ }
+ }
+
+ inline bool get_is_initiator_start_from_server_with_server_addr(){
+ return ( (m_flags &NODE_FLAGS_INIT_START_FROM_SERVER_SIDE_SERVER_ADDR)?true:false );
+ }
+
+ inline void set_initiator_start_from_server(bool enable){
+ if (enable) {
+ m_flags |= NODE_FLAGS_INIT_START_FROM_SERVER_SIDE;
+ }else{
+ m_flags &=~ NODE_FLAGS_INIT_START_FROM_SERVER_SIDE;
+ }
+ }
+ inline bool get_is_initiator_start_from_server(){
+ return ( (m_flags &NODE_FLAGS_INIT_START_FROM_SERVER_SIDE)?true:false );
+ }
+
+ inline void set_all_flow_from_same_dir(bool enable){
+ if (enable) {
+ m_flags |= NODE_FLAGS_ALL_FLOW_SAME_PORT_SIDE;
+ }else{
+ m_flags &=~ NODE_FLAGS_ALL_FLOW_SAME_PORT_SIDE;
+ }
+ }
+
+ inline bool get_is_all_flow_from_same_dir(void){
+ return ( (m_flags &NODE_FLAGS_ALL_FLOW_SAME_PORT_SIDE)?true:false );
+ }
+
+
+ /* direction for ip addr */
+ inline pkt_dir_t cur_pkt_ip_addr_dir();
+ /* direction for TCP/UDP port */
+ inline pkt_dir_t cur_pkt_port_addr_dir();
+ /* from which interface dir to get out */
+ inline pkt_dir_t cur_interface_dir();
+
+
+ inline void set_mbuf_cache_dir(pkt_dir_t dir){
+ if (dir) {
+ m_flags |=NODE_FLAGS_DIR;
+ }else{
+ m_flags &=~NODE_FLAGS_DIR;
+ }
+ }
+
+ inline pkt_dir_t get_mbuf_cache_dir(){
+ return ((pkt_dir_t)( m_flags &1));
+ }
+
+ inline void set_cache_mbuf(rte_mbuf_t * m){
+ m_plugin_info=(void *)m;
+ m_flags |= NODE_FLAGS_MBUF_CACHE;
+ }
+
+ inline rte_mbuf_t * get_cache_mbuf(){
+ if ( m_flags &NODE_FLAGS_MBUF_CACHE ) {
+ return ((rte_mbuf_t *)m_plugin_info);
+ }else{
+ return ((rte_mbuf_t *)0);
+ }
+ }
+
+public:
+
+ inline void set_rx_check(){
+ m_flags |= NODE_FLAGS_SAMPLE_RX_CHECK;
+ }
+
+ inline bool is_rx_check_enabled(){
+ return ((m_flags & NODE_FLAGS_SAMPLE_RX_CHECK)?true:false);
+ }
+
+public:
+
+ inline void set_nat_first_state(){
+ btSetMaskBit16(m_flags,4,3,1);
+ m_type=FLOW_PKT_NAT;
+ }
+
+ inline bool is_nat_first_state(){
+ return (btGetMaskBit16(m_flags,4,3)==1?true:false) ;
+ }
+
+ inline void set_nat_wait_state(){
+ btSetMaskBit16(m_flags,4,3,2);
+ }
+
+
+ inline bool is_nat_wait_state(){
+ return (btGetMaskBit16(m_flags,4,3)==2?true:false) ;
+ }
+
+ inline void set_nat_learn_state(){
+ m_type=FLOW_PKT; /* normal operation .. repeat might work too */
+ }
+
+public:
+
+
+ inline uint32_t get_short_fid(void){
+ return ((uint32_t)m_flow_id);
+ }
+
+ inline uint8_t get_thread_id(void){
+ return (m_thread_id);
+ }
+
+ inline void set_nat_ipv4_addr_server(uint32_t ip){
+ m_nat_external_ipv4_server =ip;
+ }
+
+ inline uint32_t get_nat_ipv4_addr_server(){
+ return ( m_nat_external_ipv4_server );
+ }
+
+
+ inline void set_nat_ipv4_addr(uint32_t ip){
+ m_nat_external_ipv4 =ip;
+ }
+
+ inline void set_nat_ipv4_port(uint16_t port){
+ m_nat_external_port = port;
+ }
+
+ inline uint32_t get_nat_ipv4_addr(){
+ return ( m_nat_external_ipv4 );
+ }
+
+ inline uint16_t get_nat_ipv4_port(){
+ return ( m_nat_external_port );
+ }
+
+ bool is_external_is_eq_to_internal_ip(){
+ /* this API is used to check TRex itself */
+ if ( (get_nat_ipv4_addr() == m_src_ip ) &&
+ (get_nat_ipv4_port()==m_src_port) &&
+ ( get_nat_ipv4_addr_server() == m_dest_ip) ) {
+ return (true);
+ }else{
+ return (false);
+ }
+ }
+
+
+public:
+ inline void replace_tuple(void);
+
+} __rte_cache_aligned;
+
+
+#if __x86_64__
+/* size of 64 bytes */
+ #define DEFER_CLIENTS_NUM (18)
+#else
+ #define DEFER_CLIENTS_NUM (16)
+#endif
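+
+/* DEFER_CLIENTS_NUM is sized so that sizeof(CGenNodeDeferPort) below matches
+   sizeof(CGenNode) exactly (both come from the same fixed-size node pool);
+   check_objects_sizes() verifies this at run time. */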
+
+/* this struct must be the same size as CGenNode */
+struct CGenNodeDeferPort {
+ /* this header must be the same as CGenNode */
+ uint8_t m_type;
+ uint8_t m_pad3;
+ uint16_t m_pad2;
+ uint32_t m_cnt;
+ double m_time;
+
+ uint32_t m_clients[DEFER_CLIENTS_NUM];
+ uint16_t m_ports[DEFER_CLIENTS_NUM];
+public:
+ void init(void){
+ m_type=CGenNode::FLOW_DEFER_PORT_RELEASE;
+ m_cnt=0;
+ }
+
+ /* return true if object is full */
+ bool add_client(uint32_t client,
+ uint16_t port){
+ m_clients[m_cnt]=client;
+ m_ports[m_cnt]=port;
+ m_cnt++;
+ if ( m_cnt == DEFER_CLIENTS_NUM ) {
+ return (true);
+ }
+ return (false);
+ }
+
+} __rte_cache_aligned ;
+
+/* run time verification of object sizes and offsets
+ need to clean this up and derive these objects from a base object, but that requires too much refactoring right now
+ hhaim
+*/
+inline int check_objects_sizes(void){
+ if ( sizeof(CGenNodeDeferPort) != sizeof(CGenNode) ) {
+ printf("ERROR sizeof(CGenNodeDeferPort) %d != sizeof(CGenNode) %d must be the same size \n",sizeof(CGenNodeDeferPort),sizeof(CGenNode));
+ assert(0);
+ }
+ if ( (int)offsetof(struct CGenNodeDeferPort,m_type)!=offsetof(struct CGenNode,m_type) ){
+ printf("ERROR offsetof(struct CGenNodeDeferPort,m_type)!=offsetof(struct CGenNode,m_type) \n");
+ assert(0);
+ }
+ if ( (int)offsetof(struct CGenNodeDeferPort,m_time)!=offsetof(struct CGenNode,m_time) ){
+ printf("ERROR offsetof(struct CGenNodeDeferPort,m_time)!=offsetof(struct CGenNode,m_time) \n");
+ assert(0);
+ }
+ return (0);
+}
+
+
+struct CGenNodeCompare
+{
+ bool operator() (const CGenNode * lhs, const CGenNode * rhs)
+ {
+ return lhs->m_time > rhs->m_time;
+ }
+};
+
+
+class CCapPktRaw;
+class CFileWriterBase;
+
+
+
+class CFlowGenStats {
+public:
+ CFlowGenStats(){
+ clear();
+ }
+ // stats
+ uint64_t m_total_bytes;
+ uint64_t m_total_pkt;
+ uint64_t m_total_open_flows;
+ uint64_t m_total_close_flows;
+ uint64_t m_nat_lookup_no_flow_id;
+ uint64_t m_nat_lookup_remove_flow_id;
+ uint64_t m_nat_lookup_add_flow_id;
+ uint64_t m_nat_flow_timeout;
+ uint64_t m_nat_flow_learn_error;
+
+public:
+ void clear();
+ void dump(FILE *fd);
+};
+
+
+
+typedef std::priority_queue<CGenNode *, std::vector<CGenNode *>,CGenNodeCompare> pqueue_t;
+
+
+
+class CErfIF : public CVirtualIF {
+
+public:
+ CErfIF(){
+ m_writer=NULL;
+ m_raw=NULL;
+ }
+public:
+
+ virtual int open_file(std::string file_name);
+ virtual int write_pkt(CCapPktRaw *pkt_raw);
+ virtual int close_file(void);
+
+
+ /**
+ * send one packet
+ *
+ * @param node
+ *
+ * @return
+ */
+ virtual int send_node(CGenNode * node);
+
+
+
+ /**
+ * flush all pending packets into the stream
+ *
+ * @return
+ */
+ virtual int flush_tx_queue(void);
+
+
+private:
+ CFileWriterBase * m_writer;
+ CCapPktRaw * m_raw;
+};
+
+static inline int fill_pkt(CCapPktRaw * raw,rte_mbuf_t * m){
+ raw->pkt_len = m->pkt_len;
+ char *p=raw->raw;
+
+ rte_mbuf_t *m_next;
+
+ while (m != NULL) {
+ m_next = m->next;
+ rte_memcpy(p,m->buf_addr,m->data_len);
+ p+=m->data_len;
+ m = m_next;
+ }
+ return (0);
+}
+
+
+class CNullIF : public CVirtualIF {
+
+public:
+ CNullIF(){
+ }
+
+public:
+
+ virtual int open_file(std::string file_name){
+ return (0);
+ }
+
+ virtual int write_pkt(CCapPktRaw *pkt_raw){
+ return (0);
+ }
+
+ virtual int close_file(void){
+ return (0);
+ }
+
+
+ virtual int send_node(CGenNode * node);
+
+ virtual int flush_tx_queue(void){
+ return (0);
+
+ }
+};
+
+
+class CNodeGenerator {
+public:
+ bool Create(CFlowGenListPerThread * parent);
+ void Delete();
+
+ void set_vif(CVirtualIF * v_if);
+
+ CFlowGenListPerThread * Parent(){
+ return (m_parent);
+ }
+public:
+ void add_node(CGenNode * mynode);
+ void remove_all(CFlowGenListPerThread * thread);
+ int open_file(std::string file_name,
+ CPreviewMode * preview);
+ int close_file(CFlowGenListPerThread * thread);
+ int flush_file(dsec_t max_time,
+ dsec_t d_time,
+ bool always,
+ CFlowGenListPerThread * thread,
+ double & old_offset);
+ int defer_handler(CFlowGenListPerThread * thread);
+
+ void schedule_node(CGenNode * node,double delay){
+ node->m_time = (now_sec()+ delay);
+ add_node(node);
+ }
+
+
+ void DumpHist(FILE *fd){
+ fprintf(fd,"\n");
+ fprintf(fd,"\n");
+ fprintf(fd,"normal\n");
+ fprintf(fd,"-------------\n");
+ m_realtime_his.Dump(fd);
+ }
+
+ void dump_json(std::string & json);
+
+
+private:
+ int flush_one_node_to_file(CGenNode * node);
+ int update_stats(CGenNode * node);
+ FORCE_NO_INLINE void handle_slow_messages(uint8_t type,
+ CGenNode * node,
+ CFlowGenListPerThread * thread,
+ bool always);
+
+
+public:
+ pqueue_t m_p_queue;
+ socket_id_t m_socket_id;
+ bool m_is_realtime;
+ CVirtualIF * m_v_if;
+ CFlowGenListPerThread * m_parent;
+ CPreviewMode m_preview_mode;
+ uint64_t m_cnt;
+ CTimeHistogram m_realtime_his;
+};
+
+
+class CPolicer {
+
+public:
+
+ CPolicer(){
+ ClearMeter();
+ }
+
+ void ClearMeter(){
+ m_cir=0.0;
+ m_bucket_size=1.0;
+ m_level=0.0;
+ m_last_time=0.0;
+ }
+
+ bool update(double dsize,dsec_t now_sec);
+
+ void set_cir(double cir){
+ BP_ASSERT(cir>=0.0);
+ m_cir=cir;
+ }
+ void set_level(double level){
+ m_level =level;
+ }
+
+ void set_bucket_size(double bucket){
+ m_bucket_size =bucket;
+ }
+
+private:
+
+ double m_cir;
+
+ double m_bucket_size;
+
+ double m_level;
+
+ double m_last_time;
+};
+
+class CFlowKey {
+public:
+ uint32_t m_ipaddr1;
+ uint32_t m_ipaddr2;
+
+ uint16_t m_port1;
+ uint16_t m_port2;
+
+ uint8_t m_ip_proto; /* TCP/UDP 6/17*/
+ uint8_t m_l2_proto; /*IPV4/IPV6*/
+ uint16_t m_vrfid;
+
+public:
+ inline bool operator <(const CFlowKey& rhs) const;
+ inline bool operator >(const CFlowKey& rhs) const;
+ inline bool operator ==(const CFlowKey& rhs) const;
+public:
+ void Dump(FILE *fd);
+ void Clean();
+};
+
+
+inline bool CFlowKey::operator <(const CFlowKey& rhs) const{
+ int cmp=memcmp(&m_ipaddr1,&rhs.m_ipaddr1 ,sizeof(CFlowKey));
+ if (cmp>0) {
+ return (true);
+ }else{
+ return (false);
+ }
+}
+
+inline bool CFlowKey::operator >(const CFlowKey& rhs) const{
+ int cmp=memcmp(&m_ipaddr1,&rhs.m_ipaddr1 ,sizeof(CFlowKey));
+ if (cmp<0) {
+ return (true);
+ }else{
+ return (false);
+ }
+}
+
+inline bool CFlowKey::operator ==(const CFlowKey& rhs) const{
+ int cmp=memcmp(&m_ipaddr1,&rhs.m_ipaddr1 ,sizeof(CFlowKey));
+ if (cmp==0) {
+ return (true);
+ }else{
+ return (false);
+ }
+}
+
+
+
+/***********************************************************/
+/* descriptor flags */
+
+#define IS_SWAP_S 0
+#define IS_SWAP_E 0
+
+#define IS_VALID_S 1
+#define IS_VALID_E 1
+
+#define IS_TCP_S 2
+#define IS_TCP_E 2
+
+#define IS_UDP_S 3
+#define IS_UDP_E 3
+
+#define IS_INIT_SIDE 4
+
+#define IS_LAST_PKT_S 5
+#define IS_LAST_PKT_E 5
+
+#define IS_RTT 6
+
+#define IS_PCAP_TIMING 7
+
+// bits 8-12 are used (flow id)
+#define FLOW_ID 8
+
+
+#define PLUGIN_ENABLE_S 13
+#define PLUGIN_ENABLE_E 13
+#define BOTH_DIR_FLOW_SE 14
+#define LEARN_MODE_ENABLE 15
+
+/***********************************************************/
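+
+/* The flag positions above index into CPacketDescriptor::m_flags below: bits 8-12
+   carry the flow id (SetFlowId/getFlowId) and bits 16-31 carry the template id
+   (SetId/getId); the remaining single-bit flags use their *_S/*_E pairs. */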
+
+class CPacketDescriptorPerDir {
+public:
+ CPacketDescriptorPerDir(){
+ m_dir_pkt_num=0;
+ m_max_dir_flow_pkts=0;
+ }
+public:
+ void SetMaxPkts( uint32_t val){
+ assert(val<65000);
+ m_max_dir_flow_pkts = (uint16_t)val;
+ }
+ uint16_t GetMaxPkts(void){
+ return (m_max_dir_flow_pkts);
+ }
+
+ void SetPktNum(uint32_t pkt_id){
+ assert(pkt_id<65000);
+ m_dir_pkt_num=(uint16_t)pkt_id;
+ }
+
+ uint16_t GetPktNum(void){
+ return (m_dir_pkt_num);
+ }
+
+private:
+ // per direction info
+ uint16_t m_dir_pkt_num; // pkt id
+ uint16_t m_max_dir_flow_pkts;
+};
+
+
+class CPacketDescriptor {
+
+public:
+
+ inline void Clear(){
+ m_flags = 0;
+ m_flow_pkt_num=0;
+ m_plugin_id=0;
+ m_max_flow_pkts=0;
+ m_max_flow_aging=0;
+ }
+
+ inline uint8_t getPluginId(){
+ return (m_plugin_id);
+ }
+ inline void SetPluginId(uint8_t plugin_id){
+ m_plugin_id=plugin_id;
+ }
+
+ inline bool IsLearn(){
+ return (btGetMaskBit32(m_flags,LEARN_MODE_ENABLE,LEARN_MODE_ENABLE) ? true:false);
+ }
+ inline void SetLearn(bool enable){
+ btSetMaskBit32(m_flags,LEARN_MODE_ENABLE ,LEARN_MODE_ENABLE ,enable?1:0);
+ }
+
+
+ inline bool IsPluginEnable(){
+ return (btGetMaskBit32(m_flags,PLUGIN_ENABLE_S,PLUGIN_ENABLE_S) ? true:false);
+ }
+ inline void SetPluginEnable(bool enable){
+ btSetMaskBit32(m_flags,PLUGIN_ENABLE_S ,PLUGIN_ENABLE_S ,enable?1:0);
+ }
+
+ inline bool IsBiDirectionalFlow(){
+ return (btGetMaskBit32(m_flags,BOTH_DIR_FLOW_SE,BOTH_DIR_FLOW_SE) ? true:false);
+ }
+ inline void SetBiPluginEnable(bool enable){
+ btSetMaskBit32(m_flags,BOTH_DIR_FLOW_SE ,BOTH_DIR_FLOW_SE ,enable?1:0);
+ }
+
+
+ /* packet number inside the global flow */
+ inline void SetFlowPktNum(uint32_t pkt_id){
+ m_flow_pkt_num = pkt_id;
+
+ }
+ /**
+ * starts from zero (0,1,2,..); it is counted over the global flow, so if you have a couple of flows it counts across all of them
+ *
+ * flow FlowPktNum
+ * 0 0
+ * 0 1
+ * 0 2
+ * 1 0
+ * 1 1
+ * 2 0
+ *
+ * @return
+ */
+ inline uint32_t getFlowPktNum(){
+ return ( m_flow_pkt_num);
+ }
+
+
+ inline void SetFlowId(uint16_t flow_id){
+ btSetMaskBit32(m_flags,12,8,flow_id);
+
+ }
+ inline uint16_t getFlowId(){
+ return ( ( uint16_t)btGetMaskBit32(m_flags,12,8));
+ }
+
+ inline void SetPcapTiming(bool is_pcap){
+ btSetMaskBit32(m_flags,IS_PCAP_TIMING,IS_PCAP_TIMING,is_pcap?1:0);
+ }
+ inline bool IsPcapTiming(){
+ return (btGetMaskBit32(m_flags,IS_PCAP_TIMING,IS_PCAP_TIMING) ? true:false);
+ }
+
+
+ /* return true if this packet is in a different direction from the previous flow packet;
+ if true we need to choose RTT, else IPG, for the inter-packet gap */
+ inline bool IsRtt(){
+ return (btGetMaskBit32(m_flags,IS_RTT,IS_RTT) ? true:false);
+ }
+ inline void SetRtt(bool is_rtt){
+ btSetMaskBit32(m_flags,IS_RTT,IS_RTT,is_rtt?1:0);
+ }
+
+ /* this is in respect to the first flow */
+ inline bool IsInitSide(){
+ return (btGetMaskBit32(m_flags,IS_INIT_SIDE,IS_INIT_SIDE) ? true:false);
+ }
+
+ /* this is with respect to the first flow; this is what is needed when we replace the IP source / destination */
+ inline void SetInitSide(bool is_init_side){
+ btSetMaskBit32(m_flags,IS_INIT_SIDE,IS_INIT_SIDE,is_init_side?1:0);
+ }
+
+ /* per flow */
+ inline bool IsSwapTuple(){
+ return (btGetMaskBit32(m_flags,IS_SWAP_S,IS_SWAP_E) ? true:false);
+ }
+ inline void SetSwapTuple(bool is_swap){
+ btSetMaskBit32(m_flags,IS_SWAP_S,IS_SWAP_E,is_swap?1:0);
+ }
+
+ inline bool IsValidPkt(){
+ return (btGetMaskBit32(m_flags,IS_VALID_S,IS_VALID_E) ? true:false);
+ }
+
+ inline void SetIsValidPkt(bool is_valid){
+ btSetMaskBit32(m_flags,IS_VALID_S,IS_VALID_E,is_valid?1:0);
+ }
+
+ inline void SetIsTcp(bool is_valid){
+ btSetMaskBit32(m_flags,IS_TCP_S,IS_TCP_E,is_valid?1:0);
+ }
+
+ inline bool IsTcp(){
+ return (btGetMaskBit32(m_flags,IS_TCP_S,IS_TCP_E) ? true:false);
+ }
+
+ inline void SetIsUdp(bool is_valid){
+ btSetMaskBit32(m_flags,IS_UDP_S,IS_UDP_E,is_valid?1:0);
+ }
+
+ inline bool IsUdp(){
+ return (btGetMaskBit32(m_flags,IS_UDP_S,IS_UDP_E) ? true:false);
+ }
+
+ inline void SetId(uint16_t _id){
+ btSetMaskBit32(m_flags,31,16,_id);
+
+ }
+ inline uint16_t getId(){
+ return ( ( uint16_t)btGetMaskBit32(m_flags,31,16));
+ }
+
+ inline void SetIsLastPkt(bool is_last){
+ btSetMaskBit32(m_flags,IS_LAST_PKT_S,IS_LAST_PKT_E,is_last?1:0);
+ }
+
+ /* last packet of couple of flows */
+ inline bool IsLastPkt(){
+ return (btGetMaskBit32(m_flags,IS_LAST_PKT_S,IS_LAST_PKT_E) ? true:false);
+ }
+
+ // there could be a couple of flows per template in the case of a plugin
+ inline void SetMaxPktsPerFlow(uint32_t pkts){
+ assert(pkts<65000);
+ m_max_flow_pkts=pkts;
+ }
+ inline uint16_t GetMaxPktsPerFlow(){
+ return ( m_max_flow_pkts );
+ }
+ // there could be a couple of flows per template in the case of a plugin
+ inline void SetMaxFlowTimeout(double sec){
+ //assert (sec<65000);
+ sec = sec*2.0+5.0;
+ if ( sec > 65000) {
+ printf("Warning pcap file aging is %f truncating it \n",sec);
+ sec = 65000;
+ }
+ m_max_flow_aging = (uint16_t)sec;
+ }
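+ /* the stored aging value is twice the flow duration plus 5 seconds, capped at
+ 65000 so it fits the uint16_t m_max_flow_aging field */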
+
+ inline uint16_t GetMaxFlowTimeout(void){
+ return ( m_max_flow_aging );
+ }
+
+ /* return per dir info , the dir is with respect to the first flow client/server side , this is tricky */
+ CPacketDescriptorPerDir * GetDirInfo(void){
+ return (&m_per_dir[IsInitSide()?CLIENT_SIDE:SERVER_SIDE]);
+ }
+
+ bool IsOneDirectionalFlow(void){
+ if ( ( m_per_dir[CLIENT_SIDE].GetMaxPkts() == GetMaxPktsPerFlow()) || ( m_per_dir[SERVER_SIDE].GetMaxPkts() == GetMaxPktsPerFlow()) ) {
+ return (true);
+ }else{
+ return (false);
+ }
+ }
+
+public:
+ void Dump(FILE *fd);
+
+private:
+ uint32_t m_flags;
+ uint16_t m_flow_pkt_num; // packet number inside the flow
+ uint8_t m_plugin_id; // plugin id
+ uint8_t m_pad;
+ uint16_t m_max_flow_pkts; // how many packets per this flow (getFlowId())
+ uint16_t m_max_flow_aging; // maximum aging in sec
+ CPacketDescriptorPerDir m_per_dir[CS_NUM]; // per direction info
+};
+
+
+class CPacketParser;
+class CFlow ;
+
+
+class CCPacketParserCounters {
+public:
+ uint64_t m_pkt;
+ uint64_t m_ipv4;
+ uint64_t m_ipv6;
+ uint64_t m_non_ip;
+ uint64_t m_vlan;
+ uint64_t m_arp;
+ uint64_t m_mpls;
+
+
+ /* IP stats */
+ uint64_t m_non_valid_ipv4_ver;
+ uint64_t m_non_valid_ipv6_ver;
+ uint64_t m_ip_checksum_error;
+ uint64_t m_ip_length_error;
+ uint64_t m_ipv6_length_error;
+ uint64_t m_ip_not_first_fragment_error;
+ uint64_t m_ip_ttl_is_zero_error;
+ uint64_t m_ip_multicast_error;
+ uint64_t m_ip_header_options;
+
+ /* TCP/UDP */
+ uint64_t m_non_tcp_udp;
+ uint64_t m_non_tcp_udp_ah;
+ uint64_t m_non_tcp_udp_esp;
+ uint64_t m_non_tcp_udp_icmp;
+ uint64_t m_non_tcp_udp_gre;
+ uint64_t m_non_tcp_udp_ip;
+ uint64_t m_tcp_header_options;
+ uint64_t m_tcp_udp_pkt_length_error;
+ uint64_t m_tcp;
+ uint64_t m_udp;
+ uint64_t m_valid_udp_tcp;
+
+public:
+ void Clear();
+ uint64_t getTotalErrors();
+ void Dump(FILE *fd);
+};
+
+
+class CPacketIndication {
+
+public:
+ dsec_t m_cap_ipg; /* ipg from cap file */
+ CCapPktRaw * m_packet;
+
+ CFlow * m_flow;
+ EthernetHeader * m_ether;
+ union {
+ IPHeader * m_ipv4;
+ IPv6Header * m_ipv6;
+ } l3;
+ bool m_is_ipv6;
+ union {
+ TCPHeader * m_tcp;
+ UDPHeader * m_udp;
+ } l4;
+ uint8_t * m_payload;
+ uint16_t m_payload_len;
+ uint16_t m_packet_padding; /* total packet size - IP total length */
+
+
+ CFlowKey m_flow_key;
+ CPacketDescriptor m_desc;
+
+ uint8_t m_ether_offset;
+ uint8_t m_ip_offset;
+ uint8_t m_udp_tcp_offset;
+ uint8_t m_payload_offset;
+
+public:
+
+ void Dump(FILE *fd,int verbose);
+ void Clean();
+ bool ConvertPacketToIpv6InPlace(CCapPktRaw * pkt,
+ int offset);
+ void ProcessPacket(CPacketParser *parser,CCapPktRaw * pkt);
+ void Clone(CPacketIndication * obj,CCapPktRaw * pkt);
+ void RefreshPointers(void);
+ void UpdatePacketPadding();
+
+public:
+ bool is_ipv6(){
+ return (m_is_ipv6);
+ }
+ char * getBasePtr(){
+ return ((char *)m_packet->raw);
+ }
+
+ uint32_t getEtherOffset(){
+ BP_ASSERT(m_ether);
+ return (uint32_t)((uintptr_t) (((char *)m_ether)- getBasePtr()) );
+ }
+ uint32_t getIpOffset(){
+ if (l3.m_ipv4 != NULL) {
+ return (uint32_t)((uintptr_t)( ((char *)l3.m_ipv4)-getBasePtr()) );
+ }else{
+ BP_ASSERT(0);
+ }
+ }
+
+
+ /**
+ * return the application ipv4/ipv6 option offset
+ * if the learn bit is ON , it is always the first option ( IPv4/IPv6 )
+ *
+ * @return
+ */
+ uint32_t getIpAppOptionOffset(){
+ if ( is_ipv6() ) {
+ return ( getIpOffset()+IPv6Header::DefaultSize);
+ }else{
+ return ( getIpOffset()+IPHeader::DefaultSize);
+ }
+ }
+
+ uint32_t getTcpOffset(){
+ BP_ASSERT(l4.m_tcp);
+ return (uint32_t)((uintptr_t) ((char *)l4.m_tcp-getBasePtr()) );
+ }
+ uint32_t getPayloadOffset(){
+ if (m_payload) {
+ return (uint32_t)((uintptr_t) ((char *)m_payload-getBasePtr()) );
+ }else{
+ return (0);
+ }
+ }
+ uint8_t getTTL(){
+ BP_ASSERT(l3.m_ipv4);
+ if (is_ipv6()) {
+ return(l3.m_ipv6->getHopLimit());
+ }else{
+ return(l3.m_ipv4->getTimeToLive());
+ }
+ }
+ void setTTL(uint8_t ttl){
+ BP_ASSERT(l3.m_ipv4);
+ if (is_ipv6()) {
+ l3.m_ipv6->setHopLimit(ttl);
+ }else{
+ l3.m_ipv4->setTimeToLive(ttl);
+ l3.m_ipv4->updateCheckSum();
+ }
+ }
+
+
+ uint8_t getFastEtherOffset(void){
+ return (m_ether_offset);
+ }
+ uint8_t getFastIpOffsetFast(void){
+ return (m_ip_offset);
+ }
+ uint8_t getFastTcpOffset(void){
+ return (m_udp_tcp_offset );
+ }
+ uint8_t getFastPayloadOffset(void){
+ return (m_payload_offset );
+ }
+private:
+ void SetKey(void);
+ uint8_t ProcessIpPacketProtocol(CCPacketParserCounters *m_cnt,
+ uint8_t protocol, int *offset);
+ void ProcessIpPacket(CPacketParser *parser,int offset);
+ void ProcessIpv6Packet(CPacketParser *parser,int offset);
+ void _ProcessPacket(CPacketParser *parser,CCapPktRaw * pkt);
+
+ void UpdateOffsets();
+};
+
+
+
+#define SRC_IP_BASE 0x10000001
+#define DST_IP_BASE 0x20000001
+
+class CFlowTemplateGenerator {
+public:
+ CFlowTemplateGenerator(uint64_t fid){
+ src_ip_base=((SRC_IP_BASE + (uint32_t)fid )& 0x7fffffff);
+ dst_ip_base=((DST_IP_BASE + (uint32_t) ((fid & 0xffffffff00000000ULL)>>32)) & 0x7fffffff);
+ }
+public:
+ uint32_t src_ip_base;
+ uint32_t dst_ip_base;
+};
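+
+/* The two bases are seeded from the 64-bit flow id: the low 32 bits offset
+   SRC_IP_BASE and the high 32 bits offset DST_IP_BASE, both masked with
+   0x7fffffff. */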
+
+
+class CPacketParser {
+
+public:
+ bool Create();
+ void Delete();
+ bool ProcessPacket(CPacketIndication * pkt_indication,
+ CCapPktRaw * raw_packet);
+public:
+ CCPacketParserCounters m_counter;
+public:
+ void Dump(FILE *fd);
+};
+
+
+class CFlowTableStats {
+public:
+ uint64_t m_lookup;
+ uint64_t m_found;
+ uint64_t m_fif;
+ uint64_t m_add;
+ uint64_t m_remove;
+ uint64_t m_fif_err;
+ uint64_t m_active;
+public:
+ void Clear();
+ void Dump(FILE *fd);
+};
+
+
+
+class CFlow {
+public:
+ CFlow(){
+ is_fif_swap=0;
+ pkt_id=0;
+ }
+ ~CFlow(){
+ }
+public:
+ void Dump(FILE *fd);
+public:
+ uint8_t is_fif_swap;
+ uint32_t pkt_id;
+ uint32_t flow_id;
+};
+
+class CFlowTableInterator {
+public:
+ virtual void do_flow(CFlow *flow)=0;
+};
+
+class CFlowTableManagerBase {
+public:
+ virtual bool Create(int max_size)=0;
+ virtual void Delete()=0;
+public:
+ CFlow * process(CFlowKey & key,bool &is_fif );
+ virtual void remove(CFlowKey & key )=0;
+ virtual void remove_all()=0;
+ virtual uint64_t count()=0;
+public:
+ void Dump(FILE *fd);
+protected:
+ virtual CFlow * lookup(CFlowKey & key )=0;
+ virtual CFlow * add(CFlowKey & key )=0;
+
+ //virtual IterateFlows(CFlowTableInterator * iter)=0;
+protected:
+ CFlowTableStats m_stats;
+};
+
+
+
+typedef CFlow * flow_ptr;
+typedef std::map<CFlowKey, flow_ptr, std::less<CFlowKey> > flow_map_t;
+typedef flow_map_t::iterator flow_map_iter_t;
+
+
+class CFlowTableMap : public CFlowTableManagerBase {
+public:
+ virtual bool Create(int max_size);
+ virtual void Delete();
+ virtual void remove(CFlowKey & key );
+
+protected:
+ virtual CFlow * lookup(CFlowKey & key );
+ virtual CFlow * add(CFlowKey & key );
+ virtual void remove_all(void);
+ uint64_t count(void);
+private:
+ flow_map_t m_map;
+};
+
+class CFlowInfo {
+public:
+ uint32_t client_ip;
+ uint32_t server_ip;
+ uint32_t client_port;
+ uint32_t server_port;
+ bool is_init_ip_dir;
+ bool is_init_port_dir;
+
+ bool replace_server_port;
+ CMiniVMCmdBase ** vm_program;/* pointer to vm program */
+};
+
+class CFlowPktInfo {
+public:
+ bool Create(CPacketIndication * pkt_ind);
+ void Delete();
+ void Dump(FILE *fd);
+ inline void replace_tuple(CGenNode * node);
+
+ /* generate a new packet */
+ inline rte_mbuf_t * generate_new_mbuf(CGenNode * node);
+ inline rte_mbuf_t * do_generate_new_mbuf(CGenNode * node);
+ inline rte_mbuf_t * do_generate_new_mbuf_big(CGenNode * node);
+
+ /* new packet with rx check info in IP option */
+ void do_generate_new_mbuf_rxcheck(rte_mbuf_t * m,
+ CGenNode * node,
+ pkt_dir_t dir,
+ bool single_port);
+
+ inline rte_mbuf_t * do_generate_new_mbuf_ex(CGenNode * node,CFlowInfo * flow_info);
+ inline rte_mbuf_t * do_generate_new_mbuf_ex_big(CGenNode * node,CFlowInfo * flow_info);
+ inline rte_mbuf_t * do_generate_new_mbuf_ex_vm(CGenNode * node,
+ CFlowInfo * flow_info, int16_t * s_size);
+
+public:
+ /* push the number of bytes into the packets and make more room
+ should be used by NAT feature that should have ipv4 option in the first packet
+ this function should not be called at runtime, only when the template is loaded, due to its heavy cost of operation ( malloc/free memory )
+ */
+ char * push_ipv4_option_offline(uint8_t bytes);
+ char * push_ipv6_option_offline(uint8_t bytes);
+
+
+
+ /**
+ * mark this packet as a learn packet
+ * it should:
+ * 1. push ipv4 option ( 8 bytes)
+ * 2. mask the packet as learn
+ * 3. update the option pointer
+ */
+ void mask_as_learn();
+
+private:
+ inline void append_big_mbuf(rte_mbuf_t * m,
+ CGenNode * node);
+
+ inline void update_pkt_info(char *p,
+ CGenNode * node);
+ inline void update_pkt_info2(char *p,
+ CFlowInfo * flow_info,
+ int update_len,
+ CGenNode * node
+ );
+
+ void alloc_const_mbuf();
+
+ void free_const_mbuf();
+
+ rte_mbuf_t * get_big_mbuf(socket_id_t socket_id){
+ return (m_big_mbuf[socket_id]);
+ }
+
+
+public:
+ CPacketIndication m_pkt_indication;
+ CCapPktRaw * m_packet;
+ rte_mbuf_t * m_big_mbuf[MAX_SOCKETS_SUPPORTED]; /* allocated big mbuf per socket */
+};
+
+
+inline void CFlowPktInfo::replace_tuple(CGenNode * node){
+ update_pkt_info(m_packet->raw,node);
+}
+
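+/* update_pkt_info2() rewrites a template packet in place for a specific flow: it fixes
+   up the IPv4/IPv6 addresses (and the total/payload length when update_len is non-zero)
+   according to flow_info and the init direction, then rewrites the TCP/UDP ports the
+   same way, recomputing the IPv4 checksum and zeroing the UDP checksum. */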
+inline void CFlowPktInfo::update_pkt_info2(char *p,
+ CFlowInfo * flow_info,
+ int update_len ,
+ CGenNode * node
+ ){
+ IPHeader * ipv4=
+ (IPHeader *)(p + m_pkt_indication.getFastIpOffsetFast());
+
+ EthernetHeader * et =
+ (EthernetHeader * )(p + m_pkt_indication.getFastEtherOffset());
+
+ if ( unlikely (m_pkt_indication.is_ipv6())) {
+ IPv6Header *ipv6= (IPv6Header *)ipv4;
+
+ if ( update_len ){
+ ipv6->setPayloadLen(ipv6->getPayloadLen() + update_len);
+ }
+
+ if ( flow_info->is_init_ip_dir ) {
+ ipv6->updateLSBIpv6Src(flow_info->client_ip);
+ ipv6->updateLSBIpv6Dst(flow_info->server_ip);
+ }else{
+ ipv6->updateLSBIpv6Src(flow_info->server_ip);
+ ipv6->updateLSBIpv6Dst(flow_info->client_ip);
+ }
+
+ }else{
+ if ( update_len ){
+ ipv4->setTotalLength((ipv4->getTotalLength() + update_len));
+ }
+
+ if ( flow_info->is_init_ip_dir ) {
+ ipv4->setSourceIp(flow_info->client_ip);
+ ipv4->setDestIp(flow_info->server_ip);
+ }else{
+ ipv4->setSourceIp(flow_info->server_ip);
+ ipv4->setDestIp(flow_info->client_ip);
+ }
+ ipv4->updateCheckSum();
+ }
+
+
+
+ /* replace the port based on TCP/UDP */
+ if ( m_pkt_indication.m_desc.IsTcp() ) {
+ TCPHeader * m_tcp = (TCPHeader *)(p +m_pkt_indication.getFastTcpOffset());
+ BP_ASSERT(m_tcp);
+ /* replace port */
+ if ( flow_info->is_init_port_dir ) {
+ m_tcp->setSourcePort(flow_info->client_port);
+ if ( flow_info->replace_server_port ){
+ m_tcp->setDestPort(flow_info->server_port);
+ }
+ }else{
+ m_tcp->setDestPort(flow_info->client_port);
+ if ( flow_info->replace_server_port ){
+ m_tcp->setSourcePort(flow_info->server_port);
+ }
+ }
+
+ }else {
+ if ( m_pkt_indication.m_desc.IsUdp() ){
+ UDPHeader * m_udp =(UDPHeader *)(p +m_pkt_indication.getFastTcpOffset() );
+ BP_ASSERT(m_udp);
+ m_udp->setLength(m_udp->getLength() + update_len);
+ m_udp->setChecksum(0);
+ if ( flow_info->is_init_port_dir ) {
+ m_udp->setSourcePort(flow_info->client_port);
+ if ( flow_info->replace_server_port ){
+ m_udp->setDestPort(flow_info->server_port);
+ }
+ }else{
+ m_udp->setDestPort(flow_info->client_port);
+ if ( flow_info->replace_server_port ){
+ m_udp->setSourcePort(flow_info->server_port);
+ }
+ }
+ }else{
+ BP_ASSERT(0);
+ }
+ }
+}
+
+
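+/* update_pkt_info() is the per-node variant: addresses and the source port come from
+   the CGenNode. In NAT learn mode the learn packet also gets TTL_RESERVE_DUPLICATE as
+   its TTL and a CNatOption carrying the flow id, thread id and rx-check bit, while
+   server-side packets use the learned external NAT address/port instead. */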
+inline void CFlowPktInfo::update_pkt_info(char *p,
+ CGenNode * node){
+
+ IPHeader * ipv4=
+ (IPHeader *)(p + m_pkt_indication.getFastIpOffsetFast());
+
+ EthernetHeader * et =
+ (EthernetHeader * )(p + m_pkt_indication.getFastEtherOffset());
+
+ uint16_t src_port = node->m_src_port;
+
+ pkt_dir_t ip_dir = node->cur_pkt_ip_addr_dir();
+ pkt_dir_t port_dir = node->cur_pkt_port_addr_dir();
+
+
+ if ( unlikely (m_pkt_indication.is_ipv6())) {
+
+ // Update the IPv6 address
+ IPv6Header *ipv6= (IPv6Header *)ipv4;
+
+ if ( ip_dir == CLIENT_SIDE ) {
+ ipv6->updateLSBIpv6Src(node->m_src_ip);
+ ipv6->updateLSBIpv6Dst(node->m_dest_ip);
+ }else{
+ ipv6->updateLSBIpv6Src(node->m_dest_ip);
+ ipv6->updateLSBIpv6Dst(node->m_src_ip);
+ }
+ }else{
+
+ if ( unlikely ( CGlobalInfo::is_learn_mode() ) ){
+
+ if (m_pkt_indication.m_desc.IsLearn()) {
+ /* might be done twice */
+ #ifdef NAT_TRACE_
+ printf(" %.3f : DP : learn packet !\n",now_sec());
+ #endif
+ ipv4->setTimeToLive(TTL_RESERVE_DUPLICATE);
+
+ /* the first ipv4 option adds the info in the case of a learn packet, usually only the first packet */
+ CNatOption *lpNat =(CNatOption *)ipv4->getOption();
+ lpNat->set_fid(node->get_short_fid());
+ lpNat->set_thread_id(node->get_thread_id());
+ lpNat->set_rx_check(node->is_rx_check_enabled());
+ }
+ /* in all cases update the ip using the outside ip */
+
+ if ( m_pkt_indication.m_desc.IsInitSide() ) {
+ #ifdef NAT_TRACE_
+ if (node->m_flags != CGenNode::NODE_FLAGS_LATENCY ) {
+ printf(" %.3f : DP : i %x:%x -> %x flow_id: %x\n",now_sec(),node->m_src_ip,node->m_src_port,node->m_dest_ip,node->m_flow_id);
+ }
+ #endif
+
+ ipv4->updateIpSrc(node->m_src_ip);
+ ipv4->updateIpDst(node->m_dest_ip);
+ }else{
+ #ifdef NAT_TRACE_
+ if (node->m_flags != CGenNode::NODE_FLAGS_LATENCY ) {
+ printf(" %.3f : r %x -> %x:%x flow_id: %x \n",now_sec(),node->m_dest_ip,node->m_src_ip,node->m_src_port,node->m_flow_id);
+ }
+ #endif
+ src_port = node->get_nat_ipv4_port();
+ ipv4->updateIpSrc(node->get_nat_ipv4_addr_server());
+ ipv4->updateIpDst(node->get_nat_ipv4_addr());
+ }
+
+ /* TBD remove this */
+ #ifdef NAT_TRACE_
+ if (node->m_flags != CGenNode::NODE_FLAGS_LATENCY ) {
+ if ( m_pkt_indication.m_desc.IsInitSide() ==false ){
+ printf(" %.3f : pkt ==> %x:%x %x:%x \n",now_sec(),node->get_nat_ipv4_addr(),node->get_nat_ipv4_addr_server(),
+ node->get_nat_ipv4_port(),node->m_src_port);
+ }else{
+ printf(" %.3f : pkt ==> init pkt sent \n",now_sec());
+ }
+ }
+ #endif
+
+
+ }else{
+ if ( ip_dir == CLIENT_SIDE ) {
+ #ifdef NAT_TRACE_
+ if (node->m_flags != CGenNode::NODE_FLAGS_LATENCY ) {
+ printf(" %.3f : i %x:%x -> %x \n",now_sec(),node->m_src_ip,node->m_src_port,node->m_dest_ip);
+ }
+ #endif
+ ipv4->updateIpSrc(node->m_src_ip);
+ ipv4->updateIpDst(node->m_dest_ip);
+ }else{
+ #ifdef NAT_TRACE_
+ if (node->m_flags != CGenNode::NODE_FLAGS_LATENCY ) {
+ printf(" %.3f : r %x -> %x:%x \n",now_sec(),node->m_dest_ip,node->m_src_ip,node->m_src_port);
+ }
+ #endif
+ ipv4->updateIpSrc(node->m_dest_ip);
+ ipv4->updateIpDst(node->m_src_ip);
+ }
+ }
+
+ ipv4->updateCheckSum();
+ }
+
+
+ /* replace the port based on TCP/UDP */
+ if ( m_pkt_indication.m_desc.IsTcp() ) {
+ TCPHeader * m_tcp = (TCPHeader *)(p +m_pkt_indication.getFastTcpOffset());
+ BP_ASSERT(m_tcp);
+ /* replace port */
+ if ( port_dir == CLIENT_SIDE ) {
+ m_tcp->setSourcePort(src_port);
+ }else{
+ m_tcp->setDestPort(src_port);
+ }
+ }else {
+ if ( m_pkt_indication.m_desc.IsUdp() ){
+ UDPHeader * m_udp =(UDPHeader *)(p +m_pkt_indication.getFastTcpOffset() );
+ BP_ASSERT(m_udp);
+ m_udp->setChecksum(0);
+ if ( port_dir == CLIENT_SIDE ) {
+ m_udp->setSourcePort(src_port);
+ }else{
+ m_udp->setDestPort(src_port);
+ }
+ }else{
+ BP_ASSERT(0);
+ }
+ }
+}
+
+
+inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_ex(CGenNode * node,
+ CFlowInfo * flow_info){
+ rte_mbuf_t * m;
+ /* alloc small packet buffer*/
+ m = CGlobalInfo::pktmbuf_alloc_small(node->get_socket_id());
+ assert(m);
+ uint16_t len= ( m_packet->pkt_len > FIRST_PKT_SIZE) ?FIRST_PKT_SIZE:m_packet->pkt_len;
+ /* append*/
+ char *p=rte_pktmbuf_append(m, len);
+
+ BP_ASSERT ( (((uintptr_t)m_packet->raw) & 0x7f )== 0) ;
+
+ memcpy(p,m_packet->raw,len);
+
+ update_pkt_info2(p,flow_info,0,node);
+
+ append_big_mbuf(m,node);
+
+ return(m);
+}
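+
+/* The small per-flow mbuf above holds only the first FIRST_PKT_SIZE bytes (the headers
+   that need rewriting); append_big_mbuf() then chains the pre-built big mbuf of this
+   socket (m_big_mbuf) so the constant part of the packet is not copied per flow. */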
+
+
+inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_ex_big(CGenNode * node,
+ CFlowInfo * flow_info){
+ rte_mbuf_t * m;
+ uint16_t len = m_packet->pkt_len;
+
+ /* alloc big buffer to update it*/
+ m = CGlobalInfo::pktmbuf_alloc(node->get_socket_id(), len);
+ assert(m);
+
+ /* append*/
+ char *p=rte_pktmbuf_append(m, len);
+
+ BP_ASSERT ( (((uintptr_t)m_packet->raw) & 0x7f )== 0) ;
+
+ memcpy(p,m_packet->raw,len);
+
+ update_pkt_info2(p,flow_info,0,node);
+
+ return(m);
+}
+
+
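+/* The _vm variant runs the template's mini-VM program over the payload: the headers are
+   copied as-is, the VM rewrites/resizes the payload, the mbuf is trimmed to the new size
+   and update_pkt_info2() is called with the length delta so the IP/UDP lengths stay
+   consistent; *s_size returns that delta to the caller. */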
+inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_ex_vm(CGenNode * node,
+ CFlowInfo * flow_info, int16_t * s_size){
+ rte_mbuf_t * m;
+
+ /* sanity check: we need to have a payload */
+ if ( unlikely( m_pkt_indication.m_payload_len == 0) ){
+ printf(" ERROR nothing to do \n");
+ return (do_generate_new_mbuf_ex(node,flow_info));
+ }
+
+ CMiniVMCmdBase ** cmds=flow_info->vm_program;
+ BP_ASSERT(cmds);
+
+ /* the packet is going to be changed; update len with what we expect ( written in the first command ) */
+ uint16_t len = m_packet->pkt_len + cmds[0]->m_add_pkt_len;
+
+ /* alloc big buffer to update it*/
+ m = CGlobalInfo::pktmbuf_alloc(node->get_socket_id(), len);
+ assert(m);
+
+ /* append the additional bytes requested and update later */
+ char *p=rte_pktmbuf_append(m, len);
+
+ BP_ASSERT ( (((uintptr_t)m_packet->raw) & 0x7f )== 0) ;
+
+ /* copy the headers until the payload */
+ memcpy(p, m_packet->raw, m_pkt_indication.getPayloadOffset() );
+ CMiniVM vm;
+ vm.m_pkt_info = this;
+ vm.m_pyload_mbuf_ptr = p+m_pkt_indication.getPayloadOffset();
+ vm.mini_vm_run(cmds);
+
+ /* need to update the mbuf size here; this is not a must but is needed for accuracy */
+ uint16_t buf_adjust = len - vm.m_new_pkt_size;
+ int rc = rte_pktmbuf_trim(m, buf_adjust);
+
+ /* update IP length , and TCP checksum , we can accelerate this using hardware ! */
+ uint16_t pkt_adjust = vm.m_new_pkt_size - m_packet->pkt_len;
+ update_pkt_info2(p,flow_info,pkt_adjust,node);
+
+ /* return the change in packet size due to packet transforms */
+ *s_size = vm.m_new_pkt_size - m_packet->pkt_len;
+
+ //printf(" new length : actual %d , update:%d \n",m_packet->pkt_len,m_packet->pkt_len + vm.m_new_pkt_size);
+ return(m);
+}
+
+
+inline void CFlowPktInfo::append_big_mbuf(rte_mbuf_t * m,
+ CGenNode * node){
+
+ rte_mbuf_t * mbig= get_big_mbuf(node->get_socket_id());
+
+ if ( mbig == NULL) {
+ return ;
+ }
+
+ utl_rte_pktmbuf_add_after(m,mbig);
+}
+
+
+inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf(CGenNode * node){
+ rte_mbuf_t * m;
+ /* alloc small packet buffer*/
+ m = CGlobalInfo::pktmbuf_alloc_small(node->get_socket_id());
+ assert(m);
+ uint16_t len= ( m_packet->pkt_len > FIRST_PKT_SIZE) ?FIRST_PKT_SIZE:m_packet->pkt_len;
+ /* append*/
+ char *p=rte_pktmbuf_append(m, len);
+
+ BP_ASSERT ( (((uintptr_t)m_packet->raw) & 0x7f )== 0) ;
+
+ memcpy(p,m_packet->raw,len);
+
+ update_pkt_info(p,node);
+
+ append_big_mbuf(m,node);
+
+ return m;
+}
+
+
+inline rte_mbuf_t * CFlowPktInfo::do_generate_new_mbuf_big(CGenNode * node){
+ rte_mbuf_t * m;
+ uint16_t len = m_packet->pkt_len;
+
+ /* alloc big buffer to update it*/
+ m = CGlobalInfo::pktmbuf_alloc(node->get_socket_id(), len);
+ assert(m);
+
+ /* append*/
+ char *p=rte_pktmbuf_append(m, len);
+
+ BP_ASSERT ( (((uintptr_t)m_packet->raw) & 0x7f )== 0) ;
+
+ memcpy(p,m_packet->raw,len);
+
+ update_pkt_info(p,node);
+
+ return(m);
+}
+
+
+inline rte_mbuf_t * CFlowPktInfo::generate_new_mbuf(CGenNode * node){
+
+ if ( m_pkt_indication.m_desc.IsPluginEnable() ) {
+ return ( on_node_generate_mbuf( node->get_plugin_id(),node,this) );
+ }
+ return (do_generate_new_mbuf(node));
+}
+
+
+
+typedef CFlowPktInfo * flow_pkt_info_t;
+
+class CCCapFileMemoryUsage {
+
+public:
+
+ enum { SIZE_MIN = 64,
+ SIZE_64 = 64,
+ SIZE_128 = 128,
+ SIZE_256 = 256,
+ SIZE_512 = 512,
+ SIZE_1024 = 1024,
+ SIZE_2048 = 2048,
+ MASK_SIZE =6
+ };
+
+ void clear(){
+ int i;
+ for (i=0; i<CCCapFileMemoryUsage::MASK_SIZE; i++) {
+ m_buf[i] = 0;
+ }
+ m_total_bytes=0;
+ }
+
+ void add_size(uint32_t size){
+ m_total_bytes+=size;
+ int c_size=CCCapFileMemoryUsage::SIZE_MIN;
+ int i;
+ for (i=0; i<CCCapFileMemoryUsage::MASK_SIZE; i++) {
+ if (size<c_size) {
+ m_buf[i]+=1;
+ return;
+ }
+ c_size = c_size*2;
+ }
+ printf("ERROR pkt size bigger than %d is not supported !\n",CCCapFileMemoryUsage::SIZE_2048);
+ exit(1);
+ }
+ void dump(FILE *fd);
+
+ void Add(const CCCapFileMemoryUsage & obj);
+
+public:
+ uint32_t m_buf[MASK_SIZE];
+ uint64_t m_total_bytes;
+};
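+
+/* add_size() above buckets each packet into power-of-two size classes from 64 up to
+   2048 bytes and keeps a running byte total; packets larger than 2048 bytes are
+   rejected with an error. */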
+
+
+class CCapFileFlowInfo {
+public:
+ bool Create();
+ void Delete();
+ uint64_t Size(void){
+ return (m_flow_pkts.size());
+ }
+ inline CFlowPktInfo * GetPacket(uint32_t index);
+ void Append(CPacketIndication * pkt_indication);
+ void RemoveAll();
+ void dump_pkt_sizes(void);
+ int load_cap_file(std::string cap_file,uint16_t _id,uint8_t plugin_id);
+
+ /* update flow info */
+ void update_info();
+
+ bool is_valid_template_load_time(std::string & err);
+
+ void save_to_erf(std::string cap_file_name,int pcap);
+
+ inline void generate_flow(CTupleTemplateGeneratorSmart * tuple_gen,
+ CNodeGenerator * gen,
+ dsec_t time,
+ uint64_t flow_id,
+ CFlowYamlInfo * template_info,
+ CGenNode * node);
+
+ inline uint64_t get_total_bytes(){
+ return (m_total_bytes);
+ }
+ inline uint64_t get_total_flows(){
+ return (m_total_flows);
+ }
+
+ inline uint64_t get_total_errors(){
+ return (m_total_errors);
+ }
+
+ // return the cap file length in sec
+ double get_cap_file_length_sec();
+
+ void get_total_memory(CCCapFileMemoryUsage & memory);
+
+public:
+ void update_min_ipg(dsec_t min_ipg,
+ dsec_t override_ipg);
+
+ void update_pcap_mode();
+
+public:
+ void Dump(FILE *fd);
+
+private:
+ std::vector<flow_pkt_info_t> m_flow_pkts;
+ uint64_t m_total_bytes;
+ uint64_t m_total_flows;
+ uint64_t m_total_errors;
+};
+
+
+
+inline CFlowPktInfo * CCapFileFlowInfo::GetPacket(uint32_t index){
+ BP_ASSERT(index<m_flow_pkts.size());
+ return (m_flow_pkts[index]);
+}
+
+
+
+
+struct CFlowsYamlInfo {
+public:
+ double m_duration_sec; //duration in sec for the cap file
+
+// IPv4 addressing
+
+// IPv6 addressing
+ std::vector <uint16_t> m_src_ipv6;
+ std::vector <uint16_t> m_dst_ipv6;
+ bool m_ipv6_set;
+
+// new section
+ bool m_cap_mode;
+ bool m_cap_mode_set;
+
+ double m_cap_ipg_min;
+ bool m_cap_ipg_min_set;
+
+ double m_cap_overide_ipg;
+ bool m_cap_overide_ipg_set;
+
+ uint32_t m_wlength;
+ bool m_wlength_set;
+
+ bool m_one_app_server;
+ bool m_one_app_server_was_set;
+ bool m_mac_replace_by_ip;
+
+ CVlanYamlInfo m_vlan_info;
+ CTupleGenYamlInfo m_tuple_gen;
+ bool m_tuple_gen_was_set;
+
+
+ std::vector <uint8_t> m_mac_base;
+
+ std::vector <CFlowYamlInfo> m_vec;
+
+ bool m_is_plugin_configured; /* any plugin is configured */
+public:
+ void Dump(FILE *fd);
+ int load_from_yaml_file(std::string file_name);
+ bool verify_correctness(uint32_t num_threads) ;
+ bool is_any_plugin_configured(){
+ return ( m_is_plugin_configured);
+ }
+};
+
+
+
+
+class CFlowStats {
+public:
+ CFlowStats(){
+ Clear();
+ }
+ uint16_t m_id;
+ std::string m_name;
+ double m_pkt;
+ double m_bytes;
+ double duration_sec;
+ double m_cps;
+ double m_mb_sec;
+ double m_mB_sec;
+ double m_c_flows;
+ double m_pps ;
+ double m_total_Mbytes ;
+ uint64_t m_errors ;
+ uint64_t m_flows ;
+ CCCapFileMemoryUsage m_memory;
+
+ /* normalized CPS by the number of flows */
+ double get_normal_cps(){
+ return ( m_cps*(double)m_flows );
+ }
+public:
+ void Clear();
+ void Add(const CFlowStats & obj);
+
+public:
+ static void DumpHeader(FILE *fd);
+ void Dump(FILE *fd);
+};
+
+
+class CFlowGeneratorRecPerThread {
+
+public:
+ bool Create(CTupleGeneratorSmart * global_gen,
+ CFlowYamlInfo * info,
+ CFlowsYamlInfo * yaml_flow_info,
+ CCapFileFlowInfo * flow_info,
+ uint16_t _id,
+ uint32_t thread_id );
+ void Delete();
+public:
+ void Dump(FILE *fd);
+ inline void generate_flow(CNodeGenerator * gen,
+ dsec_t time,
+ uint64_t flow_id,
+ CGenNode * node);
+ void getFlowStats(CFlowStats * stats);
+
+public:
+ CTupleTemplateGeneratorSmart tuple_gen;
+
+ CCapFileFlowInfo * m_flow_info;
+ CFlowYamlInfo * m_info;
+ CFlowsYamlInfo * m_flows_info;
+ CPolicer m_policer;
+ uint16_t m_id ;
+ uint32_t m_thread_id;
+} __rte_cache_aligned;
+
+
+
+
+class CFlowGeneratorRec {
+
+public:
+ bool Create(CFlowYamlInfo * info,
+ CFlowsYamlInfo * flow_info,
+ uint16_t _id);
+ void Delete();
+public:
+
+ void Dump(FILE *fd);
+ void getFlowStats(CFlowStats * stats);
+public:
+ CCapFileFlowInfo m_flow_info;
+ CFlowYamlInfo * m_info;
+ CFlowsYamlInfo * m_flows_info;
+ CPolicer m_policer;
+ uint16_t m_id;
+private:
+ void fixup_ipg_if_needed();
+};
+
+class CPPSMeasure {
+public:
+ CPPSMeasure(){
+ reset();
+ }
+ //reset
+ void reset(void){
+ m_start=false;
+ m_last_time_msec=0;
+ m_last_pkts=0;
+ m_last_result=0.0;
+ }
+ //add packet size
+ float add(uint64_t pkts);
+
+private:
+ float calc_pps(uint32_t dtime_msec,
+ uint32_t pkts){
+ float rate=( ( (float)pkts*(float)os_get_time_freq())/((float)dtime_msec) );
+ return (rate);
+
+ }
+
+public:
+ bool m_start;
+ uint32_t m_last_time_msec;
+ uint64_t m_last_pkts;
+ float m_last_result;
+};
+
+
+
+class CBwMeasure {
+public:
+ CBwMeasure();
+ //reset
+ void reset(void);
+ //add packet size
+ double add(uint64_t size);
+
+private:
+ double calc_MBsec(uint32_t dtime_msec,
+ uint64_t dbytes);
+
+public:
+ bool m_start;
+ uint32_t m_last_time_msec;
+ uint64_t m_last_bytes;
+ double m_last_result;
+};
+
+
+class CFlowGenList;
+
+typedef uint32_t flow_id_t;
+
+
+class CTcpSeq {
+public:
+ CTcpSeq (){
+ client_seq_delta = 0;
+ server_seq_delta = 0;
+ server_seq_init=false;
+ };
+ void update(uint8_t *p, CFlowPktInfo *pkt_info, int16_t s_size);
+private:
+ uint32_t client_seq_delta; /* Delta to TCP seq number for client */
+ uint32_t server_seq_delta; /* Delta to TCP seq number for server */
+ bool server_seq_init; /* TCP seq been init for server? */
+};
+
+
+/////////////////////////////////////////////////////////////////////////////////
+/* per thread info */
+class CFlowGenListPerThread {
+
+public:
+ friend class CNodeGenerator;
+ friend class CPluginCallbackSimple;
+ friend class CCapFileFlowInfo;
+
+ typedef CGenericMap<flow_id_t,CGenNode> flow_id_node_t;
+
+ bool Create(uint32_t thread_id,
+ uint32_t core_id,
+ CFlowGenList * flow_list,
+ uint32_t max_threads);
+ void Delete();
+
+ void set_vif(CVirtualIF * v_if){
+ m_node_gen.set_vif(v_if);
+ }
+
+ /* return the dual-port ID this thread is attached to.
+    In a 4-port configuration there are 2 dual-ports:
+
+      thread 0 - dual 0
+      thread 1 - dual 1
+
+      thread 2 - dual 0
+      thread 3 - dual 1
+
+ */
+ uint32_t getDualPortId();
+public :
+ double get_total_kcps();
+ double get_delta_flow_is_sec();
+ double get_longest_flow();
+ void inc_current_template(void);
+ int generate_flows_roundrobin(bool *done);
+ int reschedule_flow(CGenNode *node);
+
+
+ inline CGenNode * create_node(void);
+ inline void free_node(CGenNode *p);
+ inline void free_last_flow_node(CGenNode *p);
+
+
+public:
+ void Clean();
+ void generate_erf(std::string erf_file_name,CPreviewMode &preview);
+ void Dump(FILE *fd);
+ void DumpCsv(FILE *fd);
+ void DumpStats(FILE *fd);
+ void Update(void){
+ m_cpu_cp_u.Update();
+ }
+ double getCpuUtil(void){
+ return ( m_cpu_cp_u.GetVal());
+ }
+
+private:
+ void check_msgs(void);
+ void handel_nat_msg(CGenNodeNatInfo * msg);
+ void handel_latecy_pkt_msg(CGenNodeLatencyPktInfo * msg);
+
+ void terminate_nat_flows(CGenNode *node);
+
+
+ void init_from_global(CClientPortion &);
+ void defer_client_port_free(CGenNode *p);
+ void defer_client_port_free(bool is_tcp,uint32_t c_ip,uint16_t port);
+
+
+ FORCE_NO_INLINE void handler_defer_job(CGenNode *p);
+ FORCE_NO_INLINE void handler_defer_job_flush(void);
+
+
+ inline CGenNodeDeferPort * get_tcp_defer(void){
+ if (m_tcp_dpc==0) {
+ m_tcp_dpc =(CGenNodeDeferPort *)create_node();
+ m_tcp_dpc->init();
+ }
+ return (m_tcp_dpc);
+ }
+
+ inline CGenNodeDeferPort * get_udp_defer(void){
+ if (m_udp_dpc==0) {
+ m_udp_dpc =(CGenNodeDeferPort *)create_node();
+ m_udp_dpc->init();
+ }
+ return (m_udp_dpc);
+ }
+
+private:
+ FORCE_NO_INLINE void associate(uint32_t fid,CGenNode * node ){
+ assert(m_flow_id_to_node_lookup.lookup(fid)==0);
+ m_stats.m_nat_lookup_add_flow_id++;
+ m_flow_id_to_node_lookup.add(fid,node);
+ }
+
+public:
+ uint32_t m_thread_id; /* virtual */
+ uint32_t m_core_id; /* physical */
+
+ uint32_t m_max_threads;
+ CFlowGenList * m_flow_list;
+ rte_mempool_t * m_node_pool;
+
+ std::vector<CFlowGeneratorRecPerThread *> m_cap_gen;
+
+ CFlowsYamlInfo m_yaml_info;
+
+ CTupleGeneratorSmart m_smart_gen;
+
+
+public:
+ CNodeGenerator m_node_gen;
+public:
+ uint32_t m_cur_template;
+ uint64_t m_cur_flow_id;
+ double m_cur_time_sec;
+ double m_stop_time_sec;
+
+ CPreviewMode m_preview_mode;
+public:
+ CFlowGenStats m_stats;
+ CBwMeasure m_mb_sec;
+ CCpuUtlDp m_cpu_dp_u;
+ CCpuUtlCp m_cpu_cp_u;
+
+private:
+ CGenNodeDeferPort * m_tcp_dpc;
+ CGenNodeDeferPort * m_udp_dpc;
+
+ CNodeRing * m_ring_from_rx; /* ring latency thread -> dp */
+ CNodeRing * m_ring_to_rx; /* ring dp -> latency thread */
+
+ flow_id_node_t m_flow_id_to_node_lookup;
+};
+
+inline CGenNode * CFlowGenListPerThread::create_node(void){
+ CGenNode * res;
+ if ( unlikely (rte_mempool_sc_get(m_node_pool, (void **)&res) <0) ){
+ rte_exit(EXIT_FAILURE, "can't allocate object, need more\n");
+ return (0);
+ }
+ return (res);
+}
+
+inline void CFlowGenListPerThread::free_node(CGenNode *p){
+ rte_mempool_sp_put(m_node_pool, p);
+}
+
+inline void CFlowGenListPerThread::free_last_flow_node(CGenNode *p){
+ m_stats.m_total_close_flows +=p->m_flow_info->get_total_flows();
+
+ uint8_t plugin_id =p->get_plugin_id();
+ if ( plugin_id ) {
+ /* free memory of the plugin */
+ on_node_last(plugin_id,p);
+ }
+ defer_client_port_free(p);
+ free_node( p);
+}
+
+
+typedef struct mac_mapping_ {
+ mac_addr_align_t mac;
+ uint32_t ip;
+} mac_mapping_t;
+
+class CFlowGenList {
+
+public:
+ bool Create();
+ void Delete();
+ void Clean();
+public:
+ void generate_p_thread_info(uint32_t num_threads);
+ void clean_p_thread_info(void);
+
+public:
+
+ int load_from_yaml(std::string csv_file,uint32_t num_threads);
+ int load_from_mac_file(std::string csv_file);
+public:
+ void Dump(FILE *fd);
+ void DumpCsv(FILE *fd);
+ void DumpPktSize();
+ void Update();
+ double GetCpuUtil();
+
+public:
+ double get_total_kcps();
+ double get_total_pps();
+ double get_total_tx_bps();
+ uint32_t get_total_repeat_flows();
+ bool is_ip_legal(uint32_t ip) {
+ return (ip >= m_yaml_info.m_tuple_gen.m_clients_ip_start &&
+ ip <= m_yaml_info.m_tuple_gen.m_clients_ip_end );
+ }
+ double get_delta_flow_is_sec();
+public:
+ std::vector<CFlowGeneratorRec *> m_cap_gen; /* global info */
+ CFlowsYamlInfo m_yaml_info; /* global yaml*/
+ std::vector<CFlowGenListPerThread *> m_threads_info;
+ bool is_mac_info_configured;
+ std::map<uint32_t, mac_addr_align_t> m_mac_info; /* global mac info loaded form mac_file*/
+};
+
+
+
+
+
+
+inline void CCapFileFlowInfo::generate_flow(CTupleTemplateGeneratorSmart * tuple_gen,
+ CNodeGenerator * gen,
+ dsec_t time,
+ uint64_t flow_id,
+ CFlowYamlInfo * template_info,
+ CGenNode * node){
+ dsec_t c_time = time;
+
+ node->m_type=CGenNode::FLOW_PKT;
+ CTupleBase tuple;
+ tuple_gen->GenerateTuple(tuple);
+
+ /* add the first packet of the flow */
+ CFlowPktInfo * lp=GetPacket((uint32_t)0);
+
+ node->set_socket_id(gen->m_socket_id);
+
+ node->m_thread_id = tuple_gen->GetThreadId();
+ node->m_flow_id = (flow_id & (0x000fffffffffffffULL)) |
+ ( ((uint64_t)(tuple_gen->GetThreadId()& 0xff)) <<56 ) ;
+
+ node->m_time = c_time;
+ node->m_pkt_info = lp;
+ node->m_flow_info = this;
+ node->m_flags=0;
+ node->m_template_info =template_info;
+ node->m_src_ip= tuple.getClient();
+ node->m_dest_ip = tuple.getServer();
+ node->m_src_port = tuple.getClientPort();
+ memcpy(&node->m_src_mac,
+ tuple.getClientMac(),
+ sizeof(mac_addr_align_t));
+ node->m_plugin_info =(void *)0;
+
+ if ( unlikely( CGlobalInfo::is_learn_mode() ) ){
+ // check if flow is two direction
+ if ( lp->m_pkt_indication.m_desc.IsBiDirectionalFlow() ) {
+ /* we are in learn mode */
+ CFlowGenListPerThread * lpThread=gen->Parent();
+ lpThread->associate((uint32_t)flow_id,node); /* associate flow_id => node */
+ node->set_nat_first_state();
+ }
+ }
+
+ if ( unlikely( get_is_rx_check_mode()) ) {
+ if ( (CGlobalInfo::m_options.m_rx_check_sampe == 1 ) ||
+ ( ( rte_rand() % CGlobalInfo::m_options.m_rx_check_sampe ) == 1 )){
+ if (unlikely(!node->is_repeat_flow() )) {
+ node->set_rx_check();
+ }
+ }
+ }
+
+ if ( unlikely( CGlobalInfo::m_options.preview.getClientServerFlowFlipAddr() ) ){
+ node->set_initiator_start_from_server_side_with_server_addr(node->is_eligible_from_server_side());
+ }else{
+ /* -p */
+ if ( likely( CGlobalInfo::m_options.preview.getClientServerFlowFlip() ) ){
+ node->set_initiator_start_from_server(node->is_eligible_from_server_side());
+ node->set_all_flow_from_same_dir(true);
+ }else{
+ /* --flip */
+ if ( unlikely( CGlobalInfo::m_options.preview.getClientServerFlip() ) ){
+ node->set_initiator_start_from_server(node->is_eligible_from_server_side());
+ }
+ }
+ }
+
+
+ /* in case of plugin we need to call the callback */
+ if ( template_info->m_plugin_id ) {
+ /* alloc the info , generate the ports */
+ on_node_first(template_info->m_plugin_id,node,template_info,tuple_gen,gen->Parent() );
+ }
+
+ gen->add_node(node);
+}
+
+
+inline void CFlowGeneratorRecPerThread::generate_flow(CNodeGenerator * gen,
+ dsec_t time,
+ uint64_t flow_id,
+ CGenNode * node){
+
+ m_flow_info->generate_flow(&tuple_gen,
+ gen,
+ time,
+ flow_id,
+ m_info,
+ node);
+}
+
+
+
+class CLatencyPktInfo {
+public:
+ void Create();
+ void Delete();
+ void set_ip(uint32_t src,
+ uint32_t dst,
+ uint32_t dual_port_mask);
+ rte_mbuf_t * generate_pkt(int port_id,uint32_t extern_ip=0);
+
+ CGenNode * getNode(){
+ return (&m_dummy_node);
+ }
+
+ uint16_t get_payload_offset(void){
+ return ( m_pkt_indication.getFastPayloadOffset());
+ }
+
+ uint16_t get_pkt_size(void){
+ return ( m_packet->pkt_len );
+ }
+
+private:
+ ipaddr_t m_client_ip;
+ ipaddr_t m_server_ip;
+ uint32_t m_dual_port_mask;
+
+ CGenNode m_dummy_node;
+ CFlowPktInfo m_pkt_info;
+ CPacketIndication m_pkt_indication;
+ CCapPktRaw * m_packet;
+};
+
+
+#define LATENCY_MAGIC 0x12345600
+
+struct latency_header {
+
+ uint64_t time_stamp;
+ uint32_t magic;
+ uint32_t seq;
+
+ uint8_t get_id(){
+ return( magic & 0xff);
+ }
+};
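+
+/* Sketch, based only on this header: the low byte of 'magic' appears to carry a
+   per-port/per-id value on top of LATENCY_MAGIC (whose low byte is 0x00), e.g. a
+   sender could build the field as
+
+       hdr.magic = LATENCY_MAGIC | id;      // hypothetical usage
+
+   and the receiver recovers it with get_id() == (magic & 0xff). */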
+
+
+class CSimplePacketParser {
+public:
+
+ CSimplePacketParser(rte_mbuf_t * m){
+ m_m=m;
+ }
+
+ bool Parse();
+ uint8_t getTTl();
+ uint16_t getPktSize();
+
+
+
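+    /* 0x84 is the SCTP IP protocol number; a packet is classified as a latency
+       packet here simply by carrying that protocol value. */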
+ inline bool IsLatencyPkt(){
+ return ( (m_protocol ==0x84 )?true:false );
+ }
+
+
+public:
+ IPHeader * m_ipv4;
+ IPv6Header * m_ipv6;
+ uint8_t m_protocol;
+ uint16_t m_vlan_offset;
+ uint16_t m_option_offset;
+private:
+ rte_mbuf_t * m_m ;
+};
+
+
+
+class CLatencyManager ;
+// per port
+class CCPortLatency {
+public:
+ bool Create(CLatencyManager * parent,
+ uint8_t id,
+ uint16_t offset,
+ uint16_t pkt_size,
+ CCPortLatency * rx_port
+ );
+ void Delete();
+ void reset();
+ bool can_send_packet(){
+ if ( !CGlobalInfo::is_learn_mode() ) {
+ return(true);
+ }
+ return ( m_nat_can_send );
+ }
+ uint32_t external_nat_ip(){
+ return (m_nat_external_ip);
+ }
+
+ void update_packet(rte_mbuf_t * m);
+
+ bool do_learn(uint32_t external_ip);
+
+ bool check_packet(rte_mbuf_t * m,
+ CRx_check_header * & rx_p);
+ bool check_rx_check(rte_mbuf_t * m);
+
+
+ bool dump_packet(rte_mbuf_t * m);
+
+ void DumpCounters(FILE *fd);
+ void dump_counters_json(std::string & json );
+
+ void DumpShort(FILE *fd);
+ void dump_json(std::string & json );
+ void dump_json_v2(std::string & json );
+
+ uint32_t get_jitter_usec(void){
+ return ((uint32_t)(m_jitter.get_jitter()*1000000.0));
+ }
+
+
+ static void DumpShortHeader(FILE *fd);
+
+ bool is_any_err(){
+ if ( (m_tx_pkt_ok == m_rx_port->m_pkt_ok ) &&
+
+ ((m_unsup_prot+
+ m_no_magic+
+ m_no_id+
+ m_seq_error+
+ m_length_error+m_no_ipv4_option+m_tx_pkt_err)==0) ) {
+ return (false);
+ }
+ return (true);
+ }
+
+private:
+ std::string get_field(std::string name,float f);
+
+
+
+private:
+ CLatencyManager * m_parent;
+ CCPortLatency * m_rx_port; /* corresponding rx port */
+ bool m_nat_learn;
+ bool m_nat_can_send;
+ uint32_t m_nat_external_ip;
+
+ uint32_t m_tx_seq;
+ uint32_t m_rx_seq;
+
+ uint8_t m_pad;
+ uint8_t m_id;
+ uint16_t m_offset;
+
+ uint16_t m_pkt_size;
+ uint16_t pad1[3];
+
+public:
+ uint64_t m_tx_pkt_ok;
+ uint64_t m_tx_pkt_err;
+
+ uint64_t m_pkt_ok;
+ uint64_t m_unsup_prot;
+ uint64_t m_no_magic;
+ uint64_t m_no_id;
+ uint64_t m_seq_error;
+ uint64_t m_rx_check;
+ uint64_t m_no_ipv4_option;
+
+
+ uint64_t m_length_error;
+ CTimeHistogram m_hist; /* all window */
+ CJitter m_jitter;
+};
+
+
+class CPortLatencyHWBase {
+public:
+ virtual int tx(rte_mbuf_t * m)=0;
+ virtual rte_mbuf_t * rx()=0;
+ virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts){
+ return(0);
+ }
+};
+
+
+class CLatencyManagerCfg {
+public:
+ CLatencyManagerCfg (){
+ m_max_ports=0;
+ m_cps=0.0;
+ m_client_ip.v4=0x10000000;
+ m_server_ip.v4=0x20000000;
+ m_dual_port_mask=0x01000000;
+ }
+ uint32_t m_max_ports;
+ double m_cps;// CPS
+ CPortLatencyHWBase * m_ports[MAX_LATENCY_PORTS];
+ ipaddr_t m_client_ip;
+ ipaddr_t m_server_ip;
+ uint32_t m_dual_port_mask;
+
+};
+
+
+
+class CLatencyManagerPerPort {
+public:
+ CCPortLatency m_port;
+ CPortLatencyHWBase * m_io;
+ uint32_t m_flag;
+
+};
+
+
+class CLatencyManager {
+public:
+ bool Create(CLatencyManagerCfg * cfg);
+ void Delete();
+
+public:
+ void reset();
+ void start(int iter);
+ void stop();
+ bool is_active();
+
+ void set_ip(uint32_t client_ip,
+ uint32_t server_ip,
+ uint32_t mask_dual_port){
+ m_pkt_gen.set_ip(client_ip,server_ip,mask_dual_port);
+ }
+
+public:
+ void Dump(FILE *fd); // dump all
+ void DumpShort(FILE *fd); // dump short histogram of latency
+
+ void DumpRxCheck(FILE *fd); // dump all
+ void DumpShortRxCheck(FILE *fd); // dump short histogram of latency
+ void rx_check_dump_json(std::string & json);
+ uint16_t get_latency_header_offset(){
+ return ( m_pkt_gen.get_payload_offset() );
+ }
+ void update();
+ void dump_json(std::string & json ); // dump to json
+ void dump_json_v2(std::string & json );
+
+
+
+ void DumpRxCheckVerification(FILE *fd,uint64_t total_tx_rx_check);
+ void set_mask(uint32_t mask){
+ m_port_mask=mask;
+ }
+
+ double get_max_latency(void);
+ double get_avr_latency(void);
+ bool is_any_error();
+ uint64_t get_total_pkt();
+ uint64_t get_total_bytes();
+ CNatRxManager * get_nat_manager(){
+ return ( &m_nat_check_manager );
+ }
+
+private:
+ void send_pkt_all_ports();
+ void try_rx();
+ void try_rx_queues();
+ void run_rx_queue_msgs(uint8_t thread_id,
+ CNodeRing * r);
+ void wait_for_rx_dump();
+ void handle_rx_pkt(CLatencyManagerPerPort * lp,
+ rte_mbuf_t * m);
+
+
+private:
+ /* messages handlers */
+ void handle_latecy_pkt_msg(uint8_t thread_id,
+ CGenNodeLatencyPktInfo * msg);
+
+
+
+private:
+ pqueue_t m_p_queue; /* priority queue */
+ bool m_is_active;
+ CLatencyPktInfo m_pkt_gen;
+ CLatencyManagerPerPort m_ports[MAX_LATENCY_PORTS];
+ uint64_t m_d_time; // tick interval between sends
+ double m_cps;
+ double m_delta_sec;
+ uint64_t m_start_time; // start time, in ticks
+ uint32_t m_port_mask;
+ uint32_t m_max_ports;
+ RxCheckManager m_rx_check_manager;
+ CNatRxManager m_nat_check_manager;
+ CCpuUtlDp m_cpu_dp_u;
+ CCpuUtlCp m_cpu_cp_u;
+
+ volatile bool m_do_stop __rte_cache_aligned ;
+
+};
+
+
+inline bool CGenNode::is_responder_pkt(){
+ return ( m_pkt_info->m_pkt_indication.m_desc.IsInitSide() ?false:true );
+}
+
+inline bool CGenNode::is_initiator_pkt(){
+ return ( m_pkt_info->m_pkt_indication.m_desc.IsInitSide() ?true:false );
+}
+
+
+
+inline uint16_t CGenNode::get_template_id(){
+ return ( m_pkt_info->m_pkt_indication.m_desc.getId() );
+}
+
+
+inline bool CGenNode::is_last_in_flow(){
+ return ( m_pkt_info->m_pkt_indication.m_desc.IsLastPkt());
+}
+
+inline bool CGenNode::is_repeat_flow(){
+ return ( m_template_info->m_limit_was_set);
+}
+
+inline void CGenNode::update_next_pkt_in_flow(void){
+ if ( likely ( m_pkt_info->m_pkt_indication.m_desc.IsPcapTiming()) ){
+ m_time += m_pkt_info->m_pkt_indication.m_cap_ipg ;
+ }else{
+ if ( m_pkt_info->m_pkt_indication.m_desc.IsRtt() ){
+ m_time += m_template_info->m_rtt_sec ;
+ }else{
+ m_time += m_template_info->m_ipg_sec;
+ }
+ }
+
+ uint32_t pkt_index = m_pkt_info->m_pkt_indication.m_packet->pkt_cnt;
+ pkt_index++;
+ m_pkt_info = m_flow_info->GetPacket((pkt_index-1));
+}
+
+inline void CGenNode::reset_pkt_in_flow(void){
+ m_pkt_info = m_flow_info->GetPacket(0);
+}
+
+inline void CGenNode::replace_tuple(void){
+ m_pkt_info->replace_tuple(this);
+}
+
+enum MINVM_PLUGIN_ID{
+ mpRTSP=1,
+ mpSIP_VOICE=2,
+ mpDYN_PYLOAD=3,
+ mpAVL_HTTP_BROWSIN=4 /* a way to replace the host IP with the client IP */
+};
+
+class CPluginCallback {
+public:
+ virtual void on_node_first(uint8_t plugin_id,CGenNode * node,CFlowYamlInfo * template_info, CTupleTemplateGeneratorSmart * tuple_gen,CFlowGenListPerThread * flow_gen) =0;
+ virtual void on_node_last(uint8_t plugin_id,CGenNode * node)=0;
+ virtual rte_mbuf_t * on_node_generate_mbuf(uint8_t plugin_id,CGenNode * node,CFlowPktInfo * pkt_info)=0;
+public:
+ static CPluginCallback * callback;
+};
+
+class CPluginCallbackSimple : public CPluginCallback {
+public:
+ virtual void on_node_first(uint8_t plugin_id,CGenNode * node,
+ CFlowYamlInfo * template_info,
+ CTupleTemplateGeneratorSmart * tuple_gen,
+ CFlowGenListPerThread * flow_gen);
+ virtual void on_node_last(uint8_t plugin_id,CGenNode * node);
+ virtual rte_mbuf_t * on_node_generate_mbuf(uint8_t plugin_id,CGenNode * node,CFlowPktInfo * pkt_info);
+
+private:
+ rte_mbuf_t * rtsp_plugin(uint8_t plugin_id,CGenNode * node,CFlowPktInfo * pkt_info);
+ rte_mbuf_t * sip_voice_plugin(uint8_t plugin_id,CGenNode * node,CFlowPktInfo * pkt_info);
+ rte_mbuf_t * dyn_pyload_plugin(uint8_t plugin_id,CGenNode * node,CFlowPktInfo * pkt_info);
+ rte_mbuf_t * http_plugin(uint8_t plugin_id,CGenNode * node,CFlowPktInfo * pkt_info);
+
+};
+
+
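+/* an mbuf can be cached/reused only for 'repeat' flows (templates with a limit set)
+   whose flow template consists of a single packet */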
+inline bool CGenNode::can_cache_mbuf(void){
+ if ( is_repeat_flow() && ( m_flow_info->Size()==1 ) ){
+ return (true);
+ }else{
+ return (false);
+ }
+}
+
+
+/* direction for the IP address: SERVER means take the address from the server side, CLIENT means take it from the client side */
+inline pkt_dir_t CGenNode::cur_pkt_ip_addr_dir(){
+
+ CFlowPktInfo * lp=m_pkt_info;
+ bool init_from_server=get_is_initiator_start_from_server_with_server_addr();
+ bool is_init=lp->m_pkt_indication.m_desc.IsInitSide() ^ init_from_server;
+ return ( is_init ?CLIENT_SIDE:SERVER_SIDE);
+}
+
+/* direction for TCP/UDP port */
+inline pkt_dir_t CGenNode::cur_pkt_port_addr_dir(){
+ CFlowPktInfo * lp=m_pkt_info;
+ bool is_init=lp->m_pkt_indication.m_desc.IsInitSide() ;
+ return ( is_init ?CLIENT_SIDE:SERVER_SIDE);
+}
+/* from which interface dir to get out */
+inline pkt_dir_t CGenNode::cur_interface_dir(){
+
+ CFlowPktInfo * lp=m_pkt_info;
+
+ bool init_from_server=(get_is_initiator_start_from_server()||
+ get_is_initiator_start_from_server_with_server_addr());
+ bool is_init=lp->m_pkt_indication.m_desc.IsInitSide() ^ init_from_server;
+
+ if (get_is_all_flow_from_same_dir()) {
+ return (is_eligible_from_server_side()?SERVER_SIDE:CLIENT_SIDE);
+ }else{
+ return ( is_init ?CLIENT_SIDE:SERVER_SIDE);
+ }
+}
+
+
+
+#endif
diff --git a/src/common/BigEndianBitManipulation.h b/src/common/BigEndianBitManipulation.h
new file mode 100755
index 00000000..e8ea02fe
--- /dev/null
+++ b/src/common/BigEndianBitManipulation.h
@@ -0,0 +1,73 @@
+#ifndef BIG_ENDIAN_BIT_MAN_H
+#define BIG_ENDIAN_BIT_MAN_H
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "bitMan.h"
+
+inline void setMaskBit8(uint8_t & a,
+ int startbit,
+ int stopbit,
+ uint8_t newVal )
+{
+ btSetMaskBitBigE8(a, startbit, stopbit, newVal);
+}
+
+
+inline void setMaskBit16(uint16_t & a,
+ int startbit,
+ int stopbit,
+ uint16_t newVal )
+{
+ btSetMaskBitBigE16(a, startbit, stopbit, newVal);
+}
+
+
+inline void setMaskBit32(uint32_t & a,
+ int startbit,
+ int stopbit,
+ uint32_t newVal )
+{
+ btSetMaskBitBigE32(a, startbit, stopbit, newVal);
+}
+
+
+
+
+inline unsigned char getMaskBit8(uint8_t a,
+ int startbit,
+ int stopbit) {
+ return btGetMaskBitBigE8(a,startbit,stopbit);
+}
+
+
+inline unsigned short getMaskBit16(uint16_t a,
+ int startbit,
+ int stopbit) {
+ return btGetMaskBitBigE16(a,startbit,stopbit);
+}
+
+
+inline unsigned long getMaskBit32(uint32_t a,
+ int startbit,
+ int stopbit) {
+ return btGetMaskBitBigE32(a,startbit,stopbit);
+}
+
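+
+/* Usage sketch (illustrative; mirrors how IPHeader.inl uses these helpers): bit 0 is
+   the most-significant bit of the value, so for the IPv4 version/IHL byte:
+
+       uint8_t ver_ihl = 0x45;                      // version 4, IHL 5 (20 bytes)
+       uint8_t ver = getMaskBit8(ver_ihl, 0, 3);    // == 4
+       uint8_t ihl = getMaskBit8(ver_ihl, 4, 7);    // == 5
+       setMaskBit8(ver_ihl, 0, 3, 6);               // rewrite only the version field
+*/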
+
+#endif // BIG_ENDIAN_BIT_MAN_H
+
+
diff --git a/src/common/Env.h b/src/common/Env.h
new file mode 100755
index 00000000..95b26816
--- /dev/null
+++ b/src/common/Env.h
@@ -0,0 +1,34 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef OS_ENV
+#define OS_ENV
+
+#ifdef __GNUC__
+ #if (_BYTE_ORDER == _BIG_ENDIAN)
+ #define BIG_EDIAN
+ #else
+ #undef BIG_EDIAN
+ #endif
+#endif
+
+#ifdef _MSC_VER
+ #define BYTE_ORDER
+ #undef BIG_EDIAN
+#endif
+
+
+#endif
diff --git a/src/common/Network/Packet/CPktCmn.cpp b/src/common/Network/Packet/CPktCmn.cpp
new file mode 100755
index 00000000..9f682826
--- /dev/null
+++ b/src/common/Network/Packet/CPktCmn.cpp
@@ -0,0 +1,145 @@
+#include "CPktCmn.h"
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+
+#define TouchCacheLine(a)
+
+uint16_t pkt_InetChecksum(uint8_t* data ,
+ uint16_t len, uint8_t* data2 , uint16_t len2){
+
+ TouchCacheLine(data2);
+ TouchCacheLine(data2+32);
+ TouchCacheLine(data2+64);
+
+ int sum = 0;
+ while(len>1){
+ TouchCacheLine(data+96); //three lines ahead !
+ sum += PKT_NTOHS(*((uint16_t*)data));
+ data += 2;
+ len -= 2;
+ }
+
+ while(len2>1){
+ TouchCacheLine(data2+96); //three lines ahead !
+ sum += PKT_NTOHS(*((uint16_t*)data2));
+ data2 += 2;
+ len2 -= 2;
+ }
+
+ if(len2){
+ sum += (PKT_NTOHS(*((uint16_t*)data2)) & 0xff00);
+ }
+
+ while(sum >> 16){
+ sum = (sum & 0xffff) + (sum >> 16);
+ }
+
+ return PKT_NTOHS((uint16_t)(~sum));
+}
+
+uint16_t pkt_InetChecksum(uint8_t* data , uint16_t len){
+
+ int sum = 0;
+ while(len>1){
+ TouchCacheLine(data+96); //three line ahead !
+ sum += PKT_NTOHS(*((uint16_t*)data));
+ data += 2;
+ len -= 2;
+ }
+
+ if(len){
+ sum += (PKT_NTOHS(*((uint16_t*)data)) & 0xff00);
+ }
+
+ while(sum >> 16){
+ sum = (sum & 0xffff) + (sum >> 16);
+ }
+
+ return PKT_NTOHS((uint16_t)(~sum));
+}
+
+uint16_t pkt_UpdateInetChecksum(uint16_t csFieldFromPacket, uint16_t oldVal, uint16_t newVal){
+ uint32_t newCS;
+ newCS = (uint16_t)(~PKT_NTOHS(csFieldFromPacket));
+ newCS += (uint16_t)(~PKT_NTOHS(oldVal));
+ newCS += (uint16_t)PKT_NTOHS(newVal);
+ while(newCS >> 16){
+ newCS = (newCS & 0xffff) + (newCS >> 16);
+ }
+ return PKT_NTOHS((uint16_t)(~newCS));
+}
+
+uint16_t pkt_SubtractInetChecksum(uint16_t checksum, uint16_t csToSubtract){
+ uint32_t newCS;
+ newCS = (uint16_t)(~PKT_NTOHS(checksum));
+
+ // since the cs is already in ~ format in the packet, there is no need
+ // to negate it for subtraction in 1's complement.
+ newCS += (uint16_t)PKT_NTOHS(csToSubtract);
+
+ while(newCS >> 16){
+ newCS = (newCS & 0xffff) + (newCS >> 16);
+ }
+ return PKT_NTOHS((uint16_t)(~newCS));
+}
+
+uint16_t pkt_AddInetChecksum(uint16_t checksum, uint16_t csToAdd){
+ uint32_t newCS;
+ newCS = (uint16_t)(~PKT_NTOHS(checksum));
+
+ // since the cs is already in ~ format in the packet, there is a need
+ // to negate it for addition in 1's complement.
+ newCS += (uint16_t)PKT_NTOHS(~csToAdd);
+
+ while(newCS >> 16){
+ newCS = (newCS & 0xffff) + (newCS >> 16);
+ }
+ return PKT_NTOHS((uint16_t)(~newCS));
+}
+
+
+extern "C" void pkt_ChecksumTest(){
+
+ uint16_t cs;
+ uint8_t data[5] = {0xcd,0x7a,0x55,0x55,0xa1};
+
+ cs = pkt_InetChecksum((uint8_t*)data,5);
+ printf("CS = 0x%04x: ",cs);
+ if(cs == 0x2F3C){
+ printf("CS func with odd len OK.\n");
+ }else{
+ printf("ERROR: CS func produced wrong value with odd number of bytes.\n");
+ }
+
+ cs = pkt_InetChecksum((uint8_t*)data,4);
+ printf("CS = 0x%04x: ",cs);
+ if(cs == 0x2FDD){
+ printf("CS func with even len OK.\n");
+ }else{
+ printf("ERROR: CS func produced wrong value with even number of bytes.\n");
+ }
+
+ cs = pkt_UpdateInetChecksum(cs,0x5555,0x8532);
+ printf("CS = 0x%04x: ",cs);
+ if(cs == 0x0000){
+ printf("Update CS func OK.\n");
+ }else{
+ printf("ERROR: Update CS func produced wrong value.\n");
+ }
+
+}
diff --git a/src/common/Network/Packet/CPktCmn.h b/src/common/Network/Packet/CPktCmn.h
new file mode 100755
index 00000000..a107c5a3
--- /dev/null
+++ b/src/common/Network/Packet/CPktCmn.h
@@ -0,0 +1,88 @@
+#ifndef PKT_CMN_H
+#define PKT_CMN_H
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+
+#include <common/c_common.h>
+#include <common/bitMan.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include "pal_utl.h"
+
+
+#define PKT_HTONL(x) (PAL_NTOHL(x))
+#define PKT_HTONS(x) (PAL_NTOHS(x))
+
+#define PKT_NTOHL(x) (PAL_NTOHL(x))
+#define PKT_NTOHS(x) (PAL_NTOHS(x))
+
+
+// returns the checksum in NETWORK byte order
+uint16_t pkt_InetChecksum(uint8_t* data , uint16_t len);
+
+// len MUST be an even number !!!
+// len2 can be odd.
+// returns the checksum in NETWORK byte order
+uint16_t pkt_InetChecksum(uint8_t* data , uint16_t len, uint8_t* data2 , uint16_t len2);
+
+// this function updates an inet-checksum incrementally.
+// It accepts the checksum field AS IS from the packet, the old 16-bit value
+// and the new 16-bit value.
+// the cs, old and new values must be given in NETWORK byte order !!!
+// the return value is also in NETWORK byte order !!
+uint16_t pkt_UpdateInetChecksum(uint16_t csFieldFromPacket, uint16_t oldVal, uint16_t newVal);
+
+// checksum and csToSubtract are two uint16_t cs fields AS THEY APPEAR INSIDE A PACKET !
+uint16_t pkt_SubtractInetChecksum(uint16_t checksum, uint16_t csToSubtract);
+
+// checksum and csToAdd are two uint16_t cs fields AS THEY APPEAR INSIDE A PACKET !
+uint16_t pkt_AddInetChecksum(uint16_t checksum, uint16_t csToAdd);
+
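+// Usage sketch (illustrative): incrementally patching a checksum after rewriting a
+// single 16-bit word of a packet, instead of recomputing it over the whole header.
+// 'ip' and 'new_id' are hypothetical names; IPHeader.inl's updateIpSrc()/updateTotalLength()
+// follow the same pattern:
+//
+//     uint16_t old_word = ip->myId;              // value as it sits in the packet
+//     ip->myId = PKT_HTONS(new_id);
+//     ip->myChecksum = pkt_UpdateInetChecksum(ip->myChecksum, old_word, ip->myId);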
+
+struct Tunnels
+{
+ enum Type
+ {
+ // basic tunnels have a bit each. They can be bitwise OR ed.
+ // WARNING: We use this number as a Uint8 in some places - don't go over 1 byte !!!
+
+ //Another warning: DO NOT change the values of these symbols, unless you have permission from everyone who
+ //uses them. These values are externally exposed through CmdlTunnel interfaces, and therefore JRT relies on these
+ //specific values. (Assi - Jan 2006)
+ Empty = 0x00,
+ UNTUNNELED_Marker = 0x01,
+ VLAN = 0x01,
+ MPLS = 0x02,
+ L2TP = 0x04,
+ IPinIP = 0x08,
+ GRE = 0x10,
+ Ethernet = 0x20,//This is not tunneled. It's an exception, until all these values are changed.
+ AnyIP = 0x40,
+ AnyTunneled = 0x7f,
+ TUNNELED_Marker = AnyTunneled,//Any sum of the values written above mustn't reach this value
+ Unrecognized = 0x80,
+ GTP = 0x81
+ };
+};
+
+
+
+
+#endif
+
diff --git a/src/common/Network/Packet/EthernetHeader.cpp b/src/common/Network/Packet/EthernetHeader.cpp
new file mode 100755
index 00000000..2c6891a1
--- /dev/null
+++ b/src/common/Network/Packet/EthernetHeader.cpp
@@ -0,0 +1,32 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include "EthernetHeader.h"
+
+void EthernetHeader::dump(FILE* fd)
+{
+ fprintf(fd,"EthernetHeader\n");
+ fprintf(fd,"Destination : ");
+ myDestination.dump(fd);
+ fprintf(fd,"Source : ");
+ mySource.dump(fd);
+ fprintf(fd,"Protocol : 0x%04x \n",getNextProtocol());
+}
+
+
+
+
diff --git a/src/common/Network/Packet/EthernetHeader.h b/src/common/Network/Packet/EthernetHeader.h
new file mode 100755
index 00000000..87d1ed91
--- /dev/null
+++ b/src/common/Network/Packet/EthernetHeader.h
@@ -0,0 +1,98 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef _ETHERNET_HEADER_H_
+#define _ETHERNET_HEADER_H_
+
+#include "PacketHeaderBase.h"
+#include "MacAddress.h"
+
+
+/**
+ * This class encapsulates an ethernet header.
+ * It has fields that are equivalent to the ethernet header fields.
+ * The data is saved in network byte order, and therefore the class can be used to create a packet in a buffer
+ * and send it over the network.
+ */
+class EthernetHeader
+{
+public:
+
+ struct Protocol
+ {
+ enum Type
+ {
+ IP = 0x0800,
+ VLAN = 0x8100,
+ ARP = 0x0806,
+ IPv6 = 0x86DD,
+ MPLS_Unicast = 0x8847,
+ MPLS_Multicast = 0x8848,
+ PPP = 0x880b,
+ PPPoED = 0x8863,
+ PPPoES = 0x8864
+ };
+ };
+
+
+////////////////////////////////////////////////////////////////////////////////////////
+// Common Header Interface
+////////////////////////////////////////////////////////////////////////////////////////
+
+public:
+ //Empty constructor
+ EthernetHeader() :
+ myProtocol(0)
+ {}
+ //Construct an EthernetHeader object from a given buffer, ordered in Network byte order
+ inline EthernetHeader(uint8_t* packet);
+
+ inline uint8_t* getPointer (){return (uint8_t*)this;}
+ static inline uint32_t getSize (){return (uint32_t)sizeof(EthernetHeader);}
+
+ // Get dest MAC pointer
+ MacAddress *getDestMacP() { return &myDestination; }
+
+ // Get source MAC pointer
+ MacAddress *getSrcMacP() { return &mySource; }
+
+ //Returns the next protocol, in host byte order
+ inline uint16_t getNextProtocol ();
+
+ //Set the next protocol value. The argument is received in host byte order.
+ inline void setNextProtocol (uint16_t);
+
+ // Retrieve VLAN fields for tag and protocol information
+ inline uint16_t getVlanTag ();
+ inline uint16_t getVlanProtocol ();
+
+ void dump (FILE* fd);
+
+
+public:
+ MacAddress myDestination;
+ MacAddress mySource;
+private:
+ uint16_t myProtocol;
+ uint16_t myVlanTag;
+ uint16_t myVlanProtocol;
+
+};
+
+#include "EthernetHeader.inl"
+
+#endif //_ETHERNET_HEADER_H_
+
diff --git a/src/common/Network/Packet/EthernetHeader.inl b/src/common/Network/Packet/EthernetHeader.inl
new file mode 100755
index 00000000..0d6e32c9
--- /dev/null
+++ b/src/common/Network/Packet/EthernetHeader.inl
@@ -0,0 +1,39 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+inline EthernetHeader::EthernetHeader(uint8_t* packet)
+{
+ *this = *((EthernetHeader*)packet);
+}
+inline void EthernetHeader::setNextProtocol(uint16_t argProtocol)
+{
+ myProtocol = PKT_HTONS(argProtocol);
+}
+
+inline uint16_t EthernetHeader::getNextProtocol()
+{
+ return( PKT_HTONS(myProtocol));
+}
+
+inline uint16_t EthernetHeader::getVlanProtocol()
+{
+ return( PKT_HTONS(myVlanProtocol));
+}
+
+inline uint16_t EthernetHeader::getVlanTag()
+{
+ return( PKT_HTONS(myVlanTag));
+}
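+
+/* Note (based on the field layout in EthernetHeader.h): getVlanTag()/getVlanProtocol()
+   read the two 16-bit words that follow the EtherType, so they are only meaningful when
+   getNextProtocol() == EthernetHeader::Protocol::VLAN (0x8100), e.g.
+
+       EthernetHeader *eth = (EthernetHeader *)pkt;   // 'pkt' is a hypothetical raw buffer
+       if (eth->getNextProtocol() == EthernetHeader::Protocol::VLAN) {
+           uint16_t tci         = eth->getVlanTag();       // 802.1Q tag control info
+           uint16_t inner_proto = eth->getVlanProtocol();  // encapsulated EtherType
+       }
+*/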
diff --git a/src/common/Network/Packet/IPHeader.cpp b/src/common/Network/Packet/IPHeader.cpp
new file mode 100755
index 00000000..3b90a1aa
--- /dev/null
+++ b/src/common/Network/Packet/IPHeader.cpp
@@ -0,0 +1,71 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "IPHeader.h"
+
+
+char * IPHeader::Protocol::interpretIpProtocolName(uint8_t argType)
+{
+ switch (argType)
+ {
+ case TCP:
+ return (char *)"TCP";
+ break;
+ case UDP:
+ return (char *)"UDP";
+ break;
+ case IP:
+ return (char *)"IP";
+ break;
+ case ICMP:
+ return (char *)"ICMP";
+ break;
+ case ESP:
+ return (char *)"ESP";
+ break;
+ case AH:
+ return (char *)"AH";
+ break;
+ case IGMP:
+ return (char *)"IGMP";
+ break;
+ default:
+ return (char *)NULL;
+ break;
+ }
+}
+
+void IPHeader::dump(FILE *fd)
+{
+ fprintf(fd, "\nIPHeader");
+ fprintf(fd, "\nSource 0x%.8lX, Destination 0x%.8lX, Protocol 0x%.1X",
+ getSourceIp(), getDestIp(), getProtocol());
+ fprintf(fd, "\nTTL : %d, Id : 0x%.2X, Ver %d, Header Length %d, Total Length %d",
+ getTimeToLive(), getId(), getVersion(), getHeaderLength(), getTotalLength());
+ if(isFragmented())
+ {
+ fprintf(fd,"\nIsFirst %d, IsMiddle %d, IsLast %d, Offset %d",
+ isFirstFragment(), isMiddleFragment(), isLastFragment(), getFragmentOffset());
+ }
+ else
+ {
+ fprintf(fd, "\nDont fragment %d", isDontFragment());
+ }
+ fprintf(fd, "\n");
+}
+
+
+
diff --git a/src/common/Network/Packet/IPHeader.h b/src/common/Network/Packet/IPHeader.h
new file mode 100755
index 00000000..5dfd03d8
--- /dev/null
+++ b/src/common/Network/Packet/IPHeader.h
@@ -0,0 +1,197 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef _IP_HEADER_H_
+#define _IP_HEADER_H_
+
+#include "PacketHeaderBase.h"
+
+#define IPV4_HDR_LEN 20
+
+class IPHeader
+{
+
+public:
+ IPHeader()
+ {
+ myVer_HeaderLength = 0; // must initialize it, because setHeaderLength/setVersion only mask bits into it (found by Purify)
+ setDestIp(0xDEADBEEF);
+ setSourceIp(0xBEEFDEAD);
+ setProtocol(0xDD);
+ };
+
+ IPHeader (uint32_t argSource,
+ uint32_t argDestinaction,
+ uint8_t argProtocol)
+ {
+ myVer_HeaderLength = 0; // must initialize it, because setHeaderLength/setVersion only mask bits into it
+ setDestIp(argDestinaction);
+ setSourceIp(argSource);
+ setProtocol(argProtocol);
+ };
+
+
+ struct Protocol
+ {
+ enum Type
+ {
+ IP = 0x04 ,
+ TCP = 0x06 ,
+ UDP = 0x11 ,
+ ICMP = 0x01 ,
+ IGMP = 0x02,
+ ESP = 0x32,
+ AH = 0x33,
+ GRE = 0x2F,
+ IPV6_ICMP = 0x3A,
+ IPV6_NONXT = 0x3B,
+ };
+ //get the IP-Protocol name by protocol number
+ static char * interpretIpProtocolName(uint8_t argType);
+ };
+
+ enum
+ {
+ DefaultSize = 20
+ };
+
+
+public:
+
+ inline uint8_t getVersion ();
+ inline void setVersion (uint8_t);
+
+ /**
+ * Return the header length in bytes
+ *
+ * @return
+ */
+ inline uint8_t getHeaderLength ();
+
+ /**
+ * Receives the header length in bytes and sets it
+ * appropriately in the packet
+ */
+ inline void setHeaderLength (uint8_t);
+
+ inline uint8_t getTOS ();
+ inline void setTOS (uint8_t);
+
+ inline uint16_t getTotalLength ();
+ inline void setTotalLength (uint16_t);
+
+ inline uint16_t getId ();
+ inline void setId (uint16_t);
+
+ inline uint16_t getFragmentOffset ();// return the result in bytes
+ inline bool isFirstFragment ();
+ inline bool isMiddleFragment ();
+ inline bool isFragmented ();
+ inline bool isMoreFragments ();
+ inline bool isDontFragment ();
+ inline bool isLastFragment ();
+ inline bool isNotFirstFragment ();
+ inline void setFragment (uint16_t argOffset,
+ bool argMoreFrag,
+ bool argDontFrag);
+
+ inline uint8_t getTimeToLive (void);
+ inline void setTimeToLive (uint8_t);
+
+ inline uint8_t getProtocol (void);
+ inline void setProtocol (uint8_t);
+
+ inline uint32_t getSourceIp ();
+ inline void setSourceIp (uint32_t);
+
+ inline uint32_t getDestIp ();
+ inline void setDestIp (uint32_t);
+
+ bool isMulticast ();
+ bool isBroadcast ();
+
+ inline uint16_t getChecksum ();
+ bool isChecksumOK ();
+
+public:
+
+ inline void updateTos (uint8_t newTos);
+ inline void updateTotalLength (uint16_t newlen);
+ inline void updateIpSrc(uint32_t ipsrc);
+ inline void updateIpDst(uint32_t ipsrc);
+
+
+
+ inline void updateCheckSum ();
+ inline void updateCheckSum2(uint8_t* data1, uint16_t len1, uint8_t* data2 , uint16_t len2);
+
+ inline void swapSrcDest ();
+
+////////////////////////////////////////////////////////////////////////////////////////
+// Common Header Interface
+////////////////////////////////////////////////////////////////////////////////////////
+
+public:
+ inline uint8_t* getPointer (){return (uint8_t*)this;}
+ inline uint32_t getSize (){return getHeaderLength();}
+
+ inline uint8_t getNextProtocol ();
+
+ void dump (FILE* fd);
+
+ inline uint8_t* getOption (){ return ((uint8_t*)&myOption[0]); }
+ inline uint8_t getOptionLen() { return ( getHeaderLength() - DefaultSize ); }
+
+
+public:
+ uint8_t myVer_HeaderLength;
+ uint8_t myTos;
+ uint16_t myLength;
+
+ uint16_t myId;
+ uint16_t myFrag;
+
+ uint8_t myTTL;
+ uint8_t myProtocol;
+ uint16_t myChecksum;
+
+ uint32_t mySource;
+ uint32_t myDestination;
+ uint32_t myOption[1];
+};
+
+
+class IPPseudoHeader
+{
+public:
+ uint32_t m_source_ip;
+ uint32_t m_dest_ip;
+ uint8_t m_zero;
+ uint8_t m_protocol;
+ uint16_t m_length;
+public:
+ inline uint8_t* getPointer(){return (uint8_t*)this;}
+
+ inline uint32_t getSize();
+
+ inline uint16_t inetChecksum();
+};
+
+
+#include "IPHeader.inl"
+
+#endif
+
diff --git a/src/common/Network/Packet/IPHeader.inl b/src/common/Network/Packet/IPHeader.inl
new file mode 100755
index 00000000..e7b87f06
--- /dev/null
+++ b/src/common/Network/Packet/IPHeader.inl
@@ -0,0 +1,290 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <common/BigEndianBitManipulation.h>
+
+
+inline void IPHeader::setVersion(uint8_t argVersion)
+{
+ setMaskBit8(myVer_HeaderLength, 0, 3, argVersion);
+}
+
+inline uint8_t IPHeader::getVersion()
+{
+ return getMaskBit8(myVer_HeaderLength, 0, 3);
+}
+
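+// the IHL field counts 32-bit words; getHeaderLength()/setHeaderLength() below convert
+// to/from bytes with the '<< 2' / '>> 2'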
+inline uint8_t IPHeader::getHeaderLength()
+{
+ return (getMaskBit8(myVer_HeaderLength, 4, 7) << 2);
+}
+
+inline void IPHeader::setHeaderLength(uint8_t argLength)
+{
+ setMaskBit8(myVer_HeaderLength, 4, 7, (argLength>>2));
+}
+
+inline uint8_t IPHeader::getNextProtocol()
+{
+ return myProtocol;
+}
+
+//--------------------------------
+
+inline void IPHeader::setTOS(uint8_t argTOS)
+{
+ myTos = argTOS;
+}
+
+inline uint8_t IPHeader::getTOS()
+{
+ return myTos;
+}
+
+//--------------------------------
+// length of ip packet
+inline void IPHeader::setTotalLength(uint16_t argLength)
+{
+ myLength = PKT_NTOHS(argLength);
+}
+
+inline uint16_t IPHeader::getTotalLength()
+{
+ return(PKT_NTOHS(myLength));
+}
+
+//--------------------------------
+inline uint16_t IPHeader::getId()
+{
+ return(PKT_NTOHS(myId));
+}
+
+inline void IPHeader::setId(uint16_t argID)
+{
+ myId = PKT_NTOHS(argID);
+}
+
+//--------------------------------
+
+uint16_t IPHeader::getFragmentOffset()
+{
+ uint16_t theFrag = (PKT_NTOHS(myFrag));
+ return ((theFrag & 0x1FFF) << 3);
+ //return (getMaskBit16(theFrag, 3, 15) << 3);//The field is in 8 byte units
+}
+
+bool IPHeader::isMoreFragments()
+{
+ uint16_t theFrag = (PKT_NTOHS(myFrag));
+ if(theFrag & 0x2000)
+ //if(getMaskBit16(theFrag, 2, 2) == 1)
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
+bool IPHeader::isDontFragment()
+{
+ uint16_t theFrag = (PKT_NTOHS(myFrag));
+ if(theFrag & 0x4000)
+ //if(getMaskBit16(theFrag, 1, 1) == 1)
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
+inline void IPHeader::setFragment (uint16_t argOffset ,
+ bool argMoreFrag,
+ bool argDontFrag)
+{
+ uint16_t theFragmentWord = 0;
+ setMaskBit16(theFragmentWord, 3, 15, argOffset);
+ setMaskBit16(theFragmentWord, 1, 1, argDontFrag ? 1 : 0);
+ setMaskBit16(theFragmentWord, 2, 2, argMoreFrag ? 1 : 0);
+ myFrag = (PKT_NTOHS(theFragmentWord));
+}
+
+inline bool IPHeader::isFirstFragment()
+{
+ return((getFragmentOffset() == 0) && (isMoreFragments() == true));
+}
+
+inline bool IPHeader::isMiddleFragment()
+{
+ return((getFragmentOffset() != 0) && (isMoreFragments() == true));
+}
+
+inline bool IPHeader::isFragmented()
+{
+ return((getFragmentOffset() != 0) || (isMoreFragments() == true));
+}
+
+inline bool IPHeader::isLastFragment()
+{
+ return((getFragmentOffset() != 0) && (isMoreFragments() == false));
+}
+
+//return true if this is fragment but not first
+inline bool IPHeader::isNotFirstFragment()
+{
+ return((isFragmented() == true) && (isFirstFragment() == false));
+
+}
+
+
+//--------------------------------
+inline uint8_t IPHeader::getTimeToLive()
+{
+ return(myTTL);
+}
+inline void IPHeader::setTimeToLive(uint8_t argTTL)
+{
+ myTTL = argTTL;
+}
+
+//--------------------------------
+inline uint8_t IPHeader::getProtocol()
+{
+ return (myProtocol);
+}
+
+inline void IPHeader::setProtocol(uint8_t argProtocol)
+{
+ myProtocol = argProtocol;
+}
+//--------------------------------
+
+
+inline uint32_t IPHeader::getSourceIp()
+{
+ return (PKT_NTOHL(mySource));
+}
+
+inline void IPHeader::setSourceIp(uint32_t argSourceAddress)
+{
+ mySource = PKT_NTOHL(argSourceAddress);
+}
+
+//--------------------------------
+inline uint32_t IPHeader::getDestIp()
+{
+ return (PKT_NTOHL(myDestination));
+}
+
+inline void IPHeader::setDestIp(uint32_t argDestAddress)
+{
+ myDestination = PKT_NTOHL(argDestAddress);
+}
+
+//--------------------------------
+inline bool IPHeader::isMulticast()
+{
+ return((getDestIp() & 0xf0) == 0xe0);
+}
+
+inline bool IPHeader::isBroadcast()
+{
+ return(getDestIp() == 0xffffffff);
+}
+//--------------------------------
+
+inline uint16_t IPHeader::getChecksum()
+{
+ return PKT_NTOHS(myChecksum);
+}
+
+inline bool IPHeader::isChecksumOK()
+{
+ uint16_t theChecksum = pkt_InetChecksum(getPointer(),(uint16_t)getSize() );
+ return(theChecksum == 0);
+}
+
+
+inline void IPHeader::updateTos(uint8_t newTos)
+{
+ uint16_t oldWord = *((uint16_t *)(getPointer()));
+ myTos = newTos;
+ uint16_t newWord = *((uint16_t *)(getPointer()));
+ myChecksum = pkt_UpdateInetChecksum(myChecksum,oldWord,newWord);
+}
+
+
+
+
+inline void IPHeader::updateIpDst(uint32_t ipsrc){
+ uint32_t old = myDestination;
+ uint32_t _new_src = PKT_NTOHL(ipsrc);
+
+ myChecksum = pkt_UpdateInetChecksum(myChecksum,(old&0xffff0000)>>16,((_new_src&0xffff0000)>>16));
+ myChecksum = pkt_UpdateInetChecksum(myChecksum,(old&0xffff),(_new_src&0xffff));
+ myDestination=_new_src;
+}
+
+inline void IPHeader::updateIpSrc(uint32_t ipsrc){
+ uint32_t old = mySource;
+ uint32_t _new_src = PKT_NTOHL(ipsrc);
+
+ myChecksum = pkt_UpdateInetChecksum(myChecksum,(old&0xffff0000)>>16,((_new_src&0xffff0000)>>16));
+ myChecksum = pkt_UpdateInetChecksum(myChecksum,(old&0xffff),(_new_src&0xffff));
+ mySource=_new_src;
+}
+
+
+inline void IPHeader::updateTotalLength(uint16_t newlen)
+{
+ uint16_t oldLen = myLength;
+ myLength = PKT_NTOHS(newlen);
+ myChecksum = pkt_UpdateInetChecksum(myChecksum,oldLen,myLength);
+}
+
+inline void IPHeader::updateCheckSum()
+{
+ myChecksum = 0;
+ myChecksum = pkt_InetChecksum(getPointer(), (uint16_t)getSize());
+}
+
+inline void IPHeader::updateCheckSum2(uint8_t* data1, uint16_t len1, uint8_t* data2 , uint16_t len2)
+{
+ myChecksum = 0;
+ myChecksum = pkt_InetChecksum(data1, len1, data2, len2);
+}
+
+inline void IPHeader::swapSrcDest()
+{
+ uint32_t tmp = myDestination;
+ myDestination = mySource;
+ mySource = tmp;
+}
+
+inline uint32_t IPPseudoHeader::getSize()
+{
+ return sizeof(IPPseudoHeader);
+}
+inline uint16_t IPPseudoHeader::inetChecksum()
+{
+ return(pkt_InetChecksum(getPointer(), (uint16_t)getSize()));
+}
+
+
+
diff --git a/src/common/Network/Packet/IPv6Header.cpp b/src/common/Network/Packet/IPv6Header.cpp
new file mode 100755
index 00000000..cf1f632a
--- /dev/null
+++ b/src/common/Network/Packet/IPv6Header.cpp
@@ -0,0 +1,71 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "IPv6Header.h"
+
+
+char * IPv6Header::Protocol::interpretIpProtocolName(uint8_t argType)
+{
+ switch (argType)
+ {
+ case TCP:
+ return (char *)"TCP";
+ break;
+ case UDP:
+ return (char *)"UDP";
+ break;
+ case IP:
+ return (char *)"IP";
+ break;
+ case ICMP:
+ return (char *)"ICMP";
+ break;
+ case ESP:
+ return (char *)"ESP";
+ break;
+ case AH:
+ return (char *)"AH";
+ break;
+ case IGMP:
+ return (char *)"IGMP";
+ break;
+ default:
+ return (char *)NULL;
+ break;
+ }
+}
+
+void IPv6Header::dump(FILE *fd)
+{
+    fprintf(fd, "\nIPv6Header");
+    fprintf(fd, "\nVersion %d, Traffic Class 0x%.2X, Flow Label 0x%.5X",
+            getVersion(), getTrafficClass(), getFlowLabel());
+    fprintf(fd, "\nPayload Length %d, Next Header %d, Hop Limit %d",
+            getPayloadLen(), getNextHdr(), getHopLimit());
+    fprintf(fd, "\n");
+}
+
+
+
diff --git a/src/common/Network/Packet/IPv6Header.h b/src/common/Network/Packet/IPv6Header.h
new file mode 100755
index 00000000..8a246359
--- /dev/null
+++ b/src/common/Network/Packet/IPv6Header.h
@@ -0,0 +1,142 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef _IPV6_HEADER_H_
+#define _IPV6_HEADER_H_
+
+#include "PacketHeaderBase.h"
+
+#define IPV6_16b_ADDR_GROUPS 8
+#define IPV6_16b_ADDR_GROUPS_MSB 6
+#define IPV6_HDR_LEN 40
+static uint16_t default_ipv6[8] = { 0xDEAD, 0xBEEF,
+ 0xDEAD, 0xBEEF,
+ 0xDEAD, 0xBEEF,
+ 0xDEAD, 0xBEEF };
+
+
+class IPv6Header
+{
+
+public:
+ IPv6Header()
+ {
+ setDestIpv6(default_ipv6);
+ setSourceIpv6(default_ipv6);
+ setTrafficClass(0xDD);
+ };
+
+ IPv6Header (uint16_t *argSource,
+ uint16_t *argDestinaction,
+ uint8_t argTrafficClass)
+ {
+ setDestIpv6(argDestinaction);
+ setSourceIpv6(argSource);
+ setTrafficClass(argTrafficClass);
+ };
+
+ enum
+ {
+ DefaultSize = 40
+ };
+
+
+public:
+
+ inline uint8_t getVersion ();
+ inline void setVersion (uint8_t);
+
+ inline uint8_t getHeaderLength (){return (IPV6_HDR_LEN);}
+
+ inline uint16_t getTrafficClass ();
+ inline void setTrafficClass (uint16_t);
+
+ inline uint32_t getFlowLabel ();
+ inline void setFlowLabel (uint32_t);
+
+ inline uint16_t getPayloadLen ();
+ inline void setPayloadLen (uint16_t);
+
+ inline uint8_t getNextHdr ();
+ inline void setNextHdr (uint8_t);
+
+ inline uint8_t getHopLimit ();
+ inline void setHopLimit (uint8_t);
+
+ inline void getSourceIpv6 (uint16_t *);
+ inline void setSourceIpv6 (uint16_t *);
+
+ inline void getDestIpv6 (uint16_t *);
+ inline void setDestIpv6 (uint16_t *);
+
+public:
+
+ inline void updateTrafficClass(uint8_t newclass);
+ inline void updatePayloadLength(uint16_t newlen);
+ inline void updateIpv6Src(uint16_t *ipsrc);
+ inline void updateIpv6Dst(uint16_t *ipdst);
+ inline void updateMSBIpv6Src(uint16_t *ipsrc);
+ inline void updateMSBIpv6Dst(uint16_t *ipdst);
+ inline void updateLSBIpv6Src(uint32_t ipsrc);
+ inline void updateLSBIpv6Dst(uint32_t ipdst);
+
+ inline void swapSrcDest();
+
+////////////////////////////////////////////////////////////////////////////////////////
+// Common Header Interface
+////////////////////////////////////////////////////////////////////////////////////////
+
+public:
+ inline uint8_t* getPointer (){return (uint8_t*)this;}
+ inline uint32_t getSize (){return getHeaderLength();}
+
+ void dump (FILE* fd);
+
+
+public:
+ uint32_t myVer_TrafficClass_FlowLabel;
+
+ uint16_t myPayloadLen;
+ uint8_t myNextHdr;
+ uint8_t myHopLimit;
+
+ uint16_t mySource[IPV6_16b_ADDR_GROUPS];
+ uint16_t myDestination[IPV6_16b_ADDR_GROUPS];
+ uint32_t myOption[1];
+};
+
+
+class IPv6PseudoHeader
+{
+public:
+ uint32_t m_source_ip;
+ uint32_t m_dest_ip;
+ uint8_t m_zero;
+ uint8_t m_protocol;
+ uint16_t m_length;
+public:
+ inline uint8_t* getPointer(){return (uint8_t*)this;}
+
+ inline uint32_t getSize();
+
+ inline uint16_t inetChecksum();
+};
+
+
+#include "IPv6Header.inl"
+
+#endif
+
diff --git a/src/common/Network/Packet/IPv6Header.inl b/src/common/Network/Packet/IPv6Header.inl
new file mode 100755
index 00000000..5098bc3e
--- /dev/null
+++ b/src/common/Network/Packet/IPv6Header.inl
@@ -0,0 +1,182 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#include <common/BigEndianBitManipulation.h>
+
+//--------------------------------
+inline uint8_t IPv6Header::getVersion()
+{
+ return getMaskBit32(PKT_NTOHL(myVer_TrafficClass_FlowLabel), 0, 3);
+}
+
+inline void IPv6Header::setVersion(uint8_t argVersion)
+{
+ uint32_t myVer = PKT_HTONL(myVer_TrafficClass_FlowLabel);
+ setMaskBit32(myVer, 0, 3, argVersion);
+ myVer_TrafficClass_FlowLabel = PKT_NTOHL(myVer);
+}
+
+//--------------------------------
+inline uint16_t IPv6Header::getTrafficClass()
+{
+ return getMaskBit32(PKT_NTOHL(myVer_TrafficClass_FlowLabel), 4, 11);
+}
+
+inline void
+IPv6Header::setTrafficClass(uint16_t argTrafficClass)
+{
+ uint32_t myTrafficClass = PKT_HTONL(myVer_TrafficClass_FlowLabel);
+ setMaskBit32(myTrafficClass, 4, 11, argTrafficClass);
+ myVer_TrafficClass_FlowLabel = PKT_NTOHL(myTrafficClass);
+}
+
+//--------------------------------
+inline uint32_t IPv6Header::getFlowLabel()
+{
+ return getMaskBit32(PKT_NTOHL(myVer_TrafficClass_FlowLabel), 12, 31);
+}
+
+inline void IPv6Header::setFlowLabel(uint32_t argFlowLabel)
+{
+ uint32_t myFlowLabel = PKT_HTONL(myVer_TrafficClass_FlowLabel);
+ setMaskBit32(myFlowLabel, 12, 31, argFlowLabel);
+ myVer_TrafficClass_FlowLabel = PKT_NTOHL(myFlowLabel);
+}
+
+//--------------------------------
+inline uint16_t IPv6Header::getPayloadLen()
+{
+ return PKT_NTOHS(myPayloadLen);
+}
+
+inline void IPv6Header::setPayloadLen(uint16_t argPayloadLen)
+{
+ myPayloadLen = PKT_HTONS(argPayloadLen);
+}
+
+//--------------------------------
+inline uint8_t IPv6Header::getNextHdr()
+{
+ return myNextHdr;
+}
+
+inline void IPv6Header::setNextHdr(uint8_t argNextHdr)
+{
+ myNextHdr = argNextHdr;
+}
+
+//--------------------------------
+inline uint8_t IPv6Header::getHopLimit()
+{
+ return myHopLimit;
+}
+
+inline void IPv6Header::setHopLimit(uint8_t argHopLimit)
+{
+ myHopLimit = argHopLimit;
+}
+
+//--------------------------------
+inline void IPv6Header::getSourceIpv6(uint16_t *argSourceAddress)
+{
+ uint8_t i;
+ for (i=0; i<IPV6_16b_ADDR_GROUPS; i++) {
+ argSourceAddress[i] = PKT_NTOHS(mySource[i]);
+ }
+}
+
+inline void IPv6Header::setSourceIpv6(uint16_t *argSourceAddress)
+{
+ uint8_t i;
+ for (i=0; i<IPV6_16b_ADDR_GROUPS; i++) {
+ mySource[i] = PKT_HTONS(argSourceAddress[i]);
+ }
+}
+
+//--------------------------------
+inline void IPv6Header::getDestIpv6(uint16_t *argDestAddress)
+{
+ uint8_t i;
+ for (i=0; i<IPV6_16b_ADDR_GROUPS; i++) {
+ argDestAddress[i] = PKT_NTOHS(myDestination[i]);
+ }
+}
+
+inline void IPv6Header::setDestIpv6(uint16_t *argDestAddress)
+{
+ uint8_t i;
+ for (i=0; i<IPV6_16b_ADDR_GROUPS; i++) {
+ myDestination[i] = PKT_HTONS(argDestAddress[i]);
+ }
+}
+
+//--------------------------------
+inline void IPv6Header::updateIpv6Src(uint16_t *ipsrc)
+{
+ uint8_t i;
+ for (i=0; i<IPV6_16b_ADDR_GROUPS; i++) {
+ mySource[i] = PKT_HTONS(ipsrc[i]);
+ }
+}
+
+inline void IPv6Header::updateIpv6Dst(uint16_t *ipdst)
+{
+ uint8_t i;
+ for (i=0; i<IPV6_16b_ADDR_GROUPS; i++) {
+ myDestination[i] = PKT_HTONS(ipdst[i]);
+ }
+}
+
+//--------------------------------
+inline void IPv6Header::updateMSBIpv6Src(uint16_t *ipsrc)
+{
+ uint8_t i;
+ for (i=0; i<IPV6_16b_ADDR_GROUPS_MSB; i++) {
+ mySource[i] = PKT_HTONS(ipsrc[i]);
+ }
+}
+
+inline void IPv6Header::updateMSBIpv6Dst(uint16_t *ipdst)
+{
+ uint8_t i;
+ for (i=0; i<IPV6_16b_ADDR_GROUPS_MSB; i++) {
+ myDestination[i] = PKT_HTONS(ipdst[i]);
+ }
+}
+
+//--------------------------------
+inline void IPv6Header::updateLSBIpv6Src(uint32_t ipsrc)
+{
+ uint32_t *lsb = (uint32_t *)&mySource[6];
+ *lsb = PKT_HTONL(ipsrc);
+}
+
+inline void IPv6Header::updateLSBIpv6Dst(uint32_t ipdst)
+{
+ uint32_t *lsb = (uint32_t *)&myDestination[6];
+ *lsb = PKT_HTONL(ipdst);
+}
+
+//--------------------------------
+inline void IPv6Header::swapSrcDest()
+{
+ uint8_t i;
+ uint16_t tmp[IPV6_16b_ADDR_GROUPS];
+ for (i=0; i<IPV6_16b_ADDR_GROUPS; i++) {
+ tmp[i] = myDestination[i];
+ myDestination[i] = mySource[i];
+ mySource[i] = tmp[i];
+ }
+}
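
For orientation, a minimal sketch of how these accessors might be called (illustrative only, not part of the patch; it assumes IPV6_16b_ADDR_GROUPS is 8 and that, as elsewhere in this code, the header is simply cast over zeroed packet memory):

#include <string.h>
#include <stdint.h>
#include "IPv6Header.h"

static void build_ipv6(uint8_t *pkt /* at least sizeof(IPv6Header) bytes */)
{
    memset(pkt, 0, sizeof(IPv6Header));
    IPv6Header *ip6 = (IPv6Header *)pkt;   // header overlaid on raw packet memory

    ip6->setVersion(6);
    ip6->setTrafficClass(0);
    ip6->setFlowLabel(0x12345);
    ip6->setPayloadLen(20);
    ip6->setNextHdr(6);                    // 6 = TCP
    ip6->setHopLimit(64);

    // Addresses are passed as eight 16-bit groups in host order;
    // the setters store them in network order.
    uint16_t src[8] = {0x2001, 0x0db8, 0, 0, 0, 0, 0, 1};
    uint16_t dst[8] = {0x2001, 0x0db8, 0, 0, 0, 0, 0, 2};
    ip6->setSourceIpv6(src);
    ip6->setDestIpv6(dst);

    ip6->swapSrcDest();                    // e.g. when building the reverse direction
}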
diff --git a/src/common/Network/Packet/MacAddress.cpp b/src/common/Network/Packet/MacAddress.cpp
new file mode 100755
index 00000000..42331e50
--- /dev/null
+++ b/src/common/Network/Packet/MacAddress.cpp
@@ -0,0 +1,24 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "MacAddress.h"
+
+
+void MacAddress::dump(FILE *fd) const
+{
+
+}
+
diff --git a/src/common/Network/Packet/MacAddress.h b/src/common/Network/Packet/MacAddress.h
new file mode 100755
index 00000000..69272339
--- /dev/null
+++ b/src/common/Network/Packet/MacAddress.h
@@ -0,0 +1,130 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef _MAC_ADDRESS_H_
+#define _MAC_ADDRESS_H_
+
+#include "CPktCmn.h"
+
+
+class MacAddress
+{
+public:
+
+ MacAddress()
+ {
+ set(0xca, 0xfe, 0xde, 0xad, 0xbe, 0xef);
+ };
+
+ MacAddress(uint8_t a0,
+ uint8_t a1,
+ uint8_t a2,
+ uint8_t a3,
+ uint8_t a4,
+ uint8_t a5)
+ {
+ set(a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5);
+ };
+
+ MacAddress(uint8_t macAddr[6])
+ {
+ set(macAddr[0],
+ macAddr[1],
+ macAddr[2],
+ macAddr[3],
+ macAddr[4],
+ macAddr[5] );
+ };
+
+ void set(uint8_t a0,
+ uint8_t a1,
+ uint8_t a2,
+ uint8_t a3,
+ uint8_t a4,
+ uint8_t a5)
+ {
+ data[0]=a0;
+ data[1]=a1;
+ data[2]=a2;
+ data[3]=a3;
+ data[4]=a4;
+ data[5]=a5;
+ };
+
+ void set(uint8_t *argPtr) {
+ memcpy( data, argPtr, sizeof(data) );
+ }
+
+ void set(uint8_t *argPtr,uint8_t val) {
+ memcpy( data, argPtr, sizeof(data) );
+ data[5]=val;
+ }
+
+
+ bool isInvalidAddress() const
+ {
+ static MacAddress allZeros(0,0,0,0,0,0);
+ static MacAddress cafeDeadBeef;
+ return (*this == allZeros || *this == cafeDeadBeef);
+ }
+ void setIdentifierAsBogusAddr(uint32_t identifier)
+ {
+ *(uint32_t*)data = identifier;
+ }
+
+ uint32_t getIdentifierFromBogusAddr()
+ {
+ return *(uint32_t*)data;
+ }
+
+ bool operator == (const MacAddress& rhs) const
+ {
+ for(int i=0; i<6; i++)
+ {
+ if(data[i] != rhs.data[i])
+ return false;
+ }
+
+ return true;
+ }
+
+ uint8_t* GetBuffer()
+ {
+ return data;
+ }
+
+ const uint8_t* GetConstBuffer() const
+ {
+ return data;
+ }
+ void dump(FILE *fd) const;
+
+ void copyToArray(uint8_t *arrayToFill) const
+ {
+ ((uint32_t*)arrayToFill)[0] = ((uint32_t*)data)[0];//Copy first 32bit
+ ((uint16_t*)arrayToFill)[2] = ((uint16_t*)data)[2];//Copy last 16bit
+ }
+
+public:
+ uint8_t data[6];
+};
+
+#endif //_MAC_ADDRESS_H_
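
A short illustrative sketch (not part of the patch) of the MacAddress API declared above:

#include <stdio.h>
#include <stdint.h>
#include "MacAddress.h"

static void mac_example(void)
{
    MacAddress a(0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc);
    MacAddress b;                              // default-constructs to ca:fe:de:ad:be:ef

    if (!(a == b)) {
        printf("different addresses\n");
    }

    // Both the all-zero address and the ca:fe:de:ad:be:ef placeholder
    // are reported as invalid.
    printf("b invalid: %s\n", b.isInvalidAddress() ? "yes" : "no");

    uint8_t raw[6];
    a.copyToArray(raw);                        // copies the six bytes into a caller buffer
    (void)raw;
}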
diff --git a/src/common/Network/Packet/PacketHeaderBase.h b/src/common/Network/Packet/PacketHeaderBase.h
new file mode 100755
index 00000000..14a570c8
--- /dev/null
+++ b/src/common/Network/Packet/PacketHeaderBase.h
@@ -0,0 +1,50 @@
+#ifndef _PACKET_HEADER_BASE_H_
+#define _PACKET_HEADER_BASE_H_
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include "CPktCmn.h"
+
+/**
+ * This class should be the base class for all packet headers in the system.
+ * Its purpose is to obligate all headers to implement a common interface,
+ * e.g. providing a pointer to the header, its size, dumping itself, etc.
+ * Since all headers are cast over raw packet memory for performance reasons,
+ * we do not use C++ pure virtual functions. The obligation is instead
+ * enforced at link time: there is no implementation in the base class, so if
+ * the interface is used through a derived class that did not implement it,
+ * linking will fail.
+ */
+class PacketHeaderBase
+{
+public:
+ uint8_t* getPointer (){return (uint8_t*)this;};
+ uint32_t getSize ();
+
+ uint16_t getNextProtocol ();
+ void setNextProtocol (uint16_t);
+
+ void dump (FILE* fd);
+};
+
+enum
+{
+ TCPDefaultHeaderSize = 20,//TCP
+ UDPDefaultHeaderSize = 8 //UDP
+};
+
+#endif //_PACKET_HEADER_BASE_H_
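
The comment above describes the convention this interface encodes: headers are never constructed, they are overlaid on packet memory and accessed through getPointer()/getSize(). A minimal illustrative sketch (not part of the patch), using the UDPHeader added later in this change:

#include <stdio.h>
#include <stdint.h>
#include "UdpHeader.h"

static void walk_udp(uint8_t *pkt)
{
    // No allocation and no virtual dispatch: the header object is simply
    // laid over the packet bytes.
    UDPHeader *udp = (UDPHeader *)pkt;

    uint8_t *payload = udp->getPointer() + udp->getSize();  // common header interface
    udp->dump(stdout);
    (void)payload;
}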
diff --git a/src/common/Network/Packet/TCPHeader.cpp b/src/common/Network/Packet/TCPHeader.cpp
new file mode 100755
index 00000000..bf28db2e
--- /dev/null
+++ b/src/common/Network/Packet/TCPHeader.cpp
@@ -0,0 +1,35 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include "TcpHeader.h"
+
+
+
+void TCPHeader::dump(FILE *fd)
+{
+ fprintf(fd, "\nTCPHeader");
+ fprintf(fd, "\nSourcePort 0x%.4X, DestPort 0x%.4X",
+ getSourcePort(), getDestPort());
+ fprintf(fd, "\nSeqNum 0x%.8lX, AckNum 0x%.8lX, Window %d",
+ getSeqNumber(), getAckNumber(), getWindowSize());
+ fprintf(fd, "\nHeader Length : %d, Checksum : 0x%.4X",
+ getHeaderLength(), getChecksum());
+ fprintf(fd, "\nFlags : SYN - %d, FIN - %d, ACK - %d, URG - %d, RST - %d, PSH - %d",
+ getSynFlag(), getFinFlag(), getAckFlag(), getUrgentFlag(), getResetFlag(), getPushFlag());
+ fprintf(fd, "\nUrgent Offset %d", getUrgentOffset());
+ fprintf(fd, "\n");
+}
diff --git a/src/common/Network/Packet/TCPOptions.cpp b/src/common/Network/Packet/TCPOptions.cpp
new file mode 100755
index 00000000..d8312ccc
--- /dev/null
+++ b/src/common/Network/Packet/TCPOptions.cpp
@@ -0,0 +1,176 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include "TCPOptions.h"
+#include "IPHeader.h"
+
+bool TCPOptions::ourOneShotDump = false;
+TCPOptions::Counters TCPOptions::ourCounters = {0,0,0};
+
+TCPOptions::TCPOptions(uint8_t* argOptionsP, uint16_t argOptionsSize):
+myOptionsP (argOptionsP),
+myOptionsSize (argOptionsSize),
+myCurrentOptionP((Option*)argOptionsP)
+{
+ ;
+}
+
+bool TCPOptions::doesContain(TCPOptions::Kind::Val argKind)
+{
+ uint32_t optionTypeIdx = 0;// Used to verify that we're not in an infinite loop
+
+ // Walk the whole list of TLVs and compare each one's kind against the
+ // requested option kind (argKind).
+ do
+ {
+ Kind::Val theCurrentOptionKind;
+ uint8_t theOptionLength;
+ getCurrentOption(theCurrentOptionKind, theOptionLength);
+ optionTypeIdx++;
+ if( theCurrentOptionKind == argKind )
+ {
+ return true;
+ }
+ } while( (nextOption() == true) && (optionTypeIdx < MaxOptionsInPacket));
+
+ if(optionTypeIdx >= MaxOptionsInPacket)
+ {
+ ourCounters.itsPossibleEndlessLoop++;
+ }
+
+ // none of the TLVs matched the requested kind
+ return false;
+}
+
+//--------------------------------------------------------------------------------------------------
+// Set methods
+//--------------------------------------------------------------------------------------------------
+
+//--------------------------------------------------------------------------------------------------
+// Miscellaneous Operations
+//--------------------------------------------------------------------------------------------------
+
+void TCPOptions::dump(FILE* argOutputFile)
+{
+ fprintf(argOutputFile, "\nNot supported yet!\n");
+}
+
+void TCPOptions::get(TCPOptions::Counters& argCounters)
+{
+ argCounters.itsOptionsSizeMismatch = ourCounters.itsOptionsSizeMismatch;
+ argCounters.itsZeroLengthOptions = ourCounters.itsZeroLengthOptions;
+ argCounters.itsPossibleEndlessLoop = ourCounters.itsPossibleEndlessLoop;
+}
+
+uint8_t* TCPOptions::getCurrentOption (Kind::Val& argKind, uint8_t& argLength)
+{
+ argKind = (Kind::Val)myCurrentOptionP->theKind;
+ argLength = getCurrentOptionLength();
+ return(uint8_t*)myCurrentOptionP;
+}
+
+
+uint8_t TCPOptions::getCurrentOptionLength()
+{
+ if(myCurrentOptionP->theKind == Kind::NO_OP ||
+ myCurrentOptionP->theKind == Kind::EOL)
+ {
+ return 1;
+ }
+ else
+ {
+ return myCurrentOptionP->theLength;
+ }
+}
+
+
+// This is a patch for a temporary packet-dump facility
+//
+#define MAX_BUFFER_SIZE (8 * 2048)
+static char theDumpBuffer[MAX_BUFFER_SIZE] ={0};
+
+bool TCPOptions::isLastOption ()
+{
+ // Below is the total length of the options we've passed so far
+ uint32_t theCurrentOffset = (uint32_t)((uintptr_t)((char *)myCurrentOptionP - (char *)myOptionsP));
+
+ uint32_t theCurrentLength = getCurrentOptionLength();
+ if( (theCurrentOffset + theCurrentLength) >= myOptionsSize )
+ {
+ // debug check
+ if( (theCurrentOffset + theCurrentLength) > myOptionsSize )
+ {
+ ourCounters.itsOptionsSizeMismatch++;
+ if(ourOneShotDump == false)
+ {
+ ourOneShotDump = true;//disable for next time
+ uint8_t* theIPPacket = (uint8_t*)((uintptr_t)(myOptionsP - 40));
+ IPHeader* theIPHeader = (IPHeader*)theIPPacket;
+ uint16_t thePacketLength = theIPHeader->getTotalLength();
+
+ if(thePacketLength < (MAX_BUFFER_SIZE / 8))
+ {
+ int numWritten = sprintf(theDumpBuffer, "\nDump Size %u\n", thePacketLength);
+
+ for (uint32_t i = 0; i < thePacketLength; i++)
+ {
+ numWritten += sprintf(theDumpBuffer + numWritten,"%.2x ", theIPPacket[i]);
+
+ if ((i % 16) == 15)
+ {
+ numWritten += sprintf(theDumpBuffer + numWritten,"\n");
+ }
+ }
+ }
+ }
+ }
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
+//--------------------------------------------------------------------------------------------------
+// Set methods
+//--------------------------------------------------------------------------------------------------
+
+bool TCPOptions::nextOption ()
+{
+ if( isLastOption() == true )
+ {
+ return false;
+ }
+ else
+ {
+ uint32_t theCurrentOptionLength = getCurrentOptionLength();
+ if(theCurrentOptionLength > 0)
+ {
+ uint8_t* theCurrentP = (uint8_t*)myCurrentOptionP;
+ theCurrentP += theCurrentOptionLength;
+ myCurrentOptionP = (Option*)theCurrentP;
+ return true;
+ }
+ else
+ {
+ ourCounters.itsZeroLengthOptions++;
+ return false;
+ }
+ }
+}
+
diff --git a/src/common/Network/Packet/TCPOptions.h b/src/common/Network/Packet/TCPOptions.h
new file mode 100755
index 00000000..2133db5a
--- /dev/null
+++ b/src/common/Network/Packet/TCPOptions.h
@@ -0,0 +1,163 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#ifndef _TCP_OPTIONS_H_
+#define _TCP_OPTIONS_H_
+
+#include "PacketHeaderBase.h"
+
+
+class TCPOptions
+{
+
+public:
+
+//--------------------------------------------------------------------------------------------------
+// Typedef & Enum
+//--------------------------------------------------------------------------------------------------
+
+ struct Kind
+ {
+ enum Val
+ {
+ EOL = 0,
+ NO_OP = 1,
+ MSS = 2,
+ WIN_SCL = 3,
+ SACK_PER = 4,
+ SACK = 5,
+ TIME_STAMP = 8
+ };
+ };
+
+ enum
+ {
+ TimeStampSize = 12
+ };
+
+ enum
+ {
+ MaxOptionsInPacket = 64
+ };
+
+ struct Option
+ {
+ uint8_t theKind;
+ uint8_t theLength; //Not always valid, depends on theKind
+ uint8_t theData[1];//variable. [1] only for compilation
+ };
+
+ struct Counters
+ {
+ uint32_t itsOptionsSizeMismatch;
+ uint32_t itsZeroLengthOptions;
+ uint32_t itsPossibleEndlessLoop;
+ };
+
+
+//--------------------------------------------------------------------------------------------------
+// Constructor & Destructor
+//--------------------------------------------------------------------------------------------------
+
+ TCPOptions(uint8_t* argOptionsP, uint16_t argOptionsSize);
+
+//--------------------------------------------------------------------------------------------------
+// Utility methods
+//--------------------------------------------------------------------------------------------------
+
+ /**
+ * This method allows the user of this class to query whether
+ * the options at hand contain a specific option kind of
+ * interest.
+ *
+ * @return true if the specified option exist.
+ */
+ bool doesContain(Kind::Val);
+
+//--------------------------------------------------------------------------------------------------
+// Get methods
+//--------------------------------------------------------------------------------------------------
+
+
+ /**
+ * Returns a pointer to the current option.
+ * With the kind and length of it.
+ *
+ * @param argKind
+ * @param argLength
+ * @return
+ */
+ uint8_t* getCurrentOption (Kind::Val& argKind, uint8_t& argLength);
+
+ /**
+ * This method gives the length of the current option.
+ * Can vary since there are two options that are exceptions to
+ * the usual TLV definition.
+ *
+ * @return
+ */
+ uint8_t getCurrentOptionLength();
+
+ /**
+ * This is an internal method that verifies based on the current
+ * pointer, the total options length and the current options length,
+ * whether this is the last option.
+ *
+ * @return
+ */
+ bool isLastOption ();
+
+//--------------------------------------------------------------------------------------------------
+// Set methods
+//--------------------------------------------------------------------------------------------------
+
+ /**
+ * This method tells the object to advance one option ahead.
+ * i.e. to the next option.
+ *
+ * @return True if a next option exists.
+ * False if we are at the last option.
+ */
+ bool nextOption ();
+
+
+//--------------------------------------------------------------------------------------------------
+// Miscellaneous Operations
+//--------------------------------------------------------------------------------------------------
+
+ static void get (Counters&);
+
+ void dump(FILE* argOutputFile);
+
+
+private:
+
+//--------------------------------------------------------------------------------------------------
+// Data members
+//--------------------------------------------------------------------------------------------------
+
+ uint8_t* myOptionsP;
+ uint16_t myOptionsSize;
+
+ Option* myCurrentOptionP;
+
+ static Counters ourCounters;
+
+ static bool ourOneShotDump;
+};
+
+
+#endif // _TCP_OPTIONS_H_
+
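An illustrative sketch (not part of the patch) of walking a TCP options area with this class; optPtr/optLen are assumed to point at the options bytes of an already-parsed TCP header:

#include <stdio.h>
#include <stdint.h>
#include "TCPOptions.h"

static void scan_options(uint8_t *optPtr, uint16_t optLen)
{
    // Quick membership test. Note that doesContain() advances the internal
    // cursor, so a fresh object is used for the manual walk below.
    TCPOptions probe(optPtr, optLen);
    if (probe.doesContain(TCPOptions::Kind::TIME_STAMP)) {
        printf("timestamp option present\n");
    }

    // Manual walk over the TLV list.
    TCPOptions walker(optPtr, optLen);
    do {
        TCPOptions::Kind::Val kind;
        uint8_t length;
        walker.getCurrentOption(kind, length);
        printf("kind=%u len=%u\n", (unsigned)kind, (unsigned)length);
    } while (walker.nextOption());
}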
diff --git a/src/common/Network/Packet/TcpHeader.h b/src/common/Network/Packet/TcpHeader.h
new file mode 100755
index 00000000..e07df927
--- /dev/null
+++ b/src/common/Network/Packet/TcpHeader.h
@@ -0,0 +1,124 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#ifndef _TCP_HEADER_H_
+#define _TCP_HEADER_H_
+
+#include "PacketHeaderBase.h"
+
+
+class TCPHeader
+{
+
+public:
+ TCPHeader(){}
+
+ TCPHeader(uint16_t argSourcePort,
+ uint16_t argDestinationPort,
+ uint8_t argFlags,
+ uint32_t argSeqNum,
+ uint32_t argAckNum);
+
+ struct Flag
+ {
+ enum Type
+ {
+ FIN = 0x01,
+ SYN = 0x02,
+ RST = 0x04,
+ PSH = 0x08,
+ ACK = 0x10,
+ URG = 0x20
+ };
+ };
+
+ void setSourcePort (uint16_t);
+ uint16_t getSourcePort ();
+
+ void setDestPort (uint16_t);
+ uint16_t getDestPort ();
+
+ void setSeqNumber (uint32_t);
+ uint32_t getSeqNumber ();
+
+ void setAckNumber (uint32_t);
+ uint32_t getAckNumber ();
+
+ // header length is stored divided by 4 (i.e. in 32-bit words)
+ void setHeaderLength (uint8_t);
+ uint8_t getHeaderLength ();
+
+ void setFlag (uint8_t);
+ uint8_t getFlags ();
+
+ void setFinFlag (bool);
+ bool getFinFlag ();
+
+ void setSynFlag (bool);
+ bool getSynFlag ();
+
+ void setResetFlag (bool);
+ bool getResetFlag ();
+
+ void setPushFlag (bool);
+ bool getPushFlag ();
+
+ void setAckFlag (bool);
+ bool getAckFlag ();
+
+ void setUrgentFlag (bool);
+ bool getUrgentFlag ();
+
+ void setWindowSize (uint16_t);
+ uint16_t getWindowSize ();
+
+ void setChecksum (uint16_t);
+ uint16_t getChecksum ();
+
+ void setUrgentOffset (uint16_t);
+ uint16_t getUrgentOffset ();
+
+ uint32_t* getOptionPtr ();
+
+
+////////////////////////////////////////////////////////////////////////////////////////
+// Common Header Interface
+////////////////////////////////////////////////////////////////////////////////////////
+
+public:
+ uint8_t* getPointer (){return (uint8_t*)this;}
+ uint32_t getSize (){return getHeaderLength();}
+
+ uint16_t getNextProtocol ();
+ void setNextProtocol (uint16_t);
+
+ void dump (FILE* fd);
+
+private:
+ uint16_t mySourcePort;
+ uint16_t myDestinationPort;
+ uint32_t mySeqNum;
+ uint32_t myAckNum;
+ uint8_t myHeaderLength;
+ uint8_t myFlags;
+ uint16_t myWindowSize;
+ uint16_t myChecksum;
+ uint16_t myUrgentPtr;
+ uint32_t myOption[1];
+};
+
+#include "TcpHeader.inl"
+
+#endif //_TCP_HEADER_H_
diff --git a/src/common/Network/Packet/TcpHeader.inl b/src/common/Network/Packet/TcpHeader.inl
new file mode 100755
index 00000000..52364d06
--- /dev/null
+++ b/src/common/Network/Packet/TcpHeader.inl
@@ -0,0 +1,220 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <common/BigEndianBitManipulation.h>
+
+inline void TCPHeader::setSourcePort(uint16_t argSourcePort)
+{
+ mySourcePort = PKT_NTOHS(argSourcePort);
+}
+
+inline uint16_t TCPHeader::getSourcePort()
+{
+ return(PKT_NTOHS(mySourcePort));
+}
+
+inline void TCPHeader::setDestPort(uint16_t argDestinationPort)
+{
+ myDestinationPort = PKT_NTOHS(argDestinationPort);
+}
+
+inline uint16_t TCPHeader::getDestPort()
+{
+ return (PKT_NTOHS(myDestinationPort));
+}
+
+inline void TCPHeader::setSeqNumber(uint32_t argSeqNum)
+{
+ mySeqNum = PKT_NTOHL(argSeqNum);
+}
+
+inline uint32_t TCPHeader::getSeqNumber()
+{
+ return (PKT_NTOHL(mySeqNum));
+}
+
+inline void TCPHeader::setAckNumber(uint32_t argAckNum)
+{
+ myAckNum = PKT_NTOHL(argAckNum);
+}
+
+inline uint32_t TCPHeader::getAckNumber()
+{
+ return(PKT_NTOHL(myAckNum));
+}
+
+inline void TCPHeader::setHeaderLength(uint8_t argHeaderLength)
+{
+ setMaskBit8(myHeaderLength, 0, 3, argHeaderLength >> 2);
+}
+
+inline uint8_t TCPHeader::getHeaderLength()
+{
+ return getMaskBit8(myHeaderLength, 0, 3) << 2;
+}
+
+inline void TCPHeader::setFlag(uint8_t data)
+{
+ btSetMaskBit8(myFlags,5,0,data);
+}
+
+inline uint8_t TCPHeader::getFlags()
+{
+ return(myFlags & 0x3f);
+}
+
+inline void TCPHeader::setFinFlag(bool toSet)
+{
+ if(toSet)
+ {
+ myFlags |= Flag::FIN;
+ }
+ else
+ {
+ myFlags &= ~Flag::FIN;
+ }
+}
+
+inline bool TCPHeader::getFinFlag()
+{
+ return(getFlags() & Flag::FIN)?true:false;
+}
+
+inline void TCPHeader::setSynFlag(bool toSet)
+{
+ if(toSet)
+ {
+ myFlags|=Flag::SYN;
+ }
+ else
+ {
+ myFlags&=~Flag::SYN;
+ }
+}
+
+inline bool TCPHeader::getSynFlag()
+{
+ return(getFlags() & Flag::SYN)?true:false;
+}
+
+inline void TCPHeader::setResetFlag(bool toSet)
+{
+ if(toSet)
+ {
+ myFlags|=Flag::RST;
+ }
+ else
+ {
+ myFlags&=~Flag::RST;
+ }
+}
+
+inline bool TCPHeader::getResetFlag()
+{
+ return(getFlags() & Flag::RST)?true:false;
+}
+
+inline void TCPHeader::setPushFlag(bool toSet)
+{
+ if(toSet)
+ {
+ myFlags|=Flag::PSH;
+ }
+ else
+ {
+ myFlags&=~Flag::PSH;
+ }
+}
+
+inline bool TCPHeader::getPushFlag()
+{
+ return(getFlags() & Flag::PSH)?true:false;
+}
+
+inline void TCPHeader::setAckFlag(bool toSet)
+{
+ if(toSet)
+ {
+ myFlags|=Flag::ACK;
+ }
+ else
+ {
+ myFlags&=~Flag::ACK;
+ }
+}
+
+inline bool TCPHeader::getAckFlag()
+{
+ return(getFlags() & Flag::ACK)?true:false;
+}
+
+inline void TCPHeader::setUrgentFlag(bool toSet)
+{
+ if(toSet)
+ {
+ myFlags|=Flag::URG;
+ }
+ else
+ {
+ myFlags&=~Flag::URG;
+ }
+}
+
+inline bool TCPHeader::getUrgentFlag()
+{
+ return(getFlags() & Flag::URG)?true:false;
+}
+
+//---------------------------------------
+inline void TCPHeader::setWindowSize(uint16_t argWindowSize)
+{
+ myWindowSize = PKT_NTOHS(argWindowSize);
+}
+inline uint16_t TCPHeader::getWindowSize()
+{
+ return PKT_NTOHS(myWindowSize);
+}
+
+inline void TCPHeader::setChecksum(uint16_t argChecksum)
+{
+ myChecksum = PKT_NTOHS(argChecksum);
+
+}
+
+inline uint16_t TCPHeader::getChecksum()
+{
+ return PKT_NTOHS(myChecksum);
+}
+
+inline void TCPHeader::setUrgentOffset(uint16_t argUrgentOffset)
+{
+ myUrgentPtr = PKT_HTONS(argUrgentOffset);
+}
+
+inline uint16_t TCPHeader::getUrgentOffset()
+{
+ return PKT_NTOHS(myUrgentPtr);
+}
+
+inline uint32_t * TCPHeader::getOptionPtr()
+{
+ return(myOption);
+}
+
+
+
+
+
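An illustrative sketch (not part of the patch) of filling a TCP header in place with the setters above, assuming the 20-byte base header has been zeroed first (the flag and length setters modify existing bits):

#include <string.h>
#include <stdint.h>
#include "TcpHeader.h"

static void build_syn(uint8_t *buf /* at least 20 bytes */)
{
    memset(buf, 0, 20);                 // zero the base header before setting bit fields
    TCPHeader *tcp = (TCPHeader *)buf;

    tcp->setSourcePort(1025);
    tcp->setDestPort(80);
    tcp->setSeqNumber(0x11223344);
    tcp->setAckNumber(0);
    tcp->setHeaderLength(20);           // stored internally divided by 4 (5 words)
    tcp->setSynFlag(true);
    tcp->setWindowSize(0xffff);
    tcp->setChecksum(0);                // checksum is computed elsewhere
}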
diff --git a/src/common/Network/Packet/UDPHeader.cpp b/src/common/Network/Packet/UDPHeader.cpp
new file mode 100755
index 00000000..159e14fc
--- /dev/null
+++ b/src/common/Network/Packet/UDPHeader.cpp
@@ -0,0 +1,28 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "UdpHeader.h"
+
+
+void UDPHeader::dump(FILE *fd)
+{
+ fprintf(fd, "\nUDPHeader");
+ fprintf(fd, "\nSourcePort 0x%.4X, DestPort 0x%.4X",
+ getSourcePort(), getDestPort());
+ fprintf(fd, "\nLength : %d, Checksum : 0x%.4X",
+ getLength(), getChecksum());
+ fprintf(fd, "\n");
+}
diff --git a/src/common/Network/Packet/UdpHeader.h b/src/common/Network/Packet/UdpHeader.h
new file mode 100755
index 00000000..43d08fe1
--- /dev/null
+++ b/src/common/Network/Packet/UdpHeader.h
@@ -0,0 +1,86 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef _UDP_HEADER_H_
+#define _UDP_HEADER_H_
+
+#include "PacketHeaderBase.h"
+#include "IPHeader.h"
+
+class UDPHeader
+{
+
+public:
+ UDPHeader()
+ {
+ setDestPort(0xDEAD);
+ setSourcePort(0xBEEF);
+ setChecksum(0);
+ setLength(0);
+ }
+
+ UDPHeader(uint16_t argSourcePort,
+ uint16_t argDestinationPort)
+ {
+ setDestPort(argDestinationPort);
+ setSourcePort(argSourcePort);
+ }
+
+
+ inline void setSourcePort(uint16_t data);
+ inline uint16_t getSourcePort();
+
+ inline void setDestPort(uint16_t data);
+ inline uint16_t getDestPort();
+
+ inline void setLength(uint16_t data);
+ inline uint16_t getLength();
+
+ inline void setChecksum(uint16_t data);
+ inline uint16_t getChecksum();
+
+ inline void updateCheckSum(IPHeader *ipHeader);
+ inline bool isCheckSumOk(IPHeader *ipHeader);
+ inline uint16_t calcCheckSum(IPHeader *ipHeader);
+
+ inline void swapSrcDest();
+
+////////////////////////////////////////////////////////////////////////////////////////
+// Common Header Interface
+////////////////////////////////////////////////////////////////////////////////////////
+
+public:
+ inline uint8_t* getPointer (){return (uint8_t*)this;}
+ inline uint32_t getSize (){return 8;}
+
+ inline uint16_t getNextProtocol (){return getDestPort();};
+ inline void setNextProtocol (uint16_t argNextProtocol){setDestPort(argNextProtocol);};
+
+ void dump (FILE* fd);
+
+
+private:
+ uint16_t mySourcePort;
+ uint16_t myDestinationPort;
+ uint16_t myLength;
+ uint16_t myChecksum;
+};
+
+
+#include "UdpHeader.inl"
+
+#endif
+
diff --git a/src/common/Network/Packet/UdpHeader.inl b/src/common/Network/Packet/UdpHeader.inl
new file mode 100755
index 00000000..77afc193
--- /dev/null
+++ b/src/common/Network/Packet/UdpHeader.inl
@@ -0,0 +1,102 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+inline void UDPHeader::setSourcePort(uint16_t argSourcePort)
+{
+ mySourcePort = PKT_NTOHS(argSourcePort);
+}
+
+uint16_t UDPHeader::getSourcePort()
+{
+ return PKT_NTOHS(mySourcePort);
+}
+
+inline void UDPHeader::setDestPort(uint16_t argDestPort)
+{
+ myDestinationPort = PKT_NTOHS(argDestPort);
+}
+
+uint16_t UDPHeader::getDestPort()
+{
+ return PKT_NTOHS(myDestinationPort);
+}
+
+inline void UDPHeader::setLength(uint16_t argLength)
+{
+ myLength = PKT_NTOHS(argLength);
+}
+
+uint16_t UDPHeader::getLength()
+{
+ return PKT_NTOHS(myLength);
+}
+
+inline void UDPHeader::setChecksum(uint16_t argNewChecksum)
+{
+ myChecksum = PKT_NTOHS(argNewChecksum);
+}
+
+uint16_t UDPHeader::getChecksum()
+{
+ return PKT_NTOHS(myChecksum);
+}
+
+void UDPHeader::updateCheckSum(IPHeader *ipHeader)
+{
+ setChecksum(0);// must be here
+
+ myChecksum =calcCheckSum(ipHeader);
+}
+
+bool UDPHeader::isCheckSumOk(IPHeader *ipHeader)
+{
+ uint16_t theChecksum= PKT_NTOHS(calcCheckSum(ipHeader));
+
+ return(theChecksum == 0);
+}
+
+uint16_t UDPHeader::calcCheckSum(IPHeader *ipHeader)
+{
+ IPPseudoHeader pseudo;
+
+ uint16_t length= ipHeader->getTotalLength() - ipHeader->getHeaderLength();
+
+ pseudo.m_source_ip = PKT_NTOHL(ipHeader->getSourceIp());
+
+ pseudo.m_dest_ip = PKT_NTOHL(ipHeader->getDestIp());
+
+ pseudo.m_zero = 0;
+
+ pseudo.m_protocol = ipHeader->getProtocol();
+
+ pseudo.m_length = PKT_NTOHS(length);
+
+ uint16_t theChecksum = pkt_InetChecksum((uint8_t*)this,length);
+
+ theChecksum = pkt_AddInetChecksum(theChecksum,pseudo.inetChecksum());
+
+ return(theChecksum);
+}
+
+void UDPHeader::swapSrcDest()
+{
+ uint16_t tmp = myDestinationPort;
+ myDestinationPort = mySourcePort;
+ mySourcePort = tmp;
+}
+
+
+
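An illustrative sketch (not part of the patch) of the plain UDP setters; the checksum helpers are omitted here because they additionally need the surrounding IPHeader for the pseudo-header:

#include <string.h>
#include <stdint.h>
#include "UdpHeader.h"

static void build_udp(uint8_t *buf, uint16_t payload_len)
{
    memset(buf, 0, sizeof(UDPHeader));
    UDPHeader *udp = (UDPHeader *)buf;

    udp->setSourcePort(5000);
    udp->setDestPort(53);
    udp->setLength((uint16_t)(sizeof(UDPHeader) + payload_len)); // UDP length = header + payload
    udp->setChecksum(0);                 // 0 means "no checksum" for UDP over IPv4
    udp->swapSrcDest();                  // e.g. when turning a request into the reply
}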
diff --git a/src/common/Network/Packet/VLANHeader.cpp b/src/common/Network/Packet/VLANHeader.cpp
new file mode 100755
index 00000000..3bb48fdd
--- /dev/null
+++ b/src/common/Network/Packet/VLANHeader.cpp
@@ -0,0 +1,100 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "VLANHeader.h"
+
+
+/*
+ VLAN Header Fields
+ ------------------
+
+ 0 2 3 4 15
+ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ |Priority| CFI | Tag |
+ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ | Type |
+ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+*/
+
+
+void VLANHeader::dump (FILE* fd)
+{
+ fprintf(fd, "\nVLAN Header");
+ fprintf(fd, "\nTag %d (0x%.2X), Pri %d, CFI - %d, Next protocol - %d (0x%.2X)",
+ getTagID(), getTagID(), getTagUserPriorty(), getTagCFI(), getNextProtocolHostOrder(), getNextProtocolHostOrder());
+ fprintf(fd, "\n");
+}
+
+uint8_t VLANHeader::reconstructFromBuffer(uint8_t* destBuffer, uint8_t* srcBuffer)
+{
+ uint8_t type = srcBuffer[0];
+ uint8_t size = srcBuffer[1];
+ if((type != Tunnels::VLAN) || (size != sizeof(VLANHeader)))
+ {
+ // DBG_Error2(PACKET_DBG_TUNNEL_RECONSTRUCTION_ERROR,Tunnels::VLAN,size);
+ return 0;
+ }
+ memcpy(destBuffer,srcBuffer+2,sizeof(VLANHeader));
+ return size;
+}
+
+uint8_t VLANHeader::fillReconstructionBuffer(uint8_t* destBuffer, uint8_t* srcBuffer)
+{
+ destBuffer[0] = (uint8_t)Tunnels::VLAN;
+ destBuffer[1] = sizeof(VLANHeader);
+ memcpy(destBuffer+2,srcBuffer,sizeof(VLANHeader));
+ return sizeof(VLANHeader)+2;
+}
+
+uint8_t VLANHeader::fillReconstructionBuffer(uint8_t* destBuffer, VLANHeader& vHeader)
+{
+ destBuffer[0] = (uint8_t)Tunnels::VLAN;
+ destBuffer[1] = sizeof(VLANHeader);
+ vHeader.reconstructPkt(destBuffer+2);
+ return sizeof(VLANHeader)+2;
+}
+
+#if 0
+Status VLANHeader::parseAsText(uint8_t* srcBuffer, TextCollectorInterface &tc)
+{
+ uint8_t type = srcBuffer[0];
+ uint8_t size = srcBuffer[1];
+ if((type != Tunnels::VLAN) //The buffer doesn't contain valid information
+ || (size < sizeof(VLANHeader))) //The buffer isn't big enough
+ {
+ return FAIL;
+ }
+ tc << "VLAN: ";
+ uint32_t offset = 0;
+ while (offset < size)
+ {
+ VLANHeader vlanHeader;
+ vlanHeader.setFromPkt(&srcBuffer[2+offset]);
+
+ tc.printf("Tag %d (0x%.2X), Pri %d, CFI - %d, Next protocol - %d (0x%.2X)\n",
+ vlanHeader.getTagID(), vlanHeader.getTagID(), vlanHeader.getTagUserPriorty(), vlanHeader.getTagCFI(), vlanHeader.getNextProtocolHostOrder(), vlanHeader.getNextProtocolHostOrder());
+ offset += sizeof(VLANHeader);
+ }
+
+ return SUCCESS;
+}
+#endif
+
+
+
+
+
+
diff --git a/src/common/Network/Packet/VLANHeader.h b/src/common/Network/Packet/VLANHeader.h
new file mode 100755
index 00000000..cf6b2eff
--- /dev/null
+++ b/src/common/Network/Packet/VLANHeader.h
@@ -0,0 +1,107 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef _VLAN_HEADER_H_
+#define _VLAN_HEADER_H_
+
+#include "PacketHeaderBase.h"
+#include "EthernetHeader.h"
+
+
+/*
+ VLAN Header Fields
+ ------------------
+
+ 0 2 3 4 15
+ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ |Priority| CFI | Tag |
+ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ | Type |
+ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+*/
+/**
+ * This class encapsulates a VLAN header.
+ * It has fields that are equivalent to the VLAN header fields.
+ * The data is saved in network byte order, and therefore the class can be used to create a packet in a buffer
+ * and send it over the network.
+ */
+class VLANHeader
+{
+
+
+////////////////////////////////////////////////////////////////////////////////////////
+// Field Manipulation
+////////////////////////////////////////////////////////////////////////////////////////
+public:
+
+ // Sets the complete tag field without manipulating the sub-fields
+ void setVlanTag (uint16_t data);
+ uint16_t getVlanTag ();
+
+ void setTagUserPriorty (uint8_t data);
+ uint8_t getTagUserPriorty ();
+
+ bool getTagCFI ();
+ void setTagCFI (bool);
+
+ uint16_t getTagID ();
+ void setTagID (uint16_t);
+
+ void incrementTagID(uint16_t inc_value);
+
+ void setFromPkt (uint8_t* data);
+
+ uint8_t reconstructPkt (uint8_t* destBuff);
+
+
+////////////////////////////////////////////////////////////////////////////////////////
+// Common Interface
+////////////////////////////////////////////////////////////////////////////////////////
+
+public:
+ uint8_t* getPointer (){return (uint8_t*)this;}
+ uint32_t getSize (){return (uint32_t)sizeof(VLANHeader);}
+
+ uint16_t getNextProtocolNetOrder ();
+ uint16_t getNextProtocolHostOrder ();
+ void setNextProtocolFromNetOrder(uint16_t);
+ void setNextProtocolFromHostOrder(uint16_t);
+
+ void dump (FILE* fd);
+
+ static uint16_t bytesToSkip(uint8_t* base)
+ {
+ return sizeof(VLANHeader);
+ }
+
+ static uint8_t reconstructFromBuffer(uint8_t* destBuffer, uint8_t* srcBuffer);
+
+ static uint8_t fillReconstructionBuffer(uint8_t* destBuffer, uint8_t* srcBuffer);
+
+ static uint8_t fillReconstructionBuffer(uint8_t* destBuffer, VLANHeader& vHeader);
+
+
+ static uint32_t getMaxVlanTag() { return (1<<12) - 1 ; }
+
+public:
+ uint16_t myTag;
+ uint16_t myNextProtocol;
+};
+
+#include "VLANHeader.inl"
+
+#endif //_VLAN_HEADER_H_
+
diff --git a/src/common/Network/Packet/VLANHeader.inl b/src/common/Network/Packet/VLANHeader.inl
new file mode 100755
index 00000000..a1c584c1
--- /dev/null
+++ b/src/common/Network/Packet/VLANHeader.inl
@@ -0,0 +1,111 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <common/BigEndianBitManipulation.h>
+
+inline void VLANHeader::setVlanTag(uint16_t data)
+{
+ myTag = PKT_HTONS(data);
+}
+
+inline uint16_t VLANHeader::getVlanTag()
+{
+ return(PKT_HTONS(myTag));
+}
+
+inline void VLANHeader::setTagUserPriorty(uint8_t argUserPriority)
+{
+ uint16_t tempTag = myTag;
+ setMaskBit16(tempTag, 0, 2, argUserPriority);
+ myTag = tempTag;
+}
+
+inline uint8_t VLANHeader::getTagUserPriorty()
+{
+ return (uint8_t)(getMaskBit16(myTag, 0, 2));
+}
+
+
+inline void VLANHeader::setTagCFI(bool isSet)
+{
+ uint16_t tempTag = myTag;
+ setMaskBit16(tempTag, 3, 3, isSet? 1 : 0);
+ myTag = tempTag;
+}
+
+inline bool VLANHeader::getTagCFI()
+{
+ return (getMaskBit16(myTag, 3, 3) == 1);
+}
+
+// This returns host order
+inline uint16_t VLANHeader::getTagID(void)
+{
+ return getMaskBit16(myTag, 4, 15);
+}
+
+inline void VLANHeader::setTagID(uint16_t argNewTag)
+{
+ uint16_t tempTag = myTag;
+ setMaskBit16(tempTag, 4, 15, argNewTag);
+ myTag = tempTag;
+}
+
+inline void VLANHeader::incrementTagID(uint16_t inc_value)
+{
+ uint16_t tempTag_Host = myTag;
+ uint16_t curTagID_Host = getTagID();
+ uint16_t newTagId_Host = (0xfff & (curTagID_Host + (0xfff & inc_value))); // addition with 12 LSBits
+ setMaskBit16(tempTag_Host, 4, 15, newTagId_Host);
+ myTag = tempTag_Host;
+}
+
+inline uint16_t VLANHeader::getNextProtocolNetOrder()
+{
+ return myNextProtocol;
+}
+
+inline uint16_t VLANHeader::getNextProtocolHostOrder()
+{
+ return (PKT_HTONS(myNextProtocol));
+}
+
+inline void VLANHeader::setNextProtocolFromHostOrder(uint16_t argNextProtocol)
+{
+ myNextProtocol = PKT_HTONS(argNextProtocol);
+}
+
+inline void VLANHeader::setNextProtocolFromNetOrder(uint16_t argNextProtocol)
+{
+ myNextProtocol = argNextProtocol;
+}
+
+inline void VLANHeader::setFromPkt (uint8_t* data)
+{
+ // set the tag from the data
+ setVlanTag(*(uint16_t*)data);
+ setNextProtocolFromNetOrder(*((uint16_t*)(data + 2))); // next protocol is after the vlan tag
+}
+
+inline uint8_t VLANHeader::reconstructPkt (uint8_t* destBuff)
+{
+ *(uint16_t*)destBuff = getVlanTag();
+ *(uint16_t*)(destBuff+2) = getNextProtocolNetOrder();
+ return sizeof(VLANHeader);
+}
+
+
+
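An illustrative sketch (not part of the patch) of composing a VLAN tag with the accessors above; the 0x0800 (IPv4) EtherType is just an example value:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "VLANHeader.h"

static void build_vlan(uint8_t *buf /* at least sizeof(VLANHeader) bytes */)
{
    memset(buf, 0, sizeof(VLANHeader));
    VLANHeader *vlan = (VLANHeader *)buf;

    vlan->setTagID(100);                         // 12-bit VLAN ID
    vlan->setTagUserPriorty(3);                  // 802.1p priority bits
    vlan->setTagCFI(false);
    vlan->setNextProtocolFromHostOrder(0x0800);  // example EtherType (IPv4)

    vlan->dump(stdout);
}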
diff --git a/src/common/arg/SimpleGlob.h b/src/common/arg/SimpleGlob.h
new file mode 100755
index 00000000..ac57105b
--- /dev/null
+++ b/src/common/arg/SimpleGlob.h
@@ -0,0 +1,979 @@
+/*! @file SimpleGlob.h
+
+ @version 3.5
+
+ @brief A cross-platform file globbing library providing the ability to
+ expand wildcards in command-line arguments to a list of all matching
+ files. It is designed explicitly to be portable to any platform and has
+ been tested on Windows and Linux. See CSimpleGlobTempl for the class
+ definition.
+
+ @section features FEATURES
+
+ - MIT Licence allows free use in all software (including GPL and
+ commercial)
+ - multi-platform (Windows 95/98/ME/NT/2K/XP, Linux, Unix)
+ - supports most of the standard linux glob() options
+ - recognition of forward slashes in paths as equivalent to backslashes
+ on Windows. e.g. "c:/path/foo*" is equivalent to "c:\path\foo*".
+ - implemented with only a single C++ header file
+ - char, wchar_t and Windows TCHAR in the same program
+ - complete working examples included
+ - compiles cleanly at warning level 4 (Windows/VC.NET 2003),
+ warning level 3 (Windows/VC6) and -Wall (Linux/gcc)
+
+ @section usage USAGE
+
+ The SimpleGlob class is used by following these steps:
+
+ <ol>
+ <li> Include the SimpleGlob.h header file
+
+ <pre>
+ \#include "SimpleGlob.h"
+ </pre>
+
+ <li> Instantiate a CSimpleGlob object supplying the appropriate flags.
+
+ <pre>
+ @link CSimpleGlobTempl CSimpleGlob @endlink glob(FLAGS);
+ </pre>
+
+ <li> Add all file specifications to the glob class.
+
+ <pre>
+ glob.Add("file*");
+ glob.Add(argc, argv);
+ </pre>
+
+ <li> Process all files with File(), Files() and FileCount()
+
+ <pre>
+ for (int n = 0; n < glob.FileCount(); ++n) {
+ ProcessFile(glob.File(n));
+ }
+ </pre>
+
+ </ol>
+
+ @section licence MIT LICENCE
+
+ The licence text below is the boilerplate "MIT Licence" used from:
+ http://www.opensource.org/licenses/mit-license.php
+
+ Copyright (c) 2006-2007, Brodie Thiesfield
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef INCLUDED_SimpleGlob
+#define INCLUDED_SimpleGlob
+
+/*! @brief The operation of SimpleGlob is fine-tuned via the use of a
+ combination of the following flags.
+
+ The flags may be passed at initialization of the class and used for every
+ filespec added, or alternatively they may optionally be specified in the
+ call to Add() and be different for each filespec.
+
+ @param SG_GLOB_ERR
+ Return upon read error (e.g. directory does not have read permission)
+
+ @param SG_GLOB_MARK
+ Append a slash (backslash in Windows) to every path which corresponds
+ to a directory
+
+ @param SG_GLOB_NOSORT
+ By default, files are returned sorted into string order. With this
+ flag, no sorting is done. This is not compatible with
+ SG_GLOB_FULLSORT.
+
+ @param SG_GLOB_FULLSORT
+ By default, files are sorted in groups belonging to each filespec that
+ was added. For example if the filespec "b*" was added before the
+ filespec "a*" then the argv array will contain all b* files sorted in
+ order, followed by all a* files sorted in order. If this flag is
+ specified, the entire array will be sorted ignoring the filespec
+ groups.
+
+ @param SG_GLOB_NOCHECK
+ If the pattern doesn't match anything, return the original pattern.
+
+ @param SG_GLOB_TILDE
+ Tilde expansion is carried out (on Unix platforms)
+
+ @param SG_GLOB_ONLYDIR
+ Return only directories which match (not compatible with
+ SG_GLOB_ONLYFILE)
+
+ @param SG_GLOB_ONLYFILE
+ Return only files which match (not compatible with SG_GLOB_ONLYDIR)
+
+ @param SG_GLOB_NODOT
+ Do not return the "." or ".." special directories.
+ */
+enum SG_Flags {
+ SG_GLOB_ERR = 1 << 0,
+ SG_GLOB_MARK = 1 << 1,
+ SG_GLOB_NOSORT = 1 << 2,
+ SG_GLOB_NOCHECK = 1 << 3,
+ SG_GLOB_TILDE = 1 << 4,
+ SG_GLOB_ONLYDIR = 1 << 5,
+ SG_GLOB_ONLYFILE = 1 << 6,
+ SG_GLOB_NODOT = 1 << 7,
+ SG_GLOB_FULLSORT = 1 << 8
+};
+
+/*! @brief Error return codes */
+enum SG_Error {
+ SG_SUCCESS = 0,
+ SG_ERR_NOMATCH = 1,
+ SG_ERR_MEMORY = -1,
+ SG_ERR_FAILURE = -2
+};
+#ifndef MAX_PATH
+# define MAX_PATH 4096
+#endif
+// ---------------------------------------------------------------------------
+// Platform dependent implementations
+
+// if we aren't on Windows and we have ICU available, then enable ICU
+// by default. Define this to 0 to intentionally disable it.
+#ifndef SG_HAVE_ICU
+# if !defined(WIN32) && defined(USTRING_H)
+# define SG_HAVE_ICU 1
+# else
+# define SG_HAVE_ICU 0
+# endif
+#endif
+
+// don't include this in documentation as it isn't relevant
+#ifndef DOXYGEN
+
+// on Windows we want to use MBCS aware string functions and mimic the
+// Unix glob functionality. On Unix we just use glob.
+#ifdef WIN32
+# include <mbstring.h>
+# define sg_strchr ::_mbschr
+# define sg_strrchr ::_mbsrchr
+# define sg_strlen ::_mbslen
+# if __STDC_WANT_SECURE_LIB__
+# define sg_strcpy_s(a,n,b) ::_mbscpy_s(a,n,b)
+# else
+# define sg_strcpy_s(a,n,b) ::_mbscpy(a,b)
+# endif
+# define sg_strcmp ::_mbscmp
+# define sg_strcasecmp ::_mbsicmp
+# define SOCHAR_T unsigned char
+#else
+# include <sys/types.h>
+# include <sys/stat.h>
+# include <glob.h>
+# include <limits.h>
+# define sg_strchr ::strchr
+# define sg_strrchr ::strrchr
+# define sg_strlen ::strlen
+# define sg_strcpy_s(a,n,b) ::strcpy(a,b)
+# define sg_strcmp ::strcmp
+# define sg_strcasecmp ::strcasecmp
+# define SOCHAR_T char
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <wchar.h>
+
+// use assertions to test the input data
+#ifdef _DEBUG
+# ifdef _MSC_VER
+# include <crtdbg.h>
+# define SG_ASSERT(b) _ASSERTE(b)
+# else
+# include <assert.h>
+# define SG_ASSERT(b) assert(b)
+# endif
+#else
+# define SG_ASSERT(b)
+#endif
+
+/*! @brief String manipulation functions. */
+class SimpleGlobUtil
+{
+public:
+ static const char * strchr(const char *s, char c) {
+ return (char *) sg_strchr((const SOCHAR_T *)s, c);
+ }
+ static const wchar_t * strchr(const wchar_t *s, wchar_t c) {
+ return ::wcschr(s, c);
+ }
+#if SG_HAVE_ICU
+ static const UChar * strchr(const UChar *s, UChar c) {
+ return ::u_strchr(s, c);
+ }
+#endif
+
+ static const char * strrchr(const char *s, char c) {
+ return (char *) sg_strrchr((const SOCHAR_T *)s, c);
+ }
+ static const wchar_t * strrchr(const wchar_t *s, wchar_t c) {
+ return ::wcsrchr(s, c);
+ }
+#if SG_HAVE_ICU
+ static const UChar * strrchr(const UChar *s, UChar c) {
+ return ::u_strrchr(s, c);
+ }
+#endif
+
+ // Note: char strlen returns number of bytes, not characters
+ static size_t strlen(const char *s) { return ::strlen(s); }
+ static size_t strlen(const wchar_t *s) { return ::wcslen(s); }
+#if SG_HAVE_ICU
+ static size_t strlen(const UChar *s) { return ::u_strlen(s); }
+#endif
+
+ static void strcpy_s(char *dst, size_t n, const char *src) {
+ (void) n;
+ sg_strcpy_s((SOCHAR_T *)dst, n, (const SOCHAR_T *)src);
+ }
+ static void strcpy_s(wchar_t *dst, size_t n, const wchar_t *src) {
+# if __STDC_WANT_SECURE_LIB__
+ ::wcscpy_s(dst, n, src);
+#else
+ (void) n;
+ ::wcscpy(dst, src);
+#endif
+ }
+#if SG_HAVE_ICU
+ static void strcpy_s(UChar *dst, size_t n, const UChar *src) {
+ ::u_strncpy(dst, src, n);
+ }
+#endif
+
+ static int strcmp(const char *s1, const char *s2) {
+ return sg_strcmp((const SOCHAR_T *)s1, (const SOCHAR_T *)s2);
+ }
+ static int strcmp(const wchar_t *s1, const wchar_t *s2) {
+ return ::wcscmp(s1, s2);
+ }
+#if SG_HAVE_ICU
+ static int strcmp(const UChar *s1, const UChar *s2) {
+ return ::u_strcmp(s1, s2);
+ }
+#endif
+
+ static int strcasecmp(const char *s1, const char *s2) {
+ return sg_strcasecmp((const SOCHAR_T *)s1, (const SOCHAR_T *)s2);
+ }
+#if WIN32
+ static int strcasecmp(const wchar_t *s1, const wchar_t *s2) {
+ return ::_wcsicmp(s1, s2);
+ }
+#endif // WIN32
+#if SG_HAVE_ICU
+ static int strcasecmp(const UChar *s1, const UChar *s2) {
+ return u_strcasecmp(s1, s2, 0);
+ }
+#endif
+};
+
+enum SG_FileType {
+ SG_FILETYPE_INVALID,
+ SG_FILETYPE_FILE,
+ SG_FILETYPE_DIR
+};
+
+#ifdef WIN32
+#ifndef INVALID_FILE_ATTRIBUTES
+# define INVALID_FILE_ATTRIBUTES ((uint32_t)-1)
+#endif
+
+#define SG_PATH_CHAR '\\'
+
+/*! @brief Windows glob implementation. */
+template<class SOCHAR>
+struct SimpleGlobBase
+{
+ SimpleGlobBase() : m_hFind(INVALID_HANDLE_VALUE) { }
+
+ int FindFirstFileS(const char * a_pszFileSpec, unsigned int) {
+ m_hFind = FindFirstFileA(a_pszFileSpec, &m_oFindDataA);
+ if (m_hFind != INVALID_HANDLE_VALUE) {
+ return SG_SUCCESS;
+ }
+ uint32_t dwErr = GetLastError();
+ if (dwErr == ERROR_FILE_NOT_FOUND) {
+ return SG_ERR_NOMATCH;
+ }
+ return SG_ERR_FAILURE;
+ }
+ /* int FindFirstFileS(const wchar_t * a_pszFileSpec, unsigned int) {
+ m_hFind = FindFirstFileW(a_pszFileSpec, &m_oFindDataW);
+ if (m_hFind != INVALID_HANDLE_VALUE) {
+ return SG_SUCCESS;
+ }
+ uint32_t dwErr = GetLastError();
+ if (dwErr == ERROR_FILE_NOT_FOUND) {
+ return SG_ERR_NOMATCH;
+ }
+ return SG_ERR_FAILURE;
+ }*/
+
+ bool FindNextFileS(char) {
+ return FindNextFileA(m_hFind, &m_oFindDataA) != FALSE;
+ }
+ /* bool FindNextFileS(wchar_t) {
+ return FindNextFileW(m_hFind, &m_oFindDataW) != FALSE;
+ }*/
+
+ void FindDone() {
+ FindClose(m_hFind);
+ }
+
+ const char * GetFileNameS(char) const {
+ return m_oFindDataA.cFileName;
+ }
+ /*const wchar_t * GetFileNameS(wchar_t) const {
+ return m_oFindDataW.cFileName;
+ }*/
+
+ bool IsDirS(char) const {
+ return GetFileTypeS(m_oFindDataA.dwFileAttributes) == SG_FILETYPE_DIR;
+ }
+ /*bool IsDirS(wchar_t) const {
+ return GetFileTypeS(m_oFindDataW.dwFileAttributes) == SG_FILETYPE_DIR;
+ }*/
+
+ SG_FileType GetFileTypeS(const char * a_pszPath) {
+ return GetFileTypeS(GetFileAttributesA(a_pszPath));
+ }
+ /*SG_FileType GetFileTypeS(const wchar_t * a_pszPath) {
+ return GetFileTypeS(GetFileAttributesW(a_pszPath));
+ }*/
+ SG_FileType GetFileTypeS(uint32_t a_dwAttribs) const {
+ if (a_dwAttribs == INVALID_FILE_ATTRIBUTES) {
+ return SG_FILETYPE_INVALID;
+ }
+ if (a_dwAttribs & FILE_ATTRIBUTE_DIRECTORY) {
+ return SG_FILETYPE_DIR;
+ }
+ return SG_FILETYPE_FILE;
+ }
+typedef struct _FILETIME {
+ uint32_t dwLowDateTime;
+ uint32_t dwHighDateTime;
+} FILETIME;
+
+
+typedef struct _WIN32_FIND_DATAA {
+ uint32_t dwFileAttributes;
+ FILETIME ftCreationTime;
+ FILETIME ftLastAccessTime;
+ FILETIME ftLastWriteTime;
+ uint32_t nFileSizeHigh;
+ uint32_t nFileSizeLow;
+ uint32_t dwReserved0;
+ uint32_t dwReserved1;
+ char cFileName[MAX_PATH];
+ char cAlternateFileName[14];
+} WIN32_FIND_DATAA;
+
+private:
+ void * m_hFind;
+ WIN32_FIND_DATAA m_oFindDataA;
+ WIN32_FIND_DATAA m_oFindDataW;
+};
+
+#else // !WIN32
+
+#define SG_PATH_CHAR '/'
+
+/*! @brief Unix glob implementation. */
+template<class SOCHAR>
+struct SimpleGlobBase
+{
+ SimpleGlobBase() {
+ memset(&m_glob, 0, sizeof(m_glob));
+ m_uiCurr = (size_t)-1;
+ }
+
+ ~SimpleGlobBase() {
+ globfree(&m_glob);
+ }
+
+ void FilePrep() {
+ m_bIsDir = false;
+ size_t len = strlen(m_glob.gl_pathv[m_uiCurr]);
+ if (m_glob.gl_pathv[m_uiCurr][len-1] == '/') {
+ m_bIsDir = true;
+ m_glob.gl_pathv[m_uiCurr][len-1] = 0;
+ }
+ }
+
+ int FindFirstFileS(const char * a_pszFileSpec, unsigned int a_uiFlags) {
+ int nFlags = GLOB_MARK | GLOB_NOSORT;
+ if (a_uiFlags & SG_GLOB_ERR) nFlags |= GLOB_ERR;
+ if (a_uiFlags & SG_GLOB_TILDE) nFlags |= GLOB_TILDE;
+ int rc = glob(a_pszFileSpec, nFlags, NULL, &m_glob);
+ if (rc == GLOB_NOSPACE) return SG_ERR_MEMORY;
+ if (rc == GLOB_ABORTED) return SG_ERR_FAILURE;
+ if (rc == GLOB_NOMATCH) return SG_ERR_NOMATCH;
+ m_uiCurr = 0;
+ FilePrep();
+ return SG_SUCCESS;
+ }
+
+#if SG_HAVE_ICU
+ int FindFirstFileS(const UChar * a_pszFileSpec, unsigned int a_uiFlags) {
+ char buf[PATH_MAX] = { 0 };
+ UErrorCode status = U_ZERO_ERROR;
+ u_strToUTF8(buf, sizeof(buf), NULL, a_pszFileSpec, -1, &status);
+ if (U_FAILURE(status)) return SG_ERR_FAILURE;
+ return FindFirstFileS(buf, a_uiFlags);
+ }
+#endif
+
+ bool FindNextFileS(char) {
+ SG_ASSERT(m_uiCurr != (size_t)-1);
+ if (++m_uiCurr >= m_glob.gl_pathc) {
+ return false;
+ }
+ FilePrep();
+ return true;
+ }
+
+#if SG_HAVE_ICU
+ bool FindNextFileS(UChar) {
+ return FindNextFileS((char)0);
+ }
+#endif
+
+ void FindDone() {
+ globfree(&m_glob);
+ memset(&m_glob, 0, sizeof(m_glob));
+ m_uiCurr = (size_t)-1;
+ }
+
+ const char * GetFileNameS(char) const {
+ SG_ASSERT(m_uiCurr != (size_t)-1);
+ return m_glob.gl_pathv[m_uiCurr];
+ }
+
+#if SG_HAVE_ICU
+ const UChar * GetFileNameS(UChar) const {
+ const char * pszFile = GetFileNameS((char)0);
+ if (!pszFile) return NULL;
+ UErrorCode status = U_ZERO_ERROR;
+ memset(m_szBuf, 0, sizeof(m_szBuf));
+ u_strFromUTF8(m_szBuf, PATH_MAX, NULL, pszFile, -1, &status);
+ if (U_FAILURE(status)) return NULL;
+ return m_szBuf;
+ }
+#endif
+
+ bool IsDirS(char) const {
+ SG_ASSERT(m_uiCurr != (size_t)-1);
+ return m_bIsDir;
+ }
+
+#if SG_HAVE_ICU
+ bool IsDirS(UChar) const {
+ return IsDirS((char)0);
+ }
+#endif
+
+ SG_FileType GetFileTypeS(const char * a_pszPath) const {
+ struct stat sb;
+ if (0 != stat(a_pszPath, &sb)) {
+ return SG_FILETYPE_INVALID;
+ }
+ if (S_ISDIR(sb.st_mode)) {
+ return SG_FILETYPE_DIR;
+ }
+ if (S_ISREG(sb.st_mode)) {
+ return SG_FILETYPE_FILE;
+ }
+ return SG_FILETYPE_INVALID;
+ }
+
+#if SG_HAVE_ICU
+ SG_FileType GetFileTypeS(const UChar * a_pszPath) const {
+ char buf[PATH_MAX] = { 0 };
+ UErrorCode status = U_ZERO_ERROR;
+ u_strToUTF8(buf, sizeof(buf), NULL, a_pszPath, -1, &status);
+ if (U_FAILURE(status)) return SG_FILETYPE_INVALID;
+ return GetFileTypeS(buf);
+ }
+#endif
+
+private:
+ glob_t m_glob;
+ size_t m_uiCurr;
+ bool m_bIsDir;
+#if SG_HAVE_ICU
+ mutable UChar m_szBuf[PATH_MAX];
+#endif
+};
+
+#endif // WIN32
+
+#endif // DOXYGEN
+
+// ---------------------------------------------------------------------------
+// MAIN TEMPLATE CLASS
+// ---------------------------------------------------------------------------
+
+/*! @brief Implementation of the SimpleGlob class */
+template<class SOCHAR>
+class CSimpleGlobTempl : private SimpleGlobBase<SOCHAR>
+{
+public:
+ /*! @brief Initialize the class.
+
+ @param a_uiFlags Combination of SG_GLOB flags.
+ @param a_nReservedSlots Number of slots in the argv array that
+ should be reserved. In the returned array these slots
+ argv[0] ... argv[a_nReservedSlots-1] will be left empty for
+ the caller to fill in.
+ */
+ CSimpleGlobTempl(unsigned int a_uiFlags = 0, int a_nReservedSlots = 0);
+
+ /*! @brief Deallocate all memory buffers. */
+ ~CSimpleGlobTempl();
+
+ /*! @brief Initialize (or re-initialize) the class in preparation for
+ adding new filespecs.
+
+ All existing files are cleared. Note that allocated memory is only
+ deallocated at object destruction.
+
+ @param a_uiFlags Combination of SG_GLOB flags.
+ @param a_nReservedSlots Number of slots in the argv array that
+ should be reserved. In the returned array these slots
+ argv[0] ... argv[a_nReservedSlots-1] will be left empty for
+ the caller to fill in.
+ */
+ int Init(unsigned int a_uiFlags = 0, int a_nReservedSlots = 0);
+
+ /*! @brief Add a new filespec to the glob.
+
+ The filesystem will be immediately scanned for all matching files and
+ directories and they will be added to the glob.
+
+ @param a_pszFileSpec Filespec to add to the glob.
+
+ @return SG_SUCCESS Matching files were added to the glob.
+ @return SG_ERR_NOMATCH Nothing matched the pattern. To ignore this
+ error compare return value to >= SG_SUCCESS.
+ @return SG_ERR_MEMORY Out of memory failure.
+ @return SG_ERR_FAILURE General failure.
+ */
+ int Add(const SOCHAR *a_pszFileSpec);
+
+ /*! @brief Add an array of filespec to the glob.
+
+ The filesystem will be immediately scanned for all matching files and
+ directories in each filespec and they will be added to the glob.
+
+ @param a_nCount Number of filespec in the array.
+ @param a_rgpszFileSpec Array of filespec to add to the glob.
+
+ @return SG_SUCCESS Matching files were added to the glob.
+ @return SG_ERR_NOMATCH Nothing matched the pattern. To ignore this
+            error, treat any return value >= SG_SUCCESS as success.
+ @return SG_ERR_MEMORY Out of memory failure.
+ @return SG_ERR_FAILURE General failure.
+ */
+ int Add(int a_nCount, const SOCHAR * const * a_rgpszFileSpec);
+
+ /*! @brief Return the number of files in the argv array.
+ */
+ inline int FileCount() const { return m_nArgsLen; }
+
+ /*! @brief Return the full argv array. */
+ inline SOCHAR ** Files() {
+ SetArgvArrayType(POINTERS);
+ return m_rgpArgs;
+ }
+
+    /*! @brief Return a single file. */
+ inline SOCHAR * File(int n) {
+ SG_ASSERT(n >= 0 && n < m_nArgsLen);
+ return Files()[n];
+ }
+
+private:
+ CSimpleGlobTempl(const CSimpleGlobTempl &); // disabled
+ CSimpleGlobTempl & operator=(const CSimpleGlobTempl &); // disabled
+
+    /*! @brief The argv array has its members stored as either an offset into
+ the string buffer, or as pointers to their string in the buffer. The
+ offsets are used because if the string buffer is dynamically resized,
+ all pointers into that buffer would become invalid.
+ */
+ enum ARG_ARRAY_TYPE { OFFSETS, POINTERS };
+
+ /*! @brief Change the type of data stored in the argv array. */
+ void SetArgvArrayType(ARG_ARRAY_TYPE a_nNewType);
+
+ /*! @brief Add a filename to the array if it passes all requirements. */
+ int AppendName(const SOCHAR *a_pszFileName, bool a_bIsDir);
+
+ /*! @brief Grow the argv array to the required size. */
+ bool GrowArgvArray(int a_nNewLen);
+
+ /*! @brief Grow the string buffer to the required size. */
+ bool GrowStringBuffer(size_t a_uiMinSize);
+
+    /*! @brief Compare two (possibly NULL) strings */
+ static int fileSortCompare(const void *a1, const void *a2);
+
+private:
+ unsigned int m_uiFlags;
+ ARG_ARRAY_TYPE m_nArgArrayType; //!< argv is indexes or pointers
+ SOCHAR ** m_rgpArgs; //!< argv
+ int m_nReservedSlots; //!< # client slots in argv array
+ int m_nArgsSize; //!< allocated size of array
+ int m_nArgsLen; //!< used length
+ SOCHAR * m_pBuffer; //!< argv string buffer
+ size_t m_uiBufferSize; //!< allocated size of buffer
+ size_t m_uiBufferLen; //!< used length of buffer
+ SOCHAR m_szPathPrefix[MAX_PATH]; //!< wildcard path prefix
+};
+
+// ---------------------------------------------------------------------------
+// IMPLEMENTATION
+// ---------------------------------------------------------------------------
+
+template<class SOCHAR>
+CSimpleGlobTempl<SOCHAR>::CSimpleGlobTempl(
+ unsigned int a_uiFlags,
+ int a_nReservedSlots
+ )
+{
+ m_rgpArgs = NULL;
+ m_nArgsSize = 0;
+ m_pBuffer = NULL;
+ m_uiBufferSize = 0;
+
+ Init(a_uiFlags, a_nReservedSlots);
+}
+
+template<class SOCHAR>
+CSimpleGlobTempl<SOCHAR>::~CSimpleGlobTempl()
+{
+ if (m_rgpArgs) free(m_rgpArgs);
+ if (m_pBuffer) free(m_pBuffer);
+}
+
+template<class SOCHAR>
+int
+CSimpleGlobTempl<SOCHAR>::Init(
+ unsigned int a_uiFlags,
+ int a_nReservedSlots
+ )
+{
+ m_nArgArrayType = POINTERS;
+ m_uiFlags = a_uiFlags;
+ m_nArgsLen = a_nReservedSlots;
+ m_nReservedSlots = a_nReservedSlots;
+ m_uiBufferLen = 0;
+
+ if (m_nReservedSlots > 0) {
+ if (!GrowArgvArray(m_nReservedSlots)) {
+ return SG_ERR_MEMORY;
+ }
+ for (int n = 0; n < m_nReservedSlots; ++n) {
+ m_rgpArgs[n] = NULL;
+ }
+ }
+
+ return SG_SUCCESS;
+}
+
+template<class SOCHAR>
+int
+CSimpleGlobTempl<SOCHAR>::Add(
+ const SOCHAR *a_pszFileSpec
+ )
+{
+#ifdef WIN32
+ // Windows FindFirst/FindNext recognizes forward slash as the same as
+ // backward slash and follows the directories. We need to do the same
+ // when calculating the prefix and when we have no wildcards.
+ SOCHAR szFileSpec[MAX_PATH];
+ SimpleGlobUtil::strcpy_s(szFileSpec, MAX_PATH, a_pszFileSpec);
+ const SOCHAR * pszPath = SimpleGlobUtil::strchr(szFileSpec, '/');
+ while (pszPath) {
+ szFileSpec[pszPath - szFileSpec] = SG_PATH_CHAR;
+ pszPath = SimpleGlobUtil::strchr(pszPath + 1, '/');
+ }
+ a_pszFileSpec = szFileSpec;
+#endif
+
+ // if this doesn't contain wildcards then we can just add it directly
+ m_szPathPrefix[0] = 0;
+ if (!SimpleGlobUtil::strchr(a_pszFileSpec, '*') &&
+ !SimpleGlobUtil::strchr(a_pszFileSpec, '?'))
+ {
+ SG_FileType nType = GetFileTypeS(a_pszFileSpec);
+ if (nType == SG_FILETYPE_INVALID) {
+ if (m_uiFlags & SG_GLOB_NOCHECK) {
+ return AppendName(a_pszFileSpec, false);
+ }
+ return SG_ERR_NOMATCH;
+ }
+ return AppendName(a_pszFileSpec, nType == SG_FILETYPE_DIR);
+ }
+
+#ifdef WIN32
+ // Windows doesn't return the directory with the filename, so we need to
+ // extract the path from the search string ourselves and prefix it to the
+ // filename we get back.
+ const SOCHAR * pszFilename =
+ SimpleGlobUtil::strrchr(a_pszFileSpec, SG_PATH_CHAR);
+ if (pszFilename) {
+ SimpleGlobUtil::strcpy_s(m_szPathPrefix, MAX_PATH, a_pszFileSpec);
+ m_szPathPrefix[pszFilename - a_pszFileSpec + 1] = 0;
+ }
+#endif
+
+ // search for the first match on the file
+ int rc = FindFirstFileS(a_pszFileSpec, m_uiFlags);
+ if (rc != SG_SUCCESS) {
+ if (rc == SG_ERR_NOMATCH && (m_uiFlags & SG_GLOB_NOCHECK)) {
+ int ok = AppendName(a_pszFileSpec, false);
+ if (ok != SG_SUCCESS) rc = ok;
+ }
+ return rc;
+ }
+
+ // add it and find all subsequent matches
+ int nError, nStartLen = m_nArgsLen;
+ bool bSuccess;
+ do {
+ nError = AppendName(GetFileNameS((SOCHAR)0), IsDirS((SOCHAR)0));
+ bSuccess = FindNextFileS((SOCHAR)0);
+ }
+ while (nError == SG_SUCCESS && bSuccess);
+ SimpleGlobBase<SOCHAR>::FindDone();
+
+ // sort these files if required
+ if (m_nArgsLen > nStartLen && !(m_uiFlags & SG_GLOB_NOSORT)) {
+ if (m_uiFlags & SG_GLOB_FULLSORT) {
+ nStartLen = m_nReservedSlots;
+ }
+ SetArgvArrayType(POINTERS);
+ qsort(
+ m_rgpArgs + nStartLen,
+ m_nArgsLen - nStartLen,
+ sizeof(m_rgpArgs[0]), fileSortCompare);
+ }
+
+ return nError;
+}
+
+template<class SOCHAR>
+int
+CSimpleGlobTempl<SOCHAR>::Add(
+ int a_nCount,
+ const SOCHAR * const * a_rgpszFileSpec
+ )
+{
+ int nResult;
+ for (int n = 0; n < a_nCount; ++n) {
+ nResult = Add(a_rgpszFileSpec[n]);
+ if (nResult != SG_SUCCESS) {
+ return nResult;
+ }
+ }
+ return SG_SUCCESS;
+}
+
+template<class SOCHAR>
+int
+CSimpleGlobTempl<SOCHAR>::AppendName(
+ const SOCHAR * a_pszFileName,
+ bool a_bIsDir
+ )
+{
+ // we need the argv array as offsets in case we resize it
+ SetArgvArrayType(OFFSETS);
+
+ // check for special cases which cause us to ignore this entry
+ if ((m_uiFlags & SG_GLOB_ONLYDIR) && !a_bIsDir) {
+ return SG_SUCCESS;
+ }
+ if ((m_uiFlags & SG_GLOB_ONLYFILE) && a_bIsDir) {
+ return SG_SUCCESS;
+ }
+ if ((m_uiFlags & SG_GLOB_NODOT) && a_bIsDir) {
+ if (a_pszFileName[0] == '.') {
+ if (a_pszFileName[1] == '\0') {
+ return SG_SUCCESS;
+ }
+ if (a_pszFileName[1] == '.' && a_pszFileName[2] == '\0') {
+ return SG_SUCCESS;
+ }
+ }
+ }
+
+ // ensure that we have enough room in the argv array
+ if (!GrowArgvArray(m_nArgsLen + 1)) {
+ return SG_ERR_MEMORY;
+ }
+
+ // ensure that we have enough room in the string buffer (+1 for null)
+ size_t uiPrefixLen = SimpleGlobUtil::strlen(m_szPathPrefix);
+ size_t uiLen = uiPrefixLen + SimpleGlobUtil::strlen(a_pszFileName) + 1;
+ if (a_bIsDir && (m_uiFlags & SG_GLOB_MARK) == SG_GLOB_MARK) {
+ ++uiLen; // need space for the backslash
+ }
+ if (!GrowStringBuffer(m_uiBufferLen + uiLen)) {
+ return SG_ERR_MEMORY;
+ }
+
+ // add this entry. m_uiBufferLen is offset from beginning of buffer.
+ m_rgpArgs[m_nArgsLen++] = (SOCHAR*)m_uiBufferLen;
+ SimpleGlobUtil::strcpy_s(m_pBuffer + m_uiBufferLen,
+ m_uiBufferSize - m_uiBufferLen, m_szPathPrefix);
+ SimpleGlobUtil::strcpy_s(m_pBuffer + m_uiBufferLen + uiPrefixLen,
+ m_uiBufferSize - m_uiBufferLen - uiPrefixLen, a_pszFileName);
+ m_uiBufferLen += uiLen;
+
+ // add the directory slash if desired
+ if (a_bIsDir && (m_uiFlags & SG_GLOB_MARK) == SG_GLOB_MARK) {
+ const static SOCHAR szDirSlash[] = { SG_PATH_CHAR, 0 };
+ SimpleGlobUtil::strcpy_s(m_pBuffer + m_uiBufferLen - 2,
+ m_uiBufferSize - (m_uiBufferLen - 2), szDirSlash);
+ }
+
+ return SG_SUCCESS;
+}
+
+template<class SOCHAR>
+void
+CSimpleGlobTempl<SOCHAR>::SetArgvArrayType(
+ ARG_ARRAY_TYPE a_nNewType
+ )
+{
+ if (m_nArgArrayType == a_nNewType) return;
+ if (a_nNewType == POINTERS) {
+ SG_ASSERT(m_nArgArrayType == OFFSETS);
+ for (int n = 0; n < m_nArgsLen; ++n) {
+ m_rgpArgs[n] = (m_rgpArgs[n] == (SOCHAR*)-1) ?
+ NULL : m_pBuffer + (size_t) m_rgpArgs[n];
+ }
+ }
+ else {
+ SG_ASSERT(a_nNewType == OFFSETS);
+ SG_ASSERT(m_nArgArrayType == POINTERS);
+ for (int n = 0; n < m_nArgsLen; ++n) {
+ m_rgpArgs[n] = (m_rgpArgs[n] == NULL) ?
+ (SOCHAR*) -1 : (SOCHAR*) (m_rgpArgs[n] - m_pBuffer);
+ }
+ }
+ m_nArgArrayType = a_nNewType;
+}
+
+template<class SOCHAR>
+bool
+CSimpleGlobTempl<SOCHAR>::GrowArgvArray(
+ int a_nNewLen
+ )
+{
+ if (a_nNewLen >= m_nArgsSize) {
+ static const int SG_ARGV_INITIAL_SIZE = 32;
+ int nNewSize = (m_nArgsSize > 0) ?
+ m_nArgsSize * 2 : SG_ARGV_INITIAL_SIZE;
+ while (a_nNewLen >= nNewSize) {
+ nNewSize *= 2;
+ }
+ void * pNewBuffer = realloc(m_rgpArgs, nNewSize * sizeof(SOCHAR*));
+ if (!pNewBuffer) return false;
+ m_nArgsSize = nNewSize;
+ m_rgpArgs = (SOCHAR**) pNewBuffer;
+ }
+ return true;
+}
+
+template<class SOCHAR>
+bool
+CSimpleGlobTempl<SOCHAR>::GrowStringBuffer(
+ size_t a_uiMinSize
+ )
+{
+ if (a_uiMinSize >= m_uiBufferSize) {
+ static const int SG_BUFFER_INITIAL_SIZE = 1024;
+ size_t uiNewSize = (m_uiBufferSize > 0) ?
+ m_uiBufferSize * 2 : SG_BUFFER_INITIAL_SIZE;
+ while (a_uiMinSize >= uiNewSize) {
+ uiNewSize *= 2;
+ }
+ void * pNewBuffer = realloc(m_pBuffer, uiNewSize * sizeof(SOCHAR));
+ if (!pNewBuffer) return false;
+ m_uiBufferSize = uiNewSize;
+ m_pBuffer = (SOCHAR*) pNewBuffer;
+ }
+ return true;
+}
+
+template<class SOCHAR>
+int
+CSimpleGlobTempl<SOCHAR>::fileSortCompare(
+ const void *a1,
+ const void *a2
+ )
+{
+ const SOCHAR * s1 = *(const SOCHAR **)a1;
+ const SOCHAR * s2 = *(const SOCHAR **)a2;
+ if (s1 && s2) {
+ return SimpleGlobUtil::strcasecmp(s1, s2);
+ }
+ // NULL sorts first
+ return s1 == s2 ? 0 : (s1 ? 1 : -1);
+}
+
+// ---------------------------------------------------------------------------
+// TYPE DEFINITIONS
+// ---------------------------------------------------------------------------
+
+/*! @brief ASCII/MBCS version of CSimpleGlob */
+typedef CSimpleGlobTempl<char> CSimpleGlobA;
+
+/*! @brief wchar_t version of CSimpleGlob */
+typedef CSimpleGlobTempl<wchar_t> CSimpleGlobW;
+
+#if SG_HAVE_ICU
+/*! @brief UChar version of CSimpleGlob */
+typedef CSimpleGlobTempl<UChar> CSimpleGlobU;
+#endif
+
+#ifdef _UNICODE
+/*! @brief TCHAR version dependent on if _UNICODE is defined */
+# if SG_HAVE_ICU
+# define CSimpleGlob CSimpleGlobU
+# else
+# define CSimpleGlob CSimpleGlobW
+# endif
+#else
+/*! @brief TCHAR version dependent on if _UNICODE is defined */
+# define CSimpleGlob CSimpleGlobA
+#endif
+
+#endif // INCLUDED_SimpleGlob
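
A minimal usage sketch of the CSimpleGlob API declared above; the "*.cpp"
pattern, the flag choice and the error handling are illustrative assumptions:

    #include <stdio.h>
    #include "SimpleGlob.h"

    int main() {
        CSimpleGlobA glob(SG_GLOB_ONLYFILE);   // keep only files in the results
        int rc = glob.Add("*.cpp");            // scans the filesystem immediately
        if (rc < SG_SUCCESS) {                 // SG_ERR_NOMATCH is >= SG_SUCCESS, so it is tolerated here
            fprintf(stderr, "glob failed: %d\n", rc);
            return 1;
        }
        for (int n = 0; n < glob.FileCount(); ++n) {
            printf("%2d: %s\n", n, glob.File(n));
        }
        return 0;
    }
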
diff --git a/src/common/arg/SimpleOpt.h b/src/common/arg/SimpleOpt.h
new file mode 100755
index 00000000..9ca16c1d
--- /dev/null
+++ b/src/common/arg/SimpleOpt.h
@@ -0,0 +1,1060 @@
+/*! @file SimpleOpt.h
+
+ @version 3.5
+
+ @brief A cross-platform command line library which can parse almost any
+ of the standard command line formats in use today. It is designed
+ explicitly to be portable to any platform and has been tested on Windows
+ and Linux. See CSimpleOptTempl for the class definition.
+
+ @section features FEATURES
+
+ - MIT Licence allows free use in all software (including GPL
+ and commercial)
+ - multi-platform (Windows 95/98/ME/NT/2K/XP, Linux, Unix)
+ - supports all lengths of option names:
+ <table width="60%">
+ <tr><td width="30%"> -
+ <td>switch character only (e.g. use stdin for input)
+ <tr><td> -o
+ <td>short (single character)
+ <tr><td> -long
+ <td>long (multiple character, single switch character)
+ <tr><td> --longer
+ <td>long (multiple character, multiple switch characters)
+ </table>
+ - supports all types of arguments for options:
+ <table width="60%">
+ <tr><td width="30%"> --option
+ <td>short/long option flag (no argument)
+ <tr><td> --option ARG
+ <td>short/long option with separate required argument
+ <tr><td> --option=ARG
+ <td>short/long option with combined required argument
+ <tr><td> --option[=ARG]
+ <td>short/long option with combined optional argument
+ <tr><td> -oARG
+ <td>short option with combined required argument
+ <tr><td> -o[ARG]
+ <td>short option with combined optional argument
+ </table>
+ - supports options with multiple or variable numbers of arguments:
+ <table width="60%">
+ <tr><td width="30%"> --multi ARG1 ARG2
+ <td>Multiple arguments
+ <tr><td> --multi N ARG-1 ARG-2 ... ARG-N
+ <td>Variable number of arguments
+ </table>
+ - supports case-insensitive option matching on short, long and/or
+ word arguments.
+ - supports options which do not use a switch character. i.e. a special
+ word which is construed as an option.
+ e.g. "foo.exe open /directory/file.txt"
+ - supports clumping of multiple short options (no arguments) in a string
+ e.g. "foo.exe -abcdef file1" <==> "foo.exe -a -b -c -d -e -f file1"
+ - automatic recognition of a single slash as equivalent to a single
+ hyphen on Windows, e.g. "/f FILE" is equivalent to "-f FILE".
+ - file arguments can appear anywhere in the argument list:
+ "foo.exe file1.txt -a ARG file2.txt --flag file3.txt file4.txt"
+ files will be returned to the application in the same order they were
+ supplied on the command line
+    - short-circuit option matching: "--man" will match "--mandate"
+    - invalid options can be handled while continuing to parse the command
+      line
+    - valid options list can be changed dynamically during command line
+      processing, i.e. accept different options depending on an option
+      supplied earlier in the command line.
+ - implemented with only a single C++ header file
+ - optionally use no C runtime or OS functions
+ - char, wchar_t and Windows TCHAR in the same program
+ - complete working examples included
+ - compiles cleanly at warning level 4 (Windows/VC.NET 2003), warning
+ level 3 (Windows/VC6) and -Wall (Linux/gcc)
+
+ @section usage USAGE
+
+ The SimpleOpt class is used by following these steps:
+
+ <ol>
+ <li> Include the SimpleOpt.h header file
+
+ <pre>
+ \#include "SimpleOpt.h"
+ </pre>
+
+ <li> Define an array of valid options for your program.
+
+<pre>
+@link CSimpleOptTempl::SOption CSimpleOpt::SOption @endlink g_rgOptions[] = {
+ { OPT_FLAG, _T("-a"), SO_NONE }, // "-a"
+ { OPT_FLAG, _T("-b"), SO_NONE }, // "-b"
+ { OPT_ARG, _T("-f"), SO_REQ_SEP }, // "-f ARG"
+ { OPT_HELP, _T("-?"), SO_NONE }, // "-?"
+ { OPT_HELP, _T("--help"), SO_NONE }, // "--help"
+ SO_END_OF_OPTIONS // END
+};
+</pre>
+
+ Note that all options must start with a hyphen even if the slash will
+ be accepted. This is because the slash character is automatically
+ converted into a hyphen to test against the list of options.
+ For example, the following line matches both "-?" and "/?"
+ (on Windows).
+
+ <pre>
+ { OPT_HELP, _T("-?"), SO_NONE }, // "-?"
+ </pre>
+
+ <li> Instantiate a CSimpleOpt object supplying argc, argv and the option
+ table
+
+<pre>
+@link CSimpleOptTempl CSimpleOpt @endlink args(argc, argv, g_rgOptions);
+</pre>
+
+ <li> Process the arguments by calling Next() until it returns false.
+ On each call, first check for an error by calling LastError(), then
+ either handle the error or process the argument.
+
+<pre>
+while (args.Next()) {
+ if (args.LastError() == SO_SUCCESS) {
+ handle option: use OptionId(), OptionText() and OptionArg()
+ }
+ else {
+ handle error: see ESOError enums
+ }
+}
+</pre>
+
+ <li> Process all non-option arguments with File(), Files() and FileCount()
+
+<pre>
+ShowFiles(args.FileCount(), args.Files());
+</pre>
+
+ </ol>
+
+ @section notes NOTES
+
+ - In MBCS mode, this library is guaranteed to work correctly only when
+ all option names use only ASCII characters.
+ - Note that if case-insensitive matching is being used then the first
+ matching option in the argument list will be returned.
+
+ @section licence MIT LICENCE
+
+ The licence text below is the boilerplate "MIT Licence" used from:
+ http://www.opensource.org/licenses/mit-license.php
+
+ Copyright (c) 2006-2007, Brodie Thiesfield
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/*! @mainpage
+
+ <table>
+ <tr><th>Library <td>SimpleOpt
+ <tr><th>Author <td>Brodie Thiesfield [code at jellycan dot com]
+ <tr><th>Source <td>http://code.jellycan.com/simpleopt/
+ </table>
+
+ @section SimpleOpt SimpleOpt
+
+ A cross-platform library providing a simple method to parse almost any of
+ the standard command-line formats in use today.
+
+ See the @link SimpleOpt.h SimpleOpt @endlink documentation for full
+ details.
+
+ @section SimpleGlob SimpleGlob
+
+ A cross-platform file globbing library providing the ability to
+ expand wildcards in command-line arguments to a list of all matching
+ files.
+
+ See the @link SimpleGlob.h SimpleGlob @endlink documentation for full
+ details.
+*/
+
+#ifndef INCLUDED_SimpleOpt
+#define INCLUDED_SimpleOpt
+
+// Default the max arguments to a fixed value. If you want to be able to
+// handle any number of arguments, then predefine this to 0 and it will
+// use an internal dynamically allocated buffer instead.
+#ifdef SO_MAX_ARGS
+# define SO_STATICBUF SO_MAX_ARGS
+#else
+# include <stdlib.h> // malloc, free
+# include <string.h> // memcpy
+# define SO_STATICBUF 50
+#endif
+
+//! Error values
+typedef enum _ESOError
+{
+ //! No error
+ SO_SUCCESS = 0,
+
+ /*! It looks like an option (it starts with a switch character), but
+ it isn't registered in the option table. */
+ SO_OPT_INVALID = -1,
+
+ /*! Multiple options matched the supplied option text.
+ Only returned when NOT using SO_O_EXACT. */
+ SO_OPT_MULTIPLE = -2,
+
+ /*! Option doesn't take an argument, but a combined argument was
+ supplied. */
+ SO_ARG_INVALID = -3,
+
+    /*! SO_REQ_CMB style argument was supplied to a SO_REQ_SEP option.
+ Only returned when using SO_O_PEDANTIC. */
+ SO_ARG_INVALID_TYPE = -4,
+
+ //! Required argument was not supplied
+ SO_ARG_MISSING = -5,
+
+ /*! Option argument looks like another option.
+ Only returned when NOT using SO_O_NOERR. */
+ SO_ARG_INVALID_DATA = -6
+} ESOError;
+
+//! Option flags
+enum _ESOFlags
+{
+ /*! Disallow partial matching of option names */
+ SO_O_EXACT = 0x0001,
+
+ /*! Disallow use of slash as an option marker on Windows.
+ Un*x only ever recognizes a hyphen. */
+ SO_O_NOSLASH = 0x0002,
+
+ /*! Permit arguments on single letter options with no equals sign.
+ e.g. -oARG or -o[ARG] */
+ SO_O_SHORTARG = 0x0004,
+
+ /*! Permit single character options to be clumped into a single
+ option string. e.g. "-a -b -c" <==> "-abc" */
+ SO_O_CLUMP = 0x0008,
+
+ /*! Process the entire argv array for options, including the
+ argv[0] entry. */
+ SO_O_USEALL = 0x0010,
+
+    /*! Do not generate an error for invalid options. Errors for missing
+        arguments will still be generated. Invalid options will be
+        treated as files. Invalid options in clumps will be silently
+        ignored. */
+ SO_O_NOERR = 0x0020,
+
+ /*! Validate argument type pedantically. Return an error when a
+ separated argument "-opt arg" is supplied by the user as a
+ combined argument "-opt=arg". By default this is not considered
+ an error. */
+ SO_O_PEDANTIC = 0x0040,
+
+ /*! Case-insensitive comparisons for short arguments */
+ SO_O_ICASE_SHORT = 0x0100,
+
+ /*! Case-insensitive comparisons for long arguments */
+ SO_O_ICASE_LONG = 0x0200,
+
+ /*! Case-insensitive comparisons for word arguments
+ i.e. arguments without any hyphens at the start. */
+ SO_O_ICASE_WORD = 0x0400,
+
+ /*! Case-insensitive comparisons for all arg types */
+ SO_O_ICASE = 0x0700
+};
+
+/*! Types of arguments that options may have. Note that some of the _ESOFlags
+ are not compatible with all argument types. SO_O_SHORTARG requires that
+ relevant options use either SO_REQ_CMB or SO_OPT. SO_O_CLUMP requires
+ that relevant options use only SO_NONE.
+ */
+typedef enum _ESOArgType {
+ /*! No argument. Just the option flags.
+ e.g. -o --opt */
+ SO_NONE,
+
+ /*! Required separate argument.
+ e.g. -o ARG --opt ARG */
+ SO_REQ_SEP,
+
+ /*! Required combined argument.
+ e.g. -oARG -o=ARG --opt=ARG */
+ SO_REQ_CMB,
+
+ /*! Optional combined argument.
+ e.g. -o[ARG] -o[=ARG] --opt[=ARG] */
+ SO_OPT,
+
+ /*! Multiple separate arguments. The actual number of arguments is
+        determined programmatically at the time the argument is processed.
+ e.g. -o N ARG1 ARG2 ... ARGN --opt N ARG1 ARG2 ... ARGN */
+ SO_MULTI
+} ESOArgType;
+
+//! this option definition must be the last entry in the table
+#define SO_END_OF_OPTIONS { -1, NULL, SO_NONE }
+
+#ifdef _DEBUG
+# ifdef _MSC_VER
+# include <crtdbg.h>
+# define SO_ASSERT(b) _ASSERTE(b)
+# else
+# include <assert.h>
+# define SO_ASSERT(b) assert(b)
+# endif
+#else
+# define SO_ASSERT(b) //!< assertion used to test input data
+#endif
+
+// ---------------------------------------------------------------------------
+// MAIN TEMPLATE CLASS
+// ---------------------------------------------------------------------------
+
+/*! @brief Implementation of the SimpleOpt class */
+template<class SOCHAR>
+class CSimpleOptTempl
+{
+public:
+ /*! @brief Structure used to define all known options. */
+ struct SOption {
+ /*! ID to return for this flag. Optional but must be >= 0 */
+ int nId;
+
+ /*! arg string to search for, e.g. "open", "-", "-f", "--file"
+ Note that on Windows the slash option marker will be converted
+ to a hyphen so that "-f" will also match "/f". */
+ const SOCHAR * pszArg;
+
+ /*! type of argument accepted by this option */
+ ESOArgType nArgType;
+ };
+
+ /*! @brief Initialize the class. Init() must be called later. */
+ CSimpleOptTempl()
+ : m_rgShuffleBuf(NULL)
+ {
+ Init(0, NULL, NULL, 0);
+ }
+
+ /*! @brief Initialize the class in preparation for use. */
+ CSimpleOptTempl(
+ int argc,
+ SOCHAR * argv[],
+ const SOption * a_rgOptions,
+ int a_nFlags = 0
+ )
+ : m_rgShuffleBuf(NULL)
+ {
+ Init(argc, argv, a_rgOptions, a_nFlags);
+ }
+
+#ifndef SO_MAX_ARGS
+ /*! @brief Deallocate any allocated memory. */
+ ~CSimpleOptTempl() { if (m_rgShuffleBuf) free(m_rgShuffleBuf); }
+#endif
+
+ /*! @brief Initialize the class in preparation for calling Next.
+
+ The table of options pointed to by a_rgOptions does not need to be
+ valid at the time that Init() is called. However on every call to
+ Next() the table pointed to must be a valid options table with the
+ last valid entry set to SO_END_OF_OPTIONS.
+
+ NOTE: the array pointed to by a_argv will be modified by this
+ class and must not be used or modified outside of member calls to
+ this class.
+
+ @param a_argc Argument array size
+ @param a_argv Argument array
+ @param a_rgOptions Valid option array
+ @param a_nFlags Optional flags to modify the processing of
+ the arguments
+
+ @return true Successful
+        @return false   if SO_MAX_ARGS > 0: Too many arguments
+                        if SO_MAX_ARGS == 0: Memory allocation failure
+ */
+ bool Init(
+ int a_argc,
+ SOCHAR * a_argv[],
+ const SOption * a_rgOptions,
+ int a_nFlags = 0
+ );
+
+ /*! @brief Change the current options table during option parsing.
+
+ @param a_rgOptions Valid option array
+ */
+ inline void SetOptions(const SOption * a_rgOptions) {
+ m_rgOptions = a_rgOptions;
+ }
+
+ /*! @brief Change the current flags during option parsing.
+
+        Note that changing the SO_O_USEALL flag here will have no effect.
+ It must be set using Init() or the constructor.
+
+ @param a_nFlags Flags to modify the processing of the arguments
+ */
+ inline void SetFlags(int a_nFlags) { m_nFlags = a_nFlags; }
+
+ /*! @brief Query if a particular flag is set */
+ inline bool HasFlag(int a_nFlag) const {
+ return (m_nFlags & a_nFlag) == a_nFlag;
+ }
+
+ /*! @brief Advance to the next option if available.
+
+ When all options have been processed it will return false. When true
+ has been returned, you must check for an invalid or unrecognized
+        option using the LastError() method. This will return an error
+ value other than SO_SUCCESS on an error. All standard data
+ (e.g. OptionText(), OptionArg(), OptionId(), etc) will be available
+ depending on the error.
+
+ After all options have been processed, the remaining files from the
+ command line can be processed in same order as they were passed to
+ the program.
+
+ @return true option or error available for processing
+ @return false all options have been processed
+ */
+ bool Next();
+
+ /*! Stops processing of the command line and returns all remaining
+ arguments as files. The next call to Next() will return false.
+ */
+ void Stop();
+
+ /*! @brief Return the last error that occurred.
+
+ This function must always be called before processing the current
+ option. This function is available only when Next() has returned true.
+ */
+ inline ESOError LastError() const { return m_nLastError; }
+
+ /*! @brief Return the nId value from the options array for the current
+ option.
+
+ This function is available only when Next() has returned true.
+ */
+ inline int OptionId() const { return m_nOptionId; }
+
+ /*! @brief Return the pszArg from the options array for the current
+ option.
+
+ This function is available only when Next() has returned true.
+ */
+ inline const SOCHAR * OptionText() const { return m_pszOptionText; }
+
+ /*! @brief Return the argument for the current option where one exists.
+
+ If there is no argument for the option, this will return NULL.
+ This function is available only when Next() has returned true.
+ */
+ inline SOCHAR * OptionArg() const { return m_pszOptionArg; }
+
+ /*! @brief Validate and return the desired number of arguments.
+
+        This is only valid when OptionId() has returned the ID of an option
+        that is registered as SO_MULTI. It may be called multiple times,
+        each time returning the desired number of arguments. Previously
+        returned argument pointers remain valid.
+
+ If an error occurs during processing, NULL will be returned and
+ the error will be available via LastError().
+
+ @param n Number of arguments to return.
+ */
+ SOCHAR ** MultiArg(int n);
+
+    /*! @brief Return the number of entries in the Files() array.
+
+ After Next() has returned false, this will be the list of files (or
+ otherwise unprocessed arguments).
+ */
+ inline int FileCount() const { return m_argc - m_nLastArg; }
+
+ /*! @brief Return the specified file argument.
+
+ @param n Index of the file to return. This must be between 0
+ and FileCount() - 1;
+ */
+ inline SOCHAR * File(int n) const {
+ SO_ASSERT(n >= 0 && n < FileCount());
+ return m_argv[m_nLastArg + n];
+ }
+
+ /*! @brief Return the array of files. */
+ inline SOCHAR ** Files() const { return &m_argv[m_nLastArg]; }
+
+private:
+ CSimpleOptTempl(const CSimpleOptTempl &); // disabled
+ CSimpleOptTempl & operator=(const CSimpleOptTempl &); // disabled
+
+ SOCHAR PrepareArg(SOCHAR * a_pszString) const;
+ bool NextClumped();
+ void ShuffleArg(int a_nStartIdx, int a_nCount);
+ int LookupOption(const SOCHAR * a_pszOption) const;
+ int CalcMatch(const SOCHAR *a_pszSource, const SOCHAR *a_pszTest) const;
+
+ // Find the '=' character within a string.
+ inline SOCHAR * FindEquals(SOCHAR *s) const {
+ while (*s && *s != (SOCHAR)'=') ++s;
+ return *s ? s : NULL;
+ }
+ bool IsEqual(SOCHAR a_cLeft, SOCHAR a_cRight, int a_nArgType) const;
+
+ inline void Copy(SOCHAR ** ppDst, SOCHAR ** ppSrc, int nCount) const {
+#ifdef SO_MAX_ARGS
+ // keep our promise of no CLIB usage
+ while (nCount-- > 0) *ppDst++ = *ppSrc++;
+#else
+ memcpy(ppDst, ppSrc, nCount * sizeof(SOCHAR*));
+#endif
+ }
+
+private:
+ const SOption * m_rgOptions; //!< pointer to options table
+ int m_nFlags; //!< flags
+ int m_nOptionIdx; //!< current argv option index
+ int m_nOptionId; //!< id of current option (-1 = invalid)
+ int m_nNextOption; //!< index of next option
+ int m_nLastArg; //!< last argument, after this are files
+ int m_argc; //!< argc to process
+ SOCHAR ** m_argv; //!< argv
+ const SOCHAR * m_pszOptionText; //!< curr option text, e.g. "-f"
+ SOCHAR * m_pszOptionArg; //!< curr option arg, e.g. "c:\file.txt"
+ SOCHAR * m_pszClump; //!< clumped single character options
+ SOCHAR m_szShort[3]; //!< temp for clump and combined args
+ ESOError m_nLastError; //!< error status from the last call
+ SOCHAR ** m_rgShuffleBuf; //!< shuffle buffer for large argc
+};
+
+// ---------------------------------------------------------------------------
+// IMPLEMENTATION
+// ---------------------------------------------------------------------------
+
+template<class SOCHAR>
+bool
+CSimpleOptTempl<SOCHAR>::Init(
+ int a_argc,
+ SOCHAR * a_argv[],
+ const SOption * a_rgOptions,
+ int a_nFlags
+ )
+{
+ m_argc = a_argc;
+ m_nLastArg = a_argc;
+ m_argv = a_argv;
+ m_rgOptions = a_rgOptions;
+ m_nLastError = SO_SUCCESS;
+ m_nOptionIdx = 0;
+ m_nOptionId = -1;
+ m_pszOptionText = NULL;
+ m_pszOptionArg = NULL;
+ m_nNextOption = (a_nFlags & SO_O_USEALL) ? 0 : 1;
+ m_szShort[0] = (SOCHAR)'-';
+ m_szShort[2] = (SOCHAR)'\0';
+ m_nFlags = a_nFlags;
+ m_pszClump = NULL;
+
+#ifdef SO_MAX_ARGS
+ if (m_argc > SO_MAX_ARGS) {
+ m_nLastError = SO_ARG_INVALID_DATA;
+ m_nLastArg = 0;
+ return false;
+ }
+#else
+ if (m_rgShuffleBuf) {
+ free(m_rgShuffleBuf);
+ }
+ if (m_argc > SO_STATICBUF) {
+ m_rgShuffleBuf = (SOCHAR**) malloc(sizeof(SOCHAR*) * m_argc);
+ if (!m_rgShuffleBuf) {
+ return false;
+ }
+ }
+#endif
+
+ return true;
+}
+
+template<class SOCHAR>
+bool
+CSimpleOptTempl<SOCHAR>::Next()
+{
+#ifdef SO_MAX_ARGS
+ if (m_argc > SO_MAX_ARGS) {
+ SO_ASSERT(!"Too many args! Check the return value of Init()!");
+ return false;
+ }
+#endif
+
+ // process a clumped option string if appropriate
+ if (m_pszClump && *m_pszClump) {
+ // silently discard invalid clumped option
+ bool bIsValid = NextClumped();
+ while (*m_pszClump && !bIsValid && HasFlag(SO_O_NOERR)) {
+ bIsValid = NextClumped();
+ }
+
+ // return this option if valid or we are returning errors
+ if (bIsValid || !HasFlag(SO_O_NOERR)) {
+ return true;
+ }
+ }
+ SO_ASSERT(!m_pszClump || !*m_pszClump);
+ m_pszClump = NULL;
+
+ // init for the next option
+ m_nOptionIdx = m_nNextOption;
+ m_nOptionId = -1;
+ m_pszOptionText = NULL;
+ m_pszOptionArg = NULL;
+ m_nLastError = SO_SUCCESS;
+
+ // find the next option
+ SOCHAR cFirst;
+ int nTableIdx = -1;
+ int nOptIdx = m_nOptionIdx;
+ while (nTableIdx < 0 && nOptIdx < m_nLastArg) {
+ SOCHAR * pszArg = m_argv[nOptIdx];
+ m_pszOptionArg = NULL;
+
+ // find this option in the options table
+ cFirst = PrepareArg(pszArg);
+ if (pszArg[0] == (SOCHAR)'-') {
+ // find any combined argument string and remove equals sign
+ m_pszOptionArg = FindEquals(pszArg);
+ if (m_pszOptionArg) {
+ *m_pszOptionArg++ = (SOCHAR)'\0';
+ }
+ }
+ nTableIdx = LookupOption(pszArg);
+
+ // if we didn't find this option but if it is a short form
+ // option then we try the alternative forms
+ if (nTableIdx < 0
+ && !m_pszOptionArg
+ && pszArg[0] == (SOCHAR)'-'
+ && pszArg[1]
+ && pszArg[1] != (SOCHAR)'-'
+ && pszArg[2])
+ {
+ // test for a short-form with argument if appropriate
+ if (HasFlag(SO_O_SHORTARG)) {
+ m_szShort[1] = pszArg[1];
+ int nIdx = LookupOption(m_szShort);
+ if (nIdx >= 0
+ && (m_rgOptions[nIdx].nArgType == SO_REQ_CMB
+ || m_rgOptions[nIdx].nArgType == SO_OPT))
+ {
+ m_pszOptionArg = &pszArg[2];
+ pszArg = m_szShort;
+ nTableIdx = nIdx;
+ }
+ }
+
+ // test for a clumped short-form option string and we didn't
+ // match on the short-form argument above
+ if (nTableIdx < 0 && HasFlag(SO_O_CLUMP)) {
+ m_pszClump = &pszArg[1];
+ ++m_nNextOption;
+ if (nOptIdx > m_nOptionIdx) {
+ ShuffleArg(m_nOptionIdx, nOptIdx - m_nOptionIdx);
+ }
+ return Next();
+ }
+ }
+
+ // The option wasn't found. If it starts with a switch character
+ // and we are not suppressing errors for invalid options then it
+ // is reported as an error, otherwise it is data.
+ if (nTableIdx < 0) {
+ if (!HasFlag(SO_O_NOERR) && pszArg[0] == (SOCHAR)'-') {
+ m_pszOptionText = pszArg;
+ break;
+ }
+
+ pszArg[0] = cFirst;
+ ++nOptIdx;
+ if (m_pszOptionArg) {
+ *(--m_pszOptionArg) = (SOCHAR)'=';
+ }
+ }
+ }
+
+ // end of options
+ if (nOptIdx >= m_nLastArg) {
+ if (nOptIdx > m_nOptionIdx) {
+ ShuffleArg(m_nOptionIdx, nOptIdx - m_nOptionIdx);
+ }
+ return false;
+ }
+ ++m_nNextOption;
+
+ // get the option id
+ ESOArgType nArgType = SO_NONE;
+ if (nTableIdx < 0) {
+ m_nLastError = (ESOError) nTableIdx; // error code
+ }
+ else {
+ m_nOptionId = m_rgOptions[nTableIdx].nId;
+ m_pszOptionText = m_rgOptions[nTableIdx].pszArg;
+
+ // ensure that the arg type is valid
+ nArgType = m_rgOptions[nTableIdx].nArgType;
+ switch (nArgType) {
+ case SO_NONE:
+ if (m_pszOptionArg) {
+ m_nLastError = SO_ARG_INVALID;
+ }
+ break;
+
+ case SO_REQ_SEP:
+ if (m_pszOptionArg) {
+ // they wanted separate args, but we got a combined one,
+ // unless we are pedantic, just accept it.
+ if (HasFlag(SO_O_PEDANTIC)) {
+ m_nLastError = SO_ARG_INVALID_TYPE;
+ }
+ }
+ // more processing after we shuffle
+ break;
+
+ case SO_REQ_CMB:
+ if (!m_pszOptionArg) {
+ m_nLastError = SO_ARG_MISSING;
+ }
+ break;
+
+ case SO_OPT:
+ // nothing to do
+ break;
+
+ case SO_MULTI:
+ // nothing to do. Caller must now check for valid arguments
+            // using MultiArg()
+ break;
+ }
+ }
+
+ // shuffle the files out of the way
+ if (nOptIdx > m_nOptionIdx) {
+ ShuffleArg(m_nOptionIdx, nOptIdx - m_nOptionIdx);
+ }
+
+ // we need to return the separate arg if required, just re-use the
+ // multi-arg code because it all does the same thing
+ if ( nArgType == SO_REQ_SEP
+ && !m_pszOptionArg
+ && m_nLastError == SO_SUCCESS)
+ {
+ SOCHAR ** ppArgs = MultiArg(1);
+ if (ppArgs) {
+ m_pszOptionArg = *ppArgs;
+ }
+ }
+
+ return true;
+}
+
+template<class SOCHAR>
+void
+CSimpleOptTempl<SOCHAR>::Stop()
+{
+ if (m_nNextOption < m_nLastArg) {
+ ShuffleArg(m_nNextOption, m_nLastArg - m_nNextOption);
+ }
+}
+
+template<class SOCHAR>
+SOCHAR
+CSimpleOptTempl<SOCHAR>::PrepareArg(
+ SOCHAR * a_pszString
+ ) const
+{
+#ifdef _WIN32
+ // On Windows we can accept the forward slash as a single character
+ // option delimiter, but it cannot replace the '-' option used to
+ // denote stdin. On Un*x paths may start with slash so it may not
+ // be used to start an option.
+ if (!HasFlag(SO_O_NOSLASH)
+ && a_pszString[0] == (SOCHAR)'/'
+ && a_pszString[1]
+ && a_pszString[1] != (SOCHAR)'-')
+ {
+ a_pszString[0] = (SOCHAR)'-';
+ return (SOCHAR)'/';
+ }
+#endif
+ return a_pszString[0];
+}
+
+template<class SOCHAR>
+bool
+CSimpleOptTempl<SOCHAR>::NextClumped()
+{
+ // prepare for the next clumped option
+ m_szShort[1] = *m_pszClump++;
+ m_nOptionId = -1;
+ m_pszOptionText = NULL;
+ m_pszOptionArg = NULL;
+ m_nLastError = SO_SUCCESS;
+
+ // lookup this option, ensure that we are using exact matching
+ int nSavedFlags = m_nFlags;
+ m_nFlags = SO_O_EXACT;
+ int nTableIdx = LookupOption(m_szShort);
+ m_nFlags = nSavedFlags;
+
+ // unknown option
+ if (nTableIdx < 0) {
+ m_nLastError = (ESOError) nTableIdx; // error code
+ return false;
+ }
+
+ // valid option
+ m_pszOptionText = m_rgOptions[nTableIdx].pszArg;
+ ESOArgType nArgType = m_rgOptions[nTableIdx].nArgType;
+ if (nArgType == SO_NONE) {
+ m_nOptionId = m_rgOptions[nTableIdx].nId;
+ return true;
+ }
+
+ if (nArgType == SO_REQ_CMB && *m_pszClump) {
+ m_nOptionId = m_rgOptions[nTableIdx].nId;
+ m_pszOptionArg = m_pszClump;
+ while (*m_pszClump) ++m_pszClump; // must point to an empty string
+ return true;
+ }
+
+ // invalid option as it requires an argument
+ m_nLastError = SO_ARG_MISSING;
+ return true;
+}
+
+// Shuffle arguments to the end of the argv array.
+//
+// For example:
+// argv[] = { "0", "1", "2", "3", "4", "5", "6", "7", "8" };
+//
+// ShuffleArg(1, 1) = { "0", "2", "3", "4", "5", "6", "7", "8", "1" };
+// ShuffleArg(5, 2) = { "0", "1", "2", "3", "4", "7", "8", "5", "6" };
+// ShuffleArg(2, 4) = { "0", "1", "6", "7", "8", "2", "3", "4", "5" };
+template<class SOCHAR>
+void
+CSimpleOptTempl<SOCHAR>::ShuffleArg(
+ int a_nStartIdx,
+ int a_nCount
+ )
+{
+ SOCHAR * staticBuf[SO_STATICBUF];
+ SOCHAR ** buf = m_rgShuffleBuf ? m_rgShuffleBuf : staticBuf;
+ int nTail = m_argc - a_nStartIdx - a_nCount;
+
+ // make a copy of the elements to be moved
+ Copy(buf, m_argv + a_nStartIdx, a_nCount);
+
+ // move the tail down
+ Copy(m_argv + a_nStartIdx, m_argv + a_nStartIdx + a_nCount, nTail);
+
+ // append the moved elements to the tail
+ Copy(m_argv + a_nStartIdx + nTail, buf, a_nCount);
+
+ // update the index of the last unshuffled arg
+ m_nLastArg -= a_nCount;
+}
+
+// match on the long format strings. partial matches will be
+// accepted only if that feature is enabled.
+template<class SOCHAR>
+int
+CSimpleOptTempl<SOCHAR>::LookupOption(
+ const SOCHAR * a_pszOption
+ ) const
+{
+ int nBestMatch = -1; // index of best match so far
+ int nBestMatchLen = 0; // matching characters of best match
+ int nLastMatchLen = 0; // matching characters of last best match
+
+ for (int n = 0; m_rgOptions[n].nId >= 0; ++n) {
+ // the option table must use hyphens as the option character,
+ // the slash character is converted to a hyphen for testing.
+ SO_ASSERT(m_rgOptions[n].pszArg[0] != (SOCHAR)'/');
+
+ int nMatchLen = CalcMatch(m_rgOptions[n].pszArg, a_pszOption);
+ if (nMatchLen == -1) {
+ return n;
+ }
+ if (nMatchLen > 0 && nMatchLen >= nBestMatchLen) {
+ nLastMatchLen = nBestMatchLen;
+ nBestMatchLen = nMatchLen;
+ nBestMatch = n;
+ }
+ }
+
+ // only partial matches or no match gets to here, ensure that we
+ // don't return a partial match unless it is a clear winner
+ if (HasFlag(SO_O_EXACT) || nBestMatch == -1) {
+ return SO_OPT_INVALID;
+ }
+ return (nBestMatchLen > nLastMatchLen) ? nBestMatch : SO_OPT_MULTIPLE;
+}
+
+// calculate the number of characters that match (case-sensitive)
+// 0 = no match, > 0 == number of characters, -1 == perfect match
+template<class SOCHAR>
+int
+CSimpleOptTempl<SOCHAR>::CalcMatch(
+ const SOCHAR * a_pszSource,
+ const SOCHAR * a_pszTest
+ ) const
+{
+ if (!a_pszSource || !a_pszTest) {
+ return 0;
+ }
+
+ // determine the argument type
+ int nArgType = SO_O_ICASE_LONG;
+ if (a_pszSource[0] != '-') {
+ nArgType = SO_O_ICASE_WORD;
+ }
+ else if (a_pszSource[1] != '-' && !a_pszSource[2]) {
+ nArgType = SO_O_ICASE_SHORT;
+ }
+
+ // match and skip leading hyphens
+ while (*a_pszSource == (SOCHAR)'-' && *a_pszSource == *a_pszTest) {
+ ++a_pszSource;
+ ++a_pszTest;
+ }
+ if (*a_pszSource == (SOCHAR)'-' || *a_pszTest == (SOCHAR)'-') {
+ return 0;
+ }
+
+ // find matching number of characters in the strings
+ int nLen = 0;
+ while (*a_pszSource && IsEqual(*a_pszSource, *a_pszTest, nArgType)) {
+ ++a_pszSource;
+ ++a_pszTest;
+ ++nLen;
+ }
+
+ // if we have exhausted the source...
+ if (!*a_pszSource) {
+ // and the test strings, then it's a perfect match
+ if (!*a_pszTest) {
+ return -1;
+ }
+
+ // otherwise the match failed as the test is longer than
+ // the source. i.e. "--mant" will not match the option "--man".
+ return 0;
+ }
+
+ // if we haven't exhausted the test string then it is not a match
+ // i.e. "--mantle" will not best-fit match to "--mandate" at all.
+ if (*a_pszTest) {
+ return 0;
+ }
+
+ // partial match to the current length of the test string
+ return nLen;
+}
+
+template<class SOCHAR>
+bool
+CSimpleOptTempl<SOCHAR>::IsEqual(
+ SOCHAR a_cLeft,
+ SOCHAR a_cRight,
+ int a_nArgType
+ ) const
+{
+ // if this matches then we are doing case-insensitive matching
+ if (m_nFlags & a_nArgType) {
+ if (a_cLeft >= 'A' && a_cLeft <= 'Z') a_cLeft += 'a' - 'A';
+ if (a_cRight >= 'A' && a_cRight <= 'Z') a_cRight += 'a' - 'A';
+ }
+ return a_cLeft == a_cRight;
+}
+
+// Return a pointer to the next a_nCount arguments and advance past them.
+// Returns NULL (with LastError() set) if there are not enough arguments or
+// one of them looks like another option (unless SO_O_NOERR is set).
+template<class SOCHAR>
+SOCHAR **
+CSimpleOptTempl<SOCHAR>::MultiArg(
+ int a_nCount
+ )
+{
+ // ensure we have enough arguments
+ if (m_nNextOption + a_nCount > m_nLastArg) {
+ m_nLastError = SO_ARG_MISSING;
+ return NULL;
+ }
+
+ // our argument array
+ SOCHAR ** rgpszArg = &m_argv[m_nNextOption];
+
+    // Ensure that each of the following doesn't start with a switch character.
+ // Only make this check if we are returning errors for unknown arguments.
+ if (!HasFlag(SO_O_NOERR)) {
+ for (int n = 0; n < a_nCount; ++n) {
+ SOCHAR ch = PrepareArg(rgpszArg[n]);
+ if (rgpszArg[n][0] == (SOCHAR)'-') {
+ rgpszArg[n][0] = ch;
+ m_nLastError = SO_ARG_INVALID_DATA;
+ return NULL;
+ }
+ rgpszArg[n][0] = ch;
+ }
+ }
+
+ // all good
+ m_nNextOption += a_nCount;
+ return rgpszArg;
+}
+
+
+// ---------------------------------------------------------------------------
+// TYPE DEFINITIONS
+// ---------------------------------------------------------------------------
+
+/*! @brief ASCII/MBCS version of CSimpleOpt */
+typedef CSimpleOptTempl<char> CSimpleOptA;
+
+/*! @brief wchar_t version of CSimpleOpt */
+typedef CSimpleOptTempl<wchar_t> CSimpleOptW;
+
+#if defined(_UNICODE)
+/*! @brief TCHAR version dependent on if _UNICODE is defined */
+# define CSimpleOpt CSimpleOptW
+#else
+/*! @brief TCHAR version dependent on if _UNICODE is defined */
+# define CSimpleOpt CSimpleOptA
+#endif
+
+#endif // INCLUDED_SimpleOpt
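
The usage section above walks through flag and separate-argument options; the
sketch below covers the SO_MULTI / MultiArg() pattern for the
"--multi N ARG-1 ... ARG-N" form. OPT_MULTI, the count-first convention and
the error handling are illustrative assumptions:

    #include <stdio.h>
    #include <stdlib.h>
    #include "SimpleOpt.h"

    enum { OPT_MULTI };

    CSimpleOptA::SOption g_rgOptions[] = {
        { OPT_MULTI, "--multi", SO_MULTI },
        SO_END_OF_OPTIONS
    };

    int main(int argc, char *argv[]) {
        CSimpleOptA args(argc, argv, g_rgOptions);
        while (args.Next()) {
            if (args.LastError() != SO_SUCCESS) {
                fprintf(stderr, "invalid argument, error %d\n", (int)args.LastError());
                continue;
            }
            if (args.OptionId() == OPT_MULTI) {
                char **ppCount = args.MultiArg(1);       // first value is the count
                if (!ppCount) continue;                  // error, see LastError()
                int nCount = atoi(ppCount[0]);
                char **ppArgs = args.MultiArg(nCount);   // then fetch that many values
                if (!ppArgs) continue;
                for (int n = 0; n < nCount; ++n) {
                    printf("arg %d = %s\n", n, ppArgs[n]);
                }
            }
        }
        return 0;
    }
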
diff --git a/src/common/basic_utils.cpp b/src/common/basic_utils.cpp
new file mode 100755
index 00000000..1cd5ce8f
--- /dev/null
+++ b/src/common/basic_utils.cpp
@@ -0,0 +1,163 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#include "basic_utils.h"
+#include <ctype.h>
+#include <stdio.h>
+#include <string>
+
+bool utl_is_file_exists (const std::string& name) {
+ if (FILE *file = fopen(name.c_str(), "r")) {
+ fclose(file);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
+void utl_DumpBuffer(FILE* fp,void * src, unsigned int size,int offset) {
+ unsigned int i;
+ for ( i=0; i<size;i++ ) {
+ if ( (!(i%16)) && (i!=0) ) fprintf(fp,"]\n");
+ if ( !(i%4) && !(i%16) ) fprintf(fp,"[");
+ if ( !(i%4) && (i%16) ) fprintf(fp,"] [");
+ fprintf(fp,"%02x",((unsigned char *)src)[i + offset]);
+ if ( (i+1)%4 ) fprintf(fp," ");
+ }
+ for ( ;i%4;i++ )
+ fprintf(fp," ");
+ fprintf(fp,"]");
+
+ fprintf(fp,"\n");;
+}
+
+
+void utl_DumpChar(FILE* fd,
+ void * src,
+ unsigned int eln,
+ unsigned int width
+ ){
+ int size=eln*width;
+ unsigned char * p=(unsigned char *)src;
+ int i;
+ fprintf(fd," - ");
+ for (i=0; i<size;i++) {
+ if ( isprint(*p) ) {
+ fprintf(fd,"%c",*p);
+ }else{
+ fprintf(fd,"%c",'.');
+ }
+ p++;
+ }
+}
+
+void utl_DumpBufferLine(FILE* fd,
+ void * src,
+ int offset,
+ unsigned int eln,
+ unsigned int width ,
+ unsigned int mask){
+ unsigned char * p=(unsigned char *)src;
+ uint32 addr;
+
+ if ( mask & SHOW_BUFFER_ADDR_EN){
+ addr=offset;
+ fprintf(fd,"%08x: ",(int)addr);
+ }
+ int i;
+ for (i=0; i<(int)eln; i++) {
+ switch (width) {
+ case 1:
+ fprintf(fd,"%02x ",*p);
+ p++;
+ break;
+ case 2:
+ fprintf(fd,"%04x ",*((uint16 *)p));
+ p+=2;
+ break;
+ case 4:
+ fprintf(fd,"%08x ",*((int *)p));
+ p+=4;
+ break;
+ case 8:
+ fprintf(fd,"%08x",*((int *)p));
+ fprintf(fd,"%08x ",*((int *)(p+4)));
+ p+=8;
+ break;
+ }
+ }
+ if (mask & SHOW_BUFFER_CHAR) {
+ utl_DumpChar(fd, src,eln,width);
+ }
+ fprintf(fd,"\n");
+}
+
+void utl_DumpBuffer2(FILE* fd,
+ void * src,
+ unsigned int size, //buffer size
+ unsigned int width ,
+ unsigned int width_line ,
+ unsigned int mask
+ ) {
+ if (!( (width==1) || (width==2) || (width==4) || (width==8) )){
+ width=1;
+ }
+
+ int nlen=(size)/(width_line );
+ if ( ( (size % width_line))!=0 ) {
+ nlen++;
+ }
+ int i;
+ char *p=(char *)src;
+ int offset=0;
+
+ if (mask & SHOW_BUFFER_ADDR_EN){
+ if (mask & SHOW_BUFFER_ADDR) {
+ offset=(int)((uintptr_t)p);
+ }else{
+ offset=0;
+ }
+ }
+ unsigned int eln_w;
+ int len_exist=size;
+
+ for (i=0; i<nlen; i++) {
+ if ( len_exist > (int)(width_line /width) ){
+ eln_w=width_line /width;
+ }else{
+ eln_w=(len_exist+width-1)/width;
+ }
+ utl_DumpBufferLine(fd, p,offset,eln_w,width,mask);
+ p+=width_line;
+ offset+=width_line;
+ len_exist-= width_line;
+ }
+}
+
+
+void TestDump(void){
+
+ char buffer[100];
+ int i;
+ for (i=0;i<100;i++) {
+ buffer[i]=0x61+i;
+ }
+
+
+ utl_DumpBuffer2(stdout,buffer,31,1,4,SHOW_BUFFER_ADDR_EN |SHOW_BUFFER_CHAR);
+}
+
+
diff --git a/src/common/basic_utils.h b/src/common/basic_utils.h
new file mode 100755
index 00000000..4bd208d3
--- /dev/null
+++ b/src/common/basic_utils.h
@@ -0,0 +1,91 @@
+#ifndef _BASIC_UTILS_H
+#define _BASIC_UTILS_H
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "c_common.h"
+#include <stdio.h>
+#include <string>
+
+
+
+/**
+ * the round must be a power of 2, e.g. 2,4,8...
+ *
+ * @param num
+ * @param round
+ * @return
+ */
+inline uint utl_align_up(uint num,uint round){
+ if ((num & ((round-1)) )==0) {
+        //the number is already aligned
+ return(num);
+ }
+ return( (num+round) & (~(round-1)) );
+}
+
+inline uint utl_align_down(uint num,uint round){
+ return( (num) & (~(round-1)) );
+}
+
+
+void utl_DumpBuffer(FILE* fp,void * src, unsigned int size,int offset=0);
+
+
+
+#define SHOW_BUFFER_ADDR_EN 1
+#define SHOW_BUFFER_ADDR 2
+#define SHOW_BUFFER_CHAR 4
+
+#define SHOW_BUFFER_ALL (SHOW_BUFFER_ADDR_EN|SHOW_BUFFER_ADDR|SHOW_BUFFER_CHAR)
+
+void utl_DumpBuffer2(FILE* fd,
+ void * src,
+ unsigned int size, //buffer size
+ unsigned int width ,
+ unsigned int width_line ,
+ unsigned int mask);
+
+
+
+#undef min
+#undef max
+
+template <class T>
+inline const T& utl_min(const T& a, const T& b) {
+ return b < a ? b : a;
+}
+
+template <class T>
+inline const T& utl_max(const T& a, const T& b) {
+ return a < b ? b : a;
+}
+
+template <class T>
+inline void utl_swap(T& a, T& b) {
+ T tmp = a;
+ a = b;
+ b = tmp;
+}
+
+
+bool utl_is_file_exists (const std::string& name) ;
+
+
+#endif
+
+
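
A small worked example of the alignment helpers above, assuming the round
value is a power of two as the header comment requires:

    #include <assert.h>
    #include "basic_utils.h"

    static void align_examples(void) {
        assert(utl_align_up(5, 4)   == 8);   // rounds up to the next multiple of 4
        assert(utl_align_up(8, 4)   == 8);   // already aligned values are unchanged
        assert(utl_align_down(5, 4) == 4);   // rounds down to the previous multiple
        assert(utl_align_down(8, 4) == 8);
    }
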
diff --git a/src/common/bitMan.h b/src/common/bitMan.h
new file mode 100755
index 00000000..ffa05598
--- /dev/null
+++ b/src/common/bitMan.h
@@ -0,0 +1,185 @@
+#ifndef BIT_MAN_H
+#define BIT_MAN_H
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+
+template <class T>
+inline T btGetShift(unsigned int stopbit){
+ return(T)((sizeof(T)*8)-stopbit-1);
+}
+
+// this function returns a mask with 1s from startbit to stopbit
+// bit 0 is the MSB - big endian bit numbering
+// if T is a 32-bit int then bit 31 is the last (LSB)
+template <class T>
+inline T btGetMask(unsigned int startbit,
+ unsigned int stopbit){
+ register T shft=btGetShift<T>(stopbit);
+ return ((T)( (((1<<(stopbit-startbit+1))-1)<<shft)) );
+}
+
+
+// these functions are used for big endian bit numbering
+// e.g. btGetMaskBitBigE(0x80000000,0,0)==1
+// e.g. btGetMaskBitBigE(0xc0000000,0,1)==3
+template <class T>
+inline T btGetMaskBitBigE(T a,
+ int startbit,
+ int stopbit ) {
+ if((sizeof(T) * 8) == (stopbit - startbit + 1))// the case where the mask is the whole data
+ {
+ return a;
+ }
+ else
+ {
+ register T mask=btGetMask<T>(startbit,stopbit);
+ register T shift=btGetShift<T>(stopbit);
+ T result;
+ result=((a & mask) >>shift);
+ return(result);
+ }
+}
+
+inline uint32_t btGetMaskBitBigE32(uint32_t a,
+ int startbit,
+ int stopbit ) {
+ return(btGetMaskBitBigE<uint32_t>(a,startbit,stopbit));
+}
+
+inline unsigned short btGetMaskBitBigE16(uint16_t a,
+ int startbit,
+ int stopbit ) {
+ return(btGetMaskBitBigE<uint16_t>(a,startbit,stopbit));
+}
+
+inline uint8_t btGetMaskBitBigE8(uint8_t a,
+ int startbit,
+ int stopbit ) {
+ return(btGetMaskBitBigE<uint8_t>(a,startbit,stopbit));
+}
+
+
+template <class T>
+inline void btSetMaskBitBigE(T & a,
+ int startbit,
+ int stopbit,
+ T newval) {
+ if((sizeof(T) * 8) == (stopbit - startbit + 1))// the case where the mask is the whole data
+ {
+ a = newval;
+ }
+ else
+ {
+ register T mask=btGetMask<T>(startbit,stopbit);
+ register T shift=btGetShift<T>(stopbit);
+ a=((a & ~mask) | ( (newval <<shift) & mask ) );
+ }
+}
+
+
+
+inline void btSetMaskBitBigE32(uint32_t & a,
+ int startbit,
+ int stopbit,
+ uint32_t newVal
+ ) {
+ btSetMaskBitBigE<uint32_t>(a,startbit,stopbit,newVal);
+}
+
+inline void btSetMaskBitBigE16(uint16_t & a,
+ int startbit,
+ int stopbit,
+ uint16_t newVal ) {
+ btSetMaskBitBigE<uint16_t>(a,startbit,stopbit,newVal);
+}
+
+inline void btSetMaskBitBigE8(uint8_t & a,
+ int startbit,
+ int stopbit,
+ uint8_t newVal ) {
+ btSetMaskBitBigE<uint8_t>(a,startbit,stopbit,newVal);
+}
+
+
+
+template <class T>
+inline T btGetMaskBit(T a,
+ int startbit,
+ int stopbit ) {
+ return(btGetMaskBitBigE<T>(a,(sizeof(T)*8)-1-startbit,(sizeof(T)*8)-1-stopbit));
+}
+
+template <class T>
+inline void btSetMaskBit(T & a,
+ int startbit,
+ int stopbit,
+ T newval) {
+ btSetMaskBitBigE<T>(a,(sizeof(T)*8)-1-startbit,((sizeof(T)*8)-1-stopbit),newval);
+}
+
+
+inline unsigned int btGetMaskBit32(unsigned int a,
+ int startbit,
+ int stopbit ) {
+ return(btGetMaskBit<unsigned int>(a,startbit,stopbit));
+}
+
+inline unsigned short btGetMaskBit16(unsigned short a,
+ int startbit,
+ int stopbit ) {
+ return(btGetMaskBit<unsigned short>(a,startbit,stopbit));
+}
+
+inline uint8_t btGetMaskBit8(uint8_t a,
+ int startbit,
+ int stopbit ) {
+ return(btGetMaskBit<uint8_t>(a,startbit,stopbit));
+}
+
+
+inline void btSetMaskBit32(unsigned int & a,
+ int startbit,
+ int stopbit,
+ unsigned int newVal
+ ) {
+ btSetMaskBit<unsigned int>(a,startbit,stopbit,newVal);
+}
+
+/* for these helpers startbit > stopbit, e.g. startbit = 10, stopbit = 8;
+   internally the bit range is mapped onto the big endian helpers above. */
+inline void btSetMaskBit16(unsigned short & a,
+ int startbit,
+ int stopbit,
+ unsigned short newVal ) {
+ btSetMaskBit<unsigned short>(a,startbit,stopbit,newVal);
+}
+
+inline void btSetMaskBit8(uint8_t & a,
+ int startbit,
+ int stopbit,
+ uint8_t newVal ) {
+ btSetMaskBit<uint8_t>(a,startbit,stopbit,newVal);
+}
+
+#endif
+
+
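
A short sketch of the big endian bit helpers, consistent with the example
values in the comments above; the concrete constants are illustrative:

    #include <assert.h>
    #include <stdint.h>
    #include "bitMan.h"

    static void bitman_examples(void) {
        uint32_t v = 0xC0000000;
        assert(btGetMaskBitBigE32(v, 0, 0) == 1);   // top (most significant) bit
        assert(btGetMaskBitBigE32(v, 0, 1) == 3);   // top two bits

        uint16_t w = 0;
        btSetMaskBitBigE16(w, 0, 3, 0xA);           // write 0b1010 into bits 0..3 (MSB side)
        assert(w == 0xA000);
    }
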
diff --git a/src/common/c_common.h b/src/common/c_common.h
new file mode 100755
index 00000000..d8320aaa
--- /dev/null
+++ b/src/common/c_common.h
@@ -0,0 +1,52 @@
+#ifndef C_COMMON
+#define C_COMMON
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#include <stdint.h>
+#include <assert.h>
+
+typedef uint8_t uint8;
+typedef int8_t int8;
+
+typedef uint16_t uint16;
+typedef int16_t int16;
+
+typedef uint32_t uint32; //32 bit
+typedef int32_t int32;
+
+
+typedef uint32_t uint;
+typedef uint8_t uchar;
+typedef uint16_t ushort;
+
+typedef void* c_pvoid;
+
+#ifndef NULL
+#ifdef __cplusplus
+#define NULL 0
+#else /* __cplusplus */
+#define NULL ((void *)0)
+#endif /* __cplusplus */
+#endif /* NULL */
+
+
+#ifdef _DEBUG
+ #define BP_ASSERT(a) assert(a)
+#else
+ #define BP_ASSERT(a)
+#endif
+
+#endif
diff --git a/src/common/captureFile.cpp b/src/common/captureFile.cpp
new file mode 100755
index 00000000..a4fe78be
--- /dev/null
+++ b/src/common/captureFile.cpp
@@ -0,0 +1,328 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#include "captureFile.h"
+#include "pcap.h"
+#include "erf.h"
+#include "basic_utils.h"
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <math.h>
+
+
+void CPktNsecTimeStamp::Dump(FILE *fd){
+ fprintf(fd,"%.6f [%x:%x]",getNsec(),m_time_sec,m_time_nsec );
+}
+
+
+void * CAlignMalloc::malloc(uint16_t size,uint8_t align){
+ assert(m_p==0);
+ m_p=(char *)::malloc(size+align);
+ uintptr_t v =(uintptr_t)m_p;
+ char *p=(char *)my_utl_align_up( v,align);
+ return (void *)p;
+}
+
+void CAlignMalloc::free(){
+ ::free(m_p);
+ m_p=0;
+}
+
+#define PKT_ALIGN 128
+
+
+CCapPktRaw::CCapPktRaw(CCapPktRaw *obj){
+ flags =0;
+ pkt_cnt=obj->pkt_cnt;
+ pkt_len=obj->pkt_len;
+ time_sec=obj->time_sec;
+ time_nsec=obj->time_nsec;
+ assert ((pkt_len>0) && (pkt_len<MAX_PKT_SIZE) );
+ raw = (char *)m_handle.malloc(pkt_len,PKT_ALIGN);
+ // copy the packet
+ memcpy(raw,obj->raw,pkt_len);
+}
+
+
+CCapPktRaw::CCapPktRaw(int size){
+ pkt_cnt=0;
+ flags =0;
+ pkt_len=size;
+ time_sec=0;
+ time_nsec=0;
+ if (size==0) {
+ raw =0;
+ }else{
+ raw = (char *)m_handle.malloc(size,PKT_ALIGN);
+ memset(raw,0xee,size);
+ }
+
+}
+
+CCapPktRaw::CCapPktRaw(){
+ flags =0;
+ pkt_cnt=0;
+ pkt_len=0;
+ time_sec=0;
+ time_nsec=0;
+ raw = (char *)m_handle.malloc((uint16_t)MAX_PKT_SIZE,PKT_ALIGN);
+ memset(raw,0xee,MAX_PKT_SIZE);
+}
+
+CCapPktRaw::~CCapPktRaw(){
+ if (raw && (getDoNotFree()==false) ) {
+ m_handle.free();
+ }
+}
+
+char * CCapPktRaw::append(uint16_t len){
+ CAlignMalloc h;
+ char * p;
+ char * new_raw = (char *)h.malloc(pkt_len+len,PKT_ALIGN);
+ memcpy(new_raw,raw,pkt_len);
+ m_handle.free();
+ raw=new_raw;
+ p= raw+pkt_len;
+ pkt_len+=len;
+ m_handle.m_p =h.m_p;
+ /* new pointer */
+ return(p);
+}
+
+
+void CCapPktRaw::CloneShalow(CCapPktRaw *obj){
+ pkt_len=obj->pkt_len;
+ raw = obj->raw;
+ setDoNotFree(true);
+}
+
+void CCapPktRaw::Dump(FILE *fd,int verbose){
+ fprintf(fd," =>pkt (%p) %llu , len %d, time [%x:%x] \n",raw,pkt_cnt,pkt_len,time_sec,time_nsec);
+ if (verbose) {
+ utl_DumpBuffer(fd,raw,pkt_len,0);
+ }
+}
+
+
+bool CCapPktRaw::Compare(CCapPktRaw * obj,int dump,double dsec){
+
+ if (pkt_len != obj->pkt_len) {
+ if ( dump ){
+ printf(" ERROR len is not eq \n");
+ }
+ return (false);
+ }
+
+ if ( getInterface() != obj->getInterface() ){
+ printf(" ERROR original packet from if=%d and cur packet from if=%d \n",getInterface(),obj->getInterface());
+ return (false);
+ }
+
+ CPktNsecTimeStamp t1(time_sec,time_nsec);
+ CPktNsecTimeStamp t2(obj->time_sec,obj->time_nsec);
+ if ( t1.diff(t2) > dsec ){
+ if ( dump ){
+ printf(" ERROR diff of 1 msec in time \n");
+ }
+ return (false);
+ }
+
+ if ( memcmp(raw,obj->raw,pkt_len) == 0 ){
+ return (true);
+ }else{
+ if ( dump ){
+ fprintf(stdout," ERROR buffer not the same \n");
+ fprintf(stdout," B1 \n");
+ fprintf(stdout," ---------------\n");
+ utl_DumpBuffer(stdout,raw,pkt_len,0);
+ fprintf(stdout," B2 \n");
+ fprintf(stdout," ---------------\n");
+
+ utl_DumpBuffer(stdout,obj->raw,obj->pkt_len,0);
+ }
+ return (false);
+ }
+}
+
+
+bool CErfCmp::compare(std::string f1, std::string f2 ){
+
+ if ( dump ){
+ printf(" compare %s %s \n",f1.c_str(),f2.c_str());
+ }
+ bool res=true;
+ CCapReaderBase * lp1=CCapReaderFactory::CreateReader((char *)f1.c_str(),0);
+ if (lp1 == 0) {
+ if ( dump ){
+ printf(" ERROR file %s does not exits or not supported \n",(char *)f1.c_str());
+ }
+ return (false);
+ }
+
+ CCapReaderBase * lp2=CCapReaderFactory::CreateReader((char *)f2.c_str(),0);
+ if (lp2 == 0) {
+ delete lp1;
+ if ( dump ){
+ printf(" ERROR file %s does not exits or not supported \n",(char *)f2.c_str());
+ }
+ return (false);
+ }
+
+ CCapPktRaw raw_packet1;
+ bool has_pkt1;
+ CCapPktRaw raw_packet2;
+ bool has_pkt2;
+
+ int pkt_cnt=1;
+ while ( true ) {
+ /* read packet */
+ has_pkt1 = lp1->ReadPacket(&raw_packet1) ;
+ has_pkt2 = lp2->ReadPacket(&raw_packet2) ;
+
+ /* one has finished */
+ if ( !has_pkt1 || !has_pkt2 ) {
+ if (has_pkt1 != has_pkt2 ) {
+ if ( dump ){
+ printf(" ERROR not the same number of packets \n");
+ }
+ res=false;
+ }
+ break;
+ }
+ if (!raw_packet1.Compare(&raw_packet2,true,d_sec) ){
+ res=false;
+ printf(" ERROR in pkt %d \n",pkt_cnt);
+ break;
+ }
+
+
+ pkt_cnt++;
+ }
+
+ delete lp1;
+ delete lp2;
+ return (res);
+}
+
+
+
+/**
+ * Try each supported capture type in turn until one reader accepts the file.
+ * @param name
+ *
+ * @return CCapReaderBase*
+ */
+CCapReaderBase * CCapReaderFactory::CreateReader(char * name, int loops)
+{
+ if (name == NULL) {
+ printf("Got null file name\n");
+ return NULL;
+ }
+
+ /* make sure we have a file */
+ FILE * f = CAP_FOPEN_64(name, "rb");
+ if (f == NULL) {
+ if (errno == ENOENT) {
+ printf("\nERROR: Cap file not found %s\n\n",name);
+ } else {
+ printf("\nERROR: Failed to open cap file '%s' with errno %d\n\n", name, errno);
+ }
+ return NULL;
+ }
+ // close the file
+ fclose(f);
+
+ for (capture_type_e i = LIBPCAP ; i<LAST_TYPE ; i = (capture_type_e(i+1)) )
+ {
+ CCapReaderBase * next = CCapReaderFactory::CreateReaderInstace(i);
+ if (next == NULL || next->Create(name,loops)) {
+ return next;
+ }
+ delete next;
+ }
+
+ printf("\nERROR: file %s format not supported",name);
+ printf("\nERROR: formats supported are LIBPCAP and ERF. other formats are deprecated\n\n");
+
+ return NULL;
+}
+
+CCapReaderBase * CCapReaderFactory::CreateReaderInstace(capture_type_e type)
+{
+ switch(type)
+ {
+ case ERF:
+ return new CErfFileReader();
+ case LIBPCAP:
+ return new LibPCapReader();
+ default:
+ printf("Got unsupported file type\n");
+ return NULL;
+ }
+
+}
+
+
+
+/**
+ * The factory function will create the matching writer instance
+ * according to the type.
+ *
+ * @param type - the capture format
+ * @param name - new file name
+ *
+ * @return CFileWriterBase* - pointer to the writer instance,
+ * or NULL if creation failed for some reason.
+ * The caller should release the instance when it is
+ * no longer needed.
+ */
+CFileWriterBase * CCapWriterFactory::CreateWriter(capture_type_e type ,char * name)
+{
+ if (name == NULL) {
+ return NULL;
+ }
+
+ CFileWriterBase * toRet = CCapWriterFactory::createWriterInsance(type);
+
+ if (toRet) {
+ if (!toRet->Create(name)) {
+ delete toRet;
+ toRet = NULL;
+ }
+ }
+
+ return toRet;
+}
+
+/**
+ * Create instance for writer if type is supported.
+ * @param type
+ *
+ * @return CFileWriterBase*
+ */
+CFileWriterBase * CCapWriterFactory::createWriterInsance(capture_type_e type )
+{
+ switch(type) {
+ case LIBPCAP:
+ return new LibPCapWriter();
+ case ERF:
+ return new CErfFileWriter();
+ // other formats are not supported yet.
+ default:
+ return NULL;
+ }
+}
+
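
A minimal usage sketch for the reader and writer factories added above, assuming the headers from this patch are on the include path; the helper function name and file-name arguments are hypothetical. It copies every packet from any supported capture file into an ERF file.

    #include <stdio.h>
    #include "captureFile.h"

    // Copy every packet from a capture file (any supported format) into an
    // ERF file, reusing one CCapPktRaw buffer for the whole pass.
    int convert_capture_to_erf(char *in_name, char *out_name) {
        CCapReaderBase *reader = CCapReaderFactory::CreateReader(in_name, 0);
        if (reader == NULL) {
            return -1;                       /* missing file or unsupported format */
        }
        CFileWriterBase *writer = CCapWriterFactory::CreateWriter(ERF, out_name);
        if (writer == NULL) {
            delete reader;
            return -1;
        }
        CCapPktRaw pkt;                      /* default ctor allocates MAX_PKT_SIZE */
        while (reader->ReadPacket(&pkt)) {
            writer->write_packet(&pkt);      /* timestamps travel with the packet */
        }
        delete writer;                       /* the caller owns both instances */
        delete reader;
        return 0;
    }
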
diff --git a/src/common/captureFile.h b/src/common/captureFile.h
new file mode 100755
index 00000000..027f1fcf
--- /dev/null
+++ b/src/common/captureFile.h
@@ -0,0 +1,289 @@
+#ifndef __CAPTURE_FILE_H__
+#define __CAPTURE_FILE_H__
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+
+#include "c_common.h"
+#include <stdio.h>
+#include "bitMan.h"
+#include <math.h>
+#include <stdlib.h>
+#include <string>
+#ifdef WIN32
+#pragma warning(disable:4786)
+#endif
+
+
+typedef enum capture_type {
+ LIBPCAP,
+ ERF,
+ LAST_TYPE
+} capture_type_e;
+
+#define MAX_PKT_SIZE (2048)
+
+#define READER_MAX_PACKET_SIZE MAX_PKT_SIZE
+
+class CAlignMalloc {
+public:
+ CAlignMalloc(){
+ m_p=0;
+ }
+ void * malloc(uint16_t size,uint8_t align);
+ void free();
+public:
+ char * m_p;
+};
+
+static inline uintptr_t my_utl_align_up(uintptr_t num,uint16_t round){
+ if ((num & ((round-1)) )==0) {
+ // the number is already aligned
+ return(num);
+ }
+ return( (num+round) & (~(round-1)) );
+}
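
A worked example of the round-up above, with hypothetical numbers: aligning the value 0x1003 up to a 128-byte boundary. CAlignMalloc::malloc() relies on this by over-allocating size+align bytes and returning the first aligned address inside the block.

    #include <stdint.h>
    #include <stdio.h>

    // Same computation as my_utl_align_up(): round num up to a multiple of
    // round (round must be a power of two).
    static uintptr_t align_up(uintptr_t num, uint16_t round) {
        if ((num & (round - 1)) == 0) {
            return num;                      // already aligned
        }
        return (num + round) & (uintptr_t)~(uintptr_t)(round - 1);
    }

    int main() {
        printf("0x%lx\n", (unsigned long)align_up(0x1003, 128));   // prints 0x1080
        printf("0x%lx\n", (unsigned long)align_up(0x1000, 128));   // prints 0x1000
        return 0;
    }
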
+
+
+
+
+class CPktNsecTimeStamp {
+public:
+
+ #define _NSEC_TO_SEC 1000000000.0
+ CPktNsecTimeStamp(){
+ m_time_sec =0;
+ m_time_nsec =0;
+ }
+
+ CPktNsecTimeStamp(uint32_t sec,uint32_t nsec){
+ m_time_sec =sec;
+ m_time_nsec =nsec;
+ }
+ CPktNsecTimeStamp(double nsec){
+ m_time_sec = (uint32_t)floor (nsec);
+ nsec -= m_time_sec;
+ m_time_nsec = (uint32_t)floor(nsec*_NSEC_TO_SEC);
+ }
+
+ double getNsec() const {
+ return ((double)m_time_sec +(double)m_time_nsec/(_NSEC_TO_SEC));
+ }
+
+ double diff(const CPktNsecTimeStamp & obj){
+ return (fabs(getNsec() - obj.getNsec() ) ); /* fabs: plain abs() would truncate the double to an int */
+ }
+
+ void Dump(FILE *fd);
+public:
+ uint32_t m_time_sec;
+ uint32_t m_time_nsec;
+};
+
+
+
+class CCapPktRaw {
+
+public:
+ CCapPktRaw();
+ CCapPktRaw(int size);
+ CCapPktRaw(CCapPktRaw *obj);
+ virtual ~CCapPktRaw();
+
+ uint32_t time_sec;
+ uint32_t time_nsec;
+ char * raw;
+ uint64_t pkt_cnt;
+
+ uint16_t pkt_len;
+private:
+ uint16_t flags;
+ CAlignMalloc m_handle;
+public:
+ double get_time(void) {
+ CPktNsecTimeStamp t1(time_sec,time_nsec);
+ return ( t1.getNsec());
+ }
+ void set_new_time(double new_time){
+ CPktNsecTimeStamp t1(new_time);
+ time_sec =t1.m_time_sec;
+ time_nsec=t1.m_time_nsec;
+ }
+
+ /* enlarge the packet */
+ char * append(uint16_t len);
+
+ void CloneShalow(CCapPktRaw *obj);
+
+ void setInterface(uint8_t _if){
+ btSetMaskBit16(flags,10,8,_if);
+ }
+
+ uint8_t getInterface(){
+ return ((uint8_t)btGetMaskBit16(flags,10,8));
+ }
+
+ void setDoNotFree(bool do_not_free){
+ btSetMaskBit16(flags,0,0,do_not_free?1:0);
+ }
+
+ bool getDoNotFree(){
+ return ( ( btGetMaskBit16(flags,0,0) ? true:false) );
+ }
+
+ bool Compare(CCapPktRaw * obj,int dump,double dsec);
+
+
+public:
+ inline uint16_t getTotalLen(void) {
+ return (pkt_len);
+ }
+ void Dump(FILE *fd,int verbose);
+};
+
+/**
+ * Interface for capture file reader.
+ *
+ */
+class CCapReaderBase
+{
+public:
+
+ virtual ~CCapReaderBase(){}
+
+ virtual bool ReadPacket(CCapPktRaw * lpPacket)=0;
+
+
+ /* By default a reader returns one packet per ReadPacket() call,
+ so the last read always accounts for exactly one packet.
+ */
+ virtual uint32_t get_last_pkt_count() {return 1;}
+
+ /* Rewind the reader to the beginning of the capture.
+ Optional; the default implementation does nothing.
+ */
+ virtual void Rewind() {};
+ /**
+ * open file for reading.
+ */
+ virtual bool Create(char * name, int loops = 0) = 0;
+protected:
+ int m_loops;
+ uint64_t m_file_size;
+};
+
+/**
+ * Factory for creating a reader interface for one of the supported
+ * formats.
+ *
+ */
+class CCapReaderFactory {
+public:
+ /**
+ * The function will try to create the matching reader for the
+ * file format (libpcap, ngsniffer, etc.). Since there is no reliable
+ * connection between a file suffix and its actual format,
+ * the types are simply tried one by one.
+ * @param name - capture file name
+ * @param loops - number of loops over the same capture. Use 0
+ * for a one-time transmission.
+ * @return CCapReaderBase* - pointer to a new instance (allocated
+ * by the function). The caller should release the
+ * instance once it is no longer needed.
+ */
+ static CCapReaderBase * CreateReader(char * name, int loops = 0);
+
+
+private:
+ static CCapReaderBase * CreateReaderInstace(capture_type_e type);
+};
+
+/**
+ * Interface for capture file writer.
+ *
+ */
+class CFileWriterBase {
+
+public:
+
+ virtual ~CFileWriterBase(){};
+ virtual bool Create(char * name) = 0;
+ virtual bool write_packet(CCapPktRaw * lpPacket)=0;
+
+};
+
+
+/**
+ * Factory for creating a capture file writer for one of the
+ * supported formats.
+ *
+ */
+class CCapWriterFactory {
+public:
+
+ /**
+ * The factory function will create the matching writer instance
+ * according to the type.
+ *
+ * @param type - the capture format
+ * @param name - new file name
+ *
+ * @return CFileWriterBase* - pointer to the writer instance,
+ * or NULL if creation failed (or the format is
+ * unsupported). The caller should release the
+ * instance when it is no longer needed.
+ */
+ static CFileWriterBase * CreateWriter(capture_type_e type ,char * name);
+
+private:
+
+ static CFileWriterBase * createWriterInsance(capture_type_e type );
+};
+
+
+#if WIN32
+
+#define CAP_FOPEN_64 fopen
+#define CAP_FSEEK_64 fseek
+#define CAP_FTELL_64 ftell
+
+#else
+
+#define CAP_FOPEN_64 fopen64
+#define CAP_FSEEK_64 fseeko64
+#define CAP_FTELL_64 ftello64
+
+#endif
+
+
+class CErfCmp
+ {
+public:
+ CErfCmp(){
+ dump=false;
+ d_sec=0.001;
+ }
+ bool compare(std::string f1, std::string f2 );
+public:
+ bool dump;
+ double d_sec;
+};
+
+
+
+#endif
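
A small sketch, assuming the header above is on the include path, showing how CPktNsecTimeStamp maps between a sec/nsec pair and the double value that getNsec() actually returns (whole seconds), and how diff() relates to the default CErfCmp::d_sec threshold of 0.001 seconds.

    #include <stdio.h>
    #include "captureFile.h"

    int main() {
        CPktNsecTimeStamp a(10, 500000000);   // 10.5 s stored as sec + nsec
        CPktNsecTimeStamp b(10.5015);         // double ctor splits into sec + nsec
        // Despite its name, getNsec() returns whole seconds as a double.
        printf("a=%.9f b=%.9f diff=%.9f\n", a.getNsec(), b.getNsec(), a.diff(b));
        // diff() here is ~0.0015 s, above the default CErfCmp threshold of
        // 0.001 s, so a comparison run would flag these timestamps as different.
        return 0;
    }
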
diff --git a/src/common/cgen_map.h b/src/common/cgen_map.h
new file mode 100755
index 00000000..e224df4b
--- /dev/null
+++ b/src/common/cgen_map.h
@@ -0,0 +1,96 @@
+#ifndef C_GEN_MAP
+#define C_GEN_MAP
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <stdint.h>
+#include <map>
+#include <string>
+
+template<class KEY, class VAL>
+class CGenericMap {
+public:
+ typedef std::map<KEY, VAL*, std::less<KEY> > gen_map_t;
+ typedef typename gen_map_t::iterator gen_map_iter_t;
+ typedef void (free_map_object_func_t)(VAL *p);
+
+
+ bool Create(void){
+ return(true);
+ }
+ void Delete(){
+ //remove_all();
+ }
+ VAL * remove(KEY key ){
+ VAL *lp = lookup(key);
+ if ( lp ) {
+ m_map.erase(key);
+ return (lp);
+ }else{
+ return(0);
+ }
+ }
+
+
+ void remove_no_lookup(KEY key ){
+ m_map.erase(key);
+ }
+
+
+ VAL * lookup(KEY key ){
+ typename gen_map_t::iterator iter;
+ iter = m_map.find(key);
+ if (iter != m_map.end() ) {
+ return ( (*iter).second );
+ }else{
+ return (( VAL*)0);
+ }
+ }
+ void add(KEY key,VAL * val){
+ m_map.insert(typename gen_map_t::value_type(key,val));
+ }
+
+
+ void remove_all(free_map_object_func_t func){
+ if ( m_map.empty() )
+ return;
+
+ typename gen_map_t::iterator it;
+ for (it= m_map.begin(); it != m_map.end(); ++it) {
+ VAL *lp = it->second;
+ func(lp);
+ }
+ m_map.clear();
+ }
+
+ void dump_all(FILE *fd){
+ typename gen_map_t::iterator it;
+ for (it= m_map.begin(); it != m_map.end(); ++it) {
+ VAL *lp = it->second;
+ lp->Dump(fd);
+ }
+ }
+
+ uint64_t count(void){
+ return ( m_map.size());
+ }
+
+public:
+ gen_map_t m_map;
+};
+
+#endif
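
A short usage sketch for the template above. CFlowInfo and the key value are hypothetical; the mapped type only needs a Dump(FILE*) method because dump_all() calls it, and remove_all() frees values through a user-supplied callback.

    #include <stdio.h>
    #include "cgen_map.h"

    struct CFlowInfo {                        // hypothetical mapped value
        uint32_t pkts;
        void Dump(FILE *fd) { fprintf(fd, "pkts=%u\n", pkts); }
    };

    static void free_flow(CFlowInfo *p) { delete p; }

    int main() {
        CGenericMap<uint32_t, CFlowInfo> flows;
        flows.Create();
        CFlowInfo *f = new CFlowInfo();
        f->pkts = 1;
        flows.add(17, f);                     // key 17 -> heap-allocated value
        CFlowInfo *hit = flows.lookup(17);    // returns NULL when the key is absent
        if (hit) {
            hit->pkts++;
        }
        flows.dump_all(stdout);               // calls Dump() on every stored value
        flows.remove_all(free_flow);          // values are freed by the callback
        flows.Delete();
        return 0;
    }
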
diff --git a/src/common/erf.cpp b/src/common/erf.cpp
new file mode 100755
index 00000000..304f758b
--- /dev/null
+++ b/src/common/erf.cpp
@@ -0,0 +1,454 @@
+/*
+*
+* Copyright (c) 2003 Endace Technology Ltd, Hamilton, New Zealand.
+* All rights reserved.
+*
+* This software and documentation has been developed by Endace Technology Ltd.
+* along with the DAG PCI network capture cards. For further information please
+* visit http://www.endace.com/.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+* 1. Redistributions of source code must retain the above copyright notice,
+* this list of conditions and the following disclaimer.
+*
+* 2. Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in the
+* documentation and/or other materials provided with the distribution.
+*
+* 3. The name of Endace Technology Ltd may not be used to endorse or promote
+* products derived from this software without specific prior written
+* permission.
+*
+* THIS SOFTWARE IS PROVIDED BY ENDACE TECHNOLOGY LTD ``AS IS'' AND ANY EXPRESS
+* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+* EVENT SHALL ENDACE TECHNOLOGY LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+* POSSIBILITY OF SUCH DAMAGE.
+*
+* $Id: erf.c 15544 2005-08-26 19:40:46Z guy $
+*/
+
+/*****
+ * NAME
+ *
+ *
+ * AUTHOR
+ * taken from SCE
+ *
+ * COPYRIGHT
+ * Copyright (c) 2004-2011 by cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * DESCRIPTION
+ *
+ ****/
+
+/*
+ * erf - Endace ERF (Extensible Record Format)
+ *
+ * See
+ *
+ * http://www.endace.com/support/EndaceRecordFormat.pdf
+ */
+
+
+
+#include <stdlib.h>
+#include <string.h>
+#include "erf.h"
+#include "basic_utils.h"
+#include "pal_utl.h"
+
+
+#define MAX_ERF_PACKET READER_MAX_PACKET_SIZE
+
+
+extern long file_seek(FILE *stream, long offset, int whence, int *err);
+#define file_read fread
+#define file_write fwrite
+#define file_close fclose
+extern int file_error(FILE *fh);
+#define file_getc fgetc
+#define file_gets fgets
+#define file_eof feof
+
+
+long file_seek(FILE *stream, long offset, int whence, int *err)
+{
+ long ret;
+
+ ret = CAP_FSEEK_64(stream, offset, whence);
+ if (ret == -1)
+ *err = file_error(stream);
+ return ret;
+}
+
+
+int file_error(FILE *fh)
+{
+ if (ferror(fh))
+ return -1;
+ else
+ return 0;
+}
+
+
+int erf_open(wtap *wth, int *err)
+{
+ guint32 i;
+ int common_type = 0;
+ erf_timestamp_t prevts;
+
+ memset(&prevts, 0, sizeof(prevts));
+
+ int records_for_erf_check = 10;
+
+ /* ERF is a little hard because there's no magic number */
+
+ for (i = 0; i < (guint32)records_for_erf_check; i++) {
+
+ erf_header_t header;
+ guint32 packet_size;
+ erf_timestamp_t ts;
+
+ if (file_read(&header,1,sizeof(header),wth->fh) != sizeof(header)) {
+ if ((*err = file_error(wth->fh)) != 0)
+ return -1;
+ else
+ break; /* eof */
+ }
+
+ packet_size = g_ntohs(header.rlen) - sizeof(header);
+
+ /* fail on invalid record type, decreasing timestamps or non-zero pad-bits */
+ if (header.type == 0 || header.type != TYPE_ETH ||
+ (header.flags & 0xc0) != 0) {
+ return 0;
+ }
+
+ if ((ts = pletohll(&header.ts)) < prevts) {
+ /* reassembled AAL5 records may not be in time order, so allow 1 sec fudge */
+ if (header.type != TYPE_AAL5 || ((prevts-ts)>>32) > 1) {
+ return 0;
+ }
+ }
+ memcpy(&prevts, &ts, sizeof(prevts));
+
+ if (common_type == 0) {
+ common_type = header.type;
+ } else
+ if (common_type > 0 && common_type != header.type) {
+ common_type = -1;
+ }
+
+ if (header.type == TYPE_HDLC_POS ) {
+ // HDLC POS records are not supported
+ return (-1);
+ }
+ if (file_seek(wth->fh, packet_size, SEEK_CUR, err) == -1) {
+ return -1;
+ }
+ }
+
+ if (file_seek(wth->fh, 0L, SEEK_SET, err) == -1) { /* rewind */
+ return -1;
+ }
+ wth->data_offset = 0;
+ // VALID ERF file
+ return 1;
+}
+
+
+int erf_read(wtap *wth,char *p,uint32_t *sec,uint32_t *nsec)
+{
+ erf_header_t header;
+ int common_type = 0;
+ if (file_read(&header,1,sizeof(header),wth->fh) != sizeof(header)) {
+ if (( file_error(wth->fh)) != 0)
+ return -1;
+ else
+ return (0); // end
+ }
+
+ guint32 packet_size = g_ntohs(header.rlen) - sizeof(header);
+
+ /* fail on invalid record type, decreasing timestamps or non-zero pad-bits */
+ if ( header.type != TYPE_ETH ||
+ (header.flags & 0xc0) != 0) {
+ //printf(" ERF header not supported \n");
+ // no valid
+ return -1;
+ }
+
+ if (common_type == 0) {
+ common_type = header.type;
+ } else
+ if (common_type > 0 && common_type != header.type) {
+ common_type = -1;
+ }
+
+
+ if ( ( packet_size >= MAX_ERF_PACKET ) ||
+ (g_ntohs(header.wlen)>MAX_ERF_PACKET) ) {
+ printf(" ERF packet size too big \n");
+ assert(0);
+ return (-1);
+ }
+
+ int err;
+ if (file_seek(wth->fh, 2, SEEK_CUR, &err) == -1) {
+ return -1;
+ }
+ int realpkt_size = packet_size-2;
+
+ if (file_read(p,1,realpkt_size ,wth->fh) == realpkt_size) {
+ guint64 ts = pletohll(&header.ts);
+ *sec = (uint32_t) (ts >> 32);
+ uint32_t frac =(ts &0xffffffff);
+ double usec_frac =(double)frac*(1000000000.0/(4294967296.0));
+ *nsec = (uint32_t) (usec_frac);
+ return (g_ntohs(header.wlen));
+ }else{
+ return (-1);
+ }
+}
+
+
+bool CErfFileWriter::Create(char *file_name){
+ m_fd=CAP_FOPEN_64(file_name,"wb");
+ if (m_fd==0) {
+ printf(" ERROR create file \n");
+ return(false);
+ }
+ m_cnt=0;
+ return(true);
+}
+
+void CErfFileWriter::Delete(){
+ if (m_fd) {
+ fclose(m_fd);
+ m_fd=0;
+ }
+}
+
+
+typedef struct erf_dummy_header_ {
+ uint16_t dummy;
+}erf_dummy_header_t ;
+
+static uint32_t frame_check[20];
+
+bool CErfFileWriter::write_packet(CCapPktRaw * lpPacket){
+ erf_header_t header;
+ erf_dummy_header_t dummy;
+
+ dummy.dummy =0;
+ memset(&header,0,sizeof(erf_header_t));
+ double nsec_frac = 4294967295.9 *(lpPacket->time_nsec /1000000000.0);
+ uint64_t ts= (((uint64_t)lpPacket->time_sec) <<32) +((uint32_t)nsec_frac);
+ header.ts =ts;
+
+ uint16_t size=lpPacket->pkt_len;
+
+ uint16_t total_size=(uint16_t)size+sizeof(erf_header_t)+2+4;
+ uint16_t align = (total_size & 0x7);
+ if (align >0 ) {
+ align = 8-align;
+ }
+
+ header.flags =4+lpPacket->getInterface();
+ header.type =TYPE_ETH;
+ header.wlen = g_ntohs((uint16_t)size+4);
+ header.rlen = g_ntohs(total_size+align);
+
+ int n = fwrite(&header,1,sizeof(header),m_fd);
+ n+= fwrite(&dummy,1,sizeof(dummy),m_fd);
+ n+= fwrite(lpPacket->raw,1,size,m_fd);
+ n+= fwrite(frame_check,1,4+align,m_fd);
+
+ if (n < (int)(total_size+align)) {
+ return false;
+ }
+ return true;
+}
+
+
+
+bool CPcapFileWriter::Create(char *file_name){
+ m_fd=CAP_FOPEN_64(file_name,"wb");
+ if (m_fd==0) {
+ printf(" ERROR create file \n");
+ return(false);
+ }
+ m_cnt=0;
+ return(true);
+}
+
+void CPcapFileWriter::Delete(){
+ if (m_fd) {
+ fclose(m_fd);
+ m_fd=0;
+ }
+}
+
+
+
+
+#define PCAP_MAGIC 0xa1b2c3d4
+#define PCAP_SWAPPED_MAGIC 0xd4c3b2a1
+#define PCAP_MODIFIED_MAGIC 0xa1b2cd34
+#define PCAP_SWAPPED_MODIFIED_MAGIC 0x34cdb2a1
+#define PCAP_NSEC_MAGIC 0xa1b23c4d
+#define PCAP_SWAPPED_NSEC_MAGIC 0x4d3cb2a1
+
+/* "libpcap" file header (minus magic number). */
+struct pcap_hdr {
+ guint32 magic_number;
+ guint16 version_major; /* major version number */
+ guint16 version_minor; /* minor version number */
+ gint32 thiszone; /* GMT to local correction */
+ guint32 sigfigs; /* accuracy of timestamps */
+ guint32 snaplen; /* max length of captured packets, in octets */
+ guint32 network; /* data link type */
+};
+
+/* "libpcap" record header. */
+struct pcaprec_hdr {
+ guint32 ts_sec; /* timestamp seconds */
+ guint32 ts_usec; /* timestamp microseconds (nsecs for PCAP_NSEC_MAGIC) */
+ guint32 incl_len; /* number of octets of packet saved in file */
+ guint32 orig_len; /* actual length of packet */
+};
+
+
+bool CPcapFileWriter::write_packet(CCapPktRaw * lpPacket){
+ if (m_cnt == 0) {
+ pcap_hdr header;
+ header.magic_number = PCAP_NSEC_MAGIC;
+ header.version_major = 0x0002;
+ header.version_minor = 0x0004;
+ header.thiszone = 0;
+ header.sigfigs = 0;
+ header.snaplen = 2000;
+ header.network = 1;
+ fwrite(&header,1,sizeof(header),m_fd);
+ }
+ pcaprec_hdr pkt_header;
+ pkt_header.ts_sec = lpPacket->time_sec ;
+ pkt_header.ts_usec = lpPacket->time_nsec;
+ pkt_header.incl_len = lpPacket->pkt_len;
+ pkt_header.orig_len = lpPacket->pkt_len;
+ fwrite(&pkt_header,1,sizeof(pkt_header),m_fd);
+ fwrite(lpPacket->raw,1,lpPacket->pkt_len,m_fd);
+ m_cnt++;
+ return true;
+}
+
+
+
+#if 0
+ //erf_create(wtap *wth,char *p,uint32_t *sec)
+
+static uint8_t DataPacket0[]={
+
+0x00, 0x50, 0x04, 0xB9 ,0xC8, 0xA0,
+0x00, 0x50, 0x04, 0xB9, 0xC5, 0x83,
+0x08, 0x00,
+
+0x45, 0x10, 0x00, 0x40,
+0x00, 0x00, 0x40, 0x00,
+0x80, 0x06, 0xDD, 0x99,
+
+0x0A, 0x01, 0x04, 0x91,
+0x0A, 0x01, 0x04, 0x90,
+
+0x05, 0x11,
+0x00, 0x50,
+
+0x00, 0x00, 0xF9, 0x00,
+0x00, 0x00, 0x00, 0x00,
+
+0x60, 0x00, 0x20, 0x00,
+0x5C, 0xA2, 0x00, 0x00,
+0x02, 0x04, 0x05, 0xB4,
+0x00, 0x00, 0x76, 0x4A,
+
+0x60, 0x02, 0x20, 0x00,
+0x5C, 0xA2, 0x00, 0x00,
+0x02, 0x04, 0x05, 0xB4,
+0x00, 0x00, 0x76, 0x4A,
+0x60, 0x02, 0x20, 0x00,
+0x5C, 0xA2, 0x00, 0x00,
+0x02, 0x04, 0x05, 0xB4,
+0x00, 0x00, 0x76, 0x4A,
+
+};
+
+
+void test_erf_create(void){
+ CPcapFileWriter erf ;
+ erf.Create("my_test.erf");
+ int i;
+ for (i=0; i<10; i++) {
+ erf.write_packet((char *)&DataPacket0[0],sizeof(DataPacket0));
+ }
+ erf.Delete();
+}
+#endif
+
+
+bool CErfFileReader::Create(char *filename, int loops){
+
+ this->m_loops = loops;
+ m_handle = CAP_FOPEN_64(filename, "rb");
+ if (m_handle == NULL) {
+ fprintf(stderr, "Failed to open file `%s'.\n", filename);
+ return false;
+ }
+
+
+ CAP_FSEEK_64 (m_handle, 0, SEEK_END);
+ m_file_size = CAP_FTELL_64(m_handle);
+ rewind (m_handle);
+
+ wtap wth;
+ memset(&wth,0,sizeof(wtap));
+ int err=0;
+ wth.fh =m_handle;
+ if ( erf_open(&wth, &err)== 1){
+ return (true);
+ }else{
+ return (false);
+ }
+}
+
+void CErfFileReader::Delete(){
+ if (m_handle) {
+ fclose(m_handle);
+ m_handle=0;
+ }
+}
+
+
+bool CErfFileReader::ReadPacket(CCapPktRaw * lpPacket){
+ wtap wth;
+ wth.fh = m_handle;
+ int length;
+ length=erf_read(&wth,lpPacket->raw,&lpPacket->time_sec,
+ &lpPacket->time_nsec
+ );
+ if ( length >0 ) {
+ lpPacket->pkt_len =(uint16_t)length;
+ lpPacket->pkt_cnt++;
+ return (true);
+ }
+ return (false);
+}
+
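
The reader and writer above agree on the ERF 32.32 fixed-point timestamp layout: the high 32 bits are whole seconds and the low 32 bits are a binary fraction of a second. Below is a standalone sketch of the two conversions; the function names are mine, while the constants mirror erf_read() and CErfFileWriter::write_packet().

    #include <stdint.h>
    #include <stdio.h>

    // ERF 32.32 fixed point -> sec/nsec, mirroring erf_read()
    static void erf_ts_split(uint64_t ts, uint32_t *sec, uint32_t *nsec) {
        *sec = (uint32_t)(ts >> 32);
        uint32_t frac = (uint32_t)(ts & 0xffffffff);
        *nsec = (uint32_t)((double)frac * (1000000000.0 / 4294967296.0));
    }

    // sec/nsec -> ERF 32.32 fixed point, mirroring CErfFileWriter::write_packet()
    static uint64_t erf_ts_build(uint32_t sec, uint32_t nsec) {
        double frac = 4294967295.9 * ((double)nsec / 1000000000.0);
        return (((uint64_t)sec) << 32) + (uint32_t)frac;
    }

    int main() {
        uint32_t sec, nsec;
        erf_ts_split(erf_ts_build(12, 500000000), &sec, &nsec);
        printf("sec=%u nsec=%u\n", sec, nsec);   // 12 and ~500000000 (rounding)
        return 0;
    }
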
diff --git a/src/common/erf.h b/src/common/erf.h
new file mode 100755
index 00000000..299bcdc5
--- /dev/null
+++ b/src/common/erf.h
@@ -0,0 +1,257 @@
+/*
+*
+* Copyright (c) 2003 Endace Technology Ltd, Hamilton, New Zealand.
+* All rights reserved.
+*
+* This software and documentation has been developed by Endace Technology Ltd.
+* along with the DAG PCI network capture cards. For further information please
+* visit http://www.endace.com/.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+* 1. Redistributions of source code must retain the above copyright notice,
+* this list of conditions and the following disclaimer.
+*
+* 2. Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in the
+* documentation and/or other materials provided with the distribution.
+*
+* 3. The name of Endace Technology Ltd may not be used to endorse or promote
+* products derived from this software without specific prior written
+* permission.
+*
+* THIS SOFTWARE IS PROVIDED BY ENDACE TECHNOLOGY LTD ``AS IS'' AND ANY EXPRESS
+* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+* EVENT SHALL ENDACE TECHNOLOGY LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+* POSSIBILITY OF SUCH DAMAGE.
+*
+* $Id: erf.h 15544 2005-08-26 19:40:46Z guy $
+*/
+/*****
+ * NAME
+ *
+ *
+ * AUTHOR
+ * taken from SCE
+ *
+ * COPYRIGHT
+ * Copyright (c) 2004-2011 by cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * DESCRIPTION
+ *
+ ****/
+
+
+#ifndef __W_ERF_H__
+#define __W_ERF_H__
+#include "captureFile.h"
+/* Record type defines */
+#define TYPE_LEGACY 0
+#define TYPE_HDLC_POS 1
+#define TYPE_ETH 2
+#define TYPE_ATM 3
+#define TYPE_AAL5 4
+#include <stdio.h>
+
+typedef uint64_t guint64 ;
+typedef uint32_t guint32 ;
+typedef int32_t gint32;
+typedef uint16_t guint16 ;
+typedef uint8_t guint8 ;
+typedef uint8_t gchar;
+
+
+
+
+#define g_htonl PAL_NTOHL
+#define g_htons PAL_NTOHS
+#define g_ntohs PAL_NTOHS
+#define g_ntohl PAL_NTOHL
+
+
+#ifndef pntohs
+#define pntohs(p) ((guint16) \
+ ((guint16)*((const guint8 *)(p)+0)<<8| \
+ (guint16)*((const guint8 *)(p)+1)<<0))
+#endif
+
+#ifndef pntoh24
+#define pntoh24(p) ((guint32)*((const guint8 *)(p)+0)<<16| \
+ (guint32)*((const guint8 *)(p)+1)<<8| \
+ (guint32)*((const guint8 *)(p)+2)<<0)
+#endif
+
+#ifndef pntohl
+#define pntohl(p) ((guint32)*((const guint8 *)(p)+0)<<24| \
+ (guint32)*((const guint8 *)(p)+1)<<16| \
+ (guint32)*((const guint8 *)(p)+2)<<8| \
+ (guint32)*((const guint8 *)(p)+3)<<0)
+#endif
+
+#ifndef pntohll
+#define pntohll(p) ((guint64)*((const guint8 *)(p)+0)<<56| \
+ (guint64)*((const guint8 *)(p)+1)<<48| \
+ (guint64)*((const guint8 *)(p)+2)<<40| \
+ (guint64)*((const guint8 *)(p)+3)<<32| \
+ (guint64)*((const guint8 *)(p)+4)<<24| \
+ (guint64)*((const guint8 *)(p)+5)<<16| \
+ (guint64)*((const guint8 *)(p)+6)<<8| \
+ (guint64)*((const guint8 *)(p)+7)<<0)
+#endif
+
+
+#ifndef phtons
+#define phtons(p) ((guint16) \
+ ((guint16)*((const guint8 *)(p)+0)<<8| \
+ (guint16)*((const guint8 *)(p)+1)<<0))
+#endif
+
+#ifndef phtonl
+#define phtonl(p) ((guint32)*((const guint8 *)(p)+0)<<24| \
+ (guint32)*((const guint8 *)(p)+1)<<16| \
+ (guint32)*((const guint8 *)(p)+2)<<8| \
+ (guint32)*((const guint8 *)(p)+3)<<0)
+#endif
+
+#ifndef pletohs
+#define pletohs(p) ((guint16) \
+ ((guint16)*((const guint8 *)(p)+1)<<8| \
+ (guint16)*((const guint8 *)(p)+0)<<0))
+#endif
+
+#ifndef pletoh24
+#define pletoh24(p) ((guint32)*((const guint8 *)(p)+2)<<16| \
+ (guint32)*((const guint8 *)(p)+1)<<8| \
+ (guint32)*((const guint8 *)(p)+0)<<0)
+#endif
+
+
+#ifndef pletohl
+#define pletohl(p) ((guint32)*((const guint8 *)(p)+3)<<24| \
+ (guint32)*((const guint8 *)(p)+2)<<16| \
+ (guint32)*((const guint8 *)(p)+1)<<8| \
+ (guint32)*((const guint8 *)(p)+0)<<0)
+#endif
+
+
+#ifndef pletohll
+#define pletohll(p) ((guint64)*((const guint8 *)(p)+7)<<56| \
+ (guint64)*((const guint8 *)(p)+6)<<48| \
+ (guint64)*((const guint8 *)(p)+5)<<40| \
+ (guint64)*((const guint8 *)(p)+4)<<32| \
+ (guint64)*((const guint8 *)(p)+3)<<24| \
+ (guint64)*((const guint8 *)(p)+2)<<16| \
+ (guint64)*((const guint8 *)(p)+1)<<8| \
+ (guint64)*((const guint8 *)(p)+0)<<0)
+#endif
+
+
+ /*
+ * The timestamp is 64bit unsigned fixed point little-endian value with
+ * 32 bits for second and 32 bits for fraction.
+ */
+typedef guint64 erf_timestamp_t;
+
+typedef struct erf_record {
+ erf_timestamp_t ts;
+ guint8 type;
+ guint8 flags;
+ guint16 rlen;
+ guint16 lctr;
+ guint16 wlen;
+} erf_header_t;
+
+#define MAX_RECORD_LEN 0x10000 /* 64k */
+#define RECORDS_FOR_ERF_CHECK 3
+#define FCS_BITS 32
+
+#ifndef min
+#define min(a, b) ((a) > (b) ? (b) : (a))
+#endif
+
+/*
+ * ATM snaplength
+ */
+#define ATM_SNAPLEN 48
+
+/*
+ * Size of ATM payload
+ */
+#define ATM_SLEN(h, e) ATM_SNAPLEN
+#define ATM_WLEN(h, e) ATM_SNAPLEN
+
+/*
+ * Size of Ethernet payload
+ */
+#define ETHERNET_WLEN(h, e) (g_htons((h)->wlen))
+#define ETHERNET_SLEN(h, e) min(ETHERNET_WLEN(h, e), g_htons((h)->rlen) - sizeof(*(h)) - 2)
+
+/*
+ * Size of HDLC payload
+ */
+#define HDLC_WLEN(h, e) (g_htons((h)->wlen))
+#define HDLC_SLEN(h, e) min(HDLC_WLEN(h, e), g_htons((h)->rlen) - sizeof(*(h)))
+
+//int erf_open(wtap *wth, int *err, gchar **err_info);
+
+
+
+struct wtap {
+ FILE * fh;
+ int file_type;
+ long data_offset;
+};
+
+int erf_open(wtap *wth, int *err);
+
+int erf_read(wtap *wth,char *p,uint32_t *sec,uint32_t *nsec);
+
+
+
+class CErfFileWriter : public CFileWriterBase {
+public:
+ virtual ~CErfFileWriter(){
+ Delete();
+ }
+ virtual bool Create(char *file_name);
+ void Delete();
+ virtual bool write_packet(CCapPktRaw * lpPacket);
+private:
+ FILE *m_fd;
+ int m_cnt;
+
+};
+
+
+class CPcapFileWriter : CFileWriterBase{
+public:
+ bool Create(char *file_name);
+ void Delete();
+
+ bool write_packet(CCapPktRaw * lpPacket);
+private:
+ FILE *m_fd;
+ int m_cnt;
+};
+
+class CErfFileReader : public CCapReaderBase {
+public:
+ virtual ~CErfFileReader() { Delete();}
+ bool Create(char *filename, int loops = 0);
+ void Delete();
+ virtual bool ReadPacket(CCapPktRaw * lpPacket);
+private:
+ FILE * m_handle;
+};
+
+
+
+#endif /* __W_ERF_H__ */
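
A tiny sketch of the little-endian accessor macros defined above, assuming erf.h and the headers it includes are available on the include path. pletohll() reads a 64-bit ERF timestamp byte by byte, so the result is the same on big- and little-endian hosts.

    #include <stdio.h>
    #include "erf.h"

    int main() {
        // Little-endian encoding of the timestamp 0x0000000c00000001
        guint8 buf[8] = {0x01, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00};
        guint64 ts = pletohll(buf);
        printf("sec=%u frac=%u\n",
               (guint32)(ts >> 32), (guint32)(ts & 0xffffffff));   // sec=12 frac=1
        return 0;
    }
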
diff --git a/src/common/erf_reader.h b/src/common/erf_reader.h
new file mode 100755
index 00000000..a7e57466
--- /dev/null
+++ b/src/common/erf_reader.h
@@ -0,0 +1,27 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef C_ERF_READER
+#define C_ERF_READER
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "captureFile.h"
+
+
+
+#endif
+
diff --git a/src/common/gtest-all.cc b/src/common/gtest-all.cc
new file mode 100755
index 00000000..5b86ed28
--- /dev/null
+++ b/src/common/gtest-all.cc
@@ -0,0 +1,8528 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+// Google C++ Testing Framework (Google Test)
+//
+// Sometimes it's desirable to build Google Test by compiling a single file.
+// This file serves this purpose.
+
+// This line ensures that gtest.h can be compiled on its own, even
+// when it's fused.
+//
+//
+/*****
+ * NAME
+ *
+ *
+ * AUTHOR
+ * google
+ *
+ * COPYRIGHT
+ * Copyright (c) 2004-2011 by cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * DESCRIPTION
+ *
+ ****/
+
+#include <common/gtest.h>
+
+// The following lines pull in the real gtest *.cc files.
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Utilities for testing Google Test itself and code that uses Google Test
+// (e.g. frameworks built on top of Google Test).
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+
+namespace testing {
+
+// This helper class can be used to mock out Google Test failure reporting
+// so that we can test Google Test or code that builds on Google Test.
+//
+// An object of this class appends a TestPartResult object to the
+// TestPartResultArray object given in the constructor whenever a Google Test
+// failure is reported. It can either intercept only failures that are
+// generated in the same thread that created this object or it can intercept
+// all generated failures. The scope of this mock object can be controlled with
+// the second argument to the two arguments constructor.
+class GTEST_API_ ScopedFakeTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ // The two possible mocking modes of this object.
+ enum InterceptMode {
+ INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures.
+ INTERCEPT_ALL_THREADS // Intercepts all failures.
+ };
+
+ // The c'tor sets this object as the test part result reporter used
+ // by Google Test. The 'result' parameter specifies where to report the
+ // results. This reporter will only catch failures generated in the current
+ // thread. DEPRECATED
+ explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);
+
+ // Same as above, but you can choose the interception scope of this object.
+ ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
+ TestPartResultArray* result);
+
+ // The d'tor restores the previous test part result reporter.
+ virtual ~ScopedFakeTestPartResultReporter();
+
+ // Appends the TestPartResult object to the TestPartResultArray
+ // received in the constructor.
+ //
+ // This method is from the TestPartResultReporterInterface
+ // interface.
+ virtual void ReportTestPartResult(const TestPartResult& result);
+ private:
+ void Init();
+
+ const InterceptMode intercept_mode_;
+ TestPartResultReporterInterface* old_reporter_;
+ TestPartResultArray* const result_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
+};
+
+namespace internal {
+
+// A helper class for implementing EXPECT_FATAL_FAILURE() and
+// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring. If that's not the case, a
+// non-fatal failure will be generated.
+class GTEST_API_ SingleFailureChecker {
+ public:
+ // The constructor remembers the arguments.
+ SingleFailureChecker(const TestPartResultArray* results,
+ TestPartResult::Type type,
+ const char* substr);
+ ~SingleFailureChecker();
+ private:
+ const TestPartResultArray* const results_;
+ const TestPartResult::Type type_;
+ const String substr_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
+};
+
+} // namespace internal
+
+} // namespace testing
+
+// A set of macros for testing Google Test assertions or code that's expected
+// to generate Google Test fatal failures. It verifies that the given
+// statement will cause exactly one fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_FATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+// - 'statement' cannot reference local non-static variables or
+// non-static members of the current object.
+// - 'statement' cannot return a value.
+// - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
+// gtest_unittest.cc will fail to compile if we do that.
+#define EXPECT_FATAL_FAILURE(statement, substr) \
+ do { \
+ class GTestExpectFatalFailureHelper {\
+ public:\
+ static void Execute() { statement; }\
+ };\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+ INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+ GTestExpectFatalFailureHelper::Execute();\
+ }\
+ } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+ do { \
+ class GTestExpectFatalFailureHelper {\
+ public:\
+ static void Execute() { statement; }\
+ };\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+ INTERCEPT_ALL_THREADS, &gtest_failures);\
+ GTestExpectFatalFailureHelper::Execute();\
+ }\
+ } while (::testing::internal::AlwaysFalse())
+
+// A macro for testing Google Test assertions or code that's expected to
+// generate Google Test non-fatal failures. It asserts that the given
+// statement will cause exactly one non-fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// 'statement' is allowed to reference local variables and members of
+// the current object.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+// - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works. If we do that, the code won't compile when the user gives
+// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
+// expands to code containing an unprotected comma. The
+// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
+// catches that.
+//
+// For the same reason, we have to write
+// if (::testing::internal::AlwaysTrue()) { statement; }
+// instead of
+// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+// to avoid an MSVC warning on unreachable code.
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \
+ do {\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+ (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+ INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+ if (::testing::internal::AlwaysTrue()) { statement; }\
+ }\
+ } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+ do {\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+ (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS,\
+ &gtest_failures);\
+ if (::testing::internal::AlwaysTrue()) { statement; }\
+ }\
+ } while (::testing::internal::AlwaysFalse())
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
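
A small usage sketch for the macros documented above; the test and suite names are hypothetical, and the substring only has to occur somewhere in the failure text. It checks that a failing EXPECT_EQ produces exactly one non-fatal failure.

    #include <common/gtest.h>

    // Verify that a failing EXPECT_EQ is reported as exactly one non-fatal
    // failure whose message mentions the expected value.
    TEST(GtestSpiExample, CatchesNonFatalFailure) {
        EXPECT_NONFATAL_FAILURE({
            EXPECT_EQ(1, 2);
        }, "Expected");
    }
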
+
+#include <ctype.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <ostream>
+#include <sstream>
+#include <vector>
+
+#if GTEST_OS_LINUX
+
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+#define GTEST_HAS_GETTIMEOFDAY_ 1
+
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+// Declares vsnprintf(). This header is not available on Windows.
+#include <strings.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <string>
+#include <vector>
+
+#elif GTEST_OS_SYMBIAN
+#define GTEST_HAS_GETTIMEOFDAY_ 1
+#include <sys/time.h> // NOLINT
+
+#elif GTEST_OS_ZOS
+#define GTEST_HAS_GETTIMEOFDAY_ 1
+#include <sys/time.h> // NOLINT
+
+// On z/OS we additionally need strings.h for strcasecmp.
+#include <strings.h> // NOLINT
+
+#elif GTEST_OS_WINDOWS_MOBILE // We are on Windows CE.
+
+#include <windows.h> // NOLINT
+
+#elif GTEST_OS_WINDOWS // We are on Windows proper.
+
+#include <io.h> // NOLINT
+#include <sys/timeb.h> // NOLINT
+#include <sys/types.h> // NOLINT
+#include <sys/stat.h> // NOLINT
+
+#if GTEST_OS_WINDOWS_MINGW
+// MinGW has gettimeofday() but not _ftime64().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+// TODO(kenton@google.com): There are other ways to get the time on
+// Windows, like GetTickCount() or GetSystemTimeAsFileTime(). MinGW
+// supports these. consider using them instead.
+#define GTEST_HAS_GETTIMEOFDAY_ 1
+#include <sys/time.h> // NOLINT
+#endif // GTEST_OS_WINDOWS_MINGW
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+#include <windows.h> // NOLINT
+
+#else
+
+// Assume other platforms have gettimeofday().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+#define GTEST_HAS_GETTIMEOFDAY_ 1
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+#include <sys/time.h> // NOLINT
+#include <unistd.h> // NOLINT
+
+#endif // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+#include <stdexcept>
+#endif
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Utility functions and classes used by the Google C++ testing framework.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// This file contains purely Google Test's internal implementation. Please
+// DO NOT #INCLUDE IT IN A USER PROGRAM.
+
+#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_
+#define GTEST_SRC_GTEST_INTERNAL_INL_H_
+
+// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is
+// part of Google Test's implementation; otherwise it's undefined.
+#if !GTEST_IMPLEMENTATION_
+// A user is trying to include this from his code - just say no.
+#error "gtest-internal-inl.h is part of Google Test's internal implementation."
+#error "It must not be included except by Google Test itself."
+#endif // GTEST_IMPLEMENTATION_
+
+#ifndef _WIN32_WCE
+#include <errno.h>
+#endif // !_WIN32_WCE
+#include <stddef.h>
+#include <stdlib.h> // For strtoll/_strtoul64/malloc/free.
+#include <string.h> // For memmove.
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+
+#if GTEST_OS_WINDOWS
+#include <windows.h> // For DWORD.
+#endif // GTEST_OS_WINDOWS
+
+
+namespace testing {
+
+// Declares the flags.
+//
+// We don't want the users to modify this flag in the code, but want
+// Google Test's own unit tests to be able to access it. Therefore we
+// declare it here as opposed to in gtest.h.
+GTEST_DECLARE_bool_(death_test_use_fork);
+
+namespace internal {
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library. This is solely for testing GetTestTypeId().
+GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest;
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests";
+const char kBreakOnFailureFlag[] = "break_on_failure";
+const char kCatchExceptionsFlag[] = "catch_exceptions";
+const char kColorFlag[] = "color";
+const char kFilterFlag[] = "filter";
+const char kListTestsFlag[] = "list_tests";
+const char kOutputFlag[] = "output";
+const char kPrintTimeFlag[] = "print_time";
+const char kRandomSeedFlag[] = "random_seed";
+const char kRepeatFlag[] = "repeat";
+const char kShuffleFlag[] = "shuffle";
+const char kStackTraceDepthFlag[] = "stack_trace_depth";
+const char kThrowOnFailureFlag[] = "throw_on_failure";
+
+// A valid random seed must be in [1, kMaxRandomSeed].
+const int kMaxRandomSeed = 99999;
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+GTEST_API_ extern bool g_help_flag;
+
+// Returns the current time in milliseconds.
+GTEST_API_ TimeInMillis GetTimeInMillis();
+
+// Returns true iff Google Test should use colors in the output.
+GTEST_API_ bool ShouldUseColor(bool stdout_is_tty);
+
+// Formats the given time in milliseconds as seconds.
+GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms);
+
+// Parses a string for an Int32 flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+GTEST_API_ bool ParseInt32Flag(
+ const char* str, const char* flag, Int32* value);
+
+// Returns a random seed in range [1, kMaxRandomSeed] based on the
+// given --gtest_random_seed flag value.
+inline int GetRandomSeedFromFlag(Int32 random_seed_flag) {
+ const unsigned int raw_seed = (random_seed_flag == 0) ?
+ static_cast<unsigned int>(GetTimeInMillis()) :
+ static_cast<unsigned int>(random_seed_flag);
+
+ // Normalizes the actual seed to range [1, kMaxRandomSeed] such that
+ // it's easy to type.
+ const int normalized_seed =
+ static_cast<int>((raw_seed - 1U) %
+ static_cast<unsigned int>(kMaxRandomSeed)) + 1;
+ return normalized_seed;
+}
+
+// Returns the first valid random seed after 'seed'. The behavior is
+// undefined if 'seed' is invalid. The seed after kMaxRandomSeed is
+// considered to be 1.
+inline int GetNextRandomSeed(int seed) {
+ GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)
+ << "Invalid random seed " << seed << " - must be in [1, "
+ << kMaxRandomSeed << "].";
+ const int next_seed = seed + 1;
+ return (next_seed > kMaxRandomSeed) ? 1 : next_seed;
+}
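
A worked example of the normalization performed by GetRandomSeedFromFlag() above: any non-zero flag value is folded into [1, kMaxRandomSeed], wrapping back to 1 once the bound is passed. The small driver below is only illustrative.

    #include <stdio.h>

    int main() {
        const unsigned int kMaxRandomSeed = 99999;     // same bound as above
        unsigned int raw[] = {1, 99999, 100000, 100001};
        for (int i = 0; i < 4; i++) {
            int normalized = (int)((raw[i] - 1U) % kMaxRandomSeed) + 1;
            printf("%u -> %d\n", raw[i], normalized);  // 1, 99999, 1, 2
        }
        return 0;
    }
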
+
+// This class saves the values of all Google Test flags in its c'tor, and
+// restores them in its d'tor.
+class GTestFlagSaver {
+ public:
+ // The c'tor.
+ GTestFlagSaver() {
+ also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests);
+ break_on_failure_ = GTEST_FLAG(break_on_failure);
+ catch_exceptions_ = GTEST_FLAG(catch_exceptions);
+ color_ = GTEST_FLAG(color);
+ death_test_style_ = GTEST_FLAG(death_test_style);
+ death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);
+ filter_ = GTEST_FLAG(filter);
+ internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
+ list_tests_ = GTEST_FLAG(list_tests);
+ output_ = GTEST_FLAG(output);
+ print_time_ = GTEST_FLAG(print_time);
+ random_seed_ = GTEST_FLAG(random_seed);
+ repeat_ = GTEST_FLAG(repeat);
+ shuffle_ = GTEST_FLAG(shuffle);
+ stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);
+ throw_on_failure_ = GTEST_FLAG(throw_on_failure);
+ }
+
+ // The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS.
+ ~GTestFlagSaver() {
+ GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_;
+ GTEST_FLAG(break_on_failure) = break_on_failure_;
+ GTEST_FLAG(catch_exceptions) = catch_exceptions_;
+ GTEST_FLAG(color) = color_;
+ GTEST_FLAG(death_test_style) = death_test_style_;
+ GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;
+ GTEST_FLAG(filter) = filter_;
+ GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
+ GTEST_FLAG(list_tests) = list_tests_;
+ GTEST_FLAG(output) = output_;
+ GTEST_FLAG(print_time) = print_time_;
+ GTEST_FLAG(random_seed) = random_seed_;
+ GTEST_FLAG(repeat) = repeat_;
+ GTEST_FLAG(shuffle) = shuffle_;
+ GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;
+ GTEST_FLAG(throw_on_failure) = throw_on_failure_;
+ }
+ private:
+ // Fields for saving the original values of flags.
+ bool also_run_disabled_tests_;
+ bool break_on_failure_;
+ bool catch_exceptions_;
+ String color_;
+ String death_test_style_;
+ bool death_test_use_fork_;
+ String filter_;
+ String internal_run_death_test_;
+ bool list_tests_;
+ String output_;
+ bool print_time_;
+ bool pretty_;
+ internal::Int32 random_seed_;
+ internal::Int32 repeat_;
+ bool shuffle_;
+ internal::Int32 stack_trace_depth_;
+ bool throw_on_failure_;
+} GTEST_ATTRIBUTE_UNUSED_;
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// The output buffer str must contain at least 32 characters.
+// The function returns the address of the output buffer.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'.
+GTEST_API_ char* CodePointToUtf8(UInt32 code_point, char* str);
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+// UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic Multilingual Plane.
+GTEST_API_ String WideStringToUtf8(const wchar_t* str, int num_chars);
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded();
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (e.g., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+GTEST_API_ bool ShouldShard(const char* total_shards_str,
+ const char* shard_index_str,
+ bool in_subprocess_for_death_test);
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error
+// and aborts.
+GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val);
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+GTEST_API_ bool ShouldRunTestOnShard(
+ int total_shards, int shard_index, int test_id);
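+// For illustration, one assignment consistent with the contract above is
+// round-robin: with total_shards == 3, test ids 0, 3, 6, ... would run on
+// shard 0 and ids 1, 4, 7, ... on shard 1.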
+
+// STL container utilities.
+
+// Returns the number of elements in the given container that satisfy
+// the given predicate.
+template <class Container, typename Predicate>
+inline int CountIf(const Container& c, Predicate predicate) {
+ return static_cast<int>(std::count_if(c.begin(), c.end(), predicate));
+}
+
+// Applies a function/functor to each element in the container.
+template <class Container, typename Functor>
+void ForEach(const Container& c, Functor functor) {
+ std::for_each(c.begin(), c.end(), functor);
+}
+
+// Returns the i-th element of the vector, or default_value if i is not
+// in range [0, v.size()).
+template <typename E>
+inline E GetElementOr(const std::vector<E>& v, int i, E default_value) {
+ return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[i];
+}
+
+// Performs an in-place shuffle of a range of the vector's elements.
+// 'begin' and 'end' are element indices as an STL-style range;
+// i.e. [begin, end) are shuffled, where 'end' == size() means to
+// shuffle to the end of the vector.
+template <typename E>
+void ShuffleRange(internal::Random* random, int begin, int end,
+ std::vector<E>* v) {
+ const int size = static_cast<int>(v->size());
+ GTEST_CHECK_(0 <= begin && begin <= size)
+ << "Invalid shuffle range start " << begin << ": must be in range [0, "
+ << size << "].";
+ GTEST_CHECK_(begin <= end && end <= size)
+ << "Invalid shuffle range finish " << end << ": must be in range ["
+ << begin << ", " << size << "].";
+
+ // Fisher-Yates shuffle, from
+ // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
+ for (int range_width = end - begin; range_width >= 2; range_width--) {
+ const int last_in_range = begin + range_width - 1;
+ const int selected = begin + random->Generate(range_width);
+ std::swap((*v)[selected], (*v)[last_in_range]);
+ }
+}
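+// For illustration: given a std::vector<int> v of size 6 and an
+// internal::Random rng (hypothetical names), ShuffleRange(&rng, 2, 5, &v)
+// permutes only v[2], v[3] and v[4]; passing end == v.size() shuffles
+// through the last element.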
+
+// Performs an in-place shuffle of the vector's elements.
+template <typename E>
+inline void Shuffle(internal::Random* random, std::vector<E>* v) {
+ ShuffleRange(random, 0, static_cast<int>(v->size()), v);
+}
+
+// A function for deleting an object. Handy for being used as a
+// functor.
+template <typename T>
+static void Delete(T* x) {
+ delete x;
+}
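+// For illustration: together with ForEach above, a container of owned
+// pointers can be released with ForEach(pointers, Delete<Foo>), where
+// 'pointers' is e.g. a std::vector<Foo*> (hypothetical names).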
+
+// A predicate that checks the key of a TestProperty against a known key.
+//
+// TestPropertyKeyIs is copyable.
+class TestPropertyKeyIs {
+ public:
+ // Constructor.
+ //
+ // TestPropertyKeyIs has NO default constructor.
+ explicit TestPropertyKeyIs(const char* key)
+ : key_(key) {}
+
+ // Returns true iff the key of the test property matches key_.
+ bool operator()(const TestProperty& test_property) const {
+ return String(test_property.key()).Compare(key_) == 0;
+ }
+
+ private:
+ String key_;
+};
+
+class TestInfoImpl {
+ public:
+ TestInfoImpl(TestInfo* parent, const char* test_case_name,
+ const char* name, const char* test_case_comment,
+ const char* comment, TypeId fixture_class_id,
+ internal::TestFactoryBase* factory);
+ ~TestInfoImpl();
+
+ // Returns true if this test should run.
+ bool should_run() const { return should_run_; }
+
+ // Sets the should_run member.
+ void set_should_run(bool should) { should_run_ = should; }
+
+ // Returns true if this test is disabled. Disabled tests are not run.
+ bool is_disabled() const { return is_disabled_; }
+
+ // Sets the is_disabled member.
+ void set_is_disabled(bool is) { is_disabled_ = is; }
+
+ // Returns true if this test matches the filter specified by the user.
+ bool matches_filter() const { return matches_filter_; }
+
+ // Sets the matches_filter member.
+ void set_matches_filter(bool matches) { matches_filter_ = matches; }
+
+ // Returns the test case name.
+ const char* test_case_name() const { return test_case_name_.c_str(); }
+
+ // Returns the test name.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the test case comment.
+ const char* test_case_comment() const { return test_case_comment_.c_str(); }
+
+ // Returns the test comment.
+ const char* comment() const { return comment_.c_str(); }
+
+ // Returns the ID of the test fixture class.
+ TypeId fixture_class_id() const { return fixture_class_id_; }
+
+ // Returns the test result.
+ TestResult* result() { return &result_; }
+ const TestResult* result() const { return &result_; }
+
+ // Creates the test object, runs it, records its result, and then
+ // deletes it.
+ void Run();
+
+ // Clears the test result.
+ void ClearResult() { result_.Clear(); }
+
+ // Clears the test result in the given TestInfo object.
+ static void ClearTestResult(TestInfo * test_info) {
+ test_info->impl()->ClearResult();
+ }
+
+ private:
+ // These fields are immutable properties of the test.
+ TestInfo* const parent_; // The owner of this object
+ const String test_case_name_; // Test case name
+ const String name_; // Test name
+ const String test_case_comment_; // Test case comment
+ const String comment_; // Test comment
+ const TypeId fixture_class_id_; // ID of the test fixture class
+ bool should_run_; // True iff this test should run
+ bool is_disabled_; // True iff this test is disabled
+ bool matches_filter_; // True if this test matches the
+ // user-specified filter.
+ internal::TestFactoryBase* const factory_; // The factory that creates
+ // the test object
+
+ // This field is mutable and needs to be reset before running the
+ // test for the second time.
+ TestResult result_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfoImpl);
+};
+
+// Class UnitTestOptions.
+//
+// This class contains functions for processing options the user
+// specifies when running the tests. It has only static members.
+//
+// In most cases, the user can specify an option using either an
+// environment variable or a command line flag. E.g. you can set the
+// test filter using either GTEST_FILTER or --gtest_filter. If both
+// the variable and the flag are present, the latter overrides the
+// former.
+class GTEST_API_ UnitTestOptions {
+ public:
+ // Functions for processing the gtest_output flag.
+
+ // Returns the output format, or "" for normal printed output.
+ static String GetOutputFormat();
+
+ // Returns the absolute path of the requested output file, or the
+ // default (test_detail.xml in the original working directory) if
+ // none was explicitly specified.
+ static String GetAbsolutePathToOutputFile();
+
+ // Functions for processing the gtest_filter flag.
+
+ // Returns true iff the wildcard pattern matches the string. The
+ // first ':' or '\0' character in pattern marks the end of it.
+ //
+ // This recursive algorithm isn't very efficient, but is clear and
+ // works well enough for matching test names, which are short.
+ static bool PatternMatchesString(const char *pattern, const char *str);
+
+ // Returns true iff the user-specified filter matches the test case
+ // name and the test name.
+ static bool FilterMatchesTest(const String &test_case_name,
+ const String &test_name);
+
+#if GTEST_OS_WINDOWS
+ // Function for supporting the gtest_catch_exception flag.
+
+ // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+ // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+ // This function is useful as an __except condition.
+ static int GTestShouldProcessSEH(DWORD exception_code);
+#endif // GTEST_OS_WINDOWS
+
+ // Returns true if "name" matches the ':' separated list of glob-style
+ // filters in "filter".
+ static bool MatchesFilter(const String& name, const char* filter);
+};
+
+// Returns the current application's name, removing directory path if that
+// is present. Used by UnitTestOptions::GetAbsolutePathToOutputFile.
+GTEST_API_ FilePath GetCurrentExecutableName();
+
+// The role interface for getting the OS stack trace as a string.
+class OsStackTraceGetterInterface {
+ public:
+ OsStackTraceGetterInterface() {}
+ virtual ~OsStackTraceGetterInterface() {}
+
+ // Returns the current OS stack trace as a String. Parameters:
+ //
+ // max_depth - the maximum number of stack frames to be included
+ // in the trace.
+ // skip_count - the number of top frames to be skipped; doesn't count
+ // against max_depth.
+ virtual String CurrentStackTrace(int max_depth, int skip_count) = 0;
+
+ // UponLeavingGTest() should be called immediately before Google Test calls
+ // user code. It saves some information about the current stack that
+ // CurrentStackTrace() will use to find and hide Google Test stack frames.
+ virtual void UponLeavingGTest() = 0;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface);
+};
+
+// A working implementation of the OsStackTraceGetterInterface interface.
+class OsStackTraceGetter : public OsStackTraceGetterInterface {
+ public:
+ OsStackTraceGetter() : caller_frame_(NULL) {}
+ virtual String CurrentStackTrace(int max_depth, int skip_count);
+ virtual void UponLeavingGTest();
+
+ // This string is inserted in place of stack frames that are part of
+ // Google Test's implementation.
+ static const char* const kElidedFramesMarker;
+
+ private:
+ Mutex mutex_; // protects all internal state
+
+ // We save the stack frame below the frame that calls user code.
+ // We do this because the address of the frame immediately below
+ // the user code changes between the call to UponLeavingGTest()
+ // and any calls to CurrentStackTrace() from within the user code.
+ void* caller_frame_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter);
+};
+
+// Information about a Google Test trace point.
+struct TraceInfo {
+ const char* file;
+ int line;
+ String message;
+};
+
+// This is the default global test part result reporter used in UnitTestImpl.
+// This class should only be used by UnitTestImpl.
+class DefaultGlobalTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test);
+ // Implements the TestPartResultReporterInterface. Reports the test part
+ // result in the current test.
+ virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+ UnitTestImpl* const unit_test_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter);
+};
+
+// This is the default per thread test part result reporter used in
+// UnitTestImpl. This class should only be used by UnitTestImpl.
+class DefaultPerThreadTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test);
+ // Implements the TestPartResultReporterInterface. The implementation just
+ // delegates to the current global test part result reporter of *unit_test_.
+ virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+ UnitTestImpl* const unit_test_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter);
+};
+
+// The private implementation of the UnitTest class. We don't protect
+// the methods under a mutex, as this class is not accessible by a
+// user and the UnitTest class that delegates work to this class does
+// proper locking.
+class GTEST_API_ UnitTestImpl {
+ public:
+ explicit UnitTestImpl(UnitTest* parent);
+ virtual ~UnitTestImpl();
+
+ // There are two different ways to register your own TestPartResultReporter.
+ // You can register your own reporter to listen either only for test results
+ // from the current thread or for results from all threads.
+ // By default, each per-thread test result reporter just passes a new
+ // TestPartResult to the global test result reporter, which registers the
+ // test part result for the currently running test.
+
+ // Returns the global test part result reporter.
+ TestPartResultReporterInterface* GetGlobalTestPartResultReporter();
+
+ // Sets the global test part result reporter.
+ void SetGlobalTestPartResultReporter(
+ TestPartResultReporterInterface* reporter);
+
+ // Returns the test part result reporter for the current thread.
+ TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread();
+
+ // Sets the test part result reporter for the current thread.
+ void SetTestPartResultReporterForCurrentThread(
+ TestPartResultReporterInterface* reporter);
+
+ // Gets the number of successful test cases.
+ int successful_test_case_count() const;
+
+ // Gets the number of failed test cases.
+ int failed_test_case_count() const;
+
+ // Gets the number of all test cases.
+ int total_test_case_count() const;
+
+ // Gets the number of all test cases that contain at least one test
+ // that should run.
+ int test_case_to_run_count() const;
+
+ // Gets the number of successful tests.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests.
+ int disabled_test_count() const;
+
+ // Gets the number of all tests.
+ int total_test_count() const;
+
+ // Gets the number of tests that should run.
+ int test_to_run_count() const;
+
+ // Gets the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns true iff the unit test passed (i.e. all test cases passed).
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the unit test failed (i.e. some test case failed
+ // or something outside of all tests failed).
+ bool Failed() const {
+ return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed();
+ }
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ const TestCase* GetTestCase(int i) const {
+ const int index = GetElementOr(test_case_indices_, i, -1);
+ return index < 0 ? NULL : test_cases_[index];
+ }
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ TestCase* GetMutableTestCase(int i) {
+ const int index = GetElementOr(test_case_indices_, i, -1);
+ return index < 0 ? NULL : test_cases_[index];
+ }
+
+ // Provides access to the event listener list.
+ TestEventListeners* listeners() { return &listeners_; }
+
+ // Returns the TestResult for the test that's currently running, or
+ // the TestResult for the ad hoc test if no test is running.
+ TestResult* current_test_result();
+
+ // Returns the TestResult for the ad hoc test.
+ const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; }
+
+ // Sets the OS stack trace getter.
+ //
+ // Does nothing if the input and the current OS stack trace getter
+ // are the same; otherwise, deletes the old getter and makes the
+ // input the current getter.
+ void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter);
+
+ // Returns the current OS stack trace getter if it is not NULL;
+ // otherwise, creates an OsStackTraceGetter, makes it the current
+ // getter, and returns it.
+ OsStackTraceGetterInterface* os_stack_trace_getter();
+
+ // Returns the current OS stack trace as a String.
+ //
+ // The maximum number of stack frames to be included is specified by
+ // the gtest_stack_trace_depth flag. The skip_count parameter
+ // specifies the number of top frames to be skipped, which doesn't
+ // count against the number of frames to be included.
+ //
+ // For example, if Foo() calls Bar(), which in turn calls
+ // CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+ // trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+ String CurrentOsStackTraceExceptTop(int skip_count);
+
+ // Finds and returns a TestCase with the given name. If one doesn't
+ // exist, creates one and returns it.
+ //
+ // Arguments:
+ //
+ // test_case_name: name of the test case
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ TestCase* GetTestCase(const char* test_case_name,
+ const char* comment,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc);
+
+ // Adds a TestInfo to the unit test.
+ //
+ // Arguments:
+ //
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ // test_info: the TestInfo object
+ void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc,
+ TestInfo * test_info) {
+ // In order to support thread-safe death tests, we need to
+ // remember the original working directory when the test program
+ // was first invoked. We cannot do this in RUN_ALL_TESTS(), as
+ // the user may have changed the current directory before calling
+ // RUN_ALL_TESTS(). Therefore we capture the current directory in
+ // AddTestInfo(), which is called to register a TEST or TEST_F
+ // before main() is reached.
+ if (original_working_dir_.IsEmpty()) {
+ original_working_dir_.Set(FilePath::GetCurrentDir());
+ GTEST_CHECK_(!original_working_dir_.IsEmpty())
+ << "Failed to get the current working directory.";
+ }
+
+ GetTestCase(test_info->test_case_name(),
+ test_info->test_case_comment(),
+ set_up_tc,
+ tear_down_tc)->AddTestInfo(test_info);
+ }
+
+#if GTEST_HAS_PARAM_TEST
+ // Returns ParameterizedTestCaseRegistry object used to keep track of
+ // value-parameterized tests and instantiate and register them.
+ internal::ParameterizedTestCaseRegistry& parameterized_test_registry() {
+ return parameterized_test_registry_;
+ }
+#endif // GTEST_HAS_PARAM_TEST
+
+ // Sets the TestCase object for the test that's currently running.
+ void set_current_test_case(TestCase* a_current_test_case) {
+ current_test_case_ = a_current_test_case;
+ }
+
+ // Sets the TestInfo object for the test that's currently running. If
+ // current_test_info is NULL, the assertion results will be stored in
+ // ad_hoc_test_result_.
+ void set_current_test_info(TestInfo* a_current_test_info) {
+ current_test_info_ = a_current_test_info;
+ }
+
+ // Registers all parameterized tests defined using TEST_P and
+ // INSTANTIATE_TEST_P, creating regular tests for each test/parameter
+ // combination. This method can be called more than once; it has
+ // guards protecting against registering the tests more than once.
+ // If value-parameterized tests are disabled, RegisterParameterizedTests
+ // is present but does nothing.
+ void RegisterParameterizedTests();
+
+ // Runs all tests in this UnitTest object, prints the result, and
+ // returns 0 if all tests are successful, or 1 otherwise. If any
+ // exception is thrown during a test on Windows, this test is
+ // considered to be failed, but the rest of the tests will still be
+ // run. (We disable exceptions on Linux and Mac OS X, so the issue
+ // doesn't apply there.)
+ int RunAllTests();
+
+ // Clears the results of all tests, including the ad hoc test.
+ void ClearResult() {
+ ForEach(test_cases_, TestCase::ClearTestCaseResult);
+ ad_hoc_test_result_.Clear();
+ }
+
+ enum ReactionToSharding {
+ HONOR_SHARDING_PROTOCOL,
+ IGNORE_SHARDING_PROTOCOL
+ };
+
+ // Matches the full name of each test against the user-specified
+ // filter to decide whether the test should run, then records the
+ // result in each TestCase and TestInfo object.
+ // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests
+ // based on sharding variables in the environment.
+ // Returns the number of tests that should run.
+ int FilterTests(ReactionToSharding shard_tests);
+
+ // Prints the names of the tests matching the user-specified filter flag.
+ void ListTestsMatchingFilter();
+
+ const TestCase* current_test_case() const { return current_test_case_; }
+ TestInfo* current_test_info() { return current_test_info_; }
+ const TestInfo* current_test_info() const { return current_test_info_; }
+
+ // Returns the vector of environments that need to be set-up/torn-down
+ // before/after the tests are run.
+ std::vector<Environment*>& environments() { return environments_; }
+
+ // Getters for the per-thread Google Test trace stack.
+ std::vector<TraceInfo>& gtest_trace_stack() {
+ return *(gtest_trace_stack_.pointer());
+ }
+ const std::vector<TraceInfo>& gtest_trace_stack() const {
+ return gtest_trace_stack_.get();
+ }
+
+#if GTEST_HAS_DEATH_TEST
+ void InitDeathTestSubprocessControlInfo() {
+ internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag());
+ }
+ // Returns a pointer to the parsed --gtest_internal_run_death_test
+ // flag, or NULL if that flag was not specified.
+ // This information is useful only in a death test child process.
+ // Must not be called before a call to InitGoogleTest.
+ const InternalRunDeathTestFlag* internal_run_death_test_flag() const {
+ return internal_run_death_test_flag_.get();
+ }
+
+ // Returns a pointer to the current death test factory.
+ internal::DeathTestFactory* death_test_factory() {
+ return death_test_factory_.get();
+ }
+
+ void SuppressTestEventsIfInSubprocess();
+
+ friend class ReplaceDeathTestFactory;
+#endif // GTEST_HAS_DEATH_TEST
+
+ // Initializes the event listener performing XML output as specified by
+ // UnitTestOptions. Must not be called before InitGoogleTest.
+ void ConfigureXmlOutput();
+
+ // Performs initialization dependent upon flag values obtained in
+ // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to
+ // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest
+ // this function is also called from RunAllTests. Since this function can be
+ // called more than once, it has to be idempotent.
+ void PostFlagParsingInit();
+
+ // Gets the random seed used at the start of the current test iteration.
+ int random_seed() const { return random_seed_; }
+
+ // Gets the random number generator.
+ internal::Random* random() { return &random_; }
+
+ // Shuffles all test cases, and the tests within each test case,
+ // making sure that death tests are still run first.
+ void ShuffleTests();
+
+ // Restores the test cases and tests to their order before the first shuffle.
+ void UnshuffleTests();
+
+ private:
+ friend class ::testing::UnitTest;
+
+ // The UnitTest object that owns this implementation object.
+ UnitTest* const parent_;
+
+ // The working directory when the first TEST() or TEST_F() was
+ // executed.
+ internal::FilePath original_working_dir_;
+
+ // The default test part result reporters.
+ DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_;
+ DefaultPerThreadTestPartResultReporter
+ default_per_thread_test_part_result_reporter_;
+
+ // Points to (but doesn't own) the global test part result reporter.
+ TestPartResultReporterInterface* global_test_part_result_repoter_;
+
+ // Protects read and write access to global_test_part_result_reporter_.
+ internal::Mutex global_test_part_result_reporter_mutex_;
+
+ // Points to (but doesn't own) the per-thread test part result reporter.
+ internal::ThreadLocal<TestPartResultReporterInterface*>
+ per_thread_test_part_result_reporter_;
+
+ // The vector of environments that need to be set-up/torn-down
+ // before/after the tests are run.
+ std::vector<Environment*> environments_;
+
+ // The vector of TestCases in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestCase*> test_cases_;
+
+ // Provides a level of indirection for the test case list to allow
+ // easy shuffling and restoring the test case order. The i-th
+ // element of this vector is the index of the i-th test case in the
+ // shuffled order.
+ std::vector<int> test_case_indices_;
+
+#if GTEST_HAS_PARAM_TEST
+ // ParameterizedTestRegistry object used to register value-parameterized
+ // tests.
+ internal::ParameterizedTestCaseRegistry parameterized_test_registry_;
+
+ // Indicates whether RegisterParameterizedTests() has been called already.
+ bool parameterized_tests_registered_;
+#endif // GTEST_HAS_PARAM_TEST
+
+ // Index of the last death test case registered. Initially -1.
+ int last_death_test_case_;
+
+ // This points to the TestCase for the currently running test. It
+ // changes as Google Test goes through one test case after another.
+ // When no test is running, this is set to NULL and Google Test
+ // stores assertion results in ad_hoc_test_result_. Initially NULL.
+ TestCase* current_test_case_;
+
+ // This points to the TestInfo for the currently running test. It
+ // changes as Google Test goes through one test after another. When
+ // no test is running, this is set to NULL and Google Test stores
+ // assertion results in ad_hoc_test_result_. Initially NULL.
+ TestInfo* current_test_info_;
+
+ // Normally, a user only writes assertions inside a TEST or TEST_F,
+ // or inside a function called by a TEST or TEST_F. Since Google
+ // Test keeps track of which test is currently running, it can
+ // associate such an assertion with the test it belongs to.
+ //
+ // If an assertion is encountered when no TEST or TEST_F is running,
+ // Google Test attributes the assertion result to an imaginary "ad hoc"
+ // test, and records the result in ad_hoc_test_result_.
+ TestResult ad_hoc_test_result_;
+
+ // The list of event listeners that can be used to track events inside
+ // Google Test.
+ TestEventListeners listeners_;
+
+ // The OS stack trace getter. Will be deleted when the UnitTest
+ // object is destructed. By default, an OsStackTraceGetter is used,
+ // but the user can set this field to use a custom getter if that is
+ // desired.
+ OsStackTraceGetterInterface* os_stack_trace_getter_;
+
+ // True iff PostFlagParsingInit() has been called.
+ bool post_flag_parse_init_performed_;
+
+ // The random number seed used at the beginning of the test run.
+ int random_seed_;
+
+ // Our random number generator.
+ internal::Random random_;
+
+ // How long the test took to run, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+#if GTEST_HAS_DEATH_TEST
+ // The decomposed components of the gtest_internal_run_death_test flag,
+ // parsed when RUN_ALL_TESTS is called.
+ internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_;
+ internal::scoped_ptr<internal::DeathTestFactory> death_test_factory_;
+#endif // GTEST_HAS_DEATH_TEST
+
+ // A per-thread stack of traces created by the SCOPED_TRACE() macro.
+ internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl);
+}; // class UnitTestImpl
+
+// Convenience function for accessing the global UnitTest
+// implementation object.
+inline UnitTestImpl* GetUnitTestImpl() {
+ return UnitTest::GetInstance()->impl();
+}
+
+// Internal helper functions for implementing the simple regular
+// expression matcher.
+GTEST_API_ bool IsInSet(char ch, const char* str);
+GTEST_API_ bool IsDigit(char ch);
+GTEST_API_ bool IsPunct(char ch);
+GTEST_API_ bool IsRepeat(char ch);
+GTEST_API_ bool IsWhiteSpace(char ch);
+GTEST_API_ bool IsWordChar(char ch);
+GTEST_API_ bool IsValidEscape(char ch);
+GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch);
+GTEST_API_ bool ValidateRegex(const char* regex);
+GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str);
+GTEST_API_ bool MatchRepetitionAndRegexAtHead(
+ bool escaped, char ch, char repeat, const char* regex, const char* str);
+GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str);
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv);
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
+
+#if GTEST_HAS_DEATH_TEST
+
+// Returns the message describing the last system error, regardless of the
+// platform.
+String GetLastErrnoDescription();
+
+#if GTEST_OS_WINDOWS
+// Provides leak-safe Windows kernel handle ownership.
+class AutoHandle {
+ public:
+ AutoHandle() : handle_(INVALID_HANDLE_VALUE) {}
+ explicit AutoHandle(HANDLE handle) : handle_(handle) {}
+
+ ~AutoHandle() { Reset(); }
+
+ HANDLE Get() const { return handle_; }
+ void Reset() { Reset(INVALID_HANDLE_VALUE); }
+ void Reset(HANDLE handle) {
+ if (handle != handle_) {
+ if (handle_ != INVALID_HANDLE_VALUE)
+ ::CloseHandle(handle_);
+ handle_ = handle;
+ }
+ }
+
+ private:
+ HANDLE handle_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle);
+};
+#endif // GTEST_OS_WINDOWS
+
+// Attempts to parse a string into a positive integer pointed to by the
+// number parameter. Returns true if that is possible.
+// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use
+// it here.
+template <typename Integer>
+bool ParseNaturalNumber(const ::std::string& str, Integer* number) {
+ // Fail fast if the given string does not begin with a digit;
+ // this bypasses strtoXXX's "optional leading whitespace and plus
+ // or minus sign" semantics, which are undesirable here.
+ if (str.empty() || !isdigit(str[0])) {
+ return false;
+ }
+ errno = 0;
+
+ char* end;
+ // BiggestConvertible is the largest integer type that system-provided
+ // string-to-number conversion routines can return.
+#if GTEST_OS_WINDOWS && !defined(__GNUC__)
+ // MSVC and C++ Builder define __int64 instead of the standard long long.
+ typedef unsigned __int64 BiggestConvertible;
+ const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10);
+#else
+ typedef unsigned long long BiggestConvertible; // NOLINT
+ const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10);
+#endif // GTEST_OS_WINDOWS && !defined(__GNUC__)
+ const bool parse_success = *end == '\0' && errno == 0;
+
+ // TODO(vladl@google.com): Convert this to compile time assertion when it is
+ // available.
+ GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed));
+
+ const Integer result = static_cast<Integer>(parsed);
+ if (parse_success && static_cast<BiggestConvertible>(result) == parsed) {
+ *number = result;
+ return true;
+ }
+ return false;
+}
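+// For illustration, following the semantics above: ParseNaturalNumber("123",
+// &n) yields true with n == 123, while " 42", "-1" and "99x" are rejected
+// because the string must start with a digit and be consumed completely.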
+#endif // GTEST_HAS_DEATH_TEST
+
+// TestResult contains some private methods that should be hidden from
+// Google Test users but are required for testing. This class allows our tests
+// to access them.
+//
+// This class is supplied only for the purpose of testing Google Test's own
+// constructs. Do not use it in user tests, either directly or indirectly.
+class TestResultAccessor {
+ public:
+ static void RecordProperty(TestResult* test_result,
+ const TestProperty& property) {
+ test_result->RecordProperty(property);
+ }
+
+ static void ClearTestPartResults(TestResult* test_result) {
+ test_result->ClearTestPartResults();
+ }
+
+ static const std::vector<testing::TestPartResult>& test_part_results(
+ const TestResult& test_result) {
+ return test_result.test_part_results();
+ }
+};
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_SRC_GTEST_INTERNAL_INL_H_
+#undef GTEST_IMPLEMENTATION_
+
+#if GTEST_OS_WINDOWS
+#define vsnprintf _vsnprintf
+#endif // GTEST_OS_WINDOWS
+
+namespace testing {
+
+using internal::CountIf;
+using internal::ForEach;
+using internal::GetElementOr;
+using internal::Shuffle;
+
+// Constants.
+
+// A test whose test case name or test name matches this filter is
+// disabled and not run.
+static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*";
+
+// A test case whose name matches this filter is considered a death
+// test case and will be run before test cases whose name doesn't
+// match this filter.
+static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*";
+
+// A test filter that matches everything.
+static const char kUniversalFilter[] = "*";
+
+// The default output file for XML output.
+static const char kDefaultOutputFile[] = "test_detail.xml";
+
+// The environment variable name for the test shard index.
+static const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
+// The environment variable name for the total number of test shards.
+static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
+// The environment variable name for the test shard status file.
+static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE";
+
+namespace internal {
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+const char kStackTraceMarker[] = "\nStack trace:\n";
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+bool g_help_flag = false;
+
+} // namespace internal
+
+GTEST_DEFINE_bool_(
+ also_run_disabled_tests,
+ internal::BoolFromGTestEnv("also_run_disabled_tests", false),
+ "Run disabled tests too, in addition to the tests normally being run.");
+
+GTEST_DEFINE_bool_(
+ break_on_failure,
+ internal::BoolFromGTestEnv("break_on_failure", false),
+ "True iff a failed assertion should be a debugger break-point.");
+
+GTEST_DEFINE_bool_(
+ catch_exceptions,
+ internal::BoolFromGTestEnv("catch_exceptions", false),
+ "True iff " GTEST_NAME_
+ " should catch exceptions and treat them as test failures.");
+
+GTEST_DEFINE_string_(
+ color,
+ internal::StringFromGTestEnv("color", "auto"),
+ "Whether to use colors in the output. Valid values: yes, no, "
+ "and auto. 'auto' means to use colors if the output is "
+ "being sent to a terminal and the TERM environment variable "
+ "is set to xterm, xterm-color, xterm-256color, linux or cygwin.");
+
+GTEST_DEFINE_string_(
+ filter,
+ internal::StringFromGTestEnv("filter", kUniversalFilter),
+ "A colon-separated list of glob (not regex) patterns "
+ "for filtering the tests to run, optionally followed by a "
+ "'-' and a : separated list of negative patterns (tests to "
+ "exclude). A test is run if it matches one of the positive "
+ "patterns and does not match any of the negative patterns.");
+
+GTEST_DEFINE_bool_(list_tests, false,
+ "List all tests without running them.");
+
+GTEST_DEFINE_string_(
+ output,
+ internal::StringFromGTestEnv("output", ""),
+ "A format (currently must be \"xml\"), optionally followed "
+ "by a colon and an output file name or directory. A directory "
+ "is indicated by a trailing pathname separator. "
+ "Examples: \"xml:filename.xml\", \"xml::directoryname/\". "
+ "If a directory is specified, output files will be created "
+ "within that directory, with file-names based on the test "
+ "executable's name and, if necessary, made unique by adding "
+ "digits.");
+
+GTEST_DEFINE_bool_(
+ print_time,
+ internal::BoolFromGTestEnv("print_time", true),
+ "True iff " GTEST_NAME_
+ " should display elapsed time in text output.");
+
+GTEST_DEFINE_int32_(
+ random_seed,
+ internal::Int32FromGTestEnv("random_seed", 0),
+ "Random number seed to use when shuffling test orders. Must be in range "
+ "[1, 99999], or 0 to use a seed based on the current time.");
+
+GTEST_DEFINE_int32_(
+ repeat,
+ internal::Int32FromGTestEnv("repeat", 1),
+ "How many times to repeat each test. Specify a negative number "
+ "for repeating forever. Useful for shaking out flaky tests.");
+
+GTEST_DEFINE_bool_(
+ show_internal_stack_frames, false,
+ "True iff " GTEST_NAME_ " should include internal stack frames when "
+ "printing test failure stack traces.");
+
+GTEST_DEFINE_bool_(
+ shuffle,
+ internal::BoolFromGTestEnv("shuffle", false),
+ "True iff " GTEST_NAME_
+ " should randomize tests' order on every run.");
+
+GTEST_DEFINE_int32_(
+ stack_trace_depth,
+ internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth),
+ "The maximum number of stack frames to print when an "
+ "assertion fails. The valid range is 0 through 100, inclusive.");
+
+GTEST_DEFINE_bool_(
+ throw_on_failure,
+ internal::BoolFromGTestEnv("throw_on_failure", false),
+ "When this flag is specified, a failed assertion will throw an exception "
+ "if exceptions are enabled or exit the program with a non-zero code "
+ "otherwise.");
+
+namespace internal {
+
+// Generates a random number from [0, range), using a Linear
+// Congruential Generator (LCG). Crashes if 'range' is 0 or greater
+// than kMaxRange.
+UInt32 Random::Generate(UInt32 range) {
+ // These constants are the same as are used in glibc's rand(3).
+ state_ = (1103515245U*state_ + 12345U) % kMaxRange;
+
+ GTEST_CHECK_(range > 0)
+ << "Cannot generate a number in the range [0, 0).";
+ GTEST_CHECK_(range <= kMaxRange)
+ << "Generation of a number in [0, " << range << ") was requested, "
+ << "but this can only generate numbers in [0, " << kMaxRange << ").";
+
+ // Converting via modulus introduces a bit of downward bias, but
+ // it's simple, and a linear congruential generator isn't too good
+ // to begin with.
+ return state_ % range;
+}
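+// For illustration: starting from state_ == 1, the next state is
+// (1103515245 * 1 + 12345) % kMaxRange, and Generate(10) then returns that
+// state modulo 10.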
+
+// GTestIsInitialized() returns true iff the user has initialized
+// Google Test. Useful for catching the user mistake of not initializing
+// Google Test before calling RUN_ALL_TESTS().
+//
+// A user must call testing::InitGoogleTest() to initialize Google
+// Test. g_init_gtest_count is set to the number of times
+// InitGoogleTest() has been called. We don't protect this variable
+// under a mutex as it is only accessed in the main thread.
+int g_init_gtest_count = 0;
+static bool GTestIsInitialized() { return g_init_gtest_count != 0; }
+
+// Iterates over a vector of TestCases, keeping a running sum of the
+// results of calling a given int-returning method on each.
+// Returns the sum.
+static int SumOverTestCaseList(const std::vector<TestCase*>& case_list,
+ int (TestCase::*method)() const) {
+ int sum = 0;
+ for (size_t i = 0; i < case_list.size(); i++) {
+ sum += (case_list[i]->*method)();
+ }
+ return sum;
+}
+
+// Returns true iff the test case passed.
+static bool TestCasePassed(const TestCase* test_case) {
+ return test_case->should_run() && test_case->Passed();
+}
+
+// Returns true iff the test case failed.
+static bool TestCaseFailed(const TestCase* test_case) {
+ return test_case->should_run() && test_case->Failed();
+}
+
+// Returns true iff test_case contains at least one test that should
+// run.
+static bool ShouldRunTestCase(const TestCase* test_case) {
+ return test_case->should_run();
+}
+
+// AssertHelper constructor.
+AssertHelper::AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
+ const char* message)
+ : data_(new AssertHelperData(type, file, line, message)) {
+}
+
+AssertHelper::~AssertHelper() {
+ delete data_;
+}
+
+// Message assignment, for assertion streaming support.
+void AssertHelper::operator=(const Message& message) const {
+ UnitTest::GetInstance()->
+ AddTestPartResult(data_->type, data_->file, data_->line,
+ AppendUserMessage(data_->message, message),
+ UnitTest::GetInstance()->impl()
+ ->CurrentOsStackTraceExceptTop(1)
+ // Skips the stack frame for this function itself.
+ ); // NOLINT
+}
+
+// Mutex for linked pointers.
+GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// Application pathname obtained in InitGoogleTest.
+String g_executable_path;
+
+// Returns the current application's name, removing directory path if that
+// is present.
+FilePath GetCurrentExecutableName() {
+ FilePath result;
+
+#if GTEST_OS_WINDOWS
+ result.Set(FilePath(g_executable_path).RemoveExtension("exe"));
+#else
+ result.Set(FilePath(g_executable_path));
+#endif // GTEST_OS_WINDOWS
+
+ return result.RemoveDirectoryName();
+}
+
+// Functions for processing the gtest_output flag.
+
+// Returns the output format, or "" for normal printed output.
+String UnitTestOptions::GetOutputFormat() {
+ const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+ if (gtest_output_flag == NULL) return String("");
+
+ const char* const colon = strchr(gtest_output_flag, ':');
+ return (colon == NULL) ?
+ String(gtest_output_flag) :
+ String(gtest_output_flag, colon - gtest_output_flag);
+}
+
+// Returns the absolute path of the requested output file, or the default
+// if none was explicitly specified.
+String UnitTestOptions::GetAbsolutePathToOutputFile() {
+ const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+ if (gtest_output_flag == NULL)
+ return String("");
+
+ const char* const colon = strchr(gtest_output_flag, ':');
+ if (colon == NULL)
+ return String(internal::FilePath::ConcatPaths(
+ internal::FilePath(
+ UnitTest::GetInstance()->original_working_dir()),
+ internal::FilePath(kDefaultOutputFile)).ToString() );
+
+ internal::FilePath output_name(colon + 1);
+ if (!output_name.IsAbsolutePath())
+ // TODO(wan@google.com): on Windows \some\path is not an absolute
+ // path (as its meaning depends on the current drive), yet the
+ // following logic for turning it into an absolute path is wrong.
+ // Fix it.
+ output_name = internal::FilePath::ConcatPaths(
+ internal::FilePath(UnitTest::GetInstance()->original_working_dir()),
+ internal::FilePath(colon + 1));
+
+ if (!output_name.IsDirectory())
+ return output_name.ToString();
+
+ internal::FilePath result(internal::FilePath::GenerateUniqueFileName(
+ output_name, internal::GetCurrentExecutableName(),
+ GetOutputFormat().c_str()));
+ return result.ToString();
+}
+
+// Returns true iff the wildcard pattern matches the string. The
+// first ':' or '\0' character in pattern marks the end of it.
+//
+// This recursive algorithm isn't very efficient, but is clear and
+// works well enough for matching test names, which are short.
+bool UnitTestOptions::PatternMatchesString(const char *pattern,
+ const char *str) {
+ switch (*pattern) {
+ case '\0':
+ case ':': // Either ':' or '\0' marks the end of the pattern.
+ return *str == '\0';
+ case '?': // Matches any single character.
+ return *str != '\0' && PatternMatchesString(pattern + 1, str + 1);
+ case '*': // Matches any string (possibly empty) of characters.
+ return (*str != '\0' && PatternMatchesString(pattern, str + 1)) ||
+ PatternMatchesString(pattern + 1, str);
+ default: // Non-special character. Matches itself.
+ return *pattern == *str &&
+ PatternMatchesString(pattern + 1, str + 1);
+ }
+}
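+// For illustration: the pattern "Foo*.Bar?" matches "FooTest.Barx" but not
+// "FooTest.Bar" (the '?' must consume exactly one character), and a ':' in
+// the pattern ends it, so only the part before the first ':' is considered.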
+
+bool UnitTestOptions::MatchesFilter(const String& name, const char* filter) {
+ const char *cur_pattern = filter;
+ for (;;) {
+ if (PatternMatchesString(cur_pattern, name.c_str())) {
+ return true;
+ }
+
+ // Finds the next pattern in the filter.
+ cur_pattern = strchr(cur_pattern, ':');
+
+ // Returns false if no more patterns can be found.
+ if (cur_pattern == NULL) {
+ return false;
+ }
+
+ // Skips the pattern separator (the ':' character).
+ cur_pattern++;
+ }
+}
+
+// TODO(keithray): move String function implementations to gtest-string.cc.
+
+// Returns true iff the user-specified filter matches the test case
+// name and the test name.
+bool UnitTestOptions::FilterMatchesTest(const String &test_case_name,
+ const String &test_name) {
+ const String& full_name = String::Format("%s.%s",
+ test_case_name.c_str(),
+ test_name.c_str());
+
+ // Split --gtest_filter at '-', if there is one, to separate into
+ // positive filter and negative filter portions
+ const char* const p = GTEST_FLAG(filter).c_str();
+ const char* const dash = strchr(p, '-');
+ String positive;
+ String negative;
+ if (dash == NULL) {
+ positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter
+ negative = String("");
+ } else {
+ positive = String(p, dash - p); // Everything up to the dash
+ negative = String(dash+1); // Everything after the dash
+ if (positive.empty()) {
+ // Treat '-test1' as the same as '*-test1'
+ positive = kUniversalFilter;
+ }
+ }
+
+ // A filter is a colon-separated list of patterns. It matches a
+ // test if any pattern in it matches the test.
+ return (MatchesFilter(full_name, positive.c_str()) &&
+ !MatchesFilter(full_name, negative.c_str()));
+}
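+// For illustration: with --gtest_filter=FooTest.*-FooTest.Bar, the positive
+// patterns are "FooTest.*" and the negative patterns are "FooTest.Bar", so
+// FooTest.Baz runs while FooTest.Bar is excluded; a filter starting with '-'
+// is treated as if it began with "*-".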
+
+#if GTEST_OS_WINDOWS
+// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+// This function is useful as an __except condition.
+int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) {
+ // Google Test should handle an exception if:
+ // 1. the user wants it to, AND
+ // 2. this is not a breakpoint exception.
+ return (GTEST_FLAG(catch_exceptions) &&
+ exception_code != EXCEPTION_BREAKPOINT) ?
+ EXCEPTION_EXECUTE_HANDLER :
+ EXCEPTION_CONTINUE_SEARCH;
+}
+#endif // GTEST_OS_WINDOWS
+
+} // namespace internal
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test. The 'result' parameter specifies where to report the
+// results. Intercepts only failures from the current thread.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+ TestPartResultArray* result)
+ : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD),
+ result_(result) {
+ Init();
+}
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test. The 'result' parameter specifies where to report the
+// results.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+ InterceptMode intercept_mode, TestPartResultArray* result)
+ : intercept_mode_(intercept_mode),
+ result_(result) {
+ Init();
+}
+
+void ScopedFakeTestPartResultReporter::Init() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+ old_reporter_ = impl->GetGlobalTestPartResultReporter();
+ impl->SetGlobalTestPartResultReporter(this);
+ } else {
+ old_reporter_ = impl->GetTestPartResultReporterForCurrentThread();
+ impl->SetTestPartResultReporterForCurrentThread(this);
+ }
+}
+
+// The d'tor restores the test part result reporter used by Google Test
+// before.
+ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+ impl->SetGlobalTestPartResultReporter(old_reporter_);
+ } else {
+ impl->SetTestPartResultReporterForCurrentThread(old_reporter_);
+ }
+}
+
+// Increments the test part result count and remembers the result.
+// This method is from the TestPartResultReporterInterface interface.
+void ScopedFakeTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ result_->Append(result);
+}
+
+namespace internal {
+
+// Returns the type ID of ::testing::Test. We should always call this
+// instead of GetTypeId< ::testing::Test>() to get the type ID of
+// testing::Test. This is to work around a suspected linker bug when
+// using Google Test as a framework on Mac OS X. The bug causes
+// GetTypeId< ::testing::Test>() to return different values depending
+// on whether the call is from the Google Test framework itself or
+// from user test code. GetTestTypeId() is guaranteed to always
+// return the same value, as it always calls GetTypeId<>() from the
+// gtest.cc, which is within the Google Test framework.
+TypeId GetTestTypeId() {
+ return GetTypeId<Test>();
+}
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library. This is solely for testing GetTestTypeId().
+extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId();
+
+// This predicate-formatter checks that 'results' contains a test part
+// failure of the given type and that the failure message contains the
+// given substring.
+AssertionResult HasOneFailure(const char* /* results_expr */,
+ const char* /* type_expr */,
+ const char* /* substr_expr */,
+ const TestPartResultArray& results,
+ TestPartResult::Type type,
+ const char* substr) {
+ const String expected(type == TestPartResult::kFatalFailure ?
+ "1 fatal failure" :
+ "1 non-fatal failure");
+ Message msg;
+ if (results.size() != 1) {
+ msg << "Expected: " << expected << "\n"
+ << " Actual: " << results.size() << " failures";
+ for (int i = 0; i < results.size(); i++) {
+ msg << "\n" << results.GetTestPartResult(i);
+ }
+ return AssertionFailure(msg);
+ }
+
+ const TestPartResult& r = results.GetTestPartResult(0);
+ if (r.type() != type) {
+ msg << "Expected: " << expected << "\n"
+ << " Actual:\n"
+ << r;
+ return AssertionFailure(msg);
+ }
+
+ if (strstr(r.message(), substr) == NULL) {
+ msg << "Expected: " << expected << " containing \""
+ << substr << "\"\n"
+ << " Actual:\n"
+ << r;
+ return AssertionFailure(msg);
+ }
+
+ return AssertionSuccess();
+}
+
+// The constructor of SingleFailureChecker remembers where to look up
+// test part results, what type of failure we expect, and what
+// substring the failure message should contain.
+SingleFailureChecker::SingleFailureChecker(
+ const TestPartResultArray* results,
+ TestPartResult::Type type,
+ const char* substr)
+ : results_(results),
+ type_(type),
+ substr_(substr) {}
+
+// The destructor of SingleFailureChecker verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring. If that's not the case, a
+// non-fatal failure will be generated.
+SingleFailureChecker::~SingleFailureChecker() {
+ EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_.c_str());
+}
+
+DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter(
+ UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultGlobalTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ unit_test_->current_test_result()->AddTestPartResult(result);
+ unit_test_->listeners()->repeater()->OnTestPartResult(result);
+}
+
+DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter(
+ UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultPerThreadTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result);
+}
+
+// Returns the global test part result reporter.
+TestPartResultReporterInterface*
+UnitTestImpl::GetGlobalTestPartResultReporter() {
+ internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+ return global_test_part_result_repoter_;
+}
+
+// Sets the global test part result reporter.
+void UnitTestImpl::SetGlobalTestPartResultReporter(
+ TestPartResultReporterInterface* reporter) {
+ internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+ global_test_part_result_repoter_ = reporter;
+}
+
+// Returns the test part result reporter for the current thread.
+TestPartResultReporterInterface*
+UnitTestImpl::GetTestPartResultReporterForCurrentThread() {
+ return per_thread_test_part_result_reporter_.get();
+}
+
+// Sets the test part result reporter for the current thread.
+void UnitTestImpl::SetTestPartResultReporterForCurrentThread(
+ TestPartResultReporterInterface* reporter) {
+ per_thread_test_part_result_reporter_.set(reporter);
+}
+
+// Gets the number of successful test cases.
+int UnitTestImpl::successful_test_case_count() const {
+ return CountIf(test_cases_, TestCasePassed);
+}
+
+// Gets the number of failed test cases.
+int UnitTestImpl::failed_test_case_count() const {
+ return CountIf(test_cases_, TestCaseFailed);
+}
+
+// Gets the number of all test cases.
+int UnitTestImpl::total_test_case_count() const {
+ return static_cast<int>(test_cases_.size());
+}
+
+// Gets the number of all test cases that contain at least one test
+// that should run.
+int UnitTestImpl::test_case_to_run_count() const {
+ return CountIf(test_cases_, ShouldRunTestCase);
+}
+
+// Gets the number of successful tests.
+int UnitTestImpl::successful_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count);
+}
+
+// Gets the number of failed tests.
+int UnitTestImpl::failed_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count);
+}
+
+// Gets the number of disabled tests.
+int UnitTestImpl::disabled_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count);
+}
+
+// Gets the number of all tests.
+int UnitTestImpl::total_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::total_test_count);
+}
+
+// Gets the number of tests that should run.
+int UnitTestImpl::test_to_run_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count);
+}
+
+// Returns the current OS stack trace as a String.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+// trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+String UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) {
+ (void)skip_count;
+ return String("");
+}
+
+// Returns the current time in milliseconds.
+TimeInMillis GetTimeInMillis() {
+#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__)
+ // Difference between 1970-01-01 and 1601-01-01 in milliseconds.
+ // http://analogous.blogspot.com/2005/04/epoch.html
+ const TimeInMillis kJavaEpochToWinFileTimeDelta =
+ static_cast<TimeInMillis>(116444736UL) * 100000UL;
+ const DWORD kTenthMicrosInMilliSecond = 10000;
+
+ SYSTEMTIME now_systime;
+ FILETIME now_filetime;
+ ULARGE_INTEGER now_int64;
+ // TODO(kenton@google.com): Shouldn't this just use
+ // GetSystemTimeAsFileTime()?
+ GetSystemTime(&now_systime);
+ if (SystemTimeToFileTime(&now_systime, &now_filetime)) {
+ now_int64.LowPart = now_filetime.dwLowDateTime;
+ now_int64.HighPart = now_filetime.dwHighDateTime;
+ now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) -
+ kJavaEpochToWinFileTimeDelta;
+ return now_int64.QuadPart;
+ }
+ return 0;
+#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_
+ __timeb64 now;
+#ifdef _MSC_VER
+ // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996
+ // (deprecated function) there.
+ // TODO(kenton@google.com): Use GetTickCount()? Or use
+ // SystemTimeToFileTime()
+#pragma warning(push) // Saves the current warning state.
+#pragma warning(disable:4996) // Temporarily disables warning 4996.
+ _ftime64(&now);
+#pragma warning(pop) // Restores the warning state.
+#else
+ _ftime64(&now);
+#endif // _MSC_VER
+ return static_cast<TimeInMillis>(now.time) * 1000 + now.millitm;
+#elif GTEST_HAS_GETTIMEOFDAY_
+ struct timeval now;
+ gettimeofday(&now, NULL);
+ return static_cast<TimeInMillis>(now.tv_sec) * 1000 + now.tv_usec / 1000;
+#else
+#error "Don't know how to get the current time on your system."
+#endif
+}
+
+// Utilities
+
+// class String
+
+// Returns the input enclosed in double quotes if it's not NULL;
+// otherwise returns "(null)". For example, "\"Hello\"" is returned
+// for input "Hello".
+//
+// This is useful for printing a C string in the syntax of a literal.
+//
+// Known issue: escape sequences are not handled yet.
+String String::ShowCStringQuoted(const char* c_str) {
+ return c_str ? String::Format("\"%s\"", c_str) : String("(null)");
+}
+
+// Copies at most length characters from str into a newly-allocated
+// piece of memory of size length+1. The memory is allocated with new[].
+// A terminating null byte is written to the memory, and a pointer to it
+// is returned. If str is NULL, NULL is returned.
+static char* CloneString(const char* str, size_t length) {
+ if (str == NULL) {
+ return NULL;
+ } else {
+ char* const clone = new char[length + 1];
+ posix::StrNCpy(clone, str, length);
+ clone[length] = '\0';
+ return clone;
+ }
+}
+
+// Clones a 0-terminated C string, allocating memory using new. The
+// caller is responsible for deleting[] the return value. Returns the
+// cloned string, or NULL if the input is NULL.
+const char * String::CloneCString(const char* c_str) {
+ return (c_str == NULL) ?
+ NULL : CloneString(c_str, strlen(c_str));
+}
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Creates a UTF-16 wide string from the given ANSI string, allocating
+// memory using new. The caller is responsible for deleting the return
+// value using delete[]. Returns the wide string, or NULL if the
+// input is NULL.
+LPCWSTR String::AnsiToUtf16(const char* ansi) {
+ if (!ansi) return NULL;
+ const int length = strlen(ansi);
+ const int unicode_length =
+ MultiByteToWideChar(CP_ACP, 0, ansi, length,
+ NULL, 0);
+ WCHAR* unicode = new WCHAR[unicode_length + 1];
+ MultiByteToWideChar(CP_ACP, 0, ansi, length,
+ unicode, unicode_length);
+ unicode[unicode_length] = 0;
+ return unicode;
+}
+
+// Creates an ANSI string from the given wide string, allocating
+// memory using new. The caller is responsible for deleting the return
+// value using delete[]. Returns the ANSI string, or NULL if the
+// input is NULL.
+const char* String::Utf16ToAnsi(LPCWSTR utf16_str) {
+ if (!utf16_str) return NULL;
+ const int ansi_length =
+ WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
+ NULL, 0, NULL, NULL);
+ char* ansi = new char[ansi_length + 1];
+ WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
+ ansi, ansi_length, NULL, NULL);
+ ansi[ansi_length] = 0;
+ return ansi;
+}
+
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+// Compares two C strings. Returns true iff they have the same content.
+//
+// Unlike strcmp(), this function can handle NULL argument(s). A NULL
+// C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::CStringEquals(const char * lhs, const char * rhs) {
+ if ( lhs == NULL ) return rhs == NULL;
+
+ if ( rhs == NULL ) return false;
+
+ return strcmp(lhs, rhs) == 0;
+}
+
+#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
+
+// Converts an array of wide chars to a narrow string using the UTF-8
+// encoding, and streams the result to the given Message object.
+static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length,
+ Message* msg) {
+ // TODO(wan): consider allowing a testing::String object to
+ // contain '\0'. This will make it behave more like std::string,
+ // and will allow ToUtf8String() to return the correct encoding
+ // for '\0' s.t. we can get rid of the conditional here (and in
+ // several other places).
+ for (size_t i = 0; i != length; ) { // NOLINT
+ if (wstr[i] != L'\0') {
+ *msg << WideStringToUtf8(wstr + i, static_cast<int>(length - i));
+ while (i != length && wstr[i] != L'\0')
+ i++;
+ } else {
+ *msg << '\0';
+ i++;
+ }
+ }
+}
+
+#endif // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
+
+} // namespace internal
+
+#if GTEST_HAS_STD_WSTRING
+// Converts the given wide string to a narrow string using the UTF-8
+// encoding, and streams the result to this Message object.
+Message& Message::operator <<(const ::std::wstring& wstr) {
+ internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
+ return *this;
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+// Converts the given wide string to a narrow string using the UTF-8
+// encoding, and streams the result to this Message object.
+Message& Message::operator <<(const ::wstring& wstr) {
+ internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
+ return *this;
+}
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+namespace internal {
+
+// Formats a value to be used in a failure message.
+
+// For a char value, we print it as a C++ char literal and as an
+// unsigned integer (both in decimal and in hexadecimal).
+String FormatForFailureMessage(char ch) {
+ const unsigned int ch_as_uint = ch;
+ // A String object cannot contain '\0', so we print "\\0" when ch is
+ // '\0'.
+ return String::Format("'%s' (%u, 0x%X)",
+ ch ? String::Format("%c", ch).c_str() : "\\0",
+ ch_as_uint, ch_as_uint);
+}
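+
+// For example (illustrative), FormatForFailureMessage('a') yields
+// "'a' (97, 0x61)", and FormatForFailureMessage('\0') yields
+// "'\0' (0, 0x0)".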
+
+// For a wchar_t value, we print it as a C++ wchar_t literal and as an
+// unsigned integer (both in decimal and in hexadecimal).
+String FormatForFailureMessage(wchar_t wchar) {
+ // The C++ standard doesn't specify the exact size of the wchar_t
+ // type. It just says that it shall have the same size as another
+ // integral type, called its underlying type.
+ //
+ // Therefore, in order to print a wchar_t value in the numeric form,
+ // we first convert it to the largest integral type (UInt64) and
+ // then print the converted value.
+ //
+ // We use streaming to print the value as "%llu" doesn't work
+ // correctly with MSVC 7.1.
+ const UInt64 wchar_as_uint64 = wchar;
+ Message msg;
+ // A String object cannot contain '\0', so we print "\\0" when wchar is
+ // L'\0'.
+ char buffer[32]; // CodePointToUtf8 requires a buffer that big.
+ msg << "L'"
+ << (wchar ? CodePointToUtf8(static_cast<UInt32>(wchar), buffer) : "\\0")
+ << "' (" << wchar_as_uint64 << ", 0x" << ::std::setbase(16)
+ << wchar_as_uint64 << ")";
+ return msg.GetString();
+}
+
+} // namespace internal
+
+// AssertionResult constructors.
+// Used in EXPECT_TRUE/FALSE(assertion_result).
+AssertionResult::AssertionResult(const AssertionResult& other)
+ : success_(other.success_),
+ message_(other.message_.get() != NULL ?
+ new internal::String(*other.message_) :
+ static_cast<internal::String*>(NULL)) {
+}
+
+// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+AssertionResult AssertionResult::operator!() const {
+ AssertionResult negation(!success_);
+ if (message_.get() != NULL)
+ negation << *message_;
+ return negation;
+}
+
+// Makes a successful assertion result.
+AssertionResult AssertionSuccess() {
+ return AssertionResult(true);
+}
+
+// Makes a failed assertion result.
+AssertionResult AssertionFailure() {
+ return AssertionResult(false);
+}
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << message.
+AssertionResult AssertionFailure(const Message& message) {
+ return AssertionFailure() << message;
+}
+
+namespace internal {
+
+// Constructs and returns the message for an equality assertion
+// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
+//
+// The first four parameters are the expressions used in the assertion
+// and their values, as strings. For example, for ASSERT_EQ(foo, bar)
+// where foo is 5 and bar is 6, we have:
+//
+// expected_expression: "foo"
+// actual_expression: "bar"
+// expected_value: "5"
+// actual_value: "6"
+//
+// The ignoring_case parameter is true iff the assertion is a
+// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will
+// be inserted into the message.
+AssertionResult EqFailure(const char* expected_expression,
+ const char* actual_expression,
+ const String& expected_value,
+ const String& actual_value,
+ bool ignoring_case) {
+ Message msg;
+ msg << "Value of: " << actual_expression;
+ if (actual_value != actual_expression) {
+ msg << "\n Actual: " << actual_value;
+ }
+
+ msg << "\nExpected: " << expected_expression;
+ if (ignoring_case) {
+ msg << " (ignoring case)";
+ }
+ if (expected_value != expected_expression) {
+ msg << "\nWhich is: " << expected_value;
+ }
+
+ return AssertionFailure(msg);
+}
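+
+// For example (following the ASSERT_EQ(foo, bar) case described above,
+// with foo == 5 and bar == 6), the failure message reads roughly:
+//
+//   Value of: bar
+//     Actual: 6
+//   Expected: foo
+//   Which is: 5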
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+String GetBoolAssertionFailureMessage(const AssertionResult& assertion_result,
+ const char* expression_text,
+ const char* actual_predicate_value,
+ const char* expected_predicate_value) {
+ const char* actual_message = assertion_result.message();
+ Message msg;
+ msg << "Value of: " << expression_text
+ << "\n Actual: " << actual_predicate_value;
+ if (actual_message[0] != '\0')
+ msg << " (" << actual_message << ")";
+ msg << "\nExpected: " << expected_predicate_value;
+ return msg.GetString();
+}
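+
+// For example (illustrative, using a hypothetical predicate
+// client.IsConnected()), a failing EXPECT_TRUE(client.IsConnected())
+// with an empty assertion-result message produces roughly:
+//
+//   Value of: client.IsConnected()
+//     Actual: false
+//   Expected: true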
+
+// Helper function for implementing ASSERT_NEAR.
+AssertionResult DoubleNearPredFormat(const char* expr1,
+ const char* expr2,
+ const char* abs_error_expr,
+ double val1,
+ double val2,
+ double abs_error) {
+ const double diff = fabs(val1 - val2);
+ if (diff <= abs_error) return AssertionSuccess();
+
+ // TODO(wan): do not print the value of an expression if it's
+ // already a literal.
+ Message msg;
+ msg << "The difference between " << expr1 << " and " << expr2
+ << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n"
+ << expr1 << " evaluates to " << val1 << ",\n"
+ << expr2 << " evaluates to " << val2 << ", and\n"
+ << abs_error_expr << " evaluates to " << abs_error << ".";
+ return AssertionFailure(msg);
+}
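+
+// For example (illustrative), ASSERT_NEAR(x, y, 0.1) with x == 1.0 and
+// y == 1.25 produces roughly:
+//
+//   The difference between x and y is 0.25, which exceeds 0.1, where
+//   x evaluates to 1,
+//   y evaluates to 1.25, and
+//   0.1 evaluates to 0.1.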
+
+
+// Helper template for implementing FloatLE() and DoubleLE().
+template <typename RawType>
+AssertionResult FloatingPointLE(const char* expr1,
+ const char* expr2,
+ RawType val1,
+ RawType val2) {
+ // Returns success if val1 is less than val2,
+ if (val1 < val2) {
+ return AssertionSuccess();
+ }
+
+ // or if val1 is almost equal to val2.
+ const FloatingPoint<RawType> lhs(val1), rhs(val2);
+ if (lhs.AlmostEquals(rhs)) {
+ return AssertionSuccess();
+ }
+
+ // Note that the above two checks will both fail if either val1 or
+ // val2 is NaN, as the IEEE floating-point standard requires that
+ // any predicate involving a NaN must return false.
+
+ StrStream val1_ss;
+ val1_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << val1;
+
+ StrStream val2_ss;
+ val2_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << val2;
+
+ Message msg;
+ msg << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n"
+ << " Actual: " << StrStreamToString(&val1_ss) << " vs "
+ << StrStreamToString(&val2_ss);
+
+ return AssertionFailure(msg);
+}
+
+} // namespace internal
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+AssertionResult FloatLE(const char* expr1, const char* expr2,
+ float val1, float val2) {
+ return internal::FloatingPointLE<float>(expr1, expr2, val1, val2);
+}
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+AssertionResult DoubleLE(const char* expr1, const char* expr2,
+ double val1, double val2) {
+ return internal::FloatingPointLE<double>(expr1, expr2, val1, val2);
+}
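+
+// These two are meant to be used with the predicate-format assertions,
+// e.g. (illustrative):
+//
+//   EXPECT_PRED_FORMAT2(::testing::DoubleLE, val1, val2);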
+
+namespace internal {
+
+// The helper function for {ASSERT|EXPECT}_EQ with int or enum
+// arguments.
+AssertionResult CmpHelperEQ(const char* expected_expression,
+ const char* actual_expression,
+ BiggestInt expected,
+ BiggestInt actual) {
+ if (expected == actual) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ FormatForComparisonFailureMessage(expected, actual),
+ FormatForComparisonFailureMessage(actual, expected),
+ false);
+}
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_?? with integer or enum arguments. It is here
+// just to avoid copy-and-paste of similar code.
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+ BiggestInt val1, BiggestInt val2) {\
+ if (val1 op val2) {\
+ return AssertionSuccess();\
+ } else {\
+ Message msg;\
+ msg << "Expected: (" << expr1 << ") " #op " (" << expr2\
+ << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
+ << " vs " << FormatForComparisonFailureMessage(val2, val1);\
+ return AssertionFailure(msg);\
+ }\
+}
+
+// Implements the helper function for {ASSERT|EXPECT}_NE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(NE, !=)
+// Implements the helper function for {ASSERT|EXPECT}_LE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LE, <=)
+// Implements the helper function for {ASSERT|EXPECT}_LT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LT, < )
+// Implements the helper function for {ASSERT|EXPECT}_GE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GE, >=)
+// Implements the helper function for {ASSERT|EXPECT}_GT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GT, > )
+
+#undef GTEST_IMPL_CMP_HELPER_
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual) {
+ if (String::CStringEquals(expected, actual)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ String::ShowCStringQuoted(expected),
+ String::ShowCStringQuoted(actual),
+ false);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual) {
+ if (String::CaseInsensitiveCStringEquals(expected, actual)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ String::ShowCStringQuoted(expected),
+ String::ShowCStringQuoted(actual),
+ true);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2) {
+ if (!String::CStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ } else {
+ Message msg;
+ msg << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << "), actual: \""
+ << s1 << "\" vs \"" << s2 << "\"";
+ return AssertionFailure(msg);
+ }
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2) {
+ if (!String::CaseInsensitiveCStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ } else {
+ Message msg;
+ msg << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << ") (ignoring case), actual: \""
+ << s1 << "\" vs \"" << s2 << "\"";
+ return AssertionFailure(msg);
+ }
+}
+
+} // namespace internal
+
+namespace {
+
+// Helper functions for implementing IsSubString() and IsNotSubstring().
+
+// This group of overloaded functions returns true iff needle is a
+// substring of haystack. NULL is considered a substring of itself
+// only.
+
+bool IsSubstringPred(const char* needle, const char* haystack) {
+ if (needle == NULL || haystack == NULL)
+ return needle == haystack;
+
+ return strstr(haystack, needle) != NULL;
+}
+
+bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) {
+ if (needle == NULL || haystack == NULL)
+ return needle == haystack;
+
+ return wcsstr(haystack, needle) != NULL;
+}
+
+// StringType here can be either ::std::string or ::std::wstring.
+template <typename StringType>
+bool IsSubstringPred(const StringType& needle,
+ const StringType& haystack) {
+ return haystack.find(needle) != StringType::npos;
+}
+
+// This function implements either IsSubstring() or IsNotSubstring(),
+// depending on the value of the expected_to_be_substring parameter.
+// StringType here can be const char*, const wchar_t*, ::std::string,
+// or ::std::wstring.
+template <typename StringType>
+AssertionResult IsSubstringImpl(
+ bool expected_to_be_substring,
+ const char* needle_expr, const char* haystack_expr,
+ const StringType& needle, const StringType& haystack) {
+ if (IsSubstringPred(needle, haystack) == expected_to_be_substring)
+ return AssertionSuccess();
+
+ const bool is_wide_string = sizeof(needle[0]) > 1;
+ const char* const begin_string_quote = is_wide_string ? "L\"" : "\"";
+ return AssertionFailure(
+ Message()
+ << "Value of: " << needle_expr << "\n"
+ << " Actual: " << begin_string_quote << needle << "\"\n"
+ << "Expected: " << (expected_to_be_substring ? "" : "not ")
+ << "a substring of " << haystack_expr << "\n"
+ << "Which is: " << begin_string_quote << haystack << "\"");
+}
+
+} // namespace
+
+// IsSubstring() and IsNotSubstring() check whether needle is a
+// substring of haystack (NULL is considered a substring of itself
+// only), and return an appropriate error message when they fail.
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+#if GTEST_HAS_STD_WSTRING
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+#endif // GTEST_HAS_STD_WSTRING
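+
+// Like FloatLE/DoubleLE above, these overloads are predicate-formatters,
+// e.g. (illustrative):
+//
+//   EXPECT_PRED_FORMAT2(::testing::IsSubstring, "needle", "two needles");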
+
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+
+namespace {
+
+// Helper function for IsHRESULT{SuccessFailure} predicates
+AssertionResult HRESULTFailureHelper(const char* expr,
+ const char* expected,
+ long hr) { // NOLINT
+#if GTEST_OS_WINDOWS_MOBILE
+ // Windows CE doesn't support FormatMessage.
+ const char error_text[] = "";
+#else
+  // Looks up the human-readable system message for the HRESULT code.
+  // Since we're not passing any params to FormatMessage, we don't
+  // want inserts expanded.
+ const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS;
+ const DWORD kBufSize = 4096; // String::Format can't exceed this length.
+ // Gets the system's human readable message string for this HRESULT.
+ char error_text[kBufSize] = { '\0' };
+ DWORD message_length = ::FormatMessageA(kFlags,
+ 0, // no source, we're asking system
+ hr, // the error
+ 0, // no line width restrictions
+ error_text, // output buffer
+ kBufSize, // buf size
+ NULL); // no arguments for inserts
+  // Trims trailing whitespace (FormatMessage leaves a trailing CR-LF)
+ for (; message_length && isspace(error_text[message_length - 1]);
+ --message_length) {
+ error_text[message_length - 1] = '\0';
+ }
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+ const String error_hex(String::Format("0x%08X ", hr));
+ Message msg;
+ msg << "Expected: " << expr << " " << expected << ".\n"
+ << " Actual: " << error_hex << error_text << "\n";
+
+ return ::testing::AssertionFailure(msg);
+}
+
+} // namespace
+
+AssertionResult IsHRESULTSuccess(const char* expr, long hr) { // NOLINT
+ if (SUCCEEDED(hr)) {
+ return AssertionSuccess();
+ }
+ return HRESULTFailureHelper(expr, "succeeds", hr);
+}
+
+AssertionResult IsHRESULTFailure(const char* expr, long hr) { // NOLINT
+ if (FAILED(hr)) {
+ return AssertionSuccess();
+ }
+ return HRESULTFailureHelper(expr, "fails", hr);
+}
+
+#endif // GTEST_OS_WINDOWS
+
+// Utility functions for encoding Unicode text (wide strings) in
+// UTF-8.
+
+// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8
+// like this:
+//
+// Code-point length Encoding
+// 0 - 7 bits 0xxxxxxx
+// 8 - 11 bits 110xxxxx 10xxxxxx
+// 12 - 16 bits 1110xxxx 10xxxxxx 10xxxxxx
+// 17 - 21 bits 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+
+// The maximum code-point a one-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint1 = (static_cast<UInt32>(1) << 7) - 1;
+
+// The maximum code-point a two-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint2 = (static_cast<UInt32>(1) << (5 + 6)) - 1;
+
+// The maximum code-point a three-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint3 = (static_cast<UInt32>(1) << (4 + 2*6)) - 1;
+
+// The maximum code-point a four-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;
+
+// Chops off the n lowest bits from a bit pattern. Returns the n
+// lowest bits. As a side effect, the original bit pattern will be
+// shifted to the right by n bits.
+inline UInt32 ChopLowBits(UInt32* bits, int n) {
+ const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);
+ *bits >>= n;
+ return low_bits;
+}
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// The output buffer str must contain at least 32 characters.
+// The function returns the address of the output buffer.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'.
+char* CodePointToUtf8(UInt32 code_point, char* str) {
+ if (code_point <= kMaxCodePoint1) {
+ str[1] = '\0';
+ str[0] = static_cast<char>(code_point); // 0xxxxxxx
+ } else if (code_point <= kMaxCodePoint2) {
+ str[2] = '\0';
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xC0 | code_point); // 110xxxxx
+ } else if (code_point <= kMaxCodePoint3) {
+ str[3] = '\0';
+ str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xE0 | code_point); // 1110xxxx
+ } else if (code_point <= kMaxCodePoint4) {
+ str[4] = '\0';
+ str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xF0 | code_point); // 11110xxx
+ } else {
+ // The longest string String::Format can produce when invoked
+    // with these parameters is 28 characters long (not including
+    // the terminating nul character). We are asking for a 32-character
+    // buffer just in case. This is also enough for strncpy to
+ // null-terminate the destination string.
+ posix::StrNCpy(
+ str, String::Format("(Invalid Unicode 0x%X)", code_point).c_str(), 32);
+ str[31] = '\0'; // Makes sure no change in the format to strncpy leaves
+ // the result unterminated.
+ }
+ return str;
+}
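+
+// Worked example (illustrative): the Euro sign U+20AC needs three bytes.
+// Its 16 bits are 0010 0000 1010 1100; the two lowest 6-bit groups are
+// 101100 (0x2C) and 000010 (0x02), and the remaining 4 bits are 0010, so
+// CodePointToUtf8(0x20AC, buffer) writes 0xE2 0x82 0xAC ("\xE2\x82\xAC").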
+
+// The following two functions only make sense if the system
+// uses UTF-16 for wide string encoding. All supported systems
+// with a 16-bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
+
+// Determines if the arguments constitute UTF-16 surrogate pair
+// and thus should be combined into a single Unicode code point
+// using CreateCodePointFromUtf16SurrogatePair.
+inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
+ return sizeof(wchar_t) == 2 &&
+ (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
+}
+
+// Creates a Unicode code point from a UTF-16 surrogate pair.
+inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
+ wchar_t second) {
+ const UInt32 mask = (1 << 10) - 1;
+ return (sizeof(wchar_t) == 2) ?
+ (((first & mask) << 10) | (second & mask)) + 0x10000 :
+ // This function should not be called when the condition is
+ // false, but we provide a sensible default in case it is.
+ static_cast<UInt32>(first);
+}
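+
+// Worked example (illustrative): U+1D11E (musical G clef) is stored in
+// UTF-16 as the surrogate pair 0xD834 0xDD1E. Here
+// IsUtf16SurrogatePair(0xD834, 0xDD1E) is true, and
+// CreateCodePointFromUtf16SurrogatePair(0xD834, 0xDD1E) recombines the
+// two 10-bit halves: ((0x034 << 10) | 0x11E) + 0x10000 == 0x1D11E.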
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+// UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF-16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
+String WideStringToUtf8(const wchar_t* str, int num_chars) {
+ if (num_chars == -1)
+ num_chars = static_cast<int>(wcslen(str));
+
+ StrStream stream;
+ for (int i = 0; i < num_chars; ++i) {
+ UInt32 unicode_code_point;
+
+ if (str[i] == L'\0') {
+ break;
+ } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) {
+ unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i],
+ str[i + 1]);
+ i++;
+ } else {
+ unicode_code_point = static_cast<UInt32>(str[i]);
+ }
+
+ char buffer[32]; // CodePointToUtf8 requires a buffer this big.
+ stream << CodePointToUtf8(unicode_code_point, buffer);
+ }
+ return StrStreamToString(&stream);
+}
+
+// Converts a wide C string to a String using the UTF-8 encoding.
+// NULL will be converted to "(null)".
+String String::ShowWideCString(const wchar_t * wide_c_str) {
+ if (wide_c_str == NULL) return String("(null)");
+
+ return String(internal::WideStringToUtf8(wide_c_str, -1).c_str());
+}
+
+// Similar to ShowWideCString(), except that this function encloses
+// the converted string in double quotes.
+String String::ShowWideCStringQuoted(const wchar_t* wide_c_str) {
+ if (wide_c_str == NULL) return String("(null)");
+
+ return String::Format("L\"%s\"",
+ String::ShowWideCString(wide_c_str).c_str());
+}
+
+// Compares two wide C strings. Returns true iff they have the same
+// content.
+//
+// Unlike wcscmp(), this function can handle NULL argument(s). A NULL
+// C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) {
+ if (lhs == NULL) return rhs == NULL;
+
+ if (rhs == NULL) return false;
+
+ return wcscmp(lhs, rhs) == 0;
+}
+
+// Helper function for *_STREQ on wide strings.
+AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const wchar_t* expected,
+ const wchar_t* actual) {
+ if (String::WideCStringEquals(expected, actual)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ String::ShowWideCStringQuoted(expected),
+ String::ShowWideCStringQuoted(actual),
+ false);
+}
+
+// Helper function for *_STRNE on wide strings.
+AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2) {
+ if (!String::WideCStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ }
+
+ Message msg;
+ msg << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << "), actual: "
+ << String::ShowWideCStringQuoted(s1)
+ << " vs " << String::ShowWideCStringQuoted(s2);
+ return AssertionFailure(msg);
+}
+
+// Compares two C strings, ignoring case. Returns true iff they have
+// the same content.
+//
+// Unlike strcasecmp(), this function can handle NULL argument(s). A
+// NULL C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) {
+ if (lhs == NULL)
+ return rhs == NULL;
+ if (rhs == NULL)
+ return false;
+ return posix::StrCaseCmp(lhs, rhs) == 0;
+}
+
+// Compares two wide C strings, ignoring case. Returns true iff they
+// have the same content.
+//
+// Unlike wcscasecmp(), this function can handle NULL argument(s).
+// A NULL C string is considered different to any non-NULL wide C string,
+// including the empty string.
+// NB: The implementations on different platforms slightly differ.
+// On Windows, this method uses _wcsicmp, which compares according to the
+// LC_CTYPE environment variable. On GNU platforms this method uses
+// wcscasecmp, which compares according to the LC_CTYPE category of the
+// current locale. On Mac OS X, it uses towlower, which also uses the
+// LC_CTYPE category of the current locale.
+bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
+ const wchar_t* rhs) {
+ if ( lhs == NULL ) return rhs == NULL;
+
+ if ( rhs == NULL ) return false;
+
+#if GTEST_OS_WINDOWS
+ return _wcsicmp(lhs, rhs) == 0;
+#elif GTEST_OS_LINUX
+ return wcscasecmp(lhs, rhs) == 0;
+#else
+ // Mac OS X and Cygwin don't define wcscasecmp. Other unknown OSes
+ // may not define it either.
+ wint_t left, right;
+ do {
+ left = towlower(*lhs++);
+ right = towlower(*rhs++);
+ } while (left && left == right);
+ return left == right;
+#endif // OS selector
+}
+
+// Compares this with another String.
+// Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0
+// if this is greater than rhs.
+int String::Compare(const String & rhs) const {
+ const char* const lhs_c_str = c_str();
+ const char* const rhs_c_str = rhs.c_str();
+
+ if (lhs_c_str == NULL) {
+ return rhs_c_str == NULL ? 0 : -1; // NULL < anything except NULL
+ } else if (rhs_c_str == NULL) {
+ return 1;
+ }
+
+ const size_t shorter_str_len =
+ length() <= rhs.length() ? length() : rhs.length();
+ for (size_t i = 0; i != shorter_str_len; i++) {
+ if (lhs_c_str[i] < rhs_c_str[i]) {
+ return -1;
+ } else if (lhs_c_str[i] > rhs_c_str[i]) {
+ return 1;
+ }
+ }
+ return (length() < rhs.length()) ? -1 :
+ (length() > rhs.length()) ? 1 : 0;
+}
+
+// Returns true iff this String ends with the given suffix. *Any*
+// String is considered to end with a NULL or empty suffix.
+bool String::EndsWith(const char* suffix) const {
+ if (suffix == NULL || CStringEquals(suffix, "")) return true;
+
+ if (c_str() == NULL) return false;
+
+ const size_t this_len = strlen(c_str());
+ const size_t suffix_len = strlen(suffix);
+ return (this_len >= suffix_len) &&
+ CStringEquals(c_str() + this_len - suffix_len, suffix);
+}
+
+// Returns true iff this String ends with the given suffix, ignoring case.
+// Any String is considered to end with a NULL or empty suffix.
+bool String::EndsWithCaseInsensitive(const char* suffix) const {
+ if (suffix == NULL || CStringEquals(suffix, "")) return true;
+
+ if (c_str() == NULL) return false;
+
+ const size_t this_len = strlen(c_str());
+ const size_t suffix_len = strlen(suffix);
+ return (this_len >= suffix_len) &&
+ CaseInsensitiveCStringEquals(c_str() + this_len - suffix_len, suffix);
+}
+
+// Formats a list of arguments to a String, using the same format
+// spec string as for printf.
+//
+// We do not use the StringPrintf class as it is not universally
+// available.
+//
+// The result is limited to 4096 characters (including the trailing 0).
+// If 4096 characters are not enough to format the input, or if
+// there's an error, "<formatting error or buffer exceeded>" is
+// returned.
+String String::Format(const char * format, ...) {
+ va_list args;
+ va_start(args, format);
+
+ char buffer[4096];
+ const int kBufferSize = sizeof(buffer)/sizeof(buffer[0]);
+
+ // MSVC 8 deprecates vsnprintf(), so we want to suppress warning
+ // 4996 (deprecated function) there.
+#ifdef _MSC_VER // We are using MSVC.
+#pragma warning(push) // Saves the current warning state.
+#pragma warning(disable:4996) // Temporarily disables warning 4996.
+ const int size = vsnprintf(buffer, kBufferSize, format, args);
+#pragma warning(pop) // Restores the warning state.
+#else // We are not using MSVC.
+ const int size = vsnprintf(buffer, kBufferSize, format, args);
+#endif // _MSC_VER
+ va_end(args);
+
+ // vsnprintf()'s behavior is not portable. When the buffer is not
+ // big enough, it returns a negative value in MSVC, and returns the
+ // needed buffer size on Linux. When there is an output error, it
+ // always returns a negative value. For simplicity, we lump the two
+ // error cases together.
+ if (size < 0 || size >= kBufferSize) {
+ return String("<formatting error or buffer exceeded>");
+ } else {
+ return String(buffer, size);
+ }
+}
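+
+// For example (illustrative):
+//
+//   String s = String::Format("%d %s", 5, "books");  // s is "5 books".
+//
+// Inputs that need more than 4095 characters (or trigger an output
+// error) yield "<formatting error or buffer exceeded>" instead.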
+
+// Converts the buffer in a StrStream to a String, converting NUL
+// bytes to "\\0" along the way.
+String StrStreamToString(StrStream* ss) {
+ const ::std::string& str = ss->str();
+ const char* const start = str.c_str();
+ const char* const end = start + str.length();
+
+ // We need to use a helper StrStream to do this transformation
+ // because String doesn't support push_back().
+ StrStream helper;
+ for (const char* ch = start; ch != end; ++ch) {
+ if (*ch == '\0') {
+ helper << "\\0"; // Replaces NUL with "\\0";
+ } else {
+ helper.put(*ch);
+ }
+ }
+
+ return String(helper.str().c_str());
+}
+
+// Appends the user-supplied message to the Google-Test-generated message.
+String AppendUserMessage(const String& gtest_msg,
+ const Message& user_msg) {
+ // Appends the user message if it's non-empty.
+ const String user_msg_string = user_msg.GetString();
+ if (user_msg_string.empty()) {
+ return gtest_msg;
+ }
+
+ Message msg;
+ msg << gtest_msg << "\n" << user_msg_string;
+
+ return msg.GetString();
+}
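+
+// For example (illustrative), if gtest_msg is "Expected: true" and the
+// user streamed << "check the config" into the assertion, the combined
+// message is:
+//
+//   Expected: true
+//   check the config
+//
+// An empty user message leaves gtest_msg unchanged.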
+
+} // namespace internal
+
+// class TestResult
+
+// Creates an empty TestResult.
+TestResult::TestResult()
+ : death_test_count_(0),
+ elapsed_time_(0) {
+}
+
+// D'tor.
+TestResult::~TestResult() {
+}
+
+// Returns the i-th test part result among all the results. i can
+// range from 0 to total_part_count() - 1. If i is not in that range,
+// aborts the program.
+const TestPartResult& TestResult::GetTestPartResult(int i) const {
+ if (i < 0 || i >= total_part_count())
+ internal::posix::Abort();
+ return test_part_results_.at(i);
+}
+
+// Returns the i-th test property. i can range from 0 to
+// test_property_count() - 1. If i is not in that range, aborts the
+// program.
+const TestProperty& TestResult::GetTestProperty(int i) const {
+ if (i < 0 || i >= test_property_count())
+ internal::posix::Abort();
+ return test_properties_.at(i);
+}
+
+// Clears the test part results.
+void TestResult::ClearTestPartResults() {
+ test_part_results_.clear();
+}
+
+// Adds a test part result to the list.
+void TestResult::AddTestPartResult(const TestPartResult& test_part_result) {
+ test_part_results_.push_back(test_part_result);
+}
+
+// Adds a test property to the list. If a property with the same key as the
+// supplied property is already represented, the value of this test_property
+// replaces the old value for that key.
+void TestResult::RecordProperty(const TestProperty& test_property) {
+ if (!ValidateTestProperty(test_property)) {
+ return;
+ }
+ internal::MutexLock lock(&test_properites_mutex_);
+ const std::vector<TestProperty>::iterator property_with_matching_key =
+ std::find_if(test_properties_.begin(), test_properties_.end(),
+ internal::TestPropertyKeyIs(test_property.key()));
+ if (property_with_matching_key == test_properties_.end()) {
+ test_properties_.push_back(test_property);
+ return;
+ }
+ property_with_matching_key->SetValue(test_property.value());
+}
+
+// Adds a failure if the key is a reserved attribute of Google Test
+// testcase tags. Returns true if the property is valid.
+bool TestResult::ValidateTestProperty(const TestProperty& test_property) {
+ internal::String key(test_property.key());
+ if (key == "name" || key == "status" || key == "time" || key == "classname") {
+ ADD_FAILURE()
+ << "Reserved key used in RecordProperty(): "
+ << key
+ << " ('name', 'status', 'time', and 'classname' are reserved by "
+ << GTEST_NAME_ << ")";
+ return false;
+ }
+ return true;
+}
+
+// Clears the object.
+void TestResult::Clear() {
+ test_part_results_.clear();
+ test_properties_.clear();
+ death_test_count_ = 0;
+ elapsed_time_ = 0;
+}
+
+// Returns true iff the test failed.
+bool TestResult::Failed() const {
+ for (int i = 0; i < total_part_count(); ++i) {
+ if (GetTestPartResult(i).failed())
+ return true;
+ }
+ return false;
+}
+
+// Returns true iff the test part fatally failed.
+static bool TestPartFatallyFailed(const TestPartResult& result) {
+ return result.fatally_failed();
+}
+
+// Returns true iff the test fatally failed.
+bool TestResult::HasFatalFailure() const {
+ return CountIf(test_part_results_, TestPartFatallyFailed) > 0;
+}
+
+// Returns true iff the test part non-fatally failed.
+static bool TestPartNonfatallyFailed(const TestPartResult& result) {
+ return result.nonfatally_failed();
+}
+
+// Returns true iff the test has a non-fatal failure.
+bool TestResult::HasNonfatalFailure() const {
+ return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0;
+}
+
+// Gets the number of all test parts. This is the sum of the number
+// of successful test parts and the number of failed test parts.
+int TestResult::total_part_count() const {
+ return static_cast<int>(test_part_results_.size());
+}
+
+// Returns the number of the test properties.
+int TestResult::test_property_count() const {
+ return static_cast<int>(test_properties_.size());
+}
+
+// class Test
+
+// Creates a Test object.
+
+// The c'tor saves the values of all Google Test flags.
+Test::Test()
+ : gtest_flag_saver_(new internal::GTestFlagSaver) {
+}
+
+// The d'tor restores the values of all Google Test flags.
+Test::~Test() {
+ delete gtest_flag_saver_;
+}
+
+// Sets up the test fixture.
+//
+// A sub-class may override this.
+void Test::SetUp() {
+}
+
+// Tears down the test fixture.
+//
+// A sub-class may override this.
+void Test::TearDown() {
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const char* key, const char* value) {
+ UnitTest::GetInstance()->RecordPropertyForCurrentTest(key, value);
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const char* key, int value) {
+ Message value_message;
+ value_message << value;
+ RecordProperty(key, value_message.GetString().c_str());
+}
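+
+// For example (illustrative), a test body may call
+//
+//   RecordProperty("MaximumWidgets", 12);
+//
+// and the key/value pair is attached to the current test's result (it
+// shows up as an attribute of the corresponding <testcase> element in
+// the XML report). Reserved keys are rejected by ValidateTestProperty()
+// above.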
+
+namespace internal {
+
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+ const String& message) {
+ // This function is a friend of UnitTest and as such has access to
+ // AddTestPartResult.
+ UnitTest::GetInstance()->AddTestPartResult(
+ result_type,
+ NULL, // No info about the source file where the exception occurred.
+ -1, // We have no info on which line caused the exception.
+ message,
+ String()); // No stack trace, either.
+}
+
+} // namespace internal
+
+#if GTEST_OS_WINDOWS
+// We are on Windows.
+
+// Adds an "exception thrown" fatal failure to the current test.
+static void AddExceptionThrownFailure(DWORD exception_code,
+ const char* location) {
+ Message message;
+ message << "Exception thrown with code 0x" << std::setbase(16) <<
+ exception_code << std::setbase(10) << " in " << location << ".";
+
+ internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure,
+ message.GetString());
+}
+
+#endif // GTEST_OS_WINDOWS
+
+// Google Test requires all tests in the same test case to use the same test
+// fixture class. This function checks if the current test has the
+// same fixture class as the first test in the current test case. If
+// yes, it returns true; otherwise it generates a Google Test failure and
+// returns false.
+bool Test::HasSameFixtureClass() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ const TestCase* const test_case = impl->current_test_case();
+
+ // Info about the first test in the current test case.
+ const internal::TestInfoImpl* const first_test_info =
+ test_case->test_info_list()[0]->impl();
+ const internal::TypeId first_fixture_id = first_test_info->fixture_class_id();
+ const char* const first_test_name = first_test_info->name();
+
+ // Info about the current test.
+ const internal::TestInfoImpl* const this_test_info =
+ impl->current_test_info()->impl();
+ const internal::TypeId this_fixture_id = this_test_info->fixture_class_id();
+ const char* const this_test_name = this_test_info->name();
+
+ if (this_fixture_id != first_fixture_id) {
+ // Is the first test defined using TEST?
+ const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId();
+ // Is this test defined using TEST?
+ const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId();
+
+ if (first_is_TEST || this_is_TEST) {
+ // The user mixed TEST and TEST_F in this test case - we'll tell
+ // him/her how to fix it.
+
+ // Gets the name of the TEST and the name of the TEST_F. Note
+ // that first_is_TEST and this_is_TEST cannot both be true, as
+ // the fixture IDs are different for the two tests.
+ const char* const TEST_name =
+ first_is_TEST ? first_test_name : this_test_name;
+ const char* const TEST_F_name =
+ first_is_TEST ? this_test_name : first_test_name;
+
+ ADD_FAILURE()
+ << "All tests in the same test case must use the same test fixture\n"
+ << "class, so mixing TEST_F and TEST in the same test case is\n"
+ << "illegal. In test case " << this_test_info->test_case_name()
+ << ",\n"
+ << "test " << TEST_F_name << " is defined using TEST_F but\n"
+ << "test " << TEST_name << " is defined using TEST. You probably\n"
+ << "want to change the TEST to TEST_F or move it to another test\n"
+ << "case.";
+ } else {
+ // The user defined two fixture classes with the same name in
+ // two namespaces - we'll tell him/her how to fix it.
+ ADD_FAILURE()
+ << "All tests in the same test case must use the same test fixture\n"
+ << "class. However, in test case "
+ << this_test_info->test_case_name() << ",\n"
+ << "you defined test " << first_test_name
+ << " and test " << this_test_name << "\n"
+ << "using two different test fixture classes. This can happen if\n"
+ << "the two classes are from different namespaces or translation\n"
+ << "units and have the same name. You should probably rename one\n"
+ << "of the classes to put the tests into different test cases.";
+ }
+ return false;
+ }
+
+ return true;
+}
+
+// Runs the test and updates the test result.
+void Test::Run() {
+ if (!HasSameFixtureClass()) return;
+
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+#if GTEST_HAS_SEH
+ // Catch SEH-style exceptions.
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ __try {
+ SetUp();
+ } __except(internal::UnitTestOptions::GTestShouldProcessSEH(
+ GetExceptionCode())) {
+ AddExceptionThrownFailure(GetExceptionCode(), "SetUp()");
+ }
+
+ // We will run the test only if SetUp() had no fatal failure.
+ if (!HasFatalFailure()) {
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ __try {
+ TestBody();
+ } __except(internal::UnitTestOptions::GTestShouldProcessSEH(
+ GetExceptionCode())) {
+ AddExceptionThrownFailure(GetExceptionCode(), "the test body");
+ }
+ }
+
+ // However, we want to clean up as much as possible. Hence we will
+ // always call TearDown(), even if SetUp() or the test body has
+ // failed.
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ __try {
+ TearDown();
+ } __except(internal::UnitTestOptions::GTestShouldProcessSEH(
+ GetExceptionCode())) {
+ AddExceptionThrownFailure(GetExceptionCode(), "TearDown()");
+ }
+
+#else // We are on a compiler or platform that doesn't support SEH.
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ SetUp();
+
+ // We will run the test only if SetUp() was successful.
+ if (!HasFatalFailure()) {
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ TestBody();
+ }
+
+ // However, we want to clean up as much as possible. Hence we will
+ // always call TearDown(), even if SetUp() or the test body has
+ // failed.
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ TearDown();
+#endif // GTEST_HAS_SEH
+}
+
+
+// Returns true iff the current test has a fatal failure.
+bool Test::HasFatalFailure() {
+ return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure();
+}
+
+// Returns true iff the current test has a non-fatal failure.
+bool Test::HasNonfatalFailure() {
+ return internal::GetUnitTestImpl()->current_test_result()->
+ HasNonfatalFailure();
+}
+
+// class TestInfo
+
+// Constructs a TestInfo object. It assumes ownership of the test factory
+// object via impl_.
+TestInfo::TestInfo(const char* a_test_case_name,
+ const char* a_name,
+ const char* a_test_case_comment,
+ const char* a_comment,
+ internal::TypeId fixture_class_id,
+ internal::TestFactoryBase* factory) {
+ impl_ = new internal::TestInfoImpl(this, a_test_case_name, a_name,
+ a_test_case_comment, a_comment,
+ fixture_class_id, factory);
+}
+
+// Destructs a TestInfo object.
+TestInfo::~TestInfo() {
+ delete impl_;
+}
+
+namespace internal {
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+// test_case_name: name of the test case
+// name: name of the test
+// test_case_comment: a comment on the test case that will be included in
+// the test output
+// comment: a comment on the test that will be included in the
+// test output
+// fixture_class_id: ID of the test fixture class
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+// factory: pointer to the factory that creates a test object.
+// The newly created TestInfo instance will assume
+// ownership of the factory object.
+TestInfo* MakeAndRegisterTestInfo(
+ const char* test_case_name, const char* name,
+ const char* test_case_comment, const char* comment,
+ TypeId fixture_class_id,
+ SetUpTestCaseFunc set_up_tc,
+ TearDownTestCaseFunc tear_down_tc,
+ TestFactoryBase* factory) {
+ TestInfo* const test_info =
+ new TestInfo(test_case_name, name, test_case_comment, comment,
+ fixture_class_id, factory);
+ GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info);
+ return test_info;
+}
+
+#if GTEST_HAS_PARAM_TEST
+void ReportInvalidTestCaseType(const char* test_case_name,
+ const char* file, int line) {
+ Message errors;
+ errors
+ << "Attempted redefinition of test case " << test_case_name << ".\n"
+ << "All tests in the same test case must use the same test fixture\n"
+ << "class. However, in test case " << test_case_name << ", you tried\n"
+ << "to define a test using a fixture class different from the one\n"
+ << "used earlier. This can happen if the two fixture classes are\n"
+ << "from different namespaces and have the same name. You should\n"
+ << "probably rename one of the classes to put the tests into different\n"
+ << "test cases.";
+
+ fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
+ errors.GetString().c_str());
+}
+#endif // GTEST_HAS_PARAM_TEST
+
+} // namespace internal
+
+// Returns the test case name.
+const char* TestInfo::test_case_name() const {
+ return impl_->test_case_name();
+}
+
+// Returns the test name.
+const char* TestInfo::name() const {
+ return impl_->name();
+}
+
+// Returns the test case comment.
+const char* TestInfo::test_case_comment() const {
+ return impl_->test_case_comment();
+}
+
+// Returns the test comment.
+const char* TestInfo::comment() const {
+ return impl_->comment();
+}
+
+// Returns true if this test should run.
+bool TestInfo::should_run() const { return impl_->should_run(); }
+
+// Returns true if this test matches the user-specified filter.
+bool TestInfo::matches_filter() const { return impl_->matches_filter(); }
+
+// Returns the result of the test.
+const TestResult* TestInfo::result() const { return impl_->result(); }
+
+// Increments the number of death tests encountered in this test so
+// far.
+int TestInfo::increment_death_test_count() {
+ return impl_->result()->increment_death_test_count();
+}
+
+namespace {
+
+// A predicate that checks the test name of a TestInfo against a known
+// value.
+//
+// This is used for implementation of the TestCase class only. We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestNameIs is copyable.
+class TestNameIs {
+ public:
+ // Constructor.
+ //
+ // TestNameIs has NO default constructor.
+ explicit TestNameIs(const char* name)
+ : name_(name) {}
+
+ // Returns true iff the test name of test_info matches name_.
+ bool operator()(const TestInfo * test_info) const {
+ return test_info && internal::String(test_info->name()).Compare(name_) == 0;
+ }
+
+ private:
+ internal::String name_;
+};
+
+} // namespace
+
+namespace internal {
+
+// This method expands all parameterized tests registered with macros TEST_P
+// and INSTANTIATE_TEST_CASE_P into regular tests and registers those.
+// This will be done just once during the program runtime.
+void UnitTestImpl::RegisterParameterizedTests() {
+#if GTEST_HAS_PARAM_TEST
+ if (!parameterized_tests_registered_) {
+ parameterized_test_registry_.RegisterTests();
+ parameterized_tests_registered_ = true;
+ }
+#endif
+}
+
+// Creates the test object, runs it, records its result, and then
+// deletes it.
+void TestInfoImpl::Run() {
+ if (!should_run_) return;
+
+ // Tells UnitTest where to store test result.
+ UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->set_current_test_info(parent_);
+
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+ // Notifies the unit test event listeners that a test is about to start.
+ repeater->OnTestStart(*parent_);
+
+ const TimeInMillis start = GetTimeInMillis();
+
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+#if GTEST_HAS_SEH
+ // Catch SEH-style exceptions.
+ Test* test = NULL;
+
+ __try {
+ // Creates the test object.
+ test = factory_->CreateTest();
+ } __except(internal::UnitTestOptions::GTestShouldProcessSEH(
+ GetExceptionCode())) {
+ AddExceptionThrownFailure(GetExceptionCode(),
+ "the test fixture's constructor");
+ return;
+ }
+#else // We are on a compiler or platform that doesn't support SEH.
+
+ // TODO(wan): If test->Run() throws, test won't be deleted. This is
+ // not a problem now as we don't use exceptions. If we were to
+ // enable exceptions, we should revise the following to be
+ // exception-safe.
+
+ // Creates the test object.
+ Test* test = factory_->CreateTest();
+#endif // GTEST_HAS_SEH
+
+ // Runs the test only if the constructor of the test fixture didn't
+ // generate a fatal failure.
+ if (!Test::HasFatalFailure()) {
+ test->Run();
+ }
+
+ // Deletes the test object.
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ delete test;
+ test = NULL;
+
+ result_.set_elapsed_time(GetTimeInMillis() - start);
+
+ // Notifies the unit test event listener that a test has just finished.
+ repeater->OnTestEnd(*parent_);
+
+ // Tells UnitTest to stop associating assertion results to this
+ // test.
+ impl->set_current_test_info(NULL);
+}
+
+} // namespace internal
+
+// class TestCase
+
+// Gets the number of successful tests in this test case.
+int TestCase::successful_test_count() const {
+ return CountIf(test_info_list_, TestPassed);
+}
+
+// Gets the number of failed tests in this test case.
+int TestCase::failed_test_count() const {
+ return CountIf(test_info_list_, TestFailed);
+}
+
+// Gets the number of disabled tests in this test case.
+int TestCase::disabled_test_count() const {
+ return CountIf(test_info_list_, TestDisabled);
+}
+
+// Gets the number of tests in this test case that should run.
+int TestCase::test_to_run_count() const {
+ return CountIf(test_info_list_, ShouldRunTest);
+}
+
+// Gets the number of all tests.
+int TestCase::total_test_count() const {
+ return static_cast<int>(test_info_list_.size());
+}
+
+// Creates a TestCase with the given name.
+//
+// Arguments:
+//
+// name: name of the test case
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+TestCase::TestCase(const char* a_name, const char* a_comment,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc)
+ : name_(a_name),
+ comment_(a_comment),
+ set_up_tc_(set_up_tc),
+ tear_down_tc_(tear_down_tc),
+ should_run_(false),
+ elapsed_time_(0) {
+}
+
+// Destructor of TestCase.
+TestCase::~TestCase() {
+ // Deletes every Test in the collection.
+ ForEach(test_info_list_, internal::Delete<TestInfo>);
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+const TestInfo* TestCase::GetTestInfo(int i) const {
+ const int index = GetElementOr(test_indices_, i, -1);
+ return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+TestInfo* TestCase::GetMutableTestInfo(int i) {
+ const int index = GetElementOr(test_indices_, i, -1);
+ return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Adds a test to this test case. Will delete the test upon
+// destruction of the TestCase object.
+void TestCase::AddTestInfo(TestInfo * test_info) {
+ test_info_list_.push_back(test_info);
+ test_indices_.push_back(static_cast<int>(test_indices_.size()));
+}
+
+// Runs every test in this TestCase.
+void TestCase::Run() {
+ if (!should_run_) return;
+
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->set_current_test_case(this);
+
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+ repeater->OnTestCaseStart(*this);
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ set_up_tc_();
+
+ const internal::TimeInMillis start = internal::GetTimeInMillis();
+ for (int i = 0; i < total_test_count(); i++) {
+ GetMutableTestInfo(i)->impl()->Run();
+ }
+ elapsed_time_ = internal::GetTimeInMillis() - start;
+
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ tear_down_tc_();
+ repeater->OnTestCaseEnd(*this);
+ impl->set_current_test_case(NULL);
+}
+
+// Clears the results of all tests in this test case.
+void TestCase::ClearResult() {
+ ForEach(test_info_list_, internal::TestInfoImpl::ClearTestResult);
+}
+
+// Returns true iff test passed.
+bool TestCase::TestPassed(const TestInfo * test_info) {
+ const internal::TestInfoImpl* const impl = test_info->impl();
+ return impl->should_run() && impl->result()->Passed();
+}
+
+// Returns true iff test failed.
+bool TestCase::TestFailed(const TestInfo * test_info) {
+ const internal::TestInfoImpl* const impl = test_info->impl();
+ return impl->should_run() && impl->result()->Failed();
+}
+
+// Returns true iff test is disabled.
+bool TestCase::TestDisabled(const TestInfo * test_info) {
+ return test_info->impl()->is_disabled();
+}
+
+// Returns true if the given test should run.
+bool TestCase::ShouldRunTest(const TestInfo *test_info) {
+ return test_info->impl()->should_run();
+}
+
+// Shuffles the tests in this test case.
+void TestCase::ShuffleTests(internal::Random* random) {
+ Shuffle(random, &test_indices_);
+}
+
+// Restores the test order to before the first shuffle.
+void TestCase::UnshuffleTests() {
+ for (size_t i = 0; i < test_indices_.size(); i++) {
+ test_indices_[i] = static_cast<int>(i);
+ }
+}
+
+// Formats a countable noun. Depending on its quantity, either the
+// singular form or the plural form is used. e.g.
+//
+// FormatCountableNoun(1, "formula", "formulae") returns "1 formula".
+// FormatCountableNoun(5, "book", "books") returns "5 books".
+static internal::String FormatCountableNoun(int count,
+ const char * singular_form,
+ const char * plural_form) {
+ return internal::String::Format("%d %s", count,
+ count == 1 ? singular_form : plural_form);
+}
+
+// Formats the count of tests.
+static internal::String FormatTestCount(int test_count) {
+ return FormatCountableNoun(test_count, "test", "tests");
+}
+
+// Formats the count of test cases.
+static internal::String FormatTestCaseCount(int test_case_count) {
+ return FormatCountableNoun(test_case_count, "test case", "test cases");
+}
+
+// Converts a TestPartResult::Type enum to human-friendly string
+// representation. Both kNonFatalFailure and kFatalFailure are translated
+// to "Failure", as the user usually doesn't care about the difference
+// between the two when viewing the test result.
+static const char * TestPartResultTypeToString(TestPartResult::Type type) {
+ switch (type) {
+ case TestPartResult::kSuccess:
+ return "Success";
+
+ case TestPartResult::kNonFatalFailure:
+ case TestPartResult::kFatalFailure:
+#ifdef _MSC_VER
+ return "error: ";
+#else
+ return "Failure\n";
+#endif
+ }
+
+ return "Unknown result type";
+}
+
+// Prints a TestPartResult to a String.
+static internal::String PrintTestPartResultToString(
+ const TestPartResult& test_part_result) {
+ return (Message()
+ << internal::FormatFileLocation(test_part_result.file_name(),
+ test_part_result.line_number())
+ << " " << TestPartResultTypeToString(test_part_result.type())
+ << test_part_result.message()).GetString();
+}
+
+// Prints a TestPartResult.
+static void PrintTestPartResult(const TestPartResult& test_part_result) {
+ const internal::String& result =
+ PrintTestPartResultToString(test_part_result);
+ printf("%s\n", result.c_str());
+ fflush(stdout);
+ // If the test program runs in Visual Studio or a debugger, the
+ // following statements add the test part result message to the Output
+ // window such that the user can double-click on it to jump to the
+ // corresponding source code location; otherwise they do nothing.
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+ // We don't call OutputDebugString*() on Windows Mobile, as printing
+ // to stdout is done by OutputDebugString() there already - we don't
+ // want the same message printed twice.
+ ::OutputDebugStringA(result.c_str());
+ ::OutputDebugStringA("\n");
+#endif
+}
+
+// class PrettyUnitTestResultPrinter
+
+namespace internal {
+
+enum GTestColor {
+ COLOR_DEFAULT,
+ COLOR_RED,
+ COLOR_GREEN,
+ COLOR_YELLOW
+};
+
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+
+// Returns the character attribute for the given color.
+WORD GetColorAttribute(GTestColor color) {
+ switch (color) {
+ case COLOR_RED: return FOREGROUND_RED;
+ case COLOR_GREEN: return FOREGROUND_GREEN;
+ case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN;
+ default: return 0;
+ }
+}
+
+#else
+
+// Returns the ANSI color code for the given color. COLOR_DEFAULT is
+// an invalid input.
+const char* GetAnsiColorCode(GTestColor color) {
+ switch (color) {
+ case COLOR_RED: return "1";
+ case COLOR_GREEN: return "2";
+ case COLOR_YELLOW: return "3";
+ default: return NULL;
+ };
+}
+
+#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+
+// Returns true iff Google Test should use colors in the output.
+bool ShouldUseColor(bool stdout_is_tty) {
+ const char* const gtest_color = GTEST_FLAG(color).c_str();
+
+ if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) {
+#if GTEST_OS_WINDOWS
+ // On Windows the TERM variable is usually not set, but the
+ // console there does support colors.
+ return stdout_is_tty;
+#else
+ // On non-Windows platforms, we rely on the TERM variable.
+ const char* const term = posix::GetEnv("TERM");
+ const bool term_supports_color =
+ String::CStringEquals(term, "xterm") ||
+ String::CStringEquals(term, "xterm-color") ||
+ String::CStringEquals(term, "xterm-256color") ||
+ String::CStringEquals(term, "linux") ||
+ String::CStringEquals(term, "cygwin");
+ return stdout_is_tty && term_supports_color;
+#endif // GTEST_OS_WINDOWS
+ }
+
+ return String::CaseInsensitiveCStringEquals(gtest_color, "yes") ||
+ String::CaseInsensitiveCStringEquals(gtest_color, "true") ||
+ String::CaseInsensitiveCStringEquals(gtest_color, "t") ||
+ String::CStringEquals(gtest_color, "1");
+ // We take "yes", "true", "t", and "1" as meaning "yes". If the
+ // value is neither one of these nor "auto", we treat it as "no" to
+ // be conservative.
+}
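+
+// Illustrative usage, not part of Google Test itself: the decision above is
+// normally driven by the --gtest_color flag or the GTEST_COLOR environment
+// variable, e.g.
+//
+//   ./my_tests --gtest_color=yes    // force colored banners even when piped
+//   GTEST_COLOR=no ./my_tests       // plain output, e.g. for log capture
+//
+// Any value other than "auto", "yes", "true", "t" or "1" is treated as "no".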
+
+// Helpers for printing colored strings to stdout. Note that on Windows, we
+// cannot simply emit special characters and have the terminal change colors.
+// This routine must actually emit the characters rather than return a string
+// that would be colored when printed, as can be done on Linux.
+void ColoredPrintf(GTestColor color, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS
+ const bool use_color = false;
+#else
+ static const bool in_color_mode =
+ ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0);
+ const bool use_color = in_color_mode && (color != COLOR_DEFAULT);
+#endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS
+ // The '!= 0' comparison is necessary to satisfy MSVC 7.1.
+
+ if (!use_color) {
+ vprintf(fmt, args);
+ va_end(args);
+ return;
+ }
+
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+ const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
+
+ // Gets the current text color.
+ CONSOLE_SCREEN_BUFFER_INFO buffer_info;
+ GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
+ const WORD old_color_attrs = buffer_info.wAttributes;
+
+ // We need to flush the stream buffers into the console before each
+ // SetConsoleTextAttribute call lest it affect the text that is already
+ // printed but has not yet reached the console.
+ fflush(stdout);
+ SetConsoleTextAttribute(stdout_handle,
+ GetColorAttribute(color) | FOREGROUND_INTENSITY);
+ vprintf(fmt, args);
+
+ fflush(stdout);
+ // Restores the text color.
+ SetConsoleTextAttribute(stdout_handle, old_color_attrs);
+#else
+ printf("\033[0;3%sm", GetAnsiColorCode(color));
+ vprintf(fmt, args);
+ printf("\033[m"); // Resets the terminal to default.
+#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+ va_end(args);
+}
+
+// This class implements the TestEventListener interface.
+//
+// Class PrettyUnitTestResultPrinter is copyable.
+class PrettyUnitTestResultPrinter : public TestEventListener {
+ public:
+ PrettyUnitTestResultPrinter() {}
+ static void PrintTestName(const char * test_case, const char * test) {
+ printf("%s.%s", test_case, test);
+ }
+
+ // The following methods override what's in the TestEventListener class.
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestCaseStart(const TestCase& test_case);
+ virtual void OnTestStart(const TestInfo& test_info);
+ virtual void OnTestPartResult(const TestPartResult& result);
+ virtual void OnTestEnd(const TestInfo& test_info);
+ virtual void OnTestCaseEnd(const TestCase& test_case);
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+ virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
+
+ private:
+ static void PrintFailedTests(const UnitTest& unit_test);
+
+ internal::String test_case_name_;
+};
+
+ // Fired before each iteration of tests starts.
+void PrettyUnitTestResultPrinter::OnTestIterationStart(
+ const UnitTest& unit_test, int iteration) {
+ if (GTEST_FLAG(repeat) != 1)
+ printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1);
+
+ const char* const filter = GTEST_FLAG(filter).c_str();
+
+ // Prints the filter if it's not *. This reminds the user that some
+ // tests may be skipped.
+ if (!internal::String::CStringEquals(filter, kUniversalFilter)) {
+ ColoredPrintf(COLOR_YELLOW,
+ "Note: %s filter = %s\n", GTEST_NAME_, filter);
+ }
+
+ if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) {
+ ColoredPrintf(COLOR_YELLOW,
+ "Note: This is test shard %s of %s.\n",
+ internal::posix::GetEnv(kTestShardIndex),
+ internal::posix::GetEnv(kTestTotalShards));
+ }
+
+ if (GTEST_FLAG(shuffle)) {
+ ColoredPrintf(COLOR_YELLOW,
+ "Note: Randomizing tests' orders with a seed of %d .\n",
+ unit_test.random_seed());
+ }
+
+ ColoredPrintf(COLOR_GREEN, "[==========] ");
+ printf("Running %s from %s.\n",
+ FormatTestCount(unit_test.test_to_run_count()).c_str(),
+ FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart(
+ const UnitTest& /*unit_test*/) {
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("Global test environment set-up.\n");
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) {
+ test_case_name_ = test_case.name();
+ const internal::String counts =
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("%s from %s", counts.c_str(), test_case_name_.c_str());
+ if (test_case.comment()[0] == '\0') {
+ printf("\n");
+ } else {
+ printf(", where %s\n", test_case.comment());
+ }
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) {
+ ColoredPrintf(COLOR_GREEN, "[ RUN      ] ");
+ PrintTestName(test_case_name_.c_str(), test_info.name());
+ if (test_info.comment()[0] == '\0') {
+ printf("\n");
+ } else {
+ printf(", where %s\n", test_info.comment());
+ }
+ fflush(stdout);
+}
+
+// Called after an assertion failure.
+void PrettyUnitTestResultPrinter::OnTestPartResult(
+ const TestPartResult& result) {
+ // If the test part succeeded, we don't need to do anything.
+ if (result.type() == TestPartResult::kSuccess)
+ return;
+
+ // Print failure message from the assertion (e.g. expected this and got that).
+ PrintTestPartResult(result);
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {
+ if (test_info.result()->Passed()) {
+ ColoredPrintf(COLOR_GREEN, "[       OK ] ");
+ } else {
+ ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
+ }
+ PrintTestName(test_case_name_.c_str(), test_info.name());
+ if (GTEST_FLAG(print_time)) {
+ printf(" (%s ms)\n", internal::StreamableToString(
+ test_info.result()->elapsed_time()).c_str());
+ } else {
+ printf("\n");
+ }
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) {
+ if (!GTEST_FLAG(print_time)) return;
+
+ test_case_name_ = test_case.name();
+ const internal::String counts =
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("%s from %s (%s ms total)\n\n",
+ counts.c_str(), test_case_name_.c_str(),
+ internal::StreamableToString(test_case.elapsed_time()).c_str());
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart(
+ const UnitTest& /*unit_test*/) {
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("Global test environment tear-down\n");
+ fflush(stdout);
+}
+
+// Internal helper for printing the list of failed tests.
+void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) {
+ const int failed_test_count = unit_test.failed_test_count();
+ if (failed_test_count == 0) {
+ return;
+ }
+
+ for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
+ const TestCase& test_case = *unit_test.GetTestCase(i);
+ if (!test_case.should_run() || (test_case.failed_test_count() == 0)) {
+ continue;
+ }
+ for (int j = 0; j < test_case.total_test_count(); ++j) {
+ const TestInfo& test_info = *test_case.GetTestInfo(j);
+ if (!test_info.should_run() || test_info.result()->Passed()) {
+ continue;
+ }
+ ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
+ printf("%s.%s", test_case.name(), test_info.name());
+ if (test_case.comment()[0] != '\0' ||
+ test_info.comment()[0] != '\0') {
+ printf(", where %s", test_case.comment());
+ if (test_case.comment()[0] != '\0' &&
+ test_info.comment()[0] != '\0') {
+ printf(" and ");
+ }
+ }
+ printf("%s\n", test_info.comment());
+ }
+ }
+}
+
+ void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
+ ColoredPrintf(COLOR_GREEN, "[==========] ");
+ printf("%s from %s ran.",
+ FormatTestCount(unit_test.test_to_run_count()).c_str(),
+ FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
+ if (GTEST_FLAG(print_time)) {
+ printf(" (%s ms total)",
+ internal::StreamableToString(unit_test.elapsed_time()).c_str());
+ }
+ printf("\n");
+ ColoredPrintf(COLOR_GREEN, "[  PASSED  ] ");
+ printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str());
+
+ int num_failures = unit_test.failed_test_count();
+ if (!unit_test.Passed()) {
+ const int failed_test_count = unit_test.failed_test_count();
+ ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
+ printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str());
+ PrintFailedTests(unit_test);
+ printf("\n%2d FAILED %s\n", num_failures,
+ num_failures == 1 ? "TEST" : "TESTS");
+ }
+
+ int num_disabled = unit_test.disabled_test_count();
+ if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) {
+ if (!num_failures) {
+ printf("\n"); // Add a spacer if no FAILURE banner is displayed.
+ }
+ ColoredPrintf(COLOR_YELLOW,
+ "  YOU HAVE %d DISABLED %s\n\n",
+ num_disabled,
+ num_disabled == 1 ? "TEST" : "TESTS");
+ }
+ // Ensure that Google Test output is printed before, e.g., heapchecker output.
+ fflush(stdout);
+}
+
+// End PrettyUnitTestResultPrinter
+
+// class TestEventRepeater
+//
+// This class forwards events to other event listeners.
+class TestEventRepeater : public TestEventListener {
+ public:
+ TestEventRepeater() : forwarding_enabled_(true) {}
+ virtual ~TestEventRepeater();
+ void Append(TestEventListener *listener);
+ TestEventListener* Release(TestEventListener* listener);
+
+ // Controls whether events will be forwarded to listeners_. Set to false
+ // in death test child processes.
+ bool forwarding_enabled() const { return forwarding_enabled_; }
+ void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; }
+
+ virtual void OnTestProgramStart(const UnitTest& unit_test);
+ virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test);
+ virtual void OnTestCaseStart(const TestCase& test_case);
+ virtual void OnTestStart(const TestInfo& test_info);
+ virtual void OnTestPartResult(const TestPartResult& result);
+ virtual void OnTestEnd(const TestInfo& test_info);
+ virtual void OnTestCaseEnd(const TestCase& test_case);
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test);
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+ virtual void OnTestProgramEnd(const UnitTest& unit_test);
+
+ private:
+ // Controls whether events will be forwarded to listeners_. Set to false
+ // in death test child processes.
+ bool forwarding_enabled_;
+ // The list of listeners that receive events.
+ std::vector<TestEventListener*> listeners_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater);
+};
+
+TestEventRepeater::~TestEventRepeater() {
+ ForEach(listeners_, Delete<TestEventListener>);
+}
+
+void TestEventRepeater::Append(TestEventListener *listener) {
+ listeners_.push_back(listener);
+}
+
+// TODO(vladl@google.com): Factor the search functionality into Vector::Find.
+TestEventListener* TestEventRepeater::Release(TestEventListener *listener) {
+ for (size_t i = 0; i < listeners_.size(); ++i) {
+ if (listeners_[i] == listener) {
+ listeners_.erase(listeners_.begin() + i);
+ return listener;
+ }
+ }
+
+ return NULL;
+}
+
+// Since most methods are very similar, use macros to reduce boilerplate.
+// This defines a member that forwards the call to all listeners.
+#define GTEST_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+ if (forwarding_enabled_) { \
+ for (size_t i = 0; i < listeners_.size(); i++) { \
+ listeners_[i]->Name(parameter); \
+ } \
+ } \
+}
+// This defines a member that forwards the call to all listeners in reverse
+// order.
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+ if (forwarding_enabled_) { \
+ for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) { \
+ listeners_[i]->Name(parameter); \
+ } \
+ } \
+}
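+
+// For reference, GTEST_REPEATER_METHOD_(OnTestStart, TestInfo) below expands
+// roughly to the following member definition (shown only to make the macro
+// easier to read; it is not additional code):
+//
+//   void TestEventRepeater::OnTestStart(const TestInfo& parameter) {
+//     if (forwarding_enabled_) {
+//       for (size_t i = 0; i < listeners_.size(); i++) {
+//         listeners_[i]->OnTestStart(parameter);
+//       }
+//     }
+//   }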
+
+GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase)
+GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
+GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult)
+GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest)
+
+#undef GTEST_REPEATER_METHOD_
+#undef GTEST_REVERSE_REPEATER_METHOD_
+
+void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test,
+ int iteration) {
+ if (forwarding_enabled_) {
+ for (size_t i = 0; i < listeners_.size(); i++) {
+ listeners_[i]->OnTestIterationStart(unit_test, iteration);
+ }
+ }
+}
+
+void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test,
+ int iteration) {
+ if (forwarding_enabled_) {
+ for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) {
+ listeners_[i]->OnTestIterationEnd(unit_test, iteration);
+ }
+ }
+}
+
+// End TestEventRepeater
+
+// This class generates an XML output file.
+class XmlUnitTestResultPrinter : public EmptyTestEventListener {
+ public:
+ explicit XmlUnitTestResultPrinter(const char* output_file);
+
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+
+ private:
+ // Is c a whitespace character that is normalized to a space character
+ // when it appears in an XML attribute value?
+ static bool IsNormalizableWhitespace(char c) {
+ return c == 0x9 || c == 0xA || c == 0xD;
+ }
+
+ // May c appear in a well-formed XML document?
+ static bool IsValidXmlCharacter(char c) {
+ return IsNormalizableWhitespace(c) || c >= 0x20;
+ }
+
+ // Returns an XML-escaped copy of the input string str. If
+ // is_attribute is true, the text is meant to appear as an attribute
+ // value, and normalizable whitespace is preserved by replacing it
+ // with character references.
+ static String EscapeXml(const char* str, bool is_attribute);
+
+ // Returns the given string with all characters invalid in XML removed.
+ static String RemoveInvalidXmlCharacters(const char* str);
+
+ // Convenience wrapper around EscapeXml when str is an attribute value.
+ static String EscapeXmlAttribute(const char* str) {
+ return EscapeXml(str, true);
+ }
+
+ // Convenience wrapper around EscapeXml when str is not an attribute value.
+ static String EscapeXmlText(const char* str) { return EscapeXml(str, false); }
+
+ // Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+ static void OutputXmlCDataSection(::std::ostream* stream, const char* data);
+
+ // Streams an XML representation of a TestInfo object.
+ static void OutputXmlTestInfo(::std::ostream* stream,
+ const char* test_case_name,
+ const TestInfo& test_info);
+
+ // Prints an XML representation of a TestCase object
+ static void PrintXmlTestCase(FILE* out, const TestCase& test_case);
+
+ // Prints an XML summary of unit_test to output stream out.
+ static void PrintXmlUnitTest(FILE* out, const UnitTest& unit_test);
+
+ // Produces a string representing the test properties in a result as space
+ // delimited XML attributes based on the property key="value" pairs.
+ // When the String is not empty, it includes a space at the beginning,
+ // to delimit this attribute from prior attributes.
+ static String TestPropertiesAsXmlAttributes(const TestResult& result);
+
+ // The output file.
+ const String output_file_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter);
+};
+
+// Creates a new XmlUnitTestResultPrinter.
+XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file)
+ : output_file_(output_file) {
+ if (output_file_.c_str() == NULL || output_file_.empty()) {
+ fprintf(stderr, "XML output file may not be null\n");
+ fflush(stderr);
+ exit(EXIT_FAILURE);
+ }
+}
+
+// Called after the unit test ends.
+void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
+ FILE* xmlout = NULL;
+ FilePath output_file(output_file_);
+ FilePath output_dir(output_file.RemoveFileName());
+
+ if (output_dir.CreateDirectoriesRecursively()) {
+ xmlout = posix::FOpen(output_file_.c_str(), "w");
+ }
+ if (xmlout == NULL) {
+ // TODO(wan): report the reason of the failure.
+ //
+ // We don't do it for now as:
+ //
+ // 1. There is no urgent need for it.
+ // 2. It's a bit involved to make the errno variable thread-safe on
+ // all three operating systems (Linux, Windows, and Mac OS).
+ // 3. To interpret the meaning of errno in a thread-safe way,
+ // we need the strerror_r() function, which is not available on
+ // Windows.
+ fprintf(stderr,
+ "Unable to open file \"%s\"\n",
+ output_file_.c_str());
+ fflush(stderr);
+ exit(EXIT_FAILURE);
+ }
+ PrintXmlUnitTest(xmlout, unit_test);
+ fclose(xmlout);
+}
+
+// Returns an XML-escaped copy of the input string str. If is_attribute
+// is true, the text is meant to appear as an attribute value, and
+// normalizable whitespace is preserved by replacing it with character
+// references.
+//
+// Invalid XML characters in str, if any, are stripped from the output.
+// It is expected that most, if not all, of the text processed by this
+// module will consist of ordinary English text.
+// If this module is ever modified to produce version 1.1 XML output,
+// most invalid characters can be retained using character references.
+// TODO(wan): It might be nice to have a minimally invasive, human-readable
+// escaping scheme for invalid characters, rather than dropping them.
+String XmlUnitTestResultPrinter::EscapeXml(const char* str, bool is_attribute) {
+ Message m;
+
+ if (str != NULL) {
+ for (const char* src = str; *src; ++src) {
+ switch (*src) {
+ case '<':
+ m << "&lt;";
+ break;
+ case '>':
+ m << "&gt;";
+ break;
+ case '&':
+ m << "&amp;";
+ break;
+ case '\'':
+ if (is_attribute)
+ m << "&apos;";
+ else
+ m << '\'';
+ break;
+ case '"':
+ if (is_attribute)
+ m << "&quot;";
+ else
+ m << '"';
+ break;
+ default:
+ if (IsValidXmlCharacter(*src)) {
+ if (is_attribute && IsNormalizableWhitespace(*src))
+ m << String::Format("&#x%02X;", unsigned(*src));
+ else
+ m << *src;
+ }
+ break;
+ }
+ }
+ }
+
+ return m.GetString();
+}
+
+// Returns the given string with all characters invalid in XML removed.
+// Currently invalid characters are dropped from the string. An
+// alternative is to replace them with certain characters such as . or ?.
+String XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(const char* str) {
+ char* const output = new char[strlen(str) + 1];
+ char* appender = output;
+ for (char ch = *str; ch != '\0'; ch = *++str)
+ if (IsValidXmlCharacter(ch))
+ *appender++ = ch;
+ *appender = '\0';
+
+ String ret_value(output);
+ delete[] output;
+ return ret_value;
+}
+
+// The following routines generate an XML representation of a UnitTest
+// object.
+//
+// This is how Google Test concepts map to the DTD:
+//
+// <testsuites name="AllTests"> <-- corresponds to a UnitTest object
+// <testsuite name="testcase-name"> <-- corresponds to a TestCase object
+// <testcase name="test-name"> <-- corresponds to a TestInfo object
+// <failure message="...">...</failure>
+// <failure message="...">...</failure>
+// <failure message="...">...</failure>
+// <-- individual assertion failures
+// </testcase>
+// </testsuite>
+// </testsuites>
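+//
+// An illustrative, hand-written fragment of the emitted XML for one failing
+// test (attribute values are made up):
+//
+//   <testsuites tests="3" failures="1" disabled="0" errors="0" time="0.035" name="...">
+//     <testsuite name="FooTest" tests="3" failures="1" disabled="0" errors="0" time="0.03">
+//       <testcase name="Bar" status="run" time="0.01" classname="...FooTest">
+//         <failure message="..." type=""><![CDATA[...]]></failure>
+//       </testcase>
+//     </testsuite>
+//   </testsuites>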
+
+// Formats the given time in milliseconds as seconds.
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+ ::std::stringstream ss;
+ ss << ms/1000.0;
+ return ss.str();
+}
+
+// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
+ const char* data) {
+ const char* segment = data;
+ *stream << "<![CDATA[";
+ for (;;) {
+ const char* const next_segment = strstr(segment, "]]>");
+ if (next_segment != NULL) {
+ stream->write(
+ segment, static_cast<std::streamsize>(next_segment - segment));
+ *stream << "]]>]]&gt;<![CDATA[";
+ segment = next_segment + strlen("]]>");
+ } else {
+ *stream << segment;
+ break;
+ }
+ }
+ *stream << "]]>";
+}
+
+// Prints an XML representation of a TestInfo object.
+// TODO(wan): There is also value in printing properties with the plain printer.
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
+ const char* test_case_name,
+ const TestInfo& test_info) {
+ const TestResult& result = *test_info.result();
+ *stream << " <testcase name=\""
+ << EscapeXmlAttribute(test_info.name()).c_str()
+ << "\" status=\""
+ << (test_info.should_run() ? "run" : "notrun")
+ << "\" time=\""
+ << FormatTimeInMillisAsSeconds(result.elapsed_time())
+ << "\" classname=\"" << GTEST_FLAG(output).c_str() << "."
+ << EscapeXmlAttribute(test_case_name).c_str()
+ << "\"" << TestPropertiesAsXmlAttributes(result).c_str();
+
+ int failures = 0;
+ for (int i = 0; i < result.total_part_count(); ++i) {
+ const TestPartResult& part = result.GetTestPartResult(i);
+ if (part.failed()) {
+ if (++failures == 1)
+ *stream << ">\n";
+ *stream << " <failure message=\""
+ << EscapeXmlAttribute(part.summary()).c_str()
+ << "\" type=\"\">";
+ const String message = RemoveInvalidXmlCharacters(String::Format(
+ "%s:%d\n%s",
+ part.file_name(), part.line_number(),
+ part.message()).c_str());
+ OutputXmlCDataSection(stream, message.c_str());
+ *stream << "</failure>\n";
+ }
+ }
+
+ if (failures == 0)
+ *stream << " />\n";
+ else
+ *stream << " </testcase>\n";
+}
+
+// Prints an XML representation of a TestCase object
+void XmlUnitTestResultPrinter::PrintXmlTestCase(FILE* out,
+ const TestCase& test_case) {
+ fprintf(out,
+ " <testsuite name=\"%s\" tests=\"%d\" failures=\"%d\" "
+ "disabled=\"%d\" ",
+ EscapeXmlAttribute(test_case.name()).c_str(),
+ test_case.total_test_count(),
+ test_case.failed_test_count(),
+ test_case.disabled_test_count());
+ fprintf(out,
+ "errors=\"0\" time=\"%s\">\n",
+ FormatTimeInMillisAsSeconds(test_case.elapsed_time()).c_str());
+ for (int i = 0; i < test_case.total_test_count(); ++i) {
+ StrStream stream;
+ OutputXmlTestInfo(&stream, test_case.name(), *test_case.GetTestInfo(i));
+ fprintf(out, "%s", StrStreamToString(&stream).c_str());
+ }
+ fprintf(out, " </testsuite>\n");
+}
+
+// Prints an XML summary of unit_test to output stream out.
+void XmlUnitTestResultPrinter::PrintXmlUnitTest(FILE* out,
+ const UnitTest& unit_test) {
+ fprintf(out, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
+ fprintf(out,
+ "<testsuites tests=\"%d\" failures=\"%d\" disabled=\"%d\" "
+ "errors=\"0\" time=\"%s\" ",
+ unit_test.total_test_count(),
+ unit_test.failed_test_count(),
+ unit_test.disabled_test_count(),
+ FormatTimeInMillisAsSeconds(unit_test.elapsed_time()).c_str());
+ if (GTEST_FLAG(shuffle)) {
+ fprintf(out, "random_seed=\"%d\" ", unit_test.random_seed());
+ }
+ fprintf(out, "name=\"%s\">\n", GTEST_FLAG(output).c_str());
+ for (int i = 0; i < unit_test.total_test_case_count(); ++i)
+ PrintXmlTestCase(out, *unit_test.GetTestCase(i));
+ fprintf(out, "</testsuites>\n");
+}
+
+// Produces a string representing the test properties in a result as space
+// delimited XML attributes based on the property key="value" pairs.
+String XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
+ const TestResult& result) {
+ Message attributes;
+ for (int i = 0; i < result.test_property_count(); ++i) {
+ const TestProperty& property = result.GetTestProperty(i);
+ attributes << " " << property.key() << "="
+ << "\"" << EscapeXmlAttribute(property.value()) << "\"";
+ }
+ return attributes.GetString();
+}
+
+// End XmlUnitTestResultPrinter
+
+// Class ScopedTrace
+
+// Pushes the given source file location and message onto a per-thread
+// trace stack maintained by Google Test.
+// L < UnitTest::mutex_
+ScopedTrace::ScopedTrace(const char* file, int line, const Message& message) {
+ TraceInfo trace;
+ trace.file = file;
+ trace.line = line;
+ trace.message = message.GetString();
+
+ UnitTest::GetInstance()->PushGTestTrace(trace);
+}
+
+// Pops the info pushed by the c'tor.
+// L < UnitTest::mutex_
+ScopedTrace::~ScopedTrace() {
+ UnitTest::GetInstance()->PopGTestTrace();
+}
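+
+// ScopedTrace is what the SCOPED_TRACE() macro instantiates. A minimal usage
+// sketch (illustrative only; FooTest and CheckValue are made-up names):
+//
+//   void CheckValue(int n) {
+//     SCOPED_TRACE(n);   // failures below will carry this call site and n
+//     EXPECT_GE(n, 0);
+//   }
+//
+//   TEST(FooTest, Bar) {
+//     CheckValue(1);
+//     CheckValue(-1);    // the trace tells the two invocations apart
+//   }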
+
+
+// class OsStackTraceGetter
+
+// Returns the current OS stack trace as a String. Parameters:
+//
+// max_depth - the maximum number of stack frames to be included
+// in the trace.
+// skip_count - the number of top frames to be skipped; doesn't count
+// against max_depth.
+//
+// L < mutex_
+// We use "L < mutex_" to denote that the function may acquire mutex_.
+String OsStackTraceGetter::CurrentStackTrace(int, int) {
+ return String("");
+}
+
+// L < mutex_
+void OsStackTraceGetter::UponLeavingGTest() {
+}
+
+const char* const
+OsStackTraceGetter::kElidedFramesMarker =
+ "... " GTEST_NAME_ " internal frames ...";
+
+} // namespace internal
+
+// class TestEventListeners
+
+TestEventListeners::TestEventListeners()
+ : repeater_(new internal::TestEventRepeater()),
+ default_result_printer_(NULL),
+ default_xml_generator_(NULL) {
+}
+
+TestEventListeners::~TestEventListeners() { delete repeater_; }
+
+// Adds the given listener to the end of the list. Google Test takes
+// ownership of the listener (i.e. it will delete the listener when the
+// test program finishes).
+void TestEventListeners::Append(TestEventListener* listener) {
+ repeater_->Append(listener);
+}
+
+// Removes the given event listener from the list and returns it. It then
+// becomes the caller's responsibility to delete the listener. Returns
+// NULL if the listener is not found in the list.
+TestEventListener* TestEventListeners::Release(TestEventListener* listener) {
+ if (listener == default_result_printer_)
+ default_result_printer_ = NULL;
+ else if (listener == default_xml_generator_)
+ default_xml_generator_ = NULL;
+ return repeater_->Release(listener);
+}
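+
+// A typical customization, as a sketch (MyMinimalPrinter is a made-up,
+// user-defined TestEventListener): drop the default pretty printer and
+// install your own.
+//
+//   ::testing::TestEventListeners& listeners =
+//       ::testing::UnitTest::GetInstance()->listeners();
+//   delete listeners.Release(listeners.default_result_printer());
+//   listeners.Append(new MyMinimalPrinter);   // Google Test now owns it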
+
+// Returns repeater that broadcasts the TestEventListener events to all
+// subscribers.
+TestEventListener* TestEventListeners::repeater() { return repeater_; }
+
+// Sets the default_result_printer attribute to the provided listener.
+// The listener is also added to the listener list and previous
+// default_result_printer is removed from it and deleted. The listener can
+// also be NULL in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) {
+ if (default_result_printer_ != listener) {
+ // It is an error to pass this method a listener that is already in the
+ // list.
+ delete Release(default_result_printer_);
+ default_result_printer_ = listener;
+ if (listener != NULL)
+ Append(listener);
+ }
+}
+
+// Sets the default_xml_generator attribute to the provided listener. The
+// listener is also added to the listener list and previous
+// default_xml_generator is removed from it and deleted. The listener can
+// also be NULL in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) {
+ if (default_xml_generator_ != listener) {
+ // It is an error to pass this method a listener that is already in the
+ // list.
+ delete Release(default_xml_generator_);
+ default_xml_generator_ = listener;
+ if (listener != NULL)
+ Append(listener);
+ }
+}
+
+// Controls whether events will be forwarded by the repeater to the
+// listeners in the list.
+bool TestEventListeners::EventForwardingEnabled() const {
+ return repeater_->forwarding_enabled();
+}
+
+void TestEventListeners::SuppressEventForwarding() {
+ repeater_->set_forwarding_enabled(false);
+}
+
+// class UnitTest
+
+// Gets the singleton UnitTest object. The first time this method is
+// called, a UnitTest object is constructed and returned. Consecutive
+// calls will return the same object.
+//
+// We don't protect this under mutex_ as a user is not supposed to
+// call this before main() starts, from which point on the return
+// value will never change.
+UnitTest * UnitTest::GetInstance() {
+ // When compiled with MSVC 7.1 in optimized mode, destroying the
+ // UnitTest object upon exiting the program messes up the exit code,
+ // causing successful tests to appear failed. We have to use a
+ // different implementation in this case to bypass the compiler bug.
+ // This implementation makes the compiler happy, at the cost of
+ // leaking the UnitTest object.
+
+ // CodeGear C++Builder insists on a public destructor for the
+ // default implementation. Use this implementation to keep good OO
+ // design with private destructor.
+
+#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+ static UnitTest* const instance = new UnitTest;
+ return instance;
+#else
+ static UnitTest instance;
+ return &instance;
+#endif // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+}
+
+// Gets the number of successful test cases.
+int UnitTest::successful_test_case_count() const {
+ return impl()->successful_test_case_count();
+}
+
+// Gets the number of failed test cases.
+int UnitTest::failed_test_case_count() const {
+ return impl()->failed_test_case_count();
+}
+
+// Gets the number of all test cases.
+int UnitTest::total_test_case_count() const {
+ return impl()->total_test_case_count();
+}
+
+// Gets the number of all test cases that contain at least one test
+// that should run.
+int UnitTest::test_case_to_run_count() const {
+ return impl()->test_case_to_run_count();
+}
+
+// Gets the number of successful tests.
+int UnitTest::successful_test_count() const {
+ return impl()->successful_test_count();
+}
+
+// Gets the number of failed tests.
+int UnitTest::failed_test_count() const { return impl()->failed_test_count(); }
+
+// Gets the number of disabled tests.
+int UnitTest::disabled_test_count() const {
+ return impl()->disabled_test_count();
+}
+
+// Gets the number of all tests.
+int UnitTest::total_test_count() const { return impl()->total_test_count(); }
+
+// Gets the number of tests that should run.
+int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); }
+
+// Gets the elapsed time, in milliseconds.
+internal::TimeInMillis UnitTest::elapsed_time() const {
+ return impl()->elapsed_time();
+}
+
+// Returns true iff the unit test passed (i.e. all test cases passed).
+bool UnitTest::Passed() const { return impl()->Passed(); }
+
+// Returns true iff the unit test failed (i.e. some test case failed
+// or something outside of all tests failed).
+bool UnitTest::Failed() const { return impl()->Failed(); }
+
+// Gets the i-th test case among all the test cases. i can range from 0 to
+// total_test_case_count() - 1. If i is not in that range, returns NULL.
+const TestCase* UnitTest::GetTestCase(int i) const {
+ return impl()->GetTestCase(i);
+}
+
+// Gets the i-th test case among all the test cases. i can range from 0 to
+// total_test_case_count() - 1. If i is not in that range, returns NULL.
+TestCase* UnitTest::GetMutableTestCase(int i) {
+ return impl()->GetMutableTestCase(i);
+}
+
+// Returns the list of event listeners that can be used to track events
+// inside Google Test.
+TestEventListeners& UnitTest::listeners() {
+ return *impl()->listeners();
+}
+
+// Registers and returns a global test environment. When a test
+// program is run, all global test environments will be set-up in the
+// order they were registered. After all tests in the program have
+// finished, all global test environments will be torn-down in the
+// *reverse* order they were registered.
+//
+// The UnitTest object takes ownership of the given environment.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
+Environment* UnitTest::AddEnvironment(Environment* env) {
+ if (env == NULL) {
+ return NULL;
+ }
+
+ impl_->environments().push_back(env);
+ return env;
+}
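+
+// AddEnvironment() is normally reached via the public
+// ::testing::AddGlobalTestEnvironment() helper. A minimal sketch
+// (MyEnvironment is a made-up, user-defined class):
+//
+//   class MyEnvironment : public ::testing::Environment {
+//    public:
+//     virtual void SetUp() { /* bring up shared resources */ }
+//     virtual void TearDown() { /* release them */ }
+//   };
+//
+//   int main(int argc, char** argv) {
+//     ::testing::InitGoogleTest(&argc, argv);
+//     ::testing::AddGlobalTestEnvironment(new MyEnvironment);  // owned by gtest
+//     return RUN_ALL_TESTS();
+//   }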
+
+#if GTEST_HAS_EXCEPTIONS
+// A failed Google Test assertion will throw an exception of this type
+// when exceptions are enabled. We derive it from std::runtime_error,
+// which is for errors presumably detectable only at run time. Since
+// std::runtime_error inherits from std::exception, many testing
+// frameworks know how to extract and print the message inside it.
+class GoogleTestFailureException : public ::std::runtime_error {
+ public:
+ explicit GoogleTestFailureException(const TestPartResult& failure)
+ : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {}
+};
+#endif
+
+// Adds a TestPartResult to the current TestResult object. All Google Test
+// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call
+// this to report their results. The user code should use the
+// assertion macros instead of calling this directly.
+// L < mutex_
+void UnitTest::AddTestPartResult(TestPartResult::Type result_type,
+ const char* file_name,
+ int line_number,
+ const internal::String& message,
+ const internal::String& os_stack_trace) {
+ Message msg;
+ msg << message;
+
+ internal::MutexLock lock(&mutex_);
+ if (impl_->gtest_trace_stack().size() > 0) {
+ msg << "\n" << GTEST_NAME_ << " trace:";
+
+ for (int i = static_cast<int>(impl_->gtest_trace_stack().size());
+ i > 0; --i) {
+ const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1];
+ msg << "\n" << internal::FormatFileLocation(trace.file, trace.line)
+ << " " << trace.message;
+ }
+ }
+
+ if (os_stack_trace.c_str() != NULL && !os_stack_trace.empty()) {
+ msg << internal::kStackTraceMarker << os_stack_trace;
+ }
+
+ const TestPartResult result =
+ TestPartResult(result_type, file_name, line_number,
+ msg.GetString().c_str());
+ impl_->GetTestPartResultReporterForCurrentThread()->
+ ReportTestPartResult(result);
+
+ if (result_type != TestPartResult::kSuccess) {
+ // gtest_break_on_failure takes precedence over
+ // gtest_throw_on_failure. This allows a user to set the latter
+ // in the code (perhaps in order to use Google Test assertions
+ // with another testing framework) and specify the former on the
+ // command line for debugging.
+ if (GTEST_FLAG(break_on_failure)) {
+#if GTEST_OS_WINDOWS
+ // Using DebugBreak on Windows allows gtest to still break into a debugger
+ // when a failure happens and both the --gtest_break_on_failure and
+ // the --gtest_catch_exceptions flags are specified.
+ DebugBreak();
+#else
+ *static_cast<int*>(NULL) = 1;
+#endif // GTEST_OS_WINDOWS
+ } else if (GTEST_FLAG(throw_on_failure)) {
+#if GTEST_HAS_EXCEPTIONS
+ throw GoogleTestFailureException(result);
+#else
+ // We cannot call abort() as it generates a pop-up in debug mode
+ // that cannot be suppressed in VC 7.1 or below.
+ exit(1);
+#endif
+ }
+ }
+}
+
+// Creates and adds a property to the current TestResult. If a property with
+// the same key already exists, its value is updated instead.
+void UnitTest::RecordPropertyForCurrentTest(const char* key,
+ const char* value) {
+ const TestProperty test_property(key, value);
+ impl_->current_test_result()->RecordProperty(test_property);
+}
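+
+// From a test body this is reached via Test::RecordProperty(); the key/value
+// pair ends up as an extra attribute on the corresponding <testcase> element
+// of the XML report. Illustrative sketch only:
+//
+//   TEST(FooTest, Bar) {
+//     RecordProperty("build_id", "1234");   // emitted as build_id="1234"
+//     EXPECT_TRUE(SomethingWorks());        // SomethingWorks() is made up
+//   }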
+
+// Runs all tests in this UnitTest object and prints the result.
+// Returns 0 if successful, or 1 otherwise.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
+int UnitTest::Run() {
+#if GTEST_HAS_SEH
+ // Catch SEH-style exceptions.
+
+ const bool in_death_test_child_process =
+ internal::GTEST_FLAG(internal_run_death_test).length() > 0;
+
+ // Either the user wants Google Test to catch exceptions thrown by the
+ // tests or this is executing in the context of a death test child
+ // process. In either case the user does not want to see pop-up dialogs
+ // about crashes - they are expected.
+ if (GTEST_FLAG(catch_exceptions) || in_death_test_child_process) {
+#if !GTEST_OS_WINDOWS_MOBILE
+ // SetErrorMode doesn't exist on CE.
+ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT |
+ SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX);
+#endif // !GTEST_OS_WINDOWS_MOBILE
+
+#if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE
+ // Death test children can be terminated with _abort(). On Windows,
+ // _abort() can show a dialog with a warning message. This forces the
+ // abort message to go to stderr instead.
+ _set_error_mode(_OUT_TO_STDERR);
+#endif
+
+#if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
+ // In the debug version, Visual Studio pops up a separate dialog
+ // offering a choice to debug the aborted program. We need to suppress
+ // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement
+ // executed. Google Test will notify the user of any unexpected
+ // failure via stderr.
+ //
+ // VC++ doesn't define _set_abort_behavior() prior to version 8.0.
+ // Users of prior VC versions shall suffer the agony and pain of
+ // clicking through the countless debug dialogs.
+ // TODO(vladl@google.com): find a way to suppress the abort dialog() in the
+ // debug mode when compiled with VC 7.1 or lower.
+ if (!GTEST_FLAG(break_on_failure))
+ _set_abort_behavior(
+ 0x0, // Clear the following flags:
+ _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump.
+#endif
+ }
+
+ __try {
+ return impl_->RunAllTests();
+ } __except(internal::UnitTestOptions::GTestShouldProcessSEH(
+ GetExceptionCode())) {
+ printf("Exception thrown with code 0x%x.\nFAIL\n", GetExceptionCode());
+ fflush(stdout);
+ return 1;
+ }
+
+#else // We are on a compiler or platform that doesn't support SEH.
+
+ return impl_->RunAllTests();
+#endif // GTEST_HAS_SEH
+}
+
+// Returns the working directory when the first TEST() or TEST_F() was
+// executed.
+const char* UnitTest::original_working_dir() const {
+ return impl_->original_working_dir_.c_str();
+}
+
+// Returns the TestCase object for the test that's currently running,
+// or NULL if no test is running.
+// L < mutex_
+const TestCase* UnitTest::current_test_case() const {
+ internal::MutexLock lock(&mutex_);
+ return impl_->current_test_case();
+}
+
+// Returns the TestInfo object for the test that's currently running,
+// or NULL if no test is running.
+// L < mutex_
+const TestInfo* UnitTest::current_test_info() const {
+ internal::MutexLock lock(&mutex_);
+ return impl_->current_test_info();
+}
+
+// Returns the random seed used at the start of the current test run.
+int UnitTest::random_seed() const { return impl_->random_seed(); }
+
+#if GTEST_HAS_PARAM_TEST
+// Returns ParameterizedTestCaseRegistry object used to keep track of
+// value-parameterized tests and instantiate and register them.
+// L < mutex_
+internal::ParameterizedTestCaseRegistry&
+ UnitTest::parameterized_test_registry() {
+ return impl_->parameterized_test_registry();
+}
+#endif // GTEST_HAS_PARAM_TEST
+
+// Creates an empty UnitTest.
+UnitTest::UnitTest() {
+ impl_ = new internal::UnitTestImpl(this);
+}
+
+// Destructor of UnitTest.
+UnitTest::~UnitTest() {
+ delete impl_;
+}
+
+// Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+// Google Test trace stack.
+// L < mutex_
+void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) {
+ internal::MutexLock lock(&mutex_);
+ impl_->gtest_trace_stack().push_back(trace);
+}
+
+// Pops a trace from the per-thread Google Test trace stack.
+// L < mutex_
+void UnitTest::PopGTestTrace() {
+ internal::MutexLock lock(&mutex_);
+ impl_->gtest_trace_stack().pop_back();
+}
+
+namespace internal {
+
+UnitTestImpl::UnitTestImpl(UnitTest* parent)
+ : parent_(parent),
+#ifdef _MSC_VER
+#pragma warning(push) // Saves the current warning state.
+#pragma warning(disable:4355) // Temporarily disables warning 4355
+ // (using this in initializer).
+ default_global_test_part_result_reporter_(this),
+ default_per_thread_test_part_result_reporter_(this),
+#pragma warning(pop) // Restores the warning state again.
+#else
+ default_global_test_part_result_reporter_(this),
+ default_per_thread_test_part_result_reporter_(this),
+#endif // _MSC_VER
+ global_test_part_result_repoter_(
+ &default_global_test_part_result_reporter_),
+ per_thread_test_part_result_reporter_(
+ &default_per_thread_test_part_result_reporter_),
+#if GTEST_HAS_PARAM_TEST
+ parameterized_test_registry_(),
+ parameterized_tests_registered_(false),
+#endif // GTEST_HAS_PARAM_TEST
+ last_death_test_case_(-1),
+ current_test_case_(NULL),
+ current_test_info_(NULL),
+ ad_hoc_test_result_(),
+ os_stack_trace_getter_(NULL),
+ post_flag_parse_init_performed_(false),
+ random_seed_(0), // Will be overridden by the flag before first use.
+ random_(0), // Will be reseeded before first use.
+#if GTEST_HAS_DEATH_TEST
+ elapsed_time_(0),
+ internal_run_death_test_flag_(NULL),
+ death_test_factory_(new DefaultDeathTestFactory) {
+#else
+ elapsed_time_(0) {
+#endif // GTEST_HAS_DEATH_TEST
+ listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter);
+}
+
+UnitTestImpl::~UnitTestImpl() {
+ // Deletes every TestCase.
+ ForEach(test_cases_, internal::Delete<TestCase>);
+
+ // Deletes every Environment.
+ ForEach(environments_, internal::Delete<Environment>);
+
+ delete os_stack_trace_getter_;
+}
+
+#if GTEST_HAS_DEATH_TEST
+// Disables event forwarding if the control is currently in a death test
+// subprocess. Must not be called before InitGoogleTest.
+void UnitTestImpl::SuppressTestEventsIfInSubprocess() {
+ if (internal_run_death_test_flag_.get() != NULL)
+ listeners()->SuppressEventForwarding();
+}
+#endif // GTEST_HAS_DEATH_TEST
+
+// Initializes event listeners performing XML output as specified by
+// UnitTestOptions. Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureXmlOutput() {
+ const String& output_format = UnitTestOptions::GetOutputFormat();
+ if (output_format == "xml") {
+ listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter(
+ UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));
+ } else if (output_format != "") {
+ printf("WARNING: unrecognized output format \"%s\" ignored.\n",
+ output_format.c_str());
+ fflush(stdout);
+ }
+}
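+
+// Illustrative invocation (not part of this file): XML output is requested
+// with the --gtest_output flag or the GTEST_OUTPUT environment variable, e.g.
+//
+//   ./my_tests --gtest_output=xml:reports/my_tests.xml
+//
+// Any non-empty format prefix other than "xml" triggers the warning above.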
+
+// Performs initialization dependent upon flag values obtained in
+// ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to
+// ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest
+// this function is also called from RunAllTests. Since this function can be
+// called more than once, it has to be idempotent.
+void UnitTestImpl::PostFlagParsingInit() {
+ // Ensures that this function does not execute more than once.
+ if (!post_flag_parse_init_performed_) {
+ post_flag_parse_init_performed_ = true;
+
+#if GTEST_HAS_DEATH_TEST
+ InitDeathTestSubprocessControlInfo();
+ SuppressTestEventsIfInSubprocess();
+#endif // GTEST_HAS_DEATH_TEST
+
+ // Registers parameterized tests. This makes parameterized tests
+ // available to the UnitTest reflection API without running
+ // RUN_ALL_TESTS.
+ RegisterParameterizedTests();
+
+ // Configures listeners for XML output. This makes it possible for users
+ // to shut down the default XML output before invoking RUN_ALL_TESTS.
+ ConfigureXmlOutput();
+ }
+}
+
+// A predicate that checks the name of a TestCase against a known
+// value.
+//
+// This is used for implementation of the UnitTest class only. We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestCaseNameIs is copyable.
+class TestCaseNameIs {
+ public:
+ // Constructor.
+ explicit TestCaseNameIs(const String& name)
+ : name_(name) {}
+
+ // Returns true iff the name of test_case matches name_.
+ bool operator()(const TestCase* test_case) const {
+ return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0;
+ }
+
+ private:
+ String name_;
+};
+
+// Finds and returns a TestCase with the given name. If one doesn't
+// exist, creates one and returns it. It's the CALLER'S
+// RESPONSIBILITY to ensure that this function is only called WHEN THE
+// TESTS ARE NOT SHUFFLED.
+//
+// Arguments:
+//
+// test_case_name: name of the test case
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+TestCase* UnitTestImpl::GetTestCase(const char* test_case_name,
+ const char* comment,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc) {
+ // Can we find a TestCase with the given name?
+ const std::vector<TestCase*>::const_iterator test_case =
+ std::find_if(test_cases_.begin(), test_cases_.end(),
+ TestCaseNameIs(test_case_name));
+
+ if (test_case != test_cases_.end())
+ return *test_case;
+
+ // No. Let's create one.
+ TestCase* const new_test_case =
+ new TestCase(test_case_name, comment, set_up_tc, tear_down_tc);
+
+ // Is this a death test case?
+ if (internal::UnitTestOptions::MatchesFilter(String(test_case_name),
+ kDeathTestCaseFilter)) {
+ // Yes. Inserts the test case after the last death test case
+ // defined so far. This only works when the test cases haven't
+ // been shuffled. Otherwise we may end up running a death test
+ // after a non-death test.
+ ++last_death_test_case_;
+ test_cases_.insert(test_cases_.begin() + last_death_test_case_,
+ new_test_case);
+ } else {
+ // No. Appends to the end of the list.
+ test_cases_.push_back(new_test_case);
+ }
+
+ test_case_indices_.push_back(static_cast<int>(test_case_indices_.size()));
+ return new_test_case;
+}
+
+// Helpers for setting up / tearing down the given environment. They
+// are for use in the ForEach() function.
+static void SetUpEnvironment(Environment* env) { env->SetUp(); }
+static void TearDownEnvironment(Environment* env) { env->TearDown(); }
+
+// Runs all tests in this UnitTest object, prints the result, and
+// returns 0 if all tests are successful, or 1 otherwise. If any
+// exception is thrown during a test on Windows, this test is
+// considered to have failed, but the rest of the tests will still be
+// run. (We disable exceptions on Linux and Mac OS X, so the issue
+// doesn't apply there.)
+// When parameterized tests are enabled, it expands and registers
+// parameterized tests first in RegisterParameterizedTests().
+// All other functions called from RunAllTests() may safely assume that
+// parameterized tests are ready to be counted and run.
+int UnitTestImpl::RunAllTests() {
+ // Makes sure InitGoogleTest() was called.
+ if (!GTestIsInitialized()) {
+ printf("%s",
+ "\nThis test program did NOT call ::testing::InitGoogleTest "
+ "before calling RUN_ALL_TESTS(). Please fix it.\n");
+ return 1;
+ }
+
+ // Do not run any test if the --help flag was specified.
+ if (g_help_flag)
+ return 0;
+
+ // Repeats the call to the post-flag parsing initialization in case the
+ // user didn't call InitGoogleTest.
+ PostFlagParsingInit();
+
+ // Even if sharding is not on, test runners may want to use the
+ // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding
+ // protocol.
+ internal::WriteToShardStatusFileIfNeeded();
+
+ // True iff we are in a subprocess for running a thread-safe-style
+ // death test.
+ bool in_subprocess_for_death_test = false;
+
+#if GTEST_HAS_DEATH_TEST
+ in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL);
+#endif // GTEST_HAS_DEATH_TEST
+
+ const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex,
+ in_subprocess_for_death_test);
+
+ // Compares the full test names with the filter to decide which
+ // tests to run.
+ const bool has_tests_to_run = FilterTests(should_shard
+ ? HONOR_SHARDING_PROTOCOL
+ : IGNORE_SHARDING_PROTOCOL) > 0;
+
+ // Lists the tests and exits if the --gtest_list_tests flag was specified.
+ if (GTEST_FLAG(list_tests)) {
+ // This must be called *after* FilterTests() has been called.
+ ListTestsMatchingFilter();
+ return 0;
+ }
+
+ random_seed_ = GTEST_FLAG(shuffle) ?
+ GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0;
+
+ // True iff at least one test has failed.
+ bool failed = false;
+
+ TestEventListener* repeater = listeners()->repeater();
+
+ repeater->OnTestProgramStart(*parent_);
+
+ // How many times to repeat the tests? We don't want to repeat them
+ // when we are inside the subprocess of a death test.
+ const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat);
+ // Repeats forever if the repeat count is negative.
+ const bool forever = repeat < 0;
+ for (int i = 0; forever || i != repeat; i++) {
+ ClearResult();
+
+ const TimeInMillis start = GetTimeInMillis();
+
+ // Shuffles test cases and tests if requested.
+ if (has_tests_to_run && GTEST_FLAG(shuffle)) {
+ random()->Reseed(random_seed_);
+ // This should be done before calling OnTestIterationStart(),
+ // such that a test event listener can see the actual test order
+ // in the event.
+ ShuffleTests();
+ }
+
+ // Tells the unit test event listeners that the tests are about to start.
+ repeater->OnTestIterationStart(*parent_, i);
+
+ // Runs each test case if there is at least one test to run.
+ if (has_tests_to_run) {
+ // Sets up all environments beforehand.
+ repeater->OnEnvironmentsSetUpStart(*parent_);
+ ForEach(environments_, SetUpEnvironment);
+ repeater->OnEnvironmentsSetUpEnd(*parent_);
+
+ // Runs the tests only if there was no fatal failure during global
+ // set-up.
+ if (!Test::HasFatalFailure()) {
+ for (int test_index = 0; test_index < total_test_case_count();
+ test_index++) {
+ GetMutableTestCase(test_index)->Run();
+ }
+ }
+
+ // Tears down all environments in reverse order afterwards.
+ repeater->OnEnvironmentsTearDownStart(*parent_);
+ std::for_each(environments_.rbegin(), environments_.rend(),
+ TearDownEnvironment);
+ repeater->OnEnvironmentsTearDownEnd(*parent_);
+ }
+
+ elapsed_time_ = GetTimeInMillis() - start;
+
+ // Tells the unit test event listener that the tests have just finished.
+ repeater->OnTestIterationEnd(*parent_, i);
+
+ // Gets the result and clears it.
+ if (!Passed()) {
+ failed = true;
+ }
+
+ // Restores the original test order after the iteration. This
+ // allows the user to quickly repro a failure that happens in the
+ // N-th iteration without repeating the first (N - 1) iterations.
+ // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in
+ // case the user somehow changes the value of the flag somewhere
+ // (it's always safe to unshuffle the tests).
+ UnshuffleTests();
+
+ if (GTEST_FLAG(shuffle)) {
+ // Picks a new random seed for each iteration.
+ random_seed_ = GetNextRandomSeed(random_seed_);
+ }
+ }
+
+ repeater->OnTestProgramEnd(*parent_);
+
+ // Returns 0 if all tests passed, or 1 otherwise.
+ return failed ? 1 : 0;
+}
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded() {
+ const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile);
+ if (test_shard_file != NULL) {
+ FILE* const file = posix::FOpen(test_shard_file, "w");
+ if (file == NULL) {
+ ColoredPrintf(COLOR_RED,
+ "Could not write to the test shard status file \"%s\" "
+ "specified by the %s environment variable.\n",
+ test_shard_file, kTestShardStatusFile);
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ }
+ fclose(file);
+ }
+}
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (i.e., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+bool ShouldShard(const char* total_shards_env,
+ const char* shard_index_env,
+ bool in_subprocess_for_death_test) {
+ if (in_subprocess_for_death_test) {
+ return false;
+ }
+
+ const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1);
+ const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1);
+
+ if (total_shards == -1 && shard_index == -1) {
+ return false;
+ } else if (total_shards == -1 && shard_index != -1) {
+ const Message msg = Message()
+ << "Invalid environment variables: you have "
+ << kTestShardIndex << " = " << shard_index
+ << ", but have left " << kTestTotalShards << " unset.\n";
+ ColoredPrintf(COLOR_RED, msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ } else if (total_shards != -1 && shard_index == -1) {
+ const Message msg = Message()
+ << "Invalid environment variables: you have "
+ << kTestTotalShards << " = " << total_shards
+ << ", but have left " << kTestShardIndex << " unset.\n";
+ ColoredPrintf(COLOR_RED, msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ } else if (shard_index < 0 || shard_index >= total_shards) {
+ const Message msg = Message()
+ << "Invalid environment variables: we require 0 <= "
+ << kTestShardIndex << " < " << kTestTotalShards
+ << ", but you have " << kTestShardIndex << "=" << shard_index
+ << ", " << kTestTotalShards << "=" << total_shards << ".\n";
+ ColoredPrintf(COLOR_RED, msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ }
+
+ return total_shards > 1;
+}
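+
+// Illustrative sketch (not part of this file): a test runner can split the
+// work across three machines by exporting the two variables checked above
+// (GTEST_TOTAL_SHARDS / GTEST_SHARD_INDEX in a stock Google Test build):
+//
+//   GTEST_TOTAL_SHARDS=3 GTEST_SHARD_INDEX=0 ./my_tests   # machine 0
+//   GTEST_TOTAL_SHARDS=3 GTEST_SHARD_INDEX=1 ./my_tests   # machine 1
+//   GTEST_TOTAL_SHARDS=3 GTEST_SHARD_INDEX=2 ./my_tests   # machine 2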
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error
+// and aborts.
+Int32 Int32FromEnvOrDie(const char* const var, Int32 default_val) {
+ const char* str_val = posix::GetEnv(var);
+ if (str_val == NULL) {
+ return default_val;
+ }
+
+ Int32 result;
+ if (!ParseInt32(Message() << "The value of environment variable " << var,
+ str_val, &result)) {
+ exit(EXIT_FAILURE);
+ }
+ return result;
+}
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) {
+ return (test_id % total_shards) == shard_index;
+}
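+
+// For example, with total_shards == 3 the runnable tests are dealt out
+// round-robin: shard 0 gets test ids 0, 3, 6, ..., shard 1 gets 1, 4, 7, ...,
+// and shard 2 gets 2, 5, 8, ...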
+
+// Compares the name of each test with the user-specified filter to
+// decide whether the test should be run, then records the result in
+// each TestCase and TestInfo object.
+// If shard_tests == true, further filters tests based on sharding
+// variables in the environment - see
+// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide.
+// Returns the number of tests that should run.
+int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) {
+ const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ?
+ Int32FromEnvOrDie(kTestTotalShards, -1) : -1;
+ const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ?
+ Int32FromEnvOrDie(kTestShardIndex, -1) : -1;
+
+ // num_runnable_tests is the number of tests that will
+ // run across all shards (i.e., that match the filter and are not disabled).
+ // num_selected_tests is the number of tests to be run on
+ // this shard.
+ int num_runnable_tests = 0;
+ int num_selected_tests = 0;
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ TestCase* const test_case = test_cases_[i];
+ const String &test_case_name = test_case->name();
+ test_case->set_should_run(false);
+
+ for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
+ TestInfo* const test_info = test_case->test_info_list()[j];
+ const String test_name(test_info->name());
+ // A test is disabled if test case name or test name matches
+ // kDisableTestFilter.
+ const bool is_disabled =
+ internal::UnitTestOptions::MatchesFilter(test_case_name,
+ kDisableTestFilter) ||
+ internal::UnitTestOptions::MatchesFilter(test_name,
+ kDisableTestFilter);
+ test_info->impl()->set_is_disabled(is_disabled);
+
+ const bool matches_filter =
+ internal::UnitTestOptions::FilterMatchesTest(test_case_name,
+ test_name);
+ test_info->impl()->set_matches_filter(matches_filter);
+
+ const bool is_runnable =
+ (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) &&
+ matches_filter;
+
+ const bool is_selected = is_runnable &&
+ (shard_tests == IGNORE_SHARDING_PROTOCOL ||
+ ShouldRunTestOnShard(total_shards, shard_index,
+ num_runnable_tests));
+
+ num_runnable_tests += is_runnable;
+ num_selected_tests += is_selected;
+
+ test_info->impl()->set_should_run(is_selected);
+ test_case->set_should_run(test_case->should_run() || is_selected);
+ }
+ }
+ return num_selected_tests;
+}
+
+// Prints the names of the tests matching the user-specified filter flag.
+void UnitTestImpl::ListTestsMatchingFilter() {
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ const TestCase* const test_case = test_cases_[i];
+ bool printed_test_case_name = false;
+
+ for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
+ const TestInfo* const test_info =
+ test_case->test_info_list()[j];
+ if (test_info->matches_filter()) {
+ if (!printed_test_case_name) {
+ printed_test_case_name = true;
+ printf("%s.\n", test_case->name());
+ }
+ printf(" %s\n", test_info->name());
+ }
+ }
+ }
+ fflush(stdout);
+}
+
+// Sets the OS stack trace getter.
+//
+// Does nothing if the input and the current OS stack trace getter are
+// the same; otherwise, deletes the old getter and makes the input the
+// current getter.
+void UnitTestImpl::set_os_stack_trace_getter(
+ OsStackTraceGetterInterface* getter) {
+ if (os_stack_trace_getter_ != getter) {
+ delete os_stack_trace_getter_;
+ os_stack_trace_getter_ = getter;
+ }
+}
+
+// Returns the current OS stack trace getter if it is not NULL;
+// otherwise, creates an OsStackTraceGetter, makes it the current
+// getter, and returns it.
+OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() {
+ if (os_stack_trace_getter_ == NULL) {
+ os_stack_trace_getter_ = new OsStackTraceGetter;
+ }
+
+ return os_stack_trace_getter_;
+}
+
+// Returns the TestResult for the test that's currently running, or
+// the TestResult for the ad hoc test if no test is running.
+TestResult* UnitTestImpl::current_test_result() {
+ return current_test_info_ ?
+ current_test_info_->impl()->result() : &ad_hoc_test_result_;
+}
+
+// Shuffles all test cases, and the tests within each test case,
+// making sure that death tests are still run first.
+void UnitTestImpl::ShuffleTests() {
+ // Shuffles the death test cases.
+ ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_);
+
+ // Shuffles the non-death test cases.
+ ShuffleRange(random(), last_death_test_case_ + 1,
+ static_cast<int>(test_cases_.size()), &test_case_indices_);
+
+ // Shuffles the tests inside each test case.
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ test_cases_[i]->ShuffleTests(random());
+ }
+}
+
+// Restores the test cases and tests to their order before the first shuffle.
+void UnitTestImpl::UnshuffleTests() {
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ // Unshuffles the tests in each test case.
+ test_cases_[i]->UnshuffleTests();
+ // Resets the index of each test case.
+ test_case_indices_[i] = static_cast<int>(i);
+ }
+}
+
+// TestInfoImpl constructor. The new instance assumes ownership of the test
+// factory object.
+TestInfoImpl::TestInfoImpl(TestInfo* parent,
+ const char* a_test_case_name,
+ const char* a_name,
+ const char* a_test_case_comment,
+ const char* a_comment,
+ TypeId a_fixture_class_id,
+ internal::TestFactoryBase* factory) :
+ parent_(parent),
+ test_case_name_(String(a_test_case_name)),
+ name_(String(a_name)),
+ test_case_comment_(String(a_test_case_comment)),
+ comment_(String(a_comment)),
+ fixture_class_id_(a_fixture_class_id),
+ should_run_(false),
+ is_disabled_(false),
+ matches_filter_(false),
+ factory_(factory) {
+}
+
+// TestInfoImpl destructor.
+TestInfoImpl::~TestInfoImpl() {
+ delete factory_;
+}
+
+// Returns the current OS stack trace as a String.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+String GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/,
+ int skip_count) {
+ // We pass skip_count + 1 to skip this wrapper function in addition
+ // to what the user really wants to skip.
+ return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1);
+}
+
+// Used by the GTEST_HIDE_UNREACHABLE_CODE_ macro to suppress unreachable
+// code warnings.
+namespace {
+class ClassUniqueToAlwaysTrue {};
+}
+
+bool IsTrue(bool condition) { return condition; }
+
+bool AlwaysTrue() {
+#if GTEST_HAS_EXCEPTIONS
+ // This condition is always false so AlwaysTrue() never actually throws,
+ // but it makes the compiler think that it may throw.
+ if (IsTrue(false))
+ throw ClassUniqueToAlwaysTrue();
+#endif // GTEST_HAS_EXCEPTIONS
+ return true;
+}
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+bool SkipPrefix(const char* prefix, const char** pstr) {
+ const size_t prefix_len = strlen(prefix);
+ if (strncmp(*pstr, prefix, prefix_len) == 0) {
+ *pstr += prefix_len;
+ return true;
+ }
+ return false;
+}
+
+// Parses a string as a command line flag. The string should have
+// the format "--flag=value". When def_optional is true, the "=value"
+// part can be omitted.
+//
+// Returns the value of the flag, or NULL if the parsing failed.
+const char* ParseFlagValue(const char* str,
+ const char* flag,
+ bool def_optional) {
+ // str and flag must not be NULL.
+ if (str == NULL || flag == NULL) return NULL;
+
+ // The flag must start with "--" followed by GTEST_FLAG_PREFIX_.
+ const String flag_str = String::Format("--%s%s", GTEST_FLAG_PREFIX_, flag);
+ const size_t flag_len = flag_str.length();
+ if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL;
+
+ // Skips the flag name.
+ const char* flag_end = str + flag_len;
+
+ // When def_optional is true, it's OK to not have a "=value" part.
+ if (def_optional && (flag_end[0] == '\0')) {
+ return flag_end;
+ }
+
+ // If def_optional is true and there are more characters after the
+ // flag name, or if def_optional is false, there must be a '=' after
+ // the flag name.
+ if (flag_end[0] != '=') return NULL;
+
+ // Returns the string after "=".
+ return flag_end + 1;
+}
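+
+// For example, assuming the usual "gtest_" value of GTEST_FLAG_PREFIX_:
+//   ParseFlagValue("--gtest_filter=Foo.*", "filter", false)   returns "Foo.*"
+//   ParseFlagValue("--gtest_list_tests", "list_tests", true)  returns ""
+//   ParseFlagValue("--gtest_filter", "filter", false)         returns NULL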
+
+// Parses a string for a bool flag, in the form of either
+// "--flag=value" or "--flag".
+//
+// In the former case, the value is taken as true as long as it does
+// not start with '0', 'f', or 'F'.
+//
+// In the latter case, the value is taken as true.
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, true);
+
+ // Aborts if the parsing failed.
+ if (value_str == NULL) return false;
+
+ // Converts the string value to a bool.
+ *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F');
+ return true;
+}
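+
+// For example, "--gtest_shuffle" and "--gtest_shuffle=1" both set *value to
+// true, while "--gtest_shuffle=0" sets it to false (again assuming the
+// "gtest_" flag prefix).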
+
+// Parses a string for an Int32 flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseInt32Flag(const char* str, const char* flag, Int32* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == NULL) return false;
+
+ // Sets *value to the value of the flag.
+ return ParseInt32(Message() << "The value of flag --" << flag,
+ value_str, value);
+}
+
+// Parses a string for a string flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseStringFlag(const char* str, const char* flag, String* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == NULL) return false;
+
+ // Sets *value to the value of the flag.
+ *value = value_str;
+ return true;
+}
+
+// Determines whether a string has a prefix that Google Test uses for its
+// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_.
+// If Google Test detects that a command line flag has its prefix but is not
+// recognized, it will print its help message. Flags starting with
+// GTEST_FLAG_PREFIX_ followed by "internal_" are considered Google Test
+// internal flags and do not trigger the help message.
+static bool HasGoogleTestFlagPrefix(const char* str) {
+ return (SkipPrefix("--", &str) ||
+ SkipPrefix("-", &str) ||
+ SkipPrefix("/", &str)) &&
+ !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) &&
+ (SkipPrefix(GTEST_FLAG_PREFIX_, &str) ||
+ SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str));
+}
+
+// Prints a string containing code-encoded text. The following escape
+// sequences can be used in the string to control the text color:
+//
+// @@ prints a single '@' character.
+// @R changes the color to red.
+// @G changes the color to green.
+// @Y changes the color to yellow.
+// @D changes to the default terminal text color.
+//
+// TODO(wan@google.com): Write tests for this once we add stdout
+// capturing to Google Test.
+static void PrintColorEncoded(const char* str) {
+ GTestColor color = COLOR_DEFAULT; // The current color.
+
+ // Conceptually, we split the string into segments divided by escape
+ // sequences. Then we print one segment at a time. At the end of
+ // each iteration, the str pointer advances to the beginning of the
+ // next segment.
+ for (;;) {
+ const char* p = strchr(str, '@');
+ if (p == NULL) {
+ ColoredPrintf(color, "%s", str);
+ return;
+ }
+
+ ColoredPrintf(color, "%s", String(str, p - str).c_str());
+
+ const char ch = p[1];
+ str = p + 2;
+ if (ch == '@') {
+ ColoredPrintf(color, "@");
+ } else if (ch == 'D') {
+ color = COLOR_DEFAULT;
+ } else if (ch == 'R') {
+ color = COLOR_RED;
+ } else if (ch == 'G') {
+ color = COLOR_GREEN;
+ } else if (ch == 'Y') {
+ color = COLOR_YELLOW;
+ } else {
+ --str;
+ }
+ }
+}
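+
+// For example, PrintColorEncoded("@RError:@D details\n") prints "Error:" in
+// red followed by " details" in the default color, while "@@" in the input
+// prints a literal '@'.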
+
+static const char kColorEncodedHelpMessage[] =
+"This program contains tests written using " GTEST_NAME_ ". You can use the\n"
+"following command line flags to control its behavior:\n"
+"\n"
+"Test Selection:\n"
+" @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n"
+" List the names of all tests instead of running them. The name of\n"
+" TEST(Foo, Bar) is \"Foo.Bar\".\n"
+" @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSTIVE_PATTERNS"
+ "[@G-@YNEGATIVE_PATTERNS]@D\n"
+" Run only the tests whose name matches one of the positive patterns but\n"
+" none of the negative patterns. '?' matches any single character; '*'\n"
+" matches any substring; ':' separates two patterns.\n"
+" @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n"
+" Run all disabled tests too.\n"
+"\n"
+"Test Execution:\n"
+" @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n"
+" Run the tests repeatedly; use a negative count to repeat forever.\n"
+" @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n"
+" Randomize tests' orders on every iteration.\n"
+" @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n"
+" Random number seed to use for shuffling test orders (between 1 and\n"
+" 99999, or 0 to use a seed based on the current time).\n"
+"\n"
+"Test Output:\n"
+" @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n"
+" Enable/disable colored output. The default is @Gauto@D.\n"
+" -@G-" GTEST_FLAG_PREFIX_ "print_time=0@D\n"
+" Don't print the elapsed time of each test.\n"
+" @G--" GTEST_FLAG_PREFIX_ "output=xml@Y[@G:@YDIRECTORY_PATH@G"
+ GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n"
+" Generate an XML report in the given directory or with the given file\n"
+" name. @YFILE_PATH@D defaults to @Gtest_details.xml@D.\n"
+"\n"
+"Assertion Behavior:\n"
+#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+" @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n"
+" Set the default death test style.\n"
+#endif // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+" @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n"
+" Turn assertion failures into debugger break-points.\n"
+" @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n"
+" Turn assertion failures into C++ exceptions.\n"
+#if GTEST_OS_WINDOWS
+" @G--" GTEST_FLAG_PREFIX_ "catch_exceptions@D\n"
+" Suppress pop-ups caused by exceptions.\n"
+#endif // GTEST_OS_WINDOWS
+"\n"
+"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set "
+ "the corresponding\n"
+"environment variable of a flag (all letters in upper-case). For example, to\n"
+"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_
+ "color=no@D or set\n"
+"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n"
+"\n"
+"For more information, please read the " GTEST_NAME_ " documentation at\n"
+"@G" GTEST_PROJECT_URL_ "@D. If you find a bug in " GTEST_NAME_ "\n"
+"(not one in your own code or tests), please report it to\n"
+"@G<" GTEST_DEV_EMAIL_ ">@D.\n";
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test. The type parameter CharType can be
+// instantiated to either char or wchar_t.
+template <typename CharType>
+void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
+ for (int i = 1; i < *argc; i++) {
+ const String arg_string = StreamableToString(argv[i]);
+ const char* const arg = arg_string.c_str();
+
+ using internal::ParseBoolFlag;
+ using internal::ParseInt32Flag;
+ using internal::ParseStringFlag;
+
+ // Do we see a Google Test flag?
+ if (ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,
+ &GTEST_FLAG(also_run_disabled_tests)) ||
+ ParseBoolFlag(arg, kBreakOnFailureFlag,
+ &GTEST_FLAG(break_on_failure)) ||
+ ParseBoolFlag(arg, kCatchExceptionsFlag,
+ &GTEST_FLAG(catch_exceptions)) ||
+ ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
+ ParseStringFlag(arg, kDeathTestStyleFlag,
+ &GTEST_FLAG(death_test_style)) ||
+ ParseBoolFlag(arg, kDeathTestUseFork,
+ &GTEST_FLAG(death_test_use_fork)) ||
+ ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
+ ParseStringFlag(arg, kInternalRunDeathTestFlag,
+ &GTEST_FLAG(internal_run_death_test)) ||
+ ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
+ ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
+ ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
+ ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||
+ ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||
+ ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
+ ParseInt32Flag(arg, kStackTraceDepthFlag,
+ &GTEST_FLAG(stack_trace_depth)) ||
+ ParseBoolFlag(arg, kThrowOnFailureFlag, &GTEST_FLAG(throw_on_failure))
+ ) {
+ // Yes. Shift the remainder of the argv list left by one. Note
+ // that argv has (*argc + 1) elements, the last one always being
+ // NULL. The following loop moves the trailing NULL element as
+ // well.
+ for (int j = i; j != *argc; j++) {
+ argv[j] = argv[j + 1];
+ }
+
+ // Decrements the argument count.
+ (*argc)--;
+
+ // We also need to decrement the iterator as we just removed
+ // an element.
+ i--;
+ } else if (arg_string == "--help" || arg_string == "-h" ||
+ arg_string == "-?" || arg_string == "/?" ||
+ HasGoogleTestFlagPrefix(arg)) {
+ // Both help flag and unrecognized Google Test flags (excluding
+ // internal ones) trigger help display.
+ g_help_flag = true;
+ }
+ }
+
+ if (g_help_flag) {
+ // We print the help here instead of in RUN_ALL_TESTS(), as the
+ // latter may not be called at all if the user is using Google
+ // Test with another testing framework.
+ PrintColorEncoded(kColorEncodedHelpMessage);
+ }
+}
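+
+// For example, given argv == { "prog", "--gtest_repeat=2", "--foo", NULL },
+// the recognized flag is consumed and removed, leaving
+// argv == { "prog", "--foo", NULL } and *argc == 2; "--foo" is left for the
+// program (or another flag-parsing library) to handle.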
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+void ParseGoogleTestFlagsOnly(int* argc, char** argv) {
+ ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) {
+ ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+
+// The internal implementation of InitGoogleTest().
+//
+// The type parameter CharType can be instantiated to either char or
+// wchar_t.
+template <typename CharType>
+void InitGoogleTestImpl(int* argc, CharType** argv) {
+ g_init_gtest_count++;
+
+ // We don't want to run the initialization code twice.
+ if (g_init_gtest_count != 1) return;
+
+ if (*argc <= 0) return;
+
+ internal::g_executable_path = internal::StreamableToString(argv[0]);
+
+#if GTEST_HAS_DEATH_TEST
+ g_argvs.clear();
+ for (int i = 0; i != *argc; i++) {
+ g_argvs.push_back(StreamableToString(argv[i]));
+ }
+#endif // GTEST_HAS_DEATH_TEST
+
+ ParseGoogleTestFlagsOnly(argc, argv);
+ GetUnitTestImpl()->PostFlagParsingInit();
+}
+
+} // namespace internal
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+void InitGoogleTest(int* argc, char** argv) {
+ internal::InitGoogleTestImpl(argc, argv);
+}
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+void InitGoogleTest(int* argc, wchar_t** argv) {
+ internal::InitGoogleTestImpl(argc, argv);
+}
+
+} // namespace testing
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev)
+//
+// This file implements death tests.
+
+
+#if GTEST_HAS_DEATH_TEST
+
+#if GTEST_OS_MAC
+#include <crt_externs.h>
+#endif // GTEST_OS_MAC
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdarg.h>
+
+#if GTEST_OS_WINDOWS
+#include <windows.h>
+#else
+#include <sys/mman.h>
+#include <sys/wait.h>
+#endif // GTEST_OS_WINDOWS
+
+#endif // GTEST_HAS_DEATH_TEST
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+// Constants.
+
+// The default death test style.
+static const char kDefaultDeathTestStyle[] = "fast";
+
+GTEST_DEFINE_string_(
+ death_test_style,
+ internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle),
+ "Indicates how to run a death test in a forked child process: "
+ "\"threadsafe\" (child process re-executes the test binary "
+ "from the beginning, running only the specific death test) or "
+ "\"fast\" (child process runs the death test immediately "
+ "after forking).");
+
+GTEST_DEFINE_bool_(
+ death_test_use_fork,
+ internal::BoolFromGTestEnv("death_test_use_fork", false),
+ "Instructs to use fork()/_exit() instead of clone() in death tests. "
+ "Ignored and always uses fork() on POSIX systems where clone() is not "
+ "implemented. Useful when running under valgrind or similar tools if "
+ "those do not support clone(). Valgrind 3.3.1 will just fail if "
+ "it sees an unsupported combination of clone() flags. "
+ "It is not recommended to use this flag w/o valgrind though it will "
+ "work in 99% of the cases. Once valgrind is fixed, this flag will "
+ "most likely be removed.");
+
+namespace internal {
+GTEST_DEFINE_string_(
+ internal_run_death_test, "",
+ "Indicates the file, line number, temporal index of "
+ "the single death test to run, and a file descriptor to "
+ "which a success code may be sent, all separated by "
+ "colons. This flag is specified if and only if the current "
+ "process is a sub-process launched for running a thread-safe "
+ "death test. FOR INTERNAL USE ONLY.");
+} // namespace internal
+
+#if GTEST_HAS_DEATH_TEST
+
+// ExitedWithCode constructor.
+ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {
+}
+
+// ExitedWithCode function-call operator.
+bool ExitedWithCode::operator()(int exit_status) const {
+#if GTEST_OS_WINDOWS
+ return exit_status == exit_code_;
+#else
+ return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_;
+#endif // GTEST_OS_WINDOWS
+}
+
+#if !GTEST_OS_WINDOWS
+// KilledBySignal constructor.
+KilledBySignal::KilledBySignal(int signum) : signum_(signum) {
+}
+
+// KilledBySignal function-call operator.
+bool KilledBySignal::operator()(int exit_status) const {
+ return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_;
+}
+#endif // !GTEST_OS_WINDOWS
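+
+// These predicates are typically passed to the death test assertions, e.g.
+// (illustrative only):
+//   EXPECT_EXIT(exit(1), ExitedWithCode(1), "");
+//   EXPECT_EXIT(raise(SIGKILL), KilledBySignal(SIGKILL), "");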
+
+namespace internal {
+
+// Utilities needed for death tests.
+
+// Generates a textual description of a given exit code, in the format
+// specified by wait(2).
+static String ExitSummary(int exit_code) {
+ Message m;
+#if GTEST_OS_WINDOWS
+ m << "Exited with exit status " << exit_code;
+#else
+ if (WIFEXITED(exit_code)) {
+ m << "Exited with exit status " << WEXITSTATUS(exit_code);
+ } else if (WIFSIGNALED(exit_code)) {
+ m << "Terminated by signal " << WTERMSIG(exit_code);
+ }
+#ifdef WCOREDUMP
+ if (WCOREDUMP(exit_code)) {
+ m << " (core dumped)";
+ }
+#endif
+#endif // GTEST_OS_WINDOWS
+ return m.GetString();
+}
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+bool ExitedUnsuccessfully(int exit_status) {
+ return !ExitedWithCode(0)(exit_status);
+}
+
+#if !GTEST_OS_WINDOWS
+// Generates a textual failure message when a death test finds more than
+// one thread running, or cannot determine the number of threads, prior
+// to executing the given statement. It is the responsibility of the
+// caller not to pass a thread_count of 1.
+static String DeathTestThreadWarning(size_t thread_count) {
+ Message msg;
+ msg << "Death tests use fork(), which is unsafe particularly"
+ << " in a threaded context. For this test, " << GTEST_NAME_ << " ";
+ if (thread_count == 0)
+ msg << "couldn't detect the number of threads.";
+ else
+ msg << "detected " << thread_count << " threads.";
+ return msg.GetString();
+}
+#endif // !GTEST_OS_WINDOWS
+
+// Flag characters for reporting a death test that did not die.
+static const char kDeathTestLived = 'L';
+static const char kDeathTestReturned = 'R';
+static const char kDeathTestInternalError = 'I';
+
+// An enumeration describing all of the possible ways that a death test
+// can conclude. DIED means that the process died while executing the
+// test code; LIVED means that process lived beyond the end of the test
+// code; and RETURNED means that the test statement attempted a "return,"
+// which is not allowed. IN_PROGRESS means the test has not yet
+// concluded.
+enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED };
+
+// Routine for aborting the program which is safe to call from an
+// exec-style death test child process, in which case the error
+// message is propagated back to the parent process. Otherwise, the
+// message is simply printed to stderr. In either case, the program
+// then terminates: via _exit(1) in the child-process case, or via
+// abort() otherwise.
+void DeathTestAbort(const String& message) {
+ // On a POSIX system, this function may be called from a threadsafe-style
+ // death test child process, which operates on a very small stack. Use
+ // the heap for any additional non-minuscule memory requirements.
+ const InternalRunDeathTestFlag* const flag =
+ GetUnitTestImpl()->internal_run_death_test_flag();
+ if (flag != NULL) {
+ FILE* parent = posix::FDOpen(flag->write_fd(), "w");
+ fputc(kDeathTestInternalError, parent);
+ fprintf(parent, "%s", message.c_str());
+ fflush(parent);
+ _exit(1);
+ } else {
+ fprintf(stderr, "%s", message.c_str());
+ fflush(stderr);
+ abort();
+ }
+}
+
+// A replacement for CHECK that calls DeathTestAbort if the assertion
+// fails.
+#define GTEST_DEATH_TEST_CHECK_(expression) \
+ do { \
+ if (!::testing::internal::IsTrue(expression)) { \
+ DeathTestAbort(::testing::internal::String::Format( \
+ "CHECK failed: File %s, line %d: %s", \
+ __FILE__, __LINE__, #expression)); \
+ } \
+ } while (::testing::internal::AlwaysFalse())
+
+// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for
+// evaluating any system call that fulfills two conditions: it must return
+// -1 on failure, and set errno to EINTR when it is interrupted and
+// should be tried again. The macro expands to a loop that repeatedly
+// evaluates the expression as long as it evaluates to -1 and sets
+// errno to EINTR. If the expression evaluates to -1 but errno is
+// something other than EINTR, DeathTestAbort is called.
+#define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \
+ do { \
+ int gtest_retval; \
+ do { \
+ gtest_retval = (expression); \
+ } while (gtest_retval == -1 && errno == EINTR); \
+ if (gtest_retval == -1) { \
+ DeathTestAbort(::testing::internal::String::Format( \
+ "CHECK failed: File %s, line %d: %s != -1", \
+ __FILE__, __LINE__, #expression)); \
+ } \
+ } while (::testing::internal::AlwaysFalse())
+
+// Returns the message describing the last system error in errno.
+String GetLastErrnoDescription() {
+ return String(errno == 0 ? "" : posix::StrError(errno));
+}
+
+// This is called from a death test parent process to read a failure
+// message from the death test child process and log it with the FATAL
+// severity. On Windows, the message is read from a pipe handle. On other
+// platforms, it is read from a file descriptor.
+static void FailFromInternalError(int fd) {
+ Message error;
+ char buffer[256];
+ int num_read;
+
+ do {
+ while ((num_read = posix::Read(fd, buffer, 255)) > 0) {
+ buffer[num_read] = '\0';
+ error << buffer;
+ }
+ } while (num_read == -1 && errno == EINTR);
+
+ if (num_read == 0) {
+ GTEST_LOG_(FATAL) << error.GetString();
+ } else {
+ const int last_error = errno;
+ GTEST_LOG_(FATAL) << "Error while reading death test internal: "
+ << GetLastErrnoDescription() << " [" << last_error << "]";
+ }
+}
+
+// Death test constructor. Increments the running death test count
+// for the current test.
+DeathTest::DeathTest() {
+ TestInfo* const info = GetUnitTestImpl()->current_test_info();
+ if (info == NULL) {
+ DeathTestAbort("Cannot run a death test outside of a TEST or "
+ "TEST_F construct");
+ }
+}
+
+// Creates and returns a death test by dispatching to the current
+// death test factory.
+bool DeathTest::Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test) {
+ return GetUnitTestImpl()->death_test_factory()->Create(
+ statement, regex, file, line, test);
+}
+
+const char* DeathTest::LastMessage() {
+ return last_death_test_message_.c_str();
+}
+
+void DeathTest::set_last_death_test_message(const String& message) {
+ last_death_test_message_ = message;
+}
+
+String DeathTest::last_death_test_message_;
+
+// Provides cross platform implementation for some death functionality.
+class DeathTestImpl : public DeathTest {
+ protected:
+ DeathTestImpl(const char* a_statement, const RE* a_regex)
+ : statement_(a_statement),
+ regex_(a_regex),
+ spawned_(false),
+ status_(-1),
+ outcome_(IN_PROGRESS),
+ read_fd_(-1),
+ write_fd_(-1) {}
+
+ // read_fd_ is expected to be closed and cleared by a derived class.
+ ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); }
+
+ void Abort(AbortReason reason);
+ virtual bool Passed(bool status_ok);
+
+ const char* statement() const { return statement_; }
+ const RE* regex() const { return regex_; }
+ bool spawned() const { return spawned_; }
+ void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
+ int status() const { return status_; }
+ void set_status(int a_status) { status_ = a_status; }
+ DeathTestOutcome outcome() const { return outcome_; }
+ void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; }
+ int read_fd() const { return read_fd_; }
+ void set_read_fd(int fd) { read_fd_ = fd; }
+ int write_fd() const { return write_fd_; }
+ void set_write_fd(int fd) { write_fd_ = fd; }
+
+ // Called in the parent process only. Reads the result code of the death
+ // test child process via a pipe, interprets it to set the outcome_
+ // member, and closes read_fd_. Outputs diagnostics and terminates in
+ // case of unexpected codes.
+ void ReadAndInterpretStatusByte();
+
+ private:
+ // The textual content of the code this object is testing. This class
+ // doesn't own this string and should not attempt to delete it.
+ const char* const statement_;
+ // The regular expression which test output must match. DeathTestImpl
+ // doesn't own this object and should not attempt to delete it.
+ const RE* const regex_;
+ // True if the death test child process has been successfully spawned.
+ bool spawned_;
+ // The exit status of the child process.
+ int status_;
+ // How the death test concluded.
+ DeathTestOutcome outcome_;
+ // Descriptor to the read end of the pipe to the child process. It is
+ // always -1 in the child process. The child keeps its write end of the
+ // pipe in write_fd_.
+ int read_fd_;
+ // Descriptor to the child's write end of the pipe to the parent process.
+ // It is always -1 in the parent process. The parent keeps its end of the
+ // pipe in read_fd_.
+ int write_fd_;
+};
+
+// Called in the parent process only. Reads the result code of the death
+// test child process via a pipe, interprets it to set the outcome_
+// member, and closes read_fd_. Outputs diagnostics and terminates in
+// case of unexpected codes.
+void DeathTestImpl::ReadAndInterpretStatusByte() {
+ char flag;
+ int bytes_read;
+
+ // The read() here blocks until data is available (signifying the
+ // failure of the death test) or until the pipe is closed (signifying
+ // its success), so it's okay to call this in the parent before
+ // the child process has exited.
+ do {
+ bytes_read = posix::Read(read_fd(), &flag, 1);
+ } while (bytes_read == -1 && errno == EINTR);
+
+ if (bytes_read == 0) {
+ set_outcome(DIED);
+ } else if (bytes_read == 1) {
+ switch (flag) {
+ case kDeathTestReturned:
+ set_outcome(RETURNED);
+ break;
+ case kDeathTestLived:
+ set_outcome(LIVED);
+ break;
+ case kDeathTestInternalError:
+ FailFromInternalError(read_fd()); // Does not return.
+ break;
+ default:
+ GTEST_LOG_(FATAL) << "Death test child process reported "
+ << "unexpected status byte ("
+ << static_cast<unsigned int>(flag) << ")";
+ }
+ } else {
+ GTEST_LOG_(FATAL) << "Read from death test child process failed: "
+ << GetLastErrnoDescription();
+ }
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd()));
+ set_read_fd(-1);
+}
+
+// Signals that the death test code which should have exited, didn't.
+// Should be called only in a death test child process.
+// Writes a status byte to the child's status file descriptor, then
+// calls _exit(1).
+void DeathTestImpl::Abort(AbortReason reason) {
+ // The parent process considers the death test to be a failure if
+ // it finds any data in our pipe. So, here we write a single flag byte
+ // to the pipe, then exit.
+ const char status_ch =
+ reason == TEST_DID_NOT_DIE ? kDeathTestLived : kDeathTestReturned;
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1));
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(write_fd()));
+ _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash)
+}
+
+// Assesses the success or failure of a death test, using both private
+// members which have previously been set, and one argument:
+//
+// Private data members:
+// outcome: An enumeration describing how the death test
+// concluded: DIED, LIVED, or RETURNED. The death test fails
+// in the latter two cases.
+ // status: The exit status of the child process. On *nix, it is in
+ // the format specified by wait(2). On Windows, this is the
+// value supplied to the ExitProcess() API or a numeric code
+// of the exception that terminated the program.
+// regex: A regular expression object to be applied to
+// the test's captured standard error output; the death test
+// fails if it does not match.
+//
+// Argument:
+// status_ok: true if exit_status is acceptable in the context of
+// this particular death test, which fails if it is false
+//
+// Returns true iff all of the above conditions are met. Otherwise, the
+// first failing condition, in the order given above, is the one that is
+// reported. Also sets the last death test message string.
+bool DeathTestImpl::Passed(bool status_ok) {
+ if (!spawned())
+ return false;
+
+ const String error_message = GetCapturedStderr();
+
+ bool success = false;
+ Message buffer;
+
+ buffer << "Death test: " << statement() << "\n";
+ switch (outcome()) {
+ case LIVED:
+ buffer << " Result: failed to die.\n"
+ << " Error msg: " << error_message;
+ break;
+ case RETURNED:
+ buffer << " Result: illegal return in test statement.\n"
+ << " Error msg: " << error_message;
+ break;
+ case DIED:
+ if (status_ok) {
+ const bool matched = RE::PartialMatch(error_message.c_str(), *regex());
+ if (matched) {
+ success = true;
+ } else {
+ buffer << " Result: died but not with expected error.\n"
+ << " Expected: " << regex()->pattern() << "\n"
+ << "Actual msg: " << error_message;
+ }
+ } else {
+ buffer << " Result: died but not with expected exit code:\n"
+ << " " << ExitSummary(status()) << "\n";
+ }
+ break;
+ case IN_PROGRESS:
+ default:
+ GTEST_LOG_(FATAL)
+ << "DeathTest::Passed somehow called before conclusion of test";
+ }
+
+ DeathTest::set_last_death_test_message(buffer.GetString());
+ return success;
+}
+
+#if GTEST_OS_WINDOWS
+// WindowsDeathTest implements death tests on Windows. Due to the
+// specifics of starting new processes on Windows, death tests there are
+// always threadsafe, and Google Test considers the
+// --gtest_death_test_style=fast setting to be equivalent to
+// --gtest_death_test_style=threadsafe there.
+//
+// A few implementation notes: Like the Linux version, the Windows
+// implementation uses pipes for child-to-parent communication. But due to
+// the specifics of pipes on Windows, some extra steps are required:
+//
+// 1. The parent creates a communication pipe and stores handles to both
+// ends of it.
+// 2. The parent starts the child and provides it with the information
+// necessary to acquire the handle to the write end of the pipe.
+// 3. The child acquires the write end of the pipe and signals the parent
+// using a Windows event.
+// 4. Now the parent can release the write end of the pipe on its side. If
+// this is done before step 3, the object's reference count goes down to
+// 0 and it is destroyed, preventing the child from acquiring it. The
+// parent now has to release it, or read operations on the read end of
+// the pipe will not return when the child terminates.
+// 5. The parent reads the child's outcome code and any error messages
+// from the pipe, together with the child's stderr output, and then
+// determines whether to fail the test.
+//
+// Note: to distinguish Win32 API calls from the local method and function
+// calls, the former are explicitly resolved in the global namespace.
+//
+class WindowsDeathTest : public DeathTestImpl {
+ public:
+ WindowsDeathTest(const char* statement,
+ const RE* regex,
+ const char* file,
+ int line)
+ : DeathTestImpl(statement, regex), file_(file), line_(line) {}
+
+ // All of these virtual functions are inherited from DeathTest.
+ virtual int Wait();
+ virtual TestRole AssumeRole();
+
+ private:
+ // The name of the file in which the death test is located.
+ const char* const file_;
+ // The line number on which the death test is located.
+ const int line_;
+ // Handle to the write end of the pipe to the child process.
+ AutoHandle write_handle_;
+ // Child process handle.
+ AutoHandle child_handle_;
+ // Event the child process uses to signal the parent that it has
+ // acquired the handle to the write end of the pipe. After seeing this
+ // event the parent can release its own handles to make sure its
+ // ReadFile() calls return when the child terminates.
+ AutoHandle event_handle_;
+};
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists. As a side effect, sets the
+// outcome data member.
+int WindowsDeathTest::Wait() {
+ if (!spawned())
+ return 0;
+
+ // Wait until the child either signals that it has acquired the write end
+ // of the pipe or it dies.
+ const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() };
+ switch (::WaitForMultipleObjects(2,
+ wait_handles,
+ FALSE, // Waits for any of the handles.
+ INFINITE)) {
+ case WAIT_OBJECT_0:
+ case WAIT_OBJECT_0 + 1:
+ break;
+ default:
+ GTEST_DEATH_TEST_CHECK_(false); // Should not get here.
+ }
+
+ // The child has acquired the write end of the pipe or exited.
+ // We release the handle on our side and continue.
+ write_handle_.Reset();
+ event_handle_.Reset();
+
+ ReadAndInterpretStatusByte();
+
+ // Waits for the child process to exit if it hasn't already. This
+ // returns immediately if the child has already exited, regardless of
+ // whether previous calls to WaitForMultipleObjects synchronized on this
+ // handle or not.
+ GTEST_DEATH_TEST_CHECK_(
+ WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(),
+ INFINITE));
+ DWORD status;
+ GTEST_DEATH_TEST_CHECK_(::GetExitCodeProcess(child_handle_.Get(), &status)
+ != FALSE);
+ child_handle_.Reset();
+ set_status(static_cast<int>(status));
+ return this->status();
+}
+
+// The AssumeRole process for a Windows death test. It creates a child
+// process with the same executable as the current process to run the
+// death test. The child process is given the --gtest_filter and
+// --gtest_internal_run_death_test flags such that it knows to run the
+// current death test only.
+DeathTest::TestRole WindowsDeathTest::AssumeRole() {
+ const UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const TestInfo* const info = impl->current_test_info();
+ const int death_test_index = info->result()->death_test_count();
+
+ if (flag != NULL) {
+ // ParseInternalRunDeathTestFlag() has performed all the necessary
+ // processing.
+ set_write_fd(flag->write_fd());
+ return EXECUTE_TEST;
+ }
+
+ // WindowsDeathTest uses an anonymous pipe to communicate results of
+ // a death test.
+ SECURITY_ATTRIBUTES handles_are_inheritable = {
+ sizeof(SECURITY_ATTRIBUTES), NULL, TRUE };
+ HANDLE read_handle, write_handle;
+ GTEST_DEATH_TEST_CHECK_(
+ ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable,
+ 0) // Default buffer size.
+ != FALSE);
+ set_read_fd(::_open_osfhandle(reinterpret_cast<intptr_t>(read_handle),
+ O_RDONLY));
+ write_handle_.Reset(write_handle);
+ event_handle_.Reset(::CreateEvent(
+ &handles_are_inheritable,
+ TRUE, // The event is manual-reset: it stays signaled until explicitly reset.
+ FALSE, // The initial state is non-signalled.
+ NULL)); // The event is unnamed.
+ GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL);
+ const String filter_flag = String::Format("--%s%s=%s.%s",
+ GTEST_FLAG_PREFIX_, kFilterFlag,
+ info->test_case_name(),
+ info->name());
+ const String internal_flag = String::Format(
+ "--%s%s=%s|%d|%d|%u|%Iu|%Iu",
+ GTEST_FLAG_PREFIX_,
+ kInternalRunDeathTestFlag,
+ file_, line_,
+ death_test_index,
+ static_cast<unsigned int>(::GetCurrentProcessId()),
+ // size_t has the same width as pointers on both 32-bit and 64-bit
+ // Windows platforms.
+ // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx.
+ reinterpret_cast<size_t>(write_handle),
+ reinterpret_cast<size_t>(event_handle_.Get()));
+
+ char executable_path[_MAX_PATH + 1]; // NOLINT
+ GTEST_DEATH_TEST_CHECK_(
+ _MAX_PATH + 1 != ::GetModuleFileNameA(NULL,
+ executable_path,
+ _MAX_PATH));
+
+ String command_line = String::Format("%s %s \"%s\"",
+ ::GetCommandLineA(),
+ filter_flag.c_str(),
+ internal_flag.c_str());
+
+ DeathTest::set_last_death_test_message("");
+
+ CaptureStderr();
+ // Flush the log buffers since the log streams are shared with the child.
+ FlushInfoLog();
+
+ // The child process will share the standard handles with the parent.
+ STARTUPINFOA startup_info;
+ memset(&startup_info, 0, sizeof(STARTUPINFO));
+ startup_info.dwFlags = STARTF_USESTDHANDLES;
+ startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE);
+ startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
+ startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+
+ PROCESS_INFORMATION process_info;
+ GTEST_DEATH_TEST_CHECK_(::CreateProcessA(
+ executable_path,
+ const_cast<char*>(command_line.c_str()),
+ NULL, // Returned process handle is not inheritable.
+ NULL, // Returned thread handle is not inheritable.
+ TRUE, // Child inherits all inheritable handles (for write_handle_).
+ 0x0, // Default creation flags.
+ NULL, // Inherit the parent's environment.
+ UnitTest::GetInstance()->original_working_dir(),
+ &startup_info,
+ &process_info) != FALSE);
+ child_handle_.Reset(process_info.hProcess);
+ ::CloseHandle(process_info.hThread);
+ set_spawned(true);
+ return OVERSEE_TEST;
+}
+#else // We are not on Windows.
+
+// ForkingDeathTest provides implementations for most of the abstract
+// methods of the DeathTest interface. Only the AssumeRole method is
+// left undefined.
+class ForkingDeathTest : public DeathTestImpl {
+ public:
+ ForkingDeathTest(const char* statement, const RE* regex);
+
+ // All of these virtual functions are inherited from DeathTest.
+ virtual int Wait();
+
+ protected:
+ void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
+
+ private:
+ // PID of child process during death test; 0 in the child process itself.
+ pid_t child_pid_;
+};
+
+// Constructs a ForkingDeathTest.
+ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex)
+ : DeathTestImpl(a_statement, a_regex),
+ child_pid_(-1) {}
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists. As a side effect, sets the
+// outcome data member.
+int ForkingDeathTest::Wait() {
+ if (!spawned())
+ return 0;
+
+ ReadAndInterpretStatusByte();
+
+ int status_value;
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0));
+ set_status(status_value);
+ return status_value;
+}
+
+// A concrete death test class that forks, then immediately runs the test
+// in the child process.
+class NoExecDeathTest : public ForkingDeathTest {
+ public:
+ NoExecDeathTest(const char* a_statement, const RE* a_regex) :
+ ForkingDeathTest(a_statement, a_regex) { }
+ virtual TestRole AssumeRole();
+};
+
+// The AssumeRole process for a fork-and-run death test. It implements a
+// straightforward fork, with a simple pipe to transmit the status byte.
+DeathTest::TestRole NoExecDeathTest::AssumeRole() {
+ const size_t thread_count = GetThreadCount();
+ if (thread_count != 1) {
+ GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count);
+ }
+
+ int pipe_fd[2];
+ GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+
+ DeathTest::set_last_death_test_message("");
+ CaptureStderr();
+ // When we fork the process below, the log file buffers are copied, but the
+ // file descriptors are shared. We flush all log files here so that closing
+ // the file descriptors in the child process doesn't throw off the
+ // synchronization between descriptors and buffers in the parent process.
+ // This is as close to the fork as possible to avoid a race condition in case
+ // there are multiple threads running before the death test, and another
+ // thread writes to the log file.
+ FlushInfoLog();
+
+ const pid_t child_pid = fork();
+ GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+ set_child_pid(child_pid);
+ if (child_pid == 0) {
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0]));
+ set_write_fd(pipe_fd[1]);
+ // Redirects all logging to stderr in the child process to prevent
+ // concurrent writes to the log files. We capture stderr in the parent
+ // process and append the child process' output to a log.
+ LogToStderr();
+ // Event forwarding to the listeners of the event listener API must be shut
+ // down in death test subprocesses.
+ GetUnitTestImpl()->listeners()->SuppressEventForwarding();
+ return EXECUTE_TEST;
+ } else {
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+ set_read_fd(pipe_fd[0]);
+ set_spawned(true);
+ return OVERSEE_TEST;
+ }
+}
+
+// A concrete death test class that forks and re-executes the main
+// program from the beginning, with command-line flags set that cause
+// only this specific death test to be run.
+class ExecDeathTest : public ForkingDeathTest {
+ public:
+ ExecDeathTest(const char* a_statement, const RE* a_regex,
+ const char* file, int line) :
+ ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { }
+ virtual TestRole AssumeRole();
+ private:
+ // The name of the file in which the death test is located.
+ const char* const file_;
+ // The line number on which the death test is located.
+ const int line_;
+};
+
+// Utility class for accumulating command-line arguments.
+class Arguments {
+ public:
+ Arguments() {
+ args_.push_back(NULL);
+ }
+
+ ~Arguments() {
+ for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
+ ++i) {
+ free(*i);
+ }
+ }
+ void AddArgument(const char* argument) {
+ args_.insert(args_.end() - 1, posix::StrDup(argument));
+ }
+
+ template <typename Str>
+ void AddArguments(const ::std::vector<Str>& arguments) {
+ for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
+ i != arguments.end();
+ ++i) {
+ args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
+ }
+ }
+ char* const* Argv() {
+ return &args_[0];
+ }
+ private:
+ std::vector<char*> args_;
+};
+
+// A struct that encompasses the arguments to the child process of a
+// threadsafe-style death test process.
+struct ExecDeathTestArgs {
+ char* const* argv; // Command-line arguments for the child's call to exec
+ int close_fd; // File descriptor to close; the read end of a pipe
+};
+
+#if GTEST_OS_MAC
+inline char** GetEnviron() {
+ // When Google Test is built as a framework on MacOS X, the environ variable
+ // is unavailable. Apple's documentation (man environ) recommends using
+ // _NSGetEnviron() instead.
+ return *_NSGetEnviron();
+}
+#else
+// Some POSIX platforms expect you to declare environ. extern "C" makes
+// it reside in the global namespace.
+extern "C" char** environ;
+inline char** GetEnviron() { return environ; }
+#endif // GTEST_OS_MAC
+
+// The main function for a threadsafe-style death test child process.
+// This function is called in a clone()-ed process and thus must avoid
+// any potentially unsafe operations like malloc or libc functions.
+static int ExecDeathTestChildMain(void* child_arg) {
+ ExecDeathTestArgs* const args = static_cast<ExecDeathTestArgs*>(child_arg);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd));
+
+ // We need to execute the test program in the same environment where
+ // it was originally invoked. Therefore we change to the original
+ // working directory first.
+ const char* const original_dir =
+ UnitTest::GetInstance()->original_working_dir();
+ // We can safely call chdir() as it's a direct system call.
+ if (chdir(original_dir) != 0) {
+ DeathTestAbort(String::Format("chdir(\"%s\") failed: %s",
+ original_dir,
+ GetLastErrnoDescription().c_str()));
+ return EXIT_FAILURE;
+ }
+
+ // We can safely call execve() as it's a direct system call. We
+ // cannot use execvp() as it's a libc function and thus potentially
+ // unsafe. Since execve() doesn't search the PATH, the user must
+ // invoke the test program via a valid path that contains at least
+ // one path separator.
+ execve(args->argv[0], args->argv, GetEnviron());
+ DeathTestAbort(String::Format("execve(%s, ...) in %s failed: %s",
+ args->argv[0],
+ original_dir,
+ GetLastErrnoDescription().c_str()));
+ return EXIT_FAILURE;
+}
+
+// Two utility routines that together determine the direction the stack
+// grows.
+// This could be accomplished more elegantly by a single recursive
+// function, but we want to guard against the unlikely possibility of
+// a smart compiler optimizing the recursion away.
+bool StackLowerThanAddress(const void* ptr) {
+ int dummy;
+ return &dummy < ptr;
+}
+
+bool StackGrowsDown() {
+ int dummy;
+ return StackLowerThanAddress(&dummy);
+}
+
+// A threadsafe implementation of fork(2) for threadsafe-style death tests
+// that uses clone(2). It dies with an error message if anything goes
+// wrong.
+static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
+ ExecDeathTestArgs args = { argv, close_fd };
+ pid_t child_pid = -1;
+
+#if GTEST_HAS_CLONE
+ const bool use_fork = GTEST_FLAG(death_test_use_fork);
+
+ if (!use_fork) {
+ static const bool stack_grows_down = StackGrowsDown();
+ const size_t stack_size = getpagesize();
+ // MAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead.
+ void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE, -1, 0);
+ GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED);
+ void* const stack_top =
+ static_cast<char*>(stack) + (stack_grows_down ? stack_size : 0);
+
+ child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args);
+
+ GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);
+ }
+#else
+ const bool use_fork = true;
+#endif // GTEST_HAS_CLONE
+
+ if (use_fork && (child_pid = fork()) == 0) {
+ ExecDeathTestChildMain(&args);
+ _exit(0);
+ }
+
+ GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+ return child_pid;
+}
+
+// The AssumeRole process for a fork-and-exec death test. It re-executes the
+// main program from the beginning, setting the --gtest_filter
+// and --gtest_internal_run_death_test flags to cause only the current
+// death test to be re-run.
+DeathTest::TestRole ExecDeathTest::AssumeRole() {
+ const UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const TestInfo* const info = impl->current_test_info();
+ const int death_test_index = info->result()->death_test_count();
+
+ if (flag != NULL) {
+ set_write_fd(flag->write_fd());
+ return EXECUTE_TEST;
+ }
+
+ int pipe_fd[2];
+ GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+ // Clear the close-on-exec flag on the write end of the pipe, lest
+ // it be closed when the child process does an exec:
+ GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1);
+
+ const String filter_flag =
+ String::Format("--%s%s=%s.%s",
+ GTEST_FLAG_PREFIX_, kFilterFlag,
+ info->test_case_name(), info->name());
+ const String internal_flag =
+ String::Format("--%s%s=%s|%d|%d|%d",
+ GTEST_FLAG_PREFIX_, kInternalRunDeathTestFlag,
+ file_, line_, death_test_index, pipe_fd[1]);
+ Arguments args;
+ args.AddArguments(GetArgvs());
+ args.AddArgument(filter_flag.c_str());
+ args.AddArgument(internal_flag.c_str());
+
+ DeathTest::set_last_death_test_message("");
+
+ CaptureStderr();
+ // See the comment in NoExecDeathTest::AssumeRole for why the next line
+ // is necessary.
+ FlushInfoLog();
+
+ const pid_t child_pid = ExecDeathTestFork(args.Argv(), pipe_fd[0]);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+ set_child_pid(child_pid);
+ set_read_fd(pipe_fd[0]);
+ set_spawned(true);
+ return OVERSEE_TEST;
+}
+
+#endif // !GTEST_OS_WINDOWS
+
+// Creates a concrete DeathTest-derived class that depends on the
+// --gtest_death_test_style flag, and sets the pointer pointed to
+// by the "test" argument to its address. If the test should be
+// skipped, sets that pointer to NULL. Returns true, unless the
+// flag is set to an invalid value.
+bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex,
+ const char* file, int line,
+ DeathTest** test) {
+ UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const int death_test_index = impl->current_test_info()
+ ->increment_death_test_count();
+
+ if (flag != NULL) {
+ if (death_test_index > flag->index()) {
+ DeathTest::set_last_death_test_message(String::Format(
+ "Death test count (%d) somehow exceeded expected maximum (%d)",
+ death_test_index, flag->index()));
+ return false;
+ }
+
+ if (!(flag->file() == file && flag->line() == line &&
+ flag->index() == death_test_index)) {
+ *test = NULL;
+ return true;
+ }
+ }
+
+#if GTEST_OS_WINDOWS
+ if (GTEST_FLAG(death_test_style) == "threadsafe" ||
+ GTEST_FLAG(death_test_style) == "fast") {
+ *test = new WindowsDeathTest(statement, regex, file, line);
+ }
+#else
+ if (GTEST_FLAG(death_test_style) == "threadsafe") {
+ *test = new ExecDeathTest(statement, regex, file, line);
+ } else if (GTEST_FLAG(death_test_style) == "fast") {
+ *test = new NoExecDeathTest(statement, regex);
+ }
+#endif // GTEST_OS_WINDOWS
+ else { // NOLINT - this is more readable than unbalanced brackets inside #if.
+ DeathTest::set_last_death_test_message(String::Format(
+ "Unknown death test style \"%s\" encountered",
+ GTEST_FLAG(death_test_style).c_str()));
+ return false;
+ }
+
+ return true;
+}
+
+// Splits a given string on a given delimiter, populating a given
+// vector with the fields. GTEST_HAS_DEATH_TEST implies that we have
+// ::std::string, so we can use it here.
+static void SplitString(const ::std::string& str, char delimiter,
+ ::std::vector< ::std::string>* dest) {
+ ::std::vector< ::std::string> parsed;
+ ::std::string::size_type pos = 0;
+ while (::testing::internal::AlwaysTrue()) {
+ const ::std::string::size_type colon = str.find(delimiter, pos);
+ if (colon == ::std::string::npos) {
+ parsed.push_back(str.substr(pos));
+ break;
+ } else {
+ parsed.push_back(str.substr(pos, colon - pos));
+ pos = colon + 1;
+ }
+ }
+ dest->swap(parsed);
+}
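+// Usage sketch (illustrative values): splitting a death test flag value,
+//   ::std::vector< ::std::string> fields;
+//   SplitString("my_test.cc|123|1|4", '|', &fields);
+// leaves fields holding {"my_test.cc", "123", "1", "4"}; a trailing
+// delimiter would add one more, empty, field.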
+
+#if GTEST_OS_WINDOWS
+// Recreates the pipe and event handles from the provided parameters,
+// signals the event, and returns a file descriptor wrapped around the pipe
+// handle. This function is called in the child process only.
+int GetStatusFileDescriptor(unsigned int parent_process_id,
+ size_t write_handle_as_size_t,
+ size_t event_handle_as_size_t) {
+ AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE,
+ FALSE, // Non-inheritable.
+ parent_process_id));
+ if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) {
+ DeathTestAbort(String::Format("Unable to open parent process %u",
+ parent_process_id));
+ }
+
+ // TODO(vladl@google.com): Replace the following check with a
+ // compile-time assertion when available.
+ GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t));
+
+ const HANDLE write_handle =
+ reinterpret_cast<HANDLE>(write_handle_as_size_t);
+ HANDLE dup_write_handle;
+
+  // The newly initialized handle is accessible only in the parent
+  // The newly initialized handle is accessible only in the parent
+ // process. To obtain one accessible within the child, we need to use
+ // DuplicateHandle.
+ if (!::DuplicateHandle(parent_process_handle.Get(), write_handle,
+ ::GetCurrentProcess(), &dup_write_handle,
+ 0x0, // Requested privileges ignored since
+ // DUPLICATE_SAME_ACCESS is used.
+                         FALSE, // Request non-inheritable handle.
+ DUPLICATE_SAME_ACCESS)) {
+ DeathTestAbort(String::Format(
+ "Unable to duplicate the pipe handle %Iu from the parent process %u",
+ write_handle_as_size_t, parent_process_id));
+ }
+
+ const HANDLE event_handle = reinterpret_cast<HANDLE>(event_handle_as_size_t);
+ HANDLE dup_event_handle;
+
+ if (!::DuplicateHandle(parent_process_handle.Get(), event_handle,
+ ::GetCurrentProcess(), &dup_event_handle,
+ 0x0,
+ FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ DeathTestAbort(String::Format(
+ "Unable to duplicate the event handle %Iu from the parent process %u",
+ event_handle_as_size_t, parent_process_id));
+ }
+
+ const int write_fd =
+ ::_open_osfhandle(reinterpret_cast<intptr_t>(dup_write_handle), O_APPEND);
+ if (write_fd == -1) {
+ DeathTestAbort(String::Format(
+ "Unable to convert pipe handle %Iu to a file descriptor",
+ write_handle_as_size_t));
+ }
+
+ // Signals the parent that the write end of the pipe has been acquired
+ // so the parent can release its own write end.
+ ::SetEvent(dup_event_handle);
+
+ return write_fd;
+}
+#endif // GTEST_OS_WINDOWS
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
+ if (GTEST_FLAG(internal_run_death_test) == "") return NULL;
+
+ // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
+ // can use it here.
+ int line = -1;
+ int index = -1;
+ ::std::vector< ::std::string> fields;
+ SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields);
+ int write_fd = -1;
+
+#if GTEST_OS_WINDOWS
+ unsigned int parent_process_id = 0;
+ size_t write_handle_as_size_t = 0;
+ size_t event_handle_as_size_t = 0;
+
+ if (fields.size() != 6
+ || !ParseNaturalNumber(fields[1], &line)
+ || !ParseNaturalNumber(fields[2], &index)
+ || !ParseNaturalNumber(fields[3], &parent_process_id)
+ || !ParseNaturalNumber(fields[4], &write_handle_as_size_t)
+ || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) {
+ DeathTestAbort(String::Format(
+ "Bad --gtest_internal_run_death_test flag: %s",
+ GTEST_FLAG(internal_run_death_test).c_str()));
+ }
+ write_fd = GetStatusFileDescriptor(parent_process_id,
+ write_handle_as_size_t,
+ event_handle_as_size_t);
+#else
+ if (fields.size() != 4
+ || !ParseNaturalNumber(fields[1], &line)
+ || !ParseNaturalNumber(fields[2], &index)
+ || !ParseNaturalNumber(fields[3], &write_fd)) {
+ DeathTestAbort(String::Format(
+ "Bad --gtest_internal_run_death_test flag: %s",
+ GTEST_FLAG(internal_run_death_test).c_str()));
+ }
+#endif // GTEST_OS_WINDOWS
+ return new InternalRunDeathTestFlag(fields[0], line, index, write_fd);
+}
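+// Flag format sketch (illustrative values): on POSIX the flag carries four
+// '|'-separated fields, file|line|index|write_fd, e.g.
+//   --gtest_internal_run_death_test=my_test.cc|123|1|4
+// On Windows it carries six fields, where the parent process id and the
+// write/event handle values take the place of the plain file descriptor.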
+
+} // namespace internal
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keith.ray@gmail.com (Keith Ray)
+
+
+#include <stdlib.h>
+
+#if GTEST_OS_WINDOWS_MOBILE
+#include <windows.h>
+#elif GTEST_OS_WINDOWS
+#include <direct.h>
+#include <io.h>
+#elif GTEST_OS_SYMBIAN
+// Symbian OpenC has PATH_MAX in sys/syslimits.h
+#include <sys/syslimits.h>
+#else
+#include <limits.h>
+#include <climits> // Some Linux distributions define PATH_MAX here.
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+#if GTEST_OS_WINDOWS
+#define GTEST_PATH_MAX_ _MAX_PATH
+#elif defined(PATH_MAX)
+#define GTEST_PATH_MAX_ PATH_MAX
+#elif defined(_XOPEN_PATH_MAX)
+#define GTEST_PATH_MAX_ _XOPEN_PATH_MAX
+#else
+#define GTEST_PATH_MAX_ _POSIX_PATH_MAX
+#endif // GTEST_OS_WINDOWS
+
+
+namespace testing {
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+// On Windows, '\\' is the standard path separator, but many tools and the
+// Windows API also accept '/' as an alternate path separator. Unless otherwise
+// noted, a file path can contain either kind of path separators, or a mixture
+// of them.
+const char kPathSeparator = '\\';
+const char kAlternatePathSeparator = '/';
+const char kPathSeparatorString[] = "\\";
+const char kAlternatePathSeparatorString[] = "/";
+#if GTEST_OS_WINDOWS_MOBILE
+// Windows CE doesn't have a current directory. You should not use
+// the current directory in tests on Windows CE, but this at least
+// provides a reasonable fallback.
+const char kCurrentDirectoryString[] = "\\";
+// Windows CE doesn't define INVALID_FILE_ATTRIBUTES
+const DWORD kInvalidFileAttributes = 0xffffffff;
+#else
+const char kCurrentDirectoryString[] = ".\\";
+#endif // GTEST_OS_WINDOWS_MOBILE
+#else
+const char kPathSeparator = '/';
+const char kPathSeparatorString[] = "/";
+const char kCurrentDirectoryString[] = "./";
+#endif // GTEST_OS_WINDOWS
+
+// Returns whether the given character is a valid path separator.
+static bool IsPathSeparator(char c) {
+#if GTEST_HAS_ALT_PATH_SEP_
+ return (c == kPathSeparator) || (c == kAlternatePathSeparator);
+#else
+ return c == kPathSeparator;
+#endif
+}
+
+// Returns the current working directory, or "" if unsuccessful.
+FilePath FilePath::GetCurrentDir() {
+#if GTEST_OS_WINDOWS_MOBILE
+ // Windows CE doesn't have a current directory, so we just return
+ // something reasonable.
+ return FilePath(kCurrentDirectoryString);
+#elif GTEST_OS_WINDOWS
+ char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+ return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
+#else
+ char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+ return FilePath(getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
+#endif // GTEST_OS_WINDOWS_MOBILE
+}
+
+// Returns a copy of the FilePath with the case-insensitive extension removed.
+// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+// FilePath("dir/file"). If a case-insensitive extension is not
+// found, returns a copy of the original FilePath.
+FilePath FilePath::RemoveExtension(const char* extension) const {
+ String dot_extension(String::Format(".%s", extension));
+ if (pathname_.EndsWithCaseInsensitive(dot_extension.c_str())) {
+    return FilePath(String(pathname_.c_str(),
+                           pathname_.length() - dot_extension.length()));
+ }
+ return *this;
+}
+
+// Returns a pointer to the last occurrence of a valid path separator in
+// the FilePath. On Windows, for example, both '/' and '\' are valid path
+// separators. Returns NULL if no path separator was found.
+const char* FilePath::FindLastPathSeparator() const {
+ const char* const last_sep = strrchr(c_str(), kPathSeparator);
+#if GTEST_HAS_ALT_PATH_SEP_
+ const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator);
+ // Comparing two pointers of which only one is NULL is undefined.
+ if (last_alt_sep != NULL &&
+ (last_sep == NULL || last_alt_sep > last_sep)) {
+ return last_alt_sep;
+ }
+#endif
+ return last_sep;
+}
+
+// Returns a copy of the FilePath with the directory part removed.
+// Example: FilePath("path/to/file").RemoveDirectoryName() returns
+// FilePath("file"). If there is no directory part ("just_a_file"), it returns
+// the FilePath unmodified. If there is no file part ("just_a_dir/") it
+// returns an empty FilePath ("").
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveDirectoryName() const {
+ const char* const last_sep = FindLastPathSeparator();
+ return last_sep ? FilePath(String(last_sep + 1)) : *this;
+}
+
+// RemoveFileName returns the directory path with the filename removed.
+// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+// If the FilePath is "a_file" or "/a_file", RemoveFileName returns
+// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+// not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveFileName() const {
+ const char* const last_sep = FindLastPathSeparator();
+ String dir;
+ if (last_sep) {
+ dir = String(c_str(), last_sep + 1 - c_str());
+ } else {
+ dir = kCurrentDirectoryString;
+ }
+ return FilePath(dir);
+}
+
+
+// Helper functions for naming files in a directory for xml output.
+
+// Given directory = "dir", base_name = "test", number = 0,
+// extension = "xml", returns "dir/test.xml". If number is greater
+// than zero (e.g., 12), returns "dir/test_12.xml".
+// On Windows platform, uses \ as the separator rather than /.
+FilePath FilePath::MakeFileName(const FilePath& directory,
+ const FilePath& base_name,
+ int number,
+ const char* extension) {
+ String file;
+ if (number == 0) {
+ file = String::Format("%s.%s", base_name.c_str(), extension);
+ } else {
+ file = String::Format("%s_%d.%s", base_name.c_str(), number, extension);
+ }
+ return ConcatPaths(directory, FilePath(file));
+}
+
+// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml".
+// On Windows, uses \ as the separator rather than /.
+FilePath FilePath::ConcatPaths(const FilePath& directory,
+ const FilePath& relative_path) {
+ if (directory.IsEmpty())
+ return relative_path;
+ const FilePath dir(directory.RemoveTrailingPathSeparator());
+ return FilePath(String::Format("%s%c%s", dir.c_str(), kPathSeparator,
+ relative_path.c_str()));
+}
+
+// Returns true if pathname describes something findable in the file-system,
+// either a file, directory, or whatever.
+bool FilePath::FileOrDirectoryExists() const {
+#if GTEST_OS_WINDOWS_MOBILE
+ LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str());
+ const DWORD attributes = GetFileAttributes(unicode);
+ delete [] unicode;
+ return attributes != kInvalidFileAttributes;
+#else
+ posix::StatStruct file_stat;
+ return posix::Stat(pathname_.c_str(), &file_stat) == 0;
+#endif // GTEST_OS_WINDOWS_MOBILE
+}
+
+// Returns true if pathname describes a directory in the file-system
+// that exists.
+bool FilePath::DirectoryExists() const {
+ bool result = false;
+#if GTEST_OS_WINDOWS
+ // Don't strip off trailing separator if path is a root directory on
+ // Windows (like "C:\\").
+ const FilePath& path(IsRootDirectory() ? *this :
+ RemoveTrailingPathSeparator());
+#else
+ const FilePath& path(*this);
+#endif
+
+#if GTEST_OS_WINDOWS_MOBILE
+ LPCWSTR unicode = String::AnsiToUtf16(path.c_str());
+ const DWORD attributes = GetFileAttributes(unicode);
+ delete [] unicode;
+ if ((attributes != kInvalidFileAttributes) &&
+ (attributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ result = true;
+ }
+#else
+ posix::StatStruct file_stat;
+ result = posix::Stat(path.c_str(), &file_stat) == 0 &&
+ posix::IsDir(file_stat);
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+ return result;
+}
+
+// Returns true if pathname describes a root directory. (Windows has one
+// root directory per disk drive.)
+bool FilePath::IsRootDirectory() const {
+#if GTEST_OS_WINDOWS
+ // TODO(wan@google.com): on Windows a network share like
+ // \\server\share can be a root directory, although it cannot be the
+ // current directory. Handle this properly.
+ return pathname_.length() == 3 && IsAbsolutePath();
+#else
+ return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]);
+#endif
+}
+
+// Returns true if pathname describes an absolute path.
+bool FilePath::IsAbsolutePath() const {
+ const char* const name = pathname_.c_str();
+#if GTEST_OS_WINDOWS
+ return pathname_.length() >= 3 &&
+ ((name[0] >= 'a' && name[0] <= 'z') ||
+ (name[0] >= 'A' && name[0] <= 'Z')) &&
+ name[1] == ':' &&
+ IsPathSeparator(name[2]);
+#else
+ return IsPathSeparator(name[0]);
+#endif
+}
+
+// Returns a pathname for a file that does not currently exist. The pathname
+// will be directory/base_name.extension or
+// directory/base_name_<number>.extension if directory/base_name.extension
+// already exists. The number will be incremented until a pathname is found
+// that does not already exist.
+// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
+// There could be a race condition if two or more processes are calling this
+// function at the same time -- they could both pick the same filename.
+FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
+ const FilePath& base_name,
+ const char* extension) {
+ FilePath full_pathname;
+ int number = 0;
+ do {
+ full_pathname.Set(MakeFileName(directory, base_name, number++, extension));
+ } while (full_pathname.FileOrDirectoryExists());
+ return full_pathname;
+}
+
+// Returns true if FilePath ends with a path separator, which indicates that
+// it is intended to represent a directory. Returns false otherwise.
+// This does NOT check that a directory (or file) actually exists.
+bool FilePath::IsDirectory() const {
+ return !pathname_.empty() &&
+ IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]);
+}
+
+// Create directories so that path exists. Returns true if successful or if
+// the directories already exist; returns false if unable to create directories
+// for any reason.
+bool FilePath::CreateDirectoriesRecursively() const {
+ if (!this->IsDirectory()) {
+ return false;
+ }
+
+ if (pathname_.length() == 0 || this->DirectoryExists()) {
+ return true;
+ }
+
+ const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName());
+ return parent.CreateDirectoriesRecursively() && this->CreateFolder();
+}
+
+// Create the directory so that path exists. Returns true if successful or
+// if the directory already exists; returns false if unable to create the
+// directory for any reason, including if the parent directory does not
+// exist. Not named "CreateDirectory" because that's a macro on Windows.
+bool FilePath::CreateFolder() const {
+#if GTEST_OS_WINDOWS_MOBILE
+ FilePath removed_sep(this->RemoveTrailingPathSeparator());
+ LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str());
+ int result = CreateDirectory(unicode, NULL) ? 0 : -1;
+ delete [] unicode;
+#elif GTEST_OS_WINDOWS
+ int result = _mkdir(pathname_.c_str());
+#else
+ int result = mkdir(pathname_.c_str(), 0777);
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+ if (result == -1) {
+ return this->DirectoryExists(); // An error is OK if the directory exists.
+ }
+ return true; // No error.
+}
+
+// If input name has a trailing separator character, remove it and return the
+// name, otherwise return the name string unmodified.
+// On Windows platform, uses \ as the separator, other platforms use /.
+FilePath FilePath::RemoveTrailingPathSeparator() const {
+ return IsDirectory()
+ ? FilePath(String(pathname_.c_str(), pathname_.length() - 1))
+ : *this;
+}
+
+// Removes any redundant separators that might be in the pathname.
+// For example, "bar///foo" becomes "bar/foo". Does not eliminate other
+// redundancies that might be in a pathname involving "." or "..".
+// TODO(wan@google.com): handle Windows network shares (e.g. \\server\share).
+void FilePath::Normalize() {
+ if (pathname_.c_str() == NULL) {
+ pathname_ = "";
+ return;
+ }
+ const char* src = pathname_.c_str();
+ char* const dest = new char[pathname_.length() + 1];
+ char* dest_ptr = dest;
+ memset(dest_ptr, 0, pathname_.length() + 1);
+
+ while (*src != '\0') {
+ *dest_ptr = *src;
+ if (!IsPathSeparator(*src)) {
+ src++;
+ } else {
+#if GTEST_HAS_ALT_PATH_SEP_
+ if (*dest_ptr == kAlternatePathSeparator) {
+ *dest_ptr = kPathSeparator;
+ }
+#endif
+ while (IsPathSeparator(*src))
+ src++;
+ }
+ dest_ptr++;
+ }
+ *dest_ptr = '\0';
+ pathname_ = dest;
+ delete[] dest;
+}
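+// Example (illustrative): on Windows, where both separators are accepted,
+// Normalize() also canonicalizes them, so "foo/bar\baz//qux" becomes
+// "foo\bar\baz\qux"; on POSIX only the duplicate '/' removal applies.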
+
+} // namespace internal
+} // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+#include <limits.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#if GTEST_OS_WINDOWS_MOBILE
+#include <windows.h> // For TerminateProcess()
+#elif GTEST_OS_WINDOWS
+#include <io.h>
+#include <sys/stat.h>
+#else
+#include <unistd.h>
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+#if GTEST_OS_MAC
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#include <mach/vm_map.h>
+#endif // GTEST_OS_MAC
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+namespace internal {
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC and C++Builder do not provide a definition of STDERR_FILENO.
+const int kStdOutFileno = 1;
+const int kStdErrFileno = 2;
+#else
+const int kStdOutFileno = STDOUT_FILENO;
+const int kStdErrFileno = STDERR_FILENO;
+#endif // _MSC_VER
+
+#if GTEST_OS_MAC
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+ const task_t task = mach_task_self();
+ mach_msg_type_number_t thread_count;
+ thread_act_array_t thread_list;
+ const kern_return_t status = task_threads(task, &thread_list, &thread_count);
+ if (status == KERN_SUCCESS) {
+ // task_threads allocates resources in thread_list and we need to free them
+ // to avoid leaks.
+ vm_deallocate(task,
+ reinterpret_cast<vm_address_t>(thread_list),
+ sizeof(thread_t) * thread_count);
+ return static_cast<size_t>(thread_count);
+ } else {
+ return 0;
+ }
+}
+
+#else
+
+size_t GetThreadCount() {
+ // There's no portable way to detect the number of threads, so we just
+ // return 0 to indicate that we cannot detect it.
+ return 0;
+}
+
+#endif // GTEST_OS_MAC
+
+#if GTEST_USES_POSIX_RE
+
+// Implements RE. Currently only needed for death tests.
+
+RE::~RE() {
+ if (is_valid_) {
+ // regfree'ing an invalid regex might crash because the content
+    // of the regex is undefined. Since the regexes are essentially
+ // the same, one cannot be valid (or invalid) without the other
+ // being so too.
+ regfree(&partial_regex_);
+ regfree(&full_regex_);
+ }
+ free(const_cast<char*>(pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+ if (!re.is_valid_) return false;
+
+ regmatch_t match;
+ return regexec(&re.full_regex_, str, 1, &match, 0) == 0;
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+ if (!re.is_valid_) return false;
+
+ regmatch_t match;
+ return regexec(&re.partial_regex_, str, 1, &match, 0) == 0;
+}
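+// Usage sketch (illustrative): for RE re("[0-9]+"),
+//   RE::FullMatch("123", re)          // true  - the whole string matches
+//   RE::FullMatch("abc 123", re)      // false
+//   RE::PartialMatch("abc 123", re)   // true  - "123" matches a substring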
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+ pattern_ = posix::StrDup(regex);
+
+ // Reserves enough bytes to hold the regular expression used for a
+ // full match.
+ const size_t full_regex_len = strlen(regex) + 10;
+ char* const full_pattern = new char[full_regex_len];
+
+ snprintf(full_pattern, full_regex_len, "^(%s)$", regex);
+ is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0;
+ // We want to call regcomp(&partial_regex_, ...) even if the
+ // previous expression returns false. Otherwise partial_regex_ may
+  // not be properly initialized and may cause trouble when it's
+ // freed.
+ //
+  // Some implementations of POSIX regex (e.g. on at least some
+  // versions of Cygwin) don't accept the empty string as a valid
+ // regex. We change it to an equivalent form "()" to be safe.
+ if (is_valid_) {
+ const char* const partial_regex = (*regex == '\0') ? "()" : regex;
+ is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0;
+ }
+ EXPECT_TRUE(is_valid_)
+ << "Regular expression \"" << regex
+ << "\" is not a valid POSIX Extended regular expression.";
+
+ delete[] full_pattern;
+}
+
+#elif GTEST_USES_SIMPLE_RE
+
+// Returns true iff ch appears anywhere in str (excluding the
+// terminating '\0' character).
+bool IsInSet(char ch, const char* str) {
+ return ch != '\0' && strchr(str, ch) != NULL;
+}
+
+// Returns true iff ch belongs to the given classification. Unlike
+// similar functions in <ctype.h>, these aren't affected by the
+// current locale.
+bool IsDigit(char ch) { return '0' <= ch && ch <= '9'; }
+bool IsPunct(char ch) {
+ return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~");
+}
+bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
+bool IsWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
+bool IsWordChar(char ch) {
+ return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') ||
+ ('0' <= ch && ch <= '9') || ch == '_';
+}
+
+// Returns true iff "\\c" is a supported escape sequence.
+bool IsValidEscape(char c) {
+ return (IsPunct(c) || IsInSet(c, "dDfnrsStvwW"));
+}
+
+// Returns true iff the given atom (specified by escaped and pattern)
+// matches ch. The result is undefined if the atom is invalid.
+bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
+ if (escaped) { // "\\p" where p is pattern_char.
+ switch (pattern_char) {
+ case 'd': return IsDigit(ch);
+ case 'D': return !IsDigit(ch);
+ case 'f': return ch == '\f';
+ case 'n': return ch == '\n';
+ case 'r': return ch == '\r';
+ case 's': return IsWhiteSpace(ch);
+ case 'S': return !IsWhiteSpace(ch);
+ case 't': return ch == '\t';
+ case 'v': return ch == '\v';
+ case 'w': return IsWordChar(ch);
+ case 'W': return !IsWordChar(ch);
+ }
+ return IsPunct(pattern_char) && pattern_char == ch;
+ }
+
+ return (pattern_char == '.' && ch != '\n') || pattern_char == ch;
+}
+
+// Helper function used by ValidateRegex() to format error messages.
+String FormatRegexSyntaxError(const char* regex, int index) {
+ return (Message() << "Syntax error at index " << index
+ << " in simple regular expression \"" << regex << "\": ").GetString();
+}
+
+// Generates non-fatal failures and returns false if regex is invalid;
+// otherwise returns true.
+bool ValidateRegex(const char* regex) {
+ if (regex == NULL) {
+ // TODO(wan@google.com): fix the source file location in the
+ // assertion failures to match where the regex is used in user
+ // code.
+ ADD_FAILURE() << "NULL is not a valid simple regular expression.";
+ return false;
+ }
+
+ bool is_valid = true;
+
+ // True iff ?, *, or + can follow the previous atom.
+ bool prev_repeatable = false;
+ for (int i = 0; regex[i]; i++) {
+ if (regex[i] == '\\') { // An escape sequence
+ i++;
+ if (regex[i] == '\0') {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+ << "'\\' cannot appear at the end.";
+ return false;
+ }
+
+ if (!IsValidEscape(regex[i])) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+ << "invalid escape sequence \"\\" << regex[i] << "\".";
+ is_valid = false;
+ }
+ prev_repeatable = true;
+ } else { // Not an escape sequence.
+ const char ch = regex[i];
+
+ if (ch == '^' && i > 0) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'^' can only appear at the beginning.";
+ is_valid = false;
+ } else if (ch == '$' && regex[i + 1] != '\0') {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'$' can only appear at the end.";
+ is_valid = false;
+ } else if (IsInSet(ch, "()[]{}|")) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'" << ch << "' is unsupported.";
+ is_valid = false;
+ } else if (IsRepeat(ch) && !prev_repeatable) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'" << ch << "' can only follow a repeatable token.";
+ is_valid = false;
+ }
+
+ prev_repeatable = !IsInSet(ch, "^$?*+");
+ }
+ }
+
+ return is_valid;
+}
+
+// Matches a repeated regex atom followed by a valid simple regular
+// expression. The regex atom is defined as c if escaped is false,
+// or \c otherwise. repeat is the repetition meta character (?, *,
+// or +). The behavior is undefined if str contains too many
+// characters to be indexable by size_t, in which case the test will
+// probably time out anyway. We are fine with this limitation as
+// std::string has it too.
+bool MatchRepetitionAndRegexAtHead(
+ bool escaped, char c, char repeat, const char* regex,
+ const char* str) {
+ const size_t min_count = (repeat == '+') ? 1 : 0;
+ const size_t max_count = (repeat == '?') ? 1 :
+ static_cast<size_t>(-1) - 1;
+ // We cannot call numeric_limits::max() as it conflicts with the
+ // max() macro on Windows.
+
+ for (size_t i = 0; i <= max_count; ++i) {
+ // We know that the atom matches each of the first i characters in str.
+ if (i >= min_count && MatchRegexAtHead(regex, str + i)) {
+ // We have enough matches at the head, and the tail matches too.
+ // Since we only care about *whether* the pattern matches str
+ // (as opposed to *how* it matches), there is no need to find a
+ // greedy match.
+ return true;
+ }
+ if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i]))
+ return false;
+ }
+ return false;
+}
+
+// Returns true iff regex matches a prefix of str. regex must be a
+// valid simple regular expression and not start with "^", or the
+// result is undefined.
+bool MatchRegexAtHead(const char* regex, const char* str) {
+ if (*regex == '\0') // An empty regex matches a prefix of anything.
+ return true;
+
+ // "$" only matches the end of a string. Note that regex being
+ // valid guarantees that there's nothing after "$" in it.
+ if (*regex == '$')
+ return *str == '\0';
+
+ // Is the first thing in regex an escape sequence?
+ const bool escaped = *regex == '\\';
+ if (escaped)
+ ++regex;
+ if (IsRepeat(regex[1])) {
+ // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so
+ // here's an indirect recursion. It terminates as the regex gets
+ // shorter in each recursion.
+ return MatchRepetitionAndRegexAtHead(
+ escaped, regex[0], regex[1], regex + 2, str);
+ } else {
+ // regex isn't empty, isn't "$", and doesn't start with a
+ // repetition. We match the first atom of regex with the first
+ // character of str and recurse.
+ return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) &&
+ MatchRegexAtHead(regex + 1, str + 1);
+ }
+}
+
+// Returns true iff regex matches any substring of str. regex must be
+// a valid simple regular expression, or the result is undefined.
+//
+// The algorithm is recursive, but the recursion depth doesn't exceed
+// the regex length, so we won't need to worry about running out of
+// stack space normally. In rare cases the time complexity can be
+// exponential with respect to the regex length + the string length,
+// but usually it's much faster (often close to linear).
+bool MatchRegexAnywhere(const char* regex, const char* str) {
+ if (regex == NULL || str == NULL)
+ return false;
+
+ if (*regex == '^')
+ return MatchRegexAtHead(regex + 1, str);
+
+ // A successful match can be anywhere in str.
+ do {
+ if (MatchRegexAtHead(regex, str))
+ return true;
+ } while (*str++ != '\0');
+ return false;
+}
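+// Usage sketch (illustrative): with this simple engine,
+//   RE re("\\d+ cycles$");
+//   RE::PartialMatch("took 42 cycles", re);   // true
+//   RE::FullMatch("took 42 cycles", re);      // false - leading text unmatched
+// Only '.', character-class escapes such as \d, \w and \s, the anchors '^'
+// and '$', and the repeats '?', '*' and '+' are supported; grouping,
+// bracket expressions and alternation are rejected by ValidateRegex().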
+
+// Implements the RE class.
+
+RE::~RE() {
+ free(const_cast<char*>(pattern_));
+ free(const_cast<char*>(full_pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+ return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str);
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+ return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str);
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+ pattern_ = full_pattern_ = NULL;
+ if (regex != NULL) {
+ pattern_ = posix::StrDup(regex);
+ }
+
+ is_valid_ = ValidateRegex(regex);
+ if (!is_valid_) {
+ // No need to calculate the full pattern when the regex is invalid.
+ return;
+ }
+
+ const size_t len = strlen(regex);
+ // Reserves enough bytes to hold the regular expression used for a
+ // full match: we need space to prepend a '^', append a '$', and
+ // terminate the string with '\0'.
+ char* buffer = static_cast<char*>(malloc(len + 3));
+ full_pattern_ = buffer;
+
+ if (*regex != '^')
+ *buffer++ = '^'; // Makes sure full_pattern_ starts with '^'.
+
+ // We don't use snprintf or strncpy, as they trigger a warning when
+ // compiled with VC++ 8.0.
+ memcpy(buffer, regex, len);
+ buffer += len;
+
+ if (len == 0 || regex[len - 1] != '$')
+ *buffer++ = '$'; // Makes sure full_pattern_ ends with '$'.
+
+ *buffer = '\0';
+}
+
+#endif // GTEST_USES_POSIX_RE
+
+
+GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line)
+ : severity_(severity) {
+ const char* const marker =
+ severity == GTEST_INFO ? "[ INFO ]" :
+ severity == GTEST_WARNING ? "[WARNING]" :
+ severity == GTEST_ERROR ? "[ ERROR ]" : "[ FATAL ]";
+ GetStream() << ::std::endl << marker << " "
+ << FormatFileLocation(file, line).c_str() << ": ";
+}
+
+// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+GTestLog::~GTestLog() {
+ GetStream() << ::std::endl;
+ if (severity_ == GTEST_FATAL) {
+ fflush(stderr);
+ posix::Abort();
+ }
+}
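+// Usage sketch (illustrative): GTestLog is used via the GTEST_LOG_ macro,
+// e.g. GTEST_LOG_(WARNING) << "suspicious state"; a GTEST_LOG_(FATAL)
+// message flushes stderr and aborts the process in the destructor above.
+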
+// Disable Microsoft deprecation warnings for POSIX functions called from
+// this class (creat, dup, dup2, and close)
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4996)
+#endif // _MSC_VER
+
+#if GTEST_HAS_STREAM_REDIRECTION_
+
+// Object that captures an output stream (stdout/stderr).
+class CapturedStream {
+ public:
+ // The ctor redirects the stream to a temporary file.
+ CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
+#if GTEST_OS_WINDOWS
+ char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT
+ char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT
+
+ ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path);
+ const UINT success = ::GetTempFileNameA(temp_dir_path,
+ "gtest_redir",
+ 0, // Generate unique file name.
+ temp_file_path);
+ GTEST_CHECK_(success != 0)
+ << "Unable to create a temporary file in " << temp_dir_path;
+ const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE);
+ GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file "
+ << temp_file_path;
+ filename_ = temp_file_path;
+#else
+ // There's no guarantee that a test has write access to the
+ // current directory, so we create the temporary file in the /tmp
+ // directory instead.
+ char name_template[] = "/tmp/captured_stream.XXXXXX";
+ const int captured_fd = mkstemp(name_template);
+ filename_ = name_template;
+#endif // GTEST_OS_WINDOWS
+ fflush(NULL);
+ dup2(captured_fd, fd_);
+ close(captured_fd);
+ }
+
+ ~CapturedStream() {
+ remove(filename_.c_str());
+ }
+
+ String GetCapturedString() {
+ if (uncaptured_fd_ != -1) {
+ // Restores the original stream.
+ fflush(NULL);
+ dup2(uncaptured_fd_, fd_);
+ close(uncaptured_fd_);
+ uncaptured_fd_ = -1;
+ }
+
+ FILE* const file = posix::FOpen(filename_.c_str(), "r");
+ const String content = ReadEntireFile(file);
+ posix::FClose(file);
+ return content;
+ }
+
+ private:
+ // Reads the entire content of a file as a String.
+ static String ReadEntireFile(FILE* file);
+
+ // Returns the size (in bytes) of a file.
+ static size_t GetFileSize(FILE* file);
+
+ const int fd_; // A stream to capture.
+ int uncaptured_fd_;
+ // Name of the temporary file holding the stderr output.
+ ::std::string filename_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream);
+};
+
+// Returns the size (in bytes) of a file.
+size_t CapturedStream::GetFileSize(FILE* file) {
+ fseek(file, 0, SEEK_END);
+ return static_cast<size_t>(ftell(file));
+}
+
+// Reads the entire content of a file as a string.
+String CapturedStream::ReadEntireFile(FILE* file) {
+ const size_t file_size = GetFileSize(file);
+ char* const buffer = new char[file_size];
+
+ size_t bytes_last_read = 0; // # of bytes read in the last fread()
+ size_t bytes_read = 0; // # of bytes read so far
+
+ fseek(file, 0, SEEK_SET);
+
+ // Keeps reading the file until we cannot read further or the
+ // pre-determined file size is reached.
+ do {
+ bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file);
+ bytes_read += bytes_last_read;
+ } while (bytes_last_read > 0 && bytes_read < file_size);
+
+ const String content(buffer, bytes_read);
+ delete[] buffer;
+
+ return content;
+}
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER
+
+static CapturedStream* g_captured_stderr = NULL;
+static CapturedStream* g_captured_stdout = NULL;
+
+// Starts capturing an output stream (stdout/stderr).
+void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {
+ if (*stream != NULL) {
+ GTEST_LOG_(FATAL) << "Only one " << stream_name
+ << " capturer can exist at a time.";
+ }
+ *stream = new CapturedStream(fd);
+}
+
+// Stops capturing the output stream and returns the captured string.
+String GetCapturedStream(CapturedStream** captured_stream) {
+ const String content = (*captured_stream)->GetCapturedString();
+
+ delete *captured_stream;
+ *captured_stream = NULL;
+
+ return content;
+}
+
+// Starts capturing stdout.
+void CaptureStdout() {
+ CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout);
+}
+
+// Starts capturing stderr.
+void CaptureStderr() {
+ CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr);
+}
+
+// Stops capturing stdout and returns the captured string.
+String GetCapturedStdout() { return GetCapturedStream(&g_captured_stdout); }
+
+// Stops capturing stderr and returns the captured string.
+String GetCapturedStderr() { return GetCapturedStream(&g_captured_stderr); }
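+// Usage sketch (illustrative): capturing what a code path prints,
+//   CaptureStdout();
+//   printf("hello\n");
+//   const String output = GetCapturedStdout();  // output == "hello\n"
+// Only one capturer per stream may be active at a time; a second
+// CaptureStdout() before collecting the result triggers a fatal log.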
+
+#endif // GTEST_HAS_STREAM_REDIRECTION_
+
+#if GTEST_HAS_DEATH_TEST
+
+// A copy of all command line arguments. Set by InitGoogleTest().
+::std::vector<String> g_argvs;
+
+// Returns the command line as a vector of strings.
+const ::std::vector<String>& GetArgvs() { return g_argvs; }
+
+#endif // GTEST_HAS_DEATH_TEST
+
+#if GTEST_OS_WINDOWS_MOBILE
+namespace posix {
+void Abort() {
+ DebugBreak();
+ TerminateProcess(GetCurrentProcess(), 1);
+}
+} // namespace posix
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+// Returns the name of the environment variable corresponding to the
+// given flag. For example, FlagToEnvVar("foo") will return
+// "GTEST_FOO" in the open-source version.
+static String FlagToEnvVar(const char* flag) {
+ const String full_flag =
+ (Message() << GTEST_FLAG_PREFIX_ << flag).GetString();
+
+ Message env_var;
+ for (size_t i = 0; i != full_flag.length(); i++) {
+ env_var << static_cast<char>(toupper(full_flag.c_str()[i]));
+ }
+
+ return env_var.GetString();
+}
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value) {
+ // Parses the environment variable as a decimal integer.
+ char* end = NULL;
+ const long long_value = strtol(str, &end, 10); // NOLINT
+
+ // Has strtol() consumed all characters in the string?
+ if (*end != '\0') {
+ // No - an invalid character was encountered.
+ Message msg;
+ msg << "WARNING: " << src_text
+ << " is expected to be a 32-bit integer, but actually"
+ << " has value \"" << str << "\".\n";
+ printf("%s", msg.GetString().c_str());
+ fflush(stdout);
+ return false;
+ }
+
+ // Is the parsed value in the range of an Int32?
+ const Int32 result = static_cast<Int32>(long_value);
+ if (long_value == LONG_MAX || long_value == LONG_MIN ||
+ // The parsed value overflows as a long. (strtol() returns
+ // LONG_MAX or LONG_MIN when the input overflows.)
+ result != long_value
+ // The parsed value overflows as an Int32.
+ ) {
+ Message msg;
+ msg << "WARNING: " << src_text
+ << " is expected to be a 32-bit integer, but actually"
+ << " has value " << str << ", which overflows.\n";
+ printf("%s", msg.GetString().c_str());
+ fflush(stdout);
+ return false;
+ }
+
+ *value = result;
+ return true;
+}
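+// Usage sketch (illustrative):
+//   Int32 value = 0;
+//   ParseInt32(Message() << "Some flag", "123", &value);   // true, value == 123
+//   ParseInt32(Message() << "Some flag", "12abc", &value); // false, warning
+//   ParseInt32(Message() << "Some flag", "99999999999", &value);  // false, overflow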
+
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true iff it's not "0".
+bool BoolFromGTestEnv(const char* flag, bool default_value) {
+ const String env_var = FlagToEnvVar(flag);
+ const char* const string_value = posix::GetEnv(env_var.c_str());
+ return string_value == NULL ?
+ default_value : strcmp(string_value, "0") != 0;
+}
+
+// Reads and returns a 32-bit integer stored in the environment
+// variable corresponding to the given flag; if it isn't set or
+// doesn't represent a valid 32-bit integer, returns default_value.
+Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
+ const String env_var = FlagToEnvVar(flag);
+ const char* const string_value = posix::GetEnv(env_var.c_str());
+ if (string_value == NULL) {
+ // The environment variable is not set.
+ return default_value;
+ }
+
+ Int32 result = default_value;
+ if (!ParseInt32(Message() << "Environment variable " << env_var,
+ string_value, &result)) {
+ printf("The default value %s is used.\n",
+ (Message() << default_value).GetString().c_str());
+ fflush(stdout);
+ return default_value;
+ }
+
+ return result;
+}
+
+// Reads and returns the string environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+const char* StringFromGTestEnv(const char* flag, const char* default_value) {
+ const String env_var = FlagToEnvVar(flag);
+ const char* const value = posix::GetEnv(env_var.c_str());
+ return value == NULL ? default_value : value;
+}
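+// Usage sketch (illustrative): FlagToEnvVar("repeat") yields "GTEST_REPEAT",
+// so with GTEST_REPEAT=3 set in the environment,
+//   Int32FromGTestEnv("repeat", 1)              // returns 3
+//   BoolFromGTestEnv("break_on_failure", false) // true iff GTEST_BREAK_ON_FAILURE
+//                                               // is set to anything but "0"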
+
+} // namespace internal
+} // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+// The Google C++ Testing Framework (Google Test)
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+using internal::GetUnitTestImpl;
+
+// Gets the summary of the failure message by omitting the stack trace
+// in it.
+internal::String TestPartResult::ExtractSummary(const char* message) {
+ const char* const stack_trace = strstr(message, internal::kStackTraceMarker);
+ return stack_trace == NULL ? internal::String(message) :
+ internal::String(message, stack_trace - message);
+}
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result) {
+ return os
+ << result.file_name() << ":" << result.line_number() << ": "
+ << (result.type() == TestPartResult::kSuccess ? "Success" :
+ result.type() == TestPartResult::kFatalFailure ? "Fatal failure" :
+ "Non-fatal failure") << ":\n"
+ << result.message() << std::endl;
+}
+
+// Appends a TestPartResult to the array.
+void TestPartResultArray::Append(const TestPartResult& result) {
+ array_.push_back(result);
+}
+
+// Returns the TestPartResult at the given index (0-based).
+const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const {
+ if (index < 0 || index >= size()) {
+ printf("\nInvalid index (%d) into TestPartResultArray.\n", index);
+ internal::posix::Abort();
+ }
+
+ return array_[index];
+}
+
+// Returns the number of TestPartResult objects in the array.
+int TestPartResultArray::size() const {
+ return static_cast<int>(array_.size());
+}
+
+namespace internal {
+
+HasNewFatalFailureHelper::HasNewFatalFailureHelper()
+ : has_new_fatal_failure_(false),
+ original_reporter_(GetUnitTestImpl()->
+ GetTestPartResultReporterForCurrentThread()) {
+ GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this);
+}
+
+HasNewFatalFailureHelper::~HasNewFatalFailureHelper() {
+ GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(
+ original_reporter_);
+}
+
+void HasNewFatalFailureHelper::ReportTestPartResult(
+ const TestPartResult& result) {
+ if (result.fatally_failed())
+ has_new_fatal_failure_ = true;
+ original_reporter_->ReportTestPartResult(result);
+}
+
+} // namespace internal
+
+} // namespace testing
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+namespace testing {
+namespace internal {
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// Skips to the first non-space char in str. Returns an empty string if str
+// contains only whitespace characters.
+static const char* SkipSpaces(const char* str) {
+ while (isspace(*str))
+ str++;
+ return str;
+}
+
+// Verifies that registered_tests match the test names in
+// defined_test_names_; returns registered_tests if successful, or
+// aborts the program otherwise.
+const char* TypedTestCasePState::VerifyRegisteredTestNames(
+ const char* file, int line, const char* registered_tests) {
+ typedef ::std::set<const char*>::const_iterator DefinedTestIter;
+ registered_ = true;
+
+ // Skip initial whitespace in registered_tests since some
+  // preprocessors prefix stringized literals with whitespace.
+ registered_tests = SkipSpaces(registered_tests);
+
+ Message errors;
+ ::std::set<String> tests;
+ for (const char* names = registered_tests; names != NULL;
+ names = SkipComma(names)) {
+ const String name = GetPrefixUntilComma(names);
+ if (tests.count(name) != 0) {
+ errors << "Test " << name << " is listed more than once.\n";
+ continue;
+ }
+
+ bool found = false;
+ for (DefinedTestIter it = defined_test_names_.begin();
+ it != defined_test_names_.end();
+ ++it) {
+ if (name == *it) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ tests.insert(name);
+ } else {
+ errors << "No test named " << name
+ << " can be found in this test case.\n";
+ }
+ }
+
+ for (DefinedTestIter it = defined_test_names_.begin();
+ it != defined_test_names_.end();
+ ++it) {
+ if (tests.count(*it) == 0) {
+ errors << "You forgot to list test " << *it << ".\n";
+ }
+ }
+
+ const String& errors_str = errors.GetString();
+ if (errors_str != "") {
+ fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
+ errors_str.c_str());
+ fflush(stderr);
+ posix::Abort();
+ }
+
+ return registered_tests;
+}
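+// Usage sketch (illustrative, abbreviated): this check backs
+// REGISTER_TYPED_TEST_CASE_P.  For a case declared as
+//   TYPED_TEST_CASE_P(FooTest);
+//   TYPED_TEST_P(FooTest, DoesA) { /* ... */ }
+//   TYPED_TEST_P(FooTest, DoesB) { /* ... */ }
+// the registration REGISTER_TYPED_TEST_CASE_P(FooTest, DoesA, DoesB) passes,
+// while omitting DoesB or listing a name twice aborts with the errors above.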
+
+#endif // GTEST_HAS_TYPED_TEST_P
+
+} // namespace internal
+} // namespace testing
diff --git a/src/common/gtest.h b/src/common/gtest.h
new file mode 100755
index 00000000..7957f488
--- /dev/null
+++ b/src/common/gtest.h
@@ -0,0 +1,18065 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for Google Test. It should be
+// included by any test program that uses Google Test.
+//
+// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
+// program!
+//
+// Acknowledgment: Google Test borrowed the idea of automatic test
+// registration from Barthelemy Dagenais' (barthelemy@prologique.com)
+// easyUnit framework.
+//
+/*****
+ * NAME
+ *
+ *
+ * AUTHOR
+ * google
+ *
+ * COPYRIGHT
+ * Copyright (c) 2004-2011 by cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * DESCRIPTION
+ *
+ ****/
+
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_H_
+
+#include <limits>
+#include <vector>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares functions and macros used internally by
+// Google Test. They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan)
+//
+// Low-level types and utilities for porting Google Test to various
+// platforms. They are subject to change without notice. DO NOT USE
+// THEM IN USER CODE.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+// The user can define the following macros in the build script to
+// control Google Test's behavior. If the user doesn't define a macro
+// in this list, Google Test will define it.
+//
+// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2)
+// is/isn't available.
+// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions
+// are enabled.
+// GTEST_HAS_GLOBAL_STRING - Define it to 1/0 to indicate that ::string
+// is/isn't available (some systems define
+// ::string, which is different to std::string).
+//   GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::wstring
+// is/isn't available (some systems define
+// ::wstring, which is different to std::wstring).
+// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that <pthread.h>
+// is/isn't available.
+// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't
+// enabled.
+// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that
+// std::wstring does/doesn't work (Google Test can
+// be used where std::wstring is unavailable).
+// GTEST_HAS_TR1_TUPLE - Define it to 1/0 to indicate tr1::tuple
+// is/isn't available.
+// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the
+// compiler supports Microsoft's "Structured
+// Exception Handling".
+// GTEST_USE_OWN_TR1_TUPLE - Define it to 1/0 to indicate whether Google
+// Test's own tr1 tuple implementation should be
+// used. Unused when the user sets
+// GTEST_HAS_TR1_TUPLE to 0.
+// GTEST_LINKED_AS_SHARED_LIBRARY
+// - Define to 1 when compiling tests that use
+// Google Test as a shared library (known as
+// DLL on Windows).
+// GTEST_CREATE_SHARED_LIBRARY
+// - Define to 1 when compiling Google Test itself
+// as a shared library.
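+//
+// For example, a build that compiles without exception support could
+// pass -DGTEST_HAS_EXCEPTIONS=0 on the compiler command line so that
+// Google Test does not have to guess.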
+
+// This header defines the following utilities:
+//
+// Macros indicating the current platform (defined to 1 if compiled on
+// the given platform; otherwise undefined):
+// GTEST_OS_AIX - IBM AIX
+// GTEST_OS_CYGWIN - Cygwin
+// GTEST_OS_LINUX - Linux
+// GTEST_OS_MAC - Mac OS X
+// GTEST_OS_SOLARIS - Sun Solaris
+// GTEST_OS_SYMBIAN - Symbian
+// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile)
+// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop
+// GTEST_OS_WINDOWS_MINGW - MinGW
+// GTEST_OS_WINDOWS_MOBILE - Windows Mobile
+// GTEST_OS_ZOS - z/OS
+//
+// Among the platforms, Cygwin, Linux, Mac OS X, and Windows have the
+// most stable support. Since core members of the Google Test project
+// don't have access to other platforms, support for them may be less
+// stable. If you notice any problems on your platform, please notify
+// googletestframework@googlegroups.com (patches for fixing them are
+// even more welcome!).
+//
+// Note that it is possible that none of the GTEST_OS_* macros are defined.
+//
+// Macros indicating available Google Test features (defined to 1 if
+// the corresponding feature is supported; otherwise undefined):
+// GTEST_HAS_COMBINE - the Combine() function (for value-parameterized
+// tests)
+// GTEST_HAS_DEATH_TEST - death tests
+// GTEST_HAS_PARAM_TEST - value-parameterized tests
+// GTEST_HAS_TYPED_TEST - typed tests
+// GTEST_HAS_TYPED_TEST_P - type-parameterized tests
+// GTEST_USES_POSIX_RE - enhanced POSIX regex is used.
+// GTEST_USES_SIMPLE_RE - our own simple regex is used;
+// the above two are mutually exclusive.
+// GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ().
+//
+// Macros for basic C++ coding:
+// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning.
+// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a
+// variable don't have to be used.
+// GTEST_DISALLOW_ASSIGN_ - disables operator=.
+// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=.
+// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used.
+//
+// Synchronization:
+// Mutex, MutexLock, ThreadLocal, GetThreadCount()
+// - synchronization primitives.
+// GTEST_IS_THREADSAFE - defined to 1 to indicate that the above
+// synchronization primitives have real implementations
+// and Google Test is thread-safe; or 0 otherwise.
+//
+// Template meta programming:
+// is_pointer - as in TR1; needed on Symbian and IBM XL C/C++ only.
+//
+// Smart pointers:
+// scoped_ptr - as in TR2.
+//
+// Regular expressions:
+// RE - a simple regular expression class using the POSIX
+// Extended Regular Expression syntax. Not available on
+// Windows.
+//
+// Logging:
+// GTEST_LOG_() - logs messages at the specified severity level.
+// LogToStderr() - directs all log messages to stderr.
+// FlushInfoLog() - flushes informational log messages.
+//
+// Stdout and stderr capturing:
+// CaptureStdout() - starts capturing stdout.
+// GetCapturedStdout() - stops capturing stdout and returns the captured
+// string.
+// CaptureStderr() - starts capturing stderr.
+// GetCapturedStderr() - stops capturing stderr and returns the captured
+// string.
+//
+// Integer types:
+// TypeWithSize - maps a size in bytes to an integer type of that size.
+// Int32, UInt32, Int64, UInt64, TimeInMillis
+// - integers of known sizes.
+// BiggestInt - the biggest signed integer type.
+//
+// Command-line utilities:
+// GTEST_FLAG() - references a flag.
+// GTEST_DECLARE_*() - declares a flag.
+// GTEST_DEFINE_*() - defines a flag.
+// GetArgvs() - returns the command line as a vector of strings.
+//
+// Environment variable utilities:
+// GetEnv() - gets the value of an environment variable.
+// BoolFromGTestEnv() - parses a bool environment variable.
+// Int32FromGTestEnv() - parses an Int32 environment variable.
+// StringFromGTestEnv() - parses a string environment variable.
+
+#include <stddef.h> // For ptrdiff_t
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef _WIN32_WCE
+#include <sys/stat.h>
+#endif // !_WIN32_WCE
+
+#include <iostream> // NOLINT
+#include <sstream> // NOLINT
+#include <string> // NOLINT
+
+#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com"
+#define GTEST_FLAG_PREFIX_ "gtest_"
+#define GTEST_FLAG_PREFIX_DASH_ "gtest-"
+#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_"
+#define GTEST_NAME_ "Google Test"
+#define GTEST_PROJECT_URL_ "http://code.google.com/p/googletest/"
+
+// Determines the version of gcc that is used to compile this.
+#ifdef __GNUC__
+// 40302 means version 4.3.2.
+#define GTEST_GCC_VER_ \
+ (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
+#endif // __GNUC__
+
+// Determines the platform on which Google Test is compiled.
+#ifdef __CYGWIN__
+#define GTEST_OS_CYGWIN 1
+#elif defined __SYMBIAN32__
+#define GTEST_OS_SYMBIAN 1
+#elif defined _WIN32
+#define GTEST_OS_WINDOWS 1
+#ifdef _WIN32_WCE
+#define GTEST_OS_WINDOWS_MOBILE 1
+#elif defined(__MINGW__) || defined(__MINGW32__)
+#define GTEST_OS_WINDOWS_MINGW 1
+#else
+#define GTEST_OS_WINDOWS_DESKTOP 1
+#endif // _WIN32_WCE
+#elif defined __APPLE__
+#define GTEST_OS_MAC 1
+#elif defined __linux__
+#define GTEST_OS_LINUX 1
+#elif defined __MVS__
+#define GTEST_OS_ZOS 1
+#elif defined(__sun) && defined(__SVR4)
+#define GTEST_OS_SOLARIS 1
+#elif defined(_AIX)
+#define GTEST_OS_AIX 1
+#endif // __CYGWIN__
+
+#if GTEST_OS_CYGWIN || GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_SYMBIAN || \
+ GTEST_OS_SOLARIS || GTEST_OS_AIX
+
+// On some platforms, <regex.h> needs someone to define size_t, and
+// won't compile otherwise. We can #include it here as we already
+// included <stdlib.h>, which is guaranteed to define size_t through
+// <stddef.h>.
+#include <regex.h> // NOLINT
+#include <strings.h> // NOLINT
+#include <sys/types.h> // NOLINT
+#include <time.h> // NOLINT
+#include <unistd.h> // NOLINT
+
+#define GTEST_USES_POSIX_RE 1
+
+#elif GTEST_OS_WINDOWS
+
+#if !GTEST_OS_WINDOWS_MOBILE
+#include <direct.h> // NOLINT
+#include <io.h> // NOLINT
+#endif
+
+// <regex.h> is not available on Windows. Use our own simple regex
+// implementation instead.
+#define GTEST_USES_SIMPLE_RE 1
+
+#else
+
+// <regex.h> may not be available on this platform. Use our own
+// simple regex implementation instead.
+#define GTEST_USES_SIMPLE_RE 1
+
+#endif // GTEST_OS_CYGWIN || GTEST_OS_LINUX || GTEST_OS_MAC ||
+ // GTEST_OS_SYMBIAN || GTEST_OS_SOLARIS || GTEST_OS_AIX
+
+#ifndef GTEST_HAS_EXCEPTIONS
+// The user didn't tell us whether exceptions are enabled, so we need
+// to figure it out.
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS
+// macro to enable exceptions, so we'll do the same.
+// Assumes that exceptions are enabled by default.
+#ifndef _HAS_EXCEPTIONS
+#define _HAS_EXCEPTIONS 1
+#endif // _HAS_EXCEPTIONS
+#define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
+#elif defined(__GNUC__) && __EXCEPTIONS
+// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.
+#define GTEST_HAS_EXCEPTIONS 1
+#elif defined(__SUNPRO_CC)
+// Sun Pro CC supports exceptions. However, there is no compile-time way of
+// detecting whether they are enabled or not. Therefore, we assume that
+// they are enabled unless the user tells us otherwise.
+#define GTEST_HAS_EXCEPTIONS 1
+#elif defined(__IBMCPP__) && __EXCEPTIONS
+// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled.
+#define GTEST_HAS_EXCEPTIONS 1
+#else
+// For other compilers, we assume exceptions are disabled to be
+// conservative.
+#define GTEST_HAS_EXCEPTIONS 0
+#endif // defined(_MSC_VER) || defined(__BORLANDC__)
+#endif // GTEST_HAS_EXCEPTIONS
+
+#if !defined(GTEST_HAS_STD_STRING)
+// Even though we don't use this macro any longer, we keep it in case
+// some clients still depend on it.
+
+#if !defined OCTEON_BUILD
+#define GTEST_HAS_STD_STRING 1
+#endif
+
+
+#elif !GTEST_HAS_STD_STRING
+// The user told us that ::std::string isn't available.
+#error "Google Test cannot be used where ::std::string isn't available."
+#endif // !defined(GTEST_HAS_STD_STRING)
+
+#ifndef GTEST_HAS_GLOBAL_STRING
+// The user didn't tell us whether ::string is available, so we need
+// to figure it out.
+
+#define GTEST_HAS_GLOBAL_STRING 0
+
+#endif // GTEST_HAS_GLOBAL_STRING
+
+#ifndef GTEST_HAS_STD_WSTRING
+// The user didn't tell us whether ::std::wstring is available, so we need
+// to figure it out.
+// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring
+// is available.
+
+// Cygwin 1.5 and below doesn't support ::std::wstring.
+// Cygwin 1.7 might add wstring support; this should be updated when clear.
+// Solaris' libc++ doesn't support it either.
+
+#if !defined OCTEON_BUILD
+#define GTEST_HAS_STD_WSTRING (!(GTEST_OS_CYGWIN || GTEST_OS_SOLARIS))
+#endif
+
+#endif // GTEST_HAS_STD_WSTRING
+
+#ifndef GTEST_HAS_GLOBAL_WSTRING
+// The user didn't tell us whether ::wstring is available, so we need
+// to figure it out.
+#define GTEST_HAS_GLOBAL_WSTRING \
+ (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING)
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+// Determines whether RTTI is available.
+#ifndef GTEST_HAS_RTTI
+// The user didn't tell us whether RTTI is enabled, so we need to
+// figure it out.
+
+#ifdef _MSC_VER
+
+#ifdef _CPPRTTI // MSVC defines this macro iff RTTI is enabled.
+#define GTEST_HAS_RTTI 1
+#else
+#define GTEST_HAS_RTTI 0
+#endif
+
+// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.
+#elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302)
+
+#ifdef __GXX_RTTI
+#define GTEST_HAS_RTTI 1
+#else
+#define GTEST_HAS_RTTI 0
+#endif // __GXX_RTTI
+
+// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if
+// both the typeid and dynamic_cast features are present.
+#elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)
+
+#ifdef __RTTI_ALL__
+#define GTEST_HAS_RTTI 1
+#else
+#define GTEST_HAS_RTTI 0
+#endif
+
+#else
+
+// For all other compilers, we assume RTTI is enabled.
+#define GTEST_HAS_RTTI 1
+
+#endif // _MSC_VER
+
+#endif // GTEST_HAS_RTTI
+
+// It's this header's responsibility to #include <typeinfo> when RTTI
+// is enabled.
+#if GTEST_HAS_RTTI
+#include <typeinfo>
+#endif
+
+// Determines whether Google Test can use the pthreads library.
+#ifndef GTEST_HAS_PTHREAD
+// The user didn't tell us explicitly, so we assume pthreads support is
+// available on Linux and Mac.
+//
+// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0
+// to your compiler flags.
+#define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC)
+#endif // GTEST_HAS_PTHREAD
+
+// Determines whether Google Test can use tr1/tuple. You can define
+// this macro to 0 to prevent Google Test from using tuple (any
+// feature depending on tuple will be disabled in this mode).
+#ifndef GTEST_HAS_TR1_TUPLE
+// The user didn't tell us not to do it, so we assume it's OK.
+#define GTEST_HAS_TR1_TUPLE 1
+#endif // GTEST_HAS_TR1_TUPLE
+
+// Determines whether Google Test's own tr1 tuple implementation
+// should be used.
+#ifndef GTEST_USE_OWN_TR1_TUPLE
+// The user didn't tell us, so we need to figure it out.
+
+// We use our own TR1 tuple if we aren't sure the user has an
+// implementation of it already. At this time, GCC 4.0.0+ and MSVC
+// 2010 are the only mainstream compilers that come with a TR1 tuple
+// implementation. NVIDIA's CUDA NVCC compiler pretends to be GCC by
+// defining __GNUC__ and friends, but cannot compile GCC's tuple
+// implementation. MSVC 2008 (9.0) provides TR1 tuple in a 323 MB
+// Feature Pack download, which we cannot assume the user has.
+#if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000)) \
+ || _MSC_VER >= 1600
+#define GTEST_USE_OWN_TR1_TUPLE 0
+#else
+#define GTEST_USE_OWN_TR1_TUPLE 1
+#endif
+
+#endif // GTEST_USE_OWN_TR1_TUPLE
+
+// To avoid conditional compilation everywhere, we make it
+// gtest-port.h's responsibility to #include the header implementing
+// tr1/tuple.
+#if GTEST_HAS_TR1_TUPLE
+
+#if GTEST_USE_OWN_TR1_TUPLE
+// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
+
+// Copyright 2009 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Implements a subset of TR1 tuple needed by Google Test and Google Mock.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+
+#include <utility> // For ::std::pair.
+
+// The compiler used in Symbian has a bug that prevents us from declaring the
+// tuple template as a friend (it complains that tuple is redefined). This
+// hack bypasses the bug by declaring the members that should otherwise be
+// private as public.
+// Sun Studio versions < 12 also have the above bug.
+#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)
+#define GTEST_DECLARE_TUPLE_AS_FRIEND_ public:
+#else
+#define GTEST_DECLARE_TUPLE_AS_FRIEND_ \
+ template <GTEST_10_TYPENAMES_(U)> friend class tuple; \
+ private:
+#endif
+
+// GTEST_n_TUPLE_(T) is the type of an n-tuple.
+#define GTEST_0_TUPLE_(T) tuple<>
+#define GTEST_1_TUPLE_(T) tuple<T##0, void, void, void, void, void, void, \
+ void, void, void>
+#define GTEST_2_TUPLE_(T) tuple<T##0, T##1, void, void, void, void, void, \
+ void, void, void>
+#define GTEST_3_TUPLE_(T) tuple<T##0, T##1, T##2, void, void, void, void, \
+ void, void, void>
+#define GTEST_4_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, void, void, void, \
+ void, void, void>
+#define GTEST_5_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, void, void, \
+ void, void, void>
+#define GTEST_6_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, void, \
+ void, void, void>
+#define GTEST_7_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ void, void, void>
+#define GTEST_8_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, void, void>
+#define GTEST_9_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, T##8, void>
+#define GTEST_10_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, T##8, T##9>
+
+// GTEST_n_TYPENAMES_(T) declares a list of n typenames.
+#define GTEST_0_TYPENAMES_(T)
+#define GTEST_1_TYPENAMES_(T) typename T##0
+#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1
+#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2
+#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3
+#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4
+#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5
+#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6
+#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, typename T##7
+#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, \
+ typename T##7, typename T##8
+#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, \
+ typename T##7, typename T##8, typename T##9
+
+// In theory, defining stuff in the ::std namespace is undefined
+// behavior. We can do this as we are playing the role of a standard
+// library vendor.
+namespace std {
+namespace tr1 {
+
+template <typename T0 = void, typename T1 = void, typename T2 = void,
+ typename T3 = void, typename T4 = void, typename T5 = void,
+ typename T6 = void, typename T7 = void, typename T8 = void,
+ typename T9 = void>
+class tuple;
+
+// Anything in namespace gtest_internal is Google Test's INTERNAL
+// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code.
+namespace gtest_internal {
+
+// ByRef<T>::type is T if T is a reference; otherwise it's const T&.
+template <typename T>
+struct ByRef { typedef const T& type; }; // NOLINT
+template <typename T>
+struct ByRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for ByRef.
+#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef<T>::type
+
+// AddRef<T>::type is T if T is a reference; otherwise it's T&. This
+// is the same as tr1::add_reference<T>::type.
+template <typename T>
+struct AddRef { typedef T& type; }; // NOLINT
+template <typename T>
+struct AddRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for AddRef.
+#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef<T>::type
+
+// A helper for implementing get<k>().
+template <int k> class Get;
+
+// A helper for implementing tuple_element<k, T>. kIndexValid is true
+// iff k < the number of fields in tuple type T.
+template <bool kIndexValid, int kIndex, class Tuple>
+struct TupleElement;
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 0, GTEST_10_TUPLE_(T)> { typedef T0 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 1, GTEST_10_TUPLE_(T)> { typedef T1 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 2, GTEST_10_TUPLE_(T)> { typedef T2 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 3, GTEST_10_TUPLE_(T)> { typedef T3 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 4, GTEST_10_TUPLE_(T)> { typedef T4 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 5, GTEST_10_TUPLE_(T)> { typedef T5 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 6, GTEST_10_TUPLE_(T)> { typedef T6 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 7, GTEST_10_TUPLE_(T)> { typedef T7 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 8, GTEST_10_TUPLE_(T)> { typedef T8 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 9, GTEST_10_TUPLE_(T)> { typedef T9 type; };
+
+} // namespace gtest_internal
+
+template <>
+class tuple<> {
+ public:
+ tuple() {}
+ tuple(const tuple& /* t */) {}
+ tuple& operator=(const tuple& /* t */) { return *this; }
+};
+
+template <GTEST_1_TYPENAMES_(T)>
+class GTEST_1_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {}
+
+ tuple(const tuple& t) : f0_(t.f0_) {}
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_1_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ return *this;
+ }
+
+ T0 f0_;
+};
+
+template <GTEST_2_TYPENAMES_(T)>
+class GTEST_2_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0),
+ f1_(f1) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {}
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {}
+ template <typename U0, typename U1>
+ tuple(const ::std::pair<U0, U1>& p) : f0_(p.first), f1_(p.second) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_2_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+ template <typename U0, typename U1>
+ tuple& operator=(const ::std::pair<U0, U1>& p) {
+ f0_ = p.first;
+ f1_ = p.second;
+ return *this;
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+};
+
+template <GTEST_3_TYPENAMES_(T)>
+class GTEST_3_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_3_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+};
+
+template <GTEST_4_TYPENAMES_(T)>
+class GTEST_4_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {}
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_4_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+};
+
+template <GTEST_5_TYPENAMES_(T)>
+class GTEST_5_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3,
+ GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_) {}
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_5_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+};
+
+template <GTEST_6_TYPENAMES_(T)>
+class GTEST_6_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_) {}
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_6_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+};
+
+template <GTEST_7_TYPENAMES_(T)>
+class GTEST_7_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3), f4_(f4), f5_(f5), f6_(f6) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_7_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+};
+
+template <GTEST_8_TYPENAMES_(T)>
+class GTEST_8_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6,
+ GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5), f6_(f6), f7_(f7) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_8_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+};
+
+template <GTEST_9_TYPENAMES_(T)>
+class GTEST_9_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+ GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5), f6_(f6), f7_(f7), f8_(f8) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_9_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ f8_ = t.f8_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+ T8 f8_;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+class tuple {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(),
+ f9_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+ GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {}
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_),
+ f9_(t.f9_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_10_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ f8_ = t.f8_;
+ f9_ = t.f9_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+ T8 f8_;
+ T9 f9_;
+};
+
+// 6.1.3.2 Tuple creation functions.
+
+// Known limitations: we don't support passing an
+// std::tr1::reference_wrapper<T> to make_tuple(). And we don't
+// implement tie().
+
+inline tuple<> make_tuple() { return tuple<>(); }
+
+template <GTEST_1_TYPENAMES_(T)>
+inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) {
+ return GTEST_1_TUPLE_(T)(f0);
+}
+
+template <GTEST_2_TYPENAMES_(T)>
+inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) {
+ return GTEST_2_TUPLE_(T)(f0, f1);
+}
+
+template <GTEST_3_TYPENAMES_(T)>
+inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) {
+ return GTEST_3_TUPLE_(T)(f0, f1, f2);
+}
+
+template <GTEST_4_TYPENAMES_(T)>
+inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3) {
+ return GTEST_4_TUPLE_(T)(f0, f1, f2, f3);
+}
+
+template <GTEST_5_TYPENAMES_(T)>
+inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4) {
+ return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4);
+}
+
+template <GTEST_6_TYPENAMES_(T)>
+inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5) {
+ return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5);
+}
+
+template <GTEST_7_TYPENAMES_(T)>
+inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6) {
+ return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6);
+}
+
+template <GTEST_8_TYPENAMES_(T)>
+inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) {
+ return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7);
+}
+
+template <GTEST_9_TYPENAMES_(T)>
+inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+ const T8& f8) {
+ return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8);
+}
+
+template <GTEST_10_TYPENAMES_(T)>
+inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+ const T8& f8, const T9& f9) {
+ return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9);
+}
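+
+// As a usage sketch (the values below are illustrative only),
+//
+//   ::std::tr1::tuple<int, double> t = ::std::tr1::make_tuple(1, 2.5);
+//
+// creates a 2-tuple holding an int and a double; the remaining type
+// parameters default to void.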
+
+// 6.1.3.3 Tuple helper classes.
+
+template <typename Tuple> struct tuple_size;
+
+template <GTEST_0_TYPENAMES_(T)>
+struct tuple_size<GTEST_0_TUPLE_(T)> { static const int value = 0; };
+
+template <GTEST_1_TYPENAMES_(T)>
+struct tuple_size<GTEST_1_TUPLE_(T)> { static const int value = 1; };
+
+template <GTEST_2_TYPENAMES_(T)>
+struct tuple_size<GTEST_2_TUPLE_(T)> { static const int value = 2; };
+
+template <GTEST_3_TYPENAMES_(T)>
+struct tuple_size<GTEST_3_TUPLE_(T)> { static const int value = 3; };
+
+template <GTEST_4_TYPENAMES_(T)>
+struct tuple_size<GTEST_4_TUPLE_(T)> { static const int value = 4; };
+
+template <GTEST_5_TYPENAMES_(T)>
+struct tuple_size<GTEST_5_TUPLE_(T)> { static const int value = 5; };
+
+template <GTEST_6_TYPENAMES_(T)>
+struct tuple_size<GTEST_6_TUPLE_(T)> { static const int value = 6; };
+
+template <GTEST_7_TYPENAMES_(T)>
+struct tuple_size<GTEST_7_TUPLE_(T)> { static const int value = 7; };
+
+template <GTEST_8_TYPENAMES_(T)>
+struct tuple_size<GTEST_8_TUPLE_(T)> { static const int value = 8; };
+
+template <GTEST_9_TYPENAMES_(T)>
+struct tuple_size<GTEST_9_TUPLE_(T)> { static const int value = 9; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct tuple_size<GTEST_10_TUPLE_(T)> { static const int value = 10; };
+
+template <int k, class Tuple>
+struct tuple_element {
+ typedef typename gtest_internal::TupleElement<
+ k < (tuple_size<Tuple>::value), k, Tuple>::type type;
+};
+
+#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element<k, Tuple >::type
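+
+// For example, tuple_element<1, tuple<int, char> >::type is char, and
+// GTEST_TUPLE_ELEMENT_(1, tuple<int, char>) names the same type.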
+
+// 6.1.3.4 Element access.
+
+namespace gtest_internal {
+
+template <>
+class Get<0> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+ Field(Tuple& t) { return t.f0_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+ ConstField(const Tuple& t) { return t.f0_; }
+};
+
+template <>
+class Get<1> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+ Field(Tuple& t) { return t.f1_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+ ConstField(const Tuple& t) { return t.f1_; }
+};
+
+template <>
+class Get<2> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+ Field(Tuple& t) { return t.f2_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+ ConstField(const Tuple& t) { return t.f2_; }
+};
+
+template <>
+class Get<3> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+ Field(Tuple& t) { return t.f3_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+ ConstField(const Tuple& t) { return t.f3_; }
+};
+
+template <>
+class Get<4> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+ Field(Tuple& t) { return t.f4_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+ ConstField(const Tuple& t) { return t.f4_; }
+};
+
+template <>
+class Get<5> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+ Field(Tuple& t) { return t.f5_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+ ConstField(const Tuple& t) { return t.f5_; }
+};
+
+template <>
+class Get<6> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+ Field(Tuple& t) { return t.f6_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+ ConstField(const Tuple& t) { return t.f6_; }
+};
+
+template <>
+class Get<7> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+ Field(Tuple& t) { return t.f7_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+ ConstField(const Tuple& t) { return t.f7_; }
+};
+
+template <>
+class Get<8> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+ Field(Tuple& t) { return t.f8_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+ ConstField(const Tuple& t) { return t.f8_; }
+};
+
+template <>
+class Get<9> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+ Field(Tuple& t) { return t.f9_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+ ConstField(const Tuple& t) { return t.f9_; }
+};
+
+} // namespace gtest_internal
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(GTEST_10_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::Field(t);
+}
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(const GTEST_10_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::ConstField(t);
+}
+
+// 6.1.3.5 Relational operators
+
+// We only implement == and !=, as we don't have a need for the rest yet.
+
+namespace gtest_internal {
+
+// SameSizeTuplePrefixComparator<k, k>::Eq(t1, t2) returns true if the
+// first k fields of t1 equals the first k fields of t2.
+// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if
+// k1 != k2.
+template <int kSize1, int kSize2>
+struct SameSizeTuplePrefixComparator;
+
+template <>
+struct SameSizeTuplePrefixComparator<0, 0> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) {
+ return true;
+ }
+};
+
+template <int k>
+struct SameSizeTuplePrefixComparator<k, k> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& t1, const Tuple2& t2) {
+ return SameSizeTuplePrefixComparator<k - 1, k - 1>::Eq(t1, t2) &&
+ ::std::tr1::get<k - 1>(t1) == ::std::tr1::get<k - 1>(t2);
+ }
+};
+
+} // namespace gtest_internal
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator==(const GTEST_10_TUPLE_(T)& t,
+ const GTEST_10_TUPLE_(U)& u) {
+ return gtest_internal::SameSizeTuplePrefixComparator<
+ tuple_size<GTEST_10_TUPLE_(T)>::value,
+ tuple_size<GTEST_10_TUPLE_(U)>::value>::Eq(t, u);
+}
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator!=(const GTEST_10_TUPLE_(T)& t,
+ const GTEST_10_TUPLE_(U)& u) { return !(t == u); }
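+
+// As a usage sketch (values are illustrative only):
+//
+//   ::std::tr1::tuple<int, char> t(1, 'a');
+//   ::std::tr1::get<0>(t);                // Yields 1.
+//   t == ::std::tr1::make_tuple(1, 'a');  // Yields true.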
+
+// 6.1.4 Pairs.
+// Unimplemented.
+
+} // namespace tr1
+} // namespace std
+
+#undef GTEST_0_TUPLE_
+#undef GTEST_1_TUPLE_
+#undef GTEST_2_TUPLE_
+#undef GTEST_3_TUPLE_
+#undef GTEST_4_TUPLE_
+#undef GTEST_5_TUPLE_
+#undef GTEST_6_TUPLE_
+#undef GTEST_7_TUPLE_
+#undef GTEST_8_TUPLE_
+#undef GTEST_9_TUPLE_
+#undef GTEST_10_TUPLE_
+
+#undef GTEST_0_TYPENAMES_
+#undef GTEST_1_TYPENAMES_
+#undef GTEST_2_TYPENAMES_
+#undef GTEST_3_TYPENAMES_
+#undef GTEST_4_TYPENAMES_
+#undef GTEST_5_TYPENAMES_
+#undef GTEST_6_TYPENAMES_
+#undef GTEST_7_TYPENAMES_
+#undef GTEST_8_TYPENAMES_
+#undef GTEST_9_TYPENAMES_
+#undef GTEST_10_TYPENAMES_
+
+#undef GTEST_DECLARE_TUPLE_AS_FRIEND_
+#undef GTEST_BY_REF_
+#undef GTEST_ADD_REF_
+#undef GTEST_TUPLE_ELEMENT_
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+#elif GTEST_OS_SYMBIAN
+
+// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to
+// use STLport's tuple implementation, which unfortunately doesn't
+// work as the copy of STLport distributed with Symbian is incomplete.
+// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to
+// use its own tuple implementation.
+#ifdef BOOST_HAS_TR1_TUPLE
+#undef BOOST_HAS_TR1_TUPLE
+#endif // BOOST_HAS_TR1_TUPLE
+
+// This prevents <boost/tr1/detail/config.hpp>, which defines
+// BOOST_HAS_TR1_TUPLE, from being #included by Boost's <tuple>.
+#define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED
+#include <tuple>
+
+#elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)
+// GCC 4.0+ implements tr1/tuple in the <tr1/tuple> header. This does
+// not conform to the TR1 spec, which requires the header to be <tuple>.
+
+#if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+// Until version 4.3.2, gcc has a bug that causes <tr1/functional>,
+// which is #included by <tr1/tuple>, to not compile when RTTI is
+// disabled. _TR1_FUNCTIONAL is the header guard for
+// <tr1/functional>. Hence the following #define is a hack to prevent
+// <tr1/functional> from being included.
+#define _TR1_FUNCTIONAL 1
+#include <tr1/tuple>
+#undef _TR1_FUNCTIONAL // Allows the user to #include
+ // <tr1/functional> if he chooses to.
+#else
+#include <tr1/tuple> // NOLINT
+#endif // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+
+#else
+// If the compiler is not GCC 4.0+, we assume the user is using a
+// spec-conforming TR1 implementation.
+#include <tuple> // NOLINT
+#endif // GTEST_USE_OWN_TR1_TUPLE
+
+#endif // GTEST_HAS_TR1_TUPLE
+
+// Determines whether clone(2) is supported.
+// Usually it will only be available on Linux, excluding
+// Linux on the Itanium architecture.
+// Also see http://linux.die.net/man/2/clone.
+#ifndef GTEST_HAS_CLONE
+// The user didn't tell us, so we need to figure it out.
+
+#if GTEST_OS_LINUX && !defined(__ia64__)
+#define GTEST_HAS_CLONE 1
+#else
+#define GTEST_HAS_CLONE 0
+#endif // GTEST_OS_LINUX && !defined(__ia64__)
+
+#endif // GTEST_HAS_CLONE
+
+// Determines whether to support stream redirection. This is used to test
+// output correctness and to implement death tests.
+#if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
+#define GTEST_HAS_STREAM_REDIRECTION_ 1
+#endif // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
+
+// Determines whether to support death tests.
+// Google Test does not support death tests for VC 7.1 and earlier as
+// abort() in a VC 7.1 application compiled as GUI in debug config
+// pops up a dialog window that cannot be suppressed programmatically.
+#if (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+ (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
+ GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX)
+#define GTEST_HAS_DEATH_TEST 1
+#include <vector> // NOLINT
+#endif
+
+// We don't support MSVC 7.1 with exceptions disabled now. Therefore
+// all the compilers we care about are adequate for supporting
+// value-parameterized tests.
+#define GTEST_HAS_PARAM_TEST 1
+
+// Determines whether to support type-driven tests.
+
+// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,
+// Sun Pro CC, and IBM Visual Age support.
+#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \
+ defined(__IBMCPP__)
+#define GTEST_HAS_TYPED_TEST 1
+#define GTEST_HAS_TYPED_TEST_P 1
+#endif
+
+// Determines whether to support Combine(). This only makes sense when
+// value-parameterized tests are enabled. The implementation doesn't
+// work on Sun Studio since it doesn't understand templated conversion
+// operators.
+#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC)
+#define GTEST_HAS_COMBINE 1
+#endif
+
+// Determines whether the system compiler uses UTF-16 for encoding wide strings.
+#define GTEST_WIDE_STRING_USES_UTF16_ \
+ (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)
+
+// Defines some utility macros.
+
+// The GNU compiler emits a warning if nested "if" statements are followed by
+// an "else" statement and braces are not used to explicitly disambiguate the
+// "else" binding. This leads to problems with code like:
+//
+// if (gate)
+// ASSERT_*(condition) << "Some message";
+//
+// The "switch (0) case 0:" idiom is used to suppress this.
+#ifdef __INTEL_COMPILER
+#define GTEST_AMBIGUOUS_ELSE_BLOCKER_
+#else
+#define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: // NOLINT
+#endif
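+
+// For example, a macro whose body is
+//
+//   GTEST_AMBIGUOUS_ELSE_BLOCKER_
+//   if (condition) ; else OnFailure()
+//
+// can be used inside an unbraced "if" in user code without gcc
+// warning that a following "else" is ambiguous. (OnFailure here is
+// just an illustrative name.)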
+
+// Use this annotation at the end of a struct/class definition to
+// prevent the compiler from optimizing away instances that are never
+// used. This is useful when all interesting logic happens inside the
+// c'tor and / or d'tor. Example:
+//
+// struct Foo {
+// Foo() { ... }
+// } GTEST_ATTRIBUTE_UNUSED_;
+//
+// Also use it after a variable or parameter declaration to tell the
+// compiler the variable/parameter does not have to be used.
+#if defined(__GNUC__) && !defined(COMPILER_ICC)
+#define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+#else
+#define GTEST_ATTRIBUTE_UNUSED_
+#endif
+
+// A macro to disallow operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_ASSIGN_(type)\
+ void operator=(type const &)
+
+// A macro to disallow copy constructor and operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\
+ type(type const &);\
+ GTEST_DISALLOW_ASSIGN_(type)
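+
+// For example (Foo is an illustrative name):
+//
+//   class Foo {
+//    private:
+//     GTEST_DISALLOW_COPY_AND_ASSIGN_(Foo);
+//   };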
+
+// Tell the compiler to warn about unused return values for functions declared
+// with this macro. The macro should be used on function declarations
+// following the argument list:
+//
+// Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_;
+#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC)
+#define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result))
+#else
+#define GTEST_MUST_USE_RESULT_
+#endif // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC
+
+// Determine whether the compiler supports Microsoft's Structured Exception
+// Handling. This is supported by several Windows compilers but generally
+// does not exist on any other system.
+#ifndef GTEST_HAS_SEH
+// The user didn't tell us, so we need to figure it out.
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// These two compilers are known to support SEH.
+#define GTEST_HAS_SEH 1
+#else
+// Assume no SEH.
+#define GTEST_HAS_SEH 0
+#endif
+
+#endif // GTEST_HAS_SEH
+
+#ifdef _MSC_VER
+
+#if GTEST_LINKED_AS_SHARED_LIBRARY
+#define GTEST_API_ __declspec(dllimport)
+#elif GTEST_CREATE_SHARED_LIBRARY
+#define GTEST_API_ __declspec(dllexport)
+#endif
+
+#endif // _MSC_VER
+
+#ifndef GTEST_API_
+#define GTEST_API_
+#endif
+
+namespace testing {
+
+class Message;
+
+namespace internal {
+
+class String;
+
+typedef ::std::stringstream StrStream;
+
+// A helper for suppressing warnings on a constant condition. It just
+// returns 'condition'.
+GTEST_API_ bool IsTrue(bool condition);
+
+// Defines scoped_ptr.
+
+// This implementation of scoped_ptr is PARTIAL - it only contains
+// enough stuff to satisfy Google Test's need.
+template <typename T>
+class scoped_ptr {
+ public:
+ typedef T element_type;
+
+ explicit scoped_ptr(T* p = NULL) : ptr_(p) {}
+ ~scoped_ptr() { reset(); }
+
+ T& operator*() const { return *ptr_; }
+ T* operator->() const { return ptr_; }
+ T* get() const { return ptr_; }
+
+ T* release() {
+ T* const ptr = ptr_;
+ ptr_ = NULL;
+ return ptr;
+ }
+
+ void reset(T* p = NULL) {
+ if (p != ptr_) {
+ if (IsTrue(sizeof(T) > 0)) { // Makes sure T is a complete type.
+ delete ptr_;
+ }
+ ptr_ = p;
+ }
+ }
+ private:
+ T* ptr_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr);
+};
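+
+// A usage sketch (MyType and DoSomething are illustrative names):
+//
+//   scoped_ptr<MyType> p(new MyType);
+//   p->DoSomething();
+//   // The MyType object is deleted automatically when p goes out of
+//   // scope.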
+
+// Defines RE.
+
+// A simple C++ wrapper for <regex.h>. It uses the POSIX Extended
+// Regular Expression syntax.
+class GTEST_API_ RE {
+ public:
+ // A copy constructor is required by the Standard to initialize object
+ // references from r-values.
+ RE(const RE& other) { Init(other.pattern()); }
+
+ // Constructs an RE from a string.
+ RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT
+
+#if GTEST_HAS_GLOBAL_STRING
+ RE(const ::string& regex) { Init(regex.c_str()); } // NOLINT
+#endif // GTEST_HAS_GLOBAL_STRING
+
+ RE(const char* regex) { Init(regex); } // NOLINT
+ ~RE();
+
+ // Returns the string representation of the regex.
+ const char* pattern() const { return pattern_; }
+
+ // FullMatch(str, re) returns true iff regular expression re matches
+ // the entire str.
+ // PartialMatch(str, re) returns true iff regular expression re
+ // matches a substring of str (including str itself).
+ //
+ // TODO(wan@google.com): make FullMatch() and PartialMatch() work
+ // when str contains NUL characters.
+ static bool FullMatch(const ::std::string& str, const RE& re) {
+ return FullMatch(str.c_str(), re);
+ }
+ static bool PartialMatch(const ::std::string& str, const RE& re) {
+ return PartialMatch(str.c_str(), re);
+ }
+
+#if GTEST_HAS_GLOBAL_STRING
+ static bool FullMatch(const ::string& str, const RE& re) {
+ return FullMatch(str.c_str(), re);
+ }
+ static bool PartialMatch(const ::string& str, const RE& re) {
+ return PartialMatch(str.c_str(), re);
+ }
+#endif // GTEST_HAS_GLOBAL_STRING
+
+ static bool FullMatch(const char* str, const RE& re);
+ static bool PartialMatch(const char* str, const RE& re);
+
+ private:
+ void Init(const char* regex);
+
+ // We use a const char* instead of a string, as Google Test may be used
+ // where string is not available. We also do not use Google Test's own
+ // String type here, in order to simplify dependencies between the
+ // files.
+ const char* pattern_;
+ bool is_valid_;
+#if GTEST_USES_POSIX_RE
+ regex_t full_regex_; // For FullMatch().
+ regex_t partial_regex_; // For PartialMatch().
+#else // GTEST_USES_SIMPLE_RE
+ const char* full_pattern_; // For FullMatch();
+#endif
+
+ GTEST_DISALLOW_ASSIGN_(RE);
+};
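+
+// A usage sketch:
+//
+//   const RE re("a.*z");
+//   RE::FullMatch("abcz", re);           // true: the whole string matches.
+//   RE::PartialMatch("xx_abcz_xx", re);  // true: a substring matches.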
+
+// Defines logging utilities:
+// GTEST_LOG_(severity) - logs messages at the specified severity level. The
+// message itself is streamed into the macro.
+// LogToStderr() - directs all log messages to stderr.
+// FlushInfoLog() - flushes informational log messages.
+
+enum GTestLogSeverity {
+ GTEST_INFO,
+ GTEST_WARNING,
+ GTEST_ERROR,
+ GTEST_FATAL
+};
+
+// Formats log entry severity, provides a stream object for streaming the
+// log message, and terminates the message with a newline when going out of
+// scope.
+class GTEST_API_ GTestLog {
+ public:
+ GTestLog(GTestLogSeverity severity, const char* file, int line);
+
+ // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+ ~GTestLog();
+
+ ::std::ostream& GetStream() { return ::std::cerr; }
+
+ private:
+ const GTestLogSeverity severity_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog);
+};
+
+#define GTEST_LOG_(severity) \
+ ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \
+ __FILE__, __LINE__).GetStream()
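+
+// For example:
+//
+//   GTEST_LOG_(WARNING) << "Something unexpected happened.";
+//
+// streams the message at the WARNING severity level.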
+
+inline void LogToStderr() {}
+inline void FlushInfoLog() { fflush(NULL); }
+
+// INTERNAL IMPLEMENTATION - DO NOT USE.
+//
+// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
+// is not satisfied.
+// Synopsis:
+// GTEST_CHECK_(boolean_condition);
+// or
+// GTEST_CHECK_(boolean_condition) << "Additional message";
+//
+// This checks the condition, and if the condition is not satisfied
+// it prints a message about the condition violation, including the
+// condition itself and any additional message streamed into it, and
+// then it aborts the program. It does so irrespective of whether the
+// program is built in debug mode or not.
+#define GTEST_CHECK_(condition) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::IsTrue(condition)) \
+ ; \
+ else \
+ GTEST_LOG_(FATAL) << "Condition " #condition " failed. "
+
+// An all-mode assert to verify that the given POSIX-style function
+// call returns 0 (indicating success). Known limitation: this
+// doesn't expand to a balanced 'if' statement, so enclose the macro
+// in {} if you need to use it as the only statement in an 'if'
+// branch.
+#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \
+ if (const int gtest_error = (posix_call)) \
+ GTEST_LOG_(FATAL) << #posix_call << " failed with error " \
+ << gtest_error
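+
+// For example (a sketch; mutex_ is assumed to be a pthread_mutex_t):
+//
+//   GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
+//
+// aborts the program if pthread_mutex_lock() returns a non-zero error
+// code.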
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Downcasts the pointer of type Base to Derived.
+// Derived must be a subclass of Base. The parameter MUST
+// point to a class of type Derived, not any subclass of it.
+// When RTTI is available, the function performs a runtime
+// check to enforce this.
+template <class Derived, class Base>
+Derived* CheckedDowncastToActualType(Base* base) {
+#if GTEST_HAS_RTTI
+ GTEST_CHECK_(typeid(*base) == typeid(Derived));
+ return dynamic_cast<Derived*>(base); // NOLINT
+#else
+ return static_cast<Derived*>(base); // Poor man's downcast.
+#endif
+}
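+
+// A usage sketch (Base and Derived are illustrative names, with
+// Derived inheriting from Base):
+//
+//   Base* base = new Derived;
+//   Derived* derived = CheckedDowncastToActualType<Derived>(base);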
+
+#if GTEST_HAS_STREAM_REDIRECTION_
+
+// Defines the stderr capturer:
+// CaptureStdout - starts capturing stdout.
+// GetCapturedStdout - stops capturing stdout and returns the captured string.
+// CaptureStderr - starts capturing stderr.
+// GetCapturedStderr - stops capturing stderr and returns the captured string.
+//
+GTEST_API_ void CaptureStdout();
+GTEST_API_ String GetCapturedStdout();
+GTEST_API_ void CaptureStderr();
+GTEST_API_ String GetCapturedStderr();
+
+#endif // GTEST_HAS_STREAM_REDIRECTION_
+
+
+#if GTEST_HAS_DEATH_TEST
+
+// A copy of all command line arguments. Set by InitGoogleTest().
+extern ::std::vector<String> g_argvs;
+
+// GTEST_HAS_DEATH_TEST implies we have ::std::string.
+const ::std::vector<String>& GetArgvs();
+
+#endif // GTEST_HAS_DEATH_TEST
+
+// Defines synchronization primitives.
+
+#if GTEST_HAS_PTHREAD
+
+// Sleeps for (roughly) n milliseconds. This function is only for
+// testing Google Test's own constructs. Don't use it in user tests,
+// either directly or indirectly.
+inline void SleepMilliseconds(int n) {
+ const timespec time = {
+ 0, // 0 seconds.
+ n * 1000L * 1000L, // And n ms.
+ };
+ nanosleep(&time, NULL);
+}
+
+// Allows a controller thread to pause execution of newly created
+// threads until notified. Instances of this class must be created
+// and destroyed in the controller thread.
+//
+// This class is only for testing Google Test's own constructs. Do not
+// use it in user tests, either directly or indirectly.
+class Notification {
+ public:
+ Notification() : notified_(false) {}
+
+ // Notifies all threads created with this notification to start. Must
+ // be called from the controller thread.
+ void Notify() { notified_ = true; }
+
+ // Blocks until the controller thread notifies. Must be called from a test
+ // thread.
+ void WaitForNotification() {
+ while (!notified_) {
+ SleepMilliseconds(10);
+ }
+ }
+
+ private:
+ volatile bool notified_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
+};
+
+// As a C-function, ThreadFuncWithCLinkage cannot be templated itself.
+// Consequently, it cannot select a correct instantiation of ThreadWithParam
+// in order to call its Run(). Introducing ThreadWithParamBase as a
+// non-templated base class for ThreadWithParam allows us to bypass this
+// problem.
+class ThreadWithParamBase {
+ public:
+ virtual ~ThreadWithParamBase() {}
+ virtual void Run() = 0;
+};
+
+// pthread_create() accepts a pointer to a function type with the C linkage.
+// According to the Standard (7.5/1), function types with different linkages
+// are different even if they are otherwise identical. Some compilers (for
+// example, SunStudio) treat them as different types. Since class methods
+// cannot be defined with C-linkage we need to define a free C-function to
+// pass into pthread_create().
+extern "C" inline void* ThreadFuncWithCLinkage(void* thread) {
+ static_cast<ThreadWithParamBase*>(thread)->Run();
+ return NULL;
+}
+
+// Helper class for testing Google Test's multi-threading constructs.
+// To use it, write:
+//
+// void ThreadFunc(int param) { /* Do things with param */ }
+// Notification thread_can_start;
+// ...
+// // The thread_can_start parameter is optional; you can supply NULL.
+// ThreadWithParam<int> thread(&ThreadFunc, 5, &thread_can_start);
+// thread_can_start.Notify();
+//
+// These classes are only for testing Google Test's own constructs. Do
+// not use them in user tests, either directly or indirectly.
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+ typedef void (*UserThreadFunc)(T);
+
+ ThreadWithParam(
+ UserThreadFunc func, T param, Notification* thread_can_start)
+ : func_(func),
+ param_(param),
+ thread_can_start_(thread_can_start),
+ finished_(false) {
+ ThreadWithParamBase* const base = this;
+ // The thread can be created only after all fields except thread_
+ // have been initialized.
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base));
+ }
+ ~ThreadWithParam() { Join(); }
+
+ void Join() {
+ if (!finished_) {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0));
+ finished_ = true;
+ }
+ }
+
+ virtual void Run() {
+ if (thread_can_start_ != NULL)
+ thread_can_start_->WaitForNotification();
+ func_(param_);
+ }
+
+ private:
+ const UserThreadFunc func_; // User-supplied thread function.
+ const T param_; // User-supplied parameter to the thread function.
+ // When non-NULL, used to block execution until the controller thread
+ // notifies.
+ Notification* const thread_can_start_;
+ bool finished_; // true iff we know that the thread function has finished.
+ pthread_t thread_; // The native thread object.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+
+// gtest-port.h guarantees to #include <pthread.h> when GTEST_HAS_PTHREAD is
+// true.
+#include <pthread.h>
+
+// MutexBase and Mutex implement mutex on pthreads-based platforms. They
+// are used in conjunction with class MutexLock:
+//
+// Mutex mutex;
+// ...
+// MutexLock lock(&mutex); // Acquires the mutex and releases it at the end
+// // of the current scope.
+//
+// MutexBase implements behavior for both statically and dynamically
+// allocated mutexes. Do not use MutexBase directly. Instead, write
+// the following to define a static mutex:
+//
+// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex);
+//
+// You can forward declare a static mutex like this:
+//
+// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex);
+//
+// To create a dynamic mutex, just define an object of type Mutex.
+class MutexBase {
+ public:
+ // Acquires this mutex.
+ void Lock() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
+ owner_ = pthread_self();
+ }
+
+ // Releases this mutex.
+ void Unlock() {
+ // We don't protect writing to owner_ here, as it's the caller's
+ // responsibility to ensure that the current thread holds the
+ // mutex when this is called.
+ owner_ = 0;
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));
+ }
+
+ // Does nothing if the current thread holds the mutex. Otherwise, crashes
+ // with high probability.
+ void AssertHeld() const {
+ GTEST_CHECK_(owner_ == pthread_self())
+ << "The current thread is not holding the mutex @" << this;
+ }
+
+ // A static mutex may be used before main() is entered. It may even
+ // be used before the dynamic initialization stage. Therefore we
+ // must be able to initialize a static mutex object at link time.
+ // This means MutexBase has to be a POD and its member variables
+ // have to be public.
+ public:
+ pthread_mutex_t mutex_; // The underlying pthread mutex.
+ pthread_t owner_; // The thread holding the mutex; 0 means no one holds it.
+};
+
+// Forward-declares a static mutex.
+#define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::MutexBase mutex
+
+// Defines and statically (i.e. at link time) initializes a static mutex.
+#define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+ ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, 0 }
+
+// The Mutex class can only be used for mutexes created at runtime. It
+// shares its API with MutexBase otherwise.
+class Mutex : public MutexBase {
+ public:
+ Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+ owner_ = 0;
+ }
+ ~Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+// We cannot name this class MutexLock as the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+ explicit GTestMutexLock(MutexBase* mutex)
+ : mutex_(mutex) { mutex_->Lock(); }
+
+ ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+ MutexBase* const mutex_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
+
+// Helpers for ThreadLocal.
+
+// pthread_key_create() requires DeleteThreadLocalValue() to have
+// C-linkage. Therefore it cannot be templatized to access
+// ThreadLocal<T>. Hence the need for class
+// ThreadLocalValueHolderBase.
+class ThreadLocalValueHolderBase {
+ public:
+ virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Called by pthread to delete thread-local data stored by
+// pthread_setspecific().
+extern "C" inline void DeleteThreadLocalValue(void* value_holder) {
+ delete static_cast<ThreadLocalValueHolderBase*>(value_holder);
+}
+
+// Implements thread-local storage on pthreads-based systems.
+//
+// // Thread 1
+// ThreadLocal<int> tl(100); // 100 is the default value for each thread.
+//
+// // Thread 2
+// tl.set(150); // Changes the value for thread 2 only.
+// EXPECT_EQ(150, tl.get());
+//
+// // Thread 1
+// EXPECT_EQ(100, tl.get()); // In thread 1, tl has the original value.
+// tl.set(200);
+// EXPECT_EQ(200, tl.get());
+//
+// The template type argument T must have a public copy constructor.
+// In addition, the default ThreadLocal constructor requires T to have
+// a public default constructor.
+//
+// An object managed for a thread by a ThreadLocal instance is deleted
+// when the thread exits. Or, if the ThreadLocal instance dies in
+// that thread, when the ThreadLocal dies. It's the user's
+// responsibility to ensure that all other threads using a ThreadLocal
+// have exited when it dies, or the per-thread objects for those
+// threads will not be deleted.
+//
+// Google Test only uses global ThreadLocal objects. That means they
+// will die after main() has returned. Therefore, no per-thread
+// object managed by Google Test will be leaked as long as all threads
+// using Google Test have exited when main() returns.
+template <typename T>
+class ThreadLocal {
+ public:
+ ThreadLocal() : key_(CreateKey()),
+ default_() {}
+ explicit ThreadLocal(const T& value) : key_(CreateKey()),
+ default_(value) {}
+
+ ~ThreadLocal() {
+ // Destroys the managed object for the current thread, if any.
+ DeleteThreadLocalValue(pthread_getspecific(key_));
+
+ // Releases resources associated with the key. This will *not*
+ // delete managed objects for other threads.
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_));
+ }
+
+ T* pointer() { return GetOrCreateValue(); }
+ const T* pointer() const { return GetOrCreateValue(); }
+ const T& get() const { return *pointer(); }
+ void set(const T& value) { *pointer() = value; }
+
+ private:
+ // Holds a value of type T.
+ class ValueHolder : public ThreadLocalValueHolderBase {
+ public:
+ explicit ValueHolder(const T& value) : value_(value) {}
+
+ T* pointer() { return &value_; }
+
+ private:
+ T value_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
+ };
+
+ static pthread_key_t CreateKey() {
+ pthread_key_t key;
+ // When a thread exits, DeleteThreadLocalValue() will be called on
+ // the object managed for that thread.
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_key_create(&key, &DeleteThreadLocalValue));
+ return key;
+ }
+
+ T* GetOrCreateValue() const {
+ ThreadLocalValueHolderBase* const holder =
+ static_cast<ThreadLocalValueHolderBase*>(pthread_getspecific(key_));
+ if (holder != NULL) {
+ return CheckedDowncastToActualType<ValueHolder>(holder)->pointer();
+ }
+
+ ValueHolder* const new_holder = new ValueHolder(default_);
+ ThreadLocalValueHolderBase* const holder_base = new_holder;
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base));
+ return new_holder->pointer();
+ }
+
+ // A key pthreads uses for looking up per-thread values.
+ const pthread_key_t key_;
+ const T default_; // The default value for each thread.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
+};
+
+#define GTEST_IS_THREADSAFE 1
+
+#else // GTEST_HAS_PTHREAD
+
+// A dummy implementation of synchronization primitives (mutex, lock,
+// and thread-local variable). Necessary for compiling Google Test where
+// mutex is not supported - using Google Test in multiple threads is not
+// supported on such platforms.
+
+class Mutex {
+ public:
+ Mutex() {}
+ void AssertHeld() const {}
+};
+
+#define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::Mutex mutex
+
+#define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex
+
+class GTestMutexLock {
+ public:
+ explicit GTestMutexLock(Mutex*) {} // NOLINT
+};
+
+typedef GTestMutexLock MutexLock;
+
+template <typename T>
+class ThreadLocal {
+ public:
+ ThreadLocal() : value_() {}
+ explicit ThreadLocal(const T& value) : value_(value) {}
+ T* pointer() { return &value_; }
+ const T* pointer() const { return &value_; }
+ const T& get() const { return value_; }
+ void set(const T& value) { value_ = value; }
+ private:
+ T value_;
+};
+
+// The above synchronization primitives have dummy implementations.
+// Therefore Google Test is not thread-safe.
+#define GTEST_IS_THREADSAFE 0
+
+#endif // GTEST_HAS_PTHREAD
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+GTEST_API_ size_t GetThreadCount();
+
+// Passing non-POD classes through ellipsis (...) crashes the ARM
+// compiler and generates a warning in Sun Studio. The Nokia Symbian
+// and the IBM XL C/C++ compiler try to instantiate a copy constructor
+// for objects passed through ellipsis (...), failing for uncopyable
+// objects. We define this to ensure that only POD is passed through
+// ellipsis on these systems.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC)
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+#define GTEST_ELLIPSIS_NEEDS_POD_ 1
+#else
+#define GTEST_CAN_COMPARE_NULL 1
+#endif
+
+// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between
+// const T& and const T* in a function template. These compilers
+// _can_ decide between class template specializations for T and T*,
+// so a tr1::type_traits-like is_pointer works.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__)
+#define GTEST_NEEDS_IS_POINTER_ 1
+#endif
+
+template <bool bool_value>
+struct bool_constant {
+ typedef bool_constant<bool_value> type;
+ static const bool value = bool_value;
+};
+template <bool bool_value> const bool bool_constant<bool_value>::value;
+
+typedef bool_constant<false> false_type;
+typedef bool_constant<true> true_type;
+
+template <typename T>
+struct is_pointer : public false_type {};
+
+template <typename T>
+struct is_pointer<T*> : public true_type {};
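+
+// A minimal usage sketch (illustrative only) of the compile-time traits above:
+//
+//   const bool a = is_pointer<int>::value;   // false
+//   const bool b = is_pointer<int*>::value;  // true
+//   // false_type and true_type are just bool_constant<false> and
+//   // bool_constant<true>.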
+
+#if GTEST_OS_WINDOWS
+#define GTEST_PATH_SEP_ "\\"
+#define GTEST_HAS_ALT_PATH_SEP_ 1
+// The biggest signed integer type the compiler supports.
+typedef __int64 BiggestInt;
+#else
+#define GTEST_PATH_SEP_ "/"
+#define GTEST_HAS_ALT_PATH_SEP_ 0
+typedef long long BiggestInt; // NOLINT
+#endif // GTEST_OS_WINDOWS
+
+// The testing::internal::posix namespace holds wrappers for common
+// POSIX functions. These wrappers hide the differences between
+// Windows/MSVC and POSIX systems. Since some compilers define these
+// standard functions as macros, the wrapper cannot have the same name
+// as the wrapped function.
+
+namespace posix {
+
+// Functions with a different name on Windows.
+
+#if GTEST_OS_WINDOWS
+
+typedef struct _stat StatStruct;
+
+#ifdef __BORLANDC__
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+#else // !__BORLANDC__
+#if GTEST_OS_WINDOWS_MOBILE
+inline int IsATTY(int /* fd */) { return 0; }
+#else
+inline int IsATTY(int fd) { return _isatty(fd); }
+#endif // GTEST_OS_WINDOWS_MOBILE
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return _stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return _strdup(src); }
+#endif // __BORLANDC__
+
+#if GTEST_OS_WINDOWS_MOBILE
+inline int FileNo(FILE* file) { return reinterpret_cast<int>(_fileno(file)); }
+// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this
+// time and thus not defined there.
+#else
+inline int FileNo(FILE* file) { return _fileno(file); }
+inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); }
+inline int RmDir(const char* dir) { return _rmdir(dir); }
+inline bool IsDir(const StatStruct& st) {
+ return (_S_IFDIR & st.st_mode) != 0;
+}
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+#else
+
+typedef struct stat StatStruct;
+
+inline int FileNo(FILE* file) { return fileno(file); }
+#if defined OCTEON_BUILD
+inline int IsATTY(int /* fd */) { return 0; }
+inline int RmDir(const char* /* dir */) { return 0; }
+#else
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int RmDir(const char* dir) { return rmdir(dir); }
+#endif
+inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return strcasecmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }
+
+#endif // GTEST_OS_WINDOWS
+
+// Functions deprecated by MSVC 8.0.
+
+#ifdef _MSC_VER
+// Temporarily disable warning 4996 (deprecated function).
+#pragma warning(push)
+#pragma warning(disable:4996)
+#endif
+
+inline const char* StrNCpy(char* dest, const char* src, size_t n) {
+ return strncpy(dest, src, n);
+}
+
+// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and
+// StrError() aren't needed on Windows CE at this time and thus not
+// defined there.
+
+#if !GTEST_OS_WINDOWS_MOBILE
+#ifdef OCTEON_BUILD
+inline int ChDir(const char* /* dir */) { return 0; }
+#else
+inline int ChDir(const char* dir) { return chdir(dir); }
+#endif
+#endif
+inline FILE* FOpen(const char* path, const char* mode) {
+ return fopen(path, mode);
+}
+#if !GTEST_OS_WINDOWS_MOBILE
+inline FILE *FReopen(const char* path, const char* mode, FILE* stream) {
+ return freopen(path, mode, stream);
+}
+inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); }
+#endif
+inline int FClose(FILE* fp) { return fclose(fp); }
+#if !GTEST_OS_WINDOWS_MOBILE
+#ifdef OCTEON_BUILD
+inline int Read(int /* fd */, void* /* buf */, unsigned int /* count */) {
+ return 0;
+}
+inline int Write(int /* fd */, const void* /* buf */, unsigned int /* count */) {
+ return 0;
+}
+inline int Close(int /* fd */) { return 0; }
+
+#else
+inline int Read(int fd, void* buf, unsigned int count) {
+ return static_cast<int>(read(fd, buf, count));
+}
+inline int Write(int fd, const void* buf, unsigned int count) {
+ return static_cast<int>(write(fd, buf, count));
+}
+inline int Close(int fd) { return close(fd); }
+inline const char* StrError(int errnum) { return strerror(errnum); }
+#endif
+#endif
+inline const char* GetEnv(const char* name) {
+#if GTEST_OS_WINDOWS_MOBILE
+ // We are on Windows CE, which has no environment variables.
+ return NULL;
+#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)
+ // Environment variables which we programmatically clear will be set to the
+ // empty string rather than unset (NULL). Handle that case.
+ const char* const env = getenv(name);
+ return (env != NULL && env[0] != '\0') ? env : NULL;
+#else
+ return getenv(name);
+#endif
+}
+
+#ifdef _MSC_VER
+#pragma warning(pop) // Restores the warning state.
+#endif
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Windows CE has no C library. The abort() function is used in
+// several places in Google Test. This implementation provides a reasonable
+// imitation of standard behaviour.
+void Abort();
+#else
+inline void Abort() { abort(); }
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+} // namespace posix
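+
+// A minimal usage sketch (illustrative only; "data.txt" is a hypothetical
+// file name) showing how the wrappers keep call sites platform-neutral:
+//
+//   FILE* const f = posix::FOpen("data.txt", "r");
+//   if (f != NULL) {
+//     posix::FClose(f);
+//   }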
+
+// The maximum number a BiggestInt can represent. This definition
+// works no matter BiggestInt is represented in one's complement or
+// two's complement.
+//
+// We cannot rely on numeric_limits in STL, as __int64 and long long
+// are not part of standard C++ and numeric_limits doesn't need to be
+// defined for them.
+const BiggestInt kMaxBiggestInt =
+ ~(static_cast<BiggestInt>(1) << (8*sizeof(BiggestInt) - 1));
+
+// This template class serves as a compile-time function from size to
+// type. It maps a size in bytes to a primitive type with that
+// size. e.g.
+//
+// TypeWithSize<4>::UInt
+//
+// is typedef-ed to be unsigned int (unsigned integer made up of 4
+// bytes).
+//
+// Such functionality should belong to STL, but I cannot find it
+// there.
+//
+// Google Test uses this class in the implementation of floating-point
+// comparison.
+//
+// For now it only handles UInt (unsigned int) as that's all Google Test
+// needs. Other types can be easily added in the future if need
+// arises.
+template <size_t size>
+class TypeWithSize {
+ public:
+ // This prevents the user from using TypeWithSize<N> with incorrect
+ // values of N.
+ typedef void UInt;
+};
+
+// The specialization for size 4.
+template <>
+class TypeWithSize<4> {
+ public:
+ // unsigned int has size 4 in both gcc and MSVC.
+ //
+ // As base/basictypes.h doesn't compile on Windows, we cannot use
+ // uint32, uint64, and etc here.
+ typedef int Int;
+ typedef unsigned int UInt;
+};
+
+// The specialization for size 8.
+template <>
+class TypeWithSize<8> {
+ public:
+#if GTEST_OS_WINDOWS
+ typedef __int64 Int;
+ typedef unsigned __int64 UInt;
+#else
+ typedef long long Int; // NOLINT
+ typedef unsigned long long UInt; // NOLINT
+#endif // GTEST_OS_WINDOWS
+};
+
+// Integer types of known sizes.
+typedef TypeWithSize<4>::Int Int32;
+typedef TypeWithSize<4>::UInt UInt32;
+typedef TypeWithSize<8>::Int Int64;
+typedef TypeWithSize<8>::UInt UInt64;
+typedef TypeWithSize<8>::Int TimeInMillis; // Represents time in milliseconds.
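+
+// A minimal usage sketch (illustrative only) of the fixed-width typedefs:
+//
+//   Int32  i = -1;  // exactly 4 bytes on all supported platforms.
+//   UInt64 u = 0;   // exactly 8 bytes on all supported platforms.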
+
+// Utilities for command line flags and environment variables.
+
+// Macro for referencing flags.
+#define GTEST_FLAG(name) FLAGS_gtest_##name
+
+// Macros for declaring flags.
+#define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name)
+#define GTEST_DECLARE_int32_(name) \
+ GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name)
+#define GTEST_DECLARE_string_(name) \
+ GTEST_API_ extern ::testing::internal::String GTEST_FLAG(name)
+
+// Macros for defining flags.
+#define GTEST_DEFINE_bool_(name, default_val, doc) \
+ GTEST_API_ bool GTEST_FLAG(name) = (default_val)
+#define GTEST_DEFINE_int32_(name, default_val, doc) \
+ GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)
+#define GTEST_DEFINE_string_(name, default_val, doc) \
+ GTEST_API_ ::testing::internal::String GTEST_FLAG(name) = (default_val)
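+
+// A minimal usage sketch (illustrative only; the flag name "demo" is
+// hypothetical) of how the declare/define macros are paired:
+//
+//   GTEST_DECLARE_bool_(demo);                     // in a header
+//   GTEST_DEFINE_bool_(demo, false, "demo flag");  // in a .cc file
+//   // GTEST_FLAG(demo) then refers to FLAGS_gtest_demo.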
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes the result
+// to *value and returns true; otherwise leaves *value unchanged and returns
+// false.
+// TODO(chandlerc): Find a better way to refactor flag and environment parsing
+// out of both gtest-port.cc and gtest.cc to avoid exporting this utility
+// function.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value);
+
+// Parses a bool/Int32/string from the environment variable
+// corresponding to the given Google Test flag.
+bool BoolFromGTestEnv(const char* flag, bool default_val);
+GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val);
+const char* StringFromGTestEnv(const char* flag, const char* default_val);
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+#if GTEST_OS_LINUX
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#endif // GTEST_OS_LINUX
+
+#include <ctype.h>
+#include <string.h>
+#include <iomanip>
+#include <limits>
+#include <set>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares the String class and functions used internally by
+// Google Test.  They are subject to change without notice. They should not be used
+// by code external to Google Test.
+//
+// This header file is #included by <gtest/internal/gtest-internal.h>.
+// It should not be #included by other files.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+
+#ifdef __BORLANDC__
+// string.h is not guaranteed to provide strcpy on C++ Builder.
+#include <mem.h>
+#endif
+
+#include <string.h>
+
+#include <string>
+
+namespace testing {
+namespace internal {
+
+// String - a UTF-8 string class.
+//
+// For historic reasons, we don't use std::string.
+//
+// TODO(wan@google.com): replace this class with std::string or
+// implement it in terms of the latter.
+//
+// Note that String can represent both NULL and the empty string,
+// while std::string cannot represent NULL.
+//
+// NULL and the empty string are considered different. NULL is less
+// than anything (including the empty string) except itself.
+//
+// This class only provides minimum functionality necessary for
+// implementing Google Test. We do not intend to implement a full-fledged
+// string class here.
+//
+// Since the purpose of this class is to provide a substitute for
+// std::string on platforms where it cannot be used, we define a copy
+// constructor and assignment operators such that we don't need
+// conditional compilation in a lot of places.
+//
+// In order to make the representation efficient, the d'tor of String
+// is not virtual. Therefore DO NOT INHERIT FROM String.
+class GTEST_API_ String {
+ public:
+ // Static utility methods
+
+ // Returns the input enclosed in double quotes if it's not NULL;
+ // otherwise returns "(null)". For example, "\"Hello\"" is returned
+ // for input "Hello".
+ //
+ // This is useful for printing a C string in the syntax of a literal.
+ //
+ // Known issue: escape sequences are not handled yet.
+ static String ShowCStringQuoted(const char* c_str);
+
+ // Clones a 0-terminated C string, allocating memory using new. The
+ // caller is responsible for deleting the return value using
+ // delete[]. Returns the cloned string, or NULL if the input is
+ // NULL.
+ //
+ // This is different from strdup() in string.h, which allocates
+ // memory using malloc().
+ static const char* CloneCString(const char* c_str);
+
+#if GTEST_OS_WINDOWS_MOBILE
+ // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
+ // able to pass strings to Win32 APIs on CE we need to convert them
+ // to 'Unicode', UTF-16.
+
+ // Creates a UTF-16 wide string from the given ANSI string, allocating
+ // memory using new. The caller is responsible for deleting the return
+ // value using delete[]. Returns the wide string, or NULL if the
+ // input is NULL.
+ //
+ // The wide string is created using the ANSI codepage (CP_ACP) to
+ // match the behaviour of the ANSI versions of Win32 calls and the
+ // C runtime.
+ static LPCWSTR AnsiToUtf16(const char* c_str);
+
+ // Creates an ANSI string from the given wide string, allocating
+ // memory using new. The caller is responsible for deleting the return
+ // value using delete[]. Returns the ANSI string, or NULL if the
+ // input is NULL.
+ //
+ // The returned string is created using the ANSI codepage (CP_ACP) to
+ // match the behaviour of the ANSI versions of Win32 calls and the
+ // C runtime.
+ static const char* Utf16ToAnsi(LPCWSTR utf16_str);
+#endif
+
+ // Compares two C strings. Returns true iff they have the same content.
+ //
+ // Unlike strcmp(), this function can handle NULL argument(s). A
+ // NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool CStringEquals(const char* lhs, const char* rhs);
+
+ // Converts a wide C string to a String using the UTF-8 encoding.
+ // NULL will be converted to "(null)". If an error occurred during
+ // the conversion, "(failed to convert from wide string)" is
+ // returned.
+ static String ShowWideCString(const wchar_t* wide_c_str);
+
+ // Similar to ShowWideCString(), except that this function encloses
+ // the converted string in double quotes.
+ static String ShowWideCStringQuoted(const wchar_t* wide_c_str);
+
+ // Compares two wide C strings. Returns true iff they have the same
+ // content.
+ //
+ // Unlike wcscmp(), this function can handle NULL argument(s). A
+ // NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);
+
+ // Compares two C strings, ignoring case. Returns true iff they
+ // have the same content.
+ //
+ // Unlike strcasecmp(), this function can handle NULL argument(s).
+ // A NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool CaseInsensitiveCStringEquals(const char* lhs,
+ const char* rhs);
+
+ // Compares two wide C strings, ignoring case. Returns true iff they
+ // have the same content.
+ //
+ // Unlike wcscasecmp(), this function can handle NULL argument(s).
+ // A NULL C string is considered different to any non-NULL wide C string,
+ // including the empty string.
+ // NB: The implementations on different platforms slightly differ.
+  // On Windows, this method uses _wcsicmp which compares according to LC_CTYPE
+ // environment variable. On GNU platform this method uses wcscasecmp
+ // which compares according to LC_CTYPE category of the current locale.
+ // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the
+ // current locale.
+ static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
+ const wchar_t* rhs);
+
+ // Formats a list of arguments to a String, using the same format
+ // spec string as for printf.
+ //
+ // We do not use the StringPrintf class as it is not universally
+ // available.
+ //
+ // The result is limited to 4096 characters (including the tailing
+ // 0). If 4096 characters are not enough to format the input,
+ // "<buffer exceeded>" is returned.
+ static String Format(const char* format, ...);
+
+ // C'tors
+
+ // The default c'tor constructs a NULL string.
+ String() : c_str_(NULL), length_(0) {}
+
+ // Constructs a String by cloning a 0-terminated C string.
+ String(const char* a_c_str) { // NOLINT
+ if (a_c_str == NULL) {
+ c_str_ = NULL;
+ length_ = 0;
+ } else {
+ ConstructNonNull(a_c_str, strlen(a_c_str));
+ }
+ }
+
+ // Constructs a String by copying a given number of chars from a
+ // buffer. E.g. String("hello", 3) creates the string "hel",
+ // String("a\0bcd", 4) creates "a\0bc", String(NULL, 0) creates "",
+  // and String(NULL, 1) results in an access violation.
+ String(const char* buffer, size_t a_length) {
+ ConstructNonNull(buffer, a_length);
+ }
+
+ // The copy c'tor creates a new copy of the string. The two
+ // String objects do not share content.
+ String(const String& str) : c_str_(NULL), length_(0) { *this = str; }
+
+ // D'tor. String is intended to be a final class, so the d'tor
+ // doesn't need to be virtual.
+ ~String() { delete[] c_str_; }
+
+ // Allows a String to be implicitly converted to an ::std::string or
+ // ::string, and vice versa. Converting a String containing a NULL
+ // pointer to ::std::string or ::string is undefined behavior.
+ // Converting a ::std::string or ::string containing an embedded NUL
+ // character to a String will result in the prefix up to the first
+ // NUL character.
+ String(const ::std::string& str) {
+ ConstructNonNull(str.c_str(), str.length());
+ }
+
+ operator ::std::string() const { return ::std::string(c_str(), length()); }
+
+#if GTEST_HAS_GLOBAL_STRING
+ String(const ::string& str) {
+ ConstructNonNull(str.c_str(), str.length());
+ }
+
+ operator ::string() const { return ::string(c_str(), length()); }
+#endif // GTEST_HAS_GLOBAL_STRING
+
+ // Returns true iff this is an empty string (i.e. "").
+ bool empty() const { return (c_str() != NULL) && (length() == 0); }
+
+ // Compares this with another String.
+ // Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0
+ // if this is greater than rhs.
+ int Compare(const String& rhs) const;
+
+ // Returns true iff this String equals the given C string. A NULL
+ // string and a non-NULL string are considered not equal.
+ bool operator==(const char* a_c_str) const { return Compare(a_c_str) == 0; }
+
+ // Returns true iff this String is less than the given String. A
+ // NULL string is considered less than "".
+ bool operator<(const String& rhs) const { return Compare(rhs) < 0; }
+
+ // Returns true iff this String doesn't equal the given C string. A NULL
+ // string and a non-NULL string are considered not equal.
+ bool operator!=(const char* a_c_str) const { return !(*this == a_c_str); }
+
+ // Returns true iff this String ends with the given suffix. *Any*
+ // String is considered to end with a NULL or empty suffix.
+ bool EndsWith(const char* suffix) const;
+
+ // Returns true iff this String ends with the given suffix, not considering
+ // case. Any String is considered to end with a NULL or empty suffix.
+ bool EndsWithCaseInsensitive(const char* suffix) const;
+
+ // Returns the length of the encapsulated string, or 0 if the
+ // string is NULL.
+ size_t length() const { return length_; }
+
+ // Gets the 0-terminated C string this String object represents.
+ // The String object still owns the string. Therefore the caller
+ // should NOT delete the return value.
+ const char* c_str() const { return c_str_; }
+
+ // Assigns a C string to this object. Self-assignment works.
+ const String& operator=(const char* a_c_str) {
+ return *this = String(a_c_str);
+ }
+
+ // Assigns a String object to this object. Self-assignment works.
+ const String& operator=(const String& rhs) {
+ if (this != &rhs) {
+ delete[] c_str_;
+ if (rhs.c_str() == NULL) {
+ c_str_ = NULL;
+ length_ = 0;
+ } else {
+ ConstructNonNull(rhs.c_str(), rhs.length());
+ }
+ }
+
+ return *this;
+ }
+
+ private:
+ // Constructs a non-NULL String from the given content. This
+ // function can only be called when data_ has not been allocated.
+ // ConstructNonNull(NULL, 0) results in an empty string ("").
+ // ConstructNonNull(NULL, non_zero) is undefined behavior.
+ void ConstructNonNull(const char* buffer, size_t a_length) {
+ char* const str = new char[a_length + 1];
+ memcpy(str, buffer, a_length);
+ str[a_length] = '\0';
+ c_str_ = str;
+ length_ = a_length;
+ }
+
+ const char* c_str_;
+ size_t length_;
+}; // class String
+
+// Streams a String to an ostream. Each '\0' character in the String
+// is replaced with "\\0".
+inline ::std::ostream& operator<<(::std::ostream& os, const String& str) {
+ if (str.c_str() == NULL) {
+ os << "(null)";
+ } else {
+ const char* const c_str = str.c_str();
+ for (size_t i = 0; i != str.length(); i++) {
+ if (c_str[i] == '\0') {
+ os << "\\0";
+ } else {
+ os << c_str[i];
+ }
+ }
+ }
+ return os;
+}
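+
+// A minimal usage sketch (illustrative only): unlike std::string, String can
+// represent NULL, and NULL compares as different from the empty string.
+//
+//   const String null_str;       // c_str() == NULL, length() == 0
+//   const String empty_str("");  // c_str() != NULL, length() == 0
+//   // (null_str == "") is false; (empty_str == "") is true.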
+
+// Gets the content of the StrStream's buffer as a String. Each '\0'
+// character in the buffer is replaced with "\\0".
+GTEST_API_ String StrStreamToString(StrStream* stream);
+
+// Converts a streamable value to a String. A NULL pointer is
+// converted to "(null)". When the input value is a ::string,
+// ::std::string, ::wstring, or ::std::wstring object, each NUL
+// character in it is replaced with "\\0".
+
+// Declared here but defined in gtest.h, so that it has access
+// to the definition of the Message class, required by the ARM
+// compiler.
+template <typename T>
+String StreamableToString(const T& streamable);
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keith.ray@gmail.com (Keith Ray)
+//
+// Google Test filepath utilities
+//
+// This header file declares classes and functions used internally by
+// Google Test. They are subject to change without notice.
+//
+// This file is #included in <gtest/internal/gtest-internal.h>.
+// Do not include this header file separately!
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+
+
+namespace testing {
+namespace internal {
+
+// FilePath - a class for file and directory pathname manipulation which
+// handles platform-specific conventions (like the pathname separator).
+// Used for helper functions for naming files in a directory for xml output.
+// Except for Set methods, all methods are const or static, which provides an
+// "immutable value object" -- useful for peace of mind.
+// A FilePath with a value ending in a path separator ("like/this/") represents
+// a directory, otherwise it is assumed to represent a file. In either case,
+// it may or may not represent an actual file or directory in the file system.
+// Names are NOT checked for syntax correctness -- no checking for illegal
+// characters, malformed paths, etc.
+
+class GTEST_API_ FilePath {
+ public:
+ FilePath() : pathname_("") { }
+ FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { }
+
+ explicit FilePath(const char* pathname) : pathname_(pathname) {
+ Normalize();
+ }
+
+ explicit FilePath(const String& pathname) : pathname_(pathname) {
+ Normalize();
+ }
+
+ FilePath& operator=(const FilePath& rhs) {
+ Set(rhs);
+ return *this;
+ }
+
+ void Set(const FilePath& rhs) {
+ pathname_ = rhs.pathname_;
+ }
+
+ String ToString() const { return pathname_; }
+ const char* c_str() const { return pathname_.c_str(); }
+
+ // Returns the current working directory, or "" if unsuccessful.
+ static FilePath GetCurrentDir();
+
+ // Given directory = "dir", base_name = "test", number = 0,
+ // extension = "xml", returns "dir/test.xml". If number is greater
+ // than zero (e.g., 12), returns "dir/test_12.xml".
+ // On Windows platform, uses \ as the separator rather than /.
+ static FilePath MakeFileName(const FilePath& directory,
+ const FilePath& base_name,
+ int number,
+ const char* extension);
+
+ // Given directory = "dir", relative_path = "test.xml",
+ // returns "dir/test.xml".
+ // On Windows, uses \ as the separator rather than /.
+ static FilePath ConcatPaths(const FilePath& directory,
+ const FilePath& relative_path);
+
+ // Returns a pathname for a file that does not currently exist. The pathname
+ // will be directory/base_name.extension or
+ // directory/base_name_<number>.extension if directory/base_name.extension
+ // already exists. The number will be incremented until a pathname is found
+ // that does not already exist.
+ // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
+ // There could be a race condition if two or more processes are calling this
+ // function at the same time -- they could both pick the same filename.
+ static FilePath GenerateUniqueFileName(const FilePath& directory,
+ const FilePath& base_name,
+ const char* extension);
+
+ // Returns true iff the path is NULL or "".
+ bool IsEmpty() const { return c_str() == NULL || *c_str() == '\0'; }
+
+ // If input name has a trailing separator character, removes it and returns
+ // the name, otherwise return the name string unmodified.
+ // On Windows platform, uses \ as the separator, other platforms use /.
+ FilePath RemoveTrailingPathSeparator() const;
+
+ // Returns a copy of the FilePath with the directory part removed.
+ // Example: FilePath("path/to/file").RemoveDirectoryName() returns
+ // FilePath("file"). If there is no directory part ("just_a_file"), it returns
+ // the FilePath unmodified. If there is no file part ("just_a_dir/") it
+ // returns an empty FilePath ("").
+ // On Windows platform, '\' is the path separator, otherwise it is '/'.
+ FilePath RemoveDirectoryName() const;
+
+ // RemoveFileName returns the directory path with the filename removed.
+ // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+ // If the FilePath is "a_file" or "/a_file", RemoveFileName returns
+ // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+ // not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+ // On Windows platform, '\' is the path separator, otherwise it is '/'.
+ FilePath RemoveFileName() const;
+
+ // Returns a copy of the FilePath with the case-insensitive extension removed.
+ // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+ // FilePath("dir/file"). If a case-insensitive extension is not
+ // found, returns a copy of the original FilePath.
+ FilePath RemoveExtension(const char* extension) const;
+
+ // Creates directories so that path exists. Returns true if successful or if
+ // the directories already exist; returns false if unable to create
+ // directories for any reason. Will also return false if the FilePath does
+ // not represent a directory (that is, it doesn't end with a path separator).
+ bool CreateDirectoriesRecursively() const;
+
+ // Create the directory so that path exists. Returns true if successful or
+ // if the directory already exists; returns false if unable to create the
+ // directory for any reason, including if the parent directory does not
+ // exist. Not named "CreateDirectory" because that's a macro on Windows.
+ bool CreateFolder() const;
+
+ // Returns true if FilePath describes something in the file-system,
+ // either a file, directory, or whatever, and that something exists.
+ bool FileOrDirectoryExists() const;
+
+ // Returns true if pathname describes a directory in the file-system
+ // that exists.
+ bool DirectoryExists() const;
+
+ // Returns true if FilePath ends with a path separator, which indicates that
+ // it is intended to represent a directory. Returns false otherwise.
+ // This does NOT check that a directory (or file) actually exists.
+ bool IsDirectory() const;
+
+ // Returns true if pathname describes a root directory. (Windows has one
+ // root directory per disk drive.)
+ bool IsRootDirectory() const;
+
+ // Returns true if pathname describes an absolute path.
+ bool IsAbsolutePath() const;
+
+ private:
+ // Replaces multiple consecutive separators with a single separator.
+ // For example, "bar///foo" becomes "bar/foo". Does not eliminate other
+ // redundancies that might be in a pathname involving "." or "..".
+ //
+ // A pathname with multiple consecutive separators may occur either through
+ // user error or as a result of some scripts or APIs that generate a pathname
+ // with a trailing separator. On other platforms the same API or script
+ // may NOT generate a pathname with a trailing "/". Then elsewhere that
+ // pathname may have another "/" and pathname components added to it,
+ // without checking for the separator already being there.
+ // The script language and operating system may allow paths like "foo//bar"
+ // but some of the functions in FilePath will not handle that correctly. In
+ // particular, RemoveTrailingPathSeparator() only removes one separator, and
+ // it is called in CreateDirectoriesRecursively() assuming that it will change
+ // a pathname from directory syntax (trailing separator) to filename syntax.
+ //
+ // On Windows this method also replaces the alternate path separator '/' with
+ // the primary path separator '\\', so that for example "bar\\/\\foo" becomes
+ // "bar\\foo".
+
+ void Normalize();
+
+  // Returns a pointer to the last occurrence of a valid path separator in
+ // the FilePath. On Windows, for example, both '/' and '\' are valid path
+ // separators. Returns NULL if no path separator was found.
+ const char* FindLastPathSeparator() const;
+
+ String pathname_;
+}; // class FilePath
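+
+// A minimal usage sketch (illustrative only; the directory name "out" is
+// hypothetical):
+//
+//   const FilePath xml = FilePath::MakeFileName(
+//       FilePath("out"), FilePath("test"), 0, "xml");
+//   // xml.ToString() is "out/test.xml" ("out\\test.xml" on Windows).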
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+// This file was GENERATED by command:
+// pump.py gtest-type-util.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Type utilities needed for implementing typed and type-parameterized
+// tests. This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+// Currently we support at most 50 types in a list, and at most 50
+// type-parameterized tests in one type-parameterized test case.
+// Please contact googletestframework@googlegroups.com if you need
+// more.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// #ifdef __GNUC__ is too general here. It is possible to use gcc without using
+// libstdc++ (which is where cxxabi.h comes from).
+#ifdef __GLIBCXX__
+#include <cxxabi.h>
+#endif // __GLIBCXX__
+
+namespace testing {
+namespace internal {
+
+// AssertTypeEq<T1, T2>::type is defined iff T1 and T2 are the same
+// type. This can be used as a compile-time assertion to ensure that
+// two types are equal.
+
+template <typename T1, typename T2>
+struct AssertTypeEq;
+
+template <typename T>
+struct AssertTypeEq<T, T> {
+ typedef bool type;
+};
+
+// GetTypeName<T>() returns a human-readable name of type T.
+template <typename T>
+String GetTypeName() {
+#if GTEST_HAS_RTTI
+
+ const char* const name = typeid(T).name();
+#ifdef __GLIBCXX__
+ int status = 0;
+ // gcc's implementation of typeid(T).name() mangles the type name,
+ // so we have to demangle it.
+ char* const readable_name = abi::__cxa_demangle(name, 0, 0, &status);
+ const String name_str(status == 0 ? readable_name : name);
+ free(readable_name);
+ return name_str;
+#else
+ return name;
+#endif // __GLIBCXX__
+
+#else
+ return "<type>";
+#endif // GTEST_HAS_RTTI
+}
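+
+// A minimal usage sketch (illustrative only):
+//
+//   const String name = GetTypeName<int>();  // "int" with RTTI, "<type>" without.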
+
+// A unique type used as the default value for the arguments of class
+// template Types. This allows us to simulate variadic templates
+// (e.g. Types<int>, Type<int, double>, and etc), which C++ doesn't
+// support directly.
+struct None {};
+
+// The following family of struct and struct templates are used to
+// represent type lists. In particular, TypesN<T1, T2, ..., TN>
+// represents a type list with N types (T1, T2, ..., and TN) in it.
+// Except for Types0, every struct in the family has two member types:
+// Head for the first type in the list, and Tail for the rest of the
+// list.
+
+// The empty type list.
+struct Types0 {};
+
+// Type lists of length 1, 2, 3, and so on.
+
+template <typename T1>
+struct Types1 {
+ typedef T1 Head;
+ typedef Types0 Tail;
+};
+template <typename T1, typename T2>
+struct Types2 {
+ typedef T1 Head;
+ typedef Types1<T2> Tail;
+};
+
+template <typename T1, typename T2, typename T3>
+struct Types3 {
+ typedef T1 Head;
+ typedef Types2<T2, T3> Tail;
+};
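+
+// A minimal usage sketch (illustrative only) of walking a type list via
+// Head/Tail:
+//
+//   typedef Types3<int, char, double> List;
+//   // List::Head             is int
+//   // List::Tail::Head       is char
+//   // List::Tail::Tail::Head is double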
+
+template <typename T1, typename T2, typename T3, typename T4>
+struct Types4 {
+ typedef T1 Head;
+ typedef Types3<T2, T3, T4> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+struct Types5 {
+ typedef T1 Head;
+ typedef Types4<T2, T3, T4, T5> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+struct Types6 {
+ typedef T1 Head;
+ typedef Types5<T2, T3, T4, T5, T6> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+struct Types7 {
+ typedef T1 Head;
+ typedef Types6<T2, T3, T4, T5, T6, T7> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+struct Types8 {
+ typedef T1 Head;
+ typedef Types7<T2, T3, T4, T5, T6, T7, T8> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+struct Types9 {
+ typedef T1 Head;
+ typedef Types8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+struct Types10 {
+ typedef T1 Head;
+ typedef Types9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+struct Types11 {
+ typedef T1 Head;
+ typedef Types10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+struct Types12 {
+ typedef T1 Head;
+ typedef Types11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+struct Types13 {
+ typedef T1 Head;
+ typedef Types12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+struct Types14 {
+ typedef T1 Head;
+ typedef Types13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+struct Types15 {
+ typedef T1 Head;
+ typedef Types14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+struct Types16 {
+ typedef T1 Head;
+ typedef Types15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+struct Types17 {
+ typedef T1 Head;
+ typedef Types16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+struct Types18 {
+ typedef T1 Head;
+ typedef Types17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+struct Types19 {
+ typedef T1 Head;
+ typedef Types18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+struct Types20 {
+ typedef T1 Head;
+ typedef Types19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+struct Types21 {
+ typedef T1 Head;
+ typedef Types20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+struct Types22 {
+ typedef T1 Head;
+ typedef Types21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+struct Types23 {
+ typedef T1 Head;
+ typedef Types22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+struct Types24 {
+ typedef T1 Head;
+ typedef Types23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+struct Types25 {
+ typedef T1 Head;
+ typedef Types24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+struct Types26 {
+ typedef T1 Head;
+ typedef Types25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+struct Types27 {
+ typedef T1 Head;
+ typedef Types26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+struct Types28 {
+ typedef T1 Head;
+ typedef Types27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+struct Types29 {
+ typedef T1 Head;
+ typedef Types28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+struct Types30 {
+ typedef T1 Head;
+ typedef Types29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+struct Types31 {
+ typedef T1 Head;
+ typedef Types30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+struct Types32 {
+ typedef T1 Head;
+ typedef Types31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+struct Types33 {
+ typedef T1 Head;
+ typedef Types32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+struct Types34 {
+ typedef T1 Head;
+ typedef Types33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+struct Types35 {
+ typedef T1 Head;
+ typedef Types34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+struct Types36 {
+ typedef T1 Head;
+ typedef Types35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+struct Types37 {
+ typedef T1 Head;
+ typedef Types36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+struct Types38 {
+ typedef T1 Head;
+ typedef Types37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+struct Types39 {
+ typedef T1 Head;
+ typedef Types38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+struct Types40 {
+ typedef T1 Head;
+ typedef Types39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+struct Types41 {
+ typedef T1 Head;
+ typedef Types40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+struct Types42 {
+ typedef T1 Head;
+ typedef Types41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+struct Types43 {
+ typedef T1 Head;
+ typedef Types42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+struct Types44 {
+ typedef T1 Head;
+ typedef Types43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+struct Types45 {
+ typedef T1 Head;
+ typedef Types44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+struct Types46 {
+ typedef T1 Head;
+ typedef Types45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+struct Types47 {
+ typedef T1 Head;
+ typedef Types46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+struct Types48 {
+ typedef T1 Head;
+ typedef Types47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+struct Types49 {
+ typedef T1 Head;
+ typedef Types48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+struct Types50 {
+ typedef T1 Head;
+ typedef Types49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> Tail;
+};
+
+
+} // namespace internal
+
+// We don't want to require the users to write TypesN<...> directly,
+// as that would require them to count the length. Types<...> is much
+// easier to write, but generates horrible messages when there is a
+// compiler error, as gcc insists on printing out each template
+// argument, even if it has the default value (this means Types<int>
+// will appear as Types<int, None, None, ..., None> in the compiler
+// errors).
+//
+// Our solution combines the best parts of both approaches: the user
+// writes Types<T1, ..., TN>, and Google Test translates that to
+// TypesN<T1, ..., TN> internally to make error messages readable.
+// The translation is done by the 'type' member of the Types template.
+template <typename T1 = internal::None, typename T2 = internal::None,
+ typename T3 = internal::None, typename T4 = internal::None,
+ typename T5 = internal::None, typename T6 = internal::None,
+ typename T7 = internal::None, typename T8 = internal::None,
+ typename T9 = internal::None, typename T10 = internal::None,
+ typename T11 = internal::None, typename T12 = internal::None,
+ typename T13 = internal::None, typename T14 = internal::None,
+ typename T15 = internal::None, typename T16 = internal::None,
+ typename T17 = internal::None, typename T18 = internal::None,
+ typename T19 = internal::None, typename T20 = internal::None,
+ typename T21 = internal::None, typename T22 = internal::None,
+ typename T23 = internal::None, typename T24 = internal::None,
+ typename T25 = internal::None, typename T26 = internal::None,
+ typename T27 = internal::None, typename T28 = internal::None,
+ typename T29 = internal::None, typename T30 = internal::None,
+ typename T31 = internal::None, typename T32 = internal::None,
+ typename T33 = internal::None, typename T34 = internal::None,
+ typename T35 = internal::None, typename T36 = internal::None,
+ typename T37 = internal::None, typename T38 = internal::None,
+ typename T39 = internal::None, typename T40 = internal::None,
+ typename T41 = internal::None, typename T42 = internal::None,
+ typename T43 = internal::None, typename T44 = internal::None,
+ typename T45 = internal::None, typename T46 = internal::None,
+ typename T47 = internal::None, typename T48 = internal::None,
+ typename T49 = internal::None, typename T50 = internal::None>
+struct Types {
+ typedef internal::Types50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
+};
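+
+// Illustrative sketch only (not part of the upstream header): how the
+// 'type' member above is typically consumed. The fixture and type-list
+// names below are hypothetical examples, not definitions from this file.
+//
+//   typedef ::testing::Types<char, int, unsigned int> MyTypes;
+//   // MyTypes::type is internal::Types3<char, int, unsigned int>; its
+//   // Head is char and its Tail is internal::Types2<int, unsigned int>.
+//
+//   template <typename T> class FooTest : public ::testing::Test {};
+//   TYPED_TEST_CASE(FooTest, MyTypes);  // the macro uses MyTypes::type
+//   TYPED_TEST(FooTest, DefaultConstructs) {
+//     TypeParam value = TypeParam();
+//     (void)value;
+//   }
+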
+
+template <>
+struct Types<internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types0 type;
+};
+template <typename T1>
+struct Types<T1, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types1<T1> type;
+};
+template <typename T1, typename T2>
+struct Types<T1, T2, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types2<T1, T2> type;
+};
+template <typename T1, typename T2, typename T3>
+struct Types<T1, T2, T3, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types3<T1, T2, T3> type;
+};
+template <typename T1, typename T2, typename T3, typename T4>
+struct Types<T1, T2, T3, T4, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types4<T1, T2, T3, T4> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+struct Types<T1, T2, T3, T4, T5, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types5<T1, T2, T3, T4, T5> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+struct Types<T1, T2, T3, T4, T5, T6, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types6<T1, T2, T3, T4, T5, T6> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+struct Types<T1, T2, T3, T4, T5, T6, T7, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types7<T1, T2, T3, T4, T5, T6, T7> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types8<T1, T2, T3, T4, T5, T6, T7, T8> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, internal::None, internal::None, internal::None> {
+ typedef internal::Types47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, T48, internal::None, internal::None> {
+ typedef internal::Types48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, T48, T49, internal::None> {
+ typedef internal::Types49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49> type;
+};
+
+namespace internal {
+
+#define GTEST_TEMPLATE_ template <typename T> class
+
+// The template "selector" struct TemplateSel<Tmpl> is used to
+// represent Tmpl, which must be a class template with one type
+// parameter, as a type. TemplateSel<Tmpl>::Bind<T>::type is defined
+// as the type Tmpl<T>. This allows us to actually instantiate the
+// template "selected" by TemplateSel<Tmpl>.
+//
+// This trick is necessary for simulating typedef for class templates,
+// which C++ doesn't support directly.
+template <GTEST_TEMPLATE_ Tmpl>
+struct TemplateSel {
+ template <typename T>
+ struct Bind {
+ typedef Tmpl<T> type;
+ };
+};
+
+#define GTEST_BIND_(TmplSel, T) \
+ TmplSel::template Bind<T>::type
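+
+// Illustrative usage sketch (not part of the original header; MyVector is a
+// hypothetical single-type-parameter class template):
+//
+//   template <typename T> class MyVector { /* ... */ };
+//
+//   typedef TemplateSel<MyVector> VectorSel;        // MyVector wrapped as a type
+//   typedef GTEST_BIND_(VectorSel, int) IntVector;  // i.e. MyVector<int>
+//
+// GTEST_BIND_(VectorSel, int) expands to VectorSel::template Bind<int>::type,
+// which TemplateSel defines as MyVector<int>.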
+
+// A unique struct template used as the default value for the
+// arguments of class template Templates. This allows us to simulate
+// variadic templates (e.g. Templates<int>, Templates<int, double>,
+// etc.), which C++ doesn't support directly.
+template <typename T>
+struct NoneT {};
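+
+// Illustrative note (not part of the original header; MyTmpl is a
+// hypothetical single-type-parameter class template): because every
+// parameter of Templates (declared further below) defaults to NoneT,
+// Templates<MyTmpl> is shorthand for
+// Templates<MyTmpl, NoneT, NoneT, ..., NoneT> with 49 trailing NoneT
+// arguments.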
+
+// The following family of structs and struct templates is used to
+// represent template lists. In particular, TemplatesN<T1, T2, ...,
+// TN> represents a list of N templates (T1, T2, ..., and TN). Except
+// for Templates0, every struct in the family has two member types:
+// Head for the selector of the first template in the list, and Tail
+// for the rest of the list.
+
+// The empty template list.
+struct Templates0 {};
+
+// Template lists of length 1, 2, 3, and so on.
+
+template <GTEST_TEMPLATE_ T1>
+struct Templates1 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates0 Tail;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
+struct Templates2 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates1<T2> Tail;
+};
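+
+// Illustrative sketch (not part of the original header; MyA and MyB are
+// hypothetical single-type-parameter class templates):
+//
+//   Templates2<MyA, MyB>::Head  is  TemplateSel<MyA>
+//   Templates2<MyA, MyB>::Tail  is  Templates1<MyB>, whose Head is
+//                                   TemplateSel<MyB> and whose Tail is
+//                                   Templates0 (the end of the list).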
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
+struct Templates3 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates2<T2, T3> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4>
+struct Templates4 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates3<T2, T3, T4> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
+struct Templates5 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates4<T2, T3, T4, T5> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
+struct Templates6 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates5<T2, T3, T4, T5, T6> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7>
+struct Templates7 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates6<T2, T3, T4, T5, T6, T7> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
+struct Templates8 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates7<T2, T3, T4, T5, T6, T7, T8> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
+struct Templates9 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10>
+struct Templates10 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
+struct Templates11 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
+struct Templates12 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13>
+struct Templates13 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
+struct Templates14 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
+struct Templates15 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16>
+struct Templates16 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
+struct Templates17 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
+struct Templates18 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19>
+struct Templates19 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
+struct Templates20 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
+struct Templates21 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22>
+struct Templates22 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
+struct Templates23 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
+struct Templates24 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25>
+struct Templates25 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
+struct Templates26 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
+struct Templates27 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28>
+struct Templates28 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
+struct Templates29 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
+struct Templates30 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31>
+struct Templates31 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
+struct Templates32 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
+struct Templates33 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34>
+struct Templates34 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
+struct Templates35 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
+struct Templates36 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37>
+struct Templates37 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
+struct Templates38 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
+struct Templates39 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40>
+struct Templates40 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
+struct Templates41 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
+struct Templates42 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43>
+struct Templates43 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
+struct Templates44 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
+struct Templates45 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46>
+struct Templates46 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
+struct Templates47 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
+struct Templates48 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49>
+struct Templates49 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48, T49> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49, GTEST_TEMPLATE_ T50>
+struct Templates50 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48, T49, T50> Tail;
+};
+
+
+// We don't want to require the users to write TemplatesN<...> directly,
+// as that would require them to count the length. Templates<...> is much
+// easier to write, but generates horrible messages when there is a
+// compiler error, as gcc insists on printing out each template
+// argument, even if it has the default value (this means Templates<list>
+// will appear as Templates<list, NoneT, NoneT, ..., NoneT> in the compiler
+// errors).
+//
+// Our solution is to combine the best part of the two approaches: a
+// user would write Templates<T1, ..., TN>, and Google Test will translate
+// that to TemplatesN<T1, ..., TN> internally to make error messages
+// readable. The translation is done by the 'type' member of the
+// Templates template.
+template <GTEST_TEMPLATE_ T1 = NoneT, GTEST_TEMPLATE_ T2 = NoneT,
+ GTEST_TEMPLATE_ T3 = NoneT, GTEST_TEMPLATE_ T4 = NoneT,
+ GTEST_TEMPLATE_ T5 = NoneT, GTEST_TEMPLATE_ T6 = NoneT,
+ GTEST_TEMPLATE_ T7 = NoneT, GTEST_TEMPLATE_ T8 = NoneT,
+ GTEST_TEMPLATE_ T9 = NoneT, GTEST_TEMPLATE_ T10 = NoneT,
+ GTEST_TEMPLATE_ T11 = NoneT, GTEST_TEMPLATE_ T12 = NoneT,
+ GTEST_TEMPLATE_ T13 = NoneT, GTEST_TEMPLATE_ T14 = NoneT,
+ GTEST_TEMPLATE_ T15 = NoneT, GTEST_TEMPLATE_ T16 = NoneT,
+ GTEST_TEMPLATE_ T17 = NoneT, GTEST_TEMPLATE_ T18 = NoneT,
+ GTEST_TEMPLATE_ T19 = NoneT, GTEST_TEMPLATE_ T20 = NoneT,
+ GTEST_TEMPLATE_ T21 = NoneT, GTEST_TEMPLATE_ T22 = NoneT,
+ GTEST_TEMPLATE_ T23 = NoneT, GTEST_TEMPLATE_ T24 = NoneT,
+ GTEST_TEMPLATE_ T25 = NoneT, GTEST_TEMPLATE_ T26 = NoneT,
+ GTEST_TEMPLATE_ T27 = NoneT, GTEST_TEMPLATE_ T28 = NoneT,
+ GTEST_TEMPLATE_ T29 = NoneT, GTEST_TEMPLATE_ T30 = NoneT,
+ GTEST_TEMPLATE_ T31 = NoneT, GTEST_TEMPLATE_ T32 = NoneT,
+ GTEST_TEMPLATE_ T33 = NoneT, GTEST_TEMPLATE_ T34 = NoneT,
+ GTEST_TEMPLATE_ T35 = NoneT, GTEST_TEMPLATE_ T36 = NoneT,
+ GTEST_TEMPLATE_ T37 = NoneT, GTEST_TEMPLATE_ T38 = NoneT,
+ GTEST_TEMPLATE_ T39 = NoneT, GTEST_TEMPLATE_ T40 = NoneT,
+ GTEST_TEMPLATE_ T41 = NoneT, GTEST_TEMPLATE_ T42 = NoneT,
+ GTEST_TEMPLATE_ T43 = NoneT, GTEST_TEMPLATE_ T44 = NoneT,
+ GTEST_TEMPLATE_ T45 = NoneT, GTEST_TEMPLATE_ T46 = NoneT,
+ GTEST_TEMPLATE_ T47 = NoneT, GTEST_TEMPLATE_ T48 = NoneT,
+ GTEST_TEMPLATE_ T49 = NoneT, GTEST_TEMPLATE_ T50 = NoneT>
+struct Templates {
+ typedef Templates50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
+};
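+// For illustration, a sketch using two hypothetical class templates:
+//
+//   template <typename T> class MyVector {};
+//   template <typename T> class MyList {};
+//
+// The user-facing spelling
+//
+//   Templates<MyVector, MyList>::type
+//
+// resolves, through the NoneT defaults and the specializations below, to
+// Templates2<MyVector, MyList>, whose Head is TemplateSel<MyVector> and
+// whose Tail is Templates1<MyList>.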
+
+template <>
+struct Templates<NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates0 type;
+};
+template <GTEST_TEMPLATE_ T1>
+struct Templates<T1, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates1<T1> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
+struct Templates<T1, T2, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates2<T1, T2> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
+struct Templates<T1, T2, T3, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates3<T1, T2, T3> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4>
+struct Templates<T1, T2, T3, T4, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates4<T1, T2, T3, T4> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
+struct Templates<T1, T2, T3, T4, T5, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates5<T1, T2, T3, T4, T5> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
+struct Templates<T1, T2, T3, T4, T5, T6, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates6<T1, T2, T3, T4, T5, T6> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates7<T1, T2, T3, T4, T5, T6, T7> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates8<T1, T2, T3, T4, T5, T6, T7, T8> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, NoneT, NoneT, NoneT> {
+ typedef Templates47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, T48, NoneT, NoneT> {
+ typedef Templates48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, T48, T49, NoneT> {
+ typedef Templates49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48, T49> type;
+};
+
+// The TypeList template makes it possible to use either a single type
+// or a Types<...> list in TYPED_TEST_CASE() and
+// INSTANTIATE_TYPED_TEST_CASE_P().
+
+template <typename T>
+struct TypeList { typedef Types1<T> type; };
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+struct TypeList<Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> > {
+ typedef typename Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>::type type;
+};
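+// For illustration, a sketch of how TypeList serves the typed-test macros
+// (FooTest is a hypothetical test fixture): both
+//
+//   TYPED_TEST_CASE(FooTest, int);                     // a single type
+//   TYPED_TEST_CASE(FooTest, Types<char, int, long>);  // a Types<...> list
+//
+// normalize to the same internal form, since TypeList<int>::type is
+// Types1<int>, while TypeList<Types<char, int, long> >::type is the
+// canonical TypesN list produced by Types<...>::type.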
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+// Due to C++ preprocessor weirdness, we need double indirection to
+// concatenate two tokens when one of them is __LINE__. Writing
+//
+// foo ## __LINE__
+//
+// will result in the token foo__LINE__, instead of foo followed by
+// the current line number. For more details, see
+// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6
+#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar)
+#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar
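+// For example, if GTEST_CONCAT_TOKEN_(foo, __LINE__) appears on line 42,
+// the outer macro first expands __LINE__ to 42 and only then hands the two
+// tokens to GTEST_CONCAT_TOKEN_IMPL_, yielding the identifier foo42.  A
+// direct foo ## __LINE__ would instead produce the literal token
+// foo__LINE__.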
+
+// Google Test defines the testing::Message class to allow construction of
+// test messages via the << operator. The idea is that anything
+// streamable to std::ostream can be streamed to a testing::Message.
+// This allows a user to use his own types in Google Test assertions by
+// overloading the << operator.
+//
+// util/gtl/stl_logging-inl.h overloads << for STL containers. These
+// overloads cannot be defined in the std namespace, as that will be
+// undefined behavior. Therefore, they are defined in the global
+// namespace instead.
+//
+// C++'s symbol lookup rule (i.e. Koenig lookup) says that these
+// overloads are visible in either the std namespace or the global
+// namespace, but not other namespaces, including the testing
+// namespace which Google Test's Message class is in.
+//
+// To allow STL containers (and other types that have a << operator
+// defined in the global namespace) to be used in Google Test assertions,
+// testing::Message must access the custom << operator from the global
+// namespace. Hence this helper function.
+//
+// Note: Jeffrey Yasskin suggested an alternative fix by "using
+// ::operator<<;" in the definition of Message's operator<<. That fix
+// doesn't require a helper function, but unfortunately doesn't
+// compile with MSVC.
+template <typename T>
+inline void GTestStreamToHelper(std::ostream* os, const T& val) {
+ *os << val;
+}
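+// For illustration, a sketch with a hypothetical user type MyType: given a
+// global-namespace overload
+//
+//   ::std::ostream& operator<<(::std::ostream& os, const MyType& t);
+//
+// an expression such as
+//
+//   testing::Message() << MyType();
+//
+// compiles because Message's operator<< forwards the value through
+// GTestStreamToHelper, where the unqualified "*os << val" can see the
+// global-namespace overload.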
+
+namespace testing {
+
+// Forward declaration of classes.
+
+class AssertionResult; // Result of an assertion.
+class Message; // Represents a failure message.
+class Test; // Represents a test.
+class TestInfo; // Information about a test.
+class TestPartResult; // Result of a test part.
+class UnitTest; // A collection of test cases.
+
+namespace internal {
+
+struct TraceInfo; // Information about a trace point.
+class ScopedTrace; // Implements scoped trace.
+class TestInfoImpl; // Opaque implementation of TestInfo.
+class UnitTestImpl; // Opaque implementation of UnitTest.
+
+// How many times InitGoogleTest() has been called.
+extern int g_init_gtest_count;
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+GTEST_API_ extern const char kStackTraceMarker[];
+
+// A secret type that Google Test users don't know about. It has no
+// definition on purpose. Therefore it's impossible to create a
+// Secret object, which is what we want.
+class Secret;
+
+// Two overloaded helpers for checking at compile time whether an
+// expression is a null pointer literal (i.e. NULL or any 0-valued
+// compile-time integral constant). Their return values have
+// different sizes, so we can use sizeof() to test which version is
+// picked by the compiler. These helpers have no implementations, as
+// we only need their signatures.
+//
+// Given IsNullLiteralHelper(x), the compiler will pick the first
+// version if x can be implicitly converted to Secret*, and pick the
+// second version otherwise. Since Secret is a secret and incomplete
+// type, the only expression a user can write that has type Secret* is
+// a null pointer literal. Therefore, we know that x is a null
+// pointer literal if and only if the first version is picked by the
+// compiler.
+char IsNullLiteralHelper(Secret* p);
+char (&IsNullLiteralHelper(...))[2]; // NOLINT
+
+// A compile-time bool constant that is true if and only if x is a
+// null pointer literal (i.e. NULL or any 0-valued compile-time
+// integral constant).
+#ifdef GTEST_ELLIPSIS_NEEDS_POD_
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+#define GTEST_IS_NULL_LITERAL_(x) false
+#else
+#define GTEST_IS_NULL_LITERAL_(x) \
+ (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1)
+#endif // GTEST_ELLIPSIS_NEEDS_POD_
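+
+// For illustration only, a sketch of how the sizeof() trick resolves
+// (assuming GTEST_ELLIPSIS_NEEDS_POD_ is not defined):
+//
+//   GTEST_IS_NULL_LITERAL_(NULL)   // NULL converts to Secret* => true
+//   GTEST_IS_NULL_LITERAL_(0)      // 0 is a null pointer literal => true
+//   int* p = NULL;
+//   GTEST_IS_NULL_LITERAL_(p)      // p is not a literal => false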
+
+// Appends the user-supplied message to the Google-Test-generated message.
+GTEST_API_ String AppendUserMessage(const String& gtest_msg,
+ const Message& user_msg);
+
+// A helper class for creating scoped traces in user programs.
+class GTEST_API_ ScopedTrace {
+ public:
+ // The c'tor pushes the given source file location and message onto
+ // a trace stack maintained by Google Test.
+ ScopedTrace(const char* file, int line, const Message& message);
+
+ // The d'tor pops the info pushed by the c'tor.
+ //
+ // Note that the d'tor is not virtual in order to be efficient.
+ // Don't inherit from ScopedTrace!
+ ~ScopedTrace();
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace);
+} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its
+ // c'tor and d'tor. Therefore it doesn't
+ // need to be used otherwise.
+
+// Converts a streamable value to a String. A NULL pointer is
+// converted to "(null)". When the input value is a ::string,
+// ::std::string, ::wstring, or ::std::wstring object, each NUL
+// character in it is replaced with "\\0".
+// Declared here but defined in gtest.h, so that it has access
+// to the definition of the Message class, required by the ARM
+// compiler.
+template <typename T>
+String StreamableToString(const T& streamable);
+
+// Formats a value to be used in a failure message.
+
+#ifdef GTEST_NEEDS_IS_POINTER_
+
+// These are needed as the Nokia Symbian and IBM XL C/C++ compilers
+// cannot decide between const T& and const T* in a function template.
+// These compilers _can_ decide between class template specializations
+// for T and T*, so a tr1::type_traits-like is_pointer works, and we
+// can overload on that.
+
+// This overload makes sure that all pointers (including
+// those to char or wchar_t) are printed as raw pointers.
+template <typename T>
+inline String FormatValueForFailureMessage(internal::true_type /*dummy*/,
+ T* pointer) {
+ return StreamableToString(static_cast<const void*>(pointer));
+}
+
+template <typename T>
+inline String FormatValueForFailureMessage(internal::false_type /*dummy*/,
+ const T& value) {
+ return StreamableToString(value);
+}
+
+template <typename T>
+inline String FormatForFailureMessage(const T& value) {
+ return FormatValueForFailureMessage(
+ typename internal::is_pointer<T>::type(), value);
+}
+
+#else
+
+// These are needed as the above solution using is_pointer has the
+// limitation that T cannot be a type without external linkage, when
+// compiled using MSVC.
+
+template <typename T>
+inline String FormatForFailureMessage(const T& value) {
+ return StreamableToString(value);
+}
+
+// This overload makes sure that all pointers (including
+// those to char or wchar_t) are printed as raw pointers.
+template <typename T>
+inline String FormatForFailureMessage(T* pointer) {
+ return StreamableToString(static_cast<const void*>(pointer));
+}
+
+#endif // GTEST_NEEDS_IS_POINTER_
+
+// These overloaded versions handle narrow and wide characters.
+GTEST_API_ String FormatForFailureMessage(char ch);
+GTEST_API_ String FormatForFailureMessage(wchar_t wchar);
+
+// When this operand is a const char* or char*, and the other operand
+// is a ::std::string or ::string, we print this operand as a C string
+// rather than a pointer. We do the same for wide strings.
+
+// This internal macro is used to avoid duplicated code.
+#define GTEST_FORMAT_IMPL_(operand2_type, operand1_printer)\
+inline String FormatForComparisonFailureMessage(\
+ operand2_type::value_type* str, const operand2_type& /*operand2*/) {\
+ return operand1_printer(str);\
+}\
+inline String FormatForComparisonFailureMessage(\
+ const operand2_type::value_type* str, const operand2_type& /*operand2*/) {\
+ return operand1_printer(str);\
+}
+
+GTEST_FORMAT_IMPL_(::std::string, String::ShowCStringQuoted)
+#if GTEST_HAS_STD_WSTRING
+GTEST_FORMAT_IMPL_(::std::wstring, String::ShowWideCStringQuoted)
+#endif // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_FORMAT_IMPL_(::string, String::ShowCStringQuoted)
+#endif // GTEST_HAS_GLOBAL_STRING
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_FORMAT_IMPL_(::wstring, String::ShowWideCStringQuoted)
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+#undef GTEST_FORMAT_IMPL_
+
+// Constructs and returns the message for an equality assertion
+// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
+//
+// The first four parameters are the expressions used in the assertion
+// and their values, as strings. For example, for ASSERT_EQ(foo, bar)
+// where foo is 5 and bar is 6, we have:
+//
+// expected_expression: "foo"
+// actual_expression: "bar"
+// expected_value: "5"
+// actual_value: "6"
+//
+// The ignoring_case parameter is true iff the assertion is a
+// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will
+// be inserted into the message.
+GTEST_API_ AssertionResult EqFailure(const char* expected_expression,
+ const char* actual_expression,
+ const String& expected_value,
+ const String& actual_value,
+ bool ignoring_case);
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+GTEST_API_ String GetBoolAssertionFailureMessage(
+ const AssertionResult& assertion_result,
+ const char* expression_text,
+ const char* actual_predicate_value,
+ const char* expected_predicate_value);
+
+// This template class represents an IEEE floating-point number
+// (either single-precision or double-precision, depending on the
+// template parameters).
+//
+// The purpose of this class is to do more sophisticated number
+// comparison. (Due to round-off error, etc, it's very unlikely that
+// two floating-points will be equal exactly. Hence a naive
+// comparison by the == operation often doesn't work.)
+//
+// Format of IEEE floating-point:
+//
+// The most-significant bit being the leftmost, an IEEE
+// floating-point looks like
+//
+// sign_bit exponent_bits fraction_bits
+//
+// Here, sign_bit is a single bit that designates the sign of the
+// number.
+//
+// For float, there are 8 exponent bits and 23 fraction bits.
+//
+// For double, there are 11 exponent bits and 52 fraction bits.
+//
+// More details can be found at
+// http://en.wikipedia.org/wiki/IEEE_floating-point_standard.
+//
+// Template parameter:
+//
+// RawType: the raw floating-point type (either float or double)
+template <typename RawType>
+class FloatingPoint {
+ public:
+ // Defines the unsigned integer type that has the same size as the
+ // floating point number.
+ typedef typename TypeWithSize<sizeof(RawType)>::UInt Bits;
+
+ // Constants.
+
+ // # of bits in a number.
+ static const size_t kBitCount = 8*sizeof(RawType);
+
+ // # of fraction bits in a number.
+ static const size_t kFractionBitCount =
+ std::numeric_limits<RawType>::digits - 1;
+
+ // # of exponent bits in a number.
+ static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
+
+ // The mask for the sign bit.
+ static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
+
+ // The mask for the fraction bits.
+ static const Bits kFractionBitMask =
+ ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
+
+ // The mask for the exponent bits.
+ static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
+
+ // How many ULP's (Units in the Last Place) we want to tolerate when
+ // comparing two numbers. The larger the value, the more error we
+ // allow. A 0 value means that two numbers must be exactly the same
+ // to be considered equal.
+ //
+ // The maximum error of a single floating-point operation is 0.5
+ // units in the last place. On Intel CPU's, all floating-point
+ // calculations are done with 80-bit precision, while double has 64
+ // bits. Therefore, 4 should be enough for ordinary use.
+ //
+ // See the following article for more details on ULP:
+ // http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm.
+ static const size_t kMaxUlps = 4;
+
+ // Constructs a FloatingPoint from a raw floating-point number.
+ //
+ // On an Intel CPU, passing a non-normalized NAN (Not a Number)
+ // around may change its bits, although the new value is guaranteed
+ // to be also a NAN. Therefore, don't expect this constructor to
+ // preserve the bits in x when x is a NAN.
+ explicit FloatingPoint(const RawType& x) { u_.value_ = x; }
+
+ // Static methods
+
+ // Reinterprets a bit pattern as a floating-point number.
+ //
+ // This function is needed to test the AlmostEquals() method.
+ static RawType ReinterpretBits(const Bits bits) {
+ FloatingPoint fp(0);
+ fp.u_.bits_ = bits;
+ return fp.u_.value_;
+ }
+
+  // Returns the floating-point number that represents positive infinity.
+ static RawType Infinity() {
+ return ReinterpretBits(kExponentBitMask);
+ }
+
+ // Non-static methods
+
+  // Returns the bits that represent this number.
+ const Bits &bits() const { return u_.bits_; }
+
+ // Returns the exponent bits of this number.
+ Bits exponent_bits() const { return kExponentBitMask & u_.bits_; }
+
+ // Returns the fraction bits of this number.
+ Bits fraction_bits() const { return kFractionBitMask & u_.bits_; }
+
+ // Returns the sign bit of this number.
+ Bits sign_bit() const { return kSignBitMask & u_.bits_; }
+
+ // Returns true iff this is NAN (not a number).
+ bool is_nan() const {
+ // It's a NAN if the exponent bits are all ones and the fraction
+ // bits are not entirely zeros.
+ return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
+ }
+
+ // Returns true iff this number is at most kMaxUlps ULP's away from
+ // rhs. In particular, this function:
+ //
+ // - returns false if either number is (or both are) NAN.
+ // - treats really large numbers as almost equal to infinity.
+  //   - thinks +0.0 and -0.0 are 0 ULP's apart.
+ bool AlmostEquals(const FloatingPoint& rhs) const {
+ // The IEEE standard says that any comparison operation involving
+ // a NAN must return false.
+ if (is_nan() || rhs.is_nan()) return false;
+
+ return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_)
+ <= kMaxUlps;
+ }
+
+ private:
+ // The data type used to store the actual floating-point number.
+ union FloatingPointUnion {
+ RawType value_; // The raw floating-point number.
+ Bits bits_; // The bits that represent the number.
+ };
+
+ // Converts an integer from the sign-and-magnitude representation to
+ // the biased representation. More precisely, let N be 2 to the
+ // power of (kBitCount - 1), an integer x is represented by the
+ // unsigned number x + N.
+ //
+ // For instance,
+ //
+ // -N + 1 (the most negative number representable using
+ // sign-and-magnitude) is represented by 1;
+ // 0 is represented by N; and
+ // N - 1 (the biggest number representable using
+ // sign-and-magnitude) is represented by 2N - 1.
+ //
+ // Read http://en.wikipedia.org/wiki/Signed_number_representations
+ // for more details on signed number representations.
+ static Bits SignAndMagnitudeToBiased(const Bits &sam) {
+ if (kSignBitMask & sam) {
+ // sam represents a negative number.
+ return ~sam + 1;
+ } else {
+ // sam represents a positive number.
+ return kSignBitMask | sam;
+ }
+ }
+
+ // Given two numbers in the sign-and-magnitude representation,
+ // returns the distance between them as an unsigned number.
+ static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
+ const Bits &sam2) {
+ const Bits biased1 = SignAndMagnitudeToBiased(sam1);
+ const Bits biased2 = SignAndMagnitudeToBiased(sam2);
+ return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
+ }
+
+ FloatingPointUnion u_;
+};
+
+// Typedefs the instances of the FloatingPoint template class that we
+// care to use.
+typedef FloatingPoint<float> Float;
+typedef FloatingPoint<double> Double;
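+
+// For illustration only: a minimal sketch of ULP-based comparison using the
+// Double typedef above. The literals are arbitrary example values.
+//
+//   const double a = 1.0;
+//   const double b = a + std::numeric_limits<double>::epsilon();  // 1 ULP away
+//   Double(a).AlmostEquals(Double(b));      // true: within kMaxUlps (4) ULPs
+//   Double(1.0).AlmostEquals(Double(1.1));  // false: far more than 4 ULPs apart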
+
+// In order to catch the mistake of putting tests that use different
+// test fixture classes in the same test case, we need to assign
+// unique IDs to fixture classes and compare them. The TypeId type is
+// used to hold such IDs. The user should treat TypeId as an opaque
+// type: the only operation allowed on TypeId values is to compare
+// them for equality using the == operator.
+typedef const void* TypeId;
+
+template <typename T>
+class TypeIdHelper {
+ public:
+ // dummy_ must not have a const type. Otherwise an overly eager
+ // compiler (e.g. MSVC 7.1 & 8.0) may try to merge
+ // TypeIdHelper<T>::dummy_ for different Ts as an "optimization".
+ static bool dummy_;
+};
+
+template <typename T>
+bool TypeIdHelper<T>::dummy_ = false;
+
+// GetTypeId<T>() returns the ID of type T. Different values will be
+// returned for different types. Calling the function twice with the
+// same type argument is guaranteed to return the same ID.
+template <typename T>
+TypeId GetTypeId() {
+ // The compiler is required to allocate a different
+ // TypeIdHelper<T>::dummy_ variable for each T used to instantiate
+ // the template. Therefore, the address of dummy_ is guaranteed to
+ // be unique.
+ return &(TypeIdHelper<T>::dummy_);
+}
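+
+// For illustration only, how GetTypeId() distinguishes types (Foo and Bar
+// are hypothetical fixture classes):
+//
+//   TypeId a = GetTypeId<Foo>();
+//   TypeId b = GetTypeId<Foo>();
+//   TypeId c = GetTypeId<Bar>();
+//   // a == b (the same T yields the same dummy_ address), while a != c.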
+
+// Returns the type ID of ::testing::Test. Always call this instead
+// of GetTypeId< ::testing::Test>() to get the type ID of
+// ::testing::Test, as the latter may give the wrong result due to a
+// suspected linker bug when compiling Google Test as a Mac OS X
+// framework.
+GTEST_API_ TypeId GetTestTypeId();
+
+// Defines the abstract factory interface that creates instances
+// of a Test object.
+class TestFactoryBase {
+ public:
+ virtual ~TestFactoryBase() {}
+
+ // Creates a test instance to run. The instance is both created and destroyed
+  // within TestInfoImpl::Run().
+ virtual Test* CreateTest() = 0;
+
+ protected:
+ TestFactoryBase() {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase);
+};
+
+// This class provides an implementation of the TestFactoryBase interface.
+// It is used in TEST and TEST_F macros.
+template <class TestClass>
+class TestFactoryImpl : public TestFactoryBase {
+ public:
+ virtual Test* CreateTest() { return new TestClass; }
+};
+
+#if GTEST_OS_WINDOWS
+
+// Predicate-formatters for implementing the HRESULT checking macros
+// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
+// We pass a long instead of HRESULT to avoid causing an
+// include dependency for the HRESULT type.
+GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr,
+ long hr); // NOLINT
+GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr,
+ long hr); // NOLINT
+
+#endif // GTEST_OS_WINDOWS
+
+// Formats a source file path and a line number as they would appear
+// in a compiler error message.
+inline String FormatFileLocation(const char* file, int line) {
+ const char* const file_name = file == NULL ? "unknown file" : file;
+ if (line < 0) {
+ return String::Format("%s:", file_name);
+ }
+#ifdef _MSC_VER
+ return String::Format("%s(%d):", file_name, line);
+#else
+ return String::Format("%s:%d:", file_name, line);
+#endif // _MSC_VER
+}
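+
+// For illustration only, example results of FormatFileLocation():
+//
+//   FormatFileLocation("foo.cc", 42)  // "foo.cc(42):" with MSVC,
+//                                     // "foo.cc:42:" elsewhere.
+//   FormatFileLocation(NULL, 42)      // uses "unknown file" as the name.
+//   FormatFileLocation("foo.cc", -1)  // "foo.cc:" (no line number).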
+
+// Types of SetUpTestCase() and TearDownTestCase() functions.
+typedef void (*SetUpTestCaseFunc)();
+typedef void (*TearDownTestCaseFunc)();
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+// test_case_name: name of the test case
+// name: name of the test
+// test_case_comment: a comment on the test case that will be included in
+// the test output
+// comment: a comment on the test that will be included in the
+// test output
+// fixture_class_id: ID of the test fixture class
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+// factory: pointer to the factory that creates a test object.
+// The newly created TestInfo instance will assume
+// ownership of the factory object.
+GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
+ const char* test_case_name, const char* name,
+ const char* test_case_comment, const char* comment,
+ TypeId fixture_class_id,
+ SetUpTestCaseFunc set_up_tc,
+ TearDownTestCaseFunc tear_down_tc,
+ TestFactoryBase* factory);
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+bool SkipPrefix(const char* prefix, const char** pstr);
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// State of the definition of a type-parameterized test case.
+class GTEST_API_ TypedTestCasePState {
+ public:
+ TypedTestCasePState() : registered_(false) {}
+
+  // Adds the given test name to defined_test_names_ and returns true
+ // if the test case hasn't been registered; otherwise aborts the
+ // program.
+ bool AddTestName(const char* file, int line, const char* case_name,
+ const char* test_name) {
+ if (registered_) {
+ fprintf(stderr, "%s Test %s must be defined before "
+ "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n",
+ FormatFileLocation(file, line).c_str(), test_name, case_name);
+ fflush(stderr);
+ posix::Abort();
+ }
+ defined_test_names_.insert(test_name);
+ return true;
+ }
+
+ // Verifies that registered_tests match the test names in
+ // defined_test_names_; returns registered_tests if successful, or
+ // aborts the program otherwise.
+ const char* VerifyRegisteredTestNames(
+ const char* file, int line, const char* registered_tests);
+
+ private:
+ bool registered_;
+ ::std::set<const char*> defined_test_names_;
+};
+
+// Skips to the first non-space char after the first comma in 'str';
+// returns NULL if no comma is found in 'str'.
+inline const char* SkipComma(const char* str) {
+ const char* comma = strchr(str, ',');
+ if (comma == NULL) {
+ return NULL;
+ }
+ while (isspace(*(++comma))) {}
+ return comma;
+}
+
+// Returns the prefix of 'str' before the first comma in it; returns
+// the entire string if it contains no comma.
+inline String GetPrefixUntilComma(const char* str) {
+ const char* comma = strchr(str, ',');
+ return comma == NULL ? String(str) : String(str, comma - str);
+}
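+
+// For illustration only, how the two helpers above split a comma-separated
+// test name list such as "DoesFoo, DoesBar":
+//
+//   GetPrefixUntilComma("DoesFoo, DoesBar")  // "DoesFoo"
+//   SkipComma("DoesFoo, DoesBar")            // points at "DoesBar"
+//   SkipComma("DoesFoo")                     // NULL (no comma present)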
+
+// TypeParameterizedTest<Fixture, TestSel, Types>::Register()
+// registers a list of type-parameterized tests with Google Test. The
+// return value is insignificant - we just need to return something
+// such that we can call this function in a namespace scope.
+//
+// Implementation note: The GTEST_TEMPLATE_ macro declares a template
+// template parameter. It's defined in gtest-type-util.h.
+template <GTEST_TEMPLATE_ Fixture, class TestSel, typename Types>
+class TypeParameterizedTest {
+ public:
+ // 'index' is the index of the test in the type list 'Types'
+ // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase,
+ // Types). Valid values for 'index' are [0, N - 1] where N is the
+ // length of Types.
+ static bool Register(const char* prefix, const char* case_name,
+ const char* test_names, int index) {
+ typedef typename Types::Head Type;
+ typedef Fixture<Type> FixtureClass;
+ typedef typename GTEST_BIND_(TestSel, Type) TestClass;
+
+ // First, registers the first type-parameterized test in the type
+ // list.
+ MakeAndRegisterTestInfo(
+ String::Format("%s%s%s/%d", prefix, prefix[0] == '\0' ? "" : "/",
+ case_name, index).c_str(),
+ GetPrefixUntilComma(test_names).c_str(),
+ String::Format("TypeParam = %s", GetTypeName<Type>().c_str()).c_str(),
+ "",
+ GetTypeId<FixtureClass>(),
+ TestClass::SetUpTestCase,
+ TestClass::TearDownTestCase,
+ new TestFactoryImpl<TestClass>);
+
+ // Next, recurses (at compile time) with the tail of the type list.
+ return TypeParameterizedTest<Fixture, TestSel, typename Types::Tail>
+ ::Register(prefix, case_name, test_names, index + 1);
+ }
+};
+
+// The base case for the compile time recursion.
+template <GTEST_TEMPLATE_ Fixture, class TestSel>
+class TypeParameterizedTest<Fixture, TestSel, Types0> {
+ public:
+ static bool Register(const char* /*prefix*/, const char* /*case_name*/,
+ const char* /*test_names*/, int /*index*/) {
+ return true;
+ }
+};
+
+// TypeParameterizedTestCase<Fixture, Tests, Types>::Register()
+// registers *all combinations* of 'Tests' and 'Types' with Google
+// Test. The return value is insignificant - we just need to return
+// something such that we can call this function in a namespace scope.
+template <GTEST_TEMPLATE_ Fixture, typename Tests, typename Types>
+class TypeParameterizedTestCase {
+ public:
+ static bool Register(const char* prefix, const char* case_name,
+ const char* test_names) {
+ typedef typename Tests::Head Head;
+
+    // First, registers the first test in 'Tests' for each type in 'Types'.
+ TypeParameterizedTest<Fixture, Head, Types>::Register(
+ prefix, case_name, test_names, 0);
+
+ // Next, recurses (at compile time) with the tail of the test list.
+ return TypeParameterizedTestCase<Fixture, typename Tests::Tail, Types>
+ ::Register(prefix, case_name, SkipComma(test_names));
+ }
+};
+
+// The base case for the compile time recursion.
+template <GTEST_TEMPLATE_ Fixture, typename Types>
+class TypeParameterizedTestCase<Fixture, Templates0, Types> {
+ public:
+ static bool Register(const char* /*prefix*/, const char* /*case_name*/,
+ const char* /*test_names*/) {
+ return true;
+ }
+};
+
+#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// Returns the current OS stack trace as a String.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+GTEST_API_ String GetCurrentOsStackTraceExceptTop(UnitTest* unit_test,
+ int skip_count);
+
+// Helpers for suppressing warnings on unreachable code or constant
+// condition.
+
+// Always returns true.
+GTEST_API_ bool AlwaysTrue();
+
+// Always returns false.
+inline bool AlwaysFalse() { return !AlwaysTrue(); }
+
+// A simple Linear Congruential Generator for generating random
+// numbers with a uniform distribution. Unlike rand() and srand(), it
+// doesn't use global state (and therefore can't interfere with user
+// code). Unlike rand_r(), it's portable. An LCG isn't very random,
+// but it's good enough for our purposes.
+class GTEST_API_ Random {
+ public:
+ static const UInt32 kMaxRange = 1u << 31;
+
+ explicit Random(UInt32 seed) : state_(seed) {}
+
+ void Reseed(UInt32 seed) { state_ = seed; }
+
+ // Generates a random number from [0, range). Crashes if 'range' is
+ // 0 or greater than kMaxRange.
+ UInt32 Generate(UInt32 range);
+
+ private:
+ UInt32 state_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Random);
+};
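+
+// For illustration only, a minimal usage sketch of Random (the seed and
+// range are arbitrary example values):
+//
+//   Random rng(42);
+//   const UInt32 die_roll = rng.Generate(6);  // uniform value in [0, 6).
+//   rng.Reseed(42);                           // replays the same sequence.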
+
+} // namespace internal
+} // namespace testing
+
+#define GTEST_MESSAGE_(message, result_type) \
+ ::testing::internal::AssertHelper(result_type, __FILE__, __LINE__, message) \
+ = ::testing::Message()
+
+#define GTEST_FATAL_FAILURE_(message) \
+ return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure)
+
+#define GTEST_NONFATAL_FAILURE_(message) \
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure)
+
+#define GTEST_SUCCESS_(message) \
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess)
+
+// Suppresses the MSVC warning 4702 (unreachable code) for the code following
+// 'statement' if it returns or throws (or doesn't return or throw in some
+// situations).
+#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \
+ if (::testing::internal::AlwaysTrue()) { statement; }
+
+#define GTEST_TEST_THROW_(statement, expected_exception, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const char* gtest_msg = "") { \
+ bool gtest_caught_expected = false; \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (expected_exception const&) { \
+ gtest_caught_expected = true; \
+ } \
+ catch (...) { \
+ gtest_msg = "Expected: " #statement " throws an exception of type " \
+ #expected_exception ".\n Actual: it throws a different " \
+ "type."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+ } \
+ if (!gtest_caught_expected) { \
+ gtest_msg = "Expected: " #statement " throws an exception of type " \
+ #expected_exception ".\n Actual: it throws nothing."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \
+ fail(gtest_msg)
+
+#define GTEST_TEST_NO_THROW_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const char* gtest_msg = "") { \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (...) { \
+ gtest_msg = "Expected: " #statement " doesn't throw an exception.\n" \
+ " Actual: it throws."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \
+ fail(gtest_msg)
+
+#define GTEST_TEST_ANY_THROW_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const char* gtest_msg = "") { \
+ bool gtest_caught_any = false; \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (...) { \
+ gtest_caught_any = true; \
+ } \
+ if (!gtest_caught_any) { \
+ gtest_msg = "Expected: " #statement " throws an exception.\n" \
+ " Actual: it doesn't."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \
+ fail(gtest_msg)
+
+
+// Implements Boolean test assertions such as EXPECT_TRUE. expression can be
+// either a boolean expression or an AssertionResult. text is a textual
+// representation of expression as it was passed into the EXPECT_TRUE.
+#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const ::testing::AssertionResult gtest_ar_ = \
+ ::testing::AssertionResult(expression)) \
+ ; \
+ else \
+ fail(::testing::internal::GetBoolAssertionFailureMessage(\
+ gtest_ar_, text, #actual, #expected).c_str())
+
+#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const char* gtest_msg = "") { \
+ ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
+ gtest_msg = "Expected: " #statement " doesn't generate new fatal " \
+ "failures in the current thread.\n" \
+ " Actual: it does."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \
+ fail(gtest_msg)
+
+// Expands to the name of the class that implements the given test.
+#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+ test_case_name##_##test_name##_Test
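+
+// For illustration only:
+//
+//   GTEST_TEST_CLASS_NAME_(FooTest, Bar)  // expands to FooTest_Bar_Test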
+
+// Helper macro for defining tests.
+#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\
+class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
+ public:\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
+ private:\
+ virtual void TestBody();\
+ static ::testing::TestInfo* const test_info_;\
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
+};\
+\
+::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\
+ ::test_info_ =\
+ ::testing::internal::MakeAndRegisterTestInfo(\
+ #test_case_name, #test_name, "", "", \
+ (parent_id), \
+ parent_class::SetUpTestCase, \
+ parent_class::TearDownTestCase, \
+ new ::testing::internal::TestFactoryImpl<\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\
+void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for death tests. It is
+// #included by gtest.h so a user doesn't need to include this
+// directly.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines internal utilities needed for implementing
+// death tests. They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+
+namespace testing {
+namespace internal {
+
+GTEST_DECLARE_string_(internal_run_death_test);
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kDeathTestStyleFlag[] = "death_test_style";
+const char kDeathTestUseFork[] = "death_test_use_fork";
+const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
+
+#if GTEST_HAS_DEATH_TEST
+
+// DeathTest is a class that hides much of the complexity of the
+// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method
+// returns a concrete class that depends on the prevailing death test
+// style, as defined by the --gtest_death_test_style and/or
+// --gtest_internal_run_death_test flags.
+
+// In describing the results of death tests, these terms are used with
+// the corresponding definitions:
+//
+// exit status: The integer exit information in the format specified
+// by wait(2)
+// exit code: The integer code passed to exit(3), _exit(2), or
+// returned from main()
+class GTEST_API_ DeathTest {
+ public:
+ // Create returns false if there was an error determining the
+ // appropriate action to take for the current death test; for example,
+ // if the gtest_death_test_style flag is set to an invalid value.
+ // The LastMessage method will return a more detailed message in that
+ // case. Otherwise, the DeathTest pointer pointed to by the "test"
+ // argument is set. If the death test should be skipped, the pointer
+ // is set to NULL; otherwise, it is set to the address of a new concrete
+ // DeathTest object that controls the execution of the current test.
+ static bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test);
+ DeathTest();
+ virtual ~DeathTest() { }
+
+ // A helper class that aborts a death test when it's deleted.
+ class ReturnSentinel {
+ public:
+ explicit ReturnSentinel(DeathTest* test) : test_(test) { }
+ ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); }
+ private:
+ DeathTest* const test_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel);
+ } GTEST_ATTRIBUTE_UNUSED_;
+
+ // An enumeration of possible roles that may be taken when a death
+ // test is encountered. EXECUTE means that the death test logic should
+ // be executed immediately. OVERSEE means that the program should prepare
+ // the appropriate environment for a child process to execute the death
+ // test, then wait for it to complete.
+ enum TestRole { OVERSEE_TEST, EXECUTE_TEST };
+
+ // An enumeration of the two reasons that a test might be aborted.
+ enum AbortReason { TEST_ENCOUNTERED_RETURN_STATEMENT, TEST_DID_NOT_DIE };
+
+ // Assumes one of the above roles.
+ virtual TestRole AssumeRole() = 0;
+
+ // Waits for the death test to finish and returns its status.
+ virtual int Wait() = 0;
+
+ // Returns true if the death test passed; that is, the test process
+ // exited during the test, its exit status matches a user-supplied
+ // predicate, and its stderr output matches a user-supplied regular
+ // expression.
+ // The user-supplied predicate may be a macro expression rather
+ // than a function pointer or functor, or else Wait and Passed could
+ // be combined.
+ virtual bool Passed(bool exit_status_ok) = 0;
+
+ // Signals that the death test did not die as expected.
+ virtual void Abort(AbortReason reason) = 0;
+
+  // Returns a human-readable message regarding the outcome of the last
+  // death test.
+ static const char* LastMessage();
+
+ static void set_last_death_test_message(const String& message);
+
+ private:
+ // A string containing a description of the outcome of the last death test.
+ static String last_death_test_message_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);
+};
+
+// Factory interface for death tests. May be mocked out for testing.
+class DeathTestFactory {
+ public:
+ virtual ~DeathTestFactory() { }
+ virtual bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test) = 0;
+};
+
+// A concrete DeathTestFactory implementation for normal use.
+class DefaultDeathTestFactory : public DeathTestFactory {
+ public:
+ virtual bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test);
+};
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
+
+// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
+// ASSERT_EXIT*, and EXPECT_EXIT*.
+#define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ const ::testing::internal::RE& gtest_regex = (regex); \
+ ::testing::internal::DeathTest* gtest_dt; \
+ if (!::testing::internal::DeathTest::Create(#statement, &gtest_regex, \
+ __FILE__, __LINE__, &gtest_dt)) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+ } \
+ if (gtest_dt != NULL) { \
+ ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \
+ gtest_dt_ptr(gtest_dt); \
+ switch (gtest_dt->AssumeRole()) { \
+ case ::testing::internal::DeathTest::OVERSEE_TEST: \
+ if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+ } \
+ break; \
+ case ::testing::internal::DeathTest::EXECUTE_TEST: { \
+ ::testing::internal::DeathTest::ReturnSentinel \
+ gtest_sentinel(gtest_dt); \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
+ break; \
+ } \
+ } \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \
+ fail(::testing::internal::DeathTest::LastMessage())
+// The symbol "fail" here expands to something into which a message
+// can be streamed.
+
+// A class representing the parsed contents of the
+// --gtest_internal_run_death_test flag, as it existed when
+// RUN_ALL_TESTS was called.
+class InternalRunDeathTestFlag {
+ public:
+ InternalRunDeathTestFlag(const String& a_file,
+ int a_line,
+ int an_index,
+ int a_write_fd)
+ : file_(a_file), line_(a_line), index_(an_index),
+ write_fd_(a_write_fd) {}
+
+ ~InternalRunDeathTestFlag() {
+ if (write_fd_ >= 0)
+ posix::Close(write_fd_);
+ }
+
+ String file() const { return file_; }
+ int line() const { return line_; }
+ int index() const { return index_; }
+ int write_fd() const { return write_fd_; }
+
+ private:
+ String file_;
+ int line_;
+ int index_;
+ int write_fd_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag);
+};
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag();
+
+#else // GTEST_HAS_DEATH_TEST
+
+// This macro is used for implementing macros such as
+// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where
+// death tests are not supported. Those macros must compile on such systems
+// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on
+// systems that support death tests. This allows one to write such a macro
+// on a system that does not support death tests and be sure that it will
+// compile on a death-test supporting system.
+//
+// Parameters:
+// statement - A statement that a macro such as EXPECT_DEATH would test
+// for program termination. This macro has to make sure this
+// statement is compiled but not executed, to ensure that
+// EXPECT_DEATH_IF_SUPPORTED compiles with a certain
+// parameter iff EXPECT_DEATH compiles with it.
+// regex - A regex that a macro such as EXPECT_DEATH would use to test
+// the output of statement. This parameter has to be
+// compiled but not evaluated by this macro, to ensure that
+// this macro only accepts expressions that a macro such as
+// EXPECT_DEATH would accept.
+// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED
+// and a return statement for ASSERT_DEATH_IF_SUPPORTED.
+// This ensures that ASSERT_DEATH_IF_SUPPORTED will not
+// compile inside functions where ASSERT_DEATH doesn't
+// compile.
+//
+// The branch that has an always false condition is used to ensure that
+// statement and regex are compiled (and thus syntactically correct) but
+// never executed. The unreachable code macro protects the terminator
+// statement from generating an 'unreachable code' warning in case
+// statement unconditionally returns or throws. The Message constructor at
+// the end allows the syntax of streaming additional messages into the
+// macro, for compile-time compatibility with EXPECT_DEATH/ASSERT_DEATH.
+#define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ GTEST_LOG_(WARNING) \
+ << "Death tests are not supported on this platform.\n" \
+ << "Statement '" #statement "' cannot be verified."; \
+ } else if (::testing::internal::AlwaysFalse()) { \
+ ::testing::internal::RE::PartialMatch(".*", (regex)); \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ terminator; \
+ } else \
+ ::testing::Message()
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+namespace testing {
+
+// This flag controls the style of death tests. Valid values are "threadsafe",
+// meaning that the death test child process will re-execute the test binary
+// from the start, running only a single death test, or "fast",
+// meaning that the child process will execute the test logic immediately
+// after forking.
+GTEST_DECLARE_string_(death_test_style);
+
+#if GTEST_HAS_DEATH_TEST
+
+// The following macros are useful for writing death tests.
+
+// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is
+// executed:
+//
+// 1. It generates a warning if there is more than one active
+// thread. This is because it's safe to fork() or clone() only
+// when there is a single thread.
+//
+// 2. The parent process clone()s a sub-process and runs the death
+// test in it; the sub-process exits with code 0 at the end of the
+// death test, if it hasn't exited already.
+//
+// 3. The parent process waits for the sub-process to terminate.
+//
+// 4. The parent process checks the exit code and error message of
+// the sub-process.
+//
+// Examples:
+//
+// ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number");
+// for (int i = 0; i < 5; i++) {
+// EXPECT_DEATH(server.ProcessRequest(i),
+// "Invalid request .* in ProcessRequest()")
+//                  << "Failed to die on request " << i;
+// }
+//
+// ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting");
+//
+// bool KilledBySIGHUP(int exit_code) {
+// return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP;
+// }
+//
+// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!");
+//
+// On the regular expressions used in death tests:
+//
+// On POSIX-compliant systems (*nix), we use the <regex.h> library,
+// which uses the POSIX extended regex syntax.
+//
+// On other platforms (e.g. Windows), we only support a simple regex
+// syntax implemented as part of Google Test. This limited
+// implementation should be enough most of the time when writing
+// death tests; though it lacks many features you can find in PCRE
+// or POSIX extended regex syntax. For example, we don't support
+// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and
+// repetition count ("x{5,7}"), among others.
+//
+// Below is the syntax that we do support. We chose it to be a
+// subset of both PCRE and POSIX extended regex, so it's easy to
+// learn wherever you come from. In the following: 'A' denotes a
+// literal character, period (.), or a single \\ escape sequence;
+// 'x' and 'y' denote regular expressions; 'm' and 'n' are for
+// natural numbers.
+//
+// c matches any literal character c
+// \\d matches any decimal digit
+// \\D matches any character that's not a decimal digit
+// \\f matches \f
+// \\n matches \n
+// \\r matches \r
+// \\s matches any ASCII whitespace, including \n
+// \\S matches any character that's not a whitespace
+// \\t matches \t
+// \\v matches \v
+// \\w matches any letter, _, or decimal digit
+// \\W matches any character that \\w doesn't match
+// \\c matches any literal character c, which must be a punctuation
+// . matches any single character except \n
+// A? matches 0 or 1 occurrences of A
+// A* matches 0 or many occurrences of A
+// A+ matches 1 or many occurrences of A
+// ^ matches the beginning of a string (not that of each line)
+// $ matches the end of a string (not that of each line)
+// xy matches x followed by y
+//
+// If you accidentally use PCRE or POSIX extended regex features
+// not implemented by us, you will get a run-time failure. In that
+// case, please try to rewrite your regular expression within the
+// above syntax.
+//
+// This implementation is *not* meant to be as highly tuned or robust
+// as a compiled regex library, but should perform well enough for a
+// death test, which already incurs significant overhead by launching
+// a child process.
+//
+// Known caveats:
+//
+// A "threadsafe" style death test obtains the path to the test
+// program from argv[0] and re-executes it in the sub-process. For
+// simplicity, the current implementation doesn't search the PATH
+// when launching the sub-process. This means that the user must
+// invoke the test program via a path that contains at least one
+// path separator (e.g. path/to/foo_test and
+// /absolute/path/to/bar_test are fine, but foo_test is not). This
+// is rarely a problem as people usually don't put the test binary
+// directory in PATH.
+//
+// TODO(wan@google.com): make thread-safe death tests search the PATH.
+
+// Asserts that a given statement causes the program to exit, with an
+// integer exit status that satisfies predicate, and emitting error output
+// that matches regex.
+#define ASSERT_EXIT(statement, predicate, regex) \
+ GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
+
+// Like ASSERT_EXIT, but continues on to successive tests in the
+// test case, if any:
+#define EXPECT_EXIT(statement, predicate, regex) \
+ GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
+
+// Asserts that a given statement causes the program to exit, either by
+// explicitly exiting with a nonzero exit code or being killed by a
+// signal, and emitting error output that matches regex.
+#define ASSERT_DEATH(statement, regex) \
+ ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+
+// Like ASSERT_DEATH, but continues on to successive tests in the
+// test case, if any:
+#define EXPECT_DEATH(statement, regex) \
+ EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+
+// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
+
+// Tests that an exit code describes a normal exit with a given exit code.
+class GTEST_API_ ExitedWithCode {
+ public:
+ explicit ExitedWithCode(int exit_code);
+ bool operator()(int exit_status) const;
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ExitedWithCode& other);
+
+ const int exit_code_;
+};
+
+#if !GTEST_OS_WINDOWS
+// Tests that an exit code describes an exit due to termination by a
+// given signal.
+class GTEST_API_ KilledBySignal {
+ public:
+ explicit KilledBySignal(int signum);
+ bool operator()(int exit_status) const;
+ private:
+ const int signum_;
+};
+#endif // !GTEST_OS_WINDOWS
+
+// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
+// The death testing framework causes this to have interesting semantics,
+// since the sideeffects of the call are only visible in opt mode, and not
+// in debug mode.
+//
+// In practice, this can be used to test functions that utilize the
+// LOG(DFATAL) macro using the following style:
+//
+// int DieInDebugOr12(int* sideeffect) {
+// if (sideeffect) {
+// *sideeffect = 12;
+// }
+// LOG(DFATAL) << "death";
+// return 12;
+// }
+//
+// TEST(TestCase, TestDieOr12WorksInDgbAndOpt) {
+// int sideeffect = 0;
+// // Only asserts in dbg.
+// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death");
+//
+// #ifdef NDEBUG
+// // opt-mode has sideeffect visible.
+// EXPECT_EQ(12, sideeffect);
+// #else
+// // dbg-mode no visible sideeffect.
+// EXPECT_EQ(0, sideeffect);
+// #endif
+// }
+//
+// This will assert that DieInDebugOr12() crashes in debug
+// mode, usually due to a DCHECK or LOG(DFATAL), but returns the
+// appropriate fallback value (12 in this case) in opt mode. If you
+// need to test that a function has appropriate side-effects in opt
+// mode, include assertions against the side-effects. A general
+// pattern for this is:
+//
+// EXPECT_DEBUG_DEATH({
+// // Side-effects here will have an effect after this statement in
+// // opt mode, but none in debug mode.
+// EXPECT_EQ(12, DieInDebugOr12(&sideeffect));
+// }, "death");
+//
+#ifdef NDEBUG
+
+#define EXPECT_DEBUG_DEATH(statement, regex) \
+ do { statement; } while (::testing::internal::AlwaysFalse())
+
+#define ASSERT_DEBUG_DEATH(statement, regex) \
+ do { statement; } while (::testing::internal::AlwaysFalse())
+
+#else
+
+#define EXPECT_DEBUG_DEATH(statement, regex) \
+ EXPECT_DEATH(statement, regex)
+
+#define ASSERT_DEBUG_DEATH(statement, regex) \
+ ASSERT_DEATH(statement, regex)
+
+#endif // NDEBUG for EXPECT_DEBUG_DEATH
+#endif // GTEST_HAS_DEATH_TEST
+
+// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
+// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if
+// death tests are supported; otherwise they just issue a warning. This is
+// useful when you are combining death test assertions with normal test
+// assertions in one test.
+#if GTEST_HAS_DEATH_TEST
+#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+ EXPECT_DEATH(statement, regex)
+#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+ ASSERT_DEATH(statement, regex)
+#else
+#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+ GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, )
+#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+ GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return)
+#endif
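+
+// For illustration only, a minimal usage sketch (Foo() and the expected
+// message are hypothetical):
+//
+//   TEST(MyDeathTest, CallsFoo) {
+//     // Runs as a real death test where supported; otherwise only warns.
+//     EXPECT_DEATH_IF_SUPPORTED(Foo(), "Foo failed");
+//     EXPECT_EQ(2, 1 + 1);  // normal assertions can be mixed in freely.
+//   }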
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the Message class.
+//
+// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
+// program!
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+
+#include <limits>
+
+
+namespace testing {
+
+// The Message class works like an ostream repeater.
+//
+// Typical usage:
+//
+// 1. You stream a bunch of values to a Message object.
+// It will remember the text in a StrStream.
+// 2. Then you stream the Message object to an ostream.
+// This causes the text in the Message to be streamed
+// to the ostream.
+//
+// For example,
+//
+// testing::Message foo;
+// foo << 1 << " != " << 2;
+// std::cout << foo;
+//
+// will print "1 != 2".
+//
+// Message is not intended to be inherited from. In particular, its
+// destructor is not virtual.
+//
+// Note that StrStream behaves differently in gcc and in MSVC. You
+// can stream a NULL char pointer to it in the former, but not in the
+// latter (it causes an access violation if you do). The Message
+// class hides this difference by treating a NULL char pointer as
+// "(null)".
+class GTEST_API_ Message {
+ private:
+ // The type of basic IO manipulators (endl, ends, and flush) for
+ // narrow streams.
+ typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&);
+
+ public:
+ // Constructs an empty Message.
+  // We allocate the StrStream separately because otherwise each use of
+  // ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's
+  // stack frame, leading to huge stack frames in some cases; gcc does not
+  // reuse the stack space.
+ Message() : ss_(new internal::StrStream) {
+ // By default, we want there to be enough precision when printing
+ // a double to a Message.
+ *ss_ << std::setprecision(std::numeric_limits<double>::digits10 + 2);
+ }
+
+ // Copy constructor.
+ Message(const Message& msg) : ss_(new internal::StrStream) { // NOLINT
+ *ss_ << msg.GetString();
+ }
+
+ // Constructs a Message from a C-string.
+ explicit Message(const char* str) : ss_(new internal::StrStream) {
+ *ss_ << str;
+ }
+
+ ~Message() { delete ss_; }
+#if GTEST_OS_SYMBIAN
+ // Streams a value (either a pointer or not) to this object.
+ template <typename T>
+ inline Message& operator <<(const T& value) {
+ StreamHelper(typename internal::is_pointer<T>::type(), value);
+ return *this;
+ }
+#else
+ // Streams a non-pointer value to this object.
+ template <typename T>
+ inline Message& operator <<(const T& val) {
+ ::GTestStreamToHelper(ss_, val);
+ return *this;
+ }
+
+ // Streams a pointer value to this object.
+ //
+ // This function is an overload of the previous one. When you
+ // stream a pointer to a Message, this definition will be used as it
+ // is more specialized. (The C++ Standard, section
+ // [temp.func.order].) If you stream a non-pointer, then the
+ // previous definition will be used.
+ //
+ // The reason for this overload is that streaming a NULL pointer to
+ // ostream is undefined behavior. Depending on the compiler, you
+ // may get "0", "(nil)", "(null)", or an access violation. To
+ // ensure consistent result across compilers, we always treat NULL
+ // as "(null)".
+ template <typename T>
+ inline Message& operator <<(T* const& pointer) { // NOLINT
+ if (pointer == NULL) {
+ *ss_ << "(null)";
+ } else {
+ ::GTestStreamToHelper(ss_, pointer);
+ }
+ return *this;
+ }
+#endif // GTEST_OS_SYMBIAN
+
+ // Since the basic IO manipulators are overloaded for both narrow
+ // and wide streams, we have to provide this specialized definition
+ // of operator <<, even though its body is the same as the
+ // templatized version above. Without this definition, streaming
+ // endl or other basic IO manipulators to Message will confuse the
+ // compiler.
+ Message& operator <<(BasicNarrowIoManip val) {
+ *ss_ << val;
+ return *this;
+ }
+
+ // Instead of 1/0, we want to see true/false for bool values.
+ Message& operator <<(bool b) {
+ return *this << (b ? "true" : "false");
+ }
+
+ // These two overloads allow streaming a wide C string to a Message
+ // using the UTF-8 encoding.
+ Message& operator <<(const wchar_t* wide_c_str) {
+ return *this << internal::String::ShowWideCString(wide_c_str);
+ }
+ Message& operator <<(wchar_t* wide_c_str) {
+ return *this << internal::String::ShowWideCString(wide_c_str);
+ }
+
+#if GTEST_HAS_STD_WSTRING
+ // Converts the given wide string to a narrow string using the UTF-8
+ // encoding, and streams the result to this Message object.
+ Message& operator <<(const ::std::wstring& wstr);
+#endif // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+ // Converts the given wide string to a narrow string using the UTF-8
+ // encoding, and streams the result to this Message object.
+ Message& operator <<(const ::wstring& wstr);
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+ // Gets the text streamed to this object so far as a String.
+ // Each '\0' character in the buffer is replaced with "\\0".
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ internal::String GetString() const {
+ return internal::StrStreamToString(ss_);
+ }
+
+ private:
+#if GTEST_OS_SYMBIAN
+ // These are needed as the Nokia Symbian Compiler cannot decide between
+ // const T& and const T* in a function template. The Nokia compiler _can_
+ // decide between class template specializations for T and T*, so a
+ // tr1::type_traits-like is_pointer works, and we can overload on that.
+ template <typename T>
+ inline void StreamHelper(internal::true_type /*dummy*/, T* pointer) {
+ if (pointer == NULL) {
+ *ss_ << "(null)";
+ } else {
+ ::GTestStreamToHelper(ss_, pointer);
+ }
+ }
+ template <typename T>
+ inline void StreamHelper(internal::false_type /*dummy*/, const T& value) {
+ ::GTestStreamToHelper(ss_, value);
+ }
+#endif // GTEST_OS_SYMBIAN
+
+ // We'll hold the text streamed to this object here.
+ internal::StrStream* const ss_;
+
+ // We declare (but don't implement) this to prevent the compiler
+ // from implementing the assignment operator.
+ void operator=(const Message&);
+};
+
+// Streams a Message to an ostream.
+inline std::ostream& operator <<(std::ostream& os, const Message& sb) {
+ return os << sb.GetString();
+}
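+
+// A minimal usage sketch (the values are illustrative only): Message
+// accumulates everything streamed to it into a single string, and a NULL
+// char pointer is rendered as "(null)" instead of invoking undefined
+// behavior:
+//
+//   testing::Message msg;
+//   msg << "value = " << 3.5 << ", name = " << static_cast<const char*>(NULL);
+//   // msg.GetString() now holds "value = 3.5, name = (null)".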
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: vladl@google.com (Vlad Losev)
+//
+// Macros and functions for implementing parameterized tests
+// in Google C++ Testing Framework (Google Test)
+//
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+
+
+// Value-parameterized tests allow you to test your code with different
+// parameters without writing multiple copies of the same test.
+//
+// Here is how you use value-parameterized tests:
+
+#if 0
+
+// To write value-parameterized tests, first you should define a fixture
+// class. It must be derived from testing::TestWithParam<T>, where T is
+// the type of your parameter values. TestWithParam<T> is itself derived
+// from testing::Test. T can be any copyable type. If it's a raw pointer,
+// you are responsible for managing the lifespan of the pointed values.
+
+class FooTest : public ::testing::TestWithParam<const char*> {
+ // You can implement all the usual class fixture members here.
+};
+
+// Then, use the TEST_P macro to define as many parameterized tests
+// for this fixture as you want. The _P suffix is for "parameterized"
+// or "pattern", whichever you prefer to think.
+
+TEST_P(FooTest, DoesBlah) {
+ // Inside a test, access the test parameter with the GetParam() method
+ // of the TestWithParam<T> class:
+ EXPECT_TRUE(foo.Blah(GetParam()));
+ ...
+}
+
+TEST_P(FooTest, HasBlahBlah) {
+ ...
+}
+
+// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test
+// case with any set of parameters you want. Google Test defines a number
+// of functions for generating test parameters. They return what we call
+// (surprise!) parameter generators. Here is a summary of them, which
+// are all in the testing namespace:
+//
+//
+//   Range(begin, end [, step]) - Yields values {begin, begin+step,
+//                                begin+step+step, ...}. The values do not
+//                                include end. step defaults to 1.
+//   Values(v1, v2, ..., vN)    - Yields values {v1, v2, ..., vN}.
+//   ValuesIn(container)        - Yields values from a C-style array, an STL
+//   ValuesIn(begin, end)         container, or an iterator range [begin, end).
+//   Bool()                     - Yields sequence {false, true}.
+//   Combine(g1, g2, ..., gN)   - Yields all combinations (the Cartesian
+//                                product for the math savvy) of the values
+//                                generated by the N generators.
+//
+// For more details, see comments at the definitions of these functions below
+// in this file; a brief additional sketch using Range() and Bool() appears at
+// the end of this example block.
+//
+// The following statement will instantiate tests from the FooTest test case
+// each with parameter values "meeny", "miny", and "moe".
+
+INSTANTIATE_TEST_CASE_P(InstantiationName,
+ FooTest,
+ Values("meeny", "miny", "moe"));
+
+// To distinguish different instances of the pattern (yes, you
+// can instantiate it more than once), the first argument to the
+// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the
+// actual test case name. Remember to pick unique prefixes for different
+// instantiations. The tests from the instantiation above will have
+// these names:
+//
+// * InstantiationName/FooTest.DoesBlah/0 for "meeny"
+// * InstantiationName/FooTest.DoesBlah/1 for "miny"
+// * InstantiationName/FooTest.DoesBlah/2 for "moe"
+// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
+// * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
+// * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
+//
+// You can use these names in --gtest_filter.
+//
+// This statement will instantiate all tests from FooTest again, each
+// with parameter values "cat" and "dog":
+
+const char* pets[] = {"cat", "dog"};
+INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
+
+// The tests from the instantiation above will have these names:
+//
+// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
+//
+// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests
+// in the given test case, whether their definitions come before or
+// AFTER the INSTANTIATE_TEST_CASE_P statement.
+//
+// Please also note that generator expressions (including parameters to the
+// generators) are evaluated in InitGoogleTest(), after main() has started.
+// This allows the user, on one hand, to adjust generator parameters in order
+// to dynamically determine a set of tests to run and, on the other hand, to
+// inspect the generated tests with the Google Test reflection API before
+// RUN_ALL_TESTS() is executed.
+//
+// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
+// for more examples.
+//
+// In the future, we plan to publish the API for defining new parameter
+// generators. But for now this interface remains part of the internal
+// implementation and is subject to change.
+
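+// A minimal additional sketch: assuming two more fixtures, IntFooTest
+// parameterized on int and FlagFooTest parameterized on bool (each with its
+// own TEST_P tests, defined as shown above for FooTest), Range() and Bool()
+// can be used the same way:
+
+class IntFooTest : public ::testing::TestWithParam<int> {
+};
+
+class FlagFooTest : public ::testing::TestWithParam<bool> {
+};
+
+// Range(0, 5) yields 0, 1, 2, 3, 4 (the end value 5 is excluded); Bool()
+// yields false and true.
+INSTANTIATE_TEST_CASE_P(SmallInts, IntFooTest, Range(0, 5));
+INSTANTIATE_TEST_CASE_P(Flags, FlagFooTest, Bool());
+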
+#endif // 0
+
+
+#if !GTEST_OS_SYMBIAN
+#include <utility>
+#endif
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+
+#include <iterator>
+#include <utility>
+#include <vector>
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2003 Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Dan Egnor (egnor@google.com)
+//
+// A "smart" pointer type with reference tracking. Every pointer to a
+// particular object is kept on a circular linked list. When the last pointer
+// to an object is destroyed or reassigned, the object is deleted.
+//
+// Used properly, this deletes the object when the last reference goes away.
+// There are several caveats:
+// - Like all reference counting schemes, cycles lead to leaks.
+// - Each smart pointer is actually two pointers (e.g. 8 bytes instead of 4
+//   on a 32-bit platform).
+// - Every time a pointer is assigned, the entire list of pointers to that
+// object is traversed. This class is therefore NOT SUITABLE when there
+// will often be more than two or three pointers to a particular object.
+// - References are only tracked as long as linked_ptr<> objects are copied.
+// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
+// will happen (double deletion).
+//
+// A good use of this class is storing object references in STL containers.
+// You can safely put linked_ptr<> in a vector<>.
+// Other uses may not be as good.
+//
+// Note: If you use an incomplete type with linked_ptr<>, the class
+// *containing* linked_ptr<> must have a constructor and destructor (even
+// if they do nothing!).
+//
+// Bill Gibbons suggested we use something like this.
+//
+// Thread Safety:
+// Unlike other linked_ptr implementations, in this implementation
+// a linked_ptr object is thread-safe in the sense that:
+// - it's safe to copy linked_ptr objects concurrently,
+// - it's safe to copy *from* a linked_ptr and read its underlying
+// raw pointer (e.g. via get()) concurrently, and
+// - it's safe to write to two linked_ptrs that point to the same
+// shared object concurrently.
+// TODO(wan@google.com): rename this to safe_linked_ptr to avoid
+// confusion with normal linked_ptr.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+
+#include <stdlib.h>
+#include <assert.h>
+
+
+namespace testing {
+namespace internal {
+
+// Protects copying of all linked_ptr objects.
+GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// This is used internally by all instances of linked_ptr<>. It needs to be
+// a non-template class because different types of linked_ptr<> can refer to
+// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
+// Since it needs to be possible for different types of linked_ptr to
+// participate in the same circular linked list, we need a single class type
+// here.
+//
+// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr<T>.
+class linked_ptr_internal {
+ public:
+ // Create a new circle that includes only this instance.
+ void join_new() {
+ next_ = this;
+ }
+
+ // Many linked_ptr operations may change p.link_ for some linked_ptr
+ // variable p in the same circle as this object. Therefore we need
+ // to prevent two such operations from occurring concurrently.
+ //
+ // Note that different types of linked_ptr objects can coexist in a
+ // circle (e.g. linked_ptr<Base>, linked_ptr<Derived1>, and
+ // linked_ptr<Derived2>). Therefore we must use a single mutex to
+ // protect all linked_ptr objects. This can create serious
+ // contention in production code, but is acceptable in a testing
+ // framework.
+
+ // Join an existing circle.
+ // L < g_linked_ptr_mutex
+ void join(linked_ptr_internal const* ptr) {
+ MutexLock lock(&g_linked_ptr_mutex);
+
+ linked_ptr_internal const* p = ptr;
+ while (p->next_ != ptr) p = p->next_;
+ p->next_ = this;
+ next_ = ptr;
+ }
+
+ // Leave whatever circle we're part of. Returns true if we were the
+ // last member of the circle. Once this is done, you can join() another.
+ // L < g_linked_ptr_mutex
+ bool depart() {
+ MutexLock lock(&g_linked_ptr_mutex);
+
+ if (next_ == this) return true;
+ linked_ptr_internal const* p = next_;
+ while (p->next_ != this) p = p->next_;
+ p->next_ = next_;
+ return false;
+ }
+
+ private:
+ mutable linked_ptr_internal const* next_;
+};
+
+template <typename T>
+class linked_ptr {
+ public:
+ typedef T element_type;
+
+ // Take over ownership of a raw pointer. This should happen as soon as
+ // possible after the object is created.
+ explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
+ ~linked_ptr() { depart(); }
+
+ // Copy an existing linked_ptr<>, adding ourselves to the list of references.
+ template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
+ linked_ptr(linked_ptr const& ptr) { // NOLINT
+ assert(&ptr != this);
+ copy(&ptr);
+ }
+
+ // Assignment releases the old value and acquires the new.
+ template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
+ depart();
+ copy(&ptr);
+ return *this;
+ }
+
+ linked_ptr& operator=(linked_ptr const& ptr) {
+ if (&ptr != this) {
+ depart();
+ copy(&ptr);
+ }
+ return *this;
+ }
+
+ // Smart pointer members.
+ void reset(T* ptr = NULL) {
+ depart();
+ capture(ptr);
+ }
+ T* get() const { return value_; }
+ T* operator->() const { return value_; }
+ T& operator*() const { return *value_; }
+ // Releases ownership of the pointed-to object and returns it.
+ // Sole ownership by this linked_ptr object is required.
+ T* release() {
+ bool last = link_.depart();
+ assert(last);
+ T* v = value_;
+ value_ = NULL;
+ return v;
+ }
+
+ bool operator==(T* p) const { return value_ == p; }
+ bool operator!=(T* p) const { return value_ != p; }
+ template <typename U>
+ bool operator==(linked_ptr<U> const& ptr) const {
+ return value_ == ptr.get();
+ }
+ template <typename U>
+ bool operator!=(linked_ptr<U> const& ptr) const {
+ return value_ != ptr.get();
+ }
+
+ private:
+ template <typename U>
+ friend class linked_ptr;
+
+ T* value_;
+ linked_ptr_internal link_;
+
+ void depart() {
+ if (link_.depart()) delete value_;
+ }
+
+ void capture(T* ptr) {
+ value_ = ptr;
+ link_.join_new();
+ }
+
+ template <typename U> void copy(linked_ptr<U> const* ptr) {
+ value_ = ptr->get();
+ if (value_)
+ link_.join(&ptr->link_);
+ else
+ link_.join_new();
+ }
+};
+
+template<typename T> inline
+bool operator==(T* ptr, const linked_ptr<T>& x) {
+ return ptr == x.get();
+}
+
+template<typename T> inline
+bool operator!=(T* ptr, const linked_ptr<T>& x) {
+ return ptr != x.get();
+}
+
+// A function to convert T* into linked_ptr<T>
+// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
+// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
+template <typename T>
+linked_ptr<T> make_linked_ptr(T* ptr) {
+ return linked_ptr<T>(ptr);
+}
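+
+// A minimal usage sketch (the element values are illustrative only):
+// linked_ptr is mainly useful for keeping shared objects in STL containers:
+//
+//   std::vector<linked_ptr<int> > owners;
+//   owners.push_back(make_linked_ptr(new int(42)));
+//   owners.push_back(owners[0]);  // Both elements now share one int, which
+//                                 // is deleted when the last copy goes away.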
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+namespace internal {
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Outputs a message explaining invalid registration of different
+// fixture class for the same test case. This may happen when
+// TEST_P macro is used to define two tests with the same name
+// but in different namespaces.
+GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name,
+ const char* file, int line);
+
+template <typename> class ParamGeneratorInterface;
+template <typename> class ParamGenerator;
+
+// Interface for iterating over elements provided by an implementation
+// of ParamGeneratorInterface<T>.
+template <typename T>
+class ParamIteratorInterface {
+ public:
+ virtual ~ParamIteratorInterface() {}
+ // A pointer to the base generator instance.
+ // Used only for the purposes of iterator comparison
+ // to make sure that two iterators belong to the same generator.
+ virtual const ParamGeneratorInterface<T>* BaseGenerator() const = 0;
+ // Advances iterator to point to the next element
+ // provided by the generator. The caller is responsible
+ // for not calling Advance() on an iterator equal to
+ // BaseGenerator()->End().
+ virtual void Advance() = 0;
+ // Clones the iterator object. Used for implementing copy semantics
+ // of ParamIterator<T>.
+ virtual ParamIteratorInterface* Clone() const = 0;
+ // Dereferences the current iterator and provides (read-only) access
+ // to the pointed value. It is the caller's responsibility not to call
+ // Current() on an iterator equal to BaseGenerator()->End().
+ // Used for implementing ParamGenerator<T>::operator*().
+ virtual const T* Current() const = 0;
+ // Determines whether the given iterator and other point to the same
+ // element in the sequence generated by the generator.
+ // Used for implementing ParamGenerator<T>::operator==().
+ virtual bool Equals(const ParamIteratorInterface& other) const = 0;
+};
+
+// Class iterating over elements provided by an implementation of
+// ParamGeneratorInterface<T>. It wraps ParamIteratorInterface<T>
+// and implements the const forward iterator concept.
+template <typename T>
+class ParamIterator {
+ public:
+ typedef T value_type;
+ typedef const T& reference;
+ typedef ptrdiff_t difference_type;
+
+ // ParamIterator assumes ownership of the impl_ pointer.
+ ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {}
+ ParamIterator& operator=(const ParamIterator& other) {
+ if (this != &other)
+ impl_.reset(other.impl_->Clone());
+ return *this;
+ }
+
+ const T& operator*() const { return *impl_->Current(); }
+ const T* operator->() const { return impl_->Current(); }
+ // Prefix version of operator++.
+ ParamIterator& operator++() {
+ impl_->Advance();
+ return *this;
+ }
+ // Postfix version of operator++.
+ ParamIterator operator++(int /*unused*/) {
+ ParamIteratorInterface<T>* clone = impl_->Clone();
+ impl_->Advance();
+ return ParamIterator(clone);
+ }
+ bool operator==(const ParamIterator& other) const {
+ return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_);
+ }
+ bool operator!=(const ParamIterator& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ friend class ParamGenerator<T>;
+ explicit ParamIterator(ParamIteratorInterface<T>* impl) : impl_(impl) {}
+ scoped_ptr<ParamIteratorInterface<T> > impl_;
+};
+
+// ParamGeneratorInterface<T> is the binary interface to access generators
+// defined in other translation units.
+template <typename T>
+class ParamGeneratorInterface {
+ public:
+ typedef T ParamType;
+
+ virtual ~ParamGeneratorInterface() {}
+
+ // Generator interface definition
+ virtual ParamIteratorInterface<T>* Begin() const = 0;
+ virtual ParamIteratorInterface<T>* End() const = 0;
+};
+
+// Wraps ParamGeneratorInterface<T> and provides general generator syntax
+// compatible with the STL Container concept.
+// This class implements copy initialization semantics and the contained
+// ParamGeneratorInterface<T> instance is shared among all copies
+// of the original object. This is possible because that instance is immutable.
+template<typename T>
+class ParamGenerator {
+ public:
+ typedef ParamIterator<T> iterator;
+
+ explicit ParamGenerator(ParamGeneratorInterface<T>* impl) : impl_(impl) {}
+ ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {}
+
+ ParamGenerator& operator=(const ParamGenerator& other) {
+ impl_ = other.impl_;
+ return *this;
+ }
+
+ iterator begin() const { return iterator(impl_->Begin()); }
+ iterator end() const { return iterator(impl_->End()); }
+
+ private:
+ ::testing::internal::linked_ptr<const ParamGeneratorInterface<T> > impl_;
+};
+
+// Generates values from a range of two comparable values. Can be used to
+// generate sequences of user-defined types that implement operator+() and
+// operator<().
+// This class is used in the Range() function.
+template <typename T, typename IncrementT>
+class RangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+ RangeGenerator(T begin, T end, IncrementT step)
+ : begin_(begin), end_(end),
+ step_(step), end_index_(CalculateEndIndex(begin, end, step)) {}
+ virtual ~RangeGenerator() {}
+
+ virtual ParamIteratorInterface<T>* Begin() const {
+ return new Iterator(this, begin_, 0, step_);
+ }
+ virtual ParamIteratorInterface<T>* End() const {
+ return new Iterator(this, end_, end_index_, step_);
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<T> {
+ public:
+ Iterator(const ParamGeneratorInterface<T>* base, T value, int index,
+ IncrementT step)
+ : base_(base), value_(value), index_(index), step_(step) {}
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+ return base_;
+ }
+ virtual void Advance() {
+ value_ = value_ + step_;
+ index_++;
+ }
+ virtual ParamIteratorInterface<T>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const T* Current() const { return &value_; }
+ virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const int other_index =
+ CheckedDowncastToActualType<const Iterator>(&other)->index_;
+ return index_ == other_index;
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : ParamIteratorInterface<T>(),
+ base_(other.base_), value_(other.value_), index_(other.index_),
+ step_(other.step_) {}
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<T>* const base_;
+ T value_;
+ int index_;
+ const IncrementT step_;
+ }; // class RangeGenerator::Iterator
+
+ static int CalculateEndIndex(const T& begin,
+ const T& end,
+ const IncrementT& step) {
+ int end_index = 0;
+ for (T i = begin; i < end; i = i + step)
+ end_index++;
+ return end_index;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const RangeGenerator& other);
+
+ const T begin_;
+ const T end_;
+ const IncrementT step_;
+ // The index for the end() iterator. All the elements in the generated
+ // sequence are indexed (0-based) to aid iterator comparison.
+ const int end_index_;
+}; // class RangeGenerator
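+
+// A brief sketch of the resulting sequence (illustrative, not exercised by
+// this header): the end value is never yielded, matching the "i < end" loop
+// in CalculateEndIndex() above.
+//
+//   ParamGenerator<int> gen = Range(0, 10, 3);  // yields {0, 3, 6, 9}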
+
+
+// Generates values from a pair of STL-style iterators. Used in the
+// ValuesIn() function. The elements are copied from the source range
+// since the source can be located on the stack, and the generator
+// is likely to persist beyond that stack frame.
+template <typename T>
+class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+ template <typename ForwardIterator>
+ ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
+ : container_(begin, end) {}
+ virtual ~ValuesInIteratorRangeGenerator() {}
+
+ virtual ParamIteratorInterface<T>* Begin() const {
+ return new Iterator(this, container_.begin());
+ }
+ virtual ParamIteratorInterface<T>* End() const {
+ return new Iterator(this, container_.end());
+ }
+
+ private:
+ typedef typename ::std::vector<T> ContainerType;
+
+ class Iterator : public ParamIteratorInterface<T> {
+ public:
+ Iterator(const ParamGeneratorInterface<T>* base,
+ typename ContainerType::const_iterator iterator)
+ : base_(base), iterator_(iterator) {}
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+ return base_;
+ }
+ virtual void Advance() {
+ ++iterator_;
+ value_.reset();
+ }
+ virtual ParamIteratorInterface<T>* Clone() const {
+ return new Iterator(*this);
+ }
+ // We need to use the cached value referenced by iterator_ because *iterator_
+ // can return a temporary object (and of a type other than T), so just
+ // having "return &*iterator_;" doesn't work.
+ // value_ is updated here and not in Advance() because Advance()
+ // can advance iterator_ beyond the end of the range, and we cannot
+ // detect that fact. The client code, on the other hand, is
+ // responsible for not calling Current() on an out-of-range iterator.
+ virtual const T* Current() const {
+ if (value_.get() == NULL)
+ value_.reset(new T(*iterator_));
+ return value_.get();
+ }
+ virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ return iterator_ ==
+ CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ // The explicit constructor call suppresses a false warning
+ // emitted by gcc when supplied with the -Wextra option.
+ : ParamIteratorInterface<T>(),
+ base_(other.base_),
+ iterator_(other.iterator_) {}
+
+ const ParamGeneratorInterface<T>* const base_;
+ typename ContainerType::const_iterator iterator_;
+ // A cached value of *iterator_. We keep it here to allow access by
+ // pointer in the wrapping iterator's operator->().
+ // value_ needs to be mutable to be accessed in Current().
+ // Use of scoped_ptr helps manage cached value's lifetime,
+ // which is bound by the lifespan of the iterator itself.
+ mutable scoped_ptr<const T> value_;
+ }; // class ValuesInIteratorRangeGenerator::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const ValuesInIteratorRangeGenerator& other);
+
+ const ContainerType container_;
+}; // class ValuesInIteratorRangeGenerator
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Stores a parameter value and later creates tests parameterized with that
+// value.
+template <class TestClass>
+class ParameterizedTestFactory : public TestFactoryBase {
+ public:
+ typedef typename TestClass::ParamType ParamType;
+ explicit ParameterizedTestFactory(ParamType parameter) :
+ parameter_(parameter) {}
+ virtual Test* CreateTest() {
+ TestClass::SetParam(&parameter_);
+ return new TestClass();
+ }
+
+ private:
+ const ParamType parameter_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactoryBase is a base class for meta-factories that create
+// test factories for passing into MakeAndRegisterTestInfo function.
+template <class ParamType>
+class TestMetaFactoryBase {
+ public:
+ virtual ~TestMetaFactoryBase() {}
+
+ virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactory creates test factories for passing into the
+// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives
+// ownership of the test factory pointer, the same factory object cannot be
+// passed into that method twice. But ParameterizedTestCaseInfo is going to
+// call it for each Test/Parameter value combination. Thus it needs a meta
+// factory creator class.
+template <class TestCase>
+class TestMetaFactory
+ : public TestMetaFactoryBase<typename TestCase::ParamType> {
+ public:
+ typedef typename TestCase::ParamType ParamType;
+
+ TestMetaFactory() {}
+
+ virtual TestFactoryBase* CreateTestFactory(ParamType parameter) {
+ return new ParameterizedTestFactory<TestCase>(parameter);
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfoBase is a generic interface
+// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase
+// accumulates test information provided by TEST_P macro invocations
+// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations
+// and uses that information to register all resulting test instances
+// in the RegisterTests method. The ParameterizedTestCaseRegistry class holds
+// a collection of pointers to the ParameterizedTestCaseInfo objects
+// and calls RegisterTests() on each of them when asked.
+class ParameterizedTestCaseInfoBase {
+ public:
+ virtual ~ParameterizedTestCaseInfoBase() {}
+
+ // Base part of test case name for display purposes.
+ virtual const String& GetTestCaseName() const = 0;
+ // Test case id to verify identity.
+ virtual TypeId GetTestCaseTypeId() const = 0;
+ // UnitTest class invokes this method to register tests in this
+ // test case right before running them in the RUN_ALL_TESTS macro.
+ // This method should not be called more than once on any single
+ // instance of a ParameterizedTestCaseInfoBase-derived class.
+ virtual void RegisterTests() = 0;
+
+ protected:
+ ParameterizedTestCaseInfoBase() {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P
+// macro invocations for a particular test case and generators
+// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that
+// test case. It registers tests with all values generated by all
+// generators when asked.
+template <class TestCase>
+class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
+ public:
+ // ParamType and GeneratorCreationFunc are private types but are required
+ // for declarations of public methods AddTestPattern() and
+ // AddTestCaseInstantiation().
+ typedef typename TestCase::ParamType ParamType;
+ // A function that returns an instance of appropriate generator type.
+ typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();
+
+ explicit ParameterizedTestCaseInfo(const char* name)
+ : test_case_name_(name) {}
+
+ // Test case base name for display purposes.
+ virtual const String& GetTestCaseName() const { return test_case_name_; }
+ // Test case id to verify identity.
+ virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }
+ // TEST_P macro uses AddTestPattern() to record information
+ // about a single test in a TestInfo structure.
+ // test_case_name is the base name of the test case (without invocation
+ // prefix). test_base_name is the name of an individual test without
+ // parameter index. For the test SequenceA/FooTest.DoBar/1, FooTest is
+ // the test case base name and DoBar is the test base name.
+ void AddTestPattern(const char* test_case_name,
+ const char* test_base_name,
+ TestMetaFactoryBase<ParamType>* meta_factory) {
+ tests_.push_back(linked_ptr<TestInfo>(new TestInfo(test_case_name,
+ test_base_name,
+ meta_factory)));
+ }
+ // INSTANTIATE_TEST_CASE_P macro uses AddTestCaseInstantiation() to record
+ // information about a generator.
+ int AddTestCaseInstantiation(const char* instantiation_name,
+ GeneratorCreationFunc* func,
+ const char* /* file */,
+ int /* line */) {
+ instantiations_.push_back(::std::make_pair(instantiation_name, func));
+ return 0; // Return value used only to run this method in namespace scope.
+ }
+ // UnitTest class invokes this method to register tests in this test case
+ // right before running tests in the RUN_ALL_TESTS macro.
+ // This method should not be called more than once on any single
+ // instance of a ParameterizedTestCaseInfoBase-derived class.
+ // UnitTest has a guard to prevent this method from being called more than
+ // once.
+ virtual void RegisterTests() {
+ for (typename TestInfoContainer::iterator test_it = tests_.begin();
+ test_it != tests_.end(); ++test_it) {
+ linked_ptr<TestInfo> test_info = *test_it;
+ for (typename InstantiationContainer::iterator gen_it =
+ instantiations_.begin(); gen_it != instantiations_.end();
+ ++gen_it) {
+ const String& instantiation_name = gen_it->first;
+ ParamGenerator<ParamType> generator((*gen_it->second)());
+
+ Message test_case_name_stream;
+ if ( !instantiation_name.empty() )
+ test_case_name_stream << instantiation_name.c_str() << "/";
+ test_case_name_stream << test_info->test_case_base_name.c_str();
+
+ int i = 0;
+ for (typename ParamGenerator<ParamType>::iterator param_it =
+ generator.begin();
+ param_it != generator.end(); ++param_it, ++i) {
+ Message test_name_stream;
+ test_name_stream << test_info->test_base_name.c_str() << "/" << i;
+ ::testing::internal::MakeAndRegisterTestInfo(
+ test_case_name_stream.GetString().c_str(),
+ test_name_stream.GetString().c_str(),
+ "", // test_case_comment
+ "", // comment; TODO(vladl@google.com): provide parameter value
+ // representation.
+ GetTestCaseTypeId(),
+ TestCase::SetUpTestCase,
+ TestCase::TearDownTestCase,
+ test_info->test_meta_factory->CreateTestFactory(*param_it));
+ } // for param_it
+ } // for gen_it
+ } // for test_it
+ } // RegisterTests
+
+ private:
+ // The TestInfo structure below keeps information about a single test
+ // registered with the TEST_P macro.
+ struct TestInfo {
+ TestInfo(const char* a_test_case_base_name,
+ const char* a_test_base_name,
+ TestMetaFactoryBase<ParamType>* a_test_meta_factory) :
+ test_case_base_name(a_test_case_base_name),
+ test_base_name(a_test_base_name),
+ test_meta_factory(a_test_meta_factory) {}
+
+ const String test_case_base_name;
+ const String test_base_name;
+ const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
+ };
+ typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;
+ // Keeps pairs of <Instantiation name, Sequence generator creation function>
+ // received from INSTANTIATE_TEST_CASE_P macros.
+ typedef ::std::vector<std::pair<String, GeneratorCreationFunc*> >
+ InstantiationContainer;
+
+ const String test_case_name_;
+ TestInfoContainer tests_;
+ InstantiationContainer instantiations_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);
+}; // class ParameterizedTestCaseInfo
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseRegistry contains a map of ParameterizedTestCaseInfoBase
+// classes accessed by test case names. TEST_P and INSTANTIATE_TEST_CASE_P
+// macros use it to locate their corresponding ParameterizedTestCaseInfo
+// descriptors.
+class ParameterizedTestCaseRegistry {
+ public:
+ ParameterizedTestCaseRegistry() {}
+ ~ParameterizedTestCaseRegistry() {
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ delete *it;
+ }
+ }
+
+ // Looks up or creates and returns a structure containing information about
+ // tests and instantiations of a particular test case.
+ template <class TestCase>
+ ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
+ const char* test_case_name,
+ const char* file,
+ int line) {
+ ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ if ((*it)->GetTestCaseName() == test_case_name) {
+ if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
+ // Complain about incorrect usage of Google Test facilities
+ // and terminate the program since we cannot guarantee correct
+ // test case setup and tear-down in this case.
+ ReportInvalidTestCaseType(test_case_name, file, line);
+ abort();
+ } else {
+ // At this point we are sure that the object we found is of the same
+ // type we are looking for, so we downcast it to that type
+ // without further checks.
+ typed_test_info = CheckedDowncastToActualType<
+ ParameterizedTestCaseInfo<TestCase> >(*it);
+ }
+ break;
+ }
+ }
+ if (typed_test_info == NULL) {
+ typed_test_info = new ParameterizedTestCaseInfo<TestCase>(test_case_name);
+ test_case_infos_.push_back(typed_test_info);
+ }
+ return typed_test_info;
+ }
+ void RegisterTests() {
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ (*it)->RegisterTests();
+ }
+ }
+
+ private:
+ typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;
+
+ TestCaseInfoContainer test_case_infos_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
+};
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+// Currently Google Test supports at most 50 arguments in Values,
+// and at most 10 arguments in Combine. Please contact
+// googletestframework@googlegroups.com if you need more.
+// Please note that the number of arguments to Combine is limited
+// by the maximum arity of the implementation of tr1::tuple which is
+// currently set at 10.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Forward declarations of ValuesIn(), which is implemented in
+// include/gtest/gtest-param-test.h.
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::std::iterator_traits<ForwardIterator>::value_type> ValuesIn(
+ ForwardIterator begin, ForwardIterator end);
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]);
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container);
+
+namespace internal {
+
+// Used in the Values() function to provide polymorphic capabilities.
+template <typename T1>
+class ValueArray1 {
+ public:
+ explicit ValueArray1(T1 v1) : v1_(v1) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const { return ValuesIn(&v1_, &v1_ + 1); }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray1& other);
+
+ const T1 v1_;
+};
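+
+// A brief sketch of the polymorphism mentioned above (the fixture names are
+// hypothetical): because the conversion operator is templated on T, one
+// Values(...) expression can serve fixtures with different parameter types,
+// provided the listed values are convertible to each fixture's ParamType:
+//
+//   INSTANTIATE_TEST_CASE_P(AsInts,    IntFooTest,    Values(1, 2, 3));
+//   INSTANTIATE_TEST_CASE_P(AsDoubles, DoubleFooTest, Values(1, 2, 3));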
+
+template <typename T1, typename T2>
+class ValueArray2 {
+ public:
+ ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray2& other);
+
+ const T1 v1_;
+ const T2 v2_;
+};
+
+template <typename T1, typename T2, typename T3>
+class ValueArray3 {
+ public:
+ ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray3& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4>
+class ValueArray4 {
+ public:
+ ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray4& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class ValueArray5 {
+ public:
+ ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray5& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+class ValueArray6 {
+ public:
+ ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray6& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+class ValueArray7 {
+ public:
+ ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray7& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+class ValueArray8 {
+ public:
+ ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray8& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+class ValueArray9 {
+ public:
+ ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray9& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+class ValueArray10 {
+ public:
+ ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray10& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+class ValueArray11 {
+ public:
+ ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray11& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+class ValueArray12 {
+ public:
+ ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray12& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+class ValueArray13 {
+ public:
+ ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray13& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+class ValueArray14 {
+ public:
+ ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray14& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+class ValueArray15 {
+ public:
+ ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray15& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+class ValueArray16 {
+ public:
+ ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray16& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+class ValueArray17 {
+ public:
+ ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray17& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+class ValueArray18 {
+ public:
+ ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray18& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+class ValueArray19 {
+ public:
+ ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray19& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+class ValueArray20 {
+ public:
+ ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray20& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+class ValueArray21 {
+ public:
+ ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray21& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+class ValueArray22 {
+ public:
+ ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray22& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+class ValueArray23 {
+ public:
+ ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_,
+ v23_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray23& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+class ValueArray24 {
+ public:
+ ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray24& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+class ValueArray25 {
+ public:
+ ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray25& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+class ValueArray26 {
+ public:
+ ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray26& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+class ValueArray27 {
+ public:
+ ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+ v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+ v26_(v26), v27_(v27) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray27& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+class ValueArray28 {
+ public:
+ ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+ v25_(v25), v26_(v26), v27_(v27), v28_(v28) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray28& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+class ValueArray29 {
+ public:
+ ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+ v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray29& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+class ValueArray30 {
+ public:
+ ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray30& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+class ValueArray31 {
+ public:
+ ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray31& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+class ValueArray32 {
+ public:
+ ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+ v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray32& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+class ValueArray33 {
+ public:
+ ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
+ T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray33& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+class ValueArray34 {
+ public:
+ ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray34& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+class ValueArray35 {
+ public:
+ ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+ v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+ v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
+ v32_(v32), v33_(v33), v34_(v34), v35_(v35) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_,
+ v35_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray35& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+class ValueArray36 {
+ public:
+ ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+ v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
+ v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray36& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+class ValueArray37 {
+ public:
+ ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+ v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
+ v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
+ v36_(v36), v37_(v37) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray37& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+class ValueArray38 {
+ public:
+ ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray38& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+class ValueArray39 {
+ public:
+ ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray39& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+class ValueArray40 {
+ public:
+ ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+ v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
+ v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
+ v40_(v40) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray40& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+class ValueArray41 {
+ public:
+ ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
+ T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray41& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+class ValueArray42 {
+ public:
+ ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41), v42_(v42) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray42& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+class ValueArray43 {
+ public:
+ ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+ v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+ v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
+ v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37),
+ v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray43& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+class ValueArray44 {
+ public:
+ ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+ v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
+ v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36),
+ v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42),
+ v43_(v43), v44_(v44) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray44& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+class ValueArray45 {
+ public:
+ ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+ v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
+ v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
+ v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41),
+ v42_(v42), v43_(v43), v44_(v44), v45_(v45) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray45& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+class ValueArray46 {
+ public:
+ ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
+ v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray46& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+class ValueArray47 {
+ public:
+ ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
+ v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46),
+ v47_(v47) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_,
+ v47_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray47& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+class ValueArray48 {
+ public:
+ ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+ v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
+ v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
+ v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45),
+ v46_(v46), v47_(v47), v48_(v48) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
+ v48_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray48& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+class ValueArray49 {
+ public:
+ ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48,
+ T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
+ v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
+ v48_, v49_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray49& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+ const T49 v49_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+class ValueArray50 {
+ public:
+ ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49,
+ T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
+ v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
+ v48_, v49_, v50_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray50& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+ const T49 v49_;
+ const T50 v50_;
+};
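
Editorial note, not part of the patch: the ValueArrayN helpers above back the public ::testing::Values() overloads. Each Values(v1, ..., vN) call returns the matching ValueArrayN, which converts itself into a ParamGenerator<T> through ValuesIn() when a parameterized test is instantiated. A minimal sketch of how user code typically reaches this machinery follows; the fixture and test names are hypothetical, and the snippet assumes this same (tr1-era) gtest header is on the include path.

#include <gtest/gtest.h>

// Hypothetical fixture; GetParam() yields the current parameter value.
class WidthTest : public ::testing::TestWithParam<int> {};

TEST_P(WidthTest, IsPositive) {
  EXPECT_GT(GetParam(), 0);  // runs once per value supplied below
}

// ::testing::Values(2, 4, 8) builds a ValueArray3<int, int, int> internally,
// which then converts to ParamGenerator<int> exactly as in the classes above.
INSTANTIATE_TEST_CASE_P(SmallWidths, WidthTest, ::testing::Values(2, 4, 8));

(INSTANTIATE_TEST_CASE_P is the macro name used by this generation of gtest; later releases renamed it.)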
+
+#if GTEST_HAS_COMBINE
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Generates values from the Cartesian product of values produced
+// by the argument generators.
+//
+template <typename T1, typename T2>
+class CartesianProductGenerator2
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2> ParamType;
+
+ CartesianProductGenerator2(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2)
+ : g1_(g1), g2_(g2) {}
+ virtual ~CartesianProductGenerator2() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current2_;
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator2::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator2& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+}; // class CartesianProductGenerator2
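
Editorial note, not part of the patch: CartesianProductGenerator2 is what ::testing::Combine() instantiates for two argument generators. The Iterator above advances the second component fastest and wraps it back while stepping the first, so each pair is visited exactly once. A hedged sketch of the public-API path that ends up in this class; the fixture and test names are made up:

#include <gtest/gtest.h>

// Hypothetical two-parameter fixture; the param type matches the tuple above.
class GridTest
    : public ::testing::TestWithParam< ::std::tr1::tuple<int, bool> > {};

TEST_P(GridTest, Accepts) {
  const int size = ::std::tr1::get<0>(GetParam());
  const bool fast = ::std::tr1::get<1>(GetParam());
  EXPECT_GE(size, 0);
  (void)fast;  // second axis shown only to illustrate the tuple layout
}

// Combine() wires the two ParamGenerators into CartesianProductGenerator2,
// producing the 3 x 2 = 6 combinations, second axis varying fastest.
INSTANTIATE_TEST_CASE_P(AllPairs, GridTest,
                        ::testing::Combine(::testing::Values(1, 2, 3),
                                           ::testing::Bool()));

The larger CartesianProductGeneratorN variants that follow repeat the same odometer-style advance over more component iterators.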
+
+
+template <typename T1, typename T2, typename T3>
+class CartesianProductGenerator3
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3> ParamType;
+
+ CartesianProductGenerator3(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3)
+ : g1_(g1), g2_(g2), g3_(g3) {}
+ virtual ~CartesianProductGenerator3() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current3_;
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator3::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator3& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+}; // class CartesianProductGenerator3
+
+
+template <typename T1, typename T2, typename T3, typename T4>
+class CartesianProductGenerator4
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4> ParamType;
+
+ CartesianProductGenerator4(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
+ virtual ~CartesianProductGenerator4() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current4_;
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator4::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator4& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+}; // class CartesianProductGenerator4
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class CartesianProductGenerator5
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5> ParamType;
+
+ CartesianProductGenerator5(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
+ virtual ~CartesianProductGenerator5() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current5_;
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator5::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator5& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+}; // class CartesianProductGenerator5
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+class CartesianProductGenerator6
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5,
+ T6> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> ParamType;
+
+ CartesianProductGenerator6(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
+ virtual ~CartesianProductGenerator6() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current6_;
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator6::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator6& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+}; // class CartesianProductGenerator6
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+class CartesianProductGenerator7
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7> ParamType;
+
+ CartesianProductGenerator7(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
+ virtual ~CartesianProductGenerator7() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current7_;
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator7::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator7& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+}; // class CartesianProductGenerator7
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+class CartesianProductGenerator8
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8> ParamType;
+
+ CartesianProductGenerator8(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
+ g8_(g8) {}
+ virtual ~CartesianProductGenerator8() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current8_;
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator8::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator8& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+}; // class CartesianProductGenerator8
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+class CartesianProductGenerator9
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8, T9> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9> ParamType;
+
+ CartesianProductGenerator9(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9) {}
+ virtual ~CartesianProductGenerator9() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end(), g9_, g9_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8,
+ const ParamGenerator<T9>& g9,
+ const typename ParamGenerator<T9>::iterator& current9)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
+ begin9_(g9.begin()), end9_(g9.end()), current9_(current9) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so none of the component iterators can be beyond the end of its range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current9_;
+ if (current9_ == end9_) {
+ current9_ = begin9_;
+ ++current8_;
+ }
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_ &&
+ current9_ == typed_other->current9_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_),
+ begin9_(other.begin9_),
+ end9_(other.end9_),
+ current9_(other.current9_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_,
+ *current9_);
+ }
+ bool AtEnd() const {
+ // We must report the iterator as past the end of the range when any of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_ ||
+ current9_ == end9_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ const typename ParamGenerator<T9>::iterator begin9_;
+ const typename ParamGenerator<T9>::iterator end9_;
+ typename ParamGenerator<T9>::iterator current9_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator9::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator9& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+ const ParamGenerator<T9> g9_;
+}; // class CartesianProductGenerator9
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+class CartesianProductGenerator10
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8, T9, T10> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> ParamType;
+
+ CartesianProductGenerator10(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9,
+ const ParamGenerator<T10>& g10)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9), g10_(g10) {}
+ virtual ~CartesianProductGenerator10() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end(), g9_, g9_.end(), g10_, g10_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8,
+ const ParamGenerator<T9>& g9,
+ const typename ParamGenerator<T9>::iterator& current9,
+ const ParamGenerator<T10>& g10,
+ const typename ParamGenerator<T10>::iterator& current10)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
+ begin9_(g9.begin()), end9_(g9.end()), current9_(current9),
+ begin10_(g10.begin()), end10_(g10.end()), current10_(current10) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so none of the component iterators can be beyond the end of its range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current10_;
+ if (current10_ == end10_) {
+ current10_ = begin10_;
+ ++current9_;
+ }
+ if (current9_ == end9_) {
+ current9_ = begin9_;
+ ++current8_;
+ }
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_ &&
+ current9_ == typed_other->current9_ &&
+ current10_ == typed_other->current10_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_),
+ begin9_(other.begin9_),
+ end9_(other.end9_),
+ current9_(other.current9_),
+ begin10_(other.begin10_),
+ end10_(other.end10_),
+ current10_(other.current10_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_,
+ *current9_, *current10_);
+ }
+ bool AtEnd() const {
+ // We must report the iterator as past the end of the range when any of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_ ||
+ current9_ == end9_ ||
+ current10_ == end10_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ const typename ParamGenerator<T9>::iterator begin9_;
+ const typename ParamGenerator<T9>::iterator end9_;
+ typename ParamGenerator<T9>::iterator current9_;
+ const typename ParamGenerator<T10>::iterator begin10_;
+ const typename ParamGenerator<T10>::iterator end10_;
+ typename ParamGenerator<T10>::iterator current10_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator10::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator10& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+ const ParamGenerator<T9> g9_;
+ const ParamGenerator<T10> g10_;
+}; // class CartesianProductGenerator10
+
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Helper classes providing Combine() with polymorphic features. They allow
+// casting CartesianProductGeneratorN<T> to ParamGenerator<U> if T is
+// convertible to U.
+//
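+// Illustrative sketch (hypothetical test case and values, not taken from
+// this header): these holders are what the user-facing Combine() returns, so
+// a cross product of generators can drive a value-parameterized test:
+//
+//   class CombineTest
+//       : public TestWithParam< ::std::tr1::tuple<int, const char*> > {};
+//
+//   TEST_P(CombineTest, HandlesCombination) {
+//     int n = ::std::tr1::get<0>(GetParam());
+//     const char* s = ::std::tr1::get<1>(GetParam());
+//     // ... exercise the code under test with n and s ...
+//   }
+//
+//   INSTANTIATE_TEST_CASE_P(Cross, CombineTest,
+//                           Combine(Values(1, 2, 3), Values("a", "b")));
+//
+// The generators above enumerate the product in odometer order (the last
+// component varies fastest), so the parameters are (1, "a"), (1, "b"),
+// (2, "a"), (2, "b"), (3, "a"), (3, "b").
+//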
+template <class Generator1, class Generator2>
+class CartesianProductHolder2 {
+ public:
+CartesianProductHolder2(const Generator1& g1, const Generator2& g2)
+ : g1_(g1), g2_(g2) {}
+ template <typename T1, typename T2>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2> >(
+ new CartesianProductGenerator2<T1, T2>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder2& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+}; // class CartesianProductHolder2
+
+template <class Generator1, class Generator2, class Generator3>
+class CartesianProductHolder3 {
+ public:
+CartesianProductHolder3(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3)
+ : g1_(g1), g2_(g2), g3_(g3) {}
+ template <typename T1, typename T2, typename T3>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3> >(
+ new CartesianProductGenerator3<T1, T2, T3>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder3& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+}; // class CartesianProductHolder3
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4>
+class CartesianProductHolder4 {
+ public:
+CartesianProductHolder4(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
+ template <typename T1, typename T2, typename T3, typename T4>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4> >(
+ new CartesianProductGenerator4<T1, T2, T3, T4>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder4& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+}; // class CartesianProductHolder4
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5>
+class CartesianProductHolder5 {
+ public:
+CartesianProductHolder5(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5> >(
+ new CartesianProductGenerator5<T1, T2, T3, T4, T5>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder5& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+}; // class CartesianProductHolder5
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6>
+class CartesianProductHolder6 {
+ public:
+CartesianProductHolder6(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> >(
+ new CartesianProductGenerator6<T1, T2, T3, T4, T5, T6>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder6& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+}; // class CartesianProductHolder6
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7>
+class CartesianProductHolder7 {
+ public:
+CartesianProductHolder7(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7> >(
+ new CartesianProductGenerator7<T1, T2, T3, T4, T5, T6, T7>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder7& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+}; // class CartesianProductHolder7
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8>
+class CartesianProductHolder8 {
+ public:
+CartesianProductHolder8(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7, const Generator8& g8)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
+ g8_(g8) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7,
+ T8> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8> >(
+ new CartesianProductGenerator8<T1, T2, T3, T4, T5, T6, T7, T8>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder8& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+}; // class CartesianProductHolder8
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8, class Generator9>
+class CartesianProductHolder9 {
+ public:
+CartesianProductHolder9(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7, const Generator8& g8,
+ const Generator9& g9)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9> >(
+ new CartesianProductGenerator9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_),
+ static_cast<ParamGenerator<T9> >(g9_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder9& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+ const Generator9 g9_;
+}; // class CartesianProductHolder9
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8, class Generator9, class Generator10>
+class CartesianProductHolder10 {
+ public:
+CartesianProductHolder10(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7, const Generator8& g8,
+ const Generator9& g9, const Generator10& g10)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9), g10_(g10) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9, T10> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9, T10> >(
+ new CartesianProductGenerator10<T1, T2, T3, T4, T5, T6, T7, T8, T9,
+ T10>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_),
+ static_cast<ParamGenerator<T9> >(g9_),
+ static_cast<ParamGenerator<T10> >(g10_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder10& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+ const Generator9 g9_;
+ const Generator10 g10_;
+}; // class CartesianProductHolder10
+
+#endif // GTEST_HAS_COMBINE
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Functions producing parameter generators.
+//
+// Google Test uses these generators to produce parameters for value-
+// parameterized tests. When a parameterized test case is instantiated
+// with a particular generator, Google Test creates and runs tests
+// for each element in the sequence produced by the generator.
+//
+// In the following sample, the tests from test case FooTest are each
+// instantiated three times, with parameter values 3, 5, and 8:
+//
+// class FooTest : public TestWithParam<int> { ... };
+//
+// TEST_P(FooTest, TestThis) {
+// }
+// TEST_P(FooTest, TestThat) {
+// }
+// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8));
+//
+
+// Range() returns generators providing sequences of values in a range.
+//
+// Synopsis:
+// Range(start, end)
+// - returns a generator producing a sequence of values {start, start+1,
+// start+2, ..., }.
+// Range(start, end, step)
+// - returns a generator producing a sequence of values {start, start+step,
+// start+step+step, ..., }.
+// Notes:
+// * The generated sequences never include end. For example, Range(1, 5)
+// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)
+// returns a generator producing {1, 3, 5, 7}.
+// * start and end must have the same type. That type may be any integral or
+//   floating-point type or a user-defined type satisfying these conditions:
+// * It must be assignable (have operator=() defined).
+// * It must have operator+() (operator+(int-compatible type) for
+// two-operand version).
+// * It must have operator<() defined.
+// Elements in the resulting sequences will also have that type.
+// * Condition start < end must be satisfied in order for resulting sequences
+// to contain any elements.
+//
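+// For illustration, a hypothetical test case instantiated with Range(); the
+// names and bounds below are made up:
+//
+//   class EvenNumberTest : public TestWithParam<int> { ... };
+//
+//   TEST_P(EvenNumberTest, IsDivisibleByTwo) {
+//     // GetParam() takes the values 10, 12, 14, 16, and 18 (20 is excluded).
+//   }
+//
+//   INSTANTIATE_TEST_CASE_P(EvenRange, EvenNumberTest, Range(10, 20, 2));
+//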
+template <typename T, typename IncrementT>
+internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
+ return internal::ParamGenerator<T>(
+ new internal::RangeGenerator<T, IncrementT>(start, end, step));
+}
+
+template <typename T>
+internal::ParamGenerator<T> Range(T start, T end) {
+ return Range(start, end, 1);
+}
+
+// The ValuesIn() function allows generating tests with parameters coming from
+// a container.
+//
+// Synopsis:
+// ValuesIn(const T (&array)[N])
+// - returns a generator producing sequences with elements from
+// a C-style array.
+// ValuesIn(const Container& container)
+// - returns a generator producing sequences with elements from
+// an STL-style container.
+// ValuesIn(Iterator begin, Iterator end)
+// - returns a generator producing sequences with elements from
+// a range [begin, end) defined by a pair of STL-style iterators. These
+// iterators can also be plain C pointers.
+//
+// Please note that ValuesIn copies the values from the containers
+// passed in and keeps them to generate tests in RUN_ALL_TESTS().
+//
+// Examples:
+//
+// This instantiates tests from test case StringTest
+// each with C-string values of "foo", "bar", and "baz":
+//
+// const char* strings[] = {"foo", "bar", "baz"};
+// INSTANTIATE_TEST_CASE_P(StringSequence, StringTest, ValuesIn(strings));
+//
+// This instantiates tests from test case StlStringTest
+// each with STL strings with values "a" and "b":
+//
+// ::std::vector< ::std::string> GetParameterStrings() {
+// ::std::vector< ::std::string> v;
+// v.push_back("a");
+// v.push_back("b");
+// return v;
+// }
+//
+// INSTANTIATE_TEST_CASE_P(CharSequence,
+// StlStringTest,
+// ValuesIn(GetParameterStrings()));
+//
+//
+// This will also instantiate tests from CharTest
+// each with parameter values 'a' and 'b':
+//
+// ::std::list<char> GetParameterChars() {
+// ::std::list<char> list;
+// list.push_back('a');
+// list.push_back('b');
+// return list;
+// }
+// ::std::list<char> l = GetParameterChars();
+// INSTANTIATE_TEST_CASE_P(CharSequence2,
+// CharTest,
+// ValuesIn(l.begin(), l.end()));
+//
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::std::iterator_traits<ForwardIterator>::value_type> ValuesIn(
+ ForwardIterator begin,
+ ForwardIterator end) {
+ typedef typename ::std::iterator_traits<ForwardIterator>::value_type
+ ParamType;
+ return internal::ParamGenerator<ParamType>(
+ new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
+}
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
+ return ValuesIn(array, array + N);
+}
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container) {
+ return ValuesIn(container.begin(), container.end());
+}
+
+// Values() allows generating tests from an explicitly specified list of
+// parameters.
+//
+// Synopsis:
+// Values(T v1, T v2, ..., T vN)
+// - returns a generator producing sequences with elements v1, v2, ..., vN.
+//
+// For example, this instantiates tests from test case BarTest each
+// with values "one", "two", and "three":
+//
+// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three"));
+//
+// This instantiates tests from test case BazTest each with values 1, 2, 3.5.
+// The exact type of values will depend on the type of parameter in BazTest.
+//
+// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
+//
+// Currently, Values() supports from 1 to 50 parameters.
+//
+template <typename T1>
+internal::ValueArray1<T1> Values(T1 v1) {
+ return internal::ValueArray1<T1>(v1);
+}
+
+template <typename T1, typename T2>
+internal::ValueArray2<T1, T2> Values(T1 v1, T2 v2) {
+ return internal::ValueArray2<T1, T2>(v1, v2);
+}
+
+template <typename T1, typename T2, typename T3>
+internal::ValueArray3<T1, T2, T3> Values(T1 v1, T2 v2, T3 v3) {
+ return internal::ValueArray3<T1, T2, T3>(v1, v2, v3);
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+internal::ValueArray4<T1, T2, T3, T4> Values(T1 v1, T2 v2, T3 v3, T4 v4) {
+ return internal::ValueArray4<T1, T2, T3, T4>(v1, v2, v3, v4);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+internal::ValueArray5<T1, T2, T3, T4, T5> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5) {
+ return internal::ValueArray5<T1, T2, T3, T4, T5>(v1, v2, v3, v4, v5);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+internal::ValueArray6<T1, T2, T3, T4, T5, T6> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6) {
+ return internal::ValueArray6<T1, T2, T3, T4, T5, T6>(v1, v2, v3, v4, v5, v6);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7) {
+ return internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7>(v1, v2, v3, v4, v5,
+ v6, v7);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) {
+ return internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8>(v1, v2, v3, v4,
+ v5, v6, v7, v8);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) {
+ return internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) {
+ return internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
+ T11> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11) {
+ return internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
+ T11>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12) {
+ return internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13) {
+ return internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) {
+ return internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) {
+ return internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16) {
+ return internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17) {
+ return internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18) {
+ return internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) {
+ return internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) {
+ return internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) {
+ return internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22) {
+ return internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23) {
+ return internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24) {
+ return internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>(v1, v2,
+ v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
+ v19, v20, v21, v22, v23, v24);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
+ T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
+ T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) {
+ return internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
+ v18, v19, v20, v21, v22, v23, v24, v25);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26) {
+ return internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27) {
+ return internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
+ v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28) {
+ return internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
+ v28);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29) {
+ return internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
+ v27, v28, v29);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) {
+ return internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
+ v26, v27, v28, v29, v30);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) {
+ return internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
+ v25, v26, v27, v28, v29, v30, v31);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32) {
+ return internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33) {
+ return internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
+ T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
+ T31 v31, T32 v32, T33 v33, T34 v34) {
+ return internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
+ v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) {
+ return internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
+ v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) {
+ return internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37) {
+ return internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36, v37);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37, T38 v38) {
+ return internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38>(v1, v2,
+ v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
+ v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32,
+ v33, v34, v35, v36, v37, v38);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37, T38 v38, T39 v39) {
+ return internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
+ v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31,
+ v32, v33, v34, v35, v36, v37, v38, v39);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
+ T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
+ T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27,
+ T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35,
+ T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) {
+ return internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29,
+ v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) {
+ return internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
+ v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28,
+ v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42) {
+ return internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
+ v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41,
+ v42);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43) {
+ return internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
+ v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40,
+ v41, v42, v43);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44) {
+ return internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
+ v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39,
+ v40, v41, v42, v43, v44);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
+ T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
+ T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) {
+ return internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
+ v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38,
+ v39, v40, v41, v42, v43, v44, v45);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) {
+ return internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
+ v38, v39, v40, v41, v42, v43, v44, v45, v46);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) {
+ return internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
+ v38, v39, v40, v41, v42, v43, v44, v45, v46, v47);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47,
+ T48 v48) {
+ return internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
+ v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36,
+ v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
+ T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
+ T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38,
+ T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46,
+ T47 v47, T48 v48, T49 v49) {
+ return internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48, T49>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
+ v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35,
+ v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37,
+ T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45,
+ T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) {
+ return internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47,
+ v48, v49, v50);
+}
+
+// Bool() allows generating tests with parameters from the set {false, true}.
+//
+// Synopsis:
+// Bool()
+// - returns a generator producing sequences with elements {false, true}.
+//
+// It is useful when testing code that depends on Boolean flags. Combinations
+// of multiple flags can be tested when several Bool()'s are combined using
+// the Combine() function.
+//
+// In the following example all tests in the test case FlagDependentTest
+// will be instantiated twice with parameters false and true.
+//
+// class FlagDependentTest : public testing::TestWithParam<bool> {
+// virtual void SetUp() {
+// external_flag = GetParam();
+// }
+// };
+// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());
+//
+inline internal::ParamGenerator<bool> Bool() {
+ return Values(false, true);
+}
+
+#if GTEST_HAS_COMBINE
+// Combine() allows the user to combine two or more sequences to produce
+// values of a Cartesian product of those sequences' elements.
+//
+// Synopsis:
+// Combine(gen1, gen2, ..., genN)
+// - returns a generator producing sequences with elements coming from
+// the Cartesian product of elements from the sequences generated by
+// gen1, gen2, ..., genN. The sequence elements will have a type of
+// tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
+// of elements from sequences produced by gen1, gen2, ..., genN.
+//
+// Combine can have up to 10 arguments. This number is currently limited
+// by the maximum number of elements in the tuple implementation used by Google
+// Test.
+//
+// Example:
+//
+// This will instantiate the tests in test case AnimalTest, each with
+// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
+// tuple("dog", BLACK), and tuple("dog", WHITE):
+//
+// enum Color { BLACK, GRAY, WHITE };
+// class AnimalTest
+// : public testing::TestWithParam<tuple<const char*, Color> > {...};
+//
+// TEST_P(AnimalTest, AnimalLooksNice) {...}
+//
+// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
+// Combine(Values("cat", "dog"),
+// Values(BLACK, WHITE)));
+//
+// This will instantiate tests in FlagDependentTest with all variations of two
+// Boolean flags:
+//
+// class FlagDependentTest
+// : public testing::TestWithParam<tuple<bool, bool> > {
+// virtual void SetUp() {
+// // Assigns external_flag_1 and external_flag_2 values from the tuple.
+// tie(external_flag_1, external_flag_2) = GetParam();
+// }
+// };
+//
+// TEST_P(FlagDependentTest, TestFeature1) {
+// // Test your code using external_flag_1 and external_flag_2 here.
+// }
+// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest,
+// Combine(Bool(), Bool()));
+//
+template <typename Generator1, typename Generator2>
+internal::CartesianProductHolder2<Generator1, Generator2> Combine(
+ const Generator1& g1, const Generator2& g2) {
+ return internal::CartesianProductHolder2<Generator1, Generator2>(
+ g1, g2);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3>
+internal::CartesianProductHolder3<Generator1, Generator2, Generator3> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3) {
+ return internal::CartesianProductHolder3<Generator1, Generator2, Generator3>(
+ g1, g2, g3);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4>
+internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+ Generator4> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4) {
+ return internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+ Generator4>(
+ g1, g2, g3, g4);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5>
+internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+ Generator4, Generator5> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5) {
+ return internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+ Generator4, Generator5>(
+ g1, g2, g3, g4, g5);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6>
+internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6) {
+ return internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6>(
+ g1, g2, g3, g4, g5, g6);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7>
+internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7) {
+ return internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7>(
+ g1, g2, g3, g4, g5, g6, g7);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8>
+internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8) {
+ return internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8>(
+ g1, g2, g3, g4, g5, g6, g7, g8);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8, typename Generator9>
+internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8,
+ Generator9> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8, const Generator9& g9) {
+ return internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9>(
+ g1, g2, g3, g4, g5, g6, g7, g8, g9);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8, typename Generator9,
+ typename Generator10>
+internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
+ Generator10> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8, const Generator9& g9,
+ const Generator10& g10) {
+ return internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
+ Generator10>(
+ g1, g2, g3, g4, g5, g6, g7, g8, g9, g10);
+}
+#endif // GTEST_HAS_COMBINE
+
+
+
+#define TEST_P(test_case_name, test_name) \
+ class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+ : public test_case_name { \
+ public: \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
+ virtual void TestBody(); \
+ private: \
+ static int AddToRegistry() { \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, __FILE__, __LINE__)->AddTestPattern(\
+ #test_case_name, \
+ #test_name, \
+ new ::testing::internal::TestMetaFactory< \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \
+ return 0; \
+ } \
+ static int gtest_registering_dummy_; \
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
+ }; \
+ int GTEST_TEST_CLASS_NAME_(test_case_name, \
+ test_name)::gtest_registering_dummy_ = \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
+ void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
+
+#define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \
+ ::testing::internal::ParamGenerator<test_case_name::ParamType> \
+ gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \
+ int gtest_##prefix##test_case_name##_dummy_ = \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, __FILE__, __LINE__)->AddTestCaseInstantiation(\
+ #prefix, \
+ &gtest_##prefix##test_case_name##_EvalGenerator_, \
+ __FILE__, __LINE__)
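+
+// Illustrative sketch (not part of the original header): a minimal
+// value-parameterized test wired up with TEST_P and INSTANTIATE_TEST_CASE_P.
+// The fixture, test, and instantiation names below are hypothetical.
+//
+//   class AdditionTest : public ::testing::TestWithParam<int> {};
+//
+//   TEST_P(AdditionTest, ZeroIsIdentity) {
+//     const int value = GetParam();   // the current parameter value
+//     EXPECT_EQ(value, value + 0);
+//   }
+//
+//   // Instantiates the pattern once per value produced by Values().
+//   INSTANTIATE_TEST_CASE_P(SmallInts, AdditionTest,
+//                           ::testing::Values(1, 2, 3));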
+
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Google C++ Testing Framework definitions useful in production code.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+
+// When you need to test the private or protected members of a class,
+// use the FRIEND_TEST macro to declare your tests as friends of the
+// class. For example:
+//
+// class MyClass {
+// private:
+// void MyMethod();
+// FRIEND_TEST(MyClassTest, MyMethod);
+// };
+//
+// class MyClassTest : public testing::Test {
+// // ...
+// };
+//
+// TEST_F(MyClassTest, MyMethod) {
+// // Can call MyClass::MyMethod() here.
+// }
+
+#define FRIEND_TEST(test_case_name, test_name)\
+friend class test_case_name##_##test_name##_Test
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+
+#include <iosfwd>
+#include <vector>
+
+namespace testing {
+
+// A copyable object representing the result of a test part (i.e. an
+// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCEED()).
+//
+// Don't inherit from TestPartResult as its destructor is not virtual.
+class GTEST_API_ TestPartResult {
+ public:
+ // The possible outcomes of a test part (i.e. an assertion or an
+ // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
+ enum Type {
+ kSuccess, // Succeeded.
+ kNonFatalFailure, // Failed but the test can continue.
+ kFatalFailure // Failed and the test should be terminated.
+ };
+
+ // C'tor. TestPartResult does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestPartResult object.
+ TestPartResult(Type a_type,
+ const char* a_file_name,
+ int a_line_number,
+ const char* a_message)
+ : type_(a_type),
+ file_name_(a_file_name),
+ line_number_(a_line_number),
+ summary_(ExtractSummary(a_message)),
+ message_(a_message) {
+ }
+
+ // Gets the outcome of the test part.
+ Type type() const { return type_; }
+
+ // Gets the name of the source file where the test part took place, or
+ // NULL if it's unknown.
+ const char* file_name() const { return file_name_.c_str(); }
+
+ // Gets the line in the source file where the test part took place,
+ // or -1 if it's unknown.
+ int line_number() const { return line_number_; }
+
+ // Gets the summary of the failure message.
+ const char* summary() const { return summary_.c_str(); }
+
+ // Gets the message associated with the test part.
+ const char* message() const { return message_.c_str(); }
+
+ // Returns true iff the test part passed.
+ bool passed() const { return type_ == kSuccess; }
+
+ // Returns true iff the test part failed.
+ bool failed() const { return type_ != kSuccess; }
+
+ // Returns true iff the test part non-fatally failed.
+ bool nonfatally_failed() const { return type_ == kNonFatalFailure; }
+
+ // Returns true iff the test part fatally failed.
+ bool fatally_failed() const { return type_ == kFatalFailure; }
+ private:
+ Type type_;
+
+ // Gets the summary of the failure message by omitting the stack
+ // trace in it.
+ static internal::String ExtractSummary(const char* message);
+
+ // The name of the source file where the test part took place, or
+ // NULL if the source file is unknown.
+ internal::String file_name_;
+ // The line in the source file where the test part took place, or -1
+ // if the line number is unknown.
+ int line_number_;
+ internal::String summary_; // The test failure summary.
+ internal::String message_; // The test failure message.
+};
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result);
+
+// An array of TestPartResult objects.
+//
+// Don't inherit from TestPartResultArray as its destructor is not
+// virtual.
+class GTEST_API_ TestPartResultArray {
+ public:
+ TestPartResultArray() {}
+
+ // Appends the given TestPartResult to the array.
+ void Append(const TestPartResult& result);
+
+ // Returns the TestPartResult at the given index (0-based).
+ const TestPartResult& GetTestPartResult(int index) const;
+
+ // Returns the number of TestPartResult objects in the array.
+ int size() const;
+
+ private:
+ std::vector<TestPartResult> array_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
+};
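+
+// Illustrative sketch (not part of the original header): collecting results
+// in a TestPartResultArray and inspecting them. The file name, line number,
+// and message passed to the TestPartResult constructor are hypothetical.
+//
+//   ::testing::TestPartResultArray results;
+//   results.Append(::testing::TestPartResult(
+//       ::testing::TestPartResult::kNonFatalFailure,
+//       "my_test.cc", 42, "expected 1, got 2"));
+//   for (int i = 0; i < results.size(); ++i) {
+//     const ::testing::TestPartResult& r = results.GetTestPartResult(i);
+//     if (r.failed()) { /* e.g. log r.summary() */ }
+//   }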
+
+// This interface knows how to report a test part result.
+class TestPartResultReporterInterface {
+ public:
+ virtual ~TestPartResultReporterInterface() {}
+
+ virtual void ReportTestPartResult(const TestPartResult& result) = 0;
+};
+
+namespace internal {
+
+// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
+// statement generates new fatal failures. To do so it registers itself as the
+// current test part result reporter. Besides checking if fatal failures were
+// reported, it only delegates the reporting to the former result reporter.
+// The original result reporter is restored in the destructor.
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+class GTEST_API_ HasNewFatalFailureHelper
+ : public TestPartResultReporterInterface {
+ public:
+ HasNewFatalFailureHelper();
+ virtual ~HasNewFatalFailureHelper();
+ virtual void ReportTestPartResult(const TestPartResult& result);
+ bool has_new_fatal_failure() const { return has_new_fatal_failure_; }
+ private:
+ bool has_new_fatal_failure_;
+ TestPartResultReporterInterface* original_reporter_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
+};
+
+} // namespace internal
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// This header implements typed tests and type-parameterized tests.
+
+// Typed (aka type-driven) tests repeat the same test for types in a
+// list. You must know which types you want to test with when writing
+// typed tests. Here's how you do it:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ public:
+ ...
+ typedef std::list<T> List;
+ static T shared_;
+ T value_;
+};
+
+// Next, associate a list of types with the test case, which will be
+// repeated for each type in the list. The typedef is necessary for
+// the macro to parse correctly.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+TYPED_TEST_CASE(FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+// TYPED_TEST_CASE(FooTest, int);
+
+// Then, use TYPED_TEST() instead of TEST_F() to define as many typed
+// tests for this test case as you want.
+TYPED_TEST(FooTest, DoesBlah) {
+ // Inside a test, refer to TypeParam to get the type parameter.
+ // Since we are inside a derived class template, C++ requires us to
+ // visit the members of FooTest via 'this'.
+ TypeParam n = this->value_;
+
+ // To visit static members of the fixture, add the TestFixture::
+ // prefix.
+ n += TestFixture::shared_;
+
+ // To refer to typedefs in the fixture, add the "typename
+ // TestFixture::" prefix.
+ typename TestFixture::List values;
+ values.push_back(n);
+ ...
+}
+
+TYPED_TEST(FooTest, HasPropertyA) { ... }
+
+#endif // 0
+
+// Type-parameterized tests are abstract test patterns parameterized
+// by a type. Compared with typed tests, type-parameterized tests
+// allow you to define the test pattern without knowing what the type
+// parameters are. The defined pattern can be instantiated with
+// different types any number of times, in any number of translation
+// units.
+//
+// If you are designing an interface or concept, you can define a
+// suite of type-parameterized tests to verify properties that any
+// valid implementation of the interface/concept should have. Then,
+// each implementation can easily instantiate the test suite to verify
+// that it conforms to the requirements, without having to write
+// similar tests repeatedly. Here's an example:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ ...
+};
+
+// Next, declare that you will define a type-parameterized test case
+// (the _P suffix is for "parameterized" or "pattern", whichever you
+// prefer):
+TYPED_TEST_CASE_P(FooTest);
+
+// Then, use TYPED_TEST_P() to define as many type-parameterized tests
+// for this type-parameterized test case as you want.
+TYPED_TEST_P(FooTest, DoesBlah) {
+ // Inside a test, refer to TypeParam to get the type parameter.
+ TypeParam n = 0;
+ ...
+}
+
+TYPED_TEST_P(FooTest, HasPropertyA) { ... }
+
+// Now the tricky part: you need to register all test patterns before
+// you can instantiate them. The first argument of the macro is the
+// test case name; the rest are the names of the tests in this test
+// case.
+REGISTER_TYPED_TEST_CASE_P(FooTest,
+ DoesBlah, HasPropertyA);
+
+// Finally, you are free to instantiate the pattern with the types you
+// want. If you put the above code in a header file, you can #include
+// it in multiple C++ source files and instantiate it multiple times.
+//
+// To distinguish different instances of the pattern, the first
+// argument to the INSTANTIATE_* macro is a prefix that will be added
+// to the actual test case name. Remember to pick unique prefixes for
+// different instances.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+// INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int);
+
+#endif // 0
+
+
+// Implements typed tests.
+
+#if GTEST_HAS_TYPED_TEST
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the typedef for the type parameters of the
+// given test case.
+#define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+#define TYPED_TEST_CASE(CaseName, Types) \
+ typedef ::testing::internal::TypeList< Types >::type \
+ GTEST_TYPE_PARAMS_(CaseName)
+
+#define TYPED_TEST(CaseName, TestName) \
+ template <typename gtest_TypeParam_> \
+ class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \
+ : public CaseName<gtest_TypeParam_> { \
+ private: \
+ typedef CaseName<gtest_TypeParam_> TestFixture; \
+ typedef gtest_TypeParam_ TypeParam; \
+ virtual void TestBody(); \
+ }; \
+ bool gtest_##CaseName##_##TestName##_registered_ = \
+ ::testing::internal::TypeParameterizedTest< \
+ CaseName, \
+ ::testing::internal::TemplateSel< \
+ GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
+ GTEST_TYPE_PARAMS_(CaseName)>::Register(\
+ "", #CaseName, #TestName, 0); \
+ template <typename gtest_TypeParam_> \
+ void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()
+
+#endif // GTEST_HAS_TYPED_TEST
+
+// Implements type-parameterized tests.
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the namespace name that the type-parameterized tests for
+// the given type-parameterized test case are defined in. The exact
+// name of the namespace is subject to change without notice.
+#define GTEST_CASE_NAMESPACE_(TestCaseName) \
+ gtest_case_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the variable used to remember the names of
+// the defined tests in the given test case.
+#define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \
+ gtest_typed_test_case_p_state_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
+//
+// Expands to the name of the variable used to remember the names of
+// the registered tests in the given test case.
+#define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \
+ gtest_registered_test_names_##TestCaseName##_
+
+// The variables defined in the type-parameterized test macros are
+// static as typically these macros are used in a .h file that can be
+// #included in multiple translation units linked together.
+#define TYPED_TEST_CASE_P(CaseName) \
+ static ::testing::internal::TypedTestCasePState \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName)
+
+#define TYPED_TEST_P(CaseName, TestName) \
+ namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+ template <typename gtest_TypeParam_> \
+ class TestName : public CaseName<gtest_TypeParam_> { \
+ private: \
+ typedef CaseName<gtest_TypeParam_> TestFixture; \
+ typedef gtest_TypeParam_ TypeParam; \
+ virtual void TestBody(); \
+ }; \
+ static bool gtest_##TestName##_defined_ = \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\
+ __FILE__, __LINE__, #CaseName, #TestName); \
+ } \
+ template <typename gtest_TypeParam_> \
+ void GTEST_CASE_NAMESPACE_(CaseName)::TestName<gtest_TypeParam_>::TestBody()
+
+#define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \
+ namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+ typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
+ } \
+ static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\
+ __FILE__, __LINE__, #__VA_ARGS__)
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+#define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \
+ bool gtest_##Prefix##_##CaseName = \
+ ::testing::internal::TypeParameterizedTestCase<CaseName, \
+ GTEST_CASE_NAMESPACE_(CaseName)::gtest_AllTests_, \
+ ::testing::internal::TypeList< Types >::type>::Register(\
+ #Prefix, #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName))
+
+#endif // GTEST_HAS_TYPED_TEST_P
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// Depending on the platform, different string classes are available.
+// On Linux, in addition to ::std::string, Google also makes use of
+// class ::string, which has the same interface as ::std::string, but
+// has a different implementation.
+//
+// The user can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that
+// ::string is available AND is a distinct type from ::std::string, or
+// define it to 0 to indicate otherwise.
+//
+// If the user's ::std::string and ::string are the same class due to
+// aliasing, he should define GTEST_HAS_GLOBAL_STRING to 0.
+//
+// If the user doesn't define GTEST_HAS_GLOBAL_STRING, it is defined
+// heuristically.
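+//
+// For illustration only (not guaranteed by this header): a build that knows
+// its ::string situation could pin the macro explicitly, e.g. by passing a
+// compiler definition such as
+//
+//   -DGTEST_HAS_GLOBAL_STRING=0
+//
+// or by defining it before this header is included:
+//
+//   #define GTEST_HAS_GLOBAL_STRING 0
+//   #include "gtest/gtest.h"   // or however this bundled header is pulled in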
+
+namespace testing {
+
+// Declares the flags.
+
+// This flag temporarily enables the disabled tests.
+GTEST_DECLARE_bool_(also_run_disabled_tests);
+
+// This flag breaks into the debugger on an assertion failure.
+GTEST_DECLARE_bool_(break_on_failure);
+
+// This flag controls whether Google Test catches all test-thrown exceptions
+// and logs them as failures.
+GTEST_DECLARE_bool_(catch_exceptions);
+
+// This flag enables using colors in terminal output. Available values are
+// "yes" to enable colors, "no" (disable colors), or "auto" (the default)
+// to let Google Test decide.
+GTEST_DECLARE_string_(color);
+
+// This flag sets up the filter that selects, by full name and using glob
+// patterns, which tests to run. If the filter is not given, all tests are
+// executed.
+GTEST_DECLARE_string_(filter);
+
+// This flag causes Google Test to list the tests. None of the listed tests
+// are actually run if the flag is provided.
+GTEST_DECLARE_bool_(list_tests);
+
+// This flag controls whether Google Test emits a detailed XML report to a file
+// in addition to its normal textual output.
+GTEST_DECLARE_string_(output);
+
+// This flag controls whether Google Test prints the elapsed time for each
+// test.
+GTEST_DECLARE_bool_(print_time);
+
+// This flag specifies the random number seed.
+GTEST_DECLARE_int32_(random_seed);
+
+// This flag sets how many times the tests are repeated. The default value
+// is 1. If the value is -1, the tests are repeated forever.
+GTEST_DECLARE_int32_(repeat);
+
+// This flag controls whether Google Test includes Google Test internal
+// stack frames in failure stack traces.
+GTEST_DECLARE_bool_(show_internal_stack_frames);
+
+// When this flag is specified, tests' order is randomized on every iteration.
+GTEST_DECLARE_bool_(shuffle);
+
+// This flag specifies the maximum number of stack frames to be
+// printed in a failure message.
+GTEST_DECLARE_int32_(stack_trace_depth);
+
+// When this flag is specified, a failed assertion will throw an
+// exception if exceptions are enabled, or exit the program with a
+// non-zero code otherwise.
+GTEST_DECLARE_bool_(throw_on_failure);
+
+// The upper limit for valid stack trace depths.
+const int kMaxStackTraceDepth = 100;
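+
+// Each flag declared above corresponds to a --gtest_<name> command line
+// option and to a FLAGS_gtest_<name> variable (accessible via GTEST_FLAG()).
+// A minimal illustrative sketch; the binary name and values are hypothetical:
+//
+//   ./unit_tests --gtest_filter=FooTest.* --gtest_repeat=3 --gtest_shuffle
+//
+// or, programmatically, before RUN_ALL_TESTS():
+//
+//   testing::GTEST_FLAG(filter) = "FooTest.*";
+//   testing::GTEST_FLAG(repeat) = 3;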
+
+namespace internal {
+
+class AssertHelper;
+class DefaultGlobalTestPartResultReporter;
+class ExecDeathTest;
+class NoExecDeathTest;
+class FinalSuccessChecker;
+class GTestFlagSaver;
+class TestInfoImpl;
+class TestResultAccessor;
+class TestEventListenersAccessor;
+class TestEventRepeater;
+class WindowsDeathTest;
+class UnitTestImpl* GetUnitTestImpl();
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+ const String& message);
+class PrettyUnitTestResultPrinter;
+class XmlUnitTestResultPrinter;
+
+// Converts a streamable value to a String. A NULL pointer is
+// converted to "(null)". When the input value is a ::string,
+// ::std::string, ::wstring, or ::std::wstring object, each NUL
+// character in it is replaced with "\\0".
+// Declared in gtest-internal.h but defined here, so that it has access
+// to the definition of the Message class, required by the ARM
+// compiler.
+template <typename T>
+String StreamableToString(const T& streamable) {
+ return (Message() << streamable).GetString();
+}
+
+} // namespace internal
+
+// A class for indicating whether an assertion was successful. When
+// the assertion wasn't successful, the AssertionResult object
+// remembers a non-empty message that describes how it failed.
+//
+// To create an instance of this class, use one of the factory functions
+// (AssertionSuccess() and AssertionFailure()).
+//
+// This class is useful for two purposes:
+// 1. Defining predicate functions to be used with Boolean test assertions
+// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts
+// 2. Defining predicate-format functions to be
+// used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
+//
+// For example, if you define IsEven predicate:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5)))
+// will print the message
+//
+// Value of: IsEven(Fib(5))
+// Actual: false (5 is odd)
+// Expected: true
+//
+// instead of a more opaque
+//
+// Value of: IsEven(Fib(5))
+// Actual: false
+// Expected: true
+//
+// in case IsEven is a simple Boolean predicate.
+//
+// If you expect your predicate to be reused and want to support informative
+// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up
+// about half as often as positive ones in our tests), supply messages for
+// both success and failure cases:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess() << n << " is even";
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print
+//
+// Value of: IsEven(Fib(6))
+// Actual: true (8 is even)
+// Expected: false
+//
+// NB: Predicates that support negative Boolean assertions have reduced
+// performance in positive ones, so be careful not to use them in tests
+// that have lots (tens of thousands) of positive Boolean assertions.
+//
+// To use this class with EXPECT_PRED_FORMAT assertions such as:
+//
+// // Verifies that Foo() returns an even number.
+// EXPECT_PRED_FORMAT1(IsEven, Foo());
+//
+// you need to define:
+//
+// testing::AssertionResult IsEven(const char* expr, int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure()
+// << "Expected: " << expr << " is even\n Actual: it's " << n;
+// }
+//
+// If Foo() returns 5, you will see the following message:
+//
+// Expected: Foo() is even
+// Actual: it's 5
+//
+class GTEST_API_ AssertionResult {
+ public:
+ // Copy constructor.
+ // Used in EXPECT_TRUE/FALSE(assertion_result).
+ AssertionResult(const AssertionResult& other);
+ // Used in EXPECT_TRUE/FALSE(bool_expression).
+ explicit AssertionResult(bool success) : success_(success) {}
+
+ // Returns true iff the assertion succeeded.
+ operator bool() const { return success_; } // NOLINT
+
+ // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+ AssertionResult operator!() const;
+
+ // Returns the text streamed into this AssertionResult. Test assertions
+ // use it when they fail (i.e., the predicate's outcome doesn't match the
+ // assertion's expectation). When nothing has been streamed into the
+ // object, returns an empty string.
+ const char* message() const {
+ return message_.get() != NULL && message_->c_str() != NULL ?
+ message_->c_str() : "";
+ }
+ // TODO(vladl@google.com): Remove this after making sure no clients use it.
+ // Deprecated; please use message() instead.
+ const char* failure_message() const { return message(); }
+
+ // Streams a custom failure message into this object.
+ template <typename T> AssertionResult& operator<<(const T& value);
+
+ private:
+ // No implementation - we want AssertionResult to be
+ // copy-constructible but not assignable.
+ void operator=(const AssertionResult& other);
+
+ // Stores result of the assertion predicate.
+ bool success_;
+ // Stores the message describing the condition in case the expectation
+ // construct is not satisfied with the predicate's outcome.
+ // Referenced via a pointer to avoid taking too much stack frame space
+ // with test assertions.
+ internal::scoped_ptr<internal::String> message_;
+}; // class AssertionResult
+
+// Streams a custom failure message into this object.
+template <typename T>
+AssertionResult& AssertionResult::operator<<(const T& value) {
+ Message msg;
+ if (message_.get() != NULL)
+ msg << *message_;
+ msg << value;
+ message_.reset(new internal::String(msg.GetString()));
+ return *this;
+}
+
+// Makes a successful assertion result.
+GTEST_API_ AssertionResult AssertionSuccess();
+
+// Makes a failed assertion result.
+GTEST_API_ AssertionResult AssertionFailure();
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << msg.
+GTEST_API_ AssertionResult AssertionFailure(const Message& msg);
+
+// The abstract class that all tests inherit from.
+//
+// In Google Test, a unit test program contains one or many TestCases, and
+// each TestCase contains one or many Tests.
+//
+// When you define a test using the TEST macro, you don't need to
+// explicitly derive from Test - the TEST macro automatically does
+// this for you.
+//
+// The only time you derive from Test is when defining a test fixture
+// to be used in TEST_F. For example:
+//
+// class FooTest : public testing::Test {
+// protected:
+// virtual void SetUp() { ... }
+// virtual void TearDown() { ... }
+// ...
+// };
+//
+// TEST_F(FooTest, Bar) { ... }
+// TEST_F(FooTest, Baz) { ... }
+//
+// Test is not copyable.
+class GTEST_API_ Test {
+ public:
+ friend class internal::TestInfoImpl;
+
+ // Defines types for pointers to functions that set up and tear down
+ // a test case.
+ typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc;
+ typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc;
+
+ // The d'tor is virtual as we intend to inherit from Test.
+ virtual ~Test();
+
+ // Sets up the stuff shared by all tests in this test case.
+ //
+ // Google Test will call Foo::SetUpTestCase() before running the first
+ // test in test case Foo. Hence a sub-class can define its own
+ // SetUpTestCase() method to shadow the one defined in the super
+ // class.
+ static void SetUpTestCase() {}
+
+ // Tears down the stuff shared by all tests in this test case.
+ //
+ // Google Test will call Foo::TearDownTestCase() after running the last
+ // test in test case Foo. Hence a sub-class can define its own
+ // TearDownTestCase() method to shadow the one defined in the super
+ // class.
+ static void TearDownTestCase() {}
+
+ // Returns true iff the current test has a fatal failure.
+ static bool HasFatalFailure();
+
+ // Returns true iff the current test has a non-fatal failure.
+ static bool HasNonfatalFailure();
+
+ // Returns true iff the current test has a failure, either fatal or
+ // non-fatal.
+ static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); }
+
+ // Logs a property for the current test. Only the last value for a given
+ // key is remembered.
+ // These are public static so they can be called from utility functions
+ // that are not members of the test fixture.
+ // The arguments are const char* instead of strings, as Google Test is used
+ // on platforms where string doesn't compile.
+ //
+ // Note that a driving consideration for these RecordProperty methods
+ // was to produce xml output suited to the Greenspan charting utility,
+ // which at present will only chart values that fit in a 32-bit int. It
+ // is the user's responsibility to restrict their values to 32-bit ints
+ // if they intend them to be used with Greenspan.
+ static void RecordProperty(const char* key, const char* value);
+ static void RecordProperty(const char* key, int value);
+
+ protected:
+ // Creates a Test object.
+ Test();
+
+ // Sets up the test fixture.
+ virtual void SetUp();
+
+ // Tears down the test fixture.
+ virtual void TearDown();
+
+ private:
+ // Returns true iff the current test has the same fixture class as
+ // the first test in the current test case.
+ static bool HasSameFixtureClass();
+
+ // Runs the test after the test fixture has been set up.
+ //
+ // A sub-class must implement this to define the test logic.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM.
+ // Instead, use the TEST or TEST_F macro.
+ virtual void TestBody() = 0;
+
+ // Sets up, executes, and tears down the test.
+ void Run();
+
+ // Uses a GTestFlagSaver to save and restore all Google Test flags.
+ const internal::GTestFlagSaver* const gtest_flag_saver_;
+
+ // Often a user mis-spells SetUp() as Setup() and spends a long time
+ // wondering why it is never called by Google Test. The declaration of
+ // the following method is solely for catching such an error at
+ // compile time:
+ //
+ // - The return type is deliberately chosen to be not void, so it
+ // will be a conflict if a user declares void Setup() in his test
+ // fixture.
+ //
+ // - This method is private, so it will be another compiler error
+ // if a user calls it from his test fixture.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION.
+ //
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+
+ // We disallow copying Tests.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Test);
+};
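+
+// An illustrative sketch of a fixture exercising the hooks above; QueueTest,
+// Queue, and Resource are hypothetical and not part of this header:
+//
+//   class QueueTest : public testing::Test {
+//    protected:
+//     // Runs once before the first test in this test case.
+//     static void SetUpTestCase() { shared_resource_ = new Resource; }
+//     // Runs once after the last test in this test case.
+//     static void TearDownTestCase() { delete shared_resource_; }
+//     // Run before and after each individual test.
+//     virtual void SetUp() { q_.Clear(); }
+//     virtual void TearDown() { RecordProperty("final_size", q_.Size()); }
+//
+//     static Resource* shared_resource_;
+//     Queue q_;
+//   };
+//
+//   TEST_F(QueueTest, StartsEmpty) { EXPECT_EQ(0, q_.Size()); }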
+
+typedef internal::TimeInMillis TimeInMillis;
+
+// A copyable object representing a user specified test property which can be
+// output as a key/value string pair.
+//
+// Don't inherit from TestProperty as its destructor is not virtual.
+class TestProperty {
+ public:
+ // C'tor. TestProperty does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestProperty object.
+ TestProperty(const char* a_key, const char* a_value) :
+ key_(a_key), value_(a_value) {
+ }
+
+ // Gets the user supplied key.
+ const char* key() const {
+ return key_.c_str();
+ }
+
+ // Gets the user supplied value.
+ const char* value() const {
+ return value_.c_str();
+ }
+
+ // Sets a new value, overriding the one supplied in the constructor.
+ void SetValue(const char* new_value) {
+ value_ = new_value;
+ }
+
+ private:
+ // The key supplied by the user.
+ internal::String key_;
+ // The value supplied by the user.
+ internal::String value_;
+};
+
+// The result of a single Test. This includes a list of
+// TestPartResults, a list of TestProperties, a count of how many
+// death tests there are in the Test, and how much time it took to run
+// the Test.
+//
+// TestResult is not copyable.
+class GTEST_API_ TestResult {
+ public:
+ // Creates an empty TestResult.
+ TestResult();
+
+ // D'tor. Do not inherit from TestResult.
+ ~TestResult();
+
+ // Gets the number of all test parts. This is the sum of the number
+ // of successful test parts and the number of failed test parts.
+ int total_part_count() const;
+
+ // Returns the number of the test properties.
+ int test_property_count() const;
+
+ // Returns true iff the test passed (i.e. no test part failed).
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test failed.
+ bool Failed() const;
+
+ // Returns true iff the test fatally failed.
+ bool HasFatalFailure() const;
+
+ // Returns true iff the test has a non-fatal failure.
+ bool HasNonfatalFailure() const;
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns the i-th test part result among all the results. i can range
+ // from 0 to total_part_count() - 1. If i is not in that range, aborts
+ // the program.
+ const TestPartResult& GetTestPartResult(int i) const;
+
+ // Returns the i-th test property. i can range from 0 to
+ // test_property_count() - 1. If i is not in that range, aborts the
+ // program.
+ const TestProperty& GetTestProperty(int i) const;
+
+ private:
+ friend class TestInfo;
+ friend class UnitTest;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::ExecDeathTest;
+ friend class internal::TestInfoImpl;
+ friend class internal::TestResultAccessor;
+ friend class internal::UnitTestImpl;
+ friend class internal::WindowsDeathTest;
+
+ // Gets the vector of TestPartResults.
+ const std::vector<TestPartResult>& test_part_results() const {
+ return test_part_results_;
+ }
+
+ // Gets the vector of TestProperties.
+ const std::vector<TestProperty>& test_properties() const {
+ return test_properties_;
+ }
+
+ // Sets the elapsed time.
+ void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
+
+ // Adds a test property to the list. The property is validated and may add
+ // a non-fatal failure if invalid (e.g., if it conflicts with reserved
+ // key names). If a property is already recorded for the same key, the
+ // value will be updated, rather than storing multiple values for the same
+ // key.
+ void RecordProperty(const TestProperty& test_property);
+
+ // Adds a failure if the key is a reserved attribute of Google Test
+ // testcase tags. Returns true if the property is valid.
+ // TODO(russr): Validate attribute names are legal and human readable.
+ static bool ValidateTestProperty(const TestProperty& test_property);
+
+ // Adds a test part result to the list.
+ void AddTestPartResult(const TestPartResult& test_part_result);
+
+ // Returns the death test count.
+ int death_test_count() const { return death_test_count_; }
+
+ // Increments the death test count, returning the new count.
+ int increment_death_test_count() { return ++death_test_count_; }
+
+ // Clears the test part results.
+ void ClearTestPartResults();
+
+ // Clears the object.
+ void Clear();
+
+ // Protects mutable state of the property vector and of owned
+ // properties, whose values may be updated.
+ internal::Mutex test_properites_mutex_;
+
+ // The vector of TestPartResults
+ std::vector<TestPartResult> test_part_results_;
+ // The vector of TestProperties
+ std::vector<TestProperty> test_properties_;
+ // Running count of death tests.
+ int death_test_count_;
+ // The elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+ // We disallow copying TestResult.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);
+}; // class TestResult
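+
+// An illustrative sketch of reading a TestResult through the public accessors
+// above; the pointer is assumed to come from TestInfo::result():
+//
+//   void PrintSummary(const testing::TestResult* result) {
+//     printf("%s: %d part(s), %lld ms\n",
+//            result->Passed() ? "PASSED" : "FAILED",
+//            result->total_part_count(),
+//            static_cast<long long>(result->elapsed_time()));
+//   }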
+
+// A TestInfo object stores the following information about a test:
+//
+// Test case name
+// Test name
+// Whether the test should be run
+// A function pointer that creates the test object when invoked
+// Test result
+//
+// The constructor of TestInfo registers itself with the UnitTest
+// singleton such that the RUN_ALL_TESTS() macro knows which tests to
+// run.
+class GTEST_API_ TestInfo {
+ public:
+ // Destructs a TestInfo object. This function is not virtual, so
+ // don't inherit from TestInfo.
+ ~TestInfo();
+
+ // Returns the test case name.
+ const char* test_case_name() const;
+
+ // Returns the test name.
+ const char* name() const;
+
+ // Returns the test case comment.
+ const char* test_case_comment() const;
+
+ // Returns the test comment.
+ const char* comment() const;
+
+ // Returns true if this test should run, that is, if the test is not disabled
+ // (or it is disabled but the also_run_disabled_tests flag has been specified)
+ // and its full name matches the user-specified filter.
+ //
+ // Google Test allows the user to filter the tests by their full names.
+ // The full name of a test Bar in test case Foo is defined as
+ // "Foo.Bar". Only the tests that match the filter will run.
+ //
+ // A filter is a colon-separated list of glob (not regex) patterns,
+ // optionally followed by a '-' and a colon-separated list of
+ // negative patterns (tests to exclude). A test is run if it
+ // matches one of the positive patterns and does not match any of
+ // the negative patterns.
+ //
+ // For example, *A*:Foo.* is a filter that matches any string that
+ // contains the character 'A' or starts with "Foo.".
+ bool should_run() const;
+
+ // Returns the result of the test.
+ const TestResult* result() const;
+
+ private:
+#if GTEST_HAS_DEATH_TEST
+ friend class internal::DefaultDeathTestFactory;
+#endif // GTEST_HAS_DEATH_TEST
+ friend class Test;
+ friend class TestCase;
+ friend class internal::TestInfoImpl;
+ friend class internal::UnitTestImpl;
+ friend TestInfo* internal::MakeAndRegisterTestInfo(
+ const char* test_case_name, const char* name,
+ const char* test_case_comment, const char* comment,
+ internal::TypeId fixture_class_id,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc,
+ internal::TestFactoryBase* factory);
+
+ // Returns true if this test matches the user-specified filter.
+ bool matches_filter() const;
+
+ // Increments the number of death tests encountered in this test so
+ // far.
+ int increment_death_test_count();
+
+ // Accessors for the implementation object.
+ internal::TestInfoImpl* impl() { return impl_; }
+ const internal::TestInfoImpl* impl() const { return impl_; }
+
+ // Constructs a TestInfo object. The newly constructed instance assumes
+ // ownership of the factory object.
+ TestInfo(const char* test_case_name, const char* name,
+ const char* test_case_comment, const char* comment,
+ internal::TypeId fixture_class_id,
+ internal::TestFactoryBase* factory);
+
+ // An opaque implementation object.
+ internal::TestInfoImpl* impl_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo);
+};
+
+// A test case, which consists of a vector of TestInfos.
+//
+// TestCase is not copyable.
+class GTEST_API_ TestCase {
+ public:
+ // Creates a TestCase with the given name.
+ //
+ // TestCase does NOT have a default constructor. Always use this
+ // constructor to create a TestCase object.
+ //
+ // Arguments:
+ //
+ // name: name of the test case
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ TestCase(const char* name, const char* comment,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc);
+
+ // Destructor of TestCase.
+ virtual ~TestCase();
+
+ // Gets the name of the TestCase.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the test case comment.
+ const char* comment() const { return comment_.c_str(); }
+
+ // Returns true if any test in this test case should run.
+ bool should_run() const { return should_run_; }
+
+ // Gets the number of successful tests in this test case.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests in this test case.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests in this test case.
+ int disabled_test_count() const;
+
+ // Get the number of tests in this test case that should run.
+ int test_to_run_count() const;
+
+ // Gets the number of all tests in this test case.
+ int total_test_count() const;
+
+ // Returns true iff the test case passed.
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test case failed.
+ bool Failed() const { return failed_test_count() > 0; }
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ const TestInfo* GetTestInfo(int i) const;
+
+ private:
+ friend class Test;
+ friend class internal::UnitTestImpl;
+
+ // Gets the (mutable) vector of TestInfos in this TestCase.
+ std::vector<TestInfo*>& test_info_list() { return test_info_list_; }
+
+ // Gets the (immutable) vector of TestInfos in this TestCase.
+ const std::vector<TestInfo*>& test_info_list() const {
+ return test_info_list_;
+ }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ TestInfo* GetMutableTestInfo(int i);
+
+ // Sets the should_run member.
+ void set_should_run(bool should) { should_run_ = should; }
+
+ // Adds a TestInfo to this test case. Will delete the TestInfo upon
+ // destruction of the TestCase object.
+ void AddTestInfo(TestInfo * test_info);
+
+ // Clears the results of all tests in this test case.
+ void ClearResult();
+
+ // Clears the results of all tests in the given test case.
+ static void ClearTestCaseResult(TestCase* test_case) {
+ test_case->ClearResult();
+ }
+
+ // Runs every test in this TestCase.
+ void Run();
+
+ // Returns true iff test passed.
+ static bool TestPassed(const TestInfo * test_info);
+
+ // Returns true iff test failed.
+ static bool TestFailed(const TestInfo * test_info);
+
+ // Returns true iff test is disabled.
+ static bool TestDisabled(const TestInfo * test_info);
+
+ // Returns true if the given test should run.
+ static bool ShouldRunTest(const TestInfo *test_info);
+
+ // Shuffles the tests in this test case.
+ void ShuffleTests(internal::Random* random);
+
+ // Restores the test order to before the first shuffle.
+ void UnshuffleTests();
+
+ // Name of the test case.
+ internal::String name_;
+ // Comment on the test case.
+ internal::String comment_;
+ // The vector of TestInfos in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestInfo*> test_info_list_;
+ // Provides a level of indirection for the test list to allow easy
+ // shuffling and restoring the test order. The i-th element in this
+ // vector is the index of the i-th test in the shuffled test list.
+ std::vector<int> test_indices_;
+ // Pointer to the function that sets up the test case.
+ Test::SetUpTestCaseFunc set_up_tc_;
+ // Pointer to the function that tears down the test case.
+ Test::TearDownTestCaseFunc tear_down_tc_;
+ // True iff any test in this test case should run.
+ bool should_run_;
+ // Elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+ // We disallow copying TestCases.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
+};
+
+// An Environment object is capable of setting up and tearing down an
+// environment. The user should subclass this to define his own
+// environment(s).
+//
+// An Environment object does the set-up and tear-down in virtual
+// methods SetUp() and TearDown() instead of the constructor and the
+// destructor, as:
+//
+// 1. You cannot safely throw from a destructor. This is a problem
+// as in some cases Google Test is used where exceptions are enabled, and
+// we may want to implement ASSERT_* using exceptions where they are
+// available.
+// 2. You cannot use ASSERT_* directly in a constructor or
+// destructor.
+class Environment {
+ public:
+ // The d'tor is virtual as we need to subclass Environment.
+ virtual ~Environment() {}
+
+ // Override this to define how to set up the environment.
+ virtual void SetUp() {}
+
+ // Override this to define how to tear down the environment.
+ virtual void TearDown() {}
+ private:
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+};
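+
+// An illustrative sketch of a user-defined environment; FooEnvironment and
+// the resource it manages are hypothetical:
+//
+//   class FooEnvironment : public testing::Environment {
+//    public:
+//     virtual void SetUp() { /* acquire a shared resource */ }
+//     virtual void TearDown() { /* release it */ }
+//   };
+//
+// See AddGlobalTestEnvironment() below for how to register it.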
+
+// The interface for tracing execution of tests. The methods are organized in
+// the order the corresponding events are fired.
+class TestEventListener {
+ public:
+ virtual ~TestEventListener() {}
+
+ // Fired before any test activity starts.
+ virtual void OnTestProgramStart(const UnitTest& unit_test) = 0;
+
+ // Fired before each iteration of tests starts. There may be more than
+ // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration
+ // index, starting from 0.
+ virtual void OnTestIterationStart(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired before environment set-up for each iteration of tests starts.
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment set-up for each iteration of tests ends.
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0;
+
+ // Fired before the test case starts.
+ virtual void OnTestCaseStart(const TestCase& test_case) = 0;
+
+ // Fired before the test starts.
+ virtual void OnTestStart(const TestInfo& test_info) = 0;
+
+ // Fired after a failed assertion or a SUCCESS().
+ virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0;
+
+ // Fired after the test ends.
+ virtual void OnTestEnd(const TestInfo& test_info) = 0;
+
+ // Fired after the test case ends.
+ virtual void OnTestCaseEnd(const TestCase& test_case) = 0;
+
+ // Fired before environment tear-down for each iteration of tests starts.
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment tear-down for each iteration of tests ends.
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0;
+
+ // Fired after each iteration of tests finishes.
+ virtual void OnTestIterationEnd(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired after all test activities have ended.
+ virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0;
+};
+
+// The convenience class for users who need to override just one or two
+// methods and are willing to accept that a change to the signature of an
+// overridden method will not be caught during the build. For
+// comments about each method please see the definition of TestEventListener
+// above.
+class EmptyTestEventListener : public TestEventListener {
+ public:
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
+ int /*iteration*/) {}
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestCaseStart(const TestCase& /*test_case*/) {}
+ virtual void OnTestStart(const TestInfo& /*test_info*/) {}
+ virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {}
+ virtual void OnTestEnd(const TestInfo& /*test_info*/) {}
+ virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {}
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
+ int /*iteration*/) {}
+ virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
+};
+
+// TestEventListeners lets users add listeners to track events in Google Test.
+class GTEST_API_ TestEventListeners {
+ public:
+ TestEventListeners();
+ ~TestEventListeners();
+
+ // Appends an event listener to the end of the list. Google Test assumes
+ // the ownership of the listener (i.e. it will delete the listener when
+ // the test program finishes).
+ void Append(TestEventListener* listener);
+
+ // Removes the given event listener from the list and returns it. It then
+ // becomes the caller's responsibility to delete the listener. Returns
+ // NULL if the listener is not found in the list.
+ TestEventListener* Release(TestEventListener* listener);
+
+ // Returns the standard listener responsible for the default console
+ // output. Can be removed from the listeners list to shut down default
+ // console output. Note that removing this object from the listener list
+ // with Release transfers its ownership to the caller and makes this
+ // function return NULL the next time.
+ TestEventListener* default_result_printer() const {
+ return default_result_printer_;
+ }
+
+ // Returns the standard listener responsible for the default XML output
+ // controlled by the --gtest_output=xml flag. Can be removed from the
+ // listeners list by users who want to shut down the default XML output
+ // controlled by this flag and substitute it with a custom one. Note that
+ // removing this object from the listener list with Release transfers its
+ // ownership to the caller and makes this function return NULL the next
+ // time.
+ TestEventListener* default_xml_generator() const {
+ return default_xml_generator_;
+ }
+
+ private:
+ friend class TestCase;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::NoExecDeathTest;
+ friend class internal::TestEventListenersAccessor;
+ friend class internal::TestInfoImpl;
+ friend class internal::UnitTestImpl;
+
+ // Returns repeater that broadcasts the TestEventListener events to all
+ // subscribers.
+ TestEventListener* repeater();
+
+ // Sets the default_result_printer attribute to the provided listener.
+ // The listener is also added to the listener list and previous
+ // default_result_printer is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultResultPrinter(TestEventListener* listener);
+
+ // Sets the default_xml_generator attribute to the provided listener. The
+ // listener is also added to the listener list and previous
+ // default_xml_generator is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultXmlGenerator(TestEventListener* listener);
+
+ // Controls whether events will be forwarded by the repeater to the
+ // listeners in the list.
+ bool EventForwardingEnabled() const;
+ void SuppressEventForwarding();
+
+ // The actual list of listeners.
+ internal::TestEventRepeater* repeater_;
+ // Listener responsible for the standard result output.
+ TestEventListener* default_result_printer_;
+ // Listener responsible for the creation of the XML output file.
+ TestEventListener* default_xml_generator_;
+
+ // We disallow copying TestEventListeners.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners);
+};
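+
+// An illustrative sketch of installing a custom listener; TersePrinter is
+// hypothetical, and the replaced default printer is deleted by the caller:
+//
+//   class TersePrinter : public testing::EmptyTestEventListener {
+//    public:
+//     virtual void OnTestEnd(const testing::TestInfo& test_info) {
+//       printf("%s.%s done\n", test_info.test_case_name(), test_info.name());
+//     }
+//   };
+//
+//   testing::TestEventListeners& listeners =
+//       testing::UnitTest::GetInstance()->listeners();
+//   delete listeners.Release(listeners.default_result_printer());
+//   listeners.Append(new TersePrinter);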
+
+// A UnitTest consists of a vector of TestCases.
+//
+// This is a singleton class. The only instance of UnitTest is
+// created when UnitTest::GetInstance() is first called. This
+// instance is never deleted.
+//
+// UnitTest is not copyable.
+//
+// This class is thread-safe as long as the methods are called
+// according to their specification.
+class GTEST_API_ UnitTest {
+ public:
+ // Gets the singleton UnitTest object. The first time this method
+ // is called, a UnitTest object is constructed and returned.
+ // Consecutive calls will return the same object.
+ static UnitTest* GetInstance();
+
+ // Runs all tests in this UnitTest object and prints the result.
+ // Returns 0 if successful, or 1 otherwise.
+ //
+ // This method can only be called from the main thread.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ int Run() GTEST_MUST_USE_RESULT_;
+
+ // Returns the working directory when the first TEST() or TEST_F()
+ // was executed. The UnitTest object owns the string.
+ const char* original_working_dir() const;
+
+ // Returns the TestCase object for the test that's currently running,
+ // or NULL if no test is running.
+ const TestCase* current_test_case() const;
+
+ // Returns the TestInfo object for the test that's currently running,
+ // or NULL if no test is running.
+ const TestInfo* current_test_info() const;
+
+ // Returns the random seed used at the start of the current test run.
+ int random_seed() const;
+
+#if GTEST_HAS_PARAM_TEST
+ // Returns the ParameterizedTestCaseRegistry object used to keep track of
+ // value-parameterized tests and instantiate and register them.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ internal::ParameterizedTestCaseRegistry& parameterized_test_registry();
+#endif // GTEST_HAS_PARAM_TEST
+
+ // Gets the number of successful test cases.
+ int successful_test_case_count() const;
+
+ // Gets the number of failed test cases.
+ int failed_test_case_count() const;
+
+ // Gets the number of all test cases.
+ int total_test_case_count() const;
+
+ // Gets the number of all test cases that contain at least one test
+ // that should run.
+ int test_case_to_run_count() const;
+
+ // Gets the number of successful tests.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests.
+ int disabled_test_count() const;
+
+ // Gets the number of all tests.
+ int total_test_count() const;
+
+ // Gets the number of tests that should run.
+ int test_to_run_count() const;
+
+ // Gets the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const;
+
+ // Returns true iff the unit test passed (i.e. all test cases passed).
+ bool Passed() const;
+
+ // Returns true iff the unit test failed (i.e. some test case failed
+ // or something outside of all tests failed).
+ bool Failed() const;
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ const TestCase* GetTestCase(int i) const;
+
+ // Returns the list of event listeners that can be used to track events
+ // inside Google Test.
+ TestEventListeners& listeners();
+
+ private:
+ // Registers and returns a global test environment. When a test
+ // program is run, all global test environments will be set-up in
+ // the order they were registered. After all tests in the program
+ // have finished, all global test environments will be torn-down in
+ // the *reverse* order they were registered.
+ //
+ // The UnitTest object takes ownership of the given environment.
+ //
+ // This method can only be called from the main thread.
+ Environment* AddEnvironment(Environment* env);
+
+ // Adds a TestPartResult to the current TestResult object. All
+ // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
+ // eventually call this to report their results. The user code
+ // should use the assertion macros instead of calling this directly.
+ void AddTestPartResult(TestPartResult::Type result_type,
+ const char* file_name,
+ int line_number,
+ const internal::String& message,
+ const internal::String& os_stack_trace);
+
+ // Adds a TestProperty to the current TestResult object. If the result already
+ // contains a property with the same key, the value will be updated.
+ void RecordPropertyForCurrentTest(const char* key, const char* value);
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ TestCase* GetMutableTestCase(int i);
+
+ // Accessors for the implementation object.
+ internal::UnitTestImpl* impl() { return impl_; }
+ const internal::UnitTestImpl* impl() const { return impl_; }
+
+ // These classes and functions are friends as they need to access private
+ // members of UnitTest.
+ friend class Test;
+ friend class internal::AssertHelper;
+ friend class internal::ScopedTrace;
+ friend Environment* AddGlobalTestEnvironment(Environment* env);
+ friend internal::UnitTestImpl* internal::GetUnitTestImpl();
+ friend void internal::ReportFailureInUnknownLocation(
+ TestPartResult::Type result_type,
+ const internal::String& message);
+
+ // Creates an empty UnitTest.
+ UnitTest();
+
+ // D'tor
+ virtual ~UnitTest();
+
+ // Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+ // Google Test trace stack.
+ void PushGTestTrace(const internal::TraceInfo& trace);
+
+ // Pops a trace from the per-thread Google Test trace stack.
+ void PopGTestTrace();
+
+ // Protects mutable state in *impl_. This is mutable as some const
+ // methods need to lock it too.
+ mutable internal::Mutex mutex_;
+
+ // Opaque implementation object. This field is never changed once
+ // the object is constructed. We don't mark it as const here, as
+ // doing so will cause a warning in the constructor of UnitTest.
+ // Mutable state in *impl_ is protected by mutex_.
+ internal::UnitTestImpl* impl_;
+
+ // We disallow copying UnitTest.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest);
+};
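+
+// An illustrative sketch of querying the singleton from helper code; the
+// surrounding helper function is hypothetical:
+//
+//   const testing::TestInfo* const info =
+//       testing::UnitTest::GetInstance()->current_test_info();
+//   if (info != NULL)
+//     printf("now running %s.%s\n", info->test_case_name(), info->name());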
+
+// A convenient wrapper for adding an environment for the test
+// program.
+//
+// You should call this before RUN_ALL_TESTS() is called, probably in
+// main(). If you use gtest_main, you need to call this before main()
+// starts for it to take effect. For example, you can define a global
+// variable like this:
+//
+// testing::Environment* const foo_env =
+// testing::AddGlobalTestEnvironment(new FooEnvironment);
+//
+// However, we strongly recommend that you write your own main() and
+// call AddGlobalTestEnvironment() there, as relying on initialization
+// of global variables makes the code harder to read and may cause
+// problems when you register multiple environments from different
+// translation units and the environments have dependencies among them
+// (remember that the compiler doesn't guarantee the order in which
+// global variables from different translation units are initialized).
+inline Environment* AddGlobalTestEnvironment(Environment* env) {
+ return UnitTest::GetInstance()->AddEnvironment(env);
+}
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function a second time has no user-visible effect.
+GTEST_API_ void InitGoogleTest(int* argc, char** argv);
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv);
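+
+// An illustrative sketch of the typical main() when gtest_main is not linked
+// in (RUN_ALL_TESTS() is the run macro referred to above):
+//
+//   int main(int argc, char** argv) {
+//     testing::InitGoogleTest(&argc, argv);
+//     return RUN_ALL_TESTS();
+//   }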
+
+namespace internal {
+
+// These overloaded versions handle ::std::string and ::std::wstring.
+GTEST_API_ inline String FormatForFailureMessage(const ::std::string& str) {
+ return (Message() << '"' << str << '"').GetString();
+}
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ inline String FormatForFailureMessage(const ::std::wstring& wstr) {
+ return (Message() << "L\"" << wstr << '"').GetString();
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+// These overloaded versions handle ::string and ::wstring.
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_API_ inline String FormatForFailureMessage(const ::string& str) {
+ return (Message() << '"' << str << '"').GetString();
+}
+#endif // GTEST_HAS_GLOBAL_STRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_API_ inline String FormatForFailureMessage(const ::wstring& wstr) {
+ return (Message() << "L\"" << wstr << '"').GetString();
+}
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, etc.)
+// operand to be used in a failure message. The type (but not value)
+// of the other operand may affect the format. This allows us to
+// print a char* as a raw pointer when it is compared against another
+// char*, and print it as a C string when it is compared against an
+// std::string object, for example.
+//
+// The default implementation ignores the type of the other operand.
+// Some specialized versions are used to handle formatting wide or
+// narrow C strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename T1, typename T2>
+String FormatForComparisonFailureMessage(const T1& value,
+ const T2& /* other_operand */) {
+ return FormatForFailureMessage(value);
+}
+
+// The helper function for {ASSERT|EXPECT}_EQ.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQ(const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual) {
+#ifdef _MSC_VER
+#pragma warning(push) // Saves the current warning state.
+#pragma warning(disable:4389) // Temporarily disables warning on
+ // signed/unsigned mismatch.
+#endif
+
+ if (expected == actual) {
+ return AssertionSuccess();
+ }
+
+#ifdef _MSC_VER
+#pragma warning(pop) // Restores the warning state.
+#endif
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ FormatForComparisonFailureMessage(expected, actual),
+ FormatForComparisonFailureMessage(actual, expected),
+ false);
+}
+
+// With this overloaded version, we allow anonymous enums to be used
+// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
+// can be implicitly cast to BiggestInt.
+GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression,
+ const char* actual_expression,
+ BiggestInt expected,
+ BiggestInt actual);
+
+// The helper class for {ASSERT|EXPECT}_EQ. The template argument
+// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
+// is a null pointer literal. The following default implementation is
+// for lhs_is_null_literal being false.
+template <bool lhs_is_null_literal>
+class EqHelper {
+ public:
+ // This templatized version is for the general case.
+ template <typename T1, typename T2>
+ static AssertionResult Compare(const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+
+ // With this overloaded version, we allow anonymous enums to be used
+ // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
+ // enums can be implicitly cast to BiggestInt.
+ //
+ // Even though its body looks the same as the above version, we
+ // cannot merge the two, as it will make anonymous enums unhappy.
+ static AssertionResult Compare(const char* expected_expression,
+ const char* actual_expression,
+ BiggestInt expected,
+ BiggestInt actual) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+};
+
+// This specialization is used when the first argument to ASSERT_EQ()
+// is a null pointer literal.
+template <>
+class EqHelper<true> {
+ public:
+ // We define two overloaded versions of Compare(). The first
+ // version will be picked when the second argument to ASSERT_EQ() is
+ // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
+ // EXPECT_EQ(false, a_bool).
+ template <typename T1, typename T2>
+ static AssertionResult Compare(const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+
+ // This version will be picked when the second argument to
+ // ASSERT_EQ() is a pointer, e.g. ASSERT_EQ(NULL, a_pointer).
+ template <typename T1, typename T2>
+ static AssertionResult Compare(const char* expected_expression,
+ const char* actual_expression,
+ const T1& /* expected */,
+ T2* actual) {
+ // We already know that 'expected' is a null pointer.
+ return CmpHelperEQ(expected_expression, actual_expression,
+ static_cast<T2*>(NULL), actual);
+ }
+};
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste
+// of similar code.
+//
+// For each templatized helper function, we also define an overloaded
+// version for BiggestInt in order to reduce code bloat and allow
+// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled
+// with gcc 4.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+template <typename T1, typename T2>\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+ const T1& val1, const T2& val2) {\
+ if (val1 op val2) {\
+ return AssertionSuccess();\
+ } else {\
+ Message msg;\
+ msg << "Expected: (" << expr1 << ") " #op " (" << expr2\
+ << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
+ << " vs " << FormatForComparisonFailureMessage(val2, val1);\
+ return AssertionFailure(msg);\
+ }\
+}\
+GTEST_API_ AssertionResult CmpHelper##op_name(\
+ const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2)
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+#ifdef WIN32
+#pragma warning(disable:4018)
+#endif
+
+// Implements the helper function for {ASSERT|EXPECT}_NE
+GTEST_IMPL_CMP_HELPER_(NE, !=);
+// Implements the helper function for {ASSERT|EXPECT}_LE
+GTEST_IMPL_CMP_HELPER_(LE, <=);
+// Implements the helper function for {ASSERT|EXPECT}_LT
+GTEST_IMPL_CMP_HELPER_(LT, < );
+// Implements the helper function for {ASSERT|EXPECT}_GE
+GTEST_IMPL_CMP_HELPER_(GE, >=);
+// Implements the helper function for {ASSERT|EXPECT}_GT
+GTEST_IMPL_CMP_HELPER_(GT, > );
+
+#undef GTEST_IMPL_CMP_HELPER_
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual);
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual);
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+
+// Helper function for *_STREQ on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const wchar_t* expected,
+ const wchar_t* actual);
+
+// Helper function for *_STRNE on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2);
+
+} // namespace internal
+
+// IsSubstring() and IsNotSubstring() are intended to be used as the
+// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by
+// themselves. They check whether needle is a substring of haystack
+// (NULL is considered a substring of itself only), and return an
+// appropriate error message when they fail.
+//
+// The {needle,haystack}_expr arguments are the stringified
+// expressions that generated the two real arguments.
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+#endif // GTEST_HAS_STD_WSTRING
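+
+// An illustrative sketch of the intended usage via predicate-formatters;
+// haystack is a hypothetical std::string in the test body:
+//
+//   EXPECT_PRED_FORMAT2(testing::IsSubstring, "needle", haystack);
+//   EXPECT_PRED_FORMAT2(testing::IsNotSubstring, "straw", haystack);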
+
+namespace internal {
+
+// Helper template function for comparing floating-points.
+//
+// Template parameter:
+//
+// RawType: the raw floating-point type (either float or double)
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename RawType>
+AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression,
+ const char* actual_expression,
+ RawType expected,
+ RawType actual) {
+ const FloatingPoint<RawType> lhs(expected), rhs(actual);
+
+ if (lhs.AlmostEquals(rhs)) {
+ return AssertionSuccess();
+ }
+
+ StrStream expected_ss;
+ expected_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << expected;
+
+ StrStream actual_ss;
+ actual_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << actual;
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ StrStreamToString(&expected_ss),
+ StrStreamToString(&actual_ss),
+ false);
+}
+
+// Helper function for implementing ASSERT_NEAR.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
+ const char* expr2,
+ const char* abs_error_expr,
+ double val1,
+ double val2,
+ double abs_error);
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+// A class that enables one to stream messages to assertion macros
+class GTEST_API_ AssertHelper {
+ public:
+ // Constructor.
+ AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
+ const char* message);
+ ~AssertHelper();
+
+ // Message assignment is a semantic trick to enable assertion
+ // streaming; see the GTEST_MESSAGE_ macro below.
+ void operator=(const Message& message) const;
+
+ private:
+ // We put our data in a struct so that the size of the AssertHelper class can
+ // be as small as possible. This is important because gcc is incapable of
+ // re-using stack space even for temporary variables, so every EXPECT_EQ
+ // reserves stack space for another AssertHelper.
+ struct AssertHelperData {
+ AssertHelperData(TestPartResult::Type t,
+ const char* srcfile,
+ int line_num,
+ const char* msg)
+ : type(t), file(srcfile), line(line_num), message(msg) { }
+
+ TestPartResult::Type const type;
+ const char* const file;
+ int const line;
+ String const message;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
+ };
+
+ AssertHelperData* const data_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
+};
+
+} // namespace internal
+
+#if GTEST_HAS_PARAM_TEST
+// The abstract base class that all value-parameterized tests inherit from.
+//
+// This class adds support for accessing the test parameter value via
+// the GetParam() method.
+//
+// Use it with one of the parameter generator defining functions, like Range(),
+// Values(), ValuesIn(), Bool(), and Combine().
+//
+// class FooTest : public ::testing::TestWithParam<int> {
+// protected:
+// FooTest() {
+// // Can use GetParam() here.
+// }
+// virtual ~FooTest() {
+// // Can use GetParam() here.
+// }
+// virtual void SetUp() {
+// // Can use GetParam() here.
+// }
+// virtual void TearDown() {
+// // Can use GetParam() here.
+// }
+// };
+// TEST_P(FooTest, DoesBar) {
+// // Can use GetParam() method here.
+// Foo foo;
+// ASSERT_TRUE(foo.DoesBar(GetParam()));
+// }
+// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
+
+template <typename T>
+class TestWithParam : public Test {
+ public:
+ typedef T ParamType;
+
+ // The current parameter value. Is also available in the test fixture's
+ // constructor.
+ const ParamType& GetParam() const { return *parameter_; }
+
+ private:
+ // Sets parameter value. The caller is responsible for making sure the value
+ // remains alive and unchanged throughout the current test.
+ static void SetParam(const ParamType* parameter) {
+ parameter_ = parameter;
+ }
+
+ // Static value used for accessing parameter during a test lifetime.
+ static const ParamType* parameter_;
+
+ // TestClass must be a subclass of TestWithParam<T>.
+ template <class TestClass> friend class internal::ParameterizedTestFactory;
+};
+
+template <typename T>
+const T* TestWithParam<T>::parameter_ = NULL;
+
+#endif // GTEST_HAS_PARAM_TEST
+
+// Macros for indicating success/failure in test code.
+
+// ADD_FAILURE unconditionally adds a failure to the current test.
+// SUCCEED generates a success - it doesn't automatically make the
+// current test successful, as a test is only successful when it has
+// no failure.
+//
+// EXPECT_* verifies that a certain condition is satisfied. If not,
+// it behaves like ADD_FAILURE. In particular:
+//
+// EXPECT_TRUE verifies that a Boolean condition is true.
+// EXPECT_FALSE verifies that a Boolean condition is false.
+//
+// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except
+// that they will also abort the current function on failure. People
+// usually want the fail-fast behavior of FAIL and ASSERT_*, but those
+// writing data-driven tests often find themselves using ADD_FAILURE
+// and EXPECT_* more.
+//
+// Examples:
+//
+// EXPECT_TRUE(server.StatusIsOK());
+// ASSERT_FALSE(server.HasPendingRequest(port))
+// << "There are still pending requests " << "on port " << port;
+
+// Generates a nonfatal failure with a generic message.
+#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed")
+
+// Generates a fatal failure with a generic message.
+#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed")
+
+// Define this macro to 1 to omit the definition of FAIL(), which is a
+// generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_FAIL
+//#define FAIL() GTEST_FAIL()
+#endif
+
+// Generates a success with a generic message.
+#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded")
+
+// Define this macro to 1 to omit the definition of SUCCEED(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_SUCCEED
+#define SUCCEED() GTEST_SUCCEED()
+#endif
+
+// Macros for testing exceptions.
+//
+// * {ASSERT|EXPECT}_THROW(statement, expected_exception):
+// Tests that the statement throws the expected exception.
+// * {ASSERT|EXPECT}_NO_THROW(statement):
+// Tests that the statement doesn't throw any exception.
+// * {ASSERT|EXPECT}_ANY_THROW(statement):
+// Tests that the statement throws an exception.
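+//
+// A brief usage sketch (Parse() is a hypothetical function assumed to
+// throw std::out_of_range on bad input):
+//
+//   EXPECT_THROW(Parse("not a number"), std::out_of_range);
+//   EXPECT_NO_THROW(Parse("42"));
+//   ASSERT_ANY_THROW(Parse(NULL));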
+
+#define EXPECT_THROW(statement, expected_exception) \
+ GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_NO_THROW(statement) \
+ GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_ANY_THROW(statement) \
+ GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_THROW(statement, expected_exception) \
+ GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_)
+#define ASSERT_NO_THROW(statement) \
+ GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_)
+#define ASSERT_ANY_THROW(statement) \
+ GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_)
+
+// Boolean assertions. Condition can be either a Boolean expression or an
+// AssertionResult. For more information on how to use AssertionResult with
+// these macros see comments on that class.
+#define EXPECT_TRUE(condition) \
+ GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
+ GTEST_NONFATAL_FAILURE_)
+#define EXPECT_FALSE(condition) \
+ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+ GTEST_NONFATAL_FAILURE_)
+#define ASSERT_TRUE(condition) \
+ GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
+ GTEST_FATAL_FAILURE_)
+#define ASSERT_FALSE(condition) \
+ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+ GTEST_FATAL_FAILURE_)
+
+// Includes the auto-generated header that implements a family of
+// generic predicate assertion macros.
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is AUTOMATICALLY GENERATED on 10/02/2008 by command
+// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND!
+//
+// Implements a family of generic predicate assertion macros.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+
+// Makes sure this header is not included before gtest.h.
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
+#endif // GTEST_INCLUDE_GTEST_GTEST_H_
+
+// This header implements a family of generic predicate assertion
+// macros:
+//
+// ASSERT_PRED_FORMAT1(pred_format, v1)
+// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
+// ...
+//
+// where pred_format is a function or functor that takes n (in the
+// case of ASSERT_PRED_FORMATn) values and their source expression
+// text, and returns a testing::AssertionResult. See the definition
+// of ASSERT_EQ in gtest.h for an example.
+//
+// If you don't care about formatting, you can use the more
+// restrictive version:
+//
+// ASSERT_PRED1(pred, v1)
+// ASSERT_PRED2(pred, v1, v2)
+// ...
+//
+// where pred is an n-ary function or functor that returns bool,
+// and the values v1, v2, ..., must support the << operator for
+// streaming to std::ostream.
+//
+// We also define the EXPECT_* variations.
+//
+// For now we only support predicates whose arity is at most 5.
+// Please email googletestframework@googlegroups.com if you need
+// support for higher arities.
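+//
+// A brief usage sketch (IsEven and MutuallyPrime are hypothetical
+// predicates assumed to return bool):
+//
+//   EXPECT_PRED1(IsEven, 4);
+//   ASSERT_PRED2(MutuallyPrime, 3, 5) << "3 and 5 should be coprime";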
+
+// GTEST_ASSERT_ is the basic statement to which all of the assertions
+// in this file reduce. Don't use this in your code.
+
+#define GTEST_ASSERT_(expression, on_failure) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const ::testing::AssertionResult gtest_ar = (expression)) \
+ ; \
+ else \
+ on_failure(gtest_ar.failure_message())
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1>
+AssertionResult AssertPred1Helper(const char* pred_text,
+ const char* e1,
+ Pred pred,
+ const T1& v1) {
+ if (pred(v1)) return AssertionSuccess();
+
+ Message msg;
+ msg << pred_text << "("
+ << e1 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1;
+ return AssertionFailure(msg);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, v1),\
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+#define GTEST_PRED1_(pred, v1, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
+ #v1, \
+ pred, \
+ v1), on_failure)
+
+// Unary predicate assertion macros.
+#define EXPECT_PRED_FORMAT1(pred_format, v1) \
+ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED1(pred, v1) \
+ GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT1(pred_format, v1) \
+ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED1(pred, v1) \
+ GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2>
+AssertionResult AssertPred2Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ Pred pred,
+ const T1& v1,
+ const T2& v2) {
+ if (pred(v1, v2)) return AssertionSuccess();
+
+ Message msg;
+ msg << pred_text << "("
+ << e1 << ", "
+ << e2 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2;
+ return AssertionFailure(msg);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2),\
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+#define GTEST_PRED2_(pred, v1, v2, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
+ #v1, \
+ #v2, \
+ pred, \
+ v1, \
+ v2), on_failure)
+
+// Binary predicate assertion macros.
+#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
+ GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED2(pred, v1, v2) \
+ GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
+ GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED2(pred, v1, v2) \
+ GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3>
+AssertionResult AssertPred3Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3) {
+ if (pred(v1, v2, v3)) return AssertionSuccess();
+
+ Message msg;
+ msg << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3;
+ return AssertionFailure(msg);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3),\
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ pred, \
+ v1, \
+ v2, \
+ v3), on_failure)
+
+// Ternary predicate assertion macros.
+#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+ GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED3(pred, v1, v2, v3) \
+ GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+ GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED3(pred, v1, v2, v3) \
+ GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4>
+AssertionResult AssertPred4Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ const char* e4,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3,
+ const T4& v4) {
+ if (pred(v1, v2, v3, v4)) return AssertionSuccess();
+
+ Message msg;
+ msg << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ", "
+ << e4 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3
+ << "\n" << e4 << " evaluates to " << v4;
+ return AssertionFailure(msg);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4),\
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ #v4, \
+ pred, \
+ v1, \
+ v2, \
+ v3, \
+ v4), on_failure)
+
+// 4-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+ GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
+ GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+ GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
+ GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5>
+AssertionResult AssertPred5Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ const char* e4,
+ const char* e5,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3,
+ const T4& v4,
+ const T5& v5) {
+ if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
+
+ Message msg;
+ msg << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ", "
+ << e4 << ", "
+ << e5 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3
+ << "\n" << e4 << " evaluates to " << v4
+ << "\n" << e5 << " evaluates to " << v5;
+ return AssertionFailure(msg);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5),\
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use
+// this in your code.
+#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ #v4, \
+ #v5, \
+ pred, \
+ v1, \
+ v2, \
+ v3, \
+ v4, \
+ v5), on_failure)
+
+// 5-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+ GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \
+ GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+ GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \
+ GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+
+
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+
+// Macros for testing equalities and inequalities.
+//
+// * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual
+// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2
+// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2
+// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2
+// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2
+// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2
+//
+// When they are not, Google Test prints both the tested expressions and
+// their actual values. The values must be compatible built-in types,
+// or you will get a compiler error. By "compatible" we mean that the
+// values can be compared by the respective operator.
+//
+// Note:
+//
+// 1. It is possible to make a user-defined type work with
+// {ASSERT|EXPECT}_??(), but that requires overloading the
+// comparison operators and is thus discouraged by the Google C++
+// Usage Guide. Therefore, you are advised to use the
+// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are
+// equal.
+//
+// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on
+// pointers (in particular, C strings). Therefore, if you use it
+// with two C strings, you are testing how their locations in memory
+// are related, not how their content is related. To compare two C
+// strings by content, use {ASSERT|EXPECT}_STR*().
+//
+// 3. {ASSERT|EXPECT}_EQ(expected, actual) is preferred to
+// {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you
+// what the actual value is when it fails, and similarly for the
+// other comparisons.
+//
+// 4. Do not depend on the order in which {ASSERT|EXPECT}_??()
+// evaluate their arguments, which is undefined.
+//
+// 5. These macros evaluate their arguments exactly once.
+//
+// Examples:
+//
+// EXPECT_NE(5, Foo());
+// EXPECT_EQ(NULL, a_pointer);
+// ASSERT_LT(i, array_size);
+// ASSERT_GT(records.size(), 0) << "There is no record left.";
+
+#define EXPECT_EQ(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal:: \
+ EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+ expected, actual)
+
+
+
+#define EXPECT_NE(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual)
+#define EXPECT_LE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define EXPECT_LT(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define EXPECT_GE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define EXPECT_GT(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
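+// Project-specific convenience wrappers that cast both operands to uint32_t
+// before comparing, so mixed signed/unsigned arguments compare cleanly.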
+#define EXPECT_EQ_UINT32(a,b) EXPECT_EQ((uint32_t)(a),(uint32_t)(b))
+#define EXPECT_NE_UINT32(a,b) EXPECT_NE((uint32_t)(a),(uint32_t)(b))
+#define EXPECT_LE_UINT32(a,b) EXPECT_LE((uint32_t)(a),(uint32_t)(b))
+#define EXPECT_LT_UINT32(a,b) EXPECT_LT((uint32_t)(a),(uint32_t)(b))
+#define EXPECT_GE_UINT32(a,b) EXPECT_GE((uint32_t)(a),(uint32_t)(b))
+#define EXPECT_GT_UINT32(a,b) EXPECT_GT((uint32_t)(a),(uint32_t)(b))
+
+
+#define ASSERT_EQ(expected, actual) \
+ ASSERT_PRED_FORMAT2(::testing::internal:: \
+ EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+ expected, actual)
+#define ASSERT_NE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define ASSERT_LE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define ASSERT_LT(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define ASSERT_GE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define ASSERT_GT(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+// C String Comparisons. All tests treat NULL and any non-NULL string
+// as different. Two NULLs are equal.
+//
+// * {ASSERT|EXPECT}_STREQ(s1, s2): Tests that s1 == s2
+// * {ASSERT|EXPECT}_STRNE(s1, s2): Tests that s1 != s2
+// * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case
+// * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case
+//
+// For wide or narrow string objects, you can use the
+// {ASSERT|EXPECT}_??() macros.
+//
+// Don't depend on the order in which the arguments are evaluated,
+// which is undefined.
+//
+// These macros evaluate their arguments exactly once.
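+//
+// A brief usage sketch (GetGreeting() is a hypothetical function that
+// returns a C string):
+//
+//   EXPECT_STREQ("hello", GetGreeting());
+//   EXPECT_STRCASENE("hello", "world");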
+
+#define EXPECT_STREQ(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define EXPECT_STRNE(s1, s2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define EXPECT_STRCASEEQ(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define EXPECT_STRCASENE(s1, s2)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+#define ASSERT_STREQ(expected, actual) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define ASSERT_STRNE(s1, s2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define ASSERT_STRCASEEQ(expected, actual) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define ASSERT_STRCASENE(s1, s2)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+// Macros for comparing floating-point numbers.
+//
+// * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual):
+// Tests that two float values are almost equal.
+// * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual):
+// Tests that two double values are almost equal.
+// * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
+// Tests that v1 and v2 are within the given distance to each other.
+//
+// Google Test uses ULP-based comparison to automatically pick a default
+// error bound that is appropriate for the operands. See the
+// FloatingPoint template class in gtest-internal.h if you are
+// interested in the implementation details.
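+//
+// A brief usage sketch (pi_estimate is a hypothetical value computed by
+// the code under test):
+//
+//   EXPECT_FLOAT_EQ(2.0f, std::sqrt(4.0f));
+//   ASSERT_NEAR(3.14159, pi_estimate, 0.001);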
+
+#define EXPECT_FLOAT_EQ(expected, actual)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+ expected, actual)
+
+#define EXPECT_DOUBLE_EQ(expected, actual)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+ expected, actual)
+
+#define ASSERT_FLOAT_EQ(expected, actual)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+ expected, actual)
+
+#define ASSERT_DOUBLE_EQ(expected, actual)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+ expected, actual)
+
+#define EXPECT_NEAR(val1, val2, abs_error)\
+ EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+ val1, val2, abs_error)
+
+#define ASSERT_NEAR(val1, val2, abs_error)\
+ ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+ val1, val2, abs_error)
+
+// These predicate format functions work on floating-point values, and
+// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
+//
+// EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
+ float val1, float val2);
+GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
+ double val1, double val2);
+
+
+#if GTEST_OS_WINDOWS
+
+// Macros that test for HRESULT failure and success, these are only useful
+// on Windows, and rely on Windows SDK macros and APIs to compile.
+//
+// * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
+//
+// When expr unexpectedly fails or succeeds, Google Test prints the
+// expected result and the actual result with both a human-readable
+// string representation of the error, if available, as well as the
+// hex result code.
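+//
+// A brief usage sketch (SomeComCall() stands in for any API returning
+// an HRESULT):
+//
+//   EXPECT_HRESULT_SUCCEEDED(::CoInitialize(NULL));
+//   EXPECT_HRESULT_FAILED(SomeComCall());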
+#define EXPECT_HRESULT_SUCCEEDED(expr) \
+ EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+#define ASSERT_HRESULT_SUCCEEDED(expr) \
+ ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+#define EXPECT_HRESULT_FAILED(expr) \
+ EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+#define ASSERT_HRESULT_FAILED(expr) \
+ ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+#endif // GTEST_OS_WINDOWS
+
+// Macros that execute statement and check that it doesn't generate new fatal
+// failures in the current thread.
+//
+// * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
+//
+// Examples:
+//
+// EXPECT_NO_FATAL_FAILURE(Process());
+// ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
+//
+#define ASSERT_NO_FATAL_FAILURE(statement) \
+ GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
+#define EXPECT_NO_FATAL_FAILURE(statement) \
+ GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
+
+// Causes a trace (including the source file path, the current line
+// number, and the given message) to be included in every test failure
+// message generated by code in the current scope. The effect is
+// undone when the control leaves the current scope.
+//
+// The message argument can be anything streamable to std::ostream.
+//
+// In the implementation, we include the current line number as part
+// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
+// to appear in the same block - as long as they are on different
+// lines.
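+//
+// A brief usage sketch (CheckPositive() is a hypothetical helper that
+// runs EXPECT_* assertions; the block below lives inside a TEST body):
+//
+//   {
+//     SCOPED_TRACE("first call");  // failures below mention "first call"
+//     CheckPositive(1);
+//   }
+//   CheckPositive(-1);             // failures here carry no extra trace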
+#define SCOPED_TRACE(message) \
+ ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
+ __FILE__, __LINE__, ::testing::Message() << (message))
+
+namespace internal {
+
+// This template is declared, but intentionally undefined.
+template <typename T1, typename T2>
+struct StaticAssertTypeEqHelper;
+
+template <typename T>
+struct StaticAssertTypeEqHelper<T, T> {};
+
+} // namespace internal
+
+// Compile-time assertion for type equality.
+// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
+// the same type. The value it returns is not interesting.
+//
+// Instead of making StaticAssertTypeEq a class template, we make it a
+// function template that invokes a helper class template. This
+// prevents a user from misusing StaticAssertTypeEq<T1, T2> by
+// defining objects of that type.
+//
+// CAVEAT:
+//
+// When used inside a method of a class template,
+// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
+// instantiated. For example, given:
+//
+// template <typename T> class Foo {
+// public:
+// void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+// };
+//
+// the code:
+//
+// void Test1() { Foo<bool> foo; }
+//
+// will NOT generate a compiler error, as Foo<bool>::Bar() is never
+// actually instantiated. Instead, you need:
+//
+// void Test2() { Foo<bool> foo; foo.Bar(); }
+//
+// to cause a compiler error.
+template <typename T1, typename T2>
+bool StaticAssertTypeEq() {
+ internal::StaticAssertTypeEqHelper<T1, T2>();
+ return true;
+}
+
+// Defines a test.
+//
+// The first parameter is the name of the test case, and the second
+// parameter is the name of the test within the test case.
+//
+// The convention is to end the test case name with "Test". For
+// example, a test case for the Foo class can be named FooTest.
+//
+// The user should put his test code between braces after using this
+// macro. Example:
+//
+// TEST(FooTest, InitializesCorrectly) {
+// Foo foo;
+// EXPECT_TRUE(foo.StatusIsOK());
+// }
+
+// Note that we call GetTestTypeId() instead of GetTypeId<
+// ::testing::Test>() here to get the type ID of testing::Test. This
+// is to work around a suspected linker bug when using Google Test as
+// a framework on Mac OS X. The bug causes GetTypeId<
+// ::testing::Test>() to return different values depending on whether
+// the call is from the Google Test framework itself or from user test
+// code. GetTestTypeId() is guaranteed to always return the same
+// value, as it always calls GetTypeId<>() from the Google Test
+// framework.
+#define GTEST_TEST(test_case_name, test_name)\
+ GTEST_TEST_(test_case_name, test_name, \
+ ::testing::Test, ::testing::internal::GetTestTypeId())
+
+// Define this macro to 1 to omit the definition of TEST(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_TEST
+#define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name)
+#endif
+
+// Defines a test that uses a test fixture.
+//
+// The first parameter is the name of the test fixture class, which
+// also doubles as the test case name. The second parameter is the
+// name of the test within the test case.
+//
+// A test fixture class must be declared earlier. The user should put
+// his test code between braces after using this macro. Example:
+//
+// class FooTest : public testing::Test {
+// protected:
+// virtual void SetUp() { b_.AddElement(3); }
+//
+// Foo a_;
+// Foo b_;
+// };
+//
+// TEST_F(FooTest, InitializesCorrectly) {
+// EXPECT_TRUE(a_.StatusIsOK());
+// }
+//
+// TEST_F(FooTest, ReturnsElementCountCorrectly) {
+// EXPECT_EQ(0, a_.size());
+// EXPECT_EQ(1, b_.size());
+// }
+
+#define TEST_F(test_fixture, test_name)\
+ GTEST_TEST_(test_fixture, test_name, test_fixture, \
+ ::testing::internal::GetTypeId<test_fixture>())
+
+// Use this macro in main() to run all tests. It returns 0 if all
+// tests are successful, or 1 otherwise.
+//
+// RUN_ALL_TESTS() should be invoked after the command line has been
+// parsed by InitGoogleTest().
+
+#define RUN_ALL_TESTS()\
+ (::testing::UnitTest::GetInstance()->Run())
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_H_
diff --git a/src/common/gtest_main.cc b/src/common/gtest_main.cc
new file mode 100755
index 00000000..0b82dd42
--- /dev/null
+++ b/src/common/gtest_main.cc
@@ -0,0 +1,51 @@
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*****
+ * NAME
+ *
+ *
+ * AUTHOR
+ * google
+ *
+ * COPYRIGHT
+ * Copyright (c) 2004-2011 by cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * DESCRIPTION
+ *
+ ****/
+
+#include <iostream>
+#include <common/gtest.h>
+
+int gtest_main(int argc, char **argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/src/common/os_types.h b/src/common/os_types.h
new file mode 100755
index 00000000..ce3a2a45
--- /dev/null
+++ b/src/common/os_types.h
@@ -0,0 +1,40 @@
+#ifndef MY_OS_TYPES_h
+#define MY_OS_TYPES_h
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#define TASKFUNC
+
+#ifndef STDIN_FILENO
+ #define STDIN_FILENO 0
+#endif
+
+#ifndef STDOUT_FILENO
+ #define STDOUT_FILENO 1
+#endif
+
+#ifndef STDERR_FILENO
+ #define STDERR_FILENO 2
+#endif
+
+
+#endif
diff --git a/src/common/pcap.cpp b/src/common/pcap.cpp
new file mode 100755
index 00000000..6dd54514
--- /dev/null
+++ b/src/common/pcap.cpp
@@ -0,0 +1,299 @@
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "pcap.h"
+#include <errno.h>
+#include <string.h>
+#include "pal_utl.h"
+
+
+
+
+static uint32_t MAGIC_NUM_FLIP = 0xd4c3b2a1;
+static uint32_t MAGIC_NUM_DONT_FLIP = 0xa1b2c3d4;
+
+
+LibPCapReader::LibPCapReader()
+{
+ m_is_open = false;
+ m_last_time = 0;
+ m_is_valid = false;
+ m_file_handler = NULL;
+ m_is_flip = false;
+}
+
+LibPCapReader::~LibPCapReader()
+{
+ if (m_is_open && m_file_handler) {
+ fclose(m_file_handler);
+ }
+}
+
+void LibPCapReader::Rewind() {
+ if (m_is_open && m_file_handler) {
+ rewind(m_file_handler);
+ this->init();
+ }
+}
+
+/**
+ * Open the file for reading.
+ * @param name
+ *
+ * @return bool
+ */
+bool LibPCapReader::Create(char * name, int loops)
+{
+ this->m_loops = loops;
+
+ if(name == NULL) {
+ return false;
+ }
+
+ if (m_is_open) {
+ return true;
+ }
+ m_file_handler = CAP_FOPEN_64(name,"rb");
+
+ if (m_file_handler == 0) {
+ printf(" failed to open cap file %s : errno : %d\n",name, errno);
+ return false;
+ }
+
+ CAP_FSEEK_64 (m_file_handler, 0, SEEK_END);
+ m_file_size = CAP_FTELL_64 (m_file_handler);
+ rewind (m_file_handler);
+
+ if (init()) {
+ m_is_open = true;
+ return true;
+ }
+
+ fclose(m_file_handler);
+
+ return false;
+}
+
+/**
+ * Init the reader.
+ * First read the file header and make sure it is in libpcap format.
+ * If so, read the flip value: records are sensitive to the
+ * endianness of the recording machine.
+ *
+ * @return bool
+ */
+bool LibPCapReader::init()
+{
+ packet_file_header_t header;
+ memset(&header,0,sizeof(packet_file_header_t));
+ size_t n = fread(&header,1,sizeof(packet_file_header_t),m_file_handler);
+ if (n < sizeof(packet_file_header_t)) {
+ return false;
+ }
+
+ if (header.magic == MAGIC_NUM_FLIP) {
+ m_is_flip = true;
+ } else if (header.magic == MAGIC_NUM_DONT_FLIP){
+ m_is_flip = false;
+ } else {
+ // capture file is not in libpcap format.
+ m_is_valid = false;
+ return false;
+ }
+
+ m_is_valid = true;
+ return true;
+}
+
+/**
+ * flip header values.
+ * @param toflip
+ */
+void LibPCapReader::flip(sf_pkthdr_t * toflip)
+{
+ toflip->ts.sec = PAL_NTOHL(toflip->ts.sec);
+ toflip->ts.msec = PAL_NTOHL(toflip->ts.msec);
+ toflip->len = PAL_NTOHL(toflip->len);
+ toflip->caplen = PAL_NTOHL(toflip->caplen);
+}
+
+bool LibPCapReader::ReadPacket(CCapPktRaw *lpPacket)
+{
+ if(!m_is_valid || !m_is_open)
+ return false;
+
+ sf_pkthdr_t pkt_header;
+ memset(&pkt_header,0,sizeof(sf_pkthdr_t));
+
+ if (CAP_FTELL_64(m_file_handler) == m_file_size) {
+ /* reached end of file - do we loop ?*/
+ if (m_loops > 0) {
+ rewind(m_file_handler);
+ this->init();
+
+ }
+ }
+ int n = fread(&pkt_header,1,sizeof(sf_pkthdr_t),m_file_handler);
+ if (n < sizeof(sf_pkthdr_t)) {
+ return false;
+ }
+
+ if (m_is_flip) {
+ flip(&pkt_header);
+ }
+ if (pkt_header.len > READER_MAX_PACKET_SIZE) {
+ /* cannot read this packet */
+ assert(0);
+ return false;
+ }
+
+ lpPacket->pkt_len = fread(lpPacket->raw,1,pkt_header.len,m_file_handler);
+
+ lpPacket->time_sec = pkt_header.ts.sec;
+ lpPacket->time_nsec = pkt_header.ts.msec*1000;
+
+ if ( lpPacket->pkt_len < pkt_header.len) {
+ lpPacket->pkt_len = 0;
+ return false;
+ }
+
+ /* decrease packet limit count */
+ if (m_loops > 0) {
+ m_loops--;
+ }
+ lpPacket->pkt_cnt++;
+ return true;
+}
+
+LibPCapWriter::LibPCapWriter()
+{
+ m_file_handler = NULL;
+ m_timestamp = 0;
+ m_is_open = false;
+}
+
+LibPCapWriter::~LibPCapWriter()
+{
+ Close();
+}
+
+/**
+ * close and release file desc.
+*/
+void LibPCapWriter::Close()
+{
+ if (m_is_open) {
+ fclose(m_file_handler);
+ m_file_handler = NULL;
+ m_is_open = false;
+ }
+}
+
+/**
+ * Try to open file for writing.
+ * @param name - file name
+ *
+ * @return bool
+ */
+bool LibPCapWriter::Create(char * name)
+{
+ if (name == NULL) {
+ return false;
+ }
+
+ if (m_is_open) {
+ return true;
+ }
+
+ m_file_handler = CAP_FOPEN_64(name,"wb");
+ if (m_file_handler == 0) {
+ printf(" ERROR create file \n");
+ return(false);
+ }
+ /* prepare the write counter */
+ m_pkt_count = 0;
+ return init();
+}
+
+/**
+ *
+ * Write the libpcap header.
+ *
+ * @return bool - true on success
+ */
+bool LibPCapWriter::init()
+{
+
+ // prepare the file header (one time header for each libpcap file)
+ // and write it.
+ packet_file_header_t header;
+ header.magic = MAGIC_NUM_DONT_FLIP;
+ header.version_major = 0x0002;
+ header.version_minor = 0x0004;
+ header.thiszone = 0;
+ header.sigfigs = 0;
+ header.snaplen = 2000;
+ header.linktype = 1;
+
+ int n = fwrite(&header,1,sizeof(header),m_file_handler);
+ if ( n == sizeof(packet_file_header_t )) {
+ m_is_open = true;
+ return true;
+ }
+ fclose(m_file_handler);
+ return false;
+}
+
+
+/**
+ * Write packet to file.
+ * Must be called after a successful Create call.
+ * @param p
+ * @param size
+ *
+ * @return bool - true on success.
+ */
+bool LibPCapWriter::write_packet(CCapPktRaw * lpPacket)
+{
+ if (!m_is_open) {
+ return false;
+ }
+
+ // build the packet libpcap header
+ sf_pkthdr_t pkt_header;
+ pkt_header.caplen = lpPacket->pkt_len;
+ pkt_header.len = lpPacket->pkt_len;
+ pkt_header.ts.msec = (lpPacket->time_nsec/1000);
+ pkt_header.ts.sec = lpPacket->time_sec;
+
+ m_timestamp++;
+
+ // write header and then the packet.
+ int n = fwrite(&pkt_header,1,sizeof(sf_pkthdr_t),m_file_handler);
+ n+= fwrite(lpPacket->raw,1,lpPacket->pkt_len,m_file_handler);
+
+ if (n< ( (int)sizeof(sf_pkthdr_t) + lpPacket->pkt_len)) {
+ return false;
+ }
+ /* advance the counter on success */
+ m_pkt_count++;
+ return true;
+}
+
+uint32_t LibPCapWriter::get_pkt_count() {
+ return m_pkt_count;
+}
+
diff --git a/src/common/pcap.h b/src/common/pcap.h
new file mode 100755
index 00000000..fb266e31
--- /dev/null
+++ b/src/common/pcap.h
@@ -0,0 +1,150 @@
+#ifndef __LIBPCAP_H__
+#define __LIBPCAP_H__
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include "captureFile.h"
+#include <stdio.h>
+
+typedef struct pcaptime {
+ uint32_t sec;
+ uint32_t msec;
+} pcaptime_t;
+
+typedef struct packet_file_header
+{
+ uint32_t magic;
+ uint16_t version_major;
+ uint16_t version_minor;
+ uint32_t thiszone;
+ uint32_t sigfigs;
+ uint32_t snaplen;
+ uint32_t linktype;
+} packet_file_header_t;
+
+typedef struct sf_pkthdr {
+ pcaptime_t ts;
+ uint32_t caplen;
+ uint32_t len;
+} sf_pkthdr_t;
+
+/**
+ * Implements the CCAPReaderBase interface.
+ *
+ */
+class LibPCapReader : public CCapReaderBase
+{
+public:
+ LibPCapReader();
+
+ virtual ~LibPCapReader();
+
+ /**
+ * open file for reading.
+ * (can be called once).
+ * @param name
+ *
+ * @return bool
+ */
+ bool Create(char * name, int loops = 0);
+
+ /**
+ * When called after Create, returns true only if the
+ * capture file is in libpcap format.
+ *
+ * @return bool
+ */
+ bool isValid() { return m_is_valid; }
+
+ /**
+ * Fill the structure with the new packet.
+ * @param lpPacket
+ *
+ * @return bool - return true if a packet was read and false
+ * otherwise (reached EOF)
+ */
+ virtual bool ReadPacket(CCapPktRaw *lpPacket);
+ virtual void Rewind();
+
+
+private:
+ LibPCapReader(LibPCapReader &);
+
+
+ bool init();
+ void flip(sf_pkthdr_t * toflip);
+ bool m_is_open;
+ uint64_t m_last_time;
+ bool m_is_valid;
+ FILE * m_file_handler;
+ bool m_is_flip;
+
+};
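+
+/**
+ * A brief usage sketch (illustrative only; construction and buffer handling
+ * of CCapPktRaw are assumed from captureFile.h):
+ *
+ *   LibPCapReader reader;
+ *   if (reader.Create((char *)"capture.pcap") && reader.isValid()) {
+ *       CCapPktRaw pkt;
+ *       while (reader.ReadPacket(&pkt)) {
+ *           // inspect pkt.raw / pkt.pkt_len here
+ *       }
+ *   }
+ */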
+
+/**
+ * Libpcap file format writer.
+ * Implements the CFileWriterBase interface.
+ */
+class LibPCapWriter: public CFileWriterBase
+{
+
+public:
+
+ LibPCapWriter();
+ virtual ~LibPCapWriter();
+
+ /**
+ * Open file for writing. Rewrite from scratch (no append).
+ * @param name - the file name
+ *
+ * @return bool - return true if the file was opened successfully.
+ */
+ bool Create(char * name);
+
+ /**
+ * Write packet to file (must be called only after a successful
+ * Create).
+ *
+ * @param p - buffer pointer
+ * @param size - buffer length
+ *
+ * @return true on success.
+ */
+ virtual bool write_packet(CCapPktRaw * lpPacket);
+ /**
+ *
+ * returns the count of packets so far written
+ *
+ * @return uint32_t
+ */
+ uint32_t get_pkt_count();
+
+ /**
+ * Close file and flush all.
+ */
+ void Close();
+
+private:
+
+ bool init();
+ FILE * m_file_handler;
+ uint64_t m_timestamp;
+ bool m_is_open;
+ uint32_t m_pkt_count;
+};
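+
+/**
+ * A brief usage sketch (illustrative only; pkt is a CCapPktRaw that was
+ * filled elsewhere, e.g. by LibPCapReader::ReadPacket):
+ *
+ *   LibPCapWriter writer;
+ *   if (writer.Create((char *)"out.pcap")) {
+ *       writer.write_packet(&pkt);
+ *       printf("wrote %u packets\n", writer.get_pkt_count());
+ *       writer.Close();
+ *   }
+ */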
+#endif
diff --git a/src/dpdk_lib18/librte_acl/Makefile b/src/dpdk_lib18/librte_acl/Makefile
new file mode 100755
index 00000000..65e566df
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/Makefile
@@ -0,0 +1,63 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_acl.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_ACL) += tb_mem.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_ACL) += rte_acl.c
+SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_bld.c
+SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_gen.c
+SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_scalar.c
+SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_sse.c
+
+CFLAGS_acl_run_sse.o += -msse4.1
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_ACL)-include := rte_acl_osdep.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_ACL)-include += rte_acl.h
+
+ifeq ($(CONFIG_RTE_LIBRTE_ACL_STANDALONE),y)
+# standalone build
+SYMLINK-$(CONFIG_RTE_LIBRTE_ACL)-include += rte_acl_osdep_alone.h
+else
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ACL) += lib/librte_eal lib/librte_malloc
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_acl/acl.h b/src/dpdk_lib18/librte_acl/acl.h
new file mode 100755
index 00000000..102fa51a
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/acl.h
@@ -0,0 +1,196 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ACL_H_
+#define _ACL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define RTE_ACL_QUAD_MAX 5
+#define RTE_ACL_QUAD_SIZE 4
+#define RTE_ACL_QUAD_SINGLE UINT64_C(0x7f7f7f7f00000000)
+
+#define RTE_ACL_SINGLE_TRIE_SIZE 2000
+
+#define RTE_ACL_DFA_MAX UINT8_MAX
+#define RTE_ACL_DFA_SIZE (UINT8_MAX + 1)
+
+typedef int bits_t;
+
+#define RTE_ACL_BIT_SET_SIZE ((UINT8_MAX + 1) / (sizeof(bits_t) * CHAR_BIT))
+
+struct rte_acl_bitset {
+ bits_t bits[RTE_ACL_BIT_SET_SIZE];
+};
+
+#define RTE_ACL_NODE_DFA (0 << RTE_ACL_TYPE_SHIFT)
+#define RTE_ACL_NODE_SINGLE (1U << RTE_ACL_TYPE_SHIFT)
+#define RTE_ACL_NODE_QEXACT (2U << RTE_ACL_TYPE_SHIFT)
+#define RTE_ACL_NODE_QRANGE (3U << RTE_ACL_TYPE_SHIFT)
+#define RTE_ACL_NODE_MATCH (4U << RTE_ACL_TYPE_SHIFT)
+#define RTE_ACL_NODE_TYPE (7U << RTE_ACL_TYPE_SHIFT)
+#define RTE_ACL_NODE_UNDEFINED UINT32_MAX
+
+/*
+ * Structure of a node is a set of ptrs and each ptr has a bit map
+ * of values associated with this transition.
+ */
+struct rte_acl_ptr_set {
+ struct rte_acl_bitset values; /* input values associated with ptr */
+ struct rte_acl_node *ptr; /* transition to next node */
+};
+
+struct rte_acl_classifier_results {
+ int results[RTE_ACL_MAX_CATEGORIES];
+};
+
+struct rte_acl_match_results {
+ uint32_t results[RTE_ACL_MAX_CATEGORIES];
+ int32_t priority[RTE_ACL_MAX_CATEGORIES];
+};
+
+struct rte_acl_node {
+ uint64_t node_index; /* index for this node */
+ uint32_t level; /* level 0-n in the trie */
+ uint32_t ref_count; /* ref count for this node */
+ struct rte_acl_bitset values;
+ /* set of all values that map to another node
+ * (union of bits in each transition.
+ */
+ uint32_t num_ptrs; /* number of ptr_set in use */
+ uint32_t max_ptrs; /* number of allocated ptr_set */
+ uint32_t min_add; /* number of ptr_set per allocation */
+ struct rte_acl_ptr_set *ptrs; /* transitions array for this node */
+ int32_t match_flag;
+ int32_t match_index; /* index to match data */
+ uint32_t node_type;
+ int32_t fanout;
+ /* number of ranges (transitions w/ consecutive bits) */
+ int32_t id;
+ struct rte_acl_match_results *mrt; /* only valid when match_flag != 0 */
+ char transitions[RTE_ACL_QUAD_SIZE];
+ /* boundaries for ranged node */
+ struct rte_acl_node *next;
+ /* free list link or pointer to duplicate node during merge */
+ struct rte_acl_node *prev;
+ /* points to node from which this node was duplicated */
+
+ uint32_t subtree_id;
+ uint32_t subtree_ref_count;
+
+};
+enum {
+ RTE_ACL_SUBTREE_NODE = 0x80000000
+};
+
+/*
+ * Types of tries used to generate runtime structure(s)
+ */
+enum {
+ RTE_ACL_FULL_TRIE = 0,
+ RTE_ACL_NOSRC_TRIE = 1,
+ RTE_ACL_NODST_TRIE = 2,
+ RTE_ACL_NOPORTS_TRIE = 4,
+ RTE_ACL_NOVLAN_TRIE = 8,
+ RTE_ACL_UNUSED_TRIE = 0x80000000
+};
+
+
+/** MAX number of tries per one ACL context.*/
+#define RTE_ACL_MAX_TRIES 8
+
+/** Max number of characters in PM name.*/
+#define RTE_ACL_NAMESIZE 32
+
+
+struct rte_acl_trie {
+ uint32_t type;
+ uint32_t count;
+ int32_t smallest; /* smallest rule in this trie */
+ uint32_t root_index;
+ const uint32_t *data_index;
+ uint32_t num_data_indexes;
+};
+
+struct rte_acl_bld_trie {
+ struct rte_acl_node *trie;
+};
+
+struct rte_acl_ctx {
+ char name[RTE_ACL_NAMESIZE];
+ /** Name of the ACL context. */
+ int32_t socket_id;
+ /** Socket ID to allocate memory from. */
+ enum rte_acl_classify_alg alg;
+ void *rules;
+ uint32_t max_rules;
+ uint32_t rule_sz;
+ uint32_t num_rules;
+ uint32_t num_categories;
+ uint32_t num_tries;
+ uint32_t match_index;
+ uint64_t no_match;
+ uint64_t idle;
+ uint64_t *trans_table;
+ uint32_t *data_indexes;
+ struct rte_acl_trie trie[RTE_ACL_MAX_TRIES];
+ void *mem;
+ size_t mem_sz;
+ struct rte_acl_config config; /* copy of build config. */
+};
+
+int rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie,
+ struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
+ uint32_t num_categories, uint32_t data_index_sz, int match_num);
+
+typedef int (*rte_acl_classify_t)
+(const struct rte_acl_ctx *, const uint8_t **, uint32_t *, uint32_t, uint32_t);
+
+/*
+ * Different implementations of ACL classify.
+ */
+int
+rte_acl_classify_scalar(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t num, uint32_t categories);
+
+int
+rte_acl_classify_sse(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t num, uint32_t categories);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _ACL_H_ */
diff --git a/src/dpdk_lib18/librte_acl/acl_bld.c b/src/dpdk_lib18/librte_acl/acl_bld.c
new file mode 100755
index 00000000..d6e0c451
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/acl_bld.c
@@ -0,0 +1,2008 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_acl.h>
+#include "tb_mem.h"
+#include "acl.h"
+
+#define ACL_POOL_ALIGN 8
+#define ACL_POOL_ALLOC_MIN 0x800000
+
+/* number of pointers per alloc */
+#define ACL_PTR_ALLOC 32
+
+/* variable for dividing rule sets */
+#define NODE_MAX 2500
+#define NODE_PERCENTAGE (0.40)
+#define RULE_PERCENTAGE (0.40)
+
+/* TALLY are statistics per field */
+enum {
+ TALLY_0 = 0, /* number of rules that are 0% or more wild. */
+ TALLY_25, /* number of rules that are 25% or more wild. */
+ TALLY_50,
+ TALLY_75,
+ TALLY_100,
+ TALLY_DEACTIVATED, /* deactivated fields (100% wild in all rules). */
+ TALLY_DEPTH,
+ /* number of rules that are 100% wild for this field and higher. */
+ TALLY_NUM
+};
+
+static const uint32_t wild_limits[TALLY_DEACTIVATED] = {0, 25, 50, 75, 100};
+
+enum {
+ ACL_INTERSECT_NONE = 0,
+ ACL_INTERSECT_A = 1, /* set A is a superset of A and B intersect */
+ ACL_INTERSECT_B = 2, /* set B is a superset of A and B intersect */
+ ACL_INTERSECT = 4, /* sets A and B intersect */
+};
+
+enum {
+ ACL_PRIORITY_EQUAL = 0,
+ ACL_PRIORITY_NODE_A = 1,
+ ACL_PRIORITY_NODE_B = 2,
+ ACL_PRIORITY_MIXED = 3
+};
+
+
+struct acl_mem_block {
+ uint32_t block_size;
+ void *mem_ptr;
+};
+
+#define MEM_BLOCK_NUM 16
+
+/* Single ACL rule, build representation.*/
+struct rte_acl_build_rule {
+ struct rte_acl_build_rule *next;
+ struct rte_acl_config *config;
+ /**< configuration for each field in the rule. */
+ const struct rte_acl_rule *f;
+ uint32_t *wildness;
+};
+
+/* Context for build phase */
+struct acl_build_context {
+ const struct rte_acl_ctx *acx;
+ struct rte_acl_build_rule *build_rules;
+ struct rte_acl_config cfg;
+ uint32_t node;
+ uint32_t num_nodes;
+ uint32_t category_mask;
+ uint32_t num_rules;
+ uint32_t node_id;
+ uint32_t src_mask;
+ uint32_t num_build_rules;
+ uint32_t num_tries;
+ struct tb_mem_pool pool;
+ struct rte_acl_trie tries[RTE_ACL_MAX_TRIES];
+ struct rte_acl_bld_trie bld_tries[RTE_ACL_MAX_TRIES];
+ uint32_t data_indexes[RTE_ACL_MAX_TRIES][RTE_ACL_MAX_FIELDS];
+
+ /* memory free lists for nodes and blocks used for node ptrs */
+ struct acl_mem_block blocks[MEM_BLOCK_NUM];
+ struct rte_acl_node *node_free_list;
+};
+
+static int acl_merge_trie(struct acl_build_context *context,
+ struct rte_acl_node *node_a, struct rte_acl_node *node_b,
+ uint32_t level, uint32_t subtree_id, struct rte_acl_node **node_c);
+
+static int acl_merge(struct acl_build_context *context,
+ struct rte_acl_node *node_a, struct rte_acl_node *node_b,
+ int move, int a_subset, int level);
+
+static void
+acl_deref_ptr(struct acl_build_context *context,
+ struct rte_acl_node *node, int index);
+
+static void *
+acl_build_alloc(struct acl_build_context *context, size_t n, size_t s)
+{
+ uint32_t m;
+ void *p;
+ size_t alloc_size = n * s;
+
+ /*
+ * look for memory in free lists
+ */
+ for (m = 0; m < RTE_DIM(context->blocks); m++) {
+ if (context->blocks[m].block_size ==
+ alloc_size && context->blocks[m].mem_ptr != NULL) {
+ p = context->blocks[m].mem_ptr;
+ context->blocks[m].mem_ptr = *((void **)p);
+ memset(p, 0, alloc_size);
+ return p;
+ }
+ }
+
+ /*
+ * return allocation from memory pool
+ */
+ p = tb_alloc(&context->pool, alloc_size);
+ return p;
+}
+
+/*
+ * Free memory blocks (kept in context for reuse).
+ */
+static void
+acl_build_free(struct acl_build_context *context, size_t s, void *p)
+{
+ uint32_t n;
+
+ for (n = 0; n < RTE_DIM(context->blocks); n++) {
+ if (context->blocks[n].block_size == s) {
+ *((void **)p) = context->blocks[n].mem_ptr;
+ context->blocks[n].mem_ptr = p;
+ return;
+ }
+ }
+ for (n = 0; n < RTE_DIM(context->blocks); n++) {
+ if (context->blocks[n].block_size == 0) {
+ context->blocks[n].block_size = s;
+ *((void **)p) = NULL;
+ context->blocks[n].mem_ptr = p;
+ return;
+ }
+ }
+}
+
+/*
+ * Allocate and initialize a new node.
+ */
+static struct rte_acl_node *
+acl_alloc_node(struct acl_build_context *context, int level)
+{
+ struct rte_acl_node *node;
+
+ if (context->node_free_list != NULL) {
+ node = context->node_free_list;
+ context->node_free_list = node->next;
+ memset(node, 0, sizeof(struct rte_acl_node));
+ } else {
+ node = acl_build_alloc(context, sizeof(struct rte_acl_node), 1);
+ }
+
+ if (node != NULL) {
+ node->num_ptrs = 0;
+ node->level = level;
+ node->node_type = RTE_ACL_NODE_UNDEFINED;
+ node->node_index = RTE_ACL_NODE_UNDEFINED;
+ context->num_nodes++;
+ node->id = context->node_id++;
+ }
+ return node;
+}
+
+/*
+ * Dereference all nodes to which this node points
+ */
+static void
+acl_free_node(struct acl_build_context *context,
+ struct rte_acl_node *node)
+{
+ uint32_t n;
+
+ if (node->prev != NULL)
+ node->prev->next = NULL;
+ for (n = 0; n < node->num_ptrs; n++)
+ acl_deref_ptr(context, node, n);
+
+ /* free mrt if this is a match node */
+ if (node->mrt != NULL) {
+ acl_build_free(context, sizeof(struct rte_acl_match_results),
+ node->mrt);
+ node->mrt = NULL;
+ }
+
+ /* free transitions to other nodes */
+ if (node->ptrs != NULL) {
+ acl_build_free(context,
+ node->max_ptrs * sizeof(struct rte_acl_ptr_set),
+ node->ptrs);
+ node->ptrs = NULL;
+ }
+
+ /* put it on the free list */
+ context->num_nodes--;
+ node->next = context->node_free_list;
+ context->node_free_list = node;
+}
+
+
+/*
+ * Include src bitset in dst bitset
+ */
+static void
+acl_include(struct rte_acl_bitset *dst, struct rte_acl_bitset *src, bits_t mask)
+{
+ uint32_t n;
+
+ for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
+ dst->bits[n] = (dst->bits[n] & mask) | src->bits[n];
+}
+
+/*
+ * Set dst to bits of src1 that are not in src2
+ */
+static int
+acl_exclude(struct rte_acl_bitset *dst,
+ struct rte_acl_bitset *src1,
+ struct rte_acl_bitset *src2)
+{
+ uint32_t n;
+ bits_t all_bits = 0;
+
+ for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
+ dst->bits[n] = src1->bits[n] & ~src2->bits[n];
+ all_bits |= dst->bits[n];
+ }
+ return all_bits != 0;
+}
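+
+/*
+ * Example (illustrative): with src1 covering byte values {1, 2, 3} and
+ * src2 covering {2}, acl_exclude() leaves dst covering {1, 3} and
+ * returns 1; if src2 covered all of src1, dst would be empty and the
+ * return value 0.
+ */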
+
+/*
+ * Add a pointer (ptr) to a node.
+ */
+static int
+acl_add_ptr(struct acl_build_context *context,
+ struct rte_acl_node *node,
+ struct rte_acl_node *ptr,
+ struct rte_acl_bitset *bits)
+{
+ uint32_t n, num_ptrs;
+ struct rte_acl_ptr_set *ptrs = NULL;
+
+ /*
+ * If there's already a pointer to the same node, just add to the bitset
+ */
+ for (n = 0; n < node->num_ptrs; n++) {
+ if (node->ptrs[n].ptr != NULL) {
+ if (node->ptrs[n].ptr == ptr) {
+ acl_include(&node->ptrs[n].values, bits, -1);
+ acl_include(&node->values, bits, -1);
+ return 0;
+ }
+ }
+ }
+
+ /* if there's no room for another pointer, make room */
+ if (node->num_ptrs >= node->max_ptrs) {
+ /* add room for more pointers */
+ num_ptrs = node->max_ptrs + ACL_PTR_ALLOC;
+ ptrs = acl_build_alloc(context, num_ptrs, sizeof(*ptrs));
+ if (ptrs == NULL)
+ return -ENOMEM;
+
+ /* copy current pointers to new memory allocation */
+ if (node->ptrs != NULL) {
+ memcpy(ptrs, node->ptrs,
+ node->num_ptrs * sizeof(*ptrs));
+ acl_build_free(context, node->max_ptrs * sizeof(*ptrs),
+ node->ptrs);
+ }
+ node->ptrs = ptrs;
+ node->max_ptrs = num_ptrs;
+ }
+
+ /* Find available ptr and add a new pointer to this node */
+ for (n = node->min_add; n < node->max_ptrs; n++) {
+ if (node->ptrs[n].ptr == NULL) {
+ node->ptrs[n].ptr = ptr;
+ acl_include(&node->ptrs[n].values, bits, 0);
+ acl_include(&node->values, bits, -1);
+ if (ptr != NULL)
+ ptr->ref_count++;
+ if (node->num_ptrs <= n)
+ node->num_ptrs = n + 1;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Add a pointer for a range of values
+ */
+static int
+acl_add_ptr_range(struct acl_build_context *context,
+ struct rte_acl_node *root,
+ struct rte_acl_node *node,
+ uint8_t low,
+ uint8_t high)
+{
+ uint32_t n;
+ struct rte_acl_bitset bitset;
+
+ /* clear the bitset values */
+ for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
+ bitset.bits[n] = 0;
+
+ /* for each bit in range, add bit to set */
+ for (n = 0; n < UINT8_MAX + 1; n++)
+ if (n >= low && n <= high)
+ bitset.bits[n / (sizeof(bits_t) * 8)] |=
+ 1 << (n % (sizeof(bits_t) * 8));
+
+ return acl_add_ptr(context, root, node, &bitset);
+}
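+
+/*
+ * Example (illustrative): acl_add_ptr_range(context, root, node, 0x20, 0x2f)
+ * sets the 16 bits for byte values 0x20..0x2f in the temporary bitset and
+ * then records a single transition from root to node covering exactly
+ * those input bytes.
+ */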
+
+/*
+ * Generate a bitset from a byte value and mask.
+ */
+static int
+acl_gen_mask(struct rte_acl_bitset *bitset, uint32_t value, uint32_t mask)
+{
+ int range = 0;
+ uint32_t n;
+
+ /* clear the bitset values */
+ for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
+ bitset->bits[n] = 0;
+
+ /* for each bit in value/mask, add bit to set */
+ for (n = 0; n < UINT8_MAX + 1; n++) {
+ if ((n & mask) == value) {
+ range++;
+ bitset->bits[n / (sizeof(bits_t) * 8)] |=
+ 1 << (n % (sizeof(bits_t) * 8));
+ }
+ }
+ return range;
+}
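+
+/*
+ * Example (illustrative): acl_gen_mask(&bits, 0x50, 0xf0) sets the bits
+ * for the 16 byte values 0x50..0x5f (every n with (n & 0xf0) == 0x50)
+ * and returns 16, the number of byte values covered.
+ */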
+
+/*
+ * Determine how A and B intersect.
+ * Determine if A and/or B are supersets of the intersection.
+ */
+static int
+acl_intersect_type(struct rte_acl_bitset *a_bits,
+ struct rte_acl_bitset *b_bits,
+ struct rte_acl_bitset *intersect)
+{
+ uint32_t n;
+ bits_t intersect_bits = 0;
+ bits_t a_superset = 0;
+ bits_t b_superset = 0;
+
+ /*
+ * calculate and store intersection and check if A and/or B have
+ * bits outside the intersection (superset)
+ */
+ for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
+ intersect->bits[n] = a_bits->bits[n] & b_bits->bits[n];
+ a_superset |= a_bits->bits[n] ^ intersect->bits[n];
+ b_superset |= b_bits->bits[n] ^ intersect->bits[n];
+ intersect_bits |= intersect->bits[n];
+ }
+
+ n = (intersect_bits == 0 ? ACL_INTERSECT_NONE : ACL_INTERSECT) |
+ (b_superset == 0 ? 0 : ACL_INTERSECT_B) |
+ (a_superset == 0 ? 0 : ACL_INTERSECT_A);
+
+ return n;
+}
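+
+/*
+ * Example (illustrative): if A covers byte values {1, 2, 3} and B covers
+ * {2, 3, 4}, the intersection is {2, 3}; both sets have bits outside it,
+ * so the result is ACL_INTERSECT | ACL_INTERSECT_A | ACL_INTERSECT_B.
+ * If B were {2, 3}, only ACL_INTERSECT | ACL_INTERSECT_A would be set.
+ */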
+
+/*
+ * Check if all bits in the bitset are on
+ */
+static int
+acl_full(struct rte_acl_node *node)
+{
+ uint32_t n;
+ bits_t all_bits = -1;
+
+ for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
+ all_bits &= node->values.bits[n];
+ return all_bits == -1;
+}
+
+/*
+ * Check if all bits in the bitset are off
+ */
+static int
+acl_empty(struct rte_acl_node *node)
+{
+ uint32_t n;
+
+ if (node->ref_count == 0) {
+ for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
+ if (0 != node->values.bits[n])
+ return 0;
+ }
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/*
+ * Compute intersection of A and B
+ * return 1 if there is an intersection else 0.
+ */
+static int
+acl_intersect(struct rte_acl_bitset *a_bits,
+ struct rte_acl_bitset *b_bits,
+ struct rte_acl_bitset *intersect)
+{
+ uint32_t n;
+ bits_t all_bits = 0;
+
+ for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
+ intersect->bits[n] = a_bits->bits[n] & b_bits->bits[n];
+ all_bits |= intersect->bits[n];
+ }
+ return all_bits != 0;
+}
+
+/*
+ * Duplicate a node
+ */
+static struct rte_acl_node *
+acl_dup_node(struct acl_build_context *context, struct rte_acl_node *node)
+{
+ uint32_t n;
+ struct rte_acl_node *next;
+
+ next = acl_alloc_node(context, node->level);
+ if (next == NULL)
+ return NULL;
+
+ /* allocate the pointers */
+ if (node->num_ptrs > 0) {
+ next->ptrs = acl_build_alloc(context,
+ node->max_ptrs,
+ sizeof(struct rte_acl_ptr_set));
+ if (next->ptrs == NULL)
+ return NULL;
+ next->max_ptrs = node->max_ptrs;
+ }
+
+ /* copy over the pointers */
+ for (n = 0; n < node->num_ptrs; n++) {
+ if (node->ptrs[n].ptr != NULL) {
+ next->ptrs[n].ptr = node->ptrs[n].ptr;
+ next->ptrs[n].ptr->ref_count++;
+ acl_include(&next->ptrs[n].values,
+ &node->ptrs[n].values, -1);
+ }
+ }
+
+ next->num_ptrs = node->num_ptrs;
+
+ /* copy over node's match results */
+ if (node->match_flag == 0)
+ next->match_flag = 0;
+ else {
+ next->match_flag = -1;
+ next->mrt = acl_build_alloc(context, 1, sizeof(*next->mrt));
+ memcpy(next->mrt, node->mrt, sizeof(*next->mrt));
+ }
+
+ /* copy over node's bitset */
+ acl_include(&next->values, &node->values, -1);
+
+ node->next = next;
+ next->prev = node;
+
+ return next;
+}
+
+/*
+ * Dereference a pointer from a node
+ */
+static void
+acl_deref_ptr(struct acl_build_context *context,
+ struct rte_acl_node *node, int index)
+{
+ struct rte_acl_node *ref_node;
+
+ /* De-reference the node at the specified pointer */
+ if (node != NULL && node->ptrs[index].ptr != NULL) {
+ ref_node = node->ptrs[index].ptr;
+ ref_node->ref_count--;
+ if (ref_node->ref_count == 0)
+ acl_free_node(context, ref_node);
+ }
+}
+
+/*
+ * Exclude bitset from a node pointer
+ * returns 0 if the pointer was deref'd,
+ * 1 otherwise.
+ */
+static int
+acl_exclude_ptr(struct acl_build_context *context,
+ struct rte_acl_node *node,
+ int index,
+ struct rte_acl_bitset *b_bits)
+{
+ int retval = 1;
+
+ /*
+ * remove bitset from node pointer and deref
+ * if the bitset becomes empty.
+ */
+ if (!acl_exclude(&node->ptrs[index].values,
+ &node->ptrs[index].values,
+ b_bits)) {
+ acl_deref_ptr(context, node, index);
+ node->ptrs[index].ptr = NULL;
+ retval = 0;
+ }
+
+ /* exclude bits from the composite bits for the node */
+ acl_exclude(&node->values, &node->values, b_bits);
+ return retval;
+}
+
+/*
+ * Remove a bitset from src ptr and move remaining ptr to dst
+ */
+static int
+acl_move_ptr(struct acl_build_context *context,
+ struct rte_acl_node *dst,
+ struct rte_acl_node *src,
+ int index,
+ struct rte_acl_bitset *b_bits)
+{
+ int rc;
+
+ if (b_bits != NULL)
+ if (!acl_exclude_ptr(context, src, index, b_bits))
+ return 0;
+
+ /* add src pointer to dst node */
+ rc = acl_add_ptr(context, dst, src->ptrs[index].ptr,
+ &src->ptrs[index].values);
+ if (rc < 0)
+ return rc;
+
+ /* remove ptr from src */
+ acl_exclude_ptr(context, src, index, &src->ptrs[index].values);
+ return 1;
+}
+
+/*
+ * Exclude the given rte_acl_bitset from the src pointer and copy the remaining pointer to dst
+ */
+static int
+acl_copy_ptr(struct acl_build_context *context,
+ struct rte_acl_node *dst,
+ struct rte_acl_node *src,
+ int index,
+ struct rte_acl_bitset *b_bits)
+{
+ int rc;
+ struct rte_acl_bitset bits;
+
+ if (b_bits != NULL)
+ if (!acl_exclude(&bits, &src->ptrs[index].values, b_bits))
+ return 0;
+
+ rc = acl_add_ptr(context, dst, src->ptrs[index].ptr, &bits);
+ if (rc < 0)
+ return rc;
+ return 1;
+}
+
+/*
+ * Fill in gaps in ptrs list with the ptr at the end of the list
+ */
+static void
+acl_compact_node_ptrs(struct rte_acl_node *node_a)
+{
+ uint32_t n;
+ int min_add = node_a->min_add;
+
+ while (node_a->num_ptrs > 0 &&
+ node_a->ptrs[node_a->num_ptrs - 1].ptr == NULL)
+ node_a->num_ptrs--;
+
+ for (n = min_add; n + 1 < node_a->num_ptrs; n++) {
+
+ /* if this entry is empty */
+ if (node_a->ptrs[n].ptr == NULL) {
+
+ /* move the last pointer to this entry */
+ acl_include(&node_a->ptrs[n].values,
+ &node_a->ptrs[node_a->num_ptrs - 1].values,
+ 0);
+ node_a->ptrs[n].ptr =
+ node_a->ptrs[node_a->num_ptrs - 1].ptr;
+
+ /*
+ * mark the end as empty and adjust the number
+ * of used pointer entries
+ */
+ node_a->ptrs[node_a->num_ptrs - 1].ptr = NULL;
+ while (node_a->num_ptrs > 0 &&
+ node_a->ptrs[node_a->num_ptrs - 1].ptr == NULL)
+ node_a->num_ptrs--;
+ }
+ }
+}
+
+/*
+ * acl_merge helper routine.
+ */
+static int
+acl_merge_intersect(struct acl_build_context *context,
+ struct rte_acl_node *node_a, uint32_t idx_a,
+ struct rte_acl_node *node_b, uint32_t idx_b,
+ int next_move, int level,
+ struct rte_acl_bitset *intersect_ptr)
+{
+ struct rte_acl_node *node_c;
+
+ /* Duplicate A for intersection */
+ node_c = acl_dup_node(context, node_a->ptrs[idx_a].ptr);
+ if (node_c == NULL)
+ return -1;
+
+ /* Remove intersection from A */
+ acl_exclude_ptr(context, node_a, idx_a, intersect_ptr);
+
+ /*
+ * Add a link from A to C for all transitions
+ * in the intersection
+ */
+ if (acl_add_ptr(context, node_a, node_c, intersect_ptr) < 0)
+ return -1;
+
+ /* merge B->node into C */
+ return acl_merge(context, node_c, node_b->ptrs[idx_b].ptr, next_move,
+ 0, level + 1);
+}
+
+
+/*
+ * Merge the children of nodes A and B together.
+ *
+ * if match node
+ * For each category
+ * node A result = highest priority result
+ * if any pointers in A intersect with any in B
+ * For each intersection
+ * C = copy of node that A points to
+ * remove intersection from A pointer
+ * add a pointer to A that points to C for the intersection
+ * Merge C and node that B points to
+ * Compact the pointers in A and B
+ * if move flag
+ * If B has only one reference
+ * Move B pointers to A
+ * else
+ * Copy B pointers to A
+ */
+static int
+acl_merge(struct acl_build_context *context,
+ struct rte_acl_node *node_a, struct rte_acl_node *node_b,
+ int move, int a_subset, int level)
+{
+ uint32_t n, m, ptrs_a, ptrs_b;
+ uint32_t min_add_a, min_add_b;
+ int intersect_type;
+ int node_intersect_type;
+ int b_full, next_move, rc;
+ struct rte_acl_bitset intersect_values;
+ struct rte_acl_bitset intersect_ptr;
+
+ min_add_a = 0;
+ min_add_b = 0;
+ intersect_type = 0;
+ node_intersect_type = 0;
+
+ if (level == 0)
+ a_subset = 1;
+
+ /*
+ * Resolve match priorities
+ */
+ if (node_a->match_flag != 0 || node_b->match_flag != 0) {
+
+ if (node_a->match_flag == 0 || node_b->match_flag == 0)
+ RTE_LOG(ERR, ACL, "Not both matches\n");
+
+ if (node_b->match_flag < node_a->match_flag)
+ RTE_LOG(ERR, ACL, "Not same match\n");
+
+ for (n = 0; n < context->cfg.num_categories; n++) {
+ if (node_a->mrt->priority[n] <
+ node_b->mrt->priority[n]) {
+ node_a->mrt->priority[n] =
+ node_b->mrt->priority[n];
+ node_a->mrt->results[n] =
+ node_b->mrt->results[n];
+ }
+ }
+ }
+
+ /*
+ * If the two node transitions intersect then merge the transitions.
+ * Check intersection for entire node (all pointers)
+ */
+ node_intersect_type = acl_intersect_type(&node_a->values,
+ &node_b->values,
+ &intersect_values);
+
+ if (node_intersect_type & ACL_INTERSECT) {
+
+ b_full = acl_full(node_b);
+
+ min_add_b = node_b->min_add;
+ node_b->min_add = node_b->num_ptrs;
+ ptrs_b = node_b->num_ptrs;
+
+ min_add_a = node_a->min_add;
+ node_a->min_add = node_a->num_ptrs;
+ ptrs_a = node_a->num_ptrs;
+
+ for (n = 0; n < ptrs_a; n++) {
+ for (m = 0; m < ptrs_b; m++) {
+
+ if (node_a->ptrs[n].ptr == NULL ||
+ node_b->ptrs[m].ptr == NULL ||
+ node_a->ptrs[n].ptr ==
+ node_b->ptrs[m].ptr)
+ continue;
+
+ intersect_type = acl_intersect_type(
+ &node_a->ptrs[n].values,
+ &node_b->ptrs[m].values,
+ &intersect_ptr);
+
+ /* If this node is not a 'match' node */
+ if ((intersect_type & ACL_INTERSECT) &&
+ (context->cfg.num_categories != 1 ||
+ !(node_a->ptrs[n].ptr->match_flag))) {
+
+ /*
+ * the next merge is a 'move' if this
+ * one is a move and B's transitions are
+ * a subset of the intersection.
+ */
+ next_move = move &&
+ (intersect_type &
+ ACL_INTERSECT_B) == 0;
+
+ if (a_subset && b_full) {
+ rc = acl_merge(context,
+ node_a->ptrs[n].ptr,
+ node_b->ptrs[m].ptr,
+ next_move,
+ 1, level + 1);
+ if (rc != 0)
+ return rc;
+ } else {
+ rc = acl_merge_intersect(
+ context, node_a, n,
+ node_b, m, next_move,
+ level, &intersect_ptr);
+ if (rc != 0)
+ return rc;
+ }
+ }
+ }
+ }
+ }
+
+ /* Compact pointers */
+ node_a->min_add = min_add_a;
+ acl_compact_node_ptrs(node_a);
+ node_b->min_add = min_add_b;
+ acl_compact_node_ptrs(node_b);
+
+ /*
+ * Either COPY or MOVE pointers from B to A
+ */
+ acl_intersect(&node_a->values, &node_b->values, &intersect_values);
+
+ if (move && node_b->ref_count == 1) {
+ for (m = 0; m < node_b->num_ptrs; m++) {
+ if (node_b->ptrs[m].ptr != NULL &&
+ acl_move_ptr(context, node_a, node_b, m,
+ &intersect_values) < 0)
+ return -1;
+ }
+ } else {
+ for (m = 0; m < node_b->num_ptrs; m++) {
+ if (node_b->ptrs[m].ptr != NULL &&
+ acl_copy_ptr(context, node_a, node_b, m,
+ &intersect_values) < 0)
+ return -1;
+ }
+ }
+
+ /*
+ * Free node if it's empty (no longer used)
+ */
+ if (acl_empty(node_b))
+ acl_free_node(context, node_b);
+ return 0;
+}
+
+static int
+acl_resolve_leaf(struct acl_build_context *context,
+ struct rte_acl_node *node_a,
+ struct rte_acl_node *node_b,
+ struct rte_acl_node **node_c)
+{
+ uint32_t n;
+ int combined_priority = ACL_PRIORITY_EQUAL;
+
+ for (n = 0; n < context->cfg.num_categories; n++) {
+ if (node_a->mrt->priority[n] != node_b->mrt->priority[n]) {
+ combined_priority |= (node_a->mrt->priority[n] >
+ node_b->mrt->priority[n]) ?
+ ACL_PRIORITY_NODE_A : ACL_PRIORITY_NODE_B;
+ }
+ }
+
+ /*
+ * if node a is higher or equal priority for all categories,
+ * then return node_a.
+ */
+ if (combined_priority == ACL_PRIORITY_NODE_A ||
+ combined_priority == ACL_PRIORITY_EQUAL) {
+ *node_c = node_a;
+ return 0;
+ }
+
+ /*
+ * if node b is higher or equal priority for all categories,
+ * then return node_b.
+ */
+ if (combined_priority == ACL_PRIORITY_NODE_B) {
+ *node_c = node_b;
+ return 0;
+ }
+
+ /*
+ * mixed priorities - create a new node with the highest priority
+ * for each category.
+ */
+
+ /* force new duplication. */
+ node_a->next = NULL;
+
+ *node_c = acl_dup_node(context, node_a);
+ for (n = 0; n < context->cfg.num_categories; n++) {
+ if ((*node_c)->mrt->priority[n] < node_b->mrt->priority[n]) {
+ (*node_c)->mrt->priority[n] = node_b->mrt->priority[n];
+ (*node_c)->mrt->results[n] = node_b->mrt->results[n];
+ }
+ }
+ return 0;
+}
+
+/*
+* Within the existing trie structure, determine which nodes are
+* part of the subtree of the trie to be merged.
+*
+* For these purposes, a subtree is defined as the set of nodes that
+* are 1) not a superset of the intersection with the same level of
+* the merging tree, and 2) do not have any references from a node
+* outside of the subtree.
+*/
+static void
+mark_subtree(struct rte_acl_node *node,
+ struct rte_acl_bitset *level_bits,
+ uint32_t level,
+ uint32_t id)
+{
+ uint32_t n;
+
+ /* mark this node as part of the subtree */
+ node->subtree_id = id | RTE_ACL_SUBTREE_NODE;
+
+ for (n = 0; n < node->num_ptrs; n++) {
+
+ if (node->ptrs[n].ptr != NULL) {
+
+ struct rte_acl_bitset intersect_bits;
+ int intersect;
+
+ /*
+ * Item 1) :
+ * check if this child pointer is not a superset of the
+ * same level of the merging tree.
+ */
+ intersect = acl_intersect_type(&node->ptrs[n].values,
+ &level_bits[level],
+ &intersect_bits);
+
+ if ((intersect & ACL_INTERSECT_A) == 0) {
+
+ struct rte_acl_node *child = node->ptrs[n].ptr;
+
+ /*
+ * reset subtree reference if this is
+ * the first visit by this subtree.
+ */
+ if (child->subtree_id != id) {
+ child->subtree_id = id;
+ child->subtree_ref_count = 0;
+ }
+
+ /*
+ * Item 2) :
+ * increment the subtree reference count and if
+ * all references are from this subtree then
+ * recurse to that child
+ */
+ child->subtree_ref_count++;
+ if (child->subtree_ref_count ==
+ child->ref_count)
+ mark_subtree(child, level_bits,
+ level + 1, id);
+ }
+ }
+ }
+}
+
+/*
+ * Build the set of bits that define the set of transitions
+ * for each level of a trie.
+ */
+static void
+build_subset_mask(struct rte_acl_node *node,
+ struct rte_acl_bitset *level_bits,
+ int level)
+{
+ uint32_t n;
+
+ /* Add this node's transitions to the set for this level */
+ for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
+ level_bits[level].bits[n] &= node->values.bits[n];
+
+ /* For each child, add the transitions for the next level */
+ for (n = 0; n < node->num_ptrs; n++)
+ if (node->ptrs[n].ptr != NULL)
+ build_subset_mask(node->ptrs[n].ptr, level_bits,
+ level + 1);
+}
+
+
+/*
+ * Merge nodes A and B together,
+ * returns a node that is the path for the intersection
+ *
+ * If match node (leaf on trie)
+ * For each category
+ * return node = highest priority result
+ *
+ * Create C as a duplicate of A to point to child intersections
+ * If any pointers in C intersect with any in B
+ * For each intersection
+ * merge children
+ * remove intersection from C pointer
+ * add a pointer from C to child intersection node
+ * Compact the pointers in A and B
+ * Copy any B pointers that are outside of the intersection to C
+ * If C has no references to the B trie
+ * free C and return A
+ * Else If C has no references to the A trie
+ * free C and return B
+ * Else
+ * return C
+ */
+static int
+acl_merge_trie(struct acl_build_context *context,
+ struct rte_acl_node *node_a, struct rte_acl_node *node_b,
+ uint32_t level, uint32_t subtree_id, struct rte_acl_node **return_c)
+{
+ uint32_t n, m, ptrs_c, ptrs_b;
+ uint32_t min_add_c, min_add_b;
+ int node_intersect_type;
+ struct rte_acl_bitset node_intersect;
+ struct rte_acl_node *node_c;
+ struct rte_acl_node *node_a_next;
+ int node_b_refs;
+ int node_a_refs;
+
+ node_c = node_a;
+ node_a_next = node_a->next;
+ min_add_c = 0;
+ min_add_b = 0;
+ node_a_refs = node_a->num_ptrs;
+ node_b_refs = 0;
+ node_intersect_type = 0;
+
+ /* Resolve leaf nodes (matches) */
+ if (node_a->match_flag != 0) {
+ acl_resolve_leaf(context, node_a, node_b, return_c);
+ return 0;
+ }
+
+ /*
+ * Create node C as a copy of node A if node A is not part of
+ * a subtree of the merging tree (node B side). Otherwise,
+ * just use node A.
+ */
+ if (level > 0 &&
+ node_a->subtree_id !=
+ (subtree_id | RTE_ACL_SUBTREE_NODE)) {
+ node_c = acl_dup_node(context, node_a);
+ node_c->subtree_id = subtree_id | RTE_ACL_SUBTREE_NODE;
+ }
+
+ /*
+ * If the two node transitions intersect then merge the transitions.
+ * Check intersection for entire node (all pointers)
+ */
+ node_intersect_type = acl_intersect_type(&node_c->values,
+ &node_b->values,
+ &node_intersect);
+
+ if (node_intersect_type & ACL_INTERSECT) {
+
+ min_add_b = node_b->min_add;
+ node_b->min_add = node_b->num_ptrs;
+ ptrs_b = node_b->num_ptrs;
+
+ min_add_c = node_c->min_add;
+ node_c->min_add = node_c->num_ptrs;
+ ptrs_c = node_c->num_ptrs;
+
+ for (n = 0; n < ptrs_c; n++) {
+ if (node_c->ptrs[n].ptr == NULL) {
+ node_a_refs--;
+ continue;
+ }
+ node_c->ptrs[n].ptr->next = NULL;
+ for (m = 0; m < ptrs_b; m++) {
+
+ struct rte_acl_bitset child_intersect;
+ int child_intersect_type;
+ struct rte_acl_node *child_node_c = NULL;
+
+ if (node_b->ptrs[m].ptr == NULL ||
+ node_c->ptrs[n].ptr ==
+ node_b->ptrs[m].ptr)
+ continue;
+
+ child_intersect_type = acl_intersect_type(
+ &node_c->ptrs[n].values,
+ &node_b->ptrs[m].values,
+ &child_intersect);
+
+ if ((child_intersect_type & ACL_INTERSECT) !=
+ 0) {
+ if (acl_merge_trie(context,
+ node_c->ptrs[n].ptr,
+ node_b->ptrs[m].ptr,
+ level + 1, subtree_id,
+ &child_node_c))
+ return 1;
+
+ if (child_node_c != NULL &&
+ child_node_c !=
+ node_c->ptrs[n].ptr) {
+
+ node_b_refs++;
+
+ /*
+ * Added link from C to
+ * child_C for all transitions
+ * in the intersection.
+ */
+ acl_add_ptr(context, node_c,
+ child_node_c,
+ &child_intersect);
+
+ /*
+ * inc refs if pointer is not
+ * to node b.
+ */
+ node_a_refs += (child_node_c !=
+ node_b->ptrs[m].ptr);
+
+ /*
+ * Remove intersection from C
+ * pointer.
+ */
+ if (!acl_exclude(
+ &node_c->ptrs[n].values,
+ &node_c->ptrs[n].values,
+ &child_intersect)) {
+ acl_deref_ptr(context,
+ node_c, n);
+ node_c->ptrs[n].ptr =
+ NULL;
+ node_a_refs--;
+ }
+ }
+ }
+ }
+ }
+
+ /* Compact pointers */
+ node_c->min_add = min_add_c;
+ acl_compact_node_ptrs(node_c);
+ node_b->min_add = min_add_b;
+ acl_compact_node_ptrs(node_b);
+ }
+
+ /*
+ * Copy pointers outside of the intersection from B to C
+ */
+ if ((node_intersect_type & ACL_INTERSECT_B) != 0) {
+ node_b_refs++;
+ for (m = 0; m < node_b->num_ptrs; m++)
+ if (node_b->ptrs[m].ptr != NULL)
+ acl_copy_ptr(context, node_c,
+ node_b, m, &node_intersect);
+ }
+
+ /*
+ * Free node C if top of trie is contained in A or B
+ * if node C is a duplicate of node A &&
+ * node C was not an existing duplicate
+ */
+ if (node_c != node_a && node_c != node_a_next) {
+
+ /*
+ * if the intersection has no references to the
+ * B side, then it is contained in A
+ */
+ if (node_b_refs == 0) {
+ acl_free_node(context, node_c);
+ node_c = node_a;
+ } else {
+ /*
+ * if the intersection has no references to the
+ * A side, then it is contained in B.
+ */
+ if (node_a_refs == 0) {
+ acl_free_node(context, node_c);
+ node_c = node_b;
+ }
+ }
+ }
+
+ if (return_c != NULL)
+ *return_c = node_c;
+
+ if (level == 0)
+ acl_free_node(context, node_b);
+
+ return 0;
+}
+
+/*
+ * Reset current runtime fields before next build:
+ * - free allocated RT memory.
+ * - reset all RT related fields to zero.
+ */
+static void
+acl_build_reset(struct rte_acl_ctx *ctx)
+{
+ rte_free(ctx->mem);
+ memset(&ctx->num_categories, 0,
+ sizeof(*ctx) - offsetof(struct rte_acl_ctx, num_categories));
+}
+
+static void
+acl_gen_range(struct acl_build_context *context,
+ const uint8_t *hi, const uint8_t *lo, int size, int level,
+ struct rte_acl_node *root, struct rte_acl_node *end)
+{
+ struct rte_acl_node *node, *prev;
+ uint32_t n;
+
+ prev = root;
+ for (n = size - 1; n > 0; n--) {
+ node = acl_alloc_node(context, level++);
+ acl_add_ptr_range(context, prev, node, lo[n], hi[n]);
+ prev = node;
+ }
+ acl_add_ptr_range(context, prev, end, lo[0], hi[0]);
+}
+
+static struct rte_acl_node *
+acl_gen_range_trie(struct acl_build_context *context,
+ const void *min, const void *max,
+ int size, int level, struct rte_acl_node **pend)
+{
+ int32_t n;
+ struct rte_acl_node *root;
+ const uint8_t *lo = (const uint8_t *)min;
+ const uint8_t *hi = (const uint8_t *)max;
+
+ *pend = acl_alloc_node(context, level+size);
+ root = acl_alloc_node(context, level++);
+
+ if (lo[size - 1] == hi[size - 1]) {
+ acl_gen_range(context, hi, lo, size, level, root, *pend);
+ } else {
+ uint8_t limit_lo[64];
+ uint8_t limit_hi[64];
+ uint8_t hi_ff = UINT8_MAX;
+ uint8_t lo_00 = 0;
+
+ memset(limit_lo, 0, RTE_DIM(limit_lo));
+ memset(limit_hi, UINT8_MAX, RTE_DIM(limit_hi));
+
+ for (n = size - 2; n >= 0; n--) {
+ hi_ff = (uint8_t)(hi_ff & hi[n]);
+ lo_00 = (uint8_t)(lo_00 | lo[n]);
+ }
+
+ if (hi_ff != UINT8_MAX) {
+ limit_lo[size - 1] = hi[size - 1];
+ acl_gen_range(context, hi, limit_lo, size, level,
+ root, *pend);
+ }
+
+ if (lo_00 != 0) {
+ limit_hi[size - 1] = lo[size - 1];
+ acl_gen_range(context, limit_hi, lo, size, level,
+ root, *pend);
+ }
+
+ if (hi[size - 1] - lo[size - 1] > 1 ||
+ lo_00 == 0 ||
+ hi_ff == UINT8_MAX) {
+ limit_lo[size-1] = (uint8_t)(lo[size-1] + (lo_00 != 0));
+ limit_hi[size-1] = (uint8_t)(hi[size-1] -
+ (hi_ff != UINT8_MAX));
+ acl_gen_range(context, limit_hi, limit_lo, size,
+ level, root, *pend);
+ }
+ }
+ return root;
+}
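+
+/*
+ * Example (illustrative): for a two-byte range whose top bytes differ,
+ * e.g. 0x1203..0x56f0, the code above emits up to three sub-ranges that
+ * together cover the interval: 0x1203..0x12ff (low end), 0x5600..0x56f0
+ * (high end) and 0x1300..0x55ff (the full-byte middle), each built with
+ * acl_gen_range() and all ending at the shared *pend node.
+ */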
+
+static struct rte_acl_node *
+acl_gen_mask_trie(struct acl_build_context *context,
+ const void *value, const void *mask,
+ int size, int level, struct rte_acl_node **pend)
+{
+ int32_t n;
+ struct rte_acl_node *root;
+ struct rte_acl_node *node, *prev;
+ struct rte_acl_bitset bits;
+ const uint8_t *val = (const uint8_t *)value;
+ const uint8_t *msk = (const uint8_t *)mask;
+
+ root = acl_alloc_node(context, level++);
+ prev = root;
+
+ for (n = size - 1; n >= 0; n--) {
+ node = acl_alloc_node(context, level++);
+ acl_gen_mask(&bits, val[n] & msk[n], msk[n]);
+ acl_add_ptr(context, prev, node, &bits);
+ prev = node;
+ }
+
+ *pend = prev;
+ return root;
+}
+
+static struct rte_acl_node *
+build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
+ struct rte_acl_build_rule **last, uint32_t *count)
+{
+ uint32_t n, m;
+ int field_index, node_count;
+ struct rte_acl_node *trie;
+ struct rte_acl_build_rule *prev, *rule;
+ struct rte_acl_node *end, *merge, *root, *end_prev;
+ const struct rte_acl_field *fld;
+ struct rte_acl_bitset level_bits[RTE_ACL_MAX_LEVELS];
+
+ prev = head;
+ rule = head;
+
+ trie = acl_alloc_node(context, 0);
+ if (trie == NULL)
+ return NULL;
+
+ while (rule != NULL) {
+
+ root = acl_alloc_node(context, 0);
+ if (root == NULL)
+ return NULL;
+
+ root->ref_count = 1;
+ end = root;
+
+ for (n = 0; n < rule->config->num_fields; n++) {
+
+ field_index = rule->config->defs[n].field_index;
+ fld = rule->f->field + field_index;
+ end_prev = end;
+
+ /* build a mini-trie for this field */
+ switch (rule->config->defs[n].type) {
+
+ case RTE_ACL_FIELD_TYPE_BITMASK:
+ merge = acl_gen_mask_trie(context,
+ &fld->value,
+ &fld->mask_range,
+ rule->config->defs[n].size,
+ end->level + 1,
+ &end);
+ break;
+
+ case RTE_ACL_FIELD_TYPE_MASK:
+ {
+ /*
+ * build a bitmask whose mask_range most significant
+ * bits (the prefix length) of the field are set.
+ */
+ uint64_t mask;
+
+ if (fld->mask_range.u32 == 0) {
+ mask = 0;
+
+ /*
+ * shift -1 left by (field width - prefix length)
+ * so the top mask_range bits of the field are set.
+ */
+ } else {
+ mask = -1 <<
+ (rule->config->defs[n].size *
+ CHAR_BIT - fld->mask_range.u32);
+ }
+
+ /* gen a mini-trie for this field */
+ merge = acl_gen_mask_trie(context,
+ &fld->value,
+ (char *)&mask,
+ rule->config->defs[n].size,
+ end->level + 1,
+ &end);
+ }
+ break;
+
+ case RTE_ACL_FIELD_TYPE_RANGE:
+ merge = acl_gen_range_trie(context,
+ &rule->f->field[field_index].value,
+ &rule->f->field[field_index].mask_range,
+ rule->config->defs[n].size,
+ end->level + 1,
+ &end);
+ break;
+
+ default:
+ RTE_LOG(ERR, ACL,
+ "Error in rule[%u] type - %hhu\n",
+ rule->f->data.userdata,
+ rule->config->defs[n].type);
+ return NULL;
+ }
+
+ /* merge this field on to the end of the rule */
+ if (acl_merge_trie(context, end_prev, merge, 0,
+ 0, NULL) != 0) {
+ return NULL;
+ }
+ }
+
+ end->match_flag = ++context->num_build_rules;
+
+ /*
+ * Setup the results for this rule.
+ * The result and priority of each category.
+ */
+ if (end->mrt == NULL &&
+ (end->mrt = acl_build_alloc(context, 1,
+ sizeof(*end->mrt))) == NULL)
+ return NULL;
+
+ for (m = 0; m < context->cfg.num_categories; m++) {
+ if (rule->f->data.category_mask & (1 << m)) {
+ end->mrt->results[m] = rule->f->data.userdata;
+ end->mrt->priority[m] = rule->f->data.priority;
+ } else {
+ end->mrt->results[m] = 0;
+ end->mrt->priority[m] = 0;
+ }
+ }
+
+ node_count = context->num_nodes;
+
+ memset(&level_bits[0], UINT8_MAX, sizeof(level_bits));
+ build_subset_mask(root, &level_bits[0], 0);
+ mark_subtree(trie, &level_bits[0], 0, end->match_flag);
+ (*count)++;
+
+ /* merge this rule into the trie */
+ if (acl_merge_trie(context, trie, root, 0, end->match_flag,
+ NULL))
+ return NULL;
+
+ node_count = context->num_nodes - node_count;
+ if (node_count > NODE_MAX) {
+ *last = prev;
+ return trie;
+ }
+
+ prev = rule;
+ rule = rule->next;
+ }
+
+ *last = NULL;
+ return trie;
+}
+
+static int
+acl_calc_wildness(struct rte_acl_build_rule *head,
+ const struct rte_acl_config *config)
+{
+ uint32_t n;
+ struct rte_acl_build_rule *rule;
+
+ for (rule = head; rule != NULL; rule = rule->next) {
+
+ for (n = 0; n < config->num_fields; n++) {
+
+ double wild = 0;
+ double size = CHAR_BIT * config->defs[n].size;
+ int field_index = config->defs[n].field_index;
+ const struct rte_acl_field *fld = rule->f->field +
+ field_index;
+
+ switch (rule->config->defs[n].type) {
+ case RTE_ACL_FIELD_TYPE_BITMASK:
+ wild = (size - __builtin_popcount(
+ fld->mask_range.u8)) /
+ size;
+ break;
+
+ case RTE_ACL_FIELD_TYPE_MASK:
+ wild = (size - fld->mask_range.u32) / size;
+ break;
+
+ case RTE_ACL_FIELD_TYPE_RANGE:
+ switch (rule->config->defs[n].size) {
+ case sizeof(uint8_t):
+ wild = ((double)fld->mask_range.u8 -
+ fld->value.u8) / UINT8_MAX;
+ break;
+ case sizeof(uint16_t):
+ wild = ((double)fld->mask_range.u16 -
+ fld->value.u16) / UINT16_MAX;
+ break;
+ case sizeof(uint32_t):
+ wild = ((double)fld->mask_range.u32 -
+ fld->value.u32) / UINT32_MAX;
+ break;
+ case sizeof(uint64_t):
+ wild = ((double)fld->mask_range.u64 -
+ fld->value.u64) / UINT64_MAX;
+ break;
+ default:
+ RTE_LOG(ERR, ACL,
+ "%s(rule: %u) invalid %u-th "
+ "field, type: %hhu, "
+ "unknown size: %hhu\n",
+ __func__,
+ rule->f->data.userdata,
+ n,
+ rule->config->defs[n].type,
+ rule->config->defs[n].size);
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ RTE_LOG(ERR, ACL,
+ "%s(rule: %u) invalid %u-th "
+ "field, unknown type: %hhu\n",
+ __func__,
+ rule->f->data.userdata,
+ n,
+ rule->config->defs[n].type);
+ return -EINVAL;
+
+ }
+
+ rule->wildness[field_index] = (uint32_t)(wild * 100);
+ }
+ }
+
+ return 0;
+}
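+
+/*
+ * Example (illustrative): wildness is the fraction of the field left
+ * unconstrained, scaled to 0..100. A 32-bit MASK field with a /24 prefix
+ * gives (32 - 24) / 32 = 25; a BITMASK byte with mask 0xf0 gives
+ * (8 - 4) / 8 = 50; a 16-bit RANGE of 1000..2000 gives
+ * (2000 - 1000) / 65535, which rounds down to 1.
+ */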
+
+static int
+acl_rule_stats(struct rte_acl_build_rule *head, struct rte_acl_config *config,
+ uint32_t *wild_limit)
+{
+ int min;
+ struct rte_acl_build_rule *rule;
+ uint32_t n, m, fields_deactivated = 0;
+ uint32_t start = 0, deactivate = 0;
+ int tally[RTE_ACL_MAX_LEVELS][TALLY_NUM];
+
+ memset(tally, 0, sizeof(tally));
+
+ for (rule = head; rule != NULL; rule = rule->next) {
+
+ for (n = 0; n < config->num_fields; n++) {
+ uint32_t field_index = config->defs[n].field_index;
+
+ tally[n][TALLY_0]++;
+ for (m = 1; m < RTE_DIM(wild_limits); m++) {
+ if (rule->wildness[field_index] >=
+ wild_limits[m])
+ tally[n][m]++;
+ }
+ }
+
+ for (n = config->num_fields - 1; n > 0; n--) {
+ uint32_t field_index = config->defs[n].field_index;
+
+ if (rule->wildness[field_index] == 100)
+ tally[n][TALLY_DEPTH]++;
+ else
+ break;
+ }
+ }
+
+ /*
+ * Look for any field that is always wild and drop it from the config
+ * Only deactivate if all fields for a given input loop are deactivated.
+ */
+ for (n = 1; n < config->num_fields; n++) {
+ if (config->defs[n].input_index !=
+ config->defs[n - 1].input_index) {
+ for (m = start; m < n; m++)
+ tally[m][TALLY_DEACTIVATED] = deactivate;
+ fields_deactivated += deactivate;
+ start = n;
+ deactivate = 1;
+ }
+
+ /* if the field is not always completely wild */
+ if (tally[n][TALLY_100] != tally[n][TALLY_0])
+ deactivate = 0;
+ }
+
+ for (m = start; m < n; m++)
+ tally[m][TALLY_DEACTIVATED] = deactivate;
+
+ fields_deactivated += deactivate;
+
+ /* remove deactivated fields */
+ if (fields_deactivated) {
+ uint32_t k, l = 0;
+
+ for (k = 0; k < config->num_fields; k++) {
+ if (tally[k][TALLY_DEACTIVATED] == 0) {
+ memcpy(&tally[l][0], &tally[k][0],
+ TALLY_NUM * sizeof(tally[0][0]));
+ memcpy(&config->defs[l++],
+ &config->defs[k],
+ sizeof(struct rte_acl_field_def));
+ }
+ }
+ config->num_fields = l;
+ }
+
+ min = RTE_ACL_SINGLE_TRIE_SIZE;
+ if (config->num_fields == 2)
+ min *= 4;
+ else if (config->num_fields == 3)
+ min *= 3;
+ else if (config->num_fields == 4)
+ min *= 2;
+
+ if (tally[0][TALLY_0] < min)
+ return 0;
+ for (n = 0; n < config->num_fields; n++)
+ wild_limit[n] = 0;
+
+ /*
+ * If trailing fields are 100% wild, group those together.
+ * This allows the search length of the trie to be shortened.
+ */
+ for (n = 1; n < config->num_fields; n++) {
+
+ double rule_percentage = (double)tally[n][TALLY_DEPTH] /
+ tally[n][0];
+
+ if (rule_percentage > RULE_PERCENTAGE) {
+ /* if it crosses an input boundary then round up */
+ while (config->defs[n - 1].input_index ==
+ config->defs[n].input_index)
+ n++;
+
+ /* set the limit for selecting rules */
+ while (n < config->num_fields)
+ wild_limit[n++] = 100;
+
+ if (wild_limit[n - 1] == 100)
+ return 1;
+ }
+ }
+
+ /* look for the most wild that's 40% or more of the rules */
+ for (n = 1; n < config->num_fields; n++) {
+ for (m = TALLY_100; m > 0; m--) {
+
+ double rule_percentage = (double)tally[n][m] /
+ tally[n][0];
+
+ if (tally[n][TALLY_DEACTIVATED] == 0 &&
+ tally[n][TALLY_0] >
+ RTE_ACL_SINGLE_TRIE_SIZE &&
+ rule_percentage > NODE_PERCENTAGE &&
+ rule_percentage < 0.80) {
+ wild_limit[n] = wild_limits[m];
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
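+
+/*
+ * Example (illustrative): the heuristics above pick a wildness cut-off per
+ * field. If the trailing fields of more than RULE_PERCENTAGE of the rules
+ * are 100% wild, those fields get wild_limit = 100; otherwise, if for some
+ * field more than NODE_PERCENTAGE (but less than 80%) of a sufficiently
+ * large rule set is at least, say, 75% wild, wild_limit for that field
+ * becomes 75 and the caller splits those rules into a separate trie.
+ */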
+
+static int
+order(struct rte_acl_build_rule **insert, struct rte_acl_build_rule *rule)
+{
+ uint32_t n;
+ struct rte_acl_build_rule *left = *insert;
+
+ if (left == NULL)
+ return 0;
+
+ for (n = 1; n < left->config->num_fields; n++) {
+ int field_index = left->config->defs[n].field_index;
+
+ if (left->wildness[field_index] != rule->wildness[field_index])
+ return (left->wildness[field_index] >=
+ rule->wildness[field_index]);
+ }
+ return 0;
+}
+
+static struct rte_acl_build_rule *
+ordered_insert_rule(struct rte_acl_build_rule *head,
+ struct rte_acl_build_rule *rule)
+{
+ struct rte_acl_build_rule **insert;
+
+ if (rule == NULL)
+ return head;
+
+ rule->next = head;
+ if (head == NULL)
+ return rule;
+
+ insert = &head;
+ while (order(insert, rule))
+ insert = &(*insert)->next;
+
+ rule->next = *insert;
+ *insert = rule;
+ return head;
+}
+
+static struct rte_acl_build_rule *
+sort_rules(struct rte_acl_build_rule *head)
+{
+ struct rte_acl_build_rule *rule, *reordered_head = NULL;
+ struct rte_acl_build_rule *last_rule = NULL;
+
+ for (rule = head; rule != NULL; rule = rule->next) {
+ reordered_head = ordered_insert_rule(reordered_head, last_rule);
+ last_rule = rule;
+ }
+
+ if (last_rule != reordered_head)
+ reordered_head = ordered_insert_rule(reordered_head, last_rule);
+
+ return reordered_head;
+}
+
+static uint32_t
+acl_build_index(const struct rte_acl_config *config, uint32_t *data_index)
+{
+ uint32_t n, m;
+ int32_t last_header;
+
+ m = 0;
+ last_header = -1;
+
+ for (n = 0; n < config->num_fields; n++) {
+ if (last_header != config->defs[n].input_index) {
+ last_header = config->defs[n].input_index;
+ data_index[m++] = config->defs[n].offset;
+ }
+ }
+
+ return m;
+}
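+
+/*
+ * Example (illustrative): fields that share an input_index are fetched from
+ * the same 4-byte chunk of input, so a config with five fields whose
+ * input_index values are {0, 1, 1, 2, 2} yields three data_index entries,
+ * one offset per distinct chunk.
+ */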
+
+static int
+acl_build_tries(struct acl_build_context *context,
+ struct rte_acl_build_rule *head)
+{
+ int32_t rc;
+ uint32_t n, m, num_tries;
+ struct rte_acl_config *config;
+ struct rte_acl_build_rule *last, *rule;
+ uint32_t wild_limit[RTE_ACL_MAX_LEVELS];
+ struct rte_acl_build_rule *rule_sets[RTE_ACL_MAX_TRIES];
+
+ config = head->config;
+ rule = head;
+ rule_sets[0] = head;
+ num_tries = 1;
+
+ /* initialize tries */
+ for (n = 0; n < RTE_DIM(context->tries); n++) {
+ context->tries[n].type = RTE_ACL_UNUSED_TRIE;
+ context->bld_tries[n].trie = NULL;
+ context->tries[n].count = 0;
+ context->tries[n].smallest = INT32_MAX;
+ }
+
+ context->tries[0].type = RTE_ACL_FULL_TRIE;
+
+ /* calc wildness of each field of each rule */
+ rc = acl_calc_wildness(head, config);
+ if (rc != 0)
+ return rc;
+
+ n = acl_rule_stats(head, config, &wild_limit[0]);
+
+ /* put all rules that fit the wildness criteria into a separate trie */
+ while (n > 0 && num_tries < RTE_ACL_MAX_TRIES) {
+
+ struct rte_acl_config *new_config;
+ struct rte_acl_build_rule **prev = &rule_sets[num_tries - 1];
+ struct rte_acl_build_rule *next = head->next;
+
+ new_config = acl_build_alloc(context, 1, sizeof(*new_config));
+ if (new_config == NULL) {
+ RTE_LOG(ERR, ACL,
+ "Failed to get space for new config\n");
+ return -ENOMEM;
+ }
+
+ memcpy(new_config, config, sizeof(*new_config));
+ config = new_config;
+ rule_sets[num_tries] = NULL;
+
+ for (rule = head; rule != NULL; rule = next) {
+
+ int move = 1;
+
+ next = rule->next;
+ for (m = 0; m < config->num_fields; m++) {
+ int x = config->defs[m].field_index;
+ if (rule->wildness[x] < wild_limit[m]) {
+ move = 0;
+ break;
+ }
+ }
+
+ if (move) {
+ rule->config = new_config;
+ rule->next = rule_sets[num_tries];
+ rule_sets[num_tries] = rule;
+ *prev = next;
+ } else
+ prev = &rule->next;
+ }
+
+ head = rule_sets[num_tries];
+ n = acl_rule_stats(rule_sets[num_tries], config,
+ &wild_limit[0]);
+ num_tries++;
+ }
+
+ if (n > 0)
+ RTE_LOG(DEBUG, ACL,
+ "Number of tries(%d) exceeded.\n", RTE_ACL_MAX_TRIES);
+
+ for (n = 0; n < num_tries; n++) {
+
+ rule_sets[n] = sort_rules(rule_sets[n]);
+ context->tries[n].type = RTE_ACL_FULL_TRIE;
+ context->tries[n].count = 0;
+ context->tries[n].num_data_indexes =
+ acl_build_index(rule_sets[n]->config,
+ context->data_indexes[n]);
+ context->tries[n].data_index = context->data_indexes[n];
+
+ context->bld_tries[n].trie =
+ build_trie(context, rule_sets[n],
+ &last, &context->tries[n].count);
+ if (context->bld_tries[n].trie == NULL) {
+ RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
+ return -ENOMEM;
+ }
+
+ if (last != NULL) {
+ rule_sets[num_tries++] = last->next;
+ last->next = NULL;
+ acl_free_node(context, context->bld_tries[n].trie);
+ context->tries[n].count = 0;
+
+ context->bld_tries[n].trie =
+ build_trie(context, rule_sets[n],
+ &last, &context->tries[n].count);
+ if (context->bld_tries[n].trie == NULL) {
+ RTE_LOG(ERR, ACL,
+ "Build of %u-th trie failed\n", n);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ context->num_tries = num_tries;
+ return 0;
+}
+
+static void
+acl_build_log(const struct acl_build_context *ctx)
+{
+ uint32_t n;
+
+ RTE_LOG(DEBUG, ACL, "Build phase for ACL \"%s\":\n"
+ "memory consumed: %zu\n",
+ ctx->acx->name,
+ ctx->pool.alloc);
+
+ for (n = 0; n < RTE_DIM(ctx->tries); n++) {
+ if (ctx->tries[n].count != 0)
+ RTE_LOG(DEBUG, ACL,
+ "trie %u: number of rules: %u\n",
+ n, ctx->tries[n].count);
+ }
+}
+
+static int
+acl_build_rules(struct acl_build_context *bcx)
+{
+ struct rte_acl_build_rule *br, *head;
+ const struct rte_acl_rule *rule;
+ uint32_t *wp;
+ uint32_t fn, i, n, num;
+ size_t ofs, sz;
+
+ fn = bcx->cfg.num_fields;
+ n = bcx->acx->num_rules;
+ ofs = n * sizeof(*br);
+ sz = ofs + n * fn * sizeof(*wp);
+
+ br = tb_alloc(&bcx->pool, sz);
+ if (br == NULL) {
+ RTE_LOG(ERR, ACL, "ACL context %s: failed to create a copy "
+ "of %u build rules (%zu bytes)\n",
+ bcx->acx->name, n, sz);
+ return -ENOMEM;
+ }
+
+ wp = (uint32_t *)((uintptr_t)br + ofs);
+ num = 0;
+ head = NULL;
+
+ for (i = 0; i != n; i++) {
+ rule = (const struct rte_acl_rule *)
+ ((uintptr_t)bcx->acx->rules + bcx->acx->rule_sz * i);
+ if ((rule->data.category_mask & bcx->category_mask) != 0) {
+ br[num].next = head;
+ br[num].config = &bcx->cfg;
+ br[num].f = rule;
+ br[num].wildness = wp;
+ wp += fn;
+ head = br + num;
+ num++;
+ }
+ }
+
+ bcx->num_rules = num;
+ bcx->build_rules = head;
+
+ return 0;
+}
+
+/*
+ * Copy data_indexes for each trie into RT location.
+ */
+static void
+acl_set_data_indexes(struct rte_acl_ctx *ctx)
+{
+ uint32_t i, n, ofs;
+
+ ofs = 0;
+ for (i = 0; i != ctx->num_tries; i++) {
+ n = ctx->trie[i].num_data_indexes;
+ memcpy(ctx->data_indexes + ofs, ctx->trie[i].data_index,
+ n * sizeof(ctx->data_indexes[0]));
+ ctx->trie[i].data_index = ctx->data_indexes + ofs;
+ ofs += n;
+ }
+}
+
+
+int
+rte_acl_build(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
+{
+ int rc;
+ struct acl_build_context bcx;
+
+ if (ctx == NULL || cfg == NULL || cfg->num_categories == 0 ||
+ cfg->num_categories > RTE_ACL_MAX_CATEGORIES)
+ return -EINVAL;
+
+ acl_build_reset(ctx);
+
+ memset(&bcx, 0, sizeof(bcx));
+ bcx.acx = ctx;
+ bcx.pool.alignment = ACL_POOL_ALIGN;
+ bcx.pool.min_alloc = ACL_POOL_ALLOC_MIN;
+ bcx.cfg = *cfg;
+ bcx.category_mask = LEN2MASK(bcx.cfg.num_categories);
+
+
+ /* Create a build rules copy. */
+ rc = acl_build_rules(&bcx);
+ if (rc != 0)
+ return rc;
+
+ /* No rules to build for that context+config */
+ if (bcx.build_rules == NULL) {
+ rc = -EINVAL;
+
+ /* build internal trie representation. */
+ } else if ((rc = acl_build_tries(&bcx, bcx.build_rules)) == 0) {
+
+ /* allocate and fill run-time structures. */
+ rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
+ bcx.num_tries, bcx.cfg.num_categories,
+ RTE_ACL_IPV4VLAN_NUM * RTE_DIM(bcx.tries),
+ bcx.num_build_rules);
+ if (rc == 0) {
+
+ /* set data indexes. */
+ acl_set_data_indexes(ctx);
+
+ /* copy in build config. */
+ ctx->config = *cfg;
+ }
+ }
+
+ acl_build_log(&bcx);
+
+ /* cleanup after build. */
+ tb_free_pool(&bcx.pool);
+ return rc;
+}
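+
+/*
+ * Typical usage sketch (illustrative, simplified): an application creates a
+ * context with rte_acl_create(), adds rules with rte_acl_add_rules(), fills
+ * an rte_acl_config (num_categories plus one rte_acl_field_def per field)
+ * and then calls rte_acl_build(ctx, &cfg); on success the context can be
+ * passed to rte_acl_classify().
+ */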
diff --git a/src/dpdk_lib18/librte_acl/acl_gen.c b/src/dpdk_lib18/librte_acl/acl_gen.c
new file mode 100755
index 00000000..b1f766bb
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/acl_gen.c
@@ -0,0 +1,475 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_acl.h>
+#include "acl_vect.h"
+#include "acl.h"
+
+#define QRANGE_MIN ((uint8_t)INT8_MIN)
+
+#define RTE_ACL_VERIFY(exp) do { \
+ if (!(exp)) \
+ rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__); \
+} while (0)
+
+struct acl_node_counters {
+ int match;
+ int match_used;
+ int single;
+ int quad;
+ int quad_vectors;
+ int dfa;
+ int smallest_match;
+};
+
+struct rte_acl_indices {
+ int dfa_index;
+ int quad_index;
+ int single_index;
+ int match_index;
+};
+
+static void
+acl_gen_log_stats(const struct rte_acl_ctx *ctx,
+ const struct acl_node_counters *counts)
+{
+ RTE_LOG(DEBUG, ACL, "Gen phase for ACL \"%s\":\n"
+ "runtime memory footprint on socket %d:\n"
+ "single nodes/bytes used: %d/%zu\n"
+ "quad nodes/bytes used: %d/%zu\n"
+ "DFA nodes/bytes used: %d/%zu\n"
+ "match nodes/bytes used: %d/%zu\n"
+ "total: %zu bytes\n",
+ ctx->name, ctx->socket_id,
+ counts->single, counts->single * sizeof(uint64_t),
+ counts->quad, counts->quad_vectors * sizeof(uint64_t),
+ counts->dfa, counts->dfa * RTE_ACL_DFA_SIZE * sizeof(uint64_t),
+ counts->match,
+ counts->match * sizeof(struct rte_acl_match_results),
+ ctx->mem_sz);
+}
+
+/*
+* Counts the number of groups of sequential bits that are
+* either 0 or 1, as specified by the zero_one parameter. This is used to
+* calculate the number of ranges in a node to see if it fits in a quad range
+* node.
+*/
+static int
+acl_count_sequential_groups(struct rte_acl_bitset *bits, int zero_one)
+{
+ int n, ranges, last_bit;
+
+ ranges = 0;
+ last_bit = zero_one ^ 1;
+
+ for (n = QRANGE_MIN; n < UINT8_MAX + 1; n++) {
+ if (bits->bits[n / (sizeof(bits_t) * 8)] &
+ (1 << (n % (sizeof(bits_t) * 8)))) {
+ if (zero_one == 1 && last_bit != 1)
+ ranges++;
+ last_bit = 1;
+ } else {
+ if (zero_one == 0 && last_bit != 0)
+ ranges++;
+ last_bit = 0;
+ }
+ }
+ for (n = 0; n < QRANGE_MIN; n++) {
+ if (bits->bits[n / (sizeof(bits_t) * 8)] &
+ (1 << (n % (sizeof(bits_t) * 8)))) {
+ if (zero_one == 1 && last_bit != 1)
+ ranges++;
+ last_bit = 1;
+ } else {
+ if (zero_one == 0 && last_bit != 0)
+ ranges++;
+ last_bit = 0;
+ }
+ }
+
+ return ranges;
+}
+
+/*
+ * Count number of ranges spanned by the node's pointers
+ */
+static int
+acl_count_fanout(struct rte_acl_node *node)
+{
+ uint32_t n;
+ int ranges;
+
+ if (node->fanout != 0)
+ return node->fanout;
+
+ ranges = acl_count_sequential_groups(&node->values, 0);
+
+ for (n = 0; n < node->num_ptrs; n++) {
+ if (node->ptrs[n].ptr != NULL)
+ ranges += acl_count_sequential_groups(
+ &node->ptrs[n].values, 1);
+ }
+
+ node->fanout = ranges;
+ return node->fanout;
+}
+
+/*
+ * Determine the type of nodes and count each type
+ */
+static int
+acl_count_trie_types(struct acl_node_counters *counts,
+ struct rte_acl_node *node, int match, int force_dfa)
+{
+ uint32_t n;
+ int num_ptrs;
+
+ /* skip if this node has been counted */
+ if (node->node_type != (uint32_t)RTE_ACL_NODE_UNDEFINED)
+ return match;
+
+ if (node->match_flag != 0 || node->num_ptrs == 0) {
+ counts->match++;
+ if (node->match_flag == -1)
+ node->match_flag = match++;
+ node->node_type = RTE_ACL_NODE_MATCH;
+ if (counts->smallest_match > node->match_flag)
+ counts->smallest_match = node->match_flag;
+ return match;
+ }
+
+ num_ptrs = acl_count_fanout(node);
+
+ /* Force type to dfa */
+ if (force_dfa)
+ num_ptrs = RTE_ACL_DFA_SIZE;
+
+ /* determine node type based on number of ranges */
+ if (num_ptrs == 1) {
+ counts->single++;
+ node->node_type = RTE_ACL_NODE_SINGLE;
+ } else if (num_ptrs <= RTE_ACL_QUAD_MAX) {
+ counts->quad++;
+ counts->quad_vectors += node->fanout;
+ node->node_type = RTE_ACL_NODE_QRANGE;
+ } else {
+ counts->dfa++;
+ node->node_type = RTE_ACL_NODE_DFA;
+ }
+
+ /*
+ * recursively count the types of all children
+ */
+ for (n = 0; n < node->num_ptrs; n++) {
+ if (node->ptrs[n].ptr != NULL)
+ match = acl_count_trie_types(counts, node->ptrs[n].ptr,
+ match, 0);
+ }
+
+ return match;
+}
+
+static void
+acl_add_ptrs(struct rte_acl_node *node, uint64_t *node_array, uint64_t no_match,
+ int resolved)
+{
+ uint32_t n, x;
+ int m, ranges, last_bit;
+ struct rte_acl_node *child;
+ struct rte_acl_bitset *bits;
+ uint64_t *node_a, index, dfa[RTE_ACL_DFA_SIZE];
+
+ ranges = 0;
+ last_bit = 0;
+
+ for (n = 0; n < RTE_DIM(dfa); n++)
+ dfa[n] = no_match;
+
+ for (x = 0; x < node->num_ptrs; x++) {
+
+ child = node->ptrs[x].ptr;
+ if (child == NULL)
+ continue;
+
+ bits = &node->ptrs[x].values;
+ for (n = 0; n < RTE_DIM(dfa); n++) {
+
+ if (bits->bits[n / (sizeof(bits_t) * CHAR_BIT)] &
+ (1 << (n % (sizeof(bits_t) * CHAR_BIT)))) {
+
+ dfa[n] = resolved ? child->node_index : x;
+ ranges += (last_bit == 0);
+ last_bit = 1;
+ } else {
+ last_bit = 0;
+ }
+ }
+ }
+
+ /*
+ * Rather than going from 0 to 256, the range count and
+ * the layout are from 80-ff then 0-7f due to signed compare
+ * for SSE (cmpgt).
+ */
+ if (node->node_type == RTE_ACL_NODE_QRANGE) {
+
+ m = 0;
+ node_a = node_array;
+ index = dfa[QRANGE_MIN];
+ *node_a++ = index;
+
+ for (x = QRANGE_MIN + 1; x < UINT8_MAX + 1; x++) {
+ if (dfa[x] != index) {
+ index = dfa[x];
+ *node_a++ = index;
+ node->transitions[m++] = (uint8_t)(x - 1);
+ }
+ }
+
+ for (x = 0; x < INT8_MAX + 1; x++) {
+ if (dfa[x] != index) {
+ index = dfa[x];
+ *node_a++ = index;
+ node->transitions[m++] = (uint8_t)(x - 1);
+ }
+ }
+
+ /* fill unused locations with max value - nothing is greater */
+ for (; m < RTE_ACL_QUAD_SIZE; m++)
+ node->transitions[m] = INT8_MAX;
+
+ RTE_ACL_VERIFY(m <= RTE_ACL_QUAD_SIZE);
+
+ } else if (node->node_type == RTE_ACL_NODE_DFA && resolved) {
+ for (n = 0; n < RTE_DIM(dfa); n++)
+ node_array[n] = dfa[n];
+ }
+}
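+
+/*
+ * Example (illustrative): for a QRANGE node whose inputs 0x00..0x7f go to
+ * node A and 0x80..0xff go to node B, the signed scan order above visits
+ * 0x80 first, so the packed transitions start with B, record a single
+ * boundary at 0xff, then A; the unused boundary slots are padded with
+ * INT8_MAX.
+ */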
+
+/*
+ * Routine that allocates space for this node and recursively allocates
+ * space for each child. Once all the children are allocated,
+ * it resolves all transitions for this node.
+ */
+static void
+acl_gen_node(struct rte_acl_node *node, uint64_t *node_array,
+ uint64_t no_match, struct rte_acl_indices *index, int num_categories)
+{
+ uint32_t n, *qtrp;
+ uint64_t *array_ptr;
+ struct rte_acl_match_results *match;
+
+ if (node->node_index != RTE_ACL_NODE_UNDEFINED)
+ return;
+
+ array_ptr = NULL;
+
+ switch (node->node_type) {
+ case RTE_ACL_NODE_DFA:
+ node->node_index = index->dfa_index | node->node_type;
+ array_ptr = &node_array[index->dfa_index];
+ index->dfa_index += RTE_ACL_DFA_SIZE;
+ for (n = 0; n < RTE_ACL_DFA_SIZE; n++)
+ array_ptr[n] = no_match;
+ break;
+ case RTE_ACL_NODE_SINGLE:
+ node->node_index = RTE_ACL_QUAD_SINGLE | index->single_index |
+ node->node_type;
+ array_ptr = &node_array[index->single_index];
+ index->single_index += 1;
+ array_ptr[0] = no_match;
+ break;
+ case RTE_ACL_NODE_QRANGE:
+ array_ptr = &node_array[index->quad_index];
+ acl_add_ptrs(node, array_ptr, no_match, 0);
+ qtrp = (uint32_t *)node->transitions;
+ node->node_index = qtrp[0];
+ node->node_index <<= sizeof(index->quad_index) * CHAR_BIT;
+ node->node_index |= index->quad_index | node->node_type;
+ index->quad_index += node->fanout;
+ break;
+ case RTE_ACL_NODE_MATCH:
+ match = ((struct rte_acl_match_results *)
+ (node_array + index->match_index));
+ memcpy(match + node->match_flag, node->mrt, sizeof(*node->mrt));
+ node->node_index = node->match_flag | node->node_type;
+ break;
+ case RTE_ACL_NODE_UNDEFINED:
+ RTE_ACL_VERIFY(node->node_type !=
+ (uint32_t)RTE_ACL_NODE_UNDEFINED);
+ break;
+ }
+
+ /* recursively allocate space for all children */
+ for (n = 0; n < node->num_ptrs; n++) {
+ if (node->ptrs[n].ptr != NULL)
+ acl_gen_node(node->ptrs[n].ptr,
+ node_array,
+ no_match,
+ index,
+ num_categories);
+ }
+
+ /* All children are resolved, resolve this node's pointers */
+ switch (node->node_type) {
+ case RTE_ACL_NODE_DFA:
+ acl_add_ptrs(node, array_ptr, no_match, 1);
+ break;
+ case RTE_ACL_NODE_SINGLE:
+ for (n = 0; n < node->num_ptrs; n++) {
+ if (node->ptrs[n].ptr != NULL)
+ array_ptr[0] = node->ptrs[n].ptr->node_index;
+ }
+ break;
+ case RTE_ACL_NODE_QRANGE:
+ acl_add_ptrs(node, array_ptr, no_match, 1);
+ break;
+ case RTE_ACL_NODE_MATCH:
+ break;
+ case RTE_ACL_NODE_UNDEFINED:
+ RTE_ACL_VERIFY(node->node_type !=
+ (uint32_t)RTE_ACL_NODE_UNDEFINED);
+ break;
+ }
+}
+
+static int
+acl_calc_counts_indices(struct acl_node_counters *counts,
+ struct rte_acl_indices *indices, struct rte_acl_trie *trie,
+ struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
+ int match_num)
+{
+ uint32_t n;
+
+ memset(indices, 0, sizeof(*indices));
+ memset(counts, 0, sizeof(*counts));
+
+ /* Get stats on nodes */
+ for (n = 0; n < num_tries; n++) {
+ counts->smallest_match = INT32_MAX;
+ match_num = acl_count_trie_types(counts, node_bld_trie[n].trie,
+ match_num, 1);
+ trie[n].smallest = counts->smallest_match;
+ }
+
+ indices->dfa_index = RTE_ACL_DFA_SIZE + 1;
+ indices->quad_index = indices->dfa_index +
+ counts->dfa * RTE_ACL_DFA_SIZE;
+ indices->single_index = indices->quad_index + counts->quad_vectors;
+ indices->match_index = indices->single_index + counts->single + 1;
+ indices->match_index = RTE_ALIGN(indices->match_index,
+ (XMM_SIZE / sizeof(uint64_t)));
+
+ return match_num;
+}
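+
+/*
+ * Example (illustrative, with RTE_ACL_DFA_SIZE of 256): with 2 DFA nodes,
+ * 10 quad vectors and 3 single nodes the layout works out to
+ * dfa_index = 257, quad_index = 257 + 512 = 769, single_index = 779 and
+ * match_index = 779 + 3 + 1 = 783, rounded up to 784 by the XMM alignment
+ * (two 64-bit entries per 16-byte vector).
+ */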
+
+/*
+ * Generate the runtime structure using build structure
+ */
+int
+rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie,
+ struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
+ uint32_t num_categories, uint32_t data_index_sz, int match_num)
+{
+ void *mem;
+ size_t total_size;
+ uint64_t *node_array, no_match;
+ uint32_t n, match_index;
+ struct rte_acl_match_results *match;
+ struct acl_node_counters counts;
+ struct rte_acl_indices indices;
+
+ /* Fill counts and indices arrays from the nodes. */
+ match_num = acl_calc_counts_indices(&counts, &indices, trie,
+ node_bld_trie, num_tries, match_num);
+
+ /* Allocate runtime memory (align to cache boundary) */
+ total_size = RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE) +
+ indices.match_index * sizeof(uint64_t) +
+ (match_num + 2) * sizeof(struct rte_acl_match_results) +
+ XMM_SIZE;
+
+ mem = rte_zmalloc_socket(ctx->name, total_size, RTE_CACHE_LINE_SIZE,
+ ctx->socket_id);
+ if (mem == NULL) {
+ RTE_LOG(ERR, ACL,
+ "allocation of %zu bytes on socket %d for %s failed\n",
+ total_size, ctx->socket_id, ctx->name);
+ return -ENOMEM;
+ }
+
+ /* Fill the runtime structure */
+ match_index = indices.match_index;
+ node_array = (uint64_t *)((uintptr_t)mem +
+ RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE));
+
+ /*
+ * Setup the NOMATCH node (a SINGLE at the
+ * highest index, that points to itself)
+ */
+
+ node_array[RTE_ACL_DFA_SIZE] = RTE_ACL_DFA_SIZE | RTE_ACL_NODE_SINGLE;
+ no_match = RTE_ACL_NODE_MATCH;
+
+ for (n = 0; n < RTE_ACL_DFA_SIZE; n++)
+ node_array[n] = no_match;
+
+ match = ((struct rte_acl_match_results *)(node_array + match_index));
+ memset(match, 0, sizeof(*match));
+
+ for (n = 0; n < num_tries; n++) {
+
+ acl_gen_node(node_bld_trie[n].trie, node_array, no_match,
+ &indices, num_categories);
+
+ if (node_bld_trie[n].trie->node_index == no_match)
+ trie[n].root_index = 0;
+ else
+ trie[n].root_index = node_bld_trie[n].trie->node_index;
+ }
+
+ ctx->mem = mem;
+ ctx->mem_sz = total_size;
+ ctx->data_indexes = mem;
+ ctx->num_tries = num_tries;
+ ctx->num_categories = num_categories;
+ ctx->match_index = match_index;
+ ctx->no_match = no_match;
+ ctx->idle = node_array[RTE_ACL_DFA_SIZE];
+ ctx->trans_table = node_array;
+ memcpy(ctx->trie, trie, sizeof(ctx->trie));
+
+ acl_gen_log_stats(ctx, &counts);
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_acl/acl_run.h b/src/dpdk_lib18/librte_acl/acl_run.h
new file mode 100755
index 00000000..c191053c
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/acl_run.h
@@ -0,0 +1,268 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ACL_RUN_H_
+#define _ACL_RUN_H_
+
+#include <rte_acl.h>
+#include "acl_vect.h"
+#include "acl.h"
+
+#define MAX_SEARCHES_SSE8 8
+#define MAX_SEARCHES_SSE4 4
+#define MAX_SEARCHES_SSE2 2
+#define MAX_SEARCHES_SCALAR 2
+
+#define GET_NEXT_4BYTES(prm, idx) \
+ (*((const int32_t *)((prm)[(idx)].data + *(prm)[idx].data_index++)))
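+
+/*
+ * Illustration (hypothetical offsets): if the current data_index entry for
+ * slot idx holds offset 8, GET_NEXT_4BYTES(prm, idx) loads input bytes 8..11
+ * of that slot's packet as one 32-bit value and then advances to the next
+ * data_index entry, i.e. the next configured 4-byte input group.
+ */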
+
+
+#define RTE_ACL_NODE_INDEX ((uint32_t)~RTE_ACL_NODE_TYPE)
+
+#define SCALAR_QRANGE_MULT 0x01010101
+#define SCALAR_QRANGE_MASK 0x7f7f7f7f
+#define SCALAR_QRANGE_MIN 0x80808080
+
+/*
+ * Structure to manage N parallel trie traversals.
+ * The runtime trie traversal routines can process 8, 4, or 2 tries
+ * in parallel. Each packet may require multiple trie traversals (up to 4).
+ * This structure is used to fill the slots (0 to n-1) for parallel processing
+ * with the trie traversals needed for each packet.
+ */
+struct acl_flow_data {
+ uint32_t num_packets;
+ /* number of packets processed */
+ uint32_t started;
+ /* number of trie traversals in progress */
+ uint32_t trie;
+ /* current trie index (0 to N-1) */
+ uint32_t cmplt_size;
+ uint32_t total_packets;
+ /* maximum number of packets to process */
+ uint32_t categories;
+ /* number of result categories per packet. */
+ const uint64_t *trans;
+ const uint8_t **data;
+ uint32_t *results;
+ struct completion *last_cmplt;
+ struct completion *cmplt_array;
+};
+
+/*
+ * Structure to maintain running results for
+ * a single packet (up to 4 tries).
+ */
+struct completion {
+ uint32_t *results; /* running results. */
+ int32_t priority[RTE_ACL_MAX_CATEGORIES]; /* running priorities. */
+ uint32_t count; /* num of remaining tries */
+ /* non-zero count also marks the struct as allocated */
+} __attribute__((aligned(XMM_SIZE)));
+
+/*
+ * One parms structure for each slot in the search engine.
+ */
+struct parms {
+ const uint8_t *data;
+ /* input data for this packet */
+ const uint32_t *data_index;
+ /* data indirection for this trie */
+ struct completion *cmplt;
+ /* completion data for this packet */
+};
+
+/*
+ * Define a global idle node for unused engine slots
+ */
+static const uint32_t idle[UINT8_MAX + 1];
+
+/*
+ * Allocate a completion structure to manage the tries for a packet.
+ */
+static inline struct completion *
+alloc_completion(struct completion *p, uint32_t size, uint32_t tries,
+ uint32_t *results)
+{
+ uint32_t n;
+
+ for (n = 0; n < size; n++) {
+
+ if (p[n].count == 0) {
+
+ /* mark as allocated and set number of tries. */
+ p[n].count = tries;
+ p[n].results = results;
+ return &(p[n]);
+ }
+ }
+
+ /* should never get here */
+ return NULL;
+}
+
+/*
+ * Resolve priority for a single result trie.
+ */
+static inline void
+resolve_single_priority(uint64_t transition, int n,
+ const struct rte_acl_ctx *ctx, struct parms *parms,
+ const struct rte_acl_match_results *p)
+{
+ if (parms[n].cmplt->count == ctx->num_tries ||
+ parms[n].cmplt->priority[0] <=
+ p[transition].priority[0]) {
+
+ parms[n].cmplt->priority[0] = p[transition].priority[0];
+ parms[n].cmplt->results[0] = p[transition].results[0];
+ }
+}
+
+/*
+ * Routine to fill a slot in the parallel trie traversal array (parms) from
+ * the list of packets (flows).
+ */
+static inline uint64_t
+acl_start_next_trie(struct acl_flow_data *flows, struct parms *parms, int n,
+ const struct rte_acl_ctx *ctx)
+{
+ uint64_t transition;
+
+ /* if there are any more packets to process */
+ if (flows->num_packets < flows->total_packets) {
+ parms[n].data = flows->data[flows->num_packets];
+ parms[n].data_index = ctx->trie[flows->trie].data_index;
+
+ /* if this is the first trie for this packet */
+ if (flows->trie == 0) {
+ flows->last_cmplt = alloc_completion(flows->cmplt_array,
+ flows->cmplt_size, ctx->num_tries,
+ flows->results +
+ flows->num_packets * flows->categories);
+ }
+
+ /* set completion parameters and starting index for this slot */
+ parms[n].cmplt = flows->last_cmplt;
+ transition =
+ flows->trans[parms[n].data[*parms[n].data_index++] +
+ ctx->trie[flows->trie].root_index];
+
+ /*
+ * if this is the last trie for this packet,
+ * then set up the next packet.
+ */
+ flows->trie++;
+ if (flows->trie >= ctx->num_tries) {
+ flows->trie = 0;
+ flows->num_packets++;
+ }
+
+ /* keep track of number of active trie traversals */
+ flows->started++;
+
+ /* no more tries to process, set slot to an idle position */
+ } else {
+ transition = ctx->idle;
+ parms[n].data = (const uint8_t *)idle;
+ parms[n].data_index = idle;
+ }
+ return transition;
+}
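+
+/*
+ * For example (hypothetical burst): with ctx->num_tries == 2, successive
+ * calls hand out (packet 0, trie 0), (packet 0, trie 1), (packet 1, trie 0),
+ * (packet 1, trie 1), ... so different engine slots may be walking different
+ * tries of the same packet concurrently, sharing one completion entry per
+ * packet.
+ */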
+
+static inline void
+acl_set_flow(struct acl_flow_data *flows, struct completion *cmplt,
+ uint32_t cmplt_size, const uint8_t **data, uint32_t *results,
+ uint32_t data_num, uint32_t categories, const uint64_t *trans)
+{
+ flows->num_packets = 0;
+ flows->started = 0;
+ flows->trie = 0;
+ flows->last_cmplt = NULL;
+ flows->cmplt_array = cmplt;
+ flows->total_packets = data_num;
+ flows->categories = categories;
+ flows->cmplt_size = cmplt_size;
+ flows->data = data;
+ flows->results = results;
+ flows->trans = trans;
+}
+
+typedef void (*resolve_priority_t)
+(uint64_t transition, int n, const struct rte_acl_ctx *ctx,
+ struct parms *parms, const struct rte_acl_match_results *p,
+ uint32_t categories);
+
+/*
+ * Detect matches. If a match node transition is found, then this trie
+ * traversal is complete, so the slot is refilled with the next trie
+ * to be processed.
+ */
+static inline uint64_t
+acl_match_check(uint64_t transition, int slot,
+ const struct rte_acl_ctx *ctx, struct parms *parms,
+ struct acl_flow_data *flows, resolve_priority_t resolve_priority)
+{
+ const struct rte_acl_match_results *p;
+
+ p = (const struct rte_acl_match_results *)
+ (flows->trans + ctx->match_index);
+
+ if (transition & RTE_ACL_NODE_MATCH) {
+
+ /* Remove flags from index and decrement active traversals */
+ transition &= RTE_ACL_NODE_INDEX;
+ flows->started--;
+
+ /* Resolve priorities for this trie and running results */
+ if (flows->categories == 1)
+ resolve_single_priority(transition, slot, ctx,
+ parms, p);
+ else
+ resolve_priority(transition, slot, ctx, parms,
+ p, flows->categories);
+
+ /* Count down completed tries for this search request */
+ parms[slot].cmplt->count--;
+
+ /* Fill the slot with the next trie or idle trie */
+ transition = acl_start_next_trie(flows, parms, slot, ctx);
+
+ } else if (transition == ctx->idle) {
+ /* reset indirection table for idle slots */
+ parms[slot].data_index = idle;
+ }
+
+ return transition;
+}
+
+#endif /* _ACL_RUN_H_ */
diff --git a/src/dpdk_lib18/librte_acl/acl_run_scalar.c b/src/dpdk_lib18/librte_acl/acl_run_scalar.c
new file mode 100755
index 00000000..43c8fc3e
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/acl_run_scalar.c
@@ -0,0 +1,193 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "acl_run.h"
+
+/*
+ * Resolve priority for multiple results (scalar version).
+ * This consists of comparing the priority of the current traversal with the
+ * running set of results for the packet.
+ * For each result, keep a running array of the result (rule number) and
+ * its priority for each category.
+ */
+static inline void
+resolve_priority_scalar(uint64_t transition, int n,
+ const struct rte_acl_ctx *ctx, struct parms *parms,
+ const struct rte_acl_match_results *p, uint32_t categories)
+{
+ uint32_t i;
+ int32_t *saved_priority;
+ uint32_t *saved_results;
+ const int32_t *priority;
+ const uint32_t *results;
+
+ saved_results = parms[n].cmplt->results;
+ saved_priority = parms[n].cmplt->priority;
+
+ /* results and priorities for completed trie */
+ results = p[transition].results;
+ priority = p[transition].priority;
+
+ /* if this is not the first completed trie */
+ if (parms[n].cmplt->count != ctx->num_tries) {
+ for (i = 0; i < categories; i += RTE_ACL_RESULTS_MULTIPLIER) {
+
+ if (saved_priority[i] <= priority[i]) {
+ saved_priority[i] = priority[i];
+ saved_results[i] = results[i];
+ }
+ if (saved_priority[i + 1] <= priority[i + 1]) {
+ saved_priority[i + 1] = priority[i + 1];
+ saved_results[i + 1] = results[i + 1];
+ }
+ if (saved_priority[i + 2] <= priority[i + 2]) {
+ saved_priority[i + 2] = priority[i + 2];
+ saved_results[i + 2] = results[i + 2];
+ }
+ if (saved_priority[i + 3] <= priority[i + 3]) {
+ saved_priority[i + 3] = priority[i + 3];
+ saved_results[i + 3] = results[i + 3];
+ }
+ }
+ } else {
+ for (i = 0; i < categories; i += RTE_ACL_RESULTS_MULTIPLIER) {
+ saved_priority[i] = priority[i];
+ saved_priority[i + 1] = priority[i + 1];
+ saved_priority[i + 2] = priority[i + 2];
+ saved_priority[i + 3] = priority[i + 3];
+
+ saved_results[i] = results[i];
+ saved_results[i + 1] = results[i + 1];
+ saved_results[i + 2] = results[i + 2];
+ saved_results[i + 3] = results[i + 3];
+ }
+ }
+}
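+
+/*
+ * Note: RTE_ACL_RESULTS_MULTIPLIER equals XMM_SIZE / sizeof(uint32_t),
+ * i.e. 4, so each iteration of the loops above covers one XMM register's
+ * worth of categories, mirroring the vectorised resolve_priority_sse()
+ * in acl_run_sse.c.
+ */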
+
+/*
+ * When processing a transition, rather than using an if/else
+ * construct, the offset is calculated for both DFA and QRANGE nodes and
+ * then conditionally added to the address based on the node type.
+ * This is done to avoid branch mis-predictions. Since the
+ * offset is a rather simple calculation, it is more efficient
+ * to compute both values and pick one with a conditional move than
+ * to take a conditional branch to decide which calculation to do.
+ */
+static inline uint32_t
+scan_forward(uint32_t input, uint32_t max)
+{
+ return (input == 0) ? max : rte_bsf32(input);
+}
+
+static inline uint64_t
+scalar_transition(const uint64_t *trans_table, uint64_t transition,
+ uint8_t input)
+{
+ uint32_t addr, index, ranges, x, a, b, c;
+
+ /* break transition into component parts */
+ ranges = transition >> (sizeof(index) * CHAR_BIT);
+
+ /* calc address for a QRANGE node */
+ c = input * SCALAR_QRANGE_MULT;
+ a = ranges | SCALAR_QRANGE_MIN;
+ index = transition & ~RTE_ACL_NODE_INDEX;
+ a -= (c & SCALAR_QRANGE_MASK);
+ b = c & SCALAR_QRANGE_MIN;
+ addr = transition ^ index;
+ a &= SCALAR_QRANGE_MIN;
+ a ^= (ranges ^ b) & (a ^ b);
+ x = scan_forward(a, 32) >> 3;
+ addr += (index == RTE_ACL_NODE_DFA) ? input : x;
+
+ /* pickup next transition */
+ transition = *(trans_table + addr);
+ return transition;
+}
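+
+/*
+ * In effect (a sketch of the branchless math above): for a DFA node the
+ * input byte itself is added to the node's base index, while for a QRANGE
+ * node the added offset is the number of the node's signed 8-bit range
+ * boundaries that lie below the input byte, selecting the matching range.
+ */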
+
+int
+rte_acl_classify_scalar(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t num, uint32_t categories)
+{
+ int n;
+ uint64_t transition0, transition1;
+ uint32_t input0, input1;
+ struct acl_flow_data flows;
+ uint64_t index_array[MAX_SEARCHES_SCALAR];
+ struct completion cmplt[MAX_SEARCHES_SCALAR];
+ struct parms parms[MAX_SEARCHES_SCALAR];
+
+ if (categories != 1 &&
+ ((RTE_ACL_RESULTS_MULTIPLIER - 1) & categories) != 0)
+ return -EINVAL;
+
+ acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results, num,
+ categories, ctx->trans_table);
+
+ for (n = 0; n < MAX_SEARCHES_SCALAR; n++) {
+ cmplt[n].count = 0;
+ index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
+ }
+
+ transition0 = index_array[0];
+ transition1 = index_array[1];
+
+ while (flows.started > 0) {
+
+ input0 = GET_NEXT_4BYTES(parms, 0);
+ input1 = GET_NEXT_4BYTES(parms, 1);
+
+ for (n = 0; n < 4; n++) {
+ if (likely((transition0 & RTE_ACL_NODE_MATCH) == 0))
+ transition0 = scalar_transition(flows.trans,
+ transition0, (uint8_t)input0);
+
+ input0 >>= CHAR_BIT;
+
+ if (likely((transition1 & RTE_ACL_NODE_MATCH) == 0))
+ transition1 = scalar_transition(flows.trans,
+ transition1, (uint8_t)input1);
+
+ input1 >>= CHAR_BIT;
+
+ }
+ if ((transition0 | transition1) & RTE_ACL_NODE_MATCH) {
+ transition0 = acl_match_check(transition0,
+ 0, ctx, parms, &flows, resolve_priority_scalar);
+ transition1 = acl_match_check(transition1,
+ 1, ctx, parms, &flows, resolve_priority_scalar);
+
+ }
+ }
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_acl/acl_run_sse.c b/src/dpdk_lib18/librte_acl/acl_run_sse.c
new file mode 100755
index 00000000..69a9d775
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/acl_run_sse.c
@@ -0,0 +1,626 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "acl_run.h"
+
+enum {
+ SHUFFLE32_SLOT1 = 0xe5,
+ SHUFFLE32_SLOT2 = 0xe6,
+ SHUFFLE32_SLOT3 = 0xe7,
+ SHUFFLE32_SWAP64 = 0x4e,
+};
+
+static const rte_xmm_t mm_type_quad_range = {
+ .u32 = {
+ RTE_ACL_NODE_QRANGE,
+ RTE_ACL_NODE_QRANGE,
+ RTE_ACL_NODE_QRANGE,
+ RTE_ACL_NODE_QRANGE,
+ },
+};
+
+static const rte_xmm_t mm_type_quad_range64 = {
+ .u32 = {
+ RTE_ACL_NODE_QRANGE,
+ RTE_ACL_NODE_QRANGE,
+ 0,
+ 0,
+ },
+};
+
+static const rte_xmm_t mm_shuffle_input = {
+ .u32 = {0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c},
+};
+
+static const rte_xmm_t mm_shuffle_input64 = {
+ .u32 = {0x00000000, 0x04040404, 0x80808080, 0x80808080},
+};
+
+static const rte_xmm_t mm_ones_16 = {
+ .u16 = {1, 1, 1, 1, 1, 1, 1, 1},
+};
+
+static const rte_xmm_t mm_bytes = {
+ .u32 = {UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX},
+};
+
+static const rte_xmm_t mm_bytes64 = {
+ .u32 = {UINT8_MAX, UINT8_MAX, 0, 0},
+};
+
+static const rte_xmm_t mm_match_mask = {
+ .u32 = {
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ RTE_ACL_NODE_MATCH,
+ },
+};
+
+static const rte_xmm_t mm_match_mask64 = {
+ .u32 = {
+ RTE_ACL_NODE_MATCH,
+ 0,
+ RTE_ACL_NODE_MATCH,
+ 0,
+ },
+};
+
+static const rte_xmm_t mm_index_mask = {
+ .u32 = {
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ },
+};
+
+static const rte_xmm_t mm_index_mask64 = {
+ .u32 = {
+ RTE_ACL_NODE_INDEX,
+ RTE_ACL_NODE_INDEX,
+ 0,
+ 0,
+ },
+};
+
+
+/*
+ * Resolve priority for multiple results (sse version).
+ * This consists of comparing the priority of the current traversal with the
+ * running set of results for the packet.
+ * For each result, keep a running array of the result (rule number) and
+ * its priority for each category.
+ */
+static inline void
+resolve_priority_sse(uint64_t transition, int n, const struct rte_acl_ctx *ctx,
+ struct parms *parms, const struct rte_acl_match_results *p,
+ uint32_t categories)
+{
+ uint32_t x;
+ xmm_t results, priority, results1, priority1, selector;
+ xmm_t *saved_results, *saved_priority;
+
+ for (x = 0; x < categories; x += RTE_ACL_RESULTS_MULTIPLIER) {
+
+ saved_results = (xmm_t *)(&parms[n].cmplt->results[x]);
+ saved_priority =
+ (xmm_t *)(&parms[n].cmplt->priority[x]);
+
+ /* get results and priorities for completed trie */
+ results = MM_LOADU((const xmm_t *)&p[transition].results[x]);
+ priority = MM_LOADU((const xmm_t *)&p[transition].priority[x]);
+
+ /* if this is not the first completed trie */
+ if (parms[n].cmplt->count != ctx->num_tries) {
+
+ /* get running best results and their priorities */
+ results1 = MM_LOADU(saved_results);
+ priority1 = MM_LOADU(saved_priority);
+
+ /* select results that are highest priority */
+ selector = MM_CMPGT32(priority1, priority);
+ results = MM_BLENDV8(results, results1, selector);
+ priority = MM_BLENDV8(priority, priority1, selector);
+ }
+
+ /* save running best results and their priorities */
+ MM_STOREU(saved_results, results);
+ MM_STOREU(saved_priority, priority);
+ }
+}
+
+/*
+ * Extract transitions from an XMM register and check for any matches
+ */
+static void
+acl_process_matches(xmm_t *indices, int slot, const struct rte_acl_ctx *ctx,
+ struct parms *parms, struct acl_flow_data *flows)
+{
+ uint64_t transition1, transition2;
+
+ /* extract transition from low 64 bits. */
+ transition1 = MM_CVT64(*indices);
+
+ /* extract transition from high 64 bits. */
+ *indices = MM_SHUFFLE32(*indices, SHUFFLE32_SWAP64);
+ transition2 = MM_CVT64(*indices);
+
+ transition1 = acl_match_check(transition1, slot, ctx,
+ parms, flows, resolve_priority_sse);
+ transition2 = acl_match_check(transition2, slot + 1, ctx,
+ parms, flows, resolve_priority_sse);
+
+ /* update indices with new transitions. */
+ *indices = MM_SET64(transition2, transition1);
+}
+
+/*
+ * Check for a match in 2 transitions (contained in an SSE register)
+ */
+static inline void
+acl_match_check_x2(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
+ struct acl_flow_data *flows, xmm_t *indices, xmm_t match_mask)
+{
+ xmm_t temp;
+
+ temp = MM_AND(match_mask, *indices);
+ while (!MM_TESTZ(temp, temp)) {
+ acl_process_matches(indices, slot, ctx, parms, flows);
+ temp = MM_AND(match_mask, *indices);
+ }
+}
+
+/*
+ * Check for any match in 4 transitions (contained in 2 SSE registers)
+ */
+static inline void
+acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
+ struct acl_flow_data *flows, xmm_t *indices1, xmm_t *indices2,
+ xmm_t match_mask)
+{
+ xmm_t temp;
+
+ /* put low 32 bits of each transition into one register */
+ temp = (xmm_t)MM_SHUFFLEPS((__m128)*indices1, (__m128)*indices2,
+ 0x88);
+ /* test for match node */
+ temp = MM_AND(match_mask, temp);
+
+ while (!MM_TESTZ(temp, temp)) {
+ acl_process_matches(indices1, slot, ctx, parms, flows);
+ acl_process_matches(indices2, slot + 2, ctx, parms, flows);
+
+ temp = (xmm_t)MM_SHUFFLEPS((__m128)*indices1,
+ (__m128)*indices2,
+ 0x88);
+ temp = MM_AND(match_mask, temp);
+ }
+}
+
+/*
+ * Calculate the address of the next transition for
+ * all types of nodes. Note that only DFA nodes and range
+ * nodes actually transition to another node. Match
+ * nodes don't move.
+ */
+static inline xmm_t
+acl_calc_addr(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input,
+ xmm_t ones_16, xmm_t bytes, xmm_t type_quad_range,
+ xmm_t *indices1, xmm_t *indices2)
+{
+ xmm_t addr, node_types, temp;
+
+ /*
+ * Note that no transition is done for a match
+ * node and therefore a stream freezes when
+ * it reaches a match.
+ */
+
+ /* Shuffle low 32 into temp and high 32 into indices2 */
+ temp = (xmm_t)MM_SHUFFLEPS((__m128)*indices1, (__m128)*indices2,
+ 0x88);
+ *indices2 = (xmm_t)MM_SHUFFLEPS((__m128)*indices1,
+ (__m128)*indices2, 0xdd);
+
+ /* Calc node type and node addr */
+ node_types = MM_ANDNOT(index_mask, temp);
+ addr = MM_AND(index_mask, temp);
+
+ /*
+ * Calc addr for DFAs - addr = dfa_index + input_byte
+ */
+
+ /* mask for DFA type (0) nodes */
+ temp = MM_CMPEQ32(node_types, MM_XOR(node_types, node_types));
+
+ /* add input byte to DFA position */
+ temp = MM_AND(temp, bytes);
+ temp = MM_AND(temp, next_input);
+ addr = MM_ADD32(addr, temp);
+
+ /*
+ * Calc addr for Range nodes -> range_index + range(input)
+ */
+ node_types = MM_CMPEQ32(node_types, type_quad_range);
+
+ /*
+ * Calculate number of range boundaries that are less than the
+ * input value. Range boundaries for each node are in signed 8 bit,
+ * ordered from -128 to 127 in the indices2 register.
+ * This is effectively a popcnt of the boundary bytes that are less than
+ * the input byte.
+ */
+
+ /* shuffle input byte to all 4 positions of 32 bit value */
+ temp = MM_SHUFFLE8(next_input, shuffle_input);
+
+ /* check ranges */
+ temp = MM_CMPGT8(temp, *indices2);
+
+ /* convert -1 to 1 (boundary bytes less than the input byte) */
+ temp = MM_SIGN8(temp, temp);
+
+ /* horizontal add pairs of bytes into words */
+ temp = MM_MADD8(temp, temp);
+
+ /* horizontal add pairs of words into dwords */
+ temp = MM_MADD16(temp, ones_16);
+
+ /* mask to range type nodes */
+ temp = MM_AND(temp, node_types);
+
+ /* add index into node position */
+ return MM_ADD32(addr, temp);
+}
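+
+/*
+ * Worked example (hypothetical QRANGE node): with boundaries
+ * {-100, -20, 40, 90} packed in indices2 and a signed input byte of 50,
+ * three boundaries compare below the input, so the computed offset is 3
+ * and the node's base address is advanced by three transition entries.
+ */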
+
+/*
+ * Process 4 transitions (in 2 SIMD registers) in parallel
+ */
+static inline xmm_t
+transition4(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input,
+ xmm_t ones_16, xmm_t bytes, xmm_t type_quad_range,
+ const uint64_t *trans, xmm_t *indices1, xmm_t *indices2)
+{
+ xmm_t addr;
+ uint64_t trans0, trans2;
+
+ /* Calculate the address (array index) for all 4 transitions. */
+
+ addr = acl_calc_addr(index_mask, next_input, shuffle_input, ones_16,
+ bytes, type_quad_range, indices1, indices2);
+
+ /* Gather 64 bit transitions and pack back into 2 registers. */
+
+ trans0 = trans[MM_CVT32(addr)];
+
+ /* get slot 2 */
+
+ /* {x0, x1, x2, x3} -> {x2, x1, x2, x3} */
+ addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT2);
+ trans2 = trans[MM_CVT32(addr)];
+
+ /* get slot 1 */
+
+ /* {x2, x1, x2, x3} -> {x1, x1, x2, x3} */
+ addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT1);
+ *indices1 = MM_SET64(trans[MM_CVT32(addr)], trans0);
+
+ /* get slot 3 */
+
+ /* {x1, x1, x2, x3} -> {x3, x1, x2, x3} */
+ addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT3);
+ *indices2 = MM_SET64(trans[MM_CVT32(addr)], trans2);
+
+ return MM_SRL32(next_input, 8);
+}
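+
+/*
+ * Each call consumes one input byte per stream and returns the input
+ * shifted right by 8 bits; this is why the search loops below fetch
+ * 4 bytes at a time with GET_NEXT_4BYTES() and then call
+ * transition4()/transition2() four times before re-checking for matches.
+ */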
+
+/*
+ * Execute trie traversal with 8 traversals in parallel
+ */
+static inline int
+search_sse_8(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t total_packets, uint32_t categories)
+{
+ int n;
+ struct acl_flow_data flows;
+ uint64_t index_array[MAX_SEARCHES_SSE8];
+ struct completion cmplt[MAX_SEARCHES_SSE8];
+ struct parms parms[MAX_SEARCHES_SSE8];
+ xmm_t input0, input1;
+ xmm_t indices1, indices2, indices3, indices4;
+
+ acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
+ total_packets, categories, ctx->trans_table);
+
+ for (n = 0; n < MAX_SEARCHES_SSE8; n++) {
+ cmplt[n].count = 0;
+ index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
+ }
+
+ /*
+ * indices1 contains index_array[0,1]
+ * indices2 contains index_array[2,3]
+ * indices3 contains index_array[4,5]
+ * indices4 contains index_array[6,7]
+ */
+
+ indices1 = MM_LOADU((xmm_t *) &index_array[0]);
+ indices2 = MM_LOADU((xmm_t *) &index_array[2]);
+
+ indices3 = MM_LOADU((xmm_t *) &index_array[4]);
+ indices4 = MM_LOADU((xmm_t *) &index_array[6]);
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows,
+ &indices1, &indices2, mm_match_mask.m);
+ acl_match_check_x4(4, ctx, parms, &flows,
+ &indices3, &indices4, mm_match_mask.m);
+
+ while (flows.started > 0) {
+
+ /* Gather 4 bytes of input data for each stream. */
+ input0 = MM_INSERT32(mm_ones_16.m, GET_NEXT_4BYTES(parms, 0),
+ 0);
+ input1 = MM_INSERT32(mm_ones_16.m, GET_NEXT_4BYTES(parms, 4),
+ 0);
+
+ input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 1), 1);
+ input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 5), 1);
+
+ input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 2), 2);
+ input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 6), 2);
+
+ input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 3), 3);
+ input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 7), 3);
+
+ /* Process the 4 bytes of input on each stream. */
+
+ input0 = transition4(mm_index_mask.m, input0,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices1, &indices2);
+
+ input1 = transition4(mm_index_mask.m, input1,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices3, &indices4);
+
+ input0 = transition4(mm_index_mask.m, input0,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices1, &indices2);
+
+ input1 = transition4(mm_index_mask.m, input1,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices3, &indices4);
+
+ input0 = transition4(mm_index_mask.m, input0,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices1, &indices2);
+
+ input1 = transition4(mm_index_mask.m, input1,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices3, &indices4);
+
+ input0 = transition4(mm_index_mask.m, input0,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices1, &indices2);
+
+ input1 = transition4(mm_index_mask.m, input1,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices3, &indices4);
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows,
+ &indices1, &indices2, mm_match_mask.m);
+ acl_match_check_x4(4, ctx, parms, &flows,
+ &indices3, &indices4, mm_match_mask.m);
+ }
+
+ return 0;
+}
+
+/*
+ * Execute trie traversal with 4 traversals in parallel
+ */
+static inline int
+search_sse_4(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, int total_packets, uint32_t categories)
+{
+ int n;
+ struct acl_flow_data flows;
+ uint64_t index_array[MAX_SEARCHES_SSE4];
+ struct completion cmplt[MAX_SEARCHES_SSE4];
+ struct parms parms[MAX_SEARCHES_SSE4];
+ xmm_t input, indices1, indices2;
+
+ acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
+ total_packets, categories, ctx->trans_table);
+
+ for (n = 0; n < MAX_SEARCHES_SSE4; n++) {
+ cmplt[n].count = 0;
+ index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
+ }
+
+ indices1 = MM_LOADU((xmm_t *) &index_array[0]);
+ indices2 = MM_LOADU((xmm_t *) &index_array[2]);
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows,
+ &indices1, &indices2, mm_match_mask.m);
+
+ while (flows.started > 0) {
+
+ /* Gather 4 bytes of input data for each stream. */
+ input = MM_INSERT32(mm_ones_16.m, GET_NEXT_4BYTES(parms, 0), 0);
+ input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 1), 1);
+ input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 2), 2);
+ input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 3), 3);
+
+ /* Process the 4 bytes of input on each stream. */
+ input = transition4(mm_index_mask.m, input,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices1, &indices2);
+
+ input = transition4(mm_index_mask.m, input,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices1, &indices2);
+
+ input = transition4(mm_index_mask.m, input,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices1, &indices2);
+
+ input = transition4(mm_index_mask.m, input,
+ mm_shuffle_input.m, mm_ones_16.m,
+ mm_bytes.m, mm_type_quad_range.m,
+ flows.trans, &indices1, &indices2);
+
+ /* Check for any matches. */
+ acl_match_check_x4(0, ctx, parms, &flows,
+ &indices1, &indices2, mm_match_mask.m);
+ }
+
+ return 0;
+}
+
+static inline xmm_t
+transition2(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input,
+ xmm_t ones_16, xmm_t bytes, xmm_t type_quad_range,
+ const uint64_t *trans, xmm_t *indices1)
+{
+ uint64_t t;
+ xmm_t addr, indices2;
+
+ indices2 = MM_XOR(ones_16, ones_16);
+
+ addr = acl_calc_addr(index_mask, next_input, shuffle_input, ones_16,
+ bytes, type_quad_range, indices1, &indices2);
+
+ /* Gather 64 bit transitions and pack 2 per register. */
+
+ t = trans[MM_CVT32(addr)];
+
+ /* get slot 1 */
+ addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT1);
+ *indices1 = MM_SET64(trans[MM_CVT32(addr)], t);
+
+ return MM_SRL32(next_input, 8);
+}
+
+/*
+ * Execute trie traversal with 2 traversals in parallel.
+ */
+static inline int
+search_sse_2(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t total_packets, uint32_t categories)
+{
+ int n;
+ struct acl_flow_data flows;
+ uint64_t index_array[MAX_SEARCHES_SSE2];
+ struct completion cmplt[MAX_SEARCHES_SSE2];
+ struct parms parms[MAX_SEARCHES_SSE2];
+ xmm_t input, indices;
+
+ acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
+ total_packets, categories, ctx->trans_table);
+
+ for (n = 0; n < MAX_SEARCHES_SSE2; n++) {
+ cmplt[n].count = 0;
+ index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
+ }
+
+ indices = MM_LOADU((xmm_t *) &index_array[0]);
+
+ /* Check for any matches. */
+ acl_match_check_x2(0, ctx, parms, &flows, &indices, mm_match_mask64.m);
+
+ while (flows.started > 0) {
+
+ /* Gather 4 bytes of input data for each stream. */
+ input = MM_INSERT32(mm_ones_16.m, GET_NEXT_4BYTES(parms, 0), 0);
+ input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 1), 1);
+
+ /* Process the 4 bytes of input on each stream. */
+
+ input = transition2(mm_index_mask64.m, input,
+ mm_shuffle_input64.m, mm_ones_16.m,
+ mm_bytes64.m, mm_type_quad_range64.m,
+ flows.trans, &indices);
+
+ input = transition2(mm_index_mask64.m, input,
+ mm_shuffle_input64.m, mm_ones_16.m,
+ mm_bytes64.m, mm_type_quad_range64.m,
+ flows.trans, &indices);
+
+ input = transition2(mm_index_mask64.m, input,
+ mm_shuffle_input64.m, mm_ones_16.m,
+ mm_bytes64.m, mm_type_quad_range64.m,
+ flows.trans, &indices);
+
+ input = transition2(mm_index_mask64.m, input,
+ mm_shuffle_input64.m, mm_ones_16.m,
+ mm_bytes64.m, mm_type_quad_range64.m,
+ flows.trans, &indices);
+
+ /* Check for any matches. */
+ acl_match_check_x2(0, ctx, parms, &flows, &indices,
+ mm_match_mask64.m);
+ }
+
+ return 0;
+}
+
+int
+rte_acl_classify_sse(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t num, uint32_t categories)
+{
+ if (categories != 1 &&
+ ((RTE_ACL_RESULTS_MULTIPLIER - 1) & categories) != 0)
+ return -EINVAL;
+
+ if (likely(num >= MAX_SEARCHES_SSE8))
+ return search_sse_8(ctx, data, results, num, categories);
+ else if (num >= MAX_SEARCHES_SSE4)
+ return search_sse_4(ctx, data, results, num, categories);
+ else
+ return search_sse_2(ctx, data, results, num, categories);
+}
diff --git a/src/dpdk_lib18/librte_acl/acl_vect.h b/src/dpdk_lib18/librte_acl/acl_vect.h
new file mode 100755
index 00000000..d8136003
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/acl_vect.h
@@ -0,0 +1,132 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ACL_VECT_H_
+#define _RTE_ACL_VECT_H_
+
+/**
+ * @file
+ *
+ * RTE ACL SSE/AVX related header.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MM_ADD16(a, b) _mm_add_epi16(a, b)
+#define MM_ADD32(a, b) _mm_add_epi32(a, b)
+#define MM_ALIGNR8(a, b, c) _mm_alignr_epi8(a, b, c)
+#define MM_AND(a, b) _mm_and_si128(a, b)
+#define MM_ANDNOT(a, b) _mm_andnot_si128(a, b)
+#define MM_BLENDV8(a, b, c) _mm_blendv_epi8(a, b, c)
+#define MM_CMPEQ16(a, b) _mm_cmpeq_epi16(a, b)
+#define MM_CMPEQ32(a, b) _mm_cmpeq_epi32(a, b)
+#define MM_CMPEQ8(a, b) _mm_cmpeq_epi8(a, b)
+#define MM_CMPGT32(a, b) _mm_cmpgt_epi32(a, b)
+#define MM_CMPGT8(a, b) _mm_cmpgt_epi8(a, b)
+#define MM_CVT(a) _mm_cvtsi32_si128(a)
+#define MM_CVT32(a) _mm_cvtsi128_si32(a)
+#define MM_CVTU32(a) _mm_cvtsi32_si128(a)
+#define MM_INSERT16(a, c, b) _mm_insert_epi16(a, c, b)
+#define MM_INSERT32(a, c, b) _mm_insert_epi32(a, c, b)
+#define MM_LOAD(a) _mm_load_si128(a)
+#define MM_LOADH_PI(a, b) _mm_loadh_pi(a, b)
+#define MM_LOADU(a) _mm_loadu_si128(a)
+#define MM_MADD16(a, b) _mm_madd_epi16(a, b)
+#define MM_MADD8(a, b) _mm_maddubs_epi16(a, b)
+#define MM_MOVEMASK8(a) _mm_movemask_epi8(a)
+#define MM_OR(a, b) _mm_or_si128(a, b)
+#define MM_SET1_16(a) _mm_set1_epi16(a)
+#define MM_SET1_32(a) _mm_set1_epi32(a)
+#define MM_SET1_64(a) _mm_set1_epi64(a)
+#define MM_SET1_8(a) _mm_set1_epi8(a)
+#define MM_SET32(a, b, c, d) _mm_set_epi32(a, b, c, d)
+#define MM_SHUFFLE32(a, b) _mm_shuffle_epi32(a, b)
+#define MM_SHUFFLE8(a, b) _mm_shuffle_epi8(a, b)
+#define MM_SHUFFLEPS(a, b, c) _mm_shuffle_ps(a, b, c)
+#define MM_SIGN8(a, b) _mm_sign_epi8(a, b)
+#define MM_SLL64(a, b) _mm_sll_epi64(a, b)
+#define MM_SRL128(a, b) _mm_srli_si128(a, b)
+#define MM_SRL16(a, b) _mm_srli_epi16(a, b)
+#define MM_SRL32(a, b) _mm_srli_epi32(a, b)
+#define MM_STORE(a, b) _mm_store_si128(a, b)
+#define MM_STOREU(a, b) _mm_storeu_si128(a, b)
+#define MM_TESTZ(a, b) _mm_testz_si128(a, b)
+#define MM_XOR(a, b) _mm_xor_si128(a, b)
+
+#define MM_SET16(a, b, c, d, e, f, g, h) \
+ _mm_set_epi16(a, b, c, d, e, f, g, h)
+
+#define MM_SET8(c0, c1, c2, c3, c4, c5, c6, c7, \
+ c8, c9, cA, cB, cC, cD, cE, cF) \
+ _mm_set_epi8(c0, c1, c2, c3, c4, c5, c6, c7, \
+ c8, c9, cA, cB, cC, cD, cE, cF)
+
+#ifdef RTE_ARCH_X86_64
+
+#define MM_CVT64(a) _mm_cvtsi128_si64(a)
+
+#else
+
+#define MM_CVT64(a) ({ \
+ rte_xmm_t m; \
+ m.m = (a); \
+ (m.u64[0]); \
+})
+
+#endif /*RTE_ARCH_X86_64 */
+
+/*
+ * Prior to version 12.1 icc doesn't support _mm_set_epi64x.
+ */
+#if (defined(__ICC) && __ICC < 1210)
+
+#define MM_SET64(a, b) ({ \
+ rte_xmm_t m; \
+ m.u64[0] = b; \
+ m.u64[1] = a; \
+ (m.m); \
+})
+
+#else
+
+#define MM_SET64(a, b) _mm_set_epi64x(a, b)
+
+#endif /* (defined(__ICC) && __ICC < 1210) */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ACL_VECT_H_ */
diff --git a/src/dpdk_lib18/librte_acl/rte_acl.c b/src/dpdk_lib18/librte_acl/rte_acl.c
new file mode 100755
index 00000000..547e6dae
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/rte_acl.c
@@ -0,0 +1,516 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_acl.h>
+#include "acl.h"
+
+#define BIT_SIZEOF(x) (sizeof(x) * CHAR_BIT)
+
+TAILQ_HEAD(rte_acl_list, rte_tailq_entry);
+
+static const rte_acl_classify_t classify_fns[] = {
+ [RTE_ACL_CLASSIFY_DEFAULT] = rte_acl_classify_scalar,
+ [RTE_ACL_CLASSIFY_SCALAR] = rte_acl_classify_scalar,
+ [RTE_ACL_CLASSIFY_SSE] = rte_acl_classify_sse,
+};
+
+/* By default, use the always-available scalar code path. */
+static enum rte_acl_classify_alg rte_acl_default_classify =
+ RTE_ACL_CLASSIFY_SCALAR;
+
+static void
+rte_acl_set_default_classify(enum rte_acl_classify_alg alg)
+{
+ rte_acl_default_classify = alg;
+}
+
+extern int
+rte_acl_set_ctx_classify(struct rte_acl_ctx *ctx, enum rte_acl_classify_alg alg)
+{
+ if (ctx == NULL || (uint32_t)alg >= RTE_DIM(classify_fns))
+ return -EINVAL;
+
+ ctx->alg = alg;
+ return 0;
+}
+
+static void __attribute__((constructor))
+rte_acl_init(void)
+{
+ enum rte_acl_classify_alg alg = RTE_ACL_CLASSIFY_DEFAULT;
+
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ alg = RTE_ACL_CLASSIFY_SSE;
+
+ rte_acl_set_default_classify(alg);
+}
+
+int
+rte_acl_classify(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t num, uint32_t categories)
+{
+ return classify_fns[ctx->alg](ctx, data, results, num, categories);
+}
+
+int
+rte_acl_classify_alg(const struct rte_acl_ctx *ctx, const uint8_t **data,
+ uint32_t *results, uint32_t num, uint32_t categories,
+ enum rte_acl_classify_alg alg)
+{
+ return classify_fns[alg](ctx, data, results, num, categories);
+}
+
+struct rte_acl_ctx *
+rte_acl_find_existing(const char *name)
+{
+ struct rte_acl_ctx *ctx = NULL;
+ struct rte_acl_list *acl_list;
+ struct rte_tailq_entry *te;
+
+ /* check that we have an initialised tail queue */
+ acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
+ if (acl_list == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_FOREACH(te, acl_list, next) {
+ ctx = (struct rte_acl_ctx *) te->data;
+ if (strncmp(name, ctx->name, sizeof(ctx->name)) == 0)
+ break;
+ }
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+ return ctx;
+}
+
+void
+rte_acl_free(struct rte_acl_ctx *ctx)
+{
+ struct rte_acl_list *acl_list;
+ struct rte_tailq_entry *te;
+
+ if (ctx == NULL)
+ return;
+
+ /* check that we have an initialised tail queue */
+ acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
+ if (acl_list == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return;
+ }
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find our tailq entry */
+ TAILQ_FOREACH(te, acl_list, next) {
+ if (te->data == (void *) ctx)
+ break;
+ }
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(acl_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ rte_free(ctx->mem);
+ rte_free(ctx);
+ rte_free(te);
+}
+
+struct rte_acl_ctx *
+rte_acl_create(const struct rte_acl_param *param)
+{
+ size_t sz;
+ struct rte_acl_ctx *ctx;
+ struct rte_acl_list *acl_list;
+ struct rte_tailq_entry *te;
+ char name[sizeof(ctx->name)];
+
+ /* check that we have an initialised tail queue */
+ acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
+ if (acl_list == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ /* check that input parameters are valid. */
+ if (param == NULL || param->name == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ snprintf(name, sizeof(name), "ACL_%s", param->name);
+
+ /* calculate amount of memory required for pattern set. */
+ sz = sizeof(*ctx) + param->max_rule_num * param->rule_size;
+
+ /* get EAL TAILQ lock. */
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* if we already have one with that name */
+ TAILQ_FOREACH(te, acl_list, next) {
+ ctx = (struct rte_acl_ctx *) te->data;
+ if (strncmp(param->name, ctx->name, sizeof(ctx->name)) == 0)
+ break;
+ }
+
+ /* if ACL with such name doesn't exist, then create a new one. */
+ if (te == NULL) {
+ ctx = NULL;
+ te = rte_zmalloc("ACL_TAILQ_ENTRY", sizeof(*te), 0);
+
+ if (te == NULL) {
+ RTE_LOG(ERR, ACL, "Cannot allocate tailq entry!\n");
+ goto exit;
+ }
+
+ ctx = rte_zmalloc_socket(name, sz, RTE_CACHE_LINE_SIZE, param->socket_id);
+
+ if (ctx == NULL) {
+ RTE_LOG(ERR, ACL,
+ "allocation of %zu bytes on socket %d for %s failed\n",
+ sz, param->socket_id, name);
+ rte_free(te);
+ goto exit;
+ }
+ /* init new allocated context. */
+ ctx->rules = ctx + 1;
+ ctx->max_rules = param->max_rule_num;
+ ctx->rule_sz = param->rule_size;
+ ctx->socket_id = param->socket_id;
+ ctx->alg = rte_acl_default_classify;
+ snprintf(ctx->name, sizeof(ctx->name), "%s", param->name);
+
+ te->data = (void *) ctx;
+
+ TAILQ_INSERT_TAIL(acl_list, te, next);
+ }
+
+exit:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return ctx;
+}
+
+static int
+acl_add_rules(struct rte_acl_ctx *ctx, const void *rules, uint32_t num)
+{
+ uint8_t *pos;
+
+ if (num + ctx->num_rules > ctx->max_rules)
+ return -ENOMEM;
+
+ pos = ctx->rules;
+ pos += ctx->rule_sz * ctx->num_rules;
+ memcpy(pos, rules, num * ctx->rule_sz);
+ ctx->num_rules += num;
+
+ return 0;
+}
+
+static int
+acl_check_rule(const struct rte_acl_rule_data *rd)
+{
+ if ((rd->category_mask & LEN2MASK(RTE_ACL_MAX_CATEGORIES)) == 0 ||
+ rd->priority > RTE_ACL_MAX_PRIORITY ||
+ rd->priority < RTE_ACL_MIN_PRIORITY ||
+ rd->userdata == RTE_ACL_INVALID_USERDATA)
+ return -EINVAL;
+ return 0;
+}
+
+int
+rte_acl_add_rules(struct rte_acl_ctx *ctx, const struct rte_acl_rule *rules,
+ uint32_t num)
+{
+ const struct rte_acl_rule *rv;
+ uint32_t i;
+ int32_t rc;
+
+ if (ctx == NULL || rules == NULL || 0 == ctx->rule_sz)
+ return -EINVAL;
+
+ for (i = 0; i != num; i++) {
+ rv = (const struct rte_acl_rule *)
+ ((uintptr_t)rules + i * ctx->rule_sz);
+ rc = acl_check_rule(&rv->data);
+ if (rc != 0) {
+ RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
+ __func__, ctx->name, i + 1);
+ return rc;
+ }
+ }
+
+ return acl_add_rules(ctx, rules, num);
+}
+
+/*
+ * Reset all rules.
+ * Note that RT structures are not affected.
+ */
+void
+rte_acl_reset_rules(struct rte_acl_ctx *ctx)
+{
+ if (ctx != NULL)
+ ctx->num_rules = 0;
+}
+
+/*
+ * Reset all rules and destroy RT structures.
+ */
+void
+rte_acl_reset(struct rte_acl_ctx *ctx)
+{
+ if (ctx != NULL) {
+ rte_acl_reset_rules(ctx);
+ rte_acl_build(ctx, &ctx->config);
+ }
+}
+
+/*
+ * Dump an ACL context to stdout.
+ */
+void
+rte_acl_dump(const struct rte_acl_ctx *ctx)
+{
+ if (!ctx)
+ return;
+ printf("acl context <%s>@%p\n", ctx->name, ctx);
+ printf(" socket_id=%"PRId32"\n", ctx->socket_id);
+ printf(" alg=%"PRId32"\n", ctx->alg);
+ printf(" max_rules=%"PRIu32"\n", ctx->max_rules);
+ printf(" rule_size=%"PRIu32"\n", ctx->rule_sz);
+ printf(" num_rules=%"PRIu32"\n", ctx->num_rules);
+ printf(" num_categories=%"PRIu32"\n", ctx->num_categories);
+ printf(" num_tries=%"PRIu32"\n", ctx->num_tries);
+}
+
+/*
+ * Dump all ACL contexts to stdout.
+ */
+void
+rte_acl_list_dump(void)
+{
+ struct rte_acl_ctx *ctx;
+ struct rte_acl_list *acl_list;
+ struct rte_tailq_entry *te;
+
+ /* check that we have an initialised tail queue */
+ acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
+ if (acl_list == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_FOREACH(te, acl_list, next) {
+ ctx = (struct rte_acl_ctx *) te->data;
+ rte_acl_dump(ctx);
+ }
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+}
+
+/*
+ * Support for legacy ipv4vlan rules.
+ */
+
+RTE_ACL_RULE_DEF(acl_ipv4vlan_rule, RTE_ACL_IPV4VLAN_NUM_FIELDS);
+
+static int
+acl_ipv4vlan_check_rule(const struct rte_acl_ipv4vlan_rule *rule)
+{
+ if (rule->src_port_low > rule->src_port_high ||
+ rule->dst_port_low > rule->dst_port_high ||
+ rule->src_mask_len > BIT_SIZEOF(rule->src_addr) ||
+ rule->dst_mask_len > BIT_SIZEOF(rule->dst_addr))
+ return -EINVAL;
+
+ return acl_check_rule(&rule->data);
+}
+
+static void
+acl_ipv4vlan_convert_rule(const struct rte_acl_ipv4vlan_rule *ri,
+ struct acl_ipv4vlan_rule *ro)
+{
+ ro->data = ri->data;
+
+ ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].value.u8 = ri->proto;
+ ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].value.u16 = ri->vlan;
+ ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].value.u16 = ri->domain;
+ ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].value.u32 = ri->src_addr;
+ ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].value.u32 = ri->dst_addr;
+ ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].value.u16 = ri->src_port_low;
+ ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].value.u16 = ri->dst_port_low;
+
+ ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].mask_range.u8 = ri->proto_mask;
+ ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].mask_range.u16 = ri->vlan_mask;
+ ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].mask_range.u16 =
+ ri->domain_mask;
+ ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32 =
+ ri->src_mask_len;
+ ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32 = ri->dst_mask_len;
+ ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].mask_range.u16 =
+ ri->src_port_high;
+ ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].mask_range.u16 =
+ ri->dst_port_high;
+}
+
+int
+rte_acl_ipv4vlan_add_rules(struct rte_acl_ctx *ctx,
+ const struct rte_acl_ipv4vlan_rule *rules,
+ uint32_t num)
+{
+ int32_t rc;
+ uint32_t i;
+ struct acl_ipv4vlan_rule rv;
+
+ if (ctx == NULL || rules == NULL || ctx->rule_sz != sizeof(rv))
+ return -EINVAL;
+
+ /* check input rules. */
+ for (i = 0; i != num; i++) {
+ rc = acl_ipv4vlan_check_rule(rules + i);
+ if (rc != 0) {
+ RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
+ __func__, ctx->name, i + 1);
+ return rc;
+ }
+ }
+
+ if (num + ctx->num_rules > ctx->max_rules)
+ return -ENOMEM;
+
+ /* perform conversion to the internal format and add to the context. */
+ for (i = 0, rc = 0; i != num && rc == 0; i++) {
+ acl_ipv4vlan_convert_rule(rules + i, &rv);
+ rc = acl_add_rules(ctx, &rv, 1);
+ }
+
+ return rc;
+}
+
+static void
+acl_ipv4vlan_config(struct rte_acl_config *cfg,
+ const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
+ uint32_t num_categories)
+{
+ static const struct rte_acl_field_def
+ ipv4_defs[RTE_ACL_IPV4VLAN_NUM_FIELDS] = {
+ {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = RTE_ACL_IPV4VLAN_PROTO_FIELD,
+ .input_index = RTE_ACL_IPV4VLAN_PROTO,
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint16_t),
+ .field_index = RTE_ACL_IPV4VLAN_VLAN1_FIELD,
+ .input_index = RTE_ACL_IPV4VLAN_VLAN,
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint16_t),
+ .field_index = RTE_ACL_IPV4VLAN_VLAN2_FIELD,
+ .input_index = RTE_ACL_IPV4VLAN_VLAN,
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = RTE_ACL_IPV4VLAN_SRC_FIELD,
+ .input_index = RTE_ACL_IPV4VLAN_SRC,
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = RTE_ACL_IPV4VLAN_DST_FIELD,
+ .input_index = RTE_ACL_IPV4VLAN_DST,
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = RTE_ACL_IPV4VLAN_SRCP_FIELD,
+ .input_index = RTE_ACL_IPV4VLAN_PORTS,
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = RTE_ACL_IPV4VLAN_DSTP_FIELD,
+ .input_index = RTE_ACL_IPV4VLAN_PORTS,
+ },
+ };
+
+ memcpy(&cfg->defs, ipv4_defs, sizeof(ipv4_defs));
+ cfg->num_fields = RTE_DIM(ipv4_defs);
+
+ cfg->defs[RTE_ACL_IPV4VLAN_PROTO_FIELD].offset =
+ layout[RTE_ACL_IPV4VLAN_PROTO];
+ cfg->defs[RTE_ACL_IPV4VLAN_VLAN1_FIELD].offset =
+ layout[RTE_ACL_IPV4VLAN_VLAN];
+ cfg->defs[RTE_ACL_IPV4VLAN_VLAN2_FIELD].offset =
+ layout[RTE_ACL_IPV4VLAN_VLAN] +
+ cfg->defs[RTE_ACL_IPV4VLAN_VLAN1_FIELD].size;
+ cfg->defs[RTE_ACL_IPV4VLAN_SRC_FIELD].offset =
+ layout[RTE_ACL_IPV4VLAN_SRC];
+ cfg->defs[RTE_ACL_IPV4VLAN_DST_FIELD].offset =
+ layout[RTE_ACL_IPV4VLAN_DST];
+ cfg->defs[RTE_ACL_IPV4VLAN_SRCP_FIELD].offset =
+ layout[RTE_ACL_IPV4VLAN_PORTS];
+ cfg->defs[RTE_ACL_IPV4VLAN_DSTP_FIELD].offset =
+ layout[RTE_ACL_IPV4VLAN_PORTS] +
+ cfg->defs[RTE_ACL_IPV4VLAN_SRCP_FIELD].size;
+
+ cfg->num_categories = num_categories;
+}
+
+int
+rte_acl_ipv4vlan_build(struct rte_acl_ctx *ctx,
+ const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
+ uint32_t num_categories)
+{
+ struct rte_acl_config cfg;
+
+ if (ctx == NULL || layout == NULL)
+ return -EINVAL;
+
+ acl_ipv4vlan_config(&cfg, layout, num_categories);
+ return rte_acl_build(ctx, &cfg);
+}
diff --git a/src/dpdk_lib18/librte_acl/rte_acl.h b/src/dpdk_lib18/librte_acl/rte_acl.h
new file mode 100755
index 00000000..0d913eed
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/rte_acl.h
@@ -0,0 +1,485 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ACL_H_
+#define _RTE_ACL_H_
+
+/**
+ * @file
+ *
+ * RTE Classifier.
+ */
+
+#include <rte_acl_osdep.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_ACL_MAX_CATEGORIES 16
+
+#define RTE_ACL_RESULTS_MULTIPLIER (XMM_SIZE / sizeof(uint32_t))
+
+#define RTE_ACL_MAX_LEVELS 64
+#define RTE_ACL_MAX_FIELDS 64
+
+union rte_acl_field_types {
+ uint8_t u8;
+ uint16_t u16;
+ uint32_t u32;
+ uint64_t u64;
+};
+
+enum {
+ RTE_ACL_FIELD_TYPE_MASK = 0,
+ RTE_ACL_FIELD_TYPE_RANGE,
+ RTE_ACL_FIELD_TYPE_BITMASK
+};
+
+/**
+ * ACL Field definition.
+ * Each field in the ACL rule has an associated definition.
+ * It defines the type of field, its size, its offset in the input buffer,
+ * the field index, and the input index.
+ * For performance reasons, the inner loop of the search function is unrolled
+ * to process four input bytes at a time. This requires the input to be grouped
+ * into sets of 4 consecutive bytes. The loop processes the first input byte as
+ * part of the setup; subsequent bytes must then be supplied in groups of 4
+ * consecutive bytes.
+ */
+struct rte_acl_field_def {
+ uint8_t type; /**< type - RTE_ACL_FIELD_TYPE_*. */
+ uint8_t size; /**< size of field 1,2,4, or 8. */
+ uint8_t field_index; /**< index of field inside the rule. */
+ uint8_t input_index; /**< 0-N input index. */
+ uint32_t offset; /**< offset to start of field. */
+};
+
+/**
+ * ACL build configuration.
+ * Defines the fields of an ACL trie and number of categories to build with.
+ */
+struct rte_acl_config {
+ uint32_t num_categories; /**< Number of categories to build with. */
+ uint32_t num_fields; /**< Number of field definitions. */
+ struct rte_acl_field_def defs[RTE_ACL_MAX_FIELDS];
+ /**< array of field definitions. */
+};
+
+/**
+ * Defines the value of a field for a rule.
+ */
+struct rte_acl_field {
+ union rte_acl_field_types value;
+ /**< a 1,2,4, or 8 byte value of the field. */
+ union rte_acl_field_types mask_range;
+ /**<
+ * depending on field type:
+ * mask -> 1.2.3.4/32 value=0x1020304, mask_range=32,
+ * range -> 0 : 65535 value=0, mask_range=65535,
+ * bitmask -> 0x06/0xff value=6, mask_range=0xff.
+ */
+};
+
+enum {
+ RTE_ACL_TYPE_SHIFT = 29,
+ RTE_ACL_MAX_INDEX = LEN2MASK(RTE_ACL_TYPE_SHIFT),
+ RTE_ACL_MAX_PRIORITY = RTE_ACL_MAX_INDEX,
+ RTE_ACL_MIN_PRIORITY = 0,
+};
+
+#define RTE_ACL_INVALID_USERDATA 0
+
+/**
+ * Miscellaneous data for ACL rule.
+ */
+struct rte_acl_rule_data {
+ uint32_t category_mask; /**< Mask of categories for that rule. */
+ int32_t priority; /**< Priority for that rule. */
+ uint32_t userdata; /**< Associated with the rule user data. */
+};
+
+/**
+ * Defines single ACL rule.
+ * data - miscellaneous data for the rule.
+ * field[] - value and mask or range for each field.
+ */
+#define RTE_ACL_RULE_DEF(name, fld_num) struct name {\
+ struct rte_acl_rule_data data; \
+ struct rte_acl_field field[fld_num]; \
+}
+
+RTE_ACL_RULE_DEF(rte_acl_rule, 0);
+
+#define RTE_ACL_RULE_SZ(fld_num) \
+ (sizeof(struct rte_acl_rule) + sizeof(struct rte_acl_field) * (fld_num))
+
+
+/** Max number of characters in name.*/
+#define RTE_ACL_NAMESIZE 32
+
+/**
+ * Parameters used when creating the ACL context.
+ */
+struct rte_acl_param {
+ const char *name; /**< Name of the ACL context. */
+ int socket_id; /**< Socket ID to allocate memory for. */
+ uint32_t rule_size; /**< Size of each rule. */
+ uint32_t max_rule_num; /**< Maximum number of rules. */
+};
+
+
+/**
+ * Create a new ACL context.
+ *
+ * @param param
+ * Parameters used to create and initialise the ACL context.
+ * @return
+ * Pointer to ACL context structure that is used in future ACL
+ * operations, or NULL on error, with error code set in rte_errno.
+ * Possible rte_errno errors include:
+ * - E_RTE_NO_TAILQ - no tailq list could be got for the ACL context list
+ * - EINVAL - invalid parameter passed to function
+ */
+struct rte_acl_ctx *
+rte_acl_create(const struct rte_acl_param *param);
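+
+/*
+ * Example (minimal sketch; the context name and sizes are hypothetical):
+ *
+ *	struct rte_acl_param prm = {
+ *		.name = "app_acl",
+ *		.socket_id = SOCKET_ID_ANY,
+ *		.rule_size = RTE_ACL_RULE_SZ(5),
+ *		.max_rule_num = 1024,
+ *	};
+ *
+ *	struct rte_acl_ctx *acx = rte_acl_create(&prm);
+ *	if (acx == NULL)
+ *		rte_exit(EXIT_FAILURE, "rte_acl_create() failed\n");
+ */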
+
+/**
+ * Find an existing ACL context object and return a pointer to it.
+ *
+ * @param name
+ * Name of the ACL context as passed to rte_acl_create()
+ * @return
+ * Pointer to ACL context or NULL if object not found
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - ENOENT - value not available for return
+ */
+struct rte_acl_ctx *
+rte_acl_find_existing(const char *name);
+
+/**
+ * De-allocate all memory used by ACL context.
+ *
+ * @param ctx
+ * ACL context to free
+ */
+void
+rte_acl_free(struct rte_acl_ctx *ctx);
+
+/**
+ * Add rules to an existing ACL context.
+ * This function is not multi-thread safe.
+ *
+ * @param ctx
+ * ACL context to add patterns to.
+ * @param rules
+ * Array of rules to add to the ACL context.
+ * Note that all fields in rte_acl_rule structures are expected
+ * to be in host byte order.
+ * Each rule is expected to be in the same format and must not exceed the
+ * size specified at ACL context creation time.
+ * @param num
+ * Number of elements in the input array of rules.
+ * @return
+ * - -ENOMEM if there is no space in the ACL context for these rules.
+ * - -EINVAL if the parameters are invalid.
+ * - Zero if operation completed successfully.
+ */
+int
+rte_acl_add_rules(struct rte_acl_ctx *ctx, const struct rte_acl_rule *rules,
+ uint32_t num);
+
+/**
+ * Delete all rules from the ACL context.
+ * This function is not multi-thread safe.
+ * Note that internal run-time structures are not affected.
+ *
+ * @param ctx
+ * ACL context to delete rules from.
+ */
+void
+rte_acl_reset_rules(struct rte_acl_ctx *ctx);
+
+/**
+ * Analyze set of rules and build required internal run-time structures.
+ * This function is not multi-thread safe.
+ *
+ * @param ctx
+ * ACL context to build.
+ * @param cfg
+ * Pointer to struct rte_acl_config - defines build parameters.
+ * @return
+ * - -ENOMEM if couldn't allocate enough memory.
+ * - -EINVAL if the parameters are invalid.
+ * - Negative error code if operation failed.
+ * - Zero if operation completed successfully.
+ */
+int
+rte_acl_build(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg);
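+
+/*
+ * Example (minimal sketch): add previously prepared rules and build the
+ * run-time structures for one category. "app_rules", "app_num_rules" and
+ * "app_defs" are hypothetical application data; app_defs would be an array
+ * of rte_acl_field_def as sketched earlier in this file.
+ *
+ *	struct rte_acl_config cfg = {
+ *		.num_categories = 1,
+ *		.num_fields = RTE_DIM(app_defs),
+ *	};
+ *	memcpy(cfg.defs, app_defs, sizeof(app_defs));
+ *
+ *	if (rte_acl_add_rules(acx, (const struct rte_acl_rule *)app_rules,
+ *			app_num_rules) != 0 ||
+ *			rte_acl_build(acx, &cfg) != 0)
+ *		rte_exit(EXIT_FAILURE, "ACL rule setup failed\n");
+ */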
+
+/**
+ * Delete all rules from the ACL context and
+ * destroy all internal run-time structures.
+ * This function is not multi-thread safe.
+ *
+ * @param ctx
+ * ACL context to reset.
+ */
+void
+rte_acl_reset(struct rte_acl_ctx *ctx);
+
+/**
+ * Available implementations of ACL classify.
+ */
+enum rte_acl_classify_alg {
+ RTE_ACL_CLASSIFY_DEFAULT = 0,
+ RTE_ACL_CLASSIFY_SCALAR = 1, /**< generic implementation. */
+ RTE_ACL_CLASSIFY_SSE = 2, /**< requires SSE4.1 support. */
+};
+
+/**
+ * Perform search for a matching ACL rule for each input data buffer.
+ * Each input data buffer can have up to *categories* matches.
+ * This implies that the results array should be big enough to hold
+ * (categories * num) elements.
+ * The categories parameter should be either one or a multiple of
+ * RTE_ACL_RESULTS_MULTIPLIER and can't be bigger than RTE_ACL_MAX_CATEGORIES.
+ * If more than one rule is applicable for a given input buffer and
+ * category, then the rule with the highest priority is returned as the match.
+ * Note that it is the caller's responsibility to ensure that the input
+ * parameters are valid and point to correct memory locations.
+ *
+ * @param ctx
+ * ACL context to search with.
+ * @param data
+ * Array of pointers to input data buffers to perform search.
+ * Note that all fields in input data buffers are supposed to be in network
+ * byte order (MSB).
+ * @param results
+ * Array of search results, *categories* results per each input data buffer.
+ * @param num
+ * Number of elements in the input data buffers array.
+ * @param categories
+ * Number of maximum possible matches for each input buffer, one possible
+ * match per category.
+ * @return
+ * zero on successful completion.
+ * -EINVAL for incorrect arguments.
+ */
+extern int
+rte_acl_classify(const struct rte_acl_ctx *ctx,
+ const uint8_t **data,
+ uint32_t *results, uint32_t num,
+ uint32_t categories);
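+
+/*
+ * Example (minimal sketch): classify a burst of "num" buffers against a
+ * single category. "bufs" is a hypothetical array of pointers to search
+ * keys laid out according to the build-time field definitions, and
+ * BURST_SIZE is a hypothetical upper bound on "num". On return, each
+ * results[i] holds the userdata of the highest priority matching rule, or
+ * RTE_ACL_INVALID_USERDATA when no rule matched.
+ *
+ *	uint32_t results[BURST_SIZE];
+ *
+ *	if (rte_acl_classify(acx, bufs, results, num, 1) != 0)
+ *		rte_exit(EXIT_FAILURE, "rte_acl_classify() failed\n");
+ */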
+
+/**
+ * Perform search using specified algorithm for a matching ACL rule for
+ * each input data buffer.
+ * Each input data buffer can have up to *categories* matches.
+ * This implies that the results array should be big enough to hold
+ * (categories * num) elements.
+ * The categories parameter should be either one or a multiple of
+ * RTE_ACL_RESULTS_MULTIPLIER and can't be bigger than RTE_ACL_MAX_CATEGORIES.
+ * If more than one rule is applicable for a given input buffer and
+ * category, then the rule with the highest priority is returned as the match.
+ * Note that it is the caller's responsibility to ensure that the input
+ * parameters are valid and point to correct memory locations.
+ *
+ * @param ctx
+ * ACL context to search with.
+ * @param data
+ * Array of pointers to input data buffers to perform search.
+ * Note that all fields in input data buffers are supposed to be in network
+ * byte order (MSB).
+ * @param results
+ * Array of search results, *categories* results per each input data buffer.
+ * @param num
+ * Number of elements in the input data buffers array.
+ * @param categories
+ * Number of maximum possible matches for each input buffer, one possible
+ * match per category.
+ * @param alg
+ * Algorithm to be used for the search.
+ * It is the caller's responsibility to ensure that the value refers to an
+ * existing algorithm and that it can be run on the given CPU.
+ * @return
+ * zero on successful completion.
+ * -EINVAL for incorrect arguments.
+ */
+extern int
+rte_acl_classify_alg(const struct rte_acl_ctx *ctx,
+ const uint8_t **data,
+ uint32_t *results, uint32_t num,
+ uint32_t categories,
+ enum rte_acl_classify_alg alg);
+
+/**
+ * Override the default classifier function for a given ACL context.
+ * @param ctx
+ * ACL context to change classify function for.
+ * @param alg
+ * New default classify algorithm for given ACL context.
+ * It is the caller's responsibility to ensure that the value refers to an
+ * existing algorithm and that it can be run on the given CPU.
+ * @return
+ * - -EINVAL if the parameters are invalid.
+ * - Zero if operation completed successfully.
+ */
+extern int
+rte_acl_set_ctx_classify(struct rte_acl_ctx *ctx,
+ enum rte_acl_classify_alg alg);
+
+/**
+ * Dump an ACL context structure to the console.
+ *
+ * @param ctx
+ * ACL context to dump.
+ */
+void
+rte_acl_dump(const struct rte_acl_ctx *ctx);
+
+/**
+ * Dump all ACL context structures to the console.
+ */
+void
+rte_acl_list_dump(void);
+
+/**
+ * Legacy support for 7-tuple IPv4 and VLAN rule.
+ * This structure and corresponding API is deprecated.
+ */
+struct rte_acl_ipv4vlan_rule {
+ struct rte_acl_rule_data data; /**< Miscellaneous data for the rule. */
+ uint8_t proto; /**< IPv4 protocol ID. */
+ uint8_t proto_mask; /**< IPv4 protocol ID mask. */
+ uint16_t vlan; /**< VLAN ID. */
+ uint16_t vlan_mask; /**< VLAN ID mask. */
+ uint16_t domain; /**< VLAN domain. */
+ uint16_t domain_mask; /**< VLAN domain mask. */
+ uint32_t src_addr; /**< IPv4 source address. */
+ uint32_t src_mask_len; /**< IPv4 source address mask. */
+ uint32_t dst_addr; /**< IPv4 destination address. */
+ uint32_t dst_mask_len; /**< IPv4 destination address mask. */
+ uint16_t src_port_low; /**< L4 source port low. */
+ uint16_t src_port_high; /**< L4 source port high. */
+ uint16_t dst_port_low; /**< L4 destination port low. */
+ uint16_t dst_port_high; /**< L4 destination port high. */
+};
+
+/**
+ * Specifies fields layout inside rte_acl_rule for rte_acl_ipv4vlan_rule.
+ */
+enum {
+ RTE_ACL_IPV4VLAN_PROTO_FIELD,
+ RTE_ACL_IPV4VLAN_VLAN1_FIELD,
+ RTE_ACL_IPV4VLAN_VLAN2_FIELD,
+ RTE_ACL_IPV4VLAN_SRC_FIELD,
+ RTE_ACL_IPV4VLAN_DST_FIELD,
+ RTE_ACL_IPV4VLAN_SRCP_FIELD,
+ RTE_ACL_IPV4VLAN_DSTP_FIELD,
+ RTE_ACL_IPV4VLAN_NUM_FIELDS
+};
+
+/**
+ * Macro to define rule size for rte_acl_ipv4vlan_rule.
+ */
+#define RTE_ACL_IPV4VLAN_RULE_SZ \
+ RTE_ACL_RULE_SZ(RTE_ACL_IPV4VLAN_NUM_FIELDS)
+
+/*
+ * This effectively defines the order of IPV4VLAN classifications:
+ * - PROTO
+ * - VLAN (TAG and DOMAIN)
+ * - SRC IP ADDRESS
+ * - DST IP ADDRESS
+ * - PORTS (SRC and DST)
+ */
+enum {
+ RTE_ACL_IPV4VLAN_PROTO,
+ RTE_ACL_IPV4VLAN_VLAN,
+ RTE_ACL_IPV4VLAN_SRC,
+ RTE_ACL_IPV4VLAN_DST,
+ RTE_ACL_IPV4VLAN_PORTS,
+ RTE_ACL_IPV4VLAN_NUM
+};
+
+/**
+ * Add ipv4vlan rules to an existing ACL context.
+ * This function is not multi-thread safe.
+ *
+ * @param ctx
+ * ACL context to add patterns to.
+ * @param rules
+ * Array of rules to add to the ACL context.
+ * Note that all fields in rte_acl_ipv4vlan_rule structures are expected
+ * to be in host byte order.
+ * @param num
+ * Number of elements in the input array of rules.
+ * @return
+ * - -ENOMEM if there is no space in the ACL context for these rules.
+ * - -EINVAL if the parameters are invalid.
+ * - Zero if operation completed successfully.
+ */
+int
+rte_acl_ipv4vlan_add_rules(struct rte_acl_ctx *ctx,
+ const struct rte_acl_ipv4vlan_rule *rules,
+ uint32_t num);
+
+/**
+ * Analyze set of ipv4vlan rules and build required internal
+ * run-time structures.
+ * This function is not multi-thread safe.
+ *
+ * @param ctx
+ * ACL context to build.
+ * @param layout
+ * Layout of input data to search through.
+ * @param num_categories
+ * Maximum number of categories to use in that build.
+ * @return
+ * - -ENOMEM if couldn't allocate enough memory.
+ * - -EINVAL if the parameters are invalid.
+ * - Negative error code if operation failed.
+ * - Zero if operation completed successfully.
+ */
+int
+rte_acl_ipv4vlan_build(struct rte_acl_ctx *ctx,
+ const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
+ uint32_t num_categories);
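+
+/*
+ * Example (minimal sketch): the layout array gives the byte offset of each
+ * IPV4VLAN input group inside the application's search key. The offsets
+ * below are hypothetical and must match how the keys are actually built.
+ *
+ *	uint32_t layout[RTE_ACL_IPV4VLAN_NUM] = {
+ *		[RTE_ACL_IPV4VLAN_PROTO] = 0,
+ *		[RTE_ACL_IPV4VLAN_VLAN] = 4,
+ *		[RTE_ACL_IPV4VLAN_SRC] = 8,
+ *		[RTE_ACL_IPV4VLAN_DST] = 12,
+ *		[RTE_ACL_IPV4VLAN_PORTS] = 16,
+ *	};
+ *
+ *	if (rte_acl_ipv4vlan_build(acx, layout, 1) != 0)
+ *		rte_exit(EXIT_FAILURE, "rte_acl_ipv4vlan_build() failed\n");
+ */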
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ACL_H_ */
diff --git a/src/dpdk_lib18/librte_acl/rte_acl_osdep.h b/src/dpdk_lib18/librte_acl/rte_acl_osdep.h
new file mode 100755
index 00000000..046b22db
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/rte_acl_osdep.h
@@ -0,0 +1,92 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ACL_OSDEP_H_
+#define _RTE_ACL_OSDEP_H_
+
+/**
+ * @file
+ *
+ * RTE ACL DPDK/OS dependent file.
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <ctype.h>
+#include <string.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <sys/queue.h>
+
+/*
+ * Common defines.
+ */
+
+#define LEN2MASK(ln) ((uint32_t)(((uint64_t)1 << (ln)) - 1))
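+/* For example, LEN2MASK(3) evaluates to ((uint64_t)1 << 3) - 1 == 0x7,
+ * i.e. a mask with the three least significant bits set. */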
+
+#define DIM(x) RTE_DIM(x)
+
+/*
+ * To build ACL standalone.
+ */
+#ifdef RTE_LIBRTE_ACL_STANDALONE
+#include <rte_acl_osdep_alone.h>
+#else
+
+#include <rte_common.h>
+#include <rte_common_vect.h>
+#include <rte_memory.h>
+#include <rte_log.h>
+#include <rte_memcpy.h>
+#include <rte_prefetch.h>
+#include <rte_byteorder.h>
+#include <rte_branch_prediction.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+
+#endif /* RTE_LIBRTE_ACL_STANDALONE */
+
+#endif /* _RTE_ACL_OSDEP_H_ */
diff --git a/src/dpdk_lib18/librte_acl/rte_acl_osdep_alone.h b/src/dpdk_lib18/librte_acl/rte_acl_osdep_alone.h
new file mode 100755
index 00000000..a84b6f97
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/rte_acl_osdep_alone.h
@@ -0,0 +1,278 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ACL_OSDEP_ALONE_H_
+#define _RTE_ACL_OSDEP_ALONE_H_
+
+/**
+ * @file
+ *
+ * RTE ACL OS dependent file.
+ * An example of how to build/use the ACL library standalone
+ * (without the rest of DPDK).
+ * Don't include this file on its own; use <rte_acl_osdep.h>.
+ */
+
+#if (defined(__ICC) || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+
+#ifdef __SSE__
+#include <xmmintrin.h>
+#endif
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+
+#if defined(__SSE4_2__) || defined(__SSE4_1__)
+#include <smmintrin.h>
+#endif
+
+#else
+
+#include <x86intrin.h>
+
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define DUMMY_MACRO do {} while (0)
+
+/*
+ * rte_common related.
+ */
+#define __rte_unused __attribute__((__unused__))
+
+#define RTE_PTR_ADD(ptr, x) ((typeof(ptr))((uintptr_t)(ptr) + (x)))
+
+#define RTE_PTR_ALIGN_FLOOR(ptr, align) \
+ (typeof(ptr))((uintptr_t)(ptr) & ~((uintptr_t)(align) - 1))
+
+#define RTE_PTR_ALIGN_CEIL(ptr, align) \
+ RTE_PTR_ALIGN_FLOOR(RTE_PTR_ADD(ptr, (align) - 1), align)
+
+#define RTE_PTR_ALIGN(ptr, align) RTE_PTR_ALIGN_CEIL(ptr, align)
+
+#define RTE_ALIGN_FLOOR(val, align) \
+ (typeof(val))((val) & (~((typeof(val))((align) - 1))))
+
+#define RTE_ALIGN_CEIL(val, align) \
+ RTE_ALIGN_FLOOR(((val) + ((typeof(val))(align) - 1)), align)
+
+#define RTE_ALIGN(ptr, align) RTE_ALIGN_CEIL(ptr, align)
+
+#define RTE_MIN(a, b) ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ _a < _b ? _a : _b; \
+ })
+
+#define RTE_DIM(a) (sizeof(a) / sizeof((a)[0]))
+
+/**
+ * Searches the input parameter for the least significant set bit
+ * (starting from zero).
+ * If a least significant 1 bit is found, its bit index is returned.
+ * If the content of the input parameter is zero, then the content of the return
+ * value is undefined.
+ * @param v
+ * input parameter, should not be zero.
+ * @return
+ * least significant set bit in the input parameter.
+ */
+static inline uint32_t
+rte_bsf32(uint32_t v)
+{
+ asm("bsf %1,%0"
+ : "=r" (v)
+ : "rm" (v));
+ return v;
+}
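+
+/*
+ * For example, rte_bsf32(0x08) returns 3 and rte_bsf32(0x01) returns 0;
+ * the result for rte_bsf32(0) is undefined, as noted above.
+ */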
+
+/*
+ * rte_common_vect related.
+ */
+typedef __m128i xmm_t;
+
+#define XMM_SIZE (sizeof(xmm_t))
+#define XMM_MASK (XMM_SIZE - 1)
+
+typedef union rte_mmsse {
+ xmm_t m;
+ uint8_t u8[XMM_SIZE / sizeof(uint8_t)];
+ uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
+ uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
+ uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
+ double pd[XMM_SIZE / sizeof(double)];
+} rte_xmm_t;
+
+/*
+ * rte_cycles related.
+ */
+static inline uint64_t
+rte_rdtsc(void)
+{
+ union {
+ uint64_t tsc_64;
+ struct {
+ uint32_t lo_32;
+ uint32_t hi_32;
+ };
+ } tsc;
+
+ asm volatile("rdtsc" :
+ "=a" (tsc.lo_32),
+ "=d" (tsc.hi_32));
+ return tsc.tsc_64;
+}
+
+/*
+ * rte_lcore related.
+ */
+#define rte_lcore_id() (0)
+
+/*
+ * rte_errno related.
+ */
+#define rte_errno errno
+#define E_RTE_NO_TAILQ (-1)
+
+/*
+ * rte_rwlock related.
+ */
+#define rte_rwlock_read_lock(x) DUMMY_MACRO
+#define rte_rwlock_read_unlock(x) DUMMY_MACRO
+#define rte_rwlock_write_lock(x) DUMMY_MACRO
+#define rte_rwlock_write_unlock(x) DUMMY_MACRO
+
+/*
+ * rte_memory related.
+ */
+#define SOCKET_ID_ANY -1 /**< Any NUMA socket. */
+#define RTE_CACHE_LINE_SIZE 64 /**< Cache line size. */
+#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) /**< Cache line mask. */
+
+/**
+ * Force alignment to cache line.
+ */
+#define __rte_cache_aligned __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)))
+
+
+/*
+ * rte_byteorder related.
+ */
+#define rte_le_to_cpu_16(x) (x)
+#define rte_le_to_cpu_32(x) (x)
+
+#define rte_cpu_to_be_16(x) \
+ (((x) & UINT8_MAX) << CHAR_BIT | ((x) >> CHAR_BIT & UINT8_MAX))
+#define rte_cpu_to_be_32(x) __builtin_bswap32(x)
+
+/*
+ * rte_branch_prediction related.
+ */
+#ifndef likely
+#define likely(x) __builtin_expect((x), 1)
+#endif /* likely */
+
+#ifndef unlikely
+#define unlikely(x) __builtin_expect((x), 0)
+#endif /* unlikely */
+
+
+/*
+ * rte_tailq related.
+ */
+static inline void *
+rte_dummy_tailq(void)
+{
+ static __thread TAILQ_HEAD(rte_dummy_head, rte_dummy) dummy_head;
+ TAILQ_INIT(&dummy_head);
+ return &dummy_head;
+}
+
+#define RTE_TAILQ_LOOKUP_BY_IDX(idx, struct_name) rte_dummy_tailq()
+
+#define RTE_EAL_TAILQ_REMOVE(idx, type, elm) DUMMY_MACRO
+
+/*
+ * rte_string related
+ */
+#define snprintf(str, len, frmt, args...) snprintf(str, len, frmt, ##args)
+
+/*
+ * rte_log related
+ */
+#define RTE_LOG(l, t, fmt, args...) printf(fmt, ##args)
+
+/*
+ * rte_malloc related
+ */
+#define rte_free(x) free(x)
+
+static inline void *
+rte_zmalloc_socket(__rte_unused const char *type, size_t size, unsigned align,
+ __rte_unused int socket)
+{
+ void *ptr;
+ int rc;
+
+ rc = posix_memalign(&ptr, align, size);
+ if (rc != 0) {
+ rte_errno = rc;
+ return NULL;
+ }
+
+ memset(ptr, 0, size);
+ return ptr;
+}
+
+/*
+ * rte_debug related
+ */
+#define rte_panic(fmt, args...) do { \
+ RTE_LOG(CRIT, EAL, fmt, ##args); \
+ abort(); \
+} while (0)
+
+#define rte_exit(err, fmt, args...) do { \
+ RTE_LOG(CRIT, EAL, fmt, ##args); \
+ exit(err); \
+} while (0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ACL_OSDEP_ALONE_H_ */
diff --git a/src/dpdk_lib18/librte_acl/tb_mem.c b/src/dpdk_lib18/librte_acl/tb_mem.c
new file mode 100755
index 00000000..fdf3080b
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/tb_mem.c
@@ -0,0 +1,104 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tb_mem.h"
+
+/*
+ * Memory management routines for temporary memory.
+ * That memory is used only during build phase and is released after
+ * build is finished.
+ */
+
+static struct tb_mem_block *
+tb_pool(struct tb_mem_pool *pool, size_t sz)
+{
+ struct tb_mem_block *block;
+ uint8_t *ptr;
+ size_t size;
+
+ size = sz + pool->alignment - 1;
+ block = calloc(1, size + sizeof(*pool->block));
+ if (block == NULL) {
+ RTE_LOG(ERR, MALLOC, "%s(%zu) failed, currently allocated "
+ "by pool: %zu bytes\n", __func__, sz, pool->alloc);
+ return NULL;
+ }
+
+ block->pool = pool;
+
+ block->next = pool->block;
+ pool->block = block;
+
+ pool->alloc += size;
+
+ ptr = (uint8_t *)(block + 1);
+ block->mem = RTE_PTR_ALIGN_CEIL(ptr, pool->alignment);
+ block->size = size - (block->mem - ptr);
+
+ return block;
+}
+
+void *
+tb_alloc(struct tb_mem_pool *pool, size_t size)
+{
+ struct tb_mem_block *block;
+ void *ptr;
+ size_t new_sz;
+
+ size = RTE_ALIGN_CEIL(size, pool->alignment);
+
+ block = pool->block;
+ if (block == NULL || block->size < size) {
+ new_sz = (size > pool->min_alloc) ? size : pool->min_alloc;
+ block = tb_pool(pool, new_sz);
+ if (block == NULL)
+ return NULL;
+ }
+ ptr = block->mem;
+ block->size -= size;
+ block->mem += size;
+ return ptr;
+}
+
+void
+tb_free_pool(struct tb_mem_pool *pool)
+{
+ struct tb_mem_block *next, *block;
+
+ for (block = pool->block; block != NULL; block = next) {
+ next = block->next;
+ free(block);
+ }
+ pool->block = NULL;
+ pool->alloc = 0;
+}
diff --git a/src/dpdk_lib18/librte_acl/tb_mem.h b/src/dpdk_lib18/librte_acl/tb_mem.h
new file mode 100755
index 00000000..a8dae94d
--- /dev/null
+++ b/src/dpdk_lib18/librte_acl/tb_mem.h
@@ -0,0 +1,73 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TB_MEM_H_
+#define _TB_MEM_H_
+
+/**
+ * @file
+ *
+ * RTE ACL temporary (build phase) memory management.
+ * Contains structures and functions to manage temporary (used by build only)
+ * memory. Memory is allocated in large blocks to speed up freeing when the
+ * trie is destroyed (at the end of the build phase).
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_acl_osdep.h>
+
+struct tb_mem_block {
+ struct tb_mem_block *next;
+ struct tb_mem_pool *pool;
+ size_t size;
+ uint8_t *mem;
+};
+
+struct tb_mem_pool {
+ struct tb_mem_block *block;
+ size_t alignment;
+ size_t min_alloc;
+ size_t alloc;
+};
+
+void *tb_alloc(struct tb_mem_pool *pool, size_t size);
+void tb_free_pool(struct tb_mem_pool *pool);
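+
+/*
+ * Example (minimal sketch): a pool is prepared by setting its alignment and
+ * minimum block size (the values below are hypothetical), memory is taken
+ * from it during the build phase, and everything is released at once with
+ * tb_free_pool():
+ *
+ *	struct tb_mem_pool pool = {
+ *		.alignment = 32,
+ *		.min_alloc = 4096,
+ *	};
+ *
+ *	uint32_t *tmp = tb_alloc(&pool, 100 * sizeof(*tmp));
+ *	...
+ *	tb_free_pool(&pool);
+ */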
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _TB_MEM_H_ */
diff --git a/src/dpdk_lib18/librte_cfgfile/Makefile b/src/dpdk_lib18/librte_cfgfile/Makefile
new file mode 100755
index 00000000..55e87015
--- /dev/null
+++ b/src/dpdk_lib18/librte_cfgfile/Makefile
@@ -0,0 +1,53 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_cfgfile.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_CFGFILE) += rte_cfgfile.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_CFGFILE)-include += rte_cfgfile.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_cfgfile/rte_cfgfile.c b/src/dpdk_lib18/librte_cfgfile/rte_cfgfile.c
new file mode 100755
index 00000000..b81c2738
--- /dev/null
+++ b/src/dpdk_lib18/librte_cfgfile/rte_cfgfile.c
@@ -0,0 +1,356 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <rte_string_fns.h>
+
+#include "rte_cfgfile.h"
+
+struct rte_cfgfile_section {
+ char name[CFG_NAME_LEN];
+ int num_entries;
+ struct rte_cfgfile_entry *entries[0];
+};
+
+struct rte_cfgfile {
+ int flags;
+ int num_sections;
+ struct rte_cfgfile_section *sections[0];
+};
+
+/** when we resize a file structure, how many extra entries
+ * for new sections do we add in */
+#define CFG_ALLOC_SECTION_BATCH 8
+/** when we resize a section structure, how many extra entries
+ * for new entries do we add in */
+#define CFG_ALLOC_ENTRY_BATCH 16
+
+static unsigned
+_strip(char *str, unsigned len)
+{
+ int newlen = len;
+ if (len == 0)
+ return 0;
+
+ if (isspace(str[len-1])) {
+ /* strip trailing whitespace */
+ while (newlen > 0 && isspace(str[newlen - 1]))
+ str[--newlen] = '\0';
+ }
+
+ if (isspace(str[0])) {
+ /* strip leading whitespace */
+ int i, start = 1;
+ while (isspace(str[start]) && start < newlen)
+ start++; /* skip leading whitespace */
+ newlen -= start;
+ for (i = 0; i < newlen; i++)
+ str[i] = str[i+start];
+ str[i] = '\0';
+ }
+ return newlen;
+}
+
+struct rte_cfgfile *
+rte_cfgfile_load(const char *filename, int flags)
+{
+ int allocated_sections = CFG_ALLOC_SECTION_BATCH;
+ int allocated_entries = 0;
+ int curr_section = -1;
+ int curr_entry = -1;
+ char buffer[256];
+ int lineno = 0;
+ struct rte_cfgfile *cfg = NULL;
+
+ FILE *f = fopen(filename, "r");
+ if (f == NULL)
+ return NULL;
+
+ cfg = malloc(sizeof(*cfg) + sizeof(cfg->sections[0]) *
+ allocated_sections);
+ if (cfg == NULL)
+ goto error2;
+
+ memset(cfg->sections, 0, sizeof(cfg->sections[0]) * allocated_sections);
+
+ while (fgets(buffer, sizeof(buffer), f) != NULL) {
+ char *pos = NULL;
+ size_t len = strnlen(buffer, sizeof(buffer));
+ lineno++;
+ if ((len >= sizeof(buffer) - 1) && (buffer[len-1] != '\n')) {
+ printf("Error line %d - no \\n found on string. "
+ "Check if line too long\n", lineno);
+ goto error1;
+ }
+ pos = memchr(buffer, ';', sizeof(buffer));
+ if (pos != NULL) {
+ *pos = '\0';
+ len = pos - buffer;
+ }
+
+ len = _strip(buffer, len);
+ if (buffer[0] != '[' && memchr(buffer, '=', len) == NULL)
+ continue;
+
+ if (buffer[0] == '[') {
+ /* section heading line */
+ char *end = memchr(buffer, ']', len);
+ if (end == NULL) {
+ printf("Error line %d - no terminating '['"
+ "character found\n", lineno);
+ goto error1;
+ }
+ *end = '\0';
+ _strip(&buffer[1], end - &buffer[1]);
+
+ /* close off old section and add start new one */
+ if (curr_section >= 0)
+ cfg->sections[curr_section]->num_entries =
+ curr_entry + 1;
+ curr_section++;
+
+ /* resize overall struct if we don't have room for more
+ sections */
+ if (curr_section == allocated_sections) {
+ allocated_sections += CFG_ALLOC_SECTION_BATCH;
+ struct rte_cfgfile *n_cfg = realloc(cfg,
+ sizeof(*cfg) + sizeof(cfg->sections[0])
+ * allocated_sections);
+ if (n_cfg == NULL) {
+ printf("Error - no more memory\n");
+ goto error1;
+ }
+ cfg = n_cfg;
+ }
+
+ /* allocate space for new section */
+ allocated_entries = CFG_ALLOC_ENTRY_BATCH;
+ curr_entry = -1;
+ cfg->sections[curr_section] = malloc(
+ sizeof(*cfg->sections[0]) +
+ sizeof(cfg->sections[0]->entries[0]) *
+ allocated_entries);
+ if (cfg->sections[curr_section] == NULL) {
+ printf("Error - no more memory\n");
+ goto error1;
+ }
+
+ snprintf(cfg->sections[curr_section]->name,
+ sizeof(cfg->sections[0]->name),
+ "%s", &buffer[1]);
+ } else {
+ /* value line */
+ if (curr_section < 0) {
+ printf("Error line %d - value outside of"
+ "section\n", lineno);
+ goto error1;
+ }
+
+ struct rte_cfgfile_section *sect =
+ cfg->sections[curr_section];
+ char *split[2];
+ if (rte_strsplit(buffer, sizeof(buffer), split, 2, '=')
+ != 2) {
+ printf("Error at line %d - cannot split "
+ "string\n", lineno);
+ goto error1;
+ }
+
+ curr_entry++;
+ if (curr_entry == allocated_entries) {
+ allocated_entries += CFG_ALLOC_ENTRY_BATCH;
+ struct rte_cfgfile_section *n_sect = realloc(
+ sect, sizeof(*sect) +
+ sizeof(sect->entries[0]) *
+ allocated_entries);
+ if (n_sect == NULL) {
+ printf("Error - no more memory\n");
+ goto error1;
+ }
+ sect = cfg->sections[curr_section] = n_sect;
+ }
+
+ sect->entries[curr_entry] = malloc(
+ sizeof(*sect->entries[0]));
+ if (sect->entries[curr_entry] == NULL) {
+ printf("Error - no more memory\n");
+ goto error1;
+ }
+
+ struct rte_cfgfile_entry *entry = sect->entries[
+ curr_entry];
+ snprintf(entry->name, sizeof(entry->name), "%s",
+ split[0]);
+ snprintf(entry->value, sizeof(entry->value), "%s",
+ split[1]);
+ _strip(entry->name, strnlen(entry->name,
+ sizeof(entry->name)));
+ _strip(entry->value, strnlen(entry->value,
+ sizeof(entry->value)));
+ }
+ }
+ fclose(f);
+ cfg->flags = flags;
+ cfg->num_sections = curr_section + 1;
+ /* curr_section will still be -1 if we have an empty file */
+ if (curr_section >= 0)
+ cfg->sections[curr_section]->num_entries = curr_entry + 1;
+ return cfg;
+
+error1:
+ rte_cfgfile_close(cfg);
+error2:
+ fclose(f);
+ return NULL;
+}
+
+
+int rte_cfgfile_close(struct rte_cfgfile *cfg)
+{
+ int i, j;
+
+ if (cfg == NULL)
+ return -1;
+
+ for (i = 0; i < cfg->num_sections; i++) {
+ if (cfg->sections[i] != NULL) {
+ if (cfg->sections[i]->num_entries) {
+ for (j = 0; j < cfg->sections[i]->num_entries;
+ j++) {
+ if (cfg->sections[i]->entries[j] !=
+ NULL)
+ free(cfg->sections[i]->
+ entries[j]);
+ }
+ }
+ free(cfg->sections[i]);
+ }
+ }
+ free(cfg);
+
+ return 0;
+}
+
+int
+rte_cfgfile_num_sections(struct rte_cfgfile *cfg, const char *sectionname,
+size_t length)
+{
+ int i;
+ int num_sections = 0;
+ for (i = 0; i < cfg->num_sections; i++) {
+ if (strncmp(cfg->sections[i]->name, sectionname, length) == 0)
+ num_sections++;
+ }
+ return num_sections;
+}
+
+int
+rte_cfgfile_sections(struct rte_cfgfile *cfg, char *sections[],
+ int max_sections)
+{
+ int i;
+
+ for (i = 0; i < cfg->num_sections && i < max_sections; i++)
+ snprintf(sections[i], CFG_NAME_LEN, "%s",
+ cfg->sections[i]->name);
+
+ return i;
+}
+
+static const struct rte_cfgfile_section *
+_get_section(struct rte_cfgfile *cfg, const char *sectionname)
+{
+ int i;
+ for (i = 0; i < cfg->num_sections; i++) {
+ if (strncmp(cfg->sections[i]->name, sectionname,
+ sizeof(cfg->sections[0]->name)) == 0)
+ return cfg->sections[i];
+ }
+ return NULL;
+}
+
+int
+rte_cfgfile_has_section(struct rte_cfgfile *cfg, const char *sectionname)
+{
+ return (_get_section(cfg, sectionname) != NULL);
+}
+
+int
+rte_cfgfile_section_num_entries(struct rte_cfgfile *cfg,
+ const char *sectionname)
+{
+ const struct rte_cfgfile_section *s = _get_section(cfg, sectionname);
+ if (s == NULL)
+ return -1;
+ return s->num_entries;
+}
+
+
+int
+rte_cfgfile_section_entries(struct rte_cfgfile *cfg, const char *sectionname,
+ struct rte_cfgfile_entry *entries, int max_entries)
+{
+ int i;
+ const struct rte_cfgfile_section *sect = _get_section(cfg, sectionname);
+ if (sect == NULL)
+ return -1;
+ for (i = 0; i < max_entries && i < sect->num_entries; i++)
+ entries[i] = *sect->entries[i];
+ return i;
+}
+
+const char *
+rte_cfgfile_get_entry(struct rte_cfgfile *cfg, const char *sectionname,
+ const char *entryname)
+{
+ int i;
+ const struct rte_cfgfile_section *sect = _get_section(cfg, sectionname);
+ if (sect == NULL)
+ return NULL;
+ for (i = 0; i < sect->num_entries; i++)
+ if (strncmp(sect->entries[i]->name, entryname, CFG_NAME_LEN)
+ == 0)
+ return sect->entries[i]->value;
+ return NULL;
+}
+
+int
+rte_cfgfile_has_entry(struct rte_cfgfile *cfg, const char *sectionname,
+ const char *entryname)
+{
+ return (rte_cfgfile_get_entry(cfg, sectionname, entryname) != NULL);
+}
diff --git a/src/dpdk_lib18/librte_cfgfile/rte_cfgfile.h b/src/dpdk_lib18/librte_cfgfile/rte_cfgfile.h
new file mode 100755
index 00000000..7c9fc916
--- /dev/null
+++ b/src/dpdk_lib18/librte_cfgfile/rte_cfgfile.h
@@ -0,0 +1,195 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_CFGFILE_H__
+#define __INCLUDE_RTE_CFGFILE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+* @file
+* RTE Configuration File
+*
+* This library allows reading application-defined parameters from a
+* standard-format configuration file.
+*
+*/
+
+#define CFG_NAME_LEN 32
+#define CFG_VALUE_LEN 64
+
+/** Configuration file */
+struct rte_cfgfile;
+
+/** Configuration file entry */
+struct rte_cfgfile_entry {
+ char name[CFG_NAME_LEN]; /**< Name */
+ char value[CFG_VALUE_LEN]; /**< Value */
+};
+
+/**
+* Open config file
+*
+* @param filename
+* Config file name
+* @param flags
+* Config file flags. Reserved for future use; must be set to 0.
+* @return
+* Handle to configuration file
+*/
+struct rte_cfgfile *rte_cfgfile_load(const char *filename, int flags);
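+
+/*
+* Example (minimal sketch): load a file, read one entry and close the
+* handle. The file name, section name and entry name are hypothetical.
+*
+*	struct rte_cfgfile *cfg = rte_cfgfile_load("app.cfg", 0);
+*	if (cfg != NULL) {
+*		const char *mac = NULL;
+*		if (rte_cfgfile_has_section(cfg, "port0"))
+*			mac = rte_cfgfile_get_entry(cfg, "port0", "mac");
+*		if (mac != NULL)
+*			printf("port0 mac: %s\n", mac);
+*		rte_cfgfile_close(cfg);
+*	}
+*/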
+
+/**
+* Get number of sections in config file
+*
+* @param cfg
+* Config file
+* @param sec_name
+* Section name prefix to match
+* @param length
+* Number of characters of sec_name to compare
+* @return
+* Number of sections whose names match the given prefix
+*/
+int rte_cfgfile_num_sections(struct rte_cfgfile *cfg, const char *sec_name,
+ size_t length);
+
+/**
+* Get name of all config file sections.
+*
+* Fills in the array sections with the name of all the sections in the file
+* (up to the number of max_sections sections).
+*
+* @param cfg
+* Config file
+* @param sections
+* Array containing section names after successful invocation. Each element
+* of this array should be preallocated by the user with at least
+* CFG_NAME_LEN characters.
+* @param max_sections
+* Maximum number of section names to be stored in sections array
+* @return
+* Number of section names copied into the sections array
+*/
+int rte_cfgfile_sections(struct rte_cfgfile *cfg, char *sections[],
+ int max_sections);
+
+/**
+* Check if given section exists in config file
+*
+* @param cfg
+* Config file
+* @param sectionname
+* Section name
+* @return
+* TRUE (value different than 0) if section exists, FALSE (value 0) otherwise
+*/
+int rte_cfgfile_has_section(struct rte_cfgfile *cfg, const char *sectionname);
+
+/**
+* Get number of entries in given config file section
+*
+* @param cfg
+* Config file
+* @param sectionname
+* Section name
+* @return
+* Number of entries in section
+*/
+int rte_cfgfile_section_num_entries(struct rte_cfgfile *cfg,
+ const char *sectionname);
+
+/** Get section entries as key-value pairs
+*
+* @param cfg
+* Config file
+* @param sectionname
+* Section name
+* @param entries
+* Pre-allocated array of at least max_entries entries where the section
+* entries are stored as key-value pair after successful invocation
+* @param max_entries
+* Maximum number of section entries to be stored in entries array
+* @return
+* Number of entries copied into the entries array, or -1 if the section
+* is not found
+*/
+int rte_cfgfile_section_entries(struct rte_cfgfile *cfg,
+ const char *sectionname,
+ struct rte_cfgfile_entry *entries,
+ int max_entries);
+
+/** Get value of the named entry in named config file section
+*
+* @param cfg
+* Config file
+* @param sectionname
+* Section name
+* @param entryname
+* Entry name
+* @return
+* Entry value
+*/
+const char *rte_cfgfile_get_entry(struct rte_cfgfile *cfg,
+ const char *sectionname,
+ const char *entryname);
+
+/** Check if given entry exists in named config file section
+*
+* @param cfg
+* Config file
+* @param sectionname
+* Section name
+* @param entryname
+* Entry name
+* @return
+* TRUE (value different than 0) if entry exists, FALSE (value 0) otherwise
+*/
+int rte_cfgfile_has_entry(struct rte_cfgfile *cfg, const char *sectionname,
+ const char *entryname);
+
+/** Close config file
+*
+* @param cfg
+* Config file
+* @return
+* 0 on success, error code otherwise
+*/
+int rte_cfgfile_close(struct rte_cfgfile *cfg);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_cmdline/Makefile b/src/dpdk_lib18/librte_cmdline/Makefile
new file mode 100755
index 00000000..7eae4493
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/Makefile
@@ -0,0 +1,63 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_cmdline.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) := cmdline.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_cirbuf.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_etheraddr.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_ipaddr.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_num.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_string.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_rdline.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_vt100.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_socket.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_portlist.c
+
+CFLAGS += -D_GNU_SOURCE
+
+# install includes
+INCS := cmdline.h cmdline_parse.h cmdline_parse_num.h cmdline_parse_ipaddr.h
+INCS += cmdline_parse_etheraddr.h cmdline_parse_string.h cmdline_rdline.h
+INCS += cmdline_vt100.h cmdline_socket.h cmdline_cirbuf.h cmdline_parse_portlist.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_CMDLINE)-include := $(INCS)
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline.c b/src/dpdk_lib18/librte_cmdline/cmdline.c
new file mode 100755
index 00000000..e61c4f2c
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline.c
@@ -0,0 +1,264 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <termios.h>
+#include <netinet/in.h>
+
+#include <rte_string_fns.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_rdline.h"
+#include "cmdline.h"
+
+static void
+cmdline_valid_buffer(struct rdline *rdl, const char *buf,
+ __attribute__((unused)) unsigned int size)
+{
+ struct cmdline *cl = rdl->opaque;
+ int ret;
+ ret = cmdline_parse(cl, buf);
+ if (ret == CMDLINE_PARSE_AMBIGUOUS)
+ cmdline_printf(cl, "Ambiguous command\n");
+ else if (ret == CMDLINE_PARSE_NOMATCH)
+ cmdline_printf(cl, "Command not found\n");
+ else if (ret == CMDLINE_PARSE_BAD_ARGS)
+ cmdline_printf(cl, "Bad arguments\n");
+}
+
+static int
+cmdline_complete_buffer(struct rdline *rdl, const char *buf,
+ char *dstbuf, unsigned int dstsize,
+ int *state)
+{
+ struct cmdline *cl = rdl->opaque;
+ return cmdline_complete(cl, buf, state, dstbuf, dstsize);
+}
+
+int
+cmdline_write_char(struct rdline *rdl, char c)
+{
+ int ret = -1;
+ struct cmdline *cl;
+
+ if (!rdl)
+ return -1;
+
+ cl = rdl->opaque;
+
+ if (cl->s_out >= 0)
+ ret = write(cl->s_out, &c, 1);
+
+ return ret;
+}
+
+
+void
+cmdline_set_prompt(struct cmdline *cl, const char *prompt)
+{
+ if (!cl || !prompt)
+ return;
+ snprintf(cl->prompt, sizeof(cl->prompt), "%s", prompt);
+}
+
+struct cmdline *
+cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out)
+{
+ struct cmdline *cl;
+
+ if (!ctx || !prompt)
+ return NULL;
+
+ cl = malloc(sizeof(struct cmdline));
+ if (cl == NULL)
+ return NULL;
+ memset(cl, 0, sizeof(struct cmdline));
+ cl->s_in = s_in;
+ cl->s_out = s_out;
+ cl->ctx = ctx;
+
+ rdline_init(&cl->rdl, cmdline_write_char,
+ cmdline_valid_buffer, cmdline_complete_buffer);
+ cl->rdl.opaque = cl;
+ cmdline_set_prompt(cl, prompt);
+ rdline_newline(&cl->rdl, cl->prompt);
+
+ return cl;
+}
+
+void
+cmdline_free(struct cmdline *cl)
+{
+ dprintf("called\n");
+
+ if (!cl)
+ return;
+
+ if (cl->s_in > 2)
+ close(cl->s_in);
+ if (cl->s_out != cl->s_in && cl->s_out > 2)
+ close(cl->s_out);
+ free(cl);
+}
+
+void
+cmdline_printf(const struct cmdline *cl, const char *fmt, ...)
+{
+ va_list ap;
+
+ if (!cl || !fmt)
+ return;
+
+#ifdef _GNU_SOURCE
+ if (cl->s_out < 0)
+ return;
+ va_start(ap, fmt);
+ vdprintf(cl->s_out, fmt, ap);
+ va_end(ap);
+#else
+ int ret;
+ char *buf;
+
+ if (cl->s_out < 0)
+ return;
+
+ buf = malloc(BUFSIZ);
+ if (buf == NULL)
+ return;
+ va_start(ap, fmt);
+ ret = vsnprintf(buf, BUFSIZ, fmt, ap);
+ va_end(ap);
+ if (ret < 0) {
+ free(buf);
+ return;
+ }
+ if (ret >= BUFSIZ)
+ ret = BUFSIZ - 1;
+ write(cl->s_out, buf, ret);
+ free(buf);
+#endif
+}
+
+int
+cmdline_in(struct cmdline *cl, const char *buf, int size)
+{
+ const char *history, *buffer;
+ size_t histlen, buflen;
+ int ret = 0;
+ int i, same;
+
+ if (!cl || !buf)
+ return -1;
+
+ for (i=0; i<size; i++) {
+ ret = rdline_char_in(&cl->rdl, buf[i]);
+
+ if (ret == RDLINE_RES_VALIDATED) {
+ buffer = rdline_get_buffer(&cl->rdl);
+ history = rdline_get_history_item(&cl->rdl, 0);
+ if (history) {
+ histlen = strnlen(history, RDLINE_BUF_SIZE);
+ same = !memcmp(buffer, history, histlen) &&
+ buffer[histlen] == '\n';
+ }
+ else
+ same = 0;
+ buflen = strnlen(buffer, RDLINE_BUF_SIZE);
+ if (buflen > 1 && !same)
+ rdline_add_history(&cl->rdl, buffer);
+ rdline_newline(&cl->rdl, cl->prompt);
+ }
+ else if (ret == RDLINE_RES_EOF)
+ return -1;
+ else if (ret == RDLINE_RES_EXITED)
+ return -1;
+ }
+ return i;
+}
+
+void
+cmdline_quit(struct cmdline *cl)
+{
+ if (!cl)
+ return;
+ rdline_quit(&cl->rdl);
+}
+
+void
+cmdline_interact(struct cmdline *cl)
+{
+ char c;
+
+ if (!cl)
+ return;
+
+ c = -1;
+ while (1) {
+ if (read(cl->s_in, &c, 1) <= 0)
+ break;
+ if (cmdline_in(cl, &c, 1) < 0)
+ break;
+ }
+}
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline.h b/src/dpdk_lib18/librte_cmdline/cmdline.h
new file mode 100755
index 00000000..06ae0866
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline.h
@@ -0,0 +1,91 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CMDLINE_H_
+#define _CMDLINE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct cmdline {
+ int s_in;
+ int s_out;
+ cmdline_parse_ctx_t *ctx;
+ struct rdline rdl;
+ char prompt[RDLINE_PROMPT_SIZE];
+ struct termios oldterm;
+};
+
+struct cmdline *cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out);
+void cmdline_set_prompt(struct cmdline *cl, const char *prompt);
+void cmdline_free(struct cmdline *cl);
+void cmdline_printf(const struct cmdline *cl, const char *fmt, ...)
+ __attribute__((format(printf,2,3)));
+int cmdline_in(struct cmdline *cl, const char *buf, int size);
+int cmdline_write_char(struct rdline *rdl, char c);
+void cmdline_interact(struct cmdline *cl);
+void cmdline_quit(struct cmdline *cl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CMDLINE_H_ */
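As a reading aid (not part of the patch): a minimal sketch of how the API declared in cmdline.h is typically driven over stdin/stdout. The parse context main_ctx and the prompt string are assumptions supplied by the caller; cmdline_rdline.h belongs to the same library and is added elsewhere in this patch.

    #include <unistd.h>
    #include <termios.h>
    #include "cmdline_rdline.h"
    #include "cmdline_parse.h"
    #include "cmdline.h"

    extern cmdline_parse_ctx_t main_ctx[];   /* assumed: user-defined command list */

    static void run_shell(void)
    {
            struct cmdline *cl;

            /* attach the command list to stdin/stdout with a prompt */
            cl = cmdline_new(main_ctx, "example> ", STDIN_FILENO, STDOUT_FILENO);
            if (cl == NULL)
                    return;
            cmdline_interact(cl);   /* reads chars and feeds them to cmdline_in() */
            cmdline_quit(cl);
            cmdline_free(cl);
    }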
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.c b/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.c
new file mode 100755
index 00000000..b9f9f4bc
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.c
@@ -0,0 +1,467 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <errno.h>
+#include <stdio.h>
+
+#include "cmdline_cirbuf.h"
+
+
+int
+cirbuf_init(struct cirbuf *cbuf, char *buf, unsigned int start, unsigned int maxlen)
+{
+ if (!cbuf || !buf)
+ return -EINVAL;
+ cbuf->maxlen = maxlen;
+ cbuf->len = 0;
+ cbuf->start = start;
+ cbuf->end = start;
+ cbuf->buf = buf;
+ return 0;
+}
+
+/* multiple add */
+
+int
+cirbuf_add_buf_head(struct cirbuf *cbuf, const char *c, unsigned int n)
+{
+ unsigned int e;
+
+ if (!cbuf || !c || !n || n > CIRBUF_GET_FREELEN(cbuf))
+ return -EINVAL;
+
+ e = CIRBUF_IS_EMPTY(cbuf) ? 1 : 0;
+
+ if (n < cbuf->start + e) {
+ dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->start - n + e, n);
+ memcpy(cbuf->buf + cbuf->start - n + e, c, n);
+ }
+ else {
+ dprintf("s[%d] -> d[%d] (%d)\n", + n - (cbuf->start + e), 0,
+ cbuf->start + e);
+ dprintf("s[%d] -> d[%d] (%d)\n", cbuf->maxlen - n +
+ (cbuf->start + e), 0, n - (cbuf->start + e));
+ memcpy(cbuf->buf, c + n - (cbuf->start + e) , cbuf->start + e);
+ memcpy(cbuf->buf + cbuf->maxlen - n + (cbuf->start + e), c,
+ n - (cbuf->start + e));
+ }
+ cbuf->len += n;
+ cbuf->start += (cbuf->maxlen - n + e);
+ cbuf->start %= cbuf->maxlen;
+ return n;
+}
+
+/* multiple add */
+
+int
+cirbuf_add_buf_tail(struct cirbuf *cbuf, const char *c, unsigned int n)
+{
+ unsigned int e;
+
+ if (!cbuf || !c || !n || n > CIRBUF_GET_FREELEN(cbuf))
+ return -EINVAL;
+
+ e = CIRBUF_IS_EMPTY(cbuf) ? 1 : 0;
+
+ if (n < cbuf->maxlen - cbuf->end - 1 + e) {
+ dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->end + !e, n);
+ memcpy(cbuf->buf + cbuf->end + !e, c, n);
+ }
+ else {
+ dprintf("s[%d] -> d[%d] (%d)\n", cbuf->end + !e, 0,
+ cbuf->maxlen - cbuf->end - 1 + e);
+ dprintf("s[%d] -> d[%d] (%d)\n", cbuf->maxlen - cbuf->end - 1 +
+ e, 0, n - cbuf->maxlen + cbuf->end + 1 - e);
+ memcpy(cbuf->buf + cbuf->end + !e, c, cbuf->maxlen -
+ cbuf->end - 1 + e);
+ memcpy(cbuf->buf, c + cbuf->maxlen - cbuf->end - 1 + e,
+ n - cbuf->maxlen + cbuf->end + 1 - e);
+ }
+ cbuf->len += n;
+ cbuf->end += n - e;
+ cbuf->end %= cbuf->maxlen;
+ return n;
+}
+
+/* add at head */
+
+static inline void
+__cirbuf_add_head(struct cirbuf * cbuf, char c)
+{
+ if (!CIRBUF_IS_EMPTY(cbuf)) {
+ cbuf->start += (cbuf->maxlen - 1);
+ cbuf->start %= cbuf->maxlen;
+ }
+ cbuf->buf[cbuf->start] = c;
+ cbuf->len ++;
+}
+
+int
+cirbuf_add_head_safe(struct cirbuf * cbuf, char c)
+{
+ if (cbuf && !CIRBUF_IS_FULL(cbuf)) {
+ __cirbuf_add_head(cbuf, c);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+void
+cirbuf_add_head(struct cirbuf * cbuf, char c)
+{
+ __cirbuf_add_head(cbuf, c);
+}
+
+/* add at tail */
+
+static inline void
+__cirbuf_add_tail(struct cirbuf * cbuf, char c)
+{
+ if (!CIRBUF_IS_EMPTY(cbuf)) {
+ cbuf->end ++;
+ cbuf->end %= cbuf->maxlen;
+ }
+ cbuf->buf[cbuf->end] = c;
+ cbuf->len ++;
+}
+
+int
+cirbuf_add_tail_safe(struct cirbuf * cbuf, char c)
+{
+ if (cbuf && !CIRBUF_IS_FULL(cbuf)) {
+ __cirbuf_add_tail(cbuf, c);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+void
+cirbuf_add_tail(struct cirbuf * cbuf, char c)
+{
+ __cirbuf_add_tail(cbuf, c);
+}
+
+
+static inline void
+__cirbuf_shift_left(struct cirbuf *cbuf)
+{
+ unsigned int i;
+ char tmp = cbuf->buf[cbuf->start];
+
+ for (i=0 ; i<cbuf->len ; i++) {
+ cbuf->buf[(cbuf->start+i)%cbuf->maxlen] =
+ cbuf->buf[(cbuf->start+i+1)%cbuf->maxlen];
+ }
+ cbuf->buf[(cbuf->start-1+cbuf->maxlen)%cbuf->maxlen] = tmp;
+ cbuf->start += (cbuf->maxlen - 1);
+ cbuf->start %= cbuf->maxlen;
+ cbuf->end += (cbuf->maxlen - 1);
+ cbuf->end %= cbuf->maxlen;
+}
+
+static inline void
+__cirbuf_shift_right(struct cirbuf *cbuf)
+{
+ unsigned int i;
+ char tmp = cbuf->buf[cbuf->end];
+
+ for (i=0 ; i<cbuf->len ; i++) {
+ cbuf->buf[(cbuf->end+cbuf->maxlen-i)%cbuf->maxlen] =
+ cbuf->buf[(cbuf->end+cbuf->maxlen-i-1)%cbuf->maxlen];
+ }
+ cbuf->buf[(cbuf->end+1)%cbuf->maxlen] = tmp;
+ cbuf->start += 1;
+ cbuf->start %= cbuf->maxlen;
+ cbuf->end += 1;
+ cbuf->end %= cbuf->maxlen;
+}
+
+/* XXX we could do a better algorithm here... */
+int
+cirbuf_align_left(struct cirbuf * cbuf)
+{
+ if (!cbuf)
+ return -EINVAL;
+
+ if (cbuf->start < cbuf->maxlen/2) {
+ while (cbuf->start != 0) {
+ __cirbuf_shift_left(cbuf);
+ }
+ }
+ else {
+ while (cbuf->start != 0) {
+ __cirbuf_shift_right(cbuf);
+ }
+ }
+
+ return 0;
+}
+
+/* XXX we could do a better algorithm here... */
+int
+cirbuf_align_right(struct cirbuf * cbuf)
+{
+ if (!cbuf)
+ return -EINVAL;
+
+ if (cbuf->start >= cbuf->maxlen/2) {
+ while (cbuf->end != cbuf->maxlen-1) {
+ __cirbuf_shift_left(cbuf);
+ }
+ }
+ else {
+ while (cbuf->start != cbuf->maxlen-1) {
+ __cirbuf_shift_right(cbuf);
+ }
+ }
+
+ return 0;
+}
+
+/* buffer del */
+
+int
+cirbuf_del_buf_head(struct cirbuf *cbuf, unsigned int size)
+{
+ if (!cbuf || !size || size > CIRBUF_GET_LEN(cbuf))
+ return -EINVAL;
+
+ cbuf->len -= size;
+ if (CIRBUF_IS_EMPTY(cbuf)) {
+ cbuf->start += size - 1;
+ cbuf->start %= cbuf->maxlen;
+ }
+ else {
+ cbuf->start += size;
+ cbuf->start %= cbuf->maxlen;
+ }
+ return 0;
+}
+
+/* buffer del */
+
+int
+cirbuf_del_buf_tail(struct cirbuf *cbuf, unsigned int size)
+{
+ if (!cbuf || !size || size > CIRBUF_GET_LEN(cbuf))
+ return -EINVAL;
+
+ cbuf->len -= size;
+ if (CIRBUF_IS_EMPTY(cbuf)) {
+ cbuf->end += (cbuf->maxlen - size + 1);
+ cbuf->end %= cbuf->maxlen;
+ }
+ else {
+ cbuf->end += (cbuf->maxlen - size);
+ cbuf->end %= cbuf->maxlen;
+ }
+ return 0;
+}
+
+/* del at head */
+
+static inline void
+__cirbuf_del_head(struct cirbuf * cbuf)
+{
+ cbuf->len --;
+ if (!CIRBUF_IS_EMPTY(cbuf)) {
+ cbuf->start ++;
+ cbuf->start %= cbuf->maxlen;
+ }
+}
+
+int
+cirbuf_del_head_safe(struct cirbuf * cbuf)
+{
+ if (cbuf && !CIRBUF_IS_EMPTY(cbuf)) {
+ __cirbuf_del_head(cbuf);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+void
+cirbuf_del_head(struct cirbuf * cbuf)
+{
+ __cirbuf_del_head(cbuf);
+}
+
+/* del at tail */
+
+static inline void
+__cirbuf_del_tail(struct cirbuf * cbuf)
+{
+ cbuf->len --;
+ if (!CIRBUF_IS_EMPTY(cbuf)) {
+ cbuf->end += (cbuf->maxlen - 1);
+ cbuf->end %= cbuf->maxlen;
+ }
+}
+
+int
+cirbuf_del_tail_safe(struct cirbuf * cbuf)
+{
+ if (cbuf && !CIRBUF_IS_EMPTY(cbuf)) {
+ __cirbuf_del_tail(cbuf);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+void
+cirbuf_del_tail(struct cirbuf * cbuf)
+{
+ __cirbuf_del_tail(cbuf);
+}
+
+/* convert to buffer */
+
+int
+cirbuf_get_buf_head(struct cirbuf *cbuf, char *c, unsigned int size)
+{
+ unsigned int n;
+
+ if (!cbuf || !c)
+ return -EINVAL;
+
+ n = (size < CIRBUF_GET_LEN(cbuf)) ? size : CIRBUF_GET_LEN(cbuf);
+
+ if (!n)
+ return 0;
+
+ if (cbuf->start <= cbuf->end) {
+ dprintf("s[%d] -> d[%d] (%d)\n", cbuf->start, 0, n);
+ memcpy(c, cbuf->buf + cbuf->start , n);
+ }
+ else {
+ /* check if we need to go from end to the beginning */
+ if (n <= cbuf->maxlen - cbuf->start) {
+ dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->start, n);
+ memcpy(c, cbuf->buf + cbuf->start , n);
+ }
+ else {
+ dprintf("s[%d] -> d[%d] (%d)\n", cbuf->start, 0,
+ cbuf->maxlen - cbuf->start);
+ dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->maxlen - cbuf->start,
+ n - cbuf->maxlen + cbuf->start);
+ memcpy(c, cbuf->buf + cbuf->start , cbuf->maxlen - cbuf->start);
+ memcpy(c + cbuf->maxlen - cbuf->start, cbuf->buf,
+ n - cbuf->maxlen + cbuf->start);
+ }
+ }
+ return n;
+}
+
+/* convert to buffer */
+
+int
+cirbuf_get_buf_tail(struct cirbuf *cbuf, char *c, unsigned int size)
+{
+ unsigned int n;
+
+ if (!cbuf || !c)
+ return -EINVAL;
+
+ n = (size < CIRBUF_GET_LEN(cbuf)) ? size : CIRBUF_GET_LEN(cbuf);
+
+ if (!n)
+ return 0;
+
+ if (cbuf->start <= cbuf->end) {
+ dprintf("s[%d] -> d[%d] (%d)\n", cbuf->end - n + 1, 0, n);
+ memcpy(c, cbuf->buf + cbuf->end - n + 1, n);
+ }
+ else {
+ /* check if we need to go from end to the beginning */
+ if (n <= cbuf->end + 1) {
+ dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->end - n + 1, n);
+ memcpy(c, cbuf->buf + cbuf->end - n + 1, n);
+ }
+ else {
+ dprintf("s[%d] -> d[%d] (%d)\n", 0,
+ cbuf->maxlen - cbuf->start, cbuf->end + 1);
+ dprintf("s[%d] -> d[%d] (%d)\n",
+ cbuf->maxlen - n + cbuf->end + 1, 0, n - cbuf->end - 1);
+ memcpy(c + cbuf->maxlen - cbuf->start,
+ cbuf->buf, cbuf->end + 1);
+ memcpy(c, cbuf->buf + cbuf->maxlen - n + cbuf->end +1,
+ n - cbuf->end - 1);
+ }
+ }
+ return n;
+}
+
+/* get head or get tail */
+
+char
+cirbuf_get_head(struct cirbuf * cbuf)
+{
+ return cbuf->buf[cbuf->start];
+}
+
+/* get head or get tail */
+
+char
+cirbuf_get_tail(struct cirbuf * cbuf)
+{
+ return cbuf->buf[cbuf->end];
+}
+
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.h b/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.h
new file mode 100755
index 00000000..6321dec5
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_cirbuf.h
@@ -0,0 +1,245 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CIRBUF_H_
+#define _CIRBUF_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * This structure is the header of a cirbuf type.
+ */
+struct cirbuf {
+ unsigned int maxlen; /**< total len of the fifo (number of elements) */
+ unsigned int start; /**< index of the first elt */
+ unsigned int end; /**< index of the last elt */
+ unsigned int len; /**< current len of fifo */
+ char *buf;
+};
+
+#ifdef RTE_LIBRTE_CMDLINE_DEBUG
+#define dprintf_(fmt, ...) printf("line %3.3d - " fmt "%.0s", __LINE__, __VA_ARGS__)
+#define dprintf(...) dprintf_(__VA_ARGS__, "dummy")
+#else
+#define dprintf(...) (void)0
+#endif
+
+
+/**
+ * Init the circular buffer
+ */
+int cirbuf_init(struct cirbuf *cbuf, char *buf, unsigned int start, unsigned int maxlen);
+
+
+/**
+ * Return 1 if the circular buffer is full
+ */
+#define CIRBUF_IS_FULL(cirbuf) ((cirbuf)->maxlen == (cirbuf)->len)
+
+/**
+ * Return 1 if the circular buffer is empty
+ */
+#define CIRBUF_IS_EMPTY(cirbuf) ((cirbuf)->len == 0)
+
+/**
+ * return current size of the circular buffer (number of used elements)
+ */
+#define CIRBUF_GET_LEN(cirbuf) ((cirbuf)->len)
+
+/**
+ * return size of the circular buffer (used + free elements)
+ */
+#define CIRBUF_GET_MAXLEN(cirbuf) ((cirbuf)->maxlen)
+
+/**
+ * return the number of free elts
+ */
+#define CIRBUF_GET_FREELEN(cirbuf) ((cirbuf)->maxlen - (cirbuf)->len)
+
+/**
+ * Iterator for a circular buffer
+ * c: struct cirbuf pointer
+ * i: an integer type internally used in the macro
+ * e: char that takes the value for each iteration
+ */
+#define CIRBUF_FOREACH(c, i, e) \
+ for ( i=0, e=(c)->buf[(c)->start] ; \
+ i<((c)->len) ; \
+ i ++, e=(c)->buf[((c)->start+i)%((c)->maxlen)])
+
+
+/**
+ * Add a character at head of the circular buffer. Return 0 on success, or
+ * a negative value on error.
+ */
+int cirbuf_add_head_safe(struct cirbuf *cbuf, char c);
+
+/**
+ * Add a character at head of the circular buffer. You _must_ check that you
+ * have enough free space in the buffer before calling this func.
+ */
+void cirbuf_add_head(struct cirbuf *cbuf, char c);
+
+/**
+ * Add a character at tail of the circular buffer. Return 0 on success, or
+ * a negative value on error.
+ */
+int cirbuf_add_tail_safe(struct cirbuf *cbuf, char c);
+
+/**
+ * Add a character at tail of the circular buffer. You _must_ check that you
+ * have enough free space in the buffer before calling this func.
+ */
+void cirbuf_add_tail(struct cirbuf *cbuf, char c);
+
+/**
+ * Remove a char at the head of the circular buffer. Return 0 on
+ * success, or a negative value on error.
+ */
+int cirbuf_del_head_safe(struct cirbuf *cbuf);
+
+/**
+ * Remove a char at the head of the circular buffer. You _must_ check
+ * that buffer is not empty before calling the function.
+ */
+void cirbuf_del_head(struct cirbuf *cbuf);
+
+/**
+ * Remove a char at the tail of the circular buffer. Return 0 on
+ * success, or a negative value on error.
+ */
+int cirbuf_del_tail_safe(struct cirbuf *cbuf);
+
+/**
+ * Remove a char at the tail of the circular buffer. You _must_ check
+ * that buffer is not empty before calling the function.
+ */
+void cirbuf_del_tail(struct cirbuf *cbuf);
+
+/**
+ * Return the head of the circular buffer. You _must_ check that
+ * buffer is not empty before calling the function.
+ */
+char cirbuf_get_head(struct cirbuf *cbuf);
+
+/**
+ * Return the tail of the circular buffer. You _must_ check that
+ * buffer is not empty before calling the function.
+ */
+char cirbuf_get_tail(struct cirbuf *cbuf);
+
+/**
+ * Add a buffer at head of the circular buffer. 'c' is a pointer to a
+ * buffer, and n is the number of chars to add. Return the number of
+ * copied bytes on success, or a negative value on error.
+ */
+int cirbuf_add_buf_head(struct cirbuf *cbuf, const char *c, unsigned int n);
+
+/**
+ * Add a buffer at tail of the circular buffer. 'c' is a pointer to a
+ * buffer, and n is the number of chars to add. Return the number of
+ * copied bytes on success, or a negative value on error.
+ */
+int cirbuf_add_buf_tail(struct cirbuf *cbuf, const char *c, unsigned int n);
+
+/**
+ * Remove chars at the head of the circular buffer. Return 0 on
+ * success, or a negative value on error.
+ */
+int cirbuf_del_buf_head(struct cirbuf *cbuf, unsigned int size);
+
+/**
+ * Remove chars at the tail of the circular buffer. Return 0 on
+ * success, or a negative value on error.
+ */
+int cirbuf_del_buf_tail(struct cirbuf *cbuf, unsigned int size);
+
+/**
+ * Copy a maximum of 'size' characters from the head of the circular
+ * buffer to a flat one pointed to by 'c'. Return the number of copied
+ * chars.
+ */
+int cirbuf_get_buf_head(struct cirbuf *cbuf, char *c, unsigned int size);
+
+/**
+ * Copy a maximum of 'size' characters from the tail of the circular
+ * buffer to a flat one pointed to by 'c'. Return the number of copied
+ * chars.
+ */
+int cirbuf_get_buf_tail(struct cirbuf *cbuf, char *c, unsigned int size);
+
+
+/**
+ * Set the start of the data to the index 0 of the internal buffer.
+ */
+int cirbuf_align_left(struct cirbuf *cbuf);
+
+/**
+ * Set the end of the data to the last index of the internal buffer.
+ */
+int cirbuf_align_right(struct cirbuf *cbuf);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CIRBUF_H_ */
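For orientation (not part of the patch): a short sketch that uses only the cirbuf API declared above. The 16-byte storage array and the "hello" payload are arbitrary choices for illustration.

    #include <stdio.h>
    #include "cmdline_cirbuf.h"

    static int cirbuf_demo(void)
    {
            struct cirbuf cb;
            char storage[16];
            char flat[sizeof(storage) + 1];
            const char *msg = "hello";
            unsigned int i;
            int n;

            if (cirbuf_init(&cb, storage, 0, sizeof(storage)) < 0)
                    return -1;

            /* append the string char by char, checking for free space */
            for (i = 0; msg[i] != '\0'; i++)
                    if (cirbuf_add_tail_safe(&cb, msg[i]) < 0)
                            return -1;

            /* copy the circular content back into a flat buffer */
            n = cirbuf_get_buf_head(&cb, flat, sizeof(flat) - 1);
            if (n < 0)
                    return -1;
            flat[n] = '\0';
            printf("cirbuf holds %d chars: \"%s\"\n", n, flat);
            return 0;
    }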
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse.c
new file mode 100755
index 00000000..dfc885cf
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse.c
@@ -0,0 +1,564 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <string.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <termios.h>
+
+#include <netinet/in.h>
+
+#include <rte_string_fns.h>
+
+#include "cmdline_rdline.h"
+#include "cmdline_parse.h"
+#include "cmdline.h"
+
+#ifdef RTE_LIBRTE_CMDLINE_DEBUG
+#define debug_printf printf
+#else
+#define debug_printf(args...) do {} while(0)
+#endif
+
+#define CMDLINE_BUFFER_SIZE 64
+
+/* isblank() needs _XOPEN_SOURCE >= 600 || _ISOC99_SOURCE, so use our
+ * own. */
+static int
+isblank2(char c)
+{
+ if (c == ' ' ||
+ c == '\t' )
+ return 1;
+ return 0;
+}
+
+static int
+isendofline(char c)
+{
+ if (c == '\n' ||
+ c == '\r' )
+ return 1;
+ return 0;
+}
+
+static int
+iscomment(char c)
+{
+ if (c == '#')
+ return 1;
+ return 0;
+}
+
+int
+cmdline_isendoftoken(char c)
+{
+ if (!c || iscomment(c) || isblank2(c) || isendofline(c))
+ return 1;
+ return 0;
+}
+
+static unsigned int
+nb_common_chars(const char * s1, const char * s2)
+{
+ unsigned int i=0;
+
+ while (*s1==*s2 && *s1) {
+ s1++;
+ s2++;
+ i++;
+ }
+ return i;
+}
+
+/**
+ * try to match the buffer with an instruction (only the first
+ * nb_match_token tokens if != 0). Return 0 if all tokens match, the
+ * number of matched tokens if only some do, or -1 if none match.
+ */
+static int
+match_inst(cmdline_parse_inst_t *inst, const char *buf,
+ unsigned int nb_match_token, void *resbuf, unsigned resbuf_size)
+{
+ unsigned int token_num=0;
+ cmdline_parse_token_hdr_t * token_p;
+ unsigned int i=0;
+ int n = 0;
+ struct cmdline_token_hdr token_hdr;
+
+ token_p = inst->tokens[token_num];
+ if (token_p)
+ memcpy(&token_hdr, token_p, sizeof(token_hdr));
+
+ /* check if we match all tokens of inst */
+ while (token_p && (!nb_match_token || i<nb_match_token)) {
+ debug_printf("TK\n");
+ /* skip spaces */
+ while (isblank2(*buf)) {
+ buf++;
+ }
+
+ /* end of buf */
+ if ( isendofline(*buf) || iscomment(*buf) )
+ break;
+
+ if (resbuf == NULL) {
+ n = token_hdr.ops->parse(token_p, buf, NULL, 0);
+ } else {
+ unsigned rb_sz;
+
+ if (token_hdr.offset > resbuf_size) {
+ printf("Parse error(%s:%d): Token offset(%u) "
+ "exceeds maximum size(%u)\n",
+ __FILE__, __LINE__,
+ token_hdr.offset, resbuf_size);
+ return -ENOBUFS;
+ }
+ rb_sz = resbuf_size - token_hdr.offset;
+
+ n = token_hdr.ops->parse(token_p, buf, (char *)resbuf +
+ token_hdr.offset, rb_sz);
+ }
+
+ if (n < 0)
+ break;
+
+ debug_printf("TK parsed (len=%d)\n", n);
+ i++;
+ buf += n;
+
+ token_num ++;
+ token_p = inst->tokens[token_num];
+ if (token_p)
+ memcpy(&token_hdr, token_p, sizeof(token_hdr));
+ }
+
+ /* does not match */
+ if (i==0)
+ return -1;
+
+ /* in case we want to match a specific number of tokens */
+ if (nb_match_token) {
+ if (i == nb_match_token) {
+ return 0;
+ }
+ return i;
+ }
+
+ /* we don't match all the tokens */
+ if (token_p) {
+ return i;
+ }
+
+ /* are there some more tokens? */
+ while (isblank2(*buf)) {
+ buf++;
+ }
+
+ /* end of buf */
+ if ( isendofline(*buf) || iscomment(*buf) )
+ return 0;
+
+ /* garbage after inst */
+ return i;
+}
+
+
+int
+cmdline_parse(struct cmdline *cl, const char * buf)
+{
+ unsigned int inst_num=0;
+ cmdline_parse_inst_t *inst;
+ const char *curbuf;
+ char result_buf[CMDLINE_PARSE_RESULT_BUFSIZE];
+ void (*f)(void *, struct cmdline *, void *) = NULL;
+ void *data = NULL;
+ int comment = 0;
+ int linelen = 0;
+ int parse_it = 0;
+ int err = CMDLINE_PARSE_NOMATCH;
+ int tok;
+ cmdline_parse_ctx_t *ctx;
+#ifdef RTE_LIBRTE_CMDLINE_DEBUG
+ char debug_buf[BUFSIZ];
+#endif
+
+ if (!cl || !buf)
+ return CMDLINE_PARSE_BAD_ARGS;
+
+ ctx = cl->ctx;
+
+ /*
+ * - look if the buffer contains at least one line
+ * - look if line contains only spaces or comments
+ * - count line length
+ */
+ curbuf = buf;
+ while (! isendofline(*curbuf)) {
+ if ( *curbuf == '\0' ) {
+ debug_printf("Incomplete buf (len=%d)\n", linelen);
+ return 0;
+ }
+ if ( iscomment(*curbuf) ) {
+ comment = 1;
+ }
+ if ( ! isblank2(*curbuf) && ! comment) {
+ parse_it = 1;
+ }
+ curbuf++;
+ linelen++;
+ }
+
+ /* skip all endofline chars */
+ while (isendofline(buf[linelen])) {
+ linelen++;
+ }
+
+ /* empty line */
+ if ( parse_it == 0 ) {
+ debug_printf("Empty line (len=%d)\n", linelen);
+ return linelen;
+ }
+
+#ifdef RTE_LIBRTE_CMDLINE_DEBUG
+ snprintf(debug_buf, (linelen>64 ? 64 : linelen), "%s", buf);
+ debug_printf("Parse line : len=%d, <%s>\n", linelen, debug_buf);
+#endif
+
+ /* parse it !! */
+ inst = ctx[inst_num];
+ while (inst) {
+ debug_printf("INST %d\n", inst_num);
+
+ /* fully parsed */
+ tok = match_inst(inst, buf, 0, result_buf, sizeof(result_buf));
+
+ if (tok > 0) /* we matched at least one token */
+ err = CMDLINE_PARSE_BAD_ARGS;
+
+ else if (!tok) {
+ debug_printf("INST fully parsed\n");
+ /* skip spaces */
+ while (isblank2(*curbuf)) {
+ curbuf++;
+ }
+
+ /* if end of buf -> there is no garbage after inst */
+ if (isendofline(*curbuf) || iscomment(*curbuf)) {
+ if (!f) {
+ memcpy(&f, &inst->f, sizeof(f));
+ memcpy(&data, &inst->data, sizeof(data));
+ }
+ else {
+ /* more than 1 inst matches */
+ err = CMDLINE_PARSE_AMBIGUOUS;
+ f=NULL;
+ debug_printf("Ambiguous cmd\n");
+ break;
+ }
+ }
+ }
+
+ inst_num ++;
+ inst = ctx[inst_num];
+ }
+
+ /* call func */
+ if (f) {
+ f(result_buf, cl, data);
+ }
+
+ /* no match */
+ else {
+ debug_printf("No match err=%d\n", err);
+ return err;
+ }
+
+ return linelen;
+}
+
+int
+cmdline_complete(struct cmdline *cl, const char *buf, int *state,
+ char *dst, unsigned int size)
+{
+ const char *partial_tok = buf;
+ unsigned int inst_num = 0;
+ cmdline_parse_inst_t *inst;
+ cmdline_parse_token_hdr_t *token_p;
+ struct cmdline_token_hdr token_hdr;
+ char tmpbuf[CMDLINE_BUFFER_SIZE], comp_buf[CMDLINE_BUFFER_SIZE];
+ unsigned int partial_tok_len;
+ int comp_len = -1;
+ int tmp_len = -1;
+ int nb_token = 0;
+ unsigned int i, n;
+ int l;
+ unsigned int nb_completable;
+ unsigned int nb_non_completable;
+ int local_state = 0;
+ const char *help_str;
+ cmdline_parse_ctx_t *ctx;
+
+ if (!cl || !buf || !state || !dst)
+ return -1;
+
+ ctx = cl->ctx;
+
+ debug_printf("%s called\n", __func__);
+ memset(&token_hdr, 0, sizeof(token_hdr));
+
+ /* count the number of complete tokens to parse */
+ for (i=0 ; buf[i] ; i++) {
+ if (!isblank2(buf[i]) && isblank2(buf[i+1]))
+ nb_token++;
+ if (isblank2(buf[i]) && !isblank2(buf[i+1]))
+ partial_tok = buf+i+1;
+ }
+ partial_tok_len = strnlen(partial_tok, RDLINE_BUF_SIZE);
+
+ /* first call -> do a first pass */
+ if (*state <= 0) {
+ debug_printf("try complete <%s>\n", buf);
+ debug_printf("there is %d complete tokens, <%s> is incomplete\n",
+ nb_token, partial_tok);
+
+ nb_completable = 0;
+ nb_non_completable = 0;
+
+ inst = ctx[inst_num];
+ while (inst) {
+ /* parse the first tokens of the inst */
+ if (nb_token && match_inst(inst, buf, nb_token, NULL, 0))
+ goto next;
+
+ debug_printf("instruction match\n");
+ token_p = inst->tokens[nb_token];
+ if (token_p)
+ memcpy(&token_hdr, token_p, sizeof(token_hdr));
+
+ /* non completable */
+ if (!token_p ||
+ !token_hdr.ops->complete_get_nb ||
+ !token_hdr.ops->complete_get_elt ||
+ (n = token_hdr.ops->complete_get_nb(token_p)) == 0) {
+ nb_non_completable++;
+ goto next;
+ }
+
+ debug_printf("%d choices for this token\n", n);
+ for (i=0 ; i<n ; i++) {
+ if (token_hdr.ops->complete_get_elt(token_p, i,
+ tmpbuf,
+ sizeof(tmpbuf)) < 0)
+ continue;
+
+ /* we have at least room for one char */
+ tmp_len = strnlen(tmpbuf, sizeof(tmpbuf));
+ if (tmp_len < CMDLINE_BUFFER_SIZE - 1) {
+ tmpbuf[tmp_len] = ' ';
+ tmpbuf[tmp_len+1] = 0;
+ }
+
+ debug_printf(" choice <%s>\n", tmpbuf);
+
+ /* does the completion match the
+ * beginning of the word ? */
+ if (!strncmp(partial_tok, tmpbuf,
+ partial_tok_len)) {
+ if (comp_len == -1) {
+ snprintf(comp_buf, sizeof(comp_buf),
+ "%s", tmpbuf + partial_tok_len);
+ comp_len =
+ strnlen(tmpbuf + partial_tok_len,
+ sizeof(tmpbuf) - partial_tok_len);
+
+ }
+ else {
+ comp_len =
+ nb_common_chars(comp_buf,
+ tmpbuf+partial_tok_len);
+ comp_buf[comp_len] = 0;
+ }
+ nb_completable++;
+ }
+ }
+ next:
+ debug_printf("next\n");
+ inst_num ++;
+ inst = ctx[inst_num];
+ }
+
+ debug_printf("total choices %d for this completion\n",
+ nb_completable);
+
+ /* no possible completion */
+ if (nb_completable == 0 && nb_non_completable == 0)
+ return 0;
+
+ /* if multichoice is not required */
+ if (*state == 0 && partial_tok_len > 0) {
+ /* one or several choices starting with the
+ same chars */
+ if (comp_len > 0) {
+ if ((unsigned)(comp_len + 1) > size)
+ return 0;
+
+ snprintf(dst, size, "%s", comp_buf);
+ dst[comp_len] = 0;
+ return 2;
+ }
+ }
+ }
+
+ /* init state correctly */
+ if (*state == -1)
+ *state = 0;
+
+ debug_printf("Multiple choice STATE=%d\n", *state);
+
+ inst_num = 0;
+ inst = ctx[inst_num];
+ while (inst) {
+ /* we need to redo it */
+ inst = ctx[inst_num];
+
+ if (nb_token && match_inst(inst, buf, nb_token, NULL, 0))
+ goto next2;
+
+ token_p = inst->tokens[nb_token];
+ if (token_p)
+ memcpy(&token_hdr, token_p, sizeof(token_hdr));
+
+ /* one choice for this token */
+ if (!token_p ||
+ !token_hdr.ops->complete_get_nb ||
+ !token_hdr.ops->complete_get_elt ||
+ (n = token_hdr.ops->complete_get_nb(token_p)) == 0) {
+ if (local_state < *state) {
+ local_state++;
+ goto next2;
+ }
+ (*state)++;
+ if (token_p && token_hdr.ops->get_help) {
+ token_hdr.ops->get_help(token_p, tmpbuf,
+ sizeof(tmpbuf));
+ help_str = inst->help_str;
+ if (help_str)
+ snprintf(dst, size, "[%s]: %s", tmpbuf,
+ help_str);
+ else
+ snprintf(dst, size, "[%s]: No help",
+ tmpbuf);
+ }
+ else {
+ snprintf(dst, size, "[RETURN]");
+ }
+ return 1;
+ }
+
+ /* several choices */
+ for (i=0 ; i<n ; i++) {
+ if (token_hdr.ops->complete_get_elt(token_p, i, tmpbuf,
+ sizeof(tmpbuf)) < 0)
+ continue;
+ /* we have at least room for one char */
+ tmp_len = strnlen(tmpbuf, sizeof(tmpbuf));
+ if (tmp_len < CMDLINE_BUFFER_SIZE - 1) {
+ tmpbuf[tmp_len] = ' ';
+ tmpbuf[tmp_len + 1] = 0;
+ }
+
+ debug_printf(" choice <%s>\n", tmpbuf);
+
+ /* does the completion match the beginning of
+ * the word ? */
+ if (!strncmp(partial_tok, tmpbuf,
+ partial_tok_len)) {
+ if (local_state < *state) {
+ local_state++;
+ continue;
+ }
+ (*state)++;
+ l=snprintf(dst, size, "%s", tmpbuf);
+ if (l>=0 && token_hdr.ops->get_help) {
+ token_hdr.ops->get_help(token_p, tmpbuf,
+ sizeof(tmpbuf));
+ help_str = inst->help_str;
+ if (help_str)
+ snprintf(dst+l, size-l, "[%s]: %s",
+ tmpbuf, help_str);
+ else
+ snprintf(dst+l, size-l,
+ "[%s]: No help", tmpbuf);
+ }
+
+ return 1;
+ }
+ }
+ next2:
+ inst_num ++;
+ inst = ctx[inst_num];
+ }
+ return 0;
+}
+
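For orientation (not part of the patch): a sketch of the *state protocol implemented by cmdline_complete() above. The cmdline pointer and the partial line are assumed to come from the caller, and the 128-byte destination buffer is an arbitrary size for this example.

    #include <stdio.h>
    #include "cmdline_parse.h"

    static void show_completions(struct cmdline *cl, const char *partial)
    {
            char dst[128];          /* arbitrary size for this sketch */
            int state = 0;          /* 0: try to complete; -1: only list choices */
            int ret;

            for (;;) {
                    ret = cmdline_complete(cl, partial, &state, dst, sizeof(dst));
                    if (ret <= 0)           /* 0: iteration finished, <0: error */
                            break;
                    if (ret == 2) {         /* single match: dst holds the missing chars */
                            printf("append: \"%s\"\n", dst);
                            break;
                    }
                    /* ret == 1: one candidate (with help text) per call */
                    printf("  %s\n", dst);
            }
    }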
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse.h
new file mode 100755
index 00000000..4b25c456
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse.h
@@ -0,0 +1,191 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CMDLINE_PARSE_H_
+#define _CMDLINE_PARSE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef offsetof
+#define offsetof(type, field) ((size_t) &( ((type *)0)->field) )
+#endif
+
+/* return status for parsing */
+#define CMDLINE_PARSE_SUCCESS 0
+#define CMDLINE_PARSE_AMBIGUOUS -1
+#define CMDLINE_PARSE_NOMATCH -2
+#define CMDLINE_PARSE_BAD_ARGS -3
+
+/* return status for completion */
+#define CMDLINE_PARSE_COMPLETE_FINISHED 0
+#define CMDLINE_PARSE_COMPLETE_AGAIN 1
+#define CMDLINE_PARSE_COMPLETED_BUFFER 2
+
+/* maximum buffer size for parsed result */
+#define CMDLINE_PARSE_RESULT_BUFSIZE 8192
+
+/**
+ * Stores a pointer to the ops struct, and the offset: the place to
+ * write the parsed result in the destination structure.
+ */
+struct cmdline_token_hdr {
+ struct cmdline_token_ops *ops;
+ unsigned int offset;
+};
+typedef struct cmdline_token_hdr cmdline_parse_token_hdr_t;
+
+/**
+ * A token is defined by this structure.
+ *
+ * parse() takes the token as first argument, then the source buffer
+ * starting at the token we want to parse. The 3rd arg is a pointer
+ * where we store the parsed data (as binary). It returns the number of
+ * parsed chars on success and a negative value on error.
+ *
+ * complete_get_nb() returns the number of possible values for this
+ * token if completion is possible. If it is NULL or if it returns 0,
+ * no completion is possible.
+ *
+ * complete_get_elt() copies into dstbuf (whose size is given by the
+ * parameter) the i-th possible completion for this token. It returns 0
+ * on success or a negative value on error.
+ *
+ * get_help() fills the dstbuf with the help for the token. It returns
+ * -1 on error and 0 on success.
+ */
+struct cmdline_token_ops {
+ /** parse(token ptr, buf, res pts, buf len) */
+ int (*parse)(cmdline_parse_token_hdr_t *, const char *, void *,
+ unsigned int);
+ /** return the num of possible choices for this token */
+ int (*complete_get_nb)(cmdline_parse_token_hdr_t *);
+ /** return the elt x for this token (token, idx, dstbuf, size) */
+ int (*complete_get_elt)(cmdline_parse_token_hdr_t *, int, char *,
+ unsigned int);
+ /** get help for this token (token, dstbuf, size) */
+ int (*get_help)(cmdline_parse_token_hdr_t *, char *, unsigned int);
+};
+
+struct cmdline;
+/**
+ * Stores an instruction: a pointer to a callback function and the
+ * parameter it is called with when the instruction is parsed, a help
+ * string, and the list of tokens composing this instruction.
+ */
+struct cmdline_inst {
+ /* f(parsed_struct, data) */
+ void (*f)(void *, struct cmdline *, void *);
+ void *data;
+ const char *help_str;
+ cmdline_parse_token_hdr_t *tokens[];
+};
+typedef struct cmdline_inst cmdline_parse_inst_t;
+
+/**
+ * A context is identified by its name and contains a list of
+ * instructions.
+ */
+typedef cmdline_parse_inst_t *cmdline_parse_ctx_t;
+
+/**
+ * Try to parse a buffer according to the specified context. The
+ * argument buf must end with "\n\0". The function returns
+ * CMDLINE_PARSE_AMBIGUOUS, CMDLINE_PARSE_NOMATCH or
+ * CMDLINE_PARSE_BAD_ARGS on error. Otherwise it calls the associated
+ * function (defined in the context) and returns the number of
+ * characters consumed from the line.
+ */
+int cmdline_parse(struct cmdline *cl, const char *buf);
+
+/**
+ * complete() must be called with *state==0 (try to complete) or
+ * with *state==-1 (just display choices), then called without
+ * modifying *state until it returns CMDLINE_PARSE_COMPLETED_BUFFER or
+ * CMDLINE_PARSE_COMPLETE_FINISHED.
+ *
+ * It returns < 0 on error.
+ *
+ * Else it returns:
+ * - CMDLINE_PARSE_COMPLETED_BUFFER on completion (one possible
+ * choice). In this case, the chars are appended to the dst buffer.
+ * - CMDLINE_PARSE_COMPLETE_AGAIN if there are several possible
+ * choices. In this case, you must call the function again,
+ * keeping the value of state intact.
+ * - CMDLINE_PARSE_COMPLETE_FINISHED when the iteration is
+ * finished. The dst is not valid for this last call.
+ *
+ * The returned dst buf ends with \0.
+ */
+int cmdline_complete(struct cmdline *cl, const char *buf, int *state,
+ char *dst, unsigned int size);
+
+
+/* return true if(!c || iscomment(c) || isblank(c) ||
+ * isendofline(c)) */
+int cmdline_isendoftoken(char c);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CMDLINE_PARSE_H_ */
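For orientation (not part of the patch): a sketch of defining one instruction and a parse context with the structures declared above. It assumes the string-token helpers (cmdline_fixed_string_t, TOKEN_STRING_INITIALIZER, cmdline_parse_token_string_t) from cmdline_parse_string.h, which belong to the same library but are not shown in this hunk; the command name "quit" and help string are illustrative.

    #include <termios.h>
    #include "cmdline_rdline.h"
    #include "cmdline_parse.h"
    #include "cmdline_parse_string.h"   /* assumed: string-token helpers from this library */
    #include "cmdline.h"

    /* result structure: the parser writes each token at its offset */
    struct cmd_quit_result {
            cmdline_fixed_string_t quit;
    };

    /* callback invoked by cmdline_parse() when the line matches */
    static void cmd_quit_parsed(void *parsed, struct cmdline *cl, void *data)
    {
            (void)parsed;
            (void)data;
            cmdline_quit(cl);
    }

    static cmdline_parse_token_string_t cmd_quit_tok =
            TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");

    static cmdline_parse_inst_t cmd_quit = {
            .f = cmd_quit_parsed,
            .data = NULL,
            .help_str = "exit the shell",
            .tokens = {             /* NULL-terminated token list */
                    (void *)&cmd_quit_tok,
                    NULL,
            },
    };

    /* NULL-terminated instruction list forming the parse context */
    cmdline_parse_ctx_t main_ctx[] = {
            &cmd_quit,
            NULL,
    };

Feeding the line "quit\n" to cmdline_parse() on a cmdline built over this context would invoke cmd_quit_parsed().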
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.c
new file mode 100755
index 00000000..64ae86c7
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.c
@@ -0,0 +1,180 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <net/ethernet.h>
+
+#include <rte_string_fns.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_parse_etheraddr.h"
+
+struct cmdline_token_ops cmdline_token_etheraddr_ops = {
+ .parse = cmdline_parse_etheraddr,
+ .complete_get_nb = NULL,
+ .complete_get_elt = NULL,
+ .get_help = cmdline_get_help_etheraddr,
+};
+
+/* the format can be either XX:XX:XX:XX:XX:XX or XXXX:XXXX:XXXX */
+#define ETHER_ADDRSTRLENLONG 18
+#define ETHER_ADDRSTRLENSHORT 15
+
+#ifdef __linux__
+#define ea_oct ether_addr_octet
+#else
+#define ea_oct octet
+#endif
+
+
+static struct ether_addr *
+my_ether_aton(const char *a)
+{
+ int i;
+ char *end;
+ unsigned long o[ETHER_ADDR_LEN];
+ static struct ether_addr ether_addr;
+
+ i = 0;
+ do {
+ errno = 0;
+ o[i] = strtoul(a, &end, 16);
+ if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
+ return (NULL);
+ a = end + 1;
+ } while (++i != sizeof (o) / sizeof (o[0]) && end[0] != 0);
+
+ /* Junk at the end of line */
+ if (end[0] != 0)
+ return (NULL);
+
+ /* Support the format XX:XX:XX:XX:XX:XX */
+ if (i == ETHER_ADDR_LEN) {
+ while (i-- != 0) {
+ if (o[i] > UINT8_MAX)
+ return (NULL);
+ ether_addr.ea_oct[i] = (uint8_t)o[i];
+ }
+ /* Support the format XXXX:XXXX:XXXX */
+ } else if (i == ETHER_ADDR_LEN / 2) {
+ while (i-- != 0) {
+ if (o[i] > UINT16_MAX)
+ return (NULL);
+ ether_addr.ea_oct[i * 2] = (uint8_t)(o[i] >> 8);
+ ether_addr.ea_oct[i * 2 + 1] = (uint8_t)(o[i] & 0xff);
+ }
+ /* unknown format */
+ } else
+ return (NULL);
+
+ return (struct ether_addr *)&ether_addr;
+}
+
+int
+cmdline_parse_etheraddr(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
+ const char *buf, void *res, unsigned ressize)
+{
+ unsigned int token_len = 0;
+ char ether_str[ETHER_ADDRSTRLENLONG+1];
+ struct ether_addr *tmp;
+
+ if (res && ressize < sizeof(struct ether_addr))
+ return -1;
+
+ if (!buf || ! *buf)
+ return -1;
+
+ while (!cmdline_isendoftoken(buf[token_len]))
+ token_len++;
+
+ /* if token doesn't match possible string lengths... */
+ if ((token_len != ETHER_ADDRSTRLENLONG - 1) &&
+ (token_len != ETHER_ADDRSTRLENSHORT - 1))
+ return -1;
+
+ snprintf(ether_str, token_len+1, "%s", buf);
+
+ tmp = my_ether_aton(ether_str);
+ if (tmp == NULL)
+ return -1;
+ if (res)
+ memcpy(res, tmp, sizeof(struct ether_addr));
+ return token_len;
+}
+
+int
+cmdline_get_help_etheraddr(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
+ char *dstbuf, unsigned int size)
+{
+ int ret;
+
+ ret = snprintf(dstbuf, size, "Ethernet address");
+ if (ret < 0)
+ return -1;
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.h
new file mode 100755
index 00000000..0085bb3b
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse_etheraddr.h
@@ -0,0 +1,94 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARSE_ETHERADDR_H_
+#define _PARSE_ETHERADDR_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct cmdline_token_etheraddr {
+ struct cmdline_token_hdr hdr;
+};
+typedef struct cmdline_token_etheraddr cmdline_parse_token_etheraddr_t;
+
+extern struct cmdline_token_ops cmdline_token_etheraddr_ops;
+
+int cmdline_parse_etheraddr(cmdline_parse_token_hdr_t *tk, const char *srcbuf,
+ void *res, unsigned ressize);
+int cmdline_get_help_etheraddr(cmdline_parse_token_hdr_t *tk, char *dstbuf,
+ unsigned int size);
+
+#define TOKEN_ETHERADDR_INITIALIZER(structure, field) \
+{ \
+ /* hdr */ \
+ { \
+ &cmdline_token_etheraddr_ops, /* ops */ \
+ offsetof(structure, field), /* offset */ \
+ }, \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _PARSE_ETHERADDR_H_ */
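For orientation (not part of the patch): a sketch showing the MAC-address token initializer declared above, plus a direct call to the parser. The struct and function names are illustrative; the token argument is unused by this parser, so NULL is passed, and struct ether_addr comes from <net/ethernet.h> on Linux.

    #include <net/ethernet.h>
    #include "cmdline_parse.h"
    #include "cmdline_parse_etheraddr.h"

    /* result struct for a command taking one MAC-address token */
    struct cmd_mac_result {
            struct ether_addr mac;
    };

    static cmdline_parse_token_etheraddr_t cmd_mac_tok =
            TOKEN_ETHERADDR_INITIALIZER(struct cmd_mac_result, mac);

    /* the parser can also be called directly; it ignores its token argument */
    static int parse_mac_example(void)
    {
            struct ether_addr mac;

            if (cmdline_parse_etheraddr(NULL, "00:11:22:33:44:55",
                                        &mac, sizeof(mac)) < 0)
                    return -1;
            return 0;
    }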
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.c
new file mode 100755
index 00000000..7f335994
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.c
@@ -0,0 +1,408 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * For inet_ntop() functions:
+ *
+ * Copyright (c) 1996 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
+ * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
+ * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <string.h>
+#include <errno.h>
+#include <netinet/in.h>
+#ifndef __linux__
+#ifndef __FreeBSD__
+#include <net/socket.h>
+#else
+#include <sys/socket.h>
+#endif
+#endif
+
+#include <rte_string_fns.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_parse_ipaddr.h"
+
+struct cmdline_token_ops cmdline_token_ipaddr_ops = {
+ .parse = cmdline_parse_ipaddr,
+ .complete_get_nb = NULL,
+ .complete_get_elt = NULL,
+ .get_help = cmdline_get_help_ipaddr,
+};
+
+#define INADDRSZ 4
+#define IN6ADDRSZ 16
+#define PREFIXMAX 128
+#define V4PREFIXMAX 32
+
+/*
+ * WARNING: Don't even consider trying to compile this on a system where
+ * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX.
+ */
+
+static int inet_pton4(const char *src, unsigned char *dst);
+static int inet_pton6(const char *src, unsigned char *dst);
+
+/* int
+ * inet_pton(af, src, dst)
+ * convert from presentation format (which usually means ASCII printable)
+ * to network format (which is usually some kind of binary format).
+ * return:
+ * 1 if the address was valid for the specified address family
+ * 0 if the address wasn't valid (`dst' is untouched in this case)
+ * -1 if some other error occurred (`dst' is untouched in this case, too)
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+my_inet_pton(int af, const char *src, void *dst)
+{
+ switch (af) {
+ case AF_INET:
+ return (inet_pton4(src, dst));
+ case AF_INET6:
+ return (inet_pton6(src, dst));
+ default:
+ errno = EAFNOSUPPORT;
+ return (-1);
+ }
+ /* NOTREACHED */
+}
+
+/* int
+ * inet_pton4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ if ((pch = strchr(digits, ch)) != NULL) {
+ unsigned int new = *tp * 10 + (pch - digits);
+
+ if (new > 255)
+ return (0);
+ if (! saw_digit) {
+ if (++octets > 4)
+ return (0);
+ saw_digit = 1;
+ }
+ *tp = (unsigned char)new;
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return (0);
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return (0);
+ }
+ if (octets < 4)
+ return (0);
+
+ memcpy(dst, tmp, INADDRSZ);
+ return (1);
+}
+
+/* int
+ * inet_pton6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton6(const char *src, unsigned char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
+ const char *xdigits = 0, *curtok = 0;
+ int ch = 0, saw_xdigit = 0, count_xdigit = 0;
+ unsigned int val = 0;
+ unsigned dbloct_count = 0;
+
+ memset((tp = tmp), '\0', IN6ADDRSZ);
+ endp = tp + IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return (0);
+ curtok = src;
+ saw_xdigit = count_xdigit = 0;
+ val = 0;
+
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ if (count_xdigit >= 4)
+ return (0);
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return (0);
+ saw_xdigit = 1;
+ count_xdigit++;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!saw_xdigit) {
+ if (colonp)
+ return (0);
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return (0);
+ }
+ if (tp + sizeof(int16_t) > endp)
+ return (0);
+ *tp++ = (unsigned char) ((val >> 8) & 0xff);
+ *tp++ = (unsigned char) (val & 0xff);
+ saw_xdigit = 0;
+ count_xdigit = 0;
+ val = 0;
+ dbloct_count++;
+ continue;
+ }
+ if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
+ inet_pton4(curtok, tp) > 0) {
+ tp += INADDRSZ;
+ saw_xdigit = 0;
+ dbloct_count += 2;
+ break; /* '\0' was seen by inet_pton4(). */
+ }
+ return (0);
+ }
+ if (saw_xdigit) {
+ if (tp + sizeof(int16_t) > endp)
+ return (0);
+ *tp++ = (unsigned char) ((val >> 8) & 0xff);
+ *tp++ = (unsigned char) (val & 0xff);
+ dbloct_count++;
+ }
+ if (colonp != NULL) {
+ /* if we already have 8 double octets, having a colon means error */
+ if (dbloct_count == 8)
+ return 0;
+
+ /*
+ * Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ for (i = 1; i <= n; i++) {
+ endp[- i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return (0);
+ memcpy(dst, tmp, IN6ADDRSZ);
+ return (1);
+}
+
+int
+cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
+ unsigned ressize)
+{
+ struct cmdline_token_ipaddr *tk2;
+ unsigned int token_len = 0;
+ char ip_str[INET6_ADDRSTRLEN+4+1]; /* '+4' is for prefixlen (if any) */
+ cmdline_ipaddr_t ipaddr;
+ char *prefix, *prefix_end;
+ long prefixlen = 0;
+
+ if (res && ressize < sizeof(cmdline_ipaddr_t))
+ return -1;
+
+ if (!buf || !tk || ! *buf)
+ return -1;
+
+ tk2 = (struct cmdline_token_ipaddr *)tk;
+
+ while (!cmdline_isendoftoken(buf[token_len]))
+ token_len++;
+
+ /* if token is too big... */
+ if (token_len >= INET6_ADDRSTRLEN+4)
+ return -1;
+
+ snprintf(ip_str, token_len+1, "%s", buf);
+
+ /* convert the network prefix */
+ if (tk2->ipaddr_data.flags & CMDLINE_IPADDR_NETWORK) {
+ prefix = strrchr(ip_str, '/');
+ if (prefix == NULL)
+ return -1;
+ *prefix = '\0';
+ prefix ++;
+ errno = 0;
+ prefixlen = strtol(prefix, &prefix_end, 10);
+ if (errno || (*prefix_end != '\0')
+ || prefixlen < 0 || prefixlen > PREFIXMAX)
+ return -1;
+ ipaddr.prefixlen = prefixlen;
+ }
+ else {
+ ipaddr.prefixlen = 0;
+ }
+
+ /* convert the IP addr */
+ if ((tk2->ipaddr_data.flags & CMDLINE_IPADDR_V4) &&
+ my_inet_pton(AF_INET, ip_str, &ipaddr.addr.ipv4) == 1 &&
+ prefixlen <= V4PREFIXMAX) {
+ ipaddr.family = AF_INET;
+ if (res)
+ memcpy(res, &ipaddr, sizeof(ipaddr));
+ return token_len;
+ }
+ if ((tk2->ipaddr_data.flags & CMDLINE_IPADDR_V6) &&
+ my_inet_pton(AF_INET6, ip_str, &ipaddr.addr.ipv6) == 1) {
+ ipaddr.family = AF_INET6;
+ if (res)
+ memcpy(res, &ipaddr, sizeof(ipaddr));
+ return token_len;
+ }
+ return -1;
+
+}
+
+int cmdline_get_help_ipaddr(cmdline_parse_token_hdr_t *tk, char *dstbuf,
+ unsigned int size)
+{
+ struct cmdline_token_ipaddr *tk2;
+
+ if (!tk || !dstbuf)
+ return -1;
+
+ tk2 = (struct cmdline_token_ipaddr *)tk;
+
+ switch (tk2->ipaddr_data.flags) {
+ case CMDLINE_IPADDR_V4:
+ snprintf(dstbuf, size, "IPv4");
+ break;
+ case CMDLINE_IPADDR_V6:
+ snprintf(dstbuf, size, "IPv6");
+ break;
+ case CMDLINE_IPADDR_V4|CMDLINE_IPADDR_V6:
+ snprintf(dstbuf, size, "IPv4/IPv6");
+ break;
+ case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V4:
+ snprintf(dstbuf, size, "IPv4 network");
+ break;
+ case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V6:
+ snprintf(dstbuf, size, "IPv6 network");
+ break;
+ case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V4|CMDLINE_IPADDR_V6:
+ snprintf(dstbuf, size, "IPv4/IPv6 network");
+ break;
+ default:
+ snprintf(dstbuf, size, "IPaddr (bad flags)");
+ break;
+ }
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.h
new file mode 100755
index 00000000..296c374a
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse_ipaddr.h
@@ -0,0 +1,186 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARSE_IPADDR_H_
+#define _PARSE_IPADDR_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CMDLINE_IPADDR_V4 0x01
+#define CMDLINE_IPADDR_V6 0x02
+#define CMDLINE_IPADDR_NETWORK 0x04
+
+struct cmdline_ipaddr {
+ uint8_t family;
+ union {
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ } addr;
+ unsigned int prefixlen; /* in case of network only */
+};
+typedef struct cmdline_ipaddr cmdline_ipaddr_t;
+
+struct cmdline_token_ipaddr_data {
+ uint8_t flags;
+};
+
+struct cmdline_token_ipaddr {
+ struct cmdline_token_hdr hdr;
+ struct cmdline_token_ipaddr_data ipaddr_data;
+};
+typedef struct cmdline_token_ipaddr cmdline_parse_token_ipaddr_t;
+
+extern struct cmdline_token_ops cmdline_token_ipaddr_ops;
+
+int cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *srcbuf,
+ void *res, unsigned ressize);
+int cmdline_get_help_ipaddr(cmdline_parse_token_hdr_t *tk, char *dstbuf,
+ unsigned int size);
+
+#define TOKEN_IPADDR_INITIALIZER(structure, field) \
+{ \
+ /* hdr */ \
+ { \
+ &cmdline_token_ipaddr_ops, /* ops */ \
+ offsetof(structure, field), /* offset */ \
+ }, \
+ /* ipaddr_data */ \
+ { \
+ CMDLINE_IPADDR_V4 | /* flags */ \
+ CMDLINE_IPADDR_V6, \
+ }, \
+}
+
+#define TOKEN_IPV4_INITIALIZER(structure, field) \
+{ \
+ /* hdr */ \
+ { \
+ &cmdline_token_ipaddr_ops, /* ops */ \
+ offsetof(structure, field), /* offset */ \
+ }, \
+ /* ipaddr_data */ \
+ { \
+ CMDLINE_IPADDR_V4, /* flags */ \
+ }, \
+}
+
+#define TOKEN_IPV6_INITIALIZER(structure, field) \
+{ \
+ /* hdr */ \
+ { \
+ &cmdline_token_ipaddr_ops, /* ops */ \
+ offsetof(structure, field), /* offset */ \
+ }, \
+ /* ipaddr_data */ \
+ { \
+ CMDLINE_IPADDR_V6, /* flags */ \
+ }, \
+}
+
+#define TOKEN_IPNET_INITIALIZER(structure, field) \
+{ \
+ /* hdr */ \
+ { \
+ &cmdline_token_ipaddr_ops, /* ops */ \
+ offsetof(structure, field), /* offset */ \
+ }, \
+ /* ipaddr_data */ \
+ { \
+ CMDLINE_IPADDR_V4 | /* flags */ \
+ CMDLINE_IPADDR_V6 | \
+ CMDLINE_IPADDR_NETWORK, \
+ }, \
+}
+
+#define TOKEN_IPV4NET_INITIALIZER(structure, field) \
+{ \
+ /* hdr */ \
+ { \
+ &cmdline_token_ipaddr_ops, /* ops */ \
+ offsetof(structure, field), /* offset */ \
+ }, \
+ /* ipaddr_data */ \
+ { \
+ CMDLINE_IPADDR_V4 | /* flags */ \
+ CMDLINE_IPADDR_NETWORK, \
+ }, \
+}
+
+#define TOKEN_IPV6NET_INITIALIZER(structure, field) \
+{ \
+ /* hdr */ \
+ { \
+ &cmdline_token_ipaddr_ops, /* ops */ \
+ offsetof(structure, field), /* offset */ \
+ }, \
+ /* ipaddr_data */ \
+ { \
+ CMDLINE_IPADDR_V6 | /* flags */ \
+ CMDLINE_IPADDR_NETWORK, \
+ }, \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PARSE_IPADDR_H_ */
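+
+/*
+ * Illustrative usage sketch (an assumption about a typical caller, not part
+ * of the upstream sources): an IPv4 token declared with
+ * TOKEN_IPV4_INITIALIZER fills a cmdline_ipaddr_t, whose fields are defined
+ * above.
+ *
+ * struct cmd_ip_result {
+ *         cmdline_ipaddr_t ip;
+ * };
+ *
+ * cmdline_parse_token_ipaddr_t token_ip =
+ *         TOKEN_IPV4_INITIALIZER(struct cmd_ip_result, ip);
+ *
+ * // After a successful parse of e.g. "10.0.0.1":
+ * //   ip.family == AF_INET, ip.addr.ipv4 holds the address in network
+ * //   byte order, and ip.prefixlen == 0 (only the *NET tokens fill it).
+ */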
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.c
new file mode 100755
index 00000000..d8cf37f0
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.c
@@ -0,0 +1,402 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <rte_string_fns.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_parse_num.h"
+
+#ifdef RTE_LIBRTE_CMDLINE_DEBUG
+#define debug_printf(args...) printf(args)
+#else
+#define debug_printf(args...) do {} while(0)
+#endif
+
+struct cmdline_token_ops cmdline_token_num_ops = {
+ .parse = cmdline_parse_num,
+ .complete_get_nb = NULL,
+ .complete_get_elt = NULL,
+ .get_help = cmdline_get_help_num,
+};
+
+
+enum num_parse_state_t {
+ START,
+ DEC_NEG,
+ BIN,
+ HEX,
+
+ ERROR,
+
+ FIRST_OK, /* not used */
+ ZERO_OK,
+ HEX_OK,
+ OCTAL_OK,
+ BIN_OK,
+ DEC_NEG_OK,
+ DEC_POS_OK,
+};
+
+/* Keep it in sync with the enum in the .h file */
+static const char * num_help[] = {
+ "UINT8", "UINT16", "UINT32", "UINT64",
+ "INT8", "INT16", "INT32", "INT64",
+};
+
+static inline int
+add_to_res(unsigned int c, uint64_t *res, unsigned int base)
+{
+ /* overflow */
+ if ( (UINT64_MAX - c) / base < *res ) {
+ return -1;
+ }
+
+ *res = (uint64_t) (*res * base + c);
+ return 0;
+}
+
+static int
+check_res_size(struct cmdline_token_num_data *nd, unsigned ressize)
+{
+ switch (nd->type) {
+ case INT8:
+ case UINT8:
+ if (ressize < sizeof(int8_t))
+ return -1;
+ break;
+ case INT16:
+ case UINT16:
+ if (ressize < sizeof(int16_t))
+ return -1;
+ break;
+ case INT32:
+ case UINT32:
+ if (ressize < sizeof(int32_t))
+ return -1;
+ break;
+ case INT64:
+ case UINT64:
+ if (ressize < sizeof(int64_t))
+ return -1;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+/* parse an int */
+int
+cmdline_parse_num(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res,
+ unsigned ressize)
+{
+ struct cmdline_token_num_data nd;
+ enum num_parse_state_t st = START;
+ const char * buf;
+ char c;
+ uint64_t res1 = 0;
+
+ if (!tk)
+ return -1;
+
+ if (!srcbuf || !*srcbuf)
+ return -1;
+
+ buf = srcbuf;
+ c = *buf;
+
+ memcpy(&nd, &((struct cmdline_token_num *)tk)->num_data, sizeof(nd));
+
+ /* check that we have enough room in res */
+ if (res) {
+ if (check_res_size(&nd, ressize) < 0)
+ return -1;
+ }
+
+ while ( st != ERROR && c && ! cmdline_isendoftoken(c) ) {
+ debug_printf("%c %x -> ", c, c);
+ switch (st) {
+ case START:
+ if (c == '-') {
+ st = DEC_NEG;
+ }
+ else if (c == '0') {
+ st = ZERO_OK;
+ }
+ else if (c >= '1' && c <= '9') {
+ if (add_to_res(c - '0', &res1, 10) < 0)
+ st = ERROR;
+ else
+ st = DEC_POS_OK;
+ }
+ else {
+ st = ERROR;
+ }
+ break;
+
+ case ZERO_OK:
+ if (c == 'x') {
+ st = HEX;
+ }
+ else if (c == 'b') {
+ st = BIN;
+ }
+ else if (c >= '0' && c <= '7') {
+ if (add_to_res(c - '0', &res1, 10) < 0)
+ st = ERROR;
+ else
+ st = OCTAL_OK;
+ }
+ else {
+ st = ERROR;
+ }
+ break;
+
+ case DEC_NEG:
+ if (c >= '0' && c <= '9') {
+ if (add_to_res(c - '0', &res1, 10) < 0)
+ st = ERROR;
+ else
+ st = DEC_NEG_OK;
+ }
+ else {
+ st = ERROR;
+ }
+ break;
+
+ case DEC_NEG_OK:
+ if (c >= '0' && c <= '9') {
+ if (add_to_res(c - '0', &res1, 10) < 0)
+ st = ERROR;
+ }
+ else {
+ st = ERROR;
+ }
+ break;
+
+ case DEC_POS_OK:
+ if (c >= '0' && c <= '9') {
+ if (add_to_res(c - '0', &res1, 10) < 0)
+ st = ERROR;
+ }
+ else {
+ st = ERROR;
+ }
+ break;
+
+ case HEX:
+ st = HEX_OK;
+ /* no break */
+ case HEX_OK:
+ if (c >= '0' && c <= '9') {
+ if (add_to_res(c - '0', &res1, 16) < 0)
+ st = ERROR;
+ }
+ else if (c >= 'a' && c <= 'f') {
+ if (add_to_res(c - 'a' + 10, &res1, 16) < 0)
+ st = ERROR;
+ }
+ else if (c >= 'A' && c <= 'F') {
+ if (add_to_res(c - 'A' + 10, &res1, 16) < 0)
+ st = ERROR;
+ }
+ else {
+ st = ERROR;
+ }
+ break;
+
+
+ case OCTAL_OK:
+ if (c >= '0' && c <= '7') {
+ if (add_to_res(c - '0', &res1, 8) < 0)
+ st = ERROR;
+ }
+ else {
+ st = ERROR;
+ }
+ break;
+
+ case BIN:
+ st = BIN_OK;
+ /* no break */
+ case BIN_OK:
+ if (c >= '0' && c <= '1') {
+ if (add_to_res(c - '0', &res1, 2) < 0)
+ st = ERROR;
+ }
+ else {
+ st = ERROR;
+ }
+ break;
+ default:
+ debug_printf("not impl ");
+
+ }
+
+ debug_printf("(%"PRIu64")\n", res1);
+
+ buf ++;
+ c = *buf;
+
+ /* token too long */
+ if (buf-srcbuf > 127)
+ return -1;
+ }
+
+ switch (st) {
+ case ZERO_OK:
+ case DEC_POS_OK:
+ case HEX_OK:
+ case OCTAL_OK:
+ case BIN_OK:
+ if ( nd.type == INT8 && res1 <= INT8_MAX ) {
+ if (res) *(int8_t *)res = (int8_t) res1;
+ return (buf-srcbuf);
+ }
+ else if ( nd.type == INT16 && res1 <= INT16_MAX ) {
+ if (res) *(int16_t *)res = (int16_t) res1;
+ return (buf-srcbuf);
+ }
+ else if ( nd.type == INT32 && res1 <= INT32_MAX ) {
+ if (res) *(int32_t *)res = (int32_t) res1;
+ return (buf-srcbuf);
+ }
+ else if ( nd.type == INT64 && res1 <= INT64_MAX ) {
+ if (res) *(int64_t *)res = (int64_t) res1;
+ return (buf-srcbuf);
+ }
+ else if ( nd.type == UINT8 && res1 <= UINT8_MAX ) {
+ if (res) *(uint8_t *)res = (uint8_t) res1;
+ return (buf-srcbuf);
+ }
+ else if (nd.type == UINT16 && res1 <= UINT16_MAX ) {
+ if (res) *(uint16_t *)res = (uint16_t) res1;
+ return (buf-srcbuf);
+ }
+ else if ( nd.type == UINT32 && res1 <= UINT32_MAX ) {
+ if (res) *(uint32_t *)res = (uint32_t) res1;
+ return (buf-srcbuf);
+ }
+ else if ( nd.type == UINT64 ) {
+ if (res) *(uint64_t *)res = res1;
+ return (buf-srcbuf);
+ }
+ else {
+ return -1;
+ }
+ break;
+
+ case DEC_NEG_OK:
+ if ( nd.type == INT8 && res1 <= INT8_MAX + 1 ) {
+ if (res) *(int8_t *)res = (int8_t) (-res1);
+ return (buf-srcbuf);
+ }
+ else if ( nd.type == INT16 && res1 <= (uint16_t)INT16_MAX + 1 ) {
+ if (res) *(int16_t *)res = (int16_t) (-res1);
+ return (buf-srcbuf);
+ }
+ else if ( nd.type == INT32 && res1 <= (uint32_t)INT32_MAX + 1 ) {
+ if (res) *(int32_t *)res = (int32_t) (-res1);
+ return (buf-srcbuf);
+ }
+ else if ( nd.type == INT64 && res1 <= (uint64_t)INT64_MAX + 1 ) {
+ if (res) *(int64_t *)res = (int64_t) (-res1);
+ return (buf-srcbuf);
+ }
+ else {
+ return -1;
+ }
+ break;
+ default:
+ debug_printf("error\n");
+ return -1;
+ }
+}
+
+
+/* parse an int */
+int
+cmdline_get_help_num(cmdline_parse_token_hdr_t *tk, char *dstbuf, unsigned int size)
+{
+ struct cmdline_token_num_data nd;
+ int ret;
+
+ if (!tk)
+ return -1;
+
+ memcpy(&nd, &((struct cmdline_token_num *)tk)->num_data, sizeof(nd));
+
+ /* should not happen.... don't do this test */
+ /* if (nd.type >= (sizeof(num_help)/sizeof(const char *))) */
+ /* return -1; */
+
+ ret = snprintf(dstbuf, size, "%s", num_help[nd.type]);
+ if (ret < 0)
+ return -1;
+ dstbuf[size-1] = '\0';
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.h
new file mode 100755
index 00000000..5376806f
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse_num.h
@@ -0,0 +1,113 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARSE_NUM_H_
+#define _PARSE_NUM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum cmdline_numtype {
+ UINT8 = 0,
+ UINT16,
+ UINT32,
+ UINT64,
+ INT8,
+ INT16,
+ INT32,
+ INT64
+};
+
+struct cmdline_token_num_data {
+ enum cmdline_numtype type;
+};
+
+struct cmdline_token_num {
+ struct cmdline_token_hdr hdr;
+ struct cmdline_token_num_data num_data;
+};
+typedef struct cmdline_token_num cmdline_parse_token_num_t;
+
+extern struct cmdline_token_ops cmdline_token_num_ops;
+
+int cmdline_parse_num(cmdline_parse_token_hdr_t *tk,
+ const char *srcbuf, void *res, unsigned ressize);
+int cmdline_get_help_num(cmdline_parse_token_hdr_t *tk,
+ char *dstbuf, unsigned int size);
+
+#define TOKEN_NUM_INITIALIZER(structure, field, numtype) \
+{ \
+ /* hdr */ \
+ { \
+ &cmdline_token_num_ops, /* ops */ \
+ offsetof(structure, field), /* offset */ \
+ }, \
+ /* num_data */ \
+ { \
+ numtype, /* type */ \
+ }, \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PARSE_NUM_H_ */
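+
+/*
+ * Illustrative usage sketch (not part of the upstream sources): the parser
+ * in cmdline_parse_num.c accepts decimal, "0x" hexadecimal, "0b" binary and
+ * leading-zero octal input, plus a leading '-' for the signed types only.
+ *
+ * struct cmd_num_result {
+ *         uint16_t port_id;
+ * };
+ *
+ * cmdline_parse_token_num_t token_port =
+ *         TOKEN_NUM_INITIALIZER(struct cmd_num_result, port_id, UINT16);
+ *
+ * // "42", "0x2a" and "0b101010" all store 42 into port_id; a value above
+ * // UINT16_MAX (or any '-' prefix) makes cmdline_parse_num() return -1.
+ */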
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.c
new file mode 100755
index 00000000..834f2e6e
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.c
@@ -0,0 +1,173 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2010, Keith Wiles <keith.wiles@windriver.com>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <string.h>
+#include <errno.h>
+#include <stdarg.h>
+
+#include <rte_string_fns.h>
+#include "cmdline_parse.h"
+#include "cmdline_parse_portlist.h"
+
+struct cmdline_token_ops cmdline_token_portlist_ops = {
+ .parse = cmdline_parse_portlist,
+ .complete_get_nb = NULL,
+ .complete_get_elt = NULL,
+ .get_help = cmdline_get_help_portlist,
+};
+
+static void
+parse_set_list(cmdline_portlist_t * pl, int low, int high)
+{
+ do {
+ pl->map |= (1 << low++);
+ } while (low <= high);
+}
+
+static int
+parse_ports(cmdline_portlist_t * pl, const char * str)
+{
+ size_t ps, pe;
+ const char *first, *last;
+ char *end;
+
+ for (first = str, last = first;
+ first != NULL && last != NULL;
+ first = last + 1) {
+
+ last = strchr(first, ',');
+
+ errno = 0;
+ ps = strtoul(first, &end, 10);
+ if (errno != 0 || end == first ||
+ (end[0] != '-' && end[0] != 0 && end != last))
+ return (-1);
+
+ /* Support for N-M portlist format */
+ if (end[0] == '-') {
+ errno = 0;
+ first = end + 1;
+ pe = strtoul(first, &end, 10);
+ if (errno != 0 || end == first ||
+ (end[0] != 0 && end != last))
+ return (-1);
+ } else {
+ pe = ps;
+ }
+
+ if (ps > pe || pe >= sizeof (pl->map) * 8)
+ return (-1);
+
+ parse_set_list(pl, ps, pe);
+ }
+
+ return (0);
+}
+
+int
+cmdline_parse_portlist(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
+ const char *buf, void *res, unsigned ressize)
+{
+ unsigned int token_len = 0;
+ char portlist_str[PORTLIST_TOKEN_SIZE+1];
+ cmdline_portlist_t *pl;
+
+ if (!buf || ! *buf)
+ return (-1);
+
+ if (res && ressize < PORTLIST_TOKEN_SIZE)
+ return -1;
+
+ pl = res;
+
+ while (!cmdline_isendoftoken(buf[token_len]) &&
+ (token_len < PORTLIST_TOKEN_SIZE))
+ token_len++;
+
+ if (token_len >= PORTLIST_TOKEN_SIZE)
+ return (-1);
+
+ snprintf(portlist_str, token_len+1, "%s", buf);
+
+ if (pl) {
+ pl->map = 0;
+ if (strcmp("all", portlist_str) == 0)
+ pl->map = UINT32_MAX;
+ else if (parse_ports(pl, portlist_str) != 0)
+ return (-1);
+ }
+
+ return token_len;
+}
+
+int
+cmdline_get_help_portlist(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
+ char *dstbuf, unsigned int size)
+{
+ int ret;
+ ret = snprintf(dstbuf, size, "range of ports as 3,4-6,8-19,20");
+ if (ret < 0)
+ return -1;
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.h
new file mode 100755
index 00000000..85050595
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse_portlist.h
@@ -0,0 +1,101 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2010, Keith Wiles <keith.wiles@windriver.com>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARSE_PORTLIST_H_
+#define _PARSE_PORTLIST_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* size of a parsed portlist string */
+#define PORTLIST_TOKEN_SIZE 128
+#define PORTLIST_MAX_TOKENS 32
+
+typedef struct cmdline_portlist {
+ uint32_t map;
+} cmdline_portlist_t;
+
+struct cmdline_token_portlist {
+ struct cmdline_token_hdr hdr;
+};
+typedef struct cmdline_token_portlist cmdline_parse_token_portlist_t;
+
+extern struct cmdline_token_ops cmdline_token_portlist_ops;
+
+int cmdline_parse_portlist(cmdline_parse_token_hdr_t *tk,
+ const char *srcbuf, void *res, unsigned ressize);
+int cmdline_get_help_portlist(cmdline_parse_token_hdr_t *tk,
+ char *dstbuf, unsigned int size);
+
+#define TOKEN_PORTLIST_INITIALIZER(structure, field) \
+{ \
+ /* hdr */ \
+ { \
+ &cmdline_token_portlist_ops, /* ops */ \
+ offsetof(structure, field), /* offset */ \
+ }, \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PARSE_PORTLIST_H_ */
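+
+/*
+ * Illustrative usage sketch (not part of the upstream sources): the portlist
+ * token turns a comma/range list into a bitmask, one bit per port.
+ *
+ * struct cmd_plist_result {
+ *         cmdline_portlist_t ports;
+ * };
+ *
+ * cmdline_parse_token_portlist_t token_ports =
+ *         TOKEN_PORTLIST_INITIALIZER(struct cmd_plist_result, ports);
+ *
+ * // Parsing "0,2-4" sets bits 0, 2, 3 and 4, i.e. ports.map == 0x1d;
+ * // the literal "all" sets every bit (ports.map == UINT32_MAX).
+ */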
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.c b/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.c
new file mode 100755
index 00000000..45883b3e
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.c
@@ -0,0 +1,253 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <rte_string_fns.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_parse_string.h"
+
+struct cmdline_token_ops cmdline_token_string_ops = {
+ .parse = cmdline_parse_string,
+ .complete_get_nb = cmdline_complete_get_nb_string,
+ .complete_get_elt = cmdline_complete_get_elt_string,
+ .get_help = cmdline_get_help_string,
+};
+
+#define MULTISTRING_HELP "Mul-choice STRING"
+#define ANYSTRING_HELP "Any STRING"
+#define FIXEDSTRING_HELP "Fixed STRING"
+
+static unsigned int
+get_token_len(const char *s)
+{
+ char c;
+ unsigned int i=0;
+
+ c = s[i];
+ while (c!='#' && c!='\0') {
+ i++;
+ c = s[i];
+ }
+ return i;
+}
+
+static const char *
+get_next_token(const char *s)
+{
+ unsigned int i;
+ i = get_token_len(s);
+ if (s[i] == '#')
+ return s+i+1;
+ return NULL;
+}
+
+int
+cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
+ unsigned ressize)
+{
+ struct cmdline_token_string *tk2;
+ struct cmdline_token_string_data *sd;
+ unsigned int token_len;
+ const char *str;
+
+ if (res && ressize < STR_TOKEN_SIZE)
+ return -1;
+
+ if (!tk || !buf || ! *buf)
+ return -1;
+
+ tk2 = (struct cmdline_token_string *)tk;
+
+ sd = &tk2->string_data;
+
+ /* fixed string */
+ if (sd->str) {
+ str = sd->str;
+ do {
+ token_len = get_token_len(str);
+
+ /* if token is too big... */
+ if (token_len >= STR_TOKEN_SIZE - 1) {
+ continue;
+ }
+
+ if ( strncmp(buf, str, token_len) ) {
+ continue;
+ }
+
+ if ( !cmdline_isendoftoken(*(buf+token_len)) ) {
+ continue;
+ }
+
+ break;
+ } while ( (str = get_next_token(str)) != NULL );
+
+ if (!str)
+ return -1;
+ }
+ /* unspecified string */
+ else {
+ token_len = 0;
+ while(!cmdline_isendoftoken(buf[token_len]) &&
+ token_len < (STR_TOKEN_SIZE-1))
+ token_len++;
+
+ /* return if token too long */
+ if (token_len >= STR_TOKEN_SIZE - 1) {
+ return -1;
+ }
+ }
+
+ if (res) {
+ /* we are sure that token_len is < STR_TOKEN_SIZE-1 */
+ snprintf(res, STR_TOKEN_SIZE, "%s", buf);
+ *((char *)res + token_len) = 0;
+ }
+
+
+ return token_len;
+}
+
+int cmdline_complete_get_nb_string(cmdline_parse_token_hdr_t *tk)
+{
+ struct cmdline_token_string *tk2;
+ struct cmdline_token_string_data *sd;
+ const char *str;
+ int ret = 1;
+
+ if (!tk)
+ return -1;
+
+ tk2 = (struct cmdline_token_string *)tk;
+ sd = &tk2->string_data;
+
+ if (!sd->str)
+ return 0;
+
+ str = sd->str;
+ while( (str = get_next_token(str)) != NULL ) {
+ ret++;
+ }
+ return ret;
+}
+
+int cmdline_complete_get_elt_string(cmdline_parse_token_hdr_t *tk, int idx,
+ char *dstbuf, unsigned int size)
+{
+ struct cmdline_token_string *tk2;
+ struct cmdline_token_string_data *sd;
+ const char *s;
+ unsigned int len;
+
+ if (!tk || !dstbuf || idx < 0)
+ return -1;
+
+ tk2 = (struct cmdline_token_string *)tk;
+ sd = &tk2->string_data;
+
+ s = sd->str;
+
+ while (idx-- && s)
+ s = get_next_token(s);
+
+ if (!s)
+ return -1;
+
+ len = get_token_len(s);
+ if (len > size - 1)
+ return -1;
+
+ memcpy(dstbuf, s, len);
+ dstbuf[len] = '\0';
+ return 0;
+}
+
+
+int cmdline_get_help_string(cmdline_parse_token_hdr_t *tk, char *dstbuf,
+ unsigned int size)
+{
+ struct cmdline_token_string *tk2;
+ struct cmdline_token_string_data *sd;
+ const char *s;
+
+ if (!tk || !dstbuf)
+ return -1;
+
+ tk2 = (struct cmdline_token_string *)tk;
+ sd = &tk2->string_data;
+
+ s = sd->str;
+
+ if (s) {
+ if (get_next_token(s))
+ snprintf(dstbuf, size, MULTISTRING_HELP);
+ else
+ snprintf(dstbuf, size, FIXEDSTRING_HELP);
+ } else
+ snprintf(dstbuf, size, ANYSTRING_HELP);
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.h b/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.h
new file mode 100755
index 00000000..c2056226
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_parse_string.h
@@ -0,0 +1,110 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARSE_STRING_H_
+#define _PARSE_STRING_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* size of a parsed string */
+#define STR_TOKEN_SIZE 128
+
+typedef char cmdline_fixed_string_t[STR_TOKEN_SIZE];
+
+struct cmdline_token_string_data {
+ const char *str;
+};
+
+struct cmdline_token_string {
+ struct cmdline_token_hdr hdr;
+ struct cmdline_token_string_data string_data;
+};
+typedef struct cmdline_token_string cmdline_parse_token_string_t;
+
+extern struct cmdline_token_ops cmdline_token_string_ops;
+
+int cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *srcbuf,
+ void *res, unsigned ressize);
+int cmdline_complete_get_nb_string(cmdline_parse_token_hdr_t *tk);
+int cmdline_complete_get_elt_string(cmdline_parse_token_hdr_t *tk, int idx,
+ char *dstbuf, unsigned int size);
+int cmdline_get_help_string(cmdline_parse_token_hdr_t *tk, char *dstbuf,
+ unsigned int size);
+
+#define TOKEN_STRING_INITIALIZER(structure, field, string) \
+{ \
+ /* hdr */ \
+ { \
+ &cmdline_token_string_ops, /* ops */ \
+ offsetof(structure, field), /* offset */ \
+ }, \
+ /* string_data */ \
+ { \
+ string, /* str */ \
+ }, \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PARSE_STRING_H_ */
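+
+/*
+ * Illustrative usage sketch (not part of the upstream sources): the str
+ * member of the token data selects the matching mode.  A NULL string accepts
+ * any token, a plain string must match exactly, and a '#'-separated list
+ * acts as a multi-choice keyword (see get_next_token() in
+ * cmdline_parse_string.c).
+ *
+ * struct cmd_action_result {
+ *         cmdline_fixed_string_t action;
+ * };
+ *
+ * cmdline_parse_token_string_t token_action =
+ *         TOKEN_STRING_INITIALIZER(struct cmd_action_result, action,
+ *                                  "add#del");
+ *
+ * // "add" and "del" are both accepted and copied into 'action';
+ * // any other word makes cmdline_parse_string() return -1.
+ */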
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_rdline.c b/src/dpdk_lib18/librte_cmdline/cmdline_rdline.c
new file mode 100755
index 00000000..f79ebe31
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_rdline.c
@@ -0,0 +1,698 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <ctype.h>
+
+#include "cmdline_cirbuf.h"
+#include "cmdline_rdline.h"
+
+static void rdline_puts(struct rdline *rdl, const char *buf);
+static void rdline_miniprintf(struct rdline *rdl,
+ const char *buf, unsigned int val);
+
+static void rdline_remove_old_history_item(struct rdline *rdl);
+static void rdline_remove_first_history_item(struct rdline *rdl);
+static unsigned int rdline_get_history_size(struct rdline *rdl);
+
+
+/* isblank() needs _XOPEN_SOURCE >= 600 || _ISOC99_SOURCE, so use our
+ * own. */
+static int
+isblank2(char c)
+{
+ if (c == ' ' ||
+ c == '\t' )
+ return 1;
+ return 0;
+}
+
+int
+rdline_init(struct rdline *rdl,
+ rdline_write_char_t *write_char,
+ rdline_validate_t *validate,
+ rdline_complete_t *complete)
+{
+ if (!rdl || !write_char || !validate || !complete)
+ return -EINVAL;
+ memset(rdl, 0, sizeof(*rdl));
+ rdl->validate = validate;
+ rdl->complete = complete;
+ rdl->write_char = write_char;
+ rdl->status = RDLINE_INIT;
+ return cirbuf_init(&rdl->history, rdl->history_buf, 0, RDLINE_HISTORY_BUF_SIZE);
+}
+
+void
+rdline_newline(struct rdline *rdl, const char *prompt)
+{
+ unsigned int i;
+
+ if (!rdl || !prompt)
+ return;
+
+ vt100_init(&rdl->vt100);
+ cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);
+ cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
+
+ rdl->prompt_size = strnlen(prompt, RDLINE_PROMPT_SIZE-1);
+ if (prompt != rdl->prompt)
+ memcpy(rdl->prompt, prompt, rdl->prompt_size);
+ rdl->prompt[RDLINE_PROMPT_SIZE-1] = '\0';
+
+ for (i=0 ; i<rdl->prompt_size ; i++)
+ rdl->write_char(rdl, rdl->prompt[i]);
+ rdl->status = RDLINE_RUNNING;
+
+ rdl->history_cur_line = -1;
+}
+
+void
+rdline_stop(struct rdline *rdl)
+{
+ if (!rdl)
+ return;
+ rdl->status = RDLINE_INIT;
+}
+
+void
+rdline_quit(struct rdline *rdl)
+{
+ if (!rdl)
+ return;
+ rdl->status = RDLINE_EXITED;
+}
+
+void
+rdline_restart(struct rdline *rdl)
+{
+ if (!rdl)
+ return;
+ rdl->status = RDLINE_RUNNING;
+}
+
+void
+rdline_reset(struct rdline *rdl)
+{
+ if (!rdl)
+ return;
+ vt100_init(&rdl->vt100);
+ cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);
+ cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
+
+ rdl->status = RDLINE_RUNNING;
+
+ rdl->history_cur_line = -1;
+}
+
+const char *
+rdline_get_buffer(struct rdline *rdl)
+{
+ if (!rdl)
+ return NULL;
+ unsigned int len_l, len_r;
+ cirbuf_align_left(&rdl->left);
+ cirbuf_align_left(&rdl->right);
+
+ len_l = CIRBUF_GET_LEN(&rdl->left);
+ len_r = CIRBUF_GET_LEN(&rdl->right);
+ memcpy(rdl->left_buf+len_l, rdl->right_buf, len_r);
+
+ rdl->left_buf[len_l + len_r] = '\n';
+ rdl->left_buf[len_l + len_r + 1] = '\0';
+ return rdl->left_buf;
+}
+
+static void
+display_right_buffer(struct rdline *rdl, int force)
+{
+ unsigned int i;
+ char tmp;
+
+ if (!force && CIRBUF_IS_EMPTY(&rdl->right))
+ return;
+
+ rdline_puts(rdl, vt100_clear_right);
+ CIRBUF_FOREACH(&rdl->right, i, tmp) {
+ rdl->write_char(rdl, tmp);
+ }
+ if (!CIRBUF_IS_EMPTY(&rdl->right))
+ rdline_miniprintf(rdl, vt100_multi_left,
+ CIRBUF_GET_LEN(&rdl->right));
+}
+
+void
+rdline_redisplay(struct rdline *rdl)
+{
+ unsigned int i;
+ char tmp;
+
+ if (!rdl)
+ return;
+
+ rdline_puts(rdl, vt100_home);
+ for (i=0 ; i<rdl->prompt_size ; i++)
+ rdl->write_char(rdl, rdl->prompt[i]);
+ CIRBUF_FOREACH(&rdl->left, i, tmp) {
+ rdl->write_char(rdl, tmp);
+ }
+ display_right_buffer(rdl, 1);
+}
+
+int
+rdline_char_in(struct rdline *rdl, char c)
+{
+ unsigned int i;
+ int cmd;
+ char tmp;
+ char *buf;
+
+ if (!rdl)
+ return -EINVAL;
+
+ if (rdl->status == RDLINE_EXITED)
+ return RDLINE_RES_EXITED;
+ if (rdl->status != RDLINE_RUNNING)
+ return RDLINE_RES_NOT_RUNNING;
+
+ cmd = vt100_parser(&rdl->vt100, c);
+ if (cmd == -2)
+ return RDLINE_RES_SUCCESS;
+
+ if (cmd >= 0) {
+ switch (cmd) {
+ /* move caret 1 char to the left */
+ case CMDLINE_KEY_CTRL_B:
+ case CMDLINE_KEY_LEFT_ARR:
+ if (CIRBUF_IS_EMPTY(&rdl->left))
+ break;
+ tmp = cirbuf_get_tail(&rdl->left);
+ cirbuf_del_tail(&rdl->left);
+ cirbuf_add_head(&rdl->right, tmp);
+ rdline_puts(rdl, vt100_left_arr);
+ break;
+
+ /* move caret 1 char to the right */
+ case CMDLINE_KEY_CTRL_F:
+ case CMDLINE_KEY_RIGHT_ARR:
+ if (CIRBUF_IS_EMPTY(&rdl->right))
+ break;
+ tmp = cirbuf_get_head(&rdl->right);
+ cirbuf_del_head(&rdl->right);
+ cirbuf_add_tail(&rdl->left, tmp);
+ rdline_puts(rdl, vt100_right_arr);
+ break;
+
+ /* move caret 1 word to the left */
+ /* keyboard equivalent: Alt+B */
+ case CMDLINE_KEY_WLEFT:
+ while (! CIRBUF_IS_EMPTY(&rdl->left) &&
+ (tmp = cirbuf_get_tail(&rdl->left)) &&
+ isblank2(tmp)) {
+ rdline_puts(rdl, vt100_left_arr);
+ cirbuf_del_tail(&rdl->left);
+ cirbuf_add_head(&rdl->right, tmp);
+ }
+ while (! CIRBUF_IS_EMPTY(&rdl->left) &&
+ (tmp = cirbuf_get_tail(&rdl->left)) &&
+ !isblank2(tmp)) {
+ rdline_puts(rdl, vt100_left_arr);
+ cirbuf_del_tail(&rdl->left);
+ cirbuf_add_head(&rdl->right, tmp);
+ }
+ break;
+
+ /* move caret 1 word to the right */
+ /* keyboard equivalent: Alt+F */
+ case CMDLINE_KEY_WRIGHT:
+ while (! CIRBUF_IS_EMPTY(&rdl->right) &&
+ (tmp = cirbuf_get_head(&rdl->right)) &&
+ isblank2(tmp)) {
+ rdline_puts(rdl, vt100_right_arr);
+ cirbuf_del_head(&rdl->right);
+ cirbuf_add_tail(&rdl->left, tmp);
+ }
+ while (! CIRBUF_IS_EMPTY(&rdl->right) &&
+ (tmp = cirbuf_get_head(&rdl->right)) &&
+ !isblank2(tmp)) {
+ rdline_puts(rdl, vt100_right_arr);
+ cirbuf_del_head(&rdl->right);
+ cirbuf_add_tail(&rdl->left, tmp);
+ }
+ break;
+
+ /* move caret to the left */
+ case CMDLINE_KEY_CTRL_A:
+ if (CIRBUF_IS_EMPTY(&rdl->left))
+ break;
+ rdline_miniprintf(rdl, vt100_multi_left,
+ CIRBUF_GET_LEN(&rdl->left));
+ while (! CIRBUF_IS_EMPTY(&rdl->left)) {
+ tmp = cirbuf_get_tail(&rdl->left);
+ cirbuf_del_tail(&rdl->left);
+ cirbuf_add_head(&rdl->right, tmp);
+ }
+ break;
+
+ /* move caret to the right */
+ case CMDLINE_KEY_CTRL_E:
+ if (CIRBUF_IS_EMPTY(&rdl->right))
+ break;
+ rdline_miniprintf(rdl, vt100_multi_right,
+ CIRBUF_GET_LEN(&rdl->right));
+ while (! CIRBUF_IS_EMPTY(&rdl->right)) {
+ tmp = cirbuf_get_head(&rdl->right);
+ cirbuf_del_head(&rdl->right);
+ cirbuf_add_tail(&rdl->left, tmp);
+ }
+ break;
+
+ /* delete 1 char from the left */
+ case CMDLINE_KEY_BKSPACE:
+ if(!cirbuf_del_tail_safe(&rdl->left)) {
+ rdline_puts(rdl, vt100_bs);
+ display_right_buffer(rdl, 1);
+ }
+ break;
+
+ /* delete 1 char from the right */
+ case CMDLINE_KEY_SUPPR:
+ case CMDLINE_KEY_CTRL_D:
+ if (cmd == CMDLINE_KEY_CTRL_D &&
+ CIRBUF_IS_EMPTY(&rdl->left) &&
+ CIRBUF_IS_EMPTY(&rdl->right)) {
+ return RDLINE_RES_EOF;
+ }
+ if (!cirbuf_del_head_safe(&rdl->right)) {
+ display_right_buffer(rdl, 1);
+ }
+ break;
+
+ /* delete 1 word from the left */
+ case CMDLINE_KEY_META_BKSPACE:
+ case CMDLINE_KEY_CTRL_W:
+ while (! CIRBUF_IS_EMPTY(&rdl->left) && isblank2(cirbuf_get_tail(&rdl->left))) {
+ rdline_puts(rdl, vt100_bs);
+ cirbuf_del_tail(&rdl->left);
+ }
+ while (! CIRBUF_IS_EMPTY(&rdl->left) && !isblank2(cirbuf_get_tail(&rdl->left))) {
+ rdline_puts(rdl, vt100_bs);
+ cirbuf_del_tail(&rdl->left);
+ }
+ display_right_buffer(rdl, 1);
+ break;
+
+ /* delete 1 word from the right */
+ case CMDLINE_KEY_META_D:
+ while (! CIRBUF_IS_EMPTY(&rdl->right) && isblank2(cirbuf_get_head(&rdl->right)))
+ cirbuf_del_head(&rdl->right);
+ while (! CIRBUF_IS_EMPTY(&rdl->right) && !isblank2(cirbuf_get_head(&rdl->right)))
+ cirbuf_del_head(&rdl->right);
+ display_right_buffer(rdl, 1);
+ break;
+
+ /* set kill buffer to contents on the right side of caret */
+ case CMDLINE_KEY_CTRL_K:
+ cirbuf_get_buf_head(&rdl->right, rdl->kill_buf, RDLINE_BUF_SIZE);
+ rdl->kill_size = CIRBUF_GET_LEN(&rdl->right);
+ cirbuf_del_buf_head(&rdl->right, rdl->kill_size);
+ rdline_puts(rdl, vt100_clear_right);
+ break;
+
+ /* paste contents of kill buffer to the left side of caret */
+ case CMDLINE_KEY_CTRL_Y:
+ i=0;
+ while(CIRBUF_GET_LEN(&rdl->right) + CIRBUF_GET_LEN(&rdl->left) <
+ RDLINE_BUF_SIZE &&
+ i < rdl->kill_size) {
+ cirbuf_add_tail(&rdl->left, rdl->kill_buf[i]);
+ rdl->write_char(rdl, rdl->kill_buf[i]);
+ i++;
+ }
+ display_right_buffer(rdl, 0);
+ break;
+
+ /* clear and newline */
+ case CMDLINE_KEY_CTRL_C:
+ rdline_puts(rdl, "\r\n");
+ rdline_newline(rdl, rdl->prompt);
+ break;
+
+ /* redisplay (helps when prompt is lost in other output) */
+ case CMDLINE_KEY_CTRL_L:
+ rdline_redisplay(rdl);
+ break;
+
+ /* autocomplete */
+ case CMDLINE_KEY_TAB:
+ case CMDLINE_KEY_HELP:
+ cirbuf_align_left(&rdl->left);
+ rdl->left_buf[CIRBUF_GET_LEN(&rdl->left)] = '\0';
+ if (rdl->complete) {
+ char tmp_buf[BUFSIZ];
+ int complete_state;
+ int ret;
+ unsigned int tmp_size;
+
+ if (cmd == CMDLINE_KEY_TAB)
+ complete_state = 0;
+ else
+ complete_state = -1;
+
+ /* see cmdline_parse.h for help on complete() */
+ ret = rdl->complete(rdl, rdl->left_buf,
+ tmp_buf, sizeof(tmp_buf),
+ &complete_state);
+ /* no completion or error */
+ if (ret <= 0) {
+ return RDLINE_RES_COMPLETE;
+ }
+
+ tmp_size = strnlen(tmp_buf, sizeof(tmp_buf));
+ /* add chars */
+ if (ret == RDLINE_RES_COMPLETE) {
+ i=0;
+ while(CIRBUF_GET_LEN(&rdl->right) + CIRBUF_GET_LEN(&rdl->left) <
+ RDLINE_BUF_SIZE &&
+ i < tmp_size) {
+ cirbuf_add_tail(&rdl->left, tmp_buf[i]);
+ rdl->write_char(rdl, tmp_buf[i]);
+ i++;
+ }
+ display_right_buffer(rdl, 1);
+ return RDLINE_RES_COMPLETE; /* ?? */
+ }
+
+ /* choice */
+ rdline_puts(rdl, "\r\n");
+ while (ret) {
+ rdl->write_char(rdl, ' ');
+ for (i=0 ; tmp_buf[i] ; i++)
+ rdl->write_char(rdl, tmp_buf[i]);
+ rdline_puts(rdl, "\r\n");
+ ret = rdl->complete(rdl, rdl->left_buf,
+ tmp_buf, sizeof(tmp_buf),
+ &complete_state);
+ }
+
+ rdline_redisplay(rdl);
+ }
+ return RDLINE_RES_COMPLETE;
+
+ /* complete buffer */
+ case CMDLINE_KEY_RETURN:
+ case CMDLINE_KEY_RETURN2:
+ rdline_get_buffer(rdl);
+ rdl->status = RDLINE_INIT;
+ rdline_puts(rdl, "\r\n");
+ if (rdl->history_cur_line != -1)
+ rdline_remove_first_history_item(rdl);
+
+ if (rdl->validate)
+ rdl->validate(rdl, rdl->left_buf, CIRBUF_GET_LEN(&rdl->left)+2);
+ /* user may have stopped rdline */
+ if (rdl->status == RDLINE_EXITED)
+ return RDLINE_RES_EXITED;
+ return RDLINE_RES_VALIDATED;
+
+ /* previous element in history */
+ case CMDLINE_KEY_UP_ARR:
+ case CMDLINE_KEY_CTRL_P:
+ if (rdl->history_cur_line == 0) {
+ rdline_remove_first_history_item(rdl);
+ }
+ if (rdl->history_cur_line <= 0) {
+ rdline_add_history(rdl, rdline_get_buffer(rdl));
+ rdl->history_cur_line = 0;
+ }
+
+ buf = rdline_get_history_item(rdl, rdl->history_cur_line + 1);
+ if (!buf)
+ break;
+
+ rdl->history_cur_line ++;
+ vt100_init(&rdl->vt100);
+ cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);
+ cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
+ cirbuf_add_buf_tail(&rdl->left, buf, strnlen(buf, RDLINE_BUF_SIZE));
+ rdline_redisplay(rdl);
+ break;
+
+ /* next element in history */
+ case CMDLINE_KEY_DOWN_ARR:
+ case CMDLINE_KEY_CTRL_N:
+ if (rdl->history_cur_line - 1 < 0)
+ break;
+
+ rdl->history_cur_line --;
+ buf = rdline_get_history_item(rdl, rdl->history_cur_line);
+ if (!buf)
+ break;
+ vt100_init(&rdl->vt100);
+ cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);
+ cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
+ cirbuf_add_buf_tail(&rdl->left, buf, strnlen(buf, RDLINE_BUF_SIZE));
+ rdline_redisplay(rdl);
+
+ break;
+
+
+ default:
+ break;
+ }
+
+ return RDLINE_RES_SUCCESS;
+ }
+
+ if (!isprint((int)c))
+ return RDLINE_RES_SUCCESS;
+
+ /* standard chars */
+ if (CIRBUF_GET_LEN(&rdl->left) + CIRBUF_GET_LEN(&rdl->right) >= RDLINE_BUF_SIZE)
+ return RDLINE_RES_SUCCESS;
+
+ if (cirbuf_add_tail_safe(&rdl->left, c))
+ return RDLINE_RES_SUCCESS;
+
+ rdl->write_char(rdl, c);
+ display_right_buffer(rdl, 0);
+
+ return RDLINE_RES_SUCCESS;
+}
+
+
+/* HISTORY */
+
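+/*
+ * History entries are stored back to back in the 'history' circular buffer,
+ * each terminated by '\0'. New lines are appended at the tail by
+ * rdline_add_history(); when space runs out, the oldest entry is dropped from
+ * the head by rdline_remove_old_history_item().
+ */
+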
+static void
+rdline_remove_old_history_item(struct rdline * rdl)
+{
+ char tmp;
+
+ while (! CIRBUF_IS_EMPTY(&rdl->history) ) {
+ tmp = cirbuf_get_head(&rdl->history);
+ cirbuf_del_head(&rdl->history);
+ if (!tmp)
+ break;
+ }
+}
+
+static void
+rdline_remove_first_history_item(struct rdline * rdl)
+{
+ char tmp;
+
+ if ( CIRBUF_IS_EMPTY(&rdl->history) ) {
+ return;
+ }
+ else {
+ cirbuf_del_tail(&rdl->history);
+ }
+
+ while (! CIRBUF_IS_EMPTY(&rdl->history) ) {
+ tmp = cirbuf_get_tail(&rdl->history);
+ if (!tmp)
+ break;
+ cirbuf_del_tail(&rdl->history);
+ }
+}
+
+static unsigned int
+rdline_get_history_size(struct rdline * rdl)
+{
+ unsigned int i, tmp, ret=0;
+
+ CIRBUF_FOREACH(&rdl->history, i, tmp) {
+ if (tmp == 0)
+ ret ++;
+ }
+
+ return ret;
+}
+
+char *
+rdline_get_history_item(struct rdline * rdl, unsigned int idx)
+{
+ unsigned int len, i, tmp;
+
+ if (!rdl)
+ return NULL;
+
+ len = rdline_get_history_size(rdl);
+ if ( idx >= len ) {
+ return NULL;
+ }
+
+ cirbuf_align_left(&rdl->history);
+
+ CIRBUF_FOREACH(&rdl->history, i, tmp) {
+ if ( idx == len - 1) {
+ return rdl->history_buf + i;
+ }
+ if (tmp == 0)
+ len --;
+ }
+
+ return NULL;
+}
+
+int
+rdline_add_history(struct rdline * rdl, const char * buf)
+{
+ unsigned int len, i;
+
+ if (!rdl || !buf)
+ return -EINVAL;
+
+ len = strnlen(buf, RDLINE_BUF_SIZE);
+ for (i=0; i<len ; i++) {
+ if (buf[i] == '\n') {
+ len = i;
+ break;
+ }
+ }
+
+ if ( len >= RDLINE_HISTORY_BUF_SIZE )
+ return -1;
+
+ while ( len >= CIRBUF_GET_FREELEN(&rdl->history) ) {
+ rdline_remove_old_history_item(rdl);
+ }
+
+ cirbuf_add_buf_tail(&rdl->history, buf, len);
+ cirbuf_add_tail(&rdl->history, 0);
+
+ return 0;
+}
+
+void
+rdline_clear_history(struct rdline * rdl)
+{
+ if (!rdl)
+ return;
+ cirbuf_init(&rdl->history, rdl->history_buf, 0, RDLINE_HISTORY_BUF_SIZE);
+}
+
+
+/* STATIC USEFUL FUNCS */
+
+static void
+rdline_puts(struct rdline * rdl, const char * buf)
+{
+ char c;
+ while ( (c = *(buf++)) != '\0' ) {
+ rdl->write_char(rdl, c);
+ }
+}
+
+/* a very basic printf that supports a single unsigned argument via the 'u' format */
+static void
+rdline_miniprintf(struct rdline *rdl, const char * buf, unsigned int val)
+{
+ char c, started=0, div=100;
+
+ while ( (c=*(buf++)) ) {
+ if (c != '%') {
+ rdl->write_char(rdl, c);
+ continue;
+ }
+ c = *(buf++);
+ if (c != 'u') {
+ rdl->write_char(rdl, '%');
+ rdl->write_char(rdl, c);
+ continue;
+ }
+ /* val is never more than 255 */
+ while (div) {
+ c = (char)(val / div);
+ if (c || started) {
+ rdl->write_char(rdl, (char)(c+'0'));
+ started = 1;
+ }
+ val %= div;
+ div /= 10;
+ }
+ }
+}
+
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_rdline.h b/src/dpdk_lib18/librte_cmdline/cmdline_rdline.h
new file mode 100755
index 00000000..ae6e24e8
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_rdline.h
@@ -0,0 +1,254 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDLINE_H_
+#define _RDLINE_H_
+
+/**
+ * This file is a small equivalent to the GNU readline library, but it
+ * was originally designed for small systems, like Atmel AVR
+ * microcontrollers (8 bits). It does not use malloc, which is sometimes
+ * not implemented (or simply not recommended) on such systems.
+ *
+ * Obviously, it does not support as many things as the GNU readline,
+ * but it does support some useful features such as a kill buffer and a
+ * command history.
+ *
+ * It also has a feature that the GNU readline does not have (as far as
+ * I know): several instances of it can run at the same time, even in a
+ * single-threaded program, since it works with callbacks.
+ *
+ * The library is designed for either client-side or server-side use:
+ * - server-side: the server receives all data from a socket, including
+ *   control characters such as arrows and tabs. The client is very
+ *   simple; it can be a telnet client or a minicom session over a
+ *   serial line.
+ * - client-side: the client receives its data through its stdin, for
+ *   instance.
+ */
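+
+/*
+ * A minimal usage sketch, assuming hypothetical application callbacks
+ * my_write_char(), my_validate() and my_complete() that match the
+ * rdline_write_char_t, rdline_validate_t and rdline_complete_t types below
+ * (illustrative only, not part of the original sources):
+ *
+ *   static struct rdline rdl;
+ *   int c;
+ *
+ *   static int my_write_char(struct rdline *r, char ch)
+ *   {
+ *       (void)r;
+ *       return putchar(ch) == EOF ? -1 : 1;   // echo each char
+ *   }
+ *
+ *   rdline_init(&rdl, my_write_char, my_validate, my_complete);
+ *   rdline_newline(&rdl, "example> ");
+ *   while ((c = getchar()) != EOF)
+ *       if (rdline_char_in(&rdl, (char)c) == RDLINE_RES_EXITED)
+ *           break;
+ */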
+
+#include <cmdline_cirbuf.h>
+#include <cmdline_vt100.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* configuration */
+#define RDLINE_BUF_SIZE 256
+#define RDLINE_PROMPT_SIZE 32
+#define RDLINE_VT100_BUF_SIZE 8
+#define RDLINE_HISTORY_BUF_SIZE BUFSIZ
+#define RDLINE_HISTORY_MAX_LINE 64
+
+enum rdline_status {
+ RDLINE_INIT,
+ RDLINE_RUNNING,
+ RDLINE_EXITED
+};
+
+struct rdline;
+
+typedef int (rdline_write_char_t)(struct rdline *rdl, char);
+typedef void (rdline_validate_t)(struct rdline *rdl,
+ const char *buf, unsigned int size);
+typedef int (rdline_complete_t)(struct rdline *rdl, const char *buf,
+ char *dstbuf, unsigned int dstsize,
+ int *state);
+
+struct rdline {
+ enum rdline_status status;
+ /* rdline bufs */
+ struct cirbuf left;
+ struct cirbuf right;
+ char left_buf[RDLINE_BUF_SIZE+2]; /* reserve 2 chars for the \n\0 */
+ char right_buf[RDLINE_BUF_SIZE];
+
+ char prompt[RDLINE_PROMPT_SIZE];
+ unsigned int prompt_size;
+
+ char kill_buf[RDLINE_BUF_SIZE];
+ unsigned int kill_size;
+
+ /* history */
+ struct cirbuf history;
+ char history_buf[RDLINE_HISTORY_BUF_SIZE];
+ int history_cur_line;
+
+ /* callbacks and func pointers */
+ rdline_write_char_t *write_char;
+ rdline_validate_t *validate;
+ rdline_complete_t *complete;
+
+ /* vt100 parser */
+ struct cmdline_vt100 vt100;
+
+ /* opaque pointer */
+ void *opaque;
+};
+
+/**
+ * Init fields for a struct rdline. Call this only once at the beginning
+ * of your program.
+ * \param rdl A pointer to an uninitialized struct rdline
+ * \param write_char The function used by rdline to write a character
+ * \param validate A pointer to the function to execute when the
+ * user validates the buffer.
+ * \param complete A pointer to the function to execute when the
+ * user completes the buffer.
+ */
+int rdline_init(struct rdline *rdl,
+ rdline_write_char_t *write_char,
+ rdline_validate_t *validate,
+ rdline_complete_t *complete);
+
+
+/**
+ * Init the current buffer, and display a prompt.
+ * \param rdl A pointer to a struct rdline
+ * \param prompt A string containing the prompt
+ */
+void rdline_newline(struct rdline *rdl, const char *prompt);
+
+/**
+ * Stop the rdline: once called, all received chars will be ignored.
+ * \param rdl A pointer to a struct rdline
+ */
+void rdline_stop(struct rdline *rdl);
+
+/**
+ * Same as rdline_stop(), except that subsequent calls to rdline_char_in()
+ * will return RDLINE_RES_EXITED.
+ * \param rdl A pointer to a struct rdline
+ */
+void rdline_quit(struct rdline *rdl);
+
+/**
+ * Restart after a call to rdline_stop() or rdline_quit()
+ * \param rdl A pointer to a struct rdline
+ */
+void rdline_restart(struct rdline *rdl);
+
+/**
+ * Redisplay the current buffer
+ * \param rdl A pointer to a struct rdline
+ */
+void rdline_redisplay(struct rdline *rdl);
+
+/**
+ * Reset the current buffer and setup for a new line.
+ * \param rdl A pointer to a struct rdline
+ */
+void rdline_reset(struct rdline *rdl);
+
+
+/* return status for rdline_char_in() */
+#define RDLINE_RES_SUCCESS 0
+#define RDLINE_RES_VALIDATED 1
+#define RDLINE_RES_COMPLETE 2
+#define RDLINE_RES_NOT_RUNNING -1
+#define RDLINE_RES_EOF -2
+#define RDLINE_RES_EXITED -3
+
+/**
+ * Append a char to the readline buffer.
+ * Return RDLINE_RES_VALIDATED when the line has been validated.
+ * Return RDLINE_RES_COMPLETE when the user asked to complete the buffer.
+ * Return RDLINE_RES_NOT_RUNNING if it is not running.
+ * Return RDLINE_RES_EOF if EOF (ctrl-d on an empty line).
+ * Else return RDLINE_RES_SUCCESS.
+ * XXX error case when the buffer is full ?
+ *
+ * \param rdl A pointer to a struct rdline
+ * \param c The character to append
+ */
+int rdline_char_in(struct rdline *rdl, char c);
+
+/**
+ * Return the current buffer, terminated by '\0'.
+ * \param rdl A pointer to a struct rdline
+ */
+const char *rdline_get_buffer(struct rdline *rdl);
+
+
+/**
+ * Add the buffer to history.
+ * Return < 0 on error.
+ * \param rdl A pointer to a struct rdline
+ * \param buf A buffer that is terminated by '\0'
+ */
+int rdline_add_history(struct rdline *rdl, const char *buf);
+
+/**
+ * Clear current history
+ * \param rdl A pointer to a struct rdline
+ */
+void rdline_clear_history(struct rdline *rdl);
+
+/**
+ * Get the i-th history item
+ */
+char *rdline_get_history_item(struct rdline *rdl, unsigned int i);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RDLINE_H_ */
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_socket.c b/src/dpdk_lib18/librte_cmdline/cmdline_socket.c
new file mode 100755
index 00000000..6820b6df
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_socket.c
@@ -0,0 +1,119 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <fcntl.h>
+#include <termios.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_rdline.h"
+#include "cmdline_socket.h"
+#include "cmdline.h"
+
+struct cmdline *
+cmdline_file_new(cmdline_parse_ctx_t *ctx, const char *prompt, const char *path)
+{
+ int fd;
+
+ /* everything else is checked in cmdline_new() */
+ if (!path)
+ return NULL;
+
+ fd = open(path, O_RDONLY, 0);
+ if (fd < 0) {
+ dprintf("open() failed\n");
+ return NULL;
+ }
+ return (cmdline_new(ctx, prompt, fd, -1));
+}
+
+struct cmdline *
+cmdline_stdin_new(cmdline_parse_ctx_t *ctx, const char *prompt)
+{
+ struct cmdline *cl;
+ struct termios oldterm, term;
+
+ tcgetattr(0, &oldterm);
+ memcpy(&term, &oldterm, sizeof(term));
+ term.c_lflag &= ~(ICANON | ECHO | ISIG);
+ tcsetattr(0, TCSANOW, &term);
+ setbuf(stdin, NULL);
+
+ cl = cmdline_new(ctx, prompt, 0, 1);
+
+ if (cl)
+ memcpy(&cl->oldterm, &oldterm, sizeof(term));
+
+ return cl;
+}
+
+void
+cmdline_stdin_exit(struct cmdline *cl)
+{
+ if (!cl)
+ return;
+
+ tcsetattr(fileno(stdin), TCSANOW, &cl->oldterm);
+}
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_socket.h b/src/dpdk_lib18/librte_cmdline/cmdline_socket.h
new file mode 100755
index 00000000..8cc2dfbc
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_socket.h
@@ -0,0 +1,76 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CMDLINE_SOCKET_H_
+#define _CMDLINE_SOCKET_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct cmdline *cmdline_file_new(cmdline_parse_ctx_t *ctx, const char *prompt, const char *path);
+struct cmdline *cmdline_stdin_new(cmdline_parse_ctx_t *ctx, const char *prompt);
+void cmdline_stdin_exit(struct cmdline *cl);
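+
+/*
+ * A minimal usage sketch (assumptions: 'main_ctx' is an application-defined
+ * parse context, and cmdline_interact() is the input loop declared in
+ * cmdline.h):
+ *
+ *   struct cmdline *cl = cmdline_stdin_new(main_ctx, "prompt> ");
+ *   if (cl != NULL) {
+ *       cmdline_interact(cl);    // read and parse lines from stdin
+ *       cmdline_stdin_exit(cl);  // restore the saved termios settings
+ *   }
+ */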
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CMDLINE_SOCKET_H_ */
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_vt100.c b/src/dpdk_lib18/librte_cmdline/cmdline_vt100.c
new file mode 100755
index 00000000..a253e8b6
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_vt100.c
@@ -0,0 +1,185 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <termios.h>
+
+#include "cmdline_vt100.h"
+
+const char *cmdline_vt100_commands[] = {
+ vt100_up_arr,
+ vt100_down_arr,
+ vt100_right_arr,
+ vt100_left_arr,
+ "\177",
+ "\n",
+ "\001",
+ "\005",
+ "\013",
+ "\031",
+ "\003",
+ "\006",
+ "\002",
+ vt100_suppr,
+ vt100_tab,
+ "\004",
+ "\014",
+ "\r",
+ "\033\177",
+ vt100_word_left,
+ vt100_word_right,
+ "?",
+ "\027",
+ "\020",
+ "\016",
+ "\033\144",
+};
+
+void
+vt100_init(struct cmdline_vt100 *vt)
+{
+ if (!vt)
+ return;
+ vt->state = CMDLINE_VT100_INIT;
+}
+
+
+static int
+match_command(char *buf, unsigned int size)
+{
+ const char *cmd;
+ size_t cmdlen;
+ unsigned int i = 0;
+
+ for (i=0 ; i<sizeof(cmdline_vt100_commands)/sizeof(const char *) ; i++) {
+ cmd = *(cmdline_vt100_commands + i);
+
+ cmdlen = strnlen(cmd, CMDLINE_VT100_BUF_SIZE);
+ if (size == cmdlen &&
+ !strncmp(buf, cmd, cmdlen)) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+int
+vt100_parser(struct cmdline_vt100 *vt, char ch)
+{
+ unsigned int size;
+ uint8_t c = (uint8_t) ch;
+
+ if (!vt)
+ return -1;
+
+ if (vt->bufpos >= CMDLINE_VT100_BUF_SIZE) {
+ vt->state = CMDLINE_VT100_INIT;
+ vt->bufpos = 0;
+ }
+
+ vt->buf[vt->bufpos++] = c;
+ size = vt->bufpos;
+
+ switch (vt->state) {
+ case CMDLINE_VT100_INIT:
+ if (c == 033) {
+ vt->state = CMDLINE_VT100_ESCAPE;
+ }
+ else {
+ vt->bufpos = 0;
+ goto match_command;
+ }
+ break;
+
+ case CMDLINE_VT100_ESCAPE:
+ if (c == 0133) {
+ vt->state = CMDLINE_VT100_ESCAPE_CSI;
+ }
+ else if (c >= 060 && c <= 0177) { /* XXX 0177 ? */
+ vt->bufpos = 0;
+ vt->state = CMDLINE_VT100_INIT;
+ goto match_command;
+ }
+ break;
+
+ case CMDLINE_VT100_ESCAPE_CSI:
+ if (c >= 0100 && c <= 0176) {
+ vt->bufpos = 0;
+ vt->state = CMDLINE_VT100_INIT;
+ goto match_command;
+ }
+ break;
+
+ default:
+ vt->bufpos = 0;
+ break;
+ }
+
+ return -2;
+
+ match_command:
+ return match_command(vt->buf, size);
+}
diff --git a/src/dpdk_lib18/librte_cmdline/cmdline_vt100.h b/src/dpdk_lib18/librte_cmdline/cmdline_vt100.h
new file mode 100755
index 00000000..b9840f6c
--- /dev/null
+++ b/src/dpdk_lib18/librte_cmdline/cmdline_vt100.h
@@ -0,0 +1,151 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CMDLINE_VT100_H_
+#define _CMDLINE_VT100_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define vt100_bell "\007"
+#define vt100_bs "\010"
+#define vt100_bs_clear "\010 \010"
+#define vt100_tab "\011"
+#define vt100_crnl "\012\015"
+#define vt100_clear_right "\033[0K"
+#define vt100_clear_left "\033[1K"
+#define vt100_clear_down "\033[0J"
+#define vt100_clear_up "\033[1J"
+#define vt100_clear_line "\033[2K"
+#define vt100_clear_screen "\033[2J"
+#define vt100_up_arr "\033\133\101"
+#define vt100_down_arr "\033\133\102"
+#define vt100_right_arr "\033\133\103"
+#define vt100_left_arr "\033\133\104"
+#define vt100_multi_right "\033\133%uC"
+#define vt100_multi_left "\033\133%uD"
+#define vt100_suppr "\033\133\063\176"
+#define vt100_home "\033M\033E"
+#define vt100_word_left "\033\142"
+#define vt100_word_right "\033\146"
+
+/* Result of parsing: it must be kept synchronized with
+ * cmdline_vt100_commands[] in cmdline_vt100.c */
+#define CMDLINE_KEY_UP_ARR 0
+#define CMDLINE_KEY_DOWN_ARR 1
+#define CMDLINE_KEY_RIGHT_ARR 2
+#define CMDLINE_KEY_LEFT_ARR 3
+#define CMDLINE_KEY_BKSPACE 4
+#define CMDLINE_KEY_RETURN 5
+#define CMDLINE_KEY_CTRL_A 6
+#define CMDLINE_KEY_CTRL_E 7
+#define CMDLINE_KEY_CTRL_K 8
+#define CMDLINE_KEY_CTRL_Y 9
+#define CMDLINE_KEY_CTRL_C 10
+#define CMDLINE_KEY_CTRL_F 11
+#define CMDLINE_KEY_CTRL_B 12
+#define CMDLINE_KEY_SUPPR 13
+#define CMDLINE_KEY_TAB 14
+#define CMDLINE_KEY_CTRL_D 15
+#define CMDLINE_KEY_CTRL_L 16
+#define CMDLINE_KEY_RETURN2 17
+#define CMDLINE_KEY_META_BKSPACE 18
+#define CMDLINE_KEY_WLEFT 19
+#define CMDLINE_KEY_WRIGHT 20
+#define CMDLINE_KEY_HELP 21
+#define CMDLINE_KEY_CTRL_W 22
+#define CMDLINE_KEY_CTRL_P 23
+#define CMDLINE_KEY_CTRL_N 24
+#define CMDLINE_KEY_META_D 25
+
+extern const char *cmdline_vt100_commands[];
+
+enum cmdline_vt100_parser_state {
+ CMDLINE_VT100_INIT,
+ CMDLINE_VT100_ESCAPE,
+ CMDLINE_VT100_ESCAPE_CSI
+};
+
+#define CMDLINE_VT100_BUF_SIZE 8
+struct cmdline_vt100 {
+ uint8_t bufpos;
+ char buf[CMDLINE_VT100_BUF_SIZE];
+ enum cmdline_vt100_parser_state state;
+};
+
+/**
+ * Initialize the vt100 parser state.
+ */
+void vt100_init(struct cmdline_vt100 *vt);
+
+/**
+ * Input a new character.
+ * Return -1 if the character is not part of a control sequence
+ * Return -2 if c is not the last char of a control sequence
+ * Else return the index in cmdline_vt100_commands[]
+ */
+int vt100_parser(struct cmdline_vt100 *vt, char c);
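+
+/*
+ * A minimal usage sketch (assumptions: 'buf' and 'n' hold raw input bytes and
+ * handle_key() is a hypothetical application handler):
+ *
+ *   struct cmdline_vt100 vt;
+ *   unsigned int i;
+ *   vt100_init(&vt);
+ *   for (i = 0; i < n; i++) {
+ *       int cmd = vt100_parser(&vt, buf[i]);
+ *       if (cmd == -2)
+ *           continue;            // escape sequence not complete yet
+ *       if (cmd >= 0)
+ *           handle_key(cmd);     // e.g. CMDLINE_KEY_UP_ARR
+ *       // cmd == -1: plain character, not a recognized sequence
+ *   }
+ */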
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_distributor/Makefile b/src/dpdk_lib18/librte_distributor/Makefile
new file mode 100755
index 00000000..36699f84
--- /dev/null
+++ b/src/dpdk_lib18/librte_distributor/Makefile
@@ -0,0 +1,50 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_distributor.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) := rte_distributor.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR)-include := rte_distributor.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += lib/librte_mbuf
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_distributor/rte_distributor.c b/src/dpdk_lib18/librte_distributor/rte_distributor.c
new file mode 100755
index 00000000..e0fdb4c1
--- /dev/null
+++ b/src/dpdk_lib18/librte_distributor/rte_distributor.c
@@ -0,0 +1,488 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <sys/queue.h>
+#include <string.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_tailq.h>
+#include <rte_eal_memconfig.h>
+#include "rte_distributor.h"
+
+#define NO_FLAGS 0
+#define RTE_DISTRIB_PREFIX "DT_"
+
+/* We use the bottom four bits of the pointer for flags, shifting out
+ * the top four bits to make room (since a 64-bit pointer actually only uses
+ * 48 bits). An arithmetic right shift will then appropriately restore the
+ * original pointer value with proper sign extension into the top bits. */
+#define RTE_DISTRIB_FLAG_BITS 4
+#define RTE_DISTRIB_FLAGS_MASK (0x0F)
+#define RTE_DISTRIB_NO_BUF 0 /**< empty flags: no buffer requested */
+#define RTE_DISTRIB_GET_BUF (1) /**< worker requests a buffer, returns old */
+#define RTE_DISTRIB_RETURN_BUF (2) /**< worker returns a buffer, no request */
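+
+/* A short sketch of how these flags are used (illustrative only; 'pkt' is a
+ * hypothetical struct rte_mbuf pointer):
+ *
+ *   int64_t req = (((int64_t)(uintptr_t)pkt) << RTE_DISTRIB_FLAG_BITS)
+ *                 | RTE_DISTRIB_GET_BUF;                              // pack
+ *   struct rte_mbuf *back =
+ *       (struct rte_mbuf *)(uintptr_t)(req >> RTE_DISTRIB_FLAG_BITS); // unpack
+ */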
+
+#define RTE_DISTRIB_BACKLOG_SIZE 8
+#define RTE_DISTRIB_BACKLOG_MASK (RTE_DISTRIB_BACKLOG_SIZE - 1)
+
+#define RTE_DISTRIB_MAX_RETURNS 128
+#define RTE_DISTRIB_RETURNS_MASK (RTE_DISTRIB_MAX_RETURNS - 1)
+
+/**
+ * Maximum number of workers allowed.
+ * Be careful when increasing this limit, because it is limited by how we
+ * track in-flight tags. See @in_flight_bitmask and @rte_distributor_process
+ */
+#define RTE_DISTRIB_MAX_WORKERS 64
+
+/**
+ * Buffer structure used to pass the pointer data between cores. This is cache
+ * line aligned, but to improve performance and prevent adjacent cache-line
+ * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
+ * the next cache line to worker 0, we pad this out to three cache lines.
+ * Only 64 bits of the memory are actually used, though.
+ */
+union rte_distributor_buffer {
+ volatile int64_t bufptr64;
+ char pad[RTE_CACHE_LINE_SIZE*3];
+} __rte_cache_aligned;
+
+struct rte_distributor_backlog {
+ unsigned start;
+ unsigned count;
+ int64_t pkts[RTE_DISTRIB_BACKLOG_SIZE];
+};
+
+struct rte_distributor_returned_pkts {
+ unsigned start;
+ unsigned count;
+ struct rte_mbuf *mbufs[RTE_DISTRIB_MAX_RETURNS];
+};
+
+struct rte_distributor {
+ TAILQ_ENTRY(rte_distributor) next; /**< Next in list. */
+
+ char name[RTE_DISTRIBUTOR_NAMESIZE]; /**< Name of the ring. */
+ unsigned num_workers; /**< Number of workers polling */
+
+ uint32_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS];
+ /**< Tracks the tag being processed per core */
+ uint64_t in_flight_bitmask;
+ /**< on/off bits for in-flight tags.
+ * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64 then
+ * the bitmask has to expand.
+ */
+
+ struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS];
+
+ union rte_distributor_buffer bufs[RTE_DISTRIB_MAX_WORKERS];
+
+ struct rte_distributor_returned_pkts returns;
+};
+
+TAILQ_HEAD(rte_distributor_list, rte_distributor);
+
+/**** APIs called by workers ****/
+
+void
+rte_distributor_request_pkt(struct rte_distributor *d,
+ unsigned worker_id, struct rte_mbuf *oldpkt)
+{
+ union rte_distributor_buffer *buf = &d->bufs[worker_id];
+ int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
+ | RTE_DISTRIB_GET_BUF;
+ while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
+ rte_pause();
+ buf->bufptr64 = req;
+}
+
+struct rte_mbuf *
+rte_distributor_poll_pkt(struct rte_distributor *d,
+ unsigned worker_id)
+{
+ union rte_distributor_buffer *buf = &d->bufs[worker_id];
+ if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
+ return NULL;
+
+ /* since bufptr64 is signed, this should be an arithmetic shift */
+ int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
+ return (struct rte_mbuf *)((uintptr_t)ret);
+}
+
+struct rte_mbuf *
+rte_distributor_get_pkt(struct rte_distributor *d,
+ unsigned worker_id, struct rte_mbuf *oldpkt)
+{
+ struct rte_mbuf *ret;
+ rte_distributor_request_pkt(d, worker_id, oldpkt);
+ while ((ret = rte_distributor_poll_pkt(d, worker_id)) == NULL)
+ rte_pause();
+ return ret;
+}
+
+int
+rte_distributor_return_pkt(struct rte_distributor *d,
+ unsigned worker_id, struct rte_mbuf *oldpkt)
+{
+ union rte_distributor_buffer *buf = &d->bufs[worker_id];
+ uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
+ | RTE_DISTRIB_RETURN_BUF;
+ buf->bufptr64 = req;
+ return 0;
+}
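+
+/* A typical worker loop built from the calls above (illustrative sketch;
+ * process_packet() is a hypothetical application function):
+ *
+ *   struct rte_mbuf *pkt = rte_distributor_get_pkt(d, worker_id, NULL);
+ *   for (;;) {
+ *       process_packet(pkt);
+ *       // hand the finished mbuf back and block until a new one is assigned
+ *       pkt = rte_distributor_get_pkt(d, worker_id, pkt);
+ *   }
+ */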
+
+/**** APIs called on distributor core ***/
+
+/* as the name suggests, adds a packet to the backlog for a particular worker */
+static int
+add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
+{
+ if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
+ return -1;
+
+ bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
+ = item;
+ return 0;
+}
+
+/* takes the next packet for a worker off the backlog */
+static int64_t
+backlog_pop(struct rte_distributor_backlog *bl)
+{
+ bl->count--;
+ return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
+}
+
+/* stores a packet returned from a worker inside the returns array */
+static inline void
+store_return(uintptr_t oldbuf, struct rte_distributor *d,
+ unsigned *ret_start, unsigned *ret_count)
+{
+ /* store returns in a circular buffer - code is branch-free */
+ d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
+ = (void *)oldbuf;
+ *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
+ *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
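+ /* The 0/1 boolean results above replace branches: when oldbuf is NULL
+ * neither index moves, and once ret_count has reached
+ * RTE_DISTRIB_RETURNS_MASK the start index is advanced instead so the
+ * count stops growing. */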
+}
+
+static inline void
+handle_worker_shutdown(struct rte_distributor *d, unsigned wkr)
+{
+ d->in_flight_tags[wkr] = 0;
+ d->in_flight_bitmask &= ~(1UL << wkr);
+ d->bufs[wkr].bufptr64 = 0;
+ if (unlikely(d->backlog[wkr].count != 0)) {
+ /* On return of a packet, we need to move the
+ * queued packets for this core elsewhere.
+ * Easiest solution is to set things up for
+ * a recursive call. That will cause those
+ * packets to be queued up for the next free
+ * core, i.e. it will return as soon as a
+ * core becomes free to accept the first
+ * packet, as subsequent ones will be added to
+ * the backlog for that core.
+ */
+ struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
+ unsigned i;
+ struct rte_distributor_backlog *bl = &d->backlog[wkr];
+
+ for (i = 0; i < bl->count; i++) {
+ unsigned idx = (bl->start + i) &
+ RTE_DISTRIB_BACKLOG_MASK;
+ pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
+ RTE_DISTRIB_FLAG_BITS));
+ }
+ /* recursive call.
+ * Note that the tags were set before first level call
+ * to rte_distributor_process.
+ */
+ rte_distributor_process(d, pkts, i);
+ bl->count = bl->start = 0;
+ }
+}
+
+/* This function is called when the process() function is called without any
+ * new packets. It goes through all the workers and clears any returned packets
+ * to do a partial flush.
+ */
+static int
+process_returns(struct rte_distributor *d)
+{
+ unsigned wkr;
+ unsigned flushed = 0;
+ unsigned ret_start = d->returns.start,
+ ret_count = d->returns.count;
+
+ for (wkr = 0; wkr < d->num_workers; wkr++) {
+
+ const int64_t data = d->bufs[wkr].bufptr64;
+ uintptr_t oldbuf = 0;
+
+ if (data & RTE_DISTRIB_GET_BUF) {
+ flushed++;
+ if (d->backlog[wkr].count)
+ d->bufs[wkr].bufptr64 =
+ backlog_pop(&d->backlog[wkr]);
+ else {
+ d->bufs[wkr].bufptr64 = RTE_DISTRIB_GET_BUF;
+ d->in_flight_tags[wkr] = 0;
+ d->in_flight_bitmask &= ~(1UL << wkr);
+ }
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ } else if (data & RTE_DISTRIB_RETURN_BUF) {
+ handle_worker_shutdown(d, wkr);
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ }
+
+ store_return(oldbuf, d, &ret_start, &ret_count);
+ }
+
+ d->returns.start = ret_start;
+ d->returns.count = ret_count;
+
+ return flushed;
+}
+
+/* process a set of packets to distribute them to workers */
+int
+rte_distributor_process(struct rte_distributor *d,
+ struct rte_mbuf **mbufs, unsigned num_mbufs)
+{
+ unsigned next_idx = 0;
+ unsigned wkr = 0;
+ struct rte_mbuf *next_mb = NULL;
+ int64_t next_value = 0;
+ uint32_t new_tag = 0;
+ unsigned ret_start = d->returns.start,
+ ret_count = d->returns.count;
+
+ if (unlikely(num_mbufs == 0))
+ return process_returns(d);
+
+ while (next_idx < num_mbufs || next_mb != NULL) {
+
+ int64_t data = d->bufs[wkr].bufptr64;
+ uintptr_t oldbuf = 0;
+
+ if (!next_mb) {
+ next_mb = mbufs[next_idx++];
+ next_value = (((int64_t)(uintptr_t)next_mb)
+ << RTE_DISTRIB_FLAG_BITS);
+ /*
+ * The user is advised to set a tag value for each
+ * mbuf before calling rte_distributor_process().
+ * User-defined tags are used to identify flows
+ * or sessions.
+ */
+ new_tag = next_mb->hash.usr;
+
+ /*
+ * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64
+ * then the size of match has to be expanded.
+ */
+ uint64_t match = 0;
+ unsigned i;
+ /*
+ * To scan for a match, use "xor" and "not" to get a 0/1
+ * value, then use shifting to merge into a single "match"
+ * variable, where a set bit indicates a match for the
+ * worker given by the bit position.
+ */
+ for (i = 0; i < d->num_workers; i++)
+ match |= (!(d->in_flight_tags[i] ^ new_tag)
+ << i);
+
+ /* Only turned-on bits are considered as match */
+ match &= d->in_flight_bitmask;
+
+ if (match) {
+ next_mb = NULL;
+ unsigned worker = __builtin_ctzl(match);
+ if (add_to_backlog(&d->backlog[worker],
+ next_value) < 0)
+ next_idx--;
+ }
+ }
+
+ if ((data & RTE_DISTRIB_GET_BUF) &&
+ (d->backlog[wkr].count || next_mb)) {
+
+ if (d->backlog[wkr].count)
+ d->bufs[wkr].bufptr64 =
+ backlog_pop(&d->backlog[wkr]);
+
+ else {
+ d->bufs[wkr].bufptr64 = next_value;
+ d->in_flight_tags[wkr] = new_tag;
+ d->in_flight_bitmask |= (1UL << wkr);
+ next_mb = NULL;
+ }
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ } else if (data & RTE_DISTRIB_RETURN_BUF) {
+ handle_worker_shutdown(d, wkr);
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ }
+
+ /* store returns in a circular buffer */
+ store_return(oldbuf, d, &ret_start, &ret_count);
+
+ if (++wkr == d->num_workers)
+ wkr = 0;
+ }
+ /* to finish, check all workers for backlog and schedule work for them
+ * if they are ready */
+ for (wkr = 0; wkr < d->num_workers; wkr++)
+ if (d->backlog[wkr].count &&
+ (d->bufs[wkr].bufptr64 & RTE_DISTRIB_GET_BUF)) {
+
+ int64_t oldbuf = d->bufs[wkr].bufptr64 >>
+ RTE_DISTRIB_FLAG_BITS;
+ store_return(oldbuf, d, &ret_start, &ret_count);
+
+ d->bufs[wkr].bufptr64 = backlog_pop(&d->backlog[wkr]);
+ }
+
+ d->returns.start = ret_start;
+ d->returns.count = ret_count;
+ return num_mbufs;
+}
+
+/* return to the caller, packets returned from workers */
+int
+rte_distributor_returned_pkts(struct rte_distributor *d,
+ struct rte_mbuf **mbufs, unsigned max_mbufs)
+{
+ struct rte_distributor_returned_pkts *returns = &d->returns;
+ unsigned retval = (max_mbufs < returns->count) ?
+ max_mbufs : returns->count;
+ unsigned i;
+
+ for (i = 0; i < retval; i++) {
+ unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
+ mbufs[i] = returns->mbufs[idx];
+ }
+ returns->start += i;
+ returns->count -= i;
+
+ return retval;
+}
+
+/* return the number of packets in-flight in a distributor, i.e. packets
+ * being worked on or queued up in a backlog. */
+static inline unsigned
+total_outstanding(const struct rte_distributor *d)
+{
+ unsigned wkr, total_outstanding;
+
+ total_outstanding = __builtin_popcountl(d->in_flight_bitmask);
+
+ for (wkr = 0; wkr < d->num_workers; wkr++)
+ total_outstanding += d->backlog[wkr].count;
+
+ return total_outstanding;
+}
+
+/* flush the distributor, so that there are no outstanding packets in flight or
+ * queued up. */
+int
+rte_distributor_flush(struct rte_distributor *d)
+{
+ const unsigned flushed = total_outstanding(d);
+
+ while (total_outstanding(d) > 0)
+ rte_distributor_process(d, NULL, 0);
+
+ return flushed;
+}
+
+/* clears the internal returns array in the distributor */
+void
+rte_distributor_clear_returns(struct rte_distributor *d)
+{
+ d->returns.start = d->returns.count = 0;
+#ifndef __OPTIMIZE__
+ memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
+#endif
+}
+
+/* creates a distributor instance */
+struct rte_distributor *
+rte_distributor_create(const char *name,
+ unsigned socket_id,
+ unsigned num_workers)
+{
+ struct rte_distributor *d;
+ struct rte_distributor_list *distributor_list;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ /* compilation-time checks */
+ RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
+ RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
+ sizeof(d->in_flight_bitmask) * CHAR_BIT);
+
+ if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* check that we have an initialised tail queue */
+ distributor_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_DISTRIBUTOR,
+ rte_distributor_list);
+ if (distributor_list == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
+ mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
+ if (mz == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ d = mz->addr;
+ snprintf(d->name, sizeof(d->name), "%s", name);
+ d->num_workers = num_workers;
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_INSERT_TAIL(distributor_list, d, next);
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return d;
+}
diff --git a/src/dpdk_lib18/librte_distributor/rte_distributor.h b/src/dpdk_lib18/librte_distributor/rte_distributor.h
new file mode 100755
index 00000000..cc1d5590
--- /dev/null
+++ b/src/dpdk_lib18/librte_distributor/rte_distributor.h
@@ -0,0 +1,248 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_DISTRIBUTE_H_
+#define _RTE_DISTRIBUTE_H_
+
+/**
+ * @file
+ * RTE distributor
+ *
+ * The distributor is a component which is designed to pass packets
+ * one-at-a-time to workers, with dynamic load balancing.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mbuf.h>
+
+#define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
+
+struct rte_distributor;
+
+/**
+ * Function to create a new distributor instance
+ *
+ * Reserves the memory needed for the distributor operation and
+ * initializes the distributor to work with the configured number of workers.
+ *
+ * @param name
+ * The name to be given to the distributor instance.
+ * @param socket_id
+ * The NUMA node on which the memory is to be allocated
+ * @param num_workers
+ * The maximum number of workers that will request packets from this
+ * distributor
+ * @return
+ * The newly created distributor instance
+ */
+struct rte_distributor *
+rte_distributor_create(const char *name, unsigned socket_id,
+ unsigned num_workers);
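+
+/*
+ * For illustration, creation might look as follows (a sketch; the name
+ * "flow_dist", the use of rte_socket_id() and the worker count of 4 are
+ * arbitrary choices, not requirements of this API):
+ *
+ *     struct rte_distributor *d;
+ *
+ *     d = rte_distributor_create("flow_dist", rte_socket_id(), 4);
+ *     if (d == NULL)
+ *             rte_exit(EXIT_FAILURE, "Cannot create distributor\n");
+ */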
+
+/* *** APIs to be called on the distributor lcore *** */
+/*
+ * The following APIs are the public APIs which are designed for use on a
+ * single lcore which acts as the distributor lcore for a given distributor
+ * instance. These functions cannot be called on multiple cores simultaneously
+ * without using locking to protect access to the internals of the distributor.
+ *
+ * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
+ * for the same distributor instance, otherwise deadlock will result.
+ */
+
+/**
+ * Process a set of packets by distributing them among workers that request
+ * packets. The distributor will ensure that no two packets that have the
+ * same flow id, or tag, in the mbuf will be processed at the same time.
+ *
+ * The user is advised to set a tag for each mbuf before calling this function.
+ * If the user does not set the tag, its value may vary depending on the
+ * driver implementation and configuration.
+ *
+ * This is not multi-thread safe and should only be called on a single lcore.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param mbufs
+ * The mbufs to be distributed
+ * @param num_mbufs
+ * The number of mbufs in the mbufs array
+ * @return
+ * The number of mbufs processed.
+ */
+int
+rte_distributor_process(struct rte_distributor *d,
+ struct rte_mbuf **mbufs, unsigned num_mbufs);
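+
+/*
+ * A typical distributor-lcore loop might look roughly as follows (a sketch;
+ * the port/queue ids passed to rte_eth_rx_burst() and the BURST size are
+ * application-defined):
+ *
+ *     struct rte_mbuf *bufs[BURST], *done[BURST];
+ *     unsigned int nb_rx, nb_done, i;
+ *
+ *     for (;;) {
+ *             nb_rx = rte_eth_rx_burst(port_id, 0, bufs, BURST);
+ *             rte_distributor_process(d, bufs, nb_rx);
+ *             nb_done = rte_distributor_returned_pkts(d, done, BURST);
+ *             for (i = 0; i < nb_done; i++)
+ *                     rte_pktmbuf_free(done[i]);
+ *     }
+ */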
+
+/**
+ * Get a set of mbufs that have been returned to the distributor by workers
+ *
+ * This should only be called on the same lcore as rte_distributor_process()
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param mbufs
+ * The mbufs pointer array to be filled in
+ * @param max_mbufs
+ * The size of the mbufs array
+ * @return
+ * The number of mbufs returned in the mbufs array.
+ */
+int
+rte_distributor_returned_pkts(struct rte_distributor *d,
+ struct rte_mbuf **mbufs, unsigned max_mbufs);
+
+/**
+ * Flush the distributor component, so that there are no in-flight or
+ * backlogged packets awaiting processing
+ *
+ * This should only be called on the same lcore as rte_distributor_process()
+ *
+ * @param d
+ * The distributor instance to be used
+ * @return
+ * The number of queued/in-flight packets that were completed by this call.
+ */
+int
+rte_distributor_flush(struct rte_distributor *d);
+
+/**
+ * Clears the array of returned packets used as the source for the
+ * rte_distributor_returned_pkts() API call.
+ *
+ * This should only be called on the same lcore as rte_distributor_process()
+ *
+ * @param d
+ * The distributor instance to be used
+ */
+void
+rte_distributor_clear_returns(struct rte_distributor *d);
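+
+/*
+ * At shutdown, or whenever the distributor must be quiesced, the same lcore
+ * could, for example, drain it as follows (a sketch; "done" and BURST are
+ * application-defined):
+ *
+ *     int count;
+ *
+ *     rte_distributor_flush(d);
+ *     count = rte_distributor_returned_pkts(d, done, BURST);
+ *     // free or otherwise dispose of the "count" completed packets
+ *     rte_distributor_clear_returns(d);
+ */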
+
+/* *** APIs to be called on the worker lcores *** */
+/*
+ * The following APIs are the public APIs which are designed for use on
+ * multiple lcores which act as workers for a distributor. Each lcore should use
+ * a unique worker id when requesting packets.
+ *
+ * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
+ * for the same distributor instance, otherwise deadlock will result.
+ */
+
+/**
+ * API called by a worker to get a new packet to process. Any previous packet
+ * given to the worker is assumed to have completed processing, and may be
+ * optionally returned to the distributor via the oldpkt parameter.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ * The worker instance number to use - must be less than num_workers passed
+ * at distributor creation time.
+ * @param oldpkt
+ * The previous packet, if any, being processed by the worker
+ *
+ * @return
+ * A new packet to be processed by the worker thread.
+ */
+struct rte_mbuf *
+rte_distributor_get_pkt(struct rte_distributor *d,
+ unsigned worker_id, struct rte_mbuf *oldpkt);
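+
+/*
+ * A worker lcore will usually sit in a loop of the following shape (a sketch;
+ * worker_id and the do_work() step are application-defined):
+ *
+ *     struct rte_mbuf *pkt = NULL;
+ *
+ *     for (;;) {
+ *             pkt = rte_distributor_get_pkt(d, worker_id, pkt);
+ *             do_work(pkt);   // pkt is handed back on the next call
+ *     }
+ */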
+
+/**
+ * API called by a worker to return a completed packet without requesting a
+ * new packet, for example, because a worker thread is shutting down
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ * The worker instance number to use - must be less than num_workers passed
+ * at distributor creation time.
+ * @param mbuf
+ * The previous packet being processed by the worker
+ */
+int
+rte_distributor_return_pkt(struct rte_distributor *d, unsigned worker_id,
+ struct rte_mbuf *mbuf);
+
+/**
+ * API called by a worker to request a new packet to process.
+ * Any previous packet given to the worker is assumed to have completed
+ * processing, and may be optionally returned to the distributor via
+ * the oldpkt parameter.
+ * Unlike rte_distributor_get_pkt(), this function does not wait for a new
+ * packet to be provided by the distributor.
+ *
+ * NOTE: after calling this function, rte_distributor_poll_pkt() should
+ * be used to poll for the packet requested. The rte_distributor_get_pkt()
+ * API should *not* be used to try and retrieve the new packet.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ * The worker instance number to use - must be less than num_workers passed
+ * at distributor creation time.
+ * @param oldpkt
+ * The previous packet, if any, being processed by the worker
+ */
+void
+rte_distributor_request_pkt(struct rte_distributor *d,
+ unsigned worker_id, struct rte_mbuf *oldpkt);
+
+/**
+ * API called by a worker to check for a new packet that was previously
+ * requested by a call to rte_distributor_request_pkt(). It does not wait
+ * for the new packet to be available, but returns NULL if the request has
+ * not yet been fulfilled by the distributor.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ * The worker instance number to use - must be less than num_workers passed
+ * at distributor creation time.
+ *
+ * @return
+ * A new packet to be processed by the worker thread, or NULL if no
+ * packet is yet available.
+ */
+struct rte_mbuf *
+rte_distributor_poll_pkt(struct rte_distributor *d,
+ unsigned worker_id);
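+
+/*
+ * Together, the request/poll pair lets a worker overlap other work with
+ * waiting for its next packet, roughly as follows (a sketch; oldpkt,
+ * worker_id and do_other_work() are application-defined):
+ *
+ *     struct rte_mbuf *pkt;
+ *
+ *     rte_distributor_request_pkt(d, worker_id, oldpkt);
+ *     do {
+ *             do_other_work();
+ *             pkt = rte_distributor_poll_pkt(d, worker_id);
+ *     } while (pkt == NULL);
+ */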
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_eal/Makefile b/src/dpdk_lib18/librte_eal/Makefile
new file mode 100755
index 00000000..69003cfe
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/Makefile
@@ -0,0 +1,39 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += common
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += linuxapp
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += common
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += bsdapp
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/Makefile b/src/dpdk_lib18/librte_eal/bsdapp/Makefile
new file mode 100755
index 00000000..57548203
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/Makefile
@@ -0,0 +1,38 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += contigmem
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += nic_uio
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/contigmem/BSDmakefile b/src/dpdk_lib18/librte_eal/bsdapp/contigmem/BSDmakefile
new file mode 100755
index 00000000..f64374c6
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/contigmem/BSDmakefile
@@ -0,0 +1,36 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+KMOD= contigmem
+SRCS= contigmem.c device_if.h bus_if.h
+
+.include <bsd.kmod.mk>
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/contigmem/Makefile b/src/dpdk_lib18/librte_eal/bsdapp/contigmem/Makefile
new file mode 100755
index 00000000..bab005fd
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/contigmem/Makefile
@@ -0,0 +1,52 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# module name and path
+#
+MODULE = contigmem
+
+#
+# CFLAGS
+#
+MODULE_CFLAGS += -I$(SRCDIR)
+MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
+MODULE_CFLAGS += -Winline -Wall -Werror
+MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-y := contigmem.c
+
+include $(RTE_SDK)/mk/rte.bsdmodule.mk
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/contigmem/contigmem.c b/src/dpdk_lib18/librte_eal/bsdapp/contigmem/contigmem.c
new file mode 100755
index 00000000..b1a23fa6
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/contigmem/contigmem.c
@@ -0,0 +1,233 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/systm.h>
+#include <sys/sysctl.h>
+
+#include <machine/bus.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+
+static int contigmem_load(void);
+static int contigmem_unload(void);
+static int contigmem_physaddr(SYSCTL_HANDLER_ARGS);
+
+static d_mmap_t contigmem_mmap;
+static d_mmap_single_t contigmem_mmap_single;
+static d_open_t contigmem_open;
+
+static int contigmem_num_buffers = RTE_CONTIGMEM_DEFAULT_NUM_BUFS;
+static int contigmem_buffer_size = RTE_CONTIGMEM_DEFAULT_BUF_SIZE;
+
+static eventhandler_tag contigmem_eh_tag;
+static void *contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
+static struct cdev *contigmem_cdev = NULL;
+
+TUNABLE_INT("hw.contigmem.num_buffers", &contigmem_num_buffers);
+TUNABLE_INT("hw.contigmem.buffer_size", &contigmem_buffer_size);
+
+static SYSCTL_NODE(_hw, OID_AUTO, contigmem, CTLFLAG_RD, 0, "contigmem");
+
+SYSCTL_INT(_hw_contigmem, OID_AUTO, num_buffers, CTLFLAG_RD,
+ &contigmem_num_buffers, 0, "Number of contigmem buffers allocated");
+SYSCTL_INT(_hw_contigmem, OID_AUTO, buffer_size, CTLFLAG_RD,
+ &contigmem_buffer_size, 0, "Size of each contiguous buffer");
+
+static SYSCTL_NODE(_hw_contigmem, OID_AUTO, physaddr, CTLFLAG_RD, 0,
+ "physaddr");
+
+MALLOC_DEFINE(M_CONTIGMEM, "contigmem", "contigmem(4) allocations");
+
+static int contigmem_modevent(module_t mod, int type, void *arg)
+{
+ int error = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = contigmem_load();
+ break;
+ case MOD_UNLOAD:
+ error = contigmem_unload();
+ break;
+ default:
+ break;
+ }
+
+ return (error);
+}
+
+moduledata_t contigmem_mod = {
+ "contigmem",
+ (modeventhand_t)contigmem_modevent,
+ 0
+};
+
+DECLARE_MODULE(contigmem, contigmem_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
+MODULE_VERSION(contigmem, 1);
+
+static struct cdevsw contigmem_ops = {
+ .d_name = "contigmem",
+ .d_version = D_VERSION,
+ .d_mmap = contigmem_mmap,
+ .d_mmap_single = contigmem_mmap_single,
+ .d_open = contigmem_open,
+};
+
+static int
+contigmem_load()
+{
+ char index_string[8], description[32];
+ int i;
+
+ if (contigmem_num_buffers > RTE_CONTIGMEM_MAX_NUM_BUFS) {
+ printf("%d buffers requested is greater than %d allowed\n",
+ contigmem_num_buffers, RTE_CONTIGMEM_MAX_NUM_BUFS);
+ return (EINVAL);
+ }
+
+ if (contigmem_buffer_size < PAGE_SIZE ||
+ (contigmem_buffer_size & (contigmem_buffer_size - 1)) != 0) {
+ printf("buffer size 0x%x must be at least PAGE_SIZE and a "
+ "power of two\n", contigmem_buffer_size);
+ return (EINVAL);
+ }
+
+ for (i = 0; i < contigmem_num_buffers; i++) {
+ contigmem_buffers[i] =
+ contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO, 0,
+ BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
+
+ if (contigmem_buffers[i] == NULL) {
+ printf("contigmalloc failed for buffer %d\n", i);
+ return (ENOMEM);
+ }
+
+ printf("%2u: virt=%p phys=%p\n", i, contigmem_buffers[i],
+ (void *)pmap_kextract((vm_offset_t)contigmem_buffers[i]));
+
+ snprintf(index_string, sizeof(index_string), "%d", i);
+ snprintf(description, sizeof(description),
+ "phys addr for buffer %d", i);
+ SYSCTL_ADD_PROC(NULL,
+ &SYSCTL_NODE_CHILDREN(_hw_contigmem, physaddr), OID_AUTO,
+ index_string, CTLTYPE_U64 | CTLFLAG_RD,
+ (void *)(uintptr_t)i, 0, contigmem_physaddr, "LU",
+ description);
+ }
+
+ contigmem_cdev = make_dev_credf(0, &contigmem_ops, 0, NULL, UID_ROOT,
+ GID_WHEEL, 0600, "contigmem");
+
+ return (0);
+}
+
+static int
+contigmem_unload()
+{
+ int i;
+
+ if (contigmem_cdev != NULL)
+ destroy_dev(contigmem_cdev);
+
+ if (contigmem_eh_tag != NULL)
+ EVENTHANDLER_DEREGISTER(process_exit, contigmem_eh_tag);
+
+ for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++)
+ if (contigmem_buffers[i] != NULL)
+ contigfree(contigmem_buffers[i], contigmem_buffer_size,
+ M_CONTIGMEM);
+
+ return (0);
+}
+
+static int
+contigmem_physaddr(SYSCTL_HANDLER_ARGS)
+{
+ uint64_t physaddr;
+ int index = (int)(uintptr_t)arg1;
+
+ physaddr = (uint64_t)vtophys(contigmem_buffers[index]);
+ return (sysctl_handle_64(oidp, &physaddr, 0, req));
+}
+
+static int
+contigmem_open(struct cdev *cdev, int fflags, int devtype,
+ struct thread *td)
+{
+ return (0);
+}
+
+static int
+contigmem_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
+ int prot, vm_memattr_t *memattr)
+{
+
+ *paddr = offset;
+ return (0);
+}
+
+static int
+contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
+ struct vm_object **obj, int nprot)
+{
+ /*
+ * The buffer index is encoded in the offset. Divide the offset by
+ * PAGE_SIZE to get the index of the buffer requested by the user
+ * app.
+ */
+ if ((*offset/PAGE_SIZE) >= contigmem_num_buffers)
+ return (EINVAL);
+
+ *offset = (vm_ooffset_t)vtophys(contigmem_buffers[*offset/PAGE_SIZE]);
+ *obj = vm_pager_allocate(OBJT_DEVICE, cdev, size, nprot, *offset,
+ curthread->td_ucred);
+
+ return (0);
+}
+
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/Makefile b/src/dpdk_lib18/librte_eal/bsdapp/eal/Makefile
new file mode 100755
index 00000000..d4348822
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/Makefile
@@ -0,0 +1,97 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_eal.a
+
+VPATH += $(RTE_SDK)/lib/librte_eal/common
+
+CFLAGS += -I$(SRCDIR)/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_ring
+CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+CFLAGS += -I$(RTE_SDK)/lib/librte_malloc
+CFLAGS += -I$(RTE_SDK)/lib/librte_ether
+CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_ring
+CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_pcap
+CFLAGS += $(WERROR_FLAGS) -O3
+
+# specific to bsdapp exec-env
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) := eal.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_memory.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_hugepage_info.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_thread.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_log.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_debug.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_lcore.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_timer.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_interrupts.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_alarm.c
+
+# from common dir
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_memzone.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_log.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_launch.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_memory.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_tailqs.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_errno.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_cpuflags.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_string_fns.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_hexdump.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_devargs.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += eal_common_options.c
+
+CFLAGS_eal.o := -D_GNU_SOURCE
+#CFLAGS_eal_thread.o := -D_GNU_SOURCE
+CFLAGS_eal_log.o := -D_GNU_SOURCE
+CFLAGS_eal_common_log.o := -D_GNU_SOURCE
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_eal_thread.o += -Wno-return-type
+CFLAGS_eal_hpet.o += -Wno-return-type
+endif
+
+INC := rte_interrupts.h
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP)-include/exec-env := \
+ $(addprefix include/exec-env/,$(INC))
+
+DEPDIRS-$(CONFIG_RTE_LIBRTE_EAL_BSDAPP) += lib/librte_eal/common
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal.c
new file mode 100755
index 00000000..69f3c034
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal.c
@@ -0,0 +1,563 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <syslog.h>
+#include <getopt.h>
+#include <sys/file.h>
+#include <stddef.h>
+#include <errno.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_random.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_devargs.h>
+#include <rte_common.h>
+#include <rte_version.h>
+#include <rte_atomic.h>
+#include <malloc_heap.h>
+#include <rte_eth_ring.h>
+
+#include "eal_private.h"
+#include "eal_thread.h"
+#include "eal_internal_cfg.h"
+#include "eal_filesystem.h"
+#include "eal_hugepages.h"
+#include "eal_options.h"
+
+#define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
+
+/* Allow the application to print its usage message too if set */
+static rte_usage_hook_t rte_application_usage_hook = NULL;
+/* early configuration structure, when memory config is not mmapped */
+static struct rte_mem_config early_mem_config;
+
+/* define fd variable here, because file needs to be kept open for the
+ * duration of the program, as we hold a write lock on it in the primary proc */
+static int mem_cfg_fd = -1;
+
+static struct flock wr_lock = {
+ .l_type = F_WRLCK,
+ .l_whence = SEEK_SET,
+ .l_start = offsetof(struct rte_mem_config, memseg),
+ .l_len = sizeof(early_mem_config.memseg),
+};
+
+/* Address of global and public configuration */
+static struct rte_config rte_config = {
+ .mem_config = &early_mem_config,
+};
+
+/* internal configuration (per-core) */
+struct lcore_config lcore_config[RTE_MAX_LCORE];
+
+/* internal configuration */
+struct internal_config internal_config;
+
+/* used by rte_rdtsc() */
+int rte_cycles_vmware_tsc_map;
+
+/* Return a pointer to the configuration structure */
+struct rte_config *
+rte_eal_get_configuration(void)
+{
+ return &rte_config;
+}
+
+/* parse a sysfs (or other) file containing one integer value */
+int
+eal_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char *end = NULL;
+
+ if ((f = fopen(filename, "r")) == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
+ __func__, filename);
+ return -1;
+ }
+
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
+ __func__, filename);
+ fclose(f);
+ return -1;
+ }
+ *val = strtoul(buf, &end, 0);
+ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+ RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
+ __func__, filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+
+/* create memory configuration in shared/mmap memory. Take out
+ * a write lock on the memsegs, so we can auto-detect primary/secondary.
+ * This means we never close the file while running (auto-close on exit).
+ * We also don't lock the whole file, so that in future we can use read-locks
+ * on other parts, e.g. memzones, to detect if there are running secondary
+ * processes. */
+static void
+rte_eal_config_create(void)
+{
+ void *rte_mem_cfg_addr;
+ int retval;
+
+ const char *pathname = eal_runtime_config_path();
+
+ if (internal_config.no_shconf)
+ return;
+
+ if (mem_cfg_fd < 0){
+ mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
+ if (mem_cfg_fd < 0)
+ rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
+ }
+
+ retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
+ if (retval < 0){
+ close(mem_cfg_fd);
+ rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
+ }
+
+ retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
+ if (retval < 0){
+ close(mem_cfg_fd);
+ rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary "
+ "process running?\n", pathname);
+ }
+
+ rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
+ PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
+
+ if (rte_mem_cfg_addr == MAP_FAILED){
+ rte_panic("Cannot mmap memory for rte_config\n");
+ }
+ memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
+ rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
+}
+
+/* attach to an existing shared memory config */
+static void
+rte_eal_config_attach(void)
+{
+ void *rte_mem_cfg_addr;
+ const char *pathname = eal_runtime_config_path();
+
+ if (internal_config.no_shconf)
+ return;
+
+ if (mem_cfg_fd < 0){
+ mem_cfg_fd = open(pathname, O_RDWR);
+ if (mem_cfg_fd < 0)
+ rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
+ }
+
+ rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
+ PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
+ close(mem_cfg_fd);
+ if (rte_mem_cfg_addr == MAP_FAILED)
+ rte_panic("Cannot mmap memory for rte_config\n");
+
+ rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
+}
+
+/* Detect if we are a primary or a secondary process */
+enum rte_proc_type_t
+eal_proc_type_detect(void)
+{
+ enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
+ const char *pathname = eal_runtime_config_path();
+
+ /* if we can open the file but cannot take a write-lock, we are a secondary
+ * process. NOTE: if we get a file handle back, we keep it open
+ * and don't close it, to prevent a race condition between multiple opens */
+ if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
+ (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
+ ptype = RTE_PROC_SECONDARY;
+
+ RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
+ ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
+
+ return ptype;
+}
+
+/* Sets up rte_config structure with the pointer to shared memory config.*/
+static void
+rte_config_init(void)
+{
+ rte_config.process_type = internal_config.process_type;
+
+ switch (rte_config.process_type){
+ case RTE_PROC_PRIMARY:
+ rte_eal_config_create();
+ break;
+ case RTE_PROC_SECONDARY:
+ rte_eal_config_attach();
+ rte_eal_mcfg_wait_complete(rte_config.mem_config);
+ break;
+ case RTE_PROC_AUTO:
+ case RTE_PROC_INVALID:
+ rte_panic("Invalid process type\n");
+ }
+}
+
+/* display usage */
+static void
+eal_usage(const char *prgname)
+{
+ printf("\nUsage: %s ", prgname);
+ eal_common_usage();
+ /* Allow the application to print its usage message too if hook is set */
+ if ( rte_application_usage_hook ) {
+ printf("===== Application Usage =====\n\n");
+ rte_application_usage_hook(prgname);
+ }
+}
+
+/* Set a per-application usage message */
+rte_usage_hook_t
+rte_set_application_usage_hook( rte_usage_hook_t usage_func )
+{
+ rte_usage_hook_t old_func;
+
+ /* Will be NULL on the first call to denote the last usage routine. */
+ old_func = rte_application_usage_hook;
+ rte_application_usage_hook = usage_func;
+
+ return old_func;
+}
+
+static inline size_t
+eal_get_hugepage_mem_size(void)
+{
+ uint64_t size = 0;
+ unsigned i, j;
+
+ for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
+ struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+ if (hpi->hugedir != NULL) {
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
+ size += hpi->hugepage_sz * hpi->num_pages[j];
+ }
+ }
+ }
+
+ return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+eal_parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+
+ argvopt = argv;
+
+ eal_reset_internal_config(&internal_config);
+
+ while ((opt = getopt_long(argc, argvopt, eal_short_options,
+ eal_long_options, &option_index)) != EOF) {
+
+ int ret;
+
+ /* getopt is not happy, stop right now */
+ if (opt == '?')
+ return -1;
+
+ ret = eal_parse_common_option(opt, optarg, &internal_config);
+ /* common parser is not happy */
+ if (ret < 0) {
+ eal_usage(prgname);
+ return -1;
+ }
+ /* common parser handled this option */
+ if (ret == 0)
+ continue;
+
+ switch (opt) {
+ default:
+ if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
+ RTE_LOG(ERR, EAL, "Option %c is not supported "
+ "on FreeBSD\n", opt);
+ } else if (opt >= OPT_LONG_MIN_NUM &&
+ opt < OPT_LONG_MAX_NUM) {
+ RTE_LOG(ERR, EAL, "Option %s is not supported "
+ "on FreeBSD\n",
+ eal_long_options[option_index].name);
+ } else {
+ RTE_LOG(ERR, EAL, "Option %d is not supported "
+ "on FreeBSD\n", opt);
+ }
+ eal_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (eal_adjust_config(&internal_config) != 0)
+ return -1;
+
+ /* sanity checks */
+ if (eal_check_common_options(&internal_config) != 0) {
+ eal_usage(prgname);
+ return -1;
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+ ret = optind-1;
+ optind = 0; /* reset getopt lib */
+ return ret;
+}
+
+static void
+eal_check_mem_on_local_socket(void)
+{
+ const struct rte_memseg *ms;
+ int i, socket_id;
+
+ socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
+
+ ms = rte_eal_get_physmem_layout();
+
+ for (i = 0; i < RTE_MAX_MEMSEG; i++)
+ if (ms[i].socket_id == socket_id &&
+ ms[i].len > 0)
+ return;
+
+ RTE_LOG(WARNING, EAL, "WARNING: Master core has no "
+ "memory on local socket!\n");
+}
+
+static int
+sync_func(__attribute__((unused)) void *arg)
+{
+ return 0;
+}
+
+inline static void
+rte_eal_mcfg_complete(void)
+{
+ /* ALL shared mem_config related INIT DONE */
+ if (rte_config.process_type == RTE_PROC_PRIMARY)
+ rte_config.mem_config->magic = RTE_MAGIC;
+}
+
+/* return non-zero if hugepages are enabled. */
+int rte_eal_has_hugepages(void)
+{
+ return !internal_config.no_hugetlbfs;
+}
+
+/* Abstraction for port I/O privilege */
+int
+rte_eal_iopl_init(void)
+{
+ int fd = -1;
+ fd = open("/dev/io", O_RDWR);
+ if (fd < 0)
+ return -1;
+ close(fd);
+ return 0;
+}
+
+/* Launch threads, called at application init(). */
+int
+rte_eal_init(int argc, char **argv)
+{
+ int i, fctret, ret;
+ pthread_t thread_id;
+ static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
+
+ if (!rte_atomic32_test_and_set(&run_once))
+ return -1;
+
+ thread_id = pthread_self();
+
+ if (rte_eal_log_early_init() < 0)
+ rte_panic("Cannot init early logs\n");
+
+ if (rte_eal_cpu_init() < 0)
+ rte_panic("Cannot detect lcores\n");
+
+ fctret = eal_parse_args(argc, argv);
+ if (fctret < 0)
+ exit(1);
+
+ /* set log level as early as possible */
+ rte_set_log_level(internal_config.log_level);
+
+ if (internal_config.no_hugetlbfs == 0 &&
+ internal_config.process_type != RTE_PROC_SECONDARY &&
+ eal_hugepage_info_init() < 0)
+ rte_panic("Cannot get hugepage information\n");
+
+ if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
+ if (internal_config.no_hugetlbfs)
+ internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
+ else
+ internal_config.memory = eal_get_hugepage_mem_size();
+ }
+
+ if (internal_config.vmware_tsc_map == 1) {
+#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
+ rte_cycles_vmware_tsc_map = 1;
+ RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
+ "you must have monitor_control.pseudo_perfctr = TRUE\n");
+#else
+ RTE_LOG (WARNING, EAL, "Ignoring --vmware-tsc-map because "
+ "RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT is not set\n");
+#endif
+ }
+
+ rte_srand(rte_rdtsc());
+
+ rte_config_init();
+
+ if (rte_eal_memory_init() < 0)
+ rte_panic("Cannot init memory\n");
+
+ if (rte_eal_memzone_init() < 0)
+ rte_panic("Cannot init memzone\n");
+
+ if (rte_eal_tailqs_init() < 0)
+ rte_panic("Cannot init tail queues for objects\n");
+
+/* if (rte_eal_log_init(argv[0], internal_config.syslog_facility) < 0)
+ rte_panic("Cannot init logs\n");*/
+
+ if (rte_eal_alarm_init() < 0)
+ rte_panic("Cannot init interrupt-handling thread\n");
+
+ if (rte_eal_intr_init() < 0)
+ rte_panic("Cannot init interrupt-handling thread\n");
+
+ if (rte_eal_timer_init() < 0)
+ rte_panic("Cannot init HPET or TSC timers\n");
+
+ if (rte_eal_pci_init() < 0)
+ rte_panic("Cannot init PCI\n");
+
+ RTE_LOG(DEBUG, EAL, "Master core %u is ready (tid=%p)\n",
+ rte_config.master_lcore, thread_id);
+
+ eal_check_mem_on_local_socket();
+
+ rte_eal_mcfg_complete();
+
+ if (rte_eal_dev_init() < 0)
+ rte_panic("Cannot init pmd devices\n");
+
+ RTE_LCORE_FOREACH_SLAVE(i) {
+
+ /*
+ * create communication pipes between master thread
+ * and children
+ */
+ if (pipe(lcore_config[i].pipe_master2slave) < 0)
+ rte_panic("Cannot create pipe\n");
+ if (pipe(lcore_config[i].pipe_slave2master) < 0)
+ rte_panic("Cannot create pipe\n");
+
+ lcore_config[i].state = WAIT;
+
+ /* create a thread for each lcore */
+ ret = pthread_create(&lcore_config[i].thread_id, NULL,
+ eal_thread_loop, NULL);
+ if (ret != 0)
+ rte_panic("Cannot create thread\n");
+ }
+
+ eal_thread_init_master(rte_config.master_lcore);
+
+ /*
+ * Launch a dummy function on all slave lcores, so that master lcore
+ * knows they are all ready when this function returns.
+ */
+ rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
+ rte_eal_mp_wait_lcore();
+
+ /* Probe & Initialize PCI devices */
+ if (rte_eal_pci_probe())
+ rte_panic("Cannot probe PCI\n");
+
+ return fctret;
+}
+
+/* get core role */
+enum rte_lcore_role_t
+rte_eal_lcore_role(unsigned lcore_id)
+{
+ return (rte_config.lcore_role[lcore_id]);
+}
+
+enum rte_proc_type_t
+rte_eal_process_type(void)
+{
+ return (rte_config.process_type);
+}
+
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_alarm.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_alarm.c
new file mode 100755
index 00000000..204df85d
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_alarm.c
@@ -0,0 +1,60 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdlib.h>
+#include <errno.h>
+
+#include <rte_alarm.h>
+#include <rte_common.h>
+#include "eal_private.h"
+
+int
+rte_eal_alarm_init(void)
+{
+ return 0;
+}
+
+
+int
+rte_eal_alarm_set(uint64_t us __rte_unused,
+ rte_eal_alarm_callback cb_fn __rte_unused,
+ void *cb_arg __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+int
+rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn __rte_unused,
+ void *cb_arg __rte_unused)
+{
+ return -ENOTSUP;
+}
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_debug.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_debug.c
new file mode 100755
index 00000000..44fc4f33
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_debug.c
@@ -0,0 +1,113 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <execinfo.h>
+#include <stdarg.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_common.h>
+
+#define BACKTRACE_SIZE 256
+
+/* dump the stack of the calling core */
+void rte_dump_stack(void)
+{
+ void *func[BACKTRACE_SIZE];
+ char **symb = NULL;
+ int size;
+
+ size = backtrace(func, BACKTRACE_SIZE);
+ symb = backtrace_symbols(func, size);
+ while (size > 0) {
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_EAL,
+ "%d: [%s]\n", size, symb[size - 1]);
+ size --;
+ }
+}
+
+/* not implemented in this environment */
+void rte_dump_registers(void)
+{
+ return;
+}
+
+/* call abort(), it will generate a coredump if enabled */
+void __rte_panic(const char *funcname, const char *format, ...)
+{
+ va_list ap;
+
+ /* disable history */
+ rte_log_set_history(0);
+
+ rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, "PANIC in %s():\n", funcname);
+ va_start(ap, format);
+ rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
+ va_end(ap);
+ rte_dump_stack();
+ rte_dump_registers();
+ abort();
+}
+
+/*
+ * Like rte_panic this terminates the application. However, no traceback is
+ * provided and no core-dump is generated.
+ */
+void
+rte_exit(int exit_code, const char *format, ...)
+{
+ va_list ap;
+
+ /* disable history */
+ rte_log_set_history(0);
+
+ if (exit_code != 0)
+ RTE_LOG(CRIT, EAL, "Error - exiting with code: %d\n"
+ " Cause: ", exit_code);
+
+ va_start(ap, format);
+ rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
+ va_end(ap);
+
+#ifndef RTE_EAL_ALWAYS_PANIC_ON_ERROR
+ exit(exit_code);
+#else
+ rte_dump_stack();
+ rte_dump_registers();
+ abort();
+#endif
+}
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_hugepage_info.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_hugepage_info.c
new file mode 100755
index 00000000..24248fbc
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_hugepage_info.c
@@ -0,0 +1,133 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/mman.h>
+#include <string.h>
+
+#include <rte_log.h>
+#include <fcntl.h>
+#include "eal_hugepages.h"
+#include "eal_internal_cfg.h"
+#include "eal_filesystem.h"
+
+#define CONTIGMEM_DEV "/dev/contigmem"
+
+/*
+ * Uses mmap to create a shared memory area for storage of data
+ * Used in this file to store the hugepage file map on disk
+ */
+static void *
+create_shared_memory(const char *filename, const size_t mem_size)
+{
+ void *retval;
+ int fd = open(filename, O_CREAT | O_RDWR, 0666);
+ if (fd < 0)
+ return NULL;
+ if (ftruncate(fd, mem_size) < 0) {
+ close(fd);
+ return NULL;
+ }
+ retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+ return retval;
+}
+
+/*
+ * There is no hugepage support on FreeBSD, so we emulate it using the contigmem driver
+ */
+int
+eal_hugepage_info_init(void)
+{
+ size_t sysctl_size;
+ int buffer_size, num_buffers, fd, error;
+ /* re-use the linux "internal config" structure for our memory data */
+ struct hugepage_info *hpi = &internal_config.hugepage_info[0];
+ struct hugepage_info *tmp_hpi;
+
+ sysctl_size = sizeof(num_buffers);
+ error = sysctlbyname("hw.contigmem.num_buffers", &num_buffers,
+ &sysctl_size, NULL, 0);
+
+ if (error != 0) {
+ RTE_LOG(ERR, EAL, "could not read sysctl hw.contigmem.num_buffers\n");
+ return -1;
+ }
+
+ sysctl_size = sizeof(buffer_size);
+ error = sysctlbyname("hw.contigmem.buffer_size", &buffer_size,
+ &sysctl_size, NULL, 0);
+
+ if (error != 0) {
+ RTE_LOG(ERR, EAL, "could not read sysctl hw.contigmem.buffer_size\n");
+ return -1;
+ }
+
+ fd = open(CONTIGMEM_DEV, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "could not open "CONTIGMEM_DEV"\n");
+ return -1;
+ }
+
+ if (buffer_size >= 1<<30)
+ RTE_LOG(INFO, EAL, "Contigmem driver has %d buffers, each of size %dGB\n",
+ num_buffers, buffer_size>>30);
+ else if (buffer_size >= 1<<20)
+ RTE_LOG(INFO, EAL, "Contigmem driver has %d buffers, each of size %dMB\n",
+ num_buffers, buffer_size>>20);
+ else
+ RTE_LOG(INFO, EAL, "Contigmem driver has %d buffers, each of size %dKB\n",
+ num_buffers, buffer_size>>10);
+
+ internal_config.num_hugepage_sizes = 1;
+ hpi->hugedir = CONTIGMEM_DEV;
+ hpi->hugepage_sz = buffer_size;
+ hpi->num_pages[0] = num_buffers;
+ hpi->lock_descriptor = fd;
+
+ tmp_hpi = create_shared_memory(eal_hugepage_info_path(),
+ sizeof(struct hugepage_info));
+ if (tmp_hpi == NULL ) {
+ RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
+ return -1;
+ }
+
+ memcpy(tmp_hpi, hpi, sizeof(struct hugepage_info));
+
+ if ( munmap(tmp_hpi, sizeof(struct hugepage_info)) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_interrupts.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_interrupts.c
new file mode 100755
index 00000000..cb7d4f13
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_interrupts.c
@@ -0,0 +1,71 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include "eal_private.h"
+
+int
+rte_intr_callback_register(struct rte_intr_handle *intr_handle __rte_unused,
+ rte_intr_callback_fn cb __rte_unused,
+ void *cb_arg __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+int
+rte_intr_callback_unregister(struct rte_intr_handle *intr_handle __rte_unused,
+ rte_intr_callback_fn cb_fn __rte_unused,
+ void *cb_arg __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+int
+rte_intr_enable(struct rte_intr_handle *intr_handle __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+int
+rte_intr_disable(struct rte_intr_handle *intr_handle __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+int
+rte_eal_intr_init(void)
+{
+ return 0;
+}
+
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_lcore.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_lcore.c
new file mode 100755
index 00000000..662f0245
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_lcore.c
@@ -0,0 +1,107 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <sys/sysctl.h>
+
+#include <rte_log.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+
+#include "eal_private.h"
+
+/* No topology information available on FreeBSD including NUMA info */
+#define cpu_core_id(X) 0
+#define cpu_socket_id(X) 0
+
+static int
+get_ncpus(void)
+{
+ int mib[2] = {CTL_HW, HW_NCPU};
+ int ncpu;
+ size_t len = sizeof(ncpu);
+
+ sysctl(mib, 2, &ncpu, &len, NULL, 0);
+ RTE_LOG(INFO, EAL, "Sysctl reports %d cpus\n", ncpu);
+ return ncpu;
+}
+
+/*
+ * Fill the cpu_info structure with as much info as we can get.
+ * The code is similar to the Linux version, but less information is available.
+ */
+int
+rte_eal_cpu_init(void)
+{
+ /* pointer to global configuration */
+ struct rte_config *config = rte_eal_get_configuration();
+ unsigned lcore_id;
+ unsigned count = 0;
+
+ const unsigned ncpus = get_ncpus();
+ /*
+ * Parse the maximum set of logical cores, detect the subset of running
+ * ones and enable them by default.
+ */
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ lcore_config[lcore_id].detected = (lcore_id < ncpus);
+ if (lcore_config[lcore_id].detected == 0) {
+ config->lcore_role[lcore_id] = ROLE_OFF;
+ continue;
+ }
+ /* By default, each detected core is enabled */
+ config->lcore_role[lcore_id] = ROLE_RTE;
+ lcore_config[lcore_id].core_id = cpu_core_id(lcore_id);
+ lcore_config[lcore_id].socket_id = cpu_socket_id(lcore_id);
+ if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES)
+#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
+ lcore_config[lcore_id].socket_id = 0;
+#else
+ rte_panic("Socket ID (%u) is greater than "
+ "RTE_MAX_NUMA_NODES (%d)\n",
+ lcore_config[lcore_id].socket_id, RTE_MAX_NUMA_NODES);
+#endif
+ RTE_LOG(DEBUG, EAL, "Detected lcore %u\n",
+ lcore_id);
+ count++;
+ }
+ /* Set the count of enabled logical cores of the EAL configuration */
+ config->lcore_count = count;
+ RTE_LOG(DEBUG, EAL, "Support maximum %u logical core(s) by configuration.\n",
+ RTE_MAX_LCORE);
+ RTE_LOG(DEBUG, EAL, "Detected %u lcore(s)\n", config->lcore_count);
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_log.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_log.c
new file mode 100755
index 00000000..a425f7a8
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_log.c
@@ -0,0 +1,57 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_common.h>
+#include <rte_log.h>
+
+#include <eal_private.h>
+
+/*
+ * Set the log output to the default function. Called during the EAL init
+ * process, once memzones are available.
+ */
+int
+rte_eal_log_init(const char *id __rte_unused, int facility __rte_unused)
+{
+ if (rte_eal_common_log_init(stderr) < 0)
+ return -1;
+ return 0;
+}
+
+int
+rte_eal_log_early_init(void)
+{
+ rte_openlog_stream(stderr);
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_memory.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_memory.c
new file mode 100755
index 00000000..65ee87d8
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_memory.c
@@ -0,0 +1,224 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/mman.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <inttypes.h>
+#include <fcntl.h>
+
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+#include "eal_private.h"
+#include "eal_internal_cfg.h"
+#include "eal_filesystem.h"
+
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+
+/*
+ * Get physical address of any mapped virtual address in the current process.
+ */
+phys_addr_t
+rte_mem_virt2phy(const void *virtaddr)
+{
+ /* XXX not implemented. This function is only used by
+ * rte_mempool_virt2phy() when hugepages are disabled. */
+ (void)virtaddr;
+ return RTE_BAD_PHYS_ADDR;
+}
+
+static int
+rte_eal_contigmem_init(void)
+{
+ struct rte_mem_config *mcfg;
+ uint64_t total_mem = 0;
+ void *addr;
+ unsigned i, j, seg_idx = 0;
+
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ /* for debug purposes, hugetlbfs can be disabled */
+ if (internal_config.no_hugetlbfs) {
+ addr = malloc(internal_config.memory);
+ mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
+ mcfg->memseg[0].addr = addr;
+ mcfg->memseg[0].len = internal_config.memory;
+ mcfg->memseg[0].socket_id = 0;
+ return 0;
+ }
+
+ /* map all hugepages and sort them */
+ for (i = 0; i < internal_config.num_hugepage_sizes; i ++){
+ struct hugepage_info *hpi;
+
+ hpi = &internal_config.hugepage_info[i];
+ for (j = 0; j < hpi->num_pages[0]; j++) {
+ struct rte_memseg *seg;
+ uint64_t physaddr;
+ int error;
+ size_t sysctl_size = sizeof(physaddr);
+ char physaddr_str[64];
+
+ addr = mmap(NULL, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
+ MAP_SHARED, hpi->lock_descriptor, j * PAGE_SIZE);
+ if (addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
+ j, hpi->hugedir);
+ return -1;
+ }
+
+ snprintf(physaddr_str, sizeof(physaddr_str), "hw.contigmem"
+ ".physaddr.%d", j);
+ error = sysctlbyname(physaddr_str, &physaddr, &sysctl_size,
+ NULL, 0);
+ if (error < 0) {
+ RTE_LOG(ERR, EAL, "Failed to get physical addr for buffer %u "
+ "from %s\n", j, hpi->hugedir);
+ return -1;
+ }
+
+ seg = &mcfg->memseg[seg_idx++];
+ seg->addr = addr;
+ seg->phys_addr = physaddr;
+ seg->hugepage_sz = hpi->hugepage_sz;
+ seg->len = hpi->hugepage_sz;
+ seg->nchannel = mcfg->nchannel;
+ seg->nrank = mcfg->nrank;
+ seg->socket_id = 0;
+
+ RTE_LOG(INFO, EAL, "Mapped memory segment %u @ %p: physaddr:0x%"
+ PRIx64", len %zu\n",
+ seg_idx, addr, physaddr, hpi->hugepage_sz);
+ total_mem += seg->len; /* account for the segment just mapped */
+ if (total_mem >= internal_config.memory ||
+ seg_idx >= RTE_MAX_MEMSEG)
+ break;
+ }
+ }
+ return 0;
+}
+
+static int
+rte_eal_contigmem_attach(void)
+{
+ const struct hugepage_info *hpi;
+ int fd_hugepage_info, fd_hugepage = -1;
+ unsigned i = 0;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+ /* Obtain a file descriptor for hugepage_info */
+ fd_hugepage_info = open(eal_hugepage_info_path(), O_RDONLY);
+ if (fd_hugepage_info < 0) {
+ RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
+ return -1;
+ }
+
+ /* Map the shared hugepage_info into the process address space */
+ hpi = mmap(NULL, sizeof(struct hugepage_info), PROT_READ, MAP_PRIVATE,
+ fd_hugepage_info, 0);
+ if (hpi == MAP_FAILED) { /* mmap() signals failure with MAP_FAILED, not NULL */
+ RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
+ goto error;
+ }
+
+ /* Obtain a file descriptor for contiguous memory */
+ fd_hugepage = open(hpi->hugedir, O_RDWR);
+ if (fd_hugepage < 0) {
+ RTE_LOG(ERR, EAL, "Could not open %s\n", hpi->hugedir);
+ goto error;
+ }
+
+ /* Map the contiguous memory into each memory segment */
+ for (i = 0; i < hpi->num_pages[0]; i++) {
+
+ void *addr;
+ struct rte_memseg *seg = &mcfg->memseg[i];
+
+ addr = mmap(seg->addr, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
+ MAP_SHARED|MAP_FIXED, fd_hugepage, i * PAGE_SIZE);
+ if (addr == MAP_FAILED || addr != seg->addr) {
+ RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
+ i, hpi->hugedir);
+ goto error;
+ }
+
+ }
+
+ /* hugepage_info is no longer required */
+ munmap((void *)(uintptr_t)hpi, sizeof(struct hugepage_info));
+ close(fd_hugepage_info);
+ close(fd_hugepage);
+ return 0;
+
+error:
+ if (fd_hugepage_info >= 0)
+ close(fd_hugepage_info);
+ if (fd_hugepage >= 0)
+ close(fd_hugepage);
+ return -1;
+}
+
+
+static int
+rte_eal_memdevice_init(void)
+{
+ struct rte_config *config;
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return 0;
+
+ config = rte_eal_get_configuration();
+ config->mem_config->nchannel = internal_config.force_nchannel;
+ config->mem_config->nrank = internal_config.force_nrank;
+
+ return 0;
+}
+
+/* init memory subsystem */
+int
+rte_eal_memory_init(void)
+{
+ RTE_LOG(INFO, EAL, "Setting up physically contiguous memory...\n");
+ const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
+ rte_eal_contigmem_init() :
+ rte_eal_contigmem_attach();
+ if (retval < 0)
+ return -1;
+
+ if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
+ return -1;
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_pci.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_pci.c
new file mode 100755
index 00000000..74ecce75
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_pci.c
@@ -0,0 +1,510 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <dirent.h>
+#include <limits.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/pciio.h>
+#include <dev/pci/pcireg.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_common.h>
+#include <rte_launch.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_debug.h>
+#include <rte_devargs.h>
+
+#include "rte_pci_dev_ids.h"
+#include "eal_filesystem.h"
+#include "eal_private.h"
+
+/**
+ * @file
+ * PCI probing under BSD
+ *
+ * This code is used to simulate a PCI probe by querying /dev/pci.
+ * When a registered driver matches a device, the device is accessed
+ * through the nic_uio module, a very minimal userland driver for Intel
+ * network cards that only provides access to the PCI BARs to
+ * applications and enables bus mastering.
+ */
+
+struct uio_map {
+ void *addr;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t phaddr;
+};
+
+/*
+ * For multi-process we need to reproduce all PCI mappings in secondary
+ * processes, so save them in a tailq.
+ */
+struct uio_resource {
+ TAILQ_ENTRY(uio_resource) next;
+
+ struct rte_pci_addr pci_addr;
+ char path[PATH_MAX];
+ size_t nb_maps;
+ struct uio_map maps[PCI_MAX_RESOURCE];
+};
+
+TAILQ_HEAD(uio_res_list, uio_resource);
+
+static struct uio_res_list *uio_res_list = NULL;
+
+/* unbind kernel driver for this device */
+static int
+pci_unbind_kernel_driver(struct rte_pci_device *dev __rte_unused)
+{
+ RTE_LOG(ERR, EAL, "RTE_PCI_DRV_FORCE_UNBIND flag is not implemented "
+ "for BSD\n");
+ return -ENOTSUP;
+}
+
+/* map a particular resource from a file */
+static void *
+pci_map_resource(void *requested_addr, const char *devname, off_t offset,
+ size_t size)
+{
+ int fd;
+ void *mapaddr;
+
+ /*
+ * open devname, to mmap it
+ */
+ fd = open(devname, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ goto fail;
+ }
+
+ /* Map the PCI memory resource of device */
+ mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, offset);
+ close(fd);
+ if (mapaddr == MAP_FAILED ||
+ (requested_addr != NULL && mapaddr != requested_addr)) {
+ RTE_LOG(ERR, EAL, "%s(): cannot mmap(%s(%d), %p, 0x%lx, 0x%lx):"
+ " %s (%p)\n", __func__, devname, fd, requested_addr,
+ (unsigned long)size, (unsigned long)offset,
+ strerror(errno), mapaddr);
+ goto fail;
+ }
+
+ RTE_LOG(DEBUG, EAL, " PCI memory mapped at %p\n", mapaddr);
+
+ return mapaddr;
+
+fail:
+ return NULL;
+}
+
+static int
+pci_uio_map_secondary(struct rte_pci_device *dev)
+{
+ size_t i;
+ struct uio_resource *uio_res;
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+
+ /* skip this element if it doesn't match our PCI address */
+ if (memcmp(&uio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
+ continue;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ if (pci_map_resource(uio_res->maps[i].addr,
+ uio_res->path,
+ (off_t)uio_res->maps[i].offset,
+ (size_t)uio_res->maps[i].size)
+ != uio_res->maps[i].addr) {
+ RTE_LOG(ERR, EAL,
+ "Cannot mmap device resource\n");
+ return (-1);
+ }
+ }
+ return (0);
+ }
+
+ RTE_LOG(ERR, EAL, "Cannot find resource for device\n");
+ return 1;
+}
+
+/* map the PCI resource of a PCI device in virtual memory */
+static int
+pci_uio_map_resource(struct rte_pci_device *dev)
+{
+ int i, j;
+ char devname[PATH_MAX]; /* contains the /dev/uioX */
+ void *mapaddr;
+ uint64_t phaddr;
+ uint64_t offset;
+ uint64_t pagesz;
+ struct rte_pci_addr *loc = &dev->addr;
+ struct uio_resource *uio_res;
+ struct uio_map *maps;
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+
+ /* secondary processes - use already recorded details */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return (pci_uio_map_secondary(dev));
+
+ snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
+ dev->addr.bus, dev->addr.devid, dev->addr.function);
+
+ if (access(devname, R_OK | W_OK) < 0) { /* access() takes R_OK/W_OK, not open() flags */
+ RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, "
+ "skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
+ return 1;
+ }
+
+ /* save fd if in primary process */
+ dev->intr_handle.fd = open(devname, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ return -1;
+ }
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
+
+ /* allocate the mapping details for secondary processes*/
+ if ((uio_res = rte_zmalloc("UIO_RES", sizeof (*uio_res), 0)) == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot store uio mmap details\n", __func__);
+ return (-1);
+ }
+
+ snprintf(uio_res->path, sizeof(uio_res->path), "%s", devname);
+ memcpy(&uio_res->pci_addr, &dev->addr, sizeof(uio_res->pci_addr));
+
+
+ /* Map all BARs */
+ pagesz = sysconf(_SC_PAGESIZE);
+
+ maps = uio_res->maps;
+ for (i = uio_res->nb_maps = 0; i != PCI_MAX_RESOURCE; i++) {
+
+ j = uio_res->nb_maps;
+ /* skip empty BAR */
+ if ((phaddr = dev->mem_resource[i].phys_addr) == 0)
+ continue;
+
+ /* if matching map is found, then use it */
+ offset = i * pagesz;
+ maps[j].offset = offset;
+ maps[j].phaddr = dev->mem_resource[i].phys_addr;
+ maps[j].size = dev->mem_resource[i].len;
+ if (maps[j].addr != NULL ||
+ (mapaddr = pci_map_resource(NULL, devname, (off_t)offset,
+ (size_t)maps[j].size)
+ ) == NULL) {
+ rte_free(uio_res);
+ return (-1);
+ }
+
+ maps[j].addr = mapaddr;
+ uio_res->nb_maps++;
+ dev->mem_resource[i].addr = mapaddr;
+ }
+
+ TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
+
+ return (0);
+}
+
+/* Compare two PCI device addresses: return 1 if addr is greater than addr2, 0 otherwise. */
+static int
+pci_addr_comparison(struct rte_pci_addr *addr, struct rte_pci_addr *addr2)
+{
+ uint64_t dev_addr = (addr->domain << 24) + (addr->bus << 16) + (addr->devid << 8) + addr->function;
+ uint64_t dev_addr2 = (addr2->domain << 24) + (addr2->bus << 16) + (addr2->devid << 8) + addr2->function;
+
+ if (dev_addr > dev_addr2)
+ return 1;
+ else
+ return 0;
+}
+
+
+/* Scan one PCI device entry returned by /dev/pci, and fill the devices list from it. */
+static int
+pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
+{
+ struct rte_pci_device *dev;
+ struct pci_bar_io bar;
+ unsigned i, max;
+
+ dev = malloc(sizeof(*dev));
+ if (dev == NULL) {
+ return -1;
+ }
+
+ memset(dev, 0, sizeof(*dev));
+ dev->addr.domain = conf->pc_sel.pc_domain;
+ dev->addr.bus = conf->pc_sel.pc_bus;
+ dev->addr.devid = conf->pc_sel.pc_dev;
+ dev->addr.function = conf->pc_sel.pc_func;
+
+ /* get vendor id */
+ dev->id.vendor_id = conf->pc_vendor;
+
+ /* get device id */
+ dev->id.device_id = conf->pc_device;
+
+ /* get subsystem_vendor id */
+ dev->id.subsystem_vendor_id = conf->pc_subvendor;
+
+ /* get subsystem_device id */
+ dev->id.subsystem_device_id = conf->pc_subdevice;
+
+ /* TODO: get max_vfs */
+ dev->max_vfs = 0;
+
+ /* FreeBSD has no NUMA support (yet) */
+ dev->numa_node = 0;
+
+ /* parse resources */
+ switch (conf->pc_hdr & PCIM_HDRTYPE) {
+ case PCIM_HDRTYPE_NORMAL:
+ max = PCIR_MAX_BAR_0;
+ break;
+ case PCIM_HDRTYPE_BRIDGE:
+ max = PCIR_MAX_BAR_1;
+ break;
+ case PCIM_HDRTYPE_CARDBUS:
+ max = PCIR_MAX_BAR_2;
+ break;
+ default:
+ goto skipdev;
+ }
+
+ for (i = 0; i <= max; i++) {
+ bar.pbi_sel = conf->pc_sel;
+ bar.pbi_reg = PCIR_BAR(i);
+ if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
+ continue;
+
+ dev->mem_resource[i].len = bar.pbi_length;
+ if (PCI_BAR_IO(bar.pbi_base)) {
+ dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
+ continue;
+ }
+ dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
+ }
+
+ /* device is valid, add in list (sorted) */
+ if (TAILQ_EMPTY(&pci_device_list)) {
+ TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
+ }
+ else {
+ struct rte_pci_device *dev2 = NULL;
+
+ TAILQ_FOREACH(dev2, &pci_device_list, next) {
+ if (pci_addr_comparison(&dev->addr, &dev2->addr))
+ continue;
+ else {
+ TAILQ_INSERT_BEFORE(dev2, dev, next);
+ return 0;
+ }
+ }
+ TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
+ }
+
+ return 0;
+
+skipdev:
+ free(dev);
+ return 0;
+}
+
+/*
+ * Scan the content of the PCI bus, and add the devices to the devices
+ * list. Call pci_scan_one() for each PCI entry found.
+ */
+static int
+pci_scan(void)
+{
+ int fd = -1;
+ unsigned dev_count = 0;
+ struct pci_conf matches[16];
+ struct pci_conf_io conf_io = {
+ .pat_buf_len = 0,
+ .num_patterns = 0,
+ .patterns = NULL,
+ .match_buf_len = sizeof(matches),
+ .matches = &matches[0],
+ };
+
+ fd = open("/dev/pci", O_RDONLY);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
+ goto error;
+ }
+
+ do {
+ unsigned i;
+ if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
+ __func__, strerror(errno));
+ goto error;
+ }
+
+ for (i = 0; i < conf_io.num_matches; i++)
+ if (pci_scan_one(fd, &matches[i]) < 0)
+ goto error;
+
+ dev_count += conf_io.num_matches;
+ } while(conf_io.status == PCI_GETCONF_MORE_DEVS);
+
+ close(fd);
+
+ RTE_LOG(ERR, EAL, "PCI scan found %u devices\n", dev_count);
+ return 0;
+
+error:
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+/*
+ * If the vendor/device IDs match, call the devinit() function of the
+ * driver.
+ */
+int
+rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, struct rte_pci_device *dev)
+{
+ struct rte_pci_id *id_table;
+ int ret;
+
+ for (id_table = dr->id_table ; id_table->vendor_id != 0; id_table++) {
+
+ /* check if device's identifiers match the driver's ones */
+ if (id_table->vendor_id != dev->id.vendor_id &&
+ id_table->vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->device_id != dev->id.device_id &&
+ id_table->device_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_vendor_id != dev->id.subsystem_vendor_id &&
+ id_table->subsystem_vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_device_id != dev->id.subsystem_device_id &&
+ id_table->subsystem_device_id != PCI_ANY_ID)
+ continue;
+
+ struct rte_pci_addr *loc = &dev->addr;
+
+ RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
+ loc->domain, loc->bus, loc->devid, loc->function,
+ dev->numa_node);
+
+ RTE_LOG(DEBUG, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
+ dev->id.device_id, dr->name);
+
+ /* no initialization when blacklisted, return without error */
+ if (dev->devargs != NULL &&
+ dev->devargs->type == RTE_DEVTYPE_BLACKLISTED_PCI) {
+
+ RTE_LOG(DEBUG, EAL, " Device is blacklisted, not initializing\n");
+ return 0;
+ }
+
+ if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
+ /* map resources for devices that use igb_uio */
+ ret = pci_uio_map_resource(dev);
+ if (ret != 0)
+ return ret;
+ } else if (dr->drv_flags & RTE_PCI_DRV_FORCE_UNBIND &&
+ rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* unbind current driver */
+ if (pci_unbind_kernel_driver(dev) < 0)
+ return -1;
+ }
+
+ /* reference driver structure */
+ dev->driver = dr;
+
+ /* call the driver devinit() function */
+ return dr->devinit(dr, dev);
+ }
+ /* return positive value if driver is not found */
+ return 1;
+}
+
+/* Init the PCI EAL subsystem */
+int
+rte_eal_pci_init(void)
+{
+ TAILQ_INIT(&pci_driver_list);
+ TAILQ_INIT(&pci_device_list);
+ uio_res_list = RTE_TAILQ_RESERVE_BY_IDX(RTE_TAILQ_PCI, uio_res_list);
+
+ /* for debug purposes, PCI can be disabled */
+ if (internal_config.no_pci)
+ return 0;
+
+ if (pci_scan() < 0) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot scan PCI bus\n", __func__);
+ return -1;
+ }
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_thread.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_thread.c
new file mode 100755
index 00000000..ab05368d
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_thread.c
@@ -0,0 +1,233 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <sched.h>
+#include <pthread_np.h>
+#include <sys/queue.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_launch.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_per_lcore.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+
+#include "eal_private.h"
+#include "eal_thread.h"
+
+RTE_DEFINE_PER_LCORE(unsigned, _lcore_id);
+
+/*
+ * Send a message to a slave lcore identified by slave_id to call a
+ * function f with argument arg. Once the execution is done, the
+ * remote lcore switches to the FINISHED state.
+ */
+int
+rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
+{
+ int n;
+ char c = 0;
+ int m2s = lcore_config[slave_id].pipe_master2slave[1];
+ int s2m = lcore_config[slave_id].pipe_slave2master[0];
+
+ if (lcore_config[slave_id].state != WAIT)
+ return -EBUSY;
+
+ lcore_config[slave_id].f = f;
+ lcore_config[slave_id].arg = arg;
+
+ /* send message */
+ n = 0;
+ while (n == 0 || (n < 0 && errno == EINTR))
+ n = write(m2s, &c, 1);
+ if (n < 0)
+ rte_panic("cannot write on configuration pipe\n");
+
+ /* wait ack */
+ do {
+ n = read(s2m, &c, 1);
+ } while (n < 0 && errno == EINTR);
+
+ if (n <= 0)
+ rte_panic("cannot read on configuration pipe\n");
+
+ return 0;
+}
+
+/* set affinity for current thread */
+static int
+eal_thread_set_affinity(void)
+{
+ int s;
+ pthread_t thread;
+
+/*
+ * According to the section VERSIONS of the CPU_ALLOC man page:
+ *
+ * The CPU_ZERO(), CPU_SET(), CPU_CLR(), and CPU_ISSET() macros were added
+ * in glibc 2.3.3.
+ *
+ * CPU_COUNT() first appeared in glibc 2.6.
+ *
+ * CPU_AND(), CPU_OR(), CPU_XOR(), CPU_EQUAL(), CPU_ALLOC(),
+ * CPU_ALLOC_SIZE(), CPU_FREE(), CPU_ZERO_S(), CPU_SET_S(), CPU_CLR_S(),
+ * CPU_ISSET_S(), CPU_AND_S(), CPU_OR_S(), CPU_XOR_S(), and CPU_EQUAL_S()
+ * first appeared in glibc 2.7.
+ */
+#if defined(CPU_ALLOC)
+ size_t size;
+ cpu_set_t *cpusetp;
+
+ cpusetp = CPU_ALLOC(RTE_MAX_LCORE);
+ if (cpusetp == NULL) {
+ RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
+ return -1;
+ }
+
+ size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
+ CPU_ZERO_S(size, cpusetp);
+ CPU_SET_S(rte_lcore_id(), size, cpusetp);
+
+ thread = pthread_self();
+ s = pthread_setaffinity_np(thread, size, cpusetp);
+ if (s != 0) {
+ RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
+ CPU_FREE(cpusetp);
+ return -1;
+ }
+
+ CPU_FREE(cpusetp);
+#else /* CPU_ALLOC */
+ cpuset_t cpuset;
+ CPU_ZERO( &cpuset );
+ CPU_SET( rte_lcore_id(), &cpuset );
+
+ thread = pthread_self();
+ s = pthread_setaffinity_np(thread, sizeof( cpuset ), &cpuset);
+ if (s != 0) {
+ RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
+ return -1;
+ }
+#endif
+ return 0;
+}
+
+void eal_thread_init_master(unsigned lcore_id)
+{
+ /* set the lcore ID in per-lcore memory area */
+ RTE_PER_LCORE(_lcore_id) = lcore_id;
+
+ /* set CPU affinity */
+ if (eal_thread_set_affinity() < 0)
+ rte_panic("cannot set affinity\n");
+}
+
+/* main loop of threads */
+__attribute__((noreturn)) void *
+eal_thread_loop(__attribute__((unused)) void *arg)
+{
+ char c;
+ int n, ret;
+ unsigned lcore_id;
+ pthread_t thread_id;
+ int m2s, s2m;
+
+ thread_id = pthread_self();
+
+ /* retrieve our lcore_id from the configuration structure */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (thread_id == lcore_config[lcore_id].thread_id)
+ break;
+ }
+ if (lcore_id == RTE_MAX_LCORE)
+ rte_panic("cannot retrieve lcore id\n");
+
+ RTE_LOG(DEBUG, EAL, "Core %u is ready (tid=%p)\n",
+ lcore_id, thread_id);
+
+ m2s = lcore_config[lcore_id].pipe_master2slave[0];
+ s2m = lcore_config[lcore_id].pipe_slave2master[1];
+
+ /* set the lcore ID in per-lcore memory area */
+ RTE_PER_LCORE(_lcore_id) = lcore_id;
+
+ /* set CPU affinity */
+ if (eal_thread_set_affinity() < 0)
+ rte_panic("cannot set affinity\n");
+
+ /* read on our pipe to get commands */
+ while (1) {
+ void *fct_arg;
+
+ /* wait command */
+ do {
+ n = read(m2s, &c, 1);
+ } while (n < 0 && errno == EINTR);
+
+ if (n <= 0)
+ rte_panic("cannot read on configuration pipe\n");
+
+ lcore_config[lcore_id].state = RUNNING;
+
+ /* send ack */
+ n = 0;
+ while (n == 0 || (n < 0 && errno == EINTR))
+ n = write(s2m, &c, 1);
+ if (n < 0)
+ rte_panic("cannot write on configuration pipe\n");
+
+ if (lcore_config[lcore_id].f == NULL)
+ rte_panic("NULL function pointer\n");
+
+ /* call the function and store the return value */
+ fct_arg = lcore_config[lcore_id].arg;
+ ret = lcore_config[lcore_id].f(fct_arg);
+ lcore_config[lcore_id].ret = ret;
+ rte_wmb();
+ lcore_config[lcore_id].state = FINISHED;
+ }
+
+ /* never reached */
+ /* pthread_exit(NULL); */
+ /* return NULL; */
+}
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_timer.c b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_timer.c
new file mode 100755
index 00000000..3e698647
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/eal_timer.c
@@ -0,0 +1,149 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if 0
+
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <inttypes.h>
+//#include <sys/types.h>
+//#include <sys/sysctl.h>
+#include <errno.h>
+
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+#include <rte_tailq.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_debug.h>
+
+#include "eal_private.h"
+#include "eal_internal_cfg.h"
+
+#ifdef RTE_LIBEAL_USE_HPET
+#error "should not be enabled"
+//#warning HPET is not supported in FreeBSD
+#endif
+
+enum timer_source eal_timer_source = EAL_TIMER_TSC;
+
+/* The frequency of the RDTSC timer resolution */
+static uint64_t eal_tsc_resolution_hz = 0;
+
+void
+rte_delay_us(unsigned us)
+{
+ const uint64_t start = rte_get_timer_cycles();
+ const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6;
+ while ((rte_get_timer_cycles() - start) < ticks)
+ rte_pause();
+}
+
+uint64_t
+rte_get_tsc_hz(void)
+{
+ return eal_tsc_resolution_hz;
+}
+
+#if 0
+static int
+set_tsc_freq_from_sysctl(void)
+{
+ size_t sz;
+ int tmp;
+
+ sz = sizeof(tmp);
+ tmp = 0;
+
+ if (sysctlbyname("kern.timecounter.smp_tsc", &tmp, &sz, NULL, 0))
+ RTE_LOG(WARNING, EAL, "%s\n", strerror(errno));
+ else if (tmp != 1)
+ RTE_LOG(WARNING, EAL, "TSC is not safe to use in SMP mode\n");
+
+ tmp = 0;
+
+ if (sysctlbyname("kern.timecounter.invariant_tsc", &tmp, &sz, NULL, 0))
+ RTE_LOG(WARNING, EAL, "%s\n", strerror(errno));
+ else if (tmp != 1)
+ RTE_LOG(WARNING, EAL, "TSC is not invariant\n");
+
+ sz = sizeof(eal_tsc_resolution_hz);
+ if (sysctlbyname("machdep.tsc_freq", &eal_tsc_resolution_hz, &sz, NULL, 0)) {
+ RTE_LOG(WARNING, EAL, "%s\n", strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+static void
+set_tsc_freq_fallback(void)
+{
+ RTE_LOG(WARNING, EAL, "WARNING: clock_gettime cannot use "
+ "CLOCK_MONOTONIC_RAW and HPET is not available"
+ " - clock timings may be less accurate.\n");
+ /* assume that the sleep(1) will sleep for 1 second */
+ uint64_t start = rte_rdtsc();
+ sleep(1);
+ eal_tsc_resolution_hz = rte_rdtsc() - start;
+}
+
+/*
+ * This function measures the TSC frequency. It uses a variety of approaches.
+ *
+ * 1. Read the TSC frequency value provided by the kernel
+ * 2. If the above does not work, just sleep for 1 second and calibrate from
+ * that, printing a warning about the inaccuracy of the timing
+ */
+static void
+set_tsc_freq(void)
+{
+ //if (set_tsc_freq_from_sysctl() < 0)
+ set_tsc_freq_fallback();
+
+ RTE_LOG(INFO, EAL, "TSC frequency is ~%"PRIu64" KHz\n",
+ eal_tsc_resolution_hz/1000);
+}
+
+int
+rte_eal_timer_init(void)
+{
+ set_tsc_freq();
+ return 0;
+}
+#endif
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h b/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h
new file mode 100755
index 00000000..99a33432
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h
@@ -0,0 +1,107 @@
+/*-
+ * This file is provided under a dual BSD/LGPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GNU LESSER GENERAL PUBLIC LICENSE
+ *
+ * Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _RTE_DOM0_COMMON_H_
+#define _RTE_DOM0_COMMON_H_
+
+#ifdef __KERNEL__
+#include <linux/if.h>
+#endif
+
+#define DOM0_NAME_MAX 256
+#define DOM0_MM_DEV "/dev/dom0_mm"
+
+#define DOM0_CONTIG_NUM_ORDER 9 /**< 2M order */
+#define DOM0_NUM_MEMSEG 512 /**< Maximum nb. of memory segments. */
+#define DOM0_MEMBLOCK_SIZE 0x200000 /**< Size of a memory block (2MB). */
+#define DOM0_CONFIG_MEMSIZE 4096 /**< Maximum config memory size (4GB). */
+#define DOM0_NUM_MEMBLOCK (DOM0_CONFIG_MEMSIZE / 2) /**< Maximum nb. of 2M memory block. */
+
+#define RTE_DOM0_IOCTL_PREPARE_MEMSEG _IOWR(0, 1 , struct memory_info)
+#define RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG _IOWR(0, 2 , char *)
+#define RTE_DOM0_IOCTL_GET_NUM_MEMSEG _IOWR(0, 3, int)
+#define RTE_DOM0_IOCTL_GET_MEMSEG_INFO _IOWR(0, 4, void *)
+
+/**
+ * A structure used to store memory information.
+ */
+struct memory_info {
+ char name[DOM0_NAME_MAX];
+ uint64_t size;
+};
+
+/**
+ * A structure used to store memory segment information.
+ */
+struct memseg_info {
+ uint32_t idx;
+ uint64_t pfn;
+ uint64_t size;
+ uint64_t mfn[DOM0_NUM_MEMBLOCK];
+};
+
+/**
+ * A structure used to store memory block information.
+ */
+struct memblock_info {
+ uint8_t exchange_flag;
+ uint64_t vir_addr;
+ uint64_t pfn;
+ uint64_t mfn;
+};
+#endif /* _RTE_DOM0_COMMON_H_ */
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h b/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h
new file mode 100755
index 00000000..87a9cf69
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h
@@ -0,0 +1,54 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_INTERRUPTS_H_
+#error "don't include this file directly, please include generic <rte_interrupts.h>"
+#endif
+
+#ifndef _RTE_LINUXAPP_INTERRUPTS_H_
+#define _RTE_LINUXAPP_INTERRUPTS_H_
+
+enum rte_intr_handle_type {
+ RTE_INTR_HANDLE_UNKNOWN = 0,
+ RTE_INTR_HANDLE_UIO, /**< uio device handle */
+ RTE_INTR_HANDLE_ALARM, /**< alarm handle */
+ RTE_INTR_HANDLE_MAX
+};
+
+/** Handle for interrupts. */
+struct rte_intr_handle {
+ int fd; /**< file descriptor */
+ enum rte_intr_handle_type type; /**< handle type */
+};
+
+#endif /* _RTE_LINUXAPP_INTERRUPTS_H_ */
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/BSDmakefile b/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/BSDmakefile
new file mode 100755
index 00000000..5454ed85
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/BSDmakefile
@@ -0,0 +1,36 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+KMOD= nic_uio
+SRCS= nic_uio.c device_if.h bus_if.h pci_if.h
+
+.include <bsd.kmod.mk>
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/Makefile b/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/Makefile
new file mode 100755
index 00000000..89957615
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/Makefile
@@ -0,0 +1,52 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# module name and path
+#
+MODULE = nic_uio
+
+#
+# CFLAGS
+#
+MODULE_CFLAGS += -I$(SRCDIR)
+MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
+MODULE_CFLAGS += -Winline -Wall -Werror
+MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y := nic_uio.c
+
+include $(RTE_SDK)/mk/rte.bsdmodule.mk
diff --git a/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/nic_uio.c b/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/nic_uio.c
new file mode 100755
index 00000000..ed11d845
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/bsdapp/nic_uio/nic_uio.c
@@ -0,0 +1,329 @@
+/* -
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h> /* defines used in kernel.h */
+#include <sys/module.h>
+#include <sys/kernel.h> /* types used in module initialization */
+#include <sys/conf.h> /* cdevsw struct */
+#include <sys/bus.h> /* structs, prototypes for pci bus stuff and DEVMETHOD */
+#include <sys/rman.h>
+#include <sys/systm.h>
+#include <sys/rwlock.h>
+#include <sys/proc.h>
+
+#include <machine/bus.h>
+#include <dev/pci/pcivar.h> /* For pci_get macros! */
+#include <dev/pci/pcireg.h> /* The softc holds our per-instance data. */
+#include <vm/vm.h>
+#include <vm/uma.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+
+
+#define MAX_BARS (PCIR_MAX_BAR_0 + 1)
+
+
+struct nic_uio_softc {
+ device_t dev_t;
+ struct cdev *my_cdev;
+ int bar_id[MAX_BARS];
+ struct resource *bar_res[MAX_BARS];
+ u_long bar_start[MAX_BARS];
+ u_long bar_size[MAX_BARS];
+};
+
+/* Function prototypes */
+static d_open_t nic_uio_open;
+static d_close_t nic_uio_close;
+static d_mmap_t nic_uio_mmap;
+static d_mmap_single_t nic_uio_mmap_single;
+static int nic_uio_probe(device_t dev);
+static int nic_uio_attach(device_t dev);
+static int nic_uio_detach(device_t dev);
+static int nic_uio_shutdown(void);
+static int nic_uio_modevent(module_t mod, int type, void *arg);
+
+static struct cdevsw uio_cdevsw = {
+ .d_name = "nic_uio",
+ .d_version = D_VERSION,
+ .d_open = nic_uio_open,
+ .d_close = nic_uio_close,
+ .d_mmap = nic_uio_mmap,
+ .d_mmap_single = nic_uio_mmap_single,
+};
+
+static device_method_t nic_uio_methods[] = {
+ DEVMETHOD(device_probe, nic_uio_probe),
+ DEVMETHOD(device_attach, nic_uio_attach),
+ DEVMETHOD(device_detach, nic_uio_detach),
+ DEVMETHOD_END
+};
+
+struct device {
+ int vend;
+ int dev;
+};
+
+struct pci_bdf {
+ uint32_t bus;
+ uint32_t devid;
+ uint32_t function;
+};
+
+
+#define RTE_PCI_DEV_ID_DECL_EM(vend, dev) {vend, dev},
+#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {vend, dev},
+#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {vend, dev},
+#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {vend, dev},
+#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {vend, dev},
+#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {vend, dev},
+#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {vend, dev},
+#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev) {vend, dev},
+#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) {vend, dev},
+
+const struct device devices[] = {
+#include <rte_pci_dev_ids.h>
+};
+#define NUM_DEVICES (sizeof(devices)/sizeof(devices[0]))
+
+
+static devclass_t nic_uio_devclass;
+
+DEFINE_CLASS_0(nic_uio, nic_uio_driver, nic_uio_methods, sizeof(struct nic_uio_softc));
+DRIVER_MODULE(nic_uio, pci, nic_uio_driver, nic_uio_devclass, nic_uio_modevent, 0);
+
+static int
+nic_uio_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
+ int prot, vm_memattr_t *memattr)
+{
+ *paddr = offset;
+ return (0);
+}
+
+static int
+nic_uio_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
+ struct vm_object **obj, int nprot)
+{
+ /*
+ * The BAR index is encoded in the offset. Divide the offset by
+ * PAGE_SIZE to get the index of the bar requested by the user
+ * app.
+ */
+ unsigned bar = *offset/PAGE_SIZE;
+ struct nic_uio_softc *sc = cdev->si_drv1;
+
+ if (bar >= MAX_BARS)
+ return EINVAL;
+
+ if (sc->bar_res[bar] == NULL) {
+ sc->bar_id[bar] = PCIR_BAR(bar);
+
+ if (PCI_BAR_IO(pci_read_config(sc->dev_t, sc->bar_id[bar], 4)))
+ sc->bar_res[bar] = bus_alloc_resource_any(sc->dev_t, SYS_RES_IOPORT,
+ &sc->bar_id[bar], RF_ACTIVE);
+ else
+ sc->bar_res[bar] = bus_alloc_resource_any(sc->dev_t, SYS_RES_MEMORY,
+ &sc->bar_id[bar], RF_ACTIVE);
+ }
+ if (sc->bar_res[bar] == NULL)
+ return ENXIO;
+
+ sc->bar_start[bar] = rman_get_start(sc->bar_res[bar]);
+ sc->bar_size[bar] = rman_get_size(sc->bar_res[bar]);
+
+ device_printf(sc->dev_t, "Bar %u @ %lx, size %lx\n", bar,
+ sc->bar_start[bar], sc->bar_size[bar]);
+
+ *offset = sc->bar_start[bar];
+ *obj = vm_pager_allocate(OBJT_DEVICE, cdev, size, nprot, *offset,
+ curthread->td_ucred);
+ return 0;
+}
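+
+/*
+ * Illustrative user-space usage (a sketch, not part of this module): the
+ * BAR index is passed to mmap() encoded in the offset as index * PAGE_SIZE,
+ * and the device node name follows the "uio@pci:bus:slot:func" pattern
+ * created in nic_uio_attach() below. Mapping BAR 0 of a hypothetical
+ * device at 2:0:0 might look like:
+ *
+ *   int fd = open("/dev/uio@pci:2:0:0", O_RDWR);
+ *   void *bar0 = mmap(NULL, bar0_size, PROT_READ | PROT_WRITE,
+ *                     MAP_SHARED, fd, 0 * PAGE_SIZE);
+ *
+ * bar0_size is a placeholder for the BAR length reported by the driver.
+ */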
+
+
+int
+nic_uio_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
+{
+ return 0;
+}
+
+int
+nic_uio_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
+{
+ return 0;
+}
+
+static int
+nic_uio_probe (device_t dev)
+{
+ int i;
+
+ for (i = 0; i < NUM_DEVICES; i++)
+ if (pci_get_vendor(dev) == devices[i].vend &&
+ pci_get_device(dev) == devices[i].dev) {
+
+ device_set_desc(dev, "Intel(R) DPDK PCI Device");
+ return (BUS_PROBE_SPECIFIC);
+ }
+
+ return (ENXIO);
+}
+
+static int
+nic_uio_attach(device_t dev)
+{
+ int i;
+ struct nic_uio_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev_t = dev;
+ sc->my_cdev = make_dev(&uio_cdevsw, device_get_unit(dev),
+ UID_ROOT, GID_WHEEL, 0600, "uio@pci:%u:%u:%u",
+ pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
+ if (sc->my_cdev == NULL)
+ return ENXIO;
+ sc->my_cdev->si_drv1 = sc;
+
+ for (i = 0; i < MAX_BARS; i++)
+ sc->bar_res[i] = NULL;
+
+ pci_enable_busmaster(dev);
+
+ return 0;
+}
+
+static int
+nic_uio_detach(device_t dev)
+{
+ int i;
+ struct nic_uio_softc *sc;
+ sc = device_get_softc(dev);
+
+ for (i = 0; i < MAX_BARS; i++)
+ if (sc->bar_res[i] != NULL) {
+
+ if (PCI_BAR_IO(pci_read_config(dev, sc->bar_id[i], 4)))
+ bus_release_resource(dev, SYS_RES_IOPORT, sc->bar_id[i],
+ sc->bar_res[i]);
+ else
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->bar_id[i],
+ sc->bar_res[i]);
+ }
+
+ if (sc->my_cdev != NULL)
+ destroy_dev(sc->my_cdev);
+ return 0;
+}
+
+static void
+nic_uio_load(void)
+{
+ uint32_t bus, device, function;
+ int i;
+ device_t dev;
+ char bdf_str[256];
+ char *token, *remaining;
+
+ memset(bdf_str, 0, sizeof(bdf_str));
+ TUNABLE_STR_FETCH("hw.nic_uio.bdfs", bdf_str, sizeof(bdf_str));
+ remaining = bdf_str;
+ /*
+ * Users should specify PCI BDFs in the format "b:d:f,b:d:f,b:d:f".
+ * But the code below does not try to differentiate between ':' and ','
+ * and just blindly uses 3 tokens at a time to construct a
+ * bus/device/function tuple.
+ *
+ * There is no checking on strtol() return values, but this should
+ * be OK. Worst case is it cannot convert and returns 0. This
+ * could give us a different BDF than intended, but as long as the
+ * PCI device/vendor ID does not match it will not matter.
+ */
+ while (1) {
+ if (remaining == NULL || remaining[0] == '\0')
+ break;
+ token = strsep(&remaining, ",:");
+ if (token == NULL)
+ break;
+ bus = strtol(token, NULL, 10);
+ token = strsep(&remaining, ",:");
+ if (token == NULL)
+ break;
+ device = strtol(token, NULL, 10);
+ token = strsep(&remaining, ",:");
+ if (token == NULL)
+ break;
+ function = strtol(token, NULL, 10);
+
+ dev = pci_find_bsf(bus, device, function);
+ if (dev != NULL)
+ for (i = 0; i < NUM_DEVICES; i++)
+ if (pci_get_vendor(dev) == devices[i].vend &&
+ pci_get_device(dev) == devices[i].dev)
+ device_detach(dev);
+ }
+}
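+
+/*
+ * For illustration: the hw.nic_uio.bdfs tunable fetched above is typically
+ * set in /boot/loader.conf (or via kenv) before loading the module, e.g. to
+ * detach the in-kernel drivers from two hypothetical devices at
+ * bus:device:function 2:0:0 and 2:0:1:
+ *
+ *   hw.nic_uio.bdfs="2:0:0,2:0:1"
+ */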
+
+static void
+nic_uio_unload(void)
+{
+}
+
+static int
+nic_uio_shutdown(void)
+{
+ return (0);
+}
+
+static int
+nic_uio_modevent(module_t mod, int type, void *arg)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ nic_uio_load();
+ break;
+ case MOD_UNLOAD:
+ nic_uio_unload();
+ break;
+ case MOD_SHUTDOWN:
+ nic_uio_shutdown();
+ break;
+ default:
+ break;
+ }
+
+ return (0);
+}
diff --git a/src/dpdk_lib18/librte_eal/common/Makefile b/src/dpdk_lib18/librte_eal/common/Makefile
new file mode 100755
index 00000000..52c1a5f3
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/Makefile
@@ -0,0 +1,61 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+INC := rte_branch_prediction.h rte_common.h
+INC += rte_debug.h rte_eal.h rte_errno.h rte_launch.h rte_lcore.h
+INC += rte_log.h rte_memory.h rte_memzone.h rte_pci.h
+INC += rte_pci_dev_ids.h rte_per_lcore.h rte_random.h
+INC += rte_rwlock.h rte_tailq.h rte_interrupts.h rte_alarm.h
+INC += rte_string_fns.h rte_version.h rte_tailq_elem.h
+INC += rte_eal_memconfig.h rte_malloc_heap.h
+INC += rte_hexdump.h rte_devargs.h rte_dev.h
+INC += rte_common_vect.h
+INC += rte_pci_dev_feature_defs.h rte_pci_dev_features.h
+
+ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
+INC += rte_warnings.h
+endif
+
+GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h
+GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h
+# defined in mk/arch/$(RTE_ARCH)/rte.vars.mk
+ARCH_DIR ?= $(RTE_ARCH)
+ARCH_INC := $(notdir $(wildcard $(RTE_SDK)/lib/librte_eal/common/include/arch/$(ARCH_DIR)/*.h))
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := $(addprefix include/,$(INC))
+SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include += \
+ $(addprefix include/arch/$(ARCH_DIR)/,$(ARCH_INC))
+SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include/generic := \
+ $(addprefix include/generic/,$(GENERIC_INC))
+
+include $(RTE_SDK)/mk/rte.install.mk
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_cpuflags.c b/src/dpdk_lib18/librte_eal/common/eal_common_cpuflags.c
new file mode 100755
index 00000000..6fd360cb
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_cpuflags.c
@@ -0,0 +1,85 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rte_cpuflags.h>
+
+/*
+ * This should prevent use of advanced instruction sets in this file. Otherwise
+ * the check function itself could cause a crash.
+ */
+#ifdef __INTEL_COMPILER
+#pragma optimize ("", off)
+#else
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION > 404000
+#pragma GCC optimize ("O0")
+#endif
+#endif
+
+/**
+ * Checks if the machine is adequate for running the binary. If it is not, the
+ * program exits with status 1.
+ * The function attribute forces this function to be called before main(). But
+ * with ICC, the check is generated by the compiler.
+ */
+#ifndef __INTEL_COMPILER
+void __attribute__ ((__constructor__))
+#else
+void
+#endif
+rte_cpu_check_supported(void)
+{
+ /* This is generated at compile-time by the build system */
+ static const enum rte_cpu_flag_t compile_time_flags[] = {
+ RTE_COMPILE_TIME_CPUFLAGS
+ };
+ unsigned i;
+ int ret;
+
+ for (i = 0; i < sizeof(compile_time_flags)/sizeof(compile_time_flags[0]); i++) {
+ ret = rte_cpu_get_flag_enabled(compile_time_flags[i]);
+
+ if (ret < 0) {
+ fprintf(stderr,
+ "ERROR: CPU feature flag lookup failed with error %d\n",
+ ret);
+ exit(1);
+ }
+ if (!ret) {
+ fprintf(stderr,
+ "ERROR: This system does not support \"%s\".\n"
+ "Please check that RTE_MACHINE is set correctly.\n",
+ cpu_feature_table[compile_time_flags[i]].name);
+ exit(1);
+ }
+ }
+}
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_dev.c b/src/dpdk_lib18/librte_eal/common/eal_common_dev.c
new file mode 100755
index 00000000..eae56565
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_dev.c
@@ -0,0 +1,109 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_dev.h>
+#include <rte_devargs.h>
+#include <rte_debug.h>
+#include <rte_devargs.h>
+
+#include "eal_private.h"
+
+/** Global list of device drivers. */
+static struct rte_driver_list dev_driver_list =
+ TAILQ_HEAD_INITIALIZER(dev_driver_list);
+
+/* register a driver */
+void
+rte_eal_driver_register(struct rte_driver *driver)
+{
+ TAILQ_INSERT_TAIL(&dev_driver_list, driver, next);
+}
+
+/* unregister a driver */
+void
+rte_eal_driver_unregister(struct rte_driver *driver)
+{
+ TAILQ_REMOVE(&dev_driver_list, driver, next);
+}
+
+int
+rte_eal_dev_init(void)
+{
+ struct rte_devargs *devargs;
+ struct rte_driver *driver;
+
+ /*
+ * Note that the dev_driver_list is populated here
+ * from calls made to rte_eal_driver_register from constructor functions
+ * embedded into PMD modules via the PMD_REGISTER_DRIVER macro
+ */
+
+ /* call the init function for each virtual device */
+ TAILQ_FOREACH(devargs, &devargs_list, next) {
+
+ if (devargs->type != RTE_DEVTYPE_VIRTUAL)
+ continue;
+
+ TAILQ_FOREACH(driver, &dev_driver_list, next) {
+ if (driver->type != PMD_VDEV)
+ continue;
+
+ /* search a driver prefix in virtual device name */
+ if (!strncmp(driver->name, devargs->virtual.drv_name,
+ strlen(driver->name))) {
+ driver->init(devargs->virtual.drv_name,
+ devargs->args);
+ break;
+ }
+ }
+
+ if (driver == NULL) {
+ rte_panic("no driver found for %s\n",
+ devargs->virtual.drv_name);
+ }
+ }
+
+ /* Once the vdevs are initialized, start calling all the pdev drivers */
+ TAILQ_FOREACH(driver, &dev_driver_list, next) {
+ if (driver->type != PMD_PDEV)
+ continue;
+ /* PDEV drivers don't get passed any parameters */
+ driver->init(NULL, NULL);
+ }
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_devargs.c b/src/dpdk_lib18/librte_eal/common/eal_common_devargs.c
new file mode 100755
index 00000000..4c7d11af
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_devargs.c
@@ -0,0 +1,152 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A nor the names of its contributors
+ * may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This file manages the list of devices and their arguments, as given
+ * by the user at startup */
+
+#include <string.h>
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_devargs.h>
+#include "eal_private.h"
+
+/** Global list of user devices */
+struct rte_devargs_list devargs_list =
+ TAILQ_HEAD_INITIALIZER(devargs_list);
+
+/* store a device argument string (whitelist, blacklist or virtual) for later parsing */
+int
+rte_eal_devargs_add(enum rte_devtype devtype, const char *devargs_str)
+{
+ struct rte_devargs *devargs;
+ char buf[RTE_DEVARGS_LEN];
+ char *sep;
+ int ret;
+
+ ret = snprintf(buf, sizeof(buf), "%s", devargs_str);
+ if (ret < 0 || ret >= (int)sizeof(buf)) {
+ RTE_LOG(ERR, EAL, "user device args too large: <%s>\n",
+ devargs_str);
+ return -1;
+ }
+
+ /* use malloc instead of rte_malloc as it's called early at init */
+ devargs = malloc(sizeof(*devargs));
+ if (devargs == NULL) {
+ RTE_LOG(ERR, EAL, "cannot allocate devargs\n");
+ return -1;
+ }
+ memset(devargs, 0, sizeof(*devargs));
+ devargs->type = devtype;
+
+ /* set the first ',' to '\0' to split name and arguments */
+ sep = strchr(buf, ',');
+ if (sep != NULL) {
+ sep[0] = '\0';
+ snprintf(devargs->args, sizeof(devargs->args), "%s", sep + 1);
+ }
+
+ switch (devargs->type) {
+ case RTE_DEVTYPE_WHITELISTED_PCI:
+ case RTE_DEVTYPE_BLACKLISTED_PCI:
+ /* try to parse pci identifier */
+ if (eal_parse_pci_BDF(buf, &devargs->pci.addr) != 0 &&
+ eal_parse_pci_DomBDF(buf, &devargs->pci.addr) != 0) {
+ RTE_LOG(ERR, EAL,
+ "invalid PCI identifier <%s>\n", buf);
+ free(devargs);
+ return -1;
+ }
+ break;
+ case RTE_DEVTYPE_VIRTUAL:
+ /* save driver name */
+ ret = snprintf(devargs->virtual.drv_name,
+ sizeof(devargs->virtual.drv_name), "%s", buf);
+ if (ret < 0 || ret >= (int)sizeof(devargs->virtual.drv_name)) {
+ RTE_LOG(ERR, EAL,
+ "driver name too large: <%s>\n", buf);
+ free(devargs);
+ return -1;
+ }
+ break;
+ }
+
+ TAILQ_INSERT_TAIL(&devargs_list, devargs, next);
+ return 0;
+}
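+
+/*
+ * Usage sketch for rte_eal_devargs_add(), assuming the bus:devid.func
+ * notation accepted by eal_parse_pci_BDF() and a hypothetical virtual
+ * driver name; everything after the first ',' is stored verbatim in
+ * devargs->args:
+ *
+ *   rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "02:00.0");
+ *   rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "eth_dummy0,arg0=1");
+ */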
+
+/* count the number of devices of a specified type */
+unsigned int
+rte_eal_devargs_type_count(enum rte_devtype devtype)
+{
+ struct rte_devargs *devargs;
+ unsigned int count = 0;
+
+ TAILQ_FOREACH(devargs, &devargs_list, next) {
+ if (devargs->type != devtype)
+ continue;
+ count++;
+ }
+ return count;
+}
+
+/* dump the user devices on the console */
+void
+rte_eal_devargs_dump(FILE *f)
+{
+ struct rte_devargs *devargs;
+
+ fprintf(f, "User device white list:\n");
+ TAILQ_FOREACH(devargs, &devargs_list, next) {
+ if (devargs->type == RTE_DEVTYPE_WHITELISTED_PCI)
+ fprintf(f, " PCI whitelist " PCI_PRI_FMT " %s\n",
+ devargs->pci.addr.domain,
+ devargs->pci.addr.bus,
+ devargs->pci.addr.devid,
+ devargs->pci.addr.function,
+ devargs->args);
+ else if (devargs->type == RTE_DEVTYPE_BLACKLISTED_PCI)
+ fprintf(f, " PCI blacklist " PCI_PRI_FMT " %s\n",
+ devargs->pci.addr.domain,
+ devargs->pci.addr.bus,
+ devargs->pci.addr.devid,
+ devargs->pci.addr.function,
+ devargs->args);
+ else if (devargs->type == RTE_DEVTYPE_VIRTUAL)
+ fprintf(f, " VIRTUAL %s %s\n",
+ devargs->virtual.drv_name,
+ devargs->args);
+ else
+ fprintf(f, " UNKNOWN %s\n", devargs->args);
+ }
+}
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_errno.c b/src/dpdk_lib18/librte_eal/common/eal_common_errno.c
new file mode 100755
index 00000000..259f8958
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_errno.c
@@ -0,0 +1,74 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+
+#include <rte_per_lcore.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+RTE_DEFINE_PER_LCORE(int, _rte_errno);
+
+const char *
+rte_strerror(int errnum)
+{
+#define RETVAL_SZ 256
+ static RTE_DEFINE_PER_LCORE(char[RETVAL_SZ], retval);
+
+ /* since some implementations of strerror_r throw an error
+ * themselves if errnum is too big, we handle that case here */
+ if (errnum > RTE_MAX_ERRNO)
+ snprintf(RTE_PER_LCORE(retval), RETVAL_SZ,
+#ifdef RTE_EXEC_ENV_BSDAPP
+ "Unknown error: %d", errnum);
+#else
+ "Unknown error %d", errnum);
+#endif
+ else
+ switch (errnum){
+ case E_RTE_SECONDARY:
+ return "Invalid call in secondary process";
+ case E_RTE_NO_CONFIG:
+ return "Missing rte_config structure";
+ case E_RTE_NO_TAILQ:
+ return "No TAILQ initialised";
+ default:
+ strerror_r(errnum, RTE_PER_LCORE(retval), RETVAL_SZ);
+ }
+
+ return RTE_PER_LCORE(retval);
+}
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_hexdump.c b/src/dpdk_lib18/librte_eal/common/eal_common_hexdump.c
new file mode 100755
index 00000000..6135133f
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_hexdump.c
@@ -0,0 +1,121 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <rte_hexdump.h>
+#include <rte_string_fns.h>
+
+#define LINE_LEN 128
+
+/**************************************************************************//**
+*
+* rte_hexdump - Dump out memory in a special hex dump format.
+*
+* DESCRIPTION
+* Dump out the message buffer in a special hex dump output format with characters
+* printed for each line of 16 hex values.
+*
+* RETURNS: N/A
+*
+* SEE ALSO:
+*/
+
+void
+rte_hexdump(FILE *f, const char * title, const void * buf, unsigned int len)
+{
+ unsigned int i, out, ofs;
+ const unsigned char *data = buf;
+ char line[LINE_LEN]; /* space needed 8+16*3+3+16 == 75 */
+
+ fprintf(f, "%s at [%p], len=%u\n", (title)? title : " Dump data", data, len);
+ ofs = 0;
+ while (ofs < len) {
+ /* format the line in the buffer, then use printf to output to screen */
+ out = snprintf(line, LINE_LEN, "%08X:", ofs);
+ for (i = 0; ((ofs + i) < len) && (i < 16); i++)
+ out += snprintf(line+out, LINE_LEN - out, " %02X", (data[ofs+i] & 0xff));
+ for(; i <= 16; i++)
+ out += snprintf(line+out, LINE_LEN - out, " | ");
+ for(i = 0; (ofs < len) && (i < 16); i++, ofs++) {
+ unsigned char c = data[ofs];
+ if ( (c < ' ') || (c > '~'))
+ c = '.';
+ out += snprintf(line+out, LINE_LEN - out, "%c", c);
+ }
+ fprintf(f, "%s\n", line);
+ }
+ fflush(f);
+}
+
+/**************************************************************************//**
+*
+* rte_memdump - Dump out memory in hex bytes with colons.
+*
+* DESCRIPTION
+* Dump out the message buffer in hex bytes with colons xx:xx:xx:xx:...
+*
+* RETURNS: N/A
+*
+* SEE ALSO:
+*/
+
+void
+rte_memdump(FILE *f, const char * title, const void * buf, unsigned int len)
+{
+ unsigned int i, out;
+ const unsigned char *data = buf;
+ char line[LINE_LEN];
+
+ if ( title )
+ fprintf(f, "%s: ", title);
+
+ line[0] = '\0';
+ for (i = 0, out = 0; i < len; i++) {
+ // Make sure we do not overrun the line buffer length.
+ if ( out >= (LINE_LEN - 4) ) {
+ fprintf(f, "%s", line);
+ out = 0;
+ line[out] = '\0';
+ }
+ out += snprintf(line+out, LINE_LEN - out, "%02x%s",
+ (data[i] & 0xff), ((i+1) < len)? ":" : "");
+ }
+ if ( out > 0 )
+ fprintf(f, "%s", line);
+ fprintf(f, "\n");
+
+ fflush(f);
+}
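+
+/*
+ * Usage sketch for the two dump helpers above; pkt and len are placeholders
+ * for an application buffer and its length:
+ *
+ *   rte_hexdump(stdout, "rx packet", pkt, len);  // 16 bytes per line + ASCII
+ *   rte_memdump(stdout, "rx packet", pkt, len);  // xx:xx:xx:... on one line
+ */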
+
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_launch.c b/src/dpdk_lib18/librte_eal/common/eal_common_launch.c
new file mode 100755
index 00000000..599f83b5
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_launch.c
@@ -0,0 +1,120 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include <rte_launch.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+
+/*
+ * Wait until an lcore has finished its job.
+ */
+int
+rte_eal_wait_lcore(unsigned slave_id)
+{
+ if (lcore_config[slave_id].state == WAIT)
+ return 0;
+
+ while (lcore_config[slave_id].state != WAIT &&
+ lcore_config[slave_id].state != FINISHED);
+
+ rte_rmb();
+
+ /* we are in finished state, go to wait state */
+ lcore_config[slave_id].state = WAIT;
+ return lcore_config[slave_id].ret;
+}
+
+/*
+ * Check that every SLAVE lcore is in WAIT state, then call
+ * rte_eal_remote_launch() for all of them. If call_master is true
+ * (set to CALL_MASTER), also call the function on the master lcore.
+ */
+int
+rte_eal_mp_remote_launch(int (*f)(void *), void *arg,
+ enum rte_rmt_call_master_t call_master)
+{
+ int lcore_id;
+ int master = rte_get_master_lcore();
+
+ /* check state of lcores */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (lcore_config[lcore_id].state != WAIT)
+ return -EBUSY;
+ }
+
+ /* send messages to cores */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_remote_launch(f, arg, lcore_id);
+ }
+
+ if (call_master == CALL_MASTER) {
+ lcore_config[master].ret = f(arg);
+ lcore_config[master].state = FINISHED;
+ }
+
+ return 0;
+}
+
+/*
+ * Return the state of the lcore identified by slave_id.
+ */
+enum rte_lcore_state_t
+rte_eal_get_lcore_state(unsigned lcore_id)
+{
+ return lcore_config[lcore_id].state;
+}
+
+/*
+ * Do a rte_eal_wait_lcore() for every lcore. The return values are
+ * ignored.
+ */
+void
+rte_eal_mp_wait_lcore(void)
+{
+ unsigned lcore_id;
+
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_wait_lcore(lcore_id);
+ }
+}
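+
+/*
+ * Typical usage of the launch API above; lcore_main is a placeholder worker
+ * with the int (*)(void *) signature expected by the launcher:
+ *
+ *   static int lcore_main(void *arg) { (void)arg; return 0; }
+ *
+ *   rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER);
+ *   rte_eal_mp_wait_lcore();  // collect all slaves back into WAIT state
+ */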
+
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_log.c b/src/dpdk_lib18/librte_eal/common/eal_common_log.c
new file mode 100755
index 00000000..cf576195
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_log.c
@@ -0,0 +1,320 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_debug.h>
+#include <rte_spinlock.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+
+#include "eal_private.h"
+
+#define LOG_ELT_SIZE 2048
+
+#define LOG_HISTORY_MP_NAME "log_history"
+
+STAILQ_HEAD(log_history_list, log_history);
+
+/**
+ * The structure of a message log in the log history.
+ */
+struct log_history {
+ STAILQ_ENTRY(log_history) next;
+ unsigned size;
+ char buf[0];
+};
+
+static struct rte_mempool *log_history_mp = NULL;
+static unsigned log_history_size = 0;
+static struct log_history_list log_history;
+
+/* global log structure */
+struct rte_logs rte_logs = {
+ .type = ~0,
+ .level = RTE_LOG_DEBUG,
+ .file = NULL,
+};
+
+static rte_spinlock_t log_dump_lock = RTE_SPINLOCK_INITIALIZER;
+static rte_spinlock_t log_list_lock = RTE_SPINLOCK_INITIALIZER;
+static FILE *default_log_stream;
+static int history_enabled = 1;
+
+/**
+ * This global structure stores some information about the message
+ * that is currently being processed by one lcore
+ */
+struct log_cur_msg {
+ uint32_t loglevel; /**< log level - see rte_log.h */
+ uint32_t logtype; /**< log type - see rte_log.h */
+} __rte_cache_aligned;
+static struct log_cur_msg log_cur_msg[RTE_MAX_LCORE]; /**< per core log */
+
+
+/* default logs */
+
+int
+rte_log_add_in_history(const char *buf, size_t size)
+{
+ struct log_history *hist_buf = NULL;
+ static const unsigned hist_buf_size = LOG_ELT_SIZE - sizeof(*hist_buf);
+ void *obj;
+
+ if (history_enabled == 0)
+ return 0;
+
+ rte_spinlock_lock(&log_list_lock);
+
+ /* get a buffer for adding in history */
+ if (log_history_size > RTE_LOG_HISTORY) {
+ hist_buf = STAILQ_FIRST(&log_history);
+ STAILQ_REMOVE_HEAD(&log_history, next);
+ }
+ else {
+ if (rte_mempool_mc_get(log_history_mp, &obj) < 0)
+ obj = NULL;
+ hist_buf = obj;
+ }
+
+ /* no buffer */
+ if (hist_buf == NULL) {
+ rte_spinlock_unlock(&log_list_lock);
+ return -ENOBUFS;
+ }
+
+ /* not enough room for the msg, the buffer goes back into the mempool */
+ if (size >= hist_buf_size) {
+ rte_mempool_mp_put(log_history_mp, hist_buf);
+ rte_spinlock_unlock(&log_list_lock);
+ return -ENOBUFS;
+ }
+
+ /* add in history */
+ memcpy(hist_buf->buf, buf, size);
+ hist_buf->buf[size] = hist_buf->buf[hist_buf_size-1] = '\0';
+ hist_buf->size = size;
+ STAILQ_INSERT_TAIL(&log_history, hist_buf, next);
+ log_history_size++;
+ rte_spinlock_unlock(&log_list_lock);
+
+ return 0;
+}
+
+void
+rte_log_set_history(int enable)
+{
+ history_enabled = enable;
+}
+
+/* Change the stream that will be used by logging system */
+int
+rte_openlog_stream(FILE *f)
+{
+ if (f == NULL)
+ rte_logs.file = default_log_stream;
+ else
+ rte_logs.file = f;
+ return 0;
+}
+
+/* Set global log level */
+void
+rte_set_log_level(uint32_t level)
+{
+ rte_logs.level = (uint32_t)level;
+}
+
+/* Get global log level */
+uint32_t
+rte_get_log_level(void)
+{
+ return rte_logs.level;
+}
+
+/* Set global log type */
+void
+rte_set_log_type(uint32_t type, int enable)
+{
+ if (enable)
+ rte_logs.type |= type;
+ else
+ rte_logs.type &= (~type);
+}
+
+/* get the current loglevel for the message being processed */
+int rte_log_cur_msg_loglevel(void)
+{
+ unsigned lcore_id;
+ lcore_id = rte_lcore_id();
+ return log_cur_msg[lcore_id].loglevel;
+}
+
+/* get the current logtype for the message being processed */
+int rte_log_cur_msg_logtype(void)
+{
+ unsigned lcore_id;
+ lcore_id = rte_lcore_id();
+ return log_cur_msg[lcore_id].logtype;
+}
+
+/* Dump log history to file */
+void
+rte_log_dump_history(FILE *out)
+{
+ struct log_history_list tmp_log_history;
+ struct log_history *hist_buf;
+ unsigned i;
+
+ /* only one dump at a time */
+ rte_spinlock_lock(&log_dump_lock);
+
+ /* save list, and re-init to allow logging during dump */
+ rte_spinlock_lock(&log_list_lock);
+ tmp_log_history = log_history;
+ STAILQ_INIT(&log_history);
+ rte_spinlock_unlock(&log_list_lock);
+
+ for (i=0; i<RTE_LOG_HISTORY; i++) {
+
+ /* remove one message from history list */
+ hist_buf = STAILQ_FIRST(&tmp_log_history);
+
+ if (hist_buf == NULL)
+ break;
+
+ STAILQ_REMOVE_HEAD(&tmp_log_history, next);
+
+ /* write on stdout */
+ if (fwrite(hist_buf->buf, hist_buf->size, 1, out) == 0) {
+ rte_mempool_mp_put(log_history_mp, hist_buf);
+ break;
+ }
+
+ /* put back message structure in pool */
+ rte_mempool_mp_put(log_history_mp, hist_buf);
+ }
+ fflush(out);
+
+ rte_spinlock_unlock(&log_dump_lock);
+}
+
+/*
+ * Generates a log message. The message will be sent to the stream
+ * defined by the previous call to rte_openlog_stream().
+ */
+int
+rte_vlog(__attribute__((unused)) uint32_t level,
+ __attribute__((unused)) uint32_t logtype,
+ const char *format, va_list ap)
+{
+ int ret;
+ FILE *f = rte_logs.file;
+ unsigned lcore_id;
+
+ /* save loglevel and logtype in a global per-lcore variable */
+ lcore_id = rte_lcore_id();
+ log_cur_msg[lcore_id].loglevel = level;
+ log_cur_msg[lcore_id].logtype = logtype;
+
+ ret = vfprintf(f, format, ap);
+ fflush(f);
+ return ret;
+}
+
+/*
+ * Generates a log message. The message will be sent to the stream
+ * defined by the previous call to rte_openlog_stream().
+ */
+int
+rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, format);
+ ret = rte_vlog(level, logtype, format, ap);
+ va_end(ap);
+ return ret;
+}
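+
+/*
+ * Usage sketch: once a stream is registered, messages are normally emitted
+ * through the RTE_LOG() macro from rte_log.h (used throughout EAL) or via
+ * rte_log() directly; USER1 is one of the application log types and
+ * nb_ports is a placeholder variable:
+ *
+ *   rte_set_log_level(RTE_LOG_INFO);
+ *   RTE_LOG(INFO, USER1, "initialised %d ports\n", nb_ports);
+ */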
+
+/*
+ * called by environment-specific log init function to initialize log
+ * history
+ */
+int
+rte_eal_common_log_init(FILE *default_log)
+{
+ STAILQ_INIT(&log_history);
+
+ /* reserve RTE_LOG_HISTORY*2 elements, so we can dump and
+ * keep logging during this time */
+ log_history_mp = rte_mempool_create(LOG_HISTORY_MP_NAME, RTE_LOG_HISTORY*2,
+ LOG_ELT_SIZE, 0, 0,
+ NULL, NULL,
+ NULL, NULL,
+ SOCKET_ID_ANY, 0);
+
+ if ((log_history_mp == NULL) &&
+ ((log_history_mp = rte_mempool_lookup(LOG_HISTORY_MP_NAME)) == NULL)){
+ RTE_LOG(ERR, EAL, "%s(): cannot create log_history mempool\n",
+ __func__);
+ return -1;
+ }
+
+ default_log_stream = default_log;
+ rte_openlog_stream(default_log);
+ return 0;
+}
+
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_memory.c b/src/dpdk_lib18/librte_eal/common/eal_common_memory.c
new file mode 100755
index 00000000..77830f80
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_memory.c
@@ -0,0 +1,121 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_log.h>
+
+#include "eal_private.h"
+
+/*
+ * Return a pointer to a read-only table of struct rte_memseg
+ * elements, containing the layout of all addressable physical
+ * memory. The last element of the table contains a NULL address.
+ */
+const struct rte_memseg *
+rte_eal_get_physmem_layout(void)
+{
+ return rte_eal_get_configuration()->mem_config->memseg;
+}
+
+
+/* get the total size of memory */
+uint64_t
+rte_eal_get_physmem_size(void)
+{
+ const struct rte_mem_config *mcfg;
+ unsigned i = 0;
+ uint64_t total_len = 0;
+
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ for (i=0; i<RTE_MAX_MEMSEG; i++) {
+ if (mcfg->memseg[i].addr == NULL)
+ break;
+
+ total_len += mcfg->memseg[i].len;
+ }
+
+ return total_len;
+}
+
+/* Dump the physical memory layout on console */
+void
+rte_dump_physmem_layout(FILE *f)
+{
+ const struct rte_mem_config *mcfg;
+ unsigned i = 0;
+
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ for (i=0; i<RTE_MAX_MEMSEG; i++) {
+ if (mcfg->memseg[i].addr == NULL)
+ break;
+
+ fprintf(f, "Segment %u: phys:0x%"PRIx64", len:%zu, "
+ "virt:%p, socket_id:%"PRId32", "
+ "hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
+ "nrank:%"PRIx32"\n", i,
+ mcfg->memseg[i].phys_addr,
+ mcfg->memseg[i].len,
+ mcfg->memseg[i].addr,
+ mcfg->memseg[i].socket_id,
+ mcfg->memseg[i].hugepage_sz,
+ mcfg->memseg[i].nchannel,
+ mcfg->memseg[i].nrank);
+ }
+}
+
+/* return the number of memory channels */
+unsigned rte_memory_get_nchannel(void)
+{
+ return rte_eal_get_configuration()->mem_config->nchannel;
+}
+
+/* return the number of memory ranks */
+unsigned rte_memory_get_nrank(void)
+{
+ return rte_eal_get_configuration()->mem_config->nrank;
+}
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_memzone.c b/src/dpdk_lib18/librte_eal/common/eal_common_memzone.c
new file mode 100755
index 00000000..b5a5d727
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_memzone.c
@@ -0,0 +1,533 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_common.h>
+
+#include "eal_private.h"
+
+/* internal copy of free memory segments */
+static struct rte_memseg *free_memseg = NULL;
+
+static inline const struct rte_memzone *
+memzone_lookup_thread_unsafe(const char *name)
+{
+ const struct rte_mem_config *mcfg;
+ unsigned i = 0;
+
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ /*
+ * the algorithm is not optimal (linear), but there are few
+ * zones and this function should be called at init only
+ */
+ for (i = 0; i < RTE_MAX_MEMZONE && mcfg->memzone[i].addr != NULL; i++) {
+ if (!strncmp(name, mcfg->memzone[i].name, RTE_MEMZONE_NAMESIZE))
+ return &mcfg->memzone[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * Return a pointer to a correctly filled memzone descriptor. If the
+ * allocation cannot be done, return NULL.
+ */
+const struct rte_memzone *
+rte_memzone_reserve(const char *name, size_t len, int socket_id,
+ unsigned flags)
+{
+ return rte_memzone_reserve_aligned(name,
+ len, socket_id, flags, RTE_CACHE_LINE_SIZE);
+}
+
+/*
+ * Helper function for memzone_reserve_aligned_thread_unsafe().
+ * Calculate address offset from the start of the segment.
+ * Align the offset so that it satisfies the start alignment and so that a
+ * buffer of the requested length does not cross the specified boundary.
+ */
+static inline phys_addr_t
+align_phys_boundary(const struct rte_memseg *ms, size_t len, size_t align,
+ size_t bound)
+{
+ phys_addr_t addr_offset, bmask, end, start;
+ size_t step;
+
+ step = RTE_MAX(align, bound);
+ bmask = ~((phys_addr_t)bound - 1);
+
+ /* calculate offset to closest alignment */
+ start = RTE_ALIGN_CEIL(ms->phys_addr, align);
+ addr_offset = start - ms->phys_addr;
+
+ while (addr_offset + len < ms->len) {
+
+ /* check whether we meet the boundary condition */
+ end = start + len - (len != 0);
+ if ((start & bmask) == (end & bmask))
+ break;
+
+ /* calculate next offset */
+ start = RTE_ALIGN_CEIL(start + 1, step);
+ addr_offset = start - ms->phys_addr;
+ }
+
+ return (addr_offset);
+}
+
+static const struct rte_memzone *
+memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
+ int socket_id, unsigned flags, unsigned align, unsigned bound)
+{
+ struct rte_mem_config *mcfg;
+ unsigned i = 0;
+ int memseg_idx = -1;
+ uint64_t addr_offset, seg_offset = 0;
+ size_t requested_len;
+ size_t memseg_len = 0;
+ phys_addr_t memseg_physaddr;
+ void *memseg_addr;
+
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ /* no more room in config */
+ if (mcfg->memzone_idx >= RTE_MAX_MEMZONE) {
+ RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
+ rte_errno = ENOSPC;
+ return NULL;
+ }
+
+ /* zone already exists */
+ if ((memzone_lookup_thread_unsafe(name)) != NULL) {
+ RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
+ __func__, name);
+ rte_errno = EEXIST;
+ return NULL;
+ }
+
+ /* if alignment is not a power of two */
+ if (!rte_is_power_of_2(align)) {
+ RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
+ align);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* alignment less than cache size is not allowed */
+ if (align < RTE_CACHE_LINE_SIZE)
+ align = RTE_CACHE_LINE_SIZE;
+
+
+ /* align length on cache boundary. Check for overflow before doing so */
+ if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
+ rte_errno = EINVAL; /* requested size too big */
+ return NULL;
+ }
+
+ len += RTE_CACHE_LINE_MASK;
+ len &= ~((size_t) RTE_CACHE_LINE_MASK);
+
+ /* save minimal requested length */
+ requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);
+
+ /* check that boundary condition is valid */
+ if (bound != 0 &&
+ (requested_len > bound || !rte_is_power_of_2(bound))) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* find the smallest segment matching requirements */
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ /* last segment */
+ if (free_memseg[i].addr == NULL)
+ break;
+
+ /* empty segment, skip it */
+ if (free_memseg[i].len == 0)
+ continue;
+
+ /* bad socket ID */
+ if (socket_id != SOCKET_ID_ANY &&
+ free_memseg[i].socket_id != SOCKET_ID_ANY &&
+ socket_id != free_memseg[i].socket_id)
+ continue;
+
+ /*
+ * calculate offset to closest alignment that
+ * meets boundary conditions.
+ */
+ addr_offset = align_phys_boundary(free_memseg + i,
+ requested_len, align, bound);
+
+ /* check len */
+ if ((requested_len + addr_offset) > free_memseg[i].len)
+ continue;
+
+ /* check flags for hugepage sizes */
+ if ((flags & RTE_MEMZONE_2MB) &&
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_1G)
+ continue;
+ if ((flags & RTE_MEMZONE_1GB) &&
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_2M)
+ continue;
+ if ((flags & RTE_MEMZONE_16MB) &&
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_16G)
+ continue;
+ if ((flags & RTE_MEMZONE_16GB) &&
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_16M)
+ continue;
+
+ /* this segment is the best until now */
+ if (memseg_idx == -1) {
+ memseg_idx = i;
+ memseg_len = free_memseg[i].len;
+ seg_offset = addr_offset;
+ }
+ /* find the biggest contiguous zone */
+ else if (len == 0) {
+ if (free_memseg[i].len > memseg_len) {
+ memseg_idx = i;
+ memseg_len = free_memseg[i].len;
+ seg_offset = addr_offset;
+ }
+ }
+ /*
+ * find the smallest segment (we already checked that the current
+ * zone length is > len)
+ */
+ else if (free_memseg[i].len + align < memseg_len ||
+ (free_memseg[i].len <= memseg_len + align &&
+ addr_offset < seg_offset)) {
+ memseg_idx = i;
+ memseg_len = free_memseg[i].len;
+ seg_offset = addr_offset;
+ }
+ }
+
+ /* no segment found */
+ if (memseg_idx == -1) {
+ /*
+ * If the RTE_MEMZONE_SIZE_HINT_ONLY flag is specified,
+ * try allocating again without the hugepage-size flags; otherwise fail.
+ */
+ if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) &&
+ ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)
+ || (flags & RTE_MEMZONE_16MB) || (flags & RTE_MEMZONE_16GB)))
+ return memzone_reserve_aligned_thread_unsafe(name,
+ len, socket_id, 0, align, bound);
+
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ /* save aligned physical and virtual addresses */
+ memseg_physaddr = free_memseg[memseg_idx].phys_addr + seg_offset;
+ memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr,
+ (uintptr_t) seg_offset);
+
+ /* if we are looking for the biggest memzone */
+ if (len == 0) {
+ if (bound == 0)
+ requested_len = memseg_len - seg_offset;
+ else
+ requested_len = RTE_ALIGN_CEIL(memseg_physaddr + 1,
+ bound) - memseg_physaddr;
+ }
+
+ /* set length to correct value */
+ len = (size_t)seg_offset + requested_len;
+
+ /* update our internal state */
+ free_memseg[memseg_idx].len -= len;
+ free_memseg[memseg_idx].phys_addr += len;
+ free_memseg[memseg_idx].addr =
+ (char *)free_memseg[memseg_idx].addr + len;
+
+ /* fill the zone in config */
+ struct rte_memzone *mz = &mcfg->memzone[mcfg->memzone_idx++];
+ snprintf(mz->name, sizeof(mz->name), "%s", name);
+ mz->phys_addr = memseg_physaddr;
+ mz->addr = memseg_addr;
+ mz->len = requested_len;
+ mz->hugepage_sz = free_memseg[memseg_idx].hugepage_sz;
+ mz->socket_id = free_memseg[memseg_idx].socket_id;
+ mz->flags = 0;
+ mz->memseg_id = memseg_idx;
+
+ return mz;
+}
+
+/*
+ * Return a pointer to a correctly filled memzone descriptor (with a
+ * specified alignment). If the allocation cannot be done, return NULL.
+ */
+const struct rte_memzone *
+rte_memzone_reserve_aligned(const char *name, size_t len,
+ int socket_id, unsigned flags, unsigned align)
+{
+ struct rte_mem_config *mcfg;
+ const struct rte_memzone *mz = NULL;
+
+ /* both sizes cannot be explicitly called for */
+ if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
+ || ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ rte_rwlock_write_lock(&mcfg->mlock);
+
+ mz = memzone_reserve_aligned_thread_unsafe(
+ name, len, socket_id, flags, align, 0);
+
+ rte_rwlock_write_unlock(&mcfg->mlock);
+
+ return mz;
+}
+
+/*
+ * Return a pointer to a correctly filled memzone descriptor (with a
+ * specified alignment and boundary).
+ * If the allocation cannot be done, return NULL.
+ */
+const struct rte_memzone *
+rte_memzone_reserve_bounded(const char *name, size_t len,
+ int socket_id, unsigned flags, unsigned align, unsigned bound)
+{
+ struct rte_mem_config *mcfg;
+ const struct rte_memzone *mz = NULL;
+
+ /* both sizes cannot be explicitly called for */
+ if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
+ || ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ rte_rwlock_write_lock(&mcfg->mlock);
+
+ mz = memzone_reserve_aligned_thread_unsafe(
+ name, len, socket_id, flags, align, bound);
+
+ rte_rwlock_write_unlock(&mcfg->mlock);
+
+ return mz;
+}
+
+
+/*
+ * Lookup for the memzone identified by the given name
+ */
+const struct rte_memzone *
+rte_memzone_lookup(const char *name)
+{
+ struct rte_mem_config *mcfg;
+ const struct rte_memzone *memzone = NULL;
+
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ rte_rwlock_read_lock(&mcfg->mlock);
+
+ memzone = memzone_lookup_thread_unsafe(name);
+
+ rte_rwlock_read_unlock(&mcfg->mlock);
+
+ return memzone;
+}
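+
+/*
+ * Usage sketch for the reserve/lookup pair: reserve a named zone once (the
+ * name "app_state" is a placeholder), then retrieve it later by name:
+ *
+ *   const struct rte_memzone *mz =
+ *       rte_memzone_reserve("app_state", 4096, SOCKET_ID_ANY, 0);
+ *   const struct rte_memzone *same = rte_memzone_lookup("app_state");
+ */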
+
+/* Dump all reserved memory zones on console */
+void
+rte_memzone_dump(FILE *f)
+{
+ struct rte_mem_config *mcfg;
+ unsigned i = 0;
+
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ rte_rwlock_read_lock(&mcfg->mlock);
+ /* dump all zones */
+ for (i=0; i<RTE_MAX_MEMZONE; i++) {
+ if (mcfg->memzone[i].addr == NULL)
+ break;
+ fprintf(f, "Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
+ ", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
+ mcfg->memzone[i].name,
+ mcfg->memzone[i].phys_addr,
+ mcfg->memzone[i].len,
+ mcfg->memzone[i].addr,
+ mcfg->memzone[i].socket_id,
+ mcfg->memzone[i].flags);
+ }
+ rte_rwlock_read_unlock(&mcfg->mlock);
+}
+
+/*
+ * called by init: modify the free memseg list to have cache-aligned
+ * addresses and cache-aligned lengths
+ */
+static int
+memseg_sanitize(struct rte_memseg *memseg)
+{
+ unsigned phys_align;
+ unsigned virt_align;
+ unsigned off;
+
+ phys_align = memseg->phys_addr & RTE_CACHE_LINE_MASK;
+ virt_align = (unsigned long)memseg->addr & RTE_CACHE_LINE_MASK;
+
+ /*
+ * sanity check: phys_addr and addr must have the same
+ * alignment
+ */
+ if (phys_align != virt_align)
+ return -1;
+
+ /* memseg is really too small, don't bother with it */
+ if (memseg->len < (2 * RTE_CACHE_LINE_SIZE)) {
+ memseg->len = 0;
+ return 0;
+ }
+
+ /* align start address */
+ off = (RTE_CACHE_LINE_SIZE - phys_align) & RTE_CACHE_LINE_MASK;
+ memseg->phys_addr += off;
+ memseg->addr = (char *)memseg->addr + off;
+ memseg->len -= off;
+
+ /* align end address */
+ memseg->len &= ~((uint64_t)RTE_CACHE_LINE_MASK);
+
+ return 0;
+}
+
+/*
+ * Init the memzone subsystem
+ */
+int
+rte_eal_memzone_init(void)
+{
+ struct rte_mem_config *mcfg;
+ const struct rte_memseg *memseg;
+ unsigned i = 0;
+
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ /* mirror the runtime memsegs from config */
+ free_memseg = mcfg->free_memseg;
+
+ /* secondary processes don't need to initialise anything */
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return 0;
+
+ memseg = rte_eal_get_physmem_layout();
+ if (memseg == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
+ return -1;
+ }
+
+ rte_rwlock_write_lock(&mcfg->mlock);
+
+ /* fill in uninitialized free_memsegs */
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ if (memseg[i].addr == NULL)
+ break;
+ if (free_memseg[i].addr != NULL)
+ continue;
+ memcpy(&free_memseg[i], &memseg[i], sizeof(struct rte_memseg));
+ }
+
+ /* make all zones cache-aligned */
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ if (free_memseg[i].addr == NULL)
+ break;
+ if (memseg_sanitize(&free_memseg[i]) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): Sanity check failed\n", __func__);
+ rte_rwlock_write_unlock(&mcfg->mlock);
+ return -1;
+ }
+ }
+
+ /* delete all zones */
+ mcfg->memzone_idx = 0;
+ memset(mcfg->memzone, 0, sizeof(mcfg->memzone));
+
+ rte_rwlock_write_unlock(&mcfg->mlock);
+
+ return 0;
+}
+
+/* Walk all reserved memory zones */
+void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *),
+ void *arg)
+{
+ struct rte_mem_config *mcfg;
+ unsigned i;
+
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ rte_rwlock_read_lock(&mcfg->mlock);
+	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
+ if (mcfg->memzone[i].addr != NULL)
+ (*func)(&mcfg->memzone[i], arg);
+ }
+ rte_rwlock_read_unlock(&mcfg->mlock);
+}
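A short usage sketch for rte_memzone_walk() (illustrative, assuming an initialized application): counting the reserved zones with a callback.

    #include <rte_memzone.h>

    static void
    count_zone(const struct rte_memzone *mz, void *arg)
    {
            (void)mz;
            (*(unsigned *)arg)++;
    }

    static unsigned
    memzone_count(void)
    {
            unsigned n = 0;

            rte_memzone_walk(count_zone, &n);
            return n;
    }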
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_options.c b/src/dpdk_lib18/librte_eal/common/eal_common_options.c
new file mode 100755
index 00000000..e2810ab9
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_options.c
@@ -0,0 +1,611 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <syslog.h>
+#include <ctype.h>
+#include <limits.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_eal.h>
+#include <rte_log.h>
+#include <rte_lcore.h>
+#include <rte_version.h>
+#include <rte_devargs.h>
+
+#include "eal_internal_cfg.h"
+#include "eal_options.h"
+#include "eal_filesystem.h"
+
+#define BITS_PER_HEX 4
+
+const char
+eal_short_options[] =
+ "b:" /* pci-blacklist */
+ "w:" /* pci-whitelist */
+ "c:" /* coremask */
+	"d:"  /* driver (shared object to load) */
+	"l:"  /* corelist */
+	"m:"  /* memory size */
+	"n:"  /* memory channels */
+	"r:"  /* memory ranks */
+	"v";  /* version */
+
+const struct option
+eal_long_options[] = {
+ {OPT_HUGE_DIR, 1, 0, OPT_HUGE_DIR_NUM},
+ {OPT_MASTER_LCORE, 1, 0, OPT_MASTER_LCORE_NUM},
+ {OPT_PROC_TYPE, 1, 0, OPT_PROC_TYPE_NUM},
+ {OPT_NO_SHCONF, 0, 0, OPT_NO_SHCONF_NUM},
+ {OPT_NO_HPET, 0, 0, OPT_NO_HPET_NUM},
+ {OPT_VMWARE_TSC_MAP, 0, 0, OPT_VMWARE_TSC_MAP_NUM},
+ {OPT_NO_PCI, 0, 0, OPT_NO_PCI_NUM},
+ {OPT_NO_HUGE, 0, 0, OPT_NO_HUGE_NUM},
+ {OPT_FILE_PREFIX, 1, 0, OPT_FILE_PREFIX_NUM},
+ {OPT_SOCKET_MEM, 1, 0, OPT_SOCKET_MEM_NUM},
+ {OPT_PCI_WHITELIST, 1, 0, OPT_PCI_WHITELIST_NUM},
+ {OPT_PCI_BLACKLIST, 1, 0, OPT_PCI_BLACKLIST_NUM},
+ {OPT_VDEV, 1, 0, OPT_VDEV_NUM},
+ {OPT_SYSLOG, 1, NULL, OPT_SYSLOG_NUM},
+ {OPT_LOG_LEVEL, 1, NULL, OPT_LOG_LEVEL_NUM},
+ {OPT_BASE_VIRTADDR, 1, 0, OPT_BASE_VIRTADDR_NUM},
+ {OPT_XEN_DOM0, 0, 0, OPT_XEN_DOM0_NUM},
+ {OPT_CREATE_UIO_DEV, 1, NULL, OPT_CREATE_UIO_DEV_NUM},
+ {OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM},
+ {0, 0, 0, 0}
+};
+
+static int lcores_parsed;
+static int master_lcore_parsed;
+static int mem_parsed;
+
+void
+eal_reset_internal_config(struct internal_config *internal_cfg)
+{
+ int i;
+
+ internal_cfg->memory = 0;
+ internal_cfg->force_nrank = 0;
+ internal_cfg->force_nchannel = 0;
+ internal_cfg->hugefile_prefix = HUGEFILE_PREFIX_DEFAULT;
+ internal_cfg->hugepage_dir = NULL;
+ internal_cfg->force_sockets = 0;
+ /* zero out the NUMA config */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ internal_cfg->socket_mem[i] = 0;
+ /* zero out hugedir descriptors */
+ for (i = 0; i < MAX_HUGEPAGE_SIZES; i++)
+ internal_cfg->hugepage_info[i].lock_descriptor = -1;
+ internal_cfg->base_virtaddr = 0;
+
+ internal_cfg->syslog_facility = LOG_DAEMON;
+ /* default value from build option */
+ internal_cfg->log_level = RTE_LOG_LEVEL;
+
+ internal_cfg->xen_dom0_support = 0;
+
+ /* if set to NONE, interrupt mode is determined automatically */
+ internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE;
+
+#ifdef RTE_LIBEAL_USE_HPET
+ internal_cfg->no_hpet = 0;
+#else
+ internal_cfg->no_hpet = 1;
+#endif
+ internal_cfg->vmware_tsc_map = 0;
+}
+
+/*
+ * Parse the coremask given as argument (hexadecimal string) and fill
+ * the global configuration (core role and core count) with the parsed
+ * value.
+ */
+static int xdigit2val(unsigned char c)
+{
+ int val;
+
+ if (isdigit(c))
+ val = c - '0';
+ else if (isupper(c))
+ val = c - 'A' + 10;
+ else
+ val = c - 'a' + 10;
+ return val;
+}
+
+static int
+eal_parse_coremask(const char *coremask)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ int i, j, idx = 0;
+ unsigned count = 0;
+ char c;
+ int val;
+
+ if (coremask == NULL)
+ return -1;
+	/* Remove leading and trailing blank characters.
+	 * Strip a leading 0x/0X prefix if present.
+	 */
+ while (isblank(*coremask))
+ coremask++;
+ if (coremask[0] == '0' && ((coremask[1] == 'x')
+ || (coremask[1] == 'X')))
+ coremask += 2;
+ i = strnlen(coremask, PATH_MAX);
+ while ((i > 0) && isblank(coremask[i - 1]))
+ i--;
+ if (i == 0)
+ return -1;
+
+ for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+ c = coremask[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters */
+ return -1;
+ }
+ val = xdigit2val(c);
+ for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; j++, idx++)
+ {
+ if ((1 << j) & val) {
+ if (!lcore_config[idx].detected) {
+ RTE_LOG(ERR, EAL, "lcore %u "
+ "unavailable\n", idx);
+ return -1;
+ }
+ cfg->lcore_role[idx] = ROLE_RTE;
+ lcore_config[idx].core_index = count;
+ count++;
+ } else {
+ cfg->lcore_role[idx] = ROLE_OFF;
+ lcore_config[idx].core_index = -1;
+ }
+ }
+ }
+ for (; i >= 0; i--)
+ if (coremask[i] != '0')
+ return -1;
+ for (; idx < RTE_MAX_LCORE; idx++) {
+ cfg->lcore_role[idx] = ROLE_OFF;
+ lcore_config[idx].core_index = -1;
+ }
+ if (count == 0)
+ return -1;
+ /* Update the count of enabled logical cores of the EAL configuration */
+ cfg->lcore_count = count;
+ lcores_parsed = 1;
+ return 0;
+}
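As a worked example of the parsing above, the coremask 0x15 (binary 10101) enables lcores 0, 2 and 4 and assigns them core_index 0, 1 and 2 in ascending bit order. A standalone sketch of the same bit scan (not the EAL parser itself):

    #include <stdio.h>

    int main(void)
    {
            unsigned long mask = 0x15;      /* example coremask */
            unsigned idx, count = 0;

            for (idx = 0; idx < 8 * sizeof(mask); idx++)
                    if (mask & (1UL << idx))
                            printf("lcore %u -> core_index %u\n", idx, count++);
            return 0;
    }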
+
+static int
+eal_parse_corelist(const char *corelist)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ int i, idx = 0;
+ unsigned count = 0;
+ char *end = NULL;
+ int min, max;
+
+ if (corelist == NULL)
+ return -1;
+
+ /* Remove all blank characters ahead and after */
+ while (isblank(*corelist))
+ corelist++;
+ i = strnlen(corelist, sysconf(_SC_ARG_MAX));
+ while ((i > 0) && isblank(corelist[i - 1]))
+ i--;
+
+ /* Reset config */
+ for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
+ cfg->lcore_role[idx] = ROLE_OFF;
+ lcore_config[idx].core_index = -1;
+ }
+
+ /* Get list of cores */
+ min = RTE_MAX_LCORE;
+ do {
+ while (isblank(*corelist))
+ corelist++;
+ if (*corelist == '\0')
+ return -1;
+ errno = 0;
+ idx = strtoul(corelist, &end, 10);
+ if (errno || end == NULL)
+ return -1;
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ min = idx;
+ } else if ((*end == ',') || (*end == '\0')) {
+ max = idx;
+ if (min == RTE_MAX_LCORE)
+ min = idx;
+ for (idx = min; idx <= max; idx++) {
+ cfg->lcore_role[idx] = ROLE_RTE;
+ lcore_config[idx].core_index = count;
+ count++;
+ }
+ min = RTE_MAX_LCORE;
+ } else
+ return -1;
+ corelist = end + 1;
+ } while (*end != '\0');
+
+ if (count == 0)
+ return -1;
+
+ lcores_parsed = 1;
+ return 0;
+}
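For illustration, the core list "1-3,6" enables lcores 1, 2, 3 and 6, the same set as the coremask 0x4E, with core_index values assigned in the order the cores are listed.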
+
+/* Changes the lcore id of the master thread */
+static int
+eal_parse_master_lcore(const char *arg)
+{
+ char *parsing_end;
+ struct rte_config *cfg = rte_eal_get_configuration();
+
+ errno = 0;
+ cfg->master_lcore = (uint32_t) strtol(arg, &parsing_end, 0);
+ if (errno || parsing_end[0] != 0)
+ return -1;
+ if (cfg->master_lcore >= RTE_MAX_LCORE)
+ return -1;
+ master_lcore_parsed = 1;
+ return 0;
+}
+
+static int
+eal_parse_syslog(const char *facility, struct internal_config *conf)
+{
+ int i;
+ static struct {
+ const char *name;
+ int value;
+ } map[] = {
+ { "auth", LOG_AUTH },
+ { "cron", LOG_CRON },
+ { "daemon", LOG_DAEMON },
+ { "ftp", LOG_FTP },
+ { "kern", LOG_KERN },
+ { "lpr", LOG_LPR },
+ { "mail", LOG_MAIL },
+ { "news", LOG_NEWS },
+ { "syslog", LOG_SYSLOG },
+ { "user", LOG_USER },
+ { "uucp", LOG_UUCP },
+ { "local0", LOG_LOCAL0 },
+ { "local1", LOG_LOCAL1 },
+ { "local2", LOG_LOCAL2 },
+ { "local3", LOG_LOCAL3 },
+ { "local4", LOG_LOCAL4 },
+ { "local5", LOG_LOCAL5 },
+ { "local6", LOG_LOCAL6 },
+ { "local7", LOG_LOCAL7 },
+ { NULL, 0 }
+ };
+
+ for (i = 0; map[i].name; i++) {
+ if (!strcmp(facility, map[i].name)) {
+ conf->syslog_facility = map[i].value;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static int
+eal_parse_log_level(const char *level, uint32_t *log_level)
+{
+ char *end;
+ unsigned long tmp;
+
+ errno = 0;
+ tmp = strtoul(level, &end, 0);
+
+ /* check for errors */
+ if ((errno != 0) || (level[0] == '\0') ||
+ end == NULL || (*end != '\0'))
+ return -1;
+
+ /* log_level is a uint32_t */
+ if (tmp >= UINT32_MAX)
+ return -1;
+
+ *log_level = tmp;
+ return 0;
+}
+
+static enum rte_proc_type_t
+eal_parse_proc_type(const char *arg)
+{
+ if (strncasecmp(arg, "primary", sizeof("primary")) == 0)
+ return RTE_PROC_PRIMARY;
+ if (strncasecmp(arg, "secondary", sizeof("secondary")) == 0)
+ return RTE_PROC_SECONDARY;
+ if (strncasecmp(arg, "auto", sizeof("auto")) == 0)
+ return RTE_PROC_AUTO;
+
+ return RTE_PROC_INVALID;
+}
+
+int
+eal_parse_common_option(int opt, const char *optarg,
+ struct internal_config *conf)
+{
+ switch (opt) {
+ /* blacklist */
+ case 'b':
+ if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI,
+ optarg) < 0) {
+ return -1;
+ }
+ break;
+ /* whitelist */
+ case 'w':
+ if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI,
+ optarg) < 0) {
+ return -1;
+ }
+ break;
+ /* coremask */
+ case 'c':
+ if (eal_parse_coremask(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid coremask\n");
+ return -1;
+ }
+ break;
+ /* corelist */
+ case 'l':
+ if (eal_parse_corelist(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid core list\n");
+ return -1;
+ }
+ break;
+ /* size of memory */
+ case 'm':
+ conf->memory = atoi(optarg);
+ conf->memory *= 1024ULL;
+ conf->memory *= 1024ULL;
+ mem_parsed = 1;
+ break;
+ /* force number of channels */
+ case 'n':
+ conf->force_nchannel = atoi(optarg);
+ if (conf->force_nchannel == 0 ||
+ conf->force_nchannel > 4) {
+ RTE_LOG(ERR, EAL, "invalid channel number\n");
+ return -1;
+ }
+ break;
+ /* force number of ranks */
+ case 'r':
+ conf->force_nrank = atoi(optarg);
+ if (conf->force_nrank == 0 ||
+ conf->force_nrank > 16) {
+ RTE_LOG(ERR, EAL, "invalid rank number\n");
+ return -1;
+ }
+ break;
+ case 'v':
+	/* since the message is explicitly requested by the user, write it
+	 * at the highest log level so it can always be seen, even if info
+	 * or warning messages are disabled */
+ RTE_LOG(CRIT, EAL, "RTE Version: '%s'\n", rte_version());
+ break;
+
+ /* long options */
+ case OPT_NO_HUGE_NUM:
+ conf->no_hugetlbfs = 1;
+ break;
+
+ case OPT_NO_PCI_NUM:
+ conf->no_pci = 1;
+ break;
+
+ case OPT_NO_HPET_NUM:
+ conf->no_hpet = 1;
+ break;
+
+ case OPT_VMWARE_TSC_MAP_NUM:
+ conf->vmware_tsc_map = 1;
+ break;
+
+ case OPT_NO_SHCONF_NUM:
+ conf->no_shconf = 1;
+ break;
+
+ case OPT_PROC_TYPE_NUM:
+ conf->process_type = eal_parse_proc_type(optarg);
+ break;
+
+ case OPT_MASTER_LCORE_NUM:
+ if (eal_parse_master_lcore(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid parameter for --"
+ OPT_MASTER_LCORE "\n");
+ return -1;
+ }
+ break;
+
+ case OPT_VDEV_NUM:
+ if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL,
+ optarg) < 0) {
+ return -1;
+ }
+ break;
+
+ case OPT_SYSLOG_NUM:
+ if (eal_parse_syslog(optarg, conf) < 0) {
+ RTE_LOG(ERR, EAL, "invalid parameters for --"
+ OPT_SYSLOG "\n");
+ return -1;
+ }
+ break;
+
+ case OPT_LOG_LEVEL_NUM: {
+ uint32_t log;
+
+ if (eal_parse_log_level(optarg, &log) < 0) {
+ RTE_LOG(ERR, EAL,
+ "invalid parameters for --"
+ OPT_LOG_LEVEL "\n");
+ return -1;
+ }
+ conf->log_level = log;
+ break;
+ }
+
+ /* don't know what to do, leave this to caller */
+ default:
+ return 1;
+
+ }
+
+ return 0;
+}
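A sketch of the intended calling pattern (an assumption based on the interfaces above, not code from this patch): the environment-specific argument parser loops over getopt_long() with eal_short_options/eal_long_options and delegates to eal_parse_common_option(), handling only the options for which it returns 1.

    #include <getopt.h>

    #include "eal_internal_cfg.h"
    #include "eal_options.h"

    static int
    parse_args_sketch(int argc, char **argv, struct internal_config *conf)
    {
            int opt, option_index, ret;

            while ((opt = getopt_long(argc, argv, eal_short_options,
                                      eal_long_options, &option_index)) != -1) {
                    ret = eal_parse_common_option(opt, optarg, conf);
                    if (ret < 0)
                            return -1;      /* common option was invalid */
                    if (ret == 1) {
                            /* not a common option: handle OS-specific ones here */
                    }
            }
            return 0;
    }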
+
+int
+eal_adjust_config(struct internal_config *internal_cfg)
+{
+ int i;
+ struct rte_config *cfg = rte_eal_get_configuration();
+
+ if (internal_config.process_type == RTE_PROC_AUTO)
+ internal_config.process_type = eal_proc_type_detect();
+
+ /* default master lcore is the first one */
+ if (!master_lcore_parsed)
+ cfg->master_lcore = rte_get_next_lcore(-1, 0, 0);
+
+ /* if no memory amounts were requested, this will result in 0 and
+ * will be overridden later, right after eal_hugepage_info_init() */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ internal_cfg->memory += internal_cfg->socket_mem[i];
+
+ return 0;
+}
+
+int
+eal_check_common_options(struct internal_config *internal_cfg)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+
+ if (!lcores_parsed) {
+ RTE_LOG(ERR, EAL, "CPU cores must be enabled with options "
+ "-c or -l\n");
+ return -1;
+ }
+ if (cfg->lcore_role[cfg->master_lcore] != ROLE_RTE) {
+ RTE_LOG(ERR, EAL, "Master lcore is not enabled for DPDK\n");
+ return -1;
+ }
+
+ if (internal_cfg->process_type == RTE_PROC_INVALID) {
+ RTE_LOG(ERR, EAL, "Invalid process type specified\n");
+ return -1;
+ }
+ if (internal_cfg->process_type == RTE_PROC_PRIMARY &&
+ internal_cfg->force_nchannel == 0) {
+ RTE_LOG(ERR, EAL, "Number of memory channels (-n) not "
+ "specified\n");
+ return -1;
+ }
+ if (index(internal_cfg->hugefile_prefix, '%') != NULL) {
+ RTE_LOG(ERR, EAL, "Invalid char, '%%', in --"OPT_FILE_PREFIX" "
+ "option\n");
+ return -1;
+ }
+ if (mem_parsed && internal_cfg->force_sockets == 1) {
+ RTE_LOG(ERR, EAL, "Options -m and --"OPT_SOCKET_MEM" cannot "
+ "be specified at the same time\n");
+ return -1;
+ }
+ if (internal_cfg->no_hugetlbfs &&
+ (mem_parsed || internal_cfg->force_sockets == 1)) {
+ RTE_LOG(ERR, EAL, "Options -m or --"OPT_SOCKET_MEM" cannot "
+ "be specified together with --"OPT_NO_HUGE"\n");
+ return -1;
+ }
+
+ if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) != 0 &&
+ rte_eal_devargs_type_count(RTE_DEVTYPE_BLACKLISTED_PCI) != 0) {
+ RTE_LOG(ERR, EAL, "Options blacklist (-b) and whitelist (-w) "
+ "cannot be used at the same time\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+eal_common_usage(void)
+{
+ printf("-c COREMASK -n NUM [-m NB] [-r NUM] [-b <domain:bus:devid.func>]"
+ "[--proc-type primary|secondary|auto]\n\n"
+ "EAL common options:\n"
+ " -c COREMASK : A hexadecimal bitmask of cores to run on\n"
+ " -l CORELIST : List of cores to run on\n"
+ " The argument format is <c1>[-c2][,c3[-c4],...]\n"
+ " where c1, c2, etc are core indexes between 0 and %d\n"
+ " --"OPT_MASTER_LCORE" ID: Core ID that is used as master\n"
+ " -n NUM : Number of memory channels\n"
+ " -v : Display version information on startup\n"
+ " -m MB : memory to allocate (see also --"OPT_SOCKET_MEM")\n"
+ " -r NUM : force number of memory ranks (don't detect)\n"
+ " --"OPT_SYSLOG" : set syslog facility\n"
+ " --"OPT_LOG_LEVEL" : set default log level\n"
+ " --"OPT_PROC_TYPE" : type of this process\n"
+ " --"OPT_PCI_BLACKLIST", -b: add a PCI device in black list.\n"
+ " Prevent EAL from using this PCI device. The argument\n"
+ " format is <domain:bus:devid.func>.\n"
+ " --"OPT_PCI_WHITELIST", -w: add a PCI device in white list.\n"
+ " Only use the specified PCI devices. The argument format\n"
+ " is <[domain:]bus:devid.func>. This option can be present\n"
+ " several times (once per device).\n"
+ " [NOTE: PCI whitelist cannot be used with -b option]\n"
+ " --"OPT_VDEV": add a virtual device.\n"
+ " The argument format is <driver><id>[,key=val,...]\n"
+ " (ex: --vdev=eth_pcap0,iface=eth2).\n"
+ " --"OPT_VMWARE_TSC_MAP": use VMware TSC map instead of native RDTSC\n"
+ "\nEAL options for DEBUG use only:\n"
+ " --"OPT_NO_HUGE" : use malloc instead of hugetlbfs\n"
+ " --"OPT_NO_PCI" : disable pci\n"
+ " --"OPT_NO_HPET" : disable hpet\n"
+ " --"OPT_NO_SHCONF": no shared config (mmap'd files)\n"
+ "\n", RTE_MAX_LCORE);
+}
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_pci.c b/src/dpdk_lib18/librte_eal/common/eal_common_pci.c
new file mode 100755
index 00000000..f3c7f71a
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_pci.c
@@ -0,0 +1,207 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* BSD LICENSE
+ *
+ * Copyright 2013-2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_string_fns.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+
+#include "eal_private.h"
+
+struct pci_driver_list pci_driver_list;
+struct pci_device_list pci_device_list;
+
+static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev)
+{
+ struct rte_devargs *devargs;
+
+ TAILQ_FOREACH(devargs, &devargs_list, next) {
+ if (devargs->type != RTE_DEVTYPE_BLACKLISTED_PCI &&
+ devargs->type != RTE_DEVTYPE_WHITELISTED_PCI)
+ continue;
+ if (!memcmp(&dev->addr, &devargs->pci.addr, sizeof(dev->addr)))
+ return devargs;
+ }
+ return NULL;
+}
+
+/*
+ * If the vendor/device ID matches, call the devinit() function of each
+ * registered driver for the given device. Return 0 on success, -1 if the
+ * initialization failed, and 1 if no driver is found for this device.
+ */
+static int
+pci_probe_all_drivers(struct rte_pci_device *dev)
+{
+ struct rte_pci_driver *dr = NULL;
+ int rc;
+
+ TAILQ_FOREACH(dr, &pci_driver_list, next) {
+ rc = rte_eal_pci_probe_one_driver(dr, dev);
+ if (rc < 0)
+ /* negative value is an error */
+ return -1;
+ if (rc > 0)
+ /* positive value means driver not found */
+ continue;
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Scan the content of the PCI bus, and call the devinit() function for
+ * all registered drivers that have a matching entry in their id_table
+ * for discovered devices.
+ */
+int
+rte_eal_pci_probe(void)
+{
+ struct rte_pci_device *dev = NULL;
+ struct rte_devargs *devargs;
+ int probe_all = 0;
+ int ret = 0;
+
+ if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) == 0)
+ probe_all = 1;
+
+ TAILQ_FOREACH(dev, &pci_device_list, next) {
+
+ /* set devargs in PCI structure */
+ devargs = pci_devargs_lookup(dev);
+ if (devargs != NULL)
+ dev->devargs = devargs;
+
+ /* probe all or only whitelisted devices */
+ if (probe_all)
+ ret = pci_probe_all_drivers(dev);
+ else if (devargs != NULL &&
+ devargs->type == RTE_DEVTYPE_WHITELISTED_PCI)
+ ret = pci_probe_all_drivers(dev);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Requested device " PCI_PRI_FMT
+ " cannot be used\n", dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ }
+
+ return 0;
+}
+
+/* dump one device */
+static int
+pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
+{
+ int i;
+
+ fprintf(f, PCI_PRI_FMT, dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id,
+ dev->id.device_id);
+
+ for (i = 0; i != sizeof(dev->mem_resource) /
+ sizeof(dev->mem_resource[0]); i++) {
+ fprintf(f, " %16.16"PRIx64" %16.16"PRIx64"\n",
+ dev->mem_resource[i].phys_addr,
+ dev->mem_resource[i].len);
+ }
+ return 0;
+}
+
+/* dump devices on the bus */
+void
+rte_eal_pci_dump(FILE *f)
+{
+ struct rte_pci_device *dev = NULL;
+
+ TAILQ_FOREACH(dev, &pci_device_list, next) {
+ pci_dump_one_device(f, dev);
+ }
+}
+
+/* register a driver */
+void
+rte_eal_pci_register(struct rte_pci_driver *driver)
+{
+ TAILQ_INSERT_TAIL(&pci_driver_list, driver, next);
+}
+
+/* unregister a driver */
+void
+rte_eal_pci_unregister(struct rte_pci_driver *driver)
+{
+ TAILQ_REMOVE(&pci_driver_list, driver, next);
+}
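An illustrative sketch (names are hypothetical, not from this patch) of how a poll-mode driver hooks into the probe flow above: it fills an rte_pci_driver with an ID table and registers it, after which rte_eal_pci_probe() calls its devinit() for matching devices.

    #include <rte_pci.h>

    static const struct rte_pci_id my_pci_ids[] = {
            { .vendor_id = 0x8086, .device_id = 0x10fb,
              .subsystem_vendor_id = PCI_ANY_ID,
              .subsystem_device_id = PCI_ANY_ID },
            { .vendor_id = 0 },     /* sentinel */
    };

    static struct rte_pci_driver my_driver = {
            .name = "my_pmd",
            .id_table = my_pci_ids,
            /* .devinit would point to the driver's init callback */
    };

    void
    my_pmd_register(void)
    {
            rte_eal_pci_register(&my_driver);   /* appends to pci_driver_list */
    }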
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_string_fns.c b/src/dpdk_lib18/librte_eal/common/eal_common_string_fns.c
new file mode 100755
index 00000000..125a3e2d
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_string_fns.c
@@ -0,0 +1,69 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <errno.h>
+
+#include <rte_string_fns.h>
+
+/* split string into tokens */
+int
+rte_strsplit(char *string, int stringlen,
+ char **tokens, int maxtokens, char delim)
+{
+ int i, tok = 0;
+ int tokstart = 1; /* first token is right at start of string */
+
+ if (string == NULL || tokens == NULL)
+ goto einval_error;
+
+ for (i = 0; i < stringlen; i++) {
+ if (string[i] == '\0' || tok >= maxtokens)
+ break;
+ if (tokstart) {
+ tokstart = 0;
+ tokens[tok++] = &string[i];
+ }
+ if (string[i] == delim) {
+ string[i] = '\0';
+ tokstart = 1;
+ }
+ }
+ return tok;
+
+einval_error:
+ errno = EINVAL;
+ return -1;
+}
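A usage sketch for rte_strsplit(): the split is done in place, so the input buffer is modified (each delimiter is overwritten with a NUL) and the returned tokens point into it.

    #include <stdio.h>

    #include <rte_string_fns.h>

    int main(void)
    {
            char line[] = "eth0,eth1,eth2";
            char *tok[8];
            int i, n;

            n = rte_strsplit(line, sizeof(line), tok, 8, ',');
            for (i = 0; i < n; i++)
                    printf("token %d: %s\n", i, tok[i]);  /* eth0, eth1, eth2 */
            return 0;
    }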
diff --git a/src/dpdk_lib18/librte_eal/common/eal_common_tailqs.c b/src/dpdk_lib18/librte_eal/common/eal_common_tailqs.c
new file mode 100755
index 00000000..db9a1850
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_common_tailqs.c
@@ -0,0 +1,146 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_memory.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+
+#include "eal_private.h"
+
+/**
+ * Names of the tailq heads, indexed by tailq index
+ */
+const char* rte_tailq_names[RTE_MAX_TAILQ] = {
+#define rte_tailq_elem(idx, name) name,
+#include <rte_tailq_elem.h>
+};
+
+struct rte_tailq_head *
+rte_eal_tailq_lookup(const char *name)
+{
+ unsigned i;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+ if (name == NULL)
+ return NULL;
+
+ for (i = 0; i < RTE_MAX_TAILQ; i++) {
+ if (rte_tailq_names[i] == NULL)
+ continue;
+ if (!strncmp(name, rte_tailq_names[i], RTE_TAILQ_NAMESIZE-1))
+ return &mcfg->tailq_head[i];
+ }
+
+ return NULL;
+}
+
+inline struct rte_tailq_head *
+rte_eal_tailq_lookup_by_idx(const unsigned tailq_idx)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+ if (tailq_idx >= RTE_MAX_TAILQ) {
+ RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
+ return NULL;
+ }
+
+ return &mcfg->tailq_head[tailq_idx];
+}
+
+struct rte_tailq_head *
+rte_eal_tailq_reserve(const char *name)
+{
+ return rte_eal_tailq_lookup(name);
+}
+
+inline struct rte_tailq_head *
+rte_eal_tailq_reserve_by_idx(const unsigned tailq_idx)
+{
+ return rte_eal_tailq_lookup_by_idx(tailq_idx);
+}
+
+void
+rte_dump_tailq(FILE *f)
+{
+ struct rte_mem_config *mcfg;
+ unsigned i = 0;
+
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ rte_rwlock_read_lock(&mcfg->qlock);
+	for (i = 0; i < RTE_MAX_TAILQ; i++) {
+ const struct rte_tailq_head *tailq = &mcfg->tailq_head[i];
+ const struct rte_tailq_entry_head *head = &tailq->tailq_head;
+
+ fprintf(f, "Tailq %u: qname:<%s>, tqh_first:%p, tqh_last:%p\n", i,
+ (rte_tailq_names[i] != NULL ? rte_tailq_names[i]:"nil"),
+ head->tqh_first, head->tqh_last);
+ }
+ rte_rwlock_read_unlock(&mcfg->qlock);
+}
+
+int
+rte_eal_tailqs_init(void)
+{
+ unsigned i;
+ struct rte_mem_config *mcfg = NULL;
+
+ RTE_BUILD_BUG_ON(RTE_MAX_TAILQ < RTE_TAILQ_NUM);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ mcfg = rte_eal_get_configuration()->mem_config;
+ for (i = 0; i < RTE_MAX_TAILQ; i++)
+ TAILQ_INIT(&mcfg->tailq_head[i].tailq_head);
+ }
+
+ return 0;
+}
+
diff --git a/src/dpdk_lib18/librte_eal/common/eal_filesystem.h b/src/dpdk_lib18/librte_eal/common/eal_filesystem.h
new file mode 100755
index 00000000..fdb4a70b
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_filesystem.h
@@ -0,0 +1,118 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Stores functions and path defines for files and directories
+ * on the filesystem that are used by the Linux EAL.
+ */
+
+#ifndef EAL_FILESYSTEM_H
+#define EAL_FILESYSTEM_H
+
+/** Path of rte config file. */
+#define RUNTIME_CONFIG_FMT "%s/.%s_config"
+
+#include <stdint.h>
+#include <limits.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#include <rte_string_fns.h>
+#include "eal_internal_cfg.h"
+
+static const char *default_config_dir = "/var/run";
+
+static inline const char *
+eal_runtime_config_path(void)
+{
+ static char buffer[PATH_MAX]; /* static so auto-zeroed */
+ const char *directory = default_config_dir;
+ const char *home_dir = getenv("HOME");
+
+ if (getuid() != 0 && home_dir != NULL)
+ directory = home_dir;
+ snprintf(buffer, sizeof(buffer) - 1, RUNTIME_CONFIG_FMT, directory,
+ internal_config.hugefile_prefix);
+ return buffer;
+}
+
+/** Path of hugepage info file. */
+#define HUGEPAGE_INFO_FMT "%s/.%s_hugepage_info"
+
+static inline const char *
+eal_hugepage_info_path(void)
+{
+ static char buffer[PATH_MAX]; /* static so auto-zeroed */
+ const char *directory = default_config_dir;
+ const char *home_dir = getenv("HOME");
+
+ if (getuid() != 0 && home_dir != NULL)
+ directory = home_dir;
+ snprintf(buffer, sizeof(buffer) - 1, HUGEPAGE_INFO_FMT, directory,
+ internal_config.hugefile_prefix);
+ return buffer;
+}
+
+/** String format for hugepage map files. */
+#define HUGEFILE_FMT "%s/%smap_%d"
+#define TEMP_HUGEFILE_FMT "%s/%smap_temp_%d"
+
+static inline const char *
+eal_get_hugefile_path(char *buffer, size_t buflen, const char *hugedir, int f_id)
+{
+ snprintf(buffer, buflen, HUGEFILE_FMT, hugedir,
+ internal_config.hugefile_prefix, f_id);
+ buffer[buflen - 1] = '\0';
+ return buffer;
+}
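For example, with the default "rte" prefix, eal_get_hugefile_path(buf, sizeof(buf), "/mnt/huge", 0) produces "/mnt/huge/rtemap_0" (the "/mnt/huge" mount point is illustrative).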
+
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+static inline const char *
+eal_get_hugefile_temp_path(char *buffer, size_t buflen, const char *hugedir, int f_id)
+{
+ snprintf(buffer, buflen, TEMP_HUGEFILE_FMT, hugedir,
+ internal_config.hugefile_prefix, f_id);
+ buffer[buflen - 1] = '\0';
+ return buffer;
+}
+#endif
+
+/** define the default filename prefix for the %s values above */
+#define HUGEFILE_PREFIX_DEFAULT "rte"
+
+/** Function to read a single numeric value from a file on the filesystem.
+ * Used to read information from files in /sys */
+int eal_parse_sysfs_value(const char *filename, unsigned long *val);
+
+#endif /* EAL_FILESYSTEM_H */
diff --git a/src/dpdk_lib18/librte_eal/common/eal_hugepages.h b/src/dpdk_lib18/librte_eal/common/eal_hugepages.h
new file mode 100755
index 00000000..38edac03
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_hugepages.h
@@ -0,0 +1,67 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EAL_HUGEPAGES_H
+#define EAL_HUGEPAGES_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <limits.h>
+
+#define MAX_HUGEPAGE_PATH PATH_MAX
+
+/**
+ * Structure used to store information about hugepages that we mapped
+ * through the files in hugetlbfs.
+ */
+struct hugepage_file {
+ void *orig_va; /**< virtual addr of first mmap() */
+ void *final_va; /**< virtual addr of 2nd mmap() */
+ uint64_t physaddr; /**< physical addr */
+ size_t size; /**< the page size */
+ int socket_id; /**< NUMA socket ID */
+ int file_id; /**< the '%d' in HUGEFILE_FMT */
+ int memseg_id; /**< the memory segment to which page belongs */
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ int repeated; /**< number of times the page size is repeated */
+#endif
+ char filepath[MAX_HUGEPAGE_PATH]; /**< path to backing file on filesystem */
+};
+
+/**
+ * Read the information from Linux about which hugepages are available
+ * for the EAL to use.
+ */
+int eal_hugepage_info_init(void);
+
+#endif /* EAL_HUGEPAGES_H */
diff --git a/src/dpdk_lib18/librte_eal/common/eal_internal_cfg.h b/src/dpdk_lib18/librte_eal/common/eal_internal_cfg.h
new file mode 100755
index 00000000..e2ecb0d0
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_internal_cfg.h
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Holds the structures for the eal internal configuration
+ */
+
+#ifndef EAL_INTERNAL_CFG_H
+#define EAL_INTERNAL_CFG_H
+
+#include <rte_eal.h>
+#include <rte_pci_dev_feature_defs.h>
+
+#define MAX_HUGEPAGE_SIZES 3 /**< support up to 3 page sizes */
+
+/*
+ * internal configuration structure for the number, size and
+ * mount points of hugepages
+ */
+struct hugepage_info {
+ uint64_t hugepage_sz; /**< size of a huge page */
+ const char *hugedir; /**< dir where hugetlbfs is mounted */
+ uint32_t num_pages[RTE_MAX_NUMA_NODES];
+ /**< number of hugepages of that size on each socket */
+ int lock_descriptor; /**< file descriptor for hugepage dir */
+};
+
+/**
+ * internal configuration
+ */
+struct internal_config {
+ volatile size_t memory; /**< amount of asked memory */
+ volatile unsigned force_nchannel; /**< force number of channels */
+ volatile unsigned force_nrank; /**< force number of ranks */
+ volatile unsigned no_hugetlbfs; /**< true to disable hugetlbfs */
+ volatile unsigned xen_dom0_support; /**< support app running on Xen Dom0*/
+ volatile unsigned no_pci; /**< true to disable PCI */
+ volatile unsigned no_hpet; /**< true to disable HPET */
+ volatile unsigned vmware_tsc_map; /**< true to use VMware TSC mapping
+ * instead of native TSC */
+ volatile unsigned no_shconf; /**< true if there is no shared config */
+ volatile unsigned create_uio_dev; /**< true to create /dev/uioX devices */
+ volatile enum rte_proc_type_t process_type; /**< multi-process proc type */
+ /** true to try allocating memory on specific sockets */
+ volatile unsigned force_sockets;
+ volatile uint64_t socket_mem[RTE_MAX_NUMA_NODES]; /**< amount of memory per socket */
+ uintptr_t base_virtaddr; /**< base address to try and reserve memory from */
+ volatile int syslog_facility; /**< facility passed to openlog() */
+ volatile uint32_t log_level; /**< default log level */
+ /** default interrupt mode for VFIO */
+ volatile enum rte_intr_mode vfio_intr_mode;
+ const char *hugefile_prefix; /**< the base filename of hugetlbfs files */
+ const char *hugepage_dir; /**< specific hugetlbfs directory to use */
+
+ unsigned num_hugepage_sizes; /**< how many sizes on this system */
+ struct hugepage_info hugepage_info[MAX_HUGEPAGE_SIZES];
+};
+extern struct internal_config internal_config; /**< Global EAL configuration. */
+
+void eal_reset_internal_config(struct internal_config *internal_cfg);
+
+#endif /* EAL_INTERNAL_CFG_H */
diff --git a/src/dpdk_lib18/librte_eal/common/eal_options.h b/src/dpdk_lib18/librte_eal/common/eal_options.h
new file mode 100755
index 00000000..e476f8d8
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_options.h
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EAL_OPTIONS_H
+#define EAL_OPTIONS_H
+
+enum {
+ /* long options mapped to a short option */
+#define OPT_PCI_WHITELIST "pci-whitelist"
+ OPT_PCI_WHITELIST_NUM = 'w',
+#define OPT_PCI_BLACKLIST "pci-blacklist"
+ OPT_PCI_BLACKLIST_NUM = 'b',
+
+ /* first long only option value must be >= 256, so that we won't
+ * conflict with short options */
+ OPT_LONG_MIN_NUM = 256,
+#define OPT_HUGE_DIR "huge-dir"
+ OPT_HUGE_DIR_NUM = OPT_LONG_MIN_NUM,
+#define OPT_MASTER_LCORE "master-lcore"
+ OPT_MASTER_LCORE_NUM,
+#define OPT_PROC_TYPE "proc-type"
+ OPT_PROC_TYPE_NUM,
+#define OPT_NO_SHCONF "no-shconf"
+ OPT_NO_SHCONF_NUM,
+#define OPT_NO_HPET "no-hpet"
+ OPT_NO_HPET_NUM,
+#define OPT_VMWARE_TSC_MAP "vmware-tsc-map"
+ OPT_VMWARE_TSC_MAP_NUM,
+#define OPT_NO_PCI "no-pci"
+ OPT_NO_PCI_NUM,
+#define OPT_NO_HUGE "no-huge"
+ OPT_NO_HUGE_NUM,
+#define OPT_FILE_PREFIX "file-prefix"
+ OPT_FILE_PREFIX_NUM,
+#define OPT_SOCKET_MEM "socket-mem"
+ OPT_SOCKET_MEM_NUM,
+#define OPT_VDEV "vdev"
+ OPT_VDEV_NUM,
+#define OPT_SYSLOG "syslog"
+ OPT_SYSLOG_NUM,
+#define OPT_LOG_LEVEL "log-level"
+ OPT_LOG_LEVEL_NUM,
+#define OPT_BASE_VIRTADDR "base-virtaddr"
+ OPT_BASE_VIRTADDR_NUM,
+#define OPT_XEN_DOM0 "xen-dom0"
+ OPT_XEN_DOM0_NUM,
+#define OPT_CREATE_UIO_DEV "create-uio-dev"
+ OPT_CREATE_UIO_DEV_NUM,
+#define OPT_VFIO_INTR "vfio-intr"
+ OPT_VFIO_INTR_NUM,
+ OPT_LONG_MAX_NUM
+};
+
+extern const char eal_short_options[];
+extern const struct option eal_long_options[];
+
+int eal_parse_common_option(int opt, const char *argv,
+ struct internal_config *conf);
+int eal_adjust_config(struct internal_config *internal_cfg);
+int eal_check_common_options(struct internal_config *internal_cfg);
+void eal_common_usage(void);
+enum rte_proc_type_t eal_proc_type_detect(void);
+
+#endif /* EAL_OPTIONS_H */
diff --git a/src/dpdk_lib18/librte_eal/common/eal_private.h b/src/dpdk_lib18/librte_eal/common/eal_private.h
new file mode 100755
index 00000000..232fcecc
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_private.h
@@ -0,0 +1,206 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _EAL_PRIVATE_H_
+#define _EAL_PRIVATE_H_
+
+#include <stdio.h>
+
+/**
+ * Initialize the memzone subsystem (private to eal).
+ *
+ * @return
+ * - 0 on success
+ * - Negative on error
+ */
+int rte_eal_memzone_init(void);
+
+/**
+ * Common log initialization function (private to eal).
+ *
+ * Called by environment-specific log initialization function to initialize
+ * log history.
+ *
+ * @param default_log
+ * The default log stream to be used.
+ * @return
+ * - 0 on success
+ * - Negative on error
+ */
+int rte_eal_common_log_init(FILE *default_log);
+
+/**
+ * Fill configuration with number of physical and logical processors
+ *
+ * This function is private to EAL.
+ *
+ * Parse /proc/cpuinfo to get the number of physical and logical
+ * processors on the machine.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_cpu_init(void);
+
+/**
+ * Map memory
+ *
+ * This function is private to EAL.
+ *
+ * Fill the configuration structure with the resulting memory layout, and return 0 on success.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_memory_init(void);
+
+/**
+ * Configure timers
+ *
+ * This function is private to EAL.
+ *
+ * Mmap memory areas used by HPET (high precision event timer) that will
+ * provide our time reference, and configure the TSC frequency also for it
+ * to be used as a reference.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_timer_init(void);
+
+/**
+ * Init early logs
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_log_early_init(void);
+
+/**
+ * Init the default log stream
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_log_init(const char *id, int facility);
+
+/**
+ * Init the PCI infrastructure
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_pci_init(void);
+
+#ifdef RTE_LIBRTE_IVSHMEM
+/**
+ * Init the memory from IVSHMEM devices
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_ivshmem_init(void);
+
+/**
+ * Init objects in IVSHMEM devices
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_ivshmem_obj_init(void);
+#endif
+
+struct rte_pci_driver;
+struct rte_pci_device;
+
+/**
+ * Mmap memory for single PCI device
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr,
+ struct rte_pci_device *dev);
+
+/**
+ * Init tail queues for non-EAL library structures. This is to allow
+ * the rings, mempools, etc. lists to be shared among multiple processes
+ *
+ * This function is private to EAL
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_tailqs_init(void);
+
+/**
+ * Init interrupt handling.
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_intr_init(void);
+
+/**
+ * Init alarm mechanism. This is to allow a callback be called after
+ * specific time.
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_alarm_init(void);
+
+/**
+ * This function initialises any virtual devices
+ *
+ * This function is private to the EAL.
+ */
+int rte_eal_dev_init(void);
+
+#endif /* _EAL_PRIVATE_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/eal_thread.h b/src/dpdk_lib18/librte_eal/common/eal_thread.h
new file mode 100755
index 00000000..b53b84d3
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/eal_thread.h
@@ -0,0 +1,53 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EAL_THREAD_H
+#define EAL_THREAD_H
+
+/**
+ * Basic loop of a thread, called for each thread by eal_init().
+ *
+ * @param arg
+ * opaque pointer
+ */
+__attribute__((noreturn)) void *eal_thread_loop(void *arg);
+
+/**
+ * Init per-lcore info for master thread
+ *
+ * @param lcore_id
+ * identifier of master lcore
+ */
+void eal_thread_init_master(unsigned lcore_id);
+
+#endif /* EAL_THREAD_H */
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_atomic.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_atomic.h
new file mode 100755
index 00000000..fb7af2bd
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_atomic.h
@@ -0,0 +1,426 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) IBM Corporation 2014.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of IBM Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*
+ * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
+ * Copyright (c) 2008 Marcel Moolenaar
+ * Copyright (c) 2001 Benno Rice
+ * Copyright (c) 2001 David E. O'Brien
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ */
+
+#ifndef _RTE_ATOMIC_PPC_64_H_
+#define _RTE_ATOMIC_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_atomic.h"
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ */
+#define rte_mb() {asm volatile("sync" : : : "memory"); }
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ */
+#define rte_wmb() {asm volatile("sync" : : : "memory"); }
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ */
+#define rte_rmb() {asm volatile("sync" : : : "memory"); }
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+/* To be compatible with Power7, use GCC built-in functions for 16 bit
+ * operations */
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+ return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
+ __ATOMIC_ACQUIRE) ? 1 : 0;
+}
+
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+ return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+ __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+}
+
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+ __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+}
+
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+ return (__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0);
+}
+
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+ return (__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0);
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+ unsigned int ret = 0;
+
+ asm volatile(
+ "\tlwsync\n"
+ "1:\tlwarx %[ret], 0, %[dst]\n"
+ "cmplw %[exp], %[ret]\n"
+ "bne 2f\n"
+ "stwcx. %[src], 0, %[dst]\n"
+ "bne- 1b\n"
+ "li %[ret], 1\n"
+ "b 3f\n"
+ "2:\n"
+ "stwcx. %[ret], 0, %[dst]\n"
+ "li %[ret], 0\n"
+ "3:\n"
+ "isync\n"
+ : [ret] "=&r" (ret), "=m" (*dst)
+ : [dst] "r" (dst),
+ [exp] "r" (exp),
+ [src] "r" (src),
+ "m" (*dst)
+ : "cc", "memory");
+
+ return ret;
+}
+
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+ return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+ int t;
+
+ asm volatile(
+ "1: lwarx %[t],0,%[cnt]\n"
+ "addic %[t],%[t],1\n"
+ "stwcx. %[t],0,%[cnt]\n"
+ "bne- 1b\n"
+ : [t] "=&r" (t), "=m" (v->cnt)
+ : [cnt] "r" (&v->cnt), "m" (v->cnt)
+ : "cc", "xer", "memory");
+}
+
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+ int t;
+
+ asm volatile(
+ "1: lwarx %[t],0,%[cnt]\n"
+ "addic %[t],%[t],-1\n"
+ "stwcx. %[t],0,%[cnt]\n"
+ "bne- 1b\n"
+ : [t] "=&r" (t), "=m" (v->cnt)
+ : [cnt] "r" (&v->cnt), "m" (v->cnt)
+ : "cc", "xer", "memory");
+}
+
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+ int ret;
+
+ asm volatile(
+ "\n\tlwsync\n"
+ "1: lwarx %[ret],0,%[cnt]\n"
+ "addic %[ret],%[ret],1\n"
+ "stwcx. %[ret],0,%[cnt]\n"
+ "bne- 1b\n"
+ "isync\n"
+ : [ret] "=&r" (ret)
+ : [cnt] "r" (&v->cnt)
+ : "cc", "xer", "memory");
+
+ return (ret == 0);
+}
+
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+ int ret;
+
+ asm volatile(
+ "\n\tlwsync\n"
+ "1: lwarx %[ret],0,%[cnt]\n"
+ "addic %[ret],%[ret],-1\n"
+ "stwcx. %[ret],0,%[cnt]\n"
+ "bne- 1b\n"
+ "isync\n"
+ : [ret] "=&r" (ret)
+ : [cnt] "r" (&v->cnt)
+ : "cc", "xer", "memory");
+
+ return (ret == 0);
+}
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+ unsigned int ret = 0;
+
+ asm volatile (
+ "\tlwsync\n"
+ "1: ldarx %[ret], 0, %[dst]\n"
+ "cmpld %[exp], %[ret]\n"
+ "bne 2f\n"
+ "stdcx. %[src], 0, %[dst]\n"
+ "bne- 1b\n"
+ "li %[ret], 1\n"
+ "b 3f\n"
+ "2:\n"
+ "stdcx. %[ret], 0, %[dst]\n"
+ "li %[ret], 0\n"
+ "3:\n"
+ "isync\n"
+ : [ret] "=&r" (ret), "=m" (*dst)
+ : [dst] "r" (dst),
+ [exp] "r" (exp),
+ [src] "r" (src),
+ "m" (*dst)
+ : "cc", "memory");
+ return ret;
+}
+
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+ v->cnt = 0;
+}
+
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+ long ret;
+
+ asm volatile("ld%U1%X1 %[ret],%[cnt]"
+ : [ret] "=r"(ret)
+ : [cnt] "m"(v->cnt));
+
+ return ret;
+}
+
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+ asm volatile("std%U0%X0 %[new_value],%[cnt]"
+ : [cnt] "=m"(v->cnt)
+ : [new_value] "r"(new_value));
+}
+
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+ long t;
+
+ asm volatile(
+ "1: ldarx %[t],0,%[cnt]\n"
+ "add %[t],%[inc],%[t]\n"
+ "stdcx. %[t],0,%[cnt]\n"
+ "bne- 1b\n"
+ : [t] "=&r" (t), "=m" (v->cnt)
+ : [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
+ : "cc", "memory");
+}
+
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+ long t;
+
+ asm volatile(
+ "1: ldarx %[t],0,%[cnt]\n"
+ "subf %[t],%[dec],%[t]\n"
+ "stdcx. %[t],0,%[cnt]\n"
+ "bne- 1b\n"
+ : [t] "=&r" (t), "+m" (v->cnt)
+ : [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
+ : "cc", "memory");
+}
+
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+ long t;
+
+ asm volatile(
+ "1: ldarx %[t],0,%[cnt]\n"
+ "addic %[t],%[t],1\n"
+ "stdcx. %[t],0,%[cnt]\n"
+ "bne- 1b\n"
+ : [t] "=&r" (t), "+m" (v->cnt)
+ : [cnt] "r" (&v->cnt), "m" (v->cnt)
+ : "cc", "xer", "memory");
+}
+
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+ long t;
+
+ asm volatile(
+ "1: ldarx %[t],0,%[cnt]\n"
+ "addic %[t],%[t],-1\n"
+ "stdcx. %[t],0,%[cnt]\n"
+ "bne- 1b\n"
+ : [t] "=&r" (t), "+m" (v->cnt)
+ : [cnt] "r" (&v->cnt), "m" (v->cnt)
+ : "cc", "xer", "memory");
+}
+
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+ long ret;
+
+ asm volatile(
+ "\n\tlwsync\n"
+ "1: ldarx %[ret],0,%[cnt]\n"
+ "add %[ret],%[inc],%[ret]\n"
+ "stdcx. %[ret],0,%[cnt]\n"
+ "bne- 1b\n"
+ "isync\n"
+ : [ret] "=&r" (ret)
+ : [inc] "r" (inc), [cnt] "r" (&v->cnt)
+ : "cc", "memory");
+
+ return ret;
+}
+
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+ long ret;
+
+ asm volatile(
+ "\n\tlwsync\n"
+ "1: ldarx %[ret],0,%[cnt]\n"
+ "subf %[ret],%[dec],%[ret]\n"
+ "stdcx. %[ret],0,%[cnt]\n"
+ "bne- 1b\n"
+ "isync\n"
+ : [ret] "=&r" (ret)
+ : [dec] "r" (dec), [cnt] "r" (&v->cnt)
+ : "cc", "memory");
+
+ return ret;
+}
+
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+ long ret;
+
+ asm volatile(
+ "\n\tlwsync\n"
+ "1: ldarx %[ret],0,%[cnt]\n"
+ "addic %[ret],%[ret],1\n"
+ "stdcx. %[ret],0,%[cnt]\n"
+ "bne- 1b\n"
+ "isync\n"
+ : [ret] "=&r" (ret)
+ : [cnt] "r" (&v->cnt)
+ : "cc", "xer", "memory");
+
+ return (ret == 0);
+}
+
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+ long ret;
+
+ asm volatile(
+ "\n\tlwsync\n"
+ "1: ldarx %[ret],0,%[cnt]\n"
+ "addic %[ret],%[ret],-1\n"
+ "stdcx. %[ret],0,%[cnt]\n"
+ "bne- 1b\n"
+ "isync\n"
+ : [ret] "=&r" (ret)
+ : [cnt] "r" (&v->cnt)
+ : "cc", "xer", "memory");
+
+ return (ret == 0);
+}
+
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+ return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+
+/**
+ * Atomically set a 64-bit counter to 0.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+ v->cnt = 0;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ATOMIC_PPC_64_H_ */
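
A minimal sketch of how the cmpset contract above is typically consumed: a compare-and-swap retry loop. The cmpset32() helper below is a hypothetical stand-in that mirrors rte_atomic32_cmpset() with a GCC builtin so the snippet builds without a DPDK tree; it is an illustration of the calling pattern, not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in mirroring the rte_atomic32_cmpset() contract with
     * a GCC builtin: store 'src' to '*dst' only if '*dst' still equals 'exp';
     * return 1 on success, 0 otherwise. */
    static int cmpset32(volatile uint32_t *dst, uint32_t exp, uint32_t src)
    {
        return __atomic_compare_exchange_n((uint32_t *)dst, &exp, src, 0,
                                           __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
    }

    int main(void)
    {
        volatile uint32_t cnt = 0;
        uint32_t old;

        /* The usual compare-and-swap retry loop built on that contract. */
        do {
            old = cnt;
        } while (!cmpset32(&cnt, old, old + 1));

        printf("cnt = %u\n", (unsigned)cnt);    /* prints "cnt = 1" */
        return 0;
    }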
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_byteorder.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_byteorder.h
new file mode 100755
index 00000000..80436f24
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_byteorder.h
@@ -0,0 +1,149 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) IBM Corporation 2014.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of IBM Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* Inspired from FreeBSD src/sys/powerpc/include/endian.h
+ * Copyright (c) 1987, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+*/
+
+#ifndef _RTE_BYTEORDER_PPC_64_H_
+#define _RTE_BYTEORDER_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_byteorder.h"
+
+/*
+ * An architecture-optimized byte swap for a 16-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap16().
+ */
+static inline uint16_t rte_arch_bswap16(uint16_t _x)
+{
+ return ((_x >> 8) | ((_x << 8) & 0xff00));
+}
+
+/*
+ * An architecture-optimized byte swap for a 32-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap32().
+ */
+static inline uint32_t rte_arch_bswap32(uint32_t _x)
+{
+ return ((_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
+ ((_x << 24) & 0xff000000));
+}
+
+/*
+ * An architecture-optimized byte swap for a 64-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap64().
+ */
+/* 64-bit mode */
+static inline uint64_t rte_arch_bswap64(uint64_t _x)
+{
+ return ((_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
+ ((_x >> 8) & 0xff000000) | ((_x << 8) & (0xffULL << 32)) |
+ ((_x << 24) & (0xffULL << 40)) |
+ ((_x << 40) & (0xffULL << 48)) | ((_x << 56)));
+}
+
+#ifndef RTE_FORCE_INTRINSICS
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \
+ rte_constant_bswap16(x) : \
+ rte_arch_bswap16(x)))
+
+#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ? \
+ rte_constant_bswap32(x) : \
+ rte_arch_bswap32(x)))
+
+#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ? \
+ rte_constant_bswap64(x) : \
+ rte_arch_bswap64(x)))
+#else
+/*
+ * __builtin_bswap16 is only available in gcc 4.8 and upwards
+ */
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \
+ rte_constant_bswap16(x) : \
+ rte_arch_bswap16(x)))
+#endif
+#endif
+
+/* Power 8 has both little-endian and big-endian modes;
+ * Power 7 only supports big endian.
+ */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define rte_cpu_to_le_16(x) (x)
+#define rte_cpu_to_le_32(x) (x)
+#define rte_cpu_to_le_64(x) (x)
+
+#define rte_cpu_to_be_16(x) rte_bswap16(x)
+#define rte_cpu_to_be_32(x) rte_bswap32(x)
+#define rte_cpu_to_be_64(x) rte_bswap64(x)
+
+#define rte_le_to_cpu_16(x) (x)
+#define rte_le_to_cpu_32(x) (x)
+#define rte_le_to_cpu_64(x) (x)
+
+#define rte_be_to_cpu_16(x) rte_bswap16(x)
+#define rte_be_to_cpu_32(x) rte_bswap32(x)
+#define rte_be_to_cpu_64(x) rte_bswap64(x)
+
+#else /* RTE_BIG_ENDIAN */
+
+#define rte_cpu_to_le_16(x) rte_bswap16(x)
+#define rte_cpu_to_le_32(x) rte_bswap32(x)
+#define rte_cpu_to_le_64(x) rte_bswap64(x)
+
+#define rte_cpu_to_be_16(x) (x)
+#define rte_cpu_to_be_32(x) (x)
+#define rte_cpu_to_be_64(x) (x)
+
+#define rte_le_to_cpu_16(x) rte_bswap16(x)
+#define rte_le_to_cpu_32(x) rte_bswap32(x)
+#define rte_le_to_cpu_64(x) rte_bswap64(x)
+
+#define rte_be_to_cpu_16(x) (x)
+#define rte_be_to_cpu_32(x) (x)
+#define rte_be_to_cpu_64(x) (x)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BYTEORDER_PPC_64_H_ */
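
As a rough illustration of how the conversion macros above are meant to be used, the sketch below writes a 32-bit field in network (big-endian) order and reads it back. cpu_to_be_32()/be_to_cpu_32() here are hypothetical stand-ins built on __builtin_bswap32(), not the macros from this header.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for rte_cpu_to_be_32()/rte_be_to_cpu_32():
     * swap on little-endian hosts, identity on big-endian ones. */
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    #define cpu_to_be_32(x) __builtin_bswap32(x)
    #define be_to_cpu_32(x) __builtin_bswap32(x)
    #else
    #define cpu_to_be_32(x) (x)
    #define be_to_cpu_32(x) (x)
    #endif

    int main(void)
    {
        uint32_t host = 0x11223344;
        uint32_t wire = cpu_to_be_32(host);    /* byte order used on the wire */

        printf("host %#x -> wire %#x -> host %#x\n",
               (unsigned)host, (unsigned)wire, (unsigned)be_to_cpu_32(wire));
        return 0;
    }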
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h
new file mode 100755
index 00000000..df450470
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h
@@ -0,0 +1,187 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) IBM Corporation 2014.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of IBM Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _RTE_CPUFLAGS_PPC_64_H_
+#define _RTE_CPUFLAGS_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <elf.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <unistd.h>
+
+#include "generic/rte_cpuflags.h"
+
+/* Symbolic values for the entries in the auxiliary table */
+#define AT_HWCAP 16
+#define AT_HWCAP2 26
+
+/* software based registers */
+enum cpu_register_t {
+ REG_HWCAP = 0,
+ REG_HWCAP2,
+};
+
+/**
+ * Enumeration of all CPU features supported
+ */
+enum rte_cpu_flag_t {
+ RTE_CPUFLAG_PPC_LE = 0,
+ RTE_CPUFLAG_TRUE_LE,
+ RTE_CPUFLAG_PSERIES_PERFMON_COMPAT,
+ RTE_CPUFLAG_VSX,
+ RTE_CPUFLAG_ARCH_2_06,
+ RTE_CPUFLAG_POWER6_EXT,
+ RTE_CPUFLAG_DFP,
+ RTE_CPUFLAG_PA6T,
+ RTE_CPUFLAG_ARCH_2_05,
+ RTE_CPUFLAG_ICACHE_SNOOP,
+ RTE_CPUFLAG_SMT,
+ RTE_CPUFLAG_BOOKE,
+ RTE_CPUFLAG_CELLBE,
+ RTE_CPUFLAG_POWER5_PLUS,
+ RTE_CPUFLAG_POWER5,
+ RTE_CPUFLAG_POWER4,
+ RTE_CPUFLAG_NOTB,
+ RTE_CPUFLAG_EFP_DOUBLE,
+ RTE_CPUFLAG_EFP_SINGLE,
+ RTE_CPUFLAG_SPE,
+ RTE_CPUFLAG_UNIFIED_CACHE,
+ RTE_CPUFLAG_4xxMAC,
+ RTE_CPUFLAG_MMU,
+ RTE_CPUFLAG_FPU,
+ RTE_CPUFLAG_ALTIVEC,
+ RTE_CPUFLAG_PPC601,
+ RTE_CPUFLAG_PPC64,
+ RTE_CPUFLAG_PPC32,
+ RTE_CPUFLAG_TAR,
+ RTE_CPUFLAG_LSEL,
+ RTE_CPUFLAG_EBB,
+ RTE_CPUFLAG_DSCR,
+ RTE_CPUFLAG_HTM,
+ RTE_CPUFLAG_ARCH_2_07,
+ /* The last item */
+ RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! */
+};
+
+static const struct feature_entry cpu_feature_table[] = {
+ FEAT_DEF(PPC_LE, 0x00000001, 0, REG_HWCAP, 0)
+ FEAT_DEF(TRUE_LE, 0x00000001, 0, REG_HWCAP, 1)
+ FEAT_DEF(PSERIES_PERFMON_COMPAT, 0x00000001, 0, REG_HWCAP, 6)
+ FEAT_DEF(VSX, 0x00000001, 0, REG_HWCAP, 7)
+ FEAT_DEF(ARCH_2_06, 0x00000001, 0, REG_HWCAP, 8)
+ FEAT_DEF(POWER6_EXT, 0x00000001, 0, REG_HWCAP, 9)
+ FEAT_DEF(DFP, 0x00000001, 0, REG_HWCAP, 10)
+ FEAT_DEF(PA6T, 0x00000001, 0, REG_HWCAP, 11)
+ FEAT_DEF(ARCH_2_05, 0x00000001, 0, REG_HWCAP, 12)
+ FEAT_DEF(ICACHE_SNOOP, 0x00000001, 0, REG_HWCAP, 13)
+ FEAT_DEF(SMT, 0x00000001, 0, REG_HWCAP, 14)
+ FEAT_DEF(BOOKE, 0x00000001, 0, REG_HWCAP, 15)
+ FEAT_DEF(CELLBE, 0x00000001, 0, REG_HWCAP, 16)
+ FEAT_DEF(POWER5_PLUS, 0x00000001, 0, REG_HWCAP, 17)
+ FEAT_DEF(POWER5, 0x00000001, 0, REG_HWCAP, 18)
+ FEAT_DEF(POWER4, 0x00000001, 0, REG_HWCAP, 19)
+ FEAT_DEF(NOTB, 0x00000001, 0, REG_HWCAP, 20)
+ FEAT_DEF(EFP_DOUBLE, 0x00000001, 0, REG_HWCAP, 21)
+ FEAT_DEF(EFP_SINGLE, 0x00000001, 0, REG_HWCAP, 22)
+ FEAT_DEF(SPE, 0x00000001, 0, REG_HWCAP, 23)
+ FEAT_DEF(UNIFIED_CACHE, 0x00000001, 0, REG_HWCAP, 24)
+ FEAT_DEF(4xxMAC, 0x00000001, 0, REG_HWCAP, 25)
+ FEAT_DEF(MMU, 0x00000001, 0, REG_HWCAP, 26)
+ FEAT_DEF(FPU, 0x00000001, 0, REG_HWCAP, 27)
+ FEAT_DEF(ALTIVEC, 0x00000001, 0, REG_HWCAP, 28)
+ FEAT_DEF(PPC601, 0x00000001, 0, REG_HWCAP, 29)
+ FEAT_DEF(PPC64, 0x00000001, 0, REG_HWCAP, 30)
+ FEAT_DEF(PPC32, 0x00000001, 0, REG_HWCAP, 31)
+ FEAT_DEF(TAR, 0x00000001, 0, REG_HWCAP2, 26)
+ FEAT_DEF(LSEL, 0x00000001, 0, REG_HWCAP2, 27)
+ FEAT_DEF(EBB, 0x00000001, 0, REG_HWCAP2, 28)
+ FEAT_DEF(DSCR, 0x00000001, 0, REG_HWCAP2, 29)
+ FEAT_DEF(HTM, 0x00000001, 0, REG_HWCAP2, 30)
+ FEAT_DEF(ARCH_2_07, 0x00000001, 0, REG_HWCAP2, 31)
+};
+
+/*
+ * Read the AUXV software registers and get the CPU features for Power
+ */
+static inline void
+rte_cpu_get_features(__attribute__((unused)) uint32_t leaf,
+ __attribute__((unused)) uint32_t subleaf, cpuid_registers_t out)
+{
+ int auxv_fd;
+ Elf64_auxv_t auxv;
+
+ auxv_fd = open("/proc/self/auxv", O_RDONLY);
+ assert(auxv_fd != -1);
+ while (read(auxv_fd, &auxv,
+ sizeof(Elf64_auxv_t)) == sizeof(Elf64_auxv_t)) {
+ if (auxv.a_type == AT_HWCAP)
+ out[REG_HWCAP] = auxv.a_un.a_val;
+ else if (auxv.a_type == AT_HWCAP2)
+ out[REG_HWCAP2] = auxv.a_un.a_val;
+ }
+}
+
+/*
+ * Checks whether a particular flag is available on the current machine.
+ */
+static inline int
+rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature)
+{
+ const struct feature_entry *feat;
+ cpuid_registers_t regs = {0};
+
+ if (feature >= RTE_CPUFLAG_NUMFLAGS)
+ /* Flag does not match anything in the feature tables */
+ return -ENOENT;
+
+ feat = &cpu_feature_table[feature];
+
+ if (!feat->leaf)
+ /* This entry in the table wasn't filled out! */
+ return -EFAULT;
+
+ /* get the cpuid leaf containing the desired feature */
+ rte_cpu_get_features(feat->leaf, feat->subleaf, regs);
+
+ /* check if the feature is enabled */
+ return (regs[feat->reg] >> feat->bit) & 1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CPUFLAGS_PPC_64_H_ */
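
The header above parses /proc/self/auxv by hand; a sketch of the same lookup using the glibc helper getauxval() (glibc >= 2.16) is shown below. The ISA 2.07 bit test only means something on a Power host and mirrors the FEAT_DEF(ARCH_2_07, ..., REG_HWCAP2, 31) table entry; treat the whole snippet as illustrative rather than part of the patch.

    #include <stdio.h>
    #include <elf.h>          /* AT_HWCAP / AT_HWCAP2 */
    #include <sys/auxv.h>     /* getauxval(), glibc >= 2.16 */

    int main(void)
    {
        /* Same information the header reads from /proc/self/auxv, obtained
         * here through the glibc helper instead of open()/read(). */
        unsigned long hwcap  = getauxval(AT_HWCAP);
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("AT_HWCAP  = 0x%lx\n", hwcap);
        printf("AT_HWCAP2 = 0x%lx\n", hwcap2);

        /* A flag check is then a single bit test; on a Power host, bit 31 of
         * HWCAP2 corresponds to the ARCH_2_07 entry in the table above. */
        printf("ISA 2.07: %s\n", (hwcap2 >> 31) & 1 ? "yes" : "no");
        return 0;
    }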
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cycles.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cycles.h
new file mode 100755
index 00000000..fd26e8e7
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_cycles.h
@@ -0,0 +1,87 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) IBM Corporation 2014.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of IBM Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _RTE_CYCLES_PPC_64_H_
+#define _RTE_CYCLES_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_cycles.h"
+
+/**
+ * Read the time base register.
+ *
+ * @return
+ * The time base for this lcore.
+ */
+static inline uint64_t
+rte_rdtsc(void)
+{
+ union {
+ uint64_t tsc_64;
+ struct {
+ uint32_t hi_32;
+ uint32_t lo_32;
+ };
+ } tsc;
+ uint32_t tmp;
+
+ asm volatile(
+ "0:\n"
+ "mftbu %[hi32]\n"
+ "mftb %[lo32]\n"
+ "mftbu %[tmp]\n"
+ "cmpw %[tmp],%[hi32]\n"
+ "bne 0b\n"
+ : [hi32] "=r"(tsc.hi_32), [lo32] "=r"(tsc.lo_32),
+ [tmp] "=r"(tmp)
+ );
+ return tsc.tsc_64;
+}
+
+static inline uint64_t
+rte_rdtsc_precise(void)
+{
+ rte_mb();
+ return rte_rdtsc();
+}
+
+static inline uint64_t
+rte_get_tsc_cycles(void) { return rte_rdtsc(); }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CYCLES_PPC_64_H_ */
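
A small example of the usual measurement pattern built on the time-base register that rte_rdtsc() reads. It uses the glibc Power helpers __ppc_get_timebase()/__ppc_get_timebase_freq() from <sys/platform/ppc.h> rather than the header above, so it only builds on a Power/glibc system and is a sketch of the idea, not the patch's API.

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/platform/ppc.h>   /* __ppc_get_timebase(), glibc on Power */

    int main(void)
    {
        /* Same time-base register rte_rdtsc() reads, via the glibc helper so
         * the sketch builds outside a DPDK tree (Power/glibc only). */
        uint64_t start = __ppc_get_timebase();

        volatile unsigned long sink = 0;
        for (unsigned long i = 0; i < 1000000UL; i++)
            sink += i;

        uint64_t ticks = __ppc_get_timebase() - start;
        printf("loop: %llu time-base ticks (%llu ticks/s)\n",
               (unsigned long long)ticks,
               (unsigned long long)__ppc_get_timebase_freq());
        return 0;
    }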
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_memcpy.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
new file mode 100755
index 00000000..acf7aac2
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
@@ -0,0 +1,225 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) IBM Corporation 2014.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of IBM Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _RTE_MEMCPY_PPC_64_H_
+#define _RTE_MEMCPY_PPC_64_H_
+
+#include <stdint.h>
+#include <string.h>
+/* To include altivec.h, the GCC version must be >= 4.8 */
+#include <altivec.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_memcpy.h"
+
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+ vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+}
+
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+ vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+ vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+}
+
+static inline void
+rte_mov48(uint8_t *dst, const uint8_t *src)
+{
+ vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+ vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+ vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+}
+
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+ vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+ vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+ vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+ vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
+}
+
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+ vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+ vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+ vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+ vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
+ vec_vsx_st(vec_vsx_ld(64, src), 64, dst);
+ vec_vsx_st(vec_vsx_ld(80, src), 80, dst);
+ vec_vsx_st(vec_vsx_ld(96, src), 96, dst);
+ vec_vsx_st(vec_vsx_ld(112, src), 112, dst);
+}
+
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov128(dst, src);
+ rte_mov128(dst + 128, src + 128);
+}
+
+#define rte_memcpy(dst, src, n) \
+ ({ (__builtin_constant_p(n)) ? \
+ memcpy((dst), (src), (n)) : \
+ rte_memcpy_func((dst), (src), (n)); })
+
+static inline void *
+rte_memcpy_func(void *dst, const void *src, size_t n)
+{
+ void *ret = dst;
+
+ /* We can't copy < 16 bytes using vector registers, so do it manually. */
+ if (n < 16) {
+ if (n & 0x01) {
+ *(uint8_t *)dst = *(const uint8_t *)src;
+ dst = (uint8_t *)dst + 1;
+ src = (const uint8_t *)src + 1;
+ }
+ if (n & 0x02) {
+ *(uint16_t *)dst = *(const uint16_t *)src;
+ dst = (uint16_t *)dst + 1;
+ src = (const uint16_t *)src + 1;
+ }
+ if (n & 0x04) {
+ *(uint32_t *)dst = *(const uint32_t *)src;
+ dst = (uint32_t *)dst + 1;
+ src = (const uint32_t *)src + 1;
+ }
+ if (n & 0x08)
+ *(uint64_t *)dst = *(const uint64_t *)src;
+ return ret;
+ }
+
+ /* Special fast cases for <= 128 bytes */
+ if (n <= 32) {
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst - 16 + n,
+ (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+
+ if (n <= 64) {
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov32((uint8_t *)dst - 32 + n,
+ (const uint8_t *)src - 32 + n);
+ return ret;
+ }
+
+ if (n <= 128) {
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov64((uint8_t *)dst - 64 + n,
+ (const uint8_t *)src - 64 + n);
+ return ret;
+ }
+
+ /*
+ * For large copies (> 128 bytes), this combination of 256-, 64- and
+ * 16-byte copies was found to be faster than also doing 128- and
+ * 32-byte copies.
+ */
+ for ( ; n >= 256; n -= 256) {
+ rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+ dst = (uint8_t *)dst + 256;
+ src = (const uint8_t *)src + 256;
+ }
+
+ /*
+ * We split the remaining bytes (which will be less than 256) into
+ * 64-byte (2^6) chunks.
+ * Using incrementing integers in the case labels of a switch statement
+ * encourages the compiler to use a jump table. To get incrementing
+ * integers, we shift the 2 relevant bits to the LSB position to first
+ * get decrementing integers, and then subtract.
+ */
+ switch (3 - (n >> 6)) {
+ case 0x00:
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ n -= 64;
+ dst = (uint8_t *)dst + 64;
+ src = (const uint8_t *)src + 64; /* fallthrough */
+ case 0x01:
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ n -= 64;
+ dst = (uint8_t *)dst + 64;
+ src = (const uint8_t *)src + 64; /* fallthrough */
+ case 0x02:
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ n -= 64;
+ dst = (uint8_t *)dst + 64;
+ src = (const uint8_t *)src + 64; /* fallthrough */
+ default:
+ ;
+ }
+
+ /*
+ * We split the remaining bytes (which will be less than 64) into
+ * 16-byte (2^4) chunks, using the same switch structure as above.
+ */
+ switch (3 - (n >> 4)) {
+ case 0x00:
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ n -= 16;
+ dst = (uint8_t *)dst + 16;
+ src = (const uint8_t *)src + 16; /* fallthrough */
+ case 0x01:
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ n -= 16;
+ dst = (uint8_t *)dst + 16;
+ src = (const uint8_t *)src + 16; /* fallthrough */
+ case 0x02:
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ n -= 16;
+ dst = (uint8_t *)dst + 16;
+ src = (const uint8_t *)src + 16; /* fallthrough */
+ default:
+ ;
+ }
+
+ /* Copy any remaining bytes, without going beyond end of buffers */
+ if (n != 0)
+ rte_mov16((uint8_t *)dst - 16 + n,
+ (const uint8_t *)src - 16 + n);
+ return ret;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMCPY_PPC_64_H_ */
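
The "copy the first and the last 16 bytes and let them overlap" trick used for the 16 < n <= 32 case above is easy to miss; below is a standalone sketch of just that step, with memcpy() standing in for the rte_mov16() vector store. It illustrates the idea only and is not part of the patch.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* memcpy() stands in for rte_mov16(), which is a single 16-byte vector
     * store in the header above. */
    static void mov16(uint8_t *dst, const uint8_t *src)
    {
        memcpy(dst, src, 16);
    }

    /* Sketch of the tail trick used for 16 < n <= 32: copy the first 16
     * bytes, then the *last* 16 bytes, letting the two stores overlap
     * instead of branching on the exact remainder. */
    static void copy_le32(uint8_t *dst, const uint8_t *src, size_t n)
    {
        mov16(dst, src);
        mov16(dst - 16 + n, src - 16 + n);
    }

    int main(void)
    {
        uint8_t src[32], dst[32] = {0};

        for (int i = 0; i < 32; i++)
            src[i] = (uint8_t)i;

        copy_le32(dst, src, 23);               /* any 16 < n <= 32 works */
        printf("dst[22] = %d, dst[23] = %d\n", dst[22], dst[23]);  /* 22, 0 */
        return 0;
    }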
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_prefetch.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_prefetch.h
new file mode 100755
index 00000000..9df0d13c
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_prefetch.h
@@ -0,0 +1,61 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) IBM Corporation 2014.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of IBM Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _RTE_PREFETCH_PPC_64_H_
+#define _RTE_PREFETCH_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_prefetch.h"
+
+static inline void rte_prefetch0(volatile void *p)
+{
+ asm volatile ("dcbt 0,%[p],1" : : [p] "r" (p));
+}
+
+static inline void rte_prefetch1(volatile void *p)
+{
+ asm volatile ("dcbt 0,%[p],1" : : [p] "r" (p));
+}
+
+static inline void rte_prefetch2(volatile void *p)
+{
+ asm volatile ("dcbt 0,%[p],1" : : [p] "r" (p));
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PREFETCH_PPC_64_H_ */
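
A sketch of how these prefetch hints are typically used: issue the hint a few elements ahead of the element being processed. __builtin_prefetch() stands in for rte_prefetch0() so the snippet builds on any target; the dcbt-specific behaviour above is Power-only.

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for rte_prefetch0(): the header emits a "dcbt" hint, the GCC
     * builtin emits the equivalent hint on whatever target this builds for. */
    #define prefetch0(p) __builtin_prefetch((p), 0 /* read */, 3 /* high locality */)

    static long sum(const long *a, size_t n)
    {
        long s = 0;

        for (size_t i = 0; i < n; i++) {
            if (i + 8 < n)
                prefetch0(&a[i + 8]);   /* hint a few elements ahead */
            s += a[i];
        }
        return s;
    }

    int main(void)
    {
        long a[64];

        for (size_t i = 0; i < 64; i++)
            a[i] = (long)i;
        printf("sum = %ld\n", sum(a, 64));   /* 2016 */
        return 0;
    }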
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_spinlock.h b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_spinlock.h
new file mode 100755
index 00000000..cf8b81ad
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/ppc_64/rte_spinlock.h
@@ -0,0 +1,73 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) IBM Corporation 2014.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of IBM Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _RTE_SPINLOCK_PPC_64_H_
+#define _RTE_SPINLOCK_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include "generic/rte_spinlock.h"
+
+/* Fixme: Use intrinsics to implement the spinlock on Power architecture */
+
+#ifndef RTE_FORCE_INTRINSICS
+
+static inline void
+rte_spinlock_lock(rte_spinlock_t *sl)
+{
+ while (__sync_lock_test_and_set(&sl->locked, 1))
+ while (sl->locked)
+ rte_pause();
+}
+
+static inline void
+rte_spinlock_unlock(rte_spinlock_t *sl)
+{
+ __sync_lock_release(&sl->locked);
+}
+
+static inline int
+rte_spinlock_trylock(rte_spinlock_t *sl)
+{
+ return (__sync_lock_test_and_set(&sl->locked, 1) == 0);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SPINLOCK_PPC_64_H_ */
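
The sketch below shows the same test-and-set/spin-until-clear shape in use from two pthreads. lock()/unlock() mirror the functions above with the same __sync builtins (minus the rte_pause() hint inside the inner wait), so it builds without rte_spinlock.h; compile with -pthread.

    #include <pthread.h>
    #include <stdio.h>

    /* lock()/unlock() mirror the header's functions with the same builtins
     * (the header additionally calls rte_pause() while waiting). */
    static volatile int locked;
    static long counter;

    static void lock(void)
    {
        while (__sync_lock_test_and_set(&locked, 1))
            while (locked)
                ;                   /* spin until the holder releases */
    }

    static void unlock(void)
    {
        __sync_lock_release(&locked);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 100000; i++) {
            lock();
            counter++;
            unlock();
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;

        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("counter = %ld\n", counter);    /* 200000 */
        return 0;
    }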
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic.h
new file mode 100755
index 00000000..e93e8eef
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic.h
@@ -0,0 +1,216 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ATOMIC_X86_H_
+#define _RTE_ATOMIC_X86_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <emmintrin.h>
+#include "generic/rte_atomic.h"
+
+#if RTE_MAX_LCORE == 1
+#define MPLOCKED /**< No need to insert MP lock prefix. */
+#else
+#define MPLOCKED "lock ; " /**< Insert MP lock prefix. */
+#endif
+
+#define rte_mb() _mm_mfence()
+
+#define rte_wmb() _mm_sfence()
+
+#define rte_rmb() _mm_lfence()
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+ uint8_t res;
+
+ asm volatile(
+ MPLOCKED
+ "cmpxchgw %[src], %[dst];"
+ "sete %[res];"
+ : [res] "=a" (res), /* output */
+ [dst] "=m" (*dst)
+ : [src] "r" (src), /* input */
+ "a" (exp),
+ "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return res;
+}
+
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+ return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+ asm volatile(
+ MPLOCKED
+ "incw %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+}
+
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+ asm volatile(
+ MPLOCKED
+ "decw %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+}
+
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+ uint8_t ret;
+
+ asm volatile(
+ MPLOCKED
+ "incw %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+}
+
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+ uint8_t ret;
+
+ asm volatile(MPLOCKED
+ "decw %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+ uint8_t res;
+
+ asm volatile(
+ MPLOCKED
+ "cmpxchgl %[src], %[dst];"
+ "sete %[res];"
+ : [res] "=a" (res), /* output */
+ [dst] "=m" (*dst)
+ : [src] "r" (src), /* input */
+ "a" (exp),
+ "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return res;
+}
+
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+ return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+ asm volatile(
+ MPLOCKED
+ "incl %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+}
+
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+ asm volatile(
+ MPLOCKED
+ "decl %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+}
+
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+ uint8_t ret;
+
+ asm volatile(
+ MPLOCKED
+ "incl %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+}
+
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+ uint8_t ret;
+
+ asm volatile(MPLOCKED
+ "decl %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+}
+#endif
+
+#ifdef RTE_ARCH_I686
+#include "rte_atomic_32.h"
+#else
+#include "rte_atomic_64.h"
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ATOMIC_X86_H_ */
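
One common consumer of the *_dec_and_test() contract defined above is reference counting: the last owner to drop a reference frees the object. Below is a standalone sketch of that pattern, with a GCC builtin standing in for rte_atomic32_dec_and_test(); it illustrates the usage, not the header's implementation.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int32_t refcnt;
        char payload[64];
    };

    /* Drop one reference; free the object only when the counter hits zero,
     * exactly the condition *_dec_and_test() reports. */
    static void obj_put(struct obj *o)
    {
        if (__atomic_sub_fetch(&o->refcnt, 1, __ATOMIC_ACQ_REL) == 0) {
            printf("last reference dropped, freeing\n");
            free(o);
        }
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        o->refcnt = 2;            /* two owners */
        obj_put(o);               /* one reference still left */
        obj_put(o);               /* prints and frees */
        return 0;
    }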
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_32.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_32.h
new file mode 100755
index 00000000..400d8a96
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_32.h
@@ -0,0 +1,222 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Inspired from FreeBSD src/sys/i386/include/atomic.h
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ */
+
+#ifndef _RTE_ATOMIC_I686_H_
+#define _RTE_ATOMIC_I686_H_
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+ uint8_t res;
+ union {
+ struct {
+ uint32_t l32;
+ uint32_t h32;
+ };
+ uint64_t u64;
+ } _exp, _src;
+
+ _exp.u64 = exp;
+ _src.u64 = src;
+
+#ifndef __PIC__
+ asm volatile (
+ MPLOCKED
+ "cmpxchg8b (%[dst]);"
+ "setz %[res];"
+ : [res] "=a" (res) /* result in eax */
+ : [dst] "S" (dst), /* esi */
+ "b" (_src.l32), /* ebx */
+ "c" (_src.h32), /* ecx */
+ "a" (_exp.l32), /* eax */
+ "d" (_exp.h32) /* edx */
+ : "memory" ); /* no-clobber list */
+#else
+ asm volatile (
+ "mov %%ebx, %%edi\n"
+ MPLOCKED
+ "cmpxchg8b (%[dst]);"
+ "setz %[res];"
+ "xchgl %%ebx, %%edi;\n"
+ : [res] "=a" (res) /* result in eax */
+ : [dst] "S" (dst), /* esi */
+ "D" (_src.l32), /* ebx */
+ "c" (_src.h32), /* ecx */
+ "a" (_exp.l32), /* eax */
+ "d" (_exp.h32) /* edx */
+ : "memory" ); /* no-clobber list */
+#endif
+
+ return res;
+}
+
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, 0);
+ }
+}
+
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ /* replace the value by itself */
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, tmp);
+ }
+ return tmp;
+}
+
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, new_value);
+ }
+}
+
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, tmp + inc);
+ }
+}
+
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, tmp - dec);
+ }
+}
+
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+ rte_atomic64_add(v, 1);
+}
+
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+ rte_atomic64_sub(v, 1);
+}
+
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, tmp + inc);
+ }
+
+ return tmp + inc;
+}
+
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, tmp - dec);
+ }
+
+ return tmp - dec;
+}
+
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+ return rte_atomic64_add_return(v, 1) == 0;
+}
+
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+ return rte_atomic64_sub_return(v, 1) == 0;
+}
+
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+ return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+ rte_atomic64_set(v, 0);
+}
+#endif
+
+#endif /* _RTE_ATOMIC_I686_H_ */
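
Every 64-bit operation in this 32-bit variant is built from the same compare-and-swap retry loop around cmpxchg8b. The sketch below shows that shape with a portable GCC builtin, so it is an illustration of the pattern rather than the header's implementation.

    #include <stdint.h>
    #include <stdio.h>

    /* The same CAS-retry shape the i686 code above uses for every 64-bit op,
     * written with a GCC builtin instead of cmpxchg8b so it runs anywhere. */
    static void add64(volatile uint64_t *v, uint64_t inc)
    {
        uint64_t old = *v;

        /* Keep retrying until no other thread changed *v in between; on
         * failure the builtin reloads 'old' with the current value. */
        while (!__atomic_compare_exchange_n((uint64_t *)v, &old, old + inc,
                                            0, __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST))
            ;
    }

    int main(void)
    {
        volatile uint64_t cnt = 40;

        add64(&cnt, 2);
        printf("cnt = %llu\n", (unsigned long long)cnt);   /* 42 */
        return 0;
    }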
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_64.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_64.h
new file mode 100755
index 00000000..4de66000
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_atomic_64.h
@@ -0,0 +1,191 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Inspired from FreeBSD src/sys/amd64/include/atomic.h
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ */
+
+#ifndef _RTE_ATOMIC_X86_64_H_
+#define _RTE_ATOMIC_X86_64_H_
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+ uint8_t res;
+
+
+ asm volatile(
+ MPLOCKED
+ "cmpxchgq %[src], %[dst];"
+ "sete %[res];"
+ : [res] "=a" (res), /* output */
+ [dst] "=m" (*dst)
+ : [src] "r" (src), /* input */
+ "a" (exp),
+ "m" (*dst)
+ : "memory"); /* no-clobber list */
+
+ return res;
+}
+
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+ v->cnt = 0;
+}
+
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+ return v->cnt;
+}
+
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+ v->cnt = new_value;
+}
+
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+ asm volatile(
+ MPLOCKED
+ "addq %[inc], %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : [inc] "ir" (inc), /* input */
+ "m" (v->cnt)
+ );
+}
+
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+ asm volatile(
+ MPLOCKED
+ "subq %[dec], %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : [dec] "ir" (dec), /* input */
+ "m" (v->cnt)
+ );
+}
+
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+ asm volatile(
+ MPLOCKED
+ "incq %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+}
+
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+ asm volatile(
+ MPLOCKED
+ "decq %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+}
+
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+ int64_t prev = inc;
+
+ asm volatile(
+ MPLOCKED
+ "xaddq %[prev], %[cnt]"
+ : [prev] "+r" (prev), /* output */
+ [cnt] "=m" (v->cnt)
+ : "m" (v->cnt) /* input */
+ );
+ return prev + inc;
+}
+
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+ return rte_atomic64_add_return(v, -dec);
+}
+
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+ uint8_t ret;
+
+ asm volatile(
+ MPLOCKED
+ "incq %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+
+ return ret != 0;
+}
+
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+ uint8_t ret;
+
+ asm volatile(
+ MPLOCKED
+ "decq %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return ret != 0;
+}
+
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+ return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+ v->cnt = 0;
+}
+#endif
+
+#endif /* _RTE_ATOMIC_X86_64_H_ */
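
rte_atomic64_add_return() above is a fetch-and-add (xaddq) that reports the post-increment value; a typical use is handing out ordered tickets or sequence numbers. The standalone sketch below mirrors that semantics with __atomic_fetch_add() so it builds without the header.

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the add_return() semantics: fetch-and-add returns the old
     * value, so adding the increment back gives prev + inc. */
    static int64_t next_ticket(volatile int64_t *cnt)
    {
        return __atomic_fetch_add((int64_t *)cnt, 1, __ATOMIC_SEQ_CST) + 1;
    }

    int main(void)
    {
        volatile int64_t cnt = 0;
        long long a = next_ticket(&cnt);
        long long b = next_ticket(&cnt);

        printf("%lld %lld\n", a, b);   /* 1 2 */
        return 0;
    }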
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder.h
new file mode 100755
index 00000000..ffdb6ef5
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder.h
@@ -0,0 +1,125 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_BYTEORDER_X86_H_
+#define _RTE_BYTEORDER_X86_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_byteorder.h"
+
+#ifndef RTE_BYTE_ORDER
+#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN
+#endif
+
+/*
+ * An architecture-optimized byte swap for a 16-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap16().
+ */
+static inline uint16_t rte_arch_bswap16(uint16_t _x)
+{
+ register uint16_t x = _x;
+ asm volatile ("xchgb %b[x1],%h[x2]"
+ : [x1] "=Q" (x)
+ : [x2] "0" (x)
+ );
+ return x;
+}
+
+/*
+ * An architecture-optimized byte swap for a 32-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap32().
+ */
+static inline uint32_t rte_arch_bswap32(uint32_t _x)
+{
+ register uint32_t x = _x;
+ asm volatile ("bswap %[x]"
+ : [x] "+r" (x)
+ );
+ return x;
+}
+
+#ifndef RTE_FORCE_INTRINSICS
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \
+ rte_constant_bswap16(x) : \
+ rte_arch_bswap16(x)))
+
+#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ? \
+ rte_constant_bswap32(x) : \
+ rte_arch_bswap32(x)))
+
+#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ? \
+ rte_constant_bswap64(x) : \
+ rte_arch_bswap64(x)))
+#else
+/*
+ * __builtin_bswap16 is only available in gcc 4.8 and upwards
+ */
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \
+ rte_constant_bswap16(x) : \
+ rte_arch_bswap16(x)))
+#endif
+#endif
+
+#define rte_cpu_to_le_16(x) (x)
+#define rte_cpu_to_le_32(x) (x)
+#define rte_cpu_to_le_64(x) (x)
+
+#define rte_cpu_to_be_16(x) rte_bswap16(x)
+#define rte_cpu_to_be_32(x) rte_bswap32(x)
+#define rte_cpu_to_be_64(x) rte_bswap64(x)
+
+#define rte_le_to_cpu_16(x) (x)
+#define rte_le_to_cpu_32(x) (x)
+#define rte_le_to_cpu_64(x) (x)
+
+#define rte_be_to_cpu_16(x) rte_bswap16(x)
+#define rte_be_to_cpu_32(x) rte_bswap32(x)
+#define rte_be_to_cpu_64(x) rte_bswap64(x)
+
+#ifdef RTE_ARCH_I686
+#include "rte_byteorder_32.h"
+#else
+#include "rte_byteorder_64.h"
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BYTEORDER_X86_H_ */
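
The rte_bswap*() macros above use __builtin_constant_p() so that a compile-time constant is folded by the constant-swap expression while a runtime value takes the single-instruction asm path. A rough standalone mirror of that dispatch is sketched below; arch_swap32() is a hypothetical stand-in for the header's asm helper.

    #include <stdint.h>
    #include <stdio.h>

    /* Runtime path: one "bswap" instruction on x86 via the builtin. */
    static uint32_t arch_swap32(uint32_t x)
    {
        return __builtin_bswap32(x);
    }

    /* Compile-time constants fold through the shift/mask expression; runtime
     * values fall through to arch_swap32(), mirroring rte_bswap32() above. */
    #define bswap32(x) (__builtin_constant_p(x) ? \
        ((((x) & 0x000000ffu) << 24) | (((x) & 0x0000ff00u) << 8) | \
         (((x) & 0x00ff0000u) >> 8)  | (((x) & 0xff000000u) >> 24)) : \
        arch_swap32(x))

    int main(void)
    {
        uint32_t v = 0x12345678;

        printf("%#x %#x\n",
               (unsigned)bswap32(0x12345678u),   /* folded at compile time */
               (unsigned)bswap32(v));            /* swapped at run time */
        /* both print 0x78563412 */
        return 0;
    }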
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_32.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_32.h
new file mode 100755
index 00000000..51c306f8
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_32.h
@@ -0,0 +1,51 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_BYTEORDER_I686_H_
+#define _RTE_BYTEORDER_I686_H_
+
+/*
+ * An architecture-optimized byte swap for a 64-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap64().
+ */
+/* Compatibility / legacy (32-bit) mode */
+static inline uint64_t rte_arch_bswap64(uint64_t x)
+{
+ uint64_t ret = 0;
+ ret |= ((uint64_t)rte_arch_bswap32(x & 0xffffffffUL) << 32);
+ ret |= ((uint64_t)rte_arch_bswap32((x >> 32) & 0xffffffffUL));
+ return ret;
+}
+
+#endif /* _RTE_BYTEORDER_I686_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_64.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_64.h
new file mode 100755
index 00000000..dda572bd
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_byteorder_64.h
@@ -0,0 +1,52 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_BYTEORDER_X86_64_H_
+#define _RTE_BYTEORDER_X86_64_H_
+
+/*
+ * An architecture-optimized byte swap for a 64-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap64().
+ */
+/* 64-bit mode */
+static inline uint64_t rte_arch_bswap64(uint64_t _x)
+{
+ register uint64_t x = _x;
+ asm volatile ("bswap %[x]"
+ : [x] "+r" (x)
+ );
+ return x;
+}
+
+#endif /* _RTE_BYTEORDER_X86_64_H_ */
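A hypothetical caller sketch (assuming the DPDK headers are on the include path): application code normally goes through the public rte_bswap64(), which on x86-64 ends up in the single-instruction rte_arch_bswap64() above.

#include <stdio.h>
#include <stdint.h>
#include <rte_byteorder.h>

int main(void)
{
	uint64_t host = 0x1122334455667788ULL;
	/* expected output: 8877665544332211 */
	printf("%llx\n", (unsigned long long)rte_bswap64(host));
	return 0;
}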
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cpuflags.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cpuflags.h
new file mode 100755
index 00000000..a58dd7bc
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cpuflags.h
@@ -0,0 +1,310 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CPUFLAGS_X86_64_H_
+#define _RTE_CPUFLAGS_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+
+#include "generic/rte_cpuflags.h"
+
+enum rte_cpu_flag_t {
+ /* (EAX 01h) ECX features*/
+ RTE_CPUFLAG_SSE3 = 0, /**< SSE3 */
+ RTE_CPUFLAG_PCLMULQDQ, /**< PCLMULQDQ */
+ RTE_CPUFLAG_DTES64, /**< DTES64 */
+ RTE_CPUFLAG_MONITOR, /**< MONITOR */
+ RTE_CPUFLAG_DS_CPL, /**< DS_CPL */
+ RTE_CPUFLAG_VMX, /**< VMX */
+ RTE_CPUFLAG_SMX, /**< SMX */
+ RTE_CPUFLAG_EIST, /**< EIST */
+ RTE_CPUFLAG_TM2, /**< TM2 */
+ RTE_CPUFLAG_SSSE3, /**< SSSE3 */
+ RTE_CPUFLAG_CNXT_ID, /**< CNXT_ID */
+ RTE_CPUFLAG_FMA, /**< FMA */
+ RTE_CPUFLAG_CMPXCHG16B, /**< CMPXCHG16B */
+ RTE_CPUFLAG_XTPR, /**< XTPR */
+ RTE_CPUFLAG_PDCM, /**< PDCM */
+ RTE_CPUFLAG_PCID, /**< PCID */
+ RTE_CPUFLAG_DCA, /**< DCA */
+ RTE_CPUFLAG_SSE4_1, /**< SSE4_1 */
+ RTE_CPUFLAG_SSE4_2, /**< SSE4_2 */
+ RTE_CPUFLAG_X2APIC, /**< X2APIC */
+ RTE_CPUFLAG_MOVBE, /**< MOVBE */
+ RTE_CPUFLAG_POPCNT, /**< POPCNT */
+ RTE_CPUFLAG_TSC_DEADLINE, /**< TSC_DEADLINE */
+ RTE_CPUFLAG_AES, /**< AES */
+ RTE_CPUFLAG_XSAVE, /**< XSAVE */
+ RTE_CPUFLAG_OSXSAVE, /**< OSXSAVE */
+ RTE_CPUFLAG_AVX, /**< AVX */
+ RTE_CPUFLAG_F16C, /**< F16C */
+ RTE_CPUFLAG_RDRAND, /**< RDRAND */
+
+ /* (EAX 01h) EDX features */
+ RTE_CPUFLAG_FPU, /**< FPU */
+ RTE_CPUFLAG_VME, /**< VME */
+ RTE_CPUFLAG_DE, /**< DE */
+ RTE_CPUFLAG_PSE, /**< PSE */
+ RTE_CPUFLAG_TSC, /**< TSC */
+ RTE_CPUFLAG_MSR, /**< MSR */
+ RTE_CPUFLAG_PAE, /**< PAE */
+ RTE_CPUFLAG_MCE, /**< MCE */
+ RTE_CPUFLAG_CX8, /**< CX8 */
+ RTE_CPUFLAG_APIC, /**< APIC */
+ RTE_CPUFLAG_SEP, /**< SEP */
+ RTE_CPUFLAG_MTRR, /**< MTRR */
+ RTE_CPUFLAG_PGE, /**< PGE */
+ RTE_CPUFLAG_MCA, /**< MCA */
+ RTE_CPUFLAG_CMOV, /**< CMOV */
+ RTE_CPUFLAG_PAT, /**< PAT */
+ RTE_CPUFLAG_PSE36, /**< PSE36 */
+ RTE_CPUFLAG_PSN, /**< PSN */
+ RTE_CPUFLAG_CLFSH, /**< CLFSH */
+ RTE_CPUFLAG_DS, /**< DS */
+ RTE_CPUFLAG_ACPI, /**< ACPI */
+ RTE_CPUFLAG_MMX, /**< MMX */
+ RTE_CPUFLAG_FXSR, /**< FXSR */
+ RTE_CPUFLAG_SSE, /**< SSE */
+ RTE_CPUFLAG_SSE2, /**< SSE2 */
+ RTE_CPUFLAG_SS, /**< SS */
+ RTE_CPUFLAG_HTT, /**< HTT */
+ RTE_CPUFLAG_TM, /**< TM */
+ RTE_CPUFLAG_PBE, /**< PBE */
+
+ /* (EAX 06h) EAX features */
+ RTE_CPUFLAG_DIGTEMP, /**< DIGTEMP */
+ RTE_CPUFLAG_TRBOBST, /**< TRBOBST */
+ RTE_CPUFLAG_ARAT, /**< ARAT */
+ RTE_CPUFLAG_PLN, /**< PLN */
+ RTE_CPUFLAG_ECMD, /**< ECMD */
+ RTE_CPUFLAG_PTM, /**< PTM */
+
+ /* (EAX 06h) ECX features */
+ RTE_CPUFLAG_MPERF_APERF_MSR, /**< MPERF_APERF_MSR */
+ RTE_CPUFLAG_ACNT2, /**< ACNT2 */
+ RTE_CPUFLAG_ENERGY_EFF, /**< ENERGY_EFF */
+
+ /* (EAX 07h, ECX 0h) EBX features */
+ RTE_CPUFLAG_FSGSBASE, /**< FSGSBASE */
+ RTE_CPUFLAG_BMI1, /**< BMI1 */
+ RTE_CPUFLAG_HLE, /**< Hardware Lock elision */
+ RTE_CPUFLAG_AVX2, /**< AVX2 */
+ RTE_CPUFLAG_SMEP, /**< SMEP */
+ RTE_CPUFLAG_BMI2, /**< BMI2 */
+ RTE_CPUFLAG_ERMS, /**< ERMS */
+ RTE_CPUFLAG_INVPCID, /**< INVPCID */
+ RTE_CPUFLAG_RTM, /**< Transactional memory */
+
+ /* (EAX 80000001h) ECX features */
+ RTE_CPUFLAG_LAHF_SAHF, /**< LAHF_SAHF */
+ RTE_CPUFLAG_LZCNT, /**< LZCNT */
+
+ /* (EAX 80000001h) EDX features */
+ RTE_CPUFLAG_SYSCALL, /**< SYSCALL */
+ RTE_CPUFLAG_XD, /**< XD */
+ RTE_CPUFLAG_1GB_PG, /**< 1GB_PG */
+ RTE_CPUFLAG_RDTSCP, /**< RDTSCP */
+ RTE_CPUFLAG_EM64T, /**< EM64T */
+
+ /* (EAX 80000007h) EDX features */
+ RTE_CPUFLAG_INVTSC, /**< INVTSC */
+
+ /* The last item */
+ RTE_CPUFLAG_NUMFLAGS, /**< This should always be the last! */
+};
+
+enum cpu_register_t {
+ REG_EAX = 0,
+ REG_EBX,
+ REG_ECX,
+ REG_EDX,
+};
+
+static const struct feature_entry cpu_feature_table[] = {
+ FEAT_DEF(SSE3, 0x00000001, 0, REG_ECX, 0)
+ FEAT_DEF(PCLMULQDQ, 0x00000001, 0, REG_ECX, 1)
+ FEAT_DEF(DTES64, 0x00000001, 0, REG_ECX, 2)
+ FEAT_DEF(MONITOR, 0x00000001, 0, REG_ECX, 3)
+ FEAT_DEF(DS_CPL, 0x00000001, 0, REG_ECX, 4)
+ FEAT_DEF(VMX, 0x00000001, 0, REG_ECX, 5)
+ FEAT_DEF(SMX, 0x00000001, 0, REG_ECX, 6)
+ FEAT_DEF(EIST, 0x00000001, 0, REG_ECX, 7)
+ FEAT_DEF(TM2, 0x00000001, 0, REG_ECX, 8)
+ FEAT_DEF(SSSE3, 0x00000001, 0, REG_ECX, 9)
+ FEAT_DEF(CNXT_ID, 0x00000001, 0, REG_ECX, 10)
+ FEAT_DEF(FMA, 0x00000001, 0, REG_ECX, 12)
+ FEAT_DEF(CMPXCHG16B, 0x00000001, 0, REG_ECX, 13)
+ FEAT_DEF(XTPR, 0x00000001, 0, REG_ECX, 14)
+ FEAT_DEF(PDCM, 0x00000001, 0, REG_ECX, 15)
+ FEAT_DEF(PCID, 0x00000001, 0, REG_ECX, 17)
+ FEAT_DEF(DCA, 0x00000001, 0, REG_ECX, 18)
+ FEAT_DEF(SSE4_1, 0x00000001, 0, REG_ECX, 19)
+ FEAT_DEF(SSE4_2, 0x00000001, 0, REG_ECX, 20)
+ FEAT_DEF(X2APIC, 0x00000001, 0, REG_ECX, 21)
+ FEAT_DEF(MOVBE, 0x00000001, 0, REG_ECX, 22)
+ FEAT_DEF(POPCNT, 0x00000001, 0, REG_ECX, 23)
+ FEAT_DEF(TSC_DEADLINE, 0x00000001, 0, REG_ECX, 24)
+ FEAT_DEF(AES, 0x00000001, 0, REG_ECX, 25)
+ FEAT_DEF(XSAVE, 0x00000001, 0, REG_ECX, 26)
+ FEAT_DEF(OSXSAVE, 0x00000001, 0, REG_ECX, 27)
+ FEAT_DEF(AVX, 0x00000001, 0, REG_ECX, 28)
+ FEAT_DEF(F16C, 0x00000001, 0, REG_ECX, 29)
+ FEAT_DEF(RDRAND, 0x00000001, 0, REG_ECX, 30)
+
+ FEAT_DEF(FPU, 0x00000001, 0, REG_EDX, 0)
+ FEAT_DEF(VME, 0x00000001, 0, REG_EDX, 1)
+ FEAT_DEF(DE, 0x00000001, 0, REG_EDX, 2)
+ FEAT_DEF(PSE, 0x00000001, 0, REG_EDX, 3)
+ FEAT_DEF(TSC, 0x00000001, 0, REG_EDX, 4)
+ FEAT_DEF(MSR, 0x00000001, 0, REG_EDX, 5)
+ FEAT_DEF(PAE, 0x00000001, 0, REG_EDX, 6)
+ FEAT_DEF(MCE, 0x00000001, 0, REG_EDX, 7)
+ FEAT_DEF(CX8, 0x00000001, 0, REG_EDX, 8)
+ FEAT_DEF(APIC, 0x00000001, 0, REG_EDX, 9)
+ FEAT_DEF(SEP, 0x00000001, 0, REG_EDX, 11)
+ FEAT_DEF(MTRR, 0x00000001, 0, REG_EDX, 12)
+ FEAT_DEF(PGE, 0x00000001, 0, REG_EDX, 13)
+ FEAT_DEF(MCA, 0x00000001, 0, REG_EDX, 14)
+ FEAT_DEF(CMOV, 0x00000001, 0, REG_EDX, 15)
+ FEAT_DEF(PAT, 0x00000001, 0, REG_EDX, 16)
+ FEAT_DEF(PSE36, 0x00000001, 0, REG_EDX, 17)
+ FEAT_DEF(PSN, 0x00000001, 0, REG_EDX, 18)
+ FEAT_DEF(CLFSH, 0x00000001, 0, REG_EDX, 19)
+ FEAT_DEF(DS, 0x00000001, 0, REG_EDX, 21)
+ FEAT_DEF(ACPI, 0x00000001, 0, REG_EDX, 22)
+ FEAT_DEF(MMX, 0x00000001, 0, REG_EDX, 23)
+ FEAT_DEF(FXSR, 0x00000001, 0, REG_EDX, 24)
+ FEAT_DEF(SSE, 0x00000001, 0, REG_EDX, 25)
+ FEAT_DEF(SSE2, 0x00000001, 0, REG_EDX, 26)
+ FEAT_DEF(SS, 0x00000001, 0, REG_EDX, 27)
+ FEAT_DEF(HTT, 0x00000001, 0, REG_EDX, 28)
+ FEAT_DEF(TM, 0x00000001, 0, REG_EDX, 29)
+ FEAT_DEF(PBE, 0x00000001, 0, REG_EDX, 31)
+
+ FEAT_DEF(DIGTEMP, 0x00000006, 0, REG_EAX, 0)
+ FEAT_DEF(TRBOBST, 0x00000006, 0, REG_EAX, 1)
+ FEAT_DEF(ARAT, 0x00000006, 0, REG_EAX, 2)
+ FEAT_DEF(PLN, 0x00000006, 0, REG_EAX, 4)
+ FEAT_DEF(ECMD, 0x00000006, 0, REG_EAX, 5)
+ FEAT_DEF(PTM, 0x00000006, 0, REG_EAX, 6)
+
+ FEAT_DEF(MPERF_APERF_MSR, 0x00000006, 0, REG_ECX, 0)
+ FEAT_DEF(ACNT2, 0x00000006, 0, REG_ECX, 1)
+ FEAT_DEF(ENERGY_EFF, 0x00000006, 0, REG_ECX, 3)
+
+ FEAT_DEF(FSGSBASE, 0x00000007, 0, REG_EBX, 0)
+ FEAT_DEF(BMI1, 0x00000007, 0, REG_EBX, 2)
+ FEAT_DEF(HLE, 0x00000007, 0, REG_EBX, 4)
+ FEAT_DEF(AVX2, 0x00000007, 0, REG_EBX, 5)
+ FEAT_DEF(SMEP, 0x00000007, 0, REG_EBX, 6)
+ FEAT_DEF(BMI2, 0x00000007, 0, REG_EBX, 7)
+ FEAT_DEF(ERMS, 0x00000007, 0, REG_EBX, 8)
+ FEAT_DEF(INVPCID, 0x00000007, 0, REG_EBX, 10)
+ FEAT_DEF(RTM, 0x00000007, 0, REG_EBX, 11)
+
+ FEAT_DEF(LAHF_SAHF, 0x80000001, 0, REG_ECX, 0)
+ FEAT_DEF(LZCNT, 0x80000001, 0, REG_ECX, 4)
+
+ FEAT_DEF(SYSCALL, 0x80000001, 0, REG_EDX, 11)
+ FEAT_DEF(XD, 0x80000001, 0, REG_EDX, 20)
+ FEAT_DEF(1GB_PG, 0x80000001, 0, REG_EDX, 26)
+ FEAT_DEF(RDTSCP, 0x80000001, 0, REG_EDX, 27)
+ FEAT_DEF(EM64T, 0x80000001, 0, REG_EDX, 29)
+
+ FEAT_DEF(INVTSC, 0x80000007, 0, REG_EDX, 8)
+};
+
+static inline void
+rte_cpu_get_features(uint32_t leaf, uint32_t subleaf, cpuid_registers_t out)
+{
+#if defined(__i386__) && defined(__PIC__)
+ /* %ebx is a forbidden register if we compile with -fPIC or -fPIE */
+ asm volatile("movl %%ebx,%0 ; cpuid ; xchgl %%ebx,%0"
+ : "=r" (out[REG_EBX]),
+ "=a" (out[REG_EAX]),
+ "=c" (out[REG_ECX]),
+ "=d" (out[REG_EDX])
+ : "a" (leaf), "c" (subleaf));
+#else
+
+ asm volatile("cpuid"
+ : "=a" (out[REG_EAX]),
+ "=b" (out[REG_EBX]),
+ "=c" (out[REG_ECX]),
+ "=d" (out[REG_EDX])
+ : "a" (leaf), "c" (subleaf));
+
+#endif
+}
+
+static inline int
+rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature)
+{
+ const struct feature_entry *feat;
+ cpuid_registers_t regs;
+
+
+ if (feature >= RTE_CPUFLAG_NUMFLAGS)
+ /* Flag does not match anything in the feature tables */
+ return -ENOENT;
+
+ feat = &cpu_feature_table[feature];
+
+ if (!feat->leaf)
+ /* This entry in the table wasn't filled out! */
+ return -EFAULT;
+
+ rte_cpu_get_features(feat->leaf & 0xffff0000, 0, regs);
+ if (((regs[REG_EAX] ^ feat->leaf) & 0xffff0000) ||
+ regs[REG_EAX] < feat->leaf)
+ return 0;
+
+ /* get the cpuid leaf containing the desired feature */
+ rte_cpu_get_features(feat->leaf, feat->subleaf, regs);
+
+ /* check if the feature is enabled */
+ return (regs[feat->reg] >> feat->bit) & 1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CPUFLAGS_X86_64_H_ */
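A hypothetical usage sketch: selecting a code path from the flag table above. As defined in this header, rte_cpu_get_flag_enabled() returns 1 or 0 for a known flag and a negative errno-style value for a bad lookup.

#include <stdio.h>
#include <rte_cpuflags.h>

int main(void)
{
	int ret = rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES);
	if (ret < 0)
		printf("lookup failed: %d\n", ret);  /* -ENOENT or -EFAULT, see above */
	else
		printf("AES-NI %s\n", ret ? "available" : "not available");
	return 0;
}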
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cycles.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cycles.h
new file mode 100755
index 00000000..6e3c7d89
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_cycles.h
@@ -0,0 +1,121 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* BSD LICENSE
+ *
+ * Copyright(c) 2013 6WIND.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CYCLES_X86_64_H_
+#define _RTE_CYCLES_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_cycles.h"
+
+#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
+/* Global switch to use the VMware mapping of the TSC instead of RDTSC */
+extern int rte_cycles_vmware_tsc_map;
+#include <rte_branch_prediction.h>
+#endif
+
+static inline uint64_t
+rte_rdtsc(void)
+{
+ union {
+ uint64_t tsc_64;
+ struct {
+ uint32_t lo_32;
+ uint32_t hi_32;
+ };
+ } tsc;
+
+#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
+ if (unlikely(rte_cycles_vmware_tsc_map)) {
+ /* ecx = 0x10000 corresponds to the physical TSC for VMware */
+ asm volatile("rdpmc" :
+ "=a" (tsc.lo_32),
+ "=d" (tsc.hi_32) :
+ "c"(0x10000));
+ return tsc.tsc_64;
+ }
+#endif
+
+ asm volatile("rdtsc" :
+ "=a" (tsc.lo_32),
+ "=d" (tsc.hi_32));
+ return tsc.tsc_64;
+}
+
+static inline uint64_t
+rte_rdtsc_precise(void)
+{
+ rte_mb();
+ return rte_rdtsc();
+}
+
+static inline uint64_t
+rte_get_tsc_cycles(void) { return rte_rdtsc(); }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CYCLES_X86_64_H_ */
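A hypothetical timing sketch using the functions above: rte_rdtsc_precise() issues rte_mb() before reading the TSC, so it is the safer choice at the start of a measured region.

#include <stdio.h>
#include <stdint.h>
#include <rte_cycles.h>

int main(void)
{
	uint64_t start = rte_rdtsc_precise();  /* serialized TSC read */
	/* ... region to measure ... */
	uint64_t elapsed = rte_rdtsc() - start;
	printf("elapsed: %llu cycles\n", (unsigned long long)elapsed);
	return 0;
}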
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_memcpy.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_memcpy.h
new file mode 100755
index 00000000..fb9eba87
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_memcpy.h
@@ -0,0 +1,297 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMCPY_X86_64_H_
+#define _RTE_MEMCPY_X86_64_H_
+
+#include <stdint.h>
+#include <string.h>
+#include <emmintrin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_memcpy.h"
+
+#ifdef __INTEL_COMPILER
+#pragma warning(disable:593) /* Stop unused variable warning (reg_a etc). */
+#endif
+
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+ __m128i reg_a;
+ asm volatile (
+ "movdqu (%[src]), %[reg_a]\n\t"
+ "movdqu %[reg_a], (%[dst])\n\t"
+ : [reg_a] "=x" (reg_a)
+ : [src] "r" (src),
+ [dst] "r"(dst)
+ : "memory"
+ );
+}
+
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+ __m128i reg_a, reg_b;
+ asm volatile (
+ "movdqu (%[src]), %[reg_a]\n\t"
+ "movdqu 16(%[src]), %[reg_b]\n\t"
+ "movdqu %[reg_a], (%[dst])\n\t"
+ "movdqu %[reg_b], 16(%[dst])\n\t"
+ : [reg_a] "=x" (reg_a),
+ [reg_b] "=x" (reg_b)
+ : [src] "r" (src),
+ [dst] "r"(dst)
+ : "memory"
+ );
+}
+
+static inline void
+rte_mov48(uint8_t *dst, const uint8_t *src)
+{
+ __m128i reg_a, reg_b, reg_c;
+ asm volatile (
+ "movdqu (%[src]), %[reg_a]\n\t"
+ "movdqu 16(%[src]), %[reg_b]\n\t"
+ "movdqu 32(%[src]), %[reg_c]\n\t"
+ "movdqu %[reg_a], (%[dst])\n\t"
+ "movdqu %[reg_b], 16(%[dst])\n\t"
+ "movdqu %[reg_c], 32(%[dst])\n\t"
+ : [reg_a] "=x" (reg_a),
+ [reg_b] "=x" (reg_b),
+ [reg_c] "=x" (reg_c)
+ : [src] "r" (src),
+ [dst] "r"(dst)
+ : "memory"
+ );
+}
+
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+ __m128i reg_a, reg_b, reg_c, reg_d;
+ asm volatile (
+ "movdqu (%[src]), %[reg_a]\n\t"
+ "movdqu 16(%[src]), %[reg_b]\n\t"
+ "movdqu 32(%[src]), %[reg_c]\n\t"
+ "movdqu 48(%[src]), %[reg_d]\n\t"
+ "movdqu %[reg_a], (%[dst])\n\t"
+ "movdqu %[reg_b], 16(%[dst])\n\t"
+ "movdqu %[reg_c], 32(%[dst])\n\t"
+ "movdqu %[reg_d], 48(%[dst])\n\t"
+ : [reg_a] "=x" (reg_a),
+ [reg_b] "=x" (reg_b),
+ [reg_c] "=x" (reg_c),
+ [reg_d] "=x" (reg_d)
+ : [src] "r" (src),
+ [dst] "r"(dst)
+ : "memory"
+ );
+}
+
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+ __m128i reg_a, reg_b, reg_c, reg_d, reg_e, reg_f, reg_g, reg_h;
+ asm volatile (
+ "movdqu (%[src]), %[reg_a]\n\t"
+ "movdqu 16(%[src]), %[reg_b]\n\t"
+ "movdqu 32(%[src]), %[reg_c]\n\t"
+ "movdqu 48(%[src]), %[reg_d]\n\t"
+ "movdqu 64(%[src]), %[reg_e]\n\t"
+ "movdqu 80(%[src]), %[reg_f]\n\t"
+ "movdqu 96(%[src]), %[reg_g]\n\t"
+ "movdqu 112(%[src]), %[reg_h]\n\t"
+ "movdqu %[reg_a], (%[dst])\n\t"
+ "movdqu %[reg_b], 16(%[dst])\n\t"
+ "movdqu %[reg_c], 32(%[dst])\n\t"
+ "movdqu %[reg_d], 48(%[dst])\n\t"
+ "movdqu %[reg_e], 64(%[dst])\n\t"
+ "movdqu %[reg_f], 80(%[dst])\n\t"
+ "movdqu %[reg_g], 96(%[dst])\n\t"
+ "movdqu %[reg_h], 112(%[dst])\n\t"
+ : [reg_a] "=x" (reg_a),
+ [reg_b] "=x" (reg_b),
+ [reg_c] "=x" (reg_c),
+ [reg_d] "=x" (reg_d),
+ [reg_e] "=x" (reg_e),
+ [reg_f] "=x" (reg_f),
+ [reg_g] "=x" (reg_g),
+ [reg_h] "=x" (reg_h)
+ : [src] "r" (src),
+ [dst] "r"(dst)
+ : "memory"
+ );
+}
+
+#ifdef __INTEL_COMPILER
+#pragma warning(enable:593)
+#endif
+
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+ rte_mov128(dst, src);
+ rte_mov128(dst + 128, src + 128);
+}
+
+#define rte_memcpy(dst, src, n) \
+ ({ (__builtin_constant_p(n)) ? \
+ memcpy((dst), (src), (n)) : \
+ rte_memcpy_func((dst), (src), (n)); })
+
+static inline void *
+rte_memcpy_func(void *dst, const void *src, size_t n)
+{
+ void *ret = dst;
+
+ /* We can't copy < 16 bytes using XMM registers so do it manually. */
+ if (n < 16) {
+ if (n & 0x01) {
+ *(uint8_t *)dst = *(const uint8_t *)src;
+ dst = (uint8_t *)dst + 1;
+ src = (const uint8_t *)src + 1;
+ }
+ if (n & 0x02) {
+ *(uint16_t *)dst = *(const uint16_t *)src;
+ dst = (uint16_t *)dst + 1;
+ src = (const uint16_t *)src + 1;
+ }
+ if (n & 0x04) {
+ *(uint32_t *)dst = *(const uint32_t *)src;
+ dst = (uint32_t *)dst + 1;
+ src = (const uint32_t *)src + 1;
+ }
+ if (n & 0x08) {
+ *(uint64_t *)dst = *(const uint64_t *)src;
+ }
+ return ret;
+ }
+
+ /* Special fast cases for <= 128 bytes */
+ if (n <= 32) {
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+
+ if (n <= 64) {
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ return ret;
+ }
+
+ if (n <= 128) {
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov64((uint8_t *)dst - 64 + n, (const uint8_t *)src - 64 + n);
+ return ret;
+ }
+
+ /*
+	 * For large copies of more than 128 bytes, this combination of 256-,
+	 * 64- and 16-byte copies was found to be faster than doing 128- and
+	 * 32-byte copies.
+ */
+ for ( ; n >= 256; n -= 256) {
+ rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+ dst = (uint8_t *)dst + 256;
+ src = (const uint8_t *)src + 256;
+ }
+
+ /*
+ * We split the remaining bytes (which will be less than 256) into
+	 * 64-byte (2^6) chunks.
+	 * Using incrementing integers in the case labels of a switch statement
+	 * encourages the compiler to use a jump table. To get incrementing
+ * integers, we shift the 2 relevant bits to the LSB position to first
+ * get decrementing integers, and then subtract.
+ */
+ switch (3 - (n >> 6)) {
+ case 0x00:
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ n -= 64;
+ dst = (uint8_t *)dst + 64;
+ src = (const uint8_t *)src + 64; /* fallthrough */
+ case 0x01:
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ n -= 64;
+ dst = (uint8_t *)dst + 64;
+ src = (const uint8_t *)src + 64; /* fallthrough */
+ case 0x02:
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ n -= 64;
+ dst = (uint8_t *)dst + 64;
+ src = (const uint8_t *)src + 64; /* fallthrough */
+ default:
+ ;
+ }
+
+ /*
+ * We split the remaining bytes (which will be less than 64) into
+	 * 16-byte (2^4) chunks, using the same switch structure as above.
+ */
+ switch (3 - (n >> 4)) {
+ case 0x00:
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ n -= 16;
+ dst = (uint8_t *)dst + 16;
+ src = (const uint8_t *)src + 16; /* fallthrough */
+ case 0x01:
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ n -= 16;
+ dst = (uint8_t *)dst + 16;
+ src = (const uint8_t *)src + 16; /* fallthrough */
+ case 0x02:
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ n -= 16;
+ dst = (uint8_t *)dst + 16;
+ src = (const uint8_t *)src + 16; /* fallthrough */
+ default:
+ ;
+ }
+
+ /* Copy any remaining bytes, without going beyond end of buffers */
+ if (n != 0) {
+ rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ }
+ return ret;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMCPY_X86_64_H_ */
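A hypothetical caller sketch: the rte_memcpy() macro above resolves a compile-time-constant length to plain memcpy() (letting the compiler inline it) and a runtime length to the SSE-based rte_memcpy_func().

#include <stdint.h>
#include <stddef.h>
#include <rte_memcpy.h>

void copy_frames(uint8_t *dst, const uint8_t *src, size_t frame_len)
{
	rte_memcpy(dst, src, 64);          /* constant n: expands to memcpy() */
	rte_memcpy(dst, src, frame_len);   /* runtime n: calls rte_memcpy_func() */
}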
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_prefetch.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_prefetch.h
new file mode 100755
index 00000000..ec2454dc
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_prefetch.h
@@ -0,0 +1,62 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PREFETCH_X86_64_H_
+#define _RTE_PREFETCH_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_prefetch.h"
+
+static inline void rte_prefetch0(volatile void *p)
+{
+ asm volatile ("prefetcht0 %[p]" : [p] "+m" (*(volatile char *)p));
+}
+
+static inline void rte_prefetch1(volatile void *p)
+{
+ asm volatile ("prefetcht1 %[p]" : [p] "+m" (*(volatile char *)p));
+}
+
+static inline void rte_prefetch2(volatile void *p)
+{
+ asm volatile ("prefetcht2 %[p]" : [p] "+m" (*(volatile char *)p));
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PREFETCH_X86_64_H_ */
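A hypothetical loop sketch: issue a prefetch for the next buffer while the current one is processed, the typical use of rte_prefetch0() in packet-processing loops. struct pkt and handle() are placeholders.

#include <stdint.h>
#include <rte_prefetch.h>

struct pkt { uint8_t data[64]; };               /* placeholder packet type */

static void handle(struct pkt *p) { (void)p; }  /* placeholder per-packet work */

void process_all(struct pkt *pkts[], int n)
{
	for (int i = 0; i < n; i++) {
		if (i + 1 < n)
			rte_prefetch0(pkts[i + 1]); /* pull the next buffer toward L1 */
		handle(pkts[i]);
	}
}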
diff --git a/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_spinlock.h b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_spinlock.h
new file mode 100755
index 00000000..54fba957
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/arch/x86/rte_spinlock.h
@@ -0,0 +1,94 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SPINLOCK_X86_64_H_
+#define _RTE_SPINLOCK_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_spinlock.h"
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline void
+rte_spinlock_lock(rte_spinlock_t *sl)
+{
+ int lock_val = 1;
+ asm volatile (
+ "1:\n"
+ "xchg %[locked], %[lv]\n"
+ "test %[lv], %[lv]\n"
+ "jz 3f\n"
+ "2:\n"
+ "pause\n"
+ "cmpl $0, %[locked]\n"
+ "jnz 2b\n"
+ "jmp 1b\n"
+ "3:\n"
+ : [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
+ : "[lv]" (lock_val)
+ : "memory");
+}
+
+static inline void
+rte_spinlock_unlock (rte_spinlock_t *sl)
+{
+ int unlock_val = 0;
+ asm volatile (
+ "xchg %[locked], %[ulv]\n"
+ : [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
+ : "[ulv]" (unlock_val)
+ : "memory");
+}
+
+static inline int
+rte_spinlock_trylock (rte_spinlock_t *sl)
+{
+ int lockval = 1;
+
+ asm volatile (
+ "xchg %[locked], %[lockval]"
+ : [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
+ : "[lockval]" (lockval)
+ : "memory");
+
+ return (lockval == 0);
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SPINLOCK_X86_64_H_ */
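A hypothetical usage sketch for the xchg-based lock above; rte_spinlock_t and rte_spinlock_init() come from the generic header this file includes.

#include <stdint.h>
#include <rte_spinlock.h>

static rte_spinlock_t lock;
static uint64_t shared_count;

void counter_setup(void)
{
	rte_spinlock_init(&lock);
}

void counter_bump(void)
{
	rte_spinlock_lock(&lock);     /* spins with pause until the xchg wins */
	shared_count++;
	rte_spinlock_unlock(&lock);
}

int counter_try_bump(void)
{
	if (!rte_spinlock_trylock(&lock))
		return 0;             /* lock was busy */
	shared_count++;
	rte_spinlock_unlock(&lock);
	return 1;
}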
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_atomic.h b/src/dpdk_lib18/librte_eal/common/include/generic/rte_atomic.h
new file mode 100755
index 00000000..6c7581ad
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/generic/rte_atomic.h
@@ -0,0 +1,918 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ATOMIC_H_
+#define _RTE_ATOMIC_H_
+
+/**
+ * @file
+ * Atomic Operations
+ *
+ * This file defines a generic API for atomic operations.
+ */
+
+#include <stdint.h>
+
+#ifdef __DOXYGEN__
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ * This function is architecture dependent.
+ */
+static inline void rte_mb(void);
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ * This function is architecture dependent.
+ */
+static inline void rte_wmb(void);
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ * This function is architecture dependent.
+ */
+static inline void rte_rmb(void);
+
+#endif /* __DOXYGEN__ */
+
+/**
+ * Compiler barrier.
+ *
+ * Guarantees that operation reordering does not occur at compile time
+ * for operations directly before and after the barrier.
+ */
+#define rte_compiler_barrier() do { \
+ asm volatile ("" : : : "memory"); \
+} while(0)
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ * if (*dst == exp)
+ * *dst = src (all 16-bit words)
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param exp
+ * The expected value.
+ * @param src
+ * The new value.
+ * @return
+ * Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+ return __sync_bool_compare_and_swap(dst, exp, src);
+}
+#endif
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+ volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC16_INIT(val) { (val) }
+
+/**
+ * Initialize an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_init(rte_atomic16_t *v)
+{
+ v->cnt = 0;
+}
+
+/**
+ * Atomically read a 16-bit value from a counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * The value of the counter.
+ */
+static inline int16_t
+rte_atomic16_read(const rte_atomic16_t *v)
+{
+ return v->cnt;
+}
+
+/**
+ * Atomically set a counter to a 16-bit value.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param new_value
+ * The new value for the counter.
+ */
+static inline void
+rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
+{
+ v->cnt = new_value;
+}
+
+/**
+ * Atomically add a 16-bit value to an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ */
+static inline void
+rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
+{
+ __sync_fetch_and_add(&v->cnt, inc);
+}
+
+/**
+ * Atomically subtract a 16-bit value from an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
+{
+ __sync_fetch_and_sub(&v->cnt, dec);
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+ rte_atomic16_add(v, 1);
+}
+#endif
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+ rte_atomic16_sub(v, 1);
+}
+#endif
+
+/**
+ * Atomically add a 16-bit value to a counter and return the result.
+ *
+ * Atomically adds the 16-bit value (inc) to the atomic counter (v) and
+ * returns the value of v after addition.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ * @return
+ * The value of v after the addition.
+ */
+static inline int16_t
+rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
+{
+ return __sync_add_and_fetch(&v->cnt, inc);
+}
+
+/**
+ * Atomically subtract a 16-bit value from a counter and return
+ * the result.
+ *
+ * Atomically subtracts the 16-bit value (dec) from the atomic counter
+ * (v) and returns the value of v after the subtraction.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ * @return
+ * The value of v after the subtraction.
+ */
+static inline int16_t
+rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
+{
+ return __sync_sub_and_fetch(&v->cnt, dec);
+}
+
+/**
+ * Atomically increment a 16-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+ return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+}
+#endif
+
+/**
+ * Atomically decrement a 16-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+ return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+}
+#endif
+
+/**
+ * Atomically test and set a 16-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * 0 if failed; else 1, success.
+ */
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+ return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+#endif
+
+/**
+ * Atomically set a 16-bit counter to 0.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void rte_atomic16_clear(rte_atomic16_t *v)
+{
+ v->cnt = 0;
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ * if (*dst == exp)
+ * *dst = src (all 32-bit words)
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param exp
+ * The expected value.
+ * @param src
+ * The new value.
+ * @return
+ * Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+ return __sync_bool_compare_and_swap(dst, exp, src);
+}
+#endif
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+ volatile int32_t cnt; /**< An internal counter value. */
+} rte_atomic32_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC32_INIT(val) { (val) }
+
+/**
+ * Initialize an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_init(rte_atomic32_t *v)
+{
+ v->cnt = 0;
+}
+
+/**
+ * Atomically read a 32-bit value from a counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * The value of the counter.
+ */
+static inline int32_t
+rte_atomic32_read(const rte_atomic32_t *v)
+{
+ return v->cnt;
+}
+
+/**
+ * Atomically set a counter to a 32-bit value.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param new_value
+ * The new value for the counter.
+ */
+static inline void
+rte_atomic32_set(rte_atomic32_t *v, int32_t new_value)
+{
+ v->cnt = new_value;
+}
+
+/**
+ * Atomically add a 32-bit value to an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ */
+static inline void
+rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
+{
+ __sync_fetch_and_add(&v->cnt, inc);
+}
+
+/**
+ * Atomically subtract a 32-bit value from an atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
+{
+ __sync_fetch_and_sub(&v->cnt, dec);
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+ rte_atomic32_add(v, 1);
+}
+#endif
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+	rte_atomic32_sub(v, 1);
+}
+#endif
+
+/**
+ * Atomically add a 32-bit value to a counter and return the result.
+ *
+ * Atomically adds the 32-bit value (inc) to the atomic counter (v) and
+ * returns the value of v after addition.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ * @return
+ * The value of v after the addition.
+ */
+static inline int32_t
+rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
+{
+ return __sync_add_and_fetch(&v->cnt, inc);
+}
+
+/**
+ * Atomically subtract a 32-bit value from a counter and return
+ * the result.
+ *
+ * Atomically subtracts the 32-bit value (dec) from the atomic counter
+ * (v) and returns the value of v after the subtraction.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ * @return
+ * The value of v after the subtraction.
+ */
+static inline int32_t
+rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
+{
+ return __sync_sub_and_fetch(&v->cnt, dec);
+}
+
+/**
+ * Atomically increment a 32-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+ return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+}
+#endif
+
+/**
+ * Atomically decrement a 32-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+ return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+}
+#endif
+
+/**
+ * Atomically test and set a 32-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * 0 if failed; else 1, success.
+ */
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+ return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+#endif
+
+/**
+ * Atomically set a 32-bit counter to 0.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void rte_atomic32_clear(rte_atomic32_t *v)
+{
+ v->cnt = 0;
+}
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+/**
+ * An atomic compare and set function used by the mutex functions.
+ * (atomic) equivalent to:
+ * if (*dst == exp)
+ * *dst = src (all 64-bit words)
+ *
+ * @param dst
+ * The destination into which the value will be written.
+ * @param exp
+ * The expected value.
+ * @param src
+ * The new value.
+ * @return
+ * Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+ return __sync_bool_compare_and_swap(dst, exp, src);
+}
+#endif
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+ volatile int64_t cnt; /**< Internal counter value. */
+} rte_atomic64_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC64_INIT(val) { (val) }
+
+/**
+ * Initialize the atomic counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_init(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+#ifdef __LP64__
+ v->cnt = 0;
+#else
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, 0);
+ }
+#endif
+}
+#endif
+
+/**
+ * Atomically read a 64-bit counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * The value of the counter.
+ */
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+#ifdef __LP64__
+ return v->cnt;
+#else
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ /* replace the value by itself */
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, tmp);
+ }
+ return tmp;
+#endif
+}
+#endif
+
+/**
+ * Atomically set a 64-bit counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param new_value
+ * The new value of the counter.
+ */
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+#ifdef __LP64__
+ v->cnt = new_value;
+#else
+ int success = 0;
+ uint64_t tmp;
+
+ while (success == 0) {
+ tmp = v->cnt;
+ success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+ tmp, new_value);
+ }
+#endif
+}
+#endif
+
+/**
+ * Atomically add a 64-bit value to a counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ */
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+ __sync_fetch_and_add(&v->cnt, inc);
+}
+#endif
+
+/**
+ * Atomically subtract a 64-bit value from a counter.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+ __sync_fetch_and_sub(&v->cnt, dec);
+}
+#endif
+
+/**
+ * Atomically increment a 64-bit counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+ rte_atomic64_add(v, 1);
+}
+#endif
+
+/**
+ * Atomically decrement a 64-bit counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+ rte_atomic64_sub(v, 1);
+}
+#endif
+
+/**
+ * Add a 64-bit value to an atomic counter and return the result.
+ *
+ * Atomically adds the 64-bit value (inc) to the atomic counter (v) and
+ * returns the value of v after the addition.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param inc
+ * The value to be added to the counter.
+ * @return
+ * The value of v after the addition.
+ */
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+ return __sync_add_and_fetch(&v->cnt, inc);
+}
+#endif
+
+/**
+ * Subtract a 64-bit value from an atomic counter and return the result.
+ *
+ * Atomically subtracts the 64-bit value (dec) from the atomic counter (v)
+ * and returns the value of v after the subtraction.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @param dec
+ * The value to be subtracted from the counter.
+ * @return
+ * The value of v after the subtraction.
+ */
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+ return __sync_sub_and_fetch(&v->cnt, dec);
+}
+#endif
+
+/**
+ * Atomically increment a 64-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns
+ * true if the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the addition is 0; false otherwise.
+ */
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+ return rte_atomic64_add_return(v, 1) == 0;
+}
+#endif
+
+/**
+ * Atomically decrement a 64-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after subtraction is 0; false otherwise.
+ */
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+ return rte_atomic64_sub_return(v, 1) == 0;
+}
+#endif
+
+/**
+ * Atomically test and set a 64-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * 0 if failed; else 1, success.
+ */
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+ return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+#endif
+
+/**
+ * Atomically set a 64-bit counter to 0.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void rte_atomic64_clear(rte_atomic64_t *v);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+ rte_atomic64_set(v, 0);
+}
+#endif
+
+#endif /* _RTE_ATOMIC_H_ */
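A hypothetical reference-counting sketch built on the 32-bit operations declared above; release_object() is a placeholder destructor.

#include <rte_atomic.h>

static rte_atomic32_t refcnt = RTE_ATOMIC32_INIT(1);

static void release_object(void) { /* placeholder destructor */ }

void obj_get(void)
{
	rte_atomic32_inc(&refcnt);
}

void obj_put(void)
{
	/* only the thread dropping the last reference sees a zero result */
	if (rte_atomic32_dec_and_test(&refcnt))
		release_object();
}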
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_byteorder.h b/src/dpdk_lib18/librte_eal/common/include/generic/rte_byteorder.h
new file mode 100755
index 00000000..c46fdcf2
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/generic/rte_byteorder.h
@@ -0,0 +1,217 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_BYTEORDER_H_
+#define _RTE_BYTEORDER_H_
+
+/**
+ * @file
+ *
+ * Byte Swap Operations
+ *
+ * This file defines a generic API for byte swap operations. Part of
+ * the implementation is architecture-specific.
+ */
+
+#include <stdint.h>
+#ifdef RTE_EXEC_ENV_BSDAPP
+#include <sys/endian.h>
+#else
+#include <endian.h>
+#endif
+
+/*
+ * Compile-time endianness detection
+ */
+#define RTE_BIG_ENDIAN 1
+#define RTE_LITTLE_ENDIAN 2
+#if defined __BYTE_ORDER__
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define RTE_BYTE_ORDER RTE_BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN
+#endif /* __BYTE_ORDER__ */
+#elif defined __BYTE_ORDER
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define RTE_BYTE_ORDER RTE_BIG_ENDIAN
+#elif __BYTE_ORDER == __LITTLE_ENDIAN
+#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN
+#endif /* __BYTE_ORDER */
+#elif defined __BIG_ENDIAN__
+#define RTE_BYTE_ORDER RTE_BIG_ENDIAN
+#elif defined __LITTLE_ENDIAN__
+#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN
+#endif
+
+/*
+ * An internal function to swap bytes in a 16-bit value.
+ *
+ * It is used by rte_bswap16() when the value is constant. Do not use
+ * this function directly; rte_bswap16() is preferred.
+ */
+static inline uint16_t
+rte_constant_bswap16(uint16_t x)
+{
+ return (uint16_t)(((x & 0x00ffU) << 8) |
+ ((x & 0xff00U) >> 8));
+}
+
+/*
+ * An internal function to swap bytes in a 32-bit value.
+ *
+ * It is used by rte_bswap32() when the value is constant. Do not use
+ * this function directly; rte_bswap32() is preferred.
+ */
+static inline uint32_t
+rte_constant_bswap32(uint32_t x)
+{
+ return ((x & 0x000000ffUL) << 24) |
+ ((x & 0x0000ff00UL) << 8) |
+ ((x & 0x00ff0000UL) >> 8) |
+ ((x & 0xff000000UL) >> 24);
+}
+
+/*
+ * An internal function to swap bytes of a 64-bit value.
+ *
+ * It is used by rte_bswap64() when the value is constant. Do not use
+ * this function directly; rte_bswap64() is preferred.
+ */
+static inline uint64_t
+rte_constant_bswap64(uint64_t x)
+{
+ return ((x & 0x00000000000000ffULL) << 56) |
+ ((x & 0x000000000000ff00ULL) << 40) |
+ ((x & 0x0000000000ff0000ULL) << 24) |
+ ((x & 0x00000000ff000000ULL) << 8) |
+ ((x & 0x000000ff00000000ULL) >> 8) |
+ ((x & 0x0000ff0000000000ULL) >> 24) |
+ ((x & 0x00ff000000000000ULL) >> 40) |
+ ((x & 0xff00000000000000ULL) >> 56);
+}
+
+
+#ifdef __DOXYGEN__
+
+/**
+ * Swap bytes in a 16-bit value.
+ */
+static uint16_t rte_bswap16(uint16_t _x);
+
+/**
+ * Swap bytes in a 32-bit value.
+ */
+static uint32_t rte_bswap32(uint32_t x);
+
+/**
+ * Swap bytes in a 64-bit value.
+ */
+static uint64_t rte_bswap64(uint64_t x);
+
+/**
+ * Convert a 16-bit value from CPU order to little endian.
+ */
+static uint16_t rte_cpu_to_le_16(uint16_t x);
+
+/**
+ * Convert a 32-bit value from CPU order to little endian.
+ */
+static uint32_t rte_cpu_to_le_32(uint32_t x);
+
+/**
+ * Convert a 64-bit value from CPU order to little endian.
+ */
+static uint64_t rte_cpu_to_le_64(uint64_t x);
+
+
+/**
+ * Convert a 16-bit value from CPU order to big endian.
+ */
+static uint16_t rte_cpu_to_be_16(uint16_t x);
+
+/**
+ * Convert a 32-bit value from CPU order to big endian.
+ */
+static uint32_t rte_cpu_to_be_32(uint32_t x);
+
+/**
+ * Convert a 64-bit value from CPU order to big endian.
+ */
+static uint64_t rte_cpu_to_be_64(uint64_t x);
+
+
+/**
+ * Convert a 16-bit value from little endian to CPU order.
+ */
+static uint16_t rte_le_to_cpu_16(uint16_t x);
+
+/**
+ * Convert a 32-bit value from little endian to CPU order.
+ */
+static uint32_t rte_le_to_cpu_32(uint32_t x);
+
+/**
+ * Convert a 64-bit value from little endian to CPU order.
+ */
+static uint64_t rte_le_to_cpu_64(uint64_t x);
+
+
+/**
+ * Convert a 16-bit value from big endian to CPU order.
+ */
+static uint16_t rte_be_to_cpu_16(uint16_t x);
+
+/**
+ * Convert a 32-bit value from big endian to CPU order.
+ */
+static uint32_t rte_be_to_cpu_32(uint32_t x);
+
+/**
+ * Convert a 64-bit value from big endian to CPU order.
+ */
+static uint64_t rte_be_to_cpu_64(uint64_t x);
+
+#endif /* __DOXYGEN__ */
+
+#ifdef RTE_FORCE_INTRINSICS
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+#define rte_bswap16(x) __builtin_bswap16(x)
+#endif
+
+#define rte_bswap32(x) __builtin_bswap32(x)
+
+#define rte_bswap64(x) __builtin_bswap64(x)
+
+#endif
+
+#endif /* _RTE_BYTEORDER_H_ */
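
A brief usage sketch of the byte-order helpers declared above (illustrative only, not part of this patch; it assumes the DPDK include path so the architecture-specific implementations behind these declarations are pulled in):

    #include <stdio.h>
    #include <rte_byteorder.h>

    int main(void)
    {
            uint16_t port = 8080;                         /* CPU (host) order   */
            uint16_t wire = rte_cpu_to_be_16(port);       /* big endian on wire */

            printf("host 0x%04x -> wire 0x%04x -> host 0x%04x\n",
                   port, wire, rte_be_to_cpu_16(wire));
            return 0;
    }

On a little-endian CPU the two host values match and the wire value is byte-swapped; on a big-endian CPU all three are identical.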
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_cpuflags.h b/src/dpdk_lib18/librte_eal/common/include/generic/rte_cpuflags.h
new file mode 100755
index 00000000..7f048387
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/generic/rte_cpuflags.h
@@ -0,0 +1,110 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CPUFLAGS_H_
+#define _RTE_CPUFLAGS_H_
+
+/**
+ * @file
+ * Architecture specific API to determine available CPU features at runtime.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+
+/**
+ * Enumeration of all CPU features supported
+ */
+enum rte_cpu_flag_t;
+
+/**
+ * Enumeration of CPU registers
+ */
+enum cpu_register_t;
+
+typedef uint32_t cpuid_registers_t[4];
+
+#define CPU_FLAG_NAME_MAX_LEN 64
+
+/**
+ * Struct to hold a processor feature entry
+ */
+struct feature_entry {
+ uint32_t leaf; /**< cpuid leaf */
+ uint32_t subleaf; /**< cpuid subleaf */
+ uint32_t reg; /**< cpuid register */
+ uint32_t bit; /**< cpuid register bit */
+ char name[CPU_FLAG_NAME_MAX_LEN]; /**< String for printing */
+};
+
+#define FEAT_DEF(name, leaf, subleaf, reg, bit) \
+ [RTE_CPUFLAG_##name] = {leaf, subleaf, reg, bit, #name },
+
+/**
+ * An array that holds feature entries
+ */
+static const struct feature_entry cpu_feature_table[];
+
+/**
+ * Execute CPUID instruction and get contents of a specific register
+ *
+ * This function, when compiled with GCC, will generate architecture-neutral
+ * code, as per GCC manual.
+ */
+static inline void
+rte_cpu_get_features(uint32_t leaf, uint32_t subleaf, cpuid_registers_t out);
+
+/**
+ * Function for checking a CPU flag availability
+ *
+ * @param flag
+ * CPU flag to query CPU for
+ * @return
+ * 1 if flag is available
+ * 0 if flag is not available
+ * -ENOENT if flag is invalid
+ */
+static inline int
+rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature);
+
+/**
+ * This function checks that the currently used CPU supports the CPU features
+ * that were specified at compile time. It is called automatically within the
+ * EAL, so does not need to be used by applications.
+ */
+void
+rte_cpu_check_supported(void);
+
+#endif /* _RTE_CPUFLAGS_H_ */
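
A short sketch of how the flag query above is typically used (illustrative, not part of this patch; RTE_CPUFLAG_SSE4_2 is taken from the x86-specific rte_cpuflags.h, and other architectures define their own flag names):

    #include <stdio.h>
    #include <rte_cpuflags.h>

    int main(void)
    {
            /* returns 1 if available, 0 if not, -ENOENT for an invalid flag */
            if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2) > 0)
                    printf("SSE4.2 is available\n");
            else
                    printf("SSE4.2 is not available\n");
            return 0;
    }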
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_cycles.h b/src/dpdk_lib18/librte_eal/common/include/generic/rte_cycles.h
new file mode 100755
index 00000000..7700f411
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/generic/rte_cycles.h
@@ -0,0 +1,205 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* BSD LICENSE
+ *
+ * Copyright(c) 2013 6WIND.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CYCLES_H_
+#define _RTE_CYCLES_H_
+
+/**
+ * @file
+ *
+ * Simple Time Reference Functions (Cycles and HPET).
+ */
+
+#include <stdint.h>
+#include <rte_debug.h>
+#include <rte_atomic.h>
+
+#define MS_PER_S 1000
+#define US_PER_S 1000000
+#define NS_PER_S 1000000000
+
+enum timer_source {
+ EAL_TIMER_TSC = 0,
+ EAL_TIMER_HPET
+};
+extern enum timer_source eal_timer_source;
+
+/**
+ * Get the measured frequency of the RDTSC counter
+ *
+ * @return
+ * The TSC frequency for this lcore
+ */
+uint64_t
+rte_get_tsc_hz(void);
+
+/**
+ * Return the number of TSC cycles since boot
+ *
+ * @return
+ * the number of cycles
+ */
+static inline uint64_t
+rte_get_tsc_cycles(void);
+
+#ifdef RTE_LIBEAL_USE_HPET
+/**
+ * Return the number of HPET cycles since boot
+ *
+ * This counter is global for all execution units. The number of
+ * cycles in one second can be retrieved using rte_get_hpet_hz().
+ *
+ * @return
+ * the number of cycles
+ */
+uint64_t
+rte_get_hpet_cycles(void);
+
+/**
+ * Get the number of HPET cycles in one second.
+ *
+ * @return
+ * The number of cycles in one second.
+ */
+uint64_t
+rte_get_hpet_hz(void);
+
+/**
+ * Initialise the HPET for use. This must be called before the rte_get_hpet_hz
+ * and rte_get_hpet_cycles APIs are called. If this function does not succeed,
+ * then the HPET functions are unavailable and should not be called.
+ *
+ * @param make_default
+ * If set, the hpet timer becomes the default timer whose values are
+ * returned by the rte_get_timer_hz/cycles API calls
+ *
+ * @return
+ * 0 on success,
+ * -1 on error, and the make_default parameter is ignored.
+ */
+int rte_eal_hpet_init(int make_default);
+
+#endif
+
+/**
+ * Get the number of cycles since boot from the default timer.
+ *
+ * @return
+ * The number of cycles
+ */
+static inline uint64_t
+rte_get_timer_cycles(void)
+{
+ switch(eal_timer_source) {
+ case EAL_TIMER_TSC:
+ return rte_get_tsc_cycles();
+ case EAL_TIMER_HPET:
+#ifdef RTE_LIBEAL_USE_HPET
+ return rte_get_hpet_cycles();
+#endif
+ default: rte_panic("Invalid timer source specified\n");
+ }
+}
+
+/**
+ * Get the number of cycles in one second for the default timer.
+ *
+ * @return
+ * The number of cycles in one second.
+ */
+static inline uint64_t
+rte_get_timer_hz(void)
+{
+ switch(eal_timer_source) {
+ case EAL_TIMER_TSC:
+ return rte_get_tsc_hz();
+ case EAL_TIMER_HPET:
+#ifdef RTE_LIBEAL_USE_HPET
+ return rte_get_hpet_hz();
+#endif
+ default: rte_panic("Invalid timer source specified\n");
+ }
+}
+
+/**
+ * Wait at least us microseconds.
+ *
+ * @param us
+ * The number of microseconds to wait.
+ */
+void
+rte_delay_us(unsigned us);
+
+/**
+ * Wait at least ms milliseconds.
+ *
+ * @param ms
+ * The number of milliseconds to wait.
+ */
+static inline void
+rte_delay_ms(unsigned ms)
+{
+ rte_delay_us(ms * 1000);
+}
+
+#endif /* _RTE_CYCLES_H_ */
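
A minimal timing sketch using the default-timer API above (illustrative, not part of this patch; it assumes the EAL has already been initialised so that eal_timer_source and the TSC/HPET frequency are set up):

    #include <stdio.h>
    #include <rte_cycles.h>

    static void timed_block(void)
    {
            uint64_t start = rte_get_timer_cycles();

            rte_delay_ms(10);                     /* stand-in for real work */

            uint64_t cycles = rte_get_timer_cycles() - start;
            printf("elapsed: %.6f s\n", (double)cycles / rte_get_timer_hz());
    }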
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_memcpy.h b/src/dpdk_lib18/librte_eal/common/include/generic/rte_memcpy.h
new file mode 100755
index 00000000..03e84773
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/generic/rte_memcpy.h
@@ -0,0 +1,144 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMCPY_H_
+#define _RTE_MEMCPY_H_
+
+/**
+ * @file
+ *
+ * Functions for vectorised implementation of memcpy().
+ */
+
+/**
+ * Copy 16 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src);
+
+/**
+ * Copy 32 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src);
+
+/**
+ * Copy 48 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov48(uint8_t *dst, const uint8_t *src);
+
+/**
+ * Copy 64 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src);
+
+/**
+ * Copy 128 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src);
+
+/**
+ * Copy 256 bytes from one location to another using optimised
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ */
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src);
+
+#ifdef __DOXYGEN__
+
+/**
+ * Copy bytes from one location to another. The locations must not overlap.
+ *
+ * @note This is implemented as a macro, so its address should not be taken
+ * and care is needed as parameter expressions may be evaluated multiple times.
+ *
+ * @param dst
+ * Pointer to the destination of the data.
+ * @param src
+ * Pointer to the source data.
+ * @param n
+ * Number of bytes to copy.
+ * @return
+ * Pointer to the destination data.
+ */
+static void *
+rte_memcpy(void *dst, const void *src, size_t n);
+
+#endif /* __DOXYGEN__ */
+
+/*
+ * memcpy() function used by rte_memcpy macro
+ */
+static inline void *
+rte_memcpy_func(void *dst, const void *src, size_t n) __attribute__((always_inline));
+
+
+#endif /* _RTE_MEMCPY_H_ */
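
A small sketch of the rte_memcpy() usage implied above (illustrative, not part of this patch; the struct name is hypothetical, and the buffers must not overlap, exactly as with memcpy()):

    #include <stdint.h>
    #include <rte_memcpy.h>

    struct pkt_meta {
            uint32_t id;
            uint32_t len;
            uint8_t  payload[64];
    };

    static void copy_meta(struct pkt_meta *dst, const struct pkt_meta *src)
    {
            /* vectorised copy with the same semantics as memcpy() */
            rte_memcpy(dst, src, sizeof(*dst));
    }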
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_prefetch.h b/src/dpdk_lib18/librte_eal/common/include/generic/rte_prefetch.h
new file mode 100755
index 00000000..217f319b
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/generic/rte_prefetch.h
@@ -0,0 +1,71 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PREFETCH_H_
+#define _RTE_PREFETCH_H_
+
+/**
+ * @file
+ *
+ * Prefetch operations.
+ *
+ * This file defines an API for prefetch macros / inline-functions,
+ * which are architecture-dependent. Prefetching occurs when a
+ * processor requests an instruction or data from memory to cache
+ * before it is actually needed, potentially speeding up the execution of the
+ * program.
+ */
+
+/**
+ * Prefetch a cache line into all cache levels.
+ * @param p
+ * Address to prefetch
+ */
+static inline void rte_prefetch0(volatile void *p);
+
+/**
+ * Prefetch a cache line into all cache levels except the 0th cache level.
+ * @param p
+ * Address to prefetch
+ */
+static inline void rte_prefetch1(volatile void *p);
+
+/**
+ * Prefetch a cache line into all cache levels except the 0th and 1st cache
+ * levels.
+ * @param p
+ * Address to prefetch
+ */
+static inline void rte_prefetch2(volatile void *p);
+
+#endif /* _RTE_PREFETCH_H_ */
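
A typical pattern for the prefetch hints above is to warm a cache line a few iterations ahead of its use (illustrative sketch, not part of this patch; the look-ahead distance of 8 elements is an arbitrary example value):

    #include <stdint.h>
    #include <rte_prefetch.h>

    static uint64_t sum_values(uint32_t *vals, unsigned n)
    {
            uint64_t sum = 0;
            unsigned i;

            for (i = 0; i < n; i++) {
                    if (i + 8 < n)
                            rte_prefetch0(&vals[i + 8]);  /* warm a future element */
                    sum += vals[i];
            }
            return sum;
    }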
diff --git a/src/dpdk_lib18/librte_eal/common/include/generic/rte_spinlock.h b/src/dpdk_lib18/librte_eal/common/include/generic/rte_spinlock.h
new file mode 100755
index 00000000..dea885c3
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/generic/rte_spinlock.h
@@ -0,0 +1,226 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SPINLOCK_H_
+#define _RTE_SPINLOCK_H_
+
+/**
+ * @file
+ *
+ * RTE Spinlocks
+ *
+ * This file defines an API for spinlocks, which are implemented
+ * in an architecture-specific way. This kind of lock simply waits in
+ * a loop repeatedly checking until the lock becomes available.
+ *
+ * All locks must be initialised before use, and only initialised once.
+ *
+ */
+
+#include <rte_lcore.h>
+#ifdef RTE_FORCE_INTRINSICS
+#include <rte_common.h>
+#endif
+
+/**
+ * The rte_spinlock_t type.
+ */
+typedef struct {
+ volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
+} rte_spinlock_t;
+
+/**
+ * A static spinlock initializer.
+ */
+#define RTE_SPINLOCK_INITIALIZER { 0 }
+
+/**
+ * Initialize the spinlock to an unlocked state.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_init(rte_spinlock_t *sl)
+{
+ sl->locked = 0;
+}
+
+/**
+ * Take the spinlock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_lock(rte_spinlock_t *sl);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_spinlock_lock(rte_spinlock_t *sl)
+{
+ while (__sync_lock_test_and_set(&sl->locked, 1))
+ while(sl->locked)
+ rte_pause();
+}
+#endif
+
+/**
+ * Release the spinlock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_unlock (rte_spinlock_t *sl);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline void
+rte_spinlock_unlock (rte_spinlock_t *sl)
+{
+ __sync_lock_release(&sl->locked);
+}
+#endif
+
+/**
+ * Try to take the lock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ * @return
+ * 1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int
+rte_spinlock_trylock (rte_spinlock_t *sl);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline int
+rte_spinlock_trylock (rte_spinlock_t *sl)
+{
+ return (__sync_lock_test_and_set(&sl->locked,1) == 0);
+}
+#endif
+
+/**
+ * Test if the lock is taken.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ * @return
+ * 1 if the lock is currently taken; 0 otherwise.
+ */
+static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
+{
+ return sl->locked;
+}
+
+/**
+ * The rte_spinlock_recursive_t type.
+ */
+typedef struct {
+ rte_spinlock_t sl; /**< the actual spinlock */
+ volatile int user; /**< core id using lock, -1 for unused */
+ volatile int count; /**< count of times this lock has been taken */
+} rte_spinlock_recursive_t;
+
+/**
+ * A static recursive spinlock initializer.
+ */
+#define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}
+
+/**
+ * Initialize the recursive spinlock to an unlocked state.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
+{
+ rte_spinlock_init(&slr->sl);
+ slr->user = -1;
+ slr->count = 0;
+}
+
+/**
+ * Take the recursive spinlock.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
+{
+ int id = rte_lcore_id();
+
+ if (slr->user != id) {
+ rte_spinlock_lock(&slr->sl);
+ slr->user = id;
+ }
+ slr->count++;
+}
+/**
+ * Release the recursive spinlock.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
+{
+ if (--(slr->count) == 0) {
+ slr->user = -1;
+ rte_spinlock_unlock(&slr->sl);
+ }
+
+}
+
+/**
+ * Try to take the recursive lock.
+ *
+ * @param slr
+ * A pointer to the recursive spinlock.
+ * @return
+ * 1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
+{
+ int id = rte_lcore_id();
+
+ if (slr->user != id) {
+ if (rte_spinlock_trylock(&slr->sl) == 0)
+ return 0;
+ slr->user = id;
+ }
+ slr->count++;
+ return 1;
+}
+
+#endif /* _RTE_SPINLOCK_H_ */
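
A minimal sketch of the plain spinlock in use (illustrative, not part of this patch; the counter is hypothetical, and the lock/unlock implementations come either from the intrinsics variants above or from the architecture-specific header):

    #include <stdint.h>
    #include <rte_spinlock.h>

    static rte_spinlock_t counter_lock = RTE_SPINLOCK_INITIALIZER;
    static uint64_t counter;

    static void bump_counter(void)
    {
            rte_spinlock_lock(&counter_lock);     /* spins until acquired */
            counter++;
            rte_spinlock_unlock(&counter_lock);
    }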
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_alarm.h b/src/dpdk_lib18/librte_eal/common/include/rte_alarm.h
new file mode 100755
index 00000000..4012cd67
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_alarm.h
@@ -0,0 +1,106 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ALARM_H_
+#define _RTE_ALARM_H_
+
+/**
+ * @file
+ *
+ * Alarm functions
+ *
+ * Simple alarm-clock functionality supplied by eal.
+ * Does not require hpet support.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/**
+ * Signature of the callback function called when an alarm goes off.
+ */
+typedef void (*rte_eal_alarm_callback)(void *arg);
+
+/**
+ * Function to set a callback to be triggered when us microseconds
+ * have expired. Accuracy of timing to the microsecond is not guaranteed. The
+ * alarm function will not be called *before* the requested time, but may
+ * be called a short period of time afterwards.
+ * The alarm handler will be called only once. There is no need to call
+ * "rte_eal_alarm_cancel" from within the callback function.
+ *
+ * @param us
+ * The time in microseconds before the callback is called
+ * @param cb
+ * The function to be called when the alarm expires
+ * @param cb_arg
+ * Pointer parameter to be passed to the callback function
+ *
+ * @return
+ * On success, zero.
+ * On failure, a negative error number
+ */
+int rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb, void *cb_arg);
+
+/**
+ * Function to cancel an alarm callback which has been registered before. If
+ * used outside an alarm callback, it waits for all callbacks to finish execution.
+ *
+ * @param cb_fn
+ * alarm callback
+ * @param cb_arg
+ * Pointer parameter to be passed to the callback function. To remove all
+ * copies of a given callback function, irrespective of parameter, (void *)-1
+ * can be used here.
+ *
+ * @return
+ * - value greater than 0 and rte_errno not changed - returned value is
+ * the number of canceled alarm callback functions
+ * - value greater than or equal to 0 and rte_errno set to EINPROGRESS - at least
+ * one alarm could not be canceled because cancellation was requested from alarm
+ * callback context. Returned value is the number of successfully canceled
+ * alarm callbacks
+ * - 0 and rte_errno set to ENOENT - no alarm found
+ * - -1 and rte_errno set to EINVAL - invalid parameter (NULL callback)
+ */
+int rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _RTE_ALARM_H_ */
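
A one-shot alarm sketch for the API above (illustrative, not part of this patch; it assumes rte_eal_init() has already run so the alarm subsystem is available, and the callback and message names are hypothetical):

    #include <stdio.h>
    #include <rte_alarm.h>

    static char greeting[] = "hello from the alarm";

    static void on_alarm(void *arg)
    {
            printf("%s\n", (char *)arg);          /* called once, roughly 1s later */
    }

    static int arm_one_shot(void)
    {
            return rte_eal_alarm_set(1000000 /* us */, on_alarm, greeting);
    }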
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_branch_prediction.h b/src/dpdk_lib18/librte_eal/common/include/rte_branch_prediction.h
new file mode 100755
index 00000000..a6a56d17
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_branch_prediction.h
@@ -0,0 +1,70 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Branch Prediction Helpers in RTE
+ */
+
+#ifndef _RTE_BRANCH_PREDICTION_H_
+#define _RTE_BRANCH_PREDICTION_H_
+
+/**
+ * Check if a branch is likely to be taken.
+ *
+ * This compiler builtin allows the developer to indicate if a branch is
+ * likely to be taken. Example:
+ *
+ * if (likely(x > 1))
+ * do_stuff();
+ *
+ */
+#ifndef likely
+#define likely(x) __builtin_expect((x),1)
+#endif /* likely */
+
+/**
+ * Check if a branch is unlikely to be taken.
+ *
+ * This compiler builtin allows the developer to indicate if a branch is
+ * unlikely to be taken. Example:
+ *
+ * if (unlikely(x < 1))
+ * do_stuff();
+ *
+ */
+#ifndef unlikely
+#define unlikely(x) __builtin_expect((x),0)
+#endif /* unlikely */
+
+#endif /* _RTE_BRANCH_PREDICTION_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_common.h b/src/dpdk_lib18/librte_eal/common/include/rte_common.h
new file mode 100755
index 00000000..921b91f3
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_common.h
@@ -0,0 +1,389 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_COMMON_H_
+#define _RTE_COMMON_H_
+
+/**
+ * @file
+ *
+ * Generic, commonly-used macro and inline function definitions
+ * for Intel DPDK.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+
+/*********** Macros to eliminate unused variable warnings ********/
+
+/**
+ * short definition to mark a function parameter unused
+ */
+#define __rte_unused __attribute__((__unused__))
+
+/**
+ * definition to mark a variable or function parameter as used so
+ * as to avoid a compiler warning
+ */
+#define RTE_SET_USED(x) (void)(x)
+
+/*********** Macros for pointer arithmetic ********/
+
+/**
+ * add a byte-value offset to a pointer
+ */
+#define RTE_PTR_ADD(ptr, x) ((void*)((uintptr_t)(ptr) + (x)))
+
+/**
+ * subtract a byte-value offset from a pointer
+ */
+#define RTE_PTR_SUB(ptr, x) ((void*)((uintptr_t)ptr - (x)))
+
+/**
+ * get the difference between two pointer values, i.e. how far apart
+ * in bytes are the locations they point to. It is assumed that
+ * ptr1 is greater than ptr2.
+ */
+#define RTE_PTR_DIFF(ptr1, ptr2) ((uintptr_t)(ptr1) - (uintptr_t)(ptr2))
+
+/*********** Macros/static functions for doing alignment ********/
+
+/**
+ * Function which rounds an unsigned int down to a given power-of-two value.
+ * Takes uintptr_t types as parameters, as this type of operation is most
+ * commonly done for pointer alignment. (See also RTE_ALIGN_FLOOR,
+ * RTE_ALIGN_CEIL, RTE_ALIGN, RTE_PTR_ALIGN_FLOOR, RTE_PTR_ALIGN_CEIL,
+ * RTE_PTR_ALIGN macros)
+ * @param ptr
+ * The value to be rounded down
+ * @param align
+ * The power-of-two of which the result must be a multiple.
+ * @return
+ * Function returns a properly aligned value where align is a power-of-two.
+ * If align is not a power-of-two, result will be incorrect.
+ */
+static inline uintptr_t
+rte_align_floor_int(uintptr_t ptr, uintptr_t align)
+{
+ return (ptr & ~(align - 1));
+}
+
+/**
+ * Macro to align a pointer to a given power-of-two. The resultant
+ * pointer will be a pointer of the same type as the first parameter, and
+ * point to an address no higher than the first parameter. Second parameter
+ * must be a power-of-two value.
+ */
+#define RTE_PTR_ALIGN_FLOOR(ptr, align) \
+ (typeof(ptr))rte_align_floor_int((uintptr_t)ptr, align)
+
+/**
+ * Macro to align a value to a given power-of-two. The resultant value
+ * will be of the same type as the first parameter, and will be no
+ * bigger than the first parameter. Second parameter must be a
+ * power-of-two value.
+ */
+#define RTE_ALIGN_FLOOR(val, align) \
+ (typeof(val))((val) & (~((typeof(val))((align) - 1))))
+
+/**
+ * Macro to align a pointer to a given power-of-two. The resultant
+ * pointer will be a pointer of the same type as the first parameter, and
+ * point to an address no lower than the first parameter. Second parameter
+ * must be a power-of-two value.
+ */
+#define RTE_PTR_ALIGN_CEIL(ptr, align) \
+ RTE_PTR_ALIGN_FLOOR((typeof(ptr))RTE_PTR_ADD(ptr, (align) - 1), align)
+
+/**
+ * Macro to align a value to a given power-of-two. The resultant value
+ * will be of the same type as the first parameter, and will be no lower
+ * than the first parameter. Second parameter must be a power-of-two
+ * value.
+ */
+#define RTE_ALIGN_CEIL(val, align) \
+ RTE_ALIGN_FLOOR(((val) + ((typeof(val)) (align) - 1)), align)
+
+/**
+ * Macro to align a pointer to a given power-of-two. The resultant
+ * pointer will be a pointer of the same type as the first parameter, and
+ * point to an address no lower than the first parameter. Second parameter
+ * must be a power-of-two value.
+ * This function is the same as RTE_PTR_ALIGN_CEIL
+ */
+#define RTE_PTR_ALIGN(ptr, align) RTE_PTR_ALIGN_CEIL(ptr, align)
+
+/**
+ * Macro to align a value to a given power-of-two. The resultant
+ * value will be of the same type as the first parameter, and
+ * will be no lower than the first parameter. Second parameter
+ * must be a power-of-two value.
+ * This function is the same as RTE_ALIGN_CEIL
+ */
+#define RTE_ALIGN(val, align) RTE_ALIGN_CEIL(val, align)
+
+/**
+ * Checks if a pointer is aligned to a given power-of-two value
+ *
+ * @param ptr
+ * The pointer whose alignment is to be checked
+ * @param align
+ * The power-of-two value to which the ptr should be aligned
+ *
+ * @return
+ * True(1) where the pointer is correctly aligned, false(0) otherwise
+ */
+static inline int
+rte_is_aligned(void *ptr, unsigned align)
+{
+ return RTE_PTR_ALIGN(ptr, align) == ptr;
+}
+
+/*********** Macros for compile type checks ********/
+
+/**
+ * Triggers an error at compilation time if the condition is true.
+ */
+#ifndef __OPTIMIZE__
+#define RTE_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else
+extern int RTE_BUILD_BUG_ON_detected_error;
+#define RTE_BUILD_BUG_ON(condition) do { \
+ ((void)sizeof(char[1 - 2*!!(condition)])); \
+ if (condition) \
+ RTE_BUILD_BUG_ON_detected_error = 1; \
+} while(0)
+#endif
+
+/*********** Macros to work with powers of 2 ********/
+
+/**
+ * Returns true if n is a power of 2
+ * @param n
+ * Number to check
+ * @return 1 if true, 0 otherwise
+ */
+static inline int
+rte_is_power_of_2(uint32_t n)
+{
+ return ((n-1) & n) == 0;
+}
+
+/**
+ * Aligns input parameter to the next power of 2
+ *
+ * @param x
+ * The integer value to align
+ *
+ * @return
+ * Input parameter aligned to the next power of 2
+ */
+static inline uint32_t
+rte_align32pow2(uint32_t x)
+{
+ x--;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+
+ return x + 1;
+}
+
+/**
+ * Aligns 64b input parameter to the next power of 2
+ *
+ * @param v
+ * The 64b value to align
+ *
+ * @return
+ * Input parameter aligned to the next power of 2
+ */
+static inline uint64_t
+rte_align64pow2(uint64_t v)
+{
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+
+ return v + 1;
+}
+
+/*********** Macros for calculating min and max **********/
+
+/**
+ * Macro to return the minimum of two numbers
+ */
+#define RTE_MIN(a, b) ({ \
+ typeof (a) _a = (a); \
+ typeof (b) _b = (b); \
+ _a < _b ? _a : _b; \
+ })
+
+/**
+ * Macro to return the maximum of two numbers
+ */
+#define RTE_MAX(a, b) ({ \
+ typeof (a) _a = (a); \
+ typeof (b) _b = (b); \
+ _a > _b ? _a : _b; \
+ })
+
+/*********** Other general functions / macros ********/
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+/**
+ * PAUSE instruction for tight loops (avoid busy waiting)
+ */
+static inline void
+rte_pause (void)
+{
+ _mm_pause();
+}
+#else
+static inline void
+rte_pause(void) {}
+#endif
+
+/**
+ * Searches the input parameter for the least significant set bit
+ * (starting from zero).
+ * If a least significant 1 bit is found, its bit index is returned.
+ * If the content of the input parameter is zero, then the content of the return
+ * value is undefined.
+ * @param v
+ * input parameter, should not be zero.
+ * @return
+ * least significant set bit in the input parameter.
+ */
+static inline uint32_t
+rte_bsf32(uint32_t v)
+{
+ return (__builtin_ctz(v));
+}
+
+#ifndef offsetof
+/** Return the offset of a field in a structure. */
+#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER)
+#endif
+
+#define _RTE_STR(x) #x
+/** Take a macro value and get a string version of it */
+#define RTE_STR(x) _RTE_STR(x)
+
+/** Mask value of type <tp> for the first <ln> bit set. */
+#define RTE_LEN2MASK(ln, tp) \
+ ((tp)((uint64_t)-1 >> (sizeof(uint64_t) * CHAR_BIT - (ln))))
+
+/** Number of elements in the array. */
+#define RTE_DIM(a) (sizeof (a) / sizeof ((a)[0]))
+
+/**
+ * Converts a numeric string to the equivalent uint64_t value.
+ * As well as straight number conversion, also recognises the suffixes
+ * k, m and g for kilobytes, megabytes and gigabytes respectively.
+ *
+ * If a negative number is passed in, i.e. a string with the first non-blank
+ * character being "-", zero is returned. Zero is also returned in the case of
+ * an error with the strtoull call in the function.
+ *
+ * @param str
+ * String containing number to convert.
+ * @return
+ * Number.
+ */
+static inline uint64_t
+rte_str_to_size(const char *str)
+{
+ char *endptr;
+ unsigned long long size;
+
+ while (isspace((int)*str))
+ str++;
+ if (*str == '-')
+ return 0;
+
+ errno = 0;
+ size = strtoull(str, &endptr, 0);
+ if (errno)
+ return 0;
+
+ if (*endptr == ' ')
+ endptr++; /* allow 1 space gap */
+
+ switch (*endptr){
+ case 'G': case 'g': size *= 1024; /* fall-through */
+ case 'M': case 'm': size *= 1024; /* fall-through */
+ case 'K': case 'k': size *= 1024; /* fall-through */
+ default:
+ break;
+ }
+ return size;
+}
+
+/**
+ * Function to terminate the application immediately, printing an error
+ * message and returning the exit_code back to the shell.
+ *
+ * This function never returns
+ *
+ * @param exit_code
+ * The exit code to be returned by the application
+ * @param format
+ * The format string to be used for printing the message. This can include
+ * printf format characters which will be expanded using any further parameters
+ * to the function.
+ */
+void
+rte_exit(int exit_code, const char *format, ...)
+ __attribute__((noreturn))
+ __attribute__((format(printf, 2, 3)));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
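
A few of the helpers above in one place (illustrative, not part of this patch; the values are arbitrary examples):

    #include <stdio.h>
    #include <rte_common.h>

    int main(void)
    {
            uint64_t bytes = rte_str_to_size("2M");     /* 2 * 1024 * 1024        */
            uint32_t ring  = rte_align32pow2(1500);     /* rounds up to 2048      */

            RTE_BUILD_BUG_ON(sizeof(uint32_t) != 4);    /* compile-time assertion */

            printf("bytes=%llu ring=%u min=%d\n",
                   (unsigned long long)bytes, ring, RTE_MIN(3, 7));
            return 0;
    }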
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_common_vect.h b/src/dpdk_lib18/librte_eal/common/include/rte_common_vect.h
new file mode 100755
index 00000000..95bf4b1a
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_common_vect.h
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_COMMON_VECT_H_
+#define _RTE_COMMON_VECT_H_
+
+/**
+ * @file
+ *
+ * RTE SSE/AVX related header.
+ */
+
+#if (defined(__ICC) || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+
+#ifdef __SSE__
+#include <xmmintrin.h>
+#endif
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+
+#if defined(__SSE4_2__) || defined(__SSE4_1__)
+#include <smmintrin.h>
+#endif
+
+#else
+
+#include <x86intrin.h>
+
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef __m128i xmm_t;
+
+#define XMM_SIZE (sizeof(xmm_t))
+#define XMM_MASK (XMM_SIZE - 1)
+
+typedef union rte_xmm {
+ xmm_t m;
+ uint8_t u8[XMM_SIZE / sizeof(uint8_t)];
+ uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
+ uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
+ uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
+ double pd[XMM_SIZE / sizeof(double)];
+} rte_xmm_t;
+
+#ifdef RTE_ARCH_I686
+#define _mm_cvtsi128_si64(a) ({ \
+ rte_xmm_t m; \
+ m.m = (a); \
+ (m.u64[0]); \
+})
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_COMMON_VECT_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_debug.h b/src/dpdk_lib18/librte_eal/common/include/rte_debug.h
new file mode 100755
index 00000000..82ee3b34
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_debug.h
@@ -0,0 +1,105 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_DEBUG_H_
+#define _RTE_DEBUG_H_
+
+/**
+ * @file
+ *
+ * Debug Functions in RTE
+ *
+ * This file defines a generic API for debug operations. Part of
+ * the implementation is architecture-specific.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Dump the stack of the calling core to the console.
+ */
+void rte_dump_stack(void);
+
+/**
+ * Dump the registers of the calling core to the console.
+ *
+ * Note: Not implemented in a userapp environment; use gdb instead.
+ */
+void rte_dump_registers(void);
+
+/**
+ * Provide notification of a critical non-recoverable error and terminate
+ * execution abnormally.
+ *
+ * Display the format string and its expanded arguments (printf-like).
+ *
+ * In a linuxapp environment, this function dumps the stack and calls
+ * abort() resulting in a core dump if enabled.
+ *
+ * The function never returns.
+ *
+ * @param format
+ * The format string
+ * @param args
+ * The variable list of arguments.
+ */
+#define rte_panic(...) rte_panic_(__func__, __VA_ARGS__, "dummy")
+#define rte_panic_(func, format, ...) __rte_panic(func, format "%.0s", __VA_ARGS__)
+
+#define RTE_VERIFY(exp) do { \
+ if (!(exp)) \
+ rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__); \
+} while (0)
+
+/*
+ * Provide notification of a critical non-recoverable error and stop.
+ *
+ * This function should not be called directly. Refer to rte_panic() macro
+ * documentation.
+ */
+void __rte_panic(const char *funcname , const char *format, ...)
+#ifdef __GNUC__
+#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
+ __attribute__((cold))
+#endif
+#endif
+ __attribute__((noreturn))
+ __attribute__((format(printf, 2, 3)));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DEBUG_H_ */
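
A short sketch of the panic/verify macros above (illustrative, not part of this patch; the 64-byte alignment requirement is an arbitrary example):

    #include <stdint.h>
    #include <rte_debug.h>

    static void must_have_aligned_buffer(void *buf)
    {
            RTE_VERIFY(buf != NULL);              /* panics with the line number on failure */

            if (((uintptr_t)buf & 0x3f) != 0)
                    rte_panic("buffer %p is not 64-byte aligned\n", buf);
    }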
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_dev.h b/src/dpdk_lib18/librte_eal/common/include/rte_dev.h
new file mode 100755
index 00000000..f7e3a104
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_dev.h
@@ -0,0 +1,111 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_DEV_H_
+#define _RTE_DEV_H_
+
+/**
+ * @file
+ *
+ * RTE PMD Driver Registration Interface
+ *
+ * This file manages the list of device drivers.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/queue.h>
+
+/** Double linked list of device drivers. */
+TAILQ_HEAD(rte_driver_list, rte_driver);
+
+/**
+ * Initialization function called for each device driver once.
+ */
+typedef int (rte_dev_init_t)(const char *name, const char *args);
+
+/**
+ * Driver type enumeration
+ */
+enum pmd_type {
+ PMD_VDEV = 0,
+ PMD_PDEV = 1,
+};
+
+/**
+ * A structure describing a device driver.
+ */
+struct rte_driver {
+ TAILQ_ENTRY(rte_driver) next; /**< Next in list. */
+ enum pmd_type type; /**< PMD Driver type */
+ const char *name; /**< Driver name. */
+ rte_dev_init_t *init; /**< Device init. function. */
+};
+
+/**
+ * Register a device driver.
+ *
+ * @param driver
+ * A pointer to a rte_dev structure describing the driver
+ * to be registered.
+ */
+void rte_eal_driver_register(struct rte_driver *driver);
+
+/**
+ * Unregister a device driver.
+ *
+ * @param driver
+ * A pointer to a rte_dev structure describing the driver
+ * to be unregistered.
+ */
+void rte_eal_driver_unregister(struct rte_driver *driver);
+
+/**
+ * Initialize all the registered drivers in this process
+ */
+int rte_eal_dev_init(void);
+
+#define PMD_REGISTER_DRIVER(d)\
+void devinitfn_ ##d(void);\
+void __attribute__((constructor, used)) devinitfn_ ##d(void)\
+{\
+ rte_eal_driver_register(&d);\
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DEV_H_ */
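
A sketch of how a PMD registers itself with the interface above (illustrative, not part of this patch; the driver and function names are hypothetical):

    #include <rte_dev.h>

    static int example_vdev_init(const char *name, const char *args)
    {
            (void)name;
            (void)args;
            /* allocate and configure the device here */
            return 0;
    }

    static struct rte_driver example_vdev_drv = {
            .type = PMD_VDEV,
            .name = "eth_example",
            .init = example_vdev_init,
    };

    /* runs at load time via a constructor and adds the driver to the list */
    PMD_REGISTER_DRIVER(example_vdev_drv);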
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_devargs.h b/src/dpdk_lib18/librte_eal/common/include/rte_devargs.h
new file mode 100755
index 00000000..9f9c98f1
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_devargs.h
@@ -0,0 +1,149 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A nor the names of its contributors
+ * may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_DEVARGS_H_
+#define _RTE_DEVARGS_H_
+
+/**
+ * @file
+ *
+ * RTE devargs: list of devices and their user arguments
+ *
+ * This file stores a list of devices and their arguments given by
+ * the user when a DPDK application is started. These devices can be PCI
+ * devices or virtual devices. These devices are stored at startup in a
+ * list of rte_devargs structures.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <sys/queue.h>
+#include <rte_pci.h>
+
+/**
+ * Type of generic device
+ */
+enum rte_devtype {
+ RTE_DEVTYPE_WHITELISTED_PCI,
+ RTE_DEVTYPE_BLACKLISTED_PCI,
+ RTE_DEVTYPE_VIRTUAL,
+};
+
+/**
+ * Structure that stores a device given by the user with its arguments
+ *
+ * A user device is a physical or a virtual device given by the user to
+ * the DPDK application at startup through command line arguments.
+ *
+ * The structure stores the configuration of the device, its PCI
+ * identifier if it's a PCI device or the driver name if it's a virtual
+ * device.
+ */
+struct rte_devargs {
+ /** Next in list. */
+ TAILQ_ENTRY(rte_devargs) next;
+ /** Type of device. */
+ enum rte_devtype type;
+ union {
+ /** Used if type is RTE_DEVTYPE_*_PCI. */
+ struct {
+ /** PCI location. */
+ struct rte_pci_addr addr;
+ } pci;
+ /** Used if type is RTE_DEVTYPE_VIRTUAL. */
+ struct {
+ /** Driver name. */
+ char drv_name[32];
+ } virtual;
+ };
+#define RTE_DEVARGS_LEN 256
+ char args[RTE_DEVARGS_LEN]; /**< Arguments string as given by user. */
+};
+
+/** user device double-linked queue type definition */
+TAILQ_HEAD(rte_devargs_list, rte_devargs);
+
+/** Global list of user devices */
+extern struct rte_devargs_list devargs_list;
+
+/**
+ * Add a device to the user device list
+ *
+ * For PCI devices, the format of arguments string is "PCI_ADDR" or
+ * "PCI_ADDR,key=val,key2=val2,...". Examples: "08:00.1", "0000:5:00.0",
+ * "04:00.0,arg=val".
+ *
+ * For virtual devices, the format of arguments string is "DRIVER_NAME*"
+ * or "DRIVER_NAME*,key=val,key2=val2,...". Examples: "eth_ring",
+ * "eth_ring0", "eth_pmdAnything,arg=0:arg2=1". The validity of the
+ * driver name is not checked by this function, it is done when probing
+ * the drivers.
+ *
+ * @param devtype
+ * The type of the device.
+ * @param devargs_str
+ * The arguments as given by the user.
+ *
+ * @return
+ * - 0 on success
+ * - A negative value on error
+ */
+int rte_eal_devargs_add(enum rte_devtype devtype, const char *devargs_str);
+
+/**
+ * Count the number of user devices of a specified type
+ *
+ * @param devtype
+ * The type of the devices to be counted.
+ *
+ * @return
+ * The number of devices.
+ */
+unsigned int
+rte_eal_devargs_type_count(enum rte_devtype devtype);
+
+/**
+ * This function dumps the list of user devices and their arguments.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_eal_devargs_dump(FILE *f);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DEVARGS_H_ */
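
A minimal usage sketch for the devargs API declared above (not part of the patch; the device strings and the wrapper function are illustrative assumptions):

    #include <stdio.h>
    #include <rte_devargs.h>

    /* Whitelist one PCI device and one virtual ring device, then dump the list. */
    static int add_user_devices(void)
    {
        if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "0000:05:00.0") < 0)
            return -1;
        if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "eth_ring0") < 0)
            return -1;
        printf("virtual devices: %u\n",
               rte_eal_devargs_type_count(RTE_DEVTYPE_VIRTUAL));
        rte_eal_devargs_dump(stdout);
        return 0;
    }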
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_eal.h b/src/dpdk_lib18/librte_eal/common/include/rte_eal.h
new file mode 100755
index 00000000..f4ecd2e0
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_eal.h
@@ -0,0 +1,269 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EAL_H_
+#define _RTE_EAL_H_
+
+/**
+ * @file
+ *
+ * EAL Configuration API
+ */
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_MAGIC 19820526 /**< Magic number written by the main partition when ready. */
+
+/**
+ * The lcore role (used in RTE or not).
+ */
+enum rte_lcore_role_t {
+ ROLE_RTE,
+ ROLE_OFF,
+};
+
+/**
+ * The type of process in a linuxapp, multi-process setup
+ */
+enum rte_proc_type_t {
+ RTE_PROC_AUTO = -1, /* allow auto-detection of primary/secondary */
+ RTE_PROC_PRIMARY = 0, /* set to zero, so primary is the default */
+ RTE_PROC_SECONDARY,
+
+ RTE_PROC_INVALID
+};
+
+/**
+ * The global RTE configuration structure.
+ */
+struct rte_config {
+ uint32_t master_lcore; /**< Id of the master lcore */
+ uint32_t lcore_count; /**< Number of available logical cores. */
+ enum rte_lcore_role_t lcore_role[RTE_MAX_LCORE]; /**< State of cores. */
+
+ /** Primary or secondary configuration */
+ enum rte_proc_type_t process_type;
+
+ /**
+ * Pointer to memory configuration, which may be shared across multiple
+ * Intel DPDK instances
+ */
+ struct rte_mem_config *mem_config;
+} __attribute__((__packed__));
+
+/**
+ * Get the global configuration structure.
+ *
+ * @return
+ * A pointer to the global configuration structure.
+ */
+struct rte_config *rte_eal_get_configuration(void);
+
+/**
+ * Get a lcore's role.
+ *
+ * @param lcore_id
+ * The identifier of the lcore.
+ * @return
+ * The role of the lcore.
+ */
+enum rte_lcore_role_t rte_eal_lcore_role(unsigned lcore_id);
+
+
+/**
+ * Get the process type in a multi-process setup
+ *
+ * @return
+ * The process type
+ */
+enum rte_proc_type_t rte_eal_process_type(void);
+
+/**
+ * Request iopl privilege for all RPL.
+ *
+ * This function should be called by PMDs which need access to ioports.
+ *
+ * @return
+ * - On success, returns 0.
+ * - On failure, returns -1.
+ */
+int rte_eal_iopl_init(void);
+
+/**
+ * Initialize the Environment Abstraction Layer (EAL).
+ *
+ * This function is to be executed on the MASTER lcore only, as soon
+ * as possible in the application's main() function.
+ *
+ * The function finishes the initialization process before main() is called.
+ * It puts the SLAVE lcores in the WAIT state.
+ *
+ * When the multi-partition feature is supported, depending on the
+ * configuration (if CONFIG_RTE_EAL_MAIN_PARTITION is disabled), this
+ * function waits to ensure that the magic number is set before
+ * returning. See also the rte_eal_get_configuration() function. Note:
+ * This behavior may change in the future.
+ *
+ * @param argc
+ * The argc argument that was given to the main() function.
+ * @param argv
+ * The argv argument that was given to the main() function.
+ * @return
+ * - On success, the number of parsed arguments, which is greater or
+ * equal to zero. After the call to rte_eal_init(),
+ * all arguments argv[x] with x < ret may be modified and should
+ * not be accessed by the application.
+ * - On failure, a negative error value.
+ */
+int rte_eal_init(int argc, char **argv);
+/**
+ * Usage function typedef used by the application usage function.
+ *
+ * Use this function typedef to define and call rte_set_application_usage_hook()
+ * routine.
+ */
+typedef void (*rte_usage_hook_t)(const char * prgname);
+
+/**
+ * Add application usage routine callout from the eal_usage() routine.
+ *
+ * This function allows the application to include its usage message
+ * in the EAL system usage message. The routine rte_set_application_usage_hook()
+ * needs to be called before the rte_eal_init() routine in the application.
+ *
+ * This routine is optional for the application; if it is never called, the
+ * default EAL usage message is used.
+ *
+ * @param func
+ * The func argument is a function pointer to the application usage routine.
+ * Called function is defined using rte_usage_hook_t typedef, which is of
+ * the form void rte_usage_func(const char * prgname).
+ *
+ * Calling this routine with a NULL value will reset the usage hook routine and
+ * return the current value, which could be NULL.
+ * @return
+ * - Returns the current value of the rte_application_usage pointer to allow
+ * the caller to daisy-chain the usage routines if more than one is needed.
+ */
+rte_usage_hook_t
+rte_set_application_usage_hook( rte_usage_hook_t usage_func );
+
+/**
+ * macro to get the lock of tailq in mem_config
+ */
+#define RTE_EAL_TAILQ_RWLOCK (&rte_eal_get_configuration()->mem_config->qlock)
+
+/**
+ * macro to get the lock of the mempool shared by multiple instances
+ */
+#define RTE_EAL_MEMPOOL_RWLOCK (&rte_eal_get_configuration()->mem_config->mplock)
+
+
+/**
+ * Utility macro to do a thread-safe tailq 'INSERT' of rte_mem_config
+ *
+ * @param idx
+ * a kind of tailq defined in enum rte_tailq_t
+ *
+ * @param type
+ * type of list (tailq head)
+ *
+ * @param elm
+ * The element to be added to the list
+ *
+ */
+#define RTE_EAL_TAILQ_INSERT_TAIL(idx, type, elm) do { \
+ struct type *list; \
+ list = RTE_TAILQ_LOOKUP_BY_IDX(idx, type); \
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); \
+ TAILQ_INSERT_TAIL(list, elm, next); \
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); \
+} while (0)
+
+/**
+ * Utility macro to do a thread-safe tailq 'REMOVE' of rte_mem_config
+ *
+ * @param idx
+ * a kind of tailq defined in enum rte_tailq_t
+ *
+ * @param type
+ * type of list (tailq head)
+ *
+ * @param elm
+ * The element to be removed from the list
+ *
+ */
+#define RTE_EAL_TAILQ_REMOVE(idx, type, elm) do { \
+ struct type *list; \
+ list = RTE_TAILQ_LOOKUP_BY_IDX(idx, type); \
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); \
+ TAILQ_REMOVE(list, elm, next); \
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); \
+} while (0)
+
+
+/**
+ * macro to check that a TAILQ exists
+ *
+ * @param idx
+ * a kind of tailq defined in enum rte_tailq_t
+ *
+ */
+#define RTE_EAL_TAILQ_EXIST_CHECK(idx) do { \
+ if (RTE_TAILQ_LOOKUP_BY_IDX(idx, rte_tailq_head) == NULL){ \
+ rte_errno = E_RTE_NO_TAILQ; \
+ return NULL; \
+ } \
+} while(0)
+
+/**
+ * Whether EAL is using huge pages (disabled by --no-huge option).
+ * The no-huge mode cannot be used with UIO poll-mode drivers like igb/ixgbe.
+ * It is useful for NIC drivers (e.g. librte_pmd_mlx4, librte_pmd_vmxnet3) or
+ * crypto drivers (e.g. librte_crypto_nitrox) provided by third-parties such
+ * as 6WIND.
+ *
+ * @return
+ * Nonzero if hugepages are enabled.
+ */
+int rte_eal_has_hugepages(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EAL_H_ */
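
A sketch of the initialization flow documented above (the usage hook and the program body are illustrative, not part of this patch):

    #include <stdio.h>
    #include <rte_eal.h>

    /* Printed as part of the EAL usage message when -h is given. */
    static void app_usage(const char *prgname)
    {
        printf("%s [EAL options] -- <application options>\n", prgname);
    }

    int main(int argc, char **argv)
    {
        int ret;

        rte_set_application_usage_hook(app_usage);  /* must precede rte_eal_init() */
        ret = rte_eal_init(argc, argv);              /* parses EAL args, starts slave lcores */
        if (ret < 0)
            return 1;
        argc -= ret;   /* the remaining arguments belong to the application */
        argv += ret;
        /* ... application setup ... */
        return 0;
    }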
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_eal_memconfig.h b/src/dpdk_lib18/librte_eal/common/include/rte_eal_memconfig.h
new file mode 100755
index 00000000..d6359e51
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_eal_memconfig.h
@@ -0,0 +1,112 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EAL_MEMCONFIG_H_
+#define _RTE_EAL_MEMCONFIG_H_
+
+#include <rte_tailq.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc_heap.h>
+#include <rte_rwlock.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Index type of tailq_head
+ */
+enum rte_tailq_t {
+#define rte_tailq_elem(idx, name) idx,
+#define rte_tailq_end(idx) idx
+#include <rte_tailq_elem.h>
+};
+
+/**
+ * The structure for the memory configuration of the RTE.
+ * Used by the rte_config structure. It is kept separate because, for
+ * multi-process support, the memory details must be shared across instances.
+ */
+struct rte_mem_config {
+ volatile uint32_t magic; /**< Magic number - Sanity check. */
+
+ /* memory topology */
+ uint32_t nchannel; /**< Number of channels (0 if unknown). */
+ uint32_t nrank; /**< Number of ranks (0 if unknown). */
+
+ /**
+ * current lock nest order
+ * - qlock->mlock (ring/hash/lpm)
+ * - mplock->qlock->mlock (mempool)
+ * Notice:
+ * *ALWAYS* obtain qlock first if having to obtain both qlock and mlock
+ */
+ rte_rwlock_t mlock; /**< used only by the memzone LIB for thread safety. */
+ rte_rwlock_t qlock; /**< used for thread-safe tailq operations. */
+ rte_rwlock_t mplock; /**< used only by the mempool LIB for thread safety. */
+
+ uint32_t memzone_idx; /**< Index of memzone */
+
+ /* memory segments and zones */
+ struct rte_memseg memseg[RTE_MAX_MEMSEG]; /**< Physmem descriptors. */
+ struct rte_memzone memzone[RTE_MAX_MEMZONE]; /**< Memzone descriptors. */
+
+ /* Runtime Physmem descriptors. */
+ struct rte_memseg free_memseg[RTE_MAX_MEMSEG];
+
+ struct rte_tailq_head tailq_head[RTE_MAX_TAILQ]; /**< Tailqs for objects */
+
+ /* Heaps of Malloc per socket */
+ struct malloc_heap malloc_heaps[RTE_MAX_NUMA_NODES];
+
+ /* address of mem_config in primary process. used to map shared config into
+ * exact same address the primary process maps it.
+ */
+ uint64_t mem_cfg_addr;
+} __attribute__((__packed__));
+
+
+inline static void
+rte_eal_mcfg_wait_complete(struct rte_mem_config* mcfg)
+{
+ /* wait until the shared mem_config finishes initialising */
+ while(mcfg->magic != RTE_MAGIC)
+ rte_pause();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EAL_MEMCONFIG_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_errno.h b/src/dpdk_lib18/librte_eal/common/include/rte_errno.h
new file mode 100755
index 00000000..45910cdc
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_errno.h
@@ -0,0 +1,96 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * API for error cause tracking
+ */
+
+#ifndef _RTE_ERRNO_H_
+#define _RTE_ERRNO_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_per_lcore.h>
+
+RTE_DECLARE_PER_LCORE(int, _rte_errno); /**< Per core error number. */
+
+/**
+ * Error number value, stored per-thread, which can be queried after
+ * calls to certain functions to determine why those functions failed.
+ *
+ * Uses standard values from errno.h wherever possible, with a small number
+ * of additional possible values for RTE-specific conditions.
+ */
+#define rte_errno RTE_PER_LCORE(_rte_errno)
+
+/**
+ * Function which returns a printable string describing a particular
+ * error code. For non-RTE-specific error codes, this function returns
+ * the value from the libc strerror function.
+ *
+ * @param errnum
+ * The error number to be looked up - generally the value of rte_errno
+ * @return
+ * A pointer to a thread-local string containing the text describing
+ * the error.
+ */
+const char *rte_strerror(int errnum);
+
+#ifndef __ELASTERROR
+/**
+ * Check if we have a defined value for the max system-defined errno values.
+ * If no max is defined, start from 1000 to prevent overlap with standard values.
+ */
+#define __ELASTERROR 1000
+#endif
+
+/** Error types */
+enum {
+ RTE_MIN_ERRNO = __ELASTERROR, /**< Start numbering above std errno vals */
+
+ E_RTE_SECONDARY, /**< Operation not allowed in secondary processes */
+ E_RTE_NO_CONFIG, /**< Missing rte_config */
+ E_RTE_NO_TAILQ, /**< Uninitialised TAILQ */
+
+ RTE_MAX_ERRNO /**< Max RTE error number */
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ERRNO_H_ */
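
A short sketch of the rte_errno / rte_strerror pattern described above (the helper name is hypothetical; any librte_* call that reports failure through rte_errno could precede it):

    #include <stdio.h>
    #include <rte_errno.h>

    /* Hypothetical helper: report the last per-lcore error after a failed call. */
    static void report_last_error(const char *what)
    {
        fprintf(stderr, "%s: %s (rte_errno=%d)\n",
                what, rte_strerror(rte_errno), rte_errno);
    }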
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_hexdump.h b/src/dpdk_lib18/librte_eal/common/include/rte_hexdump.h
new file mode 100755
index 00000000..891c77bf
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_hexdump.h
@@ -0,0 +1,89 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_HEXDUMP_H_
+#define _RTE_HEXDUMP_H_
+
+/**
+ * @file
+ * Simple API to dump out memory in a special hex format.
+ */
+
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+* Dump out memory in a special hex dump format.
+*
+* @param f
+* A pointer to a file for output
+* @param title
+* If not NULL this string is printed as a header to the output.
+* @param buf
+* This is the buffer address to print out.
+* @param len
+* The number of bytes to dump out
+* @return
+* None.
+*/
+
+extern void
+rte_hexdump(FILE *f, const char * title, const void * buf, unsigned int len);
+
+/**
+* Dump out memory in a hex format with colons between bytes.
+*
+* @param f
+* A pointer to a file for output
+* @param title
+* If not NULL this string is printed as a header to the output.
+* @param buf
+* This is the buffer address to print out.
+* @param len
+* The number of bytes to dump out
+* @return
+* None.
+*/
+
+void
+rte_memdump(FILE *f, const char * title, const void * buf, unsigned int len);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_HEXDUMP_H_ */
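
A small sketch showing both dump helpers on arbitrary test data (not part of the patch):

    #include <stdio.h>
    #include <stdint.h>
    #include <rte_hexdump.h>

    static void dump_example(void)
    {
        uint8_t buf[32];
        unsigned i;

        for (i = 0; i < sizeof(buf); i++)
            buf[i] = (uint8_t)i;

        rte_hexdump(stdout, "offset/hex/ascii dump", buf, sizeof(buf));
        rte_memdump(stdout, "colon-separated dump", buf, sizeof(buf));
    }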
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_interrupts.h b/src/dpdk_lib18/librte_eal/common/include/rte_interrupts.h
new file mode 100755
index 00000000..609c34bc
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_interrupts.h
@@ -0,0 +1,121 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_INTERRUPTS_H_
+#define _RTE_INTERRUPTS_H_
+
+/**
+ * @file
+ *
+ * The RTE interrupt interface provides functions to register/unregister
+ * callbacks for a specific interrupt.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Interrupt handle */
+struct rte_intr_handle;
+
+/** Function to be registered for the specific interrupt */
+typedef void (*rte_intr_callback_fn)(struct rte_intr_handle *intr_handle,
+ void *cb_arg);
+
+#include <exec-env/rte_interrupts.h>
+
+/**
+ * It registers the callback for the specific interrupt. Multiple
+ * callbacks can be registered at the same time.
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ * @param cb
+ * callback address.
+ * @param cb_arg
+ * address of parameter for callback.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int rte_intr_callback_register(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *cb_arg);
+
+/**
+ * It unregisters the callback according to the specified interrupt handle.
+ *
+ * @param intr_handle
+ * pointer to the interrupt handle.
+ * @param cb
+ * callback address.
+ * @param cb_arg
+ * address of parameter for callback, (void *)-1 means to remove all
+ * registered callbacks which have the same callback address.
+ *
+ * @return
+ * - On success, return the number of callback entities removed.
+ * - On failure, a negative value.
+ */
+int rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *cb_arg);
+
+/**
+ * It enables the interrupt for the specified handle.
+ *
+ * @param intr_handle
+ * pointer to the interrupt handle.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int rte_intr_enable(struct rte_intr_handle *intr_handle);
+
+/**
+ * It disables the interrupt for the specified handle.
+ *
+ * @param intr_handle
+ * pointer to the interrupt handle.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int rte_intr_disable(struct rte_intr_handle *intr_handle);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
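A sketch of the register/enable flow for the interrupt API above (hedged: the handle normally comes from the device being driven, e.g. a PCI device's interrupt handle; `dev_intr_handle` is assumed here):

    #include <stdio.h>
    #include <rte_interrupts.h>

    static void on_interrupt(struct rte_intr_handle *handle, void *cb_arg)
    {
        (void)handle;
        (void)cb_arg;
        printf("interrupt received\n");
    }

    /* dev_intr_handle is assumed to be provided by the device/driver code. */
    static int setup_interrupt(struct rte_intr_handle *dev_intr_handle)
    {
        if (rte_intr_callback_register(dev_intr_handle, on_interrupt, NULL) < 0)
            return -1;
        return rte_intr_enable(dev_intr_handle);
    }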
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_launch.h b/src/dpdk_lib18/librte_eal/common/include/rte_launch.h
new file mode 100755
index 00000000..dd1946da
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_launch.h
@@ -0,0 +1,177 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_LAUNCH_H_
+#define _RTE_LAUNCH_H_
+
+/**
+ * @file
+ *
+ * Launch tasks on other lcores
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * State of an lcore.
+ */
+enum rte_lcore_state_t {
+ WAIT, /**< waiting for a new command */
+ RUNNING, /**< executing command */
+ FINISHED, /**< command executed */
+};
+
+/**
+ * Definition of a remote launch function.
+ */
+typedef int (lcore_function_t)(void *);
+
+/**
+ * Launch a function on another lcore.
+ *
+ * To be executed on the MASTER lcore only.
+ *
+ * Sends a message to a slave lcore (identified by the slave_id) that
+ * is in the WAIT state (this is true after the first call to
+ * rte_eal_init()). This can be checked by first calling
+ * rte_eal_wait_lcore(slave_id).
+ *
+ * When the remote lcore receives the message, it switches to
+ * the RUNNING state, then calls the function f with argument arg. Once the
+ * execution is done, the remote lcore switches to a FINISHED state and
+ * the return value of f is stored in a local variable to be read using
+ * rte_eal_wait_lcore().
+ *
+ * The MASTER lcore returns as soon as the message is sent and knows
+ * nothing about the completion of f.
+ *
+ * Note: This function is not designed to offer optimum
+ * performance. It is just a practical way to launch a function on
+ * another lcore at initialization time.
+ *
+ * @param f
+ * The function to be called.
+ * @param arg
+ * The argument for the function.
+ * @param slave_id
+ * The identifier of the lcore on which the function should be executed.
+ * @return
+ * - 0: Success. Execution of function f started on the remote lcore.
+ * - (-EBUSY): The remote lcore is not in a WAIT state.
+ */
+int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned slave_id);
+
+/**
+ * This enum indicates whether the master core must execute the handler
+ * launched on all logical cores.
+ */
+enum rte_rmt_call_master_t {
+ SKIP_MASTER = 0, /**< lcore handler not executed by master core. */
+ CALL_MASTER, /**< lcore handler executed by master core. */
+};
+
+/**
+ * Launch a function on all lcores.
+ *
+ * Check that each SLAVE lcore is in a WAIT state, then call
+ * rte_eal_remote_launch() for each lcore.
+ *
+ * @param f
+ * The function to be called.
+ * @param arg
+ * The argument for the function.
+ * @param call_master
+ * If call_master set to SKIP_MASTER, the MASTER lcore does not call
+ * the function. If call_master is set to CALL_MASTER, the function
+ * is also called on master before returning. In any case, the master
+ * lcore returns as soon as it has finished its job and knows nothing
+ * about the completion of f on the other lcores.
+ * @return
+ * - 0: Success. Execution of function f started on all remote lcores.
+ * - (-EBUSY): At least one remote lcore is not in a WAIT state. In this
+ * case, no message is sent to any of the lcores.
+ */
+int rte_eal_mp_remote_launch(lcore_function_t *f, void *arg,
+ enum rte_rmt_call_master_t call_master);
+
+/**
+ * Get the state of the lcore identified by slave_id.
+ *
+ * To be executed on the MASTER lcore only.
+ *
+ * @param slave_id
+ * The identifier of the lcore.
+ * @return
+ * The state of the lcore.
+ */
+enum rte_lcore_state_t rte_eal_get_lcore_state(unsigned slave_id);
+
+/**
+ * Wait until an lcore finishes its job.
+ *
+ * To be executed on the MASTER lcore only.
+ *
+ * If the slave lcore identified by the slave_id is in a FINISHED state,
+ * switch to the WAIT state. If the lcore is in RUNNING state, wait until
+ * the lcore finishes its job and moves to the FINISHED state.
+ *
+ * @param slave_id
+ * The identifier of the lcore.
+ * @return
+ * - 0: If the lcore identified by the slave_id is in a WAIT state.
+ * - The value that was returned by the previous remote launch
+ * function call if the lcore identified by the slave_id was in a
+ * FINISHED or RUNNING state. In this case, it changes the state
+ * of the lcore to WAIT.
+ */
+int rte_eal_wait_lcore(unsigned slave_id);
+
+/**
+ * Wait until all lcores finish their jobs.
+ *
+ * To be executed on the MASTER lcore only. Issue an
+ * rte_eal_wait_lcore() for every lcore. The return values are
+ * ignored.
+ *
+ * After a call to rte_eal_mp_wait_lcore(), the caller can assume
+ * that all slave lcores are in a WAIT state.
+ */
+void rte_eal_mp_wait_lcore(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_LAUNCH_H_ */
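
A minimal launch-and-wait sketch for the API above (the worker body is illustrative):

    #include <stdio.h>
    #include <rte_launch.h>
    #include <rte_lcore.h>

    static int worker(void *arg)
    {
        (void)arg;
        printf("hello from lcore %u\n", rte_lcore_id());
        return 0;
    }

    /* To be called from the MASTER lcore after rte_eal_init(). */
    static void run_on_all_slaves(void)
    {
        /* Launch worker on every slave lcore; the master does not run it. */
        rte_eal_mp_remote_launch(worker, NULL, SKIP_MASTER);
        /* Block until every slave is back in the WAIT state. */
        rte_eal_mp_wait_lcore();
    }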
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_lcore.h b/src/dpdk_lib18/librte_eal/common/include/rte_lcore.h
new file mode 100755
index 00000000..49b2c034
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_lcore.h
@@ -0,0 +1,229 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_LCORE_H_
+#define _RTE_LCORE_H_
+
+/**
+ * @file
+ *
+ * API for lcore and socket manipulation
+ *
+ */
+#include <rte_per_lcore.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LCORE_ID_ANY -1 /**< Any lcore. */
+
+/**
+ * Structure storing internal configuration (per-lcore)
+ */
+struct lcore_config {
+ unsigned detected; /**< true if lcore was detected */
+ pthread_t thread_id; /**< pthread identifier */
+ int pipe_master2slave[2]; /**< communication pipe with master */
+ int pipe_slave2master[2]; /**< communication pipe with master */
+ lcore_function_t * volatile f; /**< function to call */
+ void * volatile arg; /**< argument of function */
+ volatile int ret; /**< return value of function */
+ volatile enum rte_lcore_state_t state; /**< lcore state */
+ unsigned socket_id; /**< physical socket id for this lcore */
+ unsigned core_id; /**< core number on socket for this lcore */
+ int core_index; /**< relative index, starting from 0 */
+};
+
+/**
+ * Internal configuration (per-lcore)
+ */
+extern struct lcore_config lcore_config[RTE_MAX_LCORE];
+
+RTE_DECLARE_PER_LCORE(unsigned, _lcore_id); /**< Per core "core id". */
+
+/**
+ * Return the ID of the execution unit we are running on.
+ * @return
+ * Logical core ID
+ */
+static inline unsigned
+rte_lcore_id(void)
+{
+ return RTE_PER_LCORE(_lcore_id);
+}
+
+/**
+ * Get the id of the master lcore
+ *
+ * @return
+ * the id of the master lcore
+ */
+static inline unsigned
+rte_get_master_lcore(void)
+{
+ return rte_eal_get_configuration()->master_lcore;
+}
+
+/**
+ * Return the number of execution units (lcores) on the system.
+ *
+ * @return
+ * the number of execution units (lcores) on the system.
+ */
+static inline unsigned
+rte_lcore_count(void)
+{
+ const struct rte_config *cfg = rte_eal_get_configuration();
+ return cfg->lcore_count;
+}
+
+/**
+ * Return the index of the lcore starting from zero.
+ * The order is physical or given by command line (-l option).
+ *
+ * @param lcore_id
+ * The targeted lcore, or -1 for the current one.
+ * @return
+ * The relative index, or -1 if not enabled.
+ */
+static inline int
+rte_lcore_index(int lcore_id)
+{
+ if (lcore_id >= RTE_MAX_LCORE)
+ return -1;
+ if (lcore_id < 0)
+ lcore_id = rte_lcore_id();
+ return lcore_config[lcore_id].core_index;
+}
+
+/**
+ * Return the ID of the physical socket of the logical core we are
+ * running on.
+ * @return
+ * the ID of current lcoreid's physical socket
+ */
+static inline unsigned
+rte_socket_id(void)
+{
+ return lcore_config[rte_lcore_id()].socket_id;
+}
+
+/**
+ * Get the ID of the physical socket of the specified lcore
+ *
+ * @param lcore_id
+ * the targeted lcore, which MUST be between 0 and RTE_MAX_LCORE-1.
+ * @return
+ * the ID of lcoreid's physical socket
+ */
+static inline unsigned
+rte_lcore_to_socket_id(unsigned lcore_id)
+{
+ return lcore_config[lcore_id].socket_id;
+}
+
+/**
+ * Test if an lcore is enabled.
+ *
+ * @param lcore_id
+ * The identifier of the lcore, which MUST be between 0 and
+ * RTE_MAX_LCORE-1.
+ * @return
+ * True if the given lcore is enabled; false otherwise.
+ */
+static inline int
+rte_lcore_is_enabled(unsigned lcore_id)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ if (lcore_id >= RTE_MAX_LCORE)
+ return 0;
+ return (cfg->lcore_role[lcore_id] != ROLE_OFF);
+}
+
+/**
+ * Get the next enabled lcore ID.
+ *
+ * @param i
+ * The current lcore (reference).
+ * @param skip_master
+ * If true, do not return the ID of the master lcore.
+ * @param wrap
+ * If true, go back to 0 when RTE_MAX_LCORE is reached; otherwise,
+ * return RTE_MAX_LCORE.
+ * @return
+ * The next lcore_id or RTE_MAX_LCORE if not found.
+ */
+static inline unsigned
+rte_get_next_lcore(unsigned i, int skip_master, int wrap)
+{
+ i++;
+ if (wrap)
+ i %= RTE_MAX_LCORE;
+
+ while (i < RTE_MAX_LCORE) {
+ if (!rte_lcore_is_enabled(i) ||
+ (skip_master && (i == rte_get_master_lcore()))) {
+ i++;
+ if (wrap)
+ i %= RTE_MAX_LCORE;
+ continue;
+ }
+ break;
+ }
+ return i;
+}
+/**
+ * Macro to browse all running lcores.
+ */
+#define RTE_LCORE_FOREACH(i) \
+ for (i = rte_get_next_lcore(-1, 0, 0); \
+ i<RTE_MAX_LCORE; \
+ i = rte_get_next_lcore(i, 0, 0))
+
+/**
+ * Macro to browse all running lcores except the master lcore.
+ */
+#define RTE_LCORE_FOREACH_SLAVE(i) \
+ for (i = rte_get_next_lcore(-1, 1, 0); \
+ i<RTE_MAX_LCORE; \
+ i = rte_get_next_lcore(i, 1, 0))
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _RTE_LCORE_H_ */
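
A sketch iterating the enabled lcores with the helpers declared above (illustrative only):

    #include <stdio.h>
    #include <rte_lcore.h>

    static void print_lcore_topology(void)
    {
        unsigned lcore_id;

        printf("%u lcores, master is %u\n",
               rte_lcore_count(), rte_get_master_lcore());

        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
            printf("  slave lcore %u: socket %u, index %d\n",
                   lcore_id,
                   rte_lcore_to_socket_id(lcore_id),
                   rte_lcore_index(lcore_id));
        }
    }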
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_log.h b/src/dpdk_lib18/librte_eal/common/include/rte_log.h
new file mode 100755
index 00000000..db1ea08c
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_log.h
@@ -0,0 +1,308 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_LOG_H_
+#define _RTE_LOG_H_
+
+/**
+ * @file
+ *
+ * RTE Logs API
+ *
+ * This file provides a log API to RTE applications.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+/** The rte_log structure. */
+struct rte_logs {
+ uint32_t type; /**< Bitfield with enabled logs. */
+ uint32_t level; /**< Log level. */
+ FILE *file; /**< Pointer to current FILE* for logs. */
+};
+
+/** Global log information */
+extern struct rte_logs rte_logs;
+
+/* SDK log type */
+#define RTE_LOGTYPE_EAL 0x00000001 /**< Log related to eal. */
+#define RTE_LOGTYPE_MALLOC 0x00000002 /**< Log related to malloc. */
+#define RTE_LOGTYPE_RING 0x00000004 /**< Log related to ring. */
+#define RTE_LOGTYPE_MEMPOOL 0x00000008 /**< Log related to mempool. */
+#define RTE_LOGTYPE_TIMER 0x00000010 /**< Log related to timers. */
+#define RTE_LOGTYPE_PMD 0x00000020 /**< Log related to poll mode driver. */
+#define RTE_LOGTYPE_HASH 0x00000040 /**< Log related to hash table. */
+#define RTE_LOGTYPE_LPM 0x00000080 /**< Log related to LPM. */
+#define RTE_LOGTYPE_KNI 0x00000100 /**< Log related to KNI. */
+#define RTE_LOGTYPE_ACL 0x00000200 /**< Log related to ACL. */
+#define RTE_LOGTYPE_POWER 0x00000400 /**< Log related to power. */
+#define RTE_LOGTYPE_METER 0x00000800 /**< Log related to QoS meter. */
+#define RTE_LOGTYPE_SCHED 0x00001000 /**< Log related to QoS port scheduler. */
+#define RTE_LOGTYPE_PORT 0x00002000 /**< Log related to port. */
+#define RTE_LOGTYPE_TABLE 0x00004000 /**< Log related to table. */
+#define RTE_LOGTYPE_PIPELINE 0x00008000 /**< Log related to pipeline. */
+
+/* these log types can be used in an application */
+#define RTE_LOGTYPE_USER1 0x01000000 /**< User-defined log type 1. */
+#define RTE_LOGTYPE_USER2 0x02000000 /**< User-defined log type 2. */
+#define RTE_LOGTYPE_USER3 0x04000000 /**< User-defined log type 3. */
+#define RTE_LOGTYPE_USER4 0x08000000 /**< User-defined log type 4. */
+#define RTE_LOGTYPE_USER5 0x10000000 /**< User-defined log type 5. */
+#define RTE_LOGTYPE_USER6 0x20000000 /**< User-defined log type 6. */
+#define RTE_LOGTYPE_USER7 0x40000000 /**< User-defined log type 7. */
+#define RTE_LOGTYPE_USER8 0x80000000 /**< User-defined log type 8. */
+
+/* Can't use 0, as it gives compiler warnings */
+#define RTE_LOG_EMERG 1U /**< System is unusable. */
+#define RTE_LOG_ALERT 2U /**< Action must be taken immediately. */
+#define RTE_LOG_CRIT 3U /**< Critical conditions. */
+#define RTE_LOG_ERR 4U /**< Error conditions. */
+#define RTE_LOG_WARNING 5U /**< Warning conditions. */
+#define RTE_LOG_NOTICE 6U /**< Normal but significant condition. */
+#define RTE_LOG_INFO 7U /**< Informational. */
+#define RTE_LOG_DEBUG 8U /**< Debug-level messages. */
+
+/** The default log stream. */
+extern FILE *eal_default_log_stream;
+
+/**
+ * Change the stream that will be used by the logging system.
+ *
+ * This can be done at any time. The f argument represents the stream
+ * to be used to send the logs. If f is NULL, the default output is
+ * used (stderr).
+ *
+ * @param f
+ * Pointer to the stream.
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_openlog_stream(FILE *f);
+
+/**
+ * Set the global log level.
+ *
+ * After this call, all logs with a level lower than or equal to both the
+ * given level and the RTE_LOG_LEVEL configuration option will be
+ * displayed.
+ *
+ * @param level
+ * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
+ */
+void rte_set_log_level(uint32_t level);
+
+/**
+ * Get the global log level.
+ */
+uint32_t rte_get_log_level(void);
+
+/**
+ * Enable or disable the log type.
+ *
+ * @param type
+ * Log type, for example, RTE_LOGTYPE_EAL.
+ * @param enable
+ * True for enable; false for disable.
+ */
+void rte_set_log_type(uint32_t type, int enable);
+
+/**
+ * Get the current loglevel for the message being processed.
+ *
+ * Before calling the user-defined stream for logging, the log
+ * subsystem sets a per-lcore variable containing the loglevel and the
+ * logtype of the message being processed. This information can be
+ * accessed by the user-defined log output function through this
+ * function.
+ *
+ * @return
+ * The loglevel of the message being processed.
+ */
+int rte_log_cur_msg_loglevel(void);
+
+/**
+ * Get the current logtype for the message being processed.
+ *
+ * Before calling the user-defined stream for logging, the log
+ * subsystem sets a per-lcore variable containing the loglevel and the
+ * logtype of the message being processed. This information can be
+ * accessed by the user-defined log output function through this
+ * function.
+ *
+ * @return
+ * The logtype of the message being processed.
+ */
+int rte_log_cur_msg_logtype(void);
+
+/**
+ * Enable or disable the history (enabled by default)
+ *
+ * @param enable
+ * Nonzero to enable, or 0 to disable the history.
+ */
+void rte_log_set_history(int enable);
+
+/**
+ * Dump the log history to a file
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_log_dump_history(FILE *f);
+
+/**
+ * Add a log message to the history.
+ *
+ * This function can be called from a user-defined log stream. It adds
+ * the given message in the history that can be dumped using
+ * rte_log_dump_history().
+ *
+ * @param buf
+ * A data buffer containing the message to be saved in the history.
+ * @param size
+ * The length of the data buffer.
+ * @return
+ * - 0: Success.
+ * - (-ENOBUFS) if there is no room to store the message.
+ */
+int rte_log_add_in_history(const char *buf, size_t size);
+
+/**
+ * Generates a log message.
+ *
+ * The message will be sent in the stream defined by the previous call
+ * to rte_openlog_stream().
+ *
+ * The level argument determines if the log should be displayed or
+ * not, depending on the global rte_logs variable.
+ *
+ * The preferred alternative is the RTE_LOG() function because debug logs may
+ * be removed at compilation time if optimization is enabled. Moreover,
+ * logs are automatically prefixed by type when using the macro.
+ *
+ * @param level
+ * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
+ * @param logtype
+ * The log type, for example, RTE_LOGTYPE_EAL.
+ * @param format
+ * The format string, as in printf(3), followed by the variable arguments
+ * required by the format.
+ * @return
+ * - 0: Success.
+ * - Negative on error.
+ */
+int rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
+#ifdef __GNUC__
+#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
+ __attribute__((cold))
+#endif
+#endif
+ __attribute__((format(printf, 3, 4)));
+
+/**
+ * Generates a log message.
+ *
+ * The message will be sent in the stream defined by the previous call
+ * to rte_openlog_stream().
+ *
+ * The level argument determines if the log should be displayed or
+ * not, depending on the global rte_logs variable. A trailing
+ * newline may be added if needed.
+ *
+ * The preferred alternative is the RTE_LOG() because debug logs may be
+ * removed at compilation time.
+ *
+ * @param level
+ * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
+ * @param logtype
+ * The log type, for example, RTE_LOGTYPE_EAL.
+ * @param format
+ * The format string, as in printf(3), followed by the variable arguments
+ * required by the format.
+ * @param ap
+ * The va_list of the variable arguments required by the format.
+ * @return
+ * - 0: Success.
+ * - Negative on error.
+ */
+int rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap)
+ __attribute__((format(printf,3,0)));
+
+/**
+ * Generates a log message.
+ *
+ * RTE_LOG() is equivalent to rte_log() with two differences:
+ *
+ * - RTE_LOG() can be used to remove debug logs at compilation time,
+ * depending on RTE_LOG_LEVEL configuration option, and compilation
+ * optimization level. If optimization is enabled, the tests
+ * involving constants only are pre-computed. If compilation is done
+ * with -O0, these tests will be done at run time.
+ * - The log level and log type names are smaller, for example:
+ * RTE_LOG(INFO, EAL, "this is a %s", "log");
+ *
+ * @param l
+ * Log level. A value between EMERG (1) and DEBUG (8). The short name is
+ * expanded by the macro, so it cannot be an integer value.
+ * @param t
+ * The log type, for example, EAL. The short name is expanded by the
+ * macro, so it cannot be an integer value.
+ * @param fmt
+ * The fmt string, as in printf(3), followed by the variable arguments
+ * required by the format.
+ * @param args
+ * The variable list of arguments according to the format string.
+ * @return
+ * - 0: Success.
+ * - Negative on error.
+ */
+#define RTE_LOG(l, t, ...) \
+ (void)(((RTE_LOG_ ## l <= RTE_LOG_LEVEL) && \
+ (RTE_LOG_ ## l <= rte_logs.level) && \
+ (RTE_LOGTYPE_ ## t & rte_logs.type)) ? \
+ rte_log(RTE_LOG_ ## l, \
+ RTE_LOGTYPE_ ## t, # t ": " __VA_ARGS__) : \
+ 0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_LOG_H_ */
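
A sketch of the logging setup and the RTE_LOG() short names described above (the setup function is illustrative):

    #include <stdio.h>
    #include <rte_log.h>

    static void setup_logging(FILE *logfile)
    {
        /* Redirect logs to the given stream (NULL falls back to stderr). */
        rte_openlog_stream(logfile);

        /* Show everything up to DEBUG and enable the USER1 log type. */
        rte_set_log_level(RTE_LOG_DEBUG);
        rte_set_log_type(RTE_LOGTYPE_USER1, 1);

        /* Short names: INFO expands to RTE_LOG_INFO, USER1 to RTE_LOGTYPE_USER1. */
        RTE_LOG(INFO, USER1, "logging initialised at level %u\n",
                rte_get_log_level());
    }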
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_malloc_heap.h b/src/dpdk_lib18/librte_eal/common/include/rte_malloc_heap.h
new file mode 100755
index 00000000..716216f2
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_malloc_heap.h
@@ -0,0 +1,56 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MALLOC_HEAP_H_
+#define _RTE_MALLOC_HEAP_H_
+
+#include <stddef.h>
+#include <sys/queue.h>
+#include <rte_spinlock.h>
+#include <rte_memory.h>
+
+/* Number of free lists per heap, grouped by size. */
+#define RTE_HEAP_NUM_FREELISTS 5
+
+/**
+ * Structure to hold malloc heap
+ */
+struct malloc_heap {
+ rte_spinlock_t lock;
+ LIST_HEAD(, malloc_elem) free_head[RTE_HEAP_NUM_FREELISTS];
+ unsigned mz_count;
+ unsigned alloc_count;
+ size_t total_size;
+} __rte_cache_aligned;
+
+#endif /* _RTE_MALLOC_HEAP_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_memory.h b/src/dpdk_lib18/librte_eal/common/include/rte_memory.h
new file mode 100755
index 00000000..7f8103f4
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_memory.h
@@ -0,0 +1,218 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMORY_H_
+#define _RTE_MEMORY_H_
+
+/**
+ * @file
+ *
+ * Memory-related RTE API.
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+#include <exec-env/rte_dom0_common.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum rte_page_sizes {
+ RTE_PGSIZE_4K = 1ULL << 12,
+ RTE_PGSIZE_2M = 1ULL << 21,
+ RTE_PGSIZE_1G = 1ULL << 30,
+ RTE_PGSIZE_64K = 1ULL << 16,
+ RTE_PGSIZE_16M = 1ULL << 24,
+ RTE_PGSIZE_16G = 1ULL << 34
+};
+
+#define SOCKET_ID_ANY -1 /**< Any NUMA socket. */
+#ifndef RTE_CACHE_LINE_SIZE
+#define RTE_CACHE_LINE_SIZE 64 /**< Cache line size. */
+#endif
+#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) /**< Cache line mask. */
+
+#define RTE_CACHE_LINE_ROUNDUP(size) \
+ (RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))
+/**< Return the first cache-aligned value greater than or equal to size. */
+
+/**
+ * Force alignment to cache line.
+ */
+#define __rte_cache_aligned __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)))
+
+typedef uint64_t phys_addr_t; /**< Physical address definition. */
+#define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)
+
+/**
+ * Physical memory segment descriptor.
+ */
+struct rte_memseg {
+ phys_addr_t phys_addr; /**< Start physical address. */
+ union {
+ void *addr; /**< Start virtual address. */
+ uint64_t addr_64; /**< Makes sure addr is always 64 bits */
+ };
+#ifdef RTE_LIBRTE_IVSHMEM
+ phys_addr_t ioremap_addr; /**< Real physical address inside the VM */
+#endif
+ size_t len; /**< Length of the segment. */
+ uint64_t hugepage_sz; /**< The pagesize of underlying memory */
+ int32_t socket_id; /**< NUMA socket ID. */
+ uint32_t nchannel; /**< Number of channels. */
+ uint32_t nrank; /**< Number of ranks. */
+#ifdef RTE_LIBRTE_XEN_DOM0
+ /**< store segment MFNs */
+ uint64_t mfn[DOM0_NUM_MEMBLOCK];
+#endif
+} __attribute__((__packed__));
+
+/**
+ * Lock page in physical memory and prevent from swapping.
+ *
+ * @param virt
+ * The virtual address.
+ * @return
+ * 0 on success, negative on error.
+ */
+int rte_mem_lock_page(const void *virt);
+
+/**
+ * Get physical address of any mapped virtual address in the current process.
+ * It is found by browsing the /proc/self/pagemap special file.
+ * The page must be locked.
+ *
+ * @param virt
+ * The virtual address.
+ * @return
+ * The physical address or RTE_BAD_PHYS_ADDR on error.
+ */
+phys_addr_t rte_mem_virt2phy(const void *virt);
+
+/**
+ * Get the layout of the available physical memory.
+ *
+ * It can be useful for an application to have the full physical
+ * memory layout to decide the size of a memory zone to reserve. This
+ * table is stored in rte_config (see rte_eal_get_configuration()).
+ *
+ * @return
+ * - On success, return a pointer to a read-only table of struct
+ * rte_physmem_desc elements, containing the layout of all
+ * addressable physical memory. The last element of the table
+ * contains a NULL address.
+ * - On error, return NULL. This should not happen since it is a fatal
+ * error that will probably cause the entire system to panic.
+ */
+const struct rte_memseg *rte_eal_get_physmem_layout(void);
+
+/**
+ * Dump the physical memory layout to the console.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_dump_physmem_layout(FILE *f);
+
+/**
+ * Get the total amount of available physical memory.
+ *
+ * @return
+ * The total amount of available physical memory in bytes.
+ */
+uint64_t rte_eal_get_physmem_size(void);
+
+/**
+ * Get the number of memory channels.
+ *
+ * @return
+ * The number of memory channels on the system. The value is 0 if unknown
+ * or not the same on all devices.
+ */
+unsigned rte_memory_get_nchannel(void);
+
+/**
+ * Get the number of memory ranks.
+ *
+ * @return
+ * The number of memory ranks on the system. The value is 0 if unknown or
+ * not the same on all devices.
+ */
+unsigned rte_memory_get_nrank(void);
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+/**
+ * Return the machine address that corresponds to a guest physical address
+ * (Xen Dom0 only).
+ *
+ * @param memseg_id
+ * Index of the memory segment that contains the address.
+ * @param phy_addr
+ * The guest physical address to translate.
+ *
+ * @return
+ * The translated address, or an error value.
+ */
+phys_addr_t rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
+
+/**
+ * Memory init for supporting applications running on Xen Domain0.
+ *
+ * @return
+ * 0 on success, a negative value on error.
+ */
+int rte_xen_dom0_memory_init(void);
+
+/**
+ * Attach to the memory segments of the primary process on Xen Domain0.
+ *
+ * @return
+ * 0 on success, a negative value on error.
+ */
+int rte_xen_dom0_memory_attach(void);
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMORY_H_ */
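
A sketch walking the physical memory layout using the API above (iteration stops at the NULL-address terminator described in the rte_eal_get_physmem_layout() contract):

    #include <stdio.h>
    #include <rte_memory.h>

    static void print_physmem(void)
    {
        const struct rte_memseg *ms = rte_eal_get_physmem_layout();
        unsigned i;

        for (i = 0; ms != NULL && ms[i].addr != NULL; i++)
            printf("segment %u: %zu bytes, socket %d, hugepage size %llu\n",
                   i, ms[i].len, ms[i].socket_id,
                   (unsigned long long)ms[i].hugepage_sz);

        printf("total physical memory: %llu bytes\n",
               (unsigned long long)rte_eal_get_physmem_size());
        rte_dump_physmem_layout(stdout);
    }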
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_memzone.h b/src/dpdk_lib18/librte_eal/common/include/rte_memzone.h
new file mode 100755
index 00000000..81b6ad40
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_memzone.h
@@ -0,0 +1,278 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMZONE_H_
+#define _RTE_MEMZONE_H_
+
+/**
+ * @file
+ * RTE Memzone
+ *
+ * The goal of the memzone allocator is to reserve contiguous
+ * portions of physical memory. These zones are identified by a name.
+ *
+ * The memzone descriptors are shared by all partitions and are
+ * located in a known place of physical memory. This zone is accessed
+ * using rte_eal_get_configuration(). The lookup (by name) of a
+ * memory zone can be done in any partition and returns the same
+ * physical address.
+ *
+ * A reserved memory zone cannot be unreserved. The reservation shall
+ * be done at initialization time only.
+ */
+
+#include <stdio.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_MEMZONE_2MB 0x00000001 /**< Use 2MB pages. */
+#define RTE_MEMZONE_1GB 0x00000002 /**< Use 1GB pages. */
+#define RTE_MEMZONE_16MB 0x00000100 /**< Use 16MB pages. */
+#define RTE_MEMZONE_16GB 0x00000200 /**< Use 16GB pages. */
+#define RTE_MEMZONE_SIZE_HINT_ONLY 0x00000004 /**< Use available page size */
+
+/**
+ * A structure describing a memzone, which is a contiguous portion of
+ * physical memory identified by a name.
+ */
+struct rte_memzone {
+
+#define RTE_MEMZONE_NAMESIZE 32 /**< Maximum length of memory zone name.*/
+ char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the memory zone. */
+
+ phys_addr_t phys_addr; /**< Start physical address. */
+ union {
+ void *addr; /**< Start virtual address. */
+ uint64_t addr_64; /**< Makes sure addr is always 64-bits */
+ };
+#ifdef RTE_LIBRTE_IVSHMEM
+ phys_addr_t ioremap_addr; /**< Real physical address inside the VM */
+#endif
+ size_t len; /**< Length of the memzone. */
+
+ uint64_t hugepage_sz; /**< The page size of underlying memory */
+
+ int32_t socket_id; /**< NUMA socket ID. */
+
+ uint32_t flags; /**< Characteristics of this memzone. */
+ uint32_t memseg_id; /**< Memory segment ID that the memzone belongs to. */
+} __attribute__((__packed__));
+
+/**
+ * Reserve a portion of physical memory.
+ *
+ * This function reserves some memory and returns a pointer to a
+ * correctly filled memzone descriptor. If the allocation cannot be
+ * done, return NULL. Note: A reserved zone cannot be freed.
+ *
+ * @param name
+ * The name of the memzone. If it already exists, the function will
+ * fail and return NULL.
+ * @param len
+ * The size of the memory to be reserved. If it
+ * is 0, the biggest contiguous zone will be reserved.
+ * @param socket_id
+ * The socket identifier in the case of
+ * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * The flags parameter is used to request memzones to be
+ * taken from 1GB or 2MB hugepages.
+ * - RTE_MEMZONE_2MB - Reserve from 2MB pages
+ * - RTE_MEMZONE_1GB - Reserve from 1GB pages
+ * - RTE_MEMZONE_16MB - Reserve from 16MB pages
+ * - RTE_MEMZONE_16GB - Reserve from 16GB pages
+ * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
+ * the requested page size is unavailable.
+ * If this flag is not set, the function
+ * will return error on an unavailable size
+ * request.
+ * @return
+ * A pointer to a correctly-filled read-only memzone descriptor, or NULL
+ * on error.
+ * On error case, rte_errno will be set appropriately:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ * - EINVAL - invalid parameters
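+ *
+ * Illustrative usage sketch (the zone name "example_zone" is hypothetical):
+ *
+ * @code
+ * const struct rte_memzone *mz;
+ *
+ * mz = rte_memzone_reserve("example_zone", 2 * 1024 * 1024,
+ *                          SOCKET_ID_ANY, RTE_MEMZONE_2MB);
+ * if (mz == NULL) {
+ *         // reservation failed; rte_errno holds the reason
+ * }
+ * @endcode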
+ */
+const struct rte_memzone *rte_memzone_reserve(const char *name,
+ size_t len, int socket_id,
+ unsigned flags);
+
+/**
+ * Reserve a portion of physical memory with alignment on a specified
+ * boundary.
+ *
+ * This function reserves some memory with alignment on a specified
+ * boundary, and returns a pointer to a correctly filled memzone
+ * descriptor. If the allocation cannot be done or if the alignment
+ * is not a power of 2, returns NULL.
+ * Note: A reserved zone cannot be freed.
+ *
+ * @param name
+ * The name of the memzone. If it already exists, the function will
+ * fail and return NULL.
+ * @param len
+ * The size of the memory to be reserved. If it
+ * is 0, the biggest contiguous zone will be reserved.
+ * @param socket_id
+ * The socket identifier in the case of
+ * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * The flags parameter is used to request memzones to be
+ * taken from 1GB or 2MB hugepages.
+ * - RTE_MEMZONE_2MB - Reserve from 2MB pages
+ * - RTE_MEMZONE_1GB - Reserve from 1GB pages
+ * - RTE_MEMZONE_16MB - Reserve from 16MB pages
+ * - RTE_MEMZONE_16GB - Reserve from 16GB pages
+ * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
+ * the requested page size is unavailable.
+ * If this flag is not set, the function
+ * will return error on an unavailable size
+ * request.
+ * @param align
+ * Alignment for resulting memzone. Must be a power of 2.
+ * @return
+ * A pointer to a correctly-filled read-only memzone descriptor, or NULL
+ * on error.
+ * On error case, rte_errno will be set appropriately:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ * - EINVAL - invalid parameters
+ */
+const struct rte_memzone *rte_memzone_reserve_aligned(const char *name,
+ size_t len, int socket_id,
+ unsigned flags, unsigned align);
+
+/**
+ * Reserve a portion of physical memory with specified alignment and
+ * boundary.
+ *
+ * This function reserves some memory with specified alignment and
+ * boundary, and returns a pointer to a correctly filled memzone
+ * descriptor. If the allocation cannot be done or if the alignment
+ * or boundary are not a power of 2, returns NULL.
+ * The memory buffer is reserved so that it does not cross the specified
+ * boundary. This implies that the requested length must be less than or
+ * equal to the boundary.
+ * Note: A reserved zone cannot be freed.
+ *
+ * @param name
+ * The name of the memzone. If it already exists, the function will
+ * fail and return NULL.
+ * @param len
+ * The size of the memory to be reserved. If it
+ * is 0, the biggest contiguous zone will be reserved.
+ * @param socket_id
+ * The socket identifier in the case of
+ * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * The flags parameter is used to request memzones to be
+ * taken from 1GB or 2MB hugepages.
+ * - RTE_MEMZONE_2MB - Reserve from 2MB pages
+ * - RTE_MEMZONE_1GB - Reserve from 1GB pages
+ * - RTE_MEMZONE_16MB - Reserve from 16MB pages
+ * - RTE_MEMZONE_16GB - Reserve from 16GB pages
+ * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
+ * the requested page size is unavailable.
+ * If this flag is not set, the function
+ * will return error on an unavailable size
+ * request.
+ * @param align
+ * Alignment for resulting memzone. Must be a power of 2.
+ * @param bound
+ * Boundary for resulting memzone. Must be a power of 2 or zero.
+ * Zero value implies no boundary condition.
+ * @return
+ * A pointer to a correctly-filled read-only memzone descriptor, or NULL
+ * on error.
+ * On error case, rte_errno will be set appropriately:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ * - EINVAL - invalid parameters
+ */
+const struct rte_memzone *rte_memzone_reserve_bounded(const char *name,
+ size_t len, int socket_id,
+ unsigned flags, unsigned align, unsigned bound);
+
+/**
+ * Look up a memzone.
+ *
+ * Get a pointer to a descriptor of an already reserved memory
+ * zone identified by the name given as an argument.
+ *
+ * @param name
+ * The name of the memzone.
+ * @return
+ * A pointer to a read-only memzone descriptor.
+ */
+const struct rte_memzone *rte_memzone_lookup(const char *name);
+
+/**
+ * Dump all reserved memzones to the console.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_memzone_dump(FILE *f);
+
+/**
+ * Walk the list of all memzones.
+ *
+ * @param func
+ * Iterator function
+ * @param arg
+ * Argument passed to iterator
+ */
+void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *arg),
+ void *arg);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMZONE_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_pci.h b/src/dpdk_lib18/librte_eal/common/include/rte_pci.h
new file mode 100755
index 00000000..66ed7933
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_pci.h
@@ -0,0 +1,305 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* BSD LICENSE
+ *
+ * Copyright 2013-2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PCI_H_
+#define _RTE_PCI_H_
+
+/**
+ * @file
+ *
+ * RTE PCI Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+
+TAILQ_HEAD(pci_device_list, rte_pci_device); /**< PCI devices in a doubly-linked queue. */
+TAILQ_HEAD(pci_driver_list, rte_pci_driver); /**< PCI drivers in a doubly-linked queue. */
+
+extern struct pci_driver_list pci_driver_list; /**< Global list of PCI drivers. */
+extern struct pci_device_list pci_device_list; /**< Global list of PCI devices. */
+
+/** Pathname of PCI devices directory. */
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+
+/** Formatting string for PCI device identifier: Ex: 0000:00:01.0 */
+#define PCI_PRI_FMT "%.4"PRIx16":%.2"PRIx8":%.2"PRIx8".%"PRIx8
+
+/** Short formatting string, without domain, for PCI device: Ex: 00:01.0 */
+#define PCI_SHORT_PRI_FMT "%.2"PRIx8":%.2"PRIx8".%"PRIx8
+
+/** Nb. of values in PCI device identifier format string. */
+#define PCI_FMT_NVAL 4
+
+/** Nb. of values in PCI resource format. */
+#define PCI_RESOURCE_FMT_NVAL 3
+
+/**
+ * A structure describing a PCI resource.
+ */
+struct rte_pci_resource {
+ uint64_t phys_addr; /**< Physical address, 0 if no resource. */
+ uint64_t len; /**< Length of the resource. */
+ void *addr; /**< Virtual address, NULL when not mapped. */
+};
+
+/** Maximum number of PCI resources. */
+#define PCI_MAX_RESOURCE 7
+
+/**
+ * A structure describing an ID for a PCI driver. Each driver provides a
+ * table of these IDs for each device that it supports.
+ */
+struct rte_pci_id {
+ uint16_t vendor_id; /**< Vendor ID or PCI_ANY_ID. */
+ uint16_t device_id; /**< Device ID or PCI_ANY_ID. */
+ uint16_t subsystem_vendor_id; /**< Subsystem vendor ID or PCI_ANY_ID. */
+ uint16_t subsystem_device_id; /**< Subsystem device ID or PCI_ANY_ID. */
+};
+
+/**
+ * A structure describing the location of a PCI device.
+ */
+struct rte_pci_addr {
+ uint16_t domain; /**< Device domain */
+ uint8_t bus; /**< Device bus */
+ uint8_t devid; /**< Device ID */
+ uint8_t function; /**< Device function. */
+};
+
+struct rte_devargs;
+
+/**
+ * A structure describing a PCI device.
+ */
+struct rte_pci_device {
+ TAILQ_ENTRY(rte_pci_device) next; /**< Next probed PCI device. */
+ struct rte_pci_addr addr; /**< PCI location. */
+ struct rte_pci_id id; /**< PCI ID. */
+ struct rte_pci_resource mem_resource[PCI_MAX_RESOURCE]; /**< PCI Memory Resource */
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ const struct rte_pci_driver *driver; /**< Associated driver */
+ uint16_t max_vfs; /**< SR-IOV is enabled if non-zero (maximum number of VFs). */
+ int numa_node; /**< NUMA node connection */
+ struct rte_devargs *devargs; /**< Device user arguments */
+};
+
+/** Any PCI device identifier (vendor, device, ...) */
+#define PCI_ANY_ID (0xffff)
+
+#ifdef __cplusplus
+/** C++ macro used to help build up tables of device IDs */
+#define RTE_PCI_DEVICE(vend, dev) \
+ (vend), \
+ (dev), \
+ PCI_ANY_ID, \
+ PCI_ANY_ID
+#else
+/** Macro used to help build up tables of device IDs */
+#define RTE_PCI_DEVICE(vend, dev) \
+ .vendor_id = (vend), \
+ .device_id = (dev), \
+ .subsystem_vendor_id = PCI_ANY_ID, \
+ .subsystem_device_id = PCI_ANY_ID
+#endif
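+
+/*
+ * Illustrative sketch of an ID table built with RTE_PCI_DEVICE (the table
+ * name and the raw device ID are hypothetical):
+ *
+ *     static struct rte_pci_id my_id_table[] = {
+ *             { RTE_PCI_DEVICE(0x8086, 0x10fb) },
+ *             { .vendor_id = 0 },  // sentinel entry terminating the table
+ *     };
+ */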
+
+struct rte_pci_driver;
+
+/**
+ * Initialisation function for the driver called during PCI probing.
+ */
+typedef int (pci_devinit_t)(struct rte_pci_driver *, struct rte_pci_device *);
+
+/**
+ * A structure describing a PCI driver.
+ */
+struct rte_pci_driver {
+ TAILQ_ENTRY(rte_pci_driver) next; /**< Next in list. */
+ const char *name; /**< Driver name. */
+ pci_devinit_t *devinit; /**< Device init. function. */
+ struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
+ uint32_t drv_flags; /**< Flags controlling handling of device. */
+};
+
+/** Device needs PCI BAR mapping (done with either IGB_UIO or VFIO) */
+#define RTE_PCI_DRV_NEED_MAPPING 0x0001
+/** Device driver must be registered several times until failure - deprecated */
+#pragma GCC poison RTE_PCI_DRV_MULTIPLE
+/** Device needs to be unbound even if no module is provided */
+#define RTE_PCI_DRV_FORCE_UNBIND 0x0004
+/** Device driver supports link state interrupt */
+#define RTE_PCI_DRV_INTR_LSC 0x0008
+
+/** Internal use only - macro used by the PCI address parsing functions. */
+#define GET_PCIADDR_FIELD(in, fd, lim, dlm) \
+do { \
+ unsigned long val; \
+ char *end; \
+ errno = 0; \
+ val = strtoul((in), &end, 16); \
+ if (errno != 0 || end[0] != (dlm) || val > (lim)) \
+ return (-EINVAL); \
+ (fd) = (typeof (fd))val; \
+ (in) = end + 1; \
+} while(0)
+
+/**
+ * Utility function to produce a PCI Bus-Device-Function value
+ * given a string representation. Assumes that the BDF is provided without
+ * a domain prefix (i.e. domain returned is always 0)
+ *
+ * @param input
+ * The input string to be parsed. Should have the format XX:XX.X
+ * @param dev_addr
+ * The PCI Bus-Device-Function address to be returned. Domain will always be
+ * returned as 0
+ * @return
+ * 0 on success, negative on error.
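+ *
+ * Illustrative usage sketch (the address string is hypothetical):
+ *
+ * @code
+ * struct rte_pci_addr addr;
+ *
+ * if (eal_parse_pci_BDF("00:01.0", &addr) == 0) {
+ *         // addr.bus == 0x00, addr.devid == 0x01, addr.function == 0x0
+ * }
+ * @endcode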
+ */
+static inline int
+eal_parse_pci_BDF(const char *input, struct rte_pci_addr *dev_addr)
+{
+ dev_addr->domain = 0;
+ GET_PCIADDR_FIELD(input, dev_addr->bus, UINT8_MAX, ':');
+ GET_PCIADDR_FIELD(input, dev_addr->devid, UINT8_MAX, '.');
+ GET_PCIADDR_FIELD(input, dev_addr->function, UINT8_MAX, 0);
+ return (0);
+}
+
+/**
+ * Utility function to produce a PCI Bus-Device-Function value
+ * given a string representation. Assumes that the BDF is provided including
+ * a domain prefix.
+ *
+ * @param input
+ * The input string to be parsed. Should have the format XXXX:XX:XX.X
+ * @param dev_addr
+ * The PCI Bus-Device-Function address to be returned
+ * @return
+ * 0 on success, negative on error.
+ */
+static inline int
+eal_parse_pci_DomBDF(const char *input, struct rte_pci_addr *dev_addr)
+{
+ GET_PCIADDR_FIELD(input, dev_addr->domain, UINT16_MAX, ':');
+ GET_PCIADDR_FIELD(input, dev_addr->bus, UINT8_MAX, ':');
+ GET_PCIADDR_FIELD(input, dev_addr->devid, UINT8_MAX, '.');
+ GET_PCIADDR_FIELD(input, dev_addr->function, UINT8_MAX, 0);
+ return (0);
+}
+#undef GET_PCIADDR_FIELD
+
+/**
+ * Probe the PCI bus for registered drivers.
+ *
+ * Scan the content of the PCI bus, and call the probe() function for
+ * all registered drivers that have a matching entry in their id_table
+ * for the discovered devices.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_eal_pci_probe(void);
+
+/**
+ * Dump the content of the PCI bus.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_eal_pci_dump(FILE *f);
+
+/**
+ * Register a PCI driver.
+ *
+ * @param driver
+ * A pointer to a rte_pci_driver structure describing the driver
+ * to be registered.
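+ *
+ * Illustrative registration sketch (the driver name, the init callback
+ * "my_devinit" and the ID table "my_id_table" are hypothetical):
+ *
+ * @code
+ * static struct rte_pci_driver my_driver = {
+ *         .name = "my_driver",
+ *         .devinit = my_devinit,
+ *         .id_table = my_id_table,
+ *         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ * };
+ *
+ * rte_eal_pci_register(&my_driver);
+ * @endcode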
+ */
+void rte_eal_pci_register(struct rte_pci_driver *driver);
+
+/**
+ * Unregister a PCI driver.
+ *
+ * @param driver
+ * A pointer to a rte_pci_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_eal_pci_unregister(struct rte_pci_driver *driver);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PCI_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_feature_defs.h b/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_feature_defs.h
new file mode 100755
index 00000000..6316b6dd
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_feature_defs.h
@@ -0,0 +1,45 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PCI_DEV_DEFS_H_
+#define _RTE_PCI_DEV_DEFS_H_
+
+/* interrupt mode */
+enum rte_intr_mode {
+ RTE_INTR_MODE_NONE = 0,
+ RTE_INTR_MODE_LEGACY,
+ RTE_INTR_MODE_MSI,
+ RTE_INTR_MODE_MSIX
+};
+
+#endif /* _RTE_PCI_DEV_DEFS_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_features.h b/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_features.h
new file mode 100755
index 00000000..01200de9
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_features.h
@@ -0,0 +1,44 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PCI_DEV_FEATURES_H
+#define _RTE_PCI_DEV_FEATURES_H
+
+#include <rte_pci_dev_feature_defs.h>
+
+#define RTE_INTR_MODE_NONE_NAME "none"
+#define RTE_INTR_MODE_LEGACY_NAME "legacy"
+#define RTE_INTR_MODE_MSI_NAME "msi"
+#define RTE_INTR_MODE_MSIX_NAME "msix"
+
+#endif
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_ids.h b/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_ids.h
new file mode 100755
index 00000000..c922de92
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_pci_dev_ids.h
@@ -0,0 +1,540 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/**
+ * @file
+ *
+ * This file contains a list of the PCI device IDs recognised by DPDK, which
+ * can be used to fill out an array of structures describing the devices.
+ *
+ * Currently four families of devices are recognised: those supported by the
+ * EM driver, by the IGB driver, by the IXGBE driver, and by the virtio
+ * driver, a para-virtualization driver that runs in a guest virtual machine.
+ * The inclusion of these in an array built using this file depends on the
+ * definition of
+ * RTE_PCI_DEV_ID_DECL_EM
+ * RTE_PCI_DEV_ID_DECL_IGB
+ * RTE_PCI_DEV_ID_DECL_IGBVF
+ * RTE_PCI_DEV_ID_DECL_IXGBE
+ * RTE_PCI_DEV_ID_DECL_IXGBEVF
+ * RTE_PCI_DEV_ID_DECL_I40E
+ * RTE_PCI_DEV_ID_DECL_I40EVF
+ * RTE_PCI_DEV_ID_DECL_VIRTIO
+ * at the time when this file is included.
+ *
+ * In order to populate an array, the user of this file must define this macro:
+ * RTE_PCI_DEV_ID_DECL_IXGBE(vendorID, deviceID). For example:
+ *
+ * @code
+ * struct device {
+ * int vend;
+ * int dev;
+ * };
+ *
+ * struct device devices[] = {
+ * #define RTE_PCI_DEV_ID_DECL_IXGBE(vendorID, deviceID) {vend, dev},
+ * #include <rte_pci_dev_ids.h>
+ * };
+ * @endcode
+ *
+ * Note that this file can be included multiple times within the same file.
+ */
+
+#ifndef RTE_PCI_DEV_ID_DECL_EM
+#define RTE_PCI_DEV_ID_DECL_EM(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_IGB
+#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_IGBVF
+#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_IXGBE
+#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_IXGBEVF
+#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_I40E
+#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_I40EVF
+#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_VIRTIO
+#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev)
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_VMXNET3
+#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev)
+#endif
+
+#ifndef PCI_VENDOR_ID_INTEL
+/** Vendor ID used by Intel devices */
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
+#ifndef PCI_VENDOR_ID_QUMRANET
+/** Vendor ID used by virtio devices */
+#define PCI_VENDOR_ID_QUMRANET 0x1AF4
+#endif
+
+#ifndef PCI_VENDOR_ID_VMWARE
+/** Vendor ID used by VMware devices */
+#define PCI_VENDOR_ID_VMWARE 0x15AD
+#endif
+
+/******************** Physical EM devices from e1000_hw.h ********************/
+
+#define E1000_DEV_ID_82542 0x1000
+#define E1000_DEV_ID_82543GC_FIBER 0x1001
+#define E1000_DEV_ID_82543GC_COPPER 0x1004
+#define E1000_DEV_ID_82544EI_COPPER 0x1008
+#define E1000_DEV_ID_82544EI_FIBER 0x1009
+#define E1000_DEV_ID_82544GC_COPPER 0x100C
+#define E1000_DEV_ID_82544GC_LOM 0x100D
+#define E1000_DEV_ID_82540EM 0x100E
+#define E1000_DEV_ID_82540EM_LOM 0x1015
+#define E1000_DEV_ID_82540EP_LOM 0x1016
+#define E1000_DEV_ID_82540EP 0x1017
+#define E1000_DEV_ID_82540EP_LP 0x101E
+#define E1000_DEV_ID_82545EM_COPPER 0x100F
+#define E1000_DEV_ID_82545EM_FIBER 0x1011
+#define E1000_DEV_ID_82545GM_COPPER 0x1026
+#define E1000_DEV_ID_82545GM_FIBER 0x1027
+#define E1000_DEV_ID_82545GM_SERDES 0x1028
+#define E1000_DEV_ID_82546EB_COPPER 0x1010
+#define E1000_DEV_ID_82546EB_FIBER 0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82546GB_COPPER 0x1079
+#define E1000_DEV_ID_82546GB_FIBER 0x107A
+#define E1000_DEV_ID_82546GB_SERDES 0x107B
+#define E1000_DEV_ID_82546GB_PCIE 0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_82541EI 0x1013
+#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER_LOM 0x1014
+#define E1000_DEV_ID_82541ER 0x1078
+#define E1000_DEV_ID_82541GI 0x1076
+#define E1000_DEV_ID_82541GI_LF 0x107C
+#define E1000_DEV_ID_82541GI_MOBILE 0x1077
+#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82547EI_MOBILE 0x101A
+#define E1000_DEV_ID_82547GI 0x1075
+#define E1000_DEV_ID_82571EB_COPPER 0x105E
+#define E1000_DEV_ID_82571EB_FIBER 0x105F
+#define E1000_DEV_ID_82571EB_SERDES 0x1060
+#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC
+#define E1000_DEV_ID_82572EI_COPPER 0x107D
+#define E1000_DEV_ID_82572EI_FIBER 0x107E
+#define E1000_DEV_ID_82572EI_SERDES 0x107F
+#define E1000_DEV_ID_82572EI 0x10B9
+#define E1000_DEV_ID_82573E 0x108B
+#define E1000_DEV_ID_82573E_IAMT 0x108C
+#define E1000_DEV_ID_82573L 0x109A
+#define E1000_DEV_ID_82574L 0x10D3
+#define E1000_DEV_ID_82574LA 0x10F6
+#define E1000_DEV_ID_82583V 0x150C
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+#define E1000_DEV_ID_ICH8_82567V_3 0x1501
+#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
+#define E1000_DEV_ID_ICH8_IGP_C 0x104B
+#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M 0x104D
+#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
+#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
+#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
+#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
+#define E1000_DEV_ID_ICH9_BM 0x10E5
+#define E1000_DEV_ID_ICH9_IGP_C 0x294C
+#define E1000_DEV_ID_ICH9_IFE 0x10C0
+#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3
+#define E1000_DEV_ID_ICH9_IFE_G 0x10C2
+#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
+#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
+#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
+#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
+#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
+
+#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
+#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
+#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
+#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
+#define E1000_DEV_ID_PCH2_LV_LM 0x1502
+#define E1000_DEV_ID_PCH2_LV_V 0x1503
+#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A
+#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
+#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
+#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
+
+/*
+ * Tested (supported) on VM emulated HW.
+ */
+
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82540EM)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82545EM_COPPER)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82545EM_FIBER)
+
+/*
+ * Tested (supported) on real HW.
+ */
+
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82546EB_COPPER)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82546EB_FIBER)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82546EB_QUAD_COPPER)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_COPPER)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_FIBER)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_SERDES)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82572EI_COPPER)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82572EI_FIBER)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82572EI_SERDES)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82572EI)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82573L)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82574L)
+RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82574LA)
+
+/******************** Physical IGB devices from e1000_hw.h ********************/
+
+#define E1000_DEV_ID_82576 0x10C9
+#define E1000_DEV_ID_82576_FIBER 0x10E6
+#define E1000_DEV_ID_82576_SERDES 0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
+#define E1000_DEV_ID_82576_NS 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES 0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
+#define E1000_DEV_ID_82575EB_COPPER 0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
+#define E1000_DEV_ID_82580_COPPER 0x150E
+#define E1000_DEV_ID_82580_FIBER 0x150F
+#define E1000_DEV_ID_82580_SERDES 0x1510
+#define E1000_DEV_ID_82580_SGMII 0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
+#define E1000_DEV_ID_I350_COPPER 0x1521
+#define E1000_DEV_ID_I350_FIBER 0x1522
+#define E1000_DEV_ID_I350_SERDES 0x1523
+#define E1000_DEV_ID_I350_SGMII 0x1524
+#define E1000_DEV_ID_I350_DA4 0x1546
+#define E1000_DEV_ID_I210_COPPER 0x1533
+#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
+#define E1000_DEV_ID_I210_COPPER_IT 0x1535
+#define E1000_DEV_ID_I210_FIBER 0x1536
+#define E1000_DEV_ID_I210_SERDES 0x1537
+#define E1000_DEV_ID_I210_SGMII 0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
+#define E1000_DEV_ID_I211_COPPER 0x1539
+#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
+#define E1000_DEV_ID_I354_SGMII 0x1F41
+#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
+#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
+
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_FIBER)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_SERDES)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_QUAD_COPPER)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_NS)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_NS_SERDES)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_SERDES_QUAD)
+
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82575EB_COPPER)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER)
+
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_COPPER)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_FIBER)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_SERDES)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_SGMII)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_COPPER_DUAL)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_QUAD_FIBER)
+
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_COPPER)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_FIBER)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_SERDES)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_SGMII)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_DA4)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I210_COPPER)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I210_COPPER_OEM1)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I210_COPPER_IT)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I210_FIBER)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I210_SERDES)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I210_SGMII)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I211_COPPER)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I354_SGMII)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SGMII)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SERDES)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE)
+RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SFP)
+
+/****************** Physical IXGBE devices from ixgbe_type.h ******************/
+
+#define IXGBE_DEV_ID_82598 0x10B6
+#define IXGBE_DEV_ID_82598_BX 0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
+#define IXGBE_DEV_ID_82598AT 0x10C8
+#define IXGBE_DEV_ID_82598AT2 0x150B
+#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
+#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
+#define IXGBE_DEV_ID_82599_KX4 0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
+#define IXGBE_DEV_ID_82599_KR 0x1517
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_82599_CX4 0x10F9
+#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
+#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
+#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
+#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
+#define IXGBE_DEV_ID_82599EN_SFP 0x1557
+#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
+#define IXGBE_DEV_ID_82599_LS 0x154F
+#define IXGBE_DEV_ID_X540T 0x1528
+#define IXGBE_DEV_ID_X540T1 0x1560
+#define IXGBE_DEV_ID_X550EM_X 0x15A7
+#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
+#define IXGBE_DEV_ID_X550T 0x1563
+#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
+#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
+
+#ifdef RTE_NIC_BYPASS
+#define IXGBE_DEV_ID_82599_BYPASS 0x155D
+#endif
+
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_BX)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
+ IXGBE_DEV_ID_82598AF_SINGLE_PORT)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT2)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
+ IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KR)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
+ IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
+ IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_CX4)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_SFP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_RNDC)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_560FLR)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_ECNA_DP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_FCOE)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_EM)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_SF2)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599EN_SFP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_T3_LOM)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_LS)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T1)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_SFP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550T)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_KX4)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_KR)
+
+#ifdef RTE_NIC_BYPASS
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BYPASS)
+#endif
+
+/*************** Physical I40E devices from i40e_type.h *****************/
+
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_SFP_XL710)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QEMU)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_KX_A)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_KX_B)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_KX_C)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_A)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_B)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_C)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_10G_BASE_T)
+
+/****************** Virtual IGB devices from e1000_hw.h ******************/
+
+#define E1000_DEV_ID_82576_VF 0x10CA
+#define E1000_DEV_ID_82576_VF_HV 0x152D
+#define E1000_DEV_ID_I350_VF 0x1520
+#define E1000_DEV_ID_I350_VF_HV 0x152F
+
+RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_VF)
+RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_VF_HV)
+RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_VF)
+RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_VF_HV)
+
+/****************** Virtual IXGBE devices from ixgbe_type.h ******************/
+
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_82599_VF_HV 0x152E
+#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X550_VF_HV 0x1564
+#define IXGBE_DEV_ID_X550_VF 0x1565
+#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_VF)
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_VF_HV)
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540_VF)
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540_VF_HV)
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550_VF_HV)
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550_VF)
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_VF)
+RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV)
+
+/****************** Virtual I40E devices from i40e_type.h ********************/
+
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+
+RTE_PCI_DEV_ID_DECL_I40EVF(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF)
+RTE_PCI_DEV_ID_DECL_I40EVF(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF_HV)
+
+/****************** Virtio devices from virtio.h ******************/
+
+#define QUMRANET_DEV_ID_VIRTIO 0x1000
+
+RTE_PCI_DEV_ID_DECL_VIRTIO(PCI_VENDOR_ID_QUMRANET, QUMRANET_DEV_ID_VIRTIO)
+
+/****************** VMware VMXNET3 devices ******************/
+
+#define VMWARE_DEV_ID_VMXNET3 0x07B0
+
+RTE_PCI_DEV_ID_DECL_VMXNET3(PCI_VENDOR_ID_VMWARE, VMWARE_DEV_ID_VMXNET3)
+
+/*
+ * Undef all RTE_PCI_DEV_ID_DECL_* here.
+ */
+#undef RTE_PCI_DEV_ID_DECL_EM
+#undef RTE_PCI_DEV_ID_DECL_IGB
+#undef RTE_PCI_DEV_ID_DECL_IGBVF
+#undef RTE_PCI_DEV_ID_DECL_IXGBE
+#undef RTE_PCI_DEV_ID_DECL_IXGBEVF
+#undef RTE_PCI_DEV_ID_DECL_I40E
+#undef RTE_PCI_DEV_ID_DECL_I40EVF
+#undef RTE_PCI_DEV_ID_DECL_VIRTIO
+#undef RTE_PCI_DEV_ID_DECL_VMXNET3
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_per_lcore.h b/src/dpdk_lib18/librte_eal/common/include/rte_per_lcore.h
new file mode 100755
index 00000000..5434729a
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_per_lcore.h
@@ -0,0 +1,79 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PER_LCORE_H_
+#define _RTE_PER_LCORE_H_
+
+/**
+ * @file
+ *
+ * Per-lcore variables in RTE
+ *
+ * This file defines an API for instantiating per-lcore "global
+ * variables" that are environment-specific. Note that in all
+ * environments, a "shared variable" is the default when you use a
+ * global variable.
+ *
+ * Parts of this are execution environment specific.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <pthread.h>
+
+/**
+ * Macro to define a per-lcore variable "var" of type "type". Do not
+ * use keywords such as "static" or "volatile" in the type; prefix the
+ * whole macro instead.
+ */
+#define RTE_DEFINE_PER_LCORE(type, name) \
+ __thread __typeof__(type) per_lcore_##name
+
+/**
+ * Macro to declare an extern per-lcore variable "var" of type "type".
+ */
+#define RTE_DECLARE_PER_LCORE(type, name) \
+ extern __thread __typeof__(type) per_lcore_##name
+
+/**
+ * Read/write the per-lcore variable value
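+ *
+ * Illustrative usage sketch (the variable name "lcore_counter" is
+ * hypothetical):
+ *
+ * @code
+ * RTE_DEFINE_PER_LCORE(int, lcore_counter);
+ *
+ * static void increment_counter(void)
+ * {
+ *         RTE_PER_LCORE(lcore_counter)++;
+ * }
+ * @endcode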
+ */
+#define RTE_PER_LCORE(name) (per_lcore_##name)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PER_LCORE_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_random.h b/src/dpdk_lib18/librte_eal/common/include/rte_random.h
new file mode 100755
index 00000000..24ae8363
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_random.h
@@ -0,0 +1,91 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_RANDOM_H_
+#define _RTE_RANDOM_H_
+
+/**
+ * @file
+ *
+ * Pseudo-random Generators in RTE
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/**
+ * Seed the pseudo-random generator.
+ *
+ * The generator is automatically seeded by the EAL init with a timer
+ * value. It may need to be re-seeded by the user with a real random
+ * value.
+ *
+ * @param seedval
+ * The value of the seed.
+ */
+static inline void
+rte_srand(uint64_t seedval)
+{
+ srand48((long unsigned int)seedval);
+}
+
+/**
+ * Get a pseudo-random value.
+ *
+ * This function generates pseudo-random numbers using the linear
+ * congruential algorithm and 48-bit integer arithmetic, called twice
+ * to generate a 64-bit value.
+ *
+ * @return
+ * A pseudo-random value between 0 and (1<<64)-1.
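+ *
+ * Illustrative usage sketch (the seed value is arbitrary):
+ *
+ * @code
+ * rte_srand(42);
+ * uint64_t value = rte_rand();
+ * @endcode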
+ */
+static inline uint64_t
+rte_rand(void)
+{
+ uint64_t val;
+ val = lrand48();
+ val <<= 32;
+ val += lrand48();
+ return val;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _RTE_RANDOM_H_ */
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_rwlock.h b/src/dpdk_lib18/librte_eal/common/include/rte_rwlock.h
new file mode 100755
index 00000000..115731de
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_rwlock.h
@@ -0,0 +1,158 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_RWLOCK_H_
+#define _RTE_RWLOCK_H_
+
+/**
+ * @file
+ *
+ * RTE Read-Write Locks
+ *
+ * This file defines an API for read-write locks. The lock is used to
+ * protect data that allows multiple readers in parallel, but only
+ * one writer. All readers are blocked until the writer is finished
+ * writing.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include <rte_atomic.h>
+
+/**
+ * The rte_rwlock_t type.
+ *
+ * cnt is -1 when write lock is held, and > 0 when read locks are held.
+ */
+typedef struct {
+ volatile int32_t cnt; /**< -1 when W lock held, > 0 when R locks held. */
+} rte_rwlock_t;
+
+/**
+ * A static rwlock initializer.
+ */
+#define RTE_RWLOCK_INITIALIZER { 0 }
+
+/**
+ * Initialize the rwlock to an unlocked state.
+ *
+ * @param rwl
+ * A pointer to the rwlock structure.
+ */
+static inline void
+rte_rwlock_init(rte_rwlock_t *rwl)
+{
+ rwl->cnt = 0;
+}
+
+/**
+ * Take a read lock. Loop until the lock is held.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_read_lock(rte_rwlock_t *rwl)
+{
+ int32_t x;
+ int success = 0;
+
+ while (success == 0) {
+ x = rwl->cnt;
+ /* write lock is held */
+ if (x < 0) {
+ rte_pause();
+ continue;
+ }
+ success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
+ x, x + 1);
+ }
+}
+
+/**
+ * Release a read lock.
+ *
+ * @param rwl
+ * A pointer to the rwlock structure.
+ */
+static inline void
+rte_rwlock_read_unlock(rte_rwlock_t *rwl)
+{
+ rte_atomic32_dec((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+}
+
+/**
+ * Take a write lock. Loop until the lock is held.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_write_lock(rte_rwlock_t *rwl)
+{
+ int32_t x;
+ int success = 0;
+
+ while (success == 0) {
+ x = rwl->cnt;
+ /* a lock is held */
+ if (x != 0) {
+ rte_pause();
+ continue;
+ }
+ success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
+ 0, -1);
+ }
+}
+
+/**
+ * Release a write lock.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_write_unlock(rte_rwlock_t *rwl)
+{
+ rte_atomic32_inc((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RWLOCK_H_ */
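A sketch of the intended reader/writer usage, assuming a statically allocated lock and table (identifiers are illustrative, not part of the patch):

    #include <rte_rwlock.h>

    static rte_rwlock_t tbl_lock = RTE_RWLOCK_INITIALIZER;
    static int tbl[64];

    static int
    reader_sum(void)
    {
        int i, s = 0;

        rte_rwlock_read_lock(&tbl_lock);     /* many readers may hold this */
        for (i = 0; i < 64; i++)
            s += tbl[i];
        rte_rwlock_read_unlock(&tbl_lock);
        return s;
    }

    static void
    writer_set(int idx, int val)
    {
        rte_rwlock_write_lock(&tbl_lock);    /* exclusive, spins until free */
        tbl[idx] = val;
        rte_rwlock_write_unlock(&tbl_lock);
    }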
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_string_fns.h b/src/dpdk_lib18/librte_eal/common/include/rte_string_fns.h
new file mode 100755
index 00000000..cfca2f8d
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_string_fns.h
@@ -0,0 +1,81 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * String-related functions as replacement for libc equivalents
+ */
+
+#ifndef _RTE_STRING_FNS_H_
+#define _RTE_STRING_FNS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Takes the string "string" and splits it at character "delim" up to
+ * maxtokens-1 times, to give "maxtokens" resulting tokens. Like
+ * strtok or strsep functions, this modifies its input string, by replacing
+ * instances of "delim" with '\\0'. All resultant tokens are returned in the
+ * "tokens" array which must have enough entries to hold "maxtokens".
+ *
+ * @param string
+ * The input string to be split into tokens
+ *
+ * @param stringlen
+ * The max length of the input buffer
+ *
+ * @param tokens
+ * The array to hold the pointers to the tokens in the string
+ *
+ * @param maxtokens
+ * The number of elements in the tokens array. At most, maxtokens-1 splits
+ * of the string will be done.
+ *
+ * @param delim
+ * The character on which the split of the data will be done
+ *
+ * @return
+ * The number of tokens in the tokens array.
+ */
+int
+rte_strsplit(char *string, int stringlen,
+ char **tokens, int maxtokens, char delim);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_STRING_FNS_H_ */
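A short sketch of the documented in-place behaviour (buffer contents are hypothetical):

    #include <rte_string_fns.h>

    static int
    split_example(void)
    {
        char buf[] = "eth0,eth1,eth2";   /* modified in place: ',' -> '\0' */
        char *tok[4];
        int n = rte_strsplit(buf, sizeof(buf), tok, 4, ',');

        /* n == 3; tok[0]=="eth0", tok[1]=="eth1", tok[2]=="eth2" */
        return n;
    }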
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_tailq.h b/src/dpdk_lib18/librte_eal/common/include/rte_tailq.h
new file mode 100755
index 00000000..b34e5ede
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_tailq.h
@@ -0,0 +1,215 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_TAILQ_H_
+#define _RTE_TAILQ_H_
+
+/**
+ * @file
+ * Defines the rte_tailq APIs, which are for internal use only
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/queue.h>
+#include <stdio.h>
+
+/** dummy structure type used by the rte_tailq APIs */
+struct rte_tailq_entry {
+ TAILQ_ENTRY(rte_tailq_entry) next; /**< Pointer entries for a tailq list */
+ void *data; /**< Pointer to the data referenced by this tailq entry */
+};
+/** dummy */
+TAILQ_HEAD(rte_tailq_entry_head, rte_tailq_entry);
+
+#define RTE_TAILQ_NAMESIZE 32
+
+/**
+ * The structure defining a tailq header entry for storing
+ * in the rte_config structure in shared memory. Each tailq
+ * is identified by name.
+ * Any library storing a set of objects e.g. rings, mempools, hash-tables,
+ * is recommended to use an entry here, so as to make it easy for
+ * a multi-process app to find already-created elements in shared memory.
+ */
+struct rte_tailq_head {
+ struct rte_tailq_entry_head tailq_head; /**< NOTE: must be first element */
+};
+
+/**
+ * Utility macro to make reserving a tailqueue for a particular struct easier.
+ *
+ * @param name
+ * The name to be given to the tailq - used by lookup to find it later
+ *
+ * @param struct_name
+ * The name of the list type we are using. (Generally this is the same as the
+ * first parameter passed to TAILQ_HEAD macro)
+ *
+ * @return
+ * The return value from rte_eal_tailq_reserve, typecast to the appropriate
+ * structure pointer type.
+ * NULL on error, since the tailq_head is the first
+ * element in the rte_tailq_head structure.
+ */
+#define RTE_TAILQ_RESERVE(name, struct_name) \
+ (struct struct_name *)(&rte_eal_tailq_reserve(name)->tailq_head)
+
+/**
+ * Utility macro to make reserving a tailqueue for a particular struct easier.
+ *
+ * @param idx
+ * The tailq idx defined in rte_tailq_t to be given to the tail queue.
+ * - used by lookup to find it later
+ *
+ * @param struct_name
+ * The name of the list type we are using. (Generally this is the same as the
+ * first parameter passed to TAILQ_HEAD macro)
+ *
+ * @return
+ * The return value from rte_eal_tailq_reserve, typecast to the appropriate
+ * structure pointer type.
+ * NULL on error, since the tailq_head is the first
+ * element in the rte_tailq_head structure.
+ */
+#define RTE_TAILQ_RESERVE_BY_IDX(idx, struct_name) \
+ (struct struct_name *)(&rte_eal_tailq_reserve_by_idx(idx)->tailq_head)
+
+/**
+ * Utility macro to make looking up a tailqueue for a particular struct easier.
+ *
+ * @param name
+ * The name of tailq
+ *
+ * @param struct_name
+ * The name of the list type we are using. (Generally this is the same as the
+ * first parameter passed to TAILQ_HEAD macro)
+ *
+ * @return
+ * The return value from rte_eal_tailq_lookup, typecast to the appropriate
+ * structure pointer type.
+ * NULL on error, since the tailq_head is the first
+ * element in the rte_tailq_head structure.
+ */
+#define RTE_TAILQ_LOOKUP(name, struct_name) \
+ (struct struct_name *)(&rte_eal_tailq_lookup(name)->tailq_head)
+
+/**
+ * Utility macro to make looking up a tailqueue for a particular struct easier.
+ *
+ * @param idx
+ * The tailq idx defined in rte_tailq_t to be given to the tail queue.
+ *
+ * @param struct_name
+ * The name of the list type we are using. (Generally this is the same as the
+ * first parameter passed to TAILQ_HEAD macro)
+ *
+ * @return
+ * The return value from rte_eal_tailq_lookup, typecast to the appropriate
+ * structure pointer type.
+ * NULL on error, since the tailq_head is the first
+ * element in the rte_tailq_head structure.
+ */
+#define RTE_TAILQ_LOOKUP_BY_IDX(idx, struct_name) \
+ (struct struct_name *)(&rte_eal_tailq_lookup_by_idx(idx)->tailq_head)
+
+/**
+ * Reserve a slot in the tailq list for a particular tailq header
+ * Note: this function, along with rte_tailq_lookup, is not multi-thread safe,
+ * and both these functions should only be called from a single thread at a time
+ *
+ * @param name
+ * The name to be given to the tail queue.
+ * @return
+ * A pointer to the newly reserved tailq entry
+ */
+struct rte_tailq_head *rte_eal_tailq_reserve(const char *name);
+
+/**
+ * Reserve a slot in the tailq list for a particular tailq header
+ * Note: this function, along with rte_tailq_lookup, is not multi-thread safe,
+ * and both these functions should only be called from a single thread at a time
+ *
+ * @param idx
+ * The tailq idx defined in rte_tailq_t to be given to the tail queue.
+ * @return
+ * A pointer to the newly reserved tailq entry
+ */
+struct rte_tailq_head *rte_eal_tailq_reserve_by_idx(const unsigned idx);
+
+/**
+ * Dump tail queues to the console.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_dump_tailq(FILE *f);
+
+/**
+ * Lookup for a tail queue.
+ *
+ * Get a pointer to a tail queue header of an already reserved tail
+ * queue identified by the name given as an argument.
+ * Note: this function, along with rte_tailq_reserve, is not multi-thread safe,
+ * and both these functions should only be called from a single thread at a time
+ *
+ * @param name
+ * The name of the queue.
+ * @return
+ * A pointer to the tail queue head structure.
+ */
+struct rte_tailq_head *rte_eal_tailq_lookup(const char *name);
+
+/**
+ * Lookup for a tail queue.
+ *
+ * Get a pointer to a tail queue header of an already reserved tail
+ * queue identified by the name given as an argument.
+ * Note: this function, along with rte_tailq_reserve, is not multi-thread safe,
+ * and both these functions should only be called from a single thread at a time
+ *
+ * @param idx
+ * The tailq idx defined in rte_tailq_t to be given to the tail queue.
+ * @return
+ * A pointer to the tail queue head structure.
+ */
+struct rte_tailq_head *rte_eal_tailq_lookup_by_idx(const unsigned idx);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_TAILQ_H_ */
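A sketch of how a consumer library is expected to use the lookup macro; the tailq name and list type below are illustrative, not taken from the patch:

    #include <rte_tailq.h>

    /* Each library declares its own list type over the generic entry ... */
    TAILQ_HEAD(my_obj_list, rte_tailq_entry);

    static struct my_obj_list *
    get_obj_list(void)
    {
        /* ... and resolves the shared head reserved under that name. */
        return RTE_TAILQ_LOOKUP("MY_OBJECTS", my_obj_list);
    }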
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_tailq_elem.h b/src/dpdk_lib18/librte_eal/common/include/rte_tailq_elem.h
new file mode 100755
index 00000000..f74fc7cb
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_tailq_elem.h
@@ -0,0 +1,90 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * This file contains the type of the tailq elem recognised by DPDK, which
+ * can be used to fill out an array of structures describing the tailq.
+ *
+ * In order to populate an array, the user of this file must define this macro:
+ * rte_tailq_elem(idx, name). For example:
+ *
+ * @code
+ * enum rte_tailq_t {
+ * #define rte_tailq_elem(idx, name) idx,
+ * #define rte_tailq_end(idx) idx
+ * #include <rte_tailq_elem.h>
+ * };
+ *
+ * const char* rte_tailq_names[RTE_MAX_TAILQ] = {
+ * #define rte_tailq_elem(idx, name) name,
+ * #include <rte_tailq_elem.h>
+ * };
+ * @endcode
+ *
+ * Note that this file can be included multiple times within the same file.
+ */
+
+#ifndef rte_tailq_elem
+#define rte_tailq_elem(idx, name)
+#endif /* rte_tailq_elem */
+
+#ifndef rte_tailq_end
+#define rte_tailq_end(idx)
+#endif /* rte_tailq_end */
+
+rte_tailq_elem(RTE_TAILQ_PCI, "PCI_RESOURCE_LIST")
+
+rte_tailq_elem(RTE_TAILQ_MEMPOOL, "RTE_MEMPOOL")
+
+rte_tailq_elem(RTE_TAILQ_RING, "RTE_RING")
+
+rte_tailq_elem(RTE_TAILQ_HASH, "RTE_HASH")
+
+rte_tailq_elem(RTE_TAILQ_FBK_HASH, "RTE_FBK_HASH")
+
+rte_tailq_elem(RTE_TAILQ_LPM, "RTE_LPM")
+
+rte_tailq_elem(RTE_TAILQ_LPM6, "RTE_LPM6")
+
+rte_tailq_elem(RTE_TAILQ_PM, "RTE_PM")
+
+rte_tailq_elem(RTE_TAILQ_ACL, "RTE_ACL")
+
+rte_tailq_elem(RTE_TAILQ_DISTRIBUTOR, "RTE_DISTRIBUTOR")
+
+rte_tailq_end(RTE_TAILQ_NUM)
+
+#undef rte_tailq_elem
+#undef rte_tailq_end
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_version.h b/src/dpdk_lib18/librte_eal/common/include/rte_version.h
new file mode 100755
index 00000000..d2686ae3
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_version.h
@@ -0,0 +1,129 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Definitions of Intel(R) DPDK version numbers
+ */
+
+#ifndef _RTE_VERSION_H_
+#define _RTE_VERSION_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_common.h>
+
+/**
+ * String that appears before the version number
+ */
+#define RTE_VER_PREFIX "RTE"
+
+/**
+ * Major version number i.e. the x in x.y.z
+ */
+#define RTE_VER_MAJOR 1
+
+/**
+ * Minor version number i.e. the y in x.y.z
+ */
+#define RTE_VER_MINOR 8
+
+/**
+ * Patch level number i.e. the z in x.y.z
+ */
+#define RTE_VER_PATCH_LEVEL 0
+
+/**
+ * Extra string to be appended to version number
+ */
+#define RTE_VER_SUFFIX ""
+
+/**
+ * Patch release number
+ * 0-15 = release candidates
+ * 16 = release
+ */
+#define RTE_VER_PATCH_RELEASE 16
+
+/**
+ * Macro to compute a version number usable for comparisons
+ */
+#define RTE_VERSION_NUM(a,b,c,d) ((a) << 24 | (b) << 16 | (c) << 8 | (d))
+
+/**
+ * All version numbers combined into a single value for comparison with RTE_VERSION_NUM()
+ */
+#define RTE_VERSION RTE_VERSION_NUM( \
+ RTE_VER_MAJOR, \
+ RTE_VER_MINOR, \
+ RTE_VER_PATCH_LEVEL, \
+ RTE_VER_PATCH_RELEASE)
+
+/**
+ * Function returning version string
+ * @return
+ * string
+ */
+static inline const char *
+rte_version(void)
+{
+ static char version[32];
+ if (version[0] != 0)
+ return version;
+ if (strlen(RTE_VER_SUFFIX) == 0)
+ snprintf(version, sizeof(version), "%s %d.%d.%d",
+ RTE_VER_PREFIX,
+ RTE_VER_MAJOR,
+ RTE_VER_MINOR,
+ RTE_VER_PATCH_LEVEL);
+ else
+ snprintf(version, sizeof(version), "%s %d.%d.%d%s%d",
+ RTE_VER_PREFIX,
+ RTE_VER_MAJOR,
+ RTE_VER_MINOR,
+ RTE_VER_PATCH_LEVEL,
+ RTE_VER_SUFFIX,
+ RTE_VER_PATCH_RELEASE < 16 ?
+ RTE_VER_PATCH_RELEASE :
+ RTE_VER_PATCH_RELEASE - 16);
+ return version;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_VERSION_H_ */
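A sketch of the two intended uses: compile-time gating with RTE_VERSION/RTE_VERSION_NUM, and the runtime string, which for this tree evaluates to "RTE 1.8.0" since the patch release of 16 selects the empty-suffix branch (example function is illustrative):

    #include <stdio.h>
    #include <rte_version.h>

    static void
    print_dpdk_version(void)
    {
    #if RTE_VERSION >= RTE_VERSION_NUM(1, 8, 0, 0)
        /* code relying on 1.8.x APIs can be gated like this */
    #endif
        printf("built against %s\n", rte_version());
    }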
diff --git a/src/dpdk_lib18/librte_eal/common/include/rte_warnings.h b/src/dpdk_lib18/librte_eal/common/include/rte_warnings.h
new file mode 100755
index 00000000..da80877f
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/common/include/rte_warnings.h
@@ -0,0 +1,84 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Definitions of warnings for use of various insecure functions
+ */
+
+#ifndef _RTE_WARNINGS_H_
+#define _RTE_WARNINGS_H_
+
+#ifdef RTE_INSECURE_FUNCTION_WARNING
+
+/* we need to include all used standard header files so that they appear
+ * _before_ we poison the function names.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#ifdef RTE_LIBRTE_EAL_LINUXAPP
+#include <dirent.h>
+#endif
+
+/* the following functions are deemed not fully secure for use, e.g. they
+ * do not always null-terminate arguments */
+#pragma GCC poison sprintf strtok snprintf vsnprintf
+#pragma GCC poison strlen strcpy strcat
+#pragma GCC poison sscanf
+
+/* other unsafe functions may be implemented as macros so just undef them */
+#ifdef strsep
+#undef strsep
+#else
+#pragma GCC poison strsep
+#endif
+
+#ifdef strncpy
+#undef strncpy
+#else
+#pragma GCC poison strncpy
+#endif
+
+#ifdef strncat
+#undef strncat
+#else
+#pragma GCC poison strncat
+#endif
+
+#endif
+
+#endif /* _RTE_WARNINGS_H_ */
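Illustrative effect when a build defines RTE_INSECURE_FUNCTION_WARNING (e.g. via CFLAGS): any later use of a poisoned identifier becomes a hard compile error (the function below is hypothetical):

    #include <string.h>
    #include <rte_warnings.h>

    static void
    copy_name(char *dst, const char *src)
    {
        strcpy(dst, src);   /* compile error when the poison pragmas are active */
    }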
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/Makefile b/src/dpdk_lib18/librte_eal/linuxapp/Makefile
new file mode 100755
index 00000000..8fcfdf67
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/Makefile
@@ -0,0 +1,45 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(CONFIG_RTE_EAL_IGB_UIO),y)
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += igb_uio
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal
+ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += kni
+endif
+ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += xen_dom0
+endif
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/Makefile b/src/dpdk_lib18/librte_eal/linuxapp/eal/Makefile
new file mode 100755
index 00000000..72ecf3aa
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/Makefile
@@ -0,0 +1,112 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_eal.a
+
+VPATH += $(RTE_SDK)/lib/librte_eal/common
+
+CFLAGS += -I$(SRCDIR)/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_ring
+CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+CFLAGS += -I$(RTE_SDK)/lib/librte_malloc
+CFLAGS += -I$(RTE_SDK)/lib/librte_ether
+CFLAGS += -I$(RTE_SDK)/lib/librte_ivshmem
+CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_ring
+CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_pcap
+CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_af_packet
+CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_xenvirt
+CFLAGS += $(WERROR_FLAGS) -O3
+
+# specific to linuxapp exec-env
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) := eal.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_hugepage_info.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_memory.c
+ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_xen_memory.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_thread.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_log.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_pci_uio.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_pci_vfio.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_pci_vfio_mp_sync.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_debug.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_lcore.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_timer.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_interrupts.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_alarm.c
+ifeq ($(CONFIG_RTE_LIBRTE_IVSHMEM),y)
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_ivshmem.c
+endif
+
+# from common dir
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_memzone.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_log.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_launch.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_memory.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_tailqs.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_errno.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_cpuflags.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_string_fns.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_hexdump.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_devargs.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_options.c
+
+CFLAGS_eal.o := -D_GNU_SOURCE
+CFLAGS_eal_thread.o := -D_GNU_SOURCE
+CFLAGS_eal_log.o := -D_GNU_SOURCE
+CFLAGS_eal_common_log.o := -D_GNU_SOURCE
+CFLAGS_eal_hugepage_info.o := -D_GNU_SOURCE
+CFLAGS_eal_pci.o := -D_GNU_SOURCE
+CFLAGS_eal_pci_vfio.o := -D_GNU_SOURCE
+CFLAGS_eal_common_whitelist.o := -D_GNU_SOURCE
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_eal_thread.o += -Wno-return-type
+endif
+
+INC := rte_interrupts.h rte_kni_common.h rte_dom0_common.h
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP)-include/exec-env := \
+ $(addprefix include/exec-env/,$(INC))
+
+DEPDIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += lib/librte_eal/common
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal.c
new file mode 100755
index 00000000..2fb1acc2
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal.c
@@ -0,0 +1,861 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012-2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <syslog.h>
+#include <getopt.h>
+#include <sys/file.h>
+#include <fcntl.h>
+#include <dlfcn.h>
+#include <stddef.h>
+#include <errno.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_I686)
+#include <sys/io.h>
+#endif
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_random.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_devargs.h>
+#include <rte_common.h>
+#include <rte_version.h>
+#include <rte_atomic.h>
+#include <malloc_heap.h>
+#include <rte_eth_ring.h>
+
+#include "eal_private.h"
+#include "eal_thread.h"
+#include "eal_internal_cfg.h"
+#include "eal_filesystem.h"
+#include "eal_hugepages.h"
+#include "eal_options.h"
+
+#define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
+
+#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 10)
+
+/* Allow the application to print its usage message too if set */
+static rte_usage_hook_t rte_application_usage_hook = NULL;
+
+TAILQ_HEAD(shared_driver_list, shared_driver);
+
+/* Definition for shared object drivers. */
+struct shared_driver {
+ TAILQ_ENTRY(shared_driver) next;
+
+ char name[PATH_MAX];
+ void* lib_handle;
+};
+
+/* List of external loadable drivers */
+static struct shared_driver_list solib_list =
+TAILQ_HEAD_INITIALIZER(solib_list);
+
+/* early configuration structure, when memory config is not mmapped */
+static struct rte_mem_config early_mem_config;
+
+/* define fd variable here, because file needs to be kept open for the
+ * duration of the program, as we hold a write lock on it in the primary proc */
+static int mem_cfg_fd = -1;
+
+static struct flock wr_lock = {
+ .l_type = F_WRLCK,
+ .l_whence = SEEK_SET,
+ .l_start = offsetof(struct rte_mem_config, memseg),
+ .l_len = sizeof(early_mem_config.memseg),
+};
+
+/* Address of global and public configuration */
+static struct rte_config rte_config = {
+ .mem_config = &early_mem_config,
+};
+
+/* internal configuration (per-core) */
+struct lcore_config lcore_config[RTE_MAX_LCORE];
+
+/* internal configuration */
+struct internal_config internal_config;
+
+/* used by rte_rdtsc() */
+int rte_cycles_vmware_tsc_map;
+
+/* Return a pointer to the configuration structure */
+struct rte_config *
+rte_eal_get_configuration(void)
+{
+ return &rte_config;
+}
+
+/* parse a sysfs (or other) file containing one integer value */
+int
+eal_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char *end = NULL;
+
+ if ((f = fopen(filename, "r")) == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
+ __func__, filename);
+ return -1;
+ }
+
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
+ __func__, filename);
+ fclose(f);
+ return -1;
+ }
+ *val = strtoul(buf, &end, 0);
+ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+ RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
+ __func__, filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
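Illustrative call, as it might appear inside this file (the sysfs path is an example and is not referenced by the patch):

    static void
    log_nr_hugepages(void)
    {
        unsigned long nr;

        if (eal_parse_sysfs_value(
                "/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages",
                &nr) == 0)
            RTE_LOG(DEBUG, EAL, "2MB hugepages configured: %lu\n", nr);
    }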
+
+
+/* create memory configuration in shared/mmap memory. Take out
+ * a write lock on the memsegs, so we can auto-detect primary/secondary.
+ * This means we never close the file while running (auto-close on exit).
+ * We also don't lock the whole file, so that in future we can use read-locks
+ * on other parts, e.g. memzones, to detect if there are running secondary
+ * processes. */
+static void
+rte_eal_config_create(void)
+{
+ void *rte_mem_cfg_addr;
+ int retval;
+
+ const char *pathname = eal_runtime_config_path();
+
+ if (internal_config.no_shconf)
+ return;
+
+ /* map the config before hugepage address so that we don't waste a page */
+ if (internal_config.base_virtaddr != 0)
+ rte_mem_cfg_addr = (void *)
+ RTE_ALIGN_FLOOR(internal_config.base_virtaddr -
+ sizeof(struct rte_mem_config), sysconf(_SC_PAGE_SIZE));
+ else
+ rte_mem_cfg_addr = NULL;
+
+ if (mem_cfg_fd < 0){
+ mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
+ if (mem_cfg_fd < 0)
+ rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
+ }
+
+ retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
+ if (retval < 0){
+ close(mem_cfg_fd);
+ rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
+ }
+
+ retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
+ if (retval < 0){
+ close(mem_cfg_fd);
+ rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary "
+ "process running?\n", pathname);
+ }
+
+ rte_mem_cfg_addr = mmap(rte_mem_cfg_addr, sizeof(*rte_config.mem_config),
+ PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
+
+ if (rte_mem_cfg_addr == MAP_FAILED){
+ rte_panic("Cannot mmap memory for rte_config\n");
+ }
+ memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
+ rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
+
+ /* store address of the config in the config itself so that secondary
+ * processes could later map the config into this exact location */
+ rte_config.mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
+
+}
+
+/* attach to an existing shared memory config */
+static void
+rte_eal_config_attach(void)
+{
+ struct rte_mem_config *mem_config;
+
+ const char *pathname = eal_runtime_config_path();
+
+ if (internal_config.no_shconf)
+ return;
+
+ if (mem_cfg_fd < 0){
+ mem_cfg_fd = open(pathname, O_RDWR);
+ if (mem_cfg_fd < 0)
+ rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
+ }
+
+ /* map it as read-only first */
+ mem_config = (struct rte_mem_config *) mmap(NULL, sizeof(*mem_config),
+ PROT_READ, MAP_SHARED, mem_cfg_fd, 0);
+ if (mem_config == MAP_FAILED)
+ rte_panic("Cannot mmap memory for rte_config\n");
+
+ rte_config.mem_config = mem_config;
+}
+
+/* reattach the shared config at exact memory location primary process has it */
+static void
+rte_eal_config_reattach(void)
+{
+ struct rte_mem_config *mem_config;
+ void *rte_mem_cfg_addr;
+
+ if (internal_config.no_shconf)
+ return;
+
+ /* save the address primary process has mapped shared config to */
+ rte_mem_cfg_addr = (void *) (uintptr_t) rte_config.mem_config->mem_cfg_addr;
+
+ /* unmap original config */
+ munmap(rte_config.mem_config, sizeof(struct rte_mem_config));
+
+ /* remap the config at proper address */
+ mem_config = (struct rte_mem_config *) mmap(rte_mem_cfg_addr,
+ sizeof(*mem_config), PROT_READ | PROT_WRITE, MAP_SHARED,
+ mem_cfg_fd, 0);
+ close(mem_cfg_fd);
+ if (mem_config == MAP_FAILED || mem_config != rte_mem_cfg_addr)
+ rte_panic("Cannot mmap memory for rte_config\n");
+
+ rte_config.mem_config = mem_config;
+}
+
+/* Detect if we are a primary or a secondary process */
+enum rte_proc_type_t
+eal_proc_type_detect(void)
+{
+ enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
+ const char *pathname = eal_runtime_config_path();
+
+ /* if we can open the file but not get a write-lock we are a secondary
+ * process. NOTE: if we get a file handle back, we keep that open
+ * and don't close it to prevent a race condition between multiple opens */
+ if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
+ (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
+ ptype = RTE_PROC_SECONDARY;
+
+ RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
+ ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
+
+ return ptype;
+}
+
+/* Sets up rte_config structure with the pointer to shared memory config.*/
+static void
+rte_config_init(void)
+{
+ rte_config.process_type = internal_config.process_type;
+
+ switch (rte_config.process_type){
+ case RTE_PROC_PRIMARY:
+ rte_eal_config_create();
+ break;
+ case RTE_PROC_SECONDARY:
+ rte_eal_config_attach();
+ rte_eal_mcfg_wait_complete(rte_config.mem_config);
+ rte_eal_config_reattach();
+ break;
+ case RTE_PROC_AUTO:
+ case RTE_PROC_INVALID:
+ rte_panic("Invalid process type\n");
+ }
+}
+
+/* Unlocks hugepage directories that were locked by eal_hugepage_info_init */
+static void
+eal_hugedirs_unlock(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_HUGEPAGE_SIZES; i++)
+ {
+ /* skip uninitialized */
+ if (internal_config.hugepage_info[i].lock_descriptor < 0)
+ continue;
+ /* unlock hugepage file */
+ flock(internal_config.hugepage_info[i].lock_descriptor, LOCK_UN);
+ close(internal_config.hugepage_info[i].lock_descriptor);
+ /* reset the field */
+ internal_config.hugepage_info[i].lock_descriptor = -1;
+ }
+}
+
+/* display usage */
+static void
+eal_usage(const char *prgname)
+{
+ printf("\nUsage: %s ", prgname);
+ eal_common_usage();
+ printf("EAL Linux options:\n"
+ " -d LIB.so : add driver (can be used multiple times)\n"
+ " --"OPT_XEN_DOM0" : support application running on Xen Domain0 "
+ "without hugetlbfs\n"
+ " --"OPT_SOCKET_MEM" : memory to allocate on specific\n"
+ " sockets (use comma separated values)\n"
+ " --"OPT_HUGE_DIR" : directory where hugetlbfs is mounted\n"
+ " --"OPT_FILE_PREFIX": prefix for hugepage filenames\n"
+ " --"OPT_BASE_VIRTADDR": specify base virtual address\n"
+ " --"OPT_VFIO_INTR": specify desired interrupt mode for VFIO "
+ "(legacy|msi|msix)\n"
+ " --"OPT_CREATE_UIO_DEV": create /dev/uioX (usually done by hotplug)\n"
+ "\n");
+ /* Allow the application to print its usage message too if hook is set */
+ if ( rte_application_usage_hook ) {
+ printf("===== Application Usage =====\n\n");
+ rte_application_usage_hook(prgname);
+ }
+}
+
+/* Set a per-application usage message */
+rte_usage_hook_t
+rte_set_application_usage_hook( rte_usage_hook_t usage_func )
+{
+ rte_usage_hook_t old_func;
+
+ /* Will be NULL on the first call to denote the last usage routine. */
+ old_func = rte_application_usage_hook;
+ rte_application_usage_hook = usage_func;
+
+ return old_func;
+}
+
+static int
+eal_parse_socket_mem(char *socket_mem)
+{
+ char * arg[RTE_MAX_NUMA_NODES];
+ char *end;
+ int arg_num, i, len;
+ uint64_t total_mem = 0;
+
+ len = strnlen(socket_mem, SOCKET_MEM_STRLEN);
+ if (len == SOCKET_MEM_STRLEN) {
+ RTE_LOG(ERR, EAL, "--socket-mem is too long\n");
+ return -1;
+ }
+
+ /* all other error cases will be caught later */
+ if (!isdigit(socket_mem[len-1]))
+ return -1;
+
+ /* split the optarg into separate socket values */
+ arg_num = rte_strsplit(socket_mem, len,
+ arg, RTE_MAX_NUMA_NODES, ',');
+
+ /* if split failed, or 0 arguments */
+ if (arg_num <= 0)
+ return -1;
+
+ internal_config.force_sockets = 1;
+
+ /* parse each defined socket option */
+ errno = 0;
+ for (i = 0; i < arg_num; i++) {
+ end = NULL;
+ internal_config.socket_mem[i] = strtoull(arg[i], &end, 10);
+
+ /* check for invalid input */
+ if ((errno != 0) ||
+ (arg[i][0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+ internal_config.socket_mem[i] *= 1024ULL;
+ internal_config.socket_mem[i] *= 1024ULL;
+ total_mem += internal_config.socket_mem[i];
+ }
+
+ /* check if we have a positive amount of total memory */
+ if (total_mem == 0)
+ return -1;
+
+ return 0;
+}
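For illustration, the value given to --socket-mem is parsed as per-socket megabytes and stored in bytes (the wrapper function is hypothetical):

    static void
    socket_mem_example(void)
    {
        char arg[] = "1024,1024";            /* as passed to --socket-mem */

        if (eal_parse_socket_mem(arg) == 0) {
            /* internal_config.socket_mem[0] and [1] are now 1 GB each,
             * and internal_config.force_sockets == 1 */
        }
    }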
+
+static int
+eal_parse_base_virtaddr(const char *arg)
+{
+ char *end;
+ uint64_t addr;
+
+ errno = 0;
+ addr = strtoull(arg, &end, 16);
+
+ /* check for errors */
+ if ((errno != 0) || (arg[0] == '\0') || end == NULL || (*end != '\0'))
+ return -1;
+
+ /* make sure we don't exceed 32-bit boundary on 32-bit target */
+#ifndef RTE_ARCH_64
+ if (addr >= UINTPTR_MAX)
+ return -1;
+#endif
+
+ /* align the addr on a 16MB boundary: 16MB is the minimum huge page
+ * size on the IBM Power architecture, and an address aligned to 16MB
+ * is also 2MB-aligned, so the same alignment can be used on x86 */
+ internal_config.base_virtaddr =
+ RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M);
+
+ return 0;
+}
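For illustration (the address is hypothetical, 64-bit build assumed):

    static void
    base_virtaddr_example(void)
    {
        /* as given by --base-virtaddr=0x100000000 */
        eal_parse_base_virtaddr("0x100000000");
        /* internal_config.base_virtaddr == 0x100000000, already a multiple
         * of 16 MB; unaligned inputs are rounded up to the next boundary */
    }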
+
+static int
+eal_parse_vfio_intr(const char *mode)
+{
+ unsigned i;
+ static struct {
+ const char *name;
+ enum rte_intr_mode value;
+ } map[] = {
+ { "legacy", RTE_INTR_MODE_LEGACY },
+ { "msi", RTE_INTR_MODE_MSI },
+ { "msix", RTE_INTR_MODE_MSIX },
+ };
+
+ for (i = 0; i < RTE_DIM(map); i++) {
+ if (!strcmp(mode, map[i].name)) {
+ internal_config.vfio_intr_mode = map[i].value;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static inline size_t
+eal_get_hugepage_mem_size(void)
+{
+ uint64_t size = 0;
+ unsigned i, j;
+
+ for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
+ struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+ if (hpi->hugedir != NULL) {
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
+ size += hpi->hugepage_sz * hpi->num_pages[j];
+ }
+ }
+ }
+
+ return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+eal_parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+ struct shared_driver *solib;
+
+ argvopt = argv;
+
+ eal_reset_internal_config(&internal_config);
+
+ while ((opt = getopt_long(argc, argvopt, eal_short_options,
+ eal_long_options, &option_index)) != EOF) {
+
+ int ret;
+
+ /* getopt is not happy, stop right now */
+ if (opt == '?')
+ return -1;
+
+ ret = eal_parse_common_option(opt, optarg, &internal_config);
+ /* common parser is not happy */
+ if (ret < 0) {
+ eal_usage(prgname);
+ return -1;
+ }
+ /* common parser handled this option */
+ if (ret == 0)
+ continue;
+
+ switch (opt) {
+ /* force loading of external driver */
+ case 'd':
+ solib = malloc(sizeof(*solib));
+ if (solib == NULL) {
+ RTE_LOG(ERR, EAL, "malloc(solib) failed\n");
+ return -1;
+ }
+ memset(solib, 0, sizeof(*solib));
+ strncpy(solib->name, optarg, PATH_MAX-1);
+ solib->name[PATH_MAX-1] = 0;
+ TAILQ_INSERT_TAIL(&solib_list, solib, next);
+ break;
+
+ /* long options */
+ case OPT_XEN_DOM0_NUM:
+#ifdef RTE_LIBRTE_XEN_DOM0
+ internal_config.xen_dom0_support = 1;
+#else
+ RTE_LOG(ERR, EAL, "Can't support DPDK app "
+ "running on Dom0, please configure"
+ " RTE_LIBRTE_XEN_DOM0=y\n");
+ return -1;
+#endif
+ break;
+
+ case OPT_HUGE_DIR_NUM:
+ internal_config.hugepage_dir = optarg;
+ break;
+
+ case OPT_FILE_PREFIX_NUM:
+ internal_config.hugefile_prefix = optarg;
+ break;
+
+ case OPT_SOCKET_MEM_NUM:
+ if (eal_parse_socket_mem(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid parameters for --"
+ OPT_SOCKET_MEM "\n");
+ eal_usage(prgname);
+ return -1;
+ }
+ break;
+
+ case OPT_BASE_VIRTADDR_NUM:
+ if (eal_parse_base_virtaddr(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid parameter for --"
+ OPT_BASE_VIRTADDR "\n");
+ eal_usage(prgname);
+ return -1;
+ }
+ break;
+
+ case OPT_VFIO_INTR_NUM:
+ if (eal_parse_vfio_intr(optarg) < 0) {
+ RTE_LOG(ERR, EAL, "invalid parameters for --"
+ OPT_VFIO_INTR "\n");
+ eal_usage(prgname);
+ return -1;
+ }
+ break;
+
+ case OPT_CREATE_UIO_DEV_NUM:
+ internal_config.create_uio_dev = 1;
+ break;
+
+ default:
+ if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
+ RTE_LOG(ERR, EAL, "Option %c is not supported "
+ "on Linux\n", opt);
+ } else if (opt >= OPT_LONG_MIN_NUM &&
+ opt < OPT_LONG_MAX_NUM) {
+ RTE_LOG(ERR, EAL, "Option %s is not supported "
+ "on Linux\n",
+ eal_long_options[option_index].name);
+ } else {
+ RTE_LOG(ERR, EAL, "Option %d is not supported "
+ "on Linux\n", opt);
+ }
+ eal_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (eal_adjust_config(&internal_config) != 0)
+ return -1;
+
+ /* sanity checks */
+ if (eal_check_common_options(&internal_config) != 0) {
+ eal_usage(prgname);
+ return -1;
+ }
+
+ /* --xen-dom0 doesn't make sense with --socket-mem */
+ if (internal_config.xen_dom0_support && internal_config.force_sockets == 1) {
+ RTE_LOG(ERR, EAL, "Options --"OPT_SOCKET_MEM" cannot be specified "
+ "together with --"OPT_XEN_DOM0"\n");
+ eal_usage(prgname);
+ return -1;
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+ ret = optind-1;
+ optind = 0; /* reset getopt lib */
+ return ret;
+}
+
+static void
+eal_check_mem_on_local_socket(void)
+{
+ const struct rte_memseg *ms;
+ int i, socket_id;
+
+ socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
+
+ ms = rte_eal_get_physmem_layout();
+
+ for (i = 0; i < RTE_MAX_MEMSEG; i++)
+ if (ms[i].socket_id == socket_id &&
+ ms[i].len > 0)
+ return;
+
+ RTE_LOG(WARNING, EAL, "WARNING: Master core has no "
+ "memory on local socket!\n");
+}
+
+static int
+sync_func(__attribute__((unused)) void *arg)
+{
+ return 0;
+}
+
+inline static void
+rte_eal_mcfg_complete(void)
+{
+ /* ALL shared mem_config related INIT DONE */
+ if (rte_config.process_type == RTE_PROC_PRIMARY)
+ rte_config.mem_config->magic = RTE_MAGIC;
+}
+
+/*
+ * Request iopl privilege for all RPL, returns 0 on success
+ * iopl() call is mostly for the i386 architecture. For other architectures,
+ * return -1 to indicate IO privilege can't be changed in this way.
+ */
+int
+rte_eal_iopl_init(void)
+{
+#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_I686)
+ if (iopl(3) != 0)
+ return -1;
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+/* Launch threads, called at application init(). */
+int
+rte_eal_init(int argc, char **argv)
+{
+ int i, fctret, ret;
+ pthread_t thread_id;
+ static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
+ struct shared_driver *solib = NULL;
+ const char *logid;
+
+ if (!rte_atomic32_test_and_set(&run_once))
+ return -1;
+
+ logid = strrchr(argv[0], '/');
+ logid = strdup(logid ? logid + 1: argv[0]);
+
+ thread_id = pthread_self();
+
+ if (rte_eal_log_early_init() < 0)
+ rte_panic("Cannot init early logs\n");
+
+ if (rte_eal_cpu_init() < 0)
+ rte_panic("Cannot detect lcores\n");
+
+ fctret = eal_parse_args(argc, argv);
+ if (fctret < 0)
+ exit(1);
+
+ /* set log level as early as possible */
+ rte_set_log_level(internal_config.log_level);
+
+ if (internal_config.no_hugetlbfs == 0 &&
+ internal_config.process_type != RTE_PROC_SECONDARY &&
+ internal_config.xen_dom0_support == 0 &&
+ eal_hugepage_info_init() < 0)
+ rte_panic("Cannot get hugepage information\n");
+
+ if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
+ if (internal_config.no_hugetlbfs)
+ internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
+ else
+ internal_config.memory = eal_get_hugepage_mem_size();
+ }
+
+ if (internal_config.vmware_tsc_map == 1) {
+#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
+ rte_cycles_vmware_tsc_map = 1;
+ RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
+ "you must have monitor_control.pseudo_perfctr = TRUE\n");
+#else
+ RTE_LOG (WARNING, EAL, "Ignoring --vmware-tsc-map because "
+ "RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT is not set\n");
+#endif
+ }
+
+ rte_srand(rte_rdtsc());
+
+ rte_config_init();
+
+ if (rte_eal_pci_init() < 0)
+ rte_panic("Cannot init PCI\n");
+
+#ifdef RTE_LIBRTE_IVSHMEM
+ if (rte_eal_ivshmem_init() < 0)
+ rte_panic("Cannot init IVSHMEM\n");
+#endif
+
+ if (rte_eal_memory_init() < 0)
+ rte_panic("Cannot init memory\n");
+
+ /* the directories are locked during eal_hugepage_info_init */
+ eal_hugedirs_unlock();
+
+ if (rte_eal_memzone_init() < 0)
+ rte_panic("Cannot init memzone\n");
+
+ if (rte_eal_tailqs_init() < 0)
+ rte_panic("Cannot init tail queues for objects\n");
+
+#ifdef RTE_LIBRTE_IVSHMEM
+ if (rte_eal_ivshmem_obj_init() < 0)
+ rte_panic("Cannot init IVSHMEM objects\n");
+#endif
+
+ if (rte_eal_log_init(logid, internal_config.syslog_facility) < 0)
+ rte_panic("Cannot init logs\n");
+
+ if (rte_eal_alarm_init() < 0)
+ rte_panic("Cannot init interrupt-handling thread\n");
+
+ if (rte_eal_intr_init() < 0)
+ rte_panic("Cannot init interrupt-handling thread\n");
+
+ if (rte_eal_timer_init() < 0)
+ rte_panic("Cannot init HPET or TSC timers\n");
+
+ eal_check_mem_on_local_socket();
+
+ rte_eal_mcfg_complete();
+
+ TAILQ_FOREACH(solib, &solib_list, next) {
+ RTE_LOG(INFO, EAL, "open shared lib %s\n", solib->name);
+ solib->lib_handle = dlopen(solib->name, RTLD_NOW);
+ if (solib->lib_handle == NULL)
+ RTE_LOG(WARNING, EAL, "%s\n", dlerror());
+ }
+
+ eal_thread_init_master(rte_config.master_lcore);
+
+ RTE_LOG(DEBUG, EAL, "Master core %u is ready (tid=%x)\n",
+ rte_config.master_lcore, (int)thread_id);
+
+ if (rte_eal_dev_init() < 0)
+ rte_panic("Cannot init pmd devices\n");
+
+ RTE_LCORE_FOREACH_SLAVE(i) {
+
+ /*
+ * create communication pipes between master thread
+ * and children
+ */
+ if (pipe(lcore_config[i].pipe_master2slave) < 0)
+ rte_panic("Cannot create pipe\n");
+ if (pipe(lcore_config[i].pipe_slave2master) < 0)
+ rte_panic("Cannot create pipe\n");
+
+ lcore_config[i].state = WAIT;
+
+ /* create a thread for each lcore */
+ ret = pthread_create(&lcore_config[i].thread_id, NULL,
+ eal_thread_loop, NULL);
+ if (ret != 0)
+ rte_panic("Cannot create thread\n");
+ }
+
+ /*
+ * Launch a dummy function on all slave lcores, so that master lcore
+ * knows they are all ready when this function returns.
+ */
+ rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
+ rte_eal_mp_wait_lcore();
+
+ /* Probe & Initialize PCI devices */
+ if (rte_eal_pci_probe())
+ rte_panic("Cannot probe PCI\n");
+
+ return fctret;
+}
+
+/* get core role */
+enum rte_lcore_role_t
+rte_eal_lcore_role(unsigned lcore_id)
+{
+ return (rte_config.lcore_role[lcore_id]);
+}
+
+enum rte_proc_type_t
+rte_eal_process_type(void)
+{
+ return (rte_config.process_type);
+}
+
+int rte_eal_has_hugepages(void)
+{
+ return ! internal_config.no_hugetlbfs;
+}
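A minimal application sketch showing the init/launch flow that rte_eal_init() above sets up; the hello function and its output are illustrative, the EAL/launch/lcore headers are the ones provided by this patch set:

    #include <stdio.h>
    #include <rte_eal.h>
    #include <rte_launch.h>
    #include <rte_lcore.h>
    #include <rte_debug.h>

    static int
    lcore_hello(__attribute__((unused)) void *arg)
    {
        printf("hello from lcore %u\n", rte_lcore_id());
        return 0;
    }

    int
    main(int argc, char **argv)
    {
        if (rte_eal_init(argc, argv) < 0)
            rte_panic("Cannot init EAL\n");

        /* run on every slave lcore, then on the master, then wait */
        rte_eal_mp_remote_launch(lcore_hello, NULL, SKIP_MASTER);
        lcore_hello(NULL);
        rte_eal_mp_wait_lcore();
        return 0;
    }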
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_alarm.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_alarm.c
new file mode 100755
index 00000000..e8da32fc
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_alarm.c
@@ -0,0 +1,268 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <signal.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <sys/time.h>
+#include <sys/timerfd.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_interrupts.h>
+#include <rte_alarm.h>
+#include <rte_common.h>
+#include <rte_per_lcore.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+#include <eal_private.h>
+
+#ifndef TFD_NONBLOCK
+#include <fcntl.h>
+#define TFD_NONBLOCK O_NONBLOCK
+#endif
+
+#define NS_PER_US 1000
+#define US_PER_MS 1000
+#define MS_PER_S 1000
+#define US_PER_S (US_PER_MS * MS_PER_S)
+
+struct alarm_entry {
+ LIST_ENTRY(alarm_entry) next;
+ struct timeval time;
+ rte_eal_alarm_callback cb_fn;
+ void *cb_arg;
+ volatile uint8_t executing;
+ volatile pthread_t executing_id;
+};
+
+static LIST_HEAD(alarm_list, alarm_entry) alarm_list = LIST_HEAD_INITIALIZER();
+static rte_spinlock_t alarm_list_lk = RTE_SPINLOCK_INITIALIZER;
+
+static struct rte_intr_handle intr_handle = {.fd = -1 };
+static int handler_registered = 0;
+static void eal_alarm_callback(struct rte_intr_handle *hdl, void *arg);
+
+int
+rte_eal_alarm_init(void)
+{
+ intr_handle.type = RTE_INTR_HANDLE_ALARM;
+ /* create a timerfd file descriptor */
+ intr_handle.fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
+ if (intr_handle.fd == -1)
+ goto error;
+
+ return 0;
+
+error:
+ rte_errno = errno;
+ return -1;
+}
+
+static void
+eal_alarm_callback(struct rte_intr_handle *hdl __rte_unused,
+ void *arg __rte_unused)
+{
+ struct timeval now;
+ struct alarm_entry *ap;
+
+ rte_spinlock_lock(&alarm_list_lk);
+ while ((ap = LIST_FIRST(&alarm_list)) != NULL &&
+ gettimeofday(&now, NULL) == 0 &&
+ (ap->time.tv_sec < now.tv_sec || (ap->time.tv_sec == now.tv_sec &&
+ ap->time.tv_usec <= now.tv_usec))){
+ ap->executing = 1;
+ ap->executing_id = pthread_self();
+ rte_spinlock_unlock(&alarm_list_lk);
+
+ ap->cb_fn(ap->cb_arg);
+
+ rte_spinlock_lock(&alarm_list_lk);
+
+ LIST_REMOVE(ap, next);
+ rte_free(ap);
+ }
+
+ if (!LIST_EMPTY(&alarm_list)) {
+ struct itimerspec atime = { .it_interval = { 0, 0 } };
+
+ ap = LIST_FIRST(&alarm_list);
+ atime.it_value.tv_sec = ap->time.tv_sec;
+ atime.it_value.tv_nsec = ap->time.tv_usec * NS_PER_US;
+ /* perform borrow for subtraction if necessary */
+ if (now.tv_usec > ap->time.tv_usec)
+ atime.it_value.tv_sec--, atime.it_value.tv_nsec += US_PER_S * NS_PER_US;
+
+ atime.it_value.tv_sec -= now.tv_sec;
+ atime.it_value.tv_nsec -= now.tv_usec * NS_PER_US;
+ timerfd_settime(intr_handle.fd, 0, &atime, NULL);
+ }
+ rte_spinlock_unlock(&alarm_list_lk);
+}
+
+int
+rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb_fn, void *cb_arg)
+{
+ struct timeval now;
+ int ret = 0;
+ struct alarm_entry *ap, *new_alarm;
+
+ /* Check parameters, including that us won't cause a uint64_t overflow */
+ if (us < 1 || us > (UINT64_MAX - US_PER_S) || cb_fn == NULL)
+ return -EINVAL;
+
+ new_alarm = rte_zmalloc(NULL, sizeof(*new_alarm), 0);
+ if (new_alarm == NULL)
+ return -ENOMEM;
+
+ /* use current time to calculate absolute time of alarm */
+ gettimeofday(&now, NULL);
+
+ new_alarm->cb_fn = cb_fn;
+ new_alarm->cb_arg = cb_arg;
+ new_alarm->time.tv_usec = (now.tv_usec + us) % US_PER_S;
+ new_alarm->time.tv_sec = now.tv_sec + ((now.tv_usec + us) / US_PER_S);
+
+ rte_spinlock_lock(&alarm_list_lk);
+ if (!handler_registered) {
+ ret |= rte_intr_callback_register(&intr_handle,
+ eal_alarm_callback, NULL);
+ handler_registered = (ret == 0) ? 1 : 0;
+ }
+
+ if (LIST_EMPTY(&alarm_list))
+ LIST_INSERT_HEAD(&alarm_list, new_alarm, next);
+ else {
+ LIST_FOREACH(ap, &alarm_list, next) {
+ if (ap->time.tv_sec > new_alarm->time.tv_sec ||
+ (ap->time.tv_sec == new_alarm->time.tv_sec &&
+ ap->time.tv_usec > new_alarm->time.tv_usec)){
+ LIST_INSERT_BEFORE(ap, new_alarm, next);
+ break;
+ }
+ if (LIST_NEXT(ap, next) == NULL) {
+ LIST_INSERT_AFTER(ap, new_alarm, next);
+ break;
+ }
+ }
+ }
+
+ if (LIST_FIRST(&alarm_list) == new_alarm) {
+ struct itimerspec alarm_time = {
+ .it_interval = {0, 0},
+ .it_value = {
+ .tv_sec = us / US_PER_S,
+ .tv_nsec = (us % US_PER_S) * NS_PER_US,
+ },
+ };
+ ret |= timerfd_settime(intr_handle.fd, 0, &alarm_time, NULL);
+ }
+ rte_spinlock_unlock(&alarm_list_lk);
+
+ return ret;
+}
+
+int
+rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
+{
+ struct alarm_entry *ap, *ap_prev;
+ int count = 0;
+ int err = 0;
+ int executing;
+
+ if (!cb_fn) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ do {
+ executing = 0;
+ rte_spinlock_lock(&alarm_list_lk);
+ /* remove any matches at the start of the list */
+ while ((ap = LIST_FIRST(&alarm_list)) != NULL &&
+ cb_fn == ap->cb_fn &&
+ (cb_arg == (void *)-1 || cb_arg == ap->cb_arg)) {
+
+ if (ap->executing == 0) {
+ LIST_REMOVE(ap, next);
+ rte_free(ap);
+ count++;
+ } else {
+ /* If called from another context, mark that the alarm is executing
+ * so the loop can spin until it finishes. Otherwise we are trying to
+ * cancel ourselves - mark it with EINPROGRESS. */
+ if (pthread_equal(ap->executing_id, pthread_self()) == 0)
+ executing++;
+ else
+ err = EINPROGRESS;
+
+ break;
+ }
+ }
+ ap_prev = ap;
+
+ /* now go through list, removing entries not at start */
+ LIST_FOREACH(ap, &alarm_list, next) {
+ /* this won't be true first time through */
+ if (cb_fn == ap->cb_fn &&
+ (cb_arg == (void *)-1 || cb_arg == ap->cb_arg)) {
+
+ if (ap->executing == 0) {
+ LIST_REMOVE(ap, next);
+ rte_free(ap);
+ count++;
+ ap = ap_prev;
+ } else if (pthread_equal(ap->executing_id, pthread_self()) == 0)
+ executing++;
+ else
+ err = EINPROGRESS;
+ }
+ ap_prev = ap;
+ }
+ rte_spinlock_unlock(&alarm_list_lk);
+ } while (executing != 0);
+
+ if (count == 0 && err == 0)
+ rte_errno = ENOENT;
+ else if (err)
+ rte_errno = err;
+
+ return count;
+}
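+
+/*
+ * Editor's note - illustrative usage sketch, not part of this patch.
+ * The timeout argument is in microseconds; passing (void *)-1 as cb_arg
+ * to the cancel call matches any argument. "my_cb" and "my_ctx" are
+ * hypothetical names:
+ *
+ *   rte_eal_alarm_set(500000, my_cb, my_ctx);   schedule in ~500 ms
+ *   rte_eal_alarm_cancel(my_cb, (void *)-1);    cancel all pending my_cb
+ */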
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_debug.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_debug.c
new file mode 100755
index 00000000..44fc4f33
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_debug.c
@@ -0,0 +1,113 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <execinfo.h>
+#include <stdarg.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_common.h>
+
+#define BACKTRACE_SIZE 256
+
+/* dump the stack of the calling core */
+void rte_dump_stack(void)
+{
+ void *func[BACKTRACE_SIZE];
+ char **symb = NULL;
+ int size;
+
+ size = backtrace(func, BACKTRACE_SIZE);
+ symb = backtrace_symbols(func, size);
+ while (size > 0) {
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_EAL,
+ "%d: [%s]\n", size, symb[size - 1]);
+ size --;
+ }
+}
+
+/* not implemented in this environment */
+void rte_dump_registers(void)
+{
+ return;
+}
+
+/* call abort(), it will generate a coredump if enabled */
+void __rte_panic(const char *funcname, const char *format, ...)
+{
+ va_list ap;
+
+ /* disable history */
+ rte_log_set_history(0);
+
+ rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, "PANIC in %s():\n", funcname);
+ va_start(ap, format);
+ rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
+ va_end(ap);
+ rte_dump_stack();
+ rte_dump_registers();
+ abort();
+}
+
+/*
+ * Like rte_panic this terminates the application. However, no traceback is
+ * provided and no core-dump is generated.
+ */
+void
+rte_exit(int exit_code, const char *format, ...)
+{
+ va_list ap;
+
+ /* disable history */
+ rte_log_set_history(0);
+
+ if (exit_code != 0)
+ RTE_LOG(CRIT, EAL, "Error - exiting with code: %d\n"
+ " Cause: ", exit_code);
+
+ va_start(ap, format);
+ rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
+ va_end(ap);
+
+#ifndef RTE_EAL_ALWAYS_PANIC_ON_ERROR
+ exit(exit_code);
+#else
+ rte_dump_stack();
+ rte_dump_registers();
+ abort();
+#endif
+}
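+
+/*
+ * Editor's note - illustrative usage sketch, not part of this patch:
+ * rte_exit() is the graceful error path (message plus exit code), while
+ * rte_panic() additionally dumps the stack and calls abort():
+ *
+ *   if (rte_eal_init(argc, argv) < 0)
+ *       rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
+ */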
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_hugepage_info.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_hugepage_info.c
new file mode 100755
index 00000000..590cb565
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_hugepage_info.c
@@ -0,0 +1,359 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <dirent.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <fnmatch.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_common.h>
+#include "rte_string_fns.h"
+#include "eal_internal_cfg.h"
+#include "eal_hugepages.h"
+#include "eal_filesystem.h"
+
+static const char sys_dir_path[] = "/sys/kernel/mm/hugepages";
+
+static int32_t
+get_num_hugepages(const char *subdir)
+{
+ char path[PATH_MAX];
+ long unsigned resv_pages, num_pages = 0;
+ const char *nr_hp_file;
+ const char *nr_rsvd_file = "resv_hugepages";
+
+ /* first, check how many reserved pages kernel reports */
+ snprintf(path, sizeof(path), "%s/%s/%s",
+ sys_dir_path, subdir, nr_rsvd_file);
+
+ if (eal_parse_sysfs_value(path, &resv_pages) < 0)
+ return 0;
+
+ /* if secondary process, just look at the number of hugepages,
+ * otherwise look at number of free hugepages */
+ if (internal_config.process_type == RTE_PROC_SECONDARY)
+ nr_hp_file = "nr_hugepages";
+ else
+ nr_hp_file = "free_hugepages";
+
+ memset(path, 0, sizeof(path));
+
+ snprintf(path, sizeof(path), "%s/%s/%s",
+ sys_dir_path, subdir, nr_hp_file);
+
+ if (eal_parse_sysfs_value(path, &num_pages) < 0)
+ return 0;
+
+ if (num_pages == 0)
+ RTE_LOG(WARNING, EAL, "No free hugepages reported in %s\n",
+ subdir);
+
+ /* adjust num_pages in case of primary process */
+ if (num_pages > 0 && internal_config.process_type == RTE_PROC_PRIMARY)
+ num_pages -= resv_pages;
+
+ return (int32_t)num_pages;
+}
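+
+/*
+ * Editor's note - illustrative example, not part of this patch: for a
+ * primary process with free_hugepages == 1024 and resv_hugepages == 24
+ * this returns 1000; a secondary process reads nr_hugepages instead and
+ * does not subtract the reserved count.
+ */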
+
+static uint64_t
+get_default_hp_size(void)
+{
+ const char proc_meminfo[] = "/proc/meminfo";
+ const char str_hugepagesz[] = "Hugepagesize:";
+ unsigned hugepagesz_len = sizeof(str_hugepagesz) - 1;
+ char buffer[256];
+ unsigned long long size = 0;
+
+ FILE *fd = fopen(proc_meminfo, "r");
+ if (fd == NULL)
+ rte_panic("Cannot open %s\n", proc_meminfo);
+ while(fgets(buffer, sizeof(buffer), fd)){
+ if (strncmp(buffer, str_hugepagesz, hugepagesz_len) == 0){
+ size = rte_str_to_size(&buffer[hugepagesz_len]);
+ break;
+ }
+ }
+ fclose(fd);
+ if (size == 0)
+ rte_panic("Cannot get default hugepage size from %s\n", proc_meminfo);
+ return size;
+}
+
+static const char *
+get_hugepage_dir(uint64_t hugepage_sz)
+{
+ enum proc_mount_fieldnames {
+ DEVICE = 0,
+ MOUNTPT,
+ FSTYPE,
+ OPTIONS,
+ _FIELDNAME_MAX
+ };
+ static uint64_t default_size = 0;
+ const char proc_mounts[] = "/proc/mounts";
+ const char hugetlbfs_str[] = "hugetlbfs";
+ const size_t htlbfs_str_len = sizeof(hugetlbfs_str) - 1;
+ const char pagesize_opt[] = "pagesize=";
+ const size_t pagesize_opt_len = sizeof(pagesize_opt) - 1;
+ const char split_tok = ' ';
+ char *splitstr[_FIELDNAME_MAX];
+ char buf[BUFSIZ];
+ char *retval = NULL;
+
+ FILE *fd = fopen(proc_mounts, "r");
+ if (fd == NULL)
+ rte_panic("Cannot open %s\n", proc_mounts);
+
+ if (default_size == 0)
+ default_size = get_default_hp_size();
+
+ while (fgets(buf, sizeof(buf), fd)){
+ if (rte_strsplit(buf, sizeof(buf), splitstr, _FIELDNAME_MAX,
+ split_tok) != _FIELDNAME_MAX) {
+ RTE_LOG(ERR, EAL, "Error parsing %s\n", proc_mounts);
+ break; /* return NULL */
+ }
+
+ /* we have a specified --huge-dir option, only examine that dir */
+ if (internal_config.hugepage_dir != NULL &&
+ strcmp(splitstr[MOUNTPT], internal_config.hugepage_dir) != 0)
+ continue;
+
+ if (strncmp(splitstr[FSTYPE], hugetlbfs_str, htlbfs_str_len) == 0){
+ const char *pagesz_str = strstr(splitstr[OPTIONS], pagesize_opt);
+
+ /* if no explicit page size, the default page size is compared */
+ if (pagesz_str == NULL){
+ if (hugepage_sz == default_size){
+ retval = strdup(splitstr[MOUNTPT]);
+ break;
+ }
+ }
+ /* there is an explicit page size, so check it */
+ else {
+ uint64_t pagesz = rte_str_to_size(&pagesz_str[pagesize_opt_len]);
+ if (pagesz == hugepage_sz) {
+ retval = strdup(splitstr[MOUNTPT]);
+ break;
+ }
+ }
+ } /* end if strncmp hugetlbfs */
+ } /* end while fgets */
+
+ fclose(fd);
+ return retval;
+}
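+
+/*
+ * Editor's note - illustrative example, not part of this patch: a
+ * matching /proc/mounts entry typically looks like
+ *
+ *   nodev /mnt/huge hugetlbfs rw,relatime,pagesize=2M 0 0
+ *
+ * for which get_hugepage_dir(2097152) would return "/mnt/huge"; without
+ * a pagesize= option the mount is matched against the default size read
+ * from /proc/meminfo.
+ */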
+
+static inline void
+swap_hpi(struct hugepage_info *a, struct hugepage_info *b)
+{
+ char buf[sizeof(*a)];
+ memcpy(buf, a, sizeof(buf));
+ memcpy(a, b, sizeof(buf));
+ memcpy(b, buf, sizeof(buf));
+}
+
+/*
+ * Clear the hugepage directory of whatever hugepage files
+ * there are. Checks if the file is locked (i.e.
+ * if it's in use by another DPDK process).
+ */
+static int
+clear_hugedir(const char * hugedir)
+{
+ DIR *dir;
+ struct dirent *dirent;
+ int dir_fd, fd, lck_result;
+ const char filter[] = "*map_*"; /* matches hugepage files */
+
+ /* open directory */
+ dir = opendir(hugedir);
+ if (!dir) {
+ RTE_LOG(INFO, EAL, "Unable to open hugepage directory %s\n",
+ hugedir);
+ goto error;
+ }
+ dir_fd = dirfd(dir);
+
+ dirent = readdir(dir);
+ if (!dirent) {
+ RTE_LOG(INFO, EAL, "Unable to read hugepage directory %s\n",
+ hugedir);
+ goto error;
+ }
+
+ while(dirent != NULL){
+ /* skip files that don't match the hugepage pattern */
+ if (fnmatch(filter, dirent->d_name, 0) > 0) {
+ dirent = readdir(dir);
+ continue;
+ }
+
+ /* try and lock the file */
+ fd = openat(dir_fd, dirent->d_name, O_RDONLY);
+
+ /* skip to next file */
+ if (fd == -1) {
+ dirent = readdir(dir);
+ continue;
+ }
+
+ /* non-blocking lock */
+ lck_result = flock(fd, LOCK_EX | LOCK_NB);
+
+ /* if lock succeeds, unlock and remove the file */
+ if (lck_result != -1) {
+ flock(fd, LOCK_UN);
+ unlinkat(dir_fd, dirent->d_name, 0);
+ }
+ close (fd);
+ dirent = readdir(dir);
+ }
+
+ closedir(dir);
+ return 0;
+
+error:
+ if (dir)
+ closedir(dir);
+
+ RTE_LOG(INFO, EAL, "Error while clearing hugepage dir: %s\n",
+ strerror(errno));
+
+ return -1;
+}
+
+/*
+ * When we initialize the hugepage info, everything goes
+ * to socket 0 by default. It will later get sorted by the memory
+ * initialization procedure.
+ */
+int
+eal_hugepage_info_init(void)
+{
+ const char dirent_start_text[] = "hugepages-";
+ const size_t dirent_start_len = sizeof(dirent_start_text) - 1;
+ unsigned i, num_sizes = 0;
+
+ DIR *dir = opendir(sys_dir_path);
+ if (dir == NULL)
+ rte_panic("Cannot open directory %s to read system hugepage info\n",
+ sys_dir_path);
+
+ struct dirent *dirent = readdir(dir);
+ while(dirent != NULL){
+ if (strncmp(dirent->d_name, dirent_start_text, dirent_start_len) == 0){
+ struct hugepage_info *hpi = \
+ &internal_config.hugepage_info[num_sizes];
+ hpi->hugepage_sz = rte_str_to_size(&dirent->d_name[dirent_start_len]);
+ hpi->hugedir = get_hugepage_dir(hpi->hugepage_sz);
+
+ /* first, check if we have a mountpoint */
+ if (hpi->hugedir == NULL){
+ int32_t num_pages;
+ if ((num_pages = get_num_hugepages(dirent->d_name)) > 0)
+ RTE_LOG(INFO, EAL, "%u hugepages of size %llu reserved, "\
+ "but no mounted hugetlbfs found for that size\n",
+ (unsigned)num_pages,
+ (unsigned long long)hpi->hugepage_sz);
+ } else {
+ /* try to obtain a writelock */
+ hpi->lock_descriptor = open(hpi->hugedir, O_RDONLY);
+
+ /* if blocking lock failed */
+ if (flock(hpi->lock_descriptor, LOCK_EX) == -1) {
+ RTE_LOG(CRIT, EAL, "Failed to lock hugepage directory!\n");
+ closedir(dir);
+ return -1;
+ }
+ /* clear out the hugepages dir from unused pages */
+ if (clear_hugedir(hpi->hugedir) == -1) {
+ closedir(dir);
+ return -1;
+ }
+
+ /* for now, put all pages into socket 0,
+ * later they will be sorted */
+ hpi->num_pages[0] = get_num_hugepages(dirent->d_name);
+
+#ifndef RTE_ARCH_64
+ /* for 32-bit systems, limit number of hugepages to 1GB per page size */
+ hpi->num_pages[0] = RTE_MIN(hpi->num_pages[0],
+ RTE_PGSIZE_1G / hpi->hugepage_sz);
+#endif
+
+ num_sizes++;
+ }
+ }
+ dirent = readdir(dir);
+ }
+ closedir(dir);
+ internal_config.num_hugepage_sizes = num_sizes;
+
+ /* sort the page directory entries by size, largest to smallest */
+ for (i = 0; i < num_sizes; i++){
+ unsigned j;
+ for (j = i+1; j < num_sizes; j++)
+ if (internal_config.hugepage_info[j-1].hugepage_sz < \
+ internal_config.hugepage_info[j].hugepage_sz)
+ swap_hpi(&internal_config.hugepage_info[j-1],
+ &internal_config.hugepage_info[j]);
+ }
+
+ /* now we have all info, check we have at least one valid size */
+ for (i = 0; i < num_sizes; i++)
+ if (internal_config.hugepage_info[i].hugedir != NULL &&
+ internal_config.hugepage_info[i].num_pages[0] > 0)
+ return 0;
+
+ /* no valid hugepage mounts available, return error */
+ return -1;
+}
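+
+/*
+ * Editor's note - illustrative example, not part of this patch: after a
+ * successful eal_hugepage_info_init(), internal_config.hugepage_info[]
+ * is ordered from the largest page size to the smallest, e.g. on a host
+ * exposing both sizes:
+ *
+ *   hugepage_info[0].hugepage_sz == 1073741824   1 GB pages
+ *   hugepage_info[1].hugepage_sz == 2097152      2 MB pages
+ *
+ * with num_hugepage_sizes == 2.
+ */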
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_interrupts.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_interrupts.c
new file mode 100755
index 00000000..dc2668a4
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -0,0 +1,826 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/epoll.h>
+#include <sys/signalfd.h>
+#include <sys/ioctl.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_mempool.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include <rte_spinlock.h>
+
+#include "eal_private.h"
+#include "eal_vfio.h"
+
+#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
+
+/**
+ * union for pipe fds.
+ */
+union intr_pipefds{
+ struct {
+ int pipefd[2];
+ };
+ struct {
+ int readfd;
+ int writefd;
+ };
+};
+
+/**
+ * union buffer for reading on different devices
+ */
+union rte_intr_read_buffer {
+ int uio_intr_count; /* for uio device */
+#ifdef VFIO_PRESENT
+ uint64_t vfio_intr_count; /* for vfio device */
+#endif
+ uint64_t timerfd_num; /* for timerfd */
+ char charbuf[16]; /* for others */
+};
+
+TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
+TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
+
+struct rte_intr_callback {
+ TAILQ_ENTRY(rte_intr_callback) next;
+ rte_intr_callback_fn cb_fn; /**< callback address */
+ void *cb_arg; /**< parameter for callback */
+};
+
+struct rte_intr_source {
+ TAILQ_ENTRY(rte_intr_source) next;
+ struct rte_intr_handle intr_handle; /**< interrupt handle */
+ struct rte_intr_cb_list callbacks; /**< user callbacks */
+ uint32_t active;
+};
+
+/* global spinlock for interrupt data operation */
+static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* union buffer for pipe read/write */
+static union intr_pipefds intr_pipe;
+
+/* interrupt sources list */
+static struct rte_intr_source_list intr_sources;
+
+/* interrupt handling thread */
+static pthread_t intr_thread;
+
+/* VFIO interrupts */
+#ifdef VFIO_PRESENT
+
+#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
+
+/* enable legacy (INTx) interrupts */
+static int
+vfio_enable_intx(struct rte_intr_handle *intr_handle) {
+ struct vfio_irq_set *irq_set;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ int len, ret;
+ int *fd_ptr;
+
+ len = sizeof(irq_set_buf);
+
+ /* enable INTx */
+ irq_set = (struct vfio_irq_set *) irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
+ irq_set->start = 0;
+ fd_ptr = (int *) &irq_set->data;
+ *fd_ptr = intr_handle->fd;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+
+ /* unmask INTx after enabling */
+ memset(irq_set, 0, len);
+ len = sizeof(struct vfio_irq_set);
+ irq_set->argsz = len;
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
+ irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
+ irq_set->start = 0;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+ return 0;
+}
+
+/* disable legacy (INTx) interrupts */
+static int
+vfio_disable_intx(struct rte_intr_handle *intr_handle) {
+ struct vfio_irq_set *irq_set;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ int len, ret;
+
+ len = sizeof(struct vfio_irq_set);
+
+ /* mask interrupts before disabling */
+ irq_set = (struct vfio_irq_set *) irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
+ irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
+ irq_set->start = 0;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+
+ /* disable INTx*/
+ memset(irq_set, 0, len);
+ irq_set->argsz = len;
+ irq_set->count = 0;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
+ irq_set->start = 0;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ if (ret) {
+ RTE_LOG(ERR, EAL,
+ "Error disabling INTx interrupts for fd %d\n", intr_handle->fd);
+ return -1;
+ }
+ return 0;
+}
+
+/* enable MSI interrupts */
+static int
+vfio_enable_msi(struct rte_intr_handle *intr_handle) {
+ int len, ret;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int *fd_ptr;
+
+ len = sizeof(irq_set_buf);
+
+ irq_set = (struct vfio_irq_set *) irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
+ irq_set->start = 0;
+ fd_ptr = (int *) &irq_set->data;
+ *fd_ptr = intr_handle->fd;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+
+ /* manually trigger interrupt to enable it */
+ memset(irq_set, 0, len);
+ len = sizeof(struct vfio_irq_set);
+ irq_set->argsz = len;
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
+ irq_set->start = 0;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Error triggering MSI interrupts for fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+ return 0;
+}
+
+/* disable MSI interrupts */
+static int
+vfio_disable_msi(struct rte_intr_handle *intr_handle) {
+ struct vfio_irq_set *irq_set;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ int len, ret;
+
+ len = sizeof(struct vfio_irq_set);
+
+ irq_set = (struct vfio_irq_set *) irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->count = 0;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
+ irq_set->start = 0;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "Error disabling MSI interrupts for fd %d\n", intr_handle->fd);
+
+ return ret;
+}
+
+/* enable MSI-X interrupts */
+static int
+vfio_enable_msix(struct rte_intr_handle *intr_handle) {
+ int len, ret;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int *fd_ptr;
+
+ len = sizeof(irq_set_buf);
+
+ irq_set = (struct vfio_irq_set *) irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = 0;
+ fd_ptr = (int *) &irq_set->data;
+ *fd_ptr = intr_handle->fd;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+
+ /* manually trigger interrupt to enable it */
+ memset(irq_set, 0, len);
+ len = sizeof(struct vfio_irq_set);
+ irq_set->argsz = len;
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = 0;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Error triggering MSI-X interrupts for fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+ return 0;
+}
+
+/* disable MSI-X interrupts */
+static int
+vfio_disable_msix(struct rte_intr_handle *intr_handle) {
+ struct vfio_irq_set *irq_set;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ int len, ret;
+
+ len = sizeof(struct vfio_irq_set);
+
+ irq_set = (struct vfio_irq_set *) irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->count = 0;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = 0;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "Error disabling MSI-X interrupts for fd %d\n", intr_handle->fd);
+
+ return ret;
+}
+#endif
+
+int
+rte_intr_callback_register(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *cb_arg)
+{
+ int ret, wake_thread;
+ struct rte_intr_source *src;
+ struct rte_intr_callback *callback;
+
+ wake_thread = 0;
+
+ /* first do parameter checking */
+ if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
+ RTE_LOG(ERR, EAL,
+ "Registering with invalid input parameter\n");
+ return -EINVAL;
+ }
+
+ /* allocate a new interrupt callback entity */
+ callback = rte_zmalloc("interrupt callback list",
+ sizeof(*callback), 0);
+ if (callback == NULL) {
+ RTE_LOG(ERR, EAL, "Can not allocate memory\n");
+ return -ENOMEM;
+ }
+ callback->cb_fn = cb;
+ callback->cb_arg = cb_arg;
+
+ rte_spinlock_lock(&intr_lock);
+
+ /* check if there is at least one callback registered for the fd */
+ TAILQ_FOREACH(src, &intr_sources, next) {
+ if (src->intr_handle.fd == intr_handle->fd) {
+ /* we had no interrupts for this */
+ if (TAILQ_EMPTY(&src->callbacks))
+ wake_thread = 1;
+
+ TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+ ret = 0;
+ break;
+ }
+ }
+
+ /* no existing callbacks for this - add new source */
+ if (src == NULL) {
+ if ((src = rte_zmalloc("interrupt source list",
+ sizeof(*src), 0)) == NULL) {
+ RTE_LOG(ERR, EAL, "Can not allocate memory\n");
+ rte_free(callback);
+ ret = -ENOMEM;
+ } else {
+ src->intr_handle = *intr_handle;
+ TAILQ_INIT(&src->callbacks);
+ TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+ TAILQ_INSERT_TAIL(&intr_sources, src, next);
+ wake_thread = 1;
+ ret = 0;
+ }
+ }
+
+ rte_spinlock_unlock(&intr_lock);
+
+ /**
+ * check whether we need to notify the pipe fd waited on by
+ * epoll_wait so that it rebuilds the wait list.
+ */
+ if (wake_thread)
+ if (write(intr_pipe.writefd, "1", 1) < 0)
+ return -EPIPE;
+
+ return (ret);
+}
+
+int
+rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb_fn, void *cb_arg)
+{
+ int ret;
+ struct rte_intr_source *src;
+ struct rte_intr_callback *cb, *next;
+
+ /* do parameter checking first */
+ if (intr_handle == NULL || intr_handle->fd < 0) {
+ RTE_LOG(ERR, EAL,
+ "Unregistering with invalid input parameter\n");
+ return -EINVAL;
+ }
+
+ rte_spinlock_lock(&intr_lock);
+
+ /* check if an interrupt source for the fd already exists */
+ TAILQ_FOREACH(src, &intr_sources, next)
+ if (src->intr_handle.fd == intr_handle->fd)
+ break;
+
+ /* No interrupt source registered for the fd */
+ if (src == NULL) {
+ ret = -ENOENT;
+
+ /* interrupt source has some active callbacks right now. */
+ } else if (src->active != 0) {
+ ret = -EAGAIN;
+
+ /* ok to remove. */
+ } else {
+ ret = 0;
+
+ /* walk through the callbacks and remove all that match. */
+ for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
+
+ next = TAILQ_NEXT(cb, next);
+
+ if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
+ cb->cb_arg == cb_arg)) {
+ TAILQ_REMOVE(&src->callbacks, cb, next);
+ rte_free(cb);
+ ret++;
+ }
+ }
+
+ /* all callbacks for that source are removed. */
+ if (TAILQ_EMPTY(&src->callbacks)) {
+ TAILQ_REMOVE(&intr_sources, src, next);
+ rte_free(src);
+ }
+ }
+
+ rte_spinlock_unlock(&intr_lock);
+
+ /* notify the pipe fd waited by epoll_wait to rebuild the wait list */
+ if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
+ ret = -EPIPE;
+ }
+
+ return (ret);
+}
+
+int
+rte_intr_enable(struct rte_intr_handle *intr_handle)
+{
+ const int value = 1;
+
+ if (!intr_handle || intr_handle->fd < 0)
+ return -1;
+
+ switch (intr_handle->type){
+ /* write to the uio fd to enable the interrupt */
+ case RTE_INTR_HANDLE_UIO:
+ if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Error enabling interrupts for fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+ break;
+ /* not used at this moment */
+ case RTE_INTR_HANDLE_ALARM:
+ return -1;
+#ifdef VFIO_PRESENT
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ if (vfio_enable_msix(intr_handle))
+ return -1;
+ break;
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ if (vfio_enable_msi(intr_handle))
+ return -1;
+ break;
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ if (vfio_enable_intx(intr_handle))
+ return -1;
+ break;
+#endif
+ /* unknown handle type */
+ default:
+ RTE_LOG(ERR, EAL,
+ "Unknown handle type of fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+rte_intr_disable(struct rte_intr_handle *intr_handle)
+{
+ const int value = 0;
+
+ if (!intr_handle || intr_handle->fd < 0)
+ return -1;
+
+ switch (intr_handle->type){
+ /* write to the uio fd to disable the interrupt */
+ case RTE_INTR_HANDLE_UIO:
+ if (write(intr_handle->fd, &value, sizeof(value)) < 0){
+ RTE_LOG(ERR, EAL,
+ "Error disabling interrupts for fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+ break;
+ /* not used at this moment */
+ case RTE_INTR_HANDLE_ALARM:
+ return -1;
+#ifdef VFIO_PRESENT
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ if (vfio_disable_msix(intr_handle))
+ return -1;
+ break;
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ if (vfio_disable_msi(intr_handle))
+ return -1;
+ break;
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ if (vfio_disable_intx(intr_handle))
+ return -1;
+ break;
+#endif
+ /* unknown handle type */
+ default:
+ RTE_LOG(ERR, EAL,
+ "Unknown handle type of fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+
+ return 0;
+}
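+
+/*
+ * Editor's note - illustrative usage sketch, not part of this patch: a
+ * driver typically registers one handler per device and removes it on
+ * teardown ("dev_intr_handler" and "dev" are hypothetical names):
+ *
+ *   rte_intr_callback_register(&dev->intr_handle, dev_intr_handler, dev);
+ *   rte_intr_enable(&dev->intr_handle);
+ *   ...
+ *   rte_intr_disable(&dev->intr_handle);
+ *   rte_intr_callback_unregister(&dev->intr_handle, dev_intr_handler, dev);
+ */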
+
+static int
+eal_intr_process_interrupts(struct epoll_event *events, int nfds)
+{
+ int n, bytes_read;
+ struct rte_intr_source *src;
+ struct rte_intr_callback *cb;
+ union rte_intr_read_buffer buf;
+ struct rte_intr_callback active_cb;
+
+ for (n = 0; n < nfds; n++) {
+
+ /**
+ * if the pipe fd is ready to read, return so the caller can
+ * rebuild the wait list.
+ */
+ if (events[n].data.fd == intr_pipe.readfd){
+ int r = read(intr_pipe.readfd, buf.charbuf,
+ sizeof(buf.charbuf));
+ RTE_SET_USED(r);
+ return -1;
+ }
+ rte_spinlock_lock(&intr_lock);
+ TAILQ_FOREACH(src, &intr_sources, next)
+ if (src->intr_handle.fd ==
+ events[n].data.fd)
+ break;
+ if (src == NULL){
+ rte_spinlock_unlock(&intr_lock);
+ continue;
+ }
+
+ /* mark this interrupt source as active and release the lock. */
+ src->active = 1;
+ rte_spinlock_unlock(&intr_lock);
+
+ /* set the length to be read for the different handle types */
+ switch (src->intr_handle.type) {
+ case RTE_INTR_HANDLE_UIO:
+ bytes_read = sizeof(buf.uio_intr_count);
+ break;
+ case RTE_INTR_HANDLE_ALARM:
+ bytes_read = sizeof(buf.timerfd_num);
+ break;
+#ifdef VFIO_PRESENT
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ bytes_read = sizeof(buf.vfio_intr_count);
+ break;
+#endif
+ default:
+ bytes_read = 1;
+ break;
+ }
+
+ /**
+ * read out to clear the ready-to-be-read flag
+ * for epoll_wait.
+ */
+ bytes_read = read(events[n].data.fd, &buf, bytes_read);
+
+ if (bytes_read < 0)
+ RTE_LOG(ERR, EAL, "Error reading from file "
+ "descriptor %d: %s\n", events[n].data.fd,
+ strerror(errno));
+ else if (bytes_read == 0)
+ RTE_LOG(ERR, EAL, "Read nothing from file "
+ "descriptor %d\n", events[n].data.fd);
+
+ /* grab a lock, again to call callbacks and update status. */
+ rte_spinlock_lock(&intr_lock);
+
+ if (bytes_read > 0) {
+
+ /* Finally, call all callbacks. */
+ TAILQ_FOREACH(cb, &src->callbacks, next) {
+
+ /* make a copy and unlock. */
+ active_cb = *cb;
+ rte_spinlock_unlock(&intr_lock);
+
+ /* call the actual callback */
+ active_cb.cb_fn(&src->intr_handle,
+ active_cb.cb_arg);
+
+ /* get the lock back. */
+ rte_spinlock_lock(&intr_lock);
+ }
+ }
+
+ /* we are done with that interrupt source; release it. */
+ src->active = 0;
+ rte_spinlock_unlock(&intr_lock);
+ }
+
+ return 0;
+}
+
+/**
+ * It handles all the interrupts.
+ *
+ * @param pfd
+ * epoll file descriptor.
+ * @param totalfds
+ * The number of file descriptors added in epoll.
+ *
+ * @return
+ * void
+ */
+static void
+eal_intr_handle_interrupts(int pfd, unsigned totalfds)
+{
+ struct epoll_event events[totalfds];
+ int nfds = 0;
+
+ for(;;) {
+ nfds = epoll_wait(pfd, events, totalfds,
+ EAL_INTR_EPOLL_WAIT_FOREVER);
+ /* epoll_wait fail */
+ if (nfds < 0) {
+ if (errno == EINTR)
+ continue;
+ RTE_LOG(ERR, EAL,
+ "epoll_wait returns with fail\n");
+ return;
+ }
+ /* epoll_wait timeout; will never happen here */
+ else if (nfds == 0)
+ continue;
+ /* epoll_wait has at least one fd ready to read */
+ if (eal_intr_process_interrupts(events, nfds) < 0)
+ return;
+ }
+}
+
+/**
+ * It builds/rebuilds up the epoll file descriptor with all the
+ * file descriptors being waited on. Then handles the interrupts.
+ *
+ * @param arg
+ * pointer. (unused)
+ *
+ * @return
+ * never return;
+ */
+static __attribute__((noreturn)) void *
+eal_intr_thread_main(__rte_unused void *arg)
+{
+ struct epoll_event ev;
+
+ /* host thread, never break out */
+ for (;;) {
+ /* build up the epoll fd with all descriptors we are to
+ * wait on then pass it to the handle_interrupts function
+ */
+ static struct epoll_event pipe_event = {
+ .events = EPOLLIN | EPOLLPRI,
+ };
+ struct rte_intr_source *src;
+ unsigned numfds = 0;
+
+ /* create epoll fd */
+ int pfd = epoll_create(1);
+ if (pfd < 0)
+ rte_panic("Cannot create epoll instance\n");
+
+ pipe_event.data.fd = intr_pipe.readfd;
+ /**
+ * add pipe fd into wait list, this pipe is used to
+ * rebuild the wait list.
+ */
+ if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
+ &pipe_event) < 0) {
+ rte_panic("Error adding fd to %d epoll_ctl, %s\n",
+ intr_pipe.readfd, strerror(errno));
+ }
+ numfds++;
+
+ rte_spinlock_lock(&intr_lock);
+
+ TAILQ_FOREACH(src, &intr_sources, next) {
+ if (src->callbacks.tqh_first == NULL)
+ continue; /* skip those with no callbacks */
+ ev.events = EPOLLIN | EPOLLPRI;
+ ev.data.fd = src->intr_handle.fd;
+
+ /**
+ * add all the uio device file descriptors
+ * into the wait list.
+ */
+ if (epoll_ctl(pfd, EPOLL_CTL_ADD,
+ src->intr_handle.fd, &ev) < 0){
+ rte_panic("Error adding fd %d epoll_ctl, %s\n",
+ src->intr_handle.fd, strerror(errno));
+ }
+ else
+ numfds++;
+ }
+ rte_spinlock_unlock(&intr_lock);
+ /* serve the interrupt */
+ eal_intr_handle_interrupts(pfd, numfds);
+
+ /**
+ * when we return, we need to rebuild the
+ * list of fds to monitor.
+ */
+ close(pfd);
+ }
+}
+
+int
+rte_eal_intr_init(void)
+{
+ int ret = 0;
+
+ /* init the global interrupt source head */
+ TAILQ_INIT(&intr_sources);
+
+ /**
+ * create a pipe that epoll will wait on; writing to it notifies
+ * the interrupt thread to rebuild the epoll wait list.
+ */
+ if (pipe(intr_pipe.pipefd) < 0)
+ return -1;
+
+ /* create the host thread to wait/handle the interrupt */
+ ret = pthread_create(&intr_thread, NULL,
+ eal_intr_thread_main, NULL);
+ if (ret != 0)
+ RTE_LOG(ERR, EAL,
+ "Failed to create thread for interrupt handling\n");
+
+ return -ret;
+}
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_ivshmem.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_ivshmem.c
new file mode 100755
index 00000000..413a9bae
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_ivshmem.c
@@ -0,0 +1,968 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef RTE_LIBRTE_IVSHMEM /* hide it from coverage */
+
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <sys/file.h>
+#include <string.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_common.h>
+#include <rte_ivshmem.h>
+#include <rte_tailq_elem.h>
+
+#include "eal_internal_cfg.h"
+#include "eal_private.h"
+
+#define PCI_VENDOR_ID_IVSHMEM 0x1Af4
+#define PCI_DEVICE_ID_IVSHMEM 0x1110
+
+#define IVSHMEM_MAGIC 0x0BADC0DE
+
+#define IVSHMEM_RESOURCE_PATH "/sys/bus/pci/devices/%04x:%02x:%02x.%x/resource2"
+#define IVSHMEM_CONFIG_PATH "/var/run/.%s_ivshmem_config"
+
+#define PHYS 0x1
+#define VIRT 0x2
+#define IOREMAP 0x4
+#define FULL (PHYS|VIRT|IOREMAP)
+
+#define METADATA_SIZE_ALIGNED \
+ (RTE_ALIGN_CEIL(sizeof(struct rte_ivshmem_metadata),pagesz))
+
+#define CONTAINS(x,y)\
+ (((y).addr_64 >= (x).addr_64) && ((y).addr_64 < (x).addr_64 + (x).len))
+
+#define DIM(x) (sizeof(x)/sizeof(x[0]))
+
+struct ivshmem_pci_device {
+ char path[PATH_MAX];
+ phys_addr_t ioremap_addr;
+};
+
+/* data type to store in config */
+struct ivshmem_segment {
+ struct rte_ivshmem_metadata_entry entry;
+ uint64_t align;
+ char path[PATH_MAX];
+};
+struct ivshmem_shared_config {
+ struct ivshmem_segment segment[RTE_MAX_MEMSEG];
+ uint32_t segment_idx;
+ struct ivshmem_pci_device pci_devs[RTE_LIBRTE_IVSHMEM_MAX_PCI_DEVS];
+ uint32_t pci_devs_idx;
+};
+static struct ivshmem_shared_config * ivshmem_config;
+static int memseg_idx;
+static int pagesz;
+
+/* Tailq heads to add rings to */
+TAILQ_HEAD(rte_ring_list, rte_tailq_entry);
+
+/*
+ * Utility functions
+ */
+
+static int
+is_ivshmem_device(struct rte_pci_device * dev)
+{
+ return (dev->id.vendor_id == PCI_VENDOR_ID_IVSHMEM
+ && dev->id.device_id == PCI_DEVICE_ID_IVSHMEM);
+}
+
+static void *
+map_metadata(int fd, uint64_t len)
+{
+ size_t metadata_len = sizeof(struct rte_ivshmem_metadata);
+ size_t aligned_len = METADATA_SIZE_ALIGNED;
+
+ return mmap(NULL, metadata_len, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, len - aligned_len);
+}
+
+static void
+unmap_metadata(void * ptr)
+{
+ munmap(ptr, sizeof(struct rte_ivshmem_metadata));
+}
+
+static int
+has_ivshmem_metadata(int fd, uint64_t len)
+{
+ struct rte_ivshmem_metadata metadata;
+ void * ptr;
+
+ ptr = map_metadata(fd, len);
+
+ if (ptr == MAP_FAILED)
+ return -1;
+
+ metadata = *(struct rte_ivshmem_metadata*) (ptr);
+
+ unmap_metadata(ptr);
+
+ return metadata.magic_number == IVSHMEM_MAGIC;
+}
+
+static void
+remove_segment(struct ivshmem_segment * ms, int len, int idx)
+{
+ int i;
+
+ for (i = idx; i < len - 1; i++)
+ memcpy(&ms[i], &ms[i+1], sizeof(struct ivshmem_segment));
+ memset(&ms[len-1], 0, sizeof(struct ivshmem_segment));
+}
+
+static int
+overlap(const struct rte_memzone * mz1, const struct rte_memzone * mz2)
+{
+ uint64_t start1, end1, start2, end2;
+ uint64_t p_start1, p_end1, p_start2, p_end2;
+ uint64_t i_start1, i_end1, i_start2, i_end2;
+ int result = 0;
+
+ /* gather virtual addresses */
+ start1 = mz1->addr_64;
+ end1 = mz1->addr_64 + mz1->len;
+ start2 = mz2->addr_64;
+ end2 = mz2->addr_64 + mz2->len;
+
+ /* gather physical addresses */
+ p_start1 = mz1->phys_addr;
+ p_end1 = mz1->phys_addr + mz1->len;
+ p_start2 = mz2->phys_addr;
+ p_end2 = mz2->phys_addr + mz2->len;
+
+ /* gather ioremap addresses */
+ i_start1 = mz1->ioremap_addr;
+ i_end1 = mz1->ioremap_addr + mz1->len;
+ i_start2 = mz2->ioremap_addr;
+ i_end2 = mz2->ioremap_addr + mz2->len;
+
+ /* check for overlap in virtual addresses */
+ if (start1 >= start2 && start1 < end2)
+ result |= VIRT;
+ if (start2 >= start1 && start2 < end1)
+ result |= VIRT;
+
+ /* check for overlap in physical addresses */
+ if (p_start1 >= p_start2 && p_start1 < p_end2)
+ result |= PHYS;
+ if (p_start2 >= p_start1 && p_start2 < p_end1)
+ result |= PHYS;
+
+ /* check for overlap in ioremap addresses */
+ if (i_start1 >= i_start2 && i_start1 < i_end2)
+ result |= IOREMAP;
+ if (i_start2 >= i_start1 && i_start2 < i_end1)
+ result |= IOREMAP;
+
+ return result;
+}
+
+static int
+adjacent(const struct rte_memzone * mz1, const struct rte_memzone * mz2)
+{
+ uint64_t start1, end1, start2, end2;
+ uint64_t p_start1, p_end1, p_start2, p_end2;
+ uint64_t i_start1, i_end1, i_start2, i_end2;
+ int result = 0;
+
+ /* gather virtual addresses */
+ start1 = mz1->addr_64;
+ end1 = mz1->addr_64 + mz1->len;
+ start2 = mz2->addr_64;
+ end2 = mz2->addr_64 + mz2->len;
+
+ /* gather physical addresses */
+ p_start1 = mz1->phys_addr;
+ p_end1 = mz1->phys_addr + mz1->len;
+ p_start2 = mz2->phys_addr;
+ p_end2 = mz2->phys_addr + mz2->len;
+
+ /* gather ioremap addresses */
+ i_start1 = mz1->ioremap_addr;
+ i_end1 = mz1->ioremap_addr + mz1->len;
+ i_start2 = mz2->ioremap_addr;
+ i_end2 = mz2->ioremap_addr + mz2->len;
+
+ /* check if segments are virtually adjacent */
+ if (start1 == end2)
+ result |= VIRT;
+ if (start2 == end1)
+ result |= VIRT;
+
+ /* check if segments are physically adjacent */
+ if (p_start1 == p_end2)
+ result |= PHYS;
+ if (p_start2 == p_end1)
+ result |= PHYS;
+
+ /* check if segments are ioremap-adjacent */
+ if (i_start1 == i_end2)
+ result |= IOREMAP;
+ if (i_start2 == i_end1)
+ result |= IOREMAP;
+
+ return result;
+}
+
+static int
+has_adjacent_segments(struct ivshmem_segment * ms, int len)
+{
+ int i, j, a;
+
+ for (i = 0; i < len; i++)
+ for (j = i + 1; j < len; j++) {
+ a = adjacent(&ms[i].entry.mz, &ms[j].entry.mz);
+
+ /* check if segments are adjacent virtually and/or physically but
+ * not ioremap (since that would indicate that they are from
+ * different PCI devices and thus don't need to be concatenated).
+ */
+ if ((a & (VIRT|PHYS)) > 0 && (a & IOREMAP) == 0)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+has_overlapping_segments(struct ivshmem_segment * ms, int len)
+{
+ int i, j;
+
+ for (i = 0; i < len; i++)
+ for (j = i + 1; j < len; j++)
+ if (overlap(&ms[i].entry.mz, &ms[j].entry.mz))
+ return 1;
+ return 0;
+}
+
+static int
+seg_compare(const void * a, const void * b)
+{
+ const struct ivshmem_segment * s1 = (const struct ivshmem_segment*) a;
+ const struct ivshmem_segment * s2 = (const struct ivshmem_segment*) b;
+
+ /* move unallocated zones to the end */
+ if (s1->entry.mz.addr == NULL && s2->entry.mz.addr == NULL)
+ return 0;
+ if (s1->entry.mz.addr == 0)
+ return 1;
+ if (s2->entry.mz.addr == 0)
+ return -1;
+
+ return s1->entry.mz.phys_addr > s2->entry.mz.phys_addr;
+}
+
+#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
+static void
+entry_dump(struct rte_ivshmem_metadata_entry *e)
+{
+ RTE_LOG(DEBUG, EAL, "\tvirt: %p-%p\n", e->mz.addr,
+ RTE_PTR_ADD(e->mz.addr, e->mz.len));
+ RTE_LOG(DEBUG, EAL, "\tphys: 0x%" PRIx64 "-0x%" PRIx64 "\n",
+ e->mz.phys_addr,
+ e->mz.phys_addr + e->mz.len);
+ RTE_LOG(DEBUG, EAL, "\tio: 0x%" PRIx64 "-0x%" PRIx64 "\n",
+ e->mz.ioremap_addr,
+ e->mz.ioremap_addr + e->mz.len);
+ RTE_LOG(DEBUG, EAL, "\tlen: 0x%" PRIx64 "\n", e->mz.len);
+ RTE_LOG(DEBUG, EAL, "\toff: 0x%" PRIx64 "\n", e->offset);
+}
+#endif
+
+
+
+/*
+ * Actual useful code
+ */
+
+/* read through metadata mapped from the IVSHMEM device */
+static int
+read_metadata(char * path, int path_len, int fd, uint64_t flen)
+{
+ struct rte_ivshmem_metadata metadata;
+ struct rte_ivshmem_metadata_entry * entry;
+ int idx, i;
+ void * ptr;
+
+ ptr = map_metadata(fd, flen);
+
+ if (ptr == MAP_FAILED)
+ return -1;
+
+ metadata = *(struct rte_ivshmem_metadata*) (ptr);
+
+ unmap_metadata(ptr);
+
+ RTE_LOG(DEBUG, EAL, "Parsing metadata for \"%s\"\n", metadata.name);
+
+ idx = ivshmem_config->segment_idx;
+
+ for (i = 0; i < RTE_LIBRTE_IVSHMEM_MAX_ENTRIES &&
+ idx <= RTE_MAX_MEMSEG; i++) {
+
+ if (idx == RTE_MAX_MEMSEG) {
+ RTE_LOG(ERR, EAL, "Not enough memory segments!\n");
+ return -1;
+ }
+
+ entry = &metadata.entry[i];
+
+ /* stop on uninitialized memzone */
+ if (entry->mz.len == 0)
+ break;
+
+ /* copy metadata entry */
+ memcpy(&ivshmem_config->segment[idx].entry, entry,
+ sizeof(struct rte_ivshmem_metadata_entry));
+
+ /* copy path */
+ snprintf(ivshmem_config->segment[idx].path, path_len, "%s", path);
+
+ idx++;
+ }
+ ivshmem_config->segment_idx = idx;
+
+ return 0;
+}
+
+/* check through each segment and look for adjacent or overlapping ones. */
+static int
+cleanup_segments(struct ivshmem_segment * ms, int tbl_len)
+{
+ struct ivshmem_segment * s, * tmp;
+ int i, j, concat, seg_adjacent, seg_overlapping;
+ uint64_t start1, start2, end1, end2, p_start1, p_start2, i_start1, i_start2;
+
+ qsort(ms, tbl_len, sizeof(struct ivshmem_segment),
+ seg_compare);
+
+ while (has_overlapping_segments(ms, tbl_len) ||
+ has_adjacent_segments(ms, tbl_len)) {
+
+ for (i = 0; i < tbl_len; i++) {
+ s = &ms[i];
+
+ concat = 0;
+
+ for (j = i + 1; j < tbl_len; j++) {
+ tmp = &ms[j];
+
+ /* check if this segment is overlapping with existing segment,
+ * or is adjacent to existing segment */
+ seg_overlapping = overlap(&s->entry.mz, &tmp->entry.mz);
+ seg_adjacent = adjacent(&s->entry.mz, &tmp->entry.mz);
+
+ /* check if segments fully overlap or are fully adjacent */
+ if ((seg_adjacent == FULL) || (seg_overlapping == FULL)) {
+
+#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
+ RTE_LOG(DEBUG, EAL, "Concatenating segments\n");
+ RTE_LOG(DEBUG, EAL, "Segment %i:\n", i);
+ entry_dump(&s->entry);
+ RTE_LOG(DEBUG, EAL, "Segment %i:\n", j);
+ entry_dump(&tmp->entry);
+#endif
+
+ start1 = s->entry.mz.addr_64;
+ start2 = tmp->entry.mz.addr_64;
+ p_start1 = s->entry.mz.phys_addr;
+ p_start2 = tmp->entry.mz.phys_addr;
+ i_start1 = s->entry.mz.ioremap_addr;
+ i_start2 = tmp->entry.mz.ioremap_addr;
+ end1 = s->entry.mz.addr_64 + s->entry.mz.len;
+ end2 = tmp->entry.mz.addr_64 + tmp->entry.mz.len;
+
+ /* settle for minimum start address and maximum length */
+ s->entry.mz.addr_64 = RTE_MIN(start1, start2);
+ s->entry.mz.phys_addr = RTE_MIN(p_start1, p_start2);
+ s->entry.mz.ioremap_addr = RTE_MIN(i_start1, i_start2);
+ s->entry.offset = RTE_MIN(s->entry.offset, tmp->entry.offset);
+ s->entry.mz.len = RTE_MAX(end1, end2) - s->entry.mz.addr_64;
+ concat = 1;
+
+#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
+ RTE_LOG(DEBUG, EAL, "Resulting segment:\n");
+ entry_dump(&s->entry);
+
+#endif
+ }
+ /* if segments do not fully overlap, we have an error condition.
+ * adjacent segments can coexist.
+ */
+ else if (seg_overlapping > 0) {
+ RTE_LOG(ERR, EAL, "Segments %i and %i overlap!\n", i, j);
+#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
+ RTE_LOG(DEBUG, EAL, "Segment %i:\n", i);
+ entry_dump(&s->entry);
+ RTE_LOG(DEBUG, EAL, "Segment %i:\n", j);
+ entry_dump(&tmp->entry);
+#endif
+ return -1;
+ }
+ if (concat)
+ break;
+ }
+ /* if we concatenated, remove segment at j */
+ if (concat) {
+ remove_segment(ms, tbl_len, j);
+ tbl_len--;
+ break;
+ }
+ }
+ }
+
+ return tbl_len;
+}
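+
+/*
+ * Editor's note - illustrative example, not part of this patch: overlap()
+ * and adjacent() return a bitmask of VIRT, PHYS and IOREMAP. Two segments
+ * are merged by cleanup_segments() only when one of those checks reports
+ * FULL, i.e. the segments touch or overlap in their virtual, physical and
+ * ioremap address ranges at the same time; a partial overlap is treated
+ * as an error.
+ */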
+
+static int
+create_shared_config(void)
+{
+ char path[PATH_MAX];
+ int fd;
+
+ /* build ivshmem config file path */
+ snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
+ internal_config.hugefile_prefix);
+
+ fd = open(path, O_CREAT | O_RDWR, 0600);
+
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Could not open %s: %s\n", path, strerror(errno));
+ return -1;
+ }
+
+ /* try ex-locking first - if the file is locked, we have a problem */
+ if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
+ RTE_LOG(ERR, EAL, "Locking %s failed: %s\n", path, strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ if (ftruncate(fd, sizeof(struct ivshmem_shared_config)) < 0) {
+ RTE_LOG(ERR, EAL, "ftruncate failed: %s\n", strerror(errno));
+ return -1;
+ }
+
+ ivshmem_config = mmap(NULL, sizeof(struct ivshmem_shared_config),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+
+ if (ivshmem_config == MAP_FAILED)
+ return -1;
+
+ memset(ivshmem_config, 0, sizeof(struct ivshmem_shared_config));
+
+ /* change the exclusive lock we got earlier to a shared lock */
+ if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
+ RTE_LOG(ERR, EAL, "Locking %s failed: %s \n", path, strerror(errno));
+ return -1;
+ }
+
+ close(fd);
+
+ return 0;
+}
+
+/* open shared config file and, if present, map the config.
+ * having no config file is not an error condition, as we later check if
+ * ivshmem_config is NULL (if it is, that means nothing was mapped). */
+static int
+open_shared_config(void)
+{
+ char path[PATH_MAX];
+ int fd;
+
+ /* build ivshmem config file path */
+ snprintf(path, sizeof(path), IVSHMEM_CONFIG_PATH,
+ internal_config.hugefile_prefix);
+
+ fd = open(path, O_RDONLY);
+
+ /* if the file doesn't exist, just return success */
+ if (fd < 0 && errno == ENOENT)
+ return 0;
+ /* else we have an error condition */
+ else if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Could not open %s: %s\n",
+ path, strerror(errno));
+ return -1;
+ }
+
+ /* try ex-locking first - if the lock *does* succeed, this means it's a
+ * stray config file, so it should be deleted.
+ */
+ if (flock(fd, LOCK_EX | LOCK_NB) != -1) {
+
+ /* if we can't remove the file, something is wrong */
+ if (unlink(path) < 0) {
+ RTE_LOG(ERR, EAL, "Could not remove %s: %s\n", path,
+ strerror(errno));
+ return -1;
+ }
+
+ /* release the lock */
+ flock(fd, LOCK_UN);
+ close(fd);
+
+ /* return success as having a stray config file is equivalent to not
+ * having config file at all.
+ */
+ return 0;
+ }
+
+ ivshmem_config = mmap(NULL, sizeof(struct ivshmem_shared_config),
+ PROT_READ, MAP_SHARED, fd, 0);
+
+ if (ivshmem_config == MAP_FAILED)
+ return -1;
+
+ /* place a shared lock on config file */
+ if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
+ RTE_LOG(ERR, EAL, "Locking %s failed: %s \n", path, strerror(errno));
+ return -1;
+ }
+
+ close(fd);
+
+ return 0;
+}
+
+/*
+ * This function does the following:
+ *
+ * 1) Builds a table of ivshmem_segments with proper offset alignment
+ * 2) Cleans up that table so that we don't have any overlapping or adjacent
+ * memory segments
+ * 3) Creates memsegs from this table and maps them into memory.
+ */
+static inline int
+map_all_segments(void)
+{
+ struct ivshmem_segment ms_tbl[RTE_MAX_MEMSEG];
+ struct ivshmem_pci_device * pci_dev;
+ struct rte_mem_config * mcfg;
+ struct ivshmem_segment * seg;
+ int fd, fd_zero;
+ unsigned i, j;
+ struct rte_memzone mz;
+ struct rte_memseg ms;
+ void * base_addr;
+ uint64_t align, len;
+ phys_addr_t ioremap_addr;
+
+ ioremap_addr = 0;
+
+ memset(ms_tbl, 0, sizeof(ms_tbl));
+ memset(&mz, 0, sizeof(struct rte_memzone));
+ memset(&ms, 0, sizeof(struct rte_memseg));
+
+ /* first, build a table of memsegs to map, to avoid failed mmaps due to
+ * overlaps
+ */
+ for (i = 0; i < ivshmem_config->segment_idx && i <= RTE_MAX_MEMSEG; i++) {
+ if (i == RTE_MAX_MEMSEG) {
+ RTE_LOG(ERR, EAL, "Too many segments requested!\n");
+ return -1;
+ }
+
+ seg = &ivshmem_config->segment[i];
+
+ /* copy segment to table */
+ memcpy(&ms_tbl[i], seg, sizeof(struct ivshmem_segment));
+
+ /* find ioremap addr */
+ for (j = 0; j < DIM(ivshmem_config->pci_devs); j++) {
+ pci_dev = &ivshmem_config->pci_devs[j];
+ if (!strncmp(pci_dev->path, seg->path, sizeof(pci_dev->path))) {
+ ioremap_addr = pci_dev->ioremap_addr;
+ break;
+ }
+ }
+ if (ioremap_addr == 0) {
+ RTE_LOG(ERR, EAL, "Cannot find ioremap addr!\n");
+ return -1;
+ }
+
+ /* work out alignments */
+ align = seg->entry.mz.addr_64 -
+ RTE_ALIGN_FLOOR(seg->entry.mz.addr_64, 0x1000);
+ len = RTE_ALIGN_CEIL(seg->entry.mz.len + align, 0x1000);
+
+ /* save original alignments */
+ ms_tbl[i].align = align;
+
+ /* create a memory zone */
+ mz.addr_64 = seg->entry.mz.addr_64 - align;
+ mz.len = len;
+ mz.hugepage_sz = seg->entry.mz.hugepage_sz;
+ mz.phys_addr = seg->entry.mz.phys_addr - align;
+
+ /* find true physical address */
+ mz.ioremap_addr = ioremap_addr + seg->entry.offset - align;
+
+ ms_tbl[i].entry.offset = seg->entry.offset - align;
+
+ memcpy(&ms_tbl[i].entry.mz, &mz, sizeof(struct rte_memzone));
+ }
+
+ /* clean up the segments */
+ memseg_idx = cleanup_segments(ms_tbl, ivshmem_config->segment_idx);
+
+ if (memseg_idx < 0)
+ return -1;
+
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ fd_zero = open("/dev/zero", O_RDWR);
+
+ if (fd_zero < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open /dev/zero: %s\n", strerror(errno));
+ return -1;
+ }
+
+ /* create memsegs and put them into DPDK memory */
+ for (i = 0; i < (unsigned) memseg_idx; i++) {
+
+ seg = &ms_tbl[i];
+
+ ms.addr_64 = seg->entry.mz.addr_64;
+ ms.hugepage_sz = seg->entry.mz.hugepage_sz;
+ ms.len = seg->entry.mz.len;
+ ms.nchannel = rte_memory_get_nchannel();
+ ms.nrank = rte_memory_get_nrank();
+ ms.phys_addr = seg->entry.mz.phys_addr;
+ ms.ioremap_addr = seg->entry.mz.ioremap_addr;
+ ms.socket_id = seg->entry.mz.socket_id;
+
+ base_addr = mmap(ms.addr, ms.len,
+ PROT_READ | PROT_WRITE, MAP_PRIVATE, fd_zero, 0);
+
+ if (base_addr == MAP_FAILED || base_addr != ms.addr) {
+ RTE_LOG(ERR, EAL, "Cannot map /dev/zero!\n");
+ return -1;
+ }
+
+ fd = open(seg->path, O_RDWR);
+
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", seg->path,
+ strerror(errno));
+ return -1;
+ }
+
+ munmap(ms.addr, ms.len);
+
+ base_addr = mmap(ms.addr, ms.len,
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd,
+ seg->entry.offset);
+
+
+ if (base_addr == MAP_FAILED || base_addr != ms.addr) {
+ RTE_LOG(ERR, EAL, "Cannot map segment into memory: "
+ "expected %p got %p (%s)\n", ms.addr, base_addr,
+ strerror(errno));
+ return -1;
+ }
+
+ RTE_LOG(DEBUG, EAL, "Memory segment mapped: %p (len %" PRIx64 ") at "
+ "offset 0x%" PRIx64 "\n",
+ ms.addr, ms.len, seg->entry.offset);
+
+ /* put the pointers back into their real positions using original
+ * alignment */
+ ms.addr_64 += seg->align;
+ ms.phys_addr += seg->align;
+ ms.ioremap_addr += seg->align;
+ ms.len -= seg->align;
+
+ /* at this point, the rest of DPDK memory is not initialized, so we
+ * expect memsegs to be empty */
+ memcpy(&mcfg->memseg[i], &ms,
+ sizeof(struct rte_memseg));
+ memcpy(&mcfg->free_memseg[i], &ms,
+ sizeof(struct rte_memseg));
+
+
+ /* adjust the free_memseg so that there's no free space left */
+ mcfg->free_memseg[i].ioremap_addr += mcfg->free_memseg[i].len;
+ mcfg->free_memseg[i].phys_addr += mcfg->free_memseg[i].len;
+ mcfg->free_memseg[i].addr_64 += mcfg->free_memseg[i].len;
+ mcfg->free_memseg[i].len = 0;
+
+ close(fd);
+
+ RTE_LOG(DEBUG, EAL, "IVSHMEM segment found, size: 0x%lx\n",
+ ms.len);
+ }
+
+ return 0;
+}
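map_all_segments() uses a reserve-then-replace idiom for each segment: the exact virtual range is first claimed with a private mapping of /dev/zero, the claim is dropped, and the BAR file is immediately mapped MAP_SHARED at the same address, failing if the kernel hands back a different pointer. A condensed sketch of that idiom (illustrative only, not the function above):

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/* Map 'len' bytes of 'fd' (starting at 'off') at the fixed address 'want'.
 * Returns 0 on success, -1 if the range is unavailable. Sketch only. */
static int map_at_fixed_va(void *want, size_t len, int fd, off_t off)
{
    int fd_zero = open("/dev/zero", O_RDWR);
    void *va;

    if (fd_zero < 0)
        return -1;
    /* step 1: make sure the requested range is actually free */
    va = mmap(want, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd_zero, 0);
    close(fd_zero);
    if (va == MAP_FAILED || va != want)
        return -1;
    munmap(va, len);
    /* step 2: replace the reservation with the real shared mapping */
    va = mmap(want, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, off);
    return (va == want) ? 0 : -1;
}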
+
+/* this happens at a later stage, after general EAL memory initialization */
+int
+rte_eal_ivshmem_obj_init(void)
+{
+ struct rte_ring_list* ring_list = NULL;
+ struct rte_mem_config * mcfg;
+ struct ivshmem_segment * seg;
+ struct rte_memzone * mz;
+ struct rte_ring * r;
+ struct rte_tailq_entry *te;
+ unsigned i, ms, idx;
+ uint64_t offset;
+
+ /* a secondary process does not need any object discovery - everything
+ * is already in the shared config */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY || ivshmem_config == NULL)
+ return 0;
+
+ /* check that we have an initialised ring tail queue */
+ if ((ring_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
+ RTE_LOG(ERR, EAL, "No rte_ring tailq found!\n");
+ return -1;
+ }
+
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ /* create memzones */
+ for (i = 0; i < ivshmem_config->segment_idx && i <= RTE_MAX_MEMZONE; i++) {
+
+ seg = &ivshmem_config->segment[i];
+
+ /* add memzone */
+ if (mcfg->memzone_idx == RTE_MAX_MEMZONE) {
+ RTE_LOG(ERR, EAL, "No more memory zones available!\n");
+ return -1;
+ }
+
+ idx = mcfg->memzone_idx;
+
+ RTE_LOG(DEBUG, EAL, "Found memzone: '%s' at %p (len 0x%" PRIx64 ")\n",
+ seg->entry.mz.name, seg->entry.mz.addr, seg->entry.mz.len);
+
+ memcpy(&mcfg->memzone[idx], &seg->entry.mz,
+ sizeof(struct rte_memzone));
+
+ /* find ioremap address */
+ for (ms = 0; ms <= RTE_MAX_MEMSEG; ms++) {
+ if (ms == RTE_MAX_MEMSEG) {
+ RTE_LOG(ERR, EAL, "Physical address of segment not found!\n");
+ return -1;
+ }
+ if (CONTAINS(mcfg->memseg[ms], mcfg->memzone[idx])) {
+ offset = mcfg->memzone[idx].addr_64 -
+ mcfg->memseg[ms].addr_64;
+ mcfg->memzone[idx].ioremap_addr = mcfg->memseg[ms].ioremap_addr +
+ offset;
+ break;
+ }
+ }
+
+ mcfg->memzone_idx++;
+ }
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find rings */
+ for (i = 0; i < mcfg->memzone_idx; i++) {
+ mz = &mcfg->memzone[i];
+
+ /* check if memzone has a ring prefix */
+ if (strncmp(mz->name, RTE_RING_MZ_PREFIX,
+ sizeof(RTE_RING_MZ_PREFIX) - 1) != 0)
+ continue;
+
+ r = (struct rte_ring*) (mz->addr_64);
+
+ te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot allocate ring tailq entry!\n");
+ return -1;
+ }
+
+ te->data = (void *) r;
+
+ TAILQ_INSERT_TAIL(ring_list, te, next);
+
+ RTE_LOG(DEBUG, EAL, "Found ring: '%s' at %p\n", r->name, mz->addr);
+ }
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
+ rte_memzone_dump(stdout);
+ rte_ring_list_dump(stdout);
+#endif
+
+ return 0;
+}
+
+/* initialize ivshmem structures */
+int rte_eal_ivshmem_init(void)
+{
+ struct rte_pci_device * dev;
+ struct rte_pci_resource * res;
+ int fd, ret;
+ char path[PATH_MAX];
+
+ /* initialize everything to 0 */
+ memset(path, 0, sizeof(path));
+ ivshmem_config = NULL;
+
+ pagesz = getpagesize();
+
+ RTE_LOG(DEBUG, EAL, "Searching for IVSHMEM devices...\n");
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+
+ if (open_shared_config() < 0) {
+ RTE_LOG(ERR, EAL, "Could not open IVSHMEM config!\n");
+ return -1;
+ }
+ }
+ else {
+
+ TAILQ_FOREACH(dev, &pci_device_list, next) {
+
+ if (is_ivshmem_device(dev)) {
+
+ /* IVSHMEM memory is always on BAR2 */
+ res = &dev->mem_resource[2];
+
+ /* if we don't have a BAR2 */
+ if (res->len == 0)
+ continue;
+
+ /* construct pci device path */
+ snprintf(path, sizeof(path), IVSHMEM_RESOURCE_PATH,
+ dev->addr.domain, dev->addr.bus, dev->addr.devid,
+ dev->addr.function);
+
+ /* try to find memseg */
+ fd = open(path, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Could not open %s\n", path);
+ return -1;
+ }
+
+ /* check if it's a DPDK IVSHMEM device */
+ ret = has_ivshmem_metadata(fd, res->len);
+
+ /* is DPDK device */
+ if (ret == 1) {
+
+ /* config file creation is deferred until the first
+ * DPDK device is found, and the file only needs to
+ * be created once. */
+ if (ivshmem_config == NULL &&
+ create_shared_config() < 0) {
+ RTE_LOG(ERR, EAL, "Could not create IVSHMEM config!\n");
+ close(fd);
+ return -1;
+ }
+
+ if (read_metadata(path, sizeof(path), fd, res->len) < 0) {
+ RTE_LOG(ERR, EAL, "Could not read metadata from"
+ " device %02x:%02x.%x!\n", dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ close(fd);
+ return -1;
+ }
+
+ if (ivshmem_config->pci_devs_idx == RTE_LIBRTE_IVSHMEM_MAX_PCI_DEVS) {
+ RTE_LOG(WARNING, EAL,
+ "IVSHMEM PCI device limit exceeded. Increase "
+ "CONFIG_RTE_LIBRTE_IVSHMEM_MAX_PCI_DEVS in "
+ "your config file.\n");
+ break;
+ }
+
+ RTE_LOG(INFO, EAL, "Found IVSHMEM device %02x:%02x.%x\n",
+ dev->addr.bus, dev->addr.devid, dev->addr.function);
+
+ ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].ioremap_addr = res->phys_addr;
+ snprintf(ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].path,
+ sizeof(ivshmem_config->pci_devs[ivshmem_config->pci_devs_idx].path),
+ "%s", path);
+
+ ivshmem_config->pci_devs_idx++;
+ }
+ /* failed to read */
+ else if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Could not read IVSHMEM device: %s\n",
+ strerror(errno));
+ close(fd);
+ return -1;
+ }
+ /* not a DPDK device */
+ else
+ RTE_LOG(DEBUG, EAL, "Skipping non-DPDK IVSHMEM device\n");
+
+ /* close the BAR fd */
+ close(fd);
+ }
+ }
+ }
+
+ /* ivshmem_config is not NULL only if config was created and/or mapped */
+ if (ivshmem_config) {
+ if (map_all_segments() < 0) {
+ RTE_LOG(ERR, EAL, "Mapping IVSHMEM segments failed!\n");
+ return -1;
+ }
+ }
+ else {
+ RTE_LOG(DEBUG, EAL, "No IVSHMEM configuration found! \n");
+ }
+
+ return 0;
+}
+
+#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_lcore.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_lcore.c
new file mode 100755
index 00000000..c67e0e68
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_lcore.c
@@ -0,0 +1,191 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <limits.h>
+#include <string.h>
+#include <dirent.h>
+
+#include <rte_log.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_string_fns.h>
+#include <rte_debug.h>
+
+#include "eal_private.h"
+#include "eal_filesystem.h"
+
+#define SYS_CPU_DIR "/sys/devices/system/cpu/cpu%u"
+#define CORE_ID_FILE "topology/core_id"
+#define PHYS_PKG_FILE "topology/physical_package_id"
+
+/* Check if a cpu is present by the presence of the cpu information for it */
+static int
+cpu_detected(unsigned lcore_id)
+{
+ char path[PATH_MAX];
+ int len = snprintf(path, sizeof(path), SYS_CPU_DIR
+ "/"CORE_ID_FILE, lcore_id);
+ if (len <= 0 || (unsigned)len >= sizeof(path))
+ return 0;
+ if (access(path, F_OK) != 0)
+ return 0;
+
+ return 1;
+}
+
+/* Get CPU socket id (NUMA node) by reading directory
+ * /sys/devices/system/cpu/cpuX looking for symlink "nodeY"
+ * which gives the NUMA topology information.
+ * Note: physical package id != NUMA node, but we use it as a
+ * fallback for kernels which don't create a nodeY link
+ */
+static unsigned
+cpu_socket_id(unsigned lcore_id)
+{
+ const char node_prefix[] = "node";
+ const size_t prefix_len = sizeof(node_prefix) - 1;
+ char path[PATH_MAX];
+ DIR *d = NULL;
+ unsigned long id = 0;
+ struct dirent *e;
+ char *endptr = NULL;
+
+ int len = snprintf(path, sizeof(path),
+ SYS_CPU_DIR, lcore_id);
+ if (len <= 0 || (unsigned)len >= sizeof(path))
+ goto err;
+
+ d = opendir(path);
+ if (!d)
+ goto err;
+
+ while ((e = readdir(d)) != NULL) {
+ if (strncmp(e->d_name, node_prefix, prefix_len) == 0) {
+ id = strtoul(e->d_name+prefix_len, &endptr, 0);
+ break;
+ }
+ }
+ if (endptr == NULL || *endptr!='\0' || endptr == e->d_name+prefix_len) {
+ RTE_LOG(WARNING, EAL, "Cannot read numa node link "
+ "for lcore %u - using physical package id instead\n",
+ lcore_id);
+
+ len = snprintf(path, sizeof(path), SYS_CPU_DIR "/%s",
+ lcore_id, PHYS_PKG_FILE);
+ if (len <= 0 || (unsigned)len >= sizeof(path))
+ goto err;
+ if (eal_parse_sysfs_value(path, &id) != 0)
+ goto err;
+ }
+ closedir(d);
+ return (unsigned)id;
+
+err:
+ if (d)
+ closedir(d);
+ RTE_LOG(ERR, EAL, "Error getting NUMA socket information from %s "
+ "for lcore %u - assuming NUMA socket 0\n", SYS_CPU_DIR, lcore_id);
+ return 0;
+}
+
+/* Get the cpu core id value from the /sys/.../cpuX core_id value */
+static unsigned
+cpu_core_id(unsigned lcore_id)
+{
+ char path[PATH_MAX];
+ unsigned long id;
+
+ int len = snprintf(path, sizeof(path), SYS_CPU_DIR "/%s", lcore_id, CORE_ID_FILE);
+ if (len <= 0 || (unsigned)len >= sizeof(path))
+ goto err;
+ if (eal_parse_sysfs_value(path, &id) != 0)
+ goto err;
+ return (unsigned)id;
+
+err:
+ RTE_LOG(ERR, EAL, "Error reading core id value from %s "
+ "for lcore %u - assuming core 0\n", SYS_CPU_DIR, lcore_id);
+ return 0;
+}
+
+/*
+ * Parse /sys/devices/system/cpu to get the number of physical and logical
+ * processors on the machine. The function will fill the cpu_info
+ * structure.
+ */
+int
+rte_eal_cpu_init(void)
+{
+ /* pointer to global configuration */
+ struct rte_config *config = rte_eal_get_configuration();
+ unsigned lcore_id;
+ unsigned count = 0;
+
+ /*
+ * Parse the maximum set of logical cores, detect the subset of running
+ * ones and enable them by default.
+ */
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ lcore_config[lcore_id].detected = cpu_detected(lcore_id);
+ if (lcore_config[lcore_id].detected == 0) {
+ config->lcore_role[lcore_id] = ROLE_OFF;
+ continue;
+ }
+ /* By default, each detected core is enabled */
+ config->lcore_role[lcore_id] = ROLE_RTE;
+ lcore_config[lcore_id].core_id = cpu_core_id(lcore_id);
+ lcore_config[lcore_id].socket_id = cpu_socket_id(lcore_id);
+ if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES)
+#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
+ lcore_config[lcore_id].socket_id = 0;
+#else
+ rte_panic("Socket ID (%u) is greater than "
+ "RTE_MAX_NUMA_NODES (%d)\n",
+ lcore_config[lcore_id].socket_id, RTE_MAX_NUMA_NODES);
+#endif
+ RTE_LOG(DEBUG, EAL, "Detected lcore %u as core %u on socket %u\n",
+ lcore_id,
+ lcore_config[lcore_id].core_id,
+ lcore_config[lcore_id].socket_id);
+ count ++;
+ }
+ /* Set the count of enabled logical cores of the EAL configuration */
+ config->lcore_count = count;
+ RTE_LOG(DEBUG, EAL, "Support maximum %u logical core(s) by configuration.\n",
+ RTE_MAX_LCORE);
+ RTE_LOG(DEBUG, EAL, "Detected %u lcore(s)\n", config->lcore_count);
+
+ return 0;
+}
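Once rte_eal_cpu_init() has populated lcore_config and the role table, applications normally consume the topology through the lcore iteration macros instead of re-reading sysfs. A small usage sketch (standard DPDK API, hypothetical helper name):

#include <stdio.h>
#include <rte_lcore.h>

/* Print the socket placement of every lcore enabled by rte_eal_cpu_init(). */
static void print_lcore_layout(void)
{
    unsigned lcore_id;

    RTE_LCORE_FOREACH(lcore_id) {
        printf("lcore %u -> socket %u\n",
               lcore_id, rte_lcore_to_socket_id(lcore_id));
    }
}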
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_log.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_log.c
new file mode 100755
index 00000000..94dedfb3
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_log.c
@@ -0,0 +1,197 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <syslog.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "eal_private.h"
+
+/*
+ * default log function, used once mempool (hence log history) is
+ * available
+ */
+static ssize_t
+console_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
+{
+ char copybuf[BUFSIZ + 1];
+ ssize_t ret;
+ uint32_t loglevel;
+
+ /* add this log in history */
+ rte_log_add_in_history(buf, size);
+
+ /* write on stdout */
+ ret = fwrite(buf, 1, size, stdout);
+ fflush(stdout);
+
+ /* truncate message if too big (should not happen) */
+ if (size > BUFSIZ)
+ size = BUFSIZ;
+
+ /* Syslog error levels are from 0 to 7, so subtract 1 to convert */
+ loglevel = rte_log_cur_msg_loglevel() - 1;
+ memcpy(copybuf, buf, size);
+ copybuf[size] = '\0';
+
+ /* write on syslog too */
+ syslog(loglevel, "%s", copybuf);
+
+ return ret;
+}
+
+static ssize_t
+console_log_read(__attribute__((unused)) void *c,
+ __attribute__((unused)) char *buf,
+ __attribute__((unused)) size_t size)
+{
+ return 0;
+}
+
+static int
+console_log_seek(__attribute__((unused)) void *c,
+ __attribute__((unused)) off64_t *offset,
+ __attribute__((unused)) int whence)
+{
+ return -1;
+}
+
+static int
+console_log_close(__attribute__((unused)) void *c)
+{
+ return 0;
+}
+
+static cookie_io_functions_t console_log_func = {
+ .read = console_log_read,
+ .write = console_log_write,
+ .seek = console_log_seek,
+ .close = console_log_close
+};
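console_log_func is consumed by fopencookie() below: glibc wraps the four callbacks into a regular FILE * that rte_log can fprintf() into. A self-contained sketch of the same mechanism with a trivial write hook (hypothetical names, glibc-specific):

#define _GNU_SOURCE
#include <stdio.h>

static ssize_t hook_write(void *c, const char *buf, size_t size)
{
    (void)c;
    /* forward everything to stderr; a real hook could add a prefix here */
    return fwrite(buf, 1, size, stderr);
}

static const cookie_io_functions_t hook_funcs = { .write = hook_write };

int main(void)
{
    FILE *f = fopencookie(NULL, "w", hook_funcs);

    if (f == NULL)
        return 1;
    setvbuf(f, NULL, _IONBF, 0);        /* unbuffered, like a log stream */
    fprintf(f, "hello from a cookie stream\n");
    fclose(f);
    return 0;
}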
+
+/*
+ * set the log to default function, called during eal init process,
+ * once memzones are available.
+ */
+int
+rte_eal_log_init(const char *id, int facility)
+{
+ FILE *log_stream;
+
+ log_stream = fopencookie(NULL, "w+", console_log_func);
+ if (log_stream == NULL)
+ return -1;
+
+ openlog(id, LOG_NDELAY | LOG_PID, facility);
+
+ if (rte_eal_common_log_init(log_stream) < 0)
+ return -1;
+
+ return 0;
+}
+
+/* early logs */
+
+/*
+ * early log function, used during boot when mempool (hence log
+ * history) is not available
+ */
+static ssize_t
+early_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
+{
+ ssize_t ret;
+ ret = fwrite(buf, size, 1, stdout);
+ fflush(stdout);
+ if (ret == 0)
+ return -1;
+ return ret;
+}
+
+static ssize_t
+early_log_read(__attribute__((unused)) void *c,
+ __attribute__((unused)) char *buf,
+ __attribute__((unused)) size_t size)
+{
+ return 0;
+}
+
+static int
+early_log_seek(__attribute__((unused)) void *c,
+ __attribute__((unused)) off64_t *offset,
+ __attribute__((unused)) int whence)
+{
+ return -1;
+}
+
+static int
+early_log_close(__attribute__((unused)) void *c)
+{
+ return 0;
+}
+
+static cookie_io_functions_t early_log_func = {
+ .read = early_log_read,
+ .write = early_log_write,
+ .seek = early_log_seek,
+ .close = early_log_close
+};
+static FILE *early_log_stream;
+
+/*
+ * init the log library, called by rte_eal_init() to enable early
+ * logs
+ */
+int
+rte_eal_log_early_init(void)
+{
+ early_log_stream = fopencookie(NULL, "w+", early_log_func);
+ if (early_log_stream == NULL) {
+ printf("Cannot configure early_log_stream\n");
+ return -1;
+ }
+ rte_openlog_stream(early_log_stream);
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_memory.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_memory.c
new file mode 100755
index 00000000..bae25079
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_memory.c
@@ -0,0 +1,1564 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* BSD LICENSE
+ *
+ * Copyright(c) 2013 6WIND.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define _FILE_OFFSET_BITS 64
+#include <errno.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/queue.h>
+#include <sys/file.h>
+#include <unistd.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_string_fns.h>
+
+#include "eal_private.h"
+#include "eal_internal_cfg.h"
+#include "eal_filesystem.h"
+#include "eal_hugepages.h"
+
+/**
+ * @file
+ * Huge page mapping under linux
+ *
+ * To reserve a big contiguous amount of memory, we use the hugepage
+ * feature of linux. For that, we need to have hugetlbfs mounted. This
+ * code will create many files in this directory (one per page) and
+ * map them in virtual memory. For each page, we will retrieve its
+ * physical address and remap it in order to have a virtual contiguous
+ * zone as well as a physical contiguous zone.
+ */
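The comment above describes the overall strategy; its basic building block is simply an mmap() of a file created inside the hugetlbfs mount, which the kernel then backs with a single huge page. A minimal sketch of that one-page step (the mount point and file name are illustrative, the real paths come from internal_config.hugepage_info):

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map one 2 MB page backed by a file in hugetlbfs. Sketch only. */
static void *map_one_hugepage(void)
{
    const size_t sz = 2 * 1024 * 1024;
    void *va;
    int fd = open("/mnt/huge/rtemap_demo", O_CREAT | O_RDWR, 0600);

    if (fd < 0)
        return NULL;
    va = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    close(fd);                          /* the mapping keeps the page alive */
    if (va == MAP_FAILED)
        return NULL;
    /* touching the page makes the kernel allocate it right away */
    *(volatile char *)va = 0;
    return va;
}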
+
+static uint64_t baseaddr_offset;
+
+#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
+
+/* Lock page in physical memory and prevent from swapping. */
+int
+rte_mem_lock_page(const void *virt)
+{
+ unsigned long virtual = (unsigned long)virt;
+ int page_size = getpagesize();
+ unsigned long aligned = (virtual & ~ (page_size - 1));
+ return mlock((void*)aligned, page_size);
+}
+
+/*
+ * Get physical address of any mapped virtual address in the current process.
+ */
+phys_addr_t
+rte_mem_virt2phy(const void *virtaddr)
+{
+ int fd;
+ uint64_t page, physaddr;
+ unsigned long virt_pfn;
+ int page_size;
+ off_t offset;
+
+ /* standard page size */
+ page_size = getpagesize();
+
+ fd = open("/proc/self/pagemap", O_RDONLY);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
+ __func__, strerror(errno));
+ return RTE_BAD_PHYS_ADDR;
+ }
+
+ virt_pfn = (unsigned long)virtaddr / page_size;
+ offset = sizeof(uint64_t) * virt_pfn;
+ if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
+ RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
+ __func__, strerror(errno));
+ close(fd);
+ return RTE_BAD_PHYS_ADDR;
+ }
+ if (read(fd, &page, sizeof(uint64_t)) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
+ __func__, strerror(errno));
+ close(fd);
+ return RTE_BAD_PHYS_ADDR;
+ }
+
+ /*
+ * the pfn (page frame number) are bits 0-54 (see
+ * pagemap.txt in linux Documentation)
+ */
+ physaddr = ((page & 0x7fffffffffffffULL) * page_size)
+ + ((unsigned long)virtaddr % page_size);
+ close(fd);
+ return physaddr;
+}
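In other words, for a given virtual address the pagemap entry lives at offset (virt / page_size) * 8, and the physical address is (entry & ((1ULL << 55) - 1)) * page_size + virt % page_size. A short usage sketch combining it with rte_mem_lock_page() above, so the page cannot be swapped out between the lookup and its use (hypothetical helper name):

/* Pin the page containing 'buf' and resolve its physical address. */
static phys_addr_t pin_and_translate(const void *buf)
{
    if (rte_mem_lock_page(buf) != 0)
        return RTE_BAD_PHYS_ADDR;
    return rte_mem_virt2phy(buf);
}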
+
+/*
+ * For each hugepage in hugepg_tbl, fill the physaddr value. We find
+ * it by browsing the /proc/self/pagemap special file.
+ */
+static int
+find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
+{
+ unsigned i;
+ phys_addr_t addr;
+
+ for (i = 0; i < hpi->num_pages[0]; i++) {
+ addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
+ if (addr == RTE_BAD_PHYS_ADDR)
+ return -1;
+ hugepg_tbl[i].physaddr = addr;
+ }
+ return 0;
+}
+
+/*
+ * Check whether address-space layout randomization is enabled in
+ * the kernel. This is important for multi-process as it can prevent
+ * two processes mapping data to the same virtual address
+ * Returns:
+ * 0 - address space randomization disabled
+ * 1/2 - address space randomization enabled
+ * negative error code on error
+ */
+static int
+aslr_enabled(void)
+{
+ char c;
+ int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
+ if (fd < 0)
+ return -errno;
+ retval = read(fd, &c, 1);
+ close(fd);
+ if (retval < 0)
+ return -errno;
+ if (retval == 0)
+ return -EIO;
+ switch (c) {
+ case '0' : return 0;
+ case '1' : return 1;
+ case '2' : return 2;
+ default: return -EINVAL;
+ }
+}
+
+/*
+ * Try to mmap *size bytes in /dev/zero. If it is successful, return the
+ * pointer to the mmap'd area and keep *size unmodified. Otherwise, retry
+ * with a smaller zone: decrease *size by hugepage_sz until it reaches
+ * 0, in which case return NULL. Note: this function returns an address
+ * which is a multiple of hugepage size.
+ */
+static void *
+get_virtual_area(size_t *size, size_t hugepage_sz)
+{
+ void *addr;
+ int fd;
+ long aligned_addr;
+
+ if (internal_config.base_virtaddr != 0) {
+ addr = (void*) (uintptr_t) (internal_config.base_virtaddr +
+ baseaddr_offset);
+ }
+ else addr = NULL;
+
+ RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
+
+ fd = open("/dev/zero", O_RDONLY);
+ if (fd < 0){
+ RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
+ return NULL;
+ }
+ do {
+ addr = mmap(addr,
+ (*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (addr == MAP_FAILED)
+ *size -= hugepage_sz;
+ } while (addr == MAP_FAILED && *size > 0);
+
+ if (addr == MAP_FAILED) {
+ close(fd);
+ RTE_LOG(INFO, EAL, "Cannot get a virtual area\n");
+ return NULL;
+ }
+
+ munmap(addr, (*size) + hugepage_sz);
+ close(fd);
+
+ /* align addr to a huge page size boundary */
+ aligned_addr = (long)addr;
+ aligned_addr += (hugepage_sz - 1);
+ aligned_addr &= (~(hugepage_sz - 1));
+ addr = (void *)(aligned_addr);
+
+ RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%zx)\n",
+ addr, *size);
+
+ /* increment offset */
+ baseaddr_offset += *size;
+
+ return addr;
+}
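The over-allocation by one hugepage_sz above is what makes the later alignment possible: for example, if the probe mmap() returns 0x7f5a3ff00000 and hugepage_sz is 2 MB (0x200000), rounding up gives (0x7f5a3ff00000 + 0x1fffff) & ~0x1fffff = 0x7f5a40000000, and that aligned address plus *size bytes still lies entirely inside the probed range.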
+
+/*
+ * Mmap all hugepages of the hugepage table: it first opens a file in
+ * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
+ * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
+ * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
+ * map contiguous physical blocks in contiguous virtual blocks.
+ */
+static int
+map_all_hugepages(struct hugepage_file *hugepg_tbl,
+ struct hugepage_info *hpi, int orig)
+{
+ int fd;
+ unsigned i;
+ void *virtaddr;
+ void *vma_addr = NULL;
+ size_t vma_len = 0;
+
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ RTE_SET_USED(vma_len);
+#endif
+
+ for (i = 0; i < hpi->num_pages[0]; i++) {
+ uint64_t hugepage_sz = hpi->hugepage_sz;
+
+ if (orig) {
+ hugepg_tbl[i].file_id = i;
+ hugepg_tbl[i].size = hugepage_sz;
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ eal_get_hugefile_temp_path(hugepg_tbl[i].filepath,
+ sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
+ hugepg_tbl[i].file_id);
+#else
+ eal_get_hugefile_path(hugepg_tbl[i].filepath,
+ sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
+ hugepg_tbl[i].file_id);
+#endif
+ hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
+ }
+#ifndef RTE_ARCH_64
+ /* for 32-bit systems, don't remap 1G and 16G pages, just reuse
+ * original map address as final map address.
+ */
+ else if ((hugepage_sz == RTE_PGSIZE_1G)
+ || (hugepage_sz == RTE_PGSIZE_16G)) {
+ hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
+ hugepg_tbl[i].orig_va = NULL;
+ continue;
+ }
+#endif
+
+#ifndef RTE_EAL_SINGLE_FILE_SEGMENTS
+ else if (vma_len == 0) {
+ unsigned j, num_pages;
+
+ /* reserve a virtual area for next contiguous
+ * physical block: count the number of
+ * contiguous physical pages. */
+ for (j = i+1; j < hpi->num_pages[0] ; j++) {
+#ifdef RTE_ARCH_PPC_64
+ /* The physical addresses are sorted in
+ * descending order on PPC64 */
+ if (hugepg_tbl[j].physaddr !=
+ hugepg_tbl[j-1].physaddr - hugepage_sz)
+ break;
+#else
+ if (hugepg_tbl[j].physaddr !=
+ hugepg_tbl[j-1].physaddr + hugepage_sz)
+ break;
+#endif
+ }
+ num_pages = j - i;
+ vma_len = num_pages * hugepage_sz;
+
+ /* get the biggest virtual memory area up to
+ * vma_len. If it fails, vma_addr is NULL, so
+ * let the kernel provide the address. */
+ vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
+ if (vma_addr == NULL)
+ vma_len = hugepage_sz;
+ }
+#endif
+
+ /* try to create hugepage file */
+ fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0755);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): open failed: %s\n", __func__,
+ strerror(errno));
+ return -1;
+ }
+
+ virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (virtaddr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__,
+ strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ if (orig) {
+ hugepg_tbl[i].orig_va = virtaddr;
+ memset(virtaddr, 0, hugepage_sz);
+ }
+ else {
+ hugepg_tbl[i].final_va = virtaddr;
+ }
+
+ /* set shared flock on the file. */
+ if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
+ RTE_LOG(ERR, EAL, "%s(): Locking file failed:%s \n",
+ __func__, strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ close(fd);
+
+ vma_addr = (char *)vma_addr + hugepage_sz;
+ vma_len -= hugepage_sz;
+ }
+ return 0;
+}
+
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+
+/*
+ * Remaps all hugepages into single file segments
+ */
+static int
+remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
+{
+ int fd;
+ unsigned i = 0, j, num_pages, page_idx = 0;
+ void *vma_addr = NULL, *old_addr = NULL, *page_addr = NULL;
+ size_t vma_len = 0;
+ size_t hugepage_sz = hpi->hugepage_sz;
+ size_t total_size, offset;
+ char filepath[MAX_HUGEPAGE_PATH];
+ phys_addr_t physaddr;
+ int socket;
+
+ while (i < hpi->num_pages[0]) {
+
+#ifndef RTE_ARCH_64
+ /* for 32-bit systems, don't remap 1G pages and 16G pages,
+ * just reuse original map address as final map address.
+ */
+ if ((hugepage_sz == RTE_PGSIZE_1G)
+ || (hugepage_sz == RTE_PGSIZE_16G)) {
+ hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
+ hugepg_tbl[i].orig_va = NULL;
+ i++;
+ continue;
+ }
+#endif
+
+ /* reserve a virtual area for next contiguous
+ * physical block: count the number of
+ * contiguous physical pages. */
+ for (j = i+1; j < hpi->num_pages[0] ; j++) {
+#ifdef RTE_ARCH_PPC_64
+ /* The physical addresses are sorted in descending
+ * order on PPC64 */
+ if (hugepg_tbl[j].physaddr !=
+ hugepg_tbl[j-1].physaddr - hugepage_sz)
+ break;
+#else
+ if (hugepg_tbl[j].physaddr !=
+ hugepg_tbl[j-1].physaddr + hugepage_sz)
+ break;
+#endif
+ }
+ num_pages = j - i;
+ vma_len = num_pages * hugepage_sz;
+
+ socket = hugepg_tbl[i].socket_id;
+
+ /* get the biggest virtual memory area up to
+ * vma_len. If it fails, vma_addr is NULL, so
+ * let the kernel provide the address. */
+ vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
+
+ /* If we can't find a big enough virtual area, work out how many pages
+ * we are going to get */
+ if (vma_addr == NULL)
+ j = i + 1;
+ else if (vma_len != num_pages * hugepage_sz) {
+ num_pages = vma_len / hugepage_sz;
+ j = i + num_pages;
+
+ }
+
+ hugepg_tbl[page_idx].file_id = page_idx;
+ eal_get_hugefile_path(filepath,
+ sizeof(filepath),
+ hpi->hugedir,
+ hugepg_tbl[page_idx].file_id);
+
+ /* try to create hugepage file */
+ fd = open(filepath, O_CREAT | O_RDWR, 0755);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): open failed: %s\n", __func__, strerror(errno));
+ return -1;
+ }
+
+ total_size = 0;
+ for (;i < j; i++) {
+
+ /* unmap current segment */
+ if (total_size > 0)
+ munmap(vma_addr, total_size);
+
+ /* unmap original page */
+ munmap(hugepg_tbl[i].orig_va, hugepage_sz);
+ unlink(hugepg_tbl[i].filepath);
+
+ total_size += hugepage_sz;
+
+ old_addr = vma_addr;
+
+ /* map new, bigger segment */
+ vma_addr = mmap(vma_addr, total_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+
+ if (vma_addr == MAP_FAILED || vma_addr != old_addr) {
+ RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__, strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ /* touch the page. this is needed because the kernel postpones mapping
+ * creation until the first page fault. with this, we pin down
+ * the page so it is marked as used and shows up in the process's pagemap.
+ */
+ for (offset = 0; offset < total_size; offset += hugepage_sz)
+ *((volatile uint8_t*) RTE_PTR_ADD(vma_addr, offset));
+ }
+
+ /* set shared flock on the file. */
+ if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
+ RTE_LOG(ERR, EAL, "%s(): Locking file failed:%s \n",
+ __func__, strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ snprintf(hugepg_tbl[page_idx].filepath, MAX_HUGEPAGE_PATH, "%s",
+ filepath);
+
+ physaddr = rte_mem_virt2phy(vma_addr);
+
+ if (physaddr == RTE_BAD_PHYS_ADDR)
+ return -1;
+
+ hugepg_tbl[page_idx].final_va = vma_addr;
+
+ hugepg_tbl[page_idx].physaddr = physaddr;
+
+ hugepg_tbl[page_idx].repeated = num_pages;
+
+ hugepg_tbl[page_idx].socket_id = socket;
+
+ close(fd);
+
+ /* verify the memory segment - that is, check that every VA corresponds
+ * to the physical address we expect to see
+ */
+ for (offset = 0; offset < vma_len; offset += hugepage_sz) {
+ uint64_t expected_physaddr;
+
+ expected_physaddr = hugepg_tbl[page_idx].physaddr + offset;
+ page_addr = RTE_PTR_ADD(vma_addr, offset);
+ physaddr = rte_mem_virt2phy(page_addr);
+
+ if (physaddr != expected_physaddr) {
+ RTE_LOG(ERR, EAL, "Segment sanity check failed: wrong physaddr "
+ "at %p (offset 0x%" PRIx64 ": 0x%" PRIx64
+ " (expected 0x%" PRIx64 ")\n",
+ page_addr, offset, physaddr, expected_physaddr);
+ return -1;
+ }
+ }
+
+ /* zero out the whole segment */
+ memset(hugepg_tbl[page_idx].final_va, 0, total_size);
+
+ page_idx++;
+ }
+
+ /* zero out the rest */
+ memset(&hugepg_tbl[page_idx], 0, (hpi->num_pages[0] - page_idx) * sizeof(struct hugepage_file));
+ return page_idx;
+}
+#else/* RTE_EAL_SINGLE_FILE_SEGMENTS=n */
+
+/* Unmap all hugepages from original mapping */
+static int
+unmap_all_hugepages_orig(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
+{
+ unsigned i;
+ for (i = 0; i < hpi->num_pages[0]; i++) {
+ if (hugepg_tbl[i].orig_va) {
+ munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
+ hugepg_tbl[i].orig_va = NULL;
+ }
+ }
+ return 0;
+}
+#endif /* RTE_EAL_SINGLE_FILE_SEGMENTS */
+
+/*
+ * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
+ * page.
+ */
+static int
+find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
+{
+ int socket_id;
+ char *end, *nodestr;
+ unsigned i, hp_count = 0;
+ uint64_t virt_addr;
+ char buf[BUFSIZ];
+ char hugedir_str[PATH_MAX];
+ FILE *f;
+
+ f = fopen("/proc/self/numa_maps", "r");
+ if (f == NULL) {
+ RTE_LOG(INFO, EAL, "cannot open /proc/self/numa_maps,"
+ " consider that all memory is in socket_id 0\n");
+ return 0;
+ }
+
+ snprintf(hugedir_str, sizeof(hugedir_str),
+ "%s/", hpi->hugedir);
+
+ /* parse numa map */
+ while (fgets(buf, sizeof(buf), f) != NULL) {
+
+ /* ignore non huge page */
+ if (strstr(buf, " huge ") == NULL &&
+ strstr(buf, hugedir_str) == NULL)
+ continue;
+
+ /* get zone addr */
+ virt_addr = strtoull(buf, &end, 16);
+ if (virt_addr == 0 || end == buf) {
+ RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
+ goto error;
+ }
+
+ /* get node id (socket id) */
+ nodestr = strstr(buf, " N");
+ if (nodestr == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
+ goto error;
+ }
+ nodestr += 2;
+ end = strstr(nodestr, "=");
+ if (end == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
+ goto error;
+ }
+ end[0] = '\0';
+ end = NULL;
+
+ socket_id = strtoul(nodestr, &end, 0);
+ if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
+ RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
+ goto error;
+ }
+
+ /* if we find this page in our mappings, set socket_id */
+ for (i = 0; i < hpi->num_pages[0]; i++) {
+ void *va = (void *)(unsigned long)virt_addr;
+ if (hugepg_tbl[i].orig_va == va) {
+ hugepg_tbl[i].socket_id = socket_id;
+ hp_count++;
+ }
+ }
+ }
+
+ if (hp_count < hpi->num_pages[0])
+ goto error;
+
+ fclose(f);
+ return 0;
+
+error:
+ fclose(f);
+ return -1;
+}
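For reference, a hugepage entry in /proc/self/numa_maps looks roughly like "7f2a40000000 default file=/mnt/huge/rtemap_0 huge dirty=1 N0=1": the parser above takes the leading hex value as the zone address and the digits between " N" and "=" as the NUMA socket ID.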
+
+/*
+ * Sort the hugepg_tbl by physical address (lower addresses first on x86,
+ * higher address first on powerpc). We use a slow algorithm, but we won't
+ * have millions of pages, and this is only done at init time.
+ */
+static int
+sort_by_physaddr(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
+{
+ unsigned i, j;
+ int compare_idx;
+ uint64_t compare_addr;
+ struct hugepage_file tmp;
+
+ for (i = 0; i < hpi->num_pages[0]; i++) {
+ compare_addr = 0;
+ compare_idx = -1;
+
+ /*
+ * browse all entries starting at 'i', and find the
+ * entry with the smallest addr
+ */
+ for (j=i; j< hpi->num_pages[0]; j++) {
+
+ if (compare_addr == 0 ||
+#ifdef RTE_ARCH_PPC_64
+ hugepg_tbl[j].physaddr > compare_addr) {
+#else
+ hugepg_tbl[j].physaddr < compare_addr) {
+#endif
+ compare_addr = hugepg_tbl[j].physaddr;
+ compare_idx = j;
+ }
+ }
+
+ /* should not happen */
+ if (compare_idx == -1) {
+ RTE_LOG(ERR, EAL, "%s(): error in physaddr sorting\n", __func__);
+ return -1;
+ }
+
+ /* swap the 2 entries in the table */
+ memcpy(&tmp, &hugepg_tbl[compare_idx],
+ sizeof(struct hugepage_file));
+ memcpy(&hugepg_tbl[compare_idx], &hugepg_tbl[i],
+ sizeof(struct hugepage_file));
+ memcpy(&hugepg_tbl[i], &tmp, sizeof(struct hugepage_file));
+ }
+ return 0;
+}
+
+/*
+ * Uses mmap to create a shared memory area for storage of data
+ * Used in this file to store the hugepage file map on disk
+ */
+static void *
+create_shared_memory(const char *filename, const size_t mem_size)
+{
+ void *retval;
+ int fd = open(filename, O_CREAT | O_RDWR, 0666);
+ if (fd < 0)
+ return NULL;
+ if (ftruncate(fd, mem_size) < 0) {
+ close(fd);
+ return NULL;
+ }
+ retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+ return retval;
+}
+
+/*
+ * this copies *active* hugepages from one hugepage table to another.
+ * destination is typically the shared memory.
+ */
+static int
+copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
+ const struct hugepage_file * src, int src_size)
+{
+ int src_pos, dst_pos = 0;
+
+ for (src_pos = 0; src_pos < src_size; src_pos++) {
+ if (src[src_pos].final_va != NULL) {
+ /* error on overflow attempt */
+ if (dst_pos == dest_size)
+ return -1;
+ memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
+ dst_pos++;
+ }
+ }
+ return 0;
+}
+
+/*
+ * unmaps hugepages that are not going to be used. since we originally allocate
+ * ALL hugepages (not just those we need), additional unmapping needs to be done.
+ */
+static int
+unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
+ struct hugepage_info *hpi,
+ unsigned num_hp_info)
+{
+ unsigned socket, size;
+ int page, nrpages = 0;
+
+ /* get total number of hugepages */
+ for (size = 0; size < num_hp_info; size++)
+ for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
+ nrpages += internal_config.hugepage_info[size].num_pages[socket];
+
+ for (size = 0; size < num_hp_info; size++) {
+ for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
+ unsigned pages_found = 0;
+
+ /* traverse until we have unmapped all the unused pages */
+ for (page = 0; page < nrpages; page++) {
+ struct hugepage_file *hp = &hugepg_tbl[page];
+
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ /* if this page was already cleared */
+ if (hp->final_va == NULL)
+ continue;
+#endif
+
+ /* find a page that matches the criteria */
+ if ((hp->size == hpi[size].hugepage_sz) &&
+ (hp->socket_id == (int) socket)) {
+
+ /* if we skipped enough pages, unmap the rest */
+ if (pages_found == hpi[size].num_pages[socket]) {
+ uint64_t unmap_len;
+
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ unmap_len = hp->size * hp->repeated;
+#else
+ unmap_len = hp->size;
+#endif
+
+ /* get start addr and len of the remaining segment */
+ munmap(hp->final_va, (size_t) unmap_len);
+
+ hp->final_va = NULL;
+ if (unlink(hp->filepath) == -1) {
+ RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
+ __func__, hp->filepath, strerror(errno));
+ return -1;
+ }
+ }
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ /* else, check how much do we need to map */
+ else {
+ int nr_pg_left =
+ hpi[size].num_pages[socket] - pages_found;
+
+ /* if we need enough memory to fit into the segment */
+ if (hp->repeated <= nr_pg_left) {
+ pages_found += hp->repeated;
+ }
+ /* truncate the segment */
+ else {
+ uint64_t final_size = nr_pg_left * hp->size;
+ uint64_t seg_size = hp->repeated * hp->size;
+
+ void * unmap_va = RTE_PTR_ADD(hp->final_va,
+ final_size);
+ int fd;
+
+ munmap(unmap_va, seg_size - final_size);
+
+ fd = open(hp->filepath, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ hp->filepath, strerror(errno));
+ return -1;
+ }
+ if (ftruncate(fd, final_size) < 0) {
+ RTE_LOG(ERR, EAL, "Cannot truncate %s: %s\n",
+ hp->filepath, strerror(errno));
+ return -1;
+ }
+ close(fd);
+
+ pages_found += nr_pg_left;
+ hp->repeated = nr_pg_left;
+ }
+ }
+#else
+ /* else, lock the page and skip */
+ else
+ pages_found++;
+#endif
+
+ } /* match page */
+ } /* foreach page */
+ } /* foreach socket */
+ } /* foreach pagesize */
+
+ return 0;
+}
+
+static inline uint64_t
+get_socket_mem_size(int socket)
+{
+ uint64_t size = 0;
+ unsigned i;
+
+ for (i = 0; i < internal_config.num_hugepage_sizes; i++){
+ struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+ if (hpi->hugedir != NULL)
+ size += hpi->hugepage_sz * hpi->num_pages[socket];
+ }
+
+ return (size);
+}
+
+/*
+ * This function is a NUMA-aware equivalent of calc_num_pages.
+ * It takes in the list of hugepage sizes and the
+ * number of pages thereof, and calculates the best number of
+ * pages of each size to fulfill the request for <memory> ram
+ */
+static int
+calc_num_pages_per_socket(uint64_t * memory,
+ struct hugepage_info *hp_info,
+ struct hugepage_info *hp_used,
+ unsigned num_hp_info)
+{
+ unsigned socket, j, i = 0;
+ unsigned requested, available;
+ int total_num_pages = 0;
+ uint64_t remaining_mem, cur_mem;
+ uint64_t total_mem = internal_config.memory;
+
+ if (num_hp_info == 0)
+ return -1;
+
+ /* if specific memory amounts per socket weren't requested */
+ if (internal_config.force_sockets == 0) {
+ int cpu_per_socket[RTE_MAX_NUMA_NODES];
+ size_t default_size, total_size;
+ unsigned lcore_id;
+
+ /* Compute number of cores per socket */
+ memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
+ RTE_LCORE_FOREACH(lcore_id) {
+ cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
+ }
+
+ /*
+ * Automatically spread requested memory amongst detected sockets according
+ * to number of cores from cpu mask present on each socket
+ */
+ total_size = internal_config.memory;
+ for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
+
+ /* Set memory amount per socket */
+ default_size = (internal_config.memory * cpu_per_socket[socket])
+ / rte_lcore_count();
+
+ /* Limit to maximum available memory on socket */
+ default_size = RTE_MIN(default_size, get_socket_mem_size(socket));
+
+ /* Update sizes */
+ memory[socket] = default_size;
+ total_size -= default_size;
+ }
+
+ /*
+ * If some memory is remaining, try to allocate it by getting all
+ * available memory from sockets, one after the other
+ */
+ for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
+ /* take whatever is available */
+ default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
+ total_size);
+
+ /* Update sizes */
+ memory[socket] += default_size;
+ total_size -= default_size;
+ }
+ }
+
+ for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
+ /* skips if the memory on specific socket wasn't requested */
+ for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
+ hp_used[i].hugedir = hp_info[i].hugedir;
+ hp_used[i].num_pages[socket] = RTE_MIN(
+ memory[socket] / hp_info[i].hugepage_sz,
+ hp_info[i].num_pages[socket]);
+
+ cur_mem = hp_used[i].num_pages[socket] *
+ hp_used[i].hugepage_sz;
+
+ memory[socket] -= cur_mem;
+ total_mem -= cur_mem;
+
+ total_num_pages += hp_used[i].num_pages[socket];
+
+ /* check if we have met all memory requests */
+ if (memory[socket] == 0)
+ break;
+
+ /* check if we have any more pages left at this size, if so
+ * move on to next size */
+ if (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])
+ continue;
+ /* At this point we know that there are more pages available that are
+ * bigger than the memory we want, so lets see if we can get enough
+ * from other page sizes.
+ */
+ remaining_mem = 0;
+ for (j = i+1; j < num_hp_info; j++)
+ remaining_mem += hp_info[j].hugepage_sz *
+ hp_info[j].num_pages[socket];
+
+ /* is there enough other memory, if not allocate another page and quit */
+ if (remaining_mem < memory[socket]){
+ cur_mem = RTE_MIN(memory[socket],
+ hp_info[i].hugepage_sz);
+ memory[socket] -= cur_mem;
+ total_mem -= cur_mem;
+ hp_used[i].num_pages[socket]++;
+ total_num_pages++;
+ break; /* we are done with this socket*/
+ }
+ }
+ /* if we didn't satisfy all memory requirements per socket */
+ if (memory[socket] > 0) {
+ /* to prevent icc errors */
+ requested = (unsigned) (internal_config.socket_mem[socket] /
+ 0x100000);
+ available = requested -
+ ((unsigned) (memory[socket] / 0x100000));
+ RTE_LOG(INFO, EAL, "Not enough memory available on socket %u! "
+ "Requested: %uMB, available: %uMB\n", socket,
+ requested, available);
+ return -1;
+ }
+ }
+
+ /* if we didn't satisfy total memory requirements */
+ if (total_mem > 0) {
+ requested = (unsigned) (internal_config.memory / 0x100000);
+ available = requested - (unsigned) (total_mem / 0x100000);
+ RTE_LOG(INFO, EAL, "Not enough memory available! Requested: %uMB,"
+ " available: %uMB\n", requested, available);
+ return -1;
+ }
+ return total_num_pages;
+}
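As a concrete illustration of the spreading pass above: with -m 1536 and six enabled lcores, four on socket 0 and two on socket 1, socket 0 is initially assigned 1536 * 4 / 6 = 1024 MB and socket 1 gets 1536 * 2 / 6 = 512 MB, each value capped at get_socket_mem_size() for its socket; only if such a cap cuts an assignment short does the second loop hand the shortfall to whichever sockets still have spare hugepage memory.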
+
+/*
+ * Prepare physical memory mapping: fill the configuration structure with
+ * this information, return 0 on success.
+ * 1. map N huge pages in separate files in hugetlbfs
+ * 2. find associated physical addr
+ * 3. find associated NUMA socket ID
+ * 4. sort all huge pages by physical address
+ * 5. remap these N huge pages in the correct order
+ * 6. unmap the first mapping
+ * 7. fill memsegs in configuration with contiguous zones
+ */
+static int
+rte_eal_hugepage_init(void)
+{
+ struct rte_mem_config *mcfg;
+ struct hugepage_file *hugepage, *tmp_hp = NULL;
+ struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
+
+ uint64_t memory[RTE_MAX_NUMA_NODES];
+
+ unsigned hp_offset;
+ int i, j, new_memseg;
+ int nr_hugefiles, nr_hugepages = 0;
+ void *addr;
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ int new_pages_count[MAX_HUGEPAGE_SIZES];
+#endif
+
+ memset(used_hp, 0, sizeof(used_hp));
+
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ /* hugetlbfs can be disabled */
+ if (internal_config.no_hugetlbfs) {
+ addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ if (addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
+ strerror(errno));
+ return -1;
+ }
+ mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
+ mcfg->memseg[0].addr = addr;
+ mcfg->memseg[0].len = internal_config.memory;
+ mcfg->memseg[0].socket_id = SOCKET_ID_ANY;
+ return 0;
+ }
+
+ /* check if app runs on Xen Dom0 */
+ if (internal_config.xen_dom0_support) {
+#ifdef RTE_LIBRTE_XEN_DOM0
+ /* use dom0_mm kernel driver to init memory */
+ if (rte_xen_dom0_memory_init() < 0)
+ return -1;
+ else
+ return 0;
+#endif
+ }
+
+
+ /* calculate total number of hugepages available. at this point we haven't
+ * yet started sorting them so they all are on socket 0 */
+ for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
+ /* meanwhile, also initialize the hugepage sizes in used_hp */
+ used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;
+
+ nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
+ }
+
+ /*
+ * allocate a memory area for hugepage table.
+ * this isn't shared memory yet. due to the fact that we need some
+ * processing done on these pages, shared memory will be created
+ * at a later stage.
+ */
+ tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
+ if (tmp_hp == NULL)
+ goto fail;
+
+ memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));
+
+ hp_offset = 0; /* where we start the current page size entries */
+
+ /* map all hugepages and sort them */
+ for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
+ struct hugepage_info *hpi;
+
+ /*
+ * we don't yet mark hugepages as used at this stage, so
+ * we just map all hugepages available to the system
+ * all hugepages are still located on socket 0
+ */
+ hpi = &internal_config.hugepage_info[i];
+
+ if (hpi->num_pages[0] == 0)
+ continue;
+
+ /* map all hugepages available */
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 1) < 0){
+ RTE_LOG(DEBUG, EAL, "Failed to mmap %u MB hugepages\n",
+ (unsigned)(hpi->hugepage_sz / 0x100000));
+ goto fail;
+ }
+
+ /* find physical addresses and sockets for each hugepage */
+ if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0){
+ RTE_LOG(DEBUG, EAL, "Failed to find phys addr for %u MB pages\n",
+ (unsigned)(hpi->hugepage_sz / 0x100000));
+ goto fail;
+ }
+
+ if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
+ RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
+ (unsigned)(hpi->hugepage_sz / 0x100000));
+ goto fail;
+ }
+
+ if (sort_by_physaddr(&tmp_hp[hp_offset], hpi) < 0)
+ goto fail;
+
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ /* remap all hugepages into single file segments */
+ new_pages_count[i] = remap_all_hugepages(&tmp_hp[hp_offset], hpi);
+ if (new_pages_count[i] < 0){
+ RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
+ (unsigned)(hpi->hugepage_sz / 0x100000));
+ goto fail;
+ }
+
+ /* we have processed a num of hugepages of this size, so inc offset */
+ hp_offset += new_pages_count[i];
+#else
+ /* remap all hugepages */
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) < 0){
+ RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
+ (unsigned)(hpi->hugepage_sz / 0x100000));
+ goto fail;
+ }
+
+ /* unmap original mappings */
+ if (unmap_all_hugepages_orig(&tmp_hp[hp_offset], hpi) < 0)
+ goto fail;
+
+ /* we have processed a num of hugepages of this size, so inc offset */
+ hp_offset += hpi->num_pages[0];
+#endif
+ }
+
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ nr_hugefiles = 0;
+ for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
+ nr_hugefiles += new_pages_count[i];
+ }
+#else
+ nr_hugefiles = nr_hugepages;
+#endif
+
+
+ /* clean out the numbers of pages */
+ for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+ internal_config.hugepage_info[i].num_pages[j] = 0;
+
+ /* get hugepages for each socket */
+ for (i = 0; i < nr_hugefiles; i++) {
+ int socket = tmp_hp[i].socket_id;
+
+ /* find a hugepage info with right size and increment num_pages */
+ for (j = 0; j < (int) internal_config.num_hugepage_sizes; j++) {
+ if (tmp_hp[i].size ==
+ internal_config.hugepage_info[j].hugepage_sz) {
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ internal_config.hugepage_info[j].num_pages[socket] +=
+ tmp_hp[i].repeated;
+#else
+ internal_config.hugepage_info[j].num_pages[socket]++;
+#endif
+ }
+ }
+ }
+
+ /* make a copy of socket_mem, needed for number of pages calculation */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+ /* calculate final number of pages */
+ nr_hugepages = calc_num_pages_per_socket(memory,
+ internal_config.hugepage_info, used_hp,
+ internal_config.num_hugepage_sizes);
+
+ /* error if not enough memory available */
+ if (nr_hugepages < 0)
+ goto fail;
+
+ /* reporting in! */
+ for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
+ if (used_hp[i].num_pages[j] > 0) {
+ RTE_LOG(INFO, EAL,
+ "Requesting %u pages of size %uMB"
+ " from socket %i\n",
+ used_hp[i].num_pages[j],
+ (unsigned)
+ (used_hp[i].hugepage_sz / 0x100000),
+ j);
+ }
+ }
+ }
+
+ /* create shared memory */
+ hugepage = create_shared_memory(eal_hugepage_info_path(),
+ nr_hugefiles * sizeof(struct hugepage_file));
+
+ if (hugepage == NULL) {
+ RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
+ goto fail;
+ }
+ memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));
+
+ /*
+ * unmap pages that we won't need (looks at used_hp).
+ * also, sets final_va to NULL on pages that were unmapped.
+ */
+ if (unmap_unneeded_hugepages(tmp_hp, used_hp,
+ internal_config.num_hugepage_sizes) < 0) {
+ RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
+ goto fail;
+ }
+
+ /*
+ * copy entries from the malloc'd hugepage table to the actual shared memory.
+ * this procedure only copies those hugepages that have a non-NULL final_va,
+ * and it has overflow protection.
+ */
+ if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
+ tmp_hp, nr_hugefiles) < 0) {
+ RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
+ goto fail;
+ }
+
+ /* free the temporary hugepage table */
+ free(tmp_hp);
+ tmp_hp = NULL;
+
+ /* find earliest free memseg - this is needed because in case of IVSHMEM,
+ * segments might have already been initialized */
+ for (j = 0; j < RTE_MAX_MEMSEG; j++)
+ if (mcfg->memseg[j].addr == NULL) {
+ /* move to previous segment and exit loop */
+ j--;
+ break;
+ }
+
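+ /* walk the sorted hugepage list: consecutive pages that share a socket and
+ * size and are physically and virtually contiguous are merged into one memseg */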
+ for (i = 0; i < nr_hugefiles; i++) {
+ new_memseg = 0;
+
+ /* if this is a new section, create a new memseg */
+ if (i == 0)
+ new_memseg = 1;
+ else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
+ new_memseg = 1;
+ else if (hugepage[i].size != hugepage[i-1].size)
+ new_memseg = 1;
+
+#ifdef RTE_ARCH_PPC_64
+ /* On the PPC64 architecture, mmap always maps from higher
+ * virtual addresses down to lower ones. Here, both the physical
+ * and virtual addresses are in descending order */
+ else if ((hugepage[i-1].physaddr - hugepage[i].physaddr) !=
+ hugepage[i].size)
+ new_memseg = 1;
+ else if (((unsigned long)hugepage[i-1].final_va -
+ (unsigned long)hugepage[i].final_va) != hugepage[i].size)
+ new_memseg = 1;
+#else
+ else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
+ hugepage[i].size)
+ new_memseg = 1;
+ else if (((unsigned long)hugepage[i].final_va -
+ (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
+ new_memseg = 1;
+#endif
+
+ if (new_memseg) {
+ j += 1;
+ if (j == RTE_MAX_MEMSEG)
+ break;
+
+ mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+ mcfg->memseg[j].addr = hugepage[i].final_va;
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ mcfg->memseg[j].len = hugepage[i].size * hugepage[i].repeated;
+#else
+ mcfg->memseg[j].len = hugepage[i].size;
+#endif
+ mcfg->memseg[j].socket_id = hugepage[i].socket_id;
+ mcfg->memseg[j].hugepage_sz = hugepage[i].size;
+ }
+ /* continuation of previous memseg */
+ else {
+#ifdef RTE_ARCH_PPC_64
+ /* Use the physical and virtual address of the last page as the
+ * segment address for the IBM Power architecture */
+ mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+ mcfg->memseg[j].addr = hugepage[i].final_va;
+#endif
+ mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
+ }
+ hugepage[i].memseg_id = j;
+ }
+
+ if (i < nr_hugefiles) {
+ RTE_LOG(ERR, EAL, "Can only reserve %d pages "
+ "from %d requested\n"
+ "Current %s=%d is not enough\n"
+ "Please either increase it or request less amount "
+ "of memory.\n",
+ i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
+ RTE_MAX_MEMSEG);
+ return (-ENOMEM);
+ }
+
+ return 0;
+
+fail:
+ if (tmp_hp)
+ free(tmp_hp);
+ return -1;
+}
+
+/*
+ * uses fstat to report the size of a file on disk
+ */
+static off_t
+getFileSize(int fd)
+{
+ struct stat st;
+ if (fstat(fd, &st) < 0)
+ return 0;
+ return st.st_size;
+}
+
+/*
+ * This creates the memory mappings in the secondary process to match that of
+ * the server process. It goes through each memory segment in the DPDK runtime
+ * configuration and finds the hugepages which form that segment, mapping them
+ * in order to form a contiguous block in the virtual memory space
+ */
+static int
+rte_eal_hugepage_attach(void)
+{
+ const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ const struct hugepage_file *hp = NULL;
+ unsigned num_hp = 0;
+ unsigned i, s = 0; /* s used to track the segment number */
+ off_t size;
+ int fd, fd_zero = -1, fd_hugepage = -1;
+
+ if (aslr_enabled() > 0) {
+ RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
+ "(ASLR) is enabled in the kernel.\n");
+ RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory "
+ "into secondary processes\n");
+ }
+
+ if (internal_config.xen_dom0_support) {
+#ifdef RTE_LIBRTE_XEN_DOM0
+ if (rte_xen_dom0_memory_attach() < 0) {
+ RTE_LOG(ERR, EAL,"Failed to attach memory setments of primay "
+ "process\n");
+ return -1;
+ }
+ return 0;
+#endif
+ }
+
+ fd_zero = open("/dev/zero", O_RDONLY);
+ if (fd_zero < 0) {
+ RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
+ goto error;
+ }
+ fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
+ if (fd_hugepage < 0) {
+ RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
+ goto error;
+ }
+
+ /* map all segments into memory to make sure we get the addrs */
+ for (s = 0; s < RTE_MAX_MEMSEG; ++s) {
+ void *base_addr;
+
+ /*
+ * the first memory segment with len==0 is the one that
+ * follows the last valid segment.
+ */
+ if (mcfg->memseg[s].len == 0)
+ break;
+
+#ifdef RTE_LIBRTE_IVSHMEM
+ /*
+ * if segment has ioremap address set, it's an IVSHMEM segment and
+ * doesn't need mapping as it was already mapped earlier
+ */
+ if (mcfg->memseg[s].ioremap_addr != 0)
+ continue;
+#endif
+
+ /*
+ * fd_zero is mmapped to get a contiguous block of virtual
+ * addresses of the appropriate memseg size.
+ * use mmap to get the same addresses as in the primary process.
+ */
+ base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
+ PROT_READ, MAP_PRIVATE, fd_zero, 0);
+ if (base_addr == MAP_FAILED ||
+ base_addr != mcfg->memseg[s].addr) {
+ RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
+ "in /dev/zero to requested address [%p]: '%s'\n",
+ (unsigned long long)mcfg->memseg[s].len,
+ mcfg->memseg[s].addr, strerror(errno));
+ if (aslr_enabled() > 0) {
+ RTE_LOG(ERR, EAL, "It is recommended to "
+ "disable ASLR in the kernel "
+ "and retry running both primary "
+ "and secondary processes\n");
+ }
+ goto error;
+ }
+ }
+
+ size = getFileSize(fd_hugepage);
+ hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
+ if (hp == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
+ goto error;
+ }
+
+ num_hp = size / sizeof(struct hugepage_file);
+ RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);
+
+ s = 0;
+ while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0){
+ void *addr, *base_addr;
+ uintptr_t offset = 0;
+ size_t mapping_size;
+#ifdef RTE_LIBRTE_IVSHMEM
+ /*
+ * if segment has ioremap address set, it's an IVSHMEM segment and
+ * doesn't need mapping as it was already mapped earlier
+ */
+ if (mcfg->memseg[s].ioremap_addr != 0) {
+ s++;
+ continue;
+ }
+#endif
+ /*
+ * free previously mapped memory so we can map the
+ * hugepages into the space
+ */
+ base_addr = mcfg->memseg[s].addr;
+ munmap(base_addr, mcfg->memseg[s].len);
+
+ /* find the hugepages for this segment and map them
+ * we don't need to worry about order, as the server sorted the
+ * entries before it did the second mmap of them */
+ for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++){
+ if (hp[i].memseg_id == (int)s){
+ fd = open(hp[i].filepath, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Could not open %s\n",
+ hp[i].filepath);
+ goto error;
+ }
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ mapping_size = hp[i].size * hp[i].repeated;
+#else
+ mapping_size = hp[i].size;
+#endif
+ addr = mmap(RTE_PTR_ADD(base_addr, offset),
+ mapping_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ close(fd); /* close file both on success and on failure */
+ if (addr == MAP_FAILED ||
+ addr != RTE_PTR_ADD(base_addr, offset)) {
+ RTE_LOG(ERR, EAL, "Could not mmap %s\n",
+ hp[i].filepath);
+ goto error;
+ }
+ offset+=mapping_size;
+ }
+ }
+ RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
+ (unsigned long long)mcfg->memseg[s].len);
+ s++;
+ }
+ /* unmap the hugepage config file, since we are done using it */
+ munmap((void *)(uintptr_t)hp, size);
+ close(fd_zero);
+ close(fd_hugepage);
+ return 0;
+
+error:
+ if (fd_zero >= 0)
+ close(fd_zero);
+ if (fd_hugepage >= 0)
+ close(fd_hugepage);
+ return -1;
+}
+
+static int
+rte_eal_memdevice_init(void)
+{
+ struct rte_config *config;
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return 0;
+
+ config = rte_eal_get_configuration();
+ config->mem_config->nchannel = internal_config.force_nchannel;
+ config->mem_config->nrank = internal_config.force_nrank;
+
+ return 0;
+}
+
+
+/* init memory subsystem */
+int
+rte_eal_memory_init(void)
+{
+ RTE_LOG(INFO, EAL, "Setting up memory...\n");
+ const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
+ rte_eal_hugepage_init() :
+ rte_eal_hugepage_attach();
+ if (retval < 0)
+ return -1;
+
+ if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
+ return -1;
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci.c
new file mode 100755
index 00000000..b5f54101
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci.c
@@ -0,0 +1,629 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <dirent.h>
+#include <sys/mman.h>
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_tailq.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_devargs.h>
+
+#include "rte_pci_dev_ids.h"
+#include "eal_filesystem.h"
+#include "eal_private.h"
+#include "eal_pci_init.h"
+
+/**
+ * @file
+ * PCI probing under linux
+ *
+ * This code is used to simulate a PCI probe by parsing information in sysfs.
+ * When a registered device matches a driver, it is then initialized using the
+ * IGB_UIO driver (or is not initialized, if the device was not bound to it).
+ */
+
+struct mapped_pci_res_list *pci_res_list = NULL;
+
+/* unbind kernel driver for this device */
+static int
+pci_unbind_kernel_driver(struct rte_pci_device *dev)
+{
+ int n;
+ FILE *f;
+ char filename[PATH_MAX];
+ char buf[BUFSIZ];
+ struct rte_pci_addr *loc = &dev->addr;
+
+ /* open /sys/bus/pci/devices/AAAA:BB:CC.D/driver */
+ snprintf(filename, sizeof(filename),
+ SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/driver/unbind",
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ f = fopen(filename, "w");
+ if (f == NULL) /* device was not bound */
+ return 0;
+
+ n = snprintf(buf, sizeof(buf), PCI_PRI_FMT "\n",
+ loc->domain, loc->bus, loc->devid, loc->function);
+ if ((n < 0) || (n >= (int)sizeof(buf))) {
+ RTE_LOG(ERR, EAL, "%s(): snprintf failed\n", __func__);
+ goto error;
+ }
+ if (fwrite(buf, n, 1, f) == 0) {
+ RTE_LOG(ERR, EAL, "%s(): could not write to %s\n", __func__,
+ filename);
+ goto error;
+ }
+
+ fclose(f);
+ return 0;
+
+error:
+ fclose(f);
+ return -1;
+}
+
+void *
+pci_find_max_end_va(void)
+{
+ const struct rte_memseg *seg = rte_eal_get_physmem_layout();
+ const struct rte_memseg *last = seg;
+ unsigned i = 0;
+
+ for (i = 0; i < RTE_MAX_MEMSEG; i++, seg++) {
+ if (seg->addr == NULL)
+ break;
+
+ if (seg->addr > last->addr)
+ last = seg;
+
+ }
+ return RTE_PTR_ADD(last->addr, last->len);
+}
+
+
+/* map a particular resource from a file */
+void *
+pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size)
+{
+ void *mapaddr;
+
+ /* Map the PCI memory resource of device */
+ mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, offset);
+ if (mapaddr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "%s(): cannot mmap(%d, %p, 0x%lx, 0x%lx): %s (%p)\n",
+ __func__, fd, requested_addr,
+ (unsigned long)size, (unsigned long)offset,
+ strerror(errno), mapaddr);
+ } else {
+ RTE_LOG(DEBUG, EAL, " PCI memory mapped at %p\n", mapaddr);
+ }
+
+ return mapaddr;
+}
+
+/* parse the "resource" sysfs file */
+#define IORESOURCE_MEM 0x00000200
+
+static int
+pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ union pci_resource_info {
+ struct {
+ char *phys_addr;
+ char *end_addr;
+ char *flags;
+ };
+ char *ptrs[PCI_RESOURCE_FMT_NVAL];
+ } res_info;
+ int i;
+ uint64_t phys_addr, end_addr, flags;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n");
+ return -1;
+ }
+
+ for (i = 0; i<PCI_MAX_RESOURCE; i++) {
+
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot read resource\n", __func__);
+ goto error;
+ }
+
+ if (rte_strsplit(buf, sizeof(buf), res_info.ptrs, 3, ' ') != 3) {
+ RTE_LOG(ERR, EAL,
+ "%s(): bad resource format\n", __func__);
+ goto error;
+ }
+ errno = 0;
+ phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+ end_addr = strtoull(res_info.end_addr, NULL, 16);
+ flags = strtoull(res_info.flags, NULL, 16);
+ if (errno != 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): bad resource format\n", __func__);
+ goto error;
+ }
+
+ if (flags & IORESOURCE_MEM) {
+ dev->mem_resource[i].phys_addr = phys_addr;
+ dev->mem_resource[i].len = end_addr - phys_addr + 1;
+ /* not mapped for now */
+ dev->mem_resource[i].addr = NULL;
+ }
+ }
+ fclose(f);
+ return 0;
+
+error:
+ fclose(f);
+ return -1;
+}
+
+/* Compare two PCI device addresses: return 1 if addr is greater than addr2,
+ * 0 otherwise (used to keep the device list sorted by address). */
+static int
+pci_addr_comparison(struct rte_pci_addr *addr, struct rte_pci_addr *addr2)
+{
+ uint64_t dev_addr = (addr->domain << 24) + (addr->bus << 16) + (addr->devid << 8) + addr->function;
+ uint64_t dev_addr2 = (addr2->domain << 24) + (addr2->bus << 16) + (addr2->devid << 8) + addr2->function;
+
+ if (dev_addr > dev_addr2)
+ return 1;
+ else
+ return 0;
+}
+
+
+/* Scan one pci sysfs entry, and fill the devices list from it. */
+static int
+pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
+ uint8_t devid, uint8_t function)
+{
+ char filename[PATH_MAX];
+ unsigned long tmp;
+ struct rte_pci_device *dev;
+
+ dev = malloc(sizeof(*dev));
+ if (dev == NULL) {
+ return -1;
+ }
+
+ memset(dev, 0, sizeof(*dev));
+ dev->addr.domain = domain;
+ dev->addr.bus = bus;
+ dev->addr.devid = devid;
+ dev->addr.function = function;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.vendor_id = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.device_id = (uint16_t)tmp;
+
+ /* get subsystem_vendor id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+ dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.subsystem_vendor_id = (uint16_t)tmp;
+
+ /* get subsystem_device id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_device",
+ dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->id.subsystem_device_id = (uint16_t)tmp;
+
+ /* get max_vfs */
+ dev->max_vfs = 0;
+ snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
+ if (!access(filename, F_OK) &&
+ eal_parse_sysfs_value(filename, &tmp) == 0) {
+ dev->max_vfs = (uint16_t)tmp;
+ }
+
+ /* get numa node */
+ snprintf(filename, sizeof(filename), "%s/numa_node",
+ dirname);
+ if (access(filename, R_OK) != 0) {
+ /* if no NUMA support just set node to -1 */
+ dev->numa_node = -1;
+ } else {
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ dev->numa_node = tmp;
+ }
+
+ /* parse resources */
+ snprintf(filename, sizeof(filename), "%s/resource", dirname);
+ if (pci_parse_sysfs_resource(filename, dev) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__);
+ free(dev);
+ return -1;
+ }
+
+ /* device is valid, add in list (sorted) */
+ if (TAILQ_EMPTY(&pci_device_list)) {
+ TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
+ }
+ else {
+ struct rte_pci_device *dev2 = NULL;
+
+ TAILQ_FOREACH(dev2, &pci_device_list, next) {
+ if (pci_addr_comparison(&dev->addr, &dev2->addr))
+ continue;
+ else {
+ TAILQ_INSERT_BEFORE(dev2, dev, next);
+ return 0;
+ }
+ }
+ TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
+ }
+
+ return 0;
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+static int
+parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
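+ /* a sysfs PCI entry name looks like "0000:01:00.1", i.e. domain:bus:devid.function */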
+ /* first split on ':' */
+ union splitaddr {
+ struct {
+ char *domain;
+ char *bus;
+ char *devid;
+ char *function;
+ };
+ char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */
+ } splitaddr;
+
+ char *buf_copy = strndup(buf, bufsize);
+ if (buf_copy == NULL)
+ return -1;
+
+ if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+ != PCI_FMT_NVAL - 1)
+ goto error;
+ /* final split is on '.' between devid and function */
+ splitaddr.function = strchr(splitaddr.devid,'.');
+ if (splitaddr.function == NULL)
+ goto error;
+ *splitaddr.function++ = '\0';
+
+ /* now convert to int values */
+ errno = 0;
+ *domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+ *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+ *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+ *function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+ if (errno != 0)
+ goto error;
+
+ free(buf_copy); /* free the copy made with strndup */
+ return 0;
+error:
+ free(buf_copy);
+ return -1;
+}
+
+/*
+ * Scan the content of the PCI bus, and the devices in the devices
+ * list
+ */
+static int
+pci_scan(void)
+{
+ struct dirent *e;
+ DIR *dir;
+ char dirname[PATH_MAX];
+ uint16_t domain;
+ uint8_t bus, devid, function;
+
+ dir = opendir(SYSFS_PCI_DEVICES);
+ if (dir == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
+ __func__, strerror(errno));
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ if (e->d_name[0] == '.')
+ continue;
+
+ if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &domain,
+ &bus, &devid, &function) != 0)
+ continue;
+
+ snprintf(dirname, sizeof(dirname), "%s/%s", SYSFS_PCI_DEVICES,
+ e->d_name);
+ if (pci_scan_one(dirname, domain, bus, devid, function) < 0)
+ goto error;
+ }
+ closedir(dir);
+ return 0;
+
+error:
+ closedir(dir);
+ return -1;
+}
+
+#ifdef RTE_PCI_CONFIG
+static int
+pci_config_extended_tag(struct rte_pci_device *dev)
+{
+ struct rte_pci_addr *loc = &dev->addr;
+ char filename[PATH_MAX];
+ char buf[BUFSIZ];
+ FILE *f;
+
+ /* not configured, leave it as is */
+ if (strncmp(RTE_PCI_EXTENDED_TAG, "on", 2) != 0 &&
+ strncmp(RTE_PCI_EXTENDED_TAG, "off", 3) != 0)
+ return 0;
+
+ snprintf(filename, sizeof(filename),
+ SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/" "extended_tag",
+ loc->domain, loc->bus, loc->devid, loc->function);
+ f = fopen(filename, "rw+");
+ if (!f)
+ return -1;
+
+ fgets(buf, sizeof(buf), f);
+ if (strncmp(RTE_PCI_EXTENDED_TAG, "on", 2) == 0) {
+ /* enable Extended Tag */
+ if (strncmp(buf, "on", 2) != 0) {
+ fseek(f, 0, SEEK_SET);
+ fputs("on", f);
+ }
+ } else {
+ /* disable Extended Tag */
+ if (strncmp(buf, "off", 3) != 0) {
+ fseek(f, 0, SEEK_SET);
+ fputs("off", f);
+ }
+ }
+ fclose(f);
+
+ return 0;
+}
+
+static int
+pci_config_max_read_request_size(struct rte_pci_device *dev)
+{
+ struct rte_pci_addr *loc = &dev->addr;
+ char filename[PATH_MAX];
+ char buf[BUFSIZ], param[BUFSIZ];
+ FILE *f;
+ /* size can be 128, 256, 512, 1024, 2048, 4096 */
+ uint32_t max_size = RTE_PCI_MAX_READ_REQUEST_SIZE;
+
+ /* not configured, leave it as is */
+ if (!max_size)
+ return 0;
+
+ snprintf(filename, sizeof(filename),
+ SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/" "max_read_request_size",
+ loc->domain, loc->bus, loc->devid, loc->function);
+ f = fopen(filename, "rw+");
+ if (!f)
+ return -1;
+
+ fgets(buf, sizeof(buf), f);
+ snprintf(param, sizeof(param), "%d", max_size);
+
+ /* check if the size to be set is the same as current */
+ if (strcmp(buf, param) == 0) {
+ fclose(f);
+ return 0;
+ }
+ fseek(f, 0, SEEK_SET);
+ fputs(param, f);
+ fclose(f);
+
+ return 0;
+}
+
+static void
+pci_config_space_set(struct rte_pci_device *dev)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ /* configure extended tag */
+ pci_config_extended_tag(dev);
+
+ /* configure max read request size */
+ pci_config_max_read_request_size(dev);
+}
+#endif
+
+static int
+pci_map_device(struct rte_pci_device *dev)
+{
+ int ret, mapped = 0;
+
+ /* try mapping the NIC resources using VFIO if it exists */
+#ifdef VFIO_PRESENT
+ if (pci_vfio_is_enabled()) {
+ ret = pci_vfio_map_resource(dev);
+ if (ret == 0)
+ mapped = 1;
+ else if (ret < 0)
+ return ret;
+ }
+#endif
+ /* map resources for devices that use igb_uio */
+ if (!mapped) {
+ ret = pci_uio_map_resource(dev);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * If vendor/device ID match, call the devinit() function of the
+ * driver.
+ */
+int
+rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, struct rte_pci_device *dev)
+{
+ int ret;
+ struct rte_pci_id *id_table;
+
+ for (id_table = dr->id_table ; id_table->vendor_id != 0; id_table++) {
+
+ /* check if device's identifiers match the driver's ones */
+ if (id_table->vendor_id != dev->id.vendor_id &&
+ id_table->vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->device_id != dev->id.device_id &&
+ id_table->device_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_vendor_id != dev->id.subsystem_vendor_id &&
+ id_table->subsystem_vendor_id != PCI_ANY_ID)
+ continue;
+ if (id_table->subsystem_device_id != dev->id.subsystem_device_id &&
+ id_table->subsystem_device_id != PCI_ANY_ID)
+ continue;
+
+ struct rte_pci_addr *loc = &dev->addr;
+
+ RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
+ loc->domain, loc->bus, loc->devid, loc->function,
+ dev->numa_node);
+
+ RTE_LOG(DEBUG, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
+ dev->id.device_id, dr->name);
+
+ /* no initialization when blacklisted, return without error */
+ if (dev->devargs != NULL &&
+ dev->devargs->type == RTE_DEVTYPE_BLACKLISTED_PCI) {
+ RTE_LOG(DEBUG, EAL, " Device is blacklisted, not initializing\n");
+ return 1;
+ }
+
+ if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
+#ifdef RTE_PCI_CONFIG
+ /*
+ * Set PCIe config space for high performance.
+ * Return value can be ignored.
+ */
+ pci_config_space_set(dev);
+#endif
+ /* map the device resources (VFIO if available, otherwise igb_uio) */
+ ret = pci_map_device(dev);
+ if (ret != 0)
+ return ret;
+ } else if (dr->drv_flags & RTE_PCI_DRV_FORCE_UNBIND &&
+ rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* unbind current driver */
+ if (pci_unbind_kernel_driver(dev) < 0)
+ return -1;
+ }
+
+ /* reference driver structure */
+ dev->driver = dr;
+
+ /* call the driver devinit() function */
+ return dr->devinit(dr, dev);
+ }
+ /* return positive value if driver is not found */
+ return 1;
+}
+
+/* Init the PCI EAL subsystem */
+int
+rte_eal_pci_init(void)
+{
+ TAILQ_INIT(&pci_driver_list);
+ TAILQ_INIT(&pci_device_list);
+ pci_res_list = RTE_TAILQ_RESERVE_BY_IDX(RTE_TAILQ_PCI,
+ mapped_pci_res_list);
+
+ /* for debug purposes, PCI can be disabled */
+ if (internal_config.no_pci)
+ return 0;
+
+ if (pci_scan() < 0) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot scan PCI bus\n", __func__);
+ return -1;
+ }
+#ifdef VFIO_PRESENT
+ pci_vfio_enable();
+
+ if (pci_vfio_is_enabled()) {
+
+ /* if we are primary process, create a thread to communicate with
+ * secondary processes. the thread will use a socket to wait for
+ * requests from secondary process to send open file descriptors,
+ * because VFIO does not allow multiple open descriptors on a group or
+ * VFIO container.
+ */
+ if (internal_config.process_type == RTE_PROC_PRIMARY &&
+ pci_vfio_mp_sync_setup() < 0)
+ return -1;
+ }
+#endif
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_init.h b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_init.h
new file mode 100755
index 00000000..1070eb88
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_init.h
@@ -0,0 +1,122 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EAL_PCI_INIT_H_
+#define EAL_PCI_INIT_H_
+
+#include "eal_vfio.h"
+
+struct pci_map {
+ void *addr;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t phaddr;
+};
+
+/*
+ * For multi-process we need to reproduce all PCI mappings in secondary
+ * processes, so save them in a tailq.
+ */
+struct mapped_pci_resource {
+ TAILQ_ENTRY(mapped_pci_resource) next;
+
+ struct rte_pci_addr pci_addr;
+ char path[PATH_MAX];
+ int nb_maps;
+ struct pci_map maps[PCI_MAX_RESOURCE];
+};
+
+TAILQ_HEAD(mapped_pci_res_list, mapped_pci_resource);
+extern struct mapped_pci_res_list *pci_res_list;
+
+/*
+ * Helper function to map PCI resources right after hugepages in virtual memory
+ */
+extern void *pci_map_addr;
+void *pci_find_max_end_va(void);
+
+void *pci_map_resource(void *requested_addr, int fd, off_t offset,
+ size_t size);
+
+/* map IGB_UIO resource prototype */
+int pci_uio_map_resource(struct rte_pci_device *dev);
+
+#ifdef VFIO_PRESENT
+
+#define VFIO_MAX_GROUPS 64
+
+int pci_vfio_enable(void);
+int pci_vfio_is_enabled(void);
+int pci_vfio_mp_sync_setup(void);
+
+/* map VFIO resource prototype */
+int pci_vfio_map_resource(struct rte_pci_device *dev);
+int pci_vfio_get_group_fd(int iommu_group_fd);
+int pci_vfio_get_container_fd(void);
+
+/*
+ * Function prototypes for VFIO multiprocess sync functions
+ */
+int vfio_mp_sync_send_request(int socket, int req);
+int vfio_mp_sync_receive_request(int socket);
+int vfio_mp_sync_send_fd(int socket, int fd);
+int vfio_mp_sync_receive_fd(int socket);
+int vfio_mp_sync_connect_to_primary(void);
+
+/* socket comm protocol definitions */
+#define SOCKET_REQ_CONTAINER 0x100
+#define SOCKET_REQ_GROUP 0x200
+#define SOCKET_OK 0x0
+#define SOCKET_NO_FD 0x1
+#define SOCKET_ERR 0xFF
+
+/*
+ * we don't need to store device fd's anywhere since they can be obtained from
+ * the group fd via an ioctl() call.
+ */
+struct vfio_group {
+ int group_no;
+ int fd;
+};
+
+struct vfio_config {
+ int vfio_enabled;
+ int vfio_container_fd;
+ int vfio_container_has_dma;
+ int vfio_group_idx;
+ struct vfio_group vfio_groups[VFIO_MAX_GROUPS];
+};
+
+#endif
+
+#endif /* EAL_PCI_INIT_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_uio.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_uio.c
new file mode 100755
index 00000000..e53f06b8
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_uio.c
@@ -0,0 +1,440 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "rte_pci_dev_ids.h"
+#include "eal_filesystem.h"
+#include "eal_pci_init.h"
+
+static int pci_parse_sysfs_value(const char *filename, uint64_t *val);
+
+void *pci_map_addr = NULL;
+
+
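+/* largest value representable as an off_t; used to range-check mapping offsets read from sysfs */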
+#define OFF_MAX ((uint64_t)(off_t)-1)
+static int
+pci_uio_get_mappings(const char *devname, struct pci_map maps[], int nb_maps)
+{
+ int i;
+ char dirname[PATH_MAX];
+ char filename[PATH_MAX];
+ uint64_t offset, size;
+
+ for (i = 0; i != nb_maps; i++) {
+
+ /* check if map directory exists */
+ snprintf(dirname, sizeof(dirname),
+ "%s/maps/map%u", devname, i);
+
+ if (access(dirname, F_OK) != 0)
+ break;
+
+ /* get mapping offset */
+ snprintf(filename, sizeof(filename),
+ "%s/offset", dirname);
+ if (pci_parse_sysfs_value(filename, &offset) < 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot parse offset of %s\n",
+ __func__, dirname);
+ return -1;
+ }
+
+ /* get mapping size */
+ snprintf(filename, sizeof(filename),
+ "%s/size", dirname);
+ if (pci_parse_sysfs_value(filename, &size) < 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot parse size of %s\n",
+ __func__, dirname);
+ return -1;
+ }
+
+ /* get mapping physical address */
+ snprintf(filename, sizeof(filename),
+ "%s/addr", dirname);
+ if (pci_parse_sysfs_value(filename, &maps[i].phaddr) < 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot parse addr of %s\n",
+ __func__, dirname);
+ return -1;
+ }
+
+ if ((offset > OFF_MAX) || (size > SIZE_MAX)) {
+ RTE_LOG(ERR, EAL,
+ "%s(): offset/size exceed system max value\n",
+ __func__);
+ return -1;
+ }
+
+ maps[i].offset = offset;
+ maps[i].size = size;
+ }
+
+ return i;
+}
+
+static int
+pci_uio_map_secondary(struct rte_pci_device *dev)
+{
+ int fd, i;
+ struct mapped_pci_resource *uio_res;
+
+ TAILQ_FOREACH(uio_res, pci_res_list, next) {
+
+ /* skip this element if it doesn't match our PCI address */
+ if (memcmp(&uio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
+ continue;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ /*
+ * open devname, to mmap it
+ */
+ fd = open(uio_res->path, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ uio_res->path, strerror(errno));
+ return -1;
+ }
+
+ if (pci_map_resource(uio_res->maps[i].addr, fd,
+ (off_t)uio_res->maps[i].offset,
+ (size_t)uio_res->maps[i].size)
+ != uio_res->maps[i].addr) {
+ RTE_LOG(ERR, EAL,
+ "Cannot mmap device resource\n");
+ close(fd);
+ return -1;
+ }
+ /* fd is not needed in the secondary process, close it */
+ close(fd);
+ }
+ return 0;
+ }
+
+ RTE_LOG(ERR, EAL, "Cannot find resource for device\n");
+ return 1;
+}
+
+static int
+pci_mknod_uio_dev(const char *sysfs_uio_path, unsigned uio_num)
+{
+ FILE *f;
+ char filename[PATH_MAX];
+ int ret;
+ unsigned major, minor;
+ dev_t dev;
+
+ /* get the name of the sysfs file that contains the major and minor
+ * of the uio device and read its content */
+ snprintf(filename, sizeof(filename), "%s/dev", sysfs_uio_path);
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): cannot open sysfs to get major:minor\n",
+ __func__);
+ return -1;
+ }
+
+ ret = fscanf(f, "%d:%d", &major, &minor);
+ if (ret != 2) {
+ RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs to get major:minor\n",
+ __func__);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+
+ /* create the char device "mknod /dev/uioX c major minor" */
+ snprintf(filename, sizeof(filename), "/dev/uio%u", uio_num);
+ dev = makedev(major, minor);
+ ret = mknod(filename, S_IFCHR | S_IRUSR | S_IWUSR, dev);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "%s(): mknod() failed %s\n",
+ __func__, strerror(errno));
+ return -1;
+ }
+
+ return ret;
+}
+
+/*
+ * Return the uioX char device used for a pci device. On success, return
+ * the UIO number and fill dstbuf string with the path of the device in
+ * sysfs. On error, return a negative value. In this case dstbuf is
+ * invalid.
+ */
+static int
+pci_get_uio_dev(struct rte_pci_device *dev, char *dstbuf,
+ unsigned int buflen)
+{
+ struct rte_pci_addr *loc = &dev->addr;
+ unsigned int uio_num;
+ struct dirent *e;
+ DIR *dir;
+ char dirname[PATH_MAX];
+
+ /* depending on kernel version, uio can be located in uio/uioX
+ * or uio:uioX */
+
+ snprintf(dirname, sizeof(dirname),
+ SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/uio",
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ dir = opendir(dirname);
+ if (dir == NULL) {
+ /* retry with the parent directory */
+ snprintf(dirname, sizeof(dirname),
+ SYSFS_PCI_DEVICES "/" PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+ dir = opendir(dirname);
+
+ if (dir == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot opendir %s\n", dirname);
+ return -1;
+ }
+ }
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ /* format could be uio%d ...*/
+ int shortprefix_len = sizeof("uio") - 1;
+ /* ... or uio:uio%d */
+ int longprefix_len = sizeof("uio:uio") - 1;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", 3) != 0)
+ continue;
+
+ /* first try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+ snprintf(dstbuf, buflen, "%s/uio%u", dirname, uio_num);
+ break;
+ }
+
+ /* then try uio:uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+ snprintf(dstbuf, buflen, "%s/uio:uio%u", dirname, uio_num);
+ break;
+ }
+ }
+ closedir(dir);
+
+ /* No uio resource found */
+ if (e == NULL)
+ return -1;
+
+ /* create uio device if we've been asked to */
+ if (internal_config.create_uio_dev &&
+ pci_mknod_uio_dev(dstbuf, uio_num) < 0)
+ RTE_LOG(WARNING, EAL, "Cannot create /dev/uio%u\n", uio_num);
+
+ return uio_num;
+}
+
+/* map the PCI resource of a PCI device in virtual memory */
+int
+pci_uio_map_resource(struct rte_pci_device *dev)
+{
+ int i, j;
+ char dirname[PATH_MAX];
+ char devname[PATH_MAX]; /* contains the /dev/uioX */
+ void *mapaddr;
+ int uio_num;
+ uint64_t phaddr;
+ uint64_t offset;
+ uint64_t pagesz;
+ int nb_maps;
+ struct rte_pci_addr *loc = &dev->addr;
+ struct mapped_pci_resource *uio_res;
+ struct pci_map *maps;
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+
+ /* secondary processes - use already recorded details */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return pci_uio_map_secondary(dev);
+
+ /* find uio resource */
+ uio_num = pci_get_uio_dev(dev, dirname, sizeof(dirname));
+ if (uio_num < 0) {
+ RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, "
+ "skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
+ return 1;
+ }
+ snprintf(devname, sizeof(devname), "/dev/uio%u", uio_num);
+
+ /* save fd if in primary process */
+ dev->intr_handle.fd = open(devname, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ return -1;
+ }
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
+
+ /* allocate the mapping details for secondary processes */
+ uio_res = rte_zmalloc("UIO_RES", sizeof(*uio_res), 0);
+ if (uio_res == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot store uio mmap details\n", __func__);
+ return -1;
+ }
+
+ snprintf(uio_res->path, sizeof(uio_res->path), "%s", devname);
+ memcpy(&uio_res->pci_addr, &dev->addr, sizeof(uio_res->pci_addr));
+
+ /* collect info about device mappings */
+ nb_maps = pci_uio_get_mappings(dirname, uio_res->maps,
+ RTE_DIM(uio_res->maps));
+ if (nb_maps < 0) {
+ rte_free(uio_res);
+ return nb_maps;
+ }
+
+ uio_res->nb_maps = nb_maps;
+
+ /* Map all BARs */
+ pagesz = sysconf(_SC_PAGESIZE);
+
+ maps = uio_res->maps;
+ for (i = 0; i != PCI_MAX_RESOURCE; i++) {
+ int fd;
+
+ /* skip empty BAR */
+ phaddr = dev->mem_resource[i].phys_addr;
+ if (phaddr == 0)
+ continue;
+
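+ /* look for a uio mapping whose physical address and size match this BAR */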
+ for (j = 0; j != nb_maps && (phaddr != maps[j].phaddr ||
+ dev->mem_resource[i].len != maps[j].size);
+ j++)
+ ;
+
+ /* if matching map is found, then use it */
+ if (j != nb_maps) {
+ int fail = 0;
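+ /* the uio driver exposes map region j at file offset j * page_size */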
+ offset = j * pagesz;
+
+ /*
+ * open devname, to mmap it
+ */
+ fd = open(devname, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ devname, strerror(errno));
+ return -1;
+ }
+
+ if (maps[j].addr != NULL)
+ fail = 1;
+ else {
+ /* try mapping somewhere close to the end of hugepages */
+ if (pci_map_addr == NULL)
+ pci_map_addr = pci_find_max_end_va();
+
+ mapaddr = pci_map_resource(pci_map_addr, fd, (off_t)offset,
+ (size_t)maps[j].size);
+ if (mapaddr == MAP_FAILED)
+ fail = 1;
+
+ pci_map_addr = RTE_PTR_ADD(mapaddr, (size_t) maps[j].size);
+ }
+
+ if (fail) {
+ rte_free(uio_res);
+ close(fd);
+ return -1;
+ }
+ close(fd);
+
+ maps[j].addr = mapaddr;
+ maps[j].offset = offset;
+ dev->mem_resource[i].addr = mapaddr;
+ }
+ }
+
+ TAILQ_INSERT_TAIL(pci_res_list, uio_res, next);
+
+ return 0;
+}
+
+/*
+ * parse a sysfs file containing one integer value
+ * different from the eal version, as it needs to work with 64-bit values
+ */
+static int
+pci_parse_sysfs_value(const char *filename, uint64_t *val)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char *end = NULL;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
+ __func__, filename);
+ return -1;
+ }
+
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
+ __func__, filename);
+ fclose(f);
+ return -1;
+ }
+ *val = strtoull(buf, &end, 0);
+ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+ RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
+ __func__, filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio.c
new file mode 100755
index 00000000..c1246e8d
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio.c
@@ -0,0 +1,807 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <fcntl.h>
+#include <linux/pci_regs.h>
+#include <sys/eventfd.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_tailq.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+
+#include "eal_filesystem.h"
+#include "eal_pci_init.h"
+#include "eal_vfio.h"
+
+/**
+ * @file
+ * PCI probing under linux (VFIO version)
+ *
+ * This code tries to determine whether the PCI device is bound to the VFIO
+ * driver, and initializes it (maps BARs, sets up interrupts) if that is the case.
+ *
+ * This file is only compiled if CONFIG_RTE_EAL_VFIO is set to "y".
+ */
+
+#ifdef VFIO_PRESENT
+
+#define VFIO_DIR "/dev/vfio"
+#define VFIO_CONTAINER_PATH "/dev/vfio/vfio"
+#define VFIO_GROUP_FMT "/dev/vfio/%u"
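+/* vfio-pci encodes the region index in the upper bits of the file offset,
+ * so region x of a device starts at offset (x << 40) */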
+#define VFIO_GET_REGION_ADDR(x) ((uint64_t) x << 40ULL)
+
+/* per-process VFIO config */
+static struct vfio_config vfio_cfg;
+
+/* get PCI BAR number where MSI-X interrupts are */
+static int
+pci_vfio_get_msix_bar(int fd, int *msix_bar)
+{
+ int ret;
+ uint32_t reg;
+ uint8_t cap_id, cap_offset;
+
+ /* read PCI capability pointer from config space */
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ PCI_CAPABILITY_LIST);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
+ "config space!\n");
+ return -1;
+ }
+
+ /* we need first byte */
+ cap_offset = reg & 0xFF;
+
+ while (cap_offset) {
+
+ /* read PCI capability ID */
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read capability ID from PCI "
+ "config space!\n");
+ return -1;
+ }
+
+ /* we need first byte */
+ cap_id = reg & 0xFF;
+
+ /* if we haven't reached MSI-X, check next capability */
+ if (cap_id != PCI_CAP_ID_MSIX) {
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
+ "config space!\n");
+ return -1;
+ }
+
+ /* we need second byte */
+ cap_offset = (reg & 0xFF00) >> 8;
+
+ continue;
+ }
+ /* else, read table offset */
+ else {
+ /* table offset resides in the next 4 bytes */
+ ret = pread64(fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ cap_offset + 4);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read table offset from PCI config "
+ "space!\n");
+ return -1;
+ }
+
+ *msix_bar = reg & RTE_PCI_MSIX_TABLE_BIR;
+
+ return 0;
+ }
+ }
+ return 0;
+}
+
+/* set PCI bus mastering */
+static int
+pci_vfio_set_bus_master(int dev_fd)
+{
+ uint16_t reg;
+ int ret;
+
+ ret = pread64(dev_fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ PCI_COMMAND);
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n");
+ return -1;
+ }
+
+ /* set the master bit */
+ reg |= PCI_COMMAND_MASTER;
+
+ ret = pwrite64(dev_fd, &reg, sizeof(reg),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
+ PCI_COMMAND);
+
+ if (ret != sizeof(reg)) {
+ RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* set up DMA mappings */
+static int
+pci_vfio_setup_dma_maps(int vfio_container_fd)
+{
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ int i, ret;
+
+ ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
+ VFIO_TYPE1_IOMMU);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " cannot set IOMMU type, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ struct vfio_iommu_type1_dma_map dma_map;
+
+ if (ms[i].addr == NULL)
+ break;
+
+ memset(&dma_map, 0, sizeof(dma_map));
+ dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
+ dma_map.vaddr = ms[i].addr_64;
+ dma_map.size = ms[i].len;
+ dma_map.iova = ms[i].phys_addr;
+ dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
+
+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+
+ if (ret) {
+ RTE_LOG(ERR, EAL, " cannot set up DMA remapping, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* set up interrupt support (but not enable interrupts) */
+static int
+pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
+{
+ int i, ret, intr_idx;
+
+ /* default to invalid index */
+ intr_idx = VFIO_PCI_NUM_IRQS;
+
+ /* get interrupt type from internal config (MSI-X by default, can be
+ * overridden from the command line)
+ */
+ switch (internal_config.vfio_intr_mode) {
+ case RTE_INTR_MODE_MSIX:
+ intr_idx = VFIO_PCI_MSIX_IRQ_INDEX;
+ break;
+ case RTE_INTR_MODE_MSI:
+ intr_idx = VFIO_PCI_MSI_IRQ_INDEX;
+ break;
+ case RTE_INTR_MODE_LEGACY:
+ intr_idx = VFIO_PCI_INTX_IRQ_INDEX;
+ break;
+ /* don't do anything if we want to automatically determine interrupt type */
+ case RTE_INTR_MODE_NONE:
+ break;
+ default:
+ RTE_LOG(ERR, EAL, " unknown default interrupt type!\n");
+ return -1;
+ }
+
+ /* try interrupt types starting from MSI-X, then MSI, then legacy INTx */
+ for (i = VFIO_PCI_MSIX_IRQ_INDEX; i >= 0; i--) {
+ struct vfio_irq_info irq = { .argsz = sizeof(irq) };
+ int fd = -1;
+
+ /* skip interrupt modes we don't want */
+ if (internal_config.vfio_intr_mode != RTE_INTR_MODE_NONE &&
+ i != intr_idx)
+ continue;
+
+ irq.index = i;
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " cannot get IRQ info, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ /* if this vector cannot be used with eventfd, fail if we explicitly
+ * specified interrupt type, otherwise continue */
+ if ((irq.flags & VFIO_IRQ_INFO_EVENTFD) == 0) {
+ if (internal_config.vfio_intr_mode != RTE_INTR_MODE_NONE) {
+ RTE_LOG(ERR, EAL,
+ " interrupt vector does not support eventfd!\n");
+ return -1;
+ } else
+ continue;
+ }
+
+ /* set up an eventfd for interrupts */
+ fd = eventfd(0, 0);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, " cannot set up eventfd, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ dev->intr_handle.fd = fd;
+ dev->intr_handle.vfio_dev_fd = vfio_dev_fd;
+
+ switch (i) {
+ case VFIO_PCI_MSIX_IRQ_INDEX:
+ internal_config.vfio_intr_mode = RTE_INTR_MODE_MSIX;
+ dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
+ break;
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ internal_config.vfio_intr_mode = RTE_INTR_MODE_MSI;
+ dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSI;
+ break;
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ internal_config.vfio_intr_mode = RTE_INTR_MODE_LEGACY;
+ dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_LEGACY;
+ break;
+ default:
+ RTE_LOG(ERR, EAL, " unknown interrupt type!\n");
+ return -1;
+ }
+
+ return 0;
+ }
+
+ /* if we're here, we haven't found a suitable interrupt vector */
+ return -1;
+}
+
+/* open container fd or get an existing one */
+int
+pci_vfio_get_container_fd(void)
+{
+ int ret, vfio_container_fd;
+
+ /* if we're in a primary process, try to open the container */
+ if (internal_config.process_type == RTE_PROC_PRIMARY) {
+ vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
+ if (vfio_container_fd < 0) {
+ RTE_LOG(ERR, EAL, " cannot open VFIO container, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ /* check VFIO API version */
+ ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
+ if (ret != VFIO_API_VERSION) {
+ if (ret < 0)
+ RTE_LOG(ERR, EAL, " could not get VFIO API version, "
+ "error %i (%s)\n", errno, strerror(errno));
+ else
+ RTE_LOG(ERR, EAL, " unsupported VFIO API version!\n");
+ close(vfio_container_fd);
+ return -1;
+ }
+
+ /* check if we support IOMMU type 1 */
+ ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU);
+ if (ret != 1) {
+ if (ret < 0)
+ RTE_LOG(ERR, EAL, " could not get IOMMU type, "
+ "error %i (%s)\n", errno, strerror(errno));
+ else
+ RTE_LOG(ERR, EAL, " unsupported IOMMU type!\n");
+ close(vfio_container_fd);
+ return -1;
+ }
+
+ return vfio_container_fd;
+ } else {
+ /*
+ * if we're in a secondary process, request container fd from the
+ * primary process via our socket
+ */
+ int socket_fd;
+
+ socket_fd = vfio_mp_sync_connect_to_primary();
+ if (socket_fd < 0) {
+ RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
+ return -1;
+ }
+ if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_CONTAINER) < 0) {
+ RTE_LOG(ERR, EAL, " cannot request container fd!\n");
+ close(socket_fd);
+ return -1;
+ }
+ vfio_container_fd = vfio_mp_sync_receive_fd(socket_fd);
+ if (vfio_container_fd < 0) {
+ RTE_LOG(ERR, EAL, " cannot get container fd!\n");
+ close(socket_fd);
+ return -1;
+ }
+ close(socket_fd);
+ return vfio_container_fd;
+ }
+
+ return -1;
+}
+
+/* open group fd or get an existing one */
+int
+pci_vfio_get_group_fd(int iommu_group_no)
+{
+ int i;
+ int vfio_group_fd;
+ char filename[PATH_MAX];
+
+ /* check if we already have the group descriptor open */
+ for (i = 0; i < vfio_cfg.vfio_group_idx; i++)
+ if (vfio_cfg.vfio_groups[i].group_no == iommu_group_no)
+ return vfio_cfg.vfio_groups[i].fd;
+
+ /* if primary, try to open the group */
+ if (internal_config.process_type == RTE_PROC_PRIMARY) {
+ snprintf(filename, sizeof(filename),
+ VFIO_GROUP_FMT, iommu_group_no);
+ vfio_group_fd = open(filename, O_RDWR);
+ if (vfio_group_fd < 0) {
+ /* if file not found, it's not an error */
+ if (errno != ENOENT) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
+ strerror(errno));
+ return -1;
+ }
+ return 0;
+ }
+
+ /* if the fd is valid, create a new group for it */
+ if (vfio_cfg.vfio_group_idx == VFIO_MAX_GROUPS) {
+ RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
+ return -1;
+ }
+ vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no;
+ vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd;
+ return vfio_group_fd;
+ }
+ /* if we're in a secondary process, request group fd from the primary
+ * process via our socket
+ */
+ else {
+ int socket_fd, ret;
+
+ socket_fd = vfio_mp_sync_connect_to_primary();
+
+ if (socket_fd < 0) {
+ RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
+ return -1;
+ }
+ if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_GROUP) < 0) {
+ RTE_LOG(ERR, EAL, " cannot request container fd!\n");
+ close(socket_fd);
+ return -1;
+ }
+ if (vfio_mp_sync_send_request(socket_fd, iommu_group_no) < 0) {
+ RTE_LOG(ERR, EAL, " cannot send group number!\n");
+ close(socket_fd);
+ return -1;
+ }
+ ret = vfio_mp_sync_receive_request(socket_fd);
+ switch (ret) {
+ case SOCKET_NO_FD:
+ close(socket_fd);
+ return 0;
+ case SOCKET_OK:
+ vfio_group_fd = vfio_mp_sync_receive_fd(socket_fd);
+ /* if we got the fd, return it */
+ if (vfio_group_fd > 0) {
+ close(socket_fd);
+ return vfio_group_fd;
+ }
+ /* fall-through on error */
+ default:
+ RTE_LOG(ERR, EAL, " cannot get container fd!\n");
+ close(socket_fd);
+ return -1;
+ }
+ }
+ return -1;
+}
+
+/* parse IOMMU group number for a PCI device
+ * returns -1 for errors, 0 for non-existent group */
+static int
+pci_vfio_get_group_no(const char *pci_addr)
+{
+ char linkname[PATH_MAX];
+ char filename[PATH_MAX];
+ char *tok[16], *group_tok, *end;
+ int ret, iommu_group_no;
+
+ memset(linkname, 0, sizeof(linkname));
+ memset(filename, 0, sizeof(filename));
+
+ /* try to find out IOMMU group for this device */
+ snprintf(linkname, sizeof(linkname),
+ SYSFS_PCI_DEVICES "/%s/iommu_group", pci_addr);
+
+ ret = readlink(linkname, filename, sizeof(filename));
+
+ /* if the link doesn't exist, no VFIO for us */
+ if (ret < 0)
+ return 0;
+
+ ret = rte_strsplit(filename, sizeof(filename),
+ tok, RTE_DIM(tok), '/');
+
+ if (ret <= 0) {
+ RTE_LOG(ERR, EAL, " %s cannot get IOMMU group\n", pci_addr);
+ return -1;
+ }
+
+ /* IOMMU group is always the last token */
+ errno = 0;
+ group_tok = tok[ret - 1];
+ end = group_tok;
+ iommu_group_no = strtol(group_tok, &end, 10);
+ if ((end != group_tok && *end != '\0') || errno != 0) {
+ RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", pci_addr);
+ return -1;
+ }
+
+ return iommu_group_no;
+}
+
+static void
+clear_current_group(void)
+{
+ vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = 0;
+ vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = -1;
+}
+
+
+/*
+ * map the PCI resources of a PCI device in virtual memory (VFIO version).
+ * primary and secondary processes follow almost exactly the same path
+ */
+int
+pci_vfio_map_resource(struct rte_pci_device *dev)
+{
+ struct vfio_group_status group_status = {
+ .argsz = sizeof(group_status)
+ };
+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
+ int vfio_group_fd, vfio_dev_fd;
+ int iommu_group_no;
+ char pci_addr[PATH_MAX] = {0};
+ struct rte_pci_addr *loc = &dev->addr;
+ int i, ret, msix_bar;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct pci_map *maps;
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ /* get group number */
+ iommu_group_no = pci_vfio_get_group_no(pci_addr);
+
+ /* if 0, group doesn't exist */
+ if (iommu_group_no == 0) {
+ RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
+ pci_addr);
+ return 1;
+ }
+ /* if negative, something failed */
+ else if (iommu_group_no < 0)
+ return -1;
+
+ /* get the actual group fd */
+ vfio_group_fd = pci_vfio_get_group_fd(iommu_group_no);
+ if (vfio_group_fd < 0)
+ return -1;
+
+ /* store group fd */
+ vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no;
+ vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd;
+
+ /* if group_fd == 0, that means the device isn't managed by VFIO */
+ if (vfio_group_fd == 0) {
+ RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
+ pci_addr);
+ /* we store 0 as group fd to distinguish between existing but
+ * unbound VFIO groups, and groups that don't exist at all.
+ */
+ vfio_cfg.vfio_group_idx++;
+ return 1;
+ }
+
+ /*
+ * at this point, we know at least one port on this device is bound to VFIO,
+ * so we can proceed to try and set this particular port up
+ */
+
+ /* check if the group is viable */
+ ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " %s cannot get group status, "
+ "error %i (%s)\n", pci_addr, errno, strerror(errno));
+ close(vfio_group_fd);
+ clear_current_group();
+ return -1;
+ } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
+ RTE_LOG(ERR, EAL, " %s VFIO group is not viable!\n", pci_addr);
+ close(vfio_group_fd);
+ clear_current_group();
+ return -1;
+ }
+
+ /*
+ * at this point, we know that this group is viable (meaning, all devices
+ * are either bound to VFIO or not bound to anything)
+ */
+
+ /* check if group does not have a container yet */
+ if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
+
+ /* add group to a container */
+ ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
+ &vfio_cfg.vfio_container_fd);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, "
+ "error %i (%s)\n", pci_addr, errno, strerror(errno));
+ close(vfio_group_fd);
+ clear_current_group();
+ return -1;
+ }
+ /*
+ * at this point we know that this group has been successfully
+ * initialized, so we increment vfio_group_idx to indicate that we can
+ * add new groups.
+ */
+ vfio_cfg.vfio_group_idx++;
+ }
+
+ /*
+ * set up DMA mappings for container
+ *
+ * needs to be done only once, only when at least one group is assigned to
+ * a container and only in primary process
+ */
+ if (internal_config.process_type == RTE_PROC_PRIMARY &&
+ vfio_cfg.vfio_container_has_dma == 0) {
+ ret = pci_vfio_setup_dma_maps(vfio_cfg.vfio_container_fd);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " %s DMA remapping failed, "
+ "error %i (%s)\n", pci_addr, errno, strerror(errno));
+ return -1;
+ }
+ vfio_cfg.vfio_container_has_dma = 1;
+ }
+
+ /* get a file descriptor for the device */
+ vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, pci_addr);
+ if (vfio_dev_fd < 0) {
+ /* if we cannot get a device fd, this simply means that this
+ * particular port is not bound to VFIO
+ */
+ RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
+ pci_addr);
+ return 1;
+ }
+
+ /* test and setup the device */
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_INFO, &device_info);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " %s cannot get device info, "
+ "error %i (%s)\n", pci_addr, errno, strerror(errno));
+ close(vfio_dev_fd);
+ return -1;
+ }
+
+ /* get MSI-X BAR, if any (we have to know where it is because we can't
+ * mmap it when using VFIO) */
+ msix_bar = -1;
+ ret = pci_vfio_get_msix_bar(vfio_dev_fd, &msix_bar);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " %s cannot get MSI-X BAR number!\n", pci_addr);
+ close(vfio_dev_fd);
+ return -1;
+ }
+
+ /* if we're in a primary process, allocate vfio_res and get region info */
+ if (internal_config.process_type == RTE_PROC_PRIMARY) {
+ vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot store uio mmap details\n", __func__);
+ close(vfio_dev_fd);
+ return -1;
+ }
+ memcpy(&vfio_res->pci_addr, &dev->addr, sizeof(vfio_res->pci_addr));
+
+ /* get number of regions (up to BAR5) */
+ vfio_res->nb_maps = RTE_MIN((int) device_info.num_regions,
+ VFIO_PCI_BAR5_REGION_INDEX + 1);
+ } else {
+ /* if we're in a secondary process, just find our tailq entry */
+ TAILQ_FOREACH(vfio_res, pci_res_list, next) {
+ if (memcmp(&vfio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
+ continue;
+ break;
+ }
+ /* if we haven't found our tailq entry, something's wrong */
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
+ pci_addr);
+ close(vfio_dev_fd);
+ return -1;
+ }
+ }
+
+ /* map BARs */
+ maps = vfio_res->maps;
+
+ for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+ struct vfio_region_info reg = { .argsz = sizeof(reg) };
+ void *bar_addr;
+
+ reg.index = i;
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
+
+ if (ret) {
+ RTE_LOG(ERR, EAL, " %s cannot get device region info "
+ "error %i (%s)\n", pci_addr, errno, strerror(errno));
+ close(vfio_dev_fd);
+ if (internal_config.process_type == RTE_PROC_PRIMARY)
+ rte_free(vfio_res);
+ return -1;
+ }
+
+ /* skip non-mmappable BARs */
+ if ((reg.flags & VFIO_REGION_INFO_FLAG_MMAP) == 0)
+ continue;
+
+ /* skip MSI-X BAR */
+ if (i == msix_bar)
+ continue;
+
+ if (internal_config.process_type == RTE_PROC_PRIMARY) {
+ /* try mapping somewhere close to the end of hugepages */
+ if (pci_map_addr == NULL)
+ pci_map_addr = pci_find_max_end_va();
+
+ bar_addr = pci_map_resource(pci_map_addr, vfio_dev_fd, reg.offset,
+ reg.size);
+ pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);
+ } else {
+ bar_addr = pci_map_resource(maps[i].addr, vfio_dev_fd, reg.offset,
+ reg.size);
+ }
+
+ if (bar_addr == MAP_FAILED ||
+ (internal_config.process_type == RTE_PROC_SECONDARY &&
+ bar_addr != maps[i].addr)) {
+ RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n", pci_addr, i,
+ strerror(errno));
+ close(vfio_dev_fd);
+ if (internal_config.process_type == RTE_PROC_PRIMARY)
+ rte_free(vfio_res);
+ return -1;
+ }
+
+ maps[i].addr = bar_addr;
+ maps[i].offset = reg.offset;
+ maps[i].size = reg.size;
+ dev->mem_resource[i].addr = bar_addr;
+ }
+
+ /* if secondary process, do not set up interrupts */
+ if (internal_config.process_type == RTE_PROC_PRIMARY) {
+ if (pci_vfio_setup_interrupts(dev, vfio_dev_fd) != 0) {
+ RTE_LOG(ERR, EAL, " %s error setting up interrupts!\n", pci_addr);
+ close(vfio_dev_fd);
+ rte_free(vfio_res);
+ return -1;
+ }
+
+ /* set bus mastering for the device */
+ if (pci_vfio_set_bus_master(vfio_dev_fd)) {
+ RTE_LOG(ERR, EAL, " %s cannot set up bus mastering!\n", pci_addr);
+ close(vfio_dev_fd);
+ rte_free(vfio_res);
+ return -1;
+ }
+
+ /* Reset the device */
+ ioctl(vfio_dev_fd, VFIO_DEVICE_RESET);
+ }
+
+ if (internal_config.process_type == RTE_PROC_PRIMARY)
+ TAILQ_INSERT_TAIL(pci_res_list, vfio_res, next);
+
+ return 0;
+}
+
+int
+pci_vfio_enable(void)
+{
+ /* initialize group list */
+ int i;
+
+ for (i = 0; i < VFIO_MAX_GROUPS; i++) {
+ vfio_cfg.vfio_groups[i].fd = -1;
+ vfio_cfg.vfio_groups[i].group_no = -1;
+ }
+ vfio_cfg.vfio_container_fd = pci_vfio_get_container_fd();
+
+ /* check if we have VFIO driver enabled */
+ if (vfio_cfg.vfio_container_fd != -1)
+ vfio_cfg.vfio_enabled = 1;
+ else
+ RTE_LOG(INFO, EAL, "VFIO support could not be initialized\n");
+
+ return 0;
+}
+
+int
+pci_vfio_is_enabled(void)
+{
+ return vfio_cfg.vfio_enabled;
+}
+#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c
new file mode 100755
index 00000000..6588fb1f
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c
@@ -0,0 +1,395 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+
+/* sys/un.h with __USE_MISC uses strlen, which is unsafe */
+#ifdef __USE_MISC
+#define REMOVED_USE_MISC
+#undef __USE_MISC
+#endif
+#include <sys/un.h>
+/* redefine __USE_MISC only if we undefined it above */
+#ifdef REMOVED_USE_MISC
+#define __USE_MISC
+#undef REMOVED_USE_MISC
+#endif
+
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_tailq.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+
+#include "eal_filesystem.h"
+#include "eal_pci_init.h"
+
+/**
+ * @file
+ * VFIO socket for communication between primary and secondary processes.
+ *
+ * This file is only compiled if CONFIG_RTE_EAL_VFIO is set to "y".
+ */
+
+#ifdef VFIO_PRESENT
+
+#define SOCKET_PATH_FMT "%s/.%s_mp_socket"
+#define CMSGLEN (CMSG_LEN(sizeof(int)))
+#define FD_TO_CMSGHDR(fd, chdr) \
+ do {\
+ (chdr).cmsg_len = CMSGLEN;\
+ (chdr).cmsg_level = SOL_SOCKET;\
+ (chdr).cmsg_type = SCM_RIGHTS;\
+ memcpy((chdr).__cmsg_data, &(fd), sizeof(fd));\
+ } while (0)
+#define CMSGHDR_TO_FD(chdr, fd) \
+ memcpy(&(fd), (chdr).__cmsg_data, sizeof(fd))
+
+static pthread_t socket_thread;
+static int mp_socket_fd;
+
+
+/* get socket path (/var/run if root, $HOME otherwise) */
+static void
+get_socket_path(char *buffer, int bufsz)
+{
+ const char *dir = "/var/run";
+ const char *home_dir = getenv("HOME");
+
+ if (getuid() != 0 && home_dir != NULL)
+ dir = home_dir;
+
+ /* use current prefix as file path */
+ snprintf(buffer, bufsz, SOCKET_PATH_FMT, dir,
+ internal_config.hugefile_prefix);
+}
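+
+/*
+ * For example (a sketch, assuming the default "rte" hugefile prefix): a root
+ * user gets "/var/run/.rte_mp_socket", while a non-root user with
+ * HOME=/home/user gets "/home/user/.rte_mp_socket".
+ */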
+
+
+
+/*
+ * data flow for socket comm protocol:
+ * 1. client sends SOCKET_REQ_CONTAINER or SOCKET_REQ_GROUP
+ * 1a. in case of SOCKET_REQ_GROUP, client also then sends group number
+ * 2. server receives message
+ * 2a. in case of invalid group, SOCKET_ERR is sent back to client
+ * 2b. in case of unbound group, SOCKET_NO_FD is sent back to client
+ * 2c. in case of valid group, SOCKET_OK is sent and immediately followed by fd
+ *
+ * in case of any error, socket is closed.
+ */
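+
+/*
+ * Client-side sketch of the flow above (illustrative only; the real callers
+ * live in eal_pci_vfio.c):
+ *
+ *     int sock = vfio_mp_sync_connect_to_primary();
+ *     vfio_mp_sync_send_request(sock, SOCKET_REQ_CONTAINER);
+ *     int fd = vfio_mp_sync_receive_fd(sock);   (returns -1 on SOCKET_ERR)
+ *     close(sock);
+ */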
+
+/* send a request, return -1 on error */
+int
+vfio_mp_sync_send_request(int socket, int req)
+{
+ struct msghdr hdr;
+ struct iovec iov;
+ int buf;
+ int ret;
+
+ memset(&hdr, 0, sizeof(hdr));
+
+ buf = req;
+
+ hdr.msg_iov = &iov;
+ hdr.msg_iovlen = 1;
+ iov.iov_base = (char *) &buf;
+ iov.iov_len = sizeof(buf);
+
+ ret = sendmsg(socket, &hdr, 0);
+ if (ret < 0)
+ return -1;
+ return 0;
+}
+
+/* receive a request and return it */
+int
+vfio_mp_sync_receive_request(int socket)
+{
+ int buf;
+ struct msghdr hdr;
+ struct iovec iov;
+ int ret, req;
+
+ memset(&hdr, 0, sizeof(hdr));
+
+ buf = SOCKET_ERR;
+
+ hdr.msg_iov = &iov;
+ hdr.msg_iovlen = 1;
+ iov.iov_base = (char *) &buf;
+ iov.iov_len = sizeof(buf);
+
+ ret = recvmsg(socket, &hdr, 0);
+ if (ret < 0)
+ return -1;
+
+ req = buf;
+
+ return req;
+}
+
+/* send OK in message, fd in control message */
+int
+vfio_mp_sync_send_fd(int socket, int fd)
+{
+ int buf;
+ struct msghdr hdr;
+ struct cmsghdr *chdr;
+ char chdr_buf[CMSGLEN];
+ struct iovec iov;
+ int ret;
+
+ chdr = (struct cmsghdr *) chdr_buf;
+ memset(chdr, 0, sizeof(chdr_buf));
+ memset(&hdr, 0, sizeof(hdr));
+
+ hdr.msg_iov = &iov;
+ hdr.msg_iovlen = 1;
+ iov.iov_base = (char *) &buf;
+ iov.iov_len = sizeof(buf);
+ hdr.msg_control = chdr;
+ hdr.msg_controllen = CMSGLEN;
+
+ buf = SOCKET_OK;
+ FD_TO_CMSGHDR(fd, *chdr);
+
+ ret = sendmsg(socket, &hdr, 0);
+ if (ret < 0)
+ return -1;
+ return 0;
+}
+
+/* receive OK in message, fd in control message */
+int
+vfio_mp_sync_receive_fd(int socket)
+{
+ int buf;
+ struct msghdr hdr;
+ struct cmsghdr *chdr;
+ char chdr_buf[CMSGLEN];
+ struct iovec iov;
+ int ret, req, fd;
+
+ buf = SOCKET_ERR;
+
+ chdr = (struct cmsghdr *) chdr_buf;
+ memset(chdr, 0, sizeof(chdr_buf));
+ memset(&hdr, 0, sizeof(hdr));
+
+ hdr.msg_iov = &iov;
+ hdr.msg_iovlen = 1;
+ iov.iov_base = (char *) &buf;
+ iov.iov_len = sizeof(buf);
+ hdr.msg_control = chdr;
+ hdr.msg_controllen = CMSGLEN;
+
+ ret = recvmsg(socket, &hdr, 0);
+ if (ret < 0)
+ return -1;
+
+ req = buf;
+
+ if (req != SOCKET_OK)
+ return -1;
+
+ CMSGHDR_TO_FD(*chdr, fd);
+
+ return fd;
+}
+
+/* connect socket_fd in secondary process to the primary process's socket */
+int
+vfio_mp_sync_connect_to_primary(void)
+{
+ struct sockaddr_un addr;
+ socklen_t sockaddr_len;
+ int socket_fd;
+
+ /* set up a socket */
+ socket_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
+ if (socket_fd < 0) {
+ RTE_LOG(ERR, EAL, "Failed to create socket!\n");
+ return -1;
+ }
+
+ get_socket_path(addr.sun_path, sizeof(addr.sun_path));
+ addr.sun_family = AF_UNIX;
+
+ sockaddr_len = sizeof(struct sockaddr_un);
+
+ if (connect(socket_fd, (struct sockaddr *) &addr, sockaddr_len) == 0)
+ return socket_fd;
+
+ /* if connect failed */
+ close(socket_fd);
+ return -1;
+}
+
+
+
+/*
+ * socket listening thread for primary process
+ */
+static __attribute__((noreturn)) void *
+pci_vfio_mp_sync_thread(void __rte_unused * arg)
+{
+ int ret, fd, vfio_group_no;
+
+ /* wait for requests on the socket */
+ for (;;) {
+ int conn_sock;
+ struct sockaddr_un addr;
+ socklen_t sockaddr_len = sizeof(addr);
+
+ /* this is a blocking call */
+ conn_sock = accept(mp_socket_fd, (struct sockaddr *) &addr,
+ &sockaddr_len);
+
+ /* just restart on error */
+ if (conn_sock == -1)
+ continue;
+
+ /* set socket to linger after close */
+ struct linger l;
+ l.l_onoff = 1;
+ l.l_linger = 60;
+ setsockopt(conn_sock, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
+
+ ret = vfio_mp_sync_receive_request(conn_sock);
+
+ switch (ret) {
+ case SOCKET_REQ_CONTAINER:
+ fd = pci_vfio_get_container_fd();
+ if (fd < 0)
+ vfio_mp_sync_send_request(conn_sock, SOCKET_ERR);
+ else
+ vfio_mp_sync_send_fd(conn_sock, fd);
+ break;
+ case SOCKET_REQ_GROUP:
+ /* wait for group number */
+ vfio_group_no = vfio_mp_sync_receive_request(conn_sock);
+ if (vfio_group_no < 0) {
+ close(conn_sock);
+ continue;
+ }
+
+ fd = pci_vfio_get_group_fd(vfio_group_no);
+
+ if (fd < 0)
+ vfio_mp_sync_send_request(conn_sock, SOCKET_ERR);
+ /* if VFIO group exists but isn't bound to VFIO driver */
+ else if (fd == 0)
+ vfio_mp_sync_send_request(conn_sock, SOCKET_NO_FD);
+ /* if group exists and is bound to VFIO driver */
+ else {
+ vfio_mp_sync_send_request(conn_sock, SOCKET_OK);
+ vfio_mp_sync_send_fd(conn_sock, fd);
+ }
+ break;
+ default:
+ vfio_mp_sync_send_request(conn_sock, SOCKET_ERR);
+ break;
+ }
+ close(conn_sock);
+ }
+}
+
+static int
+vfio_mp_sync_socket_setup(void)
+{
+ int ret, socket_fd;
+ struct sockaddr_un addr;
+ socklen_t sockaddr_len;
+
+ /* set up a socket */
+ socket_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
+ if (socket_fd < 0) {
+ RTE_LOG(ERR, EAL, "Failed to create socket!\n");
+ return -1;
+ }
+
+ get_socket_path(addr.sun_path, sizeof(addr.sun_path));
+ addr.sun_family = AF_UNIX;
+
+ sockaddr_len = sizeof(struct sockaddr_un);
+
+ unlink(addr.sun_path);
+
+ ret = bind(socket_fd, (struct sockaddr *) &addr, sockaddr_len);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Failed to bind socket: %s!\n", strerror(errno));
+ close(socket_fd);
+ return -1;
+ }
+
+ ret = listen(socket_fd, 50);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Failed to listen: %s!\n", strerror(errno));
+ close(socket_fd);
+ return -1;
+ }
+
+ /* save the socket in local configuration */
+ mp_socket_fd = socket_fd;
+
+ return 0;
+}
+
+/*
+ * set up a local socket and tell it to listen for incoming connections
+ */
+int
+pci_vfio_mp_sync_setup(void)
+{
+ int ret;
+
+ if (vfio_mp_sync_socket_setup() < 0) {
+ RTE_LOG(ERR, EAL, "Failed to set up local socket!\n");
+ return -1;
+ }
+
+ ret = pthread_create(&socket_thread, NULL,
+ pci_vfio_mp_sync_thread, NULL);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Failed to create thread for communication with "
+ "secondary processes!\n");
+ close(mp_socket_fd);
+ return -1;
+ }
+ return 0;
+}
+
+#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_thread.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_thread.c
new file mode 100755
index 00000000..80a985f2
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_thread.c
@@ -0,0 +1,233 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/queue.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_launch.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_per_lcore.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+
+#include "eal_private.h"
+#include "eal_thread.h"
+
+RTE_DEFINE_PER_LCORE(unsigned, _lcore_id);
+
+/*
+ * Send a message to a slave lcore identified by slave_id to call a
+ * function f with argument arg. Once the execution is done, the
+ * remote lcore switches to the FINISHED state.
+ */
+int
+rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
+{
+ int n;
+ char c = 0;
+ int m2s = lcore_config[slave_id].pipe_master2slave[1];
+ int s2m = lcore_config[slave_id].pipe_slave2master[0];
+
+ if (lcore_config[slave_id].state != WAIT)
+ return -EBUSY;
+
+ lcore_config[slave_id].f = f;
+ lcore_config[slave_id].arg = arg;
+
+ /* send message */
+ n = 0;
+ while (n == 0 || (n < 0 && errno == EINTR))
+ n = write(m2s, &c, 1);
+ if (n < 0)
+ rte_panic("cannot write on configuration pipe\n");
+
+ /* wait ack */
+ do {
+ n = read(s2m, &c, 1);
+ } while (n < 0 && errno == EINTR);
+
+ if (n <= 0)
+ rte_panic("cannot read on configuration pipe\n");
+
+ return 0;
+}
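+
+/*
+ * Typical usage from the master lcore (an illustrative sketch; worker_fn,
+ * worker_arg and slave_id are placeholders): launch a worker and later
+ * collect its return value with rte_eal_wait_lcore(), which blocks until the
+ * slave is done:
+ *
+ *     rte_eal_remote_launch(worker_fn, worker_arg, slave_id);
+ *     ret = rte_eal_wait_lcore(slave_id);
+ */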
+
+/* set affinity for current thread */
+static int
+eal_thread_set_affinity(void)
+{
+ int s;
+ pthread_t thread;
+
+/*
+ * According to the section VERSIONS of the CPU_ALLOC man page:
+ *
+ * The CPU_ZERO(), CPU_SET(), CPU_CLR(), and CPU_ISSET() macros were added
+ * in glibc 2.3.3.
+ *
+ * CPU_COUNT() first appeared in glibc 2.6.
+ *
+ * CPU_AND(), CPU_OR(), CPU_XOR(), CPU_EQUAL(), CPU_ALLOC(),
+ * CPU_ALLOC_SIZE(), CPU_FREE(), CPU_ZERO_S(), CPU_SET_S(), CPU_CLR_S(),
+ * CPU_ISSET_S(), CPU_AND_S(), CPU_OR_S(), CPU_XOR_S(), and CPU_EQUAL_S()
+ * first appeared in glibc 2.7.
+ */
+#if defined(CPU_ALLOC)
+ size_t size;
+ cpu_set_t *cpusetp;
+
+ cpusetp = CPU_ALLOC(RTE_MAX_LCORE);
+ if (cpusetp == NULL) {
+ RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
+ return -1;
+ }
+
+ size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
+ CPU_ZERO_S(size, cpusetp);
+ CPU_SET_S(rte_lcore_id(), size, cpusetp);
+
+ thread = pthread_self();
+ s = pthread_setaffinity_np(thread, size, cpusetp);
+ if (s != 0) {
+ RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
+ CPU_FREE(cpusetp);
+ return -1;
+ }
+
+ CPU_FREE(cpusetp);
+#else /* CPU_ALLOC */
+ cpu_set_t cpuset;
+ CPU_ZERO( &cpuset );
+ CPU_SET( rte_lcore_id(), &cpuset );
+
+ thread = pthread_self();
+ s = pthread_setaffinity_np(thread, sizeof( cpuset ), &cpuset);
+ if (s != 0) {
+ RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
+ return -1;
+ }
+#endif
+ return 0;
+}
+
+void eal_thread_init_master(unsigned lcore_id)
+{
+ /* set the lcore ID in per-lcore memory area */
+ RTE_PER_LCORE(_lcore_id) = lcore_id;
+
+ /* set CPU affinity */
+ if (eal_thread_set_affinity() < 0)
+ rte_panic("cannot set affinity\n");
+}
+
+/* main loop of threads */
+__attribute__((noreturn)) void *
+eal_thread_loop(__attribute__((unused)) void *arg)
+{
+ char c;
+ int n, ret;
+ unsigned lcore_id;
+ pthread_t thread_id;
+ int m2s, s2m;
+
+ thread_id = pthread_self();
+
+ /* retrieve our lcore_id from the configuration structure */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (thread_id == lcore_config[lcore_id].thread_id)
+ break;
+ }
+ if (lcore_id == RTE_MAX_LCORE)
+ rte_panic("cannot retrieve lcore id\n");
+
+ RTE_LOG(DEBUG, EAL, "Core %u is ready (tid=%x)\n",
+ lcore_id, (int)thread_id);
+
+ m2s = lcore_config[lcore_id].pipe_master2slave[0];
+ s2m = lcore_config[lcore_id].pipe_slave2master[1];
+
+ /* set the lcore ID in per-lcore memory area */
+ RTE_PER_LCORE(_lcore_id) = lcore_id;
+
+ /* set CPU affinity */
+ if (eal_thread_set_affinity() < 0)
+ rte_panic("cannot set affinity\n");
+
+ /* read on our pipe to get commands */
+ while (1) {
+ void *fct_arg;
+
+ /* wait command */
+ do {
+ n = read(m2s, &c, 1);
+ } while (n < 0 && errno == EINTR);
+
+ if (n <= 0)
+ rte_panic("cannot read on configuration pipe\n");
+
+ lcore_config[lcore_id].state = RUNNING;
+
+ /* send ack */
+ n = 0;
+ while (n == 0 || (n < 0 && errno == EINTR))
+ n = write(s2m, &c, 1);
+ if (n < 0)
+ rte_panic("cannot write on configuration pipe\n");
+
+ if (lcore_config[lcore_id].f == NULL)
+ rte_panic("NULL function pointer\n");
+
+ /* call the function and store the return value */
+ fct_arg = lcore_config[lcore_id].arg;
+ ret = lcore_config[lcore_id].f(fct_arg);
+ lcore_config[lcore_id].ret = ret;
+ rte_wmb();
+ lcore_config[lcore_id].state = FINISHED;
+ }
+
+ /* never reached */
+ /* pthread_exit(NULL); */
+ /* return NULL; */
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_timer.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_timer.c
new file mode 100755
index 00000000..6321e42e
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_timer.c
@@ -0,0 +1,344 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012-2013 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <pthread.h>
+#include <errno.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+#include <rte_tailq.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_debug.h>
+
+#include "eal_private.h"
+#include "eal_internal_cfg.h"
+
+enum timer_source eal_timer_source = EAL_TIMER_HPET;
+
+/* The frequency of the RDTSC timer resolution */
+static uint64_t eal_tsc_resolution_hz = 0;
+
+#ifdef RTE_LIBEAL_USE_HPET
+
+#define DEV_HPET "/dev/hpet"
+
+/* Maximum number of counters. */
+#define HPET_TIMER_NUM 3
+
+/* General capabilities register */
+#define CLK_PERIOD_SHIFT 32 /* Clock period shift. */
+#define CLK_PERIOD_MASK 0xffffffff00000000ULL /* Clock period mask. */
+
+/**
+ * HPET timer registers. From the Intel IA-PC HPET (High Precision Event
+ * Timers) Specification.
+ */
+struct eal_hpet_regs {
+ /* Memory-mapped, software visible registers */
+ uint64_t capabilities; /**< RO General Capabilities Register. */
+ uint64_t reserved0; /**< Reserved for future use. */
+ uint64_t config; /**< RW General Configuration Register. */
+ uint64_t reserved1; /**< Reserved for future use. */
+ uint64_t isr; /**< RW Clear General Interrupt Status. */
+ uint64_t reserved2[25]; /**< Reserved for future use. */
+ union {
+ uint64_t counter; /**< RW Main Counter Value Register. */
+ struct {
+ uint32_t counter_l; /**< RW Main Counter Low. */
+ uint32_t counter_h; /**< RW Main Counter High. */
+ };
+ };
+ uint64_t reserved3; /**< Reserved for future use. */
+ struct {
+ uint64_t config; /**< RW Timer Config and Capability Reg. */
+ uint64_t comp; /**< RW Timer Comparator Value Register. */
+ uint64_t fsb; /**< RW FSB Interrupt Route Register. */
+ uint64_t reserved4; /**< Reserved for future use. */
+ } timers[HPET_TIMER_NUM]; /**< Set of HPET timers. */
+};
+
+/* Mmap'd hpet registers */
+static volatile struct eal_hpet_regs *eal_hpet = NULL;
+
+/* Period at which the HPET counter increments in
+ * femtoseconds (10^-15 seconds). */
+static uint32_t eal_hpet_resolution_fs = 0;
+
+/* Frequency of the HPET counter in Hz */
+static uint64_t eal_hpet_resolution_hz = 0;
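+
+/*
+ * For example (assuming the common 14.31818 MHz HPET): the capabilities
+ * register reports a period of roughly 69841279 fs, and
+ * 10^15 / 69841279 fs ~= 14318180 Hz, which is what rte_eal_hpet_init()
+ * computes below.
+ */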
+
+/* Incremented 4 times during one full 32-bit HPET wrap-around */
+static uint32_t eal_hpet_msb;
+
+static pthread_t msb_inc_thread_id;
+
+/*
+ * This function runs on a dedicated thread to update a global variable
+ * used to track the MSB of the HPET counter (unfortunately, we need this
+ * because the HPET is 32 bits by default under Linux).
+ */
+static void
+hpet_msb_inc(__attribute__((unused)) void *arg)
+{
+ uint32_t t;
+
+ while (1) {
+ t = (eal_hpet->counter_l >> 30);
+ if (t != (eal_hpet_msb & 3))
+ eal_hpet_msb ++;
+ sleep(10);
+ }
+}
+
+uint64_t
+rte_get_hpet_hz(void)
+{
+ if(internal_config.no_hpet)
+ rte_panic("Error, HPET called, but no HPET present\n");
+
+ return eal_hpet_resolution_hz;
+}
+
+uint64_t
+rte_get_hpet_cycles(void)
+{
+ uint32_t t, msb;
+ uint64_t ret;
+
+ if(internal_config.no_hpet)
+ rte_panic("Error, HPET called, but no HPET present\n");
+
+ t = eal_hpet->counter_l;
+ msb = eal_hpet_msb;
+ ret = (msb + 2 - (t >> 30)) / 4;
+ ret <<= 32;
+ ret += t;
+ return ret;
+}
+
+#endif
+
+
+void
+rte_delay_us(unsigned us)
+{
+ const uint64_t start = rte_get_timer_cycles();
+ const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6;
+ while ((rte_get_timer_cycles() - start) < ticks)
+ rte_pause();
+}
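+
+/*
+ * For example (illustrative numbers): with rte_get_timer_hz() == 2 GHz,
+ * rte_delay_us(5) busy-waits for about 5 * 2e9 / 1e6 = 10000 timer cycles.
+ */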
+
+uint64_t
+rte_get_tsc_hz(void)
+{
+ return eal_tsc_resolution_hz;
+}
+
+
+#ifdef RTE_LIBEAL_USE_HPET
+/*
+ * Open and mmap /dev/hpet (high precision event timer) that will
+ * provide our time reference.
+ */
+int
+rte_eal_hpet_init(int make_default)
+{
+ int fd, ret;
+
+ if (internal_config.no_hpet) {
+ RTE_LOG(INFO, EAL, "HPET is disabled\n");
+ return -1;
+ }
+
+ fd = open(DEV_HPET, O_RDONLY);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "ERROR: Cannot open "DEV_HPET": %s!\n",
+ strerror(errno));
+ internal_config.no_hpet = 1;
+ return -1;
+ }
+ eal_hpet = mmap(NULL, 1024, PROT_READ, MAP_SHARED, fd, 0);
+ if (eal_hpet == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "ERROR: Cannot mmap "DEV_HPET"!\n"
+ "Please enable CONFIG_HPET_MMAP in your kernel configuration "
+ "to allow HPET support.\n"
+ "To run without using HPET, set CONFIG_RTE_LIBEAL_USE_HPET=n "
+ "in your build configuration or use '--no-hpet' EAL flag.\n");
+ close(fd);
+ internal_config.no_hpet = 1;
+ return -1;
+ }
+ close(fd);
+
+ eal_hpet_resolution_fs = (uint32_t)((eal_hpet->capabilities &
+ CLK_PERIOD_MASK) >>
+ CLK_PERIOD_SHIFT);
+
+ eal_hpet_resolution_hz = (1000ULL*1000ULL*1000ULL*1000ULL*1000ULL) /
+ (uint64_t)eal_hpet_resolution_fs;
+
+ RTE_LOG(INFO, EAL, "HPET frequency is ~%"PRIu64" kHz\n",
+ eal_hpet_resolution_hz/1000);
+
+ eal_hpet_msb = (eal_hpet->counter_l >> 30);
+
+ /* create a thread that will increment a global variable for
+ * msb (hpet is 32 bits by default under linux) */
+ ret = pthread_create(&msb_inc_thread_id, NULL,
+ (void *(*)(void *))hpet_msb_inc, NULL);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "ERROR: Cannot create HPET timer thread!\n");
+ internal_config.no_hpet = 1;
+ return -1;
+ }
+
+ if (make_default)
+ eal_timer_source = EAL_TIMER_HPET;
+ return 0;
+}
+#endif
+
+static void
+check_tsc_flags(void)
+{
+ char line[512];
+ FILE *stream;
+
+ stream = fopen("/proc/cpuinfo", "r");
+ if (!stream) {
+ RTE_LOG(WARNING, EAL, "WARNING: Unable to open /proc/cpuinfo\n");
+ return;
+ }
+
+ while (fgets(line, sizeof line, stream)) {
+ char *constant_tsc;
+ char *nonstop_tsc;
+
+ if (strncmp(line, "flags", 5) != 0)
+ continue;
+
+ constant_tsc = strstr(line, "constant_tsc");
+ nonstop_tsc = strstr(line, "nonstop_tsc");
+ if (!constant_tsc || !nonstop_tsc)
+ RTE_LOG(WARNING, EAL,
+ "WARNING: cpu flags "
+ "constant_tsc=%s "
+ "nonstop_tsc=%s "
+ "-> using unreliable clock cycles !\n",
+ constant_tsc ? "yes":"no",
+ nonstop_tsc ? "yes":"no");
+ break;
+ }
+
+ fclose(stream);
+}
+
+static int
+set_tsc_freq_from_clock(void)
+{
+#if 0
+//CLOCK_MONOTONIC_RAW
+#define NS_PER_SEC 1E9
+
+ struct timespec sleeptime = {.tv_nsec = 5E8 }; /* 1/2 second */
+
+ struct timespec t_start, t_end;
+
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &t_start) == 0) {
+ uint64_t ns, end, start = rte_rdtsc();
+ nanosleep(&sleeptime,NULL);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &t_end);
+ end = rte_rdtsc();
+ ns = ((t_end.tv_sec - t_start.tv_sec) * NS_PER_SEC);
+ ns += (t_end.tv_nsec - t_start.tv_nsec);
+
+ double secs = (double)ns/NS_PER_SEC;
+ eal_tsc_resolution_hz = (uint64_t)((end - start)/secs);
+ return 0;
+ }
+#endif
+ return -1;
+}
+
+static void
+set_tsc_freq_fallback(void)
+{
+ RTE_LOG(WARNING, EAL, "WARNING: clock_gettime cannot use "
+ "CLOCK_MONOTONIC_RAW and HPET is not available"
+ " - clock timings may be less accurate.\n");
+ /* assume that the sleep(1) will sleep for 1 second */
+ uint64_t start = rte_rdtsc();
+ sleep(1);
+ eal_tsc_resolution_hz = rte_rdtsc() - start;
+}
+/*
+ * This function measures the TSC frequency. It uses a variety of approaches.
+ *
+ * 1. If kernel provides CLOCK_MONOTONIC_RAW we use that to tune the TSC value
+ * 2. If kernel does not provide that, and we have HPET support, tune using HPET
+ * 3. Lastly, if neither of the above can be used, just sleep for 1 second and
+ * tune off that, printing a warning about inaccuracy of timing
+ */
+static void
+set_tsc_freq(void)
+{
+ if (set_tsc_freq_from_clock() < 0)
+ set_tsc_freq_fallback();
+
+ RTE_LOG(INFO, EAL, "TSC frequency is ~%"PRIu64" KHz\n",
+ eal_tsc_resolution_hz/1000);
+}
+
+int
+rte_eal_timer_init(void)
+{
+
+ eal_timer_source = EAL_TIMER_TSC;
+
+ set_tsc_freq();
+ check_tsc_flags();
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_vfio.h b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_vfio.h
new file mode 100755
index 00000000..03e693e0
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_vfio.h
@@ -0,0 +1,55 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EAL_VFIO_H_
+#define EAL_VFIO_H_
+
+/*
+ * determine if VFIO is present on the system
+ */
+#ifdef RTE_EAL_VFIO
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+#include <linux/vfio.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+#define RTE_PCI_MSIX_TABLE_BIR 0x7
+#else
+#define RTE_PCI_MSIX_TABLE_BIR PCI_MSIX_TABLE_BIR
+#endif
+
+#define VFIO_PRESENT
+#endif /* kernel version */
+#endif /* RTE_EAL_VFIO */
+
+#endif /* EAL_VFIO_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_xen_memory.c b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_xen_memory.c
new file mode 100755
index 00000000..ee5cc2da
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/eal_xen_memory.c
@@ -0,0 +1,370 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/queue.h>
+#include <sys/file.h>
+#include <unistd.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_string_fns.h>
+
+#include "eal_private.h"
+#include "eal_internal_cfg.h"
+#include "eal_filesystem.h"
+#include <exec-env/rte_dom0_common.h>
+
+#define PAGE_SIZE RTE_PGSIZE_4K
+#define DEFAUL_DOM0_NAME "dom0-mem"
+
+static int xen_fd = -1;
+static const char sys_dir_path[] = "/sys/kernel/mm/dom0-mm/memsize-mB";
+
+/*
+ * Try to mmap *size bytes in /dev/zero. If it is successful, return the
+ * pointer to the mmap'd area and keep *size unmodified. Else, retry
+ * with a smaller zone: decrease *size by mem_size until it reaches
+ * 0. In this case, return NULL. Note: this function returns an address
+ * which is a multiple of mem_size.
+ */
+static void *
+xen_get_virtual_area(size_t *size, size_t mem_size)
+{
+ void *addr;
+ int fd;
+ long aligned_addr;
+
+ RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%zu bytes\n", *size);
+
+ fd = open("/dev/zero", O_RDONLY);
+ if (fd < 0){
+ RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
+ return NULL;
+ }
+ do {
+ addr = mmap(NULL, (*size) + mem_size, PROT_READ,
+ MAP_PRIVATE, fd, 0);
+ if (addr == MAP_FAILED)
+ *size -= mem_size;
+ } while (addr == MAP_FAILED && *size > 0);
+
+ if (addr == MAP_FAILED) {
+ close(fd);
+ RTE_LOG(INFO, EAL, "Cannot get a virtual area\n");
+ return NULL;
+ }
+
+ munmap(addr, (*size) + mem_size);
+ close(fd);
+
+ /* align addr to a mem_size boundary */
+ aligned_addr = (uintptr_t)addr;
+ aligned_addr = RTE_ALIGN_CEIL(aligned_addr, mem_size);
+ addr = (void *)(aligned_addr);
+
+ RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%zx)\n",
+ addr, *size);
+
+ return addr;
+}
+
+/**
+ * Get the memory size configuration from the memsize file under
+ * /sys/kernel/mm/dom0-mm/memsize-mB (sys_dir_path); the size unit is MB.
+ */
+static int
+get_xen_memory_size(void)
+{
+ char path[PATH_MAX];
+ unsigned long mem_size = 0;
+ static const char *file_name;
+
+ file_name = "memsize";
+ snprintf(path, sizeof(path), "%s/%s",
+ sys_dir_path, file_name);
+
+ if (eal_parse_sysfs_value(path, &mem_size) < 0)
+ return -1;
+
+ if (mem_size == 0)
+ rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s was not"
+ " configured.\n",sys_dir_path, file_name);
+ if (mem_size % 2)
+ rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s must be"
+ " even number.\n",sys_dir_path, file_name);
+
+ if (mem_size > DOM0_CONFIG_MEMSIZE)
+ rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s should not be larger"
+ " than %d mB\n",sys_dir_path, file_name, DOM0_CONFIG_MEMSIZE);
+
+ return mem_size;
+}
+
+/**
+ * Calculate the MFN for a physical address in Xen Dom0.
+ */
+phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+{
+ int mfn_id;
+ uint64_t mfn, mfn_offset;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg *memseg = mcfg->memseg;
+
+ mfn_id = (phy_addr - memseg[memseg_id].phys_addr) / RTE_PGSIZE_2M;
+
+ /* the MFNs are contiguous within a 2M block */
+ mfn_offset = (phy_addr - memseg[memseg_id].phys_addr) %
+ RTE_PGSIZE_2M / PAGE_SIZE;
+ mfn = mfn_offset + memseg[memseg_id].mfn[mfn_id];
+
+ /* return the machine address */
+ return (mfn * PAGE_SIZE + phy_addr % PAGE_SIZE);
+}
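+
+/*
+ * Worked example (hypothetical values): for phy_addr == memseg[id].phys_addr
+ * + 0x3456, mfn_id is 0 and mfn_offset is 3 (0x3456 / 4K), so the machine
+ * address is (memseg[id].mfn[0] + 3) * 4K + 0x456.
+ */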
+
+int
+rte_xen_dom0_memory_init(void)
+{
+ void *vir_addr, *vma_addr = NULL;
+ int err, ret = 0;
+ uint32_t i, requested, mem_size, memseg_idx, num_memseg = 0;
+ size_t vma_len = 0;
+ struct memory_info meminfo;
+ struct memseg_info seginfo[RTE_MAX_MEMSEG];
+ int flags, page_size = getpagesize();
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg *memseg = mcfg->memseg;
+ uint64_t total_mem = internal_config.memory;
+
+ memset(seginfo, 0, sizeof(seginfo));
+ memset(&meminfo, 0, sizeof(struct memory_info));
+
+ mem_size = get_xen_memory_size();
+ requested = (unsigned) (total_mem / 0x100000);
+ if (requested > mem_size)
+ /* if we didn't satisfy total memory requirements */
+ rte_exit(EXIT_FAILURE,"Not enough memory available! Requested: %uMB,"
+ " available: %uMB\n", requested, mem_size);
+ else if (total_mem != 0)
+ mem_size = requested;
+
+ /* Check FD and open once */
+ if (xen_fd < 0) {
+ xen_fd = open(DOM0_MM_DEV, O_RDWR);
+ if (xen_fd < 0) {
+ RTE_LOG(ERR, EAL, "Can not open %s\n",DOM0_MM_DEV);
+ return -1;
+ }
+ }
+
+ meminfo.size = mem_size;
+
+ /* construct memory management name for Dom0 */
+ snprintf(meminfo.name, DOM0_NAME_MAX, "%s-%s",
+ internal_config.hugefile_prefix, DEFAUL_DOM0_NAME);
+
+ /* Notify kernel driver to allocate memory */
+ ret = ioctl(xen_fd, RTE_DOM0_IOCTL_PREPARE_MEMSEG, &meminfo);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memory\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ /* Get number of memory segment from driver */
+ ret = ioctl(xen_fd, RTE_DOM0_IOCTL_GET_NUM_MEMSEG, &num_memseg);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memseg count.\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ if(num_memseg > RTE_MAX_MEMSEG){
+ RTE_LOG(ERR, EAL, "XEN DOM0: the memseg count %d is greater"
+ " than max memseg %d.\n",num_memseg, RTE_MAX_MEMSEG);
+ err = -EIO;
+ goto fail;
+ }
+
+ /* get information about all memory segments */
+ ret = ioctl(xen_fd, RTE_DOM0_IOCTL_GET_MEMSEG_INFO, seginfo);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memseg info.\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ /* map all memory segments to contiguous user space */
+ for (memseg_idx = 0; memseg_idx < num_memseg; memseg_idx++)
+ {
+ vma_len = seginfo[memseg_idx].size;
+
+ /**
+ * get the biggest virtual memory area up to vma_len. If it fails,
+ * vma_addr is NULL, so let the kernel provide the address.
+ */
+ vma_addr = xen_get_virtual_area(&vma_len, RTE_PGSIZE_2M);
+ if (vma_addr == NULL) {
+ flags = MAP_SHARED;
+ vma_len = RTE_PGSIZE_2M;
+ } else
+ flags = MAP_SHARED | MAP_FIXED;
+
+ seginfo[memseg_idx].size = vma_len;
+ vir_addr = mmap(vma_addr, seginfo[memseg_idx].size,
+ PROT_READ|PROT_WRITE, flags, xen_fd,
+ memseg_idx * page_size);
+ if (vir_addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "XEN DOM0:Could not mmap %s\n",
+ DOM0_MM_DEV);
+ err = -EIO;
+ goto fail;
+ }
+
+ memseg[memseg_idx].addr = vir_addr;
+ memseg[memseg_idx].phys_addr = page_size *
+ seginfo[memseg_idx].pfn ;
+ memseg[memseg_idx].len = seginfo[memseg_idx].size;
+ for ( i = 0; i < seginfo[memseg_idx].size / RTE_PGSIZE_2M; i++)
+ memseg[memseg_idx].mfn[i] = seginfo[memseg_idx].mfn[i];
+
+ /* MFNs are contiguous within 2M, so assume that the page size is 2M */
+ memseg[memseg_idx].hugepage_sz = RTE_PGSIZE_2M;
+
+ memseg[memseg_idx].nchannel = mcfg->nchannel;
+ memseg[memseg_idx].nrank = mcfg->nrank;
+
+ /* NUMA is not supported in Xen Dom0, so only set socket 0 */
+ memseg[memseg_idx].socket_id = 0;
+ }
+
+ return 0;
+fail:
+ if (xen_fd > 0) {
+ close(xen_fd);
+ xen_fd = -1;
+ }
+ return err;
+}
+
+/*
+ * This creates the memory mappings in the secondary process to match that of
+ * the primary process. It goes through each memory segment in the DPDK runtime
+ * configuration, mapping them in order to form a contiguous block in the
+ * virtual memory space
+ */
+int
+rte_xen_dom0_memory_attach(void)
+{
+ const struct rte_mem_config *mcfg;
+ unsigned s = 0; /* s used to track the segment number */
+ int xen_fd = -1;
+ int ret = -1;
+ void *vir_addr;
+ char name[DOM0_NAME_MAX] = {0};
+ int page_size = getpagesize();
+
+ mcfg = rte_eal_get_configuration()->mem_config;
+
+ /* Check FD and open once */
+ if (xen_fd < 0) {
+ xen_fd = open(DOM0_MM_DEV, O_RDWR);
+ if (xen_fd < 0) {
+ RTE_LOG(ERR, EAL, "Can not open %s\n",DOM0_MM_DEV);
+ goto error;
+ }
+ }
+
+ /* construct memory management name for Dom0 */
+ snprintf(name, DOM0_NAME_MAX, "%s-%s",
+ internal_config.hugefile_prefix, DEFAUL_DOM0_NAME);
+ /* attach to memory segments of primary process */
+ ret = ioctl(xen_fd, RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG, name);
+ if (ret) {
+ RTE_LOG(ERR, EAL,"attach memory segments fail.\n");
+ goto error;
+ }
+
+ /* map all segments into memory to make sure we get the addrs */
+ for (s = 0; s < RTE_MAX_MEMSEG; ++s) {
+
+ /*
+ * the first memory segment with len==0 is the one that
+ * follows the last valid segment.
+ */
+ if (mcfg->memseg[s].len == 0)
+ break;
+
+ vir_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
+ PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FIXED, xen_fd,
+ s * page_size);
+ if (vir_addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
+ "in %s to requested address [%p]\n",
+ (unsigned long long)mcfg->memseg[s].len, DOM0_MM_DEV,
+ mcfg->memseg[s].addr);
+ goto error;
+ }
+ }
+ return 0;
+
+error:
+ if (xen_fd >= 0) {
+ close(xen_fd);
+ xen_fd = -1;
+ }
+ return -1;
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h b/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h
new file mode 100755
index 00000000..d9707780
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h
@@ -0,0 +1,108 @@
+/*-
+ * This file is provided under a dual BSD/LGPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GNU LESSER GENERAL PUBLIC LICENSE
+ *
+ * Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _RTE_DOM0_COMMON_H_
+#define _RTE_DOM0_COMMON_H_
+
+#ifdef __KERNEL__
+#include <linux/if.h>
+#endif
+
+#define DOM0_NAME_MAX 256
+#define DOM0_MM_DEV "/dev/dom0_mm"
+
+#define DOM0_CONTIG_NUM_ORDER 9 /**< order of 2M */
+#define DOM0_NUM_MEMSEG 512 /**< Maximum nb. of memory segment. */
+#define DOM0_MEMBLOCK_SIZE 0x200000 /**< size of memory block(2M). */
+#define DOM0_CONFIG_MEMSIZE 4096 /**< Maximum config memory size(4G). */
+#define DOM0_NUM_MEMBLOCK (DOM0_CONFIG_MEMSIZE / 2) /**< Maximum nb. of 2M memory block. */
+
+#define RTE_DOM0_IOCTL_PREPARE_MEMSEG _IOWR(0, 1 , struct memory_info)
+#define RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG _IOWR(0, 2 , char *)
+#define RTE_DOM0_IOCTL_GET_NUM_MEMSEG _IOWR(0, 3, int)
+#define RTE_DOM0_IOCTL_GET_MEMSEG_INFO _IOWR(0, 4, void *)
+
+/**
+ * A structure used to store memory information.
+ */
+struct memory_info {
+ char name[DOM0_NAME_MAX];
+ uint64_t size;
+};
+
+/**
+ * A structure used to store memory segment information.
+ */
+struct memseg_info {
+ uint32_t idx;
+ uint64_t pfn;
+ uint64_t size;
+ uint64_t mfn[DOM0_NUM_MEMBLOCK];
+};
+
+/**
+ * A structure used to store memory block information.
+ */
+struct memblock_info {
+ uint8_t exchange_flag;
+ uint8_t used;
+ uint64_t vir_addr;
+ uint64_t pfn;
+ uint64_t mfn;
+};
+#endif /* _RTE_DOM0_COMMON_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h b/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
new file mode 100755
index 00000000..23eafd96
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
@@ -0,0 +1,58 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_INTERRUPTS_H_
+#error "don't include this file directly, please include generic <rte_interrupts.h>"
+#endif
+
+#ifndef _RTE_LINUXAPP_INTERRUPTS_H_
+#define _RTE_LINUXAPP_INTERRUPTS_H_
+
+enum rte_intr_handle_type {
+ RTE_INTR_HANDLE_UNKNOWN = 0,
+ RTE_INTR_HANDLE_UIO, /**< uio device handle */
+ RTE_INTR_HANDLE_VFIO_LEGACY, /**< vfio device handle (legacy) */
+ RTE_INTR_HANDLE_VFIO_MSI, /**< vfio device handle (MSI) */
+ RTE_INTR_HANDLE_VFIO_MSIX, /**< vfio device handle (MSIX) */
+ RTE_INTR_HANDLE_ALARM, /**< alarm handle */
+ RTE_INTR_HANDLE_MAX
+};
+
+/** Handle for interrupts. */
+struct rte_intr_handle {
+ int vfio_dev_fd; /**< VFIO device file descriptor */
+ int fd; /**< file descriptor */
+ enum rte_intr_handle_type type; /**< handle type */
+};
+
+#endif /* _RTE_LINUXAPP_INTERRUPTS_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
new file mode 100755
index 00000000..1e55c2d9
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -0,0 +1,174 @@
+/*-
+ * This file is provided under a dual BSD/LGPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GNU LESSER GENERAL PUBLIC LICENSE
+ *
+ * Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _RTE_KNI_COMMON_H_
+#define _RTE_KNI_COMMON_H_
+
+#ifdef __KERNEL__
+#include <linux/if.h>
+#endif
+
+/**
+ * KNI name is part of memzone name.
+ */
+#define RTE_KNI_NAMESIZE 32
+
+#ifndef RTE_CACHE_LINE_SIZE
+#define RTE_CACHE_LINE_SIZE 64 /**< Cache line size. */
+#endif
+
+/*
+ * Request id.
+ */
+enum rte_kni_req_id {
+ RTE_KNI_REQ_UNKNOWN = 0,
+ RTE_KNI_REQ_CHANGE_MTU,
+ RTE_KNI_REQ_CFG_NETWORK_IF,
+ RTE_KNI_REQ_MAX,
+};
+
+/*
+ * Structure for KNI request.
+ */
+struct rte_kni_request {
+ uint32_t req_id; /**< Request id */
+ union {
+ uint32_t new_mtu; /**< New MTU */
+ uint8_t if_up; /**< 1: interface up, 0: interface down */
+ };
+ int32_t result; /**< Result for processing request */
+} __attribute__((__packed__));
+
+/*
+ * FIFO struct mapped in shared memory. It describes a circular buffer:
+ * the write and read indices wrap around, the FIFO is empty when write == read,
+ * and a write must never overwrite the position still to be read.
+ */
+struct rte_kni_fifo {
+ volatile unsigned write; /**< Next position to be written*/
+ volatile unsigned read; /**< Next position to be read */
+ unsigned len; /**< Circular buffer length */
+ unsigned elem_size; /**< Pointer size - for 32/64 bit OS */
+ void * volatile buffer[0]; /**< The buffer contains mbuf pointers */
+};
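/*
 * Editor's sketch (not part of the patch): a minimal producer for this FIFO,
 * written only from the invariants stated above -- indices wrap around, the
 * FIFO is empty when write == read, and a write must never reach the slot the
 * consumer has yet to read. It assumes 'len' is a power of two, as the DPDK
 * FIFO setup code arranges; the helper name is illustrative.
 */
static inline unsigned
kni_fifo_put_sketch(struct rte_kni_fifo *fifo, void **data, unsigned num)
{
	unsigned i;
	unsigned write = fifo->write;
	const unsigned read = fifo->read;

	for (i = 0; i < num; i++) {
		/* next slot, with wrap-around */
		unsigned next = (write + 1) & (fifo->len - 1);

		/* full: advancing further would collide with the read position */
		if (next == read)
			break;
		fifo->buffer[write] = data[i];
		write = next;
	}
	fifo->write = write;	/* publish the new write index last */
	return i;		/* number of pointers actually enqueued */
}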
+
+/*
+ * The kernel's image of the rte_mbuf struct, containing only the relevant fields.
+ * Padding is necessary to keep the offsets of these fields in sync with rte_mbuf.
+ */
+struct rte_kni_mbuf {
+ void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
+ char pad0[10];
+ uint16_t data_off; /**< Start address of data in segment buffer. */
+ char pad1[4];
+ uint64_t ol_flags; /**< Offload features. */
+ char pad2[2];
+ uint16_t data_len; /**< Amount of data in segment buffer. */
+ uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
+
+ /* fields on second cache line */
+ char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
+ void *pool;
+ void *next;
+};
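/*
 * Editor's sketch (not part of the patch): the padding above exists only so
 * that these fields land at the same offsets as in the real struct rte_mbuf.
 * In user-space code, where <rte_mbuf.h> and <stddef.h> are available, that
 * contract could be verified at build time along these lines (the macro name
 * is illustrative, not a DPDK API):
 */
#define KNI_MBUF_OFFSET_CHECK(f) \
	typedef char kni_mbuf_off_##f[ \
		(offsetof(struct rte_kni_mbuf, f) == \
		 offsetof(struct rte_mbuf, f)) ? 1 : -1]

KNI_MBUF_OFFSET_CHECK(data_off);
KNI_MBUF_OFFSET_CHECK(ol_flags);
KNI_MBUF_OFFSET_CHECK(data_len);
KNI_MBUF_OFFSET_CHECK(pkt_len);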
+
+/*
+ * Struct used to create a KNI device. Passed to the kernel in an IOCTL call.
+ */
+
+struct rte_kni_device_info {
+ char name[RTE_KNI_NAMESIZE]; /**< Network device name for KNI */
+
+ phys_addr_t tx_phys;
+ phys_addr_t rx_phys;
+ phys_addr_t alloc_phys;
+ phys_addr_t free_phys;
+
+ /* Used by Ethtool */
+ phys_addr_t req_phys;
+ phys_addr_t resp_phys;
+ phys_addr_t sync_phys;
+ void * sync_va;
+
+ /* mbuf mempool */
+ void * mbuf_va;
+ phys_addr_t mbuf_phys;
+
+ /* PCI info */
+ uint16_t vendor_id; /**< Vendor ID or PCI_ANY_ID. */
+ uint16_t device_id; /**< Device ID or PCI_ANY_ID. */
+ uint8_t bus; /**< Device bus */
+ uint8_t devid; /**< Device ID */
+ uint8_t function; /**< Device function. */
+
+ uint16_t group_id; /**< Group ID */
+ uint32_t core_id; /**< core ID to bind for kernel thread */
+
+ uint8_t force_bind : 1; /**< Flag for kernel thread binding */
+
+ /* mbuf size */
+ unsigned mbuf_size;
+};
+
+#define KNI_DEVICE "kni"
+
+#define RTE_KNI_IOCTL_TEST _IOWR(0, 1, int)
+#define RTE_KNI_IOCTL_CREATE _IOWR(0, 2, struct rte_kni_device_info)
+#define RTE_KNI_IOCTL_RELEASE _IOWR(0, 3, struct rte_kni_device_info)
+
+#endif /* _RTE_KNI_COMMON_H_ */
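The defines above are the entire user/kernel contract for creating and releasing
KNI devices. As a rough illustration (not the librte_kni implementation; the
helper name and error handling are invented, and a real caller also fills in the
FIFO, request/response and mempool addresses before the call), the user-space
side of RTE_KNI_IOCTL_CREATE could look like this:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int
kni_create_sketch(const struct rte_kni_device_info *info)
{
	/* /dev/kni is the misc device registered by the rte_kni kernel module */
	int fd = open("/dev/" KNI_DEVICE, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, RTE_KNI_IOCTL_CREATE, info);
	close(fd);
	return ret;
}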
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/Makefile b/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/Makefile
new file mode 100755
index 00000000..ec6e702f
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/Makefile
@@ -0,0 +1,53 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# module name and path
+#
+MODULE = igb_uio
+MODULE_PATH = drivers/net/igb_uio
+
+#
+# CFLAGS
+#
+MODULE_CFLAGS += -I$(SRCDIR) --param max-inline-insns-single=100
+MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
+MODULE_CFLAGS += -Winline -Wall -Werror
+MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-y := igb_uio.c
+
+include $(RTE_SDK)/mk/rte.module.mk
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/compat.h b/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/compat.h
new file mode 100755
index 00000000..c1d45a66
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/compat.h
@@ -0,0 +1,116 @@
+/*
+ * Minimal wrappers to allow compiling igb_uio on older kernels.
+ */
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+#define pci_cfg_access_lock pci_block_user_cfg_access
+#define pci_cfg_access_unlock pci_unblock_user_cfg_access
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
+#define HAVE_PTE_MASK_PAGE_IOMAP
+#endif
+
+#ifndef PCI_MSIX_ENTRY_SIZE
+#define PCI_MSIX_ENTRY_SIZE 16
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0
+#define PCI_MSIX_ENTRY_UPPER_ADDR 4
+#define PCI_MSIX_ENTRY_DATA 8
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
+#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34) && \
+ (!(defined(RHEL_RELEASE_CODE) && \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 9)))
+
+static int pci_num_vf(struct pci_dev *dev)
+{
+ struct iov {
+ int pos;
+ int nres;
+ u32 cap;
+ u16 ctrl;
+ u16 total;
+ u16 initial;
+ u16 nr_virtfn;
+ } *iov = (struct iov *)dev->sriov;
+
+ if (!dev->is_physfn)
+ return 0;
+
+ return iov->nr_virtfn;
+}
+
+#endif /* < 2.6.34 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && \
+ (!(defined(RHEL_RELEASE_CODE) && \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
+
+#define kstrtoul strict_strtoul
+
+#endif /* < 2.6.39 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0) && \
+ (!(defined(RHEL_RELEASE_CODE) && \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)))
+
+/* Check if INTX works to control IRQs.
+ * Sets the INTX_DISABLE flag and reads it back.
+ */
+static bool pci_intx_mask_supported(struct pci_dev *pdev)
+{
+ bool mask_supported = false;
+ uint16_t orig, new;
+
+ pci_block_user_cfg_access(pdev);
+ pci_read_config_word(pdev, PCI_COMMAND, &orig);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ orig ^ PCI_COMMAND_INTX_DISABLE);
+ pci_read_config_word(pdev, PCI_COMMAND, &new);
+
+ if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
+ dev_err(&pdev->dev, "Command register changed from "
+ "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
+ } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
+ mask_supported = true;
+ pci_write_config_word(pdev, PCI_COMMAND, orig);
+ }
+ pci_unblock_user_cfg_access(pdev);
+
+ return mask_supported;
+}
+
+static bool pci_check_and_mask_intx(struct pci_dev *pdev)
+{
+ bool pending;
+ uint32_t status;
+
+ pci_block_user_cfg_access(pdev);
+ pci_read_config_dword(pdev, PCI_COMMAND, &status);
+
+ /* if the interrupt is not ours, leave the device untouched */
+ pending = (((status >> 16) & PCI_STATUS_INTERRUPT) != 0);
+ if (pending) {
+ uint16_t old, new;
+
+ old = status;
+ if (status != 0)
+ new = old & (~PCI_COMMAND_INTX_DISABLE);
+ else
+ new = old | PCI_COMMAND_INTX_DISABLE;
+
+ if (old != new)
+ pci_write_config_word(pdev, PCI_COMMAND, new);
+ }
+ pci_unblock_user_cfg_access(pdev);
+
+ return pending;
+}
+
+#endif /* < 3.3.0 */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/igb_uio.c b/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/igb_uio.c
new file mode 100755
index 00000000..ba1364b7
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/igb_uio/igb_uio.c
@@ -0,0 +1,643 @@
+/*-
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/uio_driver.h>
+#include <linux/io.h>
+#include <linux/msi.h>
+#include <linux/version.h>
+
+#ifdef CONFIG_XEN_DOM0
+#include <xen/xen.h>
+#endif
+#include <rte_pci_dev_features.h>
+
+#include "compat.h"
+
+#ifdef RTE_PCI_CONFIG
+#define PCI_SYS_FILE_BUF_SIZE 10
+#define PCI_DEV_CAP_REG 0xA4
+#define PCI_DEV_CTRL_REG 0xA8
+#define PCI_DEV_CAP_EXT_TAG_MASK 0x20
+#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
+#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
+#endif
+
+/**
+ * A structure describing the private information for a uio device.
+ */
+struct rte_uio_pci_dev {
+ struct uio_info info;
+ struct pci_dev *pdev;
+ enum rte_intr_mode mode;
+};
+
+static char *intr_mode = NULL;
+static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
+
+static inline struct rte_uio_pci_dev *
+igbuio_get_uio_pci_dev(struct uio_info *info)
+{
+ return container_of(info, struct rte_uio_pci_dev, info);
+}
+
+/* sriov sysfs */
+static ssize_t
+show_max_vfs(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, 10, "%u\n",
+ pci_num_vf(container_of(dev, struct pci_dev, dev)));
+}
+
+static ssize_t
+store_max_vfs(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err = 0;
+ unsigned long max_vfs;
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+
+ if (0 != kstrtoul(buf, 0, &max_vfs))
+ return -EINVAL;
+
+ if (0 == max_vfs)
+ pci_disable_sriov(pdev);
+ else if (0 == pci_num_vf(pdev))
+ err = pci_enable_sriov(pdev, max_vfs);
+ else /* changing max_vfs while VFs are already enabled is not allowed */
+ err = -EINVAL;
+
+ return err ? err : count;
+}
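/*
 * Editor's sketch (not part of the patch): store_max_vfs() above is reached by
 * writing a VF count to the device's sysfs attribute, i.e. a write to
 * /sys/bus/pci/devices/<BDF>/max_vfs. The helper below is illustrative and
 * assumes the default sysfs layout.
 */
#include <stdio.h>

static int
igbuio_set_max_vfs_sketch(const char *bdf, unsigned int num_vfs)
{
	char path[128];
	FILE *f;

	/* bdf is a PCI address such as "0000:03:00.0" */
	snprintf(path, sizeof(path), "/sys/bus/pci/devices/%s/max_vfs", bdf);
	f = fopen(path, "w");
	if (f == NULL)
		return -1;
	fprintf(f, "%u\n", num_vfs);
	return fclose(f) == 0 ? 0 : -1;
}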
+
+#ifdef RTE_PCI_CONFIG
+static ssize_t
+show_extended_tag(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ uint32_t val = 0;
+
+ pci_read_config_dword(pci_dev, PCI_DEV_CAP_REG, &val);
+ if (!(val & PCI_DEV_CAP_EXT_TAG_MASK)) /* Not supported */
+ return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%s\n", "invalid");
+
+ val = 0;
+ pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CTRL_REG, &val);
+
+ return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%s\n",
+ (val & PCI_DEV_CTRL_EXT_TAG_MASK) ? "on" : "off");
+}
+
+static ssize_t
+store_extended_tag(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ uint32_t val = 0, enable;
+
+ if (strncmp(buf, "on", 2) == 0)
+ enable = 1;
+ else if (strncmp(buf, "off", 3) == 0)
+ enable = 0;
+ else
+ return -EINVAL;
+
+ pci_cfg_access_lock(pci_dev);
+ pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CAP_REG, &val);
+ if (!(val & PCI_DEV_CAP_EXT_TAG_MASK)) { /* Not supported */
+ pci_cfg_access_unlock(pci_dev);
+ return -EPERM;
+ }
+
+ val = 0;
+ pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CTRL_REG, &val);
+ if (enable)
+ val |= PCI_DEV_CTRL_EXT_TAG_MASK;
+ else
+ val &= ~PCI_DEV_CTRL_EXT_TAG_MASK;
+ pci_bus_write_config_dword(pci_dev->bus, pci_dev->devfn,
+ PCI_DEV_CTRL_REG, val);
+ pci_cfg_access_unlock(pci_dev);
+
+ return count;
+}
+
+static ssize_t
+show_max_read_request_size(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ int val = pcie_get_readrq(pci_dev);
+
+ return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%d\n", val);
+}
+
+static ssize_t
+store_max_read_request_size(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
+ unsigned long size = 0;
+ int ret;
+
+ if (0 != kstrtoul(buf, 0, &size))
+ return -EINVAL;
+
+ ret = pcie_set_readrq(pci_dev, (int)size);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+#endif
+
+static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);
+#ifdef RTE_PCI_CONFIG
+static DEVICE_ATTR(extended_tag, S_IRUGO | S_IWUSR, show_extended_tag,
+ store_extended_tag);
+static DEVICE_ATTR(max_read_request_size, S_IRUGO | S_IWUSR,
+ show_max_read_request_size, store_max_read_request_size);
+#endif
+
+static struct attribute *dev_attrs[] = {
+ &dev_attr_max_vfs.attr,
+#ifdef RTE_PCI_CONFIG
+ &dev_attr_extended_tag.attr,
+ &dev_attr_max_read_request_size.attr,
+#endif
+ NULL,
+};
+
+static const struct attribute_group dev_attr_grp = {
+ .attrs = dev_attrs,
+};
+/*
+ * Mask or unmask the generation of MSI-X messages for the given vector.
+ */
+static void
+igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state)
+{
+ u32 mask_bits = desc->masked;
+ unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ if (state != 0)
+ mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ else
+ mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+ if (mask_bits != desc->masked) {
+ writel(mask_bits, desc->mask_base + offset);
+ readl(desc->mask_base);
+ desc->masked = mask_bits;
+ }
+}
+
+/**
+ * This is the irqcontrol callback to be registered to uio_info.
+ * It can be used to disable/enable interrupts from user-space processes.
+ *
+ * @param info
+ * pointer to uio_info.
+ * @param irq_state
+ * state value. 1 to enable interrupt, 0 to disable interrupt.
+ *
+ * @return
+ * - On success, 0.
+ * - On failure, a negative value.
+ */
+static int
+igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
+{
+ struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
+ struct pci_dev *pdev = udev->pdev;
+
+ pci_cfg_access_lock(pdev);
+ if (udev->mode == RTE_INTR_MODE_LEGACY)
+ pci_intx(pdev, !!irq_state);
+
+ else if (udev->mode == RTE_INTR_MODE_MSIX) {
+ struct msi_desc *desc;
+
+ list_for_each_entry(desc, &pdev->msi_list, list)
+ igbuio_msix_mask_irq(desc, irq_state);
+ }
+ pci_cfg_access_unlock(pdev);
+
+ return 0;
+}
+
+/**
+ * This is the interrupt handler. It checks whether the interrupt belongs to
+ * this device; if so, the interrupt is disabled here and re-enabled later
+ * from user space.
+ */
+static irqreturn_t
+igbuio_pci_irqhandler(int irq, struct uio_info *info)
+{
+ struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
+
+ /* Legacy mode needs the interrupt masked in hardware */
+ if (udev->mode == RTE_INTR_MODE_LEGACY &&
+ !pci_check_and_mask_intx(udev->pdev))
+ return IRQ_NONE;
+
+ /* Message-signalled mode: the IRQ is not shared and is automasked */
+ return IRQ_HANDLED;
+}
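/*
 * Editor's sketch (not part of the patch): the two callbacks above are driven
 * through the standard Linux UIO file interface. Writing a 32-bit value to the
 * /dev/uioN file descriptor invokes igbuio_pci_irqcontrol() with that value,
 * and a blocking read returns once igbuio_pci_irqhandler() has reported an
 * interrupt. The helper names below are illustrative.
 */
#include <stdint.h>
#include <unistd.h>

static int
uio_intr_enable_sketch(int uio_fd)
{
	uint32_t enable = 1;	/* non-zero -> irq_state != 0 -> unmask/enable */

	if (write(uio_fd, &enable, sizeof(enable)) != (ssize_t)sizeof(enable))
		return -1;
	return 0;
}

static int
uio_intr_wait_sketch(int uio_fd, uint32_t *count)
{
	/* blocks until an interrupt arrives; *count is the running event count */
	if (read(uio_fd, count, sizeof(*count)) != (ssize_t)sizeof(*count))
		return -1;
	return 0;
}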
+
+#ifdef CONFIG_XEN_DOM0
+static int
+igbuio_dom0_mmap_phys(struct uio_info *info, struct vm_area_struct *vma)
+{
+ int idx;
+
+ idx = (int)vma->vm_pgoff;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#ifdef HAVE_PTE_MASK_PAGE_IOMAP
+ vma->vm_page_prot.pgprot |= _PAGE_IOMAP;
+#endif
+
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ info->mem[idx].addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+/**
+ * This is the uio device mmap method, which uses the igbuio mmap helper in a
+ * Xen Dom0 environment.
+ */
+static int
+igbuio_dom0_pci_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+ int idx;
+
+ if (vma->vm_pgoff >= MAX_UIO_MAPS)
+ return -EINVAL;
+
+ if (info->mem[vma->vm_pgoff].size == 0)
+ return -EINVAL;
+
+ idx = (int)vma->vm_pgoff;
+ switch (info->mem[idx].memtype) {
+ case UIO_MEM_PHYS:
+ return igbuio_dom0_mmap_phys(info, vma);
+ case UIO_MEM_LOGICAL:
+ case UIO_MEM_VIRTUAL:
+ default:
+ return -EINVAL;
+ }
+}
+#endif
+
+/* Remap pci resources described by bar #pci_bar in uio resource n. */
+static int
+igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
+ int n, int pci_bar, const char *name)
+{
+ unsigned long addr, len;
+ void *internal_addr;
+
+ if (sizeof(info->mem) / sizeof(info->mem[0]) <= n)
+ return -EINVAL;
+
+ addr = pci_resource_start(dev, pci_bar);
+ len = pci_resource_len(dev, pci_bar);
+ if (addr == 0 || len == 0)
+ return -1;
+ internal_addr = ioremap(addr, len);
+ if (internal_addr == NULL)
+ return -1;
+ info->mem[n].name = name;
+ info->mem[n].addr = addr;
+ info->mem[n].internal_addr = internal_addr;
+ info->mem[n].size = len;
+ info->mem[n].memtype = UIO_MEM_PHYS;
+ return 0;
+}
+
+/* Get pci port io resources described by bar #pci_bar in uio resource n. */
+static int
+igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
+ int n, int pci_bar, const char *name)
+{
+ unsigned long addr, len;
+
+ if (sizeof(info->port) / sizeof(info->port[0]) <= n)
+ return -EINVAL;
+
+ addr = pci_resource_start(dev, pci_bar);
+ len = pci_resource_len(dev, pci_bar);
+ if (addr == 0 || len == 0)
+ return -EINVAL;
+
+ info->port[n].name = name;
+ info->port[n].start = addr;
+ info->port[n].size = len;
+ info->port[n].porttype = UIO_PORT_X86;
+
+ return 0;
+}
+
+/* Unmap previously ioremap'd resources */
+static void
+igbuio_pci_release_iomem(struct uio_info *info)
+{
+ int i;
+
+ for (i = 0; i < MAX_UIO_MAPS; i++) {
+ if (info->mem[i].internal_addr)
+ iounmap(info->mem[i].internal_addr);
+ }
+}
+
+static int
+igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
+{
+ int i, iom, iop, ret;
+ unsigned long flags;
+ static const char *bar_names[PCI_STD_RESOURCE_END + 1] = {
+ "BAR0",
+ "BAR1",
+ "BAR2",
+ "BAR3",
+ "BAR4",
+ "BAR5",
+ };
+
+ iom = 0;
+ iop = 0;
+
+ for (i = 0; i != sizeof(bar_names) / sizeof(bar_names[0]); i++) {
+ if (pci_resource_len(dev, i) != 0 &&
+ pci_resource_start(dev, i) != 0) {
+ flags = pci_resource_flags(dev, i);
+ if (flags & IORESOURCE_MEM) {
+ ret = igbuio_pci_setup_iomem(dev, info, iom,
+ i, bar_names[i]);
+ if (ret != 0)
+ return ret;
+ iom++;
+ } else if (flags & IORESOURCE_IO) {
+ ret = igbuio_pci_setup_ioport(dev, info, iop,
+ i, bar_names[i]);
+ if (ret != 0)
+ return ret;
+ iop++;
+ }
+ }
+ }
+
+ return (iom != 0) ? ret : -ENOENT;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
+static int __devinit
+#else
+static int
+#endif
+igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct rte_uio_pci_dev *udev;
+ struct msix_entry msix_entry;
+ int err;
+
+ udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
+ if (!udev)
+ return -ENOMEM;
+
+ /*
+ * enable device: ask low-level code to enable I/O and
+ * memory
+ */
+ err = pci_enable_device(dev);
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot enable PCI device\n");
+ goto fail_free;
+ }
+
+ /*
+ * reserve device's PCI memory regions for use by this
+ * module
+ */
+ err = pci_request_regions(dev, "igb_uio");
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot request regions\n");
+ goto fail_disable;
+ }
+
+ /* enable bus mastering on the device */
+ pci_set_master(dev);
+
+ /* remap IO memory */
+ err = igbuio_setup_bars(dev, &udev->info);
+ if (err != 0)
+ goto fail_release_iomem;
+
+ /* set 64-bit DMA mask */
+ err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot set DMA mask\n");
+ goto fail_release_iomem;
+ }
+
+ err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
+ if (err != 0) {
+ dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
+ goto fail_release_iomem;
+ }
+
+ /* fill uio infos */
+ udev->info.name = "igb_uio";
+ udev->info.version = "0.1";
+ udev->info.handler = igbuio_pci_irqhandler;
+ udev->info.irqcontrol = igbuio_pci_irqcontrol;
+#ifdef CONFIG_XEN_DOM0
+ /* check if the driver runs on Xen Dom0 */
+ if (xen_initial_domain())
+ udev->info.mmap = igbuio_dom0_pci_mmap;
+#endif
+ udev->info.priv = udev;
+ udev->pdev = dev;
+
+ switch (igbuio_intr_mode_preferred) {
+ case RTE_INTR_MODE_MSIX:
+ /* Only 1 msi-x vector needed */
+ msix_entry.entry = 0;
+ if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
+ dev_dbg(&dev->dev, "using MSI-X");
+ udev->info.irq = msix_entry.vector;
+ udev->mode = RTE_INTR_MODE_MSIX;
+ break;
+ }
+ /* fall back to INTX */
+ case RTE_INTR_MODE_LEGACY:
+ if (pci_intx_mask_supported(dev)) {
+ dev_dbg(&dev->dev, "using INTX");
+ udev->info.irq_flags = IRQF_SHARED;
+ udev->info.irq = dev->irq;
+ udev->mode = RTE_INTR_MODE_LEGACY;
+ break;
+ }
+ dev_notice(&dev->dev, "PCI INTX mask not supported\n");
+ /* fall back to no IRQ */
+ case RTE_INTR_MODE_NONE:
+ udev->mode = RTE_INTR_MODE_NONE;
+ udev->info.irq = 0;
+ break;
+
+ default:
+ dev_err(&dev->dev, "invalid IRQ mode %u",
+ igbuio_intr_mode_preferred);
+ err = -EINVAL;
+ goto fail_release_iomem;
+ }
+
+ err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
+ if (err != 0)
+ goto fail_release_iomem;
+
+ /* register uio driver */
+ err = uio_register_device(&dev->dev, &udev->info);
+ if (err != 0)
+ goto fail_remove_group;
+
+ pci_set_drvdata(dev, udev);
+
+ dev_info(&dev->dev, "uio device registered with irq %lx\n",
+ udev->info.irq);
+
+ return 0;
+
+fail_remove_group:
+ sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
+fail_release_iomem:
+ igbuio_pci_release_iomem(&udev->info);
+ if (udev->mode == RTE_INTR_MODE_MSIX)
+ pci_disable_msix(udev->pdev);
+ pci_release_regions(dev);
+fail_disable:
+ pci_disable_device(dev);
+fail_free:
+ kfree(udev);
+
+ return err;
+}
+
+static void
+igbuio_pci_remove(struct pci_dev *dev)
+{
+ struct uio_info *info = pci_get_drvdata(dev);
+ struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
+
+ if (info->priv == NULL) {
+ pr_notice("Not igbuio device\n");
+ return;
+ }
+
+ sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
+ uio_unregister_device(info);
+ igbuio_pci_release_iomem(info);
+ if (udev->mode == RTE_INTR_MODE_MSIX)
+ pci_disable_msix(dev);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ pci_set_drvdata(dev, NULL);
+ kfree(info);
+}
+
+static int
+igbuio_config_intr_mode(char *intr_str)
+{
+ if (!intr_str) {
+ pr_info("Use MSIX interrupt by default\n");
+ return 0;
+ }
+
+ if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
+ igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
+ pr_info("Use MSIX interrupt\n");
+ } else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
+ igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
+ pr_info("Use legacy interrupt\n");
+ } else {
+ pr_info("Error: bad parameter - %s\n", intr_str);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct pci_driver igbuio_pci_driver = {
+ .name = "igb_uio",
+ .id_table = NULL,
+ .probe = igbuio_pci_probe,
+ .remove = igbuio_pci_remove,
+};
+
+static int __init
+igbuio_pci_init_module(void)
+{
+ int ret;
+
+ ret = igbuio_config_intr_mode(intr_mode);
+ if (ret < 0)
+ return ret;
+
+ return pci_register_driver(&igbuio_pci_driver);
+}
+
+static void __exit
+igbuio_pci_exit_module(void)
+{
+ pci_unregister_driver(&igbuio_pci_driver);
+}
+
+module_init(igbuio_pci_init_module);
+module_exit(igbuio_pci_exit_module);
+
+module_param(intr_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(intr_mode,
+"igb_uio interrupt mode (default=msix):\n"
+" " RTE_INTR_MODE_MSIX_NAME " Use MSIX interrupt\n"
+" " RTE_INTR_MODE_LEGACY_NAME " Use Legacy interrupt\n"
+"\n");
+
+MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/Makefile b/src/dpdk_lib18/librte_eal/linuxapp/kni/Makefile
new file mode 100755
index 00000000..fb673d93
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/Makefile
@@ -0,0 +1,93 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# module name and path
+#
+MODULE = rte_kni
+
+#
+# CFLAGS
+#
+MODULE_CFLAGS += -I$(SRCDIR) --param max-inline-insns-single=50
+MODULE_CFLAGS += -I$(RTE_OUTPUT)/include -I$(SRCDIR)/ethtool/ixgbe -I$(SRCDIR)/ethtool/igb
+MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
+MODULE_CFLAGS += -Wall -Werror
+
+ifeq ($(shell test -f /proc/version_signature && lsb_release -si 2>/dev/null),Ubuntu)
+MODULE_CFLAGS += -DUBUNTU_RELEASE_CODE=$(shell lsb_release -sr | tr -d .)
+UBUNTU_KERNEL_CODE := $(shell cut -d' ' -f2 /proc/version_signature | \
+ cut -d'~' -f1 | cut -d- -f1,2 | tr .- $(comma))
+MODULE_CFLAGS += -D"UBUNTU_KERNEL_CODE=UBUNTU_KERNEL_VERSION($(UBUNTU_KERNEL_CODE))"
+endif
+
+# this lib needs main eal
+DEPDIRS-y += lib/librte_eal/linuxapp/eal
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-y := ethtool/ixgbe/ixgbe_main.c
+SRCS-y += ethtool/ixgbe/ixgbe_api.c
+SRCS-y += ethtool/ixgbe/ixgbe_common.c
+SRCS-y += ethtool/ixgbe/ixgbe_ethtool.c
+SRCS-y += ethtool/ixgbe/ixgbe_82599.c
+SRCS-y += ethtool/ixgbe/ixgbe_82598.c
+SRCS-y += ethtool/ixgbe/ixgbe_x540.c
+SRCS-y += ethtool/ixgbe/ixgbe_phy.c
+SRCS-y += ethtool/ixgbe/kcompat.c
+
+SRCS-y += ethtool/igb/e1000_82575.c
+SRCS-y += ethtool/igb/e1000_i210.c
+SRCS-y += ethtool/igb/e1000_api.c
+SRCS-y += ethtool/igb/e1000_mac.c
+SRCS-y += ethtool/igb/e1000_manage.c
+SRCS-y += ethtool/igb/e1000_mbx.c
+SRCS-y += ethtool/igb/e1000_nvm.c
+SRCS-y += ethtool/igb/e1000_phy.c
+SRCS-y += ethtool/igb/igb_ethtool.c
+SRCS-y += ethtool/igb/igb_hwmon.c
+SRCS-y += ethtool/igb/igb_main.c
+SRCS-y += ethtool/igb/igb_debugfs.c
+SRCS-y += ethtool/igb/igb_param.c
+SRCS-y += ethtool/igb/igb_procfs.c
+SRCS-y += ethtool/igb/igb_vmdq.c
+#SRCS-y += ethtool/igb/igb_ptp.c
+#SRCS-y += ethtool/igb/kcompat.c
+
+SRCS-y += kni_misc.c
+SRCS-y += kni_net.c
+SRCS-y += kni_ethtool.c
+SRCS-$(CONFIG_RTE_KNI_VHOST) += kni_vhost.c
+
+include $(RTE_SDK)/mk/rte.module.mk
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/compat.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/compat.h
new file mode 100755
index 00000000..13135236
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/compat.h
@@ -0,0 +1,21 @@
+/*
+ * Minimal wrappers to allow compiling kni on older kernels.
+ */
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && \
+ (!(defined(RHEL_RELEASE_CODE) && \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
+
+#define kstrtoul strict_strtoul
+
+#endif /* < 2.6.39 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+
+#define sk_sleep(s) (s)->sk_sleep
+
+#endif /* < 2.6.35 */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/README b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/README
new file mode 100755
index 00000000..2cfefe72
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/README
@@ -0,0 +1,100 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Description
+
+In order to support ethtool in the Kernel NIC Interface, the standard Linux
+kernel ixgbe/igb drivers are reused here. ixgbe-3.9.17 is the version that was
+modified for the KNI kernel module to support ixgbe NICs, and igb-3.4.8 is the
+version that was modified for the KNI kernel module to support igb NICs.
+
+The ixgbe source code package can be downloaded from sourceforge.net:
+http://sourceforge.net/projects/e1000/files/ixgbe%20stable/
+The source files below are copied or modified from ixgbe.
+
+ixgbe_82598.h
+ixgbe_82599.c
+ixgbe_82599.h
+ixgbe_api.c
+ixgbe_api.h
+ixgbe_common.c
+ixgbe_common.h
+ixgbe_dcb.h
+ixgbe_ethtool.c
+ixgbe_fcoe.h
+ixgbe.h
+ixgbe_main.c
+ixgbe_mbx.h
+ixgbe_osdep.h
+ixgbe_phy.c
+ixgbe_phy.h
+ixgbe_sriov.h
+ixgbe_type.h
+kcompat.c
+kcompat.h
+
+The igb source code package can be downloaded from sourceforge.net:
+http://sourceforge.net/projects/e1000/files/igb%20stable/
+The source files below are copied or modified from igb.
+
+e1000_82575.c
+e1000_82575.h
+e1000_api.c
+e1000_api.h
+e1000_defines.h
+e1000_hw.h
+e1000_mac.c
+e1000_mac.h
+e1000_manage.c
+e1000_manage.h
+e1000_mbx.c
+e1000_mbx.h
+e1000_nvm.c
+e1000_nvm.h
+e1000_osdep.h
+e1000_phy.c
+e1000_phy.h
+e1000_regs.h
+igb_ethtool.c
+igb.h
+igb_main.c
+igb_param.c
+igb_procfs.c
+igb_regtest.h
+igb_sysfs.c
+igb_vmdq.c
+igb_vmdq.h
+kcompat.c
+kcompat_ethtool.c
+kcompat.h
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/COPYING b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/COPYING
new file mode 100755
index 00000000..5f297e5b
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/COPYING
@@ -0,0 +1,339 @@
+
+"This software program is licensed subject to the GNU General Public License
+(GPL). Version 2, June 1991, available at
+<http://www.fsf.org/copyleft/gpl.html>"
+
+GNU General Public License
+
+Version 2, June 1991
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+
+Everyone is permitted to copy and distribute verbatim copies of this license
+document, but changing it is not allowed.
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to
+share and change it. By contrast, the GNU General Public License is intended
+to guarantee your freedom to share and change free software--to make sure
+the software is free for all its users. This General Public License applies
+to most of the Free Software Foundation's software and to any other program
+whose authors commit to using it. (Some other Free Software Foundation
+software is covered by the GNU Library General Public License instead.) You
+can apply it to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our
+General Public Licenses are designed to make sure that you have the freedom
+to distribute copies of free software (and charge for this service if you
+wish), that you receive source code or can get it if you want it, that you
+can change the software or use pieces of it in new free programs; and that
+you know you can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to
+deny you these rights or to ask you to surrender the rights. These
+restrictions translate to certain responsibilities for you if you distribute
+copies of the software, or if you modify it.
+
+For example, if you distribute copies of such a program, whether gratis or
+for a fee, you must give the recipients all the rights that you have. You
+must make sure that they, too, receive or can get the source code. And you
+must show them these terms so they know their rights.
+
+We protect your rights with two steps: (1) copyright the software, and (2)
+offer you this license which gives you legal permission to copy, distribute
+and/or modify the software.
+
+Also, for each author's protection and ours, we want to make certain that
+everyone understands that there is no warranty for this free software. If
+the software is modified by someone else and passed on, we want its
+recipients to know that what they have is not the original, so that any
+problems introduced by others will not reflect on the original authors'
+reputations.
+
+Finally, any free program is threatened constantly by software patents. We
+wish to avoid the danger that redistributors of a free program will
+individually obtain patent licenses, in effect making the program
+proprietary. To prevent this, we have made it clear that any patent must be
+licensed for everyone's free use or not licensed at all.
+
+The precise terms and conditions for copying, distribution and modification
+follow.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License applies to any program or other work which contains a notice
+ placed by the copyright holder saying it may be distributed under the
+ terms of this General Public License. The "Program", below, refers to any
+ such program or work, and a "work based on the Program" means either the
+ Program or any derivative work under copyright law: that is to say, a
+ work containing the Program or a portion of it, either verbatim or with
+ modifications and/or translated into another language. (Hereinafter,
+ translation is included without limitation in the term "modification".)
+ Each licensee is addressed as "you".
+
+ Activities other than copying, distribution and modification are not
+ covered by this License; they are outside its scope. The act of running
+ the Program is not restricted, and the output from the Program is covered
+ only if its contents constitute a work based on the Program (independent
+ of having been made by running the Program). Whether that is true depends
+ on what the Program does.
+
+1. You may copy and distribute verbatim copies of the Program's source code
+ as you receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice and
+ disclaimer of warranty; keep intact all the notices that refer to this
+ License and to the absence of any warranty; and give any other recipients
+ of the Program a copy of this License along with the Program.
+
+ You may charge a fee for the physical act of transferring a copy, and you
+ may at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Program or any portion of it,
+ thus forming a work based on the Program, and copy and distribute such
+ modifications or work under the terms of Section 1 above, provided that
+ you also meet all of these conditions:
+
+ * a) You must cause the modified files to carry prominent notices stating
+ that you changed the files and the date of any change.
+
+ * b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any part
+ thereof, to be licensed as a whole at no charge to all third parties
+ under the terms of this License.
+
+ * c) If the modified program normally reads commands interactively when
+ run, you must cause it, when started running for such interactive
+ use in the most ordinary way, to print or display an announcement
+ including an appropriate copyright notice and a notice that there is
+ no warranty (or else, saying that you provide a warranty) and that
+ users may redistribute the program under these conditions, and
+ telling the user how to view a copy of this License. (Exception: if
+ the Program itself is interactive but does not normally print such
+ an announcement, your work based on the Program is not required to
+ print an announcement.)
+
+ These requirements apply to the modified work as a whole. If identifiable
+ sections of that work are not derived from the Program, and can be
+ reasonably considered independent and separate works in themselves, then
+ this License, and its terms, do not apply to those sections when you
+ distribute them as separate works. But when you distribute the same
+ sections as part of a whole which is a work based on the Program, the
+ distribution of the whole must be on the terms of this License, whose
+ permissions for other licensees extend to the entire whole, and thus to
+ each and every part regardless of who wrote it.
+
+ Thus, it is not the intent of this section to claim rights or contest
+ your rights to work written entirely by you; rather, the intent is to
+ exercise the right to control the distribution of derivative or
+ collective works based on the Program.
+
+ In addition, mere aggregation of another work not based on the Program
+ with the Program (or with a work based on the Program) on a volume of a
+ storage or distribution medium does not bring the other work under the
+ scope of this License.
+
+3. You may copy and distribute the Program (or a work based on it, under
+ Section 2) in object code or executable form under the terms of Sections
+ 1 and 2 above provided that you also do one of the following:
+
+ * a) Accompany it with the complete corresponding machine-readable source
+ code, which must be distributed under the terms of Sections 1 and 2
+ above on a medium customarily used for software interchange; or,
+
+ * b) Accompany it with a written offer, valid for at least three years,
+ to give any third party, for a charge no more than your cost of
+ physically performing source distribution, a complete machine-
+ readable copy of the corresponding source code, to be distributed
+ under the terms of Sections 1 and 2 above on a medium customarily
+ used for software interchange; or,
+
+ * c) Accompany it with the information you received as to the offer to
+ distribute corresponding source code. (This alternative is allowed
+ only for noncommercial distribution and only if you received the
+ program in object code or executable form with such an offer, in
+ accord with Subsection b above.)
+
+ The source code for a work means the preferred form of the work for
+ making modifications to it. For an executable work, complete source code
+ means all the source code for all modules it contains, plus any
+ associated interface definition files, plus the scripts used to control
+ compilation and installation of the executable. However, as a special
+ exception, the source code distributed need not include anything that is
+ normally distributed (in either source or binary form) with the major
+ components (compiler, kernel, and so on) of the operating system on which
+ the executable runs, unless that component itself accompanies the
+ executable.
+
+ If distribution of executable or object code is made by offering access
+ to copy from a designated place, then offering equivalent access to copy
+ the source code from the same place counts as distribution of the source
+ code, even though third parties are not compelled to copy the source
+ along with the object code.
+
+4. You may not copy, modify, sublicense, or distribute the Program except as
+ expressly provided under this License. Any attempt otherwise to copy,
+ modify, sublicense or distribute the Program is void, and will
+ automatically terminate your rights under this License. However, parties
+ who have received copies, or rights, from you under this License will not
+ have their licenses terminated so long as such parties remain in full
+ compliance.
+
+5. You are not required to accept this License, since you have not signed
+ it. However, nothing else grants you permission to modify or distribute
+ the Program or its derivative works. These actions are prohibited by law
+ if you do not accept this License. Therefore, by modifying or
+ distributing the Program (or any work based on the Program), you
+ indicate your acceptance of this License to do so, and all its terms and
+ conditions for copying, distributing or modifying the Program or works
+ based on it.
+
+6. Each time you redistribute the Program (or any work based on the
+ Program), the recipient automatically receives a license from the
+ original licensor to copy, distribute or modify the Program subject to
+ these terms and conditions. You may not impose any further restrictions
+ on the recipients' exercise of the rights granted herein. You are not
+ responsible for enforcing compliance by third parties to this License.
+
+7. If, as a consequence of a court judgment or allegation of patent
+ infringement or for any other reason (not limited to patent issues),
+ conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot distribute
+ so as to satisfy simultaneously your obligations under this License and
+ any other pertinent obligations, then as a consequence you may not
+ distribute the Program at all. For example, if a patent license would
+ not permit royalty-free redistribution of the Program by all those who
+ receive copies directly or indirectly through you, then the only way you
+ could satisfy both it and this License would be to refrain entirely from
+ distribution of the Program.
+
+ If any portion of this section is held invalid or unenforceable under any
+ particular circumstance, the balance of the section is intended to apply
+ and the section as a whole is intended to apply in other circumstances.
+
+ It is not the purpose of this section to induce you to infringe any
+ patents or other property right claims or to contest validity of any
+ such claims; this section has the sole purpose of protecting the
+ integrity of the free software distribution system, which is implemented
+ by public license practices. Many people have made generous contributions
+ to the wide range of software distributed through that system in
+ reliance on consistent application of that system; it is up to the
+ author/donor to decide if he or she is willing to distribute software
+ through any other system and a licensee cannot impose that choice.
+
+ This section is intended to make thoroughly clear what is believed to be
+ a consequence of the rest of this License.
+
+8. If the distribution and/or use of the Program is restricted in certain
+ countries either by patents or by copyrighted interfaces, the original
+ copyright holder who places the Program under this License may add an
+ explicit geographical distribution limitation excluding those countries,
+ so that distribution is permitted only in or among countries not thus
+ excluded. In such case, this License incorporates the limitation as if
+ written in the body of this License.
+
+9. The Free Software Foundation may publish revised and/or new versions of
+ the General Public License from time to time. Such new versions will be
+ similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the Program
+ specifies a version number of this License which applies to it and "any
+ later version", you have the option of following the terms and
+ conditions either of that version or of any later version published by
+ the Free Software Foundation. If the Program does not specify a version
+ number of this License, you may choose any version ever published by the
+ Free Software Foundation.
+
+10. If you wish to incorporate parts of the Program into other free programs
+ whose distribution conditions are different, write to the author to ask
+ for permission. For software which is copyrighted by the Free Software
+ Foundation, write to the Free Software Foundation; we sometimes make
+ exceptions for this. Our decision will be guided by the two goals of
+ preserving the free status of all derivatives of our free software and
+ of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+ FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+ OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+ PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
+ EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
+ YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
+ NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+ REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
+ DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
+ DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
+ (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
+ INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
+ THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
+ OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it free
+software which everyone can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to
+attach them to the start of each source file to most effectively convey the
+exclusion of warranty; and each file should have at least the "copyright"
+line and a pointer to where the full notice is found.
+
+one line to give the program's name and an idea of what it does.
+Copyright (C) yyyy name of author
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59
+Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this when
+it starts in an interactive mode:
+
+Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
+with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free
+software, and you are welcome to redistribute it under certain conditions;
+type 'show c' for details.
+
+The hypothetical commands 'show w' and 'show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may be
+called something other than 'show w' and 'show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+'Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+signature of Ty Coon, 1 April 1989
+Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General Public
+License instead of this License.
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
new file mode 100755
index 00000000..b8c9a13f
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
@@ -0,0 +1,3665 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * 82575EB Gigabit Network Connection
+ * 82575EB Gigabit Backplane Connection
+ * 82575GB Gigabit Network Connection
+ * 82576 Gigabit Network Connection
+ * 82576 Quad Port Gigabit Mezzanine Adapter
+ * 82580 Gigabit Network Connection
+ * I350 Gigabit Network Connection
+ */
+
+#include "e1000_api.h"
+#include "e1000_i210.h"
+
+static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
+static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
+static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
+static void e1000_release_phy_82575(struct e1000_hw *hw);
+static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
+static void e1000_release_nvm_82575(struct e1000_hw *hw);
+static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
+static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw);
+static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
+static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+static s32 e1000_init_hw_82575(struct e1000_hw *hw);
+static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
+static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
+static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
+static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw,
+ u32 offset, u16 *data);
+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw,
+ u32 offset, u16 data);
+static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
+ bool active);
+static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
+ bool active);
+static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
+ bool active);
+static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
+static s32 e1000_get_media_type_82575(struct e1000_hw *hw);
+static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw);
+static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
+static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
+ u32 offset, u16 data);
+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
+static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
+static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
+static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
+static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
+static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
+static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
+static void e1000_clear_vfta_i350(struct e1000_hw *hw);
+
+static void e1000_i2c_start(struct e1000_hw *hw);
+static void e1000_i2c_stop(struct e1000_hw *hw);
+static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data);
+static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data);
+static s32 e1000_get_i2c_ack(struct e1000_hw *hw);
+static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data);
+static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data);
+static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
+static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
+static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data);
+static bool e1000_get_i2c_data(u32 *i2cctl);
+
+static const u16 e1000_82580_rxpbs_table[] = {
+ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
+#define E1000_82580_RXPBS_TABLE_SIZE \
+ (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
+
+
+/**
+ * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ * @hw: pointer to the HW structure
+ *
+ * Called to determine if the I2C pins are being used for I2C or as an
+ * external MDIO interface since the two options are mutually exclusive.
+ **/
+static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+{
+ u32 reg = 0;
+ bool ext_mdio = false;
+
+ DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ reg = E1000_READ_REG(hw, E1000_MDIC);
+ ext_mdio = !!(reg & E1000_MDIC_DEST);
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ reg = E1000_READ_REG(hw, E1000_MDICNFG);
+ ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
+ break;
+ default:
+ break;
+ }
+ return ext_mdio;
+}
+
+/**
+ * e1000_init_phy_params_82575 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext;
+
+ DEBUGFUNC("e1000_init_phy_params_82575");
+
+ phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic;
+ phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic;
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ goto out;
+ }
+
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82575;
+
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+
+ phy->ops.acquire = e1000_acquire_phy_82575;
+ phy->ops.check_reset_block = e1000_check_reset_block_generic;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
+ phy->ops.release = e1000_release_phy_82575;
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ if (e1000_sgmii_active_82575(hw)) {
+ phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ } else {
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ e1000_reset_mdicnfg_82580(hw);
+
+ if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
+ phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
+ phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
+ } else {
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ phy->ops.read_reg = e1000_read_phy_reg_82580;
+ phy->ops.write_reg = e1000_write_phy_reg_82580;
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ phy->ops.read_reg = e1000_read_phy_reg_gs40g;
+ phy->ops.write_reg = e1000_write_phy_reg_gs40g;
+ break;
+ default:
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ }
+ }
+
+ /* Set phy->phy_addr and phy->id. */
+ ret_val = e1000_get_phy_id_82575(hw);
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case M88E1543_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ phy->type = e1000_phy_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ if (phy->id == I347AT4_E_PHY_ID ||
+ phy->id == M88E1112_E_PHY_ID ||
+ phy->id == M88E1340M_E_PHY_ID)
+ phy->ops.get_cable_length =
+ e1000_get_cable_length_m88_gen2;
+ else if (phy->id == M88E1543_E_PHY_ID)
+ phy->ops.get_cable_length =
+ e1000_get_cable_length_m88_gen2;
+ else
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ /* Check if this PHY is configured for media swap. */
+ if (phy->id == M88E1112_E_PHY_ID) {
+ u16 data;
+
+ ret_val = phy->ops.write_reg(hw,
+ E1000_M88E1112_PAGE_ADDR,
+ 2);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw,
+ E1000_M88E1112_MAC_CTRL_1,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+ E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+ e1000_check_for_link_media_swap;
+ }
+ break;
+ case IGP03E1000_E_PHY_ID:
+ case IGP04E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
+ break;
+ case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
+ phy->type = e1000_phy_82580;
+ phy->ops.check_polarity = e1000_check_polarity_82577;
+ phy->ops.force_speed_duplex =
+ e1000_phy_force_speed_duplex_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_info = e1000_get_phy_info_82577;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
+ break;
+ case I210_I_PHY_ID:
+ phy->type = e1000_phy_i210;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82575 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u16 size;
+
+ DEBUGFUNC("e1000_init_nvm_params_82575");
+
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+ /*
+ * Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* Just in case size is out of range, cap it to the largest
+ * EEPROM size supported
+ */
+ if (size > 15)
+ size = 15;
+
+ nvm->word_size = 1 << size;
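+ /*
+ * Example (sketch): with NVM_WORD_SIZE_BASE_SHIFT assumed to be 6, an
+ * EECD size field of 1 gives size = 7 and word_size = 1 << 7 = 128
+ * 16-bit words; the cap of 15 above limits word_size to 32K words.
+ */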
+ if (hw->mac.type < e1000_i210) {
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
+ break;
+ }
+ if (nvm->word_size == (1 << 15))
+ nvm->page_size = 128;
+
+ nvm->type = e1000_nvm_eeprom_spi;
+ } else {
+ nvm->type = e1000_nvm_flash_hw;
+ }
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_82575;
+ nvm->ops.release = e1000_release_nvm_82575;
+ if (nvm->word_size < (1 << 15))
+ nvm->ops.read = e1000_read_nvm_eerd;
+ else
+ nvm->ops.read = e1000_read_nvm_spi;
+
+ nvm->ops.write = e1000_write_nvm_spi;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_82575;
+
+ /* override generic family function pointers for specific descendants */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ nvm->ops.validate = e1000_validate_nvm_checksum_82580;
+ nvm->ops.update = e1000_update_nvm_checksum_82580;
+ break;
+ case e1000_i350:
+ //case e1000_i354:
+ nvm->ops.validate = e1000_validate_nvm_checksum_i350;
+ nvm->ops.update = e1000_update_nvm_checksum_i350;
+ break;
+ default:
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82575 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
+ DEBUGFUNC("e1000_init_mac_params_82575");
+
+ /* Derives media type */
+ e1000_get_media_type_82575(hw);
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set uta register count */
+ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+ if (mac->type == e1000_82576)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ if (mac->type == e1000_82580)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ if (mac->type == e1000_i350 || mac->type == e1000_i354)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+
+ /* Enable EEE default settings for EEE supported devices */
+ if (mac->type >= e1000_i350)
+ dev_spec->eee_disable = false;
+
+ /* Allow a single clear of the SW semaphore on I210 and newer */
+ if (mac->type >= e1000_i210)
+ dev_spec->clear_semaphore_once = true;
+
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK);
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
+ /* reset */
+ if (mac->type >= e1000_82580)
+ mac->ops.reset_hw = e1000_reset_hw_82580;
+ else
+ mac->ops.reset_hw = e1000_reset_hw_82575;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_82575;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_generic;
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575;
+ /* physical interface shutdown */
+ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
+ /* physical interface power up */
+ mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_link_82575;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
+ /* configure collision distance */
+ mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) {
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_i350;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_i350;
+ } else {
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ }
+ if (hw->mac.type >= e1000_82580)
+ mac->ops.validate_mdi_setting =
+ e1000_validate_mdi_setting_crossover_generic;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* blink LED */
+ mac->ops.blink_led = e1000_blink_led_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_generic;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
+ /* get thermal sensor data */
+ mac->ops.get_thermal_sensor_data =
+ e1000_get_thermal_sensor_data_generic;
+ mac->ops.init_thermal_sensor_thresh =
+ e1000_init_thermal_sensor_thresh_generic;
+ /* acquire SW_FW sync */
+ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575;
+ mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575;
+ if (mac->type >= e1000_i210) {
+ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210;
+ mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210;
+ }
+
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_82575 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82575");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82575;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
+ hw->phy.ops.init_params = e1000_init_phy_params_82575;
+ hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
+}
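+
+/*
+ * Usage sketch: callers typically set hw->mac.type and then call
+ * e1000_init_function_pointers_82575(hw); the init_params hooks installed
+ * here are invoked later (e.g. from e1000_setup_init_funcs()) to fill in
+ * the remaining MAC, NVM, PHY and mailbox function pointers.
+ */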
+
+/**
+ * e1000_acquire_phy_82575 - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * Acquire access rights to the correct PHY.
+ **/
+static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
+{
+ u16 mask = E1000_SWFW_PHY0_SM;
+
+ DEBUGFUNC("e1000_acquire_phy_82575");
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
+
+ return hw->mac.ops.acquire_swfw_sync(hw, mask);
+}
+
+/**
+ * e1000_release_phy_82575 - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY.
+ **/
+static void e1000_release_phy_82575(struct e1000_hw *hw)
+{
+ u16 mask = E1000_SWFW_PHY0_SM;
+
+ DEBUGFUNC("e1000_release_phy_82575");
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+/**
+ * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the serial gigabit media independent
+ * interface and stores the retrieved information in data.
+ **/
+static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ s32 ret_val = -E1000_ERR_PARAM;
+
+ DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
+
+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+ DEBUGOUT1("PHY Address %u is out of range\n", offset);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the serial gigabit
+ * media independent interface.
+ **/
+static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 data)
+{
+ s32 ret_val = -E1000_ERR_PARAM;
+
+ DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
+
+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_id_82575 - Retrieve PHY addr and id
+ * @hw: pointer to the HW structure
+ *
+ * Retrieves the PHY address and ID for PHYs that do and do not use the
+ * sgmii interface.
+ **/
+static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_id;
+ u32 ctrl_ext;
+ u32 mdic;
+
+ DEBUGFUNC("e1000_get_phy_id_82575");
+
+ /* i354 devices can have a PHY that needs an extra read for id */
+ if (hw->mac.type == e1000_i354)
+ e1000_get_phy_id(hw);
+
+
+ /*
+ * For SGMII PHYs, we try the list of possible addresses until
+ * we find one that works. For non-SGMII PHYs
+ * (e.g. integrated copper PHYs), an address of 1 should
+ * work. The result of this function should mean phy->phy_addr
+ * and phy->id are set correctly.
+ */
+ if (!e1000_sgmii_active_82575(hw)) {
+ phy->addr = 1;
+ ret_val = e1000_get_phy_id(hw);
+ goto out;
+ }
+
+ if (e1000_sgmii_uses_mdio_82575(hw)) {
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ mdic &= E1000_MDIC_PHY_MASK;
+ phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ mdic = E1000_READ_REG(hw, E1000_MDICNFG);
+ mdic &= E1000_MDICNFG_PHY_MASK;
+ phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ break;
+ }
+ ret_val = e1000_get_phy_id(hw);
+ goto out;
+ }
+
+ /* Power on sgmii phy if it is disabled */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(300);
+
+ /*
+ * The address field in the I2CCMD register is 3 bits and 0 is invalid.
+ * Therefore, we need to test 1-7
+ */
+ for (phy->addr = 1; phy->addr < 8; phy->addr++) {
+ ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+ if (ret_val == E1000_SUCCESS) {
+ DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
+ phy_id, phy->addr);
+ /*
+ * At the time of this writing, the M88 part is
+ * the only supported SGMII PHY product.
+ */
+ if (phy_id == M88_VENDOR)
+ break;
+ } else {
+ DEBUGOUT1("PHY address %u was unreadable\n",
+ phy->addr);
+ }
+ }
+
+ /* A valid PHY type couldn't be found. */
+ if (phy->addr == 8) {
+ phy->addr = 0;
+ ret_val = -E1000_ERR_PHY;
+ } else {
+ ret_val = e1000_get_phy_id(hw);
+ }
+
+ /* restore previous sfp cage power state */
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY using the serial gigabit media independent interface.
+ **/
+static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
+
+ /*
+ * This isn't a true "hard" reset, but is the only reset
+ * available to us at this time.
+ */
+
+ DEBUGOUT("Soft resetting SGMII attached PHY...\n");
+
+ if (!(hw->phy.ops.write_reg))
+ goto out;
+
+ /*
+ * SFP documentation requires the following to configure the SFP module
+ * to work on SGMII. No further documentation is given.
+ */
+ ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.commit(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_82575");
+
+ if (!(hw->phy.ops.read_reg))
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ goto out;
+
+ if (active) {
+ data |= IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ } else {
+ data &= ~IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u32 data;
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_82580");
+
+ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+ if (active) {
+ data |= E1000_82580_PM_D0_LPLU;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ data &= ~E1000_82580_PM_SPD;
+ } else {
+ data &= ~E1000_82580_PM_D0_LPLU;
+
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on)
+ data |= E1000_82580_PM_SPD;
+ else if (phy->smart_speed == e1000_smart_speed_off)
+ data &= ~E1000_82580_PM_SPD;
+ }
+
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+ return ret_val;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u32 data;
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_82580");
+
+ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+ if (!active) {
+ data &= ~E1000_82580_PM_D3_LPLU;
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on)
+ data |= E1000_82580_PM_SPD;
+ else if (phy->smart_speed == e1000_smart_speed_off)
+ data &= ~E1000_82580_PM_SPD;
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= E1000_82580_PM_D3_LPLU;
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ data &= ~E1000_82580_PM_SPD;
+ }
+
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+ return ret_val;
+}
+
+/**
+ * e1000_acquire_nvm_82575 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_acquire_nvm_82575");
+
+ ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Check for a previous access error that this
+ * access might otherwise trip over
+ */
+ if (hw->mac.type == e1000_i350) {
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
+ E1000_EECD_TIMEOUT)) {
+ /* Clear all access error flags */
+ E1000_WRITE_REG(hw, E1000_EECD, eecd |
+ E1000_EECD_ERROR_CLR);
+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
+ }
+ }
+ if (hw->mac.type == e1000_82580) {
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & E1000_EECD_BLOCKED) {
+ /* Clear access error flag */
+ E1000_WRITE_REG(hw, E1000_EECD, eecd |
+ E1000_EECD_BLOCKED);
+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
+ }
+ }
+
+
+ ret_val = e1000_acquire_nvm_generic(hw);
+ if (ret_val)
+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_82575 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ **/
+static void e1000_release_nvm_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_82575");
+
+ e1000_release_nvm_generic(hw);
+
+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
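+ /*
+ * Note: SW_FW_SYNC keeps the software ownership bits in its low 16 bits
+ * and the matching firmware ownership bits in its high 16 bits, hence
+ * fwmask = mask << 16 above.
+ */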
+ s32 ret_val = E1000_SUCCESS;
+ s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+ DEBUGFUNC("e1000_acquire_swfw_sync_82575");
+
+ while (i < timeout) {
+ if (e1000_get_hw_semaphore_generic(hw)) {
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ DEBUGFUNC("e1000_release_swfw_sync_82575");
+
+ while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
+ ; /* Empty */
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ * e1000_get_cfg_done_82575 - Read config done bit
+ * @hw: pointer to the HW structure
+ *
+ * Read the management control register for the config done bit for
+ * completion status. NOTE: silicon which is EEPROM-less will fail trying
+ * to read the config done bit, so the error is *ONLY* logged and
+ * E1000_SUCCESS is still returned. If we were to return an error,
+ * EEPROM-less silicon would not be able to be reset or change link.
+ **/
+static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ s32 ret_val = E1000_SUCCESS;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ DEBUGFUNC("e1000_get_cfg_done_82575");
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_NVM_CFG_DONE_PORT_1;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_NVM_CFG_DONE_PORT_2;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_NVM_CFG_DONE_PORT_3;
+ while (timeout) {
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+ break;
+ msec_delay(1);
+ timeout--;
+ }
+ if (!timeout)
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+
+ /* If EEPROM is not marked present, init the PHY manually */
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
+ (hw->phy.type == e1000_phy_igp_3))
+ e1000_phy_init_script_igp3(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_link_up_info_82575 - Get link speed/duplex info
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * This is a wrapper function, if using the serial gigabit media independent
+ * interface, use PCS to retrieve the link speed and duplex information.
+ * Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_link_up_info_82575");
+
+ if (hw->phy.media_type != e1000_media_type_copper)
+ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
+ duplex);
+ else
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
+ duplex);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link_82575 - Check for link
+ * @hw: pointer to the HW structure
+ *
+ * If sgmii is enabled, then use the pcs register to determine link, otherwise
+ * use the generic interface for determining link.
+ **/
+static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 speed, duplex;
+
+ DEBUGFUNC("e1000_check_for_link_82575");
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
+ &duplex);
+ /*
+ * Use this flag to determine if link needs to be checked or
+ * not. If we have link, clear the flag so that we do not
+ * continue to check for link.
+ */
+ hw->mac.get_link_status = !hw->mac.serdes_has_link;
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+ } else {
+ ret_val = e1000_check_for_copper_link_generic(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link_media_swap - Check which M88E1112 interface linked
+ * @hw: pointer to the HW structure
+ *
+ * Poll the M88E1112 interfaces to see which interface achieved link.
+ */
+static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ u8 port = 0;
+
+ DEBUGFUNC("e1000_check_for_link_media_swap");
+
+ /* Check the copper medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_COPPER;
+
+ /* Check the other medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_OTHER;
+
+ /* Determine if a swap needs to happen. */
+ if (port && (hw->dev_spec._82575.media_port != port)) {
+ hw->dev_spec._82575.media_port = port;
+ hw->dev_spec._82575.media_changed = true;
+ } else {
+ ret_val = e1000_check_for_link_82575(hw);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ * @hw: pointer to the HW structure
+ **/
+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_power_up_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return;
+
+ /* Enable PCS to turn on link */
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_PCS_EN;
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+ /* Power up the laser */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+}
+
+/**
+ * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Using the physical coding sub-layer (PCS), retrieve the current speed and
+ * duplex, then store the values in the pointers provided.
+ **/
+static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 pcs;
+ u32 status;
+
+ DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
+
+ /*
+ * Read the PCS Status register for link state. In non-copper mode the
+ * STATUS register is not accurate, so the PCS status register is used
+ * instead.
+ */
+ pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+ /*
+ * The link up bit determines when link is up on autoneg.
+ */
+ if (pcs & E1000_PCS_LSTS_LINK_OK) {
+ mac->serdes_has_link = true;
+
+ /* Detect and store PCS speed */
+ if (pcs & E1000_PCS_LSTS_SPEED_1000)
+ *speed = SPEED_1000;
+ else if (pcs & E1000_PCS_LSTS_SPEED_100)
+ *speed = SPEED_100;
+ else
+ *speed = SPEED_10;
+
+ /* Detect and store PCS duplex */
+ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ /* Check if it is an I354 2.5Gb backplane connection. */
+ if (mac->type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ *speed = SPEED_2500;
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("2500 Mbs, ");
+ DEBUGOUT("Full Duplex\n");
+ }
+ }
+
+ } else {
+ mac->serdes_has_link = false;
+ *speed = 0;
+ *duplex = 0;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_shutdown_serdes_link_82575 - Remove link during power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of serdes, shut down the SFP and PCS on driver unload
+ * when management pass through is not enabled.
+ **/
+void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_shutdown_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return;
+
+ if (!e1000_enable_mng_pass_thru(hw)) {
+ /* Disable PCS to turn off link */
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg &= ~E1000_PCS_CFG_PCS_EN;
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+ /* shutdown the laser */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg |= E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+ }
+
+ return;
+}
+
+/**
+ * e1000_reset_hw_82575 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_reset_hw_82575");
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ /* set the completion timeout for interface */
+ ret_val = e1000_set_pcie_completion_timeout(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Set completion timeout has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to MAC\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom, and returning an error would
+ * prevent getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ }
+
+ /* If EEPROM is not present, run manual init scripts */
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
+ e1000_reset_init_script_82575(hw);
+
+ /* Clear any pending interrupt events. */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_82575 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+static s32 e1000_init_hw_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ u16 i, rar_count = mac->rar_entry_count;
+
+ DEBUGFUNC("e1000_init_hw_82575");
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ if (ret_val) {
+ DEBUGOUT("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+ }
+
+ /* Disabling VLAN filtering */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address */
+ e1000_init_rx_addrs_generic(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Zero out the Unicast HASH table */
+ DEBUGOUT("Zeroing the UTA\n");
+ for (i = 0; i < mac->uta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ /* Set the default MTU size */
+ hw->dev_spec._82575.mtu = 1500;
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82575(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_82575 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link, once link is established calls to configure collision distance
+ * and flow control are called.
+ **/
+static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u32 phpm_reg;
+
+ DEBUGFUNC("e1000_setup_copper_link_82575");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Clear Go Link Disconnect bit on supported devices */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ break;
+ default:
+ break;
+ }
+
+ ret_val = e1000_setup_serdes_link_82575(hw);
+ if (ret_val)
+ goto out;
+
+ if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+ /* allow time for the SFP cage to power up the phy */
+ msec_delay(300);
+
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ goto out;
+ }
+ }
+ switch (hw->phy.type) {
+ case e1000_phy_i210:
+ case e1000_phy_m88:
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case I210_I_PHY_ID:
+ ret_val = e1000_copper_link_setup_m88_gen2(hw);
+ break;
+ default:
+ ret_val = e1000_copper_link_setup_m88(hw);
+ break;
+ }
+ break;
+ case e1000_phy_igp_3:
+ ret_val = e1000_copper_link_setup_igp(hw);
+ break;
+ case e1000_phy_82580:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_setup_copper_link_generic(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_serdes_link_82575 - Setup link for serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configure the physical coding sub-layer (PCS) link. The PCS link is
+ * used on copper connections where the serialized gigabit media independent
+ * interface (sgmii) is used, or on serdes fiber connections. Configures
+ * the link for auto-negotiation or forces speed/duplex.
+ **/
+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
+ bool pcs_autoneg;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_setup_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return ret_val;
+
+ /*
+ * On the 82575, SerDes loopback mode persists until it is
+ * explicitly turned off or a power cycle is performed. A read to
+ * the register does not indicate its status. Therefore, we ensure
+ * loopback mode is disabled during initialization.
+ */
+ E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+
+ /* power on the sfp cage if present */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl_reg |= E1000_CTRL_SLU;
+
+ /* set both sw defined pins on 82575/82576*/
+ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
+ ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+
+ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+
+ /* default pcs_autoneg to the same setting as mac autoneg */
+ pcs_autoneg = hw->mac.autoneg;
+
+ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ /* sgmii mode lets the phy handle forcing speed/duplex */
+ pcs_autoneg = true;
+ /* autoneg time out should be disabled for SGMII mode */
+ reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ /* disable PCS autoneg and support parallel detect only */
+ pcs_autoneg = false;
+ /* fall through to default case */
+ default:
+ if (hw->mac.type == e1000_82575 ||
+ hw->mac.type == e1000_82576) {
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
+ pcs_autoneg = false;
+ }
+
+ /*
+ * non-SGMII modes only support a speed of 1000/Full for the
+ * link so it is best to just force the MAC and let the pcs
+ * link either autoneg or be forced to 1000/Full
+ */
+ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+ /* set speed of 1000/Full if speed/duplex is forced */
+ reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+ break;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+
+ /*
+ * New SerDes mode allows for forcing speed or autonegotiating speed
+ * at 1gb. Autoneg should be the default set by most drivers. This is the
+ * mode that will be compatible with older link partners and switches.
+ * However, both are supported by the hardware and some drivers/tools.
+ */
+ reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
+ E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+
+ if (pcs_autoneg) {
+ /* Set PCS register for autoneg */
+ reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
+ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+
+ /* Disable force flow control for autoneg */
+ reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
+
+ /* Configure flow control advertisement for autoneg */
+ anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
+
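+ /*
+ * Note: PCS_ANADV mirrors the 1000BASE-X autoneg config word; PAUSE
+ * advertises symmetric pause and ASM_DIR asymmetric pause, which is why
+ * rx_pause sets both bits and tx_pause sets ASM_DIR only below.
+ */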
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
+ case e1000_fc_rx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ anadv_reg |= E1000_TXCW_PAUSE;
+ break;
+ case e1000_fc_tx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ break;
+ default:
+ break;
+ }
+
+ E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg);
+
+ DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
+ } else {
+ /* Set PCS register for forced link */
+ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
+
+ /* Force flow control for forced link */
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
+ DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
+ }
+
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+
+ if (!pcs_autoneg && !e1000_sgmii_active_82575(hw))
+ e1000_force_mac_fc_generic(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_media_type_82575 - derives current media type.
+ * @hw: pointer to the HW structure
+ *
+ * The media type is chosen reflecting a few settings.
+ * The following are taken into account:
+ * - link mode set in the current port Init Control Word #3
+ * - current link mode settings in CSR register
+ * - MDIO vs. I2C PHY control interface chosen
+ * - SFP module media type
+ **/
+static s32 e1000_get_media_type_82575(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext = 0;
+ u32 link_mode = 0;
+
+ /* Set internal phy as default */
+ dev_spec->sgmii_active = false;
+ dev_spec->module_plugged = false;
+
+ /* Get CSR setting */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ /* extract link mode setting */
+ link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+
+ switch (link_mode) {
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_GMII:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ /* Get phy control interface type set (MDIO vs. I2C)*/
+ if (e1000_sgmii_uses_mdio_82575(hw)) {
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = true;
+ break;
+ }
+ /* fall through for I2C based SGMII */
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ /* read media type from SFP EEPROM */
+ ret_val = e1000_set_sfp_media_type_82575(hw);
+ if ((ret_val != E1000_SUCCESS) ||
+ (hw->phy.media_type == e1000_media_type_unknown)) {
+ /*
+ * If media type was not identified then return media
+ * type defined by the CTRL_EXT settings.
+ */
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+
+ if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = true;
+ }
+
+ break;
+ }
+
+ /* do not change link mode for 100BaseFX */
+ if (dev_spec->eth_flags.e100_base_fx)
+ break;
+
+ /* change current link mode setting */
+ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
+ else
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_sfp_media_type_82575 - derives SFP module media type.
+ * @hw: pointer to the HW structure
+ *
+ * The media type is chosen based on the SFP module
+ * compatibility flags retrieved from the SFP ID EEPROM.
+ **/
+static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_ERR_CONFIG;
+ u32 ctrl_ext = 0;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags;
+ u8 tranceiver_type = 0;
+ s32 timeout = 3;
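+ /*
+ * Note: tranceiver_type receives the SFF identifier byte (byte 0 of the
+ * SFP module EEPROM); the read loop below retries a few times, assuming
+ * the module may still be powering up.
+ */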
+
+ /* Turn I2C interface ON and power on sfp cage */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
+
+ E1000_WRITE_FLUSH(hw);
+
+ /* Read SFP module data */
+ while (timeout) {
+ ret_val = e1000_read_sfp_data_byte(hw,
+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
+ &tranceiver_type);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ msec_delay(100);
+ timeout--;
+ }
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+
+ ret_val = e1000_read_sfp_data_byte(hw,
+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
+ (u8 *)eth_flags);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+
+ /* Check if there is some SFP module plugged and powered */
+ if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
+ (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
+ dev_spec->module_plugged = true;
+ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e100_base_fx) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e1000_base_t) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_copper;
+ } else {
+ hw->phy.media_type = e1000_media_type_unknown;
+ DEBUGOUT("PHY module has not been recognized\n");
+ goto out;
+ }
+ } else {
+ hw->phy.media_type = e1000_media_type_unknown;
+ }
+ ret_val = E1000_SUCCESS;
+out:
+ /* Restore I2C interface setting */
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ return ret_val;
+}
+
+/**
+ * e1000_valid_led_default_82575 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set it to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_82575");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_internal_serdes:
+ *data = ID_LED_DEFAULT_82575_SERDES;
+ break;
+ case e1000_media_type_copper:
+ default:
+ *data = ID_LED_DEFAULT;
+ break;
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_sgmii_active_82575 - Return sgmii state
+ * @hw: pointer to the HW structure
+ *
+ * 82575 silicon has a serialized gigabit media independent interface (sgmii)
+ * which can be enabled for use in embedded applications. Simply
+ * return the current state of the sgmii interface.
+ **/
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ return dev_spec->sgmii_active;
+}
+
+/**
+ * e1000_reset_init_script_82575 - Inits HW defaults after reset
+ * @hw: pointer to the HW structure
+ *
+ * Inits recommended HW defaults after a reset when there is no EEPROM
+ * detected. This is only for the 82575.
+ **/
+static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_reset_init_script_82575");
+
+ if (hw->mac.type == e1000_82575) {
+ DEBUGOUT("Running reset init script for 82575\n");
+ /* SerDes configuration via SERDESCTRL */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
+
+ /* CCM configuration via CCMCTL register */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
+
+ /* PCIe lanes configuration */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
+
+ /* PCIe PLL Configuration */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr_82575 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_read_mac_addr_82575");
+
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_config_collision_dist_82575 - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
+{
+ u32 tctl_ext;
+
+ DEBUGFUNC("e1000_config_collision_dist_82575");
+
+ tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
+
+ tctl_ext &= ~E1000_TCTL_EXT_COLD;
+ tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
+
+ E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during
+ * a driver unload when wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+
+ if (!(phy->ops.check_reset_block))
+ return;
+
+ /* If the management interface is not enabled, then power down */
+ if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82575");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+
+ E1000_READ_REG(hw, E1000_IAC);
+ E1000_READ_REG(hw, E1000_ICRXOC);
+
+ E1000_READ_REG(hw, E1000_ICRXPTC);
+ E1000_READ_REG(hw, E1000_ICRXATC);
+ E1000_READ_REG(hw, E1000_ICTXPTC);
+ E1000_READ_REG(hw, E1000_ICTXATC);
+ E1000_READ_REG(hw, E1000_ICTXQEC);
+ E1000_READ_REG(hw, E1000_ICTXQMTC);
+ E1000_READ_REG(hw, E1000_ICRXDMTC);
+
+ E1000_READ_REG(hw, E1000_CBTMPC);
+ E1000_READ_REG(hw, E1000_HTDPMC);
+ E1000_READ_REG(hw, E1000_CBRMPC);
+ E1000_READ_REG(hw, E1000_RPTHC);
+ E1000_READ_REG(hw, E1000_HGPTC);
+ E1000_READ_REG(hw, E1000_HTCBDPC);
+ E1000_READ_REG(hw, E1000_HGORCL);
+ E1000_READ_REG(hw, E1000_HGORCH);
+ E1000_READ_REG(hw, E1000_HGOTCL);
+ E1000_READ_REG(hw, E1000_HGOTCH);
+ E1000_READ_REG(hw, E1000_LENERRS);
+
+ /* This register should not be read in copper configurations */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
+ e1000_sgmii_active_82575(hw))
+ E1000_READ_REG(hw, E1000_SCVPC);
+}
+
+/**
+ * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
+ * @hw: pointer to the HW structure
+ *
+ * After Rx enable, if manageability is enabled then there is likely some
+ * bad data at the start of the fifo and possibly in the DMA fifo. This
+ * function clears the fifos and flushes any packets that came in as rx was
+ * being enabled.
+ **/
+void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
+{
+ u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+ int i, ms_wait;
+
+ DEBUGFUNC("e1000_rx_fifo_workaround_82575");
+ if (hw->mac.type != e1000_82575 ||
+ !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+ return;
+
+ /* Disable all Rx queues */
+ for (i = 0; i < 4; i++) {
+ rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i),
+ rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
+ }
+ /* Poll all queues to verify they have shut down */
+ for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+ msec_delay(1);
+ rx_enabled = 0;
+ for (i = 0; i < 4; i++)
+ rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
+ if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
+ break;
+ }
+
+ if (ms_wait == 10)
+ DEBUGOUT("Queue disable timed out after 10ms\n");
+
+ /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+ * incoming packets are rejected. Set enable and wait 2ms so that
+ * any packets that were in flight while RCTL.EN was being set are flushed
+ */
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+
+ rlpml = E1000_READ_REG(hw, E1000_RLPML);
+ E1000_WRITE_REG(hw, E1000_RLPML, 0);
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
+ temp_rctl |= E1000_RCTL_LPE;
+
+ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(2);
+
+ /* Enable Rx queues that were previously enabled and restore our
+ * previous state
+ */
+ for (i = 0; i < 4; i++)
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+
+ E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
+ /* Flush receive errors generated by workaround */
+ E1000_READ_REG(hw, E1000_ROC);
+ E1000_READ_REG(hw, E1000_RNBC);
+ E1000_READ_REG(hw, E1000_MPC);
+}
+
+/**
+ * e1000_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for 82575 and 82576 should be in the range of 50us to 50ms;
+ * however, the hardware default for these parts is 500us to 1ms, which is
+ * less than the 10ms recommended by the PCIe spec. To address this we need
+ * to increase the value to either 10ms to 200ms for capability version 1
+ * config, or 16ms to 55ms for version 2.
+ **/
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
+{
+ u32 gcr = E1000_READ_REG(hw, E1000_GCR);
+ s32 ret_val = E1000_SUCCESS;
+ u16 pcie_devctl2;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /*
+ * if the capabilities version is type 1 we can write the
+ * timeout of 10ms to 200ms through the GCR register
+ */
+ if (!(gcr & E1000_GCR_CAP_VER2)) {
+ gcr |= E1000_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /*
+ * for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+ if (ret_val)
+ goto out;
+
+ pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+ ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
+
+ E1000_WRITE_REG(hw, E1000_GCR, gcr);
+ return ret_val;
+}
+
+/**
+ * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ * @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ * enables/disables L2 switch anti-spoofing functionality.
+ **/
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+{
+ u32 reg_val, reg_offset;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ reg_offset = E1000_DTXSWC;
+ break;
+ case e1000_i350:
+ case e1000_i354:
+ reg_offset = E1000_TXSWC;
+ break;
+ default:
+ return;
+ }
+
+ reg_val = E1000_READ_REG(hw, reg_offset);
+ if (enable) {
+ reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ /* The PF can spoof - it has to in order to
+ * support emulation mode NICs
+ */
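+ /* Note: the XOR below clears the PF pool's MAC and VLAN spoof-check
+ * bits, which were just set by the mask above.
+ */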
+ reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ } else {
+ reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ }
+ E1000_WRITE_REG(hw, reg_offset, reg_val);
+}
+
+/**
+ * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables L2 switch loopback functionality.
+ **/
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 dtxswc;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
+ break;
+ case e1000_i350:
+ case e1000_i354:
+ dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
+ break;
+ default:
+ /* Currently no other hardware supports loopback */
+ break;
+ }
+}
+
+/**
+ * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables replication of packets across multiple pools.
+ **/
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
+
+ if (enable)
+ vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
+ else
+ vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
+
+ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
+}
+
+/**
+ * e1000_read_phy_reg_82580 - Read 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_phy_reg_82580");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_82580 - Write 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_write_phy_reg_82580");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ * @hw: pointer to the HW structure
+ *
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * the values found in the EEPROM. This addresses an issue in which these
+ * bits are not restored from EEPROM after reset.
+ **/
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 mdicnfg;
+ u16 nvm_data = 0;
+
+ DEBUGFUNC("e1000_reset_mdicnfg_82580");
+
+ if (hw->mac.type != e1000_82580)
+ goto out;
+ if (!e1000_sgmii_active_82575(hw))
+ goto out;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+ if (nvm_data & NVM_WORD24_EXT_MDIO)
+ mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+ if (nvm_data & NVM_WORD24_COM_MDIO)
+ mdicnfg |= E1000_MDICNFG_COM_MDIO;
+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_82580 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the function or the entire device (all ports, etc.)
+ * to a known state.
+ **/
+static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ /* BH SW mailbox bit in SW_FW_SYNC */
+ u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+ u32 ctrl;
+ bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+ DEBUGFUNC("e1000_reset_hw_82580");
+
+ hw->dev_spec._82575.global_device_reset = false;
+
+ /* 82580 does not reliably do global_device_reset due to hw errata */
+ if (hw->mac.type == e1000_82580)
+ global_device_reset = false;
+
+ /* Get current control state. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ /* Determine whether or not a global dev reset is requested */
+ if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw,
+ swmbsw_mask))
+ global_device_reset = false;
+
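+ /* CTRL.DEV_RST performs a global device reset (all ports), while
+ * CTRL.RST resets only this function.
+ */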
+ if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STAT_DEV_RST_SET))
+ ctrl |= E1000_CTRL_DEV_RST;
+ else
+ ctrl |= E1000_CTRL_RST;
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* Add delay to ensure DEV_RST has time to complete */
+ if (global_device_reset)
+ msec_delay(5);
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ }
+
+ /* clear global device reset status bit */
+ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+ /* Clear any pending interrupt events. */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ ret_val = e1000_reset_mdicnfg_82580(hw);
+ if (ret_val)
+ DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
+
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ /* Release semaphore */
+ if (global_device_reset)
+ hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
+
+ return ret_val;
+}
+
+/**
+ * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
+ * @data: data received by reading RXPBS register
+ *
+ * The 82580 uses a table-based approach for packet buffer allocation sizes.
+ * This function converts the retrieved value into the correct table value:
+ *       0x0  0x1  0x2  0x3  0x4  0x5  0x6  0x7
+ *  0x0   36   72  144    1    2    4    8   16
+ *  0x8   35   70  140  rsv  rsv  rsv  rsv  rsv
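+ * e.g. a data value of 0x2 maps to table entry 144.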
+ */
+u16 e1000_rxpbs_adjust_82580(u32 data)
+{
+ u16 ret_val = 0;
+
+ if (data < E1000_82580_RXPBS_TABLE_SIZE)
+ ret_val = e1000_82580_rxpbs_table[data];
+
+ return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_with_offset - Validate EEPROM
+ * checksum
+ * @hw: pointer to the HW structure
+ * @offset: offset in words of the checksum protected region
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
+
+ for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+ DEBUGOUT("NVM Checksum Invalid\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_with_offset - Update EEPROM
+ * checksum
+ * @hw: pointer to the HW structure
+ * @offset: offset in words of the checksum protected region
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
+
+ for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+ &checksum);
+ if (ret_val)
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 eeprom_regions_count = 1;
+ u16 j, nvm_data;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_82580");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+ /* if the checksums compatibility bit is set, validate checksums
+ * for all 4 ports. */
+ eeprom_regions_count = 4;
+ }
+
+ for (j = 0; j < eeprom_regions_count; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_82580 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 j, nvm_data;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_82580");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n");
+ goto out;
+ }
+
+ if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) {
+ /* set compatibility bit to validate checksums appropriately */
+ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
+ ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+ &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n");
+ goto out;
+ }
+ }
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 j;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_i350");
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 j;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_i350");
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * __e1000_access_emi_reg - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @address: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ **/
+static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address,
+ u16 *data, bool read)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("__e1000_access_emi_reg");
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_emi_reg - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ **/
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+ DEBUGFUNC("e1000_read_emi_reg");
+
+ return __e1000_access_emi_reg(hw, addr, data, true);
+}
+
+/**
+ * e1000_set_eee_i350 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE based on setting in dev_spec structure.
+ *
+ **/
+s32 e1000_set_eee_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 ipcnfg, eeer;
+
+ DEBUGFUNC("e1000_set_eee_i350");
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ goto out;
+ ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
+ eeer = E1000_READ_REG(hw, E1000_EEER);
+
+ /* enable or disable per user setting */
+ if (!(hw->dev_spec._82575.eee_disable)) {
+ u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
+
+ ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+ E1000_EEER_LPI_FC);
+
+ /* This bit should not be set in normal operation. */
+ if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+ DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
+ } else {
+ ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+ E1000_EEER_LPI_FC);
+ }
+ E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
+ E1000_WRITE_REG(hw, E1000_EEER, eeer);
+ E1000_READ_REG(hw, E1000_IPCNFG);
+ E1000_READ_REG(hw, E1000_EEER);
+out:
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_eee_i354 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE legacy mode based on setting in dev_spec structure.
+ *
+ **/
+s32 e1000_set_eee_i354(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_set_eee_i354");
+
+ if ((hw->phy.media_type != e1000_media_type_copper) ||
+ ((phy->id != M88E1543_E_PHY_ID)))
+ goto out;
+
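+ /* Enable sequence: set E1000_M88E1543_EEE_CTRL_1_MS on PHY page 18,
+ * return to page 0, then advertise 100M/1G EEE support via XMDIO.
+ */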
+ if (!hw->dev_spec._82575.eee_disable) {
+ /* Switch to PHY page 18. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+ phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ /* Turn on EEE advertisement. */
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= E1000_EEE_ADV_100_SUPPORTED |
+ E1000_EEE_ADV_1000_SUPPORTED;
+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ phy_data);
+ } else {
+ /* Turn off EEE advertisement. */
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
+ E1000_EEE_ADV_1000_SUPPORTED);
+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ phy_data);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_eee_status_i354 - Get EEE status
+ * @hw: pointer to the HW structure
+ * @status: EEE status
+ *
+ * Get EEE status by guessing based on whether Tx or Rx LPI indications have
+ * been received.
+ **/
+s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_get_eee_status_i354");
+
+ /* Check if EEE is supported on this device. */
+ if ((hw->phy.media_type != e1000_media_type_copper) ||
+ ((phy->id != M88E1543_E_PHY_ID)))
+ goto out;
+
+ ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
+ E1000_PCS_STATUS_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
+ E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
+
+out:
+ return ret_val;
+}
+
+/* Due to a hw erratum, if the host tries to configure the VFTA register
+ * while performing queries from the BMC or DMA, then the VFTA in some
+ * cases won't be written.
+ */
+
+/**
+ * e1000_clear_vfta_i350 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_i350(struct e1000_hw *hw)
+{
+ u32 offset;
+ int i;
+
+ DEBUGFUNC("e1000_clear_vfta_350");
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ for (i = 0; i < 10; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * e1000_write_vfta_i350 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ int i;
+
+ DEBUGFUNC("e1000_write_vfta_350");
+
+ for (i = 0; i < 10; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_set_i2c_bb - Enable I2C bit-bang
+ * @hw: pointer to the HW structure
+ *
+ * Enable I2C bit-bang interface
+ *
+ **/
+s32 e1000_set_i2c_bb(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext, i2cparams;
+
+ DEBUGFUNC("e1000_set_i2c_bb");
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ i2cparams |= E1000_I2CBB_EN;
+ i2cparams |= E1000_I2C_DATA_OE_N;
+ i2cparams |= E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams);
+ E1000_WRITE_FLUSH(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_i2c_byte_generic - Reads one byte over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: value read
+ *
+ * Performs byte read operation over I2C interface at
+ * a specified device address.
+ **/
+s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u32 max_retry = 10;
+ u32 retry = 1;
+ u16 swfw_mask = 0;
+
+ bool nack = true;
+
+ DEBUGFUNC("e1000_read_i2c_byte_generic");
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
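+ /* Combined-format read: address the device for write, send the byte
+ * offset, issue a repeated START, address the device for read, clock
+ * in the data byte, then NACK and STOP.
+ */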
+ do {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+ != E1000_SUCCESS) {
+ status = E1000_ERR_SWFW_SYNC;
+ goto read_byte_out;
+ }
+
+ e1000_i2c_start(hw);
+
+ /* Device Address and write indication */
+ status = e1000_clock_out_i2c_byte(hw, dev_addr);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, byte_offset);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_start(hw);
+
+ /* Device Address and read indication */
+ status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1));
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_in_i2c_byte(hw, data);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_bit(hw, nack);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_stop(hw);
+ break;
+
+fail:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(100);
+ e1000_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte read error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte read error.\n");
+
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+read_byte_out:
+
+ return status;
+}
+
+/**
+ * e1000_write_i2c_byte_generic - Writes one byte over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: value to write
+ *
+ * Performs byte write operation over I2C interface at
+ * a specified device address.
+ **/
+s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ s32 status = E1000_SUCCESS;
+ u32 max_retry = 1;
+ u32 retry = 0;
+ u16 swfw_mask = 0;
+
+ DEBUGFUNC("e1000_write_i2c_byte_generic");
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) {
+ status = E1000_ERR_SWFW_SYNC;
+ goto write_byte_out;
+ }
+
+ do {
+ e1000_i2c_start(hw);
+
+ status = e1000_clock_out_i2c_byte(hw, dev_addr);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, byte_offset);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, data);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_stop(hw);
+ break;
+
+fail:
+ e1000_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte write error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte write error.\n");
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
+
+ return status;
+}
+
+/**
+ * e1000_i2c_start - Sets I2C start condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C start condition (High -> Low on SDA while SCL is High)
+ **/
+static void e1000_i2c_start(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_i2c_start");
+
+ /* Start condition must begin with data and clock high */
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for start condition (4.7us) */
+ usec_delay(E1000_I2C_T_SU_STA);
+
+ e1000_set_i2c_data(hw, &i2cctl, 0);
+
+ /* Hold time for start condition (4us) */
+ usec_delay(E1000_I2C_T_HD_STA);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+}
+
+/**
+ * e1000_i2c_stop - Sets I2C stop condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ **/
+static void e1000_i2c_stop(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_i2c_stop");
+
+ /* Stop condition must begin with data low and clock high */
+ e1000_set_i2c_data(hw, &i2cctl, 0);
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for stop condition (4us) */
+ usec_delay(E1000_I2C_T_SU_STO);
+
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+
+ /* bus free time between stop and start (4.7us)*/
+ usec_delay(E1000_I2C_T_BUF);
+}
+
+/**
+ * e1000_clock_in_i2c_byte - Clocks in one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte to clock in
+ *
+ * Clocks in one byte data via I2C data/clock
+ **/
+static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data)
+{
+ s32 i;
+ bool bit = 0;
+
+ DEBUGFUNC("e1000_clock_in_i2c_byte");
+
+ *data = 0;
+ for (i = 7; i >= 0; i--) {
+ e1000_clock_in_i2c_bit(hw, &bit);
+ *data |= bit << i;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clock_out_i2c_byte - Clocks out one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte clocked out
+ *
+ * Clocks out one byte data via I2C data/clock
+ **/
+static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data)
+{
+ s32 status = E1000_SUCCESS;
+ s32 i;
+ u32 i2cctl;
+ bool bit = 0;
+
+ DEBUGFUNC("e1000_clock_out_i2c_byte");
+
+ for (i = 7; i >= 0; i--) {
+ bit = (data >> i) & 0x1;
+ status = e1000_clock_out_i2c_bit(hw, bit);
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ /* Release SDA line (set high) */
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ i2cctl |= E1000_I2C_DATA_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+/**
+ * e1000_get_i2c_ack - Polls for I2C ACK
+ * @hw: pointer to hardware structure
+ *
+ * Clocks in the acknowledge bit via I2C data/clock and verifies the slave ACKed
+ **/
+static s32 e1000_get_i2c_ack(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u32 i = 0;
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ u32 timeout = 10;
+ bool ack = true;
+
+ DEBUGFUNC("e1000_get_i2c_ack");
+
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ /* Wait until SCL returns high */
+ for (i = 0; i < timeout; i++) {
+ usec_delay(1);
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ if (i2cctl & E1000_I2C_CLK_IN)
+ break;
+ }
+ if (!(i2cctl & E1000_I2C_CLK_IN))
+ return E1000_ERR_I2C;
+
+ ack = e1000_get_i2c_data(&i2cctl);
+ if (ack) {
+ DEBUGOUT("I2C ack was not received.\n");
+ status = E1000_ERR_I2C;
+ }
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+
+ return status;
+}
+
+/**
+ * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: read data value
+ *
+ * Clocks in one bit via I2C data/clock
+ **/
+static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_clock_in_i2c_bit");
+
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ *data = e1000_get_i2c_data(&i2cctl);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: data value to write
+ *
+ * Clocks out one bit via I2C data/clock
+ **/
+static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data)
+{
+ s32 status;
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_clock_out_i2c_bit");
+
+ status = e1000_set_i2c_data(hw, &i2cctl, data);
+ if (status == E1000_SUCCESS) {
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us.
+ * This also takes care of the data hold time.
+ */
+ usec_delay(E1000_I2C_T_LOW);
+ } else {
+ status = E1000_ERR_I2C;
+ DEBUGOUT1("I2C data was not set to %X\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * e1000_raise_i2c_clk - Raises the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Raises the I2C clock line '0'->'1'
+ **/
+static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+{
+ DEBUGFUNC("e1000_raise_i2c_clk");
+
+ *i2cctl |= E1000_I2C_CLK_OUT;
+ *i2cctl &= ~E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* SCL rise time (1000ns) */
+ usec_delay(E1000_I2C_T_RISE);
+}
+
+/**
+ * e1000_lower_i2c_clk - Lowers the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Lowers the I2C clock line '1'->'0'
+ **/
+static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+{
+ DEBUGFUNC("e1000_lower_i2c_clk");
+
+ *i2cctl &= ~E1000_I2C_CLK_OUT;
+ *i2cctl &= ~E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* SCL fall time (300ns) */
+ usec_delay(E1000_I2C_T_FALL);
+}
+
+/**
+ * e1000_set_i2c_data - Sets the I2C data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ * @data: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ **/
+static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data)
+{
+ s32 status = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_set_i2c_data");
+
+ if (data)
+ *i2cctl |= E1000_I2C_DATA_OUT;
+ else
+ *i2cctl &= ~E1000_I2C_DATA_OUT;
+
+ *i2cctl &= ~E1000_I2C_DATA_OE_N;
+ *i2cctl |= E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+ usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA);
+
+ *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ if (data != e1000_get_i2c_data(i2cctl)) {
+ status = E1000_ERR_I2C;
+ DEBUGOUT1("Error - I2C data was not set to %X.\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * e1000_get_i2c_data - Reads the I2C SDA data bit
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ **/
+static bool e1000_get_i2c_data(u32 *i2cctl)
+{
+ bool data;
+
+ DEBUGFUNC("e1000_get_i2c_data");
+
+ if (*i2cctl & E1000_I2C_DATA_IN)
+ data = 1;
+ else
+ data = 0;
+
+ return data;
+}
+
+/**
+ * e1000_i2c_bus_clear - Clears the I2C bus
+ * @hw: pointer to hardware structure
+ *
+ * Clears the I2C bus by sending nine clock pulses.
+ * Used when data line is stuck low.
+ **/
+void e1000_i2c_bus_clear(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ u32 i;
+
+ DEBUGFUNC("e1000_i2c_bus_clear");
+
+ e1000_i2c_start(hw);
+
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+
+ for (i = 0; i < 9; i++) {
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Min high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Min low period of clock is 4.7us*/
+ usec_delay(E1000_I2C_T_LOW);
+ }
+
+ e1000_i2c_start(hw);
+
+ /* Put the i2c bus back to default state */
+ e1000_i2c_stop(hw);
+}
+
+static const u8 e1000_emc_temp_data[4] = {
+ E1000_EMC_INTERNAL_DATA,
+ E1000_EMC_DIODE1_DATA,
+ E1000_EMC_DIODE2_DATA,
+ E1000_EMC_DIODE3_DATA
+};
+static const u8 e1000_emc_therm_limit[4] = {
+ E1000_EMC_INTERNAL_THERM_LIMIT,
+ E1000_EMC_DIODE1_THERM_LIMIT,
+ E1000_EMC_DIODE2_THERM_LIMIT,
+ E1000_EMC_DIODE3_THERM_LIMIT
+};
+
+/**
+ * e1000_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 i;
+ struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ DEBUGFUNC("e1000_get_thermal_sensor_data_generic");
+
+ if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+ return E1000_NOT_IMPLEMENTED;
+
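+ /* Sensor 0 is the internal sensor read from THMJT; any external EMC
+ * sensors described by the NVM ETS section are read over I2C below.
+ */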
+ data->sensor[0].temp = (E1000_READ_REG(hw, E1000_THMJT) & 0xFF);
+
+ /* Return the internal sensor only if ETS is unsupported */
+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset);
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+ return status;
+
+ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg);
+ if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+
+ num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+ if (num_sensors > E1000_MAX_SENSORS)
+ num_sensors = E1000_MAX_SENSORS;
+
+ for (i = 1; i < num_sensors; i++) {
+ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor);
+ sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+ NVM_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+ NVM_ETS_DATA_LOC_SHIFT);
+
+ if (sensor_location != 0)
+ hw->phy.ops.read_i2c_byte(hw,
+ e1000_emc_temp_data[sensor_index],
+ E1000_I2C_THERMAL_SENSOR_ADDR,
+ &data->sensor[i].temp);
+ }
+ return status;
+}
+
+/**
+ * e1000_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Sets the thermal sensor thresholds according to the NVM map
+ * and save off the threshold and location values into mac.thermal_sensor_data
+ **/
+s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 low_thresh_delta;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 therm_limit;
+ u8 i;
+ struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ DEBUGFUNC("e1000_init_thermal_sensor_thresh_generic");
+
+ if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+ return E1000_NOT_IMPLEMENTED;
+
+ memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
+
+ data->sensor[0].location = 0x1;
+ data->sensor[0].caution_thresh =
+ (E1000_READ_REG(hw, E1000_THHIGHTC) & 0xFF);
+ data->sensor[0].max_op_thresh =
+ (E1000_READ_REG(hw, E1000_THLOWTC) & 0xFF);
+
+ /* Return the internal sensor only if ETS is unsupported */
+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset);
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+ return status;
+
+ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg);
+ if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+
+ low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
+ NVM_ETS_LTHRES_DELTA_SHIFT);
+ num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+
+ for (i = 1; i <= num_sensors; i++) {
+ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor);
+ sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+ NVM_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+ NVM_ETS_DATA_LOC_SHIFT);
+ therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
+
+ hw->phy.ops.write_i2c_byte(hw,
+ e1000_emc_therm_limit[sensor_index],
+ E1000_I2C_THERMAL_SENSOR_ADDR,
+ therm_limit);
+
+ if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
+ data->sensor[i].location = sensor_location;
+ data->sensor[i].caution_thresh = therm_limit;
+ data->sensor[i].max_op_thresh = therm_limit -
+ low_thresh_delta;
+ }
+ }
+ return status;
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h
new file mode 100755
index 00000000..1aec75ab
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h
@@ -0,0 +1,509 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_82575_H_
+#define _E1000_82575_H_
+
+#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
+/*
+ * Receive Address Register Count
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * These entries are also used for MAC-based filtering.
+ */
+/*
+ * For 82576, there is an additional set of RARs that begins at an offset
+ * separate from the first set of RARs.
+ */
+#define E1000_RAR_ENTRIES_82575 16
+#define E1000_RAR_ENTRIES_82576 24
+#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_I350 32
+#define E1000_SW_SYNCH_MB 0x00000100
+#define E1000_STAT_DEV_RST_SET 0x00100000
+#define E1000_CTRL_DEV_RST 0x20000000
+
+struct e1000_adv_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ u32 data;
+ struct {
+ u32 datalen:16; /* Data buffer length */
+ u32 rsvd:4;
+ u32 dtyp:4; /* Descriptor type */
+ u32 dcmd:8; /* Descriptor command */
+ } config;
+ } lower;
+ union {
+ u32 data;
+ struct {
+ u32 status:4; /* Descriptor status */
+ u32 idx:4;
+ u32 popts:6; /* Packet Options */
+ u32 paylen:18; /* Payload length */
+ } options;
+ } upper;
+};
+
+#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */
+#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */
+#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */
+#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */
+#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */
+#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */
+#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */
+#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */
+#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADV_DCMD_RS 0x8 /* Report Status */
+#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */
+#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */
+/* Extended Device Control */
+#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */
+
+struct e1000_adv_context_desc {
+ union {
+ u32 ip_config;
+ struct {
+ u32 iplen:9;
+ u32 maclen:7;
+ u32 vlan_tag:16;
+ } fields;
+ } ip_setup;
+ u32 seq_num;
+ union {
+ u64 l4_config;
+ struct {
+ u32 mkrloc:9;
+ u32 tucmd:11;
+ u32 dtyp:4;
+ u32 adv:8;
+ u32 rsvd:4;
+ u32 idx:4;
+ u32 l4len:8;
+ u32 mss:16;
+ } fields;
+ } l4_setup;
+};
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define E1000_SRRCTL_TIMESTAMP 0x40000000
+#define E1000_SRRCTL_DROP_EN 0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
+
+#define E1000_TX_HEAD_WB_ENABLE 0x1
+#define E1000_TX_SEQNUM_WB_ENABLE 0x2
+
+#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
+#define E1000_MRQC_ENABLE_VMDQ 0x00000003
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
+#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
+#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002
+
+#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8
+#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \
+ E1000_VMRCTL_MIRROR_PORT_SHIFT)
+#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0)
+#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1)
+#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
+
+#define E1000_EICR_TX_QUEUE ( \
+ E1000_EICR_TX_QUEUE0 | \
+ E1000_EICR_TX_QUEUE1 | \
+ E1000_EICR_TX_QUEUE2 | \
+ E1000_EICR_TX_QUEUE3)
+
+#define E1000_EICR_RX_QUEUE ( \
+ E1000_EICR_RX_QUEUE0 | \
+ E1000_EICR_RX_QUEUE1 | \
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
+
+#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
+#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
+
+#define EIMS_ENABLE_MASK ( \
+ E1000_EIMS_RX_QUEUE | \
+ E1000_EIMS_TX_QUEUE | \
+ E1000_EIMS_TCP_TIMER | \
+ E1000_EIMS_OTHER)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
+#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
+#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
+#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
+#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
+#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
+#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
+#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
+#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /*RSS type, Pkt type*/
+ /* Split Header, header buffer len */
+ __le16 hdr_info;
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F
+#define E1000_RXDADV_RSSTYPE_SHIFT 12
+#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
+#define E1000_RXDADV_SPLITHEADER_EN 0x00001000
+#define E1000_RXDADV_SPH 0x8000
+#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define E1000_RXDADV_ERR_HBO 0x00800000
+
+/* RSS Hash results */
+#define E1000_RXDADV_RSSTYPE_NONE 0x00000000
+#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor */
+#define E1000_RXDADV_PKTTYPE_NONE 0x00000000
+#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+
+#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
+
+/* LinkSec results */
+/* Security Processing bit Indication */
+#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
+#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
+#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
+#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
+
+#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */
+#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */
+#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */
+#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st & Last TSO-full iSCSI PDU*/
+#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800
+#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
+#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+/* IPSec Encrypt Enable for ESP */
+#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+/* Req requires Markers and CRC */
+#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000
+#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+/* Adv ctxt IPSec SA IDX mask */
+#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
+/* Adv ctxt IPSec ESP len mask */
+#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
+
+/* Additional Transmit Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */
+/* Tx Queue Arbitration Priority 0=low, 1=high */
+#define E1000_TXDCTL_PRIORITY 0x08000000
+
+/* Additional Receive Descriptor Control definitions */
+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */
+
+/* Direct Cache Access (DCA) definitions */
+#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
+#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
+
+/* Additional interrupt register bit definitions */
+#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
+#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
+#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
+
+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE (1 << 26)
+#define E1000_ETQF_IMM_INT (1 << 29)
+#define E1000_ETQF_1588 (1 << 30)
+#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
+/*
+ * ETQF filter list: one static filter per filter consumer. This is
+ * to avoid filter collisions later. Add new filters
+ * here!!
+ *
+ * Current filters:
+ * EAPOL 802.1x (0x888e): Filter 0
+ */
+#define E1000_ETQF_FILTER_EAPOL 0
+
+#define E1000_FTQF_VF_BP 0x00008000
+#define E1000_FTQF_1588_TIME_STAMP 0x08000000
+#define E1000_FTQF_MASK 0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP 0x10000000
+#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
+#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575 0x0400
+#define MAX_NUM_VFS 7
+
+#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */
+#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+#define E1000_DTXSWC_LLE_SHIFT 16
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+
+/* Easy defines for setting the default pool; would normally be left at zero */
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+/* Other useful VMD_CTL register defines */
+#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+
+/* Per VM Offload register setup */
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */
+#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous enable */
+#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */
+#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */
+#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */
+
+#define E1000_VLVF_ARRAY_SIZE 32
+#define E1000_VLVF_VLANID_MASK 0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT 12
+#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN 0x00100000
+#define E1000_VLVF_VLANID_ENABLE 0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define E1000_RPLOLR_STRVLAN 0x40000000
+#define E1000_RPLOLR_STRCRC 0x80000000
+
+#define E1000_TCTL_EXT_COLD 0x000FFC00
+#define E1000_TCTL_EXT_COLD_SHIFT 10
+
+#define E1000_DTXCTL_8023LL 0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN 0x0020
+#define E1000_DTXCTL_SPOOF_INT 0x0040
+
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
+
+#define ALL_QUEUES 0xFFFF
+
+/* Rx packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
+
+u16 e1000_rxpbs_adjust_82580(u32 data);
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
+s32 e1000_set_eee_i350(struct e1000_hw *);
+s32 e1000_set_eee_i354(struct e1000_hw *);
+s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
+#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define E1000_EMC_INTERNAL_DATA 0x00
+#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20
+#define E1000_EMC_DIODE1_DATA 0x01
+#define E1000_EMC_DIODE1_THERM_LIMIT 0x19
+#define E1000_EMC_DIODE2_DATA 0x23
+#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A
+#define E1000_EMC_DIODE3_DATA 0x2A
+#define E1000_EMC_DIODE3_THERM_LIMIT 0x30
+
+s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw);
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define E1000_I2C_T_HD_STA 4
+#define E1000_I2C_T_LOW 5
+#define E1000_I2C_T_HIGH 4
+#define E1000_I2C_T_SU_STA 5
+#define E1000_I2C_T_HD_DATA 5
+#define E1000_I2C_T_SU_DATA 1
+#define E1000_I2C_T_RISE 1
+#define E1000_I2C_T_FALL 1
+#define E1000_I2C_T_SU_STO 4
+#define E1000_I2C_T_BUF 5
+
+s32 e1000_set_i2c_bb(struct e1000_hw *hw);
+s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+void e1000_i2c_bus_clear(struct e1000_hw *hw);
+#endif /* _E1000_82575_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c
new file mode 100755
index 00000000..b1d748fe
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c
@@ -0,0 +1,1160 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+
+/**
+ * e1000_init_mac_params - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the MAC
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mac_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->mac.ops.init_params) {
+ ret_val = hw->mac.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("MAC Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("mac.init_mac_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the NVM
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_nvm_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->nvm.ops.init_params) {
+ ret_val = hw->nvm.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("NVM Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("nvm.init_nvm_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_phy_params - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the PHY
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_phy_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->phy.ops.init_params) {
+ ret_val = hw->phy.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("PHY Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("phy.init_phy_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_params - Initialize mailbox function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the mailbox
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mbx_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->mbx.ops.init_params) {
+ ret_val = hw->mbx.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("Mailbox Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("mbx.init_mbx_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * device ID stored in the hw structure.
+ * MUST BE FIRST FUNCTION CALLED (explicitly or through
+ * e1000_setup_init_funcs()).
+ **/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_set_mac_type");
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ mac->type = e1000_82575;
+ break;
+ case E1000_DEV_ID_82576:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_NS_SERDES:
+ case E1000_DEV_ID_82576_SERDES_QUAD:
+ mac->type = e1000_82576;
+ break;
+ case E1000_DEV_ID_82580_COPPER:
+ case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_SERDES:
+ case E1000_DEV_ID_82580_SGMII:
+ case E1000_DEV_ID_82580_COPPER_DUAL:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ mac->type = e1000_82580;
+ break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ case E1000_DEV_ID_I350_DA4:
+ mac->type = e1000_i350;
+ break;
+ case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+ case E1000_DEV_ID_I210_SERDES_FLASHLESS:
+ case E1000_DEV_ID_I210_COPPER:
+ case E1000_DEV_ID_I210_COPPER_OEM1:
+ case E1000_DEV_ID_I210_COPPER_IT:
+ case E1000_DEV_ID_I210_FIBER:
+ case E1000_DEV_ID_I210_SERDES:
+ case E1000_DEV_ID_I210_SGMII:
+ mac->type = e1000_i210;
+ break;
+ case E1000_DEV_ID_I211_COPPER:
+ mac->type = e1000_i211;
+ break;
+
+ case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
+ case E1000_DEV_ID_I354_SGMII:
+ case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
+ mac->type = e1000_i354;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ ret_val = -E1000_ERR_MAC_INIT;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_init_funcs - Initializes function pointers
+ * @hw: pointer to the HW structure
+ * @init_device: true will initialize the rest of the function pointers
+ * getting the device ready for use. false will only set
+ * MAC type and the function pointers for the other init
+ * functions. Passing false will not generate any hardware
+ * reads or writes.
+ *
+ * This function must be called by a driver in order to use the rest
+ * of the 'shared' code files. Called by drivers only.
+ **/
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
+{
+ s32 ret_val;
+
+ /* Can't do much good without knowing the MAC type. */
+ ret_val = e1000_set_mac_type(hw);
+ if (ret_val) {
+ DEBUGOUT("ERROR: MAC type could not be set properly.\n");
+ goto out;
+ }
+
+ if (!hw->hw_addr) {
+ DEBUGOUT("ERROR: Registers not mapped\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * Init function pointers to generic implementations. We do this first
+ * allowing a driver module to override it afterward.
+ */
+ e1000_init_mac_ops_generic(hw);
+ e1000_init_phy_ops_generic(hw);
+ e1000_init_nvm_ops_generic(hw);
+ e1000_init_mbx_ops_generic(hw);
+
+ /*
+ * Set up the init function pointers. These are functions within the
+ * adapter family file that set up function pointers for the rest of
+ * the functions in that family.
+ */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ e1000_init_function_pointers_82575(hw);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ e1000_init_function_pointers_i210(hw);
+ break;
+ default:
+ DEBUGOUT("Hardware not supported\n");
+ ret_val = -E1000_ERR_CONFIG;
+ break;
+ }
+
+ /*
+ * Initialize the rest of the function pointers. These require some
+ * register reads/writes in some cases.
+ */
+ if (!(ret_val) && init_device) {
+ ret_val = e1000_init_mac_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_nvm_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_phy_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_mbx_params(hw);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
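+
+/*
+ * Illustrative sketch, not part of the original sources: a consumer of this
+ * shared code would typically fill in the device ID and register mapping
+ * before handing control to e1000_setup_init_funcs(), for example
+ *
+ *   hw->device_id = pci_device_id;    (placeholder for the probed PCI ID)
+ *   hw->hw_addr = mapped_bar0;        (placeholder for the mapped registers)
+ *   if (e1000_setup_init_funcs(hw, true))
+ *           goto err;                 (err is a placeholder label)
+ *
+ * Passing true also runs the per-family init_params callbacks, which may
+ * touch hardware registers.
+ */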
+
+/**
+ * e1000_get_bus_info - Obtain bus information for adapter
+ * @hw: pointer to the HW structure
+ *
+ * This will obtain information about the HW bus to which the
+ * adapter is attached and stores it in the hw structure. This is a
+ * function pointer entry point called by drivers.
+ **/
+s32 e1000_get_bus_info(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.get_bus_info)
+ return hw->mac.ops.get_bus_info(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * This clears the VLAN filter table on the adapter. This is a function
+ * pointer entry point called by drivers.
+ **/
+void e1000_clear_vfta(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.clear_vfta)
+ hw->mac.ops.clear_vfta(hw);
+}
+
+/**
+ * e1000_write_vfta - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: the 32-bit offset at which to write the value.
+ * @value: the 32-bit value to write at location offset.
+ *
+ * This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ * table. This is a function pointer entry point called by drivers.
+ **/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ if (hw->mac.ops.write_vfta)
+ hw->mac.ops.write_vfta(hw, offset, value);
+}
+
+/**
+ * e1000_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates the Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count)
+{
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
+ mc_addr_count);
+}
+
+/**
+ * e1000_force_mac_fc - Force MAC flow control
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Currently no func pointer exists
+ * and all implementations are handled in the generic version of this
+ * function.
+ **/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+ return e1000_force_mac_fc_generic(hw);
+}
+
+/**
+ * e1000_check_for_link - Check/Store link connection
+ * @hw: pointer to the HW structure
+ *
+ * This checks the link condition of the adapter and stores the
+ * results in the hw->mac structure. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.check_for_link)
+ return hw->mac.ops.check_for_link(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_check_mng_mode - Check management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has manageability enabled.
+ * This is a function pointer entry point called by drivers.
+ **/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.check_mng_mode)
+ return hw->mac.ops.check_mng_mode(hw);
+
+ return false;
+}
+
+/**
+ * e1000_mng_write_dhcp_info - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+ return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
+}
+
+/**
+ * e1000_reset_hw - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.reset_hw)
+ return hw->mac.ops.reset_hw(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_init_hw - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation. This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.init_hw)
+ return hw->mac.ops.init_hw(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_setup_link - Configures link and flow control
+ * @hw: pointer to the HW structure
+ *
+ * This configures link and flow control settings for the adapter. This
+ * is a function pointer entry point called by drivers. While modules can
+ * also call this, they probably call their own version of this function.
+ **/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.setup_link)
+ return hw->mac.ops.setup_link(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_get_speed_and_duplex - Returns current speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to a 16-bit value to store the speed
+ * @duplex: pointer to a 16-bit value to store the duplex.
+ *
+ * This returns the speed and duplex of the adapter in the two 'out'
+ * variables passed in. This is a function pointer entry point called
+ * by drivers.
+ **/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+ if (hw->mac.ops.get_link_up_info)
+ return hw->mac.ops.get_link_up_info(hw, speed, duplex);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_setup_led - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.setup_led)
+ return hw->mac.ops.setup_led(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cleanup_led - Restores SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This restores the SW controllable LED to the value saved off by
+ * e1000_setup_led. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.cleanup_led)
+ return hw->mac.ops.cleanup_led(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_blink_led - Blink SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This starts the adapter LED blinking. Request the LED to be setup first
+ * and cleaned up after. This is a function pointer entry point called by
+ * drivers.
+ **/
+s32 e1000_blink_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.blink_led)
+ return hw->mac.ops.blink_led(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_id_led_init - store LED configurations in SW
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the LED config in SW. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.id_led_init)
+ return hw->mac.ops.id_led_init(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on - Turn on SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED on. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.led_on)
+ return hw->mac.ops.led_on(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off - Turn off SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED off. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.led_off)
+ return hw->mac.ops.led_off(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_adaptive - Reset adaptive IFS
+ * @hw: pointer to the HW structure
+ *
+ * Resets the adaptive IFS. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+ e1000_reset_adaptive_generic(hw);
+}
+
+/**
+ * e1000_update_adaptive - Update adaptive IFS
+ * @hw: pointer to the HW structure
+ *
+ * Updates adapter IFS. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+ e1000_update_adaptive_generic(hw);
+}
+
+/**
+ * e1000_disable_pcie_master - Disable PCI-Express master access
+ * @hw: pointer to the HW structure
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests. Currently no func pointer exists and all implementations are
+ * handled in the generic version of this function.
+ **/
+s32 e1000_disable_pcie_master(struct e1000_hw *hw)
+{
+ return e1000_disable_pcie_master_generic(hw);
+}
+
+/**
+ * e1000_config_collision_dist - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.config_collision_dist)
+ hw->mac.ops.config_collision_dist(hw);
+}
+
+/**
+ * e1000_rar_set - Sets a receive address register
+ * @hw: pointer to the HW structure
+ * @addr: address to set the RAR to
+ * @index: the RAR to set
+ *
+ * Sets a Receive Address Register (RAR) to the specified address.
+ **/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ if (hw->mac.ops.rar_set)
+ hw->mac.ops.rar_set(hw, addr, index);
+}
+
+/**
+ * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
+ * @hw: pointer to the HW structure
+ *
+ * Ensures that the MDI/MDIX SW state is valid.
+ **/
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.validate_mdi_setting)
+ return hw->mac.ops.validate_mdi_setting(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_hash_mc_addr - Determines address location in multicast table
+ * @hw: pointer to the HW structure
+ * @mc_addr: Multicast address to hash.
+ *
+ * This hashes an address to determine its location in the multicast
+ * table. Currently no func pointer exists and all implementations
+ * are handled in the generic version of this function.
+ **/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+ return e1000_hash_mc_addr_generic(hw, mc_addr);
+}
+
+/**
+ * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+ return e1000_enable_tx_pkt_filtering_generic(hw);
+}
+
+/**
+ * e1000_mng_host_if_write - Writes to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the given offset on the host
+ * interface. It also handles alignment so the writes are done in the most
+ * efficient way, and accumulates the sum of the data in the *sum parameter.
+ **/
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum)
+{
+ return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum);
+}
+
+/**
+ * e1000_mng_write_cmd_header - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after performing the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ return e1000_mng_write_cmd_header_generic(hw, hdr);
+}
+
+/**
+ * e1000_mng_enable_host_if - Checks host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command has completed. It busy waits
+ * if the previous command has not completed.
+ **/
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+ return e1000_mng_enable_host_if_generic(hw);
+}
+
+/**
+ * e1000_check_reset_block - Verifies PHY can be reset
+ * @hw: pointer to the HW structure
+ *
+ * Checks if the PHY is in a state that can be reset or if manageability
+ * has it tied up. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.check_reset_block)
+ return hw->phy.ops.check_reset_block(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_phy_reg - Reads PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to read
+ * @data: the buffer to store the 16-bit read.
+ *
+ * Reads the PHY register and returns the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ if (hw->phy.ops.read_reg)
+ return hw->phy.ops.read_reg(hw, offset, data);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg - Writes PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes the PHY register at offset with the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ if (hw->phy.ops.write_reg)
+ return hw->phy.ops.write_reg(hw, offset, data);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_release_phy - Generic release PHY
+ * @hw: pointer to the HW structure
+ *
+ * Return if silicon family does not require a semaphore when accessing the
+ * PHY.
+ **/
+void e1000_release_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.release)
+ hw->phy.ops.release(hw);
+}
+
+/**
+ * e1000_acquire_phy - Generic acquire PHY
+ * @hw: pointer to the HW structure
+ *
+ * Return success if silicon family does not require a semaphore when
+ * accessing the PHY.
+ **/
+s32 e1000_acquire_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.acquire)
+ return hw->phy.ops.acquire(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_kmrn_reg - Reads register using Kumeran interface
+ * @hw: pointer to the HW structure
+ * @offset: the register to read
+ * @data: the location to store the 16-bit value read.
+ *
+ * Reads a register out of the Kumeran interface. Currently no func pointer
+ * exists and all implementations are handled in the generic version of
+ * this function.
+ **/
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return e1000_read_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ * e1000_write_kmrn_reg - Writes register using Kumeran interface
+ * @hw: pointer to the HW structure
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes a register to the Kumeran interface. Currently no func pointer
+ * exists and all implementations are handled in the generic version of
+ * this function.
+ **/
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return e1000_write_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ * e1000_get_cable_length - Retrieves cable length estimation
+ * @hw: pointer to the HW structure
+ *
+ * This function estimates the cable length and stores the results in
+ * hw->phy.min_length and hw->phy.max_length. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_cable_length)
+ return hw->phy.ops.get_cable_length(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_info - Retrieves PHY information from registers
+ * @hw: pointer to the HW structure
+ *
+ * This function gets some information from various PHY registers and
+ * populates hw->phy values with it. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_info)
+ return hw->phy.ops.get_info(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_hw_reset - Hard PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a hard PHY reset. This is a function pointer entry point called
+ * by drivers.
+ **/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.reset)
+ return hw->phy.ops.reset(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_commit - Soft PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a soft PHY reset on those that apply. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_phy_commit(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.commit)
+ return hw->phy.ops.commit(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d0_lplu_state - Sets low power link up state for D0
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D0
+ * and SmartSpeed is disabled when active is true, else clear lplu for D0
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+ if (hw->phy.ops.set_d0_lplu_state)
+ return hw->phy.ops.set_d0_lplu_state(hw, active);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+ if (hw->phy.ops.set_d3_lplu_state)
+ return hw->phy.ops.set_d3_lplu_state(hw, active);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr - Reads MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MAC address out of the adapter and stores it in the HW structure.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_read_pba_string - Read device part number string
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ * e1000_read_pba_length - Read device part number string length
+ * @hw: pointer to the HW structure
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number length from the EEPROM and
+ * stores the value in pba_num_size.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
+{
+ return e1000_read_pba_length_generic(hw, pba_num_size);
+}
+
+/**
+ * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
+ * @hw: pointer to the HW structure
+ *
+ * Validates the NVM checksum is correct. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.validate)
+ return hw->nvm.ops.validate(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the NVM checksum. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.update)
+ return hw->nvm.ops.update(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_reload_nvm - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+void e1000_reload_nvm(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.reload)
+ hw->nvm.ops.reload(hw);
+}
+
+/**
+ * e1000_read_nvm - Reads NVM (EEPROM)
+ * @hw: pointer to the HW structure
+ * @offset: the word offset to read
+ * @words: number of 16-bit words to read
+ * @data: pointer to the properly sized buffer for the data.
+ *
+ * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ if (hw->nvm.ops.read)
+ return hw->nvm.ops.read(hw, offset, words, data);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_write_nvm - Writes to NVM (EEPROM)
+ * @hw: pointer to the HW structure
+ * @offset: the word offset to write
+ * @words: number of 16-bit words to write
+ * @data: pointer to the properly sized buffer for the data.
+ *
+ * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ if (hw->nvm.ops.write)
+ return hw->nvm.ops.write(hw, offset, words, data);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_8bit_ctrl_reg - Writes 8bit Control register
+ * @hw: pointer to the HW structure
+ * @reg: 32bit register offset
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes the 8-bit control register at offset with the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+ u8 data)
+{
+ return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
+}
+
+/**
+ * e1000_power_up_phy - Restores link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may be powered down to save power, to turn off the link when the
+ * driver is unloaded, or when wake on LAN is not enabled (among others).
+ **/
+void e1000_power_up_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.power_up)
+ hw->phy.ops.power_up(hw);
+
+ e1000_setup_link(hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down PHY
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may be powered down to save power, to turn off the link when the
+ * driver is unloaded, or when wake on LAN is not enabled (among others).
+ **/
+void e1000_power_down_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.power_down)
+ hw->phy.ops.power_down(hw);
+}
+
+/**
+ * e1000_power_up_fiber_serdes_link - Power up serdes link
+ * @hw: pointer to the HW structure
+ *
+ * Power on the optics and PCS.
+ **/
+void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.power_up_serdes)
+ hw->mac.ops.power_up_serdes(hw);
+}
+
+/**
+ * e1000_shutdown_fiber_serdes_link - Remove link during power down
+ * @hw: pointer to the HW structure
+ *
+ * Shutdown the optics and PCS on driver unload.
+ **/
+void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.shutdown_serdes)
+ hw->mac.ops.shutdown_serdes(hw);
+}
+
+/**
+ * e1000_get_thermal_sensor_data - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.get_thermal_sensor_data)
+ return hw->mac.ops.get_thermal_sensor_data(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_thermal_sensor_thresh - Sets thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Sets the thermal sensor thresholds according to the NVM map
+ **/
+s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.init_thermal_sensor_thresh)
+ return hw->mac.ops.init_thermal_sensor_thresh(hw);
+
+ return E1000_SUCCESS;
+}
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h
new file mode 100755
index 00000000..b21294ec
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h
@@ -0,0 +1,157 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_API_H_
+#define _E1000_API_H_
+
+#include "e1000_hw.h"
+
+extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
+extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
+extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_i210(struct e1000_hw *hw);
+
+s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr);
+s32 e1000_set_mac_type(struct e1000_hw *hw);
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
+s32 e1000_init_mac_params(struct e1000_hw *hw);
+s32 e1000_init_nvm_params(struct e1000_hw *hw);
+s32 e1000_init_phy_params(struct e1000_hw *hw);
+s32 e1000_init_mbx_params(struct e1000_hw *hw);
+s32 e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_clear_vfta(struct e1000_hw *hw);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+s32 e1000_force_mac_fc(struct e1000_hw *hw);
+s32 e1000_check_for_link(struct e1000_hw *hw);
+s32 e1000_reset_hw(struct e1000_hw *hw);
+s32 e1000_init_hw(struct e1000_hw *hw);
+s32 e1000_setup_link(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_disable_pcie_master(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count);
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_check_reset_block(struct e1000_hw *hw);
+s32 e1000_blink_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_id_led_init(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+s32 e1000_get_cable_length(struct e1000_hw *hw);
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+ u8 data);
+s32 e1000_get_phy_info(struct e1000_hw *hw);
+void e1000_release_phy(struct e1000_hw *hw);
+s32 e1000_acquire_phy(struct e1000_hw *hw);
+s32 e1000_phy_hw_reset(struct e1000_hw *hw);
+s32 e1000_phy_commit(struct e1000_hw *hw);
+void e1000_power_up_phy(struct e1000_hw *hw);
+void e1000_power_down_phy(struct e1000_hw *hw);
+s32 e1000_read_mac_addr(struct e1000_hw *hw);
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
+void e1000_reload_nvm(struct e1000_hw *hw);
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum);
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr);
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw);
+s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw);
+
+
+
+/*
+ * TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ * adapter = a pointer to struct e1000_hw
+ * status = the 8 bit status field of the Rx descriptor with EOP set
+ * errors = the 8 bit error field of the Rx descriptor with EOP set
+ * length = the sum of all the length fields of the Rx descriptors that
+ * make up the current frame
+ * last_byte = the last byte of the frame DMAed by the hardware
+ * max_frame_size = the maximum frame length we want to accept.
+ * min_frame_size = the minimum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ * ...
+ * if (TBI_ACCEPT) {
+ * accept_frame = true;
+ * e1000_tbi_adjust_stats(adapter, MacAddress);
+ * frame_length--;
+ * } else {
+ * accept_frame = false;
+ * }
+ * ...
+ */
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION 0x0F
+
+#define TBI_ACCEPT(a, status, errors, length, last_byte, \
+ min_frame_size, max_frame_size) \
+ (e1000_tbi_sbp_enabled_82543(a) && \
+ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+ ((last_byte) == CARRIER_EXTENSION) && \
+ (((status) & E1000_RXD_STAT_VP) ? \
+ (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
+ ((length) <= (max_frame_size + 1))) : \
+ (((length) > min_frame_size) && \
+ ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
+
+#ifndef E1000_MAX
+#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
+#ifndef E1000_DIVIDE_ROUND_UP
+#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
+#endif
+#endif /* _E1000_API_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h
new file mode 100755
index 00000000..63b228c5
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h
@@ -0,0 +1,1380 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC E1000_WUFC_LNKC
+#define E1000_WUS_MAG E1000_WUFC_MAG
+#define E1000_WUS_EX E1000_WUFC_EX
+#define E1000_WUS_MC E1000_WUFC_MC
+#define E1000_WUS_BC E1000_WUFC_BC
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */
+#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+/* Physical Func Reset Done Indication */
+#define E1000_CTRL_EXT_PFRSTD 0x00004000
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+/* Offset of the link mode field in Ctrl Ext register */
+#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_IRCA 0x00000001
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
+#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_I2CCMD_REG_ADDR_SHIFT 16
+#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
+#define E1000_I2CCMD_OPCODE_READ 0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
+#define E1000_I2CCMD_READY 0x20000000
+#define E1000_I2CCMD_ERROR 0x80000000
+#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a))
+#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a))
+#define E1000_MAX_SGMII_PHY_REG_ADDR 255
+#define E1000_I2CCMD_PHY_TIMEOUT 200
+#define E1000_IVAR_VALID 0x80
+#define E1000_GPIE_NSICR 0x00000001
+#define E1000_GPIE_MSIX_MODE 0x00000010
+#define E1000_GPIE_EIAME 0x40000000
+#define E1000_GPIE_PBA 0x80000000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
+#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
+#define E1000_RXDEXT_STATERR_LB 0x00040000
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
+
+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST 0x00200000
+
+#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
+
+/* Receive Control */
+#define E1000_RCTL_RST 0x00000001 /* Software reset */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ * E1000_PSRCTL_BSIZE0_MASK) |
+ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ * E1000_PSRCTL_BSIZE1_MASK) |
+ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ * E1000_PSRCTL_BSIZE2_MASK) |
+ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ * E1000_PSRCTL_BSIZE3_MASK));
+ * where value0 = [128..16256], default=256
+ * value1 = [1024..64512], default=4096
+ * value2 = [0..64512], default=4096
+ * value3 = [0..64512], default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM 0x01
+#define E1000_SWFW_PHY0_SM 0x02
+#define E1000_SWFW_PHY1_SM 0x04
+#define E1000_SWFW_CSR_SM 0x08
+#define E1000_SWFW_PHY2_SM 0x20
+#define E1000_SWFW_PHY3_SM 0x40
+#define E1000_SWFW_SW_MNG_SM 0x400
+
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex. 0=half; 1=full */
+#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master reqs */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
+
+
+#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_CONNSW_PHYSD 0x400
+#define E1000_CONNSW_PHY_PDN 0x800
+#define E1000_CONNSW_SERDESD 0x200
+#define E1000_CONNSW_AUTOSENSE_CONF 0x2
+#define E1000_CONNSW_AUTOSENSE_EN 0x1
+#define E1000_PCS_CFG_PCS_EN 8
+#define E1000_PCS_LCTL_FLV_LINK_UP 1
+#define E1000_PCS_LCTL_FSV_10 0
+#define E1000_PCS_LCTL_FSV_100 2
+#define E1000_PCS_LCTL_FSV_1000 4
+#define E1000_PCS_LCTL_FDV_FULL 8
+#define E1000_PCS_LCTL_FSD 0x10
+#define E1000_PCS_LCTL_FORCE_LINK 0x20
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+#define E1000_PCS_LCTL_AN_ENABLE 0x10000
+#define E1000_PCS_LCTL_AN_RESTART 0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
+#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
+
+#define E1000_PCS_LSTS_LINK_OK 1
+#define E1000_PCS_LSTS_SPEED_100 2
+#define E1000_PCS_LSTS_SPEED_1000 4
+#define E1000_PCS_LSTS_DUPLEX_FULL 8
+#define E1000_PCS_LSTS_SYNK_OK 0x10
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */
+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
+#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
+#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_IVRT 0x00000040
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
+
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+
+/* Transmit Control */
+#define E1000_TCTL_EN 0x00000002 /* enable Tx */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
+
+/* Transmit Arbitration Count */
+#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
+#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
+#define E1000_RFCTL_ACK_DIS 0x00001000
+#define E1000_RFCTL_EXTEN 0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+#define E1000_RFCTL_LEF 0x00040000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLD_SHIFT 12
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82543_TIPG_IPGT_FIBER 9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK 0x000003FF
+
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT 10
+
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT 20
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+
+#define ETHERNET_FCS_SIZE 4
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
+
+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008 /* 8KB */
+#define E1000_PBA_10K 0x000A /* 10KB */
+#define E1000_PBA_12K 0x000C /* 12KB */
+#define E1000_PBA_14K 0x000E /* 14KB */
+#define E1000_PBA_16K 0x0010 /* 16KB */
+#define E1000_PBA_18K 0x0012
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_26K 0x001A
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_35K 0x0023
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030 /* 48KB */
+#define E1000_PBA_64K 0x0040 /* 64KB */
+
+#define E1000_PBA_RXA_MASK 0xFFFF
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+#define IFS_MAX 80
+#define IFS_MIN 40
+#define IFS_RATIO 4
+#define IFS_STEP 10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* Rx overrun */
+#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
+#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW 0x00008000
+#define E1000_ICR_MNG 0x00040000 /* Manageability event */
+#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
+#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
+/* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED 0x80000000
+#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+#define E1000_ICR_FER 0x00400000 /* Fatal Error */
+
+#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/
+#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */
+
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */
+#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */
+#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */
+#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
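+
+/* Sketch of typical use (not a register definition): a driver enables this
+ * default interrupt set by writing it to the IMS register, e.g. via the
+ * shared-code register accessor assumed to be provided by e1000_osdep.h
+ * and e1000_regs.h:
+ *
+ *	E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
+ */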
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */
+#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
+#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
+#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */
+
+#define E1000_IMS_THS E1000_ICR_THS /* ICR.THS: Thermal Sensor Event */
+#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+
+/* Extended Interrupt Cause Set */
+#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
+#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+#define E1000_EITR_INTERVAL 0x00007FFC
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of descriptors still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots. However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+#define E1000_RAH_QUEUE_MASK_82575 0x000C0000
+#define E1000_RAH_POOL_1 0x00040000
+
+/* Error Codes */
+#define E1000_SUCCESS 0
+#define E1000_ERR_NVM 1
+#define E1000_ERR_PHY 2
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_PARAM 4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET 9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET 12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_MBX 15
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
+#define E1000_ERR_I2C 19
+#define E1000_ERR_INVM_VALUE_NOT_FOUND 20
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT 50
+#define COPPER_LINK_UP_LIMIT 10
+#define PHY_AUTO_NEG_LIMIT 45
+#define PHY_FORCE_LIMIT 20
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT 800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT 100
+/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT 10
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT 10
+
+/* Flow Control */
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
+#define E1000_RXCW_C 0x20000000 /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
+
+#define E1000_TSICR_TXTS 0x00000002
+#define E1000_TSIM_TXTS 0x00000002
+/* TUPLE Filtering Configuration */
+#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */
+#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */
+#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */
+/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_TCP 0x0
+/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_UDP 0x1
+/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_SCTP 0x2
+#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */
+#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shift */
+#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */
+#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */
+#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
+#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
+#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
+#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
+
+#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK 0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT 21
+
+#define E1000_MEDIA_PORT_COPPER 1
+#define E1000_MEDIA_PORT_OTHER 2
+#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
+#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1 0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
+#define E1000_M88E1112_PAGE_ADDR 0x16
+#define E1000_M88E1112_STATUS 0x01
+
+#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */
+#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */
+#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */
+#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */
+
+/* I350 EEE defines */
+#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */
+#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */
+#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */
+/* EEE status */
+#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
+#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */
+#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */
+#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
+#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
+#define E1000_M88E1543_EEE_CTRL_1 0x0
+#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define E1000_EEE_ADV_DEV_I354 7
+#define E1000_EEE_ADV_ADDR_I354 60
+#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+#define E1000_PCS_STATUS_DEV_I354 3
+#define E1000_PCS_STATUS_ADDR_I354 1
+#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
+#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
+#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
+#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
+#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
+#define E1000_GCR_TXD_NO_SNOOP 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define E1000_GCR_CAP_VER2 0x00040000
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
+ E1000_GCR_RXDSCW_NO_SNOOP | \
+ E1000_GCR_RXDSCR_NO_SNOOP | \
+ E1000_GCR_TXD_NO_SNOOP | \
+ E1000_GCR_TXDSCW_NO_SNOOP | \
+ E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
+
+/* mPHY address control and data registers */
+#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */
+#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
+#define E1000_MPHY_DATA 0x0E10 /* Data Register */
+
+/* AFE CSR Offset for PCS CLK */
+#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004
+/* Override for near end digital loopback. */
+#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
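+
+/* Sketch: forcing 100 Mb/s full duplex through the PHY control register
+ * combines the speed-select LSB with the duplex bit and leaves
+ * MII_CR_AUTO_NEG_EN clear, e.g.:
+ *
+ *	u16 mii_cr = MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX;
+ */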
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* New page received */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Next Page capable */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP Next Page capable */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel detection fault */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+/* 1=Repeater/switch device port 0=DTE device */
+#define CR_1000T_REPEATER_DTE 0x0400
+/* 1=Configure PHY as Master 0=Configure PHY as Slave */
+#define CR_1000T_MS_VALUE 0x0800
+/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
+#define CR_1000T_MS_ENABLE 0x1000
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */
+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */
+#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+
+#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
+
+/* NVM Control */
+#define E1000_EECD_SK 0x00000001 /* NVM Clock */
+#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* NVM Data In */
+#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
+#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
+#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */
+#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */
+#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */
+#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT 11
+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
+#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
+#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */
+#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
+#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */
+#define E1000_FLUDONE_ATTEMPTS 20000
+#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
+#define E1000_I210_FIFO_SEL_RX 0x00
+#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i))
+#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
+#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
+#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
+
+#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK 0x7FFF
+/* Firmware code revision field word offset*/
+#define E1000_I210_FW_VER_OFFSET 328
+
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START 1 /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES 2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT 0x0003
+#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_VERSION 0x0005
+#define E1000_I210_NVM_FW_MODULE_PTR 0x0010
+#define E1000_I350_NVM_FW_MODULE_PTR 0x0051
+#define NVM_FUTURE_INIT_WORD1 0x0019
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_ETRACK_HIWORD 0x0043
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_IMAGE_ID_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
+#define NVM_ETRACK_VALID 0x8000
+#define NVM_NEW_DEC_MASK 0x0F00
+#define NVM_HEX_CONV 16
+#define NVM_HEX_TENS 10
+
+/* FW version defines */
+/* Offset of "Loader patch ptr" in Firmware Header */
+#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01
+/* Patch generation hour & minutes */
+#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04
+/* Patch generation month & day */
+#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05
+/* Patch generation year */
+#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06
+/* Patch major & minor numbers */
+#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07
+
+#define NVM_MAC_ADDR 0x0000
+#define NVM_SUB_DEV_ID 0x000B
+#define NVM_SUB_VEN_ID 0x000C
+#define NVM_DEV_ID 0x000D
+#define NVM_VEN_ID 0x000E
+#define NVM_INIT_CTRL_2 0x000F
+#define NVM_INIT_CTRL_4 0x0013
+#define NVM_LED_1_CFG 0x001C
+#define NVM_LED_0_2_CFG 0x001F
+
+#define NVM_COMPAT_VALID_CSUM 0x0001
+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
+
+#define NVM_ETS_CFG 0x003E
+#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
+#define NVM_ETS_LTHRES_DELTA_SHIFT 6
+#define NVM_ETS_TYPE_MASK 0x0038
+#define NVM_ETS_TYPE_SHIFT 3
+#define NVM_ETS_TYPE_EMC 0x000
+#define NVM_ETS_NUM_SENSORS_MASK 0x0007
+#define NVM_ETS_DATA_LOC_MASK 0x3C00
+#define NVM_ETS_DATA_LOC_SHIFT 10
+#define NVM_ETS_DATA_INDEX_MASK 0x0300
+#define NVM_ETS_DATA_INDEX_SHIFT 8
+#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF
+#define NVM_INIT_CONTROL2_REG 0x000F
+#define NVM_INIT_CONTROL3_PORT_B 0x0014
+#define NVM_INIT_3GIO_3 0x001A
+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define NVM_INIT_CONTROL3_PORT_A 0x0024
+#define NVM_CFG 0x0012
+#define NVM_ALT_MAC_ADDR_PTR 0x0037
+#define NVM_CHECKSUM_REG 0x003F
+#define NVM_COMPATIBILITY_REG_3 0x0003
+#define NVM_COMPATIBILITY_BIT_MASK 0x8000
+
+#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
+
+#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0)
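+
+/* For example, NVM_82580_LAN_FUNC_OFFSET(2) evaluates to 0x40 + (0x40 * 2) =
+ * 0xC0, i.e. the per-port NVM word block for LAN function 2 starts 0xC0
+ * words into the image (function 0 starts at offset 0).
+ */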
+
+/* Mask bits for fields in Word 0x24 of the NVM */
+#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */
+/* Offset of Link Mode bits for 82575/82576 */
+#define NVM_WORD24_LNK_MODE_OFFSET 8
+/* Offset of Link Mode bits for 82580 up */
+#define NVM_WORD24_82580_LNK_MODE_OFFSET 4
+
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK 0x3000
+#define NVM_WORD0F_PAUSE 0x1000
+#define NVM_WORD0F_ASM_DIR 0x2000
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK 0x000C
+
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM 0x0800
+
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH 11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM 0xBABA
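+
+/* Sketch of checksum validation under that rule (the actual shared-code
+ * routine lives elsewhere; hw->nvm.ops.read() is assumed to be the NVM
+ * word-read accessor):
+ *
+ *	u16 i, word, checksum = 0;
+ *	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ *		if (hw->nvm.ops.read(hw, i, 1, &word))
+ *			return -E1000_ERR_NVM;
+ *		checksum += word;
+ *	}
+ *	if (checksum != (u16)NVM_SUM)
+ *		return -E1000_ERR_NVM;
+ */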
+
+/* PBA (printed board assembly) number words */
+#define NVM_PBA_OFFSET_0 8
+#define NVM_PBA_OFFSET_1 9
+#define NVM_PBA_PTR_GUARD 0xFAFA
+#define NVM_RESERVED_WORD 0xFFFF
+#define NVM_WORD_SIZE_BASE_SHIFT 6
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI 0x01
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCI_HEADER_TYPE_REGISTER 0x0E
+#define PCIE_LINK_STATUS 0x12
+#define PCIE_DEVICE_CONTROL2 0x28
+
+#define PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define PCIE_LINK_WIDTH_MASK 0x3F0
+#define PCIE_LINK_WIDTH_SHIFT 4
+#define PCIE_LINK_SPEED_MASK 0x0F
+#define PCIE_LINK_SPEED_2500 0x01
+#define PCIE_LINK_SPEED_5000 0x02
+#define PCIE_DEVICE_CONTROL2_16ms 0x0005
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs.
+ * I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID 0x01410C50
+#define M88E1000_I_PHY_ID 0x01410C30
+#define M88E1011_I_PHY_ID 0x01410C20
+#define IGP01E1000_I_PHY_ID 0x02A80380
+#define M88E1111_I_PHY_ID 0x01410CC0
+#define M88E1543_E_PHY_ID 0x01410EA0
+#define M88E1112_E_PHY_ID 0x01410C90
+#define I347AT4_E_PHY_ID 0x01410DC0
+#define M88E1340M_E_PHY_ID 0x01410DF0
+#define GG82563_E_PHY_ID 0x01410CA0
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+#define I82580_I_PHY_ID 0x015403A0
+#define I350_I_PHY_ID 0x015403B0
+#define I210_I_PHY_ID 0x01410C00
+#define IGP04E1000_E_PHY_ID 0x02A80391
+#define M88_VENDOR 0x0141
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */
+#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
+
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
+/* MDI Crossover Mode bits 6:5 Manual MDI configuration */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+/* 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380
+#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+
+/* Intel I347AT4 Registers */
+#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
+#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
+#define I347AT4_PAGE_SELECT 0x16
+
+/* I347AT4 Extended PHY Specific Control Register */
+
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
+#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
+#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
+#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
+#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
+#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
+#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
+#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
+#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
+
+/* I347AT4 PHY Cable Diagnostics Control */
+#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
+
+/* M88E1112 only registers */
+#define M88E1112_VCT_DSP_DISTANCE 0x001A
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT 5
+#define GG82563_REG(page, reg) \
+ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG 30
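+
+/* For example, GG82563_REG(193, 16) expands to ((193 << 5) | 16) = 0x1830,
+ * which is how the page-193 Kumeran Mode Control register below is formed.
+ */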
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */
+#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */
+#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */
+
+/* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21)
+
+#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+/* Kumeran Mode Control */
+#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16)
+#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */
+
+/* MDI Control */
+#define E1000_MDIC_REG_MASK 0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK 0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_ERROR 0x40000000
+#define E1000_MDIC_DEST 0x80000000
+
+/* SerDes Control */
+#define E1000_GEN_CTL_READY 0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT 8
+#define E1000_GEN_POLL_TIMEOUT 640
+
+/* LinkSec register fields */
+#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000
+#define E1000_LSECTXCAP_SUM_SHIFT 16
+#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000
+#define E1000_LSECRXCAP_SUM_SHIFT 16
+
+#define E1000_LSECTXCTRL_EN_MASK 0x00000003
+#define E1000_LSECTXCTRL_DISABLE 0x0
+#define E1000_LSECTXCTRL_AUTH 0x1
+#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2
+#define E1000_LSECTXCTRL_AISCI 0x00000020
+#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8
+
+#define E1000_LSECRXCTRL_EN_MASK 0x0000000C
+#define E1000_LSECRXCTRL_EN_SHIFT 2
+#define E1000_LSECRXCTRL_DISABLE 0x0
+#define E1000_LSECRXCTRL_CHECK 0x1
+#define E1000_LSECRXCTRL_STRICT 0x2
+#define E1000_LSECRXCTRL_DROP 0x3
+#define E1000_LSECRXCTRL_PLSH 0x00000040
+#define E1000_LSECRXCTRL_RP 0x00000080
+#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA 0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT 14
+#define E1000_RTTBCNRC_RF_INT_MASK \
+ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
+/* DMA Coalescing register fields */
+/* DMA Coalescing Watchdog Timer */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF
+/* DMA Coalescing Rx Threshold */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000
+#define E1000_DMACR_DMACTHR_SHIFT 16
+/* Lx when no PCIe transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000
+#define E1000_DMACR_DMAC_LX_SHIFT 28
+#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
+/* DMA Coalescing BMC-to-OS Watchdog Enable */
+#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
+
+/* DMA Coalescing Transmit Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF
+
+#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
+
+/* Rx Traffic Rate Threshold */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF
+/* Rx packet rate in current window */
+#define E1000_DMCRTRH_LRPRCW 0x80000000
+
+/* DMA Coal Rx Traffic Current Count */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF
+
+/* Flow ctrl Rx Threshold High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0
+#define E1000_FCRTC_RTH_COAL_SHIFT 4
+/* Lx power decision based on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080
+
+#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */
+#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */
+
+/* Proxy Filter Control */
+#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */
+#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */
+#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */
+#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */
+#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */
+#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */
+#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */
+#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */
+/* Proxy Status */
+#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */
+
+/* Firmware Status */
+#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */
+/* VF Control */
+#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */
+
+#define E1000_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */
+/* Lan ID bit field offset in status register */
+#define E1000_STATUS_LAN_ID_OFFSET 2
+#define E1000_VFTA_ENTRIES 128
+#ifndef E1000_UNUSEDARG
+#define E1000_UNUSEDARG
+#endif /* E1000_UNUSEDARG */
+#endif /* _E1000_DEFINES_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h
new file mode 100755
index 00000000..347cef71
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h
@@ -0,0 +1,793 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576 0x10C9
+#define E1000_DEV_ID_82576_FIBER 0x10E6
+#define E1000_DEV_ID_82576_SERDES 0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
+#define E1000_DEV_ID_82576_NS 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES 0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
+#define E1000_DEV_ID_82575EB_COPPER 0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
+#define E1000_DEV_ID_82580_COPPER 0x150E
+#define E1000_DEV_ID_82580_FIBER 0x150F
+#define E1000_DEV_ID_82580_SERDES 0x1510
+#define E1000_DEV_ID_82580_SGMII 0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
+#define E1000_DEV_ID_I350_COPPER 0x1521
+#define E1000_DEV_ID_I350_FIBER 0x1522
+#define E1000_DEV_ID_I350_SERDES 0x1523
+#define E1000_DEV_ID_I350_SGMII 0x1524
+#define E1000_DEV_ID_I350_DA4 0x1546
+#define E1000_DEV_ID_I210_COPPER 0x1533
+#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
+#define E1000_DEV_ID_I210_COPPER_IT 0x1535
+#define E1000_DEV_ID_I210_FIBER 0x1536
+#define E1000_DEV_ID_I210_SERDES 0x1537
+#define E1000_DEV_ID_I210_SGMII 0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
+#define E1000_DEV_ID_I211_COPPER 0x1539
+#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
+#define E1000_DEV_ID_I354_SGMII 0x1F41
+#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
+#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
+
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0 0
+#define E1000_FUNC_1 1
+#define E1000_FUNC_2 2
+#define E1000_FUNC_3 3
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
+
+enum e1000_mac_type {
+ e1000_undefined = 0,
+ e1000_82575,
+ e1000_82576,
+ e1000_82580,
+ e1000_i350,
+ e1000_i354,
+ e1000_i210,
+ e1000_i211,
+ e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
+};
+
+enum e1000_media_type {
+ e1000_media_type_unknown = 0,
+ e1000_media_type_copper = 1,
+ e1000_media_type_fiber = 2,
+ e1000_media_type_internal_serdes = 3,
+ e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+ e1000_nvm_unknown = 0,
+ e1000_nvm_none,
+ e1000_nvm_eeprom_spi,
+ e1000_nvm_flash_hw,
+ e1000_nvm_invm,
+ e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+ e1000_nvm_override_none = 0,
+ e1000_nvm_override_spi_small,
+ e1000_nvm_override_spi_large,
+};
+
+enum e1000_phy_type {
+ e1000_phy_unknown = 0,
+ e1000_phy_none,
+ e1000_phy_m88,
+ e1000_phy_igp,
+ e1000_phy_igp_2,
+ e1000_phy_gg82563,
+ e1000_phy_igp_3,
+ e1000_phy_ife,
+ e1000_phy_82580,
+ e1000_phy_vf,
+ e1000_phy_i210,
+};
+
+enum e1000_bus_type {
+ e1000_bus_type_unknown = 0,
+ e1000_bus_type_pci,
+ e1000_bus_type_pcix,
+ e1000_bus_type_pci_express,
+ e1000_bus_type_reserved
+};
+
+enum e1000_bus_speed {
+ e1000_bus_speed_unknown = 0,
+ e1000_bus_speed_33,
+ e1000_bus_speed_66,
+ e1000_bus_speed_100,
+ e1000_bus_speed_120,
+ e1000_bus_speed_133,
+ e1000_bus_speed_2500,
+ e1000_bus_speed_5000,
+ e1000_bus_speed_reserved
+};
+
+enum e1000_bus_width {
+ e1000_bus_width_unknown = 0,
+ e1000_bus_width_pcie_x1,
+ e1000_bus_width_pcie_x2,
+ e1000_bus_width_pcie_x4 = 4,
+ e1000_bus_width_pcie_x8 = 8,
+ e1000_bus_width_32,
+ e1000_bus_width_64,
+ e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+ e1000_1000t_rx_status_not_ok = 0,
+ e1000_1000t_rx_status_ok,
+ e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+ e1000_rev_polarity_normal = 0,
+ e1000_rev_polarity_reversed,
+ e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+ e1000_fc_none = 0,
+ e1000_fc_rx_pause,
+ e1000_fc_tx_pause,
+ e1000_fc_full,
+ e1000_fc_default = 0xFF
+};
+
+enum e1000_ms_type {
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
+};
+
+enum e1000_serdes_link_state {
+ e1000_serdes_link_down = 0,
+ e1000_serdes_link_autoneg_progress,
+ e1000_serdes_link_autoneg_complete,
+ e1000_serdes_link_forced_up
+};
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+/* Receive Descriptor */
+struct e1000_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+ struct {
+ __le64 buffer_addr;
+ __le64 reserved;
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length;
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ __le64 buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length0; /* length of buffer 0 */
+ __le16 vlan; /* VLAN tag */
+ } middle;
+ struct {
+ __le16 header_status;
+ /* length of buffers 1-3 */
+ __le16 length[PS_PAGE_BUFFERS];
+ } upper;
+ __le64 reserved;
+ } wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 special;
+ } fields;
+ } upper;
+};
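+
+/* Sketch of filling a legacy transmit descriptor for a single-buffer packet
+ * using the command bits from e1000_defines.h; buf_dma and pkt_len are
+ * assumed to be supplied by the caller, and cpu_to_le*() are the usual
+ * kernel byte-order helpers:
+ *
+ *	txd->buffer_addr = cpu_to_le64(buf_dma);
+ *	txd->lower.data = cpu_to_le32(pkt_len | E1000_TXD_CMD_EOP |
+ *				      E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS);
+ *	txd->upper.data = 0;
+ *
+ * Hardware reports completion by setting E1000_TXD_STAT_DD in
+ * upper.fields.status.
+ */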
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+ union {
+ __le32 ip_config;
+ struct {
+ u8 ipcss; /* IP checksum start */
+ u8 ipcso; /* IP checksum offset */
+ __le16 ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ __le32 tcp_config;
+ struct {
+ u8 tucss; /* TCP checksum start */
+ u8 tucso; /* TCP checksum offset */
+ __le16 tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ __le32 cmd_and_length;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 hdr_len; /* Header length */
+ __le16 mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 typ_len_ext;
+ u8 cmd;
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 popts; /* Packet Options */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
+ u64 cbtmpc;
+ u64 htdpmc;
+ u64 cbrdpc;
+ u64 cbrmpc;
+ u64 rpthc;
+ u64 hgptc;
+ u64 htcbdpc;
+ u64 hgorc;
+ u64 hgotc;
+ u64 lenerrs;
+ u64 scvpc;
+ u64 hrmpc;
+ u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
+};
+
+
+struct e1000_phy_stats {
+ u32 idle_errors;
+ u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u8 status;
+ u8 reserved0;
+ u16 vlan_id;
+ u32 reserved1;
+ u16 reserved2;
+ u8 reserved3;
+ u8 checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+ u8 command_id;
+ u8 command_length;
+ u8 command_options;
+ u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH 252
+struct e1000_host_command_info {
+ struct e1000_host_command_header command_header;
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+ u8 command_id;
+ u8 checksum;
+ u16 reserved1;
+ u16 reserved2;
+ u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+ struct e1000_host_mng_command_header command_header;
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_manage.h"
+#include "e1000_mbx.h"
+
+/* Function pointers for the MAC. */
+struct e1000_mac_operations {
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*id_led_init)(struct e1000_hw *);
+ s32 (*blink_led)(struct e1000_hw *);
+ bool (*check_mng_mode)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*cleanup_led)(struct e1000_hw *);
+ void (*clear_hw_cntrs)(struct e1000_hw *);
+ void (*clear_vfta)(struct e1000_hw *);
+ s32 (*get_bus_info)(struct e1000_hw *);
+ void (*set_lan_id)(struct e1000_hw *);
+ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*led_on)(struct e1000_hw *);
+ s32 (*led_off)(struct e1000_hw *);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
+ void (*shutdown_serdes)(struct e1000_hw *);
+ void (*power_up_serdes)(struct e1000_hw *);
+ s32 (*setup_link)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_led)(struct e1000_hw *);
+ void (*write_vfta)(struct e1000_hw *, u32, u32);
+ void (*config_collision_dist)(struct e1000_hw *);
+ void (*rar_set)(struct e1000_hw *, u8*, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*validate_mdi_setting)(struct e1000_hw *);
+ s32 (*get_thermal_sensor_data)(struct e1000_hw *);
+ s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
+ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+ void (*release_swfw_sync)(struct e1000_hw *, u16);
+};
+
+/* When to use various PHY register access functions:
+ *
+ * Func Caller
+ * Function Does Does When to use
+ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * X_reg L,P,A n/a for simple PHY reg accesses
+ * X_reg_locked P,A L for multiple accesses of different regs
+ * on different pages
+ * X_reg_page A L,P for multiple accesses of different regs
+ * on the same page
+ *
+ * Where X=[read|write], L=locking, P=sets page, A=register access
+ *
+ */
+struct e1000_phy_operations {
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*commit)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_info)(struct e1000_hw *);
+ s32 (*set_page)(struct e1000_hw *, u16);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
+ void (*release)(struct e1000_hw *);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
+ void (*power_up)(struct e1000_hw *);
+ void (*power_down)(struct e1000_hw *);
+ s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
+};
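
The table above spells out the locking contract for these accessors: the plain read_reg/write_reg take the lock and set the page themselves, the *_locked variants expect the caller to hold the lock, and the *_page variants expect the caller to hold the lock and to have set the page. A minimal illustrative sketch (not part of this patch; the page number and register offsets are placeholders) of batching several same-page reads through these ops:

static s32 phy_read_same_page_sketch(struct e1000_hw *hw, u16 *val0, u16 *val1)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);		/* caller supplies L */
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.set_page(hw, 0);		/* caller supplies P */
	if (!ret_val)
		ret_val = hw->phy.ops.read_reg_page(hw, 0x10, val0);	/* A only */
	if (!ret_val)
		ret_val = hw->phy.ops.read_reg_page(hw, 0x11, val1);

	hw->phy.ops.release(hw);
	return ret_val;
}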
+
+/* Function pointers for the NVM. */
+struct e1000_nvm_operations {
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ void (*release)(struct e1000_hw *);
+ void (*reload)(struct e1000_hw *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+};
+
+#define E1000_MAX_SENSORS 3
+
+struct e1000_thermal_diode_data {
+ u8 location;
+ u8 temp;
+ u8 caution_thresh;
+ u8 max_op_thresh;
+};
+
+struct e1000_thermal_sensor_data {
+ struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS];
+};
+
+struct e1000_mac_info {
+ struct e1000_mac_operations ops;
+ u8 addr[ETH_ADDR_LEN];
+ u8 perm_addr[ETH_ADDR_LEN];
+
+ enum e1000_mac_type type;
+
+ u32 collision_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ u32 mc_filter_type;
+ u32 tx_packet_delta;
+ u32 txcw;
+
+ u16 current_ifs_val;
+ u16 ifs_max_val;
+ u16 ifs_min_val;
+ u16 ifs_ratio;
+ u16 ifs_step_size;
+ u16 mta_reg_count;
+ u16 uta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+ #define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
+ u16 rar_entry_count;
+
+ u8 forced_speed_duplex;
+
+ bool adaptive_ifs;
+ bool has_fwsm;
+ bool arc_subsystem_valid;
+ bool asf_firmware_present;
+ bool autoneg;
+ bool autoneg_failed;
+ bool get_link_status;
+ bool in_ifs_mode;
+ enum e1000_serdes_link_state serdes_link_state;
+ bool serdes_has_link;
+ bool tx_pkt_filtering;
+ struct e1000_thermal_sensor_data thermal_sensor_data;
+};
+
+struct e1000_phy_info {
+ struct e1000_phy_operations ops;
+ enum e1000_phy_type type;
+
+ enum e1000_1000t_rx_status local_rx;
+ enum e1000_1000t_rx_status remote_rx;
+ enum e1000_ms_type ms_type;
+ enum e1000_ms_type original_ms_type;
+ enum e1000_rev_polarity cable_polarity;
+ enum e1000_smart_speed smart_speed;
+
+ u32 addr;
+ u32 id;
+ u32 reset_delay_us; /* in usec */
+ u32 revision;
+
+ enum e1000_media_type media_type;
+
+ u16 autoneg_advertised;
+ u16 autoneg_mask;
+ u16 cable_length;
+ u16 max_cable_length;
+ u16 min_cable_length;
+
+ u8 mdix;
+
+ bool disable_polarity_correction;
+ bool is_mdix;
+ bool polarity_correction;
+ bool reset_disable;
+ bool speed_downgraded;
+ bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+ struct e1000_nvm_operations ops;
+ enum e1000_nvm_type type;
+ enum e1000_nvm_override override;
+
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+
+ u16 word_size;
+ u16 delay_usec;
+ u16 address_bits;
+ u16 opcode_bits;
+ u16 page_size;
+};
+
+struct e1000_bus_info {
+ enum e1000_bus_type type;
+ enum e1000_bus_speed speed;
+ enum e1000_bus_width width;
+
+ u16 func;
+ u16 pci_cmd_word;
+};
+
+struct e1000_fc_info {
+ u32 high_water; /* Flow control high-water mark */
+ u32 low_water; /* Flow control low-water mark */
+ u16 pause_time; /* Flow control pause timer */
+ u16 refresh_time; /* Flow control refresh timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ enum e1000_fc_mode current_mode; /* FC mode in effect */
+ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct e1000_mbx_operations {
+ s32 (*init_params)(struct e1000_hw *hw);
+ s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct e1000_hw *, u16);
+ s32 (*check_for_ack)(struct e1000_hw *, u16);
+ s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct e1000_mbx_info {
+ struct e1000_mbx_operations ops;
+ struct e1000_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u16 size;
+};
+
+struct e1000_dev_spec_82575 {
+ bool sgmii_active;
+ bool global_device_reset;
+ bool eee_disable;
+ bool module_plugged;
+ bool clear_semaphore_once;
+ u32 mtu;
+ struct sfp_e1000_flags eth_flags;
+ u8 media_port;
+ bool media_changed;
+};
+
+struct e1000_dev_spec_vf {
+ u32 vf_number;
+ u32 v2p_mailbox;
+};
+
+struct e1000_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+ u8 __iomem *flash_address;
+ unsigned long io_base;
+
+ struct e1000_mac_info mac;
+ struct e1000_fc_info fc;
+ struct e1000_phy_info phy;
+ struct e1000_nvm_info nvm;
+ struct e1000_bus_info bus;
+ struct e1000_mbx_info mbx;
+ struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+ union {
+ struct e1000_dev_spec_82575 _82575;
+ struct e1000_dev_spec_vf vf;
+ } dev_spec;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+};
+
+#include "e1000_82575.h"
+#include "e1000_i210.h"
+
+/* These functions must be implemented by drivers */
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c
new file mode 100755
index 00000000..1e9f3e6e
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c
@@ -0,0 +1,909 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+
+
+static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
+static void e1000_release_nvm_i210(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
+static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
+static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+
+/**
+ * e1000_acquire_nvm_i210 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_acquire_nvm_i210");
+
+ ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_i210 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ **/
+static void e1000_release_nvm_i210(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_i210");
+
+ e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 ret_val = E1000_SUCCESS;
+ s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+ DEBUGFUNC("e1000_acquire_swfw_sync_i210");
+
+ while (i < timeout) {
+ if (e1000_get_hw_semaphore_i210(hw)) {
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_swfw_sync_i210 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to release
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ DEBUGFUNC("e1000_release_swfw_sync_i210");
+
+ while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+ ; /* Empty */
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+}
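
As the comments above note, the acquire/release pair always brackets the protected access, with the same mask selecting the resource in both calls. A hedged sketch of the pairing around an EEPROM access (the protected body is elided):

static s32 swfw_sync_pairing_sketch(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		return ret_val;

	/* ... exclusive EEPROM/NVM access goes here ... */

	e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
	return E1000_SUCCESS;
}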
+
+/**
+ * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_i210");
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ /* In rare circumstances, the SW semaphore may already be held
+ * unintentionally. Clear the semaphore once before giving up.
+ */
+ if (hw->dev_spec._82575.clear_semaphore_once) {
+ hw->dev_spec._82575.clear_semaphore_once = false;
+ e1000_put_hw_semaphore_generic(hw);
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ }
+ }
+
+ /* If we do not have the semaphore here, we have to give up. */
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_generic(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the Shadow Ram to read
+ * @words: number of words to read
+ * @data: word read from the Shadow Ram
+ *
+ * Reads a 16 bit word from the Shadow Ram using the EERD register.
+ * Uses necessary synchronization semaphores.
+ **/
+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("e1000_read_nvm_srrd_i210");
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of the forceful takeover procedure. However, it is more efficient
+ * to read in bursts than to synchronize access for each word. */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = e1000_read_nvm_eerd(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow RAM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ * Writes data to Shadow RAM at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * data will not be committed to FLASH and also Shadow RAM will most likely
+ * contain an invalid checksum.
+ *
+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ * partially written.
+ **/
+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("e1000_write_nvm_srwr_i210");
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of the forceful takeover procedure. However, it is more efficient
+ * to write in bursts than to synchronize access for each word. */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = e1000_write_nvm_srwr(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
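
Per the warning in the comment above, a Shadow RAM write is only committed to flash once the checksum is updated. An illustrative sketch (not part of the driver; the offset and value are placeholders) of writing one word and then committing it:

static s32 nvm_write_and_commit_sketch(struct e1000_hw *hw, u16 offset, u16 value)
{
	s32 ret_val;

	ret_val = e1000_write_nvm_srwr_i210(hw, offset, 1, &value);
	if (ret_val != E1000_SUCCESS)
		return ret_val;

	/* Recompute the checksum and commit the Shadow RAM to flash */
	return e1000_update_nvm_checksum_i210(hw);
}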
+
+/**
+ * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow Ram to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow Ram
+ *
+ * Writes data to Shadow Ram at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * Shadow Ram will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, k, eewr = 0;
+ u32 attempts = 100000;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_nvm_srwr");
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+ (data[i] << E1000_NVM_RW_REG_DATA) |
+ E1000_NVM_RW_REG_START;
+
+ E1000_WRITE_REG(hw, E1000_SRWR, eewr);
+
+ for (k = 0; k < attempts; k++) {
+ if (E1000_NVM_RW_REG_DONE &
+ E1000_READ_REG(hw, E1000_SRWR)) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Shadow RAM write EEWR timed out\n");
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_read_invm_word_i210 - Reads OTP
+ * @hw: pointer to the HW structure
+ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
+ *
+ * Reads 16-bit words from the OTP. Return error when the word is not
+ * stored in OTP.
+ **/
+static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
+{
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u32 invm_dword;
+ u16 i;
+ u8 record_type, word_address;
+
+ DEBUGFUNC("e1000_read_invm_word_i210");
+
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ /* Get record type */
+ record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+ if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+ break;
+ if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+ i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+ i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+ word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+ if (word_address == address) {
+ *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+ DEBUGOUT2("Read INVM Word 0x%02x = %x",
+ address, *data);
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ }
+ if (status != E1000_SUCCESS)
+ DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
+ return status;
+}
+
+/**
+ * e1000_read_invm_i210 - Read invm wrapper function for I210/I211
+ * @hw: pointer to the HW structure
+ * @offset: the word offset (aka eeprom address) to read
+ * @words: number of words to read (unused; lookups are per-word)
+ * @data: pointer to the data read
+ *
+ * Wrapper function to return data formerly found in the NVM.
+ **/
+static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
+ u16 E1000_UNUSEDARG words, u16 *data)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_read_invm_i210");
+
+ /* Only the MAC addr is required to be present in the iNVM */
+ switch (offset) {
+ case NVM_MAC_ADDR:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
+ &data[1]);
+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
+ &data[2]);
+ if (ret_val != E1000_SUCCESS)
+ DEBUGOUT("MAC Addr not found in iNVM\n");
+ break;
+ case NVM_INIT_CTRL_2:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_2_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_INIT_CTRL_4:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_4_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_LED_1_CFG:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_1_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_LED_0_2_CFG:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_0_2_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_ID_LED_SETTINGS:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = ID_LED_RESERVED_FFFF;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_SUB_DEV_ID:
+ *data = hw->subsystem_device_id;
+ break;
+ case NVM_SUB_VEN_ID:
+ *data = hw->subsystem_vendor_id;
+ break;
+ case NVM_DEV_ID:
+ *data = hw->device_id;
+ break;
+ case NVM_VEN_ID:
+ *data = hw->vendor_id;
+ break;
+ default:
+ DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
+ *data = NVM_RESERVED_WORD;
+ break;
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_read_invm_version - Reads iNVM version and image type
+ * @hw: pointer to the HW structure
+ * @invm_ver: version structure for the version read
+ *
+ * Reads iNVM version and image type.
+ **/
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver)
+{
+ u32 *record = NULL;
+ u32 *next_record = NULL;
+ u32 i = 0;
+ u32 invm_dword = 0;
+ u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+ E1000_INVM_RECORD_SIZE_IN_BYTES);
+ u32 buffer[E1000_INVM_SIZE];
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u16 version = 0;
+
+ DEBUGFUNC("e1000_read_invm_version");
+
+ /* Read iNVM memory */
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ buffer[i] = invm_dword;
+ }
+
+ /* Read version number */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have first version location used */
+ if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+ version = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have second version location used */
+ else if ((i == 1) &&
+ ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /*
+ * Check if we have odd version location
+ * used and it is the last one used
+ */
+ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+ ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+ (i != 1))) {
+ version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+ >> 13;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /*
+ * Check if we have even version location
+ * used and it is the last one used
+ */
+ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+ ((*record & 0x3) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+
+ if (status == E1000_SUCCESS) {
+ invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+ >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+ }
+ /* Read Image Type */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have image type in first location used */
+ if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+ invm_ver->invm_img_type = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have image type in first location used */
+ else if ((((*record & 0x3) == 0) &&
+ ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+ ((((*record & 0x3) != 0) && (i != 1)))) {
+ invm_ver->invm_img_type =
+ (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_i210");
+
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+
+ /*
+ * Replace the read function with semaphore grabbing with
+ * the one that skips this for a while.
+ * We have semaphore taken already here.
+ */
+ read_op_ptr = hw->nvm.ops.read;
+ hw->nvm.ops.read = e1000_read_nvm_eerd;
+
+ status = e1000_validate_nvm_checksum_generic(hw);
+
+ /* Revert original read operation. */
+ hw->nvm.ops.read = read_op_ptr;
+
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+
+/**
+ * e1000_update_nvm_checksum_i210 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM. Next, commits the EEPROM data onto the flash.
+ **/
+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_i210");
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("EEPROM read failed\n");
+ goto out;
+ }
+
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ /*
+ * Do not use hw->nvm.ops.write, hw->nvm.ops.read
+ * because we do not want to take the synchronization
+ * semaphores twice here.
+ */
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw->nvm.ops.release(hw);
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+ &checksum);
+ if (ret_val != E1000_SUCCESS) {
+ hw->nvm.ops.release(hw);
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+ goto out;
+ }
+
+ hw->nvm.ops.release(hw);
+
+ ret_val = e1000_update_flash_i210(hw);
+ } else {
+ ret_val = E1000_ERR_SWFW_SYNC;
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_flash_presence_i210 - Check if flash device is detected.
+ * @hw: pointer to the HW structure
+ *
+ **/
+bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
+{
+ u32 eec = 0;
+ bool ret_val = false;
+
+ DEBUGFUNC("e1000_get_flash_presence_i210");
+
+ eec = E1000_READ_REG(hw, E1000_EECD);
+
+ if (eec & E1000_EECD_FLASH_DETECTED_I210)
+ ret_val = true;
+
+ return ret_val;
+}
+
+/**
+ * e1000_update_flash_i210 - Commit EEPROM to the flash
+ * @hw: pointer to the HW structure
+ *
+ **/
+s32 e1000_update_flash_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 flup;
+
+ DEBUGFUNC("e1000_update_flash_i210");
+
+ ret_val = e1000_pool_flash_update_done_i210(hw);
+ if (ret_val == -E1000_ERR_NVM) {
+ DEBUGOUT("Flash update time out\n");
+ goto out;
+ }
+
+ flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
+ E1000_WRITE_REG(hw, E1000_EECD, flup);
+
+ ret_val = e1000_pool_flash_update_done_i210(hw);
+ if (ret_val == E1000_SUCCESS)
+ DEBUGOUT("Flash update complete\n");
+ else
+ DEBUGOUT("Flash update time out\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_pool_flash_update_done_i210 - Poll FLUDONE status.
+ * @hw: pointer to the HW structure
+ *
+ **/
+static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_NVM;
+ u32 i, reg;
+
+ DEBUGFUNC("e1000_pool_flash_update_done_i210");
+
+ for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+ reg = E1000_READ_REG(hw, E1000_EECD);
+ if (reg & E1000_EECD_FLUDONE_I210) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the i210/i211 NVM parameters and function pointers.
+ **/
+static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ DEBUGFUNC("e1000_init_nvm_params_i210");
+
+ ret_val = e1000_init_nvm_params_82575(hw);
+ nvm->ops.acquire = e1000_acquire_nvm_i210;
+ nvm->ops.release = e1000_release_nvm_i210;
+ nvm->ops.valid_led_default = e1000_valid_led_default_i210;
+ if (e1000_get_flash_presence_i210(hw)) {
+ hw->nvm.type = e1000_nvm_flash_hw;
+ nvm->ops.read = e1000_read_nvm_srrd_i210;
+ nvm->ops.write = e1000_write_nvm_srwr_i210;
+ nvm->ops.validate = e1000_validate_nvm_checksum_i210;
+ nvm->ops.update = e1000_update_nvm_checksum_i210;
+ } else {
+ hw->nvm.type = e1000_nvm_invm;
+ nvm->ops.read = e1000_read_invm_i210;
+ nvm->ops.write = e1000_null_write_nvm;
+ nvm->ops.validate = e1000_null_ops_generic;
+ nvm->ops.update = e1000_null_ops_generic;
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_init_function_pointers_i210 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_i210(struct e1000_hw *hw)
+{
+ e1000_init_function_pointers_82575(hw);
+ hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
+
+ return;
+}
+
+/**
+ * e1000_valid_led_default_i210 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_i210");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_internal_serdes:
+ *data = ID_LED_DEFAULT_I210_SERDES;
+ break;
+ case e1000_media_type_copper:
+ default:
+ *data = ID_LED_DEFAULT_I210;
+ break;
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * __e1000_access_xmdio_reg - Read/write XMDIO register
+ * @hw: pointer to the HW structure
+ * @address: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: pointer to value to read/write from/to the XMDIO address
+ * @read: boolean flag to indicate read or write
+ **/
+static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
+ u8 dev_addr, u16 *data, bool read)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("__e1000_access_xmdio_reg");
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
+ dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
+ if (ret_val)
+ return ret_val;
+
+ /* Recalibrate the device back to 0 */
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
+ if (ret_val)
+ return ret_val;
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_xmdio_reg - Read XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be read from the XMDIO address
+ **/
+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
+{
+ DEBUGFUNC("e1000_read_xmdio_reg");
+
+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true);
+}
+
+/**
+ * e1000_write_xmdio_reg - Write XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be written to the XMDIO address
+ **/
+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
+{
+ DEBUGFUNC("e1000_write_xmdio_reg");
+
+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+}
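
The two wrappers above give read/modify/write access to clause-45 (XMDIO) registers through the MMDAC/MMDAAD indirection. A hedged sketch of toggling a bit; the device address (7, the IEEE 802.3 Auto-Negotiation MMD) and register offset used here are illustrative placeholders, not values taken from this patch:

static s32 xmdio_read_modify_write_sketch(struct e1000_hw *hw)
{
	u16 data;
	s32 ret_val;

	ret_val = e1000_read_xmdio_reg(hw, 0x0000, 7, &data);
	if (ret_val)
		return ret_val;

	data |= 0x0001;	/* set an illustrative bit */

	return e1000_write_xmdio_reg(hw, 0x0000, 7, data);
}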
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h
new file mode 100755
index 00000000..57b2eb56
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h
@@ -0,0 +1,91 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_I210_H_
+#define _E1000_I210_H_
+
+bool e1000_get_flash_presence_i210(struct e1000_hw *hw);
+s32 e1000_update_flash_i210(struct e1000_hw *hw);
+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 *data);
+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 data);
+
+#define E1000_STM_OPCODE 0xDB00
+#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
+
+#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
+ (u8)((invm_dword) & 0x7)
+#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
+ (u8)(((invm_dword) & 0x0000FE00) >> 9)
+#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
+ (u16)(((invm_dword) & 0xFFFF0000) >> 16)
+
+enum E1000_INVM_STRUCTURE_TYPE {
+ E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00,
+ E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01,
+ E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02,
+ E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03,
+ E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04,
+ E1000_INVM_INVALIDATED_STRUCTURE = 0x0F,
+};
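
A worked illustration (the dword value is hypothetical) of how the masks above carve up an iNVM word-autoload record:

/*
 * For invm_dword = 0x12340C09:
 *   INVM_DWORD_TO_RECORD_TYPE  -> 0x09 & 0x7        = 0x01 (word autoload)
 *   INVM_DWORD_TO_WORD_ADDRESS -> 0x00000C00 >> 9   = 0x06
 *   INVM_DWORD_TO_WORD_DATA    -> 0x12340000 >> 16  = 0x1234
 * i.e. the record autoloads the value 0x1234 into NVM word offset 6.
 */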
+
+#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
+#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
+#define E1000_INVM_ULT_BYTES_SIZE 8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
+#define E1000_INVM_VER_FIELD_ONE 0x1FF8
+#define E1000_INVM_VER_FIELD_TWO 0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
+
+#define E1000_INVM_MAJOR_MASK 0x3F0
+#define E1000_INVM_MINOR_MASK 0xF
+#define E1000_INVM_MAJOR_SHIFT 4
+
+#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_OFF2))
+#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
+
+/* NVM offset defaults for I211 devices */
+#define NVM_INIT_CTRL_2_DEFAULT_I211 0x7243
+#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
+#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
+#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c
new file mode 100755
index 00000000..4ee59ba9
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c
@@ -0,0 +1,2096 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+
+static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
+static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+static void e1000_config_collision_dist_generic(struct e1000_hw *hw);
+static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+
+/**
+ * e1000_init_mac_ops_generic - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers to no-op functions
+ **/
+void e1000_init_mac_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ DEBUGFUNC("e1000_init_mac_ops_generic");
+
+ /* General Setup */
+ mac->ops.init_params = e1000_null_ops_generic;
+ mac->ops.init_hw = e1000_null_ops_generic;
+ mac->ops.reset_hw = e1000_null_ops_generic;
+ mac->ops.setup_physical_interface = e1000_null_ops_generic;
+ mac->ops.get_bus_info = e1000_null_ops_generic;
+ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
+ mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
+ mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
+ mac->ops.clear_hw_cntrs = e1000_null_mac_generic;
+ /* LED */
+ mac->ops.cleanup_led = e1000_null_ops_generic;
+ mac->ops.setup_led = e1000_null_ops_generic;
+ mac->ops.blink_led = e1000_null_ops_generic;
+ mac->ops.led_on = e1000_null_ops_generic;
+ mac->ops.led_off = e1000_null_ops_generic;
+ /* LINK */
+ mac->ops.setup_link = e1000_null_ops_generic;
+ mac->ops.get_link_up_info = e1000_null_link_info;
+ mac->ops.check_for_link = e1000_null_ops_generic;
+ /* Management */
+ mac->ops.check_mng_mode = e1000_null_mng_mode;
+ /* VLAN, MC, etc. */
+ mac->ops.update_mc_addr_list = e1000_null_update_mc;
+ mac->ops.clear_vfta = e1000_null_mac_generic;
+ mac->ops.write_vfta = e1000_null_write_vfta;
+ mac->ops.rar_set = e1000_rar_set_generic;
+ mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
+}
+
+/**
+ * e1000_null_ops_generic - No-op function, returns 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_ops_generic");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mac_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_mac_generic");
+ return;
+}
+
+/**
+ * e1000_null_link_info - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d)
+{
+ DEBUGFUNC("e1000_null_link_info");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mng_mode - No-op function, return false
+ * @hw: pointer to the HW structure
+ **/
+bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_mng_mode");
+ return false;
+}
+
+/**
+ * e1000_null_update_mc - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
+{
+ DEBUGFUNC("e1000_null_update_mc");
+ return;
+}
+
+/**
+ * e1000_null_write_vfta - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b)
+{
+ DEBUGFUNC("e1000_null_write_vfta");
+ return;
+}
+
+/**
+ * e1000_null_rar_set - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
+{
+ DEBUGFUNC("e1000_null_rar_set");
+ return;
+}
+
+/**
+ * e1000_get_bus_info_pcie_generic - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val;
+ u16 pcie_link_status;
+
+ DEBUGFUNC("e1000_get_bus_info_pcie_generic");
+
+ bus->type = e1000_bus_type_pci_express;
+
+ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS,
+ &pcie_link_status);
+ if (ret_val) {
+ bus->width = e1000_bus_width_unknown;
+ bus->speed = e1000_bus_speed_unknown;
+ } else {
+ switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
+ case PCIE_LINK_SPEED_2500:
+ bus->speed = e1000_bus_speed_2500;
+ break;
+ case PCIE_LINK_SPEED_5000:
+ bus->speed = e1000_bus_speed_5000;
+ break;
+ default:
+ bus->speed = e1000_bus_speed_unknown;
+ break;
+ }
+
+ bus->width = (enum e1000_bus_width)((pcie_link_status &
+ PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT);
+ }
+
+ mac->ops.set_lan_id(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading the device status register,
+ * which reports the correct function number regardless of port swap state.
+ **/
+static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ /* The status register reports the correct function number
+ * for the device regardless of function swap state.
+ */
+ reg = E1000_READ_REG(hw, E1000_STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+
+ bus->func = 0;
+}
+
+/**
+ * e1000_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+ u32 offset;
+
+ DEBUGFUNC("e1000_clear_vfta_generic");
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * e1000_write_vfta_generic - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ DEBUGFUNC("e1000_write_vfta_generic");
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_init_rx_addrs_generic - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: receive address registers
+ *
+ * Sets up the receive address registers by setting the base receive address
+ * register to the device's MAC address and clearing all the other receive
+ * address registers to 0.
+ **/
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
+{
+ u32 i;
+ u8 mac_addr[ETH_ADDR_LEN] = {0};
+
+ DEBUGFUNC("e1000_init_rx_addrs_generic");
+
+ /* Setup the receive address */
+ DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other (rar_entry_count - 1) receive addresses */
+ DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
+ for (i = 1; i < rar_count; i++)
+ hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the NVM for an alternate MAC address. An alternate MAC address
+ * can be set up by pre-boot software and must be treated like a permanent
+ * address, overriding the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 i;
+ s32 ret_val;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ADDR_LEN];
+
+ DEBUGFUNC("e1000_check_alt_mac_addr_generic");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
+ if (ret_val)
+ return ret_val;
+
+
+ /* Alternate MAC address is handled by the option ROM for 82580
+ * and newer. SW support not required.
+ */
+ if (hw->mac.type >= e1000_82580)
+ return E1000_SUCCESS;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+ (nvm_alt_mac_addr_offset == 0x0000))
+ /* There is no Alternate MAC Address */
+ return E1000_SUCCESS;
+
+ if (hw->bus.func == E1000_FUNC_1)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ if (hw->bus.func == E1000_FUNC_2)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
+
+ if (hw->bus.func == E1000_FUNC_3)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
+ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+ if (alt_mac_addr[0] & 0x01) {
+ DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
+ return E1000_SUCCESS;
+ }
+
+ /* We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_rar_set_generic - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ **/
+static void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ DEBUGFUNC("e1000_rar_set_generic");
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ /* Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
+}
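
To make the byte-order handling above concrete, a worked illustration with a hypothetical MAC address 00:1B:21:3A:4C:5D:

/*
 * addr[] = {0x00, 0x1B, 0x21, 0x3A, 0x4C, 0x5D}
 * rar_low  = 0x00 | 0x1B<<8 | 0x21<<16 | 0x3A<<24 = 0x3A211B00
 * rar_high = 0x4C | 0x5D<<8                       = 0x00005D4C
 * The address is non-zero, so E1000_RAH_AV is OR'd into rar_high before
 * the two flushed register writes.
 */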
+
+/**
+ * e1000_hash_mc_addr_generic - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value.
+ **/
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ DEBUGFUNC("e1000_hash_mc_addr_generic");
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ /* The portion of the address that is used for the hash table
+ * is determined by the mc_filter_type setting.
+ * The algorithm is such that there is a total of 8 bits of shifting.
+ * The bit_shift for a mc_filter_type of 0 represents the number of
+ * left-shifts where the MSB of mc_addr[5] would still fall within
+ * the hash_mask. Case 0 does this exactly. Since there are a total
+ * of 8 bits of shifting, then mc_addr[4] will shift right the
+ * remaining number of bits. Thus 8 - bit_shift. The rest of the
+ * cases are a variation of this algorithm...essentially raising the
+ * number of bits to shift mc_addr[5] left, while still keeping the
+ * 8-bit shifting total.
+ *
+ * For example, given the following Destination MAC Address and an
+ * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+ * we can see that the bit_shift for case 0 is 4. These are the hash
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+ switch (hw->mac.mc_filter_type) {
+ default:
+ case 0:
+ break;
+ case 1:
+ bit_shift += 1;
+ break;
+ case 2:
+ bit_shift += 2;
+ break;
+ case 3:
+ bit_shift += 4;
+ break;
+ }
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16) mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
+
+/**
+ * e1000_update_mc_addr_list_generic - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ DEBUGFUNC("e1000_update_mc_addr_list_generic");
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
+ hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ADDR_LEN);
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
+
+ E1000_READ_REG(hw, E1000_CRCERRS);
+ E1000_READ_REG(hw, E1000_SYMERRS);
+ E1000_READ_REG(hw, E1000_MPC);
+ E1000_READ_REG(hw, E1000_SCC);
+ E1000_READ_REG(hw, E1000_ECOL);
+ E1000_READ_REG(hw, E1000_MCC);
+ E1000_READ_REG(hw, E1000_LATECOL);
+ E1000_READ_REG(hw, E1000_COLC);
+ E1000_READ_REG(hw, E1000_DC);
+ E1000_READ_REG(hw, E1000_SEC);
+ E1000_READ_REG(hw, E1000_RLEC);
+ E1000_READ_REG(hw, E1000_XONRXC);
+ E1000_READ_REG(hw, E1000_XONTXC);
+ E1000_READ_REG(hw, E1000_XOFFRXC);
+ E1000_READ_REG(hw, E1000_XOFFTXC);
+ E1000_READ_REG(hw, E1000_FCRUC);
+ E1000_READ_REG(hw, E1000_GPRC);
+ E1000_READ_REG(hw, E1000_BPRC);
+ E1000_READ_REG(hw, E1000_MPRC);
+ E1000_READ_REG(hw, E1000_GPTC);
+ E1000_READ_REG(hw, E1000_GORCL);
+ E1000_READ_REG(hw, E1000_GORCH);
+ E1000_READ_REG(hw, E1000_GOTCL);
+ E1000_READ_REG(hw, E1000_GOTCH);
+ E1000_READ_REG(hw, E1000_RNBC);
+ E1000_READ_REG(hw, E1000_RUC);
+ E1000_READ_REG(hw, E1000_RFC);
+ E1000_READ_REG(hw, E1000_ROC);
+ E1000_READ_REG(hw, E1000_RJC);
+ E1000_READ_REG(hw, E1000_TORL);
+ E1000_READ_REG(hw, E1000_TORH);
+ E1000_READ_REG(hw, E1000_TOTL);
+ E1000_READ_REG(hw, E1000_TOTH);
+ E1000_READ_REG(hw, E1000_TPR);
+ E1000_READ_REG(hw, E1000_TPT);
+ E1000_READ_REG(hw, E1000_MPTC);
+ E1000_READ_REG(hw, E1000_BPTC);
+}
+
+/**
+ * e1000_check_for_copper_link_generic - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("e1000_check_for_copper_link");
+
+ /* We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status)
+ return E1000_SUCCESS;
+
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ return E1000_SUCCESS; /* No link detected */
+
+ mac->get_link_status = false;
+
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000_check_downshift_generic(hw);
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg)
+ return -E1000_ERR_CONFIG;
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ mac->ops.config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_fiber_link_generic - Check for link (Fiber)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_check_for_fiber_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ /* If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), the cable is plugged in (we have signal),
+ * and our link partner is not trying to auto-negotiate with us (we
+ * are receiving idles or data), we need to force link up. We also
+ * need to give auto-negotiation time to complete, in case the cable
+ * was just plugged in. The autoneg_failed flag does this.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
+ !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /* If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_check_for_serdes_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ /* If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), and our link partner is not trying to
+ * auto-negotiate with us (we are receiving idles or data),
+ * we need to force link up. We also need to give auto-negotiation
+ * time to complete.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /* If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
+		/* If we are forcing link for a non-auto-negotiation switch, check
+ * link status based on MAC synchronization for internal
+ * serdes media type.
+ */
+ /* SYNCH bit and IV bit are sticky. */
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ DEBUGOUT("SERDES: Link up - forced.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - force failed.\n");
+ }
+ }
+
+ if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU) {
+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n");
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - no sync.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - autoneg failed\n");
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_default_fc_generic - Set flow control default values
+ * @hw: pointer to the HW structure
+ *
+ * Read the EEPROM for the default values for flow control and store the
+ * values.
+ **/
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 nvm_data;
+
+ DEBUGFUNC("e1000_set_default_fc_generic");
+
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
+ hw->fc.requested_mode = e1000_fc_none;
+ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+ NVM_WORD0F_ASM_DIR)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else
+ hw->fc.requested_mode = e1000_fc_full;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_link_generic - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+s32 e1000_setup_link_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_link_generic");
+
+ /* In the case of the phy reset being blocked, we already have a link.
+ * We do not need to set it up again.
+ */
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
+ return E1000_SUCCESS;
+
+ /* If requested flow control is set to default, set flow control
+ * based on the EEPROM flow control settings.
+ */
+ if (hw->fc.requested_mode == e1000_fc_default) {
+ ret_val = e1000_set_default_fc_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
+ hw->fc.current_mode);
+
+ /* Call the necessary media_type subroutine to configure the link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+ E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+ E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+ return e1000_set_fc_watermarks_generic(hw);
+}
+
+/**
+ * e1000_commit_fc_settings_generic - Configure flow control
+ * @hw: pointer to the HW structure
+ *
+ * Write the flow control settings to the Transmit Config Word Register (TXCW)
+ * based on the flow control settings in e1000_mac_info.
+ **/
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 txcw;
+
+ DEBUGFUNC("e1000_commit_fc_settings_generic");
+
+ /* Check for a software override of the flow control settings, and
+ * setup the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start auto-
+ * negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we
+ * do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /* Flow control completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case e1000_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is disabled
+ * by a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case e1000_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case e1000_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ break;
+ }
+
+ E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+ mac->txcw = txcw;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_poll_fiber_serdes_link_generic - Poll for link up
+ * @hw: pointer to the HW structure
+ *
+ * Polls for link up by reading the status register, if link fails to come
+ * up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 i, status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
+
+ /* If we have a signal (the cable is plugged in, or assumed true for
+ * serdes media) then poll for a "Link-Up" indication in the Device
+ * Status Register. Time out if a link isn't seen in 500 milliseconds
+ * (Auto-negotiation should complete in less than 500 milliseconds even
+ * if the other end is doing it in SW).
+ */
+ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+ msec_delay(10);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU)
+ break;
+ }
+ if (i == FIBER_LINK_UP_LIMIT) {
+ DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ mac->autoneg_failed = true;
+ /* AutoNeg failed to achieve a link, so we'll call
+ * mac->check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
+ */
+ ret_val = mac->ops.check_for_link(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while checking for link\n");
+ return ret_val;
+ }
+ mac->autoneg_failed = false;
+ } else {
+ mac->autoneg_failed = false;
+ DEBUGOUT("Valid Link Found\n");
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes
+ * links. Upon successful setup, poll for link.
+ **/
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Take the link out of reset */
+ ctrl &= ~E1000_CTRL_LRST;
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ ret_val = e1000_commit_fc_settings_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation. If auto-negotiation is successful
+ * then the link-up status bit will be set and the flow control enable
+ * bits (RFCE and TFCE) will be set according to their negotiated value.
+ */
+ DEBUGOUT("Auto-negotiation enabled\n");
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+
+ /* For these adapters, the SW definable pin 1 is set when the optics
+ * detect a signal. If we have a signal, then poll for a "Link-Up"
+ * indication.
+ */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+ (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+ ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+ } else {
+ DEBUGOUT("No signal detected\n");
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_config_collision_dist_generic - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+static void e1000_config_collision_dist_generic(struct e1000_hw *hw)
+{
+ u32 tctl;
+
+ DEBUGFUNC("e1000_config_collision_dist_generic");
+
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers. If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ **/
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
+{
+ u32 fcrtl = 0, fcrth = 0;
+
+ DEBUGFUNC("e1000_set_fc_watermarks_generic");
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (hw->fc.current_mode & e1000_fc_tx_pause) {
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
+ */
+ fcrtl = hw->fc.low_water;
+ if (hw->fc.send_xon)
+ fcrtl |= E1000_FCRTL_XONE;
+
+ fcrth = hw->fc.high_water;
+ }
+ E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
+ E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_force_mac_fc_generic - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings. TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC. Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_force_mac_fc_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disable flow control
+ * according to the "hw->fc.current_mode" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) is enabled.
+ * other: No other values should be possible at this point.
+ */
+ DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case e1000_fc_rx_pause:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case e1000_fc_tx_pause:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case e1000_fc_full:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_fc_after_link_up_generic - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced. If the link needed to be forced, then
+ * flow control needs to be forced also. If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ **/
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
+ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+ u16 speed, duplex;
+
+ DEBUGFUNC("e1000_config_fc_after_link_up_generic");
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (mac->autoneg_failed) {
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes)
+ ret_val = e1000_force_mac_fc_generic(hw);
+ } else {
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ret_val = e1000_force_mac_fc_generic(hw);
+ }
+
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto_Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = e1000_fc_none;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = e1000_force_mac_fc_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
+ mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ DEBUGOUT("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto_Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = e1000_force_mac_fc_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
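+
+/* Illustrative sketch, not part of the upstream driver: the PAUSE/ASM_DIR
+ * resolution table documented above, expressed as a standalone helper. It
+ * gives the raw table result only; the driver additionally downgrades the
+ * symmetric case to Rx-only PAUSE when the requested mode was not
+ * e1000_fc_full. The enum type name is assumed from the driver headers.
+ * Kept under #if 0 so it is never built.
+ */
+#if 0
+static enum e1000_fc_mode example_resolve_fc(bool loc_pause, bool loc_asm,
+					     bool lp_pause, bool lp_asm)
+{
+	if (loc_pause && lp_pause)
+		return e1000_fc_full;		/* symmetric flow control */
+	if (!loc_pause && loc_asm && lp_pause && lp_asm)
+		return e1000_fc_tx_pause;	/* we may send PAUSE only */
+	if (loc_pause && loc_asm && !lp_pause && lp_asm)
+		return e1000_fc_rx_pause;	/* we may honor PAUSE only */
+	return e1000_fc_none;
+}
+#endif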
+
+/**
+ * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Read the status register for the current speed/duplex and store the current
+ * speed and duplex for copper connections.
+ **/
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ u32 status;
+
+ DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ DEBUGOUT("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ DEBUGOUT("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Sets the speed and duplex to gigabit full duplex (the only possible option)
+ * for fiber/serdes links.
+ **/
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 *speed, u16 *duplex)
+{
+ DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
+
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_hw_semaphore_generic - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_generic");
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_generic(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_put_hw_semaphore_generic - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("e1000_put_hw_semaphore_generic");
+
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
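+
+/* Illustrative sketch, not part of the upstream driver: the intended
+ * acquire/release pattern around an NVM or PHY access using the two-stage
+ * (SW then FW) semaphore above. Kept under #if 0 so it is never built.
+ */
+#if 0
+static s32 example_nvm_access(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	ret_val = e1000_get_hw_semaphore_generic(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* ... access the shared NVM/PHY resource here ... */
+
+	e1000_put_hw_semaphore_generic(hw);
+	return E1000_SUCCESS;
+}
+#endif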
+
+/**
+ * e1000_get_auto_rd_done_generic - Check for auto read completion
+ * @hw: pointer to the HW structure
+ *
+ * Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
+{
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_auto_rd_done_generic");
+
+ while (i < AUTO_READ_DONE_TIMEOUT) {
+ if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
+ break;
+ msec_delay(1);
+ i++;
+ }
+
+ if (i == AUTO_READ_DONE_TIMEOUT) {
+ DEBUGOUT("Auto read by HW from NVM has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_valid_led_default_generic - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the buffer that receives the NVM (EEPROM) LED settings word
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_generic");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_id_led_init_generic - Initialize ID LED settings
+ * @hw: pointer to the HW structure
+ *
+ * Reads the default LED configuration from the NVM and derives the LEDCTL
+ * values (ledctl_mode1 and ledctl_mode2) used by the LED on/off routines.
+ **/
+s32 e1000_id_led_init_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_mask = 0x000000FF;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ u16 data, i, temp;
+ const u16 led_mask = 0x0F;
+
+ DEBUGFUNC("e1000_id_led_init_generic");
+
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ return ret_val;
+
+ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & led_mask;
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
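+
+/* Worked example (illustrative, not from the upstream sources): each 4-bit
+ * nibble i of the NVM ID-LED word selects the behaviour of LED i, and each
+ * LED owns one byte in LEDCTL. Hence the (data >> (i << 2)) & 0xF extraction
+ * above, and the (value << (i << 3)) merge into ledctl_mode1/ledctl_mode2.
+ * For example, nibble value ID_LED_ON1_OFF2 makes LED i "on" in mode1 and
+ * "off" in mode2.
+ */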
+
+/**
+ * e1000_setup_led_generic - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored.
+ **/
+s32 e1000_setup_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl;
+
+ DEBUGFUNC("e1000_setup_led_generic");
+
+ if (hw->mac.ops.setup_led != e1000_setup_led_generic)
+ return -E1000_ERR_CONFIG;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+ hw->mac.ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+ } else if (hw->phy.media_type == e1000_media_type_copper) {
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cleanup_led_generic - Set LED config to default operation
+ * @hw: pointer to the HW structure
+ *
+ * Remove the current LED configuration and set the LED configuration
+ * to the default value, saved from the EEPROM.
+ **/
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_cleanup_led_generic");
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_blink_led_generic - Blink LED
+ * @hw: pointer to the HW structure
+ *
+ * Blink the LEDs which are set to be on.
+ **/
+s32 e1000_blink_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl_blink = 0;
+ u32 i;
+
+ DEBUGFUNC("e1000_blink_led_generic");
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ /* always blink LED0 for PCI-E fiber */
+ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+ } else {
+ /* Set the blink bit for each LED that's "on" (0x0E)
+ * (or "off" if inverted) in ledctl_mode2. The blink
+ * logic in hardware only works when mode is set to "on"
+ * so it must be changed accordingly when the mode is
+ * "off" and inverted.
+ */
+ ledctl_blink = hw->mac.ledctl_mode2;
+ for (i = 0; i < 32; i += 8) {
+ u32 mode = (hw->mac.ledctl_mode2 >> i) &
+ E1000_LEDCTL_LED0_MODE_MASK;
+ u32 led_default = hw->mac.ledctl_default >> i;
+
+ if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_ON)) ||
+ ((led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_OFF))) {
+ ledctl_blink &=
+ ~(E1000_LEDCTL_LED0_MODE_MASK << i);
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_MODE_LED_ON) << i;
+ }
+ }
+ }
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on_generic - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+s32 e1000_led_on_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_led_on_generic");
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ break;
+ case e1000_media_type_copper:
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+ break;
+ default:
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off_generic - Turn LED off
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED off.
+ **/
+s32 e1000_led_off_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_led_off_generic");
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ break;
+ case e1000_media_type_copper:
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+ break;
+ default:
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
+ * @hw: pointer to the HW structure
+ * @no_snoop: bitmap of snoop events
+ *
+ * Set the PCI-Express GCR register so that snooping is disabled for the
+ * events enabled in 'no_snoop'.
+ **/
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
+{
+ u32 gcr;
+
+ DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
+
+ if (no_snoop) {
+ gcr = E1000_READ_REG(hw, E1000_GCR);
+ gcr &= ~(PCIE_NO_SNOOP_ALL);
+ gcr |= no_snoop;
+ E1000_WRITE_REG(hw, E1000_GCR, gcr);
+ }
+}
+
+/**
+ * e1000_disable_pcie_master_generic - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS if successful, else returns -10
+ * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
+ * caused the master requests to be disabled.
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ **/
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 timeout = MASTER_DISABLE_TIMEOUT;
+
+ DEBUGFUNC("e1000_disable_pcie_master_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ while (timeout) {
+ if (!(E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STATUS_GIO_MASTER_ENABLE))
+ break;
+ usec_delay(100);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Master requests are pending.\n");
+ return -E1000_ERR_MASTER_REQUESTS_PENDING;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000_reset_adaptive_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_reset_adaptive_generic");
+
+ if (!mac->adaptive_ifs) {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ return;
+ }
+
+ mac->current_ifs_val = 0;
+ mac->ifs_min_val = IFS_MIN;
+ mac->ifs_max_val = IFS_MAX;
+ mac->ifs_step_size = IFS_STEP;
+ mac->ifs_ratio = IFS_RATIO;
+
+ mac->in_ifs_mode = false;
+ E1000_WRITE_REG(hw, E1000_AIT, 0);
+}
+
+/**
+ * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Update the Adaptive Interframe Spacing Throttle value based on the
+ * time between transmitted packets and time between collisions.
+ **/
+void e1000_update_adaptive_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_update_adaptive_generic");
+
+ if (!mac->adaptive_ifs) {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ return;
+ }
+
+ if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+ if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+ mac->in_ifs_mode = true;
+ if (mac->current_ifs_val < mac->ifs_max_val) {
+ if (!mac->current_ifs_val)
+ mac->current_ifs_val = mac->ifs_min_val;
+ else
+ mac->current_ifs_val +=
+ mac->ifs_step_size;
+ E1000_WRITE_REG(hw, E1000_AIT,
+ mac->current_ifs_val);
+ }
+ }
+ } else {
+ if (mac->in_ifs_mode &&
+ (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+ mac->current_ifs_val = 0;
+ mac->in_ifs_mode = false;
+ E1000_WRITE_REG(hw, E1000_AIT, 0);
+ }
+ }
+}
+
+/**
+ * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Verify that, when auto-negotiation is not used, MDI/MDIx is set
+ * correctly; only forced MDI mode is allowed.
+ **/
+static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_setting_generic");
+
+ if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+ DEBUGOUT("Invalid MDI setting detected\n");
+ hw->phy.mdix = 1;
+ return -E1000_ERR_CONFIG;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Validate the MDI/MDIx setting, allowing for auto-crossover during forced
+ * operation.
+ **/
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic");
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register
+ * @hw: pointer to the HW structure
+ * @reg: 32bit register offset such as E1000_SCTL
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes an address/data control type register. There are several of these
+ * and they all have the format address << 8 | data and bit 31 is polled for
+ * completion.
+ **/
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+ u32 offset, u8 data)
+{
+ u32 i, regvalue = 0;
+
+ DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
+
+ /* Set up the address and data */
+ regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+ E1000_WRITE_REG(hw, reg, regvalue);
+
+	/* Poll the ready bit to see if the write completed */
+ for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+ usec_delay(5);
+ regvalue = E1000_READ_REG(hw, reg);
+ if (regvalue & E1000_GEN_CTL_READY)
+ break;
+ }
+ if (!(regvalue & E1000_GEN_CTL_READY)) {
+ DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
+ return -E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h
new file mode 100755
index 00000000..6a1b0f52
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h
@@ -0,0 +1,80 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+void e1000_init_mac_ops_generic(struct e1000_hw *hw);
+void e1000_null_mac_generic(struct e1000_hw *hw);
+s32 e1000_null_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
+bool e1000_null_mng_mode(struct e1000_hw *hw);
+void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
+void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
+void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
+s32 e1000_blink_led_generic(struct e1000_hw *hw);
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw);
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw);
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw);
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+s32 e1000_id_led_init_generic(struct e1000_hw *hw);
+s32 e1000_led_on_generic(struct e1000_hw *hw);
+s32 e1000_led_off_generic(struct e1000_hw *hw);
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32 e1000_setup_led_generic(struct e1000_hw *hw);
+s32 e1000_setup_link_generic(struct e1000_hw *hw);
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+ u32 offset, u8 data);
+
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
+
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000_reset_adaptive_generic(struct e1000_hw *hw);
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
+void e1000_update_adaptive_generic(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c
new file mode 100755
index 00000000..e1a2abe0
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c
@@ -0,0 +1,556 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+
+/**
+ * e1000_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ *
+ * Calculates the checksum over a buffer of the specified length. The
+ * calculated checksum is returned.
+ **/
+u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ DEBUGFUNC("e1000_calculate_checksum");
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
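+
+/* Illustrative sketch, not part of the upstream driver: verifying a block
+ * protected by e1000_calculate_checksum(). The stored checksum field is
+ * zeroed, the checksum is recomputed over the block and compared, exactly
+ * as e1000_enable_tx_pkt_filtering_generic() does for the DHCP cookie
+ * below. Kept under #if 0 so it is never built.
+ */
+#if 0
+static bool example_csum_ok(u8 *block, u32 length, u32 csum_offset)
+{
+	u8 stored = block[csum_offset];
+	u8 calc;
+
+	block[csum_offset] = 0;
+	calc = e1000_calculate_checksum(block, length);
+	block[csum_offset] = stored;
+
+	return calc == stored;
+}
+#endif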
+
+/**
+ * e1000_mng_enable_host_if_generic - Checks host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command has completed. It busy-waits
+ * if the previous command has not yet completed.
+ **/
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
+{
+ u32 hicr;
+ u8 i;
+
+ DEBUGFUNC("e1000_mng_enable_host_if_generic");
+
+ if (!hw->mac.arc_subsystem_valid) {
+ DEBUGOUT("ARC subsystem not valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+ /* check the previous command is completed */
+ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay_irq(1);
+ }
+
+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+ DEBUGOUT("Previous command timeout failed .\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_mng_mode_generic - Generic check management mode
+ * @hw: pointer to the HW structure
+ *
+ * Reads the firmware semaphore register and returns true (>0) if
+ * manageability is enabled, else false (0).
+ **/
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
+{
+ u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+ DEBUGFUNC("e1000_check_mng_mode_generic");
+
+ return (fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
+
+/**
+ * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ **/
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
+{
+ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+ u32 *buffer = (u32 *)&hw->mng_cookie;
+ u32 offset;
+ s32 ret_val, hdr_csum, csum;
+ u8 i, len;
+
+ DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
+
+ hw->mac.tx_pkt_filtering = true;
+
+ /* No manageability, no filtering */
+ if (!hw->mac.ops.check_mng_mode(hw)) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* If we can't read from the host interface for whatever
+ * reason, disable filtering.
+ */
+ ret_val = e1000_mng_enable_host_if_generic(hw);
+ if (ret_val != E1000_SUCCESS) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Read in the header. Length and offset are in dwords. */
+ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+ for (i = 0; i < len; i++)
+ *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
+ offset + i);
+ hdr_csum = hdr->checksum;
+ hdr->checksum = 0;
+ csum = e1000_calculate_checksum((u8 *)hdr,
+ E1000_MNG_DHCP_COOKIE_LENGTH);
+ /* If either the checksums or signature don't match, then
+ * the cookie area isn't considered valid, in which case we
+ * take the safe route of assuming Tx filtering is enabled.
+ */
+ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+ hw->mac.tx_pkt_filtering = true;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Cookie area is valid, make the final check for filtering. */
+ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
+ hw->mac.tx_pkt_filtering = false;
+
+ return hw->mac.tx_pkt_filtering;
+}
+
+/**
+ * e1000_mng_write_cmd_header_generic - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after computing its checksum.
+ **/
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+ DEBUGFUNC("e1000_mng_write_cmd_header_generic");
+
+ /* Write the whole command header structure with new checksum. */
+
+ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+ length >>= 2;
+ /* Write the relevant command block into the ram area. */
+ for (i = 0; i < length; i++) {
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+ *((u32 *) hdr + i));
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_mng_host_if_write_generic - Write to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the given offset on the host
+ * interface. It handles alignment so the writes are done as efficiently as
+ * possible, and it accumulates the running sum of the data in *sum.
+ **/
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length, u16 offset, u8 *sum)
+{
+ u8 *tmp;
+ u8 *bufptr = buffer;
+ u32 data = 0;
+ u16 remaining, i, j, prev_bytes;
+
+ DEBUGFUNC("e1000_mng_host_if_write_generic");
+
+ /* sum is a plain running sum of the data, not a checksum */
+
+ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
+ return -E1000_ERR_PARAM;
+
+ tmp = (u8 *)&data;
+ prev_bytes = offset & 0x3;
+ offset >>= 2;
+
+ if (prev_bytes) {
+ data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
+ for (j = prev_bytes; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
+ length -= j - prev_bytes;
+ offset++;
+ }
+
+ remaining = length & 0x3;
+ length -= remaining;
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant command block into the
+ * ram area.
+ */
+ for (i = 0; i < length; i++) {
+ for (j = 0; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
+ data);
+ }
+ if (remaining) {
+ for (j = 0; j < sizeof(u32); j++) {
+ if (j < remaining)
+ *(tmp + j) = *bufptr++;
+ else
+ *(tmp + j) = 0;
+
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
+ data);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length)
+{
+ struct e1000_host_mng_command_header hdr;
+ s32 ret_val;
+ u32 hicr;
+
+ DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
+
+ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+ hdr.command_length = length;
+ hdr.reserved1 = 0;
+ hdr.reserved2 = 0;
+ hdr.checksum = 0;
+
+ /* Enable the host interface */
+ ret_val = e1000_mng_enable_host_if_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Populate the host interface with the contents of "buffer". */
+ ret_val = e1000_mng_host_if_write_generic(hw, buffer, length,
+ sizeof(hdr), &(hdr.checksum));
+ if (ret_val)
+ return ret_val;
+
+ /* Write the manageability command header */
+ ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr);
+ if (ret_val)
+ return ret_val;
+
+ /* Tell the ARC a new command is pending. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_enable_mng_pass_thru - Check if management passthrough is needed
+ * @hw: pointer to the HW structure
+ *
+ * Verifies the hardware needs to leave interface enabled so that frames can
+ * be directed to and from the management interface.
+ **/
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+ u32 fwsm, factps;
+
+ DEBUGFUNC("e1000_enable_mng_pass_thru");
+
+ if (!hw->mac.asf_firmware_present)
+ return false;
+
+ manc = E1000_READ_REG(hw, E1000_MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ return false;
+
+ if (hw->mac.has_fwsm) {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ factps = E1000_READ_REG(hw, E1000_FACTPS);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)))
+ return true;
+ } else if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * e1000_host_interface_command - Writes buffer to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: contains a command to write
+ * @length: the byte length of the buffer, must be multiple of 4 bytes
+ *
+ * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS
+ * else returns E1000_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
+{
+ u32 hicr, i;
+
+ DEBUGFUNC("e1000_host_interface_command");
+
+ if (!(hw->mac.arc_subsystem_valid)) {
+ DEBUGOUT("Hardware doesn't support host interface command.\n");
+ return E1000_SUCCESS;
+ }
+
+ if (!hw->mac.asf_firmware_present) {
+ DEBUGOUT("Firmware is not present.\n");
+ return E1000_SUCCESS;
+ }
+
+ if (length == 0 || length & 0x3 ||
+ length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
+ DEBUGOUT("Buffer length failure.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < length; i++)
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+ *((u32 *)buffer + i));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check command successful completion. */
+ if (i == E1000_HI_COMMAND_TIMEOUT ||
+ (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
+ DEBUGOUT("Command has failed with no status valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ for (i = 0; i < length; i++)
+ *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
+ E1000_HOST_IF,
+ i);
+
+ return E1000_SUCCESS;
+}
+/**
+ * e1000_load_firmware - Writes proxy FW code buffer to host interface
+ * and executes it.
+ * @hw: pointer to the HW structure
+ * @buffer: contains a firmware to write
+ * @length: the byte length of the buffer, must be multiple of 4 bytes
+ *
+ * Returns E1000_SUCCESS upon success, E1000_ERR_CONFIG if the feature is not
+ * enabled in HW, else E1000_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length)
+{
+ u32 hicr, hibba, fwsm, icr, i;
+
+ DEBUGFUNC("e1000_load_firmware");
+
+ if (hw->mac.type < e1000_i210) {
+ DEBUGOUT("Hardware doesn't support loading FW by the driver\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_CONFIG;
+ }
+ if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
+ DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
+ DEBUGOUT("Buffer length failure.\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ /* Clear notification from ROM-FW by reading ICR register */
+ icr = E1000_READ_REG(hw, E1000_ICR_V2);
+
+ /* Reset ROM-FW */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ hicr |= E1000_HICR_FW_RESET_ENABLE;
+ E1000_WRITE_REG(hw, E1000_HICR, hicr);
+ hicr |= E1000_HICR_FW_RESET;
+ E1000_WRITE_REG(hw, E1000_HICR, hicr);
+ E1000_WRITE_FLUSH(hw);
+
+ /* Wait till MAC notifies about its readiness after ROM-FW reset */
+ for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
+ icr = E1000_READ_REG(hw, E1000_ICR_V2);
+ if (icr & E1000_ICR_MNG)
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for timeout */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("FW reset failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Wait till MAC is ready to accept new FW code */
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ if ((fwsm & E1000_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
+ E1000_FWSM_HI_EN_ONLY_MODE))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for timeout */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("FW reset failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant FW code block
+ * into the ram area in DWORDs via 1kB ram addressing window.
+ */
+ for (i = 0; i < length; i++) {
+ if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) {
+ /* Point to correct 1kB ram window */
+ hibba = E1000_HI_FW_BASE_ADDRESS +
+ ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
+ (i / E1000_HI_FW_BLOCK_DWORD_LENGTH));
+
+ E1000_WRITE_REG(hw, E1000_HIBBA, hibba);
+ }
+
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
+ i % E1000_HI_FW_BLOCK_DWORD_LENGTH,
+ *((u32 *)buffer + i));
+ }
+
+ /* Setting this bit tells the ARC that a new FW is ready to execute. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for successful FW start. */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("New FW did not start within timeout period.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return E1000_SUCCESS;
+}
+
+
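For context, the manageability helpers added above compose into one flow: enable the host interface, write the payload after the command header, write the header with its checksum, then set E1000_HICR_C to hand the command to the ARC firmware. Below is a minimal illustrative sketch of a caller, not part of the vendored driver; it uses only symbols introduced in this file and assumes 'hw' has already been initialized elsewhere by the igb driver.

static s32 example_push_dhcp_cookie(struct e1000_hw *hw, u8 *payload, u16 len)
{
	/* Skip the write entirely when manageability firmware is not running. */
	if (!hw->mac.ops.check_mng_mode(hw))
		return -E1000_ERR_HOST_INTERFACE_COMMAND;

	/* e1000_mng_write_dhcp_info_generic() enables the host interface,
	 * writes 'payload' after the command header, writes the header with
	 * its checksum and finally sets E1000_HICR_C to start the command.
	 */
	return e1000_mng_write_dhcp_info_generic(hw, payload, len);
}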
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h
new file mode 100755
index 00000000..c94b2185
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h
@@ -0,0 +1,89 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MANAGE_H_
+#define _E1000_MANAGE_H_
+
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length, u16 offset, u8 *sum);
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr);
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
+ u8 *buffer, u16 length);
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+u8 e1000_calculate_checksum(u8 *buffer, u32 length);
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length);
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+#define E1000_FWSM_FW_VALID 0x00008000
+#define E1000_FWSM_HI_EN_ONLY_MODE 0x4
+
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */
+#define E1000_HI_FW_BASE_ADDRESS 0x10000
+#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */
+#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */
+#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */
+#define E1000_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C 0x02
+#define E1000_HICR_SV 0x04 /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET 0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149
+
+#endif
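The mode checks in e1000_manage.c rely on the firmware mode living in bits 3:1 of the FWSM register, which is what the E1000_FWSM_MODE_MASK (0xE) and E1000_FWSM_MODE_SHIFT (1) defines above encode. A small sketch of the decoding, illustrative only and using just the defines in this header:

/* Extract the firmware mode from an FWSM value.
 * e1000_check_mng_mode_generic() is equivalent to comparing the result
 * against E1000_MNG_IAMT_MODE (0x3); the pass-through check in
 * e1000_enable_mng_pass_thru() compares it against e1000_mng_mode_pt.
 */
static inline u32 example_fwsm_mode(u32 fwsm)
{
	return (fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT;
}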
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c
new file mode 100755
index 00000000..6d004b65
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c
@@ -0,0 +1,526 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_mbx.h"
+
+/**
+ * e1000_null_mbx_check_for_flag - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ DEBUGFUNC("e1000_null_mbx_check_flag");
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mbx_transact - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG *msg,
+ u16 E1000_UNUSEDARG size,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ DEBUGFUNC("e1000_null_mbx_rw_msg");
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_read_mbx");
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_mbx");
+
+ if (size > mbx->size)
+ ret_val = -E1000_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_msg");
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_ack");
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_rst");
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("e1000_poll_for_msg");
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ * e1000_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("e1000_poll_for_ack");
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ * e1000_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_read_posted_mbx");
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = e1000_poll_for_msg(hw, mbx_id);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_write_posted_mbx");
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = e1000_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_ops_generic - Initialize mbx function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets the function pointers to no-op functions
+ **/
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ mbx->ops.init_params = e1000_null_ops_generic;
+ mbx->ops.read = e1000_null_mbx_transact;
+ mbx->ops.write = e1000_null_mbx_transact;
+ mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag;
+ mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag;
+ mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+}
+
+static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
+{
+ u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = E1000_SUCCESS;
+ E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_msg_pf");
+
+ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_ack_pf");
+
+ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_rst_pf");
+
+ if (vflre & (1 << vf_number)) {
+ ret_val = E1000_SUCCESS;
+ E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+ u32 p2v_mailbox;
+
+ DEBUGFUNC("e1000_obtain_mbx_lock_pf");
+
+ /* Take ownership of the buffer */
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
+ if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
+ ret_val = E1000_SUCCESS;
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_write_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ e1000_check_for_msg_pf(hw, vf_number);
+ e1000_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer*/
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+
+}
+
+/**
+ * e1000_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_read_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ case e1000_i354:
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = e1000_read_mbx_pf;
+ mbx->ops.write = e1000_write_mbx_pf;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+ mbx->ops.check_for_msg = e1000_check_for_msg_pf;
+ mbx->ops.check_for_ack = e1000_check_for_ack_pf;
+ mbx->ops.check_for_rst = e1000_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+ default:
+ return E1000_SUCCESS;
+ }
+}
+
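A note on the posted variants above: e1000_write_posted_mbx() and e1000_read_posted_mbx() only make progress when mbx->timeout is non-zero, and e1000_init_mbx_params_pf() leaves it at 0, so the PF path normally uses the non-posted calls while a VF-side implementation configures a timeout and delay. A minimal illustrative sketch of a posted exchange, assuming a hypothetical init has set hw->mbx.timeout and hw->mbx.usec_delay:

static s32 example_posted_exchange(struct e1000_hw *hw, u32 *msg, u16 size,
				   u16 mbx_id)
{
	s32 ret_val;

	/* Write the request and wait for the peer's ack, bounded by
	 * timeout iterations of usec_delay.
	 */
	ret_val = e1000_write_posted_mbx(hw, msg, size, mbx_id);
	if (ret_val)
		return ret_val;

	/* Poll for the reply and copy it out of the mailbox memory. */
	return e1000_read_posted_mbx(hw, msg, size, mbx_id);
}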
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h
new file mode 100755
index 00000000..bbf838c8
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h
@@ -0,0 +1,87 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "e1000_api.h"
+
+#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+
+/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is E1000_PF_*.
+ * Message ACKs are the value or'd with 0xF0000000
+ */
+/* Msgs below or'd with this are the ACK */
+#define E1000_VT_MSGTYPE_ACK 0x80000000
+/* Msgs below or'd with this are the NACK */
+#define E1000_VT_MSGTYPE_NACK 0x40000000
+/* Indicates that VF is still clear to send requests */
+#define E1000_VT_MSGTYPE_CTS 0x20000000
+#define E1000_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_check_for_msg(struct e1000_hw *, u16);
+s32 e1000_check_for_ack(struct e1000_hw *, u16);
+s32 e1000_check_for_rst(struct e1000_hw *, u16);
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
+s32 e1000_init_mbx_params_pf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
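A minimal PF-side usage sketch, illustrative only and not part of the patch, tying the API in e1000_mbx.c to the defines above. It assumes hw->mbx has been initialized with e1000_init_mbx_params_pf() and that VF 0 exists; the single-word control message is just an example payload.

static s32 example_pf_ping_vf0(struct e1000_hw *hw)
{
	u32 msg[E1000_VFMAILBOX_SIZE];
	s32 ret_val;

	/* Tell VF 0 the PF is up; E1000_PF_CONTROL_MSG is defined above. */
	msg[0] = E1000_PF_CONTROL_MSG;

	/* Non-posted write: copies the word into VMBMEM(0) and raises
	 * P2VMAILBOX_STS so the VF is interrupted.
	 */
	ret_val = e1000_write_mbx(hw, msg, 1, 0);
	if (ret_val)
		return ret_val;

	/* Once check_for_msg reports mail from the VF, read it back. */
	if (!e1000_check_for_msg(hw, 0))
		ret_val = e1000_read_mbx(hw, msg, E1000_VFMAILBOX_SIZE, 0);

	return ret_val;
}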
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c
new file mode 100755
index 00000000..ff421986
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c
@@ -0,0 +1,967 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+
+static void e1000_reload_nvm_generic(struct e1000_hw *hw);
+
+/**
+ * e1000_init_nvm_ops_generic - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers to no-op functions
+ **/
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ DEBUGFUNC("e1000_init_nvm_ops_generic");
+
+ /* Initialize function pointers */
+ nvm->ops.init_params = e1000_null_ops_generic;
+ nvm->ops.acquire = e1000_null_ops_generic;
+ nvm->ops.read = e1000_null_read_nvm;
+ nvm->ops.release = e1000_null_nvm_generic;
+ nvm->ops.reload = e1000_reload_nvm_generic;
+ nvm->ops.update = e1000_null_ops_generic;
+ nvm->ops.valid_led_default = e1000_null_led_default;
+ nvm->ops.validate = e1000_null_ops_generic;
+ nvm->ops.write = e1000_null_write_nvm;
+}
+
+/**
+ * e1000_null_nvm_read - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
+ u16 E1000_UNUSEDARG *c)
+{
+ DEBUGFUNC("e1000_null_read_nvm");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_nvm_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_nvm_generic");
+ return;
+}
+
+/**
+ * e1000_null_led_default - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_null_led_default");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_write_nvm - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
+ u16 E1000_UNUSEDARG *c)
+{
+ DEBUGFUNC("e1000_null_write_nvm");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd | E1000_EECD_SK;
+ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd & ~E1000_EECD_SK;
+ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u32 mask;
+
+ DEBUGFUNC("e1000_shift_out_eec_bits");
+
+ mask = 0x01 << (count - 1);
+ if (nvm->type == e1000_nvm_eeprom_spi)
+ eecd |= E1000_EECD_DO;
+
+ do {
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(nvm->delay_usec);
+
+ e1000_raise_eec_clk(hw, &eecd);
+ e1000_lower_eec_clk(hw, &eecd);
+
+ mask >>= 1;
+ } while (mask);
+
+ eecd &= ~E1000_EECD_DI;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ DEBUGFUNC("e1000_shift_in_eec_bits");
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data <<= 1;
+ e1000_raise_eec_clk(hw, &eecd);
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ eecd &= ~E1000_EECD_DI;
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+
+ return data;
+}
+
+/**
+ * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
+ **/
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+
+ DEBUGFUNC("e1000_poll_eerd_eewr_done");
+
+ for (i = 0; i < attempts; i++) {
+ if (ee_reg == E1000_NVM_POLL_READ)
+ reg = E1000_READ_REG(hw, E1000_EERD);
+ else
+ reg = E1000_READ_REG(hw, E1000_EEWR);
+
+ if (reg & E1000_NVM_RW_REG_DONE)
+ return E1000_SUCCESS;
+
+ usec_delay(5);
+ }
+
+ return -E1000_ERR_NVM;
+}
+
+/**
+ * e1000_acquire_nvm_generic - Generic request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
+{
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+
+ DEBUGFUNC("e1000_acquire_nvm_generic");
+
+ E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ while (timeout) {
+ if (eecd & E1000_EECD_GNT)
+ break;
+ usec_delay(5);
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ timeout--;
+ }
+
+ if (!timeout) {
+ eecd &= ~E1000_EECD_REQ;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ DEBUGOUT("Could not acquire NVM grant\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_standby_nvm - Return EEPROM to standby state
+ * @hw: pointer to the HW structure
+ *
+ * Return the EEPROM to a standby state.
+ **/
+static void e1000_standby_nvm(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ DEBUGFUNC("e1000_standby_nvm");
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+ }
+}
+
+/**
+ * e1000_stop_nvm - Terminate EEPROM command
+ * @hw: pointer to the HW structure
+ *
+ * Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ DEBUGFUNC("e1000_stop_nvm");
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+ /* Pull CS high */
+ eecd |= E1000_EECD_CS;
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+}
+
+/**
+ * e1000_release_nvm_generic - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000_release_nvm_generic(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ DEBUGFUNC("e1000_release_nvm_generic");
+
+ e1000_stop_nvm(hw);
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ eecd &= ~E1000_EECD_REQ;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u8 spi_stat_reg;
+
+ DEBUGFUNC("e1000_ready_nvm_eeprom");
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(1);
+
+ /* Read "Status Register" repeatedly until the LSB is cleared.
+ * The EEPROM will signal that the command has been completed
+ * by clearing bit 0 of the internal status register. If it's
+ * not cleared within 'timeout', then error out.
+ */
+ while (timeout) {
+ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+ hw->nvm.opcode_bits);
+ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+ if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+ break;
+
+ usec_delay(5);
+ e1000_standby_nvm(hw);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("SPI NVM Status error\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_nvm_spi - Read EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i = 0;
+ s32 ret_val;
+ u16 word_in;
+ u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+ DEBUGFUNC("e1000_read_nvm_spi");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+ e1000_standby_nvm(hw);
+
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ read_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+ /* Read the data. SPI NVMs increment the address with each byte
+ * read and will roll over if reading beyond the end. This allows
+ * us to read the whole NVM from any offset
+ */
+ for (i = 0; i < words; i++) {
+ word_in = e1000_shift_in_eec_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eerd = 0;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_read_nvm_eerd");
+
+ /* A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+ E1000_NVM_RW_REG_START;
+
+ E1000_WRITE_REG(hw, E1000_EERD, eerd);
+ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+ if (ret_val)
+ break;
+
+ data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+ E1000_NVM_RW_REG_DATA);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = -E1000_ERR_NVM;
+ u16 widx = 0;
+
+ DEBUGFUNC("e1000_write_nvm_spi");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ while (widx < words) {
+ u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
+
+ e1000_standby_nvm(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode) */
+ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+ nvm->opcode_bits);
+
+ e1000_standby_nvm(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ write_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+ nvm->address_bits);
+
+ /* Loop to allow for up to whole page write of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_eec_bits(hw, word_out, 16);
+ widx++;
+
+ if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+ e1000_standby_nvm(hw);
+ break;
+ }
+ }
+ msec_delay(10);
+ nvm->ops.release(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_pba_string_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ DEBUGFUNC("e1000_read_pba_string_generic");
+
+ if (pba_num == NULL) {
+ DEBUGOUT("PBA string buffer was null\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* if nvm_data is not ptr guard the PBA must be in legacy format which
+ * means pba_ptr is actually our second data word for the PBA number
+ * and we can decode it into an ascii string
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ DEBUGOUT("NVM PBA number is not stored as string\n");
+
+ /* make sure callers buffer is big enough to store the PBA */
+ if (pba_num_size < E1000_PBANUM_LENGTH) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (nvm_data >> 12) & 0xF;
+ pba_num[1] = (nvm_data >> 8) & 0xF;
+ pba_num[2] = (nvm_data >> 4) & 0xF;
+ pba_num[3] = nvm_data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return E1000_SUCCESS;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return -E1000_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(nvm_data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_pba_length_generic - Read device part number length
+ * @hw: pointer to the HW structure
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number length from the EEPROM and
+ * stores the value in pba_num_size.
+ **/
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 length;
+
+ DEBUGFUNC("e1000_read_pba_length_generic");
+
+ if (pba_num_size == NULL) {
+ DEBUGOUT("PBA buffer size was null\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* if data is not ptr guard the PBA must be in legacy format */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ *pba_num_size = E1000_PBANUM_LENGTH;
+ return E1000_SUCCESS;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+
+ /* Convert from length in u16 values to u8 chars, add 1 for NULL,
+ * and subtract 2 because length field is included in length.
+ */
+ *pba_num_size = ((u32)length * 2) - 1;
+
+ return E1000_SUCCESS;
+}
+
+
+
+
+
+/**
+ * e1000_read_mac_addr_generic - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ * Since devices with two ports use the same EEPROM, we increment the
+ * last bit in the MAC address for the second port.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = E1000_READ_REG(hw, E1000_RAH(0));
+ rar_low = E1000_READ_REG(hw, E1000_RAL(0));
+
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_generic");
+
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+ DEBUGOUT("NVM Checksum Invalid\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_update_nvm_checksum_generic - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_update_nvm_checksum");
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ if (ret_val)
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_reload_nvm_generic - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+static void e1000_reload_nvm_generic(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+
+ DEBUGFUNC("e1000_reload_nvm_generic");
+
+ usec_delay(10);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_get_fw_version - Get firmware version information
+ * @hw: pointer to the HW structure
+ * @fw_vers: pointer to output version structure
+ *
+ * unsupported/not present features return 0 in version structure
+ **/
+void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+ u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+ u8 q, hval, rem, result;
+ u16 comb_verh, comb_verl, comb_offset;
+
+ memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+ /* basic eeprom version numbers, bits used vary by part and by tool
+ * used to create the nvm images */
+ /* Check which data format we have */
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ switch (hw->mac.type) {
+ case e1000_i211:
+ e1000_read_invm_version(hw, fw_vers);
+ return;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ /* Use this format, unless EETRACK ID exists,
+ * then use alternate format
+ */
+ if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+ goto etrack_id;
+ }
+ break;
+ case e1000_i210:
+ if (!(e1000_get_flash_presence_i210(hw))) {
+ e1000_read_invm_version(hw, fw_vers);
+ return;
+ }
+ /* fall through */
+ case e1000_i350:
+ case e1000_i354:
+ /* find combo image version */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) &&
+ (comb_offset != NVM_VER_INVALID)) {
+
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* get Option Rom version if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != NVM_VER_INVALID) &&
+ (comb_verl != NVM_VER_INVALID))) {
+
+ fw_vers->or_valid = true;
+ fw_vers->or_major =
+ comb_verl >> NVM_COMB_VER_SHFT;
+ fw_vers->or_build =
+ (comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT);
+ fw_vers->or_patch =
+ comb_verh & NVM_COMB_VER_MASK;
+ }
+ }
+ break;
+ default:
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+
+ /* check for old style version format in newer images */
+ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+ eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+ } else {
+ eeprom_verl = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ }
+ /* Convert the minor value to hex before assigning it to the output struct.
+ * The value to be converted will not exceed 99, per tool output.
+ */
+ q = eeprom_verl / NVM_HEX_CONV;
+ hval = q * NVM_HEX_TENS;
+ rem = eeprom_verl % NVM_HEX_CONV;
+ result = hval + rem;
+ fw_vers->eep_minor = result;
+
+etrack_id:
+ if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+ | eeprom_verl;
+ }
+ return;
+}
+
+
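The checksum scheme used by e1000_validate_nvm_checksum_generic() and e1000_update_nvm_checksum_generic() above is: every 16-bit word from offset 0x00 through the checksum word must sum, modulo 2^16, to NVM_SUM. The stand-alone sketch below models that arithmetic outside the driver; it assumes NVM_SUM is 0xBABA and NVM_CHECKSUM_REG is 0x3F (those constants are defined in e1000_defines.h, not in this patch, so treat the values here as illustrative).

#include <stdint.h>
#include <stdio.h>

#define NVM_SUM          0xBABAu   /* expected 16-bit sum (assumed value) */
#define NVM_CHECKSUM_REG 0x3Fu     /* word index of the checksum (assumed) */

/* Mirrors e1000_validate_nvm_checksum_generic(): words 0x00..0x3F, the
 * checksum word included, must sum (mod 2^16) to NVM_SUM. */
static int nvm_checksum_valid(const uint16_t *words)
{
    uint16_t sum = 0;
    unsigned int i;

    for (i = 0; i < NVM_CHECKSUM_REG + 1; i++)
        sum += words[i];
    return sum == (uint16_t)NVM_SUM;
}

/* Mirrors e1000_update_nvm_checksum_generic(): sum words 0x00..0x3E and
 * store NVM_SUM minus that sum in the checksum word. */
static void nvm_checksum_update(uint16_t *words)
{
    uint16_t sum = 0;
    unsigned int i;

    for (i = 0; i < NVM_CHECKSUM_REG; i++)
        sum += words[i];
    words[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - sum);
}

int main(void)
{
    uint16_t words[NVM_CHECKSUM_REG + 1] = { 0x1234, 0xABCD, 0x0042 };

    nvm_checksum_update(words);
    printf("checksum word = 0x%04X, valid = %d\n",
           words[NVM_CHECKSUM_REG], nvm_checksum_valid(words));
    return 0;
}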
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h
new file mode 100755
index 00000000..fe62785a
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h
@@ -0,0 +1,75 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+
+struct e1000_fw_version {
+ u32 etrack_id;
+ u16 eep_major;
+ u16 eep_minor;
+ u16 eep_build;
+
+ u8 invm_major;
+ u8 invm_minor;
+ u8 invm_img_type;
+
+ bool or_valid;
+ u16 or_major;
+ u16 or_build;
+ u16 or_patch;
+};
+
+
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+void e1000_null_nvm_generic(struct e1000_hw *hw);
+s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data);
+s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);
+
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000_release_nvm_generic(struct e1000_hw *hw);
+void e1000_get_fw_version(struct e1000_hw *hw,
+ struct e1000_fw_version *fw_vers);
+
+#define E1000_STM_OPCODE 0xDB00
+
+#endif
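For callers, e1000_get_fw_version() fills the e1000_fw_version structure declared above and leaves any field the part does not support at zero, so the or_valid flag gates the Option ROM fields. A minimal usage sketch, assuming an already-initialized struct e1000_hw; the helper name igb_report_fw_version() is hypothetical.

#include <linux/kernel.h>
#include "e1000_hw.h"
#include "e1000_nvm.h"

/* Illustrative helper (name is hypothetical): log the firmware/EEPROM
 * versions of an already-initialized adapter. */
static void igb_report_fw_version(struct e1000_hw *hw)
{
    struct e1000_fw_version fw;

    e1000_get_fw_version(hw, &fw);

    printk(KERN_INFO "eeprom image %u.%u (build %u), etrack 0x%08x\n",
           fw.eep_major, fw.eep_minor, fw.eep_build, fw.etrack_id);

    if (fw.or_valid)
        printk(KERN_INFO "option rom %u.%u.%u\n",
               fw.or_major, fw.or_build, fw.or_patch);
}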
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h
new file mode 100755
index 00000000..d1cf98e2
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h
@@ -0,0 +1,136 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS independent part of e1000
+ * includes register access macros
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/sched.h>
+#include "kcompat.h"
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+
+#define usec_delay(x) udelay(x)
+#define usec_delay_irq(x) udelay(x)
+#ifndef msec_delay
+#define msec_delay(x) do { \
+ /* Don't mdelay in interrupt context! */ \
+ if (in_interrupt()) \
+ BUG(); \
+ else \
+ msleep(x); \
+} while (0)
+
+/* Some workarounds require millisecond delays and are run during interrupt
+ * context. Most notably, when establishing link, the phy may need tweaking
+ * but cannot process phy register reads/writes faster than millisecond
+ * intervals...and we establish link due to a "link status change" interrupt.
+ */
+#define msec_delay_irq(x) mdelay(x)
+#endif
+
+#define PCI_COMMAND_REGISTER PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
+#define ETH_ADDR_LEN ETH_ALEN
+
+#ifdef __BIG_ENDIAN
+#define E1000_BIG_ENDIAN __BIG_ENDIAN
+#endif
+
+
+#ifdef DEBUG
+#define DEBUGOUT(S) printk(KERN_DEBUG S)
+#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A)
+#else
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+#endif
+
+#ifdef DEBUG_FUNC
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#else
+#define DEBUGFUNC(F)
+#endif
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+#define E1000_REGISTER(a, reg) reg
+
+#define E1000_WRITE_REG(a, reg, value) ( \
+ writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg))))
+
+#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg)))
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+ writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+ readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+ writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+ readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))
+
+#define E1000_WRITE_REG_IO(a, reg, offset) do { \
+ outl(reg, ((a)->io_base)); \
+ outl(offset, ((a)->io_base + 4)); } while (0)
+
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
+
+#define E1000_WRITE_FLASH_REG(a, reg, value) ( \
+ writel((value), ((a)->flash_address + reg)))
+
+#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
+ writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))
+
+#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))
+
+#endif /* _E1000_OSDEP_H_ */
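The wrappers above are thin MMIO accessors: E1000_READ_REG() and E1000_WRITE_REG() expand to readl()/writel() against hw->hw_addr, and E1000_WRITE_FLUSH() reads the STATUS register so a posted write reaches the device before any subsequent delay or dependent access. A minimal sketch of the resulting read-modify-write-flush pattern (the same pattern e1000_reload_nvm_generic() uses in e1000_nvm.c above), assuming e1000_hw.h pulls in the register definitions; the helper name igb_set_ctrl_ext_bit() is hypothetical.

#include "e1000_hw.h"   /* struct e1000_hw, E1000_CTRL_EXT, register macros */

/* Illustrative helper (name is hypothetical): set a bit in CTRL_EXT and
 * force the posted MMIO write out to the device before returning. */
static void igb_set_ctrl_ext_bit(struct e1000_hw *hw, u32 bit)
{
    u32 ctrl_ext;

    ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);   /* readl() */
    ctrl_ext |= bit;
    E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);   /* writel() */
    E1000_WRITE_FLUSH(hw);   /* read STATUS so the write is not left posted */
}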
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c
new file mode 100755
index 00000000..df224702
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c
@@ -0,0 +1,3405 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] = {
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_m88_cable_length_table) / \
+ sizeof(e1000_m88_cable_length_table[0]))
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
+ 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
+ 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
+ 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
+ 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
+ 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
+ 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
+ 124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_igp_2_cable_length_table) / \
+ sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ * e1000_init_phy_ops_generic - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers to no-op functions
+ **/
+void e1000_init_phy_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ DEBUGFUNC("e1000_init_phy_ops_generic");
+
+ /* Initialize function pointers */
+ phy->ops.init_params = e1000_null_ops_generic;
+ phy->ops.acquire = e1000_null_ops_generic;
+ phy->ops.check_polarity = e1000_null_ops_generic;
+ phy->ops.check_reset_block = e1000_null_ops_generic;
+ phy->ops.commit = e1000_null_ops_generic;
+ phy->ops.force_speed_duplex = e1000_null_ops_generic;
+ phy->ops.get_cfg_done = e1000_null_ops_generic;
+ phy->ops.get_cable_length = e1000_null_ops_generic;
+ phy->ops.get_info = e1000_null_ops_generic;
+ phy->ops.set_page = e1000_null_set_page;
+ phy->ops.read_reg = e1000_null_read_reg;
+ phy->ops.read_reg_locked = e1000_null_read_reg;
+ phy->ops.read_reg_page = e1000_null_read_reg;
+ phy->ops.release = e1000_null_phy_generic;
+ phy->ops.reset = e1000_null_ops_generic;
+ phy->ops.set_d0_lplu_state = e1000_null_lplu_state;
+ phy->ops.set_d3_lplu_state = e1000_null_lplu_state;
+ phy->ops.write_reg = e1000_null_write_reg;
+ phy->ops.write_reg_locked = e1000_null_write_reg;
+ phy->ops.write_reg_page = e1000_null_write_reg;
+ phy->ops.power_up = e1000_null_phy_generic;
+ phy->ops.power_down = e1000_null_phy_generic;
+ phy->ops.read_i2c_byte = e1000_read_i2c_byte_null;
+ phy->ops.write_i2c_byte = e1000_write_i2c_byte_null;
+}
+
+/**
+ * e1000_null_set_page - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_null_set_page");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_read_reg - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_null_read_reg");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_phy_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_phy_generic");
+ return;
+}
+
+/**
+ * e1000_null_lplu_state - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw,
+ bool E1000_UNUSEDARG active)
+{
+ DEBUGFUNC("e1000_null_lplu_state");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_write_reg - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_null_write_reg");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_i2c_byte_null - No-op function, return 0
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: data value read
+ *
+ **/
+s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG byte_offset,
+ u8 E1000_UNUSEDARG dev_addr,
+ u8 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_read_i2c_byte_null");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_i2c_byte_null - No-op function, return 0
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: data value to write
+ *
+ **/
+s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG byte_offset,
+ u8 E1000_UNUSEDARG dev_addr,
+ u8 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_write_i2c_byte_null");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_reset_block_generic - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise
+ * return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
+{
+ u32 manc;
+
+ DEBUGFUNC("e1000_check_reset_block");
+
+ manc = E1000_READ_REG(hw, E1000_MANC);
+
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+ E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+s32 e1000_get_phy_id(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_id;
+
+ DEBUGFUNC("e1000_get_phy_id");
+
+ if (!phy->ops.read_reg)
+ return E1000_SUCCESS;
+
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id = (u32)(phy_id << 16);
+ usec_delay(20);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_reset_dsp_generic - Reset PHY DSP
+ * @hw: pointer to the HW structure
+ *
+ * Reset the digital signal processor.
+ **/
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_phy_reset_dsp_generic");
+
+ if (!hw->phy.ops.write_reg)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+ if (ret_val)
+ return ret_val;
+
+ return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+}
+
+/**
+ * e1000_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ DEBUGFUNC("e1000_read_phy_reg_mdic");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usec_delay_irq(50);
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
+ *data = (u16) mdic;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_mdic - Write MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ DEBUGFUNC("e1000_write_phy_reg_mdic");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to write the desired data.
+ */
+ mdic = (((u32)data) |
+ (offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usec_delay_irq(50);
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_phy_reg_i2c - Read PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the i2c interface and stores the
+ * retrieved information in data.
+ **/
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+
+ DEBUGFUNC("e1000_read_phy_reg_i2c");
+
+ /* Set up Op-code, Phy Address, and register address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ (E1000_I2CCMD_OPCODE_READ));
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+ /* Need to byte-swap the 16-bit value. */
+ *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_i2c - Write PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the i2c interface.
+ **/
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+ u16 phy_data_swapped;
+
+ DEBUGFUNC("e1000_write_phy_reg_i2c");
+
+ /* Prevent overwriting the SFP I2C EEPROM, which is at the A0 address. */
+ if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) {
+ DEBUGOUT1("PHY I2C Address %d is out of range.\n",
+ hw->phy.addr);
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Swap the data bytes for the I2C interface */
+ phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+ /* Set up Op-code, Phy Address, and register address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * PHY to write the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_WRITE |
+ phy_data_swapped);
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C write completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_sfp_data_byte - Reads SFP module data.
+ * @hw: pointer to the HW structure
+ * @offset: byte location offset to be read
+ * @data: read data buffer pointer
+ *
+ * Reads one byte of SFP module data stored in the SFP's resident EEPROM
+ * memory or in the SFP diagnostic area.
+ * The function should be called with
+ * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access, or
+ * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameter
+ * access.
+ **/
+s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
+{
+ u32 i = 0;
+ u32 i2ccmd = 0;
+ u32 data_local = 0;
+
+ DEBUGFUNC("e1000_read_sfp_data_byte");
+
+ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+ DEBUGOUT("I2CCMD command address exceeds upper limit\n");
+ return -E1000_ERR_PHY;
+ }
+
+ /* Set up Op-code and EEPROM Address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * EEPROM to retrieve the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_READ);
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ data_local = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (data_local & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(data_local & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (data_local & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+ *data = (u8) data_local & 0xFF;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_sfp_data_byte - Writes SFP module data.
+ * @hw: pointer to the HW structure
+ * @offset: byte location offset to write to
+ * @data: data to write
+ *
+ * Writes one byte of SFP module data stored in the SFP's resident EEPROM
+ * memory or in the SFP diagnostic area.
+ * The function should be called with
+ * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access, or
+ * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameter
+ * access.
+ **/
+s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
+{
+ u32 i = 0;
+ u32 i2ccmd = 0;
+ u32 data_local = 0;
+
+ DEBUGFUNC("e1000_write_sfp_data_byte");
+
+ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+ DEBUGOUT("I2CCMD command address exceeds upper limit\n");
+ return -E1000_ERR_PHY;
+ }
+ /* The programming interface is 16 bits wide
+ * so we need to read the whole word first
+ * then update appropriate byte lane and write
+ * the updated word back.
+ */
+ /* Set up Op-code and EEPROM Address in the I2CCMD
+ * register. The MAC will take care of interfacing
+ * with an EEPROM to write the data given.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_READ);
+ /* Set a command to read single word */
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ /* Poll the ready bit to see if the most recently
+ * launched I2C operation completed
+ */
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY) {
+ /* Check if this is READ or WRITE phase */
+ if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
+ E1000_I2CCMD_OPCODE_READ) {
+ /* Write the selected byte
+ * lane and update whole word
+ */
+ data_local = i2ccmd & 0xFF00;
+ data_local |= data;
+ i2ccmd = ((offset <<
+ E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_WRITE | data_local);
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+ } else {
+ break;
+ }
+ }
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_phy_reg_m88 - Read m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_phy_reg_m88");
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_m88 - Write m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_write_phy_reg_m88");
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_page_igp - Set page as on IGP-like PHY(s)
+ * @hw: pointer to the HW structure
+ * @page: page to set (shifted left when necessary)
+ *
+ * Sets PHY page required for PHY register access. Assumes semaphore is
+ * already acquired. Note, this function sets phy.addr to 1 so the caller
+ * must set it appropriately (if necessary) after this function returns.
+ **/
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
+{
+ DEBUGFUNC("e1000_set_page_igp");
+
+ DEBUGOUT1("Setting page 0x%x\n", page);
+
+ hw->phy.addr = 1;
+
+ return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
+}
+
+/**
+ * __e1000_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("__e1000_read_phy_reg_igp");
+
+ if (!locked) {
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (!ret_val)
+ ret_val = e1000_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores the
+ * retrieved information in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000_read_phy_reg_igp_locked - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_phy_reg_igp");
+
+ if (!locked) {
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (!ret_val)
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
+ offset,
+ data);
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000_write_phy_reg_igp_locked - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then reads the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+
+ DEBUGFUNC("__e1000_read_kmrn_reg");
+
+ if (!locked) {
+ s32 ret_val = E1000_SUCCESS;
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_kmrn_reg_generic - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset using the
+ * kumeran interface. The information retrieved is stored in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_read_kmrn_reg_locked - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the kumeran interface. The
+ * information retrieved is stored in data.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_kmrn_reg - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then writes the data to the PHY register
+ * at the offset using the kumeran interface. Release any acquired semaphores
+ * before exiting.
+ **/
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+
+ DEBUGFUNC("e1000_write_kmrn_reg_generic");
+
+ if (!locked) {
+ s32 ret_val = E1000_SUCCESS;
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | data;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_kmrn_reg_generic - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to the PHY register at the offset
+ * using the kumeran interface. Release the acquired semaphore before exiting.
+ **/
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_write_kmrn_reg_locked - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Write the data to PHY register at the offset using the kumeran interface.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * e1000_set_master_slave_mode - Setup PHY for Master/slave mode
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Master/slave mode
+ **/
+static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Resolve Master/Slave mode */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
+ hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
+ ((phy_data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) : e1000_ms_auto;
+
+ switch (hw->phy.ms_type) {
+ case e1000_ms_force_master:
+ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ phy_data |= CR_1000T_MS_ENABLE;
+ phy_data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ /* fall-through */
+ default:
+ break;
+ }
+
+ return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
+}
+
+/**
+ * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_setup_82577");
+
+ if (hw->phy.reset_disable)
+ return E1000_SUCCESS;
+
+ if (hw->phy.type == e1000_phy_82580) {
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ return ret_val;
+ }
+ }
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+ /* Enable downshift */
+ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+ ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set MDI/MDIX mode */
+ ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+ /* Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ */
+ switch (hw->phy.mdix) {
+ case 1:
+ break;
+ case 2:
+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+ ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_set_master_slave_mode(hw);
+}
+
+/**
+ * e1000_copper_link_setup_m88 - Setup m88 PHYs for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for m88 PHYs. If necessary, the transmit
+ * clock and downshift values are also set.
+ **/
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_setup_m88");
+
+ if (phy->reset_disable)
+ return E1000_SUCCESS;
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (phy->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (phy->disable_polarity_correction)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->revision < E1000_REVISION_4) {
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((phy->revision == E1000_REVISION_2) &&
+ (phy->id == M88E1111_I_PHY_ID)) {
+ /* 82573L PHY - set the downshift counter to 5x. */
+ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ }
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Commit the changes. */
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_setup_m88_gen2 - Setup m88 PHYs for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHYs.
+ * Also enables and sets the downshift parameters.
+ **/
+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_setup_m88_gen2");
+
+ if (phy->reset_disable)
+ return E1000_SUCCESS;
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (phy->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ /* M88E1112 does not support this mode */
+ if (phy->id != M88E1112_E_PHY_ID) {
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ }
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (phy->disable_polarity_correction)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ /* Enable downshift and setting it to X6 */
+ if (phy->id == M88E1543_E_PHY_ID) {
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
+ ret_val =
+ phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
+ phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
+ phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
+
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Commit the changes. */
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ ret_val = e1000_set_master_slave_mode(hw);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_setup_igp - Setup igp PHYs for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ * igp PHYs.
+ **/
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_copper_link_setup_igp");
+
+ if (phy->reset_disable)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ return ret_val;
+ }
+
+ /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+ * timeout issues when LFS is enabled.
+ */
+ msec_delay(100);
+
+ /* disable lplu d0 during driver init */
+ if (hw->phy.ops.set_d0_lplu_state) {
+ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
+ if (ret_val) {
+ DEBUGOUT("Error Disabling LPLU D0\n");
+ return ret_val;
+ }
+ }
+ /* Configure mdi-mdix settings */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (phy->mdix) {
+ case 1:
+ data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if (hw->mac.autoneg) {
+ /* When the autonegotiation advertisement is 1000 Mbps only, we
+ * should disable SmartSpeed and enable Auto Master/Slave
+ * resolution as the hardware default.
+ */
+ if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set auto Master/Slave resolution process */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~CR_1000T_MS_ENABLE;
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_set_master_slave_mode(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MII auto-neg advertisement register and/or the 1000T control
+ * register. If the PHY is already set up for auto-negotiation, returns
+ * successfully. Otherwise, sets up the advertisement and flow control to
+ * the appropriate values for the desired auto-negotiation.
+ **/
+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg = 0;
+
+ DEBUGFUNC("e1000_phy_setup_autoneg");
+
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+ &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+ NWAY_AR_100TX_HD_CAPS |
+ NWAY_AR_10T_FD_CAPS |
+ NWAY_AR_10T_HD_CAPS);
+ mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+ DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+ DEBUGOUT("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+ DEBUGOUT("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+ DEBUGOUT("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+ DEBUGOUT("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+ DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+ DEBUGOUT("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+ * negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /* Flow control (Rx & Tx) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_rx_pause:
+ /* Rx Flow control is enabled, and Tx Flow control is
+ * disabled, by a software over-ride.
+ *
+ * Since there really isn't a way to advertise that we are
+ * capable of Rx Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric Rx PAUSE. Later
+ * (in e1000_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case e1000_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+
+ return ret_val;
+}
+
+/**
+ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Performs initial bounds checking on the autoneg advertisement parameter,
+ * then configures the PHY to advertise the full capability. Sets up the PHY
+ * for autoneg and restarts the negotiation process with the link partner. If
+ * autoneg_wait_to_complete is set, waits for autoneg to complete before exiting.
+ **/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ DEBUGFUNC("e1000_copper_link_autoneg");
+
+ /* Perform some bounds checking on the autoneg advertisement
+ * parameter.
+ */
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code, so we set it to advertise full capability.
+ */
+ if (!phy->autoneg_advertised)
+ phy->autoneg_advertised = phy->autoneg_mask;
+
+ DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ DEBUGOUT("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (phy->autoneg_wait_to_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
+ hw->mac.get_link_status = true;
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_generic - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link; once link is established, the
+ * collision distance and flow control are configured. If link is
+ * not established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("e1000_setup_copper_link_generic");
+
+ if (hw->mac.autoneg) {
+ /* Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+ DEBUGOUT("Forcing Speed and Duplex\n");
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+ &link);
+ if (ret_val)
+ return ret_val;
+
+ if (link) {
+ DEBUGOUT("Valid link established!!!\n");
+ hw->mac.ops.config_collision_dist(hw);
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ } else {
+ DEBUGOUT("Unable to establish link!!!\n");
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link and returns
+ * success if link comes up, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("IGP PSCR: %X\n", phy_data);
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Resets the PHY to commit the
+ * changes. If time expires while waiting for link up, we reset the DSP.
+ * After reset, TX_CLK and CRS on Tx must be set. Return successful upon
+ * successful completion, else return corresponding error code.
+ **/
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
+
+ /* I210 and I211 devices support Auto-Crossover in forced operation. */
+ if (phy->type != e1000_phy_i210) {
+ /* Clear Auto-Crossover to force MDI manually. M88E1000
+ * requires MDI forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Reset the phy to commit changes. */
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ bool reset_dsp = true;
+
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case I210_I_PHY_ID:
+ reset_dsp = false;
+ break;
+ default:
+ if (hw->phy.type != e1000_phy_m88)
+ reset_dsp = false;
+ break;
+ }
+
+ if (!reset_dsp) {
+ DEBUGOUT("Link taking longer than expected.\n");
+ } else {
+ /* We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = phy->ops.write_reg(hw,
+ M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_phy_reset_dsp_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->phy.type != e1000_phy_m88)
+ return E1000_SUCCESS;
+
+ if (hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1340M_E_PHY_ID ||
+ hw->phy.id == M88E1112_E_PHY_ID)
+ return E1000_SUCCESS;
+ if (hw->phy.id == I210_I_PHY_ID)
+ return E1000_SUCCESS;
+ if (hw->phy.id == M88E1543_E_PHY_ID)
+ return E1000_SUCCESS;
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Resetting the phy means we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock from
+ * the reset value of 2.5MHz.
+ */
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ * @hw: pointer to the HW structure
+ *
+ * Forces the speed and duplex settings of the PHY.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable MDI-X support for 10/100 */
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IFE_PMC_AUTO_MDIX;
+ data &= ~IFE_PMC_FORCE_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("IFE PMC: %X\n", data);
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ * Forces speed and duplex on the PHY by doing the following: disable flow
+ * control, force speed/duplex on the MAC, disable auto speed detection,
+ * disable auto-negotiation, configure duplex, configure speed, configure
+ * the collision distance, write configuration to CTRL register. The
+ * caller must write to the PHY_CONTROL register for these settings to
+ * take effect.
+ **/
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
+
+ /* Turn off flow control when forcing speed/duplex */
+ hw->fc.current_mode = e1000_fc_none;
+
+ /* Force speed/duplex on the mac */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~E1000_CTRL_SPD_SEL;
+
+ /* Disable Auto Speed Detection */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Disable autoneg on the phy */
+ *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Forcing Full or Half Duplex? */
+ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+ ctrl &= ~E1000_CTRL_FD;
+ *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ } else {
+ ctrl |= E1000_CTRL_FD;
+ *phy_ctrl |= MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ }
+
+ /* Forcing 10mb or 100mb? */
+ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+ ctrl |= E1000_CTRL_SPD_100;
+ *phy_ctrl |= MII_CR_SPEED_100;
+ *phy_ctrl &= ~MII_CR_SPEED_1000;
+ DEBUGOUT("Forcing 100mb\n");
+ } else {
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ DEBUGOUT("Forcing 10mb\n");
+ }
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+}
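+
+/* For example, when forced_speed_duplex selects 100 Mb/s half duplex (both
+ * E1000_ALL_100_SPEED and E1000_ALL_HALF_DUPLEX match), the function above
+ * clears E1000_CTRL_FD and MII_CR_FULL_DUPLEX, sets E1000_CTRL_SPD_100 and
+ * MII_CR_SPEED_100, writes CTRL back, and leaves the caller to write the
+ * updated phy_ctrl value to PHY_CONTROL.
+ */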
+
+/**
+ * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!active) {
+ data &= ~IGP02E1000_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ return ret_val;
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= IGP02E1000_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_downshift_generic - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000_check_downshift_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ DEBUGFUNC("e1000_check_downshift_generic");
+
+ switch (phy->type) {
+ case e1000_phy_i210:
+ case e1000_phy_m88:
+ case e1000_phy_gg82563:
+ offset = M88E1000_PHY_SPEC_STATUS;
+ mask = M88E1000_PSSR_DOWNSHIFT;
+ break;
+ case e1000_phy_igp_2:
+ case e1000_phy_igp_3:
+ offset = IGP01E1000_PHY_LINK_HEALTH;
+ mask = IGP01E1000_PLHR_SS_DOWNGRADE;
+ break;
+ default:
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
+ return E1000_SUCCESS;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->speed_downgraded = !!(phy_data & mask);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_m88 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_check_polarity_m88");
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_igp - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY port status register, and the
+ * current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data, offset, mask;
+
+ DEBUGFUNC("e1000_check_polarity_igp");
+
+ /* Polarity is determined based on the speed of
+ * our connection.
+ */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ offset = IGP01E1000_PHY_PCS_INIT_REG;
+ mask = IGP01E1000_PHY_POLARITY_MASK;
+ } else {
+ /* This really only applies to 10Mbps since
+ * there is no polarity for 100Mbps (always 0).
+ */
+ offset = IGP01E1000_PHY_PORT_STATUS;
+ mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Polarity is determined based on whether the polarity reversal feature is enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ DEBUGFUNC("e1000_check_polarity_ife");
+
+ /* Polarity is determined based on the reversal feature being enabled.
+ */
+ if (phy->polarity_correction) {
+ offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+ mask = IFE_PESC_POLARITY_REVERSED;
+ } else {
+ offset = IFE_PHY_SPECIAL_CONTROL;
+ mask = IFE_PSC_FORCE_POLARITY;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((phy_data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ **/
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 i, phy_status;
+
+ DEBUGFUNC("e1000_wait_autoneg");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ break;
+ msec_delay(100);
+ }
+
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ * has completed.
+ */
+ return ret_val;
+}
+
+/**
+ * e1000_phy_has_link_generic - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 i, phy_status;
+
+ DEBUGFUNC("e1000_phy_has_link_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ for (i = 0; i < iterations; i++) {
+ /* Some PHYs require the PHY_STATUS register to be read
+ * twice due to the link bit being sticky. No harm doing
+ * it across the board.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ /* If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ usec_delay(usec_interval);
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_LINK_STATUS)
+ break;
+ if (usec_interval >= 1000)
+ msec_delay_irq(usec_interval/1000);
+ else
+ usec_delay(usec_interval);
+ }
+
+ *success = (i < iterations);
+
+ return ret_val;
+}
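+
+/* Usage examples from this file: a short poll is used for ordinary link
+ * checks, e.g.
+ *	e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, &link);
+ * a much longer interval is used while waiting on forced speed/duplex, e.g.
+ *	e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link);
+ * and a single (1, 0) poll is used where only the sticky link bit matters.
+ */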
+
+/**
+ * e1000_get_cable_length_m88 - Determine cable length for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY specific status register to retrieve the cable length
+ * information. The cable length is determined by averaging the minimum and
+ * maximum values to get the "average" cable length. The m88 PHY has four
+ * possible cable length values, which are:
+ * Register Value Cable Length
+ * 0 < 50 meters
+ * 1 50 - 80 meters
+ * 2 80 - 110 meters
+ * 3 110 - 140 meters
+ * 4 > 140 meters
+ **/
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, index;
+
+ DEBUGFUNC("e1000_get_cable_length_m88");
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return E1000_SUCCESS;
+}
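+
+/* Worked example (assuming e1000_m88_cable_length_table matches the ranges
+ * listed in the function header above): a PSSR cable-length field of 2 selects
+ * table entries 2 and 3, so min_cable_length = 80 and max_cable_length = 110,
+ * and the reported cable_length is their average, 95 meters.
+ */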
+
+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, phy_data2, is_cm;
+ u16 index, default_page;
+
+ DEBUGFUNC("e1000_get_cable_length_m88_gen2");
+
+ switch (hw->phy.id) {
+ case I210_I_PHY_ID:
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+ break;
+ case M88E1543_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ /* Remember the original page select and set it to 7 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+ if (ret_val)
+ return ret_val;
+
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+ /* Reset the page select to its original value */
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+ return ret_val;
+ break;
+
+ case M88E1112_E_PHY_ID:
+ /* Remember the original page select and set it to 5 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length +
+ phy->max_cable_length) / 2;
+
+ /* Reset the page select to its original value */
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+ return ret_val;
+
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+ * received signal, adjusting for the attenuation produced by the
+ * cable. By reading the AGC registers, which represent the
+ * combination of coarse and fine gain value, the value can be put
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ **/
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, i, agc_value = 0;
+ u16 cur_agc_index, max_agc_index = 0;
+ u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+ static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
+ };
+
+ DEBUGFUNC("e1000_get_cable_length_igp_2");
+
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Getting bits 15:9, which represent the combination of
+ * coarse and fine gain values. The result is a number
+ * that can be put into the lookup table to obtain the
+ * approximate cable length.
+ */
+ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK);
+
+ /* Array index bound check. */
+ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+ (cur_agc_index == 0))
+ return -E1000_ERR_PHY;
+
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc_index] >
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ min_agc_index = cur_agc_index;
+ if (e1000_igp_2_cable_length_table[max_agc_index] <
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ max_agc_index = cur_agc_index;
+
+ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+ }
+
+ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+ e1000_igp_2_cable_length_table[max_agc_index]);
+ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+ /* Calculate cable length with the error range of +/- 10 meters. */
+ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0);
+ phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return E1000_SUCCESS;
+}
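+
+/* Numerical summary of the calculation above: the four per-channel table
+ * lookups are summed, the smallest and largest table values are subtracted
+ * back out, and the remaining two are averaged (IGP02E1000_PHY_CHANNEL_NUM - 2
+ * equals 2); the min/max range is then that average +/- IGP02E1000_AGC_RANGE
+ * (15 meters).
+ */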
+
+/**
+ * e1000_get_phy_info_m88 - Retrieve PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Valid only for copper links. Read the PHY status register (sticky read)
+ * to verify that link is up. Read the PHY special control register to
+ * determine the polarity and 10base-T extended distance. Read the PHY
+ * special status register to determine MDI/MDIx and current speed. If
+ * speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_m88");
+
+ if (phy->media_type != e1000_media_type_copper) {
+ DEBUGOUT("Phy info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->polarity_correction = !!(phy_data &
+ M88E1000_PSCR_POLARITY_REVERSAL);
+
+ ret_val = e1000_check_polarity_m88(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ /* Set values to "undefined" */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_igp - Retrieve igp PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_igp");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_igp(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ ret_val = phy->ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ * @hw: pointer to the HW structure
+ *
+ * Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_ife");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+ phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
+
+ if (phy->polarity_correction) {
+ ret_val = e1000_check_polarity_ife(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Polarity is forced */
+ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+ }
+
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
+
+ /* The following parameters are undefined for 10/100 operation. */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_sw_reset_generic - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register,
+ * setting the reset bit, and writing the register back to the PHY.
+ **/
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= MII_CR_RESET;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ usec_delay(1);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_generic - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_phy_hw_reset_generic");
+
+ if (phy->ops.check_reset_block) {
+ ret_val = phy->ops.check_reset_block(hw);
+ if (ret_val)
+ return E1000_SUCCESS;
+ }
+
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(phy->reset_delay_us);
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(150);
+
+ phy->ops.release(hw);
+
+ return phy->ops.get_cfg_done(hw);
+}
+
+/**
+ * e1000_get_cfg_done_generic - Generic configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Generic function to wait 10 milliseconds for configuration to complete
+ * and return success.
+ **/
+s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_get_cfg_done_generic");
+
+ msec_delay_irq(10);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
+{
+ DEBUGOUT("Running IGP 3 PHY init script\n");
+
+ /* PHY init IGP 3 */
+ /* Enable rise/fall, 10-mode work in class-A */
+ hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
+ /* Remove all caps from Replica path filter */
+ hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
+ /* Bias trimming for ADC, AFE and Driver (Default) */
+ hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
+ /* Increase Hybrid poly bias */
+ hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
+ /* Add 4% to Tx amplitude in Gig mode */
+ hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
+ /* Disable trimming (TTT) */
+ hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
+ /* Poly DC correction to 94.6% + 2% for all channels */
+ hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
+ /* ABS DC correction to 95.9% */
+ hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
+ /* BG temp curve trim */
+ hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
+ /* Increasing ADC OPAMP stage 1 currents to max */
+ hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
+ /* Force 1000 (required for enabling PHY regs configuration) */
+ hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
+ /* Set upd_freq to 6 */
+ hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
+ /* Disable NPDFE */
+ hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
+ /* Disable adaptive fixed FFE (Default) */
+ hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
+ /* Enable FFE hysteresis */
+ hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
+ /* Fixed FFE for short cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
+ /* Fixed FFE for medium cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
+ /* Fixed FFE for long cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
+ /* Enable Adaptive Clip Threshold */
+ hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
+ /* AHT reset limit to 1 */
+ hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
+ /* Set AHT master delay to 127 msec */
+ hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
+ /* Set scan bits for AHT */
+ hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
+ /* Set AHT Preset bits */
+ hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
+ /* Change integ_factor of channel A to 3 */
+ hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
+ /* Change prop_factor of channels BCD to 8 */
+ hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
+ /* Change cg_icount + enable integbp for channels BCD */
+ hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
+ /* Change cg_icount + enable integbp + change prop_factor_master
+ * to 8 for channel A
+ */
+ hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
+ /* Disable AHT in Slave mode on channel A */
+ hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
+ /* Enable LPLU and disable AN to 1000 in non-D0a states,
+ * Enable SPD+B2B
+ */
+ hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
+ /* Enable restart AN on an1000_dis change */
+ hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
+ /* Enable wh_fifo read clock in 10/100 modes */
+ hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
+ /* Restart AN, Speed selection is 1000 */
+ hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_type_from_id - Get PHY type from id
+ * @phy_id: phy_id read from the phy
+ *
+ * Returns the phy type from the id.
+ **/
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
+{
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ switch (phy_id) {
+ case M88E1000_I_PHY_ID:
+ case M88E1000_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+ phy_type = e1000_phy_igp_2;
+ break;
+ case GG82563_E_PHY_ID:
+ phy_type = e1000_phy_gg82563;
+ break;
+ case IGP03E1000_E_PHY_ID:
+ phy_type = e1000_phy_igp_3;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ phy_type = e1000_phy_ife;
+ break;
+ case I82580_I_PHY_ID:
+ phy_type = e1000_phy_82580;
+ break;
+ case I210_I_PHY_ID:
+ phy_type = e1000_phy_i210;
+ break;
+ default:
+ phy_type = e1000_phy_unknown;
+ break;
+ }
+ return phy_type;
+}
+
+/**
+ * e1000_determine_phy_address - Determines PHY address.
+ * @hw: pointer to the HW structure
+ *
+ * This uses a trial and error method to loop through possible PHY
+ * addresses. It tests each by reading the PHY ID registers and
+ * checking for a match.
+ **/
+s32 e1000_determine_phy_address(struct e1000_hw *hw)
+{
+ u32 phy_addr = 0;
+ u32 i;
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ hw->phy.id = phy_type;
+
+ for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+ hw->phy.addr = phy_addr;
+ i = 0;
+
+ do {
+ e1000_get_phy_id(hw);
+ phy_type = e1000_get_phy_type_from_id(hw->phy.id);
+
+ /* If phy_type is valid, break - we found our
+ * PHY address
+ */
+ if (phy_type != e1000_phy_unknown)
+ return E1000_SUCCESS;
+
+ msec_delay(1);
+ i++;
+ } while (i < 10);
+ }
+
+ return -E1000_ERR_PHY_TYPE;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * Restores the link to its previous settings after a PHY power down, for
+ * example to save power, to turn off link during a driver unload, or when
+ * wake on LAN is not enabled.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+ u16 power_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ if (hw->phy.type == e1000_phy_i210) {
+ hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
+ power_reg &= ~GS40G_CS_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
+ }
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Powers down the PHY to save power, for example to turn off link during a
+ * driver unload or when wake on LAN is not enabled. The PHY retains its
+ * settings across the power down/up cycle.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+ u16 power_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ /* i210 Phy requires an additional bit for power up/down */
+ if (hw->phy.type == e1000_phy_i210) {
+ hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
+ power_reg |= GS40G_CS_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
+ }
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+ msec_delay(1);
+}
+
+/**
+ * e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_check_polarity_82577");
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex.
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_82577");
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_82577");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_82577(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
+
+ if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+ I82577_PHY_STATUS2_SPEED_1000MBPS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, length;
+
+ DEBUGFUNC("e1000_get_cable_length_82577");
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT);
+
+ if (length == E1000_CABLE_LENGTH_UNDEFINED)
+ return -E1000_ERR_PHY;
+
+ phy->cable_length = length;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_gs40g - Write GS40G PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u16 page = offset >> GS40G_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_write_phy_reg_gs40g");
+
+ offset = offset & GS40G_OFFSET_MASK;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ if (ret_val)
+ goto release;
+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_gs40g - Read GS40G PHY register
+ * @hw: pointer to the HW structure
+ * @offset: lower half is register offset to read from
+ * upper half is page to use.
+ * @data: data to read at register offset
+ *
+ * Acquires semaphore, if necessary, then reads the data in the PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u16 page = offset >> GS40G_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_read_phy_reg_gs40g");
+
+ offset = offset & GS40G_OFFSET_MASK;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ if (ret_val)
+ goto release;
+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
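+
+/* Example of the GS40G offset encoding used by the two accessors above: the
+ * page lives in the upper 16 bits and the register in the lower 16 bits, so
+ *	e1000_read_phy_reg_gs40g(hw, GS40G_PAGE_2 | GS40G_MAC_REG2, &data);
+ * selects page 2 (via GS40G_PAGE_SELECT) and then reads register 0x15.
+ */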
+
+/**
+ * e1000_read_phy_reg_mphy - Read mPHY control register
+ * @hw: pointer to the HW structure
+ * @address: address to be read
+ * @data: pointer to the read data
+ *
+ * Reads the mPHY control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
+{
+ u32 mphy_ctrl = 0;
+ bool locked = false;
+ bool ready = false;
+
+ DEBUGFUNC("e1000_read_phy_reg_mphy");
+
+ /* Check if mPHY is ready for read/write operations */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Check if mPHY access is disabled and enable it if so */
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+ locked = true;
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+ }
+
+ /* Set the address that we want to read */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Mask the address so that only the current lane is used */
+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
+ ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
+ (address & E1000_MPHY_ADDRESS_MASK);
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+ /* Read data from the address */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ *data = E1000_READ_REG(hw, E1000_MPHY_DATA);
+
+ /* Disable access to mPHY if it was originally disabled */
+ if (locked) {
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_mphy - Write mPHY control register
+ * @hw: pointer to the HW structure
+ * @address: address to write to
+ * @data: data to write to register at offset
+ * @line_override: used to select a line other than the default one
+ *
+ * Writes data to mPHY control register.
+ **/
+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+ bool line_override)
+{
+ u32 mphy_ctrl = 0;
+ bool locked = false;
+ bool ready = false;
+
+ DEBUGFUNC("e1000_write_phy_reg_mphy");
+
+ /* Check if mPHY is ready for read/write operations */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Check if mPHY access is disabled and enable it if so */
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+ locked = true;
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+ }
+
+ /* Set the address that we want to write */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Mask the address so that only the current lane is used */
+ if (line_override)
+ mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+ else
+ mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
+ (address & E1000_MPHY_ADDRESS_MASK);
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+ /* Write data to the address */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
+
+ /* Disable access to mPHY if it was originally disabled */
+ if (locked) {
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_is_mphy_ready - Check if mPHY control register is not busy
+ * @hw: pointer to the HW structure
+ *
+ * Returns mPHY control register status.
+ **/
+bool e1000_is_mphy_ready(struct e1000_hw *hw)
+{
+ u16 retry_count = 0;
+ u32 mphy_ctrl = 0;
+ bool ready = false;
+
+ while (retry_count < 2) {
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_BUSY) {
+ usec_delay(20);
+ retry_count++;
+ continue;
+ }
+ ready = true;
+ break;
+ }
+
+ if (!ready)
+ DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n");
+
+ return ready;
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h
new file mode 100755
index 00000000..5387c5e7
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h
@@ -0,0 +1,256 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_PHY_H_
+#define _E1000_PHY_H_
+
+void e1000_init_phy_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+void e1000_null_phy_generic(struct e1000_hw *hw);
+s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_null_set_page(struct e1000_hw *hw, u16 data);
+s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 e1000_check_downshift_generic(struct e1000_hw *hw);
+s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw);
+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw);
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw);
+s32 e1000_get_cfg_done_generic(struct e1000_hw *hw);
+s32 e1000_get_phy_id(struct e1000_hw *hw);
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw);
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw);
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw);
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw);
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw);
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
+s32 e1000_determine_phy_address(struct e1000_hw *hw);
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
+s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data);
+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+ bool line_override);
+bool e1000_is_mphy_ready(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR 8
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
+
+/* GS40G - I210 PHY defines */
+#define GS40G_PAGE_SELECT 0x16
+#define GS40G_PAGE_SHIFT 16
+#define GS40G_OFFSET_MASK 0xFFFF
+#define GS40G_PAGE_2 0x20000
+#define GS40G_MAC_REG2 0x15
+#define GS40G_MAC_LB 0x4140
+#define GS40G_MAC_SPEED_1G 0X0006
+#define GS40G_COPPER_SPEC 0x0010
+#define GS40G_CS_POWER_DOWN 0x0002
+
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CTRL_REG 23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_LBK_CTRL 19
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* 82580 PHY Power Management */
+#define E1000_82580_PHY_POWER_MGMT 0xE14
+#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
+#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
+#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
+
+#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */
+#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */
+#define E1000_MPHY_BUSY 0x00010000 /* busy bit */
+#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */
+#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */
+
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT	9 /* Coarse=15:13, Fine=12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED 0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
+#define IFE_PSC_FORCE_POLARITY 0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE 0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
+
+/* SFP modules ID memory locations */
+#define E1000_SFF_IDENTIFIER_OFFSET 0x00
+#define E1000_SFF_IDENTIFIER_SFF 0x02
+#define E1000_SFF_IDENTIFIER_SFP 0x03
+
+#define E1000_SFF_ETH_FLAGS_OFFSET 0x06
+/* Flags for SFP modules compatible with ETH up to 1Gb */
+struct sfp_e1000_flags {
+ u8 e1000_base_sx:1;
+ u8 e1000_base_lx:1;
+ u8 e1000_base_cx:1;
+ u8 e1000_base_t:1;
+ u8 e100_base_lx:1;
+ u8 e100_base_fx:1;
+ u8 e10_base_bx10:1;
+ u8 e10_base_px:1;
+};
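Editor's note: this struct mirrors the Ethernet compliance-code byte of an SFP module's ID EEPROM at E1000_SFF_ETH_FLAGS_OFFSET. A caller reads that byte (for example via e1000_read_sfp_data_byte(), declared above) and overlays the bitfields to decide which 1G/100M media the module supports. A minimal sketch under that assumption, treating any non-zero return as failure and ignoring byte-order/bitfield-ABI subtleties:

    /* Sketch: does the plugged SFP module advertise 1000BASE-T? */
    static bool sfp_is_1g_base_t(struct e1000_hw *hw)
    {
            struct sfp_e1000_flags eth_flags;

            if (e1000_read_sfp_data_byte(hw, E1000_SFF_ETH_FLAGS_OFFSET,
                                         (u8 *)&eth_flags))
                    return false;             /* I2C read failed */

            return eth_flags.e1000_base_t;    /* bit 3 of the flags byte */
    }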
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600
+#define E1000_SFF_VENDOR_OUI_FTL 0x00906500
+#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100
+
+#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h
new file mode 100755
index 00000000..0e083c54
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h
@@ -0,0 +1,646 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
+#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */
+#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */
+#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */
+#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */
+#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */
+#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */
+#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */
+#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */
+#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */
+#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */
+#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */
+#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL 0x00100 /* Rx Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
+#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS	0x01520 /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
+#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define E1000_TCTL 0x00400 /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
+#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_LEDMUX 0x08130 /* LED MUX Control */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */
+#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */
+#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */
+#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */
+#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
+#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
+#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
+#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */
+#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */
+#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */
+#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
+#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */
+#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */
+#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */
+#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */
+#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */
+#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */
+#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */
+#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */
+#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */
+#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */
+#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */
+#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */
+#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */
+#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
+#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
+#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
+#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
+#define E1000_I210_FLMNGCTL 0x12038
+#define E1000_I210_FLMNGDATA 0x1203C
+#define E1000_I210_FLMNGCNT 0x12040
+
+#define E1000_I210_FLSWCTL 0x12048
+#define E1000_I210_FLSWDATA 0x1204C
+#define E1000_I210_FLSWCNT 0x12050
+
+#define E1000_I210_FLA 0x1201C
+
+#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
+#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
+
+/* QAV Tx mode control register */
+#define E1000_I210_TQAVCTRL 0x3570
+
+/* QAV Tx mode control register bitfields masks */
+/* QAV enable */
+#define E1000_TQAVCTRL_MODE (1 << 0)
+/* Fetching arbitration type */
+#define E1000_TQAVCTRL_FETCH_ARB (1 << 4)
+/* Fetching timer enable */
+#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5)
+/* Launch arbitration type */
+#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8)
+/* Launch timer enable */
+#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9)
+/* SP waits for SR enable */
+#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10)
+/* Fetching timer correction */
+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16
+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \
+ (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET)
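Editor's note: the fetch-timer delta occupies the upper 16 bits of TQAVCTRL, so a value is shifted into place and masked before being OR'ed with the enable bits. A minimal sketch, assuming rd32()/wr32() register accessors like the ones the igb driver defines elsewhere (they are not part of this header):

    /* Sketch: enable the Qav fetch timer with a given delta (HW units). */
    static void i210_set_fetch_timer(struct e1000_hw *hw, u16 delta)
    {
            u32 tqavctrl = rd32(E1000_I210_TQAVCTRL);

            tqavctrl &= ~E1000_TQAVCTRL_FETCH_TIMER_DELTA;
            tqavctrl |= (u32)delta << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET;
            tqavctrl |= E1000_TQAVCTRL_MODE | E1000_TQAVCTRL_FETCH_TIMER_ENABLE;

            wr32(E1000_I210_TQAVCTRL, tqavctrl);
    }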
+
+/* High credit registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n))
+
+/* Queues fetch arbitration priority control register */
+#define E1000_I210_TQAVARBCTRL 0x3574
+/* Queues priority masks where _n and _p can be 0-3. */
+#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * _n))
+/* QAV Tx mode control registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n))
+
+/* QAV Tx mode control register bitfields masks */
+#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */
+#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */
+#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */
+
+/* Good transmitted packets counter registers */
+#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
+
+/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */
+#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * _n))
+
+#define E1000_MMDAC 13 /* MMD Access Control */
+#define E1000_MMDAAD 14 /* MMD Access Address/Data */
+
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+ (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+ (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
+ (0x0C030 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+ (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
+ (0x0E038 + ((_n) * 0x40)))
+#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
+ (0x0E03C + ((_n) * 0x40)))
+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
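Editor's note: as the comment above says, these macros map a logical queue number onto two register layouts (queues 0-3 live in the legacy 0x02800/0x03800 blocks, queues 4 and up in the 0x0C000/0x0E000 blocks). A minimal sketch of a per-queue tail update, assuming a writel()-style accessor on the mapped BAR; the hw_addr parameter and helper name are illustrative only:

    /* Sketch: bump the RX tail pointer of queue n after refilling buffers. */
    static void ring_write_rdt(u8 __iomem *hw_addr, unsigned int queue, u16 tail)
    {
            writel(tail, hw_addr + E1000_RDT(queue)); /* same macro covers queues 0-15 */
    }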
+#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */
+#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
+#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */
+#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */
+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+/* Same as TXPBS, renamed for newer Si - RW */
+#define E1000_ITPBS 0x03404
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */
+#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */
+#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */
+#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */
+#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */
+#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */
+#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
+#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
+/* DMA Tx Max Total Allow Size Reqs - RW */
+#define E1000_DTXMXSZRQ 0x03540
+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511	0x04068 /* Packets Rx (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+
+/* Virtualization statistical counters */
+#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n)))
+#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n)))
+#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n)))
+#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n)))
+#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n)))
+#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n)))
+#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n)))
+#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n)))
+#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n)))
+
+/* LinkSec */
+#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */
+#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */
+#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */
+#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */
+#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */
+#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */
+#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */
+#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */
+#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */
+#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */
+#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */
+#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */
+#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */
+#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */
+#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */
+#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */
+#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */
+#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */
+#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */
+#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */
+#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */
+#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */
+#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */
+#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */
+#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */
+#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */
+#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */
+#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */
+#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */
+#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */
+/* LinkSec Tx 128-bit Key 0 - WO */
+#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n)))
+/* LinkSec Tx 128-bit Key 1 - WO */
+#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n)))
+#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */
+#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */
+/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit
+ * key - RW.
+ */
+#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
+
+#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */
+#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */
+#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */
+#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */
+/* IPSec Rx IPv4/v6 Address - RW */
+#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n)))
+/* IPSec Rx 128-bit Key - RW */
+#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n)))
+#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */
+#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */
+/* IPSec Tx 128-bit Key - RW */
+#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n)))
+#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */
+#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */
+#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
+#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */
+#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
+#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */
+#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */
+#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
+#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */
+#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */
+#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
+#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
+#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS 0x04138 /* Length Errors Count */
+#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
+#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */
+#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
+#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */
+#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */
+#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */
+#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
+#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
+#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */
+/* Flexible Host Filter Table */
+#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100))
+/* Ext Flexible Host Filter Table */
+#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100))
+
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+/* Management Decision Filters */
+#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
+#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
+#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+/* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_SWSM2 0x05B58
+#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
+#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
+#define E1000_UFUSE 0x05B78 /* UFUSE - RO */
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+#define E1000_FWSTS 0x08F0C /* FW Status */
+
+/* RSS registers */
+#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
+#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
+#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */
+#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */
+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
+#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
+/* VT Registers */
+#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */
+#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
+#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
+#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
+#define E1000_VFRE 0x00C8C /* VF Receive Enables */
+#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
+#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
+#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
+#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
+#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
+#define E1000_VMRCTL	0x05D80 /* Virtual Mirror Rule Control */
+#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */
+#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */
+#define E1000_MDFB 0x03558 /* Malicious Driver free block */
+#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */
+#define E1000_TXSWC 0x05ACC /* Tx Switch Control */
+#define E1000_SCCRL 0x05DB0 /* Storm Control Control */
+#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */
+#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */
+/* These act per VF so an array friendly macro is used */
+#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
+#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
+#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
+#define E1000_VFVMBMEM(_n) (0x00800 + (_n))
+#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
+/* VLAN Virtual Machine Filter - RW */
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n)))
+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
+#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */
+#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
+#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
+#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
+#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
+#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
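Editor's note: SYSTIML/SYSTIMH hold the 1588 system clock as a 64-bit value split across two 32-bit registers; the igb driver reads the low half first (on these parts the low read latches the high half) and then combines them. A minimal sketch, again assuming an illustrative rd32() accessor:

    /* Sketch: read the 64-bit 1588 system time (low half first). */
    static u64 read_systim(struct e1000_hw *hw)
    {
            u64 systim;

            systim  = rd32(E1000_SYSTIML);          /* low read latches SYSTIMH */
            systim |= (u64)rd32(E1000_SYSTIMH) << 32;
            return systim;
    }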
+
+/* Filtering Registers */
+#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
+#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
+#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
+#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
+#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */
+#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */
+#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */
+#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */
+#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */
+/* Tx Desc plane TC Rate-scheduler config */
+#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4))
+/* Tx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4))
+/* Tx Desc Plane TC Rate-Scheduler Status */
+#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4))
+/* Tx Desc Plane TC Rate-Scheduler MMW */
+#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4))
+/* Tx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4))
+/* Tx Packet plane TC Rate-scheduler MMW */
+#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler MMW */
+#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4))
+/* Tx Desc plane VM Rate-Scheduler MMW*/
+#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4))
+/* Tx BCN Rate-Scheduler MMW */
+#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4))
+#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */
+#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */
+#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */
+#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */
+#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */
+#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */
+#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */
+#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */
+#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */
+#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */
+#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */
+#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */
+#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */
+#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */
+
+/* DMA Coalescing registers */
+#define E1000_DMACR 0x02508 /* Control Register */
+#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
+
+/* PCIe Parity Status Register */
+#define E1000_PCIEERRSTS 0x05BA8
+
+#define E1000_PROXYS 0x5F64 /* Proxying Status */
+#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */
+/* Thermal sensor configuration and status registers */
+#define E1000_THMJT 0x08100 /* Junction Temperature */
+#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
+#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+
+/* Energy Efficient Ethernet "EEE" registers */
+#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
+#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
+#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/
+#define E1000_EEE_SU 0x0E34 /* EEE Setup */
+#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */
+#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
+
+
+
+#endif
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb.h
new file mode 100755
index 00000000..a582f52e
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb.h
@@ -0,0 +1,859 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _IGB_H_
+#define _IGB_H_
+
+#include <linux/kobject.h>
+
+#ifndef IGB_NO_LRO
+#include <net/tcp.h>
+#endif
+
+#undef HAVE_HW_TIME_STAMP
+#ifdef HAVE_HW_TIME_STAMP
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#endif
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+
+struct igb_adapter;
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+//#define IGB_DCA
+#endif
+#ifdef IGB_DCA
+#include <linux/dca.h>
+#endif
+
+#include "kcompat.h"
+
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+
+#include "e1000_api.h"
+#include "e1000_82575.h"
+#include "e1000_manage.h"
+#include "e1000_mbx.h"
+
+#define IGB_ERR(args...) printk(KERN_ERR "igb: " args)
+
+#define PFX "igb: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+ __FUNCTION__ , ## args))
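Editor's note: DPRINTK pastes `nlevel` onto NETIF_MSG_ (so it must be one of the netif message class names) and `klevel` onto KERN_, and only prints when that class is enabled in adapter->msg_enable. A hedged usage sketch, assuming a valid `adapter` pointer is in scope as the macro requires:

    /* inside a function where 'adapter' (struct igb_adapter *) is in scope */
    DPRINTK(LINK, INFO, "NIC link is up, %u Mbps\n", adapter->link_speed);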
+
+#ifdef HAVE_PTP_1588_CLOCK
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#endif /* HAVE_PTP_1588_CLOCK */
+
+#ifdef HAVE_I2C_SUPPORT
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#endif /* HAVE_I2C_SUPPORT */
+
+/* Interrupt defines */
+#define IGB_START_ITR 648 /* ~6000 ints/sec */
+#define IGB_4K_ITR 980
+#define IGB_20K_ITR 196
+#define IGB_70K_ITR 56
+
+/* Interrupt modes, as used by the IntMode parameter */
+#define IGB_INT_MODE_LEGACY 0
+#define IGB_INT_MODE_MSI 1
+#define IGB_INT_MODE_MSIX 2
+
+/* TX/RX descriptor defines */
+#define IGB_DEFAULT_TXD 256
+#define IGB_DEFAULT_TX_WORK 128
+#define IGB_MIN_TXD 80
+#define IGB_MAX_TXD 4096
+
+#define IGB_DEFAULT_RXD 256
+#define IGB_MIN_RXD 80
+#define IGB_MAX_RXD 4096
+
+#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */
+#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */
+
+#define NON_Q_VECTORS 1
+#define MAX_Q_VECTORS 10
+
+/* Transmit and receive queues */
+#define IGB_MAX_RX_QUEUES 16
+#define IGB_MAX_TX_QUEUES 16
+
+#define IGB_MAX_VF_MC_ENTRIES 30
+#define IGB_MAX_VF_FUNCTIONS 8
+#define IGB_82576_VF_DEV_ID 0x10CA
+#define IGB_I350_VF_DEV_ID 0x1520
+#define IGB_MAX_UTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define OUI_LEN 3
+#define IGB_MAX_VMDQ_QUEUES 8
+
+
+struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 default_vf_vlan_id;
+ u16 vlans_enabled;
+ unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN];
+ u32 uta_table_copy[IGB_MAX_UTA_ENTRIES];
+ u32 flags;
+ unsigned long last_nack;
+#ifdef IFLA_VF_MAX
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ u16 tx_rate;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ bool spoofchk_enabled;
+#endif
+#endif
+};
+
+#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
+#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
+#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
+#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
+
+/* RX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ * descriptors available in its onboard memory.
+ * Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ * available in host memory.
+ * If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ * descriptors until either it has this many to write back, or the
+ * ITR timer expires.
+ */
+#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
+#define IGB_RX_HTHRESH 8
+#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
+#define IGB_TX_HTHRESH 1
+#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
+ adapter->msix_entries) ? 1 : 4)
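Editor's note: these per-queue thresholds are ultimately packed into the RXDCTL/TXDCTL registers declared in e1000_regs.h. A minimal sketch of composing an RXDCTL value, assuming the usual igb field positions (PTHRESH bits 4:0, HTHRESH bits 12:8, WTHRESH bits 20:16) and the queue-enable bit E1000_RXDCTL_QUEUE_ENABLE from e1000_defines.h (not shown in this header):

    /* Sketch: build a per-queue RXDCTL value from the thresholds above. */
    static u32 build_rxdctl(struct e1000_hw *hw, struct igb_adapter *adapter)
    {
            u32 rxdctl = 0;

            rxdctl |= IGB_RX_PTHRESH;          /* prefetch threshold,  bits 4:0   */
            rxdctl |= IGB_RX_HTHRESH << 8;     /* host threshold,      bits 12:8  */
            rxdctl |= IGB_RX_WTHRESH << 16;    /* writeback threshold, bits 20:16 */
            rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;

            return rxdctl;
    }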
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 more,
+ * this adds roughly 448 bytes of extra data meaning the smallest
+ * allocation we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+/* Supported Rx Buffer Sizes */
+#define IGB_RXBUFFER_256 256
+#define IGB_RXBUFFER_2048 2048
+#define IGB_RXBUFFER_16384 16384
+#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#if MAX_SKB_FRAGS < 8
+#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024)
+#else
+#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
+#endif
+
+
+/* Packet Buffer allocations */
+#define IGB_PBA_BYTES_SHIFT 0xA
+#define IGB_TX_HEAD_ADDR_SHIFT 7
+#define IGB_PBA_TX_MASK 0xFFFF0000
+
+#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define IGB_EEPROM_APME 0x0400
+#define AUTO_ALL_MODES 0
+
+#ifndef IGB_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define IGB_MASTER_SLAVE e1000_ms_hw_default
+#endif
+
+#define IGB_MNG_VLAN_NONE -1
+
+#ifndef IGB_NO_LRO
+#define IGB_LRO_MAX 32	/* Maximum number of LRO descriptors */
+struct igb_lro_stats {
+ u32 flushed;
+ u32 coal;
+};
+
+/*
+ * igb_lro_header - header format to be aggregated by LRO
+ * @iph: IP header without options
+ * @tcp: TCP header
+ * @ts: Optional TCP timestamp data in TCP options
+ *
+ * This structure relies on the check above that verifies that the header
+ * is IPv4 and does not contain any options.
+ */
+struct igb_lrohdr {
+ struct iphdr iph;
+ struct tcphdr th;
+ __be32 ts[0];
+};
+
+struct igb_lro_list {
+ struct sk_buff_head active;
+ struct igb_lro_stats stats;
+};
+
+#endif /* IGB_NO_LRO */
+struct igb_cb {
+#ifndef IGB_NO_LRO
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ union { /* Union defining head/tail partner */
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ };
+#endif
+ __be32 tsecr; /* timestamp echo response */
+ u32 tsval; /* timestamp value in host order */
+ u32 next_seq; /* next expected sequence number */
+ u16 free; /* 65521 minus total size */
+ u16 mss; /* size of data portion of packet */
+ u16 append_cnt; /* number of skb's appended */
+#endif /* IGB_NO_LRO */
+#ifdef HAVE_VLAN_RX_REGISTER
+ u16 vid; /* VLAN tag */
+#endif
+};
+#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb)
+
+enum igb_tx_flags {
+ /* cmd_type flags */
+ IGB_TX_FLAGS_VLAN = 0x01,
+ IGB_TX_FLAGS_TSO = 0x02,
+ IGB_TX_FLAGS_TSTAMP = 0x04,
+
+ /* olinfo flags */
+ IGB_TX_FLAGS_IPV4 = 0x10,
+ IGB_TX_FLAGS_CSUM = 0x20,
+};
+
+/* VLAN info */
+#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT 16
+
+/*
+ * The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR 15
+#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#ifndef MAX_SKB_FRAGS
+#define DESC_NEEDED 4
+#elif (MAX_SKB_FRAGS < 16)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#else
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#endif
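Editor's note: TXD_USE_COUNT() converts a byte count into the number of descriptors needed given the 32 KB-per-descriptor cap above, and DESC_NEEDED is the worst case for a maximally fragmented skb plus slack for context/head descriptors. A minimal sketch of the per-skb count a transmit path might compute before reserving ring space (field accessors are the kernel's standard sk_buff helpers):

    /* Sketch: worst-case descriptor count for one skb (head + all frags). */
    static u16 igb_txd_needed(const struct sk_buff *skb)
    {
            unsigned int f;
            u16 count = TXD_USE_COUNT(skb_headlen(skb));

            for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                    count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));

            return count;
    }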
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct igb_tx_buffer {
+ union e1000_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ u16 gso_segs;
+ __be16 protocol;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+};
+
+struct igb_rx_buffer {
+ dma_addr_t dma;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ struct sk_buff *skb;
+#else
+ struct page *page;
+ u32 page_offset;
+#endif
+};
+
+struct igb_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+};
+
+struct igb_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 drops;
+ u64 csum_err;
+ u64 alloc_failed;
+ u64 ipv4_packets; /* IPv4 headers processed */
+ u64 ipv4e_packets; /* IPv4E headers with extensions processed */
+ u64 ipv6_packets; /* IPv6 headers processed */
+ u64 ipv6e_packets; /* IPv6E headers with extensions processed */
+ u64 tcp_packets; /* TCP headers processed */
+ u64 udp_packets; /* UDP headers processed */
+ u64 sctp_packets; /* SCTP headers processed */
+	u64 nfs_packets;	/* NFS headers processed */
+};
+
+struct igb_ring_container {
+ struct igb_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 work_limit; /* total work allowed per interrupt */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+struct igb_ring {
+ struct igb_q_vector *q_vector; /* backlink to q_vector */
+ struct net_device *netdev; /* back pointer to net_device */
+ struct device *dev; /* device for dma mapping */
+ union { /* array of buffer info structs */
+ struct igb_tx_buffer *tx_buffer_info;
+ struct igb_rx_buffer *rx_buffer_info;
+ };
+#ifdef HAVE_PTP_1588_CLOCK
+ unsigned long last_rx_timestamp;
+#endif /* HAVE_PTP_1588_CLOCK */
+ void *desc; /* descriptor ring memory */
+ unsigned long flags; /* ring specific flags */
+ void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+ unsigned int size; /* length of desc. ring in bytes */
+
+ u16 count; /* number of desc. in the ring */
+ u8 queue_index; /* logical index of the ring*/
+ u8 reg_idx; /* physical index of the ring */
+
+ /* everything past this point are written often */
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+
+ union {
+ /* TX */
+ struct {
+ struct igb_tx_queue_stats tx_stats;
+ };
+ /* RX */
+ struct {
+ struct igb_rx_queue_stats rx_stats;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ u16 rx_buffer_len;
+#else
+ struct sk_buff *skb;
+#endif
+ };
+ };
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ struct net_device *vmdq_netdev;
+ int vqueue_index; /* queue index for virtual netdev */
+#endif
+} ____cacheline_internodealigned_in_smp;
+
+struct igb_q_vector {
+ struct igb_adapter *adapter; /* backlink */
+ int cpu; /* CPU for DCA */
+ u32 eims_value; /* EIMS mask value */
+
+ u16 itr_val;
+ u8 set_itr;
+ void __iomem *itr_register;
+
+ struct igb_ring_container rx, tx;
+
+ struct napi_struct napi;
+#ifndef IGB_NO_LRO
+ struct igb_lro_list lrolist; /* LRO list for queue vector*/
+#endif
+ char name[IFNAMSIZ + 9];
+#ifndef HAVE_NETDEV_NAPI_LIST
+ struct net_device poll_dev;
+#endif
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+enum e1000_ring_flags_t {
+#ifndef HAVE_NDO_SET_FEATURES
+ IGB_RING_FLAG_RX_CSUM,
+#endif
+ IGB_RING_FLAG_RX_SCTP_CSUM,
+ IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+ IGB_RING_FLAG_TX_CTX_IDX,
+ IGB_RING_FLAG_TX_DETECT_HANG,
+};
+
+struct igb_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 queue;
+ u16 state; /* bitmask */
+};
+#define IGB_MAC_STATE_DEFAULT 0x1
+#define IGB_MAC_STATE_MODIFIED 0x2
+#define IGB_MAC_STATE_IN_USE 0x4
+
+#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
+
+#define IGB_RX_DESC(R, i) \
+ (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
+#define IGB_TX_DESC(R, i) \
+ (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
+#define IGB_TX_CTXTDESC(R, i) \
+ (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+#define netdev_ring(ring) \
+ ((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev))
+#define ring_queue_index(ring) \
+ ((ring->vmdq_netdev ? ring->vqueue_index : ring->queue_index))
+#else
+#define netdev_ring(ring) (ring->netdev)
+#define ring_queue_index(ring) (ring->queue_index)
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+
+/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
+static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
+/* igb_desc_unused - calculate if we have unused descriptors */
+static inline u16 igb_desc_unused(const struct igb_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
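Editor's note: igb_desc_unused() and the IGB_RX_DESC()/IGB_TX_DESC() accessors above are what the clean-up paths combine: walk next_to_clean, test the DD status bit of each writeback descriptor, and stop once the hardware has not caught up. A minimal sketch of that loop shape, assuming E1000_RXD_STAT_DD from e1000_defines.h and ignoring the actual buffer handling:

    /* Sketch: count completed RX descriptors starting at next_to_clean. */
    static u16 igb_count_done_rx(struct igb_ring *rx_ring)
    {
            u16 i = rx_ring->next_to_clean;
            u16 done = 0;

            while (done < rx_ring->count) {
                    union e1000_adv_rx_desc *rx_desc = IGB_RX_DESC(rx_ring, i);

                    if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
                            break;                  /* hardware not finished yet */

                    done++;
                    i = (i + 1) % rx_ring->count;   /* wrap around the ring */
            }
            return done;
    }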
+
+#ifdef CONFIG_BQL
+static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
+{
+ return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+}
+#endif /* CONFIG_BQL */
+
+// #ifdef EXT_THERMAL_SENSOR_SUPPORT
+// #ifdef IGB_PROCFS
+struct igb_therm_proc_data
+{
+ struct e1000_hw *hw;
+ struct e1000_thermal_diode_data *sensor_data;
+};
+
+// #endif /* IGB_PROCFS */
+// #endif /* EXT_THERMAL_SENSOR_SUPPORT */
+
+#ifdef IGB_HWMON
+#define IGB_HWMON_TYPE_LOC 0
+#define IGB_HWMON_TYPE_TEMP 1
+#define IGB_HWMON_TYPE_CAUTION 2
+#define IGB_HWMON_TYPE_MAX 3
+
+struct hwmon_attr {
+ struct device_attribute dev_attr;
+ struct e1000_hw *hw;
+ struct e1000_thermal_diode_data *sensor;
+ char name[12];
+ };
+
+struct hwmon_buff {
+ struct device *device;
+ struct hwmon_attr *hwmon_list;
+ unsigned int n_hwmon;
+ };
+#endif /* IGB_HWMON */
+
+/* board specific private data structure */
+struct igb_adapter {
+#ifdef HAVE_VLAN_RX_REGISTER
+ /* vlgrp must be first member of structure */
+ struct vlan_group *vlgrp;
+#else
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#endif
+ struct net_device *netdev;
+
+ unsigned long state;
+ unsigned int flags;
+
+ unsigned int num_q_vectors;
+ struct msix_entry *msix_entries;
+
+
+ /* TX */
+ u16 tx_work_limit;
+ u32 tx_timeout_count;
+ int num_tx_queues;
+ struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES];
+
+ /* RX */
+ int num_rx_queues;
+ struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES];
+
+ struct timer_list watchdog_timer;
+ struct timer_list dma_err_timer;
+ struct timer_list phy_info_timer;
+ u16 mng_vlan_id;
+ u32 bd_number;
+ u32 wol;
+ u32 en_mng_pt;
+ u16 link_speed;
+ u16 link_duplex;
+ u8 port_num;
+
+ /* Interrupt Throttle Rate */
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+ struct work_struct dma_err_task;
+ bool fc_autoneg;
+ u8 tx_timeout_factor;
+
+#ifdef DEBUG
+ bool tx_hang_detected;
+ bool disable_hw_reset;
+#endif
+ u32 max_frame_size;
+
+ /* OS defined structs */
+ struct pci_dev *pdev;
+#ifndef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats net_stats;
+#endif
+#ifndef IGB_NO_LRO
+ struct igb_lro_stats lro_stats;
+#endif
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+ struct e1000_hw_stats stats;
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
+#ifdef ETHTOOL_TEST
+ u32 test_icr;
+ struct igb_ring test_tx_ring;
+ struct igb_ring test_rx_ring;
+#endif
+
+ int msg_enable;
+
+ struct igb_q_vector *q_vector[MAX_Q_VECTORS];
+ u32 eims_enable_mask;
+ u32 eims_other;
+
+ /* to not mess up cache alignment, always add to the bottom */
+ u32 *config_space;
+ u16 tx_ring_count;
+ u16 rx_ring_count;
+ struct vf_data_storage *vf_data;
+#ifdef IFLA_VF_MAX
+ int vf_rate_link_speed;
+#endif
+ u32 lli_port;
+ u32 lli_size;
+ unsigned int vfs_allocated_count;
+ /* Malicious Driver Detection flag. Valid only when SR-IOV is enabled */
+ bool mdd;
+ int int_mode;
+ u32 rss_queues;
+ u32 vmdq_pools;
+ char fw_version[32];
+ u32 wvbr;
+ struct igb_mac_addr *mac_table;
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES];
+#endif
+ int vferr_refcount;
+ int dmac;
+ u32 *shadow_vfta;
+
+ /* External Thermal Sensor support flag */
+ bool ets;
+#ifdef IGB_HWMON
+ struct hwmon_buff igb_hwmon_buff;
+#else /* IGB_HWMON */
+#ifdef IGB_PROCFS
+ struct proc_dir_entry *eth_dir;
+ struct proc_dir_entry *info_dir;
+ struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS];
+ struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS];
+ bool old_lsc;
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
+ u32 etrack_id;
+
+#ifdef HAVE_PTP_1588_CLOCK
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct delayed_work ptp_overflow_work;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ u32 tx_hwtstamp_timeouts;
+ u32 rx_hwtstamp_cleared;
+#endif /* HAVE_PTP_1588_CLOCK */
+
+#ifdef HAVE_I2C_SUPPORT
+ struct i2c_algo_bit_data i2c_algo;
+ struct i2c_adapter i2c_adap;
+ struct i2c_client *i2c_client;
+#endif /* HAVE_I2C_SUPPORT */
+ unsigned long link_check_timeout;
+
+
+ int devrc;
+
+ int copper_tries;
+ u16 eee_advert;
+};
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+struct igb_vmdq_adapter {
+#ifdef HAVE_VLAN_RX_REGISTER
+ /* vlgrp must be first member of structure */
+ struct vlan_group *vlgrp;
+#else
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#endif
+ struct igb_adapter *real_adapter;
+ struct net_device *vnetdev;
+ struct net_device_stats net_stats;
+ struct igb_ring *tx_ring;
+ struct igb_ring *rx_ring;
+};
+#endif
+
+#define IGB_FLAG_HAS_MSI (1 << 0)
+#define IGB_FLAG_DCA_ENABLED (1 << 1)
+#define IGB_FLAG_LLI_PUSH (1 << 2)
+#define IGB_FLAG_QUAD_PORT_A (1 << 3)
+#define IGB_FLAG_QUEUE_PAIRS (1 << 4)
+#define IGB_FLAG_EEE (1 << 5)
+#define IGB_FLAG_DMAC (1 << 6)
+#define IGB_FLAG_DETECT_BAD_DMA (1 << 7)
+#define IGB_FLAG_PTP (1 << 8)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10)
+#define IGB_FLAG_WOL_SUPPORTED (1 << 11)
+#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12)
+#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13)
+#define IGB_FLAG_MEDIA_RESET (1 << 14)
+#define IGB_FLAG_MAS_ENABLE (1 << 15)
+
+/* Media Auto Sense */
+#define IGB_MAS_ENABLE_0 0x0001
+#define IGB_MAS_ENABLE_1 0x0002
+#define IGB_MAS_ENABLE_2 0x0004
+#define IGB_MAS_ENABLE_3 0x0008
+
+#define IGB_MIN_TXPBSIZE 20408
+#define IGB_TX_BUF_4096 4096
+
+#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
+
+/* DMA Coalescing defines */
+#define IGB_DMAC_DISABLE 0
+#define IGB_DMAC_MIN 250
+#define IGB_DMAC_500 500
+#define IGB_DMAC_EN_DEFAULT 1000
+#define IGB_DMAC_2000 2000
+#define IGB_DMAC_3000 3000
+#define IGB_DMAC_4000 4000
+#define IGB_DMAC_5000 5000
+#define IGB_DMAC_6000 6000
+#define IGB_DMAC_7000 7000
+#define IGB_DMAC_8000 8000
+#define IGB_DMAC_9000 9000
+#define IGB_DMAC_MAX 10000
+
+#define IGB_82576_TSYNC_SHIFT 19
+#define IGB_82580_TSYNC_SHIFT 24
+#define IGB_TS_HDR_LEN 16
+
+/* CEM Support */
+#define FW_HDR_LEN 0x4
+#define FW_CMD_DRV_INFO 0xDD
+#define FW_CMD_DRV_INFO_LEN 0x5
+#define FW_CMD_RESERVED 0x0
+#define FW_RESP_SUCCESS 0x1
+#define FW_UNUSED_VER 0x0
+#define FW_MAX_RETRIES 3
+#define FW_STATUS_SUCCESS 0x1
+#define FW_FAMILY_DRV_VER 0xffffffff
+
+#define IGB_MAX_LINK_TRIES 20
+
+struct e1000_fw_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union
+ {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
+
+#pragma pack(push,1)
+struct e1000_fw_drv_info {
+ struct e1000_fw_hdr hdr;
+ u8 port_num;
+ u32 drv_version;
+ u16 pad; /* end spacing to ensure length is mult. of dword */
+ u8 pad2; /* end spacing to ensure length is mult. of dword */
+};
+#pragma pack(pop)
+
+enum e1000_state_t {
+ __IGB_TESTING,
+ __IGB_RESETTING,
+ __IGB_DOWN
+};
+
+extern char igb_driver_name[];
+extern char igb_driver_version[];
+
+extern int igb_up(struct igb_adapter *);
+extern void igb_down(struct igb_adapter *);
+extern void igb_reinit_locked(struct igb_adapter *);
+extern void igb_reset(struct igb_adapter *);
+extern int igb_set_spd_dplx(struct igb_adapter *, u16);
+extern int igb_setup_tx_resources(struct igb_ring *);
+extern int igb_setup_rx_resources(struct igb_ring *);
+extern void igb_free_tx_resources(struct igb_ring *);
+extern void igb_free_rx_resources(struct igb_ring *);
+extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_setup_tctl(struct igb_adapter *);
+extern void igb_setup_rctl(struct igb_adapter *);
+extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
+ struct igb_tx_buffer *);
+extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
+extern void igb_clean_rx_ring(struct igb_ring *);
+extern void igb_update_stats(struct igb_adapter *);
+extern bool igb_has_link(struct igb_adapter *adapter);
+extern void igb_set_ethtool_ops(struct net_device *);
+extern void igb_check_options(struct igb_adapter *);
+extern void igb_power_up_link(struct igb_adapter *);
+#ifdef HAVE_PTP_1588_CLOCK
+extern void igb_ptp_init(struct igb_adapter *adapter);
+extern void igb_ptp_stop(struct igb_adapter *adapter);
+extern void igb_ptp_reset(struct igb_adapter *adapter);
+extern void igb_ptp_tx_work(struct work_struct *work);
+extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
+extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
+ struct sk_buff *skb);
+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
+ struct sk_buff *skb);
+static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+ skb_pull(skb, IGB_TS_HDR_LEN);
+#endif
+ return;
+ }
+
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS))
+ igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
+
+ /* Update the last_rx_timestamp timer in order to enable watchdog check
+ * for error case of latched timestamp on a dropped packet.
+ */
+ rx_ring->last_rx_timestamp = jiffies;
+}
+
+extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd);
+#endif /* HAVE_PTP_1588_CLOCK */
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *);
+#endif
+extern int igb_write_mc_addr_list(struct net_device *netdev);
+extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
+extern int igb_del_mac_filter(struct igb_adapter *adapter, u8* addr, u16 queue);
+extern int igb_available_rars(struct igb_adapter *adapter);
+extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32);
+extern void igb_configure_vt_default_pool(struct igb_adapter *adapter);
+extern void igb_enable_vlan_tags(struct igb_adapter *adapter);
+#ifndef HAVE_VLAN_RX_REGISTER
+extern void igb_vlan_mode(struct net_device *, u32);
+#endif
+
+#define E1000_PCS_CFG_IGN_SD 1
+
+#ifdef IGB_HWMON
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
+#else
+#ifdef IGB_PROCFS
+int igb_procfs_init(struct igb_adapter* adapter);
+void igb_procfs_exit(struct igb_adapter* adapter);
+int igb_procfs_topdir_init(void);
+void igb_procfs_topdir_exit(void);
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
+
+
+
+#endif /* _IGB_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_debugfs.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_debugfs.c
new file mode 100755
index 00000000..d33c814a
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_debugfs.c
@@ -0,0 +1,29 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c
new file mode 100755
index 00000000..f3c48b25
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c
@@ -0,0 +1,2859 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for igb */
+
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+#include <linux/highmem.h>
+
+#include "igb.h"
+#include "igb_regtest.h"
+#include <linux/if_vlan.h>
+#ifdef ETHTOOL_GEEE
+#include <linux/mdio.h>
+#endif
+
+#ifdef ETHTOOL_OPS_COMPAT
+#include "kcompat_ethtool.c"
+#endif
+#ifdef ETHTOOL_GSTATS
+struct igb_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define IGB_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
+ .stat_offset = offsetof(struct igb_adapter, _stat) \
+}
+static const struct igb_stats igb_gstrings_stats[] = {
+ IGB_STAT("rx_packets", stats.gprc),
+ IGB_STAT("tx_packets", stats.gptc),
+ IGB_STAT("rx_bytes", stats.gorc),
+ IGB_STAT("tx_bytes", stats.gotc),
+ IGB_STAT("rx_broadcast", stats.bprc),
+ IGB_STAT("tx_broadcast", stats.bptc),
+ IGB_STAT("rx_multicast", stats.mprc),
+ IGB_STAT("tx_multicast", stats.mptc),
+ IGB_STAT("multicast", stats.mprc),
+ IGB_STAT("collisions", stats.colc),
+ IGB_STAT("rx_crc_errors", stats.crcerrs),
+ IGB_STAT("rx_no_buffer_count", stats.rnbc),
+ IGB_STAT("rx_missed_errors", stats.mpc),
+ IGB_STAT("tx_aborted_errors", stats.ecol),
+ IGB_STAT("tx_carrier_errors", stats.tncrs),
+ IGB_STAT("tx_window_errors", stats.latecol),
+ IGB_STAT("tx_abort_late_coll", stats.latecol),
+ IGB_STAT("tx_deferred_ok", stats.dc),
+ IGB_STAT("tx_single_coll_ok", stats.scc),
+ IGB_STAT("tx_multi_coll_ok", stats.mcc),
+ IGB_STAT("tx_timeout_count", tx_timeout_count),
+ IGB_STAT("rx_long_length_errors", stats.roc),
+ IGB_STAT("rx_short_length_errors", stats.ruc),
+ IGB_STAT("rx_align_errors", stats.algnerrc),
+ IGB_STAT("tx_tcp_seg_good", stats.tsctc),
+ IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ IGB_STAT("rx_flow_control_xon", stats.xonrxc),
+ IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ IGB_STAT("tx_flow_control_xon", stats.xontxc),
+ IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
+ IGB_STAT("rx_long_byte_count", stats.gorc),
+ IGB_STAT("tx_dma_out_of_sync", stats.doosync),
+#ifndef IGB_NO_LRO
+ IGB_STAT("lro_aggregated", lro_stats.coal),
+ IGB_STAT("lro_flushed", lro_stats.flushed),
+#endif /* IGB_NO_LRO */
+ IGB_STAT("tx_smbus", stats.mgptc),
+ IGB_STAT("rx_smbus", stats.mgprc),
+ IGB_STAT("dropped_smbus", stats.mgpdc),
+ IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+ IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+#ifdef HAVE_PTP_1588_CLOCK
+ IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+#endif /* HAVE_PTP_1588_CLOCK */
+};
+
+#define IGB_NETDEV_STAT(_net_stat) { \
+ .stat_string = #_net_stat, \
+ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+ .stat_offset = offsetof(struct net_device_stats, _net_stat) \
+}
+static const struct igb_stats igb_gstrings_net_stats[] = {
+ IGB_NETDEV_STAT(rx_errors),
+ IGB_NETDEV_STAT(tx_errors),
+ IGB_NETDEV_STAT(tx_dropped),
+ IGB_NETDEV_STAT(rx_length_errors),
+ IGB_NETDEV_STAT(rx_over_errors),
+ IGB_NETDEV_STAT(rx_frame_errors),
+ IGB_NETDEV_STAT(rx_fifo_errors),
+ IGB_NETDEV_STAT(tx_fifo_errors),
+ IGB_NETDEV_STAT(tx_heartbeat_errors)
+};
+
+#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats)
+#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats)
+#define IGB_RX_QUEUE_STATS_LEN \
+ (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
+#define IGB_TX_QUEUE_STATS_LEN \
+ (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
+#define IGB_QUEUE_STATS_LEN \
+ ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
+ IGB_RX_QUEUE_STATS_LEN) + \
+ (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
+ IGB_TX_QUEUE_STATS_LEN))
+#define IGB_STATS_LEN \
+ (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
+
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 status;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP |
+ SUPPORTED_Pause);
+ ecmd->advertising = ADVERTISED_TP;
+
+ if (hw->mac.autoneg == 1) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ /* the e1000 autoneg seems to match ethtool nicely */
+ ecmd->advertising |= hw->phy.autoneg_advertised;
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy.addr;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ } else {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause);
+ if (hw->mac.type == e1000_i354)
+ ecmd->supported |= (SUPPORTED_2500baseX_Full);
+
+ ecmd->advertising = ADVERTISED_FIBRE;
+
+ switch (adapter->link_speed) {
+ case SPEED_2500:
+ ecmd->advertising = ADVERTISED_2500baseX_Full;
+ break;
+ case SPEED_1000:
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ break;
+ case SPEED_100:
+ ecmd->advertising = ADVERTISED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->mac.autoneg == 1)
+ ecmd->advertising |= ADVERTISED_Autoneg;
+
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ }
+
+ if (hw->mac.autoneg != 1)
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+
+ if (hw->fc.requested_mode == e1000_fc_full)
+ ecmd->advertising |= ADVERTISED_Pause;
+ else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+ ecmd->advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ else
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
+
+ if (status & E1000_STATUS_LU) {
+ if ((hw->mac.type == e1000_i354) &&
+ (status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER))
+ ecmd->speed = SPEED_2500;
+ else if (status & E1000_STATUS_SPEED_1000)
+ ecmd->speed = SPEED_1000;
+ else if (status & E1000_STATUS_SPEED_100)
+ ecmd->speed = SPEED_100;
+ else
+ ecmd->speed = SPEED_10;
+
+ if ((status & E1000_STATUS_FD) ||
+ hw->phy.media_type != e1000_media_type_copper)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ if ((hw->phy.media_type == e1000_media_type_fiber) ||
+ hw->mac.autoneg)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
+#ifdef ETH_TP_MDI_X
+
+ /* MDI-X => 2; MDI => 1; Invalid => 0 */
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ ETH_TP_MDI;
+ else
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+#ifdef ETH_TP_MDI_AUTO
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
+#endif
+#endif /* ETH_TP_MDI_X */
+ return 0;
+}
+
+static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (ecmd->duplex == DUPLEX_HALF) {
+ if (!hw->dev_spec._82575.eee_disable)
+ dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n");
+ hw->dev_spec._82575.eee_disable = true;
+ } else {
+ if (hw->dev_spec._82575.eee_disable)
+ dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n");
+ hw->dev_spec._82575.eee_disable = false;
+ }
+
+ /* When SoL/IDER sessions are active, autoneg/speed/duplex
+ * cannot be changed */
+ if (e1000_check_reset_block(hw)) {
+ dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link "
+ "characteristics when SoL/IDER is active.\n");
+ return -EINVAL;
+ }
+
+#ifdef ETH_TP_MDI_AUTO
+ /*
+ * MDI setting is only allowed when autoneg enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (ecmd->autoneg != AUTONEG_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
+#endif /* ETH_TP_MDI_AUTO */
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ hw->mac.autoneg = 1;
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+ switch (adapter->link_speed) {
+ case SPEED_2500:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_2500baseX_Full;
+ break;
+ case SPEED_1000:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_1000baseT_Full;
+ break;
+ case SPEED_100:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+ } else {
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ }
+ ecmd->advertising = hw->phy.autoneg_advertised;
+ if (adapter->fc_autoneg)
+ hw->fc.requested_mode = e1000_fc_default;
+ } else {
+ if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ return -EINVAL;
+ }
+ }
+
+#ifdef ETH_TP_MDI_AUTO
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ /* fix up the value for auto (3 => 0) as zero is mapped
+ * internally to auto
+ */
+ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->phy.mdix = AUTO_ALL_MODES;
+ else
+ hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ }
+
+#endif /* ETH_TP_MDI_AUTO */
+ /* reset the link */
+ if (netif_running(adapter->netdev)) {
+ igb_down(adapter);
+ igb_up(adapter);
+ } else
+ igb_reset(adapter);
+
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ return 0;
+}
+
+static u32 igb_get_link(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+ /*
+ * If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ mac->get_link_status = 1;
+
+ return igb_has_link(adapter);
+}
+
+static void igb_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->fc.current_mode == e1000_fc_rx_pause)
+ pause->rx_pause = 1;
+ else if (hw->fc.current_mode == e1000_fc_tx_pause)
+ pause->tx_pause = 1;
+ else if (hw->fc.current_mode == e1000_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int igb_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 0;
+
+ adapter->fc_autoneg = pause->autoneg;
+
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ hw->fc.requested_mode = e1000_fc_default;
+ if (netif_running(adapter->netdev)) {
+ igb_down(adapter);
+ igb_up(adapter);
+ } else {
+ igb_reset(adapter);
+ }
+ } else {
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_full;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_none;
+
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ retval = hw->mac.ops.setup_link(hw);
+ /* implicit goto out */
+ } else {
+ retval = e1000_force_mac_fc(hw);
+ if (retval)
+ goto out;
+ e1000_set_fc_watermarks_generic(hw);
+ }
+ }
+
+out:
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ return retval;
+}
+
+static u32 igb_get_msglevel(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void igb_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int igb_get_regs_len(struct net_device *netdev)
+{
+#define IGB_REGS_LEN 555
+ return IGB_REGS_LEN * sizeof(u32);
+}
+
+static void igb_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u8 i;
+
+ memset(p, 0, IGB_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
+ regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
+ regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
+ regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
+ regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
+ regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
+ regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
+ regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
+ regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
+ regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
+ regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);
+
+ /* NVM Register */
+ regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);
+
+ /* Interrupt */
+ /* Reading EICS for EICR because they read the
+ * same but EICS does not clear on read */
+ regs_buff[13] = E1000_READ_REG(hw, E1000_EICS);
+ regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
+ regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
+ regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
+ regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
+ regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
+ /* Reading ICS for ICR because they read the
+ * same but ICS does not clear on read */
+ regs_buff[19] = E1000_READ_REG(hw, E1000_ICS);
+ regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
+ regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
+ regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
+ regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
+ regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
+ regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);
+
+ /* Flow Control */
+ regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
+ regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
+ regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
+ regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
+ regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
+ regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);
+
+ /* Receive */
+ regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
+ regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
+ regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
+ regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
+ regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
+ regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL);
+
+ /* Transmit */
+ regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
+ regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
+ regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
+ regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);
+
+ /* Wake Up */
+ regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
+ regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
+ regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
+ regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
+ regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);
+
+ /* MAC */
+ regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
+ regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+ regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
+ regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
+ regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);
+
+ /* Statistics */
+ regs_buff[54] = adapter->stats.crcerrs;
+ regs_buff[55] = adapter->stats.algnerrc;
+ regs_buff[56] = adapter->stats.symerrs;
+ regs_buff[57] = adapter->stats.rxerrc;
+ regs_buff[58] = adapter->stats.mpc;
+ regs_buff[59] = adapter->stats.scc;
+ regs_buff[60] = adapter->stats.ecol;
+ regs_buff[61] = adapter->stats.mcc;
+ regs_buff[62] = adapter->stats.latecol;
+ regs_buff[63] = adapter->stats.colc;
+ regs_buff[64] = adapter->stats.dc;
+ regs_buff[65] = adapter->stats.tncrs;
+ regs_buff[66] = adapter->stats.sec;
+ regs_buff[67] = adapter->stats.htdpmc;
+ regs_buff[68] = adapter->stats.rlec;
+ regs_buff[69] = adapter->stats.xonrxc;
+ regs_buff[70] = adapter->stats.xontxc;
+ regs_buff[71] = adapter->stats.xoffrxc;
+ regs_buff[72] = adapter->stats.xofftxc;
+ regs_buff[73] = adapter->stats.fcruc;
+ regs_buff[74] = adapter->stats.prc64;
+ regs_buff[75] = adapter->stats.prc127;
+ regs_buff[76] = adapter->stats.prc255;
+ regs_buff[77] = adapter->stats.prc511;
+ regs_buff[78] = adapter->stats.prc1023;
+ regs_buff[79] = adapter->stats.prc1522;
+ regs_buff[80] = adapter->stats.gprc;
+ regs_buff[81] = adapter->stats.bprc;
+ regs_buff[82] = adapter->stats.mprc;
+ regs_buff[83] = adapter->stats.gptc;
+ regs_buff[84] = adapter->stats.gorc;
+ regs_buff[86] = adapter->stats.gotc;
+ regs_buff[88] = adapter->stats.rnbc;
+ regs_buff[89] = adapter->stats.ruc;
+ regs_buff[90] = adapter->stats.rfc;
+ regs_buff[91] = adapter->stats.roc;
+ regs_buff[92] = adapter->stats.rjc;
+ regs_buff[93] = adapter->stats.mgprc;
+ regs_buff[94] = adapter->stats.mgpdc;
+ regs_buff[95] = adapter->stats.mgptc;
+ regs_buff[96] = adapter->stats.tor;
+ regs_buff[98] = adapter->stats.tot;
+ regs_buff[100] = adapter->stats.tpr;
+ regs_buff[101] = adapter->stats.tpt;
+ regs_buff[102] = adapter->stats.ptc64;
+ regs_buff[103] = adapter->stats.ptc127;
+ regs_buff[104] = adapter->stats.ptc255;
+ regs_buff[105] = adapter->stats.ptc511;
+ regs_buff[106] = adapter->stats.ptc1023;
+ regs_buff[107] = adapter->stats.ptc1522;
+ regs_buff[108] = adapter->stats.mptc;
+ regs_buff[109] = adapter->stats.bptc;
+ regs_buff[110] = adapter->stats.tsctc;
+ regs_buff[111] = adapter->stats.iac;
+ regs_buff[112] = adapter->stats.rpthc;
+ regs_buff[113] = adapter->stats.hgptc;
+ regs_buff[114] = adapter->stats.hgorc;
+ regs_buff[116] = adapter->stats.hgotc;
+ regs_buff[118] = adapter->stats.lenerrs;
+ regs_buff[119] = adapter->stats.scvpc;
+ regs_buff[120] = adapter->stats.hrmpc;
+
+ for (i = 0; i < 4; i++)
+ regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
+
+ for (i = 0; i < 10; i++)
+ regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));
+
+ for (i = 0; i < 4; i++)
+ regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
+
+ for (i = 0; i < 4; i++)
+ regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
+ for (i = 0; i < 128; i++)
+ regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
+ for (i = 0; i < 128; i++)
+ regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));
+
+ regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
+ regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
+ regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
+ regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
+ if (hw->mac.type > e1000_82580) {
+ regs_buff[551] = adapter->stats.o2bgptc;
+ regs_buff[552] = adapter->stats.b2ospc;
+ regs_buff[553] = adapter->stats.o2bspc;
+ regs_buff[554] = adapter->stats.b2ogprc;
+ }
+}
+
+static int igb_get_eeprom_len(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ return adapter->hw.nvm.word_size * 2;
+}
+
+static int igb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ int first_word, last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ if (hw->nvm.type == e1000_nvm_eeprom_spi)
+ ret_val = e1000_read_nvm(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+ else {
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ret_val = e1000_read_nvm(hw, first_word + i, 1,
+ &eeprom_buff[i]);
+ if (ret_val)
+ break;
+ }
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int igb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word, ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = hw->nvm.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ ret_val = e1000_read_nvm(hw, first_word, 1,
+ &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ ret_val = e1000_read_nvm(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ cpu_to_le16s(&eeprom_buff[i]);
+
+ ret_val = e1000_write_nvm(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
+
+ /* Update the checksum if the write succeeded,
+ * and flush shadow RAM for 82573 controllers */
+ if (ret_val == 0)
+ e1000_update_nvm_checksum(hw);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void igb_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
+ strncpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version) - 1);
+
+ strncpy(drvinfo->fw_version, adapter->fw_version,
+ sizeof(drvinfo->fw_version) - 1);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info) - 1);
+ drvinfo->n_stats = IGB_STATS_LEN;
+ drvinfo->testinfo_len = IGB_TEST_LEN;
+ drvinfo->regdump_len = igb_get_regs_len(netdev);
+ drvinfo->eedump_len = igb_get_eeprom_len(netdev);
+}
+
+static void igb_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ ring->rx_max_pending = IGB_MAX_RXD;
+ ring->tx_max_pending = IGB_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int igb_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct igb_ring *temp_ring;
+ int i, err = 0;
+ u16 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
+ new_rx_count = max(new_rx_count, (u16)IGB_MIN_RXD);
+ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
+ new_tx_count = max(new_tx_count, (u16)IGB_MIN_TXD);
+ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
+
+ if (adapter->num_tx_queues > adapter->num_rx_queues)
+ temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
+ else
+ temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
+
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ igb_down(adapter);
+
+ /*
+ * We can't just free everything and then setup again,
+ * because the ISRs in MSI-X mode get passed pointers
+ * to the tx and rx ring structs.
+ */
+ if (new_tx_count != adapter->tx_ring_count) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->tx_ring[i],
+ sizeof(struct igb_ring));
+
+ temp_ring[i].count = new_tx_count;
+ err = igb_setup_tx_resources(&temp_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ igb_free_tx_resources(&temp_ring[i]);
+ }
+ goto err_setup;
+ }
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ igb_free_tx_resources(adapter->tx_ring[i]);
+
+ memcpy(adapter->tx_ring[i], &temp_ring[i],
+ sizeof(struct igb_ring));
+ }
+
+ adapter->tx_ring_count = new_tx_count;
+ }
+
+ if (new_rx_count != adapter->rx_ring_count) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->rx_ring[i],
+ sizeof(struct igb_ring));
+
+ temp_ring[i].count = new_rx_count;
+ err = igb_setup_rx_resources(&temp_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ igb_free_rx_resources(&temp_ring[i]);
+ }
+ goto err_setup;
+ }
+
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igb_free_rx_resources(adapter->rx_ring[i]);
+
+ memcpy(adapter->rx_ring[i], &temp_ring[i],
+ sizeof(struct igb_ring));
+ }
+
+ adapter->rx_ring_count = new_rx_count;
+ }
+err_setup:
+ igb_up(adapter);
+ vfree(temp_ring);
+clear_reset:
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ return err;
+}
+static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 pat, val;
+ static const u32 _test[] =
+ {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
+ E1000_WRITE_REG(hw, reg, (_test[pat] & write));
+ val = E1000_READ_REG(hw, reg) & mask;
+ if (val != (_test[pat] & write & mask)) {
+ dev_err(pci_dev_to_dev(adapter->pdev), "pattern test reg %04X "
+ "failed: got 0x%08X expected 0x%08X\n",
+ E1000_REGISTER(hw, reg), val, (_test[pat] & write & mask));
+ *data = E1000_REGISTER(hw, reg);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 val;
+ E1000_WRITE_REG(hw, reg, write & mask);
+ val = E1000_READ_REG(hw, reg);
+ if ((write & mask) != (val & mask)) {
+ dev_err(pci_dev_to_dev(adapter->pdev), "set/check reg %04X test failed:"
+ " got 0x%08X expected 0x%08X\n", reg,
+ (val & mask), (write & mask));
+ *data = E1000_REGISTER(hw, reg);
+ return 1;
+ }
+
+ return 0;
+}
+
+#define REG_PATTERN_TEST(reg, mask, write) \
+ do { \
+ if (reg_pattern_test(adapter, data, reg, mask, write)) \
+ return 1; \
+ } while (0)
+
+#define REG_SET_AND_CHECK(reg, mask, write) \
+ do { \
+ if (reg_set_and_check(adapter, data, reg, mask, write)) \
+ return 1; \
+ } while (0)
+
+static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_reg_test *test;
+ u32 value, before, after;
+ u32 i, toggle;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_i350:
+ case e1000_i354:
+ test = reg_test_i350;
+ toggle = 0x7FEFF3FF;
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ test = reg_test_i210;
+ toggle = 0x7FEFF3FF;
+ break;
+ case e1000_82580:
+ test = reg_test_82580;
+ toggle = 0x7FEFF3FF;
+ break;
+ case e1000_82576:
+ test = reg_test_82576;
+ toggle = 0x7FFFF3FF;
+ break;
+ default:
+ test = reg_test_82575;
+ toggle = 0x7FFFF3FF;
+ break;
+ }
+
+ /* Because the status register is such a special case,
+ * we handle it separately from the rest of the register
+ * tests. Some bits are read-only, some toggle, and some
+ * are writable on newer MACs.
+ */
+ before = E1000_READ_REG(hw, E1000_STATUS);
+ value = (E1000_READ_REG(hw, E1000_STATUS) & toggle);
+ E1000_WRITE_REG(hw, E1000_STATUS, toggle);
+ after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
+ if (value != after) {
+ dev_err(pci_dev_to_dev(adapter->pdev), "failed STATUS register test "
+ "got: 0x%08X expected: 0x%08X\n", after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ E1000_WRITE_REG(hw, E1000_STATUS, before);
+
+ /* Perform the remainder of the register test, looping through
+ * the test table until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg +
+ (i * test->reg_offset),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg +
+ (i * test->reg_offset),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * test->reg_offset));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return 0;
+}
+
+static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
+{
+ *data = 0;
+
+ /* Validate NVM checksum */
+ if (e1000_validate_nvm_checksum(&adapter->hw) < 0)
+ *data = 2;
+
+ return *data;
+}
+
+static irqreturn_t igb_test_intr(int irq, void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *) data;
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);
+
+ return IRQ_HANDLED;
+}
+
+static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 mask, ics_mask, i = 0, shared_int = TRUE;
+ u32 irq = adapter->pdev->irq;
+
+ *data = 0;
+
+ /* Hook up test interrupt handler just for this test */
+ if (adapter->msix_entries) {
+ if (request_irq(adapter->msix_entries[0].vector,
+ &igb_test_intr, 0, netdev->name, adapter)) {
+ *data = 1;
+ return -1;
+ }
+ } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
+ shared_int = FALSE;
+ if (request_irq(irq,
+ igb_test_intr, 0, netdev->name, adapter)) {
+ *data = 1;
+ return -1;
+ }
+ } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
+ netdev->name, adapter)) {
+ shared_int = FALSE;
+ } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
+ netdev->name, adapter)) {
+ *data = 1;
+ return -1;
+ }
+ dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n",
+ (shared_int ? "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
+
+ /* Define all writable bits for ICS */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ ics_mask = 0x37F47EDD;
+ break;
+ case e1000_82576:
+ ics_mask = 0x77D4FBFD;
+ break;
+ case e1000_82580:
+ ics_mask = 0x77DCFED5;
+ break;
+ case e1000_i350:
+ case e1000_i354:
+ ics_mask = 0x77DCFED5;
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ ics_mask = 0x774CFED5;
+ break;
+ default:
+ ics_mask = 0x7FFFFFFF;
+ break;
+ }
+
+ /* Test each interrupt */
+ for (; i < 31; i++) {
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (!(mask & ics_mask))
+ continue;
+
+ if (!shared_int) {
+ /* Disable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+
+ /* Flush any pending interrupts */
+ E1000_WRITE_REG(hw, E1000_ICR, ~0);
+
+ E1000_WRITE_REG(hw, E1000_IMC, mask);
+ E1000_WRITE_REG(hw, E1000_ICS, mask);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /* Enable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was not posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+
+ /* Flush any pending interrupts */
+ E1000_WRITE_REG(hw, E1000_ICR, ~0);
+
+ E1000_WRITE_REG(hw, E1000_IMS, mask);
+ E1000_WRITE_REG(hw, E1000_ICS, mask);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+ /* Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+
+ /* Flush any pending interrupts */
+ E1000_WRITE_REG(hw, E1000_ICR, ~0);
+
+ E1000_WRITE_REG(hw, E1000_IMC, ~mask);
+ E1000_WRITE_REG(hw, E1000_ICS, ~mask);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr & mask) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
+
+ /* Unhook test interrupt handler */
+ if (adapter->msix_entries)
+ free_irq(adapter->msix_entries[0].vector, adapter);
+ else
+ free_irq(irq, adapter);
+
+ return *data;
+}
+
+static void igb_free_desc_rings(struct igb_adapter *adapter)
+{
+ igb_free_tx_resources(&adapter->test_tx_ring);
+ igb_free_rx_resources(&adapter->test_rx_ring);
+}
+
+static int igb_setup_desc_rings(struct igb_adapter *adapter)
+{
+ struct igb_ring *tx_ring = &adapter->test_tx_ring;
+ struct igb_ring *rx_ring = &adapter->test_rx_ring;
+ struct e1000_hw *hw = &adapter->hw;
+ int ret_val;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+ tx_ring->count = IGB_DEFAULT_TXD;
+ tx_ring->dev = pci_dev_to_dev(adapter->pdev);
+ tx_ring->netdev = adapter->netdev;
+ tx_ring->reg_idx = adapter->vfs_allocated_count;
+
+ if (igb_setup_tx_resources(tx_ring)) {
+ ret_val = 1;
+ goto err_nomem;
+ }
+
+ igb_setup_tctl(adapter);
+ igb_configure_tx_ring(adapter, tx_ring);
+
+ /* Setup Rx descriptor ring and Rx buffers */
+ rx_ring->count = IGB_DEFAULT_RXD;
+ rx_ring->dev = pci_dev_to_dev(adapter->pdev);
+ rx_ring->netdev = adapter->netdev;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ rx_ring->rx_buffer_len = IGB_RX_HDR_LEN;
+#endif
+ rx_ring->reg_idx = adapter->vfs_allocated_count;
+
+ if (igb_setup_rx_resources(rx_ring)) {
+ ret_val = 2;
+ goto err_nomem;
+ }
+
+ /* set the default queue to queue 0 of PF */
+ E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3);
+
+ /* enable receive ring */
+ igb_setup_rctl(adapter);
+ igb_configure_rx_ring(adapter, rx_ring);
+
+ igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
+
+ return 0;
+
+err_nomem:
+ igb_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static void igb_phy_disable_receiver(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1000_write_phy_reg(hw, 29, 0x001F);
+ e1000_write_phy_reg(hw, 30, 0x8FFC);
+ e1000_write_phy_reg(hw, 29, 0x001A);
+ e1000_write_phy_reg(hw, 30, 0x8FF0);
+}
+
+static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg = 0;
+
+ hw->mac.autoneg = FALSE;
+
+ if (hw->phy.type == e1000_phy_m88) {
+ if (hw->phy.id != I210_I_PHY_ID) {
+ /* Auto-MDI/MDIX Off */
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
+ /* autoneg off */
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+ } else {
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+ }
+ } else {
+ /* enable MII loopback */
+ if (hw->phy.type == e1000_phy_82580)
+ e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041);
+ }
+
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD | /* Force Duplex to FULL */
+ E1000_CTRL_SLU); /* Set link up enable bit */
+
+ if (hw->phy.type == e1000_phy_m88)
+ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+
+ /* Disable the receiver on the PHY so when a cable is plugged in, the
+ * PHY does not begin to autoneg when a cable is reconnected to the NIC.
+ */
+ if (hw->phy.type == e1000_phy_m88)
+ igb_phy_disable_receiver(adapter);
+
+ mdelay(500);
+ return 0;
+}
+
+static int igb_set_phy_loopback(struct igb_adapter *adapter)
+{
+ return igb_integrated_phy_loopback(adapter);
+}
+
+static int igb_setup_loopback_test(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
+
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ /* use CTRL_EXT to identify link type as SGMII can appear as copper */
+ if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+
+ /* Enable DH89xxCC MPHY for near end loopback */
+ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
+ reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
+ E1000_MPHY_PCS_CLK_REG_OFFSET;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
+
+ reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
+ reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
+ }
+
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg |= E1000_RCTL_LBM_TCVR;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+
+ E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
+
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg &= ~(E1000_CTRL_RFCE |
+ E1000_CTRL_TFCE |
+ E1000_CTRL_LRST);
+ reg |= E1000_CTRL_SLU |
+ E1000_CTRL_FD;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+ /* Unset switch control to serdes energy detect */
+ reg = E1000_READ_REG(hw, E1000_CONNSW);
+ reg &= ~E1000_CONNSW_ENRGSRC;
+ E1000_WRITE_REG(hw, E1000_CONNSW, reg);
+
+ /* Unset sigdetect for SERDES loopback on
+ * 82580 and newer devices
+ */
+ if (hw->mac.type >= e1000_82580) {
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_IGN_SD;
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+ }
+
+ /* Set PCS register for forced speed */
+ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+ reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
+ reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
+ E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
+ E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
+ E1000_PCS_LCTL_FSD | /* Force Speed */
+ E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+
+ return 0;
+ }
+
+ return igb_set_phy_loopback(adapter);
+}
+
+static void igb_loopback_cleanup(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+ u16 phy_reg;
+
+ if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ u32 reg;
+
+ /* Disable near end loopback on DH89xxCC */
+ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
+ reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
+ E1000_MPHY_PCS_CLK_REG_OFFSET;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
+
+ reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
+ reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
+ }
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ hw->mac.autoneg = TRUE;
+ e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
+ if (phy_reg & MII_CR_LOOPBACK) {
+ phy_reg &= ~MII_CR_LOOPBACK;
+ if (hw->phy.id == I210_I_PHY_ID)
+ e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+ e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
+ e1000_phy_commit(hw);
+ }
+}
+static void igb_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ memset(skb->data, 0xFF, frame_size);
+ frame_size /= 2;
+ memset(&skb->data[frame_size], 0xAA, frame_size - 1);
+ memset(&skb->data[frame_size + 10], 0xBE, 1);
+ memset(&skb->data[frame_size + 12], 0xAF, 1);
+}
+
+static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
+ unsigned int frame_size)
+{
+ unsigned char *data;
+ bool match = true;
+
+ frame_size >>= 1;
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ data = rx_buffer->skb->data;
+#else
+ data = kmap(rx_buffer->page);
+#endif
+
+ if (data[3] != 0xFF ||
+ data[frame_size + 10] != 0xBE ||
+ data[frame_size + 12] != 0xAF)
+ match = false;
+
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ kunmap(rx_buffer->page);
+
+#endif
+ return match;
+}
+
+static u16 igb_clean_test_rings(struct igb_ring *rx_ring,
+ struct igb_ring *tx_ring,
+ unsigned int size)
+{
+ union e1000_adv_rx_desc *rx_desc;
+ struct igb_rx_buffer *rx_buffer_info;
+ struct igb_tx_buffer *tx_buffer_info;
+ u16 rx_ntc, tx_ntc, count = 0;
+
+ /* initialize next to clean and descriptor values */
+ rx_ntc = rx_ring->next_to_clean;
+ tx_ntc = tx_ring->next_to_clean;
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
+
+ while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
+ /* check rx buffer */
+ rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+
+ /* sync Rx buffer for CPU read */
+ dma_sync_single_for_cpu(rx_ring->dev,
+ rx_buffer_info->dma,
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ IGB_RX_HDR_LEN,
+#else
+ IGB_RX_BUFSZ,
+#endif
+ DMA_FROM_DEVICE);
+
+ /* verify contents of skb */
+ if (igb_check_lbtest_frame(rx_buffer_info, size))
+ count++;
+
+ /* sync Rx buffer for device write */
+ dma_sync_single_for_device(rx_ring->dev,
+ rx_buffer_info->dma,
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ IGB_RX_HDR_LEN,
+#else
+ IGB_RX_BUFSZ,
+#endif
+ DMA_FROM_DEVICE);
+
+ /* unmap buffer on tx side */
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+ igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+
+ /* increment rx/tx next to clean counters */
+ rx_ntc++;
+ if (rx_ntc == rx_ring->count)
+ rx_ntc = 0;
+ tx_ntc++;
+ if (tx_ntc == tx_ring->count)
+ tx_ntc = 0;
+
+ /* fetch next descriptor */
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
+ }
+
+ /* re-map buffers to ring, store next to clean values */
+ igb_alloc_rx_buffers(rx_ring, count);
+ rx_ring->next_to_clean = rx_ntc;
+ tx_ring->next_to_clean = tx_ntc;
+
+ return count;
+}
+
+static int igb_run_loopback_test(struct igb_adapter *adapter)
+{
+ struct igb_ring *tx_ring = &adapter->test_tx_ring;
+ struct igb_ring *rx_ring = &adapter->test_rx_ring;
+ u16 i, j, lc, good_cnt;
+ int ret_val = 0;
+ unsigned int size = IGB_RX_HDR_LEN;
+ netdev_tx_t tx_ret_val;
+ struct sk_buff *skb;
+
+ /* allocate test skb */
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return 11;
+
+ /* place data into test skb */
+ igb_create_lbtest_frame(skb, size);
+ skb_put(skb, size);
+
+ /*
+ * Calculate the loop count based on the largest descriptor ring
+ * The idea is to wrap the largest ring a number of times using 64
+ * send/receive pairs during each loop
+ */
+
+ if (rx_ring->count <= tx_ring->count)
+ lc = ((tx_ring->count / 64) * 2) + 1;
+ else
+ lc = ((rx_ring->count / 64) * 2) + 1;
+
+ for (j = 0; j <= lc; j++) { /* loop count loop */
+ /* reset count of good packets */
+ good_cnt = 0;
+
+ /* place 64 packets on the transmit queue*/
+ for (i = 0; i < 64; i++) {
+ skb_get(skb);
+ tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
+ if (tx_ret_val == NETDEV_TX_OK)
+ good_cnt++;
+ }
+
+ if (good_cnt != 64) {
+ ret_val = 12;
+ break;
+ }
+
+ /* allow 200 milliseconds for packets to go from tx to rx */
+ msleep(200);
+
+ good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
+ if (good_cnt != 64) {
+ ret_val = 13;
+ break;
+ }
+ } /* end loop count loop */
+
+ /* free the original skb */
+ kfree_skb(skb);
+
+ return ret_val;
+}
+
+static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
+{
+ /* PHY loopback cannot be performed if SoL/IDER
+ * sessions are active */
+ if (e1000_check_reset_block(&adapter->hw)) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Cannot do PHY loopback test "
+ "when SoL/IDER is active.\n");
+ *data = 0;
+ goto out;
+ }
+ if (adapter->hw.mac.type == e1000_i354) {
+ dev_info(&adapter->pdev->dev,
+ "Loopback test not supported on i354.\n");
+ *data = 0;
+ goto out;
+ }
+ *data = igb_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+ *data = igb_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+ *data = igb_run_loopback_test(adapter);
+
+ igb_loopback_cleanup(adapter);
+
+err_loopback:
+ igb_free_desc_rings(adapter);
+out:
+ return *data;
+}
+
+static int igb_link_test(struct igb_adapter *adapter, u64 *data)
+{
+ u32 link;
+ int i, time;
+
+ *data = 0;
+ time = 0;
+ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
+ int i = 0;
+ adapter->hw.mac.serdes_has_link = FALSE;
+
+ /* On some blade server designs, link establishment
+ * could take as long as 2-3 minutes */
+ do {
+ e1000_check_for_link(&adapter->hw);
+ if (adapter->hw.mac.serdes_has_link)
+ goto out;
+ msleep(20);
+ } while (i++ < 3750);
+
+ *data = 1;
+ } else {
+		for (i = 0; i < IGB_MAX_LINK_TRIES; i++) {
+			link = igb_has_link(adapter);
+			if (link)
+				goto out;
+			time++;
+			msleep(1000);
+		}
+ if (!link)
+ *data = 1;
+ }
+ out:
+ return *data;
+}
+
+static void igb_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ u16 autoneg_advertised;
+ u8 forced_speed_duplex, autoneg;
+ bool if_running = netif_running(netdev);
+
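+	/* Result slots: data[0] registers, data[1] eeprom, data[2] interrupts,
+	 * data[3] loopback, data[4] link.
+	 */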
+ set_bit(__IGB_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ /* save speed, duplex, autoneg settings */
+ autoneg_advertised = adapter->hw.phy.autoneg_advertised;
+ forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
+ autoneg = adapter->hw.mac.autoneg;
+
+ dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n");
+
+ /* power up link for link test */
+ igb_power_up_link(adapter);
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (igb_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ igb_reset(adapter);
+
+ if (igb_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ igb_reset(adapter);
+ if (igb_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ igb_reset(adapter);
+ if (igb_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ igb_reset(adapter);
+
+ /* power up link for loopback test */
+ igb_power_up_link(adapter);
+
+ if (igb_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* restore speed, duplex, autoneg settings */
+ adapter->hw.phy.autoneg_advertised = autoneg_advertised;
+ adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
+ adapter->hw.mac.autoneg = autoneg;
+
+ /* force this routine to wait until autoneg complete/timeout */
+ adapter->hw.phy.autoneg_wait_to_complete = TRUE;
+ igb_reset(adapter);
+ adapter->hw.phy.autoneg_wait_to_complete = FALSE;
+
+ clear_bit(__IGB_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n");
+
+ /* PHY is powered down when interface is down */
+ if (if_running && igb_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ else
+ data[4] = 0;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ clear_bit(__IGB_TESTING, &adapter->state);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
+ wol->wolopts = 0;
+
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
+ return;
+
+ /* apply any specific unsupported masks here */
+ switch (adapter->hw.device_id) {
+ default:
+ break;
+ }
+
+ if (adapter->wol & E1000_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & E1000_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & E1000_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & E1000_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & E1000_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= E1000_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= E1000_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= E1000_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= E1000_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= E1000_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+/* bit defines for adapter->led_status */
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+static int igb_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ e1000_blink_led(hw);
+ return 2;
+ case ETHTOOL_ID_ON:
+ e1000_led_on(hw);
+ break;
+ case ETHTOOL_ID_OFF:
+ e1000_led_off(hw);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ e1000_led_off(hw);
+ e1000_cleanup_led(hw);
+ break;
+ }
+
+ return 0;
+}
+#else
+static int igb_phys_id(struct net_device *netdev, u32 data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long timeout;
+
+ timeout = data * 1000;
+
+ /*
+	 * msleep_interruptible only accepts an unsigned int, so we are limited
+ * in how long a duration we can wait
+ */
+ if (!timeout || timeout > UINT_MAX)
+ timeout = UINT_MAX;
+
+ e1000_blink_led(hw);
+ msleep_interruptible(timeout);
+
+ e1000_led_off(hw);
+ e1000_cleanup_led(hw);
+
+ return 0;
+}
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+
+static int igb_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
+ ((ec->rx_coalesce_usecs > 3) &&
+ (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
+	    (ec->rx_coalesce_usecs == 2)) {
+		printk(KERN_ERR "set_coalesce: invalid parameter\n");
+		return -EINVAL;
+	}
+
+ if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
+ ((ec->tx_coalesce_usecs > 3) &&
+ (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
+ (ec->tx_coalesce_usecs == 2))
+ return -EINVAL;
+
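+	/* With queue pairs each Tx ring shares its vector (and ITR) with an Rx
+	 * ring, so a separate Tx coalescing value cannot be honored.
+	 */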
+ if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ if (ec->tx_max_coalesced_frames_irq)
+ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+
+ /* If ITR is disabled, disable DMAC */
+ if (ec->rx_coalesce_usecs == 0) {
+ adapter->dmac = IGB_DMAC_DISABLE;
+ }
+
+	/* values <= 3 are special ITR modes; larger values hold the interval in register units (usecs << 2) */
+ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+
+	/* with queue pairs Tx shares the Rx setting; otherwise apply the same conversion to Tx */
+ if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+ else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+ if (q_vector->rx.ring)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ else
+ q_vector->itr_val = adapter->tx_itr_setting;
+ if (q_vector->itr_val && q_vector->itr_val <= 3)
+ q_vector->itr_val = IGB_START_ITR;
+ q_vector->set_itr = 1;
+ }
+
+ return 0;
+}
+
+static int igb_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->rx_itr_setting <= 3)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+
+ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
+ if (adapter->tx_itr_setting <= 3)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+ }
+
+ return 0;
+}
+
+static int igb_nway_reset(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ return 0;
+}
+
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+static int igb_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IGB_STATS_LEN;
+ case ETH_SS_TEST:
+ return IGB_TEST_LEN;
+ default:
+ return -ENOTSUPP;
+ }
+}
+#else
+static int igb_get_stats_count(struct net_device *netdev)
+{
+ return IGB_STATS_LEN;
+}
+
+static int igb_diag_test_count(struct net_device *netdev)
+{
+ return IGB_TEST_LEN;
+}
+#endif
+
+static void igb_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats *net_stats = &netdev->stats;
+#else
+ struct net_device_stats *net_stats = &adapter->net_stats;
+#endif
+ u64 *queue_stat;
+ int i, j, k;
+ char *p;
+
+ igb_update_stats(adapter);
+
+ for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
+ data[i] = (igb_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
+ p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
+ data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
+ for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
+ data[i] = queue_stat[k];
+ }
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
+ for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
+ data[i] = queue_stat[k];
+ }
+}
+
+static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *igb_gstrings_test,
+ IGB_TEST_LEN*ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, igb_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
+ memcpy(p, igb_gstrings_net_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_restart", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_drops", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_csum_err", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_alloc_failed", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_ipv4_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_ipv4e_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_ipv6_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_ipv6e_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_tcp_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_udp_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_sctp_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_nfs_packets", i);
+ p += ETH_GSTRING_LEN;
+ }
+/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+}
+
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+static int igb_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ switch (adapter->hw.mac.type) {
+#ifdef HAVE_PTP_1588_CLOCK
+ case e1000_82575:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+
+ /* 82576 does not support timestamping all packets. */
+ if (adapter->hw.mac.type >= e1000_82580)
+ info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+ else
+ info->rx_filters |=
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+#endif /* HAVE_PTP_1588_CLOCK */
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+
+#ifdef CONFIG_PM_RUNTIME
+static int igb_ethtool_begin(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+
+ return 0;
+}
+
+static void igb_ethtool_complete(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ pm_runtime_put(&adapter->pdev->dev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifndef HAVE_NDO_SET_FEATURES
+static u32 igb_get_rx_csum(struct net_device *netdev)
+{
+ return !!(netdev->features & NETIF_F_RXCSUM);
+}
+
+static int igb_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ const u32 feature_list = NETIF_F_RXCSUM;
+
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
+
+ return 0;
+}
+
+static int igb_set_tx_csum(struct net_device *netdev, u32 data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+#ifdef NETIF_F_IPV6_CSUM
+ u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+#else
+ u32 feature_list = NETIF_F_IP_CSUM;
+#endif
+
+ if (adapter->hw.mac.type >= e1000_82576)
+ feature_list |= NETIF_F_SCTP_CSUM;
+
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
+
+ return 0;
+}
+
+#ifdef NETIF_F_TSO
+static int igb_set_tso(struct net_device *netdev, u32 data)
+{
+#ifdef NETIF_F_TSO6
+ const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6;
+#else
+ const u32 feature_list = NETIF_F_TSO;
+#endif
+
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
+
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+ if (!data) {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct net_device *v_netdev;
+ int i;
+
+ /* disable TSO on all VLANs if they're present */
+ if (!adapter->vlgrp)
+ goto tso_out;
+
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ v_netdev = vlan_group_get_device(adapter->vlgrp, i);
+ if (!v_netdev)
+ continue;
+
+ v_netdev->features &= ~feature_list;
+ vlan_group_set_device(adapter->vlgrp, i, v_netdev);
+ }
+ }
+
+tso_out:
+
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+ return 0;
+}
+
+#endif /* NETIF_F_TSO */
+#ifdef ETHTOOL_GFLAGS
+static int igb_set_flags(struct net_device *netdev, u32 data)
+{
+ u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
+ ETH_FLAG_RXHASH;
+#ifndef HAVE_VLAN_RX_REGISTER
+ u32 changed = netdev->features ^ data;
+#endif
+ int rc;
+#ifndef IGB_NO_LRO
+
+ supported_flags |= ETH_FLAG_LRO;
+#endif
+	/*
+	 * Since separate Tx VLAN acceleration is not supported, make sure
+	 * the Tx flag is cleared whenever the Rx flag is cleared.
+	 */
+ if (!(data & ETH_FLAG_RXVLAN))
+ data &= ~ETH_FLAG_TXVLAN;
+
+ rc = ethtool_op_set_flags(netdev, data, supported_flags);
+ if (rc)
+ return rc;
+#ifndef HAVE_VLAN_RX_REGISTER
+
+ if (changed & ETH_FLAG_RXVLAN)
+ igb_vlan_mode(netdev, data);
+#endif
+
+ return 0;
+}
+
+#endif /* ETHTOOL_GFLAGS */
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_SADV_COAL
+static int igb_set_adv_coal(struct net_device *netdev, struct ethtool_value *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+	switch (edata->data) {
+	case IGB_DMAC_DISABLE:
+	case IGB_DMAC_MIN:
+	case IGB_DMAC_500:
+	case IGB_DMAC_EN_DEFAULT:
+	case IGB_DMAC_2000:
+	case IGB_DMAC_3000:
+	case IGB_DMAC_4000:
+	case IGB_DMAC_5000:
+	case IGB_DMAC_6000:
+	case IGB_DMAC_7000:
+	case IGB_DMAC_8000:
+	case IGB_DMAC_9000:
+	case IGB_DMAC_MAX:
+		adapter->dmac = edata->data;
+		break;
+	default:
+		adapter->dmac = IGB_DMAC_DISABLE;
+		printk(KERN_WARNING "set_dmac: invalid setting, setting DMAC to %d\n",
+		       adapter->dmac);
+	}
+	printk(KERN_INFO "%s: setting DMAC to %d\n", netdev->name, adapter->dmac);
+ return 0;
+}
+#endif /* ETHTOOL_SADV_COAL */
+#ifdef ETHTOOL_GADV_COAL
+static void igb_get_dmac(struct net_device *netdev,
+ struct ethtool_value *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ edata->data = adapter->dmac;
+
+ return;
+}
+#endif
+
+#ifdef ETHTOOL_GEEE
+static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ret_val;
+ u16 phy_data;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ edata->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full);
+
+ if (!hw->dev_spec._82575.eee_disable)
+ edata->advertised =
+ mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+ /* The IPCNFG and EEER registers are not supported on I354. */
+ if (hw->mac.type == e1000_i354) {
+ e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active);
+ } else {
+ u32 eeer;
+
+ eeer = E1000_READ_REG(hw, E1000_EEER);
+
+ /* EEE status on negotiated link */
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
+
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+ }
+
+ /* EEE Link Partner Advertised */
+ switch (hw->mac.type) {
+ case e1000_i350:
+ ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
+ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ break;
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
+ E1000_EEE_LP_ADV_DEV_I210,
+ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ break;
+ default:
+ break;
+ }
+
+ edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+
+ if ((hw->mac.type == e1000_i354) &&
+ (edata->eee_enabled))
+ edata->tx_lpi_enabled = true;
+
+ /*
+ * report correct negotiated EEE status for devices that
+ * wrongly report EEE at half-duplex
+ */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->tx_lpi_enabled = false;
+		edata->advertised = 0;
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef ETHTOOL_SEEE
+static int igb_set_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ ret_val = igb_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.eee_enabled) {
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ /* Tx LPI time is not implemented currently */
+ if (edata->tx_lpi_timer) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (edata->advertised &
+ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+				"EEE advertisement supports only 100TX and/or 1000T full duplex\n");
+ return -EINVAL;
+ }
+
+ } else if (!edata->eee_enabled) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE options is not supported with EEE disabled\n");
+ return -EINVAL;
+ }
+
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+
+ if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
+
+ /* reset link */
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ else
+ igb_reset(adapter);
+ }
+
+ return 0;
+}
+#endif /* ETHTOOL_SEEE */
+
+#ifdef ETHTOOL_GRXRINGS
+static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on igb */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
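+		/* fall through */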
+ case UDP_V4_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
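+		/* fall through */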
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
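+		/* fall through */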
+ case UDP_V6_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
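+		/* fall through */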
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+ void *rule_locs)
+#else
+ u32 *rule_locs)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = igb_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
+ IGB_FLAG_RSS_FIELD_IPV6_UDP)
+static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags = adapter->flags;
+
+ /*
+ * RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags != adapter->flags) {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc = E1000_READ_REG(hw, E1000_MRQC);
+
+ if ((flags & UDP_RSS_FLAGS) &&
+ !(adapter->flags & UDP_RSS_FLAGS))
+ DPRINTK(DRV, WARNING,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+ adapter->flags = flags;
+
+ /* Perform hash on these packet types */
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
+ E1000_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
+static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = igb_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+#endif /* ETHTOOL_GRXRINGS */
+
+static const struct ethtool_ops igb_ethtool_ops = {
+ .get_settings = igb_get_settings,
+ .set_settings = igb_set_settings,
+ .get_drvinfo = igb_get_drvinfo,
+ .get_regs_len = igb_get_regs_len,
+ .get_regs = igb_get_regs,
+ .get_wol = igb_get_wol,
+ .set_wol = igb_set_wol,
+ .get_msglevel = igb_get_msglevel,
+ .set_msglevel = igb_set_msglevel,
+ .nway_reset = igb_nway_reset,
+ .get_link = igb_get_link,
+ .get_eeprom_len = igb_get_eeprom_len,
+ .get_eeprom = igb_get_eeprom,
+ .set_eeprom = igb_set_eeprom,
+ .get_ringparam = igb_get_ringparam,
+ .set_ringparam = igb_set_ringparam,
+ .get_pauseparam = igb_get_pauseparam,
+ .set_pauseparam = igb_set_pauseparam,
+ .self_test = igb_diag_test,
+ .get_strings = igb_get_strings,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+ .set_phys_id = igb_set_phys_id,
+#else
+ .phys_id = igb_phys_id,
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+ .get_sset_count = igb_get_sset_count,
+#else
+ .get_stats_count = igb_get_stats_count,
+ .self_test_count = igb_diag_test_count,
+#endif
+ .get_ethtool_stats = igb_get_ethtool_stats,
+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
+ .get_perm_addr = ethtool_op_get_perm_addr,
+#endif
+ .get_coalesce = igb_get_coalesce,
+ .set_coalesce = igb_set_coalesce,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+ .get_ts_info = igb_get_ts_info,
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef CONFIG_PM_RUNTIME
+ .begin = igb_ethtool_begin,
+ .complete = igb_ethtool_complete,
+#endif /* CONFIG_PM_RUNTIME */
+#ifndef HAVE_NDO_SET_FEATURES
+ .get_rx_csum = igb_get_rx_csum,
+ .set_rx_csum = igb_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = igb_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = igb_set_tso,
+#endif
+#ifdef ETHTOOL_GFLAGS
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = igb_set_flags,
+#endif /* ETHTOOL_GFLAGS */
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GADV_COAL
+	.get_advcoal		= igb_get_dmac,
+	.set_advcoal		= igb_set_adv_coal,
+#endif /* ETHTOOL_GADV_COAL */
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef ETHTOOL_GEEE
+ .get_eee = igb_get_eee,
+#endif
+#ifdef ETHTOOL_SEEE
+ .set_eee = igb_set_eee,
+#endif
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef ETHTOOL_GRXRINGS
+ .get_rxnfc = igb_get_rxnfc,
+ .set_rxnfc = igb_set_rxnfc,
+#endif
+};
+
+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+static const struct ethtool_ops_ext igb_ethtool_ops_ext = {
+ .size = sizeof(struct ethtool_ops_ext),
+ .get_ts_info = igb_get_ts_info,
+ .set_phys_id = igb_set_phys_id,
+ .get_eee = igb_get_eee,
+ .set_eee = igb_set_eee,
+};
+
+void igb_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+ set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext);
+}
+#else
+void igb_set_ethtool_ops(struct net_device *netdev)
+{
+ /* have to "undeclare" const on this struct to remove warnings */
+ SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops);
+}
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#endif /* SIOCETHTOOL */
+
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c
new file mode 100755
index 00000000..07a1ae07
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_hwmon.c
@@ -0,0 +1,260 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+#ifdef IGB_HWMON
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/hwmon.h>
+#include <linux/pci.h>
+
+#ifdef HAVE_I2C_SUPPORT
+static struct i2c_board_info i350_sensor_info = {
+	I2C_BOARD_INFO("i350bb", (0xF8 >> 1)),
+};
+#endif /* HAVE_I2C_SUPPORT */
+
+/* hwmon callback functions */
+static ssize_t igb_hwmon_show_location(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ return sprintf(buf, "loc%u\n",
+ igb_attr->sensor->location);
+}
+
+static ssize_t igb_hwmon_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value;
+
+	/* refresh the thermal sensor data before reading the temp field */
+ igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw);
+
+ value = igb_attr->sensor->temp;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value = igb_attr->sensor->caution_thresh;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value = igb_attr->sensor->max_op_thresh;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @ adapter: pointer to the adapter structure
+ * @ offset: offset in the eeprom sensor data table
+ * @ type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute
+ * This is included in our hwmon_attr struct that contains the references to
+ * the data structures we need to get the data to display.
+ */
+static int igb_add_hwmon_attr(struct igb_adapter *adapter,
+ unsigned int offset, int type) {
+ int rc;
+ unsigned int n_attr;
+ struct hwmon_attr *igb_attr;
+
+ n_attr = adapter->igb_hwmon_buff.n_hwmon;
+ igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+
+ switch (type) {
+ case IGB_HWMON_TYPE_LOC:
+ igb_attr->dev_attr.show = igb_hwmon_show_location;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_label", offset);
+ break;
+ case IGB_HWMON_TYPE_TEMP:
+ igb_attr->dev_attr.show = igb_hwmon_show_temp;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_input", offset);
+ break;
+ case IGB_HWMON_TYPE_CAUTION:
+ igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_max", offset);
+ break;
+ case IGB_HWMON_TYPE_MAX:
+ igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_crit", offset);
+ break;
+ default:
+ rc = -EPERM;
+ return rc;
+ }
+
+	/* These are always the same regardless of type */
+ igb_attr->sensor =
+ &adapter->hw.mac.thermal_sensor_data.sensor[offset];
+ igb_attr->hw = &adapter->hw;
+ igb_attr->dev_attr.store = NULL;
+ igb_attr->dev_attr.attr.mode = S_IRUGO;
+ igb_attr->dev_attr.attr.name = igb_attr->name;
+ sysfs_attr_init(&igb_attr->dev_attr.attr);
+ rc = device_create_file(&adapter->pdev->dev,
+ &igb_attr->dev_attr);
+ if (rc == 0)
+ ++adapter->igb_hwmon_buff.n_hwmon;
+
+ return rc;
+}
+
+static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
+{
+ int i;
+
+ if (adapter == NULL)
+ return;
+
+ for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
+ device_remove_file(&adapter->pdev->dev,
+ &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
+ }
+
+ kfree(adapter->igb_hwmon_buff.hwmon_list);
+
+ if (adapter->igb_hwmon_buff.device)
+ hwmon_device_unregister(adapter->igb_hwmon_buff.device);
+}
+
+/* called from igb_main.c */
+void igb_sysfs_exit(struct igb_adapter *adapter)
+{
+ igb_sysfs_del_adapter(adapter);
+}
+
+/* called from igb_main.c */
+int igb_sysfs_init(struct igb_adapter *adapter)
+{
+ struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+ unsigned int i;
+ int n_attrs;
+ int rc = 0;
+#ifdef HAVE_I2C_SUPPORT
+ struct i2c_client *client = NULL;
+#endif /* HAVE_I2C_SUPPORT */
+
+ /* If this method isn't defined we don't support thermals */
+ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
+ goto exit;
+
+ /* Don't create thermal hwmon interface if no sensors present */
+ rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
+ if (rc)
+ goto exit;
+#ifdef HAVE_I2C_SUPPORT
+ /* init i2c_client */
+ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+ if (client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device..\n");
+ goto exit;
+ }
+ adapter->i2c_client = client;
+#endif /* HAVE_I2C_SUPPORT */
+
+	/* Allocate space for the maximum number of attributes:
+	 * max num sensors * values (loc, temp, max, caution)
+	 */
+ n_attrs = E1000_MAX_SENSORS * 4;
+ igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
+ GFP_KERNEL);
+ if (!igb_hwmon->hwmon_list) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
+ if (IS_ERR(igb_hwmon->device)) {
+ rc = PTR_ERR(igb_hwmon->device);
+ goto err;
+ }
+
+ for (i = 0; i < E1000_MAX_SENSORS; i++) {
+
+ /* Only create hwmon sysfs entries for sensors that have
+ * meaningful data.
+ */
+ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+ continue;
+
+ /* Bail if any hwmon attr struct fails to initialize */
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+ if (rc)
+ goto err;
+ }
+
+ goto exit;
+
+err:
+ igb_sysfs_del_adapter(adapter);
+exit:
+ return rc;
+}
+#endif /* IGB_HWMON */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
new file mode 100755
index 00000000..a802a021
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
@@ -0,0 +1,10263 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+#ifdef NETIF_F_TSO
+#include <net/checksum.h>
+#ifdef NETIF_F_TSO6
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#endif
+#endif
+#ifdef SIOCGMIIPHY
+#include <linux/mii.h>
+#endif
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+#include <linux/if_vlan.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+
+#include <linux/if_bridge.h>
+#include "igb.h"
+#include "igb_vmdq.h"
+
+#include <linux/uio_driver.h>
+
+#if defined(DEBUG) || defined(DEBUG_DUMP) || defined(DEBUG_ICR) || defined(DEBUG_ITR)
+#define DRV_DEBUG "_debug"
+#else
+#define DRV_DEBUG
+#endif
+#define DRV_HW_PERF
+#define VERSION_SUFFIX
+
+#define MAJ 5
+#define MIN 0
+#define BUILD 6
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." __stringify(BUILD) VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF
+
+char igb_driver_name[] = "igb";
+char igb_driver_version[] = DRV_VERSION;
+static const char igb_driver_string[] =
+ "Intel(R) Gigabit Ethernet Network Driver";
+static const char igb_copyright[] =
+ "Copyright (c) 2007-2013 Intel Corporation.";
+
+static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) },
+ /* required last entry */
+ {0, }
+};
+
+//MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
+static void igb_set_sriov_capability(struct igb_adapter *adapter) __attribute__((__unused__));
+void igb_reset(struct igb_adapter *);
+static int igb_setup_all_tx_resources(struct igb_adapter *);
+static int igb_setup_all_rx_resources(struct igb_adapter *);
+static void igb_free_all_tx_resources(struct igb_adapter *);
+static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
+void igb_update_stats(struct igb_adapter *);
+static int igb_probe(struct pci_dev *, const struct pci_device_id *);
+static void __devexit igb_remove(struct pci_dev *pdev);
+static int igb_sw_init(struct igb_adapter *);
+static int igb_open(struct net_device *);
+static int igb_close(struct net_device *);
+static void igb_configure(struct igb_adapter *);
+static void igb_configure_tx(struct igb_adapter *);
+static void igb_configure_rx(struct igb_adapter *);
+static void igb_clean_all_tx_rings(struct igb_adapter *);
+static void igb_clean_all_rx_rings(struct igb_adapter *);
+static void igb_clean_tx_ring(struct igb_ring *);
+static void igb_set_rx_mode(struct net_device *);
+static void igb_update_phy_info(unsigned long);
+static void igb_watchdog(unsigned long);
+static void igb_watchdog_task(struct work_struct *);
+static void igb_dma_err_task(struct work_struct *);
+static void igb_dma_err_timer(unsigned long data);
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
+static struct net_device_stats *igb_get_stats(struct net_device *);
+static int igb_change_mtu(struct net_device *, int);
+void igb_full_sync_mac_table(struct igb_adapter *adapter);
+static int igb_set_mac(struct net_device *, void *);
+static void igb_set_uta(struct igb_adapter *adapter);
+static irqreturn_t igb_intr(int irq, void *);
+static irqreturn_t igb_intr_msi(int irq, void *);
+static irqreturn_t igb_msix_other(int irq, void *);
+static irqreturn_t igb_msix_ring(int irq, void *);
+#ifdef IGB_DCA
+static void igb_update_dca(struct igb_q_vector *);
+static void igb_setup_dca(struct igb_adapter *);
+#endif /* IGB_DCA */
+static int igb_poll(struct napi_struct *, int);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
+static bool igb_clean_rx_irq(struct igb_q_vector *, int);
+static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
+static void igb_tx_timeout(struct net_device *);
+static void igb_reset_task(struct work_struct *);
+#ifdef HAVE_VLAN_RX_REGISTER
+static void igb_vlan_mode(struct net_device *, struct vlan_group *);
+#endif
+#ifdef HAVE_VLAN_PROTOCOL
+static int igb_vlan_rx_add_vid(struct net_device *,
+ __be16 proto, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *,
+ __be16 proto, u16);
+#elif defined HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+static int igb_vlan_rx_add_vid(struct net_device *,
+ __always_unused __be16 proto, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *,
+ __always_unused __be16 proto, u16);
+#else
+static int igb_vlan_rx_add_vid(struct net_device *, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *, u16);
+#endif
+#else
+static void igb_vlan_rx_add_vid(struct net_device *, u16);
+static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+#endif
+static void igb_restore_vlan(struct igb_adapter *);
+void igb_rar_set(struct igb_adapter *adapter, u32 index);
+static void igb_ping_all_vfs(struct igb_adapter *);
+static void igb_msg_task(struct igb_adapter *);
+static void igb_vmm_control(struct igb_adapter *);
+static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
+static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
+static void igb_process_mdd_event(struct igb_adapter *);
+#ifdef IFLA_VF_MAX
+static int igb_ndo_set_vf_mac( struct net_device *netdev, int vf, u8 *mac);
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos);
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
+ bool setting);
+#endif
+#ifdef HAVE_VF_MIN_MAX_TXRATE
+static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
+#else /* HAVE_VF_MIN_MAX_TXRATE */
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+#endif /* HAVE_VF_MIN_MAX_TXRATE */
+static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi);
+static void igb_check_vf_rate_limit(struct igb_adapter *);
+#endif
+static int igb_vf_configure(struct igb_adapter *adapter, int vf);
+#ifdef CONFIG_PM
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+static int igb_suspend(struct device *dev);
+static int igb_resume(struct device *dev);
+#ifdef CONFIG_PM_RUNTIME
+static int igb_runtime_suspend(struct device *dev);
+static int igb_runtime_resume(struct device *dev);
+static int igb_runtime_idle(struct device *dev);
+#endif /* CONFIG_PM_RUNTIME */
+static const struct dev_pm_ops igb_pm_ops = {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
+ .suspend = igb_suspend,
+ .resume = igb_resume,
+ .freeze = igb_suspend,
+ .thaw = igb_resume,
+ .poweroff = igb_suspend,
+ .restore = igb_resume,
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = igb_runtime_suspend,
+ .runtime_resume = igb_runtime_resume,
+ .runtime_idle = igb_runtime_idle,
+#endif
+#else /* Linux >= 2.6.34 */
+ SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
+#ifdef CONFIG_PM_RUNTIME
+ SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
+ igb_runtime_idle)
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* Linux version */
+};
+#else
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state);
+static int igb_resume(struct pci_dev *pdev);
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+#endif /* CONFIG_PM */
+#ifndef USE_REBOOT_NOTIFIER
+static void igb_shutdown(struct pci_dev *);
+#else
+static int igb_notify_reboot(struct notifier_block *, unsigned long, void *);
+static struct notifier_block igb_notifier_reboot = {
+ .notifier_call = igb_notify_reboot,
+ .next = NULL,
+ .priority = 0
+};
+#endif
+#ifdef IGB_DCA
+static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
+static struct notifier_block dca_notifier = {
+ .notifier_call = igb_notify_dca,
+ .next = NULL,
+ .priority = 0
+};
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void igb_netpoll(struct net_device *);
+#endif
+
+#ifdef HAVE_PCI_ERS
+static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
+static void igb_io_resume(struct pci_dev *);
+
+static struct pci_error_handlers igb_err_handler = {
+ .error_detected = igb_io_error_detected,
+ .slot_reset = igb_io_slot_reset,
+ .resume = igb_io_resume,
+};
+#endif
+
+static void igb_init_fw(struct igb_adapter *adapter);
+static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
+
+static struct pci_driver igb_driver = {
+ .name = igb_driver_name,
+ .id_table = igb_pci_tbl,
+ .probe = igb_probe,
+ .remove = __devexit_p(igb_remove),
+#ifdef CONFIG_PM
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+ .driver.pm = &igb_pm_ops,
+#else
+ .suspend = igb_suspend,
+ .resume = igb_resume,
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+#endif /* CONFIG_PM */
+#ifndef USE_REBOOT_NOTIFIER
+ .shutdown = igb_shutdown,
+#endif
+#ifdef HAVE_PCI_ERS
+ .err_handler = &igb_err_handler
+#endif
+};
+
+//MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+//MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
+//MODULE_LICENSE("GPL");
+//MODULE_VERSION(DRV_VERSION);
+
+static void igb_vfta_set(struct igb_adapter *adapter, u32 vid, bool add)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie;
+ u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
+ u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ u32 vfta;
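+	/* Each 32-bit VFTA register covers 32 VLAN IDs: the upper bits of the
+	 * VID select the register (index), the low 5 bits select the bit (mask).
+	 */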
+
+ /*
+ * if this is the management vlan the only option is to add it in so
+ * that the management pass through will continue to work
+ */
+ if ((mng_cookie->status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+ (vid == mng_cookie->vlan_id))
+ add = TRUE;
+
+ vfta = adapter->shadow_vfta[index];
+
+ if (add)
+ vfta |= mask;
+ else
+ vfta &= ~mask;
+
+ e1000_write_vfta(hw, index, vfta);
+ adapter->shadow_vfta[index] = vfta;
+}
+
+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+//module_param(debug, int, 0);
+//MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)");
+
+/**
+ * igb_init_module - Driver Registration Routine
+ *
+ * igb_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init igb_init_module(void)
+{
+ int ret;
+
+ printk(KERN_INFO "%s - version %s\n",
+ igb_driver_string, igb_driver_version);
+
+ printk(KERN_INFO "%s\n", igb_copyright);
+#ifdef IGB_HWMON
+/* only use IGB_PROCFS if IGB_HWMON is not defined */
+#else
+#ifdef IGB_PROCFS
+ if (igb_procfs_topdir_init())
+ printk(KERN_INFO "Procfs failed to initialize topdir\n");
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
+
+#ifdef IGB_DCA
+ dca_register_notify(&dca_notifier);
+#endif
+ ret = pci_register_driver(&igb_driver);
+#ifdef USE_REBOOT_NOTIFIER
+ if (ret >= 0) {
+ register_reboot_notifier(&igb_notifier_reboot);
+ }
+#endif
+ return ret;
+}
+
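+/* The #undef/#define below turn module_init into a no-op: igb_init_module
+ * stays compiled (marked unused) but is never registered, since this copy of
+ * the driver is linked into the KNI ethtool support rather than loaded as a
+ * standalone module.
+ */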
+#undef module_init
+#define module_init(x) static int x(void) __attribute__((__unused__));
+module_init(igb_init_module);
+
+/**
+ * igb_exit_module - Driver Exit Cleanup Routine
+ *
+ * igb_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit igb_exit_module(void)
+{
+#ifdef IGB_DCA
+ dca_unregister_notify(&dca_notifier);
+#endif
+#ifdef USE_REBOOT_NOTIFIER
+ unregister_reboot_notifier(&igb_notifier_reboot);
+#endif
+ pci_unregister_driver(&igb_driver);
+
+#ifdef IGB_HWMON
+/* only compile IGB_PROCFS if IGB_HWMON is not defined */
+#else
+#ifdef IGB_PROCFS
+ igb_procfs_topdir_exit();
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
+}
+
+#undef module_exit
+#define module_exit(x) static void x(void) __attribute__((__unused__));
+module_exit(igb_exit_module);
+
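+/* 82576 interleaves queue indices between pools: even i maps to queues 0-7,
+ * odd i to queues 8-15, e.g. Q_IDX_82576(1) == 8 and Q_IDX_82576(2) == 1.
+ */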
+#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
+/**
+ * igb_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ **/
+static void igb_cache_ring_register(struct igb_adapter *adapter)
+{
+ int i = 0, j = 0;
+ u32 rbase_offset = adapter->vfs_allocated_count;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ /* The queues are allocated for virtualization such that VF 0
+ * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
+ * In order to avoid collision we start at the first free queue
+ * and continue consuming queues in the same sequence
+ */
+ if ((adapter->rss_queues > 1) && adapter->vmdq_pools) {
+ for (; i < adapter->rss_queues; i++)
+ adapter->rx_ring[i]->reg_idx = rbase_offset +
+ Q_IDX_82576(i);
+ }
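+		/* fall through - remaining queues use the default 1:1 mapping below */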
+ case e1000_82575:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ default:
+ for (; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = rbase_offset + i;
+ for (; j < adapter->num_tx_queues; j++)
+ adapter->tx_ring[j]->reg_idx = rbase_offset + j;
+ break;
+ }
+}
+
+static void igb_configure_lli(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 port;
+
+ /* LLI should only be enabled for MSI-X or MSI interrupts */
+ if (!adapter->msix_entries && !(adapter->flags & IGB_FLAG_HAS_MSI))
+ return;
+
+ if (adapter->lli_port) {
+ /* use filter 0 for port */
+ port = htons((u16)adapter->lli_port);
+ E1000_WRITE_REG(hw, E1000_IMIR(0),
+ (port | E1000_IMIR_PORT_IM_EN));
+ E1000_WRITE_REG(hw, E1000_IMIREXT(0),
+ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+ }
+
+ if (adapter->flags & IGB_FLAG_LLI_PUSH) {
+ /* use filter 1 for push flag */
+ E1000_WRITE_REG(hw, E1000_IMIR(1),
+ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
+ E1000_WRITE_REG(hw, E1000_IMIREXT(1),
+ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH));
+ }
+
+ if (adapter->lli_size) {
+ /* use filter 2 for size */
+ E1000_WRITE_REG(hw, E1000_IMIR(2),
+ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
+ E1000_WRITE_REG(hw, E1000_IMIREXT(2),
+ (adapter->lli_size | E1000_IMIREXT_CTRL_BP));
+ }
+
+}
+
+/**
+ * igb_write_ivar - configure ivar for given MSI-X vector
+ * @hw: pointer to the HW structure
+ * @msix_vector: vector number we are allocating to a given ring
+ * @index: row index of IVAR register to write within IVAR table
+ * @offset: column offset of in IVAR, should be multiple of 8
+ *
+ * This function is intended to handle the writing of the IVAR register
+ * for adapters 82576 and newer. The IVAR table consists of 2 columns,
+ * each containing a cause allocation for an Rx and Tx ring, and a
+ * variable number of rows depending on the number of queues supported.
+ **/
+static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
+ int index, int offset)
+{
+ u32 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+
+ /* clear any bits that are currently set */
+ ivar &= ~((u32)0xFF << offset);
+
+ /* write vector and valid bit */
+ ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+}
+
+#define IGB_N0_QUEUE -1
+static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ int rx_queue = IGB_N0_QUEUE;
+ int tx_queue = IGB_N0_QUEUE;
+ u32 msixbm = 0;
+
+ if (q_vector->rx.ring)
+ rx_queue = q_vector->rx.ring->reg_idx;
+ if (q_vector->tx.ring)
+ tx_queue = q_vector->tx.ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ /* The 82575 assigns vectors using a bitmask, which matches the
+ bitmask for the EICR/EIMS/EIMC registers. To assign one
+ or more queues to a vector, we write the appropriate bits
+ into the MSIXBM register for that vector. */
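+ /* Worked example (illustrative): for rx_queue = 2 and tx_queue = 2,
+ * msixbm becomes (E1000_EICR_RX_QUEUE0 << 2) | (E1000_EICR_TX_QUEUE0 << 2),
+ * i.e. both queue-2 cause bits are steered to this vector.
+ */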
+ if (rx_queue > IGB_N0_QUEUE)
+ msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
+ if (tx_queue > IGB_N0_QUEUE)
+ msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+ if (!adapter->msix_entries && msix_vector == 0)
+ msixbm |= E1000_EIMS_OTHER;
+ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm);
+ q_vector->eims_value = msixbm;
+ break;
+ case e1000_82576:
+ /*
+ * 82576 uses a table that essentially consists of 2 columns
+ * with 8 rows. The ordering is column-major so we use the
+ * lower 3 bits as the row index, and the 4th bit as the
+ * column offset.
+ */
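+ /* Worked example (illustrative): rx_queue = 10 gives row index
+ * 10 & 0x7 = 2 and column offset (10 & 0x8) << 1 = 16, so the vector
+ * is written into byte 2 of IVAR entry 2.
+ */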
+ if (rx_queue > IGB_N0_QUEUE)
+ igb_write_ivar(hw, msix_vector,
+ rx_queue & 0x7,
+ (rx_queue & 0x8) << 1);
+ if (tx_queue > IGB_N0_QUEUE)
+ igb_write_ivar(hw, msix_vector,
+ tx_queue & 0x7,
+ ((tx_queue & 0x8) << 1) + 8);
+ q_vector->eims_value = 1 << msix_vector;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ /*
+ * On 82580 and newer adapters the scheme is similar to 82576
+ * however instead of ordering column-major we have things
+ * ordered row-major. So we traverse the table by using
+ * bit 0 as the column offset, and the remaining bits as the
+ * row index.
+ */
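+ /* Worked example (illustrative): rx_queue = 5 gives row index
+ * 5 >> 1 = 2 and column offset (5 & 0x1) << 4 = 16, i.e. byte 2 of
+ * IVAR entry 2; the matching Tx cause uses offset 16 + 8 = 24.
+ */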
+ if (rx_queue > IGB_N0_QUEUE)
+ igb_write_ivar(hw, msix_vector,
+ rx_queue >> 1,
+ (rx_queue & 0x1) << 4);
+ if (tx_queue > IGB_N0_QUEUE)
+ igb_write_ivar(hw, msix_vector,
+ tx_queue >> 1,
+ ((tx_queue & 0x1) << 4) + 8);
+ q_vector->eims_value = 1 << msix_vector;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= q_vector->eims_value;
+
+ /* configure q_vector to set itr on first interrupt */
+ q_vector->set_itr = 1;
+}
+
+/**
+ * igb_configure_msix - Configure MSI-X hardware
+ *
+ * igb_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ **/
+static void igb_configure_msix(struct igb_adapter *adapter)
+{
+ u32 tmp;
+ int i, vector = 0;
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->eims_enable_mask = 0;
+
+ /* set vector for other causes, i.e. link changes */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* enable MSI-X PBA support*/
+ tmp |= E1000_CTRL_EXT_PBA_CLR;
+
+ /* Auto-Mask interrupts upon ICR read. */
+ tmp |= E1000_CTRL_EXT_EIAME;
+ tmp |= E1000_CTRL_EXT_IRCA;
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
+
+ /* enable msix_other interrupt */
+ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++,
+ E1000_EIMS_OTHER);
+ adapter->eims_other = E1000_EIMS_OTHER;
+
+ break;
+
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ /* Turn on MSI-X capability first, or our settings
+ * won't stick. And it will take days to debug. */
+ E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
+ E1000_GPIE_PBA | E1000_GPIE_EIAME |
+ E1000_GPIE_NSICR);
+
+ /* enable msix_other interrupt */
+ adapter->eims_other = 1 << vector;
+ tmp = (vector++ | E1000_IVAR_VALID) << 8;
+
+ E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmp);
+ break;
+ default:
+ /* do nothing, since nothing else supports MSI-X */
+ break;
+ } /* switch (hw->mac.type) */
+
+ adapter->eims_enable_mask |= adapter->eims_other;
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ igb_assign_vector(adapter->q_vector[i], vector++);
+
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * igb_request_msix - Initialize MSI-X interrupts
+ *
+ * igb_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ **/
+static int igb_request_msix(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ int i, err = 0, vector = 0, free_vector = 0;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ &igb_msix_other, 0, netdev->name, adapter);
+ if (err)
+ goto err_out;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+
+ vector++;
+
+ q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+ if (q_vector->rx.ring && q_vector->tx.ring)
+ sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+ q_vector->rx.ring->queue_index);
+ else if (q_vector->tx.ring)
+ sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+ q_vector->tx.ring->queue_index);
+ else if (q_vector->rx.ring)
+ sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+ q_vector->rx.ring->queue_index);
+ else
+ sprintf(q_vector->name, "%s-unused", netdev->name);
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igb_msix_ring, 0, q_vector->name,
+ q_vector);
+ if (err)
+ goto err_free;
+ }
+
+ igb_configure_msix(adapter);
+ return 0;
+
+err_free:
+ /* free already assigned IRQs */
+ free_irq(adapter->msix_entries[free_vector++].vector, adapter);
+
+ vector--;
+ for (i = 0; i < vector; i++) {
+ free_irq(adapter->msix_entries[free_vector++].vector,
+ adapter->q_vector[i]);
+ }
+err_out:
+ return err;
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
+ pci_disable_msi(adapter->pdev);
+ }
+}
+
+/**
+ * igb_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+#ifndef IGB_NO_LRO
+ __skb_queue_purge(&q_vector->lrolist.active);
+#endif
+ kfree(q_vector);
+}
+
+/**
+ * igb_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vectors(struct igb_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
+}
+
+/**
+ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ *
+ * This function resets the device so that it has no Rx queues, Tx queues, or
+ * MSI-X interrupts allocated.
+ */
+static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
+{
+ igb_free_q_vectors(adapter);
+ igb_reset_interrupt_capability(adapter);
+}
+
+/**
+ * igb_process_mdd_event
+ * @adapter - board private structure
+ *
+ * Identify a malicious VF, disable the VF TX/RX queues and log a message.
+ */
+static void igb_process_mdd_event(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 lvmmc, vfte, vfre, mdfb;
+ u8 vf_queue;
+
+ lvmmc = E1000_READ_REG(hw, E1000_LVMMC);
+ vf_queue = lvmmc >> 29;
+
+ /* VF index cannot be greater than or equal to the number of VFs allocated */
+ if (vf_queue >= adapter->vfs_allocated_count)
+ return;
+
+ netdev_info(adapter->netdev,
+ "VF %d misbehaved. VF queues are disabled. "
+ "VM misbehavior code is 0x%x\n", vf_queue, lvmmc);
+
+ /* Disable VFTE and VFRE related bits */
+ vfte = E1000_READ_REG(hw, E1000_VFTE);
+ vfte &= ~(1 << vf_queue);
+ E1000_WRITE_REG(hw, E1000_VFTE, vfte);
+
+ vfre = E1000_READ_REG(hw, E1000_VFRE);
+ vfre &= ~(1 << vf_queue);
+ E1000_WRITE_REG(hw, E1000_VFRE, vfre);
+
+ /* Disable MDFB related bit. Clear on write */
+ mdfb = E1000_READ_REG(hw, E1000_MDFB);
+ mdfb |= (1 << vf_queue);
+ E1000_WRITE_REG(hw, E1000_MDFB, mdfb);
+
+ /* Reset the specific VF */
+ E1000_WRITE_REG(hw, E1000_VTCTRL(vf_queue), E1000_VTCTRL_RST);
+}
+
+/**
+ * igb_disable_mdd
+ * @adapter - board private structure
+ *
+ * Disable MDD behavior in the HW
+ **/
+static void igb_disable_mdd(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
+
+ if ((hw->mac.type != e1000_i350) &&
+ (hw->mac.type != e1000_i354))
+ return;
+
+ reg = E1000_READ_REG(hw, E1000_DTXCTL);
+ reg &= (~E1000_DTXCTL_MDP_EN);
+ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
+}
+
+/**
+ * igb_enable_mdd
+ * @adapter - board private structure
+ *
+ * Enable the HW to detect a malicious driver and send an interrupt to
+ * the driver.
+ **/
+static void igb_enable_mdd(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
+
+ /* Only available on i350 device */
+ if (hw->mac.type != e1000_i350)
+ return;
+
+ reg = E1000_READ_REG(hw, E1000_DTXCTL);
+ reg |= E1000_DTXCTL_MDP_EN;
+ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
+}
+
+/**
+ * igb_reset_sriov_capability - disable SR-IOV if enabled
+ *
+ * Attempt to disable single root IO virtualization capabilities present in the
+ * kernel.
+ **/
+static void igb_reset_sriov_capability(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* reclaim resources allocated to VFs */
+ if (adapter->vf_data) {
+ if (!pci_vfs_assigned(pdev)) {
+ /*
+ * disable iov and allow time for transactions to
+ * clear
+ */
+ pci_disable_sriov(pdev);
+ msleep(500);
+
+ dev_info(pci_dev_to_dev(pdev), "IOV Disabled\n");
+ } else {
+ dev_info(pci_dev_to_dev(pdev), "IOV Not Disabled\n "
+ "VF(s) are assigned to guests!\n");
+ }
+ /* Disable Malicious Driver Detection */
+ igb_disable_mdd(adapter);
+
+ /* free vf data storage */
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+
+ /* switch rings back to PF ownership */
+ E1000_WRITE_REG(hw, E1000_IOVCTL,
+ E1000_IOVCTL_REUSE_VFQ);
+ E1000_WRITE_FLUSH(hw);
+ msleep(100);
+ }
+
+ adapter->vfs_allocated_count = 0;
+}
+
+/**
+ * igb_set_sriov_capability - setup SR-IOV if supported
+ *
+ * Attempt to enable single root IO virtualization capabilities present in the
+ * kernel.
+ **/
+static void igb_set_sriov_capability(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int old_vfs = 0;
+ int i;
+
+ old_vfs = pci_num_vf(pdev);
+ if (old_vfs) {
+ dev_info(pci_dev_to_dev(pdev),
+ "%d pre-allocated VFs found - override "
+ "max_vfs setting of %d\n", old_vfs,
+ adapter->vfs_allocated_count);
+ adapter->vfs_allocated_count = old_vfs;
+ }
+ /* no VFs requested, do nothing */
+ if (!adapter->vfs_allocated_count)
+ return;
+
+ /* allocate vf data storage */
+ adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+ sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+
+ if (adapter->vf_data) {
+ if (!old_vfs) {
+ if (pci_enable_sriov(pdev,
+ adapter->vfs_allocated_count))
+ goto err_out;
+ }
+ for (i = 0; i < adapter->vfs_allocated_count; i++)
+ igb_vf_configure(adapter, i);
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ /* Enable VM to VM loopback by default */
+ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE;
+ break;
+ default:
+ /* Currently no other hardware supports loopback */
+ break;
+ }
+
+ /* DMA Coalescing is not supported in IOV mode. */
+ if (adapter->hw.mac.type >= e1000_i350)
+ adapter->dmac = IGB_DMAC_DISABLE;
+ if (adapter->hw.mac.type < e1000_i350)
+ adapter->flags |= IGB_FLAG_DETECT_BAD_DMA;
+ return;
+
+ }
+
+err_out:
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ adapter->vfs_allocated_count = 0;
+ dev_warn(pci_dev_to_dev(pdev),
+ "Failed to initialize SR-IOV virtualization\n");
+}
+
+/**
+ * igb_set_interrupt_capability - set MSI or MSI-X if supported
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+ int numvecs, i;
+
+ if (!msix)
+ adapter->int_mode = IGB_INT_MODE_MSI;
+
+ /* Number of supported queues. */
+ adapter->num_rx_queues = adapter->rss_queues;
+
+ if (adapter->vmdq_pools > 1)
+ adapter->num_rx_queues += adapter->vmdq_pools - 1;
+
+#ifdef HAVE_TX_MQ
+ if (adapter->vmdq_pools)
+ adapter->num_tx_queues = adapter->vmdq_pools;
+ else
+ adapter->num_tx_queues = adapter->num_rx_queues;
+#else
+ adapter->num_tx_queues = max_t(u32, 1, adapter->vmdq_pools);
+#endif
+
+ switch (adapter->int_mode) {
+ case IGB_INT_MODE_MSIX:
+ /* start with one vector for every rx queue */
+ numvecs = adapter->num_rx_queues;
+
+ /* if tx handler is separate add 1 for every tx queue */
+ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
+ numvecs += adapter->num_tx_queues;
+
+ /* store the number of vectors reserved for queues */
+ adapter->num_q_vectors = numvecs;
+
+ /* add 1 vector for link status interrupts */
+ numvecs++;
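+ /* Illustrative count (added example): with rss_queues = 4 and queue
+ * pairing disabled this requests 4 Rx + 4 Tx + 1 link/other = 9
+ * MSI-X vectors.
+ */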
+ adapter->msix_entries = kcalloc(numvecs,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (adapter->msix_entries) {
+ for (i = 0; i < numvecs; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(pdev,
+ adapter->msix_entries, numvecs);
+ if (err == 0)
+ break;
+ }
+ /* MSI-X failed, so fall through and try MSI */
+ dev_warn(pci_dev_to_dev(pdev), "Failed to initialize MSI-X interrupts. "
+ "Falling back to MSI interrupts.\n");
+ igb_reset_interrupt_capability(adapter);
+ case IGB_INT_MODE_MSI:
+ if (!pci_enable_msi(pdev))
+ adapter->flags |= IGB_FLAG_HAS_MSI;
+ else
+ dev_warn(pci_dev_to_dev(pdev), "Failed to initialize MSI "
+ "interrupts. Falling back to legacy "
+ "interrupts.\n");
+ /* Fall through */
+ case IGB_INT_MODE_LEGACY:
+ /* disable advanced features and set number of queues to 1 */
+ igb_reset_sriov_capability(adapter);
+ adapter->vmdq_pools = 0;
+ adapter->rss_queues = 1;
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_q_vectors = 1;
+ /* Don't do anything; this is system default */
+ break;
+ }
+}
+
+static void igb_add_ring(struct igb_ring *ring,
+ struct igb_ring_container *head)
+{
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * igb_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
+{
+ struct igb_q_vector *q_vector;
+ struct igb_ring *ring;
+ int ring_count, size;
+
+ /* igb only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct igb_q_vector) +
+ (sizeof(struct igb_ring) * ring_count);
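+ /* Note (added for clarity): the q_vector and its rings come from a
+ * single allocation, e.g. one Tx plus one Rx ring gives
+ * sizeof(struct igb_q_vector) + 2 * sizeof(struct igb_ring) bytes,
+ * keeping each ring adjacent to the vector that services it.
+ */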
+
+ /* allocate q_vector and rings */
+ q_vector = kzalloc(size, GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+#ifndef IGB_NO_LRO
+ /* initialize LRO */
+ __skb_queue_head_init(&q_vector->lrolist.active);
+
+#endif
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igb_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+ q_vector->itr_val = IGB_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ /* initialize ITR */
+ if (rxr_count) {
+ /* rx or rx/tx vector */
+ if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ } else {
+ /* tx only vector */
+ if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
+ q_vector->itr_val = adapter->tx_itr_setting;
+ }
+
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ igb_add_ring(ring, &q_vector->tx);
+
+ /* For 82575, context index must be unique per ring. */
+ if (adapter->hw.mac.type == e1000_82575)
+ set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ igb_add_ring(ring, &q_vector->rx);
+
+#ifndef HAVE_NDO_SET_FEATURES
+ /* enable rx checksum */
+ set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
+
+#endif
+ /* set flag indicating ring supports SCTP checksum offload */
+ if (adapter->hw.mac.type >= e1000_82576)
+ set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
+
+ if ((adapter->hw.mac.type == e1000_i350) ||
+ (adapter->hw.mac.type == e1000_i354))
+ set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
+
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
+}
+
+/**
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+{
+ int q_vectors = adapter->num_q_vectors;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int err;
+
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++) {
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ 0, 0, 1, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining--;
+ rxr_idx++;
+ }
+ }
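+ /* Illustrative distribution (added example): with 8 Rx and 8 Tx queues
+ * over 8 q_vectors the Rx-only pass above is skipped and DIV_ROUND_UP
+ * below hands every vector exactly one Rx and one Tx ring.
+ */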
+
+ for (; v_idx < q_vectors; v_idx++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ tqpv, txr_idx, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
+ return 0;
+
+err_out:
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
+
+ return -ENOMEM;
+}
+
+/**
+ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ **/
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ igb_set_interrupt_capability(adapter, msix);
+
+ err = igb_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ igb_cache_ring_register(adapter);
+
+ return 0;
+
+err_alloc_q_vectors:
+ igb_reset_interrupt_capability(adapter);
+ return err;
+}
+
+/**
+ * igb_request_irq - initialize interrupts
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int igb_request_irq(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (adapter->msix_entries) {
+ err = igb_request_msix(adapter);
+ if (!err)
+ goto request_done;
+ /* fall back to MSI */
+ igb_free_all_tx_resources(adapter);
+ igb_free_all_rx_resources(adapter);
+
+ igb_clear_interrupt_scheme(adapter);
+ igb_reset_sriov_capability(adapter);
+ err = igb_init_interrupt_scheme(adapter, false);
+ if (err)
+ goto request_done;
+ igb_setup_all_tx_resources(adapter);
+ igb_setup_all_rx_resources(adapter);
+ igb_configure(adapter);
+ }
+
+ igb_assign_vector(adapter->q_vector[0], 0);
+
+ if (adapter->flags & IGB_FLAG_HAS_MSI) {
+ err = request_irq(pdev->irq, &igb_intr_msi, 0,
+ netdev->name, adapter);
+ if (!err)
+ goto request_done;
+
+ /* fall back to legacy interrupts */
+ igb_reset_interrupt_capability(adapter);
+ adapter->flags &= ~IGB_FLAG_HAS_MSI;
+ }
+
+ err = request_irq(pdev->irq, &igb_intr, IRQF_SHARED,
+ netdev->name, adapter);
+
+ if (err)
+ dev_err(pci_dev_to_dev(pdev), "Error %d getting interrupt\n",
+ err);
+
+request_done:
+ return err;
+}
+
+static void igb_free_irq(struct igb_adapter *adapter)
+{
+ if (adapter->msix_entries) {
+ int vector = 0, i;
+
+ free_irq(adapter->msix_entries[vector++].vector, adapter);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ free_irq(adapter->msix_entries[vector++].vector,
+ adapter->q_vector[i]);
+ } else {
+ free_irq(adapter->pdev->irq, adapter);
+ }
+}
+
+/**
+ * igb_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void igb_irq_disable(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+ * we need to be careful when disabling interrupts. The VFs are also
+ * mapped into these registers and so clearing the bits can cause
+ * issues on the VF drivers so we only need to clear what we set
+ */
+ if (adapter->msix_entries) {
+ u32 regval = E1000_READ_REG(hw, E1000_EIAM);
+ E1000_WRITE_REG(hw, E1000_EIAM, regval & ~adapter->eims_enable_mask);
+ E1000_WRITE_REG(hw, E1000_EIMC, adapter->eims_enable_mask);
+ regval = E1000_READ_REG(hw, E1000_EIAC);
+ E1000_WRITE_REG(hw, E1000_EIAC, regval & ~adapter->eims_enable_mask);
+ }
+
+ E1000_WRITE_REG(hw, E1000_IAM, 0);
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+
+ if (adapter->msix_entries) {
+ int vector = 0, i;
+
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+/**
+ * igb_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void igb_irq_enable(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
+ u32 regval = E1000_READ_REG(hw, E1000_EIAC);
+ E1000_WRITE_REG(hw, E1000_EIAC, regval | adapter->eims_enable_mask);
+ regval = E1000_READ_REG(hw, E1000_EIAM);
+ E1000_WRITE_REG(hw, E1000_EIAM, regval | adapter->eims_enable_mask);
+ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask);
+ if (adapter->vfs_allocated_count) {
+ E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF);
+ ims |= E1000_IMS_VMMB;
+ if (adapter->mdd)
+ if ((adapter->hw.mac.type == e1000_i350) ||
+ (adapter->hw.mac.type == e1000_i354))
+ ims |= E1000_IMS_MDDET;
+ }
+ E1000_WRITE_REG(hw, E1000_IMS, ims);
+ } else {
+ E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK |
+ E1000_IMS_DRSTA);
+ E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK |
+ E1000_IMS_DRSTA);
+ }
+}
+
+static void igb_update_mng_vlan(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 vid = adapter->hw.mng_cookie.vlan_id;
+ u16 old_vid = adapter->mng_vlan_id;
+
+ if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+ /* add VID to filter table */
+ igb_vfta_set(adapter, vid, TRUE);
+ adapter->mng_vlan_id = vid;
+ } else {
+ adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+ }
+
+ if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
+ (vid != old_vid) &&
+#ifdef HAVE_VLAN_RX_REGISTER
+ !vlan_group_get_device(adapter->vlgrp, old_vid)) {
+#else
+ !test_bit(old_vid, adapter->active_vlans)) {
+#endif
+ /* remove VID from filter table */
+ igb_vfta_set(adapter, old_vid, FALSE);
+ }
+}
+
+/**
+ * igb_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ *
+ **/
+static void igb_release_hw_control(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+
+ /* Let firmware take over control of h/w */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/**
+ * igb_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded.
+ *
+ **/
+static void igb_get_hw_control(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/**
+ * igb_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
+ **/
+static void igb_configure(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ igb_get_hw_control(adapter);
+ igb_set_rx_mode(netdev);
+
+ igb_restore_vlan(adapter);
+
+ igb_setup_tctl(adapter);
+ igb_setup_mrqc(adapter);
+ igb_setup_rctl(adapter);
+
+ igb_configure_tx(adapter);
+ igb_configure_rx(adapter);
+
+ e1000_rx_fifo_flush_82575(&adapter->hw);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+ if (adapter->num_tx_queues > 1)
+ netdev->features |= NETIF_F_MULTI_QUEUE;
+ else
+ netdev->features &= ~NETIF_F_MULTI_QUEUE;
+#endif
+
+ /* call igb_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+ igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
+ }
+}
+
+/**
+ * igb_power_up_link - Power up the phy/serdes link
+ * @adapter: address of board private structure
+ **/
+void igb_power_up_link(struct igb_adapter *adapter)
+{
+ e1000_phy_hw_reset(&adapter->hw);
+
+ if (adapter->hw.phy.media_type == e1000_media_type_copper)
+ e1000_power_up_phy(&adapter->hw);
+ else
+ e1000_power_up_fiber_serdes_link(&adapter->hw);
+}
+
+/**
+ * igb_power_down_link - Power down the phy/serdes link
+ * @adapter: address of board private structure
+ */
+static void igb_power_down_link(struct igb_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == e1000_media_type_copper)
+ e1000_power_down_phy(&adapter->hw);
+ else
+ e1000_shutdown_fiber_serdes_link(&adapter->hw);
+}
+
+/* Detect and switch function for Media Auto Sense */
+static void igb_check_swap_media(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext, connsw;
+ bool swap_now = false;
+ bool link;
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ connsw = E1000_READ_REG(hw, E1000_CONNSW);
+ link = igb_has_link(adapter);
+
+ /* we need to perform a live swap if the current media is copper and we
+ * have fiber/serdes media to switch to.
+ */
+
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+ swap_now = true;
+ } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ /* copper signal takes time to appear */
+ if (adapter->copper_tries < 2) {
+ adapter->copper_tries++;
+ connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
+ return;
+ } else {
+ adapter->copper_tries = 0;
+ if ((connsw & E1000_CONNSW_PHYSD) &&
+ (!(connsw & E1000_CONNSW_PHY_PDN))) {
+ swap_now = true;
+ connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
+ }
+ }
+ }
+
+ if (swap_now) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s:MAS: changing media to fiber/serdes\n",
+ adapter->netdev->name);
+ ctrl_ext |=
+ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ adapter->copper_tries = 0;
+ break;
+ case e1000_media_type_internal_serdes:
+ case e1000_media_type_fiber:
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s:MAS: changing media to copper\n",
+ adapter->netdev->name);
+ ctrl_ext &=
+ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ break;
+ default:
+ /* shouldn't get here during regular operation */
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "%s:AMS: Invalid media type found, returning\n",
+ adapter->netdev->name);
+ break;
+ }
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ }
+}
+
+#ifdef HAVE_I2C_SUPPORT
+/* igb_get_i2c_data - Reads the I2C SDA data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ */
+static int igb_get_i2c_data(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+}
+
+/* igb_set_i2c_data - Sets the I2C data bit
+ * @data: pointer to hardware structure
+ * @state: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ */
+static void igb_set_i2c_data(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ if (state)
+ i2cctl |= E1000_I2C_DATA_OUT;
+ else
+ i2cctl &= ~E1000_I2C_DATA_OUT;
+
+ i2cctl &= ~E1000_I2C_DATA_OE_N;
+ i2cctl |= E1000_I2C_CLK_OE_N;
+
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+}
+
+/* igb_set_i2c_clk - Sets the I2C SCL clock
+ * @data: pointer to hardware structure
+ * @state: state to set clock
+ *
+ * Sets the I2C clock line to state
+ */
+static void igb_set_i2c_clk(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ if (state) {
+ i2cctl |= E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ } else {
+ i2cctl &= ~E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ }
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/* igb_get_i2c_clk - Gets the I2C SCL clock state
+ * @data: pointer to hardware structure
+ *
+ * Gets the I2C clock state
+ */
+static int igb_get_i2c_clk(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+}
+
+static const struct i2c_algo_bit_data igb_i2c_algo = {
+ .setsda = igb_set_i2c_data,
+ .setscl = igb_set_i2c_clk,
+ .getsda = igb_get_i2c_data,
+ .getscl = igb_get_i2c_clk,
+ .udelay = 5,
+ .timeout = 20,
+};
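+/* Note (added, assumes standard i2c-algo-bit semantics): .udelay is the
+ * half-period in microseconds, so a value of 5 yields roughly a 100 kHz SCL
+ * clock, and .timeout (in jiffies) bounds how long a stalled transfer is
+ * retried.
+ */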
+
+/* igb_init_i2c - Init I2C interface
+ * @adapter: pointer to adapter structure
+ *
+ */
+static s32 igb_init_i2c(struct igb_adapter *adapter)
+{
+ s32 status = E1000_SUCCESS;
+
+ /* I2C interface is only supported on i350 devices */
+ if (adapter->hw.mac.type != e1000_i350)
+ return E1000_SUCCESS;
+
+ /* Initialize the i2c bus which is controlled by the registers.
+ * This bus will use the i2c_algo_bit structure that implements
+ * the protocol through toggling of the 4 bits in the register.
+ */
+ adapter->i2c_adap.owner = THIS_MODULE;
+ adapter->i2c_algo = igb_i2c_algo;
+ adapter->i2c_algo.data = adapter;
+ adapter->i2c_adap.algo_data = &adapter->i2c_algo;
+ adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
+ strlcpy(adapter->i2c_adap.name, "igb BB",
+ sizeof(adapter->i2c_adap.name));
+ status = i2c_bit_add_bus(&adapter->i2c_adap);
+ return status;
+}
+
+#endif /* HAVE_I2C_SUPPORT */
+/**
+ * igb_up - Open the interface and prepare it to handle traffic
+ * @adapter: board private structure
+ **/
+int igb_up(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ /* hardware has been reset, we need to reload some things */
+ igb_configure(adapter);
+
+ clear_bit(__IGB_DOWN, &adapter->state);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ napi_enable(&(adapter->q_vector[i]->napi));
+
+ if (adapter->msix_entries)
+ igb_configure_msix(adapter);
+ else
+ igb_assign_vector(adapter->q_vector[0], 0);
+
+ igb_configure_lli(adapter);
+
+ /* Clear any pending interrupts. */
+ E1000_READ_REG(hw, E1000_ICR);
+ igb_irq_enable(adapter);
+
+ /* notify VFs that reset has been completed */
+ if (adapter->vfs_allocated_count) {
+ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg_data |= E1000_CTRL_EXT_PFRSTD;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
+ }
+
+ netif_tx_start_all_queues(adapter->netdev);
+
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ schedule_work(&adapter->dma_err_task);
+ /* start the watchdog. */
+ hw->mac.get_link_status = 1;
+ schedule_work(&adapter->watchdog_task);
+
+ if ((adapter->flags & IGB_FLAG_EEE) &&
+ (!hw->dev_spec._82575.eee_disable))
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
+ return 0;
+}
+
+void igb_down(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tctl, rctl;
+ int i;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ set_bit(__IGB_DOWN, &adapter->state);
+
+ /* disable receives in the hardware */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+ /* flush and sleep below */
+
+ netif_tx_stop_all_queues(netdev);
+
+ /* disable transmits in the hardware */
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ /* flush both disables and wait for them to finish */
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ napi_disable(&(adapter->q_vector[i]->napi));
+
+ igb_irq_disable(adapter);
+
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+
+ del_timer_sync(&adapter->watchdog_timer);
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ del_timer_sync(&adapter->dma_err_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ netif_carrier_off(netdev);
+
+ /* record the stats before reset */
+ igb_update_stats(adapter);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+#ifdef HAVE_PCI_ERS
+ if (!pci_channel_offline(adapter->pdev))
+ igb_reset(adapter);
+#else
+ igb_reset(adapter);
+#endif
+ igb_clean_all_tx_rings(adapter);
+ igb_clean_all_rx_rings(adapter);
+#ifdef IGB_DCA
+ /* since we reset the hardware DCA settings were cleared */
+ igb_setup_dca(adapter);
+#endif
+}
+
+void igb_reinit_locked(struct igb_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ igb_down(adapter);
+ igb_up(adapter);
+ clear_bit(__IGB_RESETTING, &adapter->state);
+}
+
+/**
+ * igb_enable_mas - Media Autosense re-enable after swap
+ *
+ * @adapter: adapter struct
+ **/
+static s32 igb_enable_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 connsw;
+ s32 ret_val = E1000_SUCCESS;
+
+ connsw = E1000_READ_REG(hw, E1000_CONNSW);
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ /* configure for SerDes media detect */
+ if (!(connsw & E1000_CONNSW_SERDESD)) {
+ connsw |= E1000_CONNSW_ENRGSRC;
+ connsw |= E1000_CONNSW_AUTOSENSE_EN;
+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
+ E1000_WRITE_FLUSH(hw);
+ } else if (connsw & E1000_CONNSW_SERDESD) {
+ /* already SerDes, no need to enable anything */
+ return ret_val;
+ } else {
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s:MAS: Unable to configure feature, disabling..\n",
+ adapter->netdev->name);
+ adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
+ }
+ }
+ return ret_val;
+}
+
+void igb_reset(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_fc_info *fc = &hw->fc;
+ u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
+
+ /* Repartition Pba for greater than 9k mtu
+ * To take effect CTRL.RST is required.
+ */
+ switch (mac->type) {
+ case e1000_i350:
+ case e1000_82580:
+ case e1000_i354:
+ pba = E1000_READ_REG(hw, E1000_RXPBS);
+ pba = e1000_rxpbs_adjust_82580(pba);
+ break;
+ case e1000_82576:
+ pba = E1000_READ_REG(hw, E1000_RXPBS);
+ pba &= E1000_RXPBS_SIZE_MASK_82576;
+ break;
+ case e1000_82575:
+ case e1000_i210:
+ case e1000_i211:
+ default:
+ pba = E1000_PBA_34K;
+ break;
+ }
+
+ if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ (mac->type < e1000_82576)) {
+ /* adjust PBA for jumbo frames */
+ E1000_WRITE_REG(hw, E1000_PBA, pba);
+
+ /* To maintain wire speed transmits, the Tx FIFO should be
+ * large enough to accommodate two full transmit packets,
+ * rounded up to the next 1KB and expressed in KB. Likewise,
+ * the Rx FIFO should be large enough to accommodate at least
+ * one full receive packet and is similarly rounded up and
+ * expressed in KB. */
+ pba = E1000_READ_REG(hw, E1000_PBA);
+ /* upper 16 bits has Tx packet buffer allocation size in KB */
+ tx_space = pba >> 16;
+ /* lower 16 bits has Rx packet buffer allocation size in KB */
+ pba &= 0xffff;
+ /* the tx fifo also stores 16 bytes of information about the tx
+ * but don't include ethernet FCS because hardware appends it */
+ min_tx_space = (adapter->max_frame_size +
+ sizeof(union e1000_adv_tx_desc) -
+ ETH_FCS_LEN) * 2;
+ min_tx_space = ALIGN(min_tx_space, 1024);
+ min_tx_space >>= 10;
+ /* software strips receive CRC, so leave room for it */
+ min_rx_space = adapter->max_frame_size;
+ min_rx_space = ALIGN(min_rx_space, 1024);
+ min_rx_space >>= 10;
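+ /* Worked example (illustrative): for a 9018-byte jumbo frame and a
+ * 16-byte advanced Tx descriptor, min_tx_space =
+ * ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 KB and min_rx_space =
+ * ALIGN(9018, 1024) >> 10 = 9 KB.
+ */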
+
+ /* If current Tx allocation is less than the min Tx FIFO size,
+ * and the min Tx FIFO size is less than the current Rx FIFO
+ * allocation, take space away from current Rx allocation */
+ if (tx_space < min_tx_space &&
+ ((min_tx_space - tx_space) < pba)) {
+ pba = pba - (min_tx_space - tx_space);
+
+ /* if short on rx space, rx wins and must trump tx
+ * adjustment */
+ if (pba < min_rx_space)
+ pba = min_rx_space;
+ }
+ E1000_WRITE_REG(hw, E1000_PBA, pba);
+ }
+
+ /* flow control settings */
+ /* The high water mark must be low enough to fit one full frame
+ * (or the size used for early receive) above it in the Rx FIFO.
+ * Set it to the lower of:
+ * - 90% of the Rx FIFO size, or
+ * - the full Rx FIFO size minus one full frame */
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - 2 * adapter->max_frame_size));
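+ /* Worked example (illustrative): with pba = 34 KB and a 1522-byte max
+ * frame, 90% of the FIFO is 31334 bytes and the FIFO minus two frames
+ * is 31772 bytes, so hwm = 31334 and high_water rounds down to 31328.
+ */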
+
+ fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
+ fc->low_water = fc->high_water - 16;
+ fc->pause_time = 0xFFFF;
+ fc->send_xon = 1;
+ fc->current_mode = fc->requested_mode;
+
+ /* disable receive for all VFs and wait one second */
+ if (adapter->vfs_allocated_count) {
+ int i;
+ /*
+ * Clear all flags except indication that the PF has set
+ * the VF MAC addresses administratively
+ */
+ for (i = 0 ; i < adapter->vfs_allocated_count; i++)
+ adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
+
+ /* ping all the active vfs to let them know we are going down */
+ igb_ping_all_vfs(adapter);
+
+ /* disable transmits and receives */
+ E1000_WRITE_REG(hw, E1000_VFRE, 0);
+ E1000_WRITE_REG(hw, E1000_VFTE, 0);
+ }
+
+ /* Allow time for pending master requests to run */
+ e1000_reset_hw(hw);
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ e1000_setup_init_funcs(hw, TRUE);
+ igb_check_options(adapter);
+ e1000_get_bus_info(hw);
+ adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+ }
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (igb_enable_mas(adapter))
+ dev_err(pci_dev_to_dev(pdev),
+ "Error enabling Media Auto Sense\n");
+ }
+ if (e1000_init_hw(hw))
+ dev_err(pci_dev_to_dev(pdev), "Hardware Error\n");
+
+ /*
+ * Flow control settings reset on hardware reset, so guarantee flow
+ * control is off when forcing speed.
+ */
+ if (!hw->mac.autoneg)
+ e1000_force_mac_fc(hw);
+
+ igb_init_dmac(adapter, pba);
+ /* Re-initialize the thermal sensor on i350 devices. */
+ if (mac->type == e1000_i350 && hw->bus.func == 0) {
+ /*
+ * If present, re-initialize the external thermal sensor
+ * interface.
+ */
+ if (adapter->ets)
+ e1000_set_i2c_bb(hw);
+ e1000_init_thermal_sensor_thresh(hw);
+ }
+
+ /* Re-establish EEE setting */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (mac->type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ e1000_set_eee_i350(hw);
+ break;
+ case e1000_i354:
+ e1000_set_eee_i354(hw);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!netif_running(adapter->netdev))
+ igb_power_down_link(adapter);
+
+ igb_update_mng_vlan(adapter);
+
+ /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+ E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+
+
+#ifdef HAVE_PTP_1588_CLOCK
+ /* Re-enable PTP, where applicable. */
+ igb_ptp_reset(adapter);
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ e1000_get_phy_info(hw);
+
+ adapter->devrc++;
+}
+
+#ifdef HAVE_NDO_SET_FEATURES
+static kni_netdev_features_t igb_fix_features(struct net_device *netdev,
+ kni_netdev_features_t features)
+{
+ /*
+ * Since there is no support for enabling Tx VLAN acceleration
+ * separately, make sure the Tx flag is cleared whenever Rx is.
+ */
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+#else
+ if (!(features & NETIF_F_HW_VLAN_RX))
+ features &= ~NETIF_F_HW_VLAN_TX;
+#endif
+
+ /* If Rx checksum is disabled, then LRO should also be disabled */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
+static int igb_set_features(struct net_device *netdev,
+ kni_netdev_features_t features)
+{
+ u32 changed = netdev->features ^ features;
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+#else
+ if (changed & NETIF_F_HW_VLAN_RX)
+#endif
+ igb_vlan_mode(netdev, features);
+
+ return 0;
+}
+
+#ifdef NTF_SELF
+#ifdef USE_CONST_DEV_UC_CHAR
+static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 flags)
+#else
+static int igb_ndo_fdb_add(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr,
+ u16 flags)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ if (!(adapter->vfs_allocated_count))
+ return -EOPNOTSUPP;
+
+ /* Hardware does not support aging addresses so if a
+ * ndm_state is given only allow permanent addresses
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ pr_info("%s: FDB only supports static addresses\n",
+ igb_driver_name);
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
+ u32 rar_uc_entries = hw->mac.rar_entry_count -
+ (adapter->vfs_allocated_count + 1);
+
+ if (netdev_uc_count(dev) < rar_uc_entries)
+ err = dev_uc_add_excl(dev, addr);
+ else
+ err = -ENOMEM;
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_add_excl(dev, addr);
+ } else {
+ err = -EINVAL;
+ }
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int igb_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ const unsigned char *addr)
+#else
+static int igb_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int err = -EOPNOTSUPP;
+
+ if (ndm->ndm_state & NUD_PERMANENT) {
+ pr_info("%s: FDB only supports static addresses\n",
+ igb_driver_name);
+ return -EINVAL;
+ }
+
+ if (adapter->vfs_allocated_count) {
+ if (is_unicast_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int igb_ndo_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->vfs_allocated_count)
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+
+#ifdef HAVE_BRIDGE_ATTRIBS
+static int igb_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct nlattr *attr, *br_spec;
+ int rem;
+
+ if (!(adapter->vfs_allocated_count))
+ return -EOPNOTSUPP;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ case e1000_i354:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if (mode == BRIDGE_MODE_VEPA) {
+ e1000_vmdq_set_loopback_pf(hw, 0);
+ adapter->flags &= ~IGB_FLAG_LOOPBACK_ENABLE;
+ } else if (mode == BRIDGE_MODE_VEB) {
+ e1000_vmdq_set_loopback_pf(hw, 1);
+ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE;
+ } else
+ return -EINVAL;
+
+ netdev_info(adapter->netdev, "enabling bridge mode: %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ }
+
+ return 0;
+}
+
+#ifdef HAVE_BRIDGE_FILTER
+static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask)
+#else
+static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ u16 mode;
+
+ if (!(adapter->vfs_allocated_count))
+ return -EOPNOTSUPP;
+
+ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE)
+ mode = BRIDGE_MODE_VEB;
+ else
+ mode = BRIDGE_MODE_VEPA;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+}
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif /* NTF_SELF */
+
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef HAVE_NET_DEVICE_OPS
+static const struct net_device_ops igb_netdev_ops = {
+ .ndo_open = igb_open,
+ .ndo_stop = igb_close,
+ .ndo_start_xmit = igb_xmit_frame,
+ .ndo_get_stats = igb_get_stats,
+ .ndo_set_rx_mode = igb_set_rx_mode,
+ .ndo_set_mac_address = igb_set_mac,
+ .ndo_change_mtu = igb_change_mtu,
+ .ndo_do_ioctl = igb_ioctl,
+ .ndo_tx_timeout = igb_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
+#ifdef IFLA_VF_MAX
+ .ndo_set_vf_mac = igb_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
+#ifdef HAVE_VF_MIN_MAX_TXRATE
+ .ndo_set_vf_rate = igb_ndo_set_vf_bw,
+#else /* HAVE_VF_MIN_MAX_TXRATE */
+ .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+#endif /* HAVE_VF_MIN_MAX_TXRATE */
+ .ndo_get_vf_config = igb_ndo_get_vf_config,
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
+#endif /* IFLA_VF_MAX */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = igb_netpoll,
+#endif
+#ifdef HAVE_NDO_SET_FEATURES
+ .ndo_fix_features = igb_fix_features,
+ .ndo_set_features = igb_set_features,
+#endif
+#ifdef HAVE_VLAN_RX_REGISTER
+ .ndo_vlan_rx_register = igb_vlan_mode,
+#endif
+#ifndef HAVE_RHEL6_NETDEV_OPS_EXT_FDB
+#ifdef NTF_SELF
+ .ndo_fdb_add = igb_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+ .ndo_fdb_del = igb_ndo_fdb_del,
+ .ndo_fdb_dump = igb_ndo_fdb_dump,
+#endif
+#endif /* ! HAVE_RHEL6_NETDEV_OPS_EXT_FDB */
+#ifdef HAVE_BRIDGE_ATTRIBS
+ .ndo_bridge_setlink = igb_ndo_bridge_setlink,
+ .ndo_bridge_getlink = igb_ndo_bridge_getlink,
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif
+};
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+static const struct net_device_ops igb_vmdq_ops = {
+ .ndo_open = &igb_vmdq_open,
+ .ndo_stop = &igb_vmdq_close,
+ .ndo_start_xmit = &igb_vmdq_xmit_frame,
+ .ndo_get_stats = &igb_vmdq_get_stats,
+ .ndo_set_rx_mode = &igb_vmdq_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = &igb_vmdq_set_mac,
+ .ndo_change_mtu = &igb_vmdq_change_mtu,
+ .ndo_tx_timeout = &igb_vmdq_tx_timeout,
+ .ndo_vlan_rx_register = &igb_vmdq_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid,
+};
+
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+#endif /* HAVE_NET_DEVICE_OPS */
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+void igb_assign_vmdq_netdev_ops(struct net_device *vnetdev)
+{
+#ifdef HAVE_NET_DEVICE_OPS
+ vnetdev->netdev_ops = &igb_vmdq_ops;
+#else
+ dev->open = &igb_vmdq_open;
+ dev->stop = &igb_vmdq_close;
+ dev->hard_start_xmit = &igb_vmdq_xmit_frame;
+ dev->get_stats = &igb_vmdq_get_stats;
+#ifdef HAVE_SET_RX_MODE
+ dev->set_rx_mode = &igb_vmdq_set_rx_mode;
+#endif
+ dev->set_multicast_list = &igb_vmdq_set_rx_mode;
+ dev->set_mac_address = &igb_vmdq_set_mac;
+ dev->change_mtu = &igb_vmdq_change_mtu;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = &igb_vmdq_tx_timeout;
+#endif
+#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
+ dev->vlan_rx_register = &igb_vmdq_vlan_rx_register;
+ dev->vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid;
+#endif
+#endif
+ igb_vmdq_set_ethtool_ops(vnetdev);
+ vnetdev->watchdog_timeo = 5 * HZ;
+
+}
+
+int igb_init_vmdq_netdevs(struct igb_adapter *adapter)
+{
+ int pool, err = 0, base_queue;
+ struct net_device *vnetdev;
+ struct igb_vmdq_adapter *vmdq_adapter;
+
+ for (pool = 1; pool < adapter->vmdq_pools; pool++) {
+ int qpp = (!adapter->rss_queues ? 1 : adapter->rss_queues);
+ base_queue = pool * qpp;
+ vnetdev = alloc_etherdev(sizeof(struct igb_vmdq_adapter));
+ if (!vnetdev) {
+ err = -ENOMEM;
+ break;
+ }
+ vmdq_adapter = netdev_priv(vnetdev);
+ vmdq_adapter->vnetdev = vnetdev;
+ vmdq_adapter->real_adapter = adapter;
+ vmdq_adapter->rx_ring = adapter->rx_ring[base_queue];
+ vmdq_adapter->tx_ring = adapter->tx_ring[base_queue];
+ igb_assign_vmdq_netdev_ops(vnetdev);
+ snprintf(vnetdev->name, IFNAMSIZ, "%sv%d",
+ adapter->netdev->name, pool);
+ vnetdev->features = adapter->netdev->features;
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+ vnetdev->vlan_features = adapter->netdev->vlan_features;
+#endif
+ adapter->vmdq_netdev[pool-1] = vnetdev;
+ err = register_netdev(vnetdev);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int igb_remove_vmdq_netdevs(struct igb_adapter *adapter)
+{
+ int pool, err = 0;
+
+ for (pool = 1; pool < adapter->vmdq_pools; pool++) {
+ unregister_netdev(adapter->vmdq_netdev[pool-1]);
+ free_netdev(adapter->vmdq_netdev[pool-1]);
+ adapter->vmdq_netdev[pool-1] = NULL;
+ }
+ return err;
+}
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+
+/**
+ * igb_set_fw_version - Configure version string for ethtool
+ * @adapter: adapter struct
+ *
+ **/
+static void igb_set_fw_version(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_fw_version fw;
+
+ e1000_get_fw_version(hw, &fw);
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (!(e1000_get_flash_presence_i210(hw))) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor, fw.invm_img_type);
+ break;
+ }
+ /* fall through */
+ default:
+ /* if option rom is valid, display its version too*/
+ if (fw.or_valid) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
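+ /* Illustrative output (example values only): a string such as
+ * "1.63, 0x800009fa, 1.5.1" - EEPROM major.minor, eTrack ID,
+ * then option ROM major.build.patch.
+ */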
+ /* no option rom */
+ } else {
+ if (fw.etrack_id != 0x0000) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
+ }
+ }
+ break;
+ }
+
+ return;
+}
+
+/**
+ * igb_init_mas - init Media Autosense feature if enabled in the NVM
+ *
+ * @adapter: adapter struct
+ **/
+static void igb_init_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_data;
+
+ e1000_read_nvm(hw, NVM_COMPAT, 1, &eeprom_data);
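+ /* Added note: the switch below checks a per-port Media Autosense
+ * enable bit (IGB_MAS_ENABLE_0..3) in the NVM compatibility word, so
+ * MAS can be enabled on some ports of a multi-port adapter and not on
+ * others.
+ */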
+ switch (hw->bus.func) {
+ case E1000_FUNC_0:
+ if (eeprom_data & IGB_MAS_ENABLE_0)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ case E1000_FUNC_1:
+ if (eeprom_data & IGB_MAS_ENABLE_1)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ case E1000_FUNC_2:
+ if (eeprom_data & IGB_MAS_ENABLE_2)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ case E1000_FUNC_3:
+ if (eeprom_data & IGB_MAS_ENABLE_3)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ default:
+ /* Shouldn't get here */
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "%s:AMS: Invalid port configuration, returning\n",
+ adapter->netdev->name);
+ break;
+ }
+}
+
+/**
+ * igb_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in igb_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * igb_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit igb_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct igb_adapter *adapter;
+ struct e1000_hw *hw;
+ u16 eeprom_data = 0;
+ u8 pba_str[E1000_PBANUM_LENGTH];
+ s32 ret_val;
+ static int global_quad_port_a; /* global quad port a indication */
+ int i, err, pci_using_dac;
+ static int cards_found;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ pci_using_dac = 0;
+ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
+ if (err) {
+ IGB_ERR("No usable DMA configuration, "
+ "aborting\n");
+ goto err_dma;
+ }
+ }
+ }
+
+#ifndef HAVE_ASPM_QUIRKS
+ /* 82575 requires that the pci-e link partner disable the L0s state */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+ default:
+ break;
+ }
+
+#endif /* HAVE_ASPM_QUIRKS */
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev,
+ IORESOURCE_MEM),
+ igb_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ err = -ENOMEM;
+#ifdef HAVE_TX_MQ
+ netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
+ IGB_MAX_TX_QUEUES);
+#else
+ netdev = alloc_etherdev(sizeof(struct igb_adapter));
+#endif /* HAVE_TX_MQ */
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->port_num = hw->bus.func;
+ adapter->msg_enable = (1 << debug) - 1;
+
+#ifdef HAVE_PCI_ERS
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_ioremap;
+#endif
+ err = -EIO;
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr)
+ goto err_ioremap;
+
+#ifdef HAVE_NET_DEVICE_OPS
+ netdev->netdev_ops = &igb_netdev_ops;
+#else /* HAVE_NET_DEVICE_OPS */
+ netdev->open = &igb_open;
+ netdev->stop = &igb_close;
+ netdev->get_stats = &igb_get_stats;
+#ifdef HAVE_SET_RX_MODE
+ netdev->set_rx_mode = &igb_set_rx_mode;
+#endif
+ netdev->set_multicast_list = &igb_set_rx_mode;
+ netdev->set_mac_address = &igb_set_mac;
+ netdev->change_mtu = &igb_change_mtu;
+ netdev->do_ioctl = &igb_ioctl;
+#ifdef HAVE_TX_TIMEOUT
+ netdev->tx_timeout = &igb_tx_timeout;
+#endif
+ netdev->vlan_rx_register = igb_vlan_mode;
+ netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
+ netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = igb_netpoll;
+#endif
+ netdev->hard_start_xmit = &igb_xmit_frame;
+#endif /* HAVE_NET_DEVICE_OPS */
+ igb_set_ethtool_ops(netdev);
+#ifdef HAVE_TX_TIMEOUT
+ netdev->watchdog_timeo = 5 * HZ;
+#endif
+
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ adapter->bd_number = cards_found;
+
+ /* setup the private structure */
+ err = igb_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ e1000_get_bus_info(hw);
+
+ hw->phy.autoneg_wait_to_complete = FALSE;
+ hw->mac.adaptive_ifs = FALSE;
+
+ /* Copper options */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ hw->phy.mdix = AUTO_ALL_MODES;
+ hw->phy.disable_polarity_correction = FALSE;
+ hw->phy.ms_type = e1000_ms_hw_default;
+ }
+
+ if (e1000_check_reset_block(hw))
+ dev_info(pci_dev_to_dev(pdev),
+ "PHY reset is blocked due to SOL/IDER session.\n");
+
+ /*
+ * features is zeroed at allocation, but igb_sw_init may already have
+ * set some bits, so OR the new flags in rather than assigning them.
+ */
+ netdev->features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+#ifdef NETIF_F_IPV6_CSUM
+ NETIF_F_IPV6_CSUM |
+#endif
+#ifdef NETIF_F_TSO
+ NETIF_F_TSO |
+#ifdef NETIF_F_TSO6
+ NETIF_F_TSO6 |
+#endif
+#endif /* NETIF_F_TSO */
+#ifdef NETIF_F_RXHASH
+ NETIF_F_RXHASH |
+#endif
+ NETIF_F_RXCSUM |
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+#else
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_TX;
+#endif
+
+ if (hw->mac.type >= e1000_82576)
+ netdev->features |= NETIF_F_SCTP_CSUM;
+
+#ifdef HAVE_NDO_SET_FEATURES
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+#ifndef IGB_NO_LRO
+
+ /* give us the option of enabling LRO later */
+ netdev->hw_features |= NETIF_F_LRO;
+#endif
+#else
+#ifdef NETIF_F_GRO
+
+ /* this is only needed on kernels prior to 2.6.39 */
+ netdev->features |= NETIF_F_GRO;
+#endif
+#endif
+
+ /* set this bit last since it cannot be part of hw_features */
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#else
+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
+#endif
+
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+ netdev->vlan_features |= NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG;
+
+#endif
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
+#ifdef DEBUG
+ if (adapter->dmac != IGB_DMAC_DISABLE)
+ printk("%s: DMA Coalescing is enabled..\n", netdev->name);
+#endif
+
+ /* before reading the NVM, reset the controller to put the device in a
+ * known good starting state */
+ e1000_reset_hw(hw);
+
+ /* make sure the NVM is good */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ dev_err(pci_dev_to_dev(pdev), "The NVM Checksum Is Not"
+ " Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ /* copy the MAC address out of the NVM */
+ if (e1000_read_mac_addr(hw))
+ dev_err(pci_dev_to_dev(pdev), "NVM Read Error\n");
+ memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+#ifdef ETHTOOL_GPERMADDR
+ memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+#else
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+#endif
+ dev_err(pci_dev_to_dev(pdev), "Invalid MAC Address\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len);
+ adapter->mac_table[0].queue = adapter->vfs_allocated_count;
+ adapter->mac_table[0].state = (IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE);
+ igb_rar_set(adapter, 0);
+
+ /* get firmware version for ethtool -i */
+ igb_set_fw_version(adapter);
+
+ /* Check if Media Autosense is enabled */
+ if (hw->mac.type == e1000_82580)
+ igb_init_mas(adapter);
+ setup_timer(&adapter->watchdog_timer, &igb_watchdog,
+ (unsigned long) adapter);
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ setup_timer(&adapter->dma_err_timer, &igb_dma_err_timer,
+ (unsigned long) adapter);
+ setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
+ (unsigned long) adapter);
+
+ INIT_WORK(&adapter->reset_task, igb_reset_task);
+ INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ INIT_WORK(&adapter->dma_err_task, igb_dma_err_task);
+
+ /* Initialize link properties that are user-changeable */
+ adapter->fc_autoneg = true;
+ hw->mac.autoneg = true;
+ hw->phy.autoneg_advertised = 0x2f;
+
+ hw->fc.requested_mode = e1000_fc_default;
+ hw->fc.current_mode = e1000_fc_default;
+
+ e1000_validate_mdi_setting(hw);
+
+ /* By default, support wake on port A */
+ if (hw->bus.func == 0)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+ /* Check the NVM for wake support for non-port A ports */
+ if (hw->mac.type >= e1000_82580)
+ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &eeprom_data);
+ else if (hw->bus.func == 1)
+ e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+
+ if (eeprom_data & IGB_EEPROM_APME)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+ /* now that we have the eeprom settings, apply the special cases where
+ * the eeprom may be wrong or the board simply won't support wake on
+ * lan on a particular port */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ break;
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ /* Wake events only supported on port A for dual fiber
+ * regardless of eeprom setting */
+ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ break;
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ /* if quad port adapter, disable WoL on all but port A */
+ if (global_quad_port_a != 0)
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ else
+ adapter->flags |= IGB_FLAG_QUAD_PORT_A;
+ /* Reset for multiple quad port adapters */
+ if (++global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ default:
+ /* If the device can't wake, don't set software support */
+ if (!device_can_wakeup(&adapter->pdev->dev))
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ break;
+ }
+
+ /* initialize the wol settings based on the eeprom settings */
+ if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ /* Some vendors want WoL disabled by default, but still supported */
+ if ((hw->mac.type == e1000_i350) &&
+ (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ adapter->wol = 0;
+ }
+
+ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev),
+ adapter->flags & IGB_FLAG_WOL_SUPPORTED);
+
+ /* reset the hardware with the new settings */
+ igb_reset(adapter);
+ adapter->devrc = 0;
+
+#ifdef HAVE_I2C_SUPPORT
+ /* Init the I2C interface */
+ err = igb_init_i2c(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to init i2c interface\n");
+ goto err_eeprom;
+ }
+#endif /* HAVE_I2C_SUPPORT */
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver. */
+ igb_get_hw_control(adapter);
+
+ strncpy(netdev->name, "eth%d", IFNAMSIZ);
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ err = igb_init_vmdq_netdevs(adapter);
+ if (err)
+ goto err_register;
+#endif
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+#ifdef IGB_DCA
+ if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) {
+ adapter->flags |= IGB_FLAG_DCA_ENABLED;
+ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n");
+ igb_setup_dca(adapter);
+ }
+
+#endif
+#ifdef HAVE_PTP_1588_CLOCK
+ /* do hw tstamp init after resetting */
+ igb_ptp_init(adapter);
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n");
+ /* print bus type/speed/width info */
+ dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ",
+ netdev->name,
+ ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" :
+ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" :
+ (hw->mac.type == e1000_i354) ? "integrated" :
+ "unknown"),
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
+ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+ (hw->mac.type == e1000_i354) ? "integrated" :
+ "unknown"));
+ dev_info(pci_dev_to_dev(pdev), "%s: MAC: ", netdev->name);
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
+ ret_val = e1000_read_pba_string(hw, pba_str, E1000_PBANUM_LENGTH);
+ if (ret_val)
+ strncpy(pba_str, "Unknown", sizeof(pba_str) - 1);
+ dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name,
+ pba_str);
+
+
+ /* Initialize the thermal sensor on i350 devices. */
+ if (hw->mac.type == e1000_i350) {
+ if (hw->bus.func == 0) {
+ u16 ets_word;
+
+ /*
+ * Read the NVM to determine if this i350 device
+ * supports an external thermal sensor.
+ */
+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word);
+ if (ets_word != 0x0000 && ets_word != 0xFFFF)
+ adapter->ets = true;
+ else
+ adapter->ets = false;
+ }
+#ifdef IGB_HWMON
+
+ igb_sysfs_init(adapter);
+#else
+#ifdef IGB_PROCFS
+
+ igb_procfs_init(adapter);
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
+ } else {
+ adapter->ets = false;
+ }
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (hw->mac.type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable EEE for internal copper PHY devices */
+ err = e1000_set_eee_i350(hw);
+ if ((!err) &&
+ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ break;
+ case e1000_i354:
+ if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) &
+ (E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ err = e1000_set_eee_i354(hw);
+ if ((!err) &&
+ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* send driver version info to firmware */
+ if (hw->mac.type >= e1000_i350)
+ igb_init_fw(adapter);
+
+#ifndef IGB_NO_LRO
+ if (netdev->features & NETIF_F_LRO)
+ dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled\n");
+ else
+ dev_info(pci_dev_to_dev(pdev), "LRO is disabled\n");
+#endif
+ dev_info(pci_dev_to_dev(pdev),
+ "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
+ adapter->msix_entries ? "MSI-X" :
+ (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
+ adapter->num_rx_queues, adapter->num_tx_queues);
+
+ cards_found++;
+
+ pm_runtime_put_noidle(&pdev->dev);
+ return 0;
+
+err_register:
+ igb_release_hw_control(adapter);
+#ifdef HAVE_I2C_SUPPORT
+ memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
+#endif /* HAVE_I2C_SUPPORT */
+err_eeprom:
+ if (!e1000_check_reset_block(hw))
+ e1000_phy_hw_reset(hw);
+
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+err_sw_init:
+ igb_clear_interrupt_scheme(adapter);
+ igb_reset_sriov_capability(adapter);
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+#ifdef HAVE_I2C_SUPPORT
+/*
+ * igb_remove_i2c - Cleanup I2C interface
+ * @adapter: pointer to adapter structure
+ *
+ */
+static void igb_remove_i2c(struct igb_adapter *adapter)
+{
+
+ /* free the adapter bus structure */
+ i2c_del_adapter(&adapter->i2c_adap);
+}
+#endif /* HAVE_I2C_SUPPORT */
+
+/**
+ * igb_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * igb_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit igb_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ pm_runtime_get_noresume(&pdev->dev);
+#ifdef HAVE_I2C_SUPPORT
+ igb_remove_i2c(adapter);
+#endif /* HAVE_I2C_SUPPORT */
+#ifdef HAVE_PTP_1588_CLOCK
+ igb_ptp_stop(adapter);
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ /* flush_scheduled_work() may reschedule our watchdog task, so
+ * explicitly prevent the watchdog tasks from being rescheduled */
+ set_bit(__IGB_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ del_timer_sync(&adapter->dma_err_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ flush_scheduled_work();
+
+#ifdef IGB_DCA
+ if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
+ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n");
+ dca_remove_requester(&pdev->dev);
+ adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
+ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE);
+ }
+#endif
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant. */
+ igb_release_hw_control(adapter);
+
+ unregister_netdev(netdev);
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ igb_remove_vmdq_netdevs(adapter);
+#endif
+
+ igb_clear_interrupt_scheme(adapter);
+ igb_reset_sriov_capability(adapter);
+
+ iounmap(hw->hw_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+#ifdef IGB_HWMON
+ igb_sysfs_exit(adapter);
+#else
+#ifdef IGB_PROCFS
+ igb_procfs_exit(adapter);
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
+ kfree(adapter->mac_table);
+ kfree(adapter->shadow_vfta);
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+/**
+ * igb_sw_init - Initialize general software structures (struct igb_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int igb_sw_init(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IGB_DEFAULT_TXD;
+ adapter->rx_ring_count = IGB_DEFAULT_RXD;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
+
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+
+ /* Initialize the hardware-specific values */
+ if (e1000_setup_init_funcs(hw, TRUE)) {
+ dev_err(pci_dev_to_dev(pdev), "Hardware Initialization Failure\n");
+ return -EIO;
+ }
+
+ adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
+ hw->mac.rar_entry_count,
+ GFP_ATOMIC);
+
+ /* Setup and initialize a copy of the hw vlan table array */
+ adapter->shadow_vfta = (u32 *)kzalloc(sizeof(u32) * E1000_VFTA_ENTRIES,
+ GFP_ATOMIC);
+#ifdef NO_KNI
+ /* These calls may decrease the number of queues */
+ if (hw->mac.type < e1000_i210) {
+ igb_set_sriov_capability(adapter);
+ }
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ igb_irq_disable(adapter);
+
+ set_bit(__IGB_DOWN, &adapter->state);
+#endif
+ return 0;
+}
+
+/**
+ * igb_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int __igb_open(struct net_device *netdev, bool resuming)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+#ifdef CONFIG_PM_RUNTIME
+ struct pci_dev *pdev = adapter->pdev;
+#endif /* CONFIG_PM_RUNTIME */
+ int err;
+ int i;
+
+ /* disallow open during test */
+ if (test_bit(__IGB_TESTING, &adapter->state)) {
+ WARN_ON(resuming);
+ return -EBUSY;
+ }
+
+#ifdef CONFIG_PM_RUNTIME
+ if (!resuming)
+ pm_runtime_get_sync(&pdev->dev);
+#endif /* CONFIG_PM_RUNTIME */
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = igb_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = igb_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ igb_power_up_link(adapter);
+
+ /* before we allocate an interrupt, we must be ready to handle it.
+ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+ * as soon as we call pci_request_irq, so we have to set up our
+ * clean_rx handler before we do so. */
+ igb_configure(adapter);
+
+ err = igb_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /* Notify the stack of the actual queue counts. */
+ netif_set_real_num_tx_queues(netdev,
+ adapter->vmdq_pools ? 1 :
+ adapter->num_tx_queues);
+
+ err = netif_set_real_num_rx_queues(netdev,
+ adapter->vmdq_pools ? 1 :
+ adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
+ /* From here on the code is the same as igb_up() */
+ clear_bit(__IGB_DOWN, &adapter->state);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ napi_enable(&(adapter->q_vector[i]->napi));
+ igb_configure_lli(adapter);
+
+ /* Clear any pending interrupts. */
+ E1000_READ_REG(hw, E1000_ICR);
+
+ igb_irq_enable(adapter);
+
+ /* notify VFs that reset has been completed */
+ if (adapter->vfs_allocated_count) {
+ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg_data |= E1000_CTRL_EXT_PFRSTD;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
+ }
+
+ netif_tx_start_all_queues(netdev);
+
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ schedule_work(&adapter->dma_err_task);
+
+ /* start the watchdog. */
+ hw->mac.get_link_status = 1;
+ schedule_work(&adapter->watchdog_task);
+
+ return E1000_SUCCESS;
+
+err_set_queues:
+ igb_free_irq(adapter);
+err_req_irq:
+ igb_release_hw_control(adapter);
+ igb_power_down_link(adapter);
+ igb_free_all_rx_resources(adapter);
+err_setup_rx:
+ igb_free_all_tx_resources(adapter);
+err_setup_tx:
+ igb_reset(adapter);
+
+#ifdef CONFIG_PM_RUNTIME
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
+#endif /* CONFIG_PM_RUNTIME */
+
+ return err;
+}
+
+static int igb_open(struct net_device *netdev)
+{
+ return __igb_open(netdev, false);
+}
+
+/**
+ * igb_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int __igb_close(struct net_device *netdev, bool suspending)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM_RUNTIME
+ struct pci_dev *pdev = adapter->pdev;
+#endif /* CONFIG_PM_RUNTIME */
+
+ WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
+
+#ifdef CONFIG_PM_RUNTIME
+ if (!suspending)
+ pm_runtime_get_sync(&pdev->dev);
+#endif /* CONFIG_PM_RUNTIME */
+
+ igb_down(adapter);
+
+ igb_release_hw_control(adapter);
+
+ igb_free_irq(adapter);
+
+ igb_free_all_tx_resources(adapter);
+ igb_free_all_rx_resources(adapter);
+
+#ifdef CONFIG_PM_RUNTIME
+ if (!suspending)
+ pm_runtime_put_sync(&pdev->dev);
+#endif /* CONFIG_PM_RUNTIME */
+
+ return 0;
+}
+
+static int igb_close(struct net_device *netdev)
+{
+ return __igb_close(netdev, false);
+}
+
+/**
+ * igb_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int igb_setup_tx_resources(struct igb_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int size;
+
+ size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ dev_err(dev,
+ "Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igb_setup_all_tx_resources - wrapper to allocate Tx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = igb_setup_tx_resources(adapter->tx_ring[i]);
+ if (err) {
+ dev_err(pci_dev_to_dev(pdev),
+ "Allocation for Tx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ igb_free_tx_resources(adapter->tx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * igb_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
+ **/
+void igb_setup_tctl(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tctl;
+
+ /* disable queue 0 which is enabled by default on 82575 and 82576 */
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), 0);
+
+ /* Program the Transmit Control Register */
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+ e1000_config_collision_dist(hw);
+
+ /* Enable transmits */
+ tctl |= E1000_TCTL_EN;
+
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+}
+
+static u32 igb_tx_wthresh(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ switch (hw->mac.type) {
+ case e1000_i354:
+ return 4;
+ case e1000_82576:
+ if (adapter->msix_entries)
+ return 1;
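+ /* fall through - non-MSI-X 82576 uses the default threshold of 16 */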
+ default:
+ break;
+ }
+
+ return 16;
+}
+
+/**
+ * igb_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
+ *
+ * Configure a transmit ring after a reset.
+ **/
+void igb_configure_tx_ring(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 txdctl = 0;
+ u64 tdba = ring->dma;
+ int reg_idx = ring->reg_idx;
+
+ /* disable the queue */
+ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), 0);
+ E1000_WRITE_FLUSH(hw);
+ mdelay(10);
+
+ E1000_WRITE_REG(hw, E1000_TDLEN(reg_idx),
+ ring->count * sizeof(union e1000_adv_tx_desc));
+ E1000_WRITE_REG(hw, E1000_TDBAL(reg_idx),
+ tdba & 0x00000000ffffffffULL);
+ E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32);
+
+ ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+ E1000_WRITE_REG(hw, E1000_TDH(reg_idx), 0);
+ writel(0, ring->tail);
+
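+ /*
+ * TXDCTL packs the prefetch (PTHRESH), host (HTHRESH) and write-back
+ * (WTHRESH) thresholds into separate byte-aligned fields; the queue is
+ * only armed once QUEUE_ENABLE is set below.
+ */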
+ txdctl |= IGB_TX_PTHRESH;
+ txdctl |= IGB_TX_HTHRESH << 8;
+ txdctl |= igb_tx_wthresh(adapter) << 16;
+
+ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl);
+}
+
+/**
+ * igb_configure_tx - Configure transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void igb_configure_tx(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+/**
+ * igb_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int igb_setup_rx_resources(struct igb_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int size, desc_len;
+
+ size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
+ desc_len = sizeof(union e1000_adv_rx_desc);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc)
+ goto err;
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the receive descriptor"
+ " ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igb_setup_all_rx_resources - wrapper to allocate Rx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = igb_setup_rx_resources(adapter->rx_ring[i]);
+ if (err) {
+ dev_err(pci_dev_to_dev(pdev),
+ "Allocation for Rx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ igb_free_rx_resources(adapter->rx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * igb_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ **/
+static void igb_setup_mrqc(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc, rxcsum;
+ u32 j, num_rx_queues, shift = 0, shift2 = 0;
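+ /* 40-byte RSS hash key, loaded one 32-bit word at a time into RSSRK */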
+ static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
+ 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
+ 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
+ 0xFA01ACBE };
+
+ /* Fill out hash function seeds */
+ for (j = 0; j < 10; j++)
+ E1000_WRITE_REG(hw, E1000_RSSRK(j), rsskey[j]);
+
+ num_rx_queues = adapter->rss_queues;
+
+ /* 82575 and 82576 support 2 RSS queues for VMDq */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ if (adapter->vmdq_pools) {
+ shift = 2;
+ shift2 = 6;
+ break;
+ }
+ shift = 6;
+ break;
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
+ shift = 3;
+ num_rx_queues = 2;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Populate the redirection table 4 entries at a time. To do this
+ * we are generating the results for n and n+2 and then interleaving
+ * those with the results for n+1 and n+3.
+ */
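+ /*
+ * Each of the 32 RETA registers holds four one-byte entries, giving a
+ * 128-entry table. With shift == 0 (82580 and later, or 82576 without
+ * VFs/VMDq) this arithmetic splits the table into num_rx_queues
+ * contiguous blocks, e.g. with 4 RSS queues entries 0-31 map to
+ * queue 0, 32-63 to queue 1, and so on.
+ */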
+ for (j = 0; j < 32; j++) {
+ /* first pass generates n and n+2 */
+ u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
+ u32 reta = (base & 0x07800780) >> (7 - shift);
+
+ /* second pass generates n+1 and n+3 */
+ base += 0x00010001 * num_rx_queues;
+ reta |= (base & 0x07800780) << (1 + shift);
+
+ /* generate 2nd table for 82575 based parts */
+ if (shift2)
+ reta |= (0x01010101 * num_rx_queues) << shift2;
+
+ E1000_WRITE_REG(hw, E1000_RETA(j), reta);
+ }
+
+ /*
+ * Disable raw packet checksumming so that RSS hash is placed in
+ * descriptor on writeback. No need to enable TCP/UDP/IP checksum
+ * offloads as they are enabled by default
+ */
+ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+ rxcsum |= E1000_RXCSUM_PCSD;
+
+ if (adapter->hw.mac.type >= e1000_82576)
+ /* Enable Receive Checksum Offload for SCTP */
+ rxcsum |= E1000_RXCSUM_CRCOFL;
+
+ /* Don't need to set TUOFL or IPOFL, they default to 1 */
+ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+
+ /* Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
+ mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
+ /* If VMDq is enabled then we set the appropriate mode for that, else
+ * we default to RSS so that an RSS hash is calculated per packet even
+ * if we are only using one queue */
+ if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
+ if (hw->mac.type > e1000_82575) {
+ /* Set the default pool for the PF's first queue */
+ u32 vtctl = E1000_READ_REG(hw, E1000_VT_CTL);
+ vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+ E1000_VT_CTL_DISABLE_DEF_POOL);
+ vtctl |= adapter->vfs_allocated_count <<
+ E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+ E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);
+ } else if (adapter->rss_queues > 1) {
+ /* set default queue for pool 1 to queue 2 */
+ E1000_WRITE_REG(hw, E1000_VT_CTL,
+ adapter->rss_queues << 7);
+ }
+ if (adapter->rss_queues > 1)
+ mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+ else
+ mrqc |= E1000_MRQC_ENABLE_VMDQ;
+ } else {
+ mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
+ }
+ igb_vmm_control(adapter);
+
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+}
+
+/**
+ * igb_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+void igb_setup_rctl(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ /*
+ * enable stripping of CRC. It's unlikely this will break BMC
+ * redirection as it did with e1000. Newer features require
+ * that the HW strips the CRC.
+ */
+ rctl |= E1000_RCTL_SECRC;
+
+ /* disable store bad packets and clear size bits. */
+ rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
+
+ /* enable LPE to prevent packets larger than max_frame_size */
+ rctl |= E1000_RCTL_LPE;
+
+ /* disable queue 0 to prevent tail write w/o re-config */
+ E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0);
+
+ /* Attention!!! For SR-IOV PF driver operations you must enable
+ * queue drop for all VF and PF queues to prevent head of line blocking
+ * if an un-trusted VF does not provide descriptors to hardware.
+ */
+ if (adapter->vfs_allocated_count) {
+ /* set all queue drop enable bits */
+ E1000_WRITE_REG(hw, E1000_QDE, ALL_QUEUES);
+ }
+
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
+ int vfn)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vmolr;
+
+ /* if this isn't the PF, check whether VFs are enabled and
+ * increase the size to make room for VLAN tags */
+ if (vfn < adapter->vfs_allocated_count &&
+ adapter->vf_data[vfn].vlans_enabled)
+ size += VLAN_HLEN;
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ if (vfn >= adapter->vfs_allocated_count) {
+ int queue = vfn - adapter->vfs_allocated_count;
+ struct igb_vmdq_adapter *vadapter;
+
+ vadapter = netdev_priv(adapter->vmdq_netdev[queue-1]);
+ if (vadapter->vlgrp)
+ size += VLAN_HLEN;
+ }
+#endif
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
+ vmolr &= ~E1000_VMOLR_RLPML_MASK;
+ vmolr |= size | E1000_VMOLR_LPE;
+ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
+
+ return 0;
+}
+
+/**
+ * igb_rlpml_set - set maximum receive packet size
+ * @adapter: board private structure
+ *
+ * Configure maximum receivable packet size.
+ **/
+static void igb_rlpml_set(struct igb_adapter *adapter)
+{
+ u32 max_frame_size = adapter->max_frame_size;
+ struct e1000_hw *hw = &adapter->hw;
+ u16 pf_id = adapter->vfs_allocated_count;
+
+ if (adapter->vmdq_pools && hw->mac.type != e1000_82575) {
+ int i;
+ for (i = 0; i < adapter->vmdq_pools; i++)
+ igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i);
+ /*
+ * If we're in VMDQ or SR-IOV mode, then set global RLPML
+ * to our max jumbo frame size, in case we need to enable
+ * jumbo frames on one of the rings later.
+ * This will not pass over-length frames into the default
+ * queue because it's gated by the VMOLR.RLPML.
+ */
+ max_frame_size = MAX_JUMBO_FRAME_SIZE;
+ }
+ /* Set VF RLPML for the PF device. */
+ if (adapter->vfs_allocated_count)
+ igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
+
+ E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size);
+}
+
+static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
+ int vfn, bool enable)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 val;
+ void __iomem *reg;
+
+ if (hw->mac.type < e1000_82576)
+ return;
+
+ if (hw->mac.type == e1000_i350)
+ reg = hw->hw_addr + E1000_DVMOLR(vfn);
+ else
+ reg = hw->hw_addr + E1000_VMOLR(vfn);
+
+ val = readl(reg);
+ if (enable)
+ val |= E1000_VMOLR_STRVLAN;
+ else
+ val &= ~(E1000_VMOLR_STRVLAN);
+ writel(val, reg);
+}
+static inline void igb_set_vmolr(struct igb_adapter *adapter,
+ int vfn, bool aupe)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vmolr;
+
+ /*
+ * This register exists only on 82576 and newer, so on older
+ * hardware simply exit without doing anything
+ */
+ if (hw->mac.type < e1000_82576)
+ return;
+
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
+
+ if (aupe)
+ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
+ else
+ vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
+
+ /* clear all bits that might not be set */
+ vmolr &= ~E1000_VMOLR_RSSE;
+
+ if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
+ vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
+
+ vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
+ vmolr |= E1000_VMOLR_LPE; /* Accept long packets */
+
+ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
+}
+
+/**
+ * igb_configure_rx_ring - Configure a receive ring after Reset
+ * @adapter: board private structure
+ * @ring: receive ring to be configured
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+void igb_configure_rx_ring(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u64 rdba = ring->dma;
+ int reg_idx = ring->reg_idx;
+ u32 srrctl = 0, rxdctl = 0;
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ /*
+ * RLPML prevents us from receiving a frame larger than max_frame, so
+ * it is safe to set rx_buffer_len to max_frame without risking an
+ * skb overrun.
+ */
+ ring->rx_buffer_len = max_t(u32, adapter->max_frame_size,
+ MAXIMUM_ETHERNET_VLAN_SIZE);
+
+#endif
+ /* disable the queue */
+ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), 0);
+
+ /* Set DMA base address registers */
+ E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx),
+ rdba & 0x00000000ffffffffULL);
+ E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32);
+ E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx),
+ ring->count * sizeof(union e1000_adv_rx_desc));
+
+ /* initialize head and tail */
+ ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+ E1000_WRITE_REG(hw, E1000_RDH(reg_idx), 0);
+ writel(0, ring->tail);
+
+ /* reset next-to-use/clean to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ ring->next_to_alloc = 0;
+
+#endif
+ /* set descriptor configuration */
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
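+ /* header size is programmed in 64-byte units, packet size in 1 KB units */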
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+ srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
+ E1000_SRRCTL_BSIZEPKT_SHIFT;
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+#ifdef HAVE_PTP_1588_CLOCK
+ if (hw->mac.type >= e1000_82580)
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
+#endif /* HAVE_PTP_1588_CLOCK */
+ /*
+ * We should set the drop enable bit if:
+ * SR-IOV is enabled
+ * or
+ * Flow Control is disabled and number of RX queues > 1
+ *
+ * This allows us to avoid head of line blocking for security
+ * and performance reasons.
+ */
+ if (adapter->vfs_allocated_count ||
+ (adapter->num_rx_queues > 1 &&
+ (hw->fc.requested_mode == e1000_fc_none ||
+ hw->fc.requested_mode == e1000_fc_rx_pause)))
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl);
+
+ /* set filtering for VMDQ pools */
+ igb_set_vmolr(adapter, reg_idx & 0x7, true);
+
+ rxdctl |= IGB_RX_PTHRESH;
+ rxdctl |= IGB_RX_HTHRESH << 8;
+ rxdctl |= IGB_RX_WTHRESH << 16;
+
+ /* enable receive descriptor fetching */
+ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl);
+}
+
+/**
+ * igb_configure_rx - Configure receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void igb_configure_rx(struct igb_adapter *adapter)
+{
+ int i;
+
+ /* set UTA to appropriate mode */
+ igb_set_uta(adapter);
+
+ igb_full_sync_mac_table(adapter);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+}
+
+/**
+ * igb_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void igb_free_tx_resources(struct igb_ring *tx_ring)
+{
+ igb_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * igb_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void igb_free_all_tx_resources(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igb_free_tx_resources(adapter->tx_ring[i]);
+}
+
+void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
+ struct igb_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ * igb_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+{
+ struct igb_tx_buffer *buffer_info;
+ unsigned long size;
+ u16 i;
+
+ if (!tx_ring->tx_buffer_info)
+ return;
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->tx_buffer_info[i];
+ igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+ }
+
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * igb_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igb_clean_tx_ring(adapter->tx_ring[i]);
+}
+
+/**
+ * igb_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void igb_free_rx_resources(struct igb_ring *rx_ring)
+{
+ igb_clean_rx_ring(rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * igb_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void igb_free_all_rx_resources(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igb_free_rx_resources(adapter->rx_ring[i]);
+}
+
+/**
+ * igb_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+void igb_clean_rx_ring(struct igb_ring *rx_ring)
+{
+ unsigned long size;
+ u16 i;
+
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ if (rx_ring->skb)
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+
+#endif
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ if (buffer_info->dma) {
+ dma_unmap_single(rx_ring->dev,
+ buffer_info->dma,
+ rx_ring->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+#else
+ if (!buffer_info->page)
+ continue;
+
+ dma_unmap_page(rx_ring->dev,
+ buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page(buffer_info->page);
+
+ buffer_info->page = NULL;
+#endif
+ }
+
+ size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * igb_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igb_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ * igb_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int igb_set_mac(struct net_device *netdev, void *p)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ igb_del_mac_filter(adapter, hw->mac.addr,
+ adapter->vfs_allocated_count);
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ /* set the correct pool for the new PF MAC address in entry 0 */
+ return igb_add_mac_filter(adapter, hw->mac.addr,
+ adapter->vfs_allocated_count);
+}
+
+/**
+ * igb_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+int igb_write_mc_addr_list(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+ struct netdev_hw_addr *ha;
+#else
+ struct dev_mc_list *ha;
+#endif
+ u8 *mta_list;
+ int i, count;
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ int vm;
+#endif
+ count = netdev_mc_count(netdev);
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ for (vm = 1; vm < adapter->vmdq_pools; vm++) {
+ if (!adapter->vmdq_netdev[vm])
+ break;
+ if (!netif_running(adapter->vmdq_netdev[vm]))
+ continue;
+ count += netdev_mc_count(adapter->vmdq_netdev[vm]);
+ }
+#endif
+
+ if (!count) {
+ e1000_update_mc_addr_list(hw, NULL, 0);
+ return 0;
+ }
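+ /* pack the list as consecutive 6-byte (ETH_ALEN) MAC addresses */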
+ mta_list = kzalloc(count * 6, GFP_ATOMIC);
+ if (!mta_list)
+ return -ENOMEM;
+
+ /* The shared function expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+#else
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN);
+#endif
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ for (vm = 1; vm < adapter->vmdq_pools; vm++) {
+ if (!adapter->vmdq_netdev[vm])
+ break;
+ if (!netif_running(adapter->vmdq_netdev[vm]) ||
+ !netdev_mc_count(adapter->vmdq_netdev[vm]))
+ continue;
+ netdev_for_each_mc_addr(ha, adapter->vmdq_netdev[vm])
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+ memcpy(mta_list + (i++ * ETH_ALEN),
+ ha->addr, ETH_ALEN);
+#else
+ memcpy(mta_list + (i++ * ETH_ALEN),
+ ha->dmi_addr, ETH_ALEN);
+#endif
+ }
+#endif
+ e1000_update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+
+ return count;
+}
+
+void igb_rar_set(struct igb_adapter *adapter, u32 index)
+{
+ u32 rar_low, rar_high;
+ struct e1000_hw *hw = &adapter->hw;
+ u8 *addr = adapter->mac_table[index].addr;
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* Indicate to hardware the Address is Valid. */
+ if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE)
+ rar_high |= E1000_RAH_AV;
+
+ if (hw->mac.type == e1000_82575)
+ rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue;
+ else
+ rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue;
+
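+ /*
+ * Write RAL before RAH so that the Address Valid bit in RAH only
+ * takes effect once the full address has been programmed.
+ */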
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
+}
+
+void igb_full_sync_mac_table(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ igb_rar_set(adapter, i);
+ }
+}
+
+void igb_sync_mac_table(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ if (adapter->mac_table[i].state & IGB_MAC_STATE_MODIFIED)
+ igb_rar_set(adapter, i);
+ adapter->mac_table[i].state &= ~(IGB_MAC_STATE_MODIFIED);
+ }
+}
+
+int igb_available_rars(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i, count = 0;
+
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ if (adapter->mac_table[i].state == 0)
+ count++;
+ }
+ return count;
+}
+
+#ifdef HAVE_SET_RX_MODE
+/**
+ * igb_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int igb_write_uc_addr_list(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ unsigned int vfn = adapter->vfs_allocated_count;
+ int count = 0;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > igb_available_rars(adapter))
+ return -ENOMEM;
+ if (!netdev_uc_empty(netdev)) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+ struct netdev_hw_addr *ha;
+#else
+ struct dev_mc_list *ha;
+#endif
+ netdev_for_each_uc_addr(ha, netdev) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+ igb_del_mac_filter(adapter, ha->addr, vfn);
+ igb_add_mac_filter(adapter, ha->addr, vfn);
+#else
+ igb_del_mac_filter(adapter, ha->da_addr, vfn);
+ igb_add_mac_filter(adapter, ha->da_addr, vfn);
+#endif
+ count++;
+ }
+ }
+ return count;
+}
+
+#endif /* HAVE_SET_RX_MODE */
+/**
+ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void igb_set_rx_mode(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int vfn = adapter->vfs_allocated_count;
+ u32 rctl, vmolr = 0;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* clear the affected bits */
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
+
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+ /* retain VLAN HW filtering if in VT mode */
+ if (adapter->vfs_allocated_count || adapter->vmdq_pools)
+ rctl |= E1000_RCTL_VFE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= E1000_RCTL_MPE;
+ vmolr |= E1000_VMOLR_MPME;
+ } else {
+ /*
+ * Write addresses to the MTA; if the attempt fails,
+ * just turn on promiscuous mode so that we can at
+ * least receive multicast traffic
+ */
+ count = igb_write_mc_addr_list(netdev);
+ if (count < 0) {
+ rctl |= E1000_RCTL_MPE;
+ vmolr |= E1000_VMOLR_MPME;
+ } else if (count) {
+ vmolr |= E1000_VMOLR_ROMPE;
+ }
+ }
+#ifdef HAVE_SET_RX_MODE
+ /*
+ * Write addresses to the available RAR registers; if there is
+ * not enough space to store them all, enable unicast
+ * promiscuous mode
+ */
+ count = igb_write_uc_addr_list(netdev);
+ if (count < 0) {
+ rctl |= E1000_RCTL_UPE;
+ vmolr |= E1000_VMOLR_ROPE;
+ }
+#endif /* HAVE_SET_RX_MODE */
+ rctl |= E1000_RCTL_VFE;
+ }
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /*
+ * In order to support SR-IOV and eventually VMDq it is necessary to set
+ * the VMOLR to enable the appropriate modes. Without this workaround
+ * we will have issues with VLAN tag stripping not being done for frames
+ * that are only arriving because we are the default pool
+ */
+ if (hw->mac.type < e1000_82576)
+ return;
+
+ vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) &
+ ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
+ igb_restore_vf_multicasts(adapter);
+}
+
+static void igb_check_wvbr(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 wvbr = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ if (!(wvbr = E1000_READ_REG(hw, E1000_WVBR)))
+ return;
+ break;
+ default:
+ break;
+ }
+
+ adapter->wvbr |= wvbr;
+}
+
+#define IGB_STAGGERED_QUEUE_OFFSET 8
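+ /* on 82576 each VF is reported in two WVBR bits, the second offset by 8 */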
+
+static void igb_spoof_check(struct igb_adapter *adapter)
+{
+ int j;
+
+ if (!adapter->wvbr)
+ return;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
+ if (adapter->wvbr & (1 << j) ||
+ adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+ DPRINTK(DRV, WARNING,
+ "Spoof event(s) detected on VF %d\n", j);
+ adapter->wvbr &=
+ ~((1 << j) |
+ (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+ }
+ }
+ break;
+ case e1000_i350:
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
+ if (adapter->wvbr & (1 << j)) {
+ DPRINTK(DRV, WARNING,
+ "Spoof event(s) detected on VF %d\n", j);
+ adapter->wvbr &= ~(1 << j);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
+static void igb_update_phy_info(unsigned long data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *) data;
+ e1000_get_phy_info(&adapter->hw);
+}
+
+/**
+ * igb_has_link - check shared code for link and determine up/down
+ * @adapter: pointer to driver private info
+ **/
+bool igb_has_link(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ bool link_active = FALSE;
+
+ /* get_link_status is set on LSC (link status) interrupt or
+ * rx sequence error interrupt. get_link_status will stay
+ * false until the e1000_check_for_link establishes link
+ * for copper adapters ONLY
+ */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ if (!hw->mac.get_link_status)
+ return true;
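+ /* fall through - poll the link state via e1000_check_for_link() */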
+ case e1000_media_type_internal_serdes:
+ e1000_check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
+ break;
+ case e1000_media_type_unknown:
+ default:
+ break;
+ }
+
+ if (((hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) &&
+ (hw->phy.id == I210_I_PHY_ID)) {
+ if (!netif_carrier_ok(adapter->netdev)) {
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
+ adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ }
+ }
+
+ return link_active;
+}
+
+/**
+ * igb_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void igb_watchdog(unsigned long data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+}
+
+static void igb_watchdog_task(struct work_struct *work)
+{
+ struct igb_adapter *adapter = container_of(work,
+ struct igb_adapter,
+ watchdog_task);
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 link;
+ int i;
+ u32 thstat, ctrl_ext;
+ u32 connsw;
+
+ link = igb_has_link(adapter);
+ /* Force link down if we have fiber to swap to */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ connsw = E1000_READ_REG(hw, E1000_CONNSW);
+ if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+ link = 0;
+ }
+ }
+
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
+ if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ else
+ link = FALSE;
+ }
+
+ if (link) {
+ /* Perform a reset if the media type changed. */
+ if (hw->dev_spec._82575.media_changed) {
+ hw->dev_spec._82575.media_changed = false;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ igb_reset(adapter);
+ }
+
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
+ if (!netif_carrier_ok(netdev)) {
+ u32 ctrl;
+ e1000_get_speed_and_duplex(hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ /* Link status message must follow this format */
+ printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) &&
+ (ctrl & E1000_CTRL_RFCE)) ? "RX/TX":
+ ((ctrl & E1000_CTRL_RFCE) ? "RX" :
+ ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
+ /* adjust timeout factor according to speed/duplex */
+ adapter->tx_timeout_factor = 1;
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adapter->tx_timeout_factor = 14;
+ break;
+ case SPEED_100:
+ /* maybe add some timeout factor ? */
+ break;
+ default:
+ break;
+ }
+
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+
+ igb_ping_all_vfs(adapter);
+#ifdef IFLA_VF_MAX
+ igb_check_vf_rate_limit(adapter);
+#endif /* IFLA_VF_MAX */
+
+ /* link state has changed, schedule phy info update */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ /* check for thermal sensor event on i350 */
+ if (hw->mac.type == e1000_i350) {
+ thstat = E1000_READ_REG(hw, E1000_THSTAT);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ if ((hw->phy.media_type ==
+ e1000_media_type_copper) &&
+ !(ctrl_ext &
+ E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ if (thstat & E1000_THSTAT_PWR_DOWN) {
+ printk(KERN_ERR "igb: %s The "
+ "network adapter was stopped "
+ "because it overheated.\n",
+ netdev->name);
+ }
+ if (thstat & E1000_THSTAT_LINK_THROTTLE) {
+ printk(KERN_INFO
+ "igb: %s The network adapter's supported "
+ "link speed was downshifted because it "
+ "overheated.\n",
+ netdev->name);
+ }
+ }
+ }
+
+ /* Link status message must follow this format */
+ printk(KERN_INFO "igb: %s NIC Link is Down\n",
+ netdev->name);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ igb_ping_all_vfs(adapter);
+
+ /* link state has changed, schedule phy info update */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ /* link is down, time to check for alternate media */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
+ pm_schedule_suspend(netdev->dev.parent,
+ MSEC_PER_SEC * 5);
+
+ /* also check for alternate media here */
+ } else if (!netif_carrier_ok(netdev) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ hw->mac.ops.power_up_serdes(hw);
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
+ }
+
+ igb_update_stats(adapter);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igb_ring *tx_ring = adapter->tx_ring[i];
+ if (!netif_carrier_ok(netdev)) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context). */
+ if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
+ }
+ }
+
+ /* Force detection of hung controller every watchdog period */
+ set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+ }
+
+ /* Cause software interrupt to ensure rx ring is cleaned */
+ if (adapter->msix_entries) {
+ u32 eics = 0;
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ eics |= adapter->q_vector[i]->eims_value;
+ E1000_WRITE_REG(hw, E1000_EICS, eics);
+ } else {
+ E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0);
+ }
+
+ igb_spoof_check(adapter);
+
+ /* Reset the timer */
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + HZ));
+ else
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+}
+
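+/* igb_dma_err_task - bottom half for the DMA error timer.  If the good
+ * packets transmitted counter has not incremented, scan each VF's PCI
+ * status word through the CIAA/CIAD debug window and issue a VFLR to any
+ * VF reporting a master/target abort or system error, then re-arm the
+ * timer. */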
+static void igb_dma_err_task(struct work_struct *work)
+{
+ struct igb_adapter *adapter = container_of(work,
+ struct igb_adapter,
+ dma_err_task);
+ int vf;
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 hgptc;
+ u32 ciaa, ciad;
+
+ hgptc = E1000_READ_REG(hw, E1000_HGPTC);
+ if (hgptc) /* If incrementing then no need for the check below */
+ goto dma_timer_reset;
+ /*
+ * Check to see if a bad DMA write target from an errant or
+ * malicious VF has caused a PCIe error. If so then we can
+ * issue a VFLR to the offending VF(s) and then resume without
+ * requesting a full slot reset.
+ */
+
+ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
+ ciaa = (vf << 16) | 0x80000000;
+ /* 32 bit read, so align; we really want the status word at offset 6 */
+ ciaa |= PCI_COMMAND;
+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
+ ciad = E1000_READ_REG(hw, E1000_CIAD);
+ ciaa &= 0x7FFFFFFF;
+ /* disable debug mode asap after reading data */
+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
+ /* Get the upper 16 bits which will be the PCI status reg */
+ ciad >>= 16;
+ if (ciad & (PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR)) {
+ netdev_err(netdev, "VF %d suffered error\n", vf);
+ /* Issue VFLR */
+ ciaa = (vf << 16) | 0x80000000;
+ ciaa |= 0xA8;
+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
+ ciad = 0x00008000; /* VFLR */
+ E1000_WRITE_REG(hw, E1000_CIAD, ciad);
+ ciaa &= 0x7FFFFFFF;
+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
+ }
+ }
+dma_timer_reset:
+ /* Reset the timer */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->dma_err_timer,
+ round_jiffies(jiffies + HZ / 10));
+}
+
+/**
+ * igb_dma_err_timer - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void igb_dma_err_timer(unsigned long data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->dma_err_task);
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * igb_update_ring_itr - update the dynamic ITR value based on packet size
+ *
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igb_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see igb_param.c)
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
+ * @q_vector: pointer to q_vector
+ **/
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
+{
+ int new_val = q_vector->itr_val;
+ int avg_wire_size = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
+ unsigned int packets;
+
+ /* For non-gigabit speeds, just fix the interrupt rate at 4000
+ * ints/sec - ITR timer value of 120 ticks.
+ */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ new_val = IGB_4K_ITR;
+ goto set_itr_val;
+ default:
+ break;
+ }
+
+ packets = q_vector->rx.total_packets;
+ if (packets)
+ avg_wire_size = q_vector->rx.total_bytes / packets;
+
+ packets = q_vector->tx.total_packets;
+ if (packets)
+ avg_wire_size = max_t(u32, avg_wire_size,
+ q_vector->tx.total_bytes / packets);
+
+ /* if avg_wire_size isn't set no work was done */
+ if (!avg_wire_size)
+ goto clear_counts;
+
+ /* Add 24 bytes to size to account for CRC, preamble, and gap */
+ avg_wire_size += 24;
+
+ /* Don't starve jumbo frames */
+ avg_wire_size = min(avg_wire_size, 3000);
+
+ /* Give a little boost to mid-size frames */
+ if ((avg_wire_size > 300) && (avg_wire_size < 1200))
+ new_val = avg_wire_size / 3;
+ else
+ new_val = avg_wire_size / 2;
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (new_val < IGB_20K_ITR &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ new_val = IGB_20K_ITR;
+
+set_itr_val:
+ if (new_val != q_vector->itr_val) {
+ q_vector->itr_val = new_val;
+ q_vector->set_itr = 1;
+ }
+clear_counts:
+ q_vector->rx.total_bytes = 0;
+ q_vector->rx.total_packets = 0;
+ q_vector->tx.total_bytes = 0;
+ q_vector->tx.total_packets = 0;
+}
+
+/**
+ * igb_update_itr - update the dynamic ITR value based on statistics
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * this functionality is controlled by the InterruptThrottleRate module
+ * parameter (see igb_param.c)
+ * NOTE: These calculations are only valid when operating in a single-
+ * queue environment.
+ * @q_vector: pointer to q_vector
+ * @ring_container: ring info to update the itr for
+ **/
+static void igb_update_itr(struct igb_q_vector *q_vector,
+ struct igb_ring_container *ring_container)
+{
+ unsigned int packets = ring_container->total_packets;
+ unsigned int bytes = ring_container->total_bytes;
+ u8 itrval = ring_container->itr;
+
+ /* no packets, exit with status unchanged */
+ if (packets == 0)
+ return;
+
+ switch (itrval) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes/packets > 8000)
+ itrval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ itrval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes/packets > 8000) {
+ itrval = bulk_latency;
+ } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+ itrval = bulk_latency;
+ } else if ((packets > 35)) {
+ itrval = lowest_latency;
+ }
+ } else if (bytes/packets > 2000) {
+ itrval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ itrval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ itrval = low_latency;
+ } else if (bytes < 1500) {
+ itrval = low_latency;
+ }
+ break;
+ }
+
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itrval;
+}
+
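+/* igb_set_itr - pick a new ITR value for single-queue operation from the
+ * per-ring latency classes computed by igb_update_itr(), stepping towards
+ * higher interrupt rates gradually.  The value is only latched here; it is
+ * written to the EITR register on the next interrupt. */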
+static void igb_set_itr(struct igb_q_vector *q_vector)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ u32 new_itr = q_vector->itr_val;
+ u8 current_itr = 0;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ current_itr = 0;
+ new_itr = IGB_4K_ITR;
+ goto set_itr_now;
+ default:
+ break;
+ }
+
+ igb_update_itr(q_vector, &q_vector->tx);
+ igb_update_itr(q_vector, &q_vector->rx);
+
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (current_itr == lowest_latency &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ current_itr = low_latency;
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
+ break;
+ case low_latency:
+ new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
+ break;
+ case bulk_latency:
+ new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != q_vector->itr_val) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing */
+ new_itr = new_itr > q_vector->itr_val ?
+ max((new_itr * q_vector->itr_val) /
+ (new_itr + (q_vector->itr_val >> 2)),
+ new_itr) :
+ new_itr;
+ /* Don't write the value here; it resets the adapter's
+ * internal timer, and causes us to delay far longer than
+ * we should between interrupts. Instead, we write the ITR
+ * value at the beginning of the next interrupt so the timing
+ * ends up being correct.
+ */
+ q_vector->itr_val = new_itr;
+ q_vector->set_itr = 1;
+ }
+}
+
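+/* igb_tx_ctxtdesc - write an advanced context descriptor at next_to_use,
+ * carrying the VLAN/MAC/IP length fields and the TSO/checksum offload
+ * command bits that apply to the data descriptors which follow it. */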
+void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
+ u32 type_tucmd, u32 mss_l4len_idx)
+{
+ struct e1000_adv_tx_context_desc *context_desc;
+ u16 i = tx_ring->next_to_use;
+
+ context_desc = IGB_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
+
+ /* For 82575, context index must be unique per ring. */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ mss_l4len_idx |= tx_ring->reg_idx << 4;
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
+
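+/* igb_tso - set up a TSO context descriptor for a GSO skb.  Returns 1 if a
+ * context descriptor was written, 0 if the skb needs no TSO handling and a
+ * negative errno if a cloned header could not be expanded. */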
+static int igb_tso(struct igb_ring *tx_ring,
+ struct igb_tx_buffer *first,
+ u8 *hdr_len)
+{
+#ifdef NETIF_F_TSO
+ struct sk_buff *skb = first->skb;
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+#endif /* NETIF_F_TSO */
+ return 0;
+#ifdef NETIF_F_TSO
+
+ if (skb_header_cloned(skb)) {
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
+
+ if (first->protocol == __constant_htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+ first->tx_flags |= IGB_TX_FLAGS_TSO |
+ IGB_TX_FLAGS_CSUM |
+ IGB_TX_FLAGS_IPV4;
+#ifdef NETIF_F_TSO6
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ first->tx_flags |= IGB_TX_FLAGS_TSO |
+ IGB_TX_FLAGS_CSUM;
+#endif
+ }
+
+ /* compute header lengths */
+ l4len = tcp_hdrlen(skb);
+ *hdr_len = skb_transport_offset(skb) + l4len;
+
+ /* update gso size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
+
+ /* VLAN MACLEN IPLEN */
+ vlan_macip_lens = skb_network_header_len(skb);
+ vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
+
+ igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+
+ return 1;
+#endif /* NETIF_F_TSO */
+}
+
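+/* igb_tx_csum - set up a checksum offload context descriptor for a
+ * non-TSO frame, or bail out early when neither checksum insertion nor a
+ * VLAN tag is needed. */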
+static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
+{
+ struct sk_buff *skb = first->skb;
+ u32 vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 type_tucmd = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
+ return;
+ } else {
+ u8 nexthdr = 0;
+ switch (first->protocol) {
+ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+ nexthdr = ip_hdr(skb)->protocol;
+ break;
+#ifdef NETIF_F_IPV6_CSUM
+ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ nexthdr = ipv6_hdr(skb)->nexthdr;
+ break;
+#endif
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but proto=%x!\n",
+ first->protocol);
+ }
+ break;
+ }
+
+ switch (nexthdr) {
+ case IPPROTO_TCP:
+ type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+#ifdef HAVE_SCTP
+ case IPPROTO_SCTP:
+ type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+#endif
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ nexthdr);
+ }
+ break;
+ }
+
+ /* update TX checksum flag */
+ first->tx_flags |= IGB_TX_FLAGS_CSUM;
+ }
+
+ vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
+
+ igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+}
+
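+/* IGB_SET_FLAG maps a flag bit in _input onto the corresponding result bit
+ * without a conditional: the masked flag is scaled up or down by the
+ * compile-time constant ratio between _flag and _result.  For example,
+ * IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, E1000_ADVTXD_DCMD_VLE) below
+ * evaluates to E1000_ADVTXD_DCMD_VLE when the VLAN flag is set and to 0
+ * otherwise. */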
+#define IGB_SET_FLAG(_input, _flag, _result) \
+ ((_flag <= _result) ? \
+ ((u32)(_input & _flag) * (_result / _flag)) : \
+ ((u32)(_input & _flag) / (_flag / _result)))
+
+static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
+{
+ /* set type for advanced descriptor with frame checksum insertion */
+ u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
+
+ /* set HW vlan bit if vlan is present */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
+ (E1000_ADVTXD_DCMD_VLE));
+
+ /* set segmentation bits for TSO */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
+ (E1000_ADVTXD_DCMD_TSE));
+
+ /* set timestamp bit if present */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
+ (E1000_ADVTXD_MAC_TSTAMP));
+
+ return cmd_type;
+}
+
+static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
+ union e1000_adv_tx_desc *tx_desc,
+ u32 tx_flags, unsigned int paylen)
+{
+ u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
+
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ olinfo_status |= tx_ring->reg_idx << 4;
+
+ /* insert L4 checksum */
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_CSUM,
+ (E1000_TXD_POPTS_TXSM << 8));
+
+ /* insert IPv4 checksum */
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_IPV4,
+ (E1000_TXD_POPTS_IXSM << 8));
+
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+}
+
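+/* igb_tx_map - DMA-map the skb head and fragments and fill the data
+ * descriptors, splitting any buffer larger than IGB_MAX_DATA_PER_TXD
+ * across several descriptors.  The last descriptor gets the RS/EOP bits,
+ * next_to_watch is set after a write barrier, and the tail register is
+ * bumped to hand the descriptors to hardware. */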
+static void igb_tx_map(struct igb_ring *tx_ring,
+ struct igb_tx_buffer *first,
+ const u8 hdr_len)
+{
+ struct sk_buff *skb = first->skb;
+ struct igb_tx_buffer *tx_buffer;
+ union e1000_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+ unsigned int data_len, size;
+ u32 tx_flags = first->tx_flags;
+ u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
+ u16 i = tx_ring->next_to_use;
+
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+
+ igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_buffer = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ dma += IGB_MAX_DATA_PER_TXD;
+ size -= IGB_MAX_DATA_PER_TXD;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
+ size, DMA_TO_DEVICE);
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ }
+
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= size | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+ /*
+ * Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ writel(i, tx_ring->tail);
+
+ /* we need this if more than one processor can write to our tail
+ * at a time; it synchronizes IO on IA64/Altix systems */
+ mmiowb();
+
+ return;
+
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+ if (tx_buffer == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ tx_ring->next_to_use = i;
+}
+
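+/* __igb_maybe_stop_tx - slow path of igb_maybe_stop_tx: stop the queue,
+ * then re-check the free descriptor count under the memory barrier and
+ * wake the queue again if another CPU freed enough room in the meantime. */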
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+ struct net_device *netdev = netdev_ring(tx_ring);
+
+ if (netif_is_multiqueue(netdev))
+ netif_stop_subqueue(netdev, ring_queue_index(tx_ring));
+ else
+ netif_stop_queue(netdev);
+
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in case another CPU has just
+ * made room available. */
+ if (igb_desc_unused(tx_ring) < size)
+ return -EBUSY;
+
+ /* A reprieve! */
+ if (netif_is_multiqueue(netdev))
+ netif_wake_subqueue(netdev, ring_queue_index(tx_ring));
+ else
+ netif_wake_queue(netdev);
+
+ tx_ring->tx_stats.restart_queue++;
+
+ return 0;
+}
+
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+ if (igb_desc_unused(tx_ring) >= size)
+ return 0;
+ return __igb_maybe_stop_tx(tx_ring, size);
+}
+
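+/* igb_xmit_frame_ring - main transmit path for a single ring: reserve
+ * enough descriptors (or return NETDEV_TX_BUSY), record the first buffer,
+ * handle PTP timestamping and VLAN flags, build the TSO/checksum context
+ * descriptor and map the frame. */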
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
+ struct igb_ring *tx_ring)
+{
+ struct igb_tx_buffer *first;
+ int tso;
+ u32 tx_flags = 0;
+#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD
+ unsigned short f;
+#endif
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ __be16 protocol = vlan_get_protocol(skb);
+ u8 hdr_len = 0;
+
+ /*
+ * need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ if (igb_maybe_stop_tx(tx_ring, count + 3)) {
+ /* this is a hard error */
+ return NETDEV_TX_BUSY;
+ }
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
+ skb_tx_timestamp(skb);
+
+#ifdef HAVE_PTP_1588_CLOCK
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
+ if (!adapter->ptp_tx_skb) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= IGB_TX_FLAGS_TSTAMP;
+
+ adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
+ if (adapter->hw.mac.type == e1000_82576)
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= IGB_TX_FLAGS_VLAN;
+ tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ /* record initial flags and protocol */
+ first->tx_flags = tx_flags;
+ first->protocol = protocol;
+
+ tso = igb_tso(tx_ring, first, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (!tso)
+ igb_tx_csum(tx_ring, first);
+
+ igb_tx_map(tx_ring, first, hdr_len);
+
+#ifndef HAVE_TRANS_START_IN_QUEUE
+ netdev_ring(tx_ring)->trans_start = jiffies;
+
+#endif
+ /* Make sure there is space in the ring for the next send. */
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ igb_unmap_and_free_tx_resource(tx_ring, first);
+
+ return NETDEV_TX_OK;
+}
+
+#ifdef HAVE_TX_MQ
+static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
+ struct sk_buff *skb)
+{
+ unsigned int r_idx = skb->queue_mapping;
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+#else
+#define igb_tx_queue_mapping(_adapter, _skb) (_adapter)->tx_ring[0]
+#endif
+
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ * The minimum packet size with TCTL.PSP set is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+ if (skb->len < 17) {
+ if (skb_padto(skb, 17))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ }
+
+ return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
+}
+
+/**
+ * igb_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void igb_tx_timeout(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+
+ if (hw->mac.type >= e1000_82580)
+ hw->dev_spec._82575.global_device_reset = true;
+
+ schedule_work(&adapter->reset_task);
+ E1000_WRITE_REG(hw, E1000_EICS,
+ (adapter->eims_enable_mask & ~adapter->eims_other));
+}
+
+static void igb_reset_task(struct work_struct *work)
+{
+ struct igb_adapter *adapter;
+ adapter = container_of(work, struct igb_adapter, reset_task);
+
+ igb_reinit_locked(adapter);
+}
+
+/**
+ * igb_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are updated here and also from the timer callback.
+ **/
+static struct net_device_stats *igb_get_stats(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__IGB_RESETTING, &adapter->state))
+ igb_update_stats(adapter);
+
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ /* only return the current stats */
+ return &netdev->stats;
+#else
+ /* only return the current stats */
+ return &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+}
+
+/**
+ * igb_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int igb_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ dev_err(pci_dev_to_dev(pdev), "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+#define MAX_STD_JUMBO_FRAME_SIZE 9238
+ if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+ dev_err(pci_dev_to_dev(pdev), "MTU > 9216 not supported.\n");
+ return -EINVAL;
+ }
+
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
+
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ /* igb_down has a dependency on max_frame_size */
+ adapter->max_frame_size = max_frame;
+
+ if (netif_running(netdev))
+ igb_down(adapter);
+
+ dev_info(pci_dev_to_dev(pdev), "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+ hw->dev_spec._82575.mtu = new_mtu;
+
+ if (netif_running(netdev))
+ igb_up(adapter);
+ else
+ igb_reset(adapter);
+
+ clear_bit(__IGB_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+/**
+ * igb_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+
+void igb_update_stats(struct igb_adapter *adapter)
+{
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats *net_stats = &adapter->netdev->stats;
+#else
+ struct net_device_stats *net_stats = &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+ struct e1000_hw *hw = &adapter->hw;
+#ifdef HAVE_PCI_ERS
+ struct pci_dev *pdev = adapter->pdev;
+#endif
+ u32 reg, mpc;
+ u16 phy_tmp;
+ int i;
+ u64 bytes, packets;
+#ifndef IGB_NO_LRO
+ u32 flushed = 0, coal = 0;
+ struct igb_q_vector *q_vector;
+#endif
+
+#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
+
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+#ifdef HAVE_PCI_ERS
+ if (pci_channel_offline(pdev))
+ return;
+
+#endif
+#ifndef IGB_NO_LRO
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+ if (!q_vector)
+ continue;
+ flushed += q_vector->lrolist.stats.flushed;
+ coal += q_vector->lrolist.stats.coal;
+ }
+ adapter->lro_stats.flushed = flushed;
+ adapter->lro_stats.coal = coal;
+
+#endif
+ bytes = 0;
+ packets = 0;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ u32 rqdpc_tmp = E1000_READ_REG(hw, E1000_RQDPC(i)) & 0x0FFF;
+ struct igb_ring *ring = adapter->rx_ring[i];
+ ring->rx_stats.drops += rqdpc_tmp;
+ net_stats->rx_fifo_errors += rqdpc_tmp;
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ if (!ring->vmdq_netdev) {
+ bytes += ring->rx_stats.bytes;
+ packets += ring->rx_stats.packets;
+ }
+#else
+ bytes += ring->rx_stats.bytes;
+ packets += ring->rx_stats.packets;
+#endif
+ }
+
+ net_stats->rx_bytes = bytes;
+ net_stats->rx_packets = packets;
+
+ bytes = 0;
+ packets = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igb_ring *ring = adapter->tx_ring[i];
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ if (!ring->vmdq_netdev) {
+ bytes += ring->tx_stats.bytes;
+ packets += ring->tx_stats.packets;
+ }
+#else
+ bytes += ring->tx_stats.bytes;
+ packets += ring->tx_stats.packets;
+#endif
+ }
+ net_stats->tx_bytes = bytes;
+ net_stats->tx_packets = packets;
+
+ /* read stats registers */
+ adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+ adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC);
+ adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL);
+ E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */
+ adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC);
+ adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC);
+ adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC);
+
+ adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64);
+ adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127);
+ adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255);
+ adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511);
+ adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+ adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+ adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
+ adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC);
+
+ mpc = E1000_READ_REG(hw, E1000_MPC);
+ adapter->stats.mpc += mpc;
+ net_stats->rx_fifo_errors += mpc;
+ adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC);
+ adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL);
+ adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC);
+ adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL);
+ adapter->stats.dc += E1000_READ_REG(hw, E1000_DC);
+ adapter->stats.rlec += E1000_READ_REG(hw, E1000_RLEC);
+ adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+ adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+ adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC);
+ adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+ adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+ adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC);
+ adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL);
+ E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */
+ adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC);
+ adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC);
+ adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC);
+ adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC);
+ adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH);
+ adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH);
+ adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR);
+
+ adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+ adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+ adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+ adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+ adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+ adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+
+ adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC);
+ adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+ adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT);
+ adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC);
+
+ adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+ /* read internal PHY specific stats */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
+ adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+
+ /* this stat has invalid values on i210/i211 */
+ if ((hw->mac.type != e1000_i210) &&
+ (hw->mac.type != e1000_i211))
+ adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+ }
+ adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+ adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+
+ adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC);
+ adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+ adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+ adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+ adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+ adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+ adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+ adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+ adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+
+ /* Fill out the OS statistics structure */
+ net_stats->multicast = adapter->stats.mprc;
+ net_stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /* RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC */
+ net_stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ net_stats->rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ net_stats->rx_crc_errors = adapter->stats.crcerrs;
+ net_stats->rx_frame_errors = adapter->stats.algnerrc;
+ net_stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ net_stats->tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ net_stats->tx_aborted_errors = adapter->stats.ecol;
+ net_stats->tx_window_errors = adapter->stats.latecol;
+ net_stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Phy Stats */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ if ((adapter->link_speed == SPEED_1000) &&
+ (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
+ phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
+ adapter->phy_stats.idle_errors += phy_tmp;
+ }
+ }
+
+ /* Management Stats */
+ adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC);
+ adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
+ if (hw->mac.type > e1000_82580) {
+ adapter->stats.o2bgptc += E1000_READ_REG(hw, E1000_O2BGPTC);
+ adapter->stats.o2bspc += E1000_READ_REG(hw, E1000_O2BSPC);
+ adapter->stats.b2ospc += E1000_READ_REG(hw, E1000_B2OSPC);
+ adapter->stats.b2ogprc += E1000_READ_REG(hw, E1000_B2OGPRC);
+ }
+}
+
+static irqreturn_t igb_msix_other(int irq, void *data)
+{
+ struct igb_adapter *adapter = data;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = E1000_READ_REG(hw, E1000_ICR);
+ /* reading ICR causes bit 31 of EICR to be cleared */
+
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & E1000_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ /* The DMA Out of Sync is also an indication of a spoof event
+ * in IOV mode. Check the Wrong VM Behavior register to
+ * see if it is really a spoof event. */
+ igb_check_wvbr(adapter);
+ }
+
+ /* Check for a mailbox event */
+ if (icr & E1000_ICR_VMMB)
+ igb_msg_task(adapter);
+
+ if (icr & E1000_ICR_LSC) {
+ hw->mac.get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+#ifdef HAVE_PTP_1588_CLOCK
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ /* Check for MDD event */
+ if (icr & E1000_ICR_MDDET)
+ igb_process_mdd_event(adapter);
+
+ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other);
+
+ return IRQ_HANDLED;
+}
+
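+/* igb_write_itr - flush a deferred ITR update to the vector's EITR
+ * register.  On 82575 the interval must be mirrored into the upper half of
+ * the register; later parts set the counter-ignore bit instead. */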
+static void igb_write_itr(struct igb_q_vector *q_vector)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ u32 itr_val = q_vector->itr_val & 0x7FFC;
+
+ if (!q_vector->set_itr)
+ return;
+
+ if (!itr_val)
+ itr_val = 0x4;
+
+ if (adapter->hw.mac.type == e1000_82575)
+ itr_val |= itr_val << 16;
+ else
+ itr_val |= E1000_EITR_CNT_IGNR;
+
+ writel(itr_val, q_vector->itr_register);
+ q_vector->set_itr = 0;
+}
+
+static irqreturn_t igb_msix_ring(int irq, void *data)
+{
+ struct igb_q_vector *q_vector = data;
+
+ /* Write the ITR value calculated from the previous interrupt. */
+ igb_write_itr(q_vector);
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef IGB_DCA
+static void igb_update_tx_dca(struct igb_adapter *adapter,
+ struct igb_ring *tx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT_82576;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
+ E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_DESC_DCA_EN;
+
+ E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
+}
+
+static void igb_update_rx_dca(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT_82576;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
+ E1000_DCA_RXCTRL_DESC_DCA_EN;
+
+ E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
+}
+
+static void igb_update_dca(struct igb_q_vector *q_vector)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ int cpu = get_cpu();
+
+ if (q_vector->cpu == cpu)
+ goto out_no_update;
+
+ if (q_vector->tx.ring)
+ igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
+
+ if (q_vector->rx.ring)
+ igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
+
+ q_vector->cpu = cpu;
+out_no_update:
+ put_cpu();
+}
+
+static void igb_setup_dca(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
+ return;
+
+ /* Always use CB2 mode, difference is masked in the CB driver. */
+ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ adapter->q_vector[i]->cpu = -1;
+ igb_update_dca(adapter->q_vector[i]);
+ }
+}
+
+static int __igb_notify_dca(struct device *dev, void *data)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long event = *(unsigned long *)data;
+
+ switch (event) {
+ case DCA_PROVIDER_ADD:
+ /* if already enabled, don't do it again */
+ if (adapter->flags & IGB_FLAG_DCA_ENABLED)
+ break;
+ if (dca_add_requester(dev) == E1000_SUCCESS) {
+ adapter->flags |= IGB_FLAG_DCA_ENABLED;
+ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n");
+ igb_setup_dca(adapter);
+ break;
+ }
+ /* Fall Through since DCA is disabled. */
+ case DCA_PROVIDER_REMOVE:
+ if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
+ /* without this a class_device is left
+ * hanging around in the sysfs model */
+ dca_remove_requester(dev);
+ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n");
+ adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
+ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE);
+ }
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
+ void *p)
+{
+ int ret_val;
+
+ ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
+ __igb_notify_dca);
+
+ return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
+}
+#endif /* IGB_DCA */
+
+static int igb_vf_configure(struct igb_adapter *adapter, int vf)
+{
+ unsigned char mac_addr[ETH_ALEN];
+
+ random_ether_addr(mac_addr);
+ igb_set_vf_mac(adapter, vf, mac_addr);
+
+#ifdef IFLA_VF_MAX
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ /* By default spoof check is enabled for all VFs */
+ adapter->vf_data[vf].spoofchk_enabled = true;
+#endif
+#endif
+
+ return true;
+}
+
+static void igb_ping_all_vfs(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ping;
+ int i;
+
+ for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
+ ping = E1000_PF_CONTROL_MSG;
+ if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
+ ping |= E1000_VT_MSGTYPE_CTS;
+ e1000_write_mbx(hw, &ping, 1, i);
+ }
+}
+
+/**
+ * igb_mta_set - Set multicast filter table address
+ * @adapter: pointer to the adapter structure
+ * @hash_value: determines the MTA register and bit to set
+ *
+ * The multicast table address is a register array of 32-bit registers.
+ * The hash_value is used to determine what register the bit is in, the
+ * current value is read, the new bit is OR'd in and the new value is
+ * written back into the register.
+ **/
+void igb_mta_set(struct igb_adapter *adapter, u32 hash_value)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 hash_bit, hash_reg, mta;
+
+ /*
+ * The MTA is a register array of 32-bit registers. It is
+ * treated like an array of (32*mta_reg_count) bits. We want to
+ * set bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
+ * mask to bits 31:5 of the hash value which gives us the
+ * register we're modifying. The hash bit within that register
+ * is determined by the lower 5 bits of the hash value.
+ */
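+ /* Illustrative example: assuming a 128-register MTA, hash_value 0x2C5
+ * selects register (0x2C5 >> 5) & 0x7F = 0x16 and bit 0x2C5 & 0x1F = 5.
+ */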
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
+
+ mta |= (1 << hash_bit);
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH(hw);
+}
+
+static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+
+ vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
+ IGB_VF_FLAG_MULTI_PROMISC);
+ vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+#ifdef IGB_ENABLE_VF_PROMISC
+ if (*msgbuf & E1000_VF_SET_PROMISC_UNICAST) {
+ vmolr |= E1000_VMOLR_ROPE;
+ vf_data->flags |= IGB_VF_FLAG_UNI_PROMISC;
+ *msgbuf &= ~E1000_VF_SET_PROMISC_UNICAST;
+ }
+#endif
+ if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
+ vmolr |= E1000_VMOLR_MPME;
+ vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
+ *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
+ } else {
+ /*
+ * if we have hashes and we are clearing a multicast promisc
+ * flag we need to write the hashes to the MTA as this step
+ * was previously skipped
+ */
+ if (vf_data->num_vf_mc_hashes > 30) {
+ vmolr |= E1000_VMOLR_MPME;
+ } else if (vf_data->num_vf_mc_hashes) {
+ int j;
+ vmolr |= E1000_VMOLR_ROMPE;
+ for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]);
+ }
+ }
+
+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
+
+ /* there are flags left unprocessed, likely not supported */
+ if (*msgbuf & E1000_VT_MSGINFO_MASK)
+ return -EINVAL;
+
+ return 0;
+
+}
+
+static int igb_set_vf_multicasts(struct igb_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ u16 *hash_list = (u16 *)&msgbuf[1];
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ int i;
+
+ /* salt away the number of multicast addresses assigned
+ * to this VF for later use to restore when the PF multicast
+ * list changes
+ */
+ vf_data->num_vf_mc_hashes = n;
+
+ /* only up to 30 hash values supported */
+ if (n > 30)
+ n = 30;
+
+ /* store the hashes for later use */
+ for (i = 0; i < n; i++)
+ vf_data->vf_mc_hashes[i] = hash_list[i];
+
+ /* Flush and reset the mta with the new values */
+ igb_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
+static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct vf_data_storage *vf_data;
+ int i, j;
+
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
+ vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+ vf_data = &adapter->vf_data[i];
+
+ if ((vf_data->num_vf_mc_hashes > 30) ||
+ (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
+ vmolr |= E1000_VMOLR_MPME;
+ } else if (vf_data->num_vf_mc_hashes) {
+ vmolr |= E1000_VMOLR_ROMPE;
+ for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]);
+ }
+ E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
+ }
+}
+
+static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 pool_mask, reg, vid;
+ u16 vlan_default;
+ int i;
+
+ pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = E1000_READ_REG(hw, E1000_VLVF(i));
+
+ /* remove the vf from the pool */
+ reg &= ~pool_mask;
+
+ /* if pool is empty then remove entry from vfta */
+ if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
+ (reg & E1000_VLVF_VLANID_ENABLE)) {
+ vid = reg & E1000_VLVF_VLANID_MASK;
+ reg = 0;
+ igb_vfta_set(adapter, vid, FALSE);
+ }
+
+ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
+ }
+
+ adapter->vf_data[vf].vlans_enabled = 0;
+
+ vlan_default = adapter->vf_data[vf].default_vf_vlan_id;
+ if (vlan_default)
+ igb_vlvf_set(adapter, vlan_default, true, vf);
+}
+
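+/* igb_vlvf_set - add a VF pool to (or remove it from) the shared VLVF
+ * filter entry for a VLAN ID.  A previously unused entry also programs the
+ * VID into the VFTA; the first VLAN enabled on a VF grows its VMOLR RLPML
+ * by 4 bytes for the tag, and removing the VF's last VLAN shrinks it
+ * again. */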
+s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg, i;
+
+ /* The vlvf table only exists on 82576 hardware and newer */
+ if (hw->mac.type < e1000_82576)
+ return -1;
+
+ /* we only need to do this if VMDq is enabled */
+ if (!adapter->vmdq_pools)
+ return -1;
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = E1000_READ_REG(hw, E1000_VLVF(i));
+ if ((reg & E1000_VLVF_VLANID_ENABLE) &&
+ vid == (reg & E1000_VLVF_VLANID_MASK))
+ break;
+ }
+
+ if (add) {
+ if (i == E1000_VLVF_ARRAY_SIZE) {
+ /* Did not find a matching VLAN ID entry that was
+ * enabled. Search for a free filter entry, i.e.
+ * one without the enable bit set
+ */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = E1000_READ_REG(hw, E1000_VLVF(i));
+ if (!(reg & E1000_VLVF_VLANID_ENABLE))
+ break;
+ }
+ }
+ if (i < E1000_VLVF_ARRAY_SIZE) {
+ /* Found an enabled/available entry */
+ reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+
+ /* if !enabled we need to set this up in vfta */
+ if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
+ /* add VID to filter table */
+ igb_vfta_set(adapter, vid, TRUE);
+ reg |= E1000_VLVF_VLANID_ENABLE;
+ }
+ reg &= ~E1000_VLVF_VLANID_MASK;
+ reg |= vid;
+ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
+
+ /* do not modify RLPML for PF devices */
+ if (vf >= adapter->vfs_allocated_count)
+ return E1000_SUCCESS;
+
+ if (!adapter->vf_data[vf].vlans_enabled) {
+ u32 size;
+ reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
+ size = reg & E1000_VMOLR_RLPML_MASK;
+ size += 4;
+ reg &= ~E1000_VMOLR_RLPML_MASK;
+ reg |= size;
+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
+ }
+
+ adapter->vf_data[vf].vlans_enabled++;
+ }
+ } else {
+ if (i < E1000_VLVF_ARRAY_SIZE) {
+ /* remove vf from the pool */
+ reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
+ /* if pool is empty then remove entry from vfta */
+ if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
+ reg = 0;
+ igb_vfta_set(adapter, vid, FALSE);
+ }
+ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
+
+ /* do not modify RLPML for PF devices */
+ if (vf >= adapter->vfs_allocated_count)
+ return E1000_SUCCESS;
+
+ adapter->vf_data[vf].vlans_enabled--;
+ if (!adapter->vf_data[vf].vlans_enabled) {
+ u32 size;
+ reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
+ size = reg & E1000_VMOLR_RLPML_MASK;
+ size -= 4;
+ reg &= ~E1000_VMOLR_RLPML_MASK;
+ reg |= size;
+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
+ }
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+#ifdef IFLA_VF_MAX
+static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (vid)
+ E1000_WRITE_REG(hw, E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
+ else
+ E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0);
+}
+
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ /* VLAN IDs accepted range 0-4094 */
+ if ((vf >= adapter->vfs_allocated_count) || (vlan > VLAN_VID_MASK-1) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
+ if (err)
+ goto out;
+ igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ igb_set_vmolr(adapter, vf, !vlan);
+ adapter->vf_data[vf].pf_vlan = vlan;
+ adapter->vf_data[vf].pf_qos = qos;
+ igb_set_vf_vlan_strip(adapter, vf, true);
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ if (adapter->vf_data[vf].pf_vlan)
+ dev_info(&adapter->pdev->dev,
+ "Clearing VLAN on VF %d\n", vf);
+ igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
+ false, vf);
+ igb_set_vmvir(adapter, vlan, vf);
+ igb_set_vmolr(adapter, vf, true);
+ igb_set_vf_vlan_strip(adapter, vf, false);
+ adapter->vf_data[vf].pf_vlan = 0;
+ adapter->vf_data[vf].pf_qos = 0;
+ }
+out:
+ return err;
+}
+
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
+ bool setting)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 dtxswc, reg_offset;
+
+ if (!adapter->vfs_allocated_count)
+ return -EOPNOTSUPP;
+
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+
+ reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
+ dtxswc = E1000_READ_REG(hw, reg_offset);
+ if (setting)
+ dtxswc |= ((1 << vf) |
+ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ else
+ dtxswc &= ~((1 << vf) |
+ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ E1000_WRITE_REG(hw, reg_offset, dtxswc);
+
+ adapter->vf_data[vf].spoofchk_enabled = setting;
+ return E1000_SUCCESS;
+}
+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
+#endif /* IFLA_VF_MAX */
+
+static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ u32 reg;
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = E1000_READ_REG(hw, E1000_VLVF(i));
+ if ((reg & E1000_VLVF_VLANID_ENABLE) &&
+ vid == (reg & E1000_VLVF_VLANID_MASK))
+ break;
+ }
+
+ if (i >= E1000_VLVF_ARRAY_SIZE)
+ i = -1;
+
+ return i;
+}
+
+static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
+ int err = 0;
+
+ if (vid)
+ igb_set_vf_vlan_strip(adapter, vf, true);
+ else
+ igb_set_vf_vlan_strip(adapter, vf, false);
+
+ /* If in promiscuous mode we need to make sure the PF also has
+ * the VLAN filter set.
+ */
+ if (add && (adapter->netdev->flags & IFF_PROMISC))
+ err = igb_vlvf_set(adapter, vid, add,
+ adapter->vfs_allocated_count);
+ if (err)
+ goto out;
+
+ err = igb_vlvf_set(adapter, vid, add, vf);
+
+ if (err)
+ goto out;
+
+ /* Go through all the checks to see if the VLAN filter should
+ * be wiped completely.
+ */
+ if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
+ u32 vlvf, bits;
+
+ int regndx = igb_find_vlvf_entry(adapter, vid);
+ if (regndx < 0)
+ goto out;
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ vlvf = bits = E1000_READ_REG(hw, E1000_VLVF(regndx));
+ bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
+ adapter->vfs_allocated_count);
+ /* If the filter was removed then ensure PF pool bit
+ * is cleared if the PF only added itself to the pool
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid &&
+#ifndef HAVE_VLAN_RX_REGISTER
+ !test_bit(vid, adapter->active_vlans) &&
+#endif
+ !bits)
+ igb_vlvf_set(adapter, vid, add,
+ adapter->vfs_allocated_count);
+ }
+
+out:
+ return err;
+}
+
+static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* clear all flags except the one noting that the PF has set the MAC */
+ adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
+ adapter->vf_data[vf].last_nack = jiffies;
+
+ /* reset offloads to defaults */
+ igb_set_vmolr(adapter, vf, true);
+
+ /* reset vlans for device */
+ igb_clear_vf_vfta(adapter, vf);
+#ifdef IFLA_VF_MAX
+ if (adapter->vf_data[vf].pf_vlan)
+ igb_ndo_set_vf_vlan(adapter->netdev, vf,
+ adapter->vf_data[vf].pf_vlan,
+ adapter->vf_data[vf].pf_qos);
+ else
+ igb_clear_vf_vfta(adapter, vf);
+#endif
+
+ /* reset multicast table array for vf */
+ adapter->vf_data[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ igb_set_rx_mode(adapter->netdev);
+
+ /*
+ * Reset the VF's TDWBAL and TDWBAH registers, which are not
+ * cleared by a VFLR
+ */
+ E1000_WRITE_REG(hw, E1000_TDWBAH(vf), 0);
+ E1000_WRITE_REG(hw, E1000_TDWBAL(vf), 0);
+ if (hw->mac.type == e1000_82576) {
+ E1000_WRITE_REG(hw, E1000_TDWBAH(IGB_MAX_VF_FUNCTIONS + vf), 0);
+ E1000_WRITE_REG(hw, E1000_TDWBAL(IGB_MAX_VF_FUNCTIONS + vf), 0);
+ }
+}
+
+static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
+{
+ unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+
+ /* generate a new mac address as we were hotplug removed/added */
+ if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
+ random_ether_addr(vf_mac);
+
+ /* process remaining reset events */
+ igb_vf_reset(adapter, vf);
+}
+
+static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+ u32 reg, msgbuf[3];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* process all the same items cleared in a function level reset */
+ igb_vf_reset(adapter, vf);
+
+ /* set vf mac address */
+ igb_del_mac_filter(adapter, vf_mac, vf);
+ igb_add_mac_filter(adapter, vf_mac, vf);
+
+ /* enable transmit and receive for vf */
+ reg = E1000_READ_REG(hw, E1000_VFTE);
+ E1000_WRITE_REG(hw, E1000_VFTE, reg | (1 << vf));
+ reg = E1000_READ_REG(hw, E1000_VFRE);
+ E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf));
+
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, 6);
+ e1000_write_mbx(hw, msgbuf, 3, vf);
+}
+
+static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
+{
+ /*
+ * The VF MAC Address is stored in a packed array of bytes
+ * starting at the second 32 bit word of the msg array
+ */
+ unsigned char *addr = (unsigned char *)&msg[1];
+ int err = -1;
+
+ if (is_valid_ether_addr(addr))
+ err = igb_set_vf_mac(adapter, vf, addr);
+
+ return err;
+}
+
+static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ u32 msg = E1000_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
+ time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
+ e1000_write_mbx(hw, &msg, 1, vf);
+ vf_data->last_nack = jiffies;
+ }
+}
+
+static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 msgbuf[E1000_VFMAILBOX_SIZE];
+ struct e1000_hw *hw = &adapter->hw;
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ s32 retval;
+
+ retval = e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
+
+ if (retval) {
+ dev_err(pci_dev_to_dev(pdev), "Error receiving message from VF\n");
+ return;
+ }
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
+ return;
+
+ /*
+ * until the vf completes a reset it should not be
+ * allowed to start any configuration.
+ */
+
+ if (msgbuf[0] == E1000_VF_RESET) {
+ igb_vf_reset_msg(adapter, vf);
+ return;
+ }
+
+ if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
+ msgbuf[0] = E1000_VT_MSGTYPE_NACK;
+ if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
+ e1000_write_mbx(hw, msgbuf, 1, vf);
+ vf_data->last_nack = jiffies;
+ }
+ return;
+ }
+
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case E1000_VF_SET_MAC_ADDR:
+ retval = -EINVAL;
+#ifndef IGB_DISABLE_VF_MAC_SET
+ if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
+ retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
+ else
+ DPRINTK(DRV, INFO,
+ "VF %d attempted to override administratively "
+ "set MAC address\nReload the VF driver to "
+ "resume operations\n", vf);
+#endif
+ break;
+ case E1000_VF_SET_PROMISC:
+ retval = igb_set_vf_promisc(adapter, msgbuf, vf);
+ break;
+ case E1000_VF_SET_MULTICAST:
+ retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
+ break;
+ case E1000_VF_SET_LPE:
+ retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
+ break;
+ case E1000_VF_SET_VLAN:
+ retval = -1;
+#ifdef IFLA_VF_MAX
+ if (vf_data->pf_vlan)
+ DPRINTK(DRV, INFO,
+ "VF %d attempted to override administratively "
+ "set VLAN tag\nReload the VF driver to "
+ "resume operations\n", vf);
+ else
+#endif
+ retval = igb_set_vf_vlan(adapter, msgbuf, vf);
+ break;
+ default:
+ dev_err(pci_dev_to_dev(pdev), "Unhandled Msg %08x\n", msgbuf[0]);
+ retval = -E1000_ERR_MBX;
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
+
+ e1000_write_mbx(hw, msgbuf, 1, vf);
+}
+
+static void igb_msg_task(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vf;
+
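+	/* the e1000_check_for_*() mailbox helpers return 0 when the
+	 * corresponding event (reset, message, ack) is pending for this VF */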
+ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
+ /* process any reset requests */
+ if (!e1000_check_for_rst(hw, vf))
+ igb_vf_reset_event(adapter, vf);
+
+ /* process any messages pending */
+ if (!e1000_check_for_msg(hw, vf))
+ igb_rcv_msg_from_vf(adapter, vf);
+
+ /* process any acks */
+ if (!e1000_check_for_ack(hw, vf))
+ igb_rcv_ack_from_vf(adapter, vf);
+ }
+}
+
+/**
+ * igb_set_uta - Set unicast filter table address
+ * @adapter: board private structure
+ *
+ * The unicast table address is a register array of 32-bit registers.
+ * The table is meant to be used in a way similar to how the MTA is used;
+ * however, due to certain limitations in the hardware it is necessary to
+ * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
+ * enable bit to allow vlan tag stripping when promiscuous mode is enabled.
+ **/
+static void igb_set_uta(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ /* The UTA table only exists on 82576 hardware and newer */
+ if (hw->mac.type < e1000_82576)
+ return;
+
+ /* we only need to do this if VMDq is enabled */
+ if (!adapter->vmdq_pools)
+ return;
+
+ for (i = 0; i < hw->mac.uta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, ~0);
+}
+
+/**
+ * igb_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t igb_intr_msi(int irq, void *data)
+{
+ struct igb_adapter *adapter = data;
+ struct igb_q_vector *q_vector = adapter->q_vector[0];
+ struct e1000_hw *hw = &adapter->hw;
+ /* read ICR disables interrupts using IAM */
+ u32 icr = E1000_READ_REG(hw, E1000_ICR);
+
+ igb_write_itr(q_vector);
+
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & E1000_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
+ if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ hw->mac.get_link_status = 1;
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+#ifdef HAVE_PTP_1588_CLOCK
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * igb_intr - Legacy Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t igb_intr(int irq, void *data)
+{
+ struct igb_adapter *adapter = data;
+ struct igb_q_vector *q_vector = adapter->q_vector[0];
+ struct e1000_hw *hw = &adapter->hw;
+ /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
+ * need for the IMC write */
+ u32 icr = E1000_READ_REG(hw, E1000_ICR);
+
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt */
+ if (!(icr & E1000_ICR_INT_ASSERTED))
+ return IRQ_NONE;
+
+ igb_write_itr(q_vector);
+
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & E1000_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
+ if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ hw->mac.get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+#ifdef HAVE_PTP_1588_CLOCK
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+void igb_ring_irq_enable(struct igb_q_vector *q_vector)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
+ (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
+ if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
+ igb_set_itr(q_vector);
+ else
+ igb_update_ring_itr(q_vector);
+ }
+
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ E1000_WRITE_REG(hw, E1000_EIMS, q_vector->eims_value);
+ else
+ igb_irq_enable(adapter);
+ }
+}
+
+/**
+ * igb_poll - NAPI Rx polling callback
+ * @napi: napi polling structure
+ * @budget: count of how many packets we should handle
+ **/
+static int igb_poll(struct napi_struct *napi, int budget)
+{
+ struct igb_q_vector *q_vector = container_of(napi, struct igb_q_vector, napi);
+ bool clean_complete = true;
+
+#ifdef IGB_DCA
+ if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
+ igb_update_dca(q_vector);
+#endif
+ if (q_vector->tx.ring)
+ clean_complete = igb_clean_tx_irq(q_vector);
+
+ if (q_vector->rx.ring)
+ clean_complete &= igb_clean_rx_irq(q_vector, budget);
+
+#ifndef HAVE_NETDEV_NAPI_LIST
+ /* if netdev is disabled we need to stop polling */
+ if (!netif_running(q_vector->adapter->netdev))
+ clean_complete = true;
+
+#endif
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* If not enough Rx work done, exit the polling mode */
+ napi_complete(napi);
+ igb_ring_irq_enable(q_vector);
+
+ return 0;
+}
+
+/**
+ * igb_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: pointer to q_vector containing needed info
+ * returns TRUE if ring is completely cleaned
+ **/
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct igb_ring *tx_ring = q_vector->tx.ring;
+ struct igb_tx_buffer *tx_buffer;
+ union e1000_adv_tx_desc *tx_desc;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = q_vector->tx.work_limit;
+ unsigned int i = tx_ring->next_to_clean;
+
+ if (test_bit(__IGB_DOWN, &adapter->state))
+ return true;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IGB_TX_DESC(tx_ring, i);
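+	/* bias the index by -count so the ring-wrap check below reduces to a
+	 * simple test against zero */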
+ i -= tx_ring->count;
+
+ do {
+ union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+
+ /* free the skb */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* clear last DMA location and unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ netdev_tx_completed_queue(txring_txq(tx_ring),
+ total_packets, total_bytes);
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ tx_ring->tx_stats.bytes += total_bytes;
+ tx_ring->tx_stats.packets += total_packets;
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+#ifdef DEBUG
+ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags) &&
+ !(adapter->disable_hw_reset && adapter->tx_hang_detected)) {
+#else
+ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
+#endif
+ struct e1000_hw *hw = &adapter->hw;
+
+		/* Detect a transmit hang in hardware; this serializes the
+		 * check with the clearing of time_stamp and movement of i */
+ clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+ if (tx_buffer->next_to_watch &&
+ time_after(jiffies, tx_buffer->time_stamp +
+ (adapter->tx_timeout_factor * HZ))
+ && !(E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STATUS_TXOFF)) {
+
+ /* detected Tx unit hang */
+#ifdef DEBUG
+ adapter->tx_hang_detected = TRUE;
+ if (adapter->disable_hw_reset) {
+ DPRINTK(DRV, WARNING,
+ "Deactivating netdev watchdog timer\n");
+ if (del_timer(&netdev_ring(tx_ring)->watchdog_timer))
+ dev_put(netdev_ring(tx_ring));
+#ifndef HAVE_NET_DEVICE_OPS
+ netdev_ring(tx_ring)->tx_timeout = NULL;
+#endif
+ }
+#endif /* DEBUG */
+ dev_err(tx_ring->dev,
+ "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%p>\n"
+ " jiffies <%lx>\n"
+ " desc.status <%x>\n",
+ tx_ring->queue_index,
+ E1000_READ_REG(hw, E1000_TDH(tx_ring->reg_idx)),
+ readl(tx_ring->tail),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_buffer->time_stamp,
+ tx_buffer->next_to_watch,
+ jiffies,
+ tx_buffer->next_to_watch->wb.status);
+ if (netif_is_multiqueue(netdev_ring(tx_ring)))
+ netif_stop_subqueue(netdev_ring(tx_ring),
+ ring_queue_index(tx_ring));
+ else
+ netif_stop_queue(netdev_ring(tx_ring));
+
+ /* we are about to reset, no point in enabling stuff */
+ return true;
+ }
+ }
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
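+	/* wake the queue once enough descriptors have been reclaimed to hold
+	 * roughly two worst-case frames */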
+ if (unlikely(total_packets &&
+ netif_carrier_ok(netdev_ring(tx_ring)) &&
+ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_is_multiqueue(netdev_ring(tx_ring))) {
+ if (__netif_subqueue_stopped(netdev_ring(tx_ring),
+ ring_queue_index(tx_ring)) &&
+ !(test_bit(__IGB_DOWN, &adapter->state))) {
+ netif_wake_subqueue(netdev_ring(tx_ring),
+ ring_queue_index(tx_ring));
+ tx_ring->tx_stats.restart_queue++;
+ }
+ } else {
+ if (netif_queue_stopped(netdev_ring(tx_ring)) &&
+ !(test_bit(__IGB_DOWN, &adapter->state))) {
+ netif_wake_queue(netdev_ring(tx_ring));
+ tx_ring->tx_stats.restart_queue++;
+ }
+ }
+ }
+
+ return !!budget;
+}
+
+#ifdef HAVE_VLAN_RX_REGISTER
+/**
+ * igb_receive_skb - helper function to handle rx indications
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ **/
+static void igb_receive_skb(struct igb_q_vector *q_vector,
+ struct sk_buff *skb)
+{
+ struct vlan_group **vlgrp = netdev_priv(skb->dev);
+
+ if (IGB_CB(skb)->vid) {
+ if (*vlgrp) {
+ vlan_gro_receive(&q_vector->napi, *vlgrp,
+ IGB_CB(skb)->vid, skb);
+ } else {
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ napi_gro_receive(&q_vector->napi, skb);
+ }
+}
+
+#endif /* HAVE_VLAN_RX_REGISTER */
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+/**
+ * igb_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *old_buff)
+{
+ struct igb_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
+ old_buff->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+}
+
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+ struct page *page,
+ unsigned int truesize)
+{
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ return false;
+#endif
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+
+ return true;
+}
+
+/**
+ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = IGB_RX_BUFSZ;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
+
+ if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+#ifdef HAVE_PTP_1588_CLOCK
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGB_TS_HDR_LEN;
+ size -= IGB_TS_HDR_LEN;
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(page) == numa_node_id()))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+}
+
+static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct igb_rx_buffer *rx_buffer;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IGB_RX_HDR_LEN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return NULL;
+ }
+
+ /*
+ * we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
+#endif
+static inline void igb_rx_checksum(struct igb_ring *ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* Ignore Checksum bit is set */
+ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
+ return;
+
+ /* Rx checksum disabled via ethtool */
+ if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM))
+ return;
+
+ /* TCP/UDP checksum error bit is set */
+ if (igb_test_staterr(rx_desc,
+ E1000_RXDEXT_STATERR_TCPE |
+ E1000_RXDEXT_STATERR_IPE)) {
+ /*
+ * work around errata with sctp packets where the TCPE aka
+ * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
+ * packets, (aka let the stack check the crc32c)
+ */
+ if (!((skb->len == 60) &&
+ test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags)))
+ ring->rx_stats.csum_err++;
+
+ /* let the stack verify checksum errors */
+ return;
+ }
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
+ E1000_RXD_STAT_UDPCS))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+#ifdef NETIF_F_RXHASH
+static inline void igb_rx_hash(struct igb_ring *ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (netdev_ring(ring)->features & NETIF_F_RXHASH)
+ skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ PKT_HASH_TYPE_L3);
+}
+
+#endif
+#ifndef IGB_NO_LRO
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+/**
+ * igb_merge_active_tail - merge active tail into lro skb
+ * @tail: pointer to active tail in frag_list
+ *
+ * This function merges the length and data of an active tail into the
+ * skb containing the frag_list. It resets the tail's pointer to the head,
+ * but it leaves the head's pointer to the tail intact.
+ **/
+static inline struct sk_buff *igb_merge_active_tail(struct sk_buff *tail)
+{
+ struct sk_buff *head = IGB_CB(tail)->head;
+
+ if (!head)
+ return tail;
+
+ head->len += tail->len;
+ head->data_len += tail->len;
+ head->truesize += tail->len;
+
+ IGB_CB(tail)->head = NULL;
+
+ return head;
+}
+
+/**
+ * igb_add_active_tail - adds an active tail into the skb frag_list
+ * @head: pointer to the start of the skb
+ * @tail: pointer to active tail to add to frag_list
+ *
+ * This function adds an active tail to the end of the frag list. This tail
+ * will still be receiving data so we cannot yet add its stats to the main
+ * skb. That is done via igb_merge_active_tail.
+ **/
+static inline void igb_add_active_tail(struct sk_buff *head, struct sk_buff *tail)
+{
+ struct sk_buff *old_tail = IGB_CB(head)->tail;
+
+ if (old_tail) {
+ igb_merge_active_tail(old_tail);
+ old_tail->next = tail;
+ } else {
+ skb_shinfo(head)->frag_list = tail;
+ }
+
+ IGB_CB(tail)->head = head;
+ IGB_CB(head)->tail = tail;
+
+ IGB_CB(head)->append_cnt++;
+}
+
+/**
+ * igb_close_active_frag_list - cleanup pointers on a frag_list skb
+ * @head: pointer to head of an active frag list
+ *
+ * This function clears the tail pointer (IGB_CB(head)->tail) on an active
+ * frag_list and returns true if the pointer was actually set
+ **/
+static inline bool igb_close_active_frag_list(struct sk_buff *head)
+{
+ struct sk_buff *tail = IGB_CB(head)->tail;
+
+ if (!tail)
+ return false;
+
+ igb_merge_active_tail(tail);
+
+ IGB_CB(head)->tail = NULL;
+
+ return true;
+}
+
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+/**
+ * igb_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled
+ * @rx_ring: rx descriptor ring the packet was received on
+ * @rx_desc: pointer to the rx descriptor
+ * @skb: pointer to the skb to be merged
+ *
+ **/
+static inline bool igb_can_lro(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+ /* verify hardware indicates this is IPv4/TCP */
+	if ((!(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP)) ||
+ !(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4))))
+ return false;
+
+ /* .. and LRO is enabled */
+ if (!(netdev_ring(rx_ring)->features & NETIF_F_LRO))
+ return false;
+
+ /* .. and we are not in promiscuous mode */
+ if (netdev_ring(rx_ring)->flags & IFF_PROMISC)
+ return false;
+
+ /* .. and the header is large enough for us to read IP/TCP fields */
+ if (!pskb_may_pull(skb, sizeof(struct igb_lrohdr)))
+ return false;
+
+ /* .. and there are no VLANs on packet */
+ if (skb->protocol != __constant_htons(ETH_P_IP))
+ return false;
+
+ /* .. and we are version 4 with no options */
+ if (*(u8 *)iph != 0x45)
+ return false;
+
+ /* .. and the packet is not fragmented */
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
+ return false;
+
+ /* .. and that next header is TCP */
+ if (iph->protocol != IPPROTO_TCP)
+ return false;
+
+ return true;
+}
+
+static inline struct igb_lrohdr *igb_lro_hdr(struct sk_buff *skb)
+{
+ return (struct igb_lrohdr *)skb->data;
+}
+
+/**
+ * igb_lro_flush - Indicate packets to upper layer.
+ *
+ * Update the IP and TCP headers of the head skb if more than one
+ * skb is chained, and indicate the packet to the upper layer.
+ **/
+static void igb_lro_flush(struct igb_q_vector *q_vector,
+ struct sk_buff *skb)
+{
+ struct igb_lro_list *lrolist = &q_vector->lrolist;
+
+ __skb_unlink(skb, &lrolist->active);
+
+ if (IGB_CB(skb)->append_cnt) {
+ struct igb_lrohdr *lroh = igb_lro_hdr(skb);
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ /* close any active lro contexts */
+ igb_close_active_frag_list(skb);
+
+#endif
+ /* incorporate ip header and re-calculate checksum */
+ lroh->iph.tot_len = ntohs(skb->len);
+ lroh->iph.check = 0;
+
+ /* header length is 5 since we know no options exist */
+ lroh->iph.check = ip_fast_csum((u8 *)lroh, 5);
+
+ /* clear TCP checksum to indicate we are an LRO frame */
+ lroh->th.check = 0;
+
+ /* incorporate latest timestamp into the tcp header */
+ if (IGB_CB(skb)->tsecr) {
+ lroh->ts[2] = IGB_CB(skb)->tsecr;
+ lroh->ts[1] = htonl(IGB_CB(skb)->tsval);
+ }
+#ifdef NETIF_F_GSO
+
+ skb_shinfo(skb)->gso_size = IGB_CB(skb)->mss;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+#endif
+ }
+
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_receive_skb(q_vector, skb);
+#else
+ napi_gro_receive(&q_vector->napi, skb);
+#endif
+ lrolist->stats.flushed++;
+}
+
+static void igb_lro_flush_all(struct igb_q_vector *q_vector)
+{
+ struct igb_lro_list *lrolist = &q_vector->lrolist;
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp)
+ igb_lro_flush(q_vector, skb);
+}
+
+/**
+ * igb_lro_header_ok - validate LRO eligibility and initialize per-skb LRO state
+ **/
+static void igb_lro_header_ok(struct sk_buff *skb)
+{
+ struct igb_lrohdr *lroh = igb_lro_hdr(skb);
+ u16 opt_bytes, data_len;
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ IGB_CB(skb)->tail = NULL;
+#endif
+ IGB_CB(skb)->tsecr = 0;
+ IGB_CB(skb)->append_cnt = 0;
+ IGB_CB(skb)->mss = 0;
+
+ /* ensure that the checksum is valid */
+ if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+ return;
+
+ /* If we see CE codepoint in IP header, packet is not mergeable */
+ if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph)))
+ return;
+
+ /* ensure no bits set besides ack or psh */
+ if (lroh->th.fin || lroh->th.syn || lroh->th.rst ||
+ lroh->th.urg || lroh->th.ece || lroh->th.cwr ||
+ !lroh->th.ack)
+ return;
+
+ /* store the total packet length */
+ data_len = ntohs(lroh->iph.tot_len);
+
+ /* remove any padding from the end of the skb */
+ __pskb_trim(skb, data_len);
+
+ /* remove header length from data length */
+ data_len -= sizeof(struct igb_lrohdr);
+
+ /*
+	 * check for timestamps. Since the only option we handle is timestamps,
+ * we only have to handle the simple case of aligned timestamps
+ */
+ opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr);
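+	/* an aligned timestamp option is laid out as NOP, NOP, kind
+	 * (TCPOPT_TIMESTAMP), length (TCPOLEN_TIMESTAMP), followed by the
+	 * two 32-bit timestamp values */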
+ if (opt_bytes != 0) {
+ if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) ||
+ !pskb_may_pull(skb, sizeof(struct igb_lrohdr) +
+ TCPOLEN_TSTAMP_ALIGNED) ||
+ (lroh->ts[0] != htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP)) ||
+ (lroh->ts[2] == 0)) {
+ return;
+ }
+
+ IGB_CB(skb)->tsval = ntohl(lroh->ts[1]);
+ IGB_CB(skb)->tsecr = lroh->ts[2];
+
+ data_len -= TCPOLEN_TSTAMP_ALIGNED;
+ }
+
+ /* record data_len as mss for the packet */
+ IGB_CB(skb)->mss = data_len;
+ IGB_CB(skb)->next_seq = ntohl(lroh->th.seq);
+}
+
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+static void igb_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb)
+{
+ struct skb_shared_info *sh_info;
+ struct skb_shared_info *new_skb_info;
+ unsigned int data_len;
+
+ sh_info = skb_shinfo(lro_skb);
+ new_skb_info = skb_shinfo(new_skb);
+
+ /* copy frags into the last skb */
+ memcpy(sh_info->frags + sh_info->nr_frags,
+ new_skb_info->frags,
+ new_skb_info->nr_frags * sizeof(skb_frag_t));
+
+ /* copy size data over */
+ sh_info->nr_frags += new_skb_info->nr_frags;
+ data_len = IGB_CB(new_skb)->mss;
+ lro_skb->len += data_len;
+ lro_skb->data_len += data_len;
+ lro_skb->truesize += data_len;
+
+ /* wipe record of data from new_skb */
+ new_skb_info->nr_frags = 0;
+ new_skb->len = new_skb->data_len = 0;
+ dev_kfree_skb_any(new_skb);
+}
+
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+/**
+ * igb_lro_receive - if able, queue skb into lro chain
+ * @q_vector: structure containing interrupt and ring information
+ * @new_skb: pointer to current skb being checked
+ *
+ * Checks whether the given skb is eligible for LRO and, if so, chains it
+ * to the existing lro_skb for the same flow. If no LRO context exists for
+ * the flow, a new one is created.
+ **/
+static void igb_lro_receive(struct igb_q_vector *q_vector,
+ struct sk_buff *new_skb)
+{
+ struct sk_buff *lro_skb;
+ struct igb_lro_list *lrolist = &q_vector->lrolist;
+ struct igb_lrohdr *lroh = igb_lro_hdr(new_skb);
+ __be32 saddr = lroh->iph.saddr;
+ __be32 daddr = lroh->iph.daddr;
+ __be32 tcp_ports = *(__be32 *)&lroh->th;
+ u16 data_len;
+#ifdef HAVE_VLAN_RX_REGISTER
+ u16 vid = IGB_CB(new_skb)->vid;
+#else
+ u16 vid = new_skb->vlan_tci;
+#endif
+
+ igb_lro_header_ok(new_skb);
+
+ /*
+ * we have a packet that might be eligible for LRO,
+ * so see if it matches anything we might expect
+ */
+ skb_queue_walk(&lrolist->active, lro_skb) {
+ if (*(__be32 *)&igb_lro_hdr(lro_skb)->th != tcp_ports ||
+ igb_lro_hdr(lro_skb)->iph.saddr != saddr ||
+ igb_lro_hdr(lro_skb)->iph.daddr != daddr)
+ continue;
+
+#ifdef HAVE_VLAN_RX_REGISTER
+ if (IGB_CB(lro_skb)->vid != vid)
+#else
+ if (lro_skb->vlan_tci != vid)
+#endif
+ continue;
+
+ /* out of order packet */
+ if (IGB_CB(lro_skb)->next_seq != IGB_CB(new_skb)->next_seq) {
+ igb_lro_flush(q_vector, lro_skb);
+ IGB_CB(new_skb)->mss = 0;
+ break;
+ }
+
+ /* TCP timestamp options have changed */
+ if (!IGB_CB(lro_skb)->tsecr != !IGB_CB(new_skb)->tsecr) {
+ igb_lro_flush(q_vector, lro_skb);
+ break;
+ }
+
+ /* make sure timestamp values are increasing */
+ if (IGB_CB(lro_skb)->tsecr &&
+ IGB_CB(lro_skb)->tsval > IGB_CB(new_skb)->tsval) {
+ igb_lro_flush(q_vector, lro_skb);
+ IGB_CB(new_skb)->mss = 0;
+ break;
+ }
+
+ data_len = IGB_CB(new_skb)->mss;
+
+		/* Flush the existing lro_skb and stop coalescing if any of
+		 * the following hold:
+		 * malformed header
+		 * no tcp data
+		 * resultant packet would be too large
+		 * new skb is larger than our current mss
+		 * data would remain in header
+		 * we would consume more frags than the sk_buff contains
+		 * ack sequence number changed
+		 * window size has changed
+		 */
+ if (data_len == 0 ||
+ data_len > IGB_CB(lro_skb)->mss ||
+ data_len > IGB_CB(lro_skb)->free ||
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ data_len != new_skb->data_len ||
+ skb_shinfo(new_skb)->nr_frags >=
+ (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) ||
+#endif
+ igb_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq ||
+ igb_lro_hdr(lro_skb)->th.window != lroh->th.window) {
+ igb_lro_flush(q_vector, lro_skb);
+ break;
+ }
+
+		/* Remove IP and TCP header */
+ skb_pull(new_skb, new_skb->len - data_len);
+
+ /* update timestamp and timestamp echo response */
+ IGB_CB(lro_skb)->tsval = IGB_CB(new_skb)->tsval;
+ IGB_CB(lro_skb)->tsecr = IGB_CB(new_skb)->tsecr;
+
+ /* update sequence and free space */
+ IGB_CB(lro_skb)->next_seq += data_len;
+ IGB_CB(lro_skb)->free -= data_len;
+
+ /* update append_cnt */
+ IGB_CB(lro_skb)->append_cnt++;
+
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ /* if header is empty pull pages into current skb */
+ igb_merge_frags(lro_skb, new_skb);
+#else
+ /* chain this new skb in frag_list */
+ igb_add_active_tail(lro_skb, new_skb);
+#endif
+
+ if ((data_len < IGB_CB(lro_skb)->mss) || lroh->th.psh ||
+ skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) {
+ igb_lro_hdr(lro_skb)->th.psh |= lroh->th.psh;
+ igb_lro_flush(q_vector, lro_skb);
+ }
+
+ lrolist->stats.coal++;
+ return;
+ }
+
+ if (IGB_CB(new_skb)->mss && !lroh->th.psh) {
+ /* if we are at capacity flush the tail */
+ if (skb_queue_len(&lrolist->active) >= IGB_LRO_MAX) {
+ lro_skb = skb_peek_tail(&lrolist->active);
+ if (lro_skb)
+ igb_lro_flush(q_vector, lro_skb);
+ }
+
+ /* update sequence and free space */
+ IGB_CB(new_skb)->next_seq += IGB_CB(new_skb)->mss;
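+		/* 65521 (= 65535 - ETH_HLEN) bounds how much more data this
+		 * flow may coalesce */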
+ IGB_CB(new_skb)->free = 65521 - new_skb->len;
+
+ /* .. and insert at the front of the active list */
+ __skb_queue_head(&lrolist->active, new_skb);
+
+ lrolist->stats.coal++;
+ return;
+ }
+
+ /* packet not handled by any of the above, pass it to the stack */
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_receive_skb(q_vector, new_skb);
+#else
+ napi_gro_receive(&q_vector->napi, new_skb);
+#endif
+}
+
+#endif /* IGB_NO_LRO */
+/**
+ * igb_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = rx_ring->netdev;
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+#ifdef NETIF_F_RXHASH
+ igb_rx_hash(rx_ring, rx_desc, skb);
+
+#endif
+ igb_rx_checksum(rx_ring, rx_desc, skb);
+
+ /* update packet type stats */
+ if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4))
+ rx_ring->rx_stats.ipv4_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4_EX))
+ rx_ring->rx_stats.ipv4e_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV6))
+ rx_ring->rx_stats.ipv6_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV6_EX))
+ rx_ring->rx_stats.ipv6e_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP))
+ rx_ring->rx_stats.tcp_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_UDP))
+ rx_ring->rx_stats.udp_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_SCTP))
+ rx_ring->rx_stats.sctp_packets++;
+ else if (pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_NFS))
+ rx_ring->rx_stats.nfs_packets++;
+
+#ifdef HAVE_PTP_1588_CLOCK
+ igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+#endif /* HAVE_PTP_1588_CLOCK */
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+#else
+ if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+#endif
+ igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+ u16 vid = 0;
+ if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
+ test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+ vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+ else
+ vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+#ifdef HAVE_VLAN_RX_REGISTER
+ IGB_CB(skb)->vid = vid;
+ } else {
+ IGB_CB(skb)->vid = 0;
+#else
+
+#ifdef HAVE_VLAN_PROTOCOL
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+#else
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+#endif
+
+
+#endif
+ }
+
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+
+ skb->protocol = eth_type_trans(skb, dev);
+}
+
+/**
+ * igb_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool igb_is_non_eop(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IGB_RX_DESC(rx_ring, ntc));
+
+ if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
+ return false;
+
+ return true;
+}
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+/* igb_clean_rx_irq - legacy (non packet split) version */
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+{
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+
+ do {
+ struct igb_rx_buffer *rx_buffer;
+ union e1000_adv_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ u16 ntc;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ ntc = rx_ring->next_to_clean;
+ rx_desc = IGB_RX_DESC(rx_ring, ntc);
+ rx_buffer = &rx_ring->rx_buffer_info[ntc];
+
+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ break;
+
+ /*
+ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
+ skb = rx_buffer->skb;
+
+ prefetch(skb->data);
+
+ /* pull the header of the skb in */
+ __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));
+
+ /* clear skb reference in buffer info structure */
+ rx_buffer->skb = NULL;
+
+ cleaned_count++;
+
+ BUG_ON(igb_is_non_eop(rx_ring, rx_desc));
+
+ dma_unmap_single(rx_ring->dev, rx_buffer->dma,
+ rx_ring->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ rx_buffer->dma = 0;
+
+ if (igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+#ifndef IGB_NO_LRO
+ if (igb_can_lro(rx_ring, rx_desc, skb))
+ igb_lro_receive(q_vector, skb);
+ else
+#endif
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_receive_skb(q_vector, skb);
+#else
+ napi_gro_receive(&q_vector->napi, skb);
+#endif
+
+#ifndef NETIF_F_GRO
+ netdev_ring(rx_ring)->last_rx = jiffies;
+
+#endif
+ /* update budget accounting */
+ total_packets++;
+ } while (likely(total_packets < budget));
+
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+#ifndef IGB_NO_LRO
+ igb_lro_flush_all(q_vector);
+
+#endif /* IGB_NO_LRO */
+ return (total_packets < budget);
+}
+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+/**
+ * igb_get_headlen - determine size of header for LRO/GRO
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO and GRO offloads. The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int igb_get_headlen(unsigned char *data,
+ unsigned int max_len)
+{
+ union {
+ unsigned char *network;
+ /* l2 headers */
+ struct ethhdr *eth;
+ struct vlan_hdr *vlan;
+ /* l3 headers */
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ __be16 protocol;
+ u8 nexthdr = 0; /* default to not TCP */
+ u8 hlen;
+
+ /* this should never happen, but better safe than sorry */
+ if (max_len < ETH_HLEN)
+ return max_len;
+
+ /* initialize network frame pointer */
+ hdr.network = data;
+
+ /* set first protocol and move network header forward */
+ protocol = hdr.eth->h_proto;
+ hdr.network += ETH_HLEN;
+
+ /* handle any vlan tag if present */
+ if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if ((hdr.network - data) > (max_len - VLAN_HLEN))
+ return max_len;
+
+ protocol = hdr.vlan->h_vlan_encapsulated_proto;
+ hdr.network += VLAN_HLEN;
+ }
+
+ /* handle L3 protocols */
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+ return max_len;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct iphdr))
+ return hdr.network - data;
+
+ /* record next protocol if header is present */
+ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+ nexthdr = hdr.ipv4->protocol;
+#ifdef NETIF_F_TSO6
+ } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+ return max_len;
+
+ /* record next protocol */
+ nexthdr = hdr.ipv6->nexthdr;
+ hlen = sizeof(struct ipv6hdr);
+#endif /* NETIF_F_TSO6 */
+ } else {
+ return hdr.network - data;
+ }
+
+ /* relocate pointer to start of L4 header */
+ hdr.network += hlen;
+
+ /* finally sort out TCP */
+ if (nexthdr == IPPROTO_TCP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+ return max_len;
+
+ /* access doff as a u8 to avoid unaligned access on ia64 */
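+		/* the high nibble holds the data offset in 32-bit words;
+		 * >> 2 converts it straight to bytes */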
+ hlen = (hdr.network[12] & 0xF0) >> 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct tcphdr))
+ return hdr.network - data;
+
+ hdr.network += hlen;
+ } else if (nexthdr == IPPROTO_UDP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+ return max_len;
+
+ hdr.network += sizeof(struct udphdr);
+ }
+
+ /*
+ * If everything has gone correctly hdr.network should be the
+ * data section of the packet and will be the end of the header.
+ * If not then it probably represents the end of the last recognized
+ * header.
+ */
+ if ((hdr.network - data) < max_len)
+ return hdr.network - data;
+ else
+ return max_len;
+}
+
+/**
+ * igb_pull_tail - igb specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an igb specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void igb_pull_tail(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
+
+ /*
+ * it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lomem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+#ifdef HAVE_PTP_1588_CLOCK
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ /* retrieve timestamp from buffer */
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+
+ /* update pointers to remove timestamp header */
+ skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
+ frag->page_offset += IGB_TS_HDR_LEN;
+ skb->data_len -= IGB_TS_HDR_LEN;
+ skb->len -= IGB_TS_HDR_LEN;
+
+ /* move va to start of packet data */
+ va += IGB_TS_HDR_LEN;
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ /*
+ * we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+/**
+ * igb_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool igb_cleanup_headers(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+
+ if (unlikely((igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ struct net_device *netdev = rx_ring->netdev;
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ }
+
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ igb_pull_tail(rx_ring, rx_desc, skb);
+
+ /* if skb_pad returns an error the skb was freed */
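+	/* 60 bytes is the minimum Ethernet frame length (ETH_ZLEN),
+	 * excluding the 4-byte FCS */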
+ if (unlikely(skb->len < 60)) {
+ int pad_len = 60 - skb->len;
+
+ if (skb_pad(skb, pad_len))
+ return true;
+ __skb_put(skb, pad_len);
+ }
+
+ return false;
+}
+
+/* igb_clean_rx_irq - packet split version */
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+{
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ struct sk_buff *skb = rx_ring->skb;
+ unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+
+ do {
+ union e1000_adv_rx_desc *rx_desc;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ break;
+
+ /*
+ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
+ /* retrieve a buffer from the ring */
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
+
+ cleaned_count++;
+
+ /* fetch next buffer in frame if non-eop */
+ if (igb_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+ /* verify the packet layout is correct */
+ if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+#ifndef IGB_NO_LRO
+ if (igb_can_lro(rx_ring, rx_desc, skb))
+ igb_lro_receive(q_vector, skb);
+ else
+#endif
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_receive_skb(q_vector, skb);
+#else
+ napi_gro_receive(&q_vector->napi, skb);
+#endif
+#ifndef NETIF_F_GRO
+
+ netdev_ring(rx_ring)->last_rx = jiffies;
+#endif
+
+ /* reset skb pointer */
+ skb = NULL;
+
+ /* update budget accounting */
+ total_packets++;
+ } while (likely(total_packets < budget));
+
+ /* place incomplete frames back on ring for completion */
+ rx_ring->skb = skb;
+
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+#ifndef IGB_NO_LRO
+ igb_lro_flush_all(q_vector);
+
+#endif /* IGB_NO_LRO */
+ return (total_packets < budget);
+}
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *bi)
+{
+ struct sk_buff *skb = bi->skb;
+ dma_addr_t dma = bi->dma;
+
+ if (dma)
+ return true;
+
+ if (likely(!skb)) {
+ skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring),
+ rx_ring->rx_buffer_len);
+ bi->skb = skb;
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ /* initialize skb for ring */
+ skb_record_rx_queue(skb, ring_queue_index(rx_ring));
+ }
+
+ dma = dma_map_single(rx_ring->dev, skb->data,
+ rx_ring->rx_buffer_len, DMA_FROM_DEVICE);
+
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ dev_kfree_skb_any(skb);
+ bi->skb = NULL;
+
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ return true;
+}
+
+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
+ return true;
+
+ /* alloc new page for storage */
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /*
+ * if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
+
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+
+ return true;
+}
+
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+/**
+ * igb_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: rx descriptor ring to refill
+ * @cleaned_count: number of buffers to replace
+ **/
+void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
+{
+ union e1000_adv_rx_desc *rx_desc;
+ struct igb_rx_buffer *bi;
+ u16 i = rx_ring->next_to_use;
+
+ /* nothing to do */
+ if (!cleaned_count)
+ return;
+
+ rx_desc = IGB_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
+
+ do {
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ if (!igb_alloc_mapped_skb(rx_ring, bi))
+#else
+ if (!igb_alloc_mapped_page(rx_ring, bi))
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+ break;
+
+ /*
+ * Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+#else
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+#endif
+
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = IGB_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the hdr_addr for the next_to_use descriptor */
+ rx_desc->read.hdr_addr = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ /* record the next descriptor to use */
+ rx_ring->next_to_use = i;
+
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+#endif
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
+}
+
+#ifdef SIOCGMIIPHY
+/**
+ * igb_mii_ioctl - handle MII ioctls for copper PHY devices
+ * @netdev: network interface device structure
+ * @ifr: pointer to the ifreq carrying the MII data
+ * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG)
+ **/
+static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (adapter->hw.phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = adapter->hw.phy.addr;
+ break;
+ case SIOCGMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ &data->val_out))
+ return -EIO;
+ break;
+ case SIOCSMIIREG:
+ default:
+ return -EOPNOTSUPP;
+ }
+ return E1000_SUCCESS;
+}
+
+#endif
+/**
+ * igb_ioctl - handle device-specific ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: pointer to the ifreq for the request
+ * @cmd: ioctl command
+ **/
+static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+#ifdef SIOCGMIIPHY
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return igb_mii_ioctl(netdev, ifr, cmd);
+#endif
+#ifdef HAVE_PTP_1588_CLOCK
+ case SIOCSHWTSTAMP:
+ return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+#endif /* HAVE_PTP_1588_CLOCK */
+#ifdef ETHTOOL_OPS_COMPAT
+ case SIOCETHTOOL:
+ return ethtool_ioctl(ifr);
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+ u16 cap_offset;
+
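+	/* reg is an offset within the PCI Express capability structure,
+	 * not an absolute config space offset */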
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
+ return -E1000_ERR_CONFIG;
+
+ pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+ return E1000_SUCCESS;
+}
+
+s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
+ return -E1000_ERR_CONFIG;
+
+ pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
+
+ return E1000_SUCCESS;
+}
+
+#ifdef HAVE_VLAN_RX_REGISTER
+static void igb_vlan_mode(struct net_device *netdev, struct vlan_group *vlgrp)
+#else
+void igb_vlan_mode(struct net_device *netdev, u32 features)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, rctl;
+ int i;
+#ifdef HAVE_VLAN_RX_REGISTER
+ bool enable = !!vlgrp;
+
+ igb_irq_disable(adapter);
+
+ adapter->vlgrp = vlgrp;
+
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ igb_irq_enable(adapter);
+#else
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+#else
+ bool enable = !!(features & NETIF_F_HW_VLAN_RX);
+#endif
+#endif
+
+ if (enable) {
+ /* enable VLAN tag insert/strip */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Disable CFI check */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= ~E1000_RCTL_CFIEN;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl &= ~E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ }
+
+#ifndef CONFIG_IGB_VMDQ_NETDEV
+ for (i = 0; i < adapter->vmdq_pools; i++) {
+ igb_set_vf_vlan_strip(adapter,
+ adapter->vfs_allocated_count + i,
+ enable);
+ }
+
+#else
+ igb_set_vf_vlan_strip(adapter,
+ adapter->vfs_allocated_count,
+ enable);
+
+ for (i = 1; i < adapter->vmdq_pools; i++) {
+#ifdef HAVE_VLAN_RX_REGISTER
+ struct igb_vmdq_adapter *vadapter;
+ vadapter = netdev_priv(adapter->vmdq_netdev[i-1]);
+ enable = !!vadapter->vlgrp;
+#else
+ struct net_device *vnetdev;
+ vnetdev = adapter->vmdq_netdev[i-1];
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_CTAG_RX);
+#else
+ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_RX);
+#endif
+#endif
+ igb_set_vf_vlan_strip(adapter,
+ adapter->vfs_allocated_count + i,
+ enable);
+ }
+
+#endif
+ igb_rlpml_set(adapter);
+}
+
+#ifdef HAVE_VLAN_PROTOCOL
+static int igb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+#elif defined HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+static int igb_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+#else
+static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif
+#else
+static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int pf_id = adapter->vfs_allocated_count;
+
+ /* attempt to add filter to vlvf array */
+ igb_vlvf_set(adapter, vid, TRUE, pf_id);
+
+ /* add the filter since PF can receive vlans w/o entry in vlvf */
+ igb_vfta_set(adapter, vid, TRUE);
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+
+ /* Copy feature flags from netdev to the vlan netdev for this vid.
+ * This allows things like TSO to bubble down to our vlan device.
+	 * There is no need to update netdev for vlan 0 (DCB), since it
+	 * wouldn't have a v_netdev.
+ */
+ if (adapter->vlgrp) {
+ struct vlan_group *vlgrp = adapter->vlgrp;
+ struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid);
+ if (v_netdev) {
+ v_netdev->features |= netdev->features;
+ vlan_group_set_device(vlgrp, vid, v_netdev);
+ }
+ }
+#endif
+#ifndef HAVE_VLAN_RX_REGISTER
+
+ set_bit(vid, adapter->active_vlans);
+#endif
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+ return 0;
+#endif
+}
+
+#ifdef HAVE_VLAN_PROTOCOL
+static int igb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+#elif defined HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+static int igb_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+#else
+static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif
+#else
+static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int pf_id = adapter->vfs_allocated_count;
+ s32 err;
+
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_irq_disable(adapter);
+
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ igb_irq_enable(adapter);
+
+#endif /* HAVE_VLAN_RX_REGISTER */
+ /* remove vlan from VLVF table array */
+ err = igb_vlvf_set(adapter, vid, FALSE, pf_id);
+
+ /* if vid was not present in VLVF just remove it from table */
+ if (err)
+ igb_vfta_set(adapter, vid, FALSE);
+#ifndef HAVE_VLAN_RX_REGISTER
+
+ clear_bit(vid, adapter->active_vlans);
+#endif
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+ return 0;
+#endif
+}
+
+static void igb_restore_vlan(struct igb_adapter *adapter)
+{
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_vlan_mode(adapter->netdev, adapter->vlgrp);
+
+ if (adapter->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
+ continue;
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ igb_vlan_rx_add_vid(adapter->netdev,
+ htons(ETH_P_8021Q), vid);
+#else
+ igb_vlan_rx_add_vid(adapter->netdev, vid);
+#endif
+ }
+ }
+#else
+ u16 vid;
+
+ igb_vlan_mode(adapter->netdev, adapter->netdev->features);
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ igb_vlan_rx_add_vid(adapter->netdev,
+ htons(ETH_P_8021Q), vid);
+#else
+ igb_vlan_rx_add_vid(adapter->netdev, vid);
+#endif
+#endif
+}
+
+int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+ mac->autoneg = 0;
+
+	/* SerDes devices do not support 10Mbps half/full duplex
+	 * or 100Mbps half duplex
+ */
+ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
+ switch (spddplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ case SPEED_10 + DUPLEX_FULL:
+ case SPEED_100 + DUPLEX_HALF:
+ dev_err(pci_dev_to_dev(pdev),
+ "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
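+	/* Illustrative note (not part of the original driver): spddplx is the
+	 * ethtool speed constant added to the duplex constant, so with the
+	 * standard <linux/ethtool.h> values (SPEED_100 == 100, DUPLEX_FULL == 1)
+	 * a request for 100 Mbps full duplex arrives here as 101.
+	 */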
+ switch (spddplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_10_HALF;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_10_FULL;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_100_HALF;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_100_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_FULL:
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ default:
+ dev_err(pci_dev_to_dev(pdev), "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+ }
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
+ return 0;
+}
+
+static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, rctl, status;
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ netif_device_detach(netdev);
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+
+ if (netif_running(netdev))
+ __igb_close(netdev, true);
+
+ igb_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+#endif
+
+ if (wufc) {
+ igb_setup_rctl(adapter);
+ igb_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & E1000_WUFC_MC) {
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_MPE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ }
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ /* phy power management enable */
+ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+ ctrl |= E1000_CTRL_ADVD3WUC;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Allow time for pending master requests to run */
+ e1000_disable_pcie_master(hw);
+
+ E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN);
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc);
+ } else {
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+ E1000_WRITE_REG(hw, E1000_WUFC, 0);
+ }
+
+ *enable_wake = wufc || adapter->en_mng_pt;
+ if (!*enable_wake)
+ igb_power_down_link(adapter);
+ else
+ igb_power_up_link(adapter);
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant. */
+ igb_release_hw_control(adapter);
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+static int igb_suspend(struct device *dev)
+#else
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+{
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+ struct pci_dev *pdev = to_pci_dev(dev);
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+ int retval;
+ bool wake;
+
+ retval = __igb_shutdown(pdev, &wake, 0);
+ if (retval)
+ return retval;
+
+ if (wake) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ return 0;
+}
+
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+static int igb_resume(struct device *dev)
+#else
+static int igb_resume(struct pci_dev *pdev)
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+{
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+ struct pci_dev *pdev = to_pci_dev(dev);
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(pci_dev_to_dev(pdev),
+ "igb: Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ igb_reset(adapter);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver. */
+ igb_get_hw_control(adapter);
+
+ E1000_WRITE_REG(hw, E1000_WUS, ~0);
+
+ if (netdev->flags & IFF_UP) {
+ rtnl_lock();
+ err = __igb_open(netdev, true);
+ rtnl_unlock();
+ if (err)
+ return err;
+ }
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+static int igb_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (!igb_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
+
+ return -EBUSY;
+}
+
+static int igb_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int retval;
+ bool wake;
+
+ retval = __igb_shutdown(pdev, &wake, 1);
+ if (retval)
+ return retval;
+
+ if (wake) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ return 0;
+}
+
+static int igb_runtime_resume(struct device *dev)
+{
+ return igb_resume(dev);
+}
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
+
+#ifdef USE_REBOOT_NOTIFIER
+/* only want to do this for 2.4 kernels? */
+static int igb_notify_reboot(struct notifier_block *nb, unsigned long event,
+ void *p)
+{
+ struct pci_dev *pdev = NULL;
+ bool wake;
+
+ switch (event) {
+ case SYS_DOWN:
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+ if (pci_dev_driver(pdev) == &igb_driver) {
+ __igb_shutdown(pdev, &wake, 0);
+ if (event == SYS_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+ }
+ }
+ }
+ return NOTIFY_DONE;
+}
+#else
+static void igb_shutdown(struct pci_dev *pdev)
+{
+ bool wake = false;
+
+ __igb_shutdown(pdev, &wake, 0);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+#endif /* USE_REBOOT_NOTIFIER */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void igb_netpoll(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_q_vector *q_vector;
+ int i;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+ if (adapter->msix_entries)
+ E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value);
+ else
+ igb_irq_disable(adapter);
+ napi_schedule(&q_vector->napi);
+ }
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+#ifdef HAVE_PCI_ERS
+#define E1000_DEV_ID_82576_VF 0x10CA
+/**
+ * igb_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+#ifdef CONFIG_PCI_IOV__UNUSED
+ struct pci_dev *bdev, *vfdev;
+ u32 dw0, dw1, dw2, dw3;
+ int vf, pos;
+ u16 req_id, pf_func;
+
+ if (!(adapter->flags & IGB_FLAG_DETECT_BAD_DMA))
+ goto skip_bad_vf_detection;
+
+ bdev = pdev->bus->self;
+ while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
+ bdev = bdev->bus->self;
+
+ if (!bdev)
+ goto skip_bad_vf_detection;
+
+ pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ goto skip_bad_vf_detection;
+
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+
+ req_id = dw1 >> 16;
+ /* On the 82576 if bit 7 of the requestor ID is set then it's a VF */
+ if (!(req_id & 0x0080))
+ goto skip_bad_vf_detection;
+
+ pf_func = req_id & 0x01;
+ if ((pf_func & 1) == (pdev->devfn & 1)) {
+
+ vf = (req_id & 0x7F) >> 1;
+ dev_err(pci_dev_to_dev(pdev),
+ "VF %d has caused a PCIe error\n", vf);
+ dev_err(pci_dev_to_dev(pdev),
+ "TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
+ "%8.8x\tdw3: %8.8x\n",
+ dw0, dw1, dw2, dw3);
+
+ /* Find the pci device of the offending VF */
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ E1000_DEV_ID_82576_VF, NULL);
+ while (vfdev) {
+ if (vfdev->devfn == (req_id & 0xFF))
+ break;
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ E1000_DEV_ID_82576_VF, vfdev);
+ }
+ /*
+	 * There's a slim chance the VF could have been hot-unplugged,
+ * so if it is no longer present we don't need to issue the
+ * VFLR. Just clean up the AER in that case.
+ */
+ if (vfdev) {
+ dev_err(pci_dev_to_dev(pdev),
+ "Issuing VFLR to VF %d\n", vf);
+ pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ }
+
+ /*
+ * Even though the error may have occurred on the other port
+ * we still need to increment the vf error reference count for
+ * both ports because the I/O resume function will be called
+ * for both of them.
+ */
+ adapter->vferr_refcount++;
+
+ return PCI_ERS_RESULT_RECOVERED;
+
+skip_bad_vf_detection:
+#endif /* CONFIG_PCI_IOV__UNUSED */
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ igb_down(adapter);
+ pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * igb_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. The implementation
+ * resembles the first half of the igb_resume routine.
+ */
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ pci_ers_result_t result;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(pci_dev_to_dev(pdev),
+ "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ schedule_work(&adapter->reset_task);
+ E1000_WRITE_REG(hw, E1000_WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ return result;
+}
+
+/**
+ * igb_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. The implementation resembles the
+ * second half of the igb_resume routine.
+ */
+static void igb_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->vferr_refcount) {
+ dev_info(pci_dev_to_dev(pdev), "Resuming after VF err\n");
+ adapter->vferr_refcount--;
+ return;
+ }
+
+ if (netif_running(netdev)) {
+ if (igb_up(adapter)) {
+ dev_err(pci_dev_to_dev(pdev), "igb_up failed after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver. */
+ igb_get_hw_control(adapter);
+}
+
+#endif /* HAVE_PCI_ERS */
+
+int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return 0;
+
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)
+ continue;
+ adapter->mac_table[i].state = (IGB_MAC_STATE_MODIFIED |
+ IGB_MAC_STATE_IN_USE);
+ memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
+ adapter->mac_table[i].queue = queue;
+ igb_sync_mac_table(adapter);
+ return 0;
+ }
+ return -ENOMEM;
+}
+int igb_del_mac_filter(struct igb_adapter *adapter, u8* addr, u16 queue)
+{
+ /* search table for addr, if found, set to 0 and sync */
+ int i;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (is_zero_ether_addr(addr))
+ return 0;
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+ adapter->mac_table[i].queue == queue) {
+ adapter->mac_table[i].state = IGB_MAC_STATE_MODIFIED;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ igb_sync_mac_table(adapter);
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+static int igb_set_vf_mac(struct igb_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ igb_del_mac_filter(adapter, adapter->vf_data[vf].vf_mac_addresses, vf);
+ memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
+
+ igb_add_mac_filter(adapter, mac_addr, vf);
+
+ return 0;
+}
+
+#ifdef IFLA_VF_MAX
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+ return -EINVAL;
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.\n");
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return igb_set_vf_mac(adapter, vf, mac);
+}
+
+static int igb_link_mbps(int internal_link_speed)
+{
+ switch (internal_link_speed) {
+ case SPEED_100:
+ return 100;
+ case SPEED_1000:
+ return 1000;
+ case SPEED_2500:
+ return 2500;
+ default:
+ return 0;
+ }
+}
+
+static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
+ int link_speed)
+{
+ int rf_dec, rf_int;
+ u32 bcnrc_val;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
+ rf_int = link_speed / tx_rate;
+ rf_dec = (link_speed - (rf_int * tx_rate));
+ rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+ bcnrc_val = E1000_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
+ E1000_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
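+	/* Worked example (illustrative only): with link_speed = 1000 and
+	 * tx_rate = 300, rf_int = 3 and rf_dec carries the remaining 1/3
+	 * scaled by 2^E1000_RTTBCNRC_RF_INT_SHIFT, so RTTBCNRC encodes the
+	 * ratio link_speed/tx_rate as a fixed-point rate factor.
+	 */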
+
+ E1000_WRITE_REG(hw, E1000_RTTDQSEL, vf); /* vf X uses queue X */
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
+ */
+ E1000_WRITE_REG(hw, E1000_RTTBCNRM(0), 0x14);
+ E1000_WRITE_REG(hw, E1000_RTTBCNRC, bcnrc_val);
+}
+
+static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
+{
+ int actual_link_speed, i;
+ bool reset_rate = false;
+
+ /* VF TX rate limit was not set */
+ if ((adapter->vf_rate_link_speed == 0) ||
+ (adapter->hw.mac.type != e1000_82576))
+ return;
+
+ actual_link_speed = igb_link_mbps(adapter->link_speed);
+ if (actual_link_speed != adapter->vf_rate_link_speed) {
+ reset_rate = true;
+ adapter->vf_rate_link_speed = 0;
+ dev_info(&adapter->pdev->dev,
+ "Link speed has been changed. VF Transmit rate is disabled\n");
+ }
+
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ if (reset_rate)
+ adapter->vf_data[i].tx_rate = 0;
+
+ igb_set_vf_rate_limit(&adapter->hw, i,
+ adapter->vf_data[i].tx_rate, actual_link_speed);
+ }
+}
+
+#ifdef HAVE_VF_MIN_MAX_TXRATE
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+ int tx_rate)
+#else /* HAVE_VF_MIN_MAX_TXRATE */
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+#endif /* HAVE_VF_MIN_MAX_TXRATE */
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int actual_link_speed;
+
+ if (hw->mac.type != e1000_82576)
+ return -EOPNOTSUPP;
+
+#ifdef HAVE_VF_MIN_MAX_TXRATE
+ if (min_tx_rate)
+ return -EINVAL;
+#endif /* HAVE_VF_MIN_MAX_TXRATE */
+
+ actual_link_speed = igb_link_mbps(adapter->link_speed);
+ if ((vf >= adapter->vfs_allocated_count) ||
+ (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) ||
+ (tx_rate < 0) || (tx_rate > actual_link_speed))
+ return -EINVAL;
+
+ adapter->vf_rate_link_speed = actual_link_speed;
+ adapter->vf_data[vf].tx_rate = (u16)tx_rate;
+ igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+ return 0;
+}
+
+static int igb_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
+#ifdef HAVE_VF_MIN_MAX_TXRATE
+ ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->min_tx_rate = 0;
+#else /* HAVE_VF_MIN_MAX_TXRATE */
+ ivi->tx_rate = adapter->vf_data[vf].tx_rate;
+#endif /* HAVE_VF_MIN_MAX_TXRATE */
+ ivi->vlan = adapter->vf_data[vf].pf_vlan;
+ ivi->qos = adapter->vf_data[vf].pf_qos;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
+#endif
+ return 0;
+}
+#endif
+static void igb_vmm_control(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int count;
+ u32 reg;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ default:
+ /* replication is not supported for 82575 */
+ return;
+ case e1000_82576:
+ /* notify HW that the MAC is adding vlan tags */
+ reg = E1000_READ_REG(hw, E1000_DTXCTL);
+ reg |= (E1000_DTXCTL_VLAN_ADDED |
+ E1000_DTXCTL_SPOOF_INT);
+ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
+ case e1000_82580:
+ /* enable replication vlan tag stripping */
+ reg = E1000_READ_REG(hw, E1000_RPLOLR);
+ reg |= E1000_RPLOLR_STRVLAN;
+ E1000_WRITE_REG(hw, E1000_RPLOLR, reg);
+ case e1000_i350:
+ case e1000_i354:
+ /* none of the above registers are supported by i350 */
+ break;
+ }
+
+ /* Enable Malicious Driver Detection */
+ if ((adapter->vfs_allocated_count) &&
+ (adapter->mdd)) {
+ if (hw->mac.type == e1000_i350)
+ igb_enable_mdd(adapter);
+ }
+
+ /* enable replication and loopback support */
+ count = adapter->vfs_allocated_count || adapter->vmdq_pools;
+ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE && count)
+ e1000_vmdq_set_loopback_pf(hw, 1);
+ e1000_vmdq_set_anti_spoofing_pf(hw,
+ adapter->vfs_allocated_count || adapter->vmdq_pools,
+ adapter->vfs_allocated_count);
+ e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count ||
+ adapter->vmdq_pools);
+}
+
+static void igb_init_fw(struct igb_adapter *adapter)
+{
+ struct e1000_fw_drv_info fw_cmd;
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ u16 mask;
+
+ if (hw->mac.type == e1000_i210)
+ mask = E1000_SWFW_EEP_SM;
+ else
+ mask = E1000_SWFW_PHY0_SM;
+ /* i211 parts do not support this feature */
+ if (hw->mac.type == e1000_i211)
+ hw->mac.arc_subsystem_valid = false;
+
+ if (!hw->mac.ops.acquire_swfw_sync(hw, mask)) {
+ for (i = 0; i <= FW_MAX_RETRIES; i++) {
+ E1000_WRITE_REG(hw, E1000_FWSTS, E1000_FWSTS_FWRI);
+ fw_cmd.hdr.cmd = FW_CMD_DRV_INFO;
+ fw_cmd.hdr.buf_len = FW_CMD_DRV_INFO_LEN;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CMD_RESERVED;
+ fw_cmd.port_num = hw->bus.func;
+ fw_cmd.drv_version = FW_FAMILY_DRV_VER;
+ fw_cmd.hdr.checksum = 0;
+ fw_cmd.hdr.checksum = e1000_calculate_checksum((u8 *)&fw_cmd,
+ (FW_HDR_LEN +
+ fw_cmd.hdr.buf_len));
+ e1000_host_interface_command(hw, (u8*)&fw_cmd,
+ sizeof(fw_cmd));
+ if (fw_cmd.hdr.cmd_or_resp.ret_status == FW_STATUS_SUCCESS)
+ break;
+ }
+ } else
+ dev_warn(pci_dev_to_dev(adapter->pdev),
+ "Unable to get semaphore, firmware init failed.\n");
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 dmac_thr;
+ u16 hwm;
+ u32 status;
+
+ if (hw->mac.type == e1000_i211)
+ return;
+
+ if (hw->mac.type > e1000_82580) {
+ if (adapter->dmac != IGB_DMAC_DISABLE) {
+ u32 reg;
+
+ /* force threshold to 0. */
+ E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
+
+ /*
+ * DMA Coalescing high water mark needs to be greater
+ * than the Rx threshold. Set hwm to PBA - max frame
+ * size in 16B units, capping it at PBA - 6KB.
+ */
+ hwm = 64 * pba - adapter->max_frame_size / 16;
+ if (hwm < 64 * (pba - 6))
+ hwm = 64 * (pba - 6);
+ reg = E1000_READ_REG(hw, E1000_FCRTC);
+ reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+ reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+ & E1000_FCRTC_RTH_COAL_MASK);
+ E1000_WRITE_REG(hw, E1000_FCRTC, reg);
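+			/* Illustrative arithmetic (not in the original source):
+			 * with a 34KB packet buffer (pba = 34) and a 1522 byte
+			 * max frame, hwm = 64 * 34 - 1522 / 16 = 2081
+			 * sixteen-byte blocks, which already satisfies the
+			 * 64 * (34 - 6) = 1792 bound applied above.
+			 */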
+
+ /*
+ * Set the DMA Coalescing Rx threshold to PBA - 2 * max
+ * frame size, capping it at PBA - 10KB.
+ */
+ dmac_thr = pba - adapter->max_frame_size / 512;
+ if (dmac_thr < pba - 10)
+ dmac_thr = pba - 10;
+ reg = E1000_READ_REG(hw, E1000_DMACR);
+ reg &= ~E1000_DMACR_DMACTHR_MASK;
+ reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
+ & E1000_DMACR_DMACTHR_MASK);
+
+			/* transition to L0s or L1 if available */
+ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+			/* Check whether the link is a 2.5Gb backplane connection
+			 * before configuring the watchdog timer: on a 2.5Gb link
+			 * the msec value is programmed in 12.8usec intervals,
+			 * while on other links it is programmed in 32usec
+			 * intervals
+ */
+ if (hw->mac.type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ (!(status & E1000_STATUS_2P5_SKU_OVER)))
+ reg |= ((adapter->dmac * 5) >> 6);
+ else
+ reg |= ((adapter->dmac) >> 5);
+ } else {
+ reg |= ((adapter->dmac) >> 5);
+ }
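+			/* Example (assumed dmac value, comment only): if
+			 * adapter->dmac holds 1000, the watchdog field becomes
+			 * 1000 * 5 / 64 = 78 on a 2.5Gb backplane link and
+			 * 1000 / 32 = 31 otherwise.
+			 */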
+
+ /*
+ * Disable BMC-to-OS Watchdog enable
+ * on devices that support OS-to-BMC
+ */
+ if (hw->mac.type != e1000_i354)
+ reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
+ E1000_WRITE_REG(hw, E1000_DMACR, reg);
+
+			/* no lower threshold to disable coalescing (smart FIFO); UTRESH=0 */
+ E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
+
+			/* This sets the time to wait before requesting a
+			 * transition to a low power state to the number of usecs
+			 * needed to receive one 512-byte frame at gigabit
+			 * line rate. On i350 devices the transition to the Lx
+			 * state is delayed by 4 usec with the flush disable
+			 * bit set, to avoid losing mailbox interrupts
+ */
+ reg = E1000_READ_REG(hw, E1000_DMCTLX);
+ if (hw->mac.type == e1000_i350)
+ reg |= IGB_DMCTLX_DCFLUSH_DIS;
+
+			/* in a 2.5Gb connection the TTLX unit is 0.4 usec, so a
+			 * 4 usec delay requires a value of 0xA; the delay is still 4 usec
+ */
+ if (hw->mac.type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ (!(status & E1000_STATUS_2P5_SKU_OVER)))
+ reg |= 0xA;
+ else
+ reg |= 0x4;
+ } else {
+ reg |= 0x4;
+ }
+ E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
+
+ /* free space in tx packet buffer to wake from DMA coal */
+ E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
+ (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+
+ /* make low power state decision controlled by DMA coal */
+ reg = E1000_READ_REG(hw, E1000_PCIEMISC);
+ reg &= ~E1000_PCIEMISC_LX_DECISION;
+ E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
+ } /* endif adapter->dmac is not disabled */
+ } else if (hw->mac.type == e1000_82580) {
+ u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
+ E1000_WRITE_REG(hw, E1000_PCIEMISC,
+ reg & ~E1000_PCIEMISC_LX_DECISION);
+ E1000_WRITE_REG(hw, E1000_DMACR, 0);
+ }
+}
+
+#ifdef HAVE_I2C_SUPPORT
+/* igb_read_i2c_byte - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: value read
+ *
+ * Performs byte read operation over I2C interface at
+ * a specified device address.
+ */
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = adapter->i2c_client;
+ s32 status;
+ u16 swfw_mask = 0;
+
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+ != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+
+ status = i2c_smbus_read_byte_data(this_client, byte_offset);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ if (status < 0)
+ return E1000_ERR_I2C;
+ else {
+ *data = status;
+ return E1000_SUCCESS;
+ }
+}
+
+/* igb_write_i2c_byte - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: value to write
+ *
+ * Performs byte write operation over I2C interface at
+ * a specified device address.
+ */
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = adapter->i2c_client;
+ s32 status;
+ u16 swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+ status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ if (status)
+ return E1000_ERR_I2C;
+ else
+ return E1000_SUCCESS;
+}
+#endif /* HAVE_I2C_SUPPORT */
+/* igb_main.c */
+
+
+/**
+ * igb_kni_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @lad_dev: on success, set to the allocated net_device
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * igb_kni_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+int igb_kni_probe(struct pci_dev *pdev,
+ struct net_device **lad_dev)
+{
+ struct net_device *netdev;
+ struct igb_adapter *adapter;
+ struct e1000_hw *hw;
+ u16 eeprom_data = 0;
+ u8 pba_str[E1000_PBANUM_LENGTH];
+ s32 ret_val;
+ static int global_quad_port_a; /* global quad port a indication */
+ int i, err, pci_using_dac = 0;
+ static int cards_found;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+#ifdef NO_KNI
+ pci_using_dac = 0;
+ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
+ if (err) {
+ IGB_ERR("No usable DMA configuration, "
+ "aborting\n");
+ goto err_dma;
+ }
+ }
+ }
+
+#ifndef HAVE_ASPM_QUIRKS
+ /* 82575 requires that the pci-e link partner disable the L0s state */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+ default:
+ break;
+ }
+
+#endif /* HAVE_ASPM_QUIRKS */
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev,
+ IORESOURCE_MEM),
+ igb_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ err = -ENOMEM;
+#endif /* NO_KNI */
+#ifdef HAVE_TX_MQ
+ netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
+ IGB_MAX_TX_QUEUES);
+#else
+ netdev = alloc_etherdev(sizeof(struct igb_adapter));
+#endif /* HAVE_TX_MQ */
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ //pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->port_num = hw->bus.func;
+ adapter->msg_enable = (1 << debug) - 1;
+
+#ifdef HAVE_PCI_ERS
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_ioremap;
+#endif
+ err = -EIO;
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr)
+ goto err_ioremap;
+
+#ifdef HAVE_NET_DEVICE_OPS
+ netdev->netdev_ops = &igb_netdev_ops;
+#else /* HAVE_NET_DEVICE_OPS */
+ netdev->open = &igb_open;
+ netdev->stop = &igb_close;
+ netdev->get_stats = &igb_get_stats;
+#ifdef HAVE_SET_RX_MODE
+ netdev->set_rx_mode = &igb_set_rx_mode;
+#endif
+ netdev->set_multicast_list = &igb_set_rx_mode;
+ netdev->set_mac_address = &igb_set_mac;
+ netdev->change_mtu = &igb_change_mtu;
+ netdev->do_ioctl = &igb_ioctl;
+#ifdef HAVE_TX_TIMEOUT
+ netdev->tx_timeout = &igb_tx_timeout;
+#endif
+ netdev->vlan_rx_register = igb_vlan_mode;
+ netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
+ netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = igb_netpoll;
+#endif
+ netdev->hard_start_xmit = &igb_xmit_frame;
+#endif /* HAVE_NET_DEVICE_OPS */
+ igb_set_ethtool_ops(netdev);
+#ifdef HAVE_TX_TIMEOUT
+ netdev->watchdog_timeo = 5 * HZ;
+#endif
+
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ adapter->bd_number = cards_found;
+
+ /* setup the private structure */
+ err = igb_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ e1000_get_bus_info(hw);
+
+ hw->phy.autoneg_wait_to_complete = FALSE;
+ hw->mac.adaptive_ifs = FALSE;
+
+ /* Copper options */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ hw->phy.mdix = AUTO_ALL_MODES;
+ hw->phy.disable_polarity_correction = FALSE;
+ hw->phy.ms_type = e1000_ms_hw_default;
+ }
+
+ if (e1000_check_reset_block(hw))
+ dev_info(pci_dev_to_dev(pdev),
+ "PHY reset is blocked due to SOL/IDER session.\n");
+
+ /*
+	 * features is initialized to 0 at allocation; it may already have bits
+	 * set by igb_sw_init, so use an OR instead of an
+	 * assignment.
+ */
+ netdev->features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+#ifdef NETIF_F_IPV6_CSUM
+ NETIF_F_IPV6_CSUM |
+#endif
+#ifdef NETIF_F_TSO
+ NETIF_F_TSO |
+#ifdef NETIF_F_TSO6
+ NETIF_F_TSO6 |
+#endif
+#endif /* NETIF_F_TSO */
+#ifdef NETIF_F_RXHASH
+ NETIF_F_RXHASH |
+#endif
+ NETIF_F_RXCSUM |
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+#else
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_TX;
+#endif
+
+ if (hw->mac.type >= e1000_82576)
+ netdev->features |= NETIF_F_SCTP_CSUM;
+
+#ifdef HAVE_NDO_SET_FEATURES
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+#ifndef IGB_NO_LRO
+
+ /* give us the option of enabling LRO later */
+ netdev->hw_features |= NETIF_F_LRO;
+#endif
+#else
+#ifdef NETIF_F_GRO
+
+ /* this is only needed on kernels prior to 2.6.39 */
+ netdev->features |= NETIF_F_GRO;
+#endif
+#endif
+
+ /* set this bit last since it cannot be part of hw_features */
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#else
+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
+#endif
+
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+ netdev->vlan_features |= NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG;
+
+#endif
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+#ifdef NO_KNI
+ adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
+#ifdef DEBUG
+ if (adapter->dmac != IGB_DMAC_DISABLE)
+ printk("%s: DMA Coalescing is enabled..\n", netdev->name);
+#endif
+
+ /* before reading the NVM, reset the controller to put the device in a
+ * known good starting state */
+ e1000_reset_hw(hw);
+#endif /* NO_KNI */
+
+ /* make sure the NVM is good */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ dev_err(pci_dev_to_dev(pdev), "The NVM Checksum Is Not"
+ " Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ /* copy the MAC address out of the NVM */
+ if (e1000_read_mac_addr(hw))
+ dev_err(pci_dev_to_dev(pdev), "NVM Read Error\n");
+ memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+#ifdef ETHTOOL_GPERMADDR
+ memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+#else
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+#endif
+ dev_err(pci_dev_to_dev(pdev), "Invalid MAC Address\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len);
+ adapter->mac_table[0].queue = adapter->vfs_allocated_count;
+ adapter->mac_table[0].state = (IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE);
+ igb_rar_set(adapter, 0);
+
+ /* get firmware version for ethtool -i */
+ igb_set_fw_version(adapter);
+
+ /* Check if Media Autosense is enabled */
+ if (hw->mac.type == e1000_82580)
+ igb_init_mas(adapter);
+
+#ifdef NO_KNI
+ setup_timer(&adapter->watchdog_timer, &igb_watchdog,
+ (unsigned long) adapter);
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ setup_timer(&adapter->dma_err_timer, &igb_dma_err_timer,
+ (unsigned long) adapter);
+ setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
+ (unsigned long) adapter);
+
+ INIT_WORK(&adapter->reset_task, igb_reset_task);
+ INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ INIT_WORK(&adapter->dma_err_task, igb_dma_err_task);
+#endif
+
+ /* Initialize link properties that are user-changeable */
+ adapter->fc_autoneg = true;
+ hw->mac.autoneg = true;
+ hw->phy.autoneg_advertised = 0x2f;
+
+ hw->fc.requested_mode = e1000_fc_default;
+ hw->fc.current_mode = e1000_fc_default;
+
+ e1000_validate_mdi_setting(hw);
+
+ /* By default, support wake on port A */
+ if (hw->bus.func == 0)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+ /* Check the NVM for wake support for non-port A ports */
+ if (hw->mac.type >= e1000_82580)
+ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &eeprom_data);
+ else if (hw->bus.func == 1)
+ e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+
+ if (eeprom_data & IGB_EEPROM_APME)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+ /* now that we have the eeprom settings, apply the special cases where
+ * the eeprom may be wrong or the board simply won't support wake on
+ * lan on a particular port */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ break;
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ /* Wake events only supported on port A for dual fiber
+ * regardless of eeprom setting */
+ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ break;
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ /* if quad port adapter, disable WoL on all but port A */
+ if (global_quad_port_a != 0)
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ else
+ adapter->flags |= IGB_FLAG_QUAD_PORT_A;
+ /* Reset for multiple quad port adapters */
+ if (++global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ default:
+ /* If the device can't wake, don't set software support */
+ if (!device_can_wakeup(&adapter->pdev->dev))
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ break;
+ }
+
+ /* initialize the wol settings based on the eeprom settings */
+ if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ /* Some vendors want WoL disabled by default, but still supported */
+ if ((hw->mac.type == e1000_i350) &&
+ (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ adapter->wol = 0;
+ }
+
+#ifdef NO_KNI
+ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev),
+ adapter->flags & IGB_FLAG_WOL_SUPPORTED);
+
+ /* reset the hardware with the new settings */
+ igb_reset(adapter);
+ adapter->devrc = 0;
+
+#ifdef HAVE_I2C_SUPPORT
+ /* Init the I2C interface */
+ err = igb_init_i2c(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to init i2c interface\n");
+ goto err_eeprom;
+ }
+#endif /* HAVE_I2C_SUPPORT */
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver. */
+ igb_get_hw_control(adapter);
+
+ strncpy(netdev->name, "eth%d", IFNAMSIZ);
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ err = igb_init_vmdq_netdevs(adapter);
+ if (err)
+ goto err_register;
+#endif
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+#ifdef IGB_DCA
+ if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) {
+ adapter->flags |= IGB_FLAG_DCA_ENABLED;
+ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n");
+ igb_setup_dca(adapter);
+ }
+
+#endif
+#ifdef HAVE_PTP_1588_CLOCK
+ /* do hw tstamp init after resetting */
+ igb_ptp_init(adapter);
+#endif /* HAVE_PTP_1588_CLOCK */
+
+#endif /* NO_KNI */
+ dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n");
+ /* print bus type/speed/width info */
+ dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ",
+ netdev->name,
+ ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" :
+ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" :
+ (hw->mac.type == e1000_i354) ? "integrated" :
+ "unknown"),
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
+ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+ (hw->mac.type == e1000_i354) ? "integrated" :
+ "unknown"));
+ dev_info(pci_dev_to_dev(pdev), "%s: MAC: ", netdev->name);
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
+ ret_val = e1000_read_pba_string(hw, pba_str, E1000_PBANUM_LENGTH);
+ if (ret_val)
+ strncpy(pba_str, "Unknown", sizeof(pba_str) - 1);
+ dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name,
+ pba_str);
+
+
+ /* Initialize the thermal sensor on i350 devices. */
+ if (hw->mac.type == e1000_i350) {
+ if (hw->bus.func == 0) {
+ u16 ets_word;
+
+ /*
+ * Read the NVM to determine if this i350 device
+ * supports an external thermal sensor.
+ */
+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word);
+ if (ets_word != 0x0000 && ets_word != 0xFFFF)
+ adapter->ets = true;
+ else
+ adapter->ets = false;
+ }
+#ifdef NO_KNI
+#ifdef IGB_HWMON
+
+ igb_sysfs_init(adapter);
+#else
+#ifdef IGB_PROCFS
+
+ igb_procfs_init(adapter);
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
+#endif /* NO_KNI */
+ } else {
+ adapter->ets = false;
+ }
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (hw->mac.type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable EEE for internal copper PHY devices */
+ err = e1000_set_eee_i350(hw);
+ if ((!err) &&
+ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ break;
+ case e1000_i354:
+ if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) &
+ (E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ err = e1000_set_eee_i354(hw);
+ if ((!err) &&
+ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* send driver version info to firmware */
+ if (hw->mac.type >= e1000_i350)
+ igb_init_fw(adapter);
+
+#ifndef IGB_NO_LRO
+ if (netdev->features & NETIF_F_LRO)
+ dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled \n");
+ else
+ dev_info(pci_dev_to_dev(pdev), "LRO is disabled \n");
+#endif
+ dev_info(pci_dev_to_dev(pdev),
+ "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
+ adapter->msix_entries ? "MSI-X" :
+ (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
+ adapter->num_rx_queues, adapter->num_tx_queues);
+
+ cards_found++;
+ *lad_dev = netdev;
+
+ pm_runtime_put_noidle(&pdev->dev);
+ return 0;
+
+//err_register:
+// igb_release_hw_control(adapter);
+#ifdef HAVE_I2C_SUPPORT
+ memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
+#endif /* HAVE_I2C_SUPPORT */
+err_eeprom:
+// if (!e1000_check_reset_block(hw))
+// e1000_phy_hw_reset(hw);
+
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+err_sw_init:
+// igb_clear_interrupt_scheme(adapter);
+// igb_reset_sriov_capability(adapter);
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+// pci_release_selected_regions(pdev,
+// pci_select_bars(pdev, IORESOURCE_MEM));
+//err_pci_reg:
+//err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+
+void igb_kni_remove(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c
new file mode 100755
index 00000000..14439ad5
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c
@@ -0,0 +1,848 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/netdevice.h>
+
+#include "igb.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define IGB_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+#define MAX_NUM_LIST_OPTS 15
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET }
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user-specified value and the
+ * user asking for the default value.
+ * The true default values are loaded in when igb_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define IGB_PARAM(X, desc) \
+ static const int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
+ MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \
+ MODULE_PARM_DESC(X, desc);
+#else
+#define IGB_PARAM(X, desc) \
+ static int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
+ static unsigned int num_##X; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
+#endif
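+
+/* Illustrative expansion (not part of the driver): with module_param_array
+ * available, a declaration such as
+ *
+ *     IGB_PARAM(RSS, "Number of RSS queues");
+ *
+ * roughly becomes
+ *
+ *     static int RSS[IGB_MAX_NIC + 1] = IGB_PARAM_INIT;
+ *     static unsigned int num_RSS;
+ *     module_param_array_named(RSS, RSS, int, &num_RSS, 0);
+ *     MODULE_PARM_DESC(RSS, "Number of RSS queues");
+ *
+ * i.e. one per-adapter slot plus a count of how many values the user
+ * actually passed on the module command line.
+ */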
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ */
+IGB_PARAM(InterruptThrottleRate,
+ "Maximum interrupts per second, per vector, (max 100000), default 3=adaptive");
+#define DEFAULT_ITR 3
+#define MAX_ITR 100000
+/* #define MIN_ITR 120 */
+#define MIN_ITR 0
+/* IntMode (Interrupt Mode)
+ *
+ * Valid Range: 0 - 2
+ *
+ * Default Value: 2 (MSI-X)
+ */
+IGB_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2");
+#define MAX_INTMODE IGB_INT_MODE_MSIX
+#define MIN_INTMODE IGB_INT_MODE_LEGACY
+
+IGB_PARAM(Node, "set the starting node to allocate memory on, default -1");
+
+/* LLIPort (Low Latency Interrupt TCP Port)
+ *
+ * Valid Range: 0 - 65535
+ *
+ * Default Value: 0 (disabled)
+ */
+IGB_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535), default 0=off");
+
+#define DEFAULT_LLIPORT 0
+#define MAX_LLIPORT 0xFFFF
+#define MIN_LLIPORT 0
+
+/* LLIPush (Low Latency Interrupt on TCP Push flag)
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1), default 0=off");
+
+#define DEFAULT_LLIPUSH 0
+#define MAX_LLIPUSH 1
+#define MIN_LLIPUSH 0
+
+/* LLISize (Low Latency Interrupt on Packet Size)
+ *
+ * Valid Range: 0 - 1500
+ *
+ * Default Value: 0 (disabled)
+ */
+IGB_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500), default 0=off");
+
+#define DEFAULT_LLISIZE 0
+#define MAX_LLISIZE 1500
+#define MIN_LLISIZE 0
+
+/* RSS (Enable RSS multiqueue receive)
+ *
+ * Valid Range: 0 - 8
+ *
+ * Default Value: 1
+ */
+IGB_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus");
+
+#define DEFAULT_RSS 1
+#define MAX_RSS 8
+#define MIN_RSS 0
+
+/* VMDQ (Enable VMDq multiqueue receive)
+ *
+ * Valid Range: 0 - 8
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0-1 = disable, 2-8 enable, default 0");
+
+#define DEFAULT_VMDQ 0
+#define MAX_VMDQ MAX_RSS
+#define MIN_VMDQ 0
+
+/* max_vfs (Enable SR-IOV VF devices)
+ *
+ * Valid Range: 0 - 7
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable, 1-7 enable, default 0");
+
+#define DEFAULT_SRIOV 0
+#define MAX_SRIOV 7
+#define MIN_SRIOV 0
+
+/* MDD (Enable Malicious Driver Detection)
+ *
+ * Only available when SR-IOV is enabled - max_vfs is greater than 0
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1
+ */
+IGB_PARAM(MDD, "Malicious Driver Detection (0/1), default 1 = enabled. "
+ "Only available when max_vfs is greater than 0");
+
+#ifdef DEBUG
+
+/* Disable Hardware Reset on Tx Hang
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled, i.e. h/w will reset)
+ */
+IGB_PARAM(DisableHwReset, "Disable reset of hardware on Tx hang");
+
+/* Dump Transmit and Receive buffers
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(DumpBuffers, "Dump Tx/Rx buffers on Tx hang or by request");
+
+#endif /* DEBUG */
+
+/* QueuePairs (Enable TX/RX queue pairs for interrupt handling)
+ *
+ * Valid Range: 0 - 1
+ *
+ * Default Value: 1
+ */
+IGB_PARAM(QueuePairs, "Enable Tx/Rx queue pairs for interrupt handling (0,1), default 1=on");
+
+#define DEFAULT_QUEUE_PAIRS 1
+#define MAX_QUEUE_PAIRS 1
+#define MIN_QUEUE_PAIRS 0
+
+/* Enable/disable EEE (a.k.a. IEEE802.3az)
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1
+ */
+ IGB_PARAM(EEE, "Enable/disable EEE (IEEE 802.3az) on parts that support the feature (0,1), default 1=on");
+
+/* Enable/disable DMA Coalescing
+ *
+ * Valid Values: 0(off), 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000,
+ * 9000, 10000(msec), 250(usec), 500(usec)
+ *
+ * Default Value: 0
+ */
+ IGB_PARAM(DMAC, "Disable or set latency for DMA Coalescing (0=off, 1000-10000 (msec), 250, 500 (usec))");
+
+#ifndef IGB_NO_LRO
+/* Enable/disable Large Receive Offload
+ *
+ * Valid Values: 0(off), 1(on)
+ *
+ * Default Value: 0
+ */
+ IGB_PARAM(LRO, "Large Receive Offload (0,1), default 0=off");
+
+#endif
+struct igb_opt_list {
+ int i;
+ char *str;
+};
+struct igb_option {
+ enum { enable_option, range_option, list_option } type;
+ const char *name;
+ const char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ struct igb_opt_list *p;
+ } l;
+ } arg;
+};
+
+static int igb_validate_option(unsigned int *value,
+ struct igb_option *opt,
+ struct igb_adapter *adapter)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ DPRINTK(PROBE, INFO,
+ "%s set to %d\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ struct igb_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ DPRINTK(PROBE, INFO, "%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+/**
+ * igb_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void igb_check_options(struct igb_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (bd >= IGB_MAX_NIC) {
+ DPRINTK(PROBE, NOTICE,
+ "Warning: no configuration for board #%d\n", bd);
+ DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+#ifndef module_param_array
+ bd = IGB_MAX_NIC;
+#endif
+ }
+
+ { /* Interrupt Throttling Rate */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "Interrupt Throttling Rate (ints/sec)",
+ .err = "using default of " __MODULE_STRING(DEFAULT_ITR),
+ .def = DEFAULT_ITR,
+ .arg = { .r = { .min = MIN_ITR,
+ .max = MAX_ITR } }
+ };
+
+#ifdef module_param_array
+ if (num_InterruptThrottleRate > bd) {
+#endif
+ unsigned int itr = InterruptThrottleRate[bd];
+
+ switch (itr) {
+ case 0:
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ if (hw->mac.type >= e1000_i350)
+ adapter->dmac = IGB_DMAC_DISABLE;
+ adapter->rx_itr_setting = itr;
+ break;
+ case 1:
+ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+ opt.name);
+ adapter->rx_itr_setting = itr;
+ break;
+ case 3:
+ DPRINTK(PROBE, INFO,
+ "%s set to dynamic conservative mode\n",
+ opt.name);
+ adapter->rx_itr_setting = itr;
+ break;
+ default:
+ igb_validate_option(&itr, &opt, adapter);
+			/* Save the setting, because the dynamic bits
+			 * change itr. If the user value was invalid it
+			 * has been reset to conservative mode; otherwise
+			 * clear the lower two bits because they are
+			 * used as control bits */
+ if (itr == 3) {
+ adapter->rx_itr_setting = itr;
+ } else {
+ adapter->rx_itr_setting = 1000000000 /
+ (itr * 256);
+ adapter->rx_itr_setting &= ~3;
+ }
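+			/* Illustrative arithmetic (comment only): a user value
+			 * of InterruptThrottleRate=8000 stores
+			 * 1000000000 / (8000 * 256) = 488 here, and 488
+			 * already has the low two control bits clear.
+			 */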
+ break;
+ }
+#ifdef module_param_array
+ } else {
+ adapter->rx_itr_setting = opt.def;
+ }
+#endif
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+ }
+ { /* Interrupt Mode */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "Interrupt Mode",
+ .err = "defaulting to 2 (MSI-X)",
+ .def = IGB_INT_MODE_MSIX,
+ .arg = { .r = { .min = MIN_INTMODE,
+ .max = MAX_INTMODE } }
+ };
+
+#ifdef module_param_array
+ if (num_IntMode > bd) {
+#endif
+ unsigned int int_mode = IntMode[bd];
+ igb_validate_option(&int_mode, &opt, adapter);
+ adapter->int_mode = int_mode;
+#ifdef module_param_array
+ } else {
+ adapter->int_mode = opt.def;
+ }
+#endif
+ }
+ { /* Low Latency Interrupt TCP Port */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "Low Latency Interrupt TCP Port",
+ .err = "using default of " __MODULE_STRING(DEFAULT_LLIPORT),
+ .def = DEFAULT_LLIPORT,
+ .arg = { .r = { .min = MIN_LLIPORT,
+ .max = MAX_LLIPORT } }
+ };
+
+#ifdef module_param_array
+ if (num_LLIPort > bd) {
+#endif
+ adapter->lli_port = LLIPort[bd];
+ if (adapter->lli_port) {
+ igb_validate_option(&adapter->lli_port, &opt,
+ adapter);
+ } else {
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ }
+#ifdef module_param_array
+ } else {
+ adapter->lli_port = opt.def;
+ }
+#endif
+ }
+ { /* Low Latency Interrupt on Packet Size */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "Low Latency Interrupt on Packet Size",
+ .err = "using default of " __MODULE_STRING(DEFAULT_LLISIZE),
+ .def = DEFAULT_LLISIZE,
+ .arg = { .r = { .min = MIN_LLISIZE,
+ .max = MAX_LLISIZE } }
+ };
+
+#ifdef module_param_array
+ if (num_LLISize > bd) {
+#endif
+ adapter->lli_size = LLISize[bd];
+ if (adapter->lli_size) {
+ igb_validate_option(&adapter->lli_size, &opt,
+ adapter);
+ } else {
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ }
+#ifdef module_param_array
+ } else {
+ adapter->lli_size = opt.def;
+ }
+#endif
+ }
+ { /* Low Latency Interrupt on TCP Push flag */
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "Low Latency Interrupt on TCP Push flag",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+
+#ifdef module_param_array
+ if (num_LLIPush > bd) {
+#endif
+ unsigned int lli_push = LLIPush[bd];
+ igb_validate_option(&lli_push, &opt, adapter);
+ adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0;
+#ifdef module_param_array
+ } else {
+ adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0;
+ }
+#endif
+ }
+ { /* SRIOV - Enable SR-IOV VF devices */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "max_vfs - SR-IOV VF devices",
+ .err = "using default of " __MODULE_STRING(DEFAULT_SRIOV),
+ .def = DEFAULT_SRIOV,
+ .arg = { .r = { .min = MIN_SRIOV,
+ .max = MAX_SRIOV } }
+ };
+
+#ifdef module_param_array
+ if (num_max_vfs > bd) {
+#endif
+ adapter->vfs_allocated_count = max_vfs[bd];
+ igb_validate_option(&adapter->vfs_allocated_count, &opt, adapter);
+
+#ifdef module_param_array
+ } else {
+ adapter->vfs_allocated_count = opt.def;
+ }
+#endif
+ if (adapter->vfs_allocated_count) {
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82580:
+ case e1000_i210:
+ case e1000_i211:
+ case e1000_i354:
+ adapter->vfs_allocated_count = 0;
+ DPRINTK(PROBE, INFO, "SR-IOV option max_vfs not supported.\n");
+ default:
+ break;
+ }
+ }
+ }
+ { /* VMDQ - Enable VMDq multiqueue receive */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "VMDQ - VMDq multiqueue queue count",
+ .err = "using default of " __MODULE_STRING(DEFAULT_VMDQ),
+ .def = DEFAULT_VMDQ,
+ .arg = { .r = { .min = MIN_VMDQ,
+ .max = (MAX_VMDQ - adapter->vfs_allocated_count) } }
+ };
+	if ((hw->mac.type != e1000_i210) &&
+	    (hw->mac.type != e1000_i211)) {
+#ifdef module_param_array
+ if (num_VMDQ > bd) {
+#endif
+ adapter->vmdq_pools = (VMDQ[bd] == 1 ? 0 : VMDQ[bd]);
+ if (adapter->vfs_allocated_count && !adapter->vmdq_pools) {
+ DPRINTK(PROBE, INFO, "Enabling SR-IOV requires VMDq be set to at least 1\n");
+ adapter->vmdq_pools = 1;
+ }
+ igb_validate_option(&adapter->vmdq_pools, &opt, adapter);
+
+#ifdef module_param_array
+ } else {
+ if (!adapter->vfs_allocated_count)
+ adapter->vmdq_pools = (opt.def == 1 ? 0 : opt.def);
+ else
+ adapter->vmdq_pools = 1;
+ }
+#endif
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ if (hw->mac.type == e1000_82575 && adapter->vmdq_pools) {
+ DPRINTK(PROBE, INFO, "VMDq not supported on this part.\n");
+ adapter->vmdq_pools = 0;
+ }
+#endif
+
+ } else {
+ DPRINTK(PROBE, INFO, "VMDq option is not supported.\n");
+ adapter->vmdq_pools = opt.def;
+ }
+ }
+ { /* RSS - Enable RSS multiqueue receives */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "RSS - RSS multiqueue receive count",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RSS),
+ .def = DEFAULT_RSS,
+ .arg = { .r = { .min = MIN_RSS,
+ .max = MAX_RSS } }
+ };
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+#ifndef CONFIG_IGB_VMDQ_NETDEV
+ if (!!adapter->vmdq_pools) {
+ if (adapter->vmdq_pools <= 2) {
+ if (adapter->vmdq_pools == 2)
+ opt.arg.r.max = 3;
+ } else {
+ opt.arg.r.max = 1;
+ }
+ } else {
+ opt.arg.r.max = 4;
+ }
+#else
+ opt.arg.r.max = !!adapter->vmdq_pools ? 1 : 4;
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+ break;
+ case e1000_i210:
+ opt.arg.r.max = 4;
+ break;
+ case e1000_i211:
+ opt.arg.r.max = 2;
+ break;
+ case e1000_82576:
+#ifndef CONFIG_IGB_VMDQ_NETDEV
+ if (!!adapter->vmdq_pools)
+ opt.arg.r.max = 2;
+ break;
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ default:
+ if (!!adapter->vmdq_pools)
+ opt.arg.r.max = 1;
+ break;
+ }
+
+ if (adapter->int_mode != IGB_INT_MODE_MSIX) {
+ DPRINTK(PROBE, INFO, "RSS is not supported when in MSI/Legacy Interrupt mode, %s\n",
+ opt.err);
+ opt.arg.r.max = 1;
+ }
+
+#ifdef module_param_array
+ if (num_RSS > bd) {
+#endif
+ adapter->rss_queues = RSS[bd];
+ switch (adapter->rss_queues) {
+ case 1:
+ break;
+ default:
+ igb_validate_option(&adapter->rss_queues, &opt, adapter);
+ if (adapter->rss_queues)
+ break;
+ case 0:
+ adapter->rss_queues = min_t(u32, opt.arg.r.max, num_online_cpus());
+ break;
+ }
+#ifdef module_param_array
+ } else {
+ adapter->rss_queues = opt.def;
+ }
+#endif
+ }
+ { /* QueuePairs - Enable Tx/Rx queue pairs for interrupt handling */
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "QueuePairs - Tx/Rx queue pairs for interrupt handling",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+#ifdef module_param_array
+ if (num_QueuePairs > bd) {
+#endif
+ unsigned int qp = QueuePairs[bd];
+ /*
+ * We must enable queue pairs if the number of queues
+ * exceeds the number of available interrupts. We are
+ * limited to 10, or 3 per unallocated vf. On I210 and
+ * I211 devices, we are limited to 5 interrupts.
+ * However, since I211 only supports 2 queues, we do not
+ * need to check and override the user option.
+ */
+ if (qp == OPTION_DISABLED) {
+ if (adapter->rss_queues > 4)
+ qp = OPTION_ENABLED;
+
+ if (adapter->vmdq_pools > 4)
+ qp = OPTION_ENABLED;
+
+ if (adapter->rss_queues > 1 &&
+ (adapter->vmdq_pools > 3 ||
+ adapter->vfs_allocated_count > 6))
+ qp = OPTION_ENABLED;
+
+ if (hw->mac.type == e1000_i210 &&
+ adapter->rss_queues > 2)
+ qp = OPTION_ENABLED;
+
+ if (qp == OPTION_ENABLED)
+ DPRINTK(PROBE, INFO, "Number of queues exceeds available interrupts, %s\n",
+ opt.err);
+ }
+ igb_validate_option(&qp, &opt, adapter);
+ adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0;
+#ifdef module_param_array
+ } else {
+ adapter->flags |= opt.def ? IGB_FLAG_QUEUE_PAIRS : 0;
+ }
+#endif
+ }
+ { /* EEE - Enable EEE for capable adapters */
+
+ if (hw->mac.type >= e1000_i350) {
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "EEE Support",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+#ifdef module_param_array
+ if (num_EEE > bd) {
+#endif
+ unsigned int eee = EEE[bd];
+ igb_validate_option(&eee, &opt, adapter);
+ adapter->flags |= eee ? IGB_FLAG_EEE : 0;
+ if (eee)
+ hw->dev_spec._82575.eee_disable = false;
+ else
+ hw->dev_spec._82575.eee_disable = true;
+
+#ifdef module_param_array
+ } else {
+ adapter->flags |= opt.def ? IGB_FLAG_EEE : 0;
+ if (adapter->flags & IGB_FLAG_EEE)
+ hw->dev_spec._82575.eee_disable = false;
+ else
+ hw->dev_spec._82575.eee_disable = true;
+ }
+#endif
+ }
+ }
+ { /* DMAC - Enable DMA Coalescing for capable adapters */
+
+ if (hw->mac.type >= e1000_i350) {
+ struct igb_opt_list list [] = {
+ { IGB_DMAC_DISABLE, "DMAC Disable"},
+ { IGB_DMAC_MIN, "DMAC 250 usec"},
+ { IGB_DMAC_500, "DMAC 500 usec"},
+ { IGB_DMAC_EN_DEFAULT, "DMAC 1000 usec"},
+ { IGB_DMAC_2000, "DMAC 2000 usec"},
+ { IGB_DMAC_3000, "DMAC 3000 usec"},
+ { IGB_DMAC_4000, "DMAC 4000 usec"},
+ { IGB_DMAC_5000, "DMAC 5000 usec"},
+ { IGB_DMAC_6000, "DMAC 6000 usec"},
+ { IGB_DMAC_7000, "DMAC 7000 usec"},
+ { IGB_DMAC_8000, "DMAC 8000 usec"},
+ { IGB_DMAC_9000, "DMAC 9000 usec"},
+ { IGB_DMAC_MAX, "DMAC 10000 usec"}
+ };
+ struct igb_option opt = {
+ .type = list_option,
+ .name = "DMA Coalescing",
+ .err = "using default of "__MODULE_STRING(IGB_DMAC_DISABLE),
+ .def = IGB_DMAC_DISABLE,
+ .arg = { .l = { .nr = 13,
+ .p = list
+ }
+ }
+ };
+#ifdef module_param_array
+ if (num_DMAC > bd) {
+#endif
+ unsigned int dmac = DMAC[bd];
+ if (adapter->rx_itr_setting == IGB_DMAC_DISABLE)
+ dmac = IGB_DMAC_DISABLE;
+ igb_validate_option(&dmac, &opt, adapter);
+ switch (dmac) {
+ case IGB_DMAC_DISABLE:
+ case IGB_DMAC_MIN:
+ case IGB_DMAC_500:
+ case IGB_DMAC_EN_DEFAULT:
+ case IGB_DMAC_2000:
+ case IGB_DMAC_3000:
+ case IGB_DMAC_4000:
+ case IGB_DMAC_5000:
+ case IGB_DMAC_6000:
+ case IGB_DMAC_7000:
+ case IGB_DMAC_8000:
+ case IGB_DMAC_9000:
+ case IGB_DMAC_MAX:
+ /* every recognized interval is stored as-is */
+ adapter->dmac = dmac;
+ break;
+ default:
+ adapter->dmac = opt.def;
+ DPRINTK(PROBE, INFO,
+ "Invalid DMAC setting, "
+ "resetting DMAC to %d\n", opt.def);
+ }
+#ifdef module_param_array
+ } else
+ adapter->dmac = opt.def;
+#endif
+ }
+ }
+#ifndef IGB_NO_LRO
+ { /* LRO - Enable Large Receive Offload */
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "LRO - Large Receive Offload",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+ struct net_device *netdev = adapter->netdev;
+#ifdef module_param_array
+ if (num_LRO > bd) {
+#endif
+ unsigned int lro = LRO[bd];
+ igb_validate_option(&lro, &opt, adapter);
+ netdev->features |= lro ? NETIF_F_LRO : 0;
+#ifdef module_param_array
+ } else if (opt.def == OPTION_ENABLED) {
+ netdev->features |= NETIF_F_LRO;
+ }
+#endif
+ }
+#endif /* IGB_NO_LRO */
+ { /* MDD - Enable Malicious Driver Detection. Only available when
+ SR-IOV is enabled. */
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "Malicious Driver Detection",
+ .err = "defaulting to 1",
+ .def = OPTION_ENABLED,
+ .arg = { .r = { .min = OPTION_DISABLED,
+ .max = OPTION_ENABLED } }
+ };
+
+#ifdef module_param_array
+ if (num_MDD > bd) {
+#endif
+ adapter->mdd = MDD[bd];
+ igb_validate_option((uint *)&adapter->mdd, &opt,
+ adapter);
+#ifdef module_param_array
+ } else {
+ adapter->mdd = opt.def;
+ }
+#endif
+ }
+}
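+
+/*
+ * An illustrative invocation, assuming the module parameter names match
+ * the per-port option arrays referenced above (max_vfs, RSS, QueuePairs,
+ * ...): each option array is indexed by probe order, so values can be
+ * supplied per adapter at load time, for example
+ *
+ *   modprobe igb max_vfs=4,0 RSS=4,2 QueuePairs=1,1
+ *
+ * where the first value applies to the first port probed and ports
+ * without a value fall back to the option default.
+ */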
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_procfs.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_procfs.c
new file mode 100755
index 00000000..2e7850ca
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_procfs.c
@@ -0,0 +1,363 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+
+#ifdef IGB_PROCFS
+#ifndef IGB_HWMON
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+
+static struct proc_dir_entry *igb_top_dir = NULL;
+
+
+bool igb_thermal_present(struct igb_adapter *adapter)
+{
+ s32 status;
+ struct e1000_hw *hw;
+
+ if (adapter == NULL)
+ return false;
+ hw = &adapter->hw;
+
+ /*
+ * Only set I2C bit-bang mode if an external thermal sensor is
+ * supported on this device.
+ */
+ if (adapter->ets) {
+ status = e1000_set_i2c_bb(hw);
+ if (status != E1000_SUCCESS)
+ return false;
+ }
+
+ status = hw->mac.ops.init_thermal_sensor_thresh(hw);
+ if (status != E1000_SUCCESS)
+ return false;
+
+ return true;
+}
+
+
+static int igb_macburn(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct e1000_hw *hw;
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ hw = &adapter->hw;
+ if (hw == NULL)
+ return snprintf(page, count, "error: no hw data\n");
+
+ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
+ (unsigned int)hw->mac.perm_addr[0],
+ (unsigned int)hw->mac.perm_addr[1],
+ (unsigned int)hw->mac.perm_addr[2],
+ (unsigned int)hw->mac.perm_addr[3],
+ (unsigned int)hw->mac.perm_addr[4],
+ (unsigned int)hw->mac.perm_addr[5]);
+}
+
+static int igb_macadmn(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct e1000_hw *hw;
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ hw = &adapter->hw;
+ if (hw == NULL)
+ return snprintf(page, count, "error: no hw data\n");
+
+ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
+ (unsigned int)hw->mac.addr[0],
+ (unsigned int)hw->mac.addr[1],
+ (unsigned int)hw->mac.addr[2],
+ (unsigned int)hw->mac.addr[3],
+ (unsigned int)hw->mac.addr[4],
+ (unsigned int)hw->mac.addr[5]);
+}
+
+static int igb_numeports(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct e1000_hw *hw;
+ int ports;
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ hw = &adapter->hw;
+ if (hw == NULL)
+ return snprintf(page, count, "error: no hw data\n");
+
+ ports = 4;
+
+ return snprintf(page, count, "%d\n", ports);
+}
+
+static int igb_porttype(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ return snprintf(page, count, "%d\n",
+ test_bit(__IGB_DOWN, &adapter->state));
+}
+
+static int igb_therm_location(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct igb_therm_proc_data *therm_data =
+ (struct igb_therm_proc_data *)data;
+
+ if (therm_data == NULL)
+ return snprintf(page, count, "error: no therm_data\n");
+
+ return snprintf(page, count, "%d\n", therm_data->sensor_data->location);
+}
+
+static int igb_therm_maxopthresh(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct igb_therm_proc_data *therm_data =
+ (struct igb_therm_proc_data *)data;
+
+ if (therm_data == NULL)
+ return snprintf(page, count, "error: no therm_data\n");
+
+ return snprintf(page, count, "%d\n",
+ therm_data->sensor_data->max_op_thresh);
+}
+
+static int igb_therm_cautionthresh(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct igb_therm_proc_data *therm_data =
+ (struct igb_therm_proc_data *)data;
+
+ if (therm_data == NULL)
+ return snprintf(page, count, "error: no therm_data\n");
+
+ return snprintf(page, count, "%d\n",
+ therm_data->sensor_data->caution_thresh);
+}
+
+static int igb_therm_temp(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ s32 status;
+ struct igb_therm_proc_data *therm_data =
+ (struct igb_therm_proc_data *)data;
+
+ if (therm_data == NULL)
+ return snprintf(page, count, "error: no therm_data\n");
+
+ status = e1000_get_thermal_sensor_data(therm_data->hw);
+ if (status != E1000_SUCCESS)
+ snprintf(page, count, "error: status %d returned\n", status);
+
+ return snprintf(page, count, "%d\n", therm_data->sensor_data->temp);
+}
+
+struct igb_proc_type {
+ char name[32];
+ int (*read)(char*, char**, off_t, int, int*, void*);
+};
+
+struct igb_proc_type igb_proc_entries[] = {
+ {"numeports", &igb_numeports},
+ {"porttype", &igb_porttype},
+ {"macburn", &igb_macburn},
+ {"macadmn", &igb_macadmn},
+ {"", NULL}
+};
+
+struct igb_proc_type igb_internal_entries[] = {
+ {"location", &igb_therm_location},
+ {"temp", &igb_therm_temp},
+ {"cautionthresh", &igb_therm_cautionthresh},
+ {"maxopthresh", &igb_therm_maxopthresh},
+ {"", NULL}
+};
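+
+/*
+ * For reference, the helpers below build a tree of this shape under
+ * procfs (the exact PCI name depends on the device):
+ *
+ *   /proc/driver/igb/<pci-id>/info/{numeports,porttype,macburn,macadmn}
+ *   /proc/driver/igb/<pci-id>/info/sensor_<i>/{location,temp,
+ *                                              cautionthresh,maxopthresh}
+ */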
+
+void igb_del_proc_entries(struct igb_adapter *adapter)
+{
+ int index, i;
+ char buf[16]; /* much larger than the sensor number will ever be */
+
+ if (igb_top_dir == NULL)
+ return;
+
+ for (i = 0; i < E1000_MAX_SENSORS; i++) {
+ if (adapter->therm_dir[i] == NULL)
+ continue;
+
+ for (index = 0; ; index++) {
+ if (igb_internal_entries[index].read == NULL)
+ break;
+
+ remove_proc_entry(igb_internal_entries[index].name,
+ adapter->therm_dir[i]);
+ }
+ snprintf(buf, sizeof(buf), "sensor_%d", i);
+ remove_proc_entry(buf, adapter->info_dir);
+ }
+
+ if (adapter->info_dir != NULL) {
+ for (index = 0; ; index++) {
+ if (igb_proc_entries[index].read == NULL)
+ break;
+ remove_proc_entry(igb_proc_entries[index].name,
+ adapter->info_dir);
+ }
+ remove_proc_entry("info", adapter->eth_dir);
+ }
+
+ if (adapter->eth_dir != NULL)
+ remove_proc_entry(pci_name(adapter->pdev), igb_top_dir);
+}
+
+/* called from igb_main.c */
+void igb_procfs_exit(struct igb_adapter *adapter)
+{
+ igb_del_proc_entries(adapter);
+}
+
+int igb_procfs_topdir_init(void)
+{
+ igb_top_dir = proc_mkdir("driver/igb", NULL);
+ if (igb_top_dir == NULL)
+ return (-ENOMEM);
+
+ return 0;
+}
+
+void igb_procfs_topdir_exit(void)
+{
+ remove_proc_entry("driver/igb", NULL);
+}
+
+/* called from igb_main.c */
+int igb_procfs_init(struct igb_adapter *adapter)
+{
+ int rc = 0;
+ int i;
+ int index;
+ char buf[16]; /* much larger than the sensor number will ever be */
+
+ adapter->eth_dir = NULL;
+ adapter->info_dir = NULL;
+ for (i = 0; i < E1000_MAX_SENSORS; i++)
+ adapter->therm_dir[i] = NULL;
+
+ if (igb_top_dir == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), igb_top_dir);
+ if (adapter->eth_dir == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ adapter->info_dir = proc_mkdir("info", adapter->eth_dir);
+ if (adapter->info_dir == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ for (index = 0; ; index++) {
+ if (igb_proc_entries[index].read == NULL) {
+ break;
+ }
+ if (!(create_proc_read_entry(igb_proc_entries[index].name,
+ 0444,
+ adapter->info_dir,
+ igb_proc_entries[index].read,
+ adapter))) {
+
+ rc = -ENOMEM;
+ goto fail;
+ }
+ }
+ if (igb_thermal_present(adapter) == false)
+ goto exit;
+
+ for (i = 0; i < E1000_MAX_SENSORS; i++) {
+
+ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+ continue;
+
+ snprintf(buf, sizeof(buf), "sensor_%d", i);
+ adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir);
+ if (adapter->therm_dir[i] == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ for (index = 0; ; index++) {
+ if (igb_internal_entries[index].read == NULL)
+ break;
+ /*
+ * the therm_data struct holds the pointers that the read
+ * functions will need
+ */
+ adapter->therm_data[i].hw = &adapter->hw;
+ adapter->therm_data[i].sensor_data =
+ &adapter->hw.mac.thermal_sensor_data.sensor[i];
+
+ if (!(create_proc_read_entry(
+ igb_internal_entries[index].name,
+ 0444,
+ adapter->therm_dir[i],
+ igb_internal_entries[index].read,
+ &adapter->therm_data[i]))) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ }
+ }
+ goto exit;
+
+fail:
+ igb_del_proc_entries(adapter);
+exit:
+ return rc;
+}
+
+#endif /* !IGB_HWMON */
+#endif /* IGB_PROCFS */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c
new file mode 100755
index 00000000..454b70ce
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c
@@ -0,0 +1,944 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/******************************************************************************
+ Copyright(c) 2011 Richard Cochran <richardcochran@gmail.com> for some of the
+ 82576 and 82580 code
+******************************************************************************/
+
+#include "igb.h"
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/ptp_classify.h>
+
+#define INCVALUE_MASK 0x7fffffff
+#define ISGN 0x80000000
+
+/*
+ * The 82580 timesync logic advances the system timer by 8 ns every 8 ns,
+ * and this update value cannot be reprogrammed.
+ *
+ * Neither the 82576 nor the 82580 offer registers wide enough to hold
+ * nanoseconds time values for very long. For the 82580, SYSTIM always
+ * counts nanoseconds, but the upper 24 bits are not available. The
+ * frequency is adjusted by changing the 32 bit fractional nanoseconds
+ * register, TIMINCA.
+ *
+ * For the 82576, the SYSTIM register time unit is affected by the
+ * choice of the 24 bit TIMINCA:IV (incvalue) field. Five bits of this
+ * field are needed to provide the nominal 16 nanosecond period,
+ * leaving 19 bits for fractional nanoseconds.
+ *
+ * We scale the NIC clock cycle by a large factor so that relatively
+ * small clock corrections can be added or subtracted at each clock
+ * tick. The drawbacks of a large factor are a) that the clock
+ * register overflows more quickly (not such a big deal) and b) that
+ * the increment per tick has to fit into 24 bits. As a result we
+ * need to use a shift of 19 so we can fit a value of 16 into the
+ * TIMINCA register.
+ *
+ *
+ * SYSTIMH SYSTIML
+ * +--------------+ +---+---+------+
+ * 82576 | 32 | | 8 | 5 | 19 |
+ * +--------------+ +---+---+------+
+ * \________ 45 bits _______/ fract
+ *
+ * +----------+---+ +--------------+
+ * 82580 | 24 | 8 | | 32 |
+ * +----------+---+ +--------------+
+ * reserved \______ 40 bits _____/
+ *
+ *
+ * The 45 bit 82576 SYSTIM overflows every
+ * 2^45 * 10^-9 / 3600 = 9.77 hours.
+ *
+ * The 40 bit 82580 SYSTIM overflows every
+ * 2^40 * 10^-9 / 60 = 18.3 minutes.
+ */
+
+#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGB_PTP_TX_TIMEOUT (HZ * 15)
+#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
+#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
+#define IGB_NBITS_82580 40
+
+/*
+ * SYSTIM read access for the 82576
+ */
+
+static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
+{
+ struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+ struct e1000_hw *hw = &igb->hw;
+ u64 val;
+ u32 lo, hi;
+
+ lo = E1000_READ_REG(hw, E1000_SYSTIML);
+ hi = E1000_READ_REG(hw, E1000_SYSTIMH);
+
+ val = ((u64) hi) << 32;
+ val |= lo;
+
+ return val;
+}
+
+/*
+ * SYSTIM read access for the 82580
+ */
+
+static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
+{
+ struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+ struct e1000_hw *hw = &igb->hw;
+ u64 val;
+ u32 lo, hi;
+
+ /* The timestamp latches on lowest register read. For the 82580
+ * the lowest register is SYSTIMR instead of SYSTIML. However we only
+ * need to provide nanosecond resolution, so we just ignore it.
+ */
+ E1000_READ_REG(hw, E1000_SYSTIMR);
+ lo = E1000_READ_REG(hw, E1000_SYSTIML);
+ hi = E1000_READ_REG(hw, E1000_SYSTIMH);
+
+ val = ((u64) hi) << 32;
+ val |= lo;
+
+ return val;
+}
+
+/*
+ * SYSTIM read access for I210/I211
+ */
+
+static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 sec, nsec;
+
+ /* The timestamp latches on lowest register read. For I210/I211, the
+ * lowest register is SYSTIMR. Since we only need to provide nanosecond
+ * resolution, we can ignore it.
+ */
+ E1000_READ_REG(hw, E1000_SYSTIMR);
+ nsec = E1000_READ_REG(hw, E1000_SYSTIML);
+ sec = E1000_READ_REG(hw, E1000_SYSTIMH);
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static void igb_ptp_write_i210(struct igb_adapter *adapter,
+ const struct timespec *ts)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+ * Writing the SYSTIMR register is not necessary as it only provides
+ * sub-nanosecond resolution.
+ */
+ E1000_WRITE_REG(hw, E1000_SYSTIML, ts->tv_nsec);
+ E1000_WRITE_REG(hw, E1000_SYSTIMH, ts->tv_sec);
+}
+
+/**
+ * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp
+ * @adapter: board private structure
+ * @hwtstamps: timestamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * The 'tmreg_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two (or three) 32 bit registers. The first
+ * read latches the value. Ditto for writing.
+ *
+ * In addition, we have extended the system time with an overflow
+ * counter in software.
+ **/
+static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
+{
+ unsigned long flags;
+ u64 ns;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+ ns = timecounter_cyc2time(&adapter->tc, systim);
+
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ /* Upper 32 bits contain s, lower 32 bits contain ns. */
+ hwtstamps->hwtstamp = ktime_set(systim >> 32,
+ systim & 0xFFFFFFFF);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * PTP clock operations
+ */
+
+static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
+ int neg_adj = 0;
+ u64 rate;
+ u32 incvalue;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+ rate = ppb;
+ rate <<= 14;
+ rate = div_u64(rate, 1953125);
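+ /*
+ * Sketch of where the constant comes from (assuming the shift of 19
+ * and nominal 16 ns increment described in the header comment): the
+ * requested ppb maps to an incvalue delta of
+ * ppb * 16 * 2^19 / 10^9 = ppb * 2^23 / 10^9,
+ * and since 10^9 = 2^9 * 1953125 this equals (ppb << 14) / 1953125,
+ * which is exactly the scaling performed above.
+ */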
+
+ incvalue = 16 << IGB_82576_TSYNC_SHIFT;
+
+ if (neg_adj)
+ incvalue -= rate;
+ else
+ incvalue += rate;
+
+ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
+
+ return 0;
+}
+
+static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
+ int neg_adj = 0;
+ u64 rate;
+ u32 inca;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+ rate = ppb;
+ rate <<= 26;
+ rate = div_u64(rate, 1953125);
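+ /*
+ * Sketch of the scaling (assuming the 8 ns update period and 32 bit
+ * fractional nanoseconds register described in the header comment):
+ * a ppb adjustment adds 8 * ppb / 10^9 ns per update, i.e.
+ * 8 * ppb * 2^32 / 10^9 = ppb * 2^35 / 10^9 fractional units. With
+ * 10^9 = 2^9 * 1953125 this is (ppb << 26) / 1953125, matching the
+ * computation above.
+ */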
+
+ /* At 2.5G speeds, the TIMINCA register on I354 updates the clock 2.5x
+ * as quickly. Account for this by dividing the adjustment by 2.5.
+ */
+ if (hw->mac.type == e1000_i354) {
+ u32 status = E1000_READ_REG(hw, E1000_STATUS);
+
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ rate <<= 1;
+ rate = div_u64(rate, 5);
+ }
+ }
+
+ inca = rate & INCVALUE_MASK;
+ if (neg_adj)
+ inca |= ISGN;
+
+ E1000_WRITE_REG(hw, E1000_TIMINCA, inca);
+
+ return 0;
+}
+
+static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+ s64 now;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ now = timecounter_read(&igb->tc);
+ now += delta;
+ timecounter_init(&igb->tc, &igb->cc, now);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+ struct timespec now, then = ns_to_timespec(delta);
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ igb_ptp_read_i210(igb, &now);
+ now = timespec_add(now, then);
+ igb_ptp_write_i210(igb, (const struct timespec *)&now);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
+ struct timespec *ts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+ u64 ns;
+ u32 remainder;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ ns = timecounter_read(&igb->tc);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
+ struct timespec *ts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ igb_ptp_read_i210(igb, ts);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+ u64 ns;
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ timecounter_init(&igb->tc, &igb->cc, ns);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ igb_ptp_write_i210(igb, ts);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * igb_ptp_tx_work
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb.
+ */
+void igb_ptp_tx_work(struct work_struct *work)
+{
+ struct igb_adapter *adapter = container_of(work, struct igb_adapter,
+ ptp_tx_work);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tsynctxctl;
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (time_is_before_jiffies(adapter->ptp_tx_start +
+ IGB_PTP_TX_TIMEOUT)) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ adapter->tx_hwtstamp_timeouts++;
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+ return;
+ }
+
+ tsynctxctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
+ igb_ptp_tx_hwtstamp(adapter);
+ else
+ /* reschedule to check later */
+ schedule_work(&adapter->ptp_tx_work);
+}
+
+static void igb_ptp_overflow_check(struct work_struct *work)
+{
+ struct igb_adapter *igb =
+ container_of(work, struct igb_adapter, ptp_overflow_work.work);
+ struct timespec ts;
+
+ igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
+
+ pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+
+ schedule_delayed_work(&igb->ptp_overflow_work,
+ IGB_SYSTIM_OVERFLOW_PERIOD);
+}
+
+/**
+ * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @adapter: private network adapter structure
+ *
+ * This watchdog task is scheduled to detect the error case where the hardware
+ * has dropped an Rx packet that was timestamped while the ring was full. This
+ * particular error is rare but leaves the device unable to timestamp any
+ * future packets.
+ */
+void igb_ptp_rx_hang(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *rx_ring;
+ u32 tsyncrxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ unsigned long rx_event;
+ int n;
+
+ if (hw->mac.type != e1000_82576)
+ return;
+
+ /* If we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) {
+ adapter->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* Determine the most recent watchdog or rx_timestamp event */
+ rx_event = adapter->last_rx_ptp_check;
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* Only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ E1000_READ_REG(hw, E1000_RXSTMPH);
+ adapter->last_rx_ptp_check = jiffies;
+ adapter->rx_hwtstamp_cleared++;
+ dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+ }
+}
+
+/**
+ * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @adapter: Board private structure.
+ *
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we
+ * allow only one such packet into the queue.
+ */
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 regval;
+
+ regval = E1000_READ_REG(hw, E1000_TXSTMPL);
+ regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32;
+
+ igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+}
+
+/**
+ * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * @q_vector: Pointer to interrupt specific structure
+ * @va: Pointer to address containing Rx buffer
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the first buffer of an
+ * incoming frame. The value is stored in little endian format starting on
+ * byte 8.
+ */
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
+ struct sk_buff *skb)
+{
+ __le64 *regval = (__le64 *)va;
+
+ /*
+ * The timestamp is recorded in little endian format.
+ * DWORD: 0 1 2 3
+ * Field: Reserved Reserved SYSTIML SYSTIMH
+ */
+ igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+ le64_to_cpu(regval[1]));
+}
+
+/**
+ * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
+ * @q_vector: Pointer to interrupt specific structure
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+ */
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
+ struct sk_buff *skb)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ u64 regval;
+
+ /*
+ * If this bit is set, then the RX registers contain the time stamp. No
+ * other packet will be time stamped until we read these registers, so
+ * read the registers to make them available again. Because only one
+ * packet can be time stamped at a time, we know that the register
+ * values must belong to this one here and therefore we don't need to
+ * compare any of the additional attributes stored for it.
+ *
+ * If nothing went wrong, then it should have a shared tx_flags that we
+ * can turn into a skb_shared_hwtstamps.
+ */
+ if (!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
+
+ regval = E1000_READ_REG(hw, E1000_RXSTMPL);
+ regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32;
+
+ igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+
+/**
+ * igb_ptp_hwtstamp_ioctl - control hardware time stamping
+ * @netdev: network interface device structure
+ * @ifr: interface request containing the user hwtstamp_config
+ * @cmd: ioctl command
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ **/
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct hwtstamp_config config;
+ u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ u32 tsync_rx_cfg = 0;
+ bool is_l4 = false;
+ bool is_l2 = false;
+ u32 regval;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
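+ /* fall through */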
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ /*
+ * The 82576 cannot timestamp all packets, which would be required
+ * to support both V1 Sync and Delay_Req messages
+ */
+ if (hw->mac.type != e1000_82576) {
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+ /* fall through */
+ default:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ if (hw->mac.type == e1000_82575) {
+ if (tsync_rx_ctl | tsync_tx_ctl)
+ return -EINVAL;
+ return 0;
+ }
+
+ /*
+ * Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping on all packets as long
+ * as an Rx filter was configured.
+ */
+ if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
+ tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ is_l2 = true;
+ is_l4 = true;
+
+ if ((hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+ regval = E1000_READ_REG(hw, E1000_RXPBS);
+ regval |= E1000_RXPBS_CFG_TS_EN;
+ E1000_WRITE_REG(hw, E1000_RXPBS, regval);
+ }
+ }
+
+ /* enable/disable TX */
+ regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ regval &= ~E1000_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval);
+
+ /* enable/disable RX */
+ regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval);
+
+ /* define which PTP packets are time stamped */
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg);
+
+ /* define ethertype filter for timestamped packets */
+ if (is_l2)
+ E1000_WRITE_REG(hw, E1000_ETQF(3),
+ (E1000_ETQF_FILTER_ENABLE | /* enable filter */
+ E1000_ETQF_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ E1000_WRITE_REG(hw, E1000_ETQF(3), 0);
+
+ /* L4 Queue Filter[3]: filter by destination port and protocol */
+ if (is_l4) {
+ u32 ftqf = (IPPROTO_UDP /* UDP */
+ | E1000_FTQF_VF_BP /* VF not compared */
+ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
+ | E1000_FTQF_MASK); /* mask all inputs */
+ ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+ E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_EV_PORT));
+ E1000_WRITE_REG(hw, E1000_IMIREXT(3),
+ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+ if (hw->mac.type == e1000_82576) {
+ /* enable source port check */
+ E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_EV_PORT));
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+ }
+ E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf);
+ } else {
+ E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK);
+ }
+ E1000_WRITE_FLUSH(hw);
+
+ /* clear TX/RX time stamp registers, just to be sure */
+ regval = E1000_READ_REG(hw, E1000_TXSTMPL);
+ regval = E1000_READ_REG(hw, E1000_TXSTMPH);
+ regval = E1000_READ_REG(hw, E1000_RXSTMPL);
+ regval = E1000_READ_REG(hw, E1000_RXSTMPH);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
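+
+/*
+ * A minimal userspace sketch of driving the ioctl above (standard
+ * SIOCSHWTSTAMP UAPI; the socket fd and interface name are assumptions):
+ *
+ *   struct hwtstamp_config cfg = {
+ *           .tx_type   = HWTSTAMP_TX_ON,
+ *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
+ *   };
+ *   struct ifreq ifr;
+ *
+ *   memset(&ifr, 0, sizeof(ifr));
+ *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *   ifr.ifr_data = (void *)&cfg;
+ *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+ *
+ * On return cfg reflects the filter actually programmed, e.g. it may be
+ * widened to HWTSTAMP_FILTER_ALL on 82580 and newer parts as above.
+ */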
+
+void igb_ptp_init(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 999999881;
+ adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+ adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+ adapter->ptp_caps.settime = igb_ptp_settime_82576;
+ adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->cc.read = igb_ptp_read_82576;
+ adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mult = 1;
+ adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
+ /* Dial the nominal frequency. */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
+ INCVALUE_82576);
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 62499999;
+ adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+ adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+ adapter->ptp_caps.settime = igb_ptp_settime_82576;
+ adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->cc.read = igb_ptp_read_82580;
+ adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
+ adapter->cc.mult = 1;
+ adapter->cc.shift = 0;
+ /* Enable the timer functions by clearing bit 31. */
+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 62499999;
+ adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
+ adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
+ adapter->ptp_caps.settime = igb_ptp_settime_i210;
+ adapter->ptp_caps.enable = igb_ptp_enable;
+ /* Enable the timer functions by clearing bit 31. */
+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
+ break;
+ default:
+ adapter->ptp_clock = NULL;
+ return;
+ }
+
+ E1000_WRITE_FLUSH(hw);
+
+ spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+
+ /* Initialize the clock and overflow work for devices that need it. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+ struct timespec ts = ktime_to_timespec(ktime_get_real());
+
+ igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+
+ INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+ igb_ptp_overflow_check);
+
+ schedule_delayed_work(&adapter->ptp_overflow_work,
+ IGB_SYSTIM_OVERFLOW_PERIOD);
+ }
+
+ /* Initialize the time sync interrupts for devices that support it. */
+ if (hw->mac.type >= e1000_82580) {
+ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
+ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
+ }
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ adapter->ptp_clock = NULL;
+ dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
+ } else {
+ dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+ adapter->netdev->name);
+ adapter->flags |= IGB_FLAG_PTP;
+ }
+}
+
+/**
+ * igb_ptp_stop - Disable PTP device and stop the overflow check.
+ * @adapter: Board private structure.
+ *
+ * This function stops the PTP support and cancels the delayed work.
+ **/
+void igb_ptp_stop(struct igb_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ cancel_delayed_work_sync(&adapter->ptp_overflow_work);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ /* No delayed work to cancel. */
+ break;
+ default:
+ return;
+ }
+
+ cancel_work_sync(&adapter->ptp_tx_work);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ }
+
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
+ adapter->netdev->name);
+ adapter->flags &= ~IGB_FLAG_PTP;
+ }
+}
+
+/**
+ * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
+ * @adapter: Board private structure.
+ *
+ * This function handles the reset work required to re-enable the PTP device.
+ **/
+void igb_ptp_reset(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags & IGB_FLAG_PTP))
+ return;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ /* Dial the nominal frequency. */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
+ INCVALUE_82576);
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable the timer functions and interrupts. */
+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
+ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
+ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
+ break;
+ default:
+ /* No work to do. */
+ return;
+ }
+
+ /* Re-initialize the timer. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+ struct timespec ts = ktime_to_timespec(ktime_get_real());
+
+ igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h
new file mode 100755
index 00000000..a6761db8
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h
@@ -0,0 +1,251 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool register test data */
+struct igb_reg_test {
+ u16 reg;
+ u16 reg_offset;
+ u16 array_len;
+ u16 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x100 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
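+/*
+ * The test types are consumed by the ethtool register self test (not part
+ * of this header). Broadly: PATTERN_TEST writes a set of bit patterns
+ * restricted by 'mask' and checks the read-back, SET_READ_TEST writes
+ * 'write & mask' once and verifies it, WRITE_NO_TEST performs a setup
+ * write with no verification, and the TABLE* types walk every entry of a
+ * register table (64 bit tables are tested as separate LO/HI words).
+ */
+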
+/* i210 reg test */
+static struct igb_reg_test reg_test_i210[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i210, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0x900FFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* i350 reg test */
+static struct igb_reg_test reg_test_i350[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ /* VET is readonly on i350 */
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i350, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* 82580 reg test */
+static struct igb_reg_test reg_test_82580[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ /* RDH is read-only for 82580, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* 82576 reg test */
+static struct igb_reg_test reg_test_82576[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ /* Enable all queues before testing. */
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ /* RDH is read-only for 82576, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* 82575 register test */
+static struct igb_reg_test reg_test_82575[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* Enable all four RX queues before testing. */
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ /* RDH is read-only for 82575, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0x800FFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c
new file mode 100755
index 00000000..90d96e62
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c
@@ -0,0 +1,437 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/tcp.h>
+
+#include "igb.h"
+#include "igb_vmdq.h"
+#include <linux/if_vlan.h>
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+int igb_vmdq_open(struct net_device *dev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct net_device *main_netdev = adapter->netdev;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ DPRINTK(DRV, WARNING,
+ "Open %s before opening this device.\n",
+ main_netdev->name);
+ return -EAGAIN;
+ }
+ netif_carrier_off(dev);
+ vadapter->tx_ring->vmdq_netdev = dev;
+ vadapter->rx_ring->vmdq_netdev = dev;
+ if (is_valid_ether_addr(dev->dev_addr)) {
+ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
+ igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
+ }
+ netif_carrier_on(dev);
+ return 0;
+}
+
+int igb_vmdq_close(struct net_device *dev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ netif_carrier_off(dev);
+ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
+
+ vadapter->tx_ring->vmdq_netdev = NULL;
+ vadapter->rx_ring->vmdq_netdev = NULL;
+ return 0;
+}
+
+netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+
+ return igb_xmit_frame_ring(skb, vadapter->tx_ring);
+}
+
+struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ vadapter->net_stats.rx_packets +=
+ E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue));
+ E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0);
+ vadapter->net_stats.tx_packets +=
+ E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue));
+ E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0);
+ vadapter->net_stats.rx_bytes +=
+ E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue));
+ E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0);
+ vadapter->net_stats.tx_bytes +=
+ E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue));
+ E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0);
+ vadapter->net_stats.multicast +=
+ E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue));
+ E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0);
+ /* only return the current stats */
+ return &vadapter->net_stats;
+}
+
+/**
+ * igb_write_vm_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int igb_write_vm_addr_list(struct net_device *netdev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ int count = 0;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > igb_available_rars(adapter))
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev)) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+ struct netdev_hw_addr *ha;
+#else
+ struct dev_mc_list *ha;
+#endif
+ netdev_for_each_uc_addr(ha, netdev) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+ igb_del_mac_filter(adapter, ha->addr, hw_queue);
+ igb_add_mac_filter(adapter, ha->addr, hw_queue);
+#else
+ igb_del_mac_filter(adapter, ha->da_addr, hw_queue);
+ igb_add_mac_filter(adapter, ha->da_addr, hw_queue);
+#endif
+ count++;
+ }
+ }
+ return count;
+}
+
+
+#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous mode */
+void igb_vmdq_set_rx_mode(struct net_device *dev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vmolr, rctl;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue));
+
+ /* clear the affected bits */
+ vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME |
+ E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE);
+
+ if (dev->flags & IFF_PROMISC) {
+ vmolr |= E1000_VMOLR_UPE;
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_UPE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ } else {
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= ~E1000_RCTL_UPE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ if (dev->flags & IFF_ALLMULTI) {
+ vmolr |= E1000_VMOLR_MPME;
+ } else {
+ /*
+ * Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ if (igb_write_mc_addr_list(adapter->netdev) != 0)
+ vmolr |= E1000_VMOLR_ROMPE;
+ }
+#ifdef HAVE_SET_RX_MODE
+ /*
+ * Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ if (igb_write_vm_addr_list(dev) < 0)
+ vmolr |= E1000_VMOLR_UPE;
+#endif
+ }
+ E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr);
+
+ return;
+}
+
+int igb_vmdq_set_mac(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
+}
+
+int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+
+ if (adapter->netdev->mtu < new_mtu) {
+ DPRINTK(PROBE, INFO,
+ "Set MTU on %s to >= %d "
+ "before changing MTU on %s\n",
+ adapter->netdev->name, new_mtu, dev->name);
+ return -EINVAL;
+ }
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+void igb_vmdq_tx_timeout(struct net_device *dev)
+{
+ return;
+}
+
+void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ vadapter->vlgrp = grp;
+
+ igb_enable_vlan_tags(adapter);
+ E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0);
+
+ return;
+}
+void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+ struct net_device *v_netdev;
+#endif
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ /* attempt to add filter to vlvf array */
+ igb_vlvf_set(adapter, vid, TRUE, hw_queue);
+
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+
+ /* Copy feature flags from netdev to the vlan netdev for this vid.
+ * This allows things like TSO to bubble down to our vlan device.
+ */
+ v_netdev = vlan_group_get_device(vadapter->vlgrp, vid);
+ v_netdev->features |= adapter->netdev->features;
+ vlan_group_set_device(vadapter->vlgrp, vid, v_netdev);
+#endif
+
+ return;
+}
+void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ vlan_group_set_device(vadapter->vlgrp, vid, NULL);
+ /* remove vlan from VLVF table array */
+ igb_vlvf_set(adapter, vid, FALSE, hw_queue);
+
+
+ return;
+}
+
+static int igb_vmdq_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 status;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ ecmd->advertising = ADVERTISED_TP;
+
+ if (hw->mac.autoneg == 1) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ /* the e1000 autoneg seems to match ethtool nicely */
+ ecmd->advertising |= hw->phy.autoneg_advertised;
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy.addr;
+ } else {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
+
+ ecmd->port = PORT_FIBRE;
+ }
+
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
+
+ if (status & E1000_STATUS_LU) {
+
+ if ((status & E1000_STATUS_SPEED_1000) ||
+ hw->phy.media_type != e1000_media_type_copper)
+ ecmd->speed = SPEED_1000;
+ else if (status & E1000_STATUS_SPEED_100)
+ ecmd->speed = SPEED_100;
+ else
+ ecmd->speed = SPEED_10;
+
+ if ((status & E1000_STATUS_FD) ||
+ hw->phy.media_type != e1000_media_type_copper)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ return 0;
+}
+
+
+static u32 igb_vmdq_get_msglevel(struct net_device *netdev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ return adapter->msg_enable;
+}
+
+static void igb_vmdq_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct net_device *main_netdev = adapter->netdev;
+
+ strncpy(drvinfo->driver, igb_driver_name, 32);
+ strncpy(drvinfo->version, igb_driver_version, 32);
+
+ strncpy(drvinfo->fw_version, "N/A", 4);
+ snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name,
+ vadapter->rx_ring->queue_index);
+ drvinfo->n_stats = 0;
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = 0;
+}
+
+static void igb_vmdq_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+
+ struct igb_ring *tx_ring = vadapter->tx_ring;
+ struct igb_ring *rx_ring = vadapter->rx_ring;
+
+ ring->rx_max_pending = IGB_MAX_RXD;
+ ring->tx_max_pending = IGB_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+static u32 igb_vmdq_get_rx_csum(struct net_device *netdev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+
+ return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags);
+}
+
+
+static struct ethtool_ops igb_vmdq_ethtool_ops = {
+ .get_settings = igb_vmdq_get_settings,
+ .get_drvinfo = igb_vmdq_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = igb_vmdq_get_ringparam,
+ .get_rx_csum = igb_vmdq_get_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_msglevel = igb_vmdq_get_msglevel,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+#endif
+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
+ .get_perm_addr = ethtool_op_get_perm_addr,
+#endif
+};
+
+void igb_vmdq_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops);
+}
+
+
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+
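igb_vmdq_get_stats() above accumulates the per-queue PF/VF counters by reading each register and then writing it back to zero, so every poll only adds the traffic seen since the previous poll. The short self-contained model below shows just that accumulation idea; the hw_counter and queue_stats types are invented for the example and are not driver structures.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for one per-queue hardware counter register. */
struct hw_counter {
	uint32_t value;
};

/* Read the counter, then reset it, the way the driver pairs
 * E1000_READ_REG() with E1000_WRITE_REG(..., 0). */
static uint32_t read_then_clear(struct hw_counter *c)
{
	uint32_t v = c->value;

	c->value = 0;
	return v;
}

struct queue_stats {
	uint64_t rx_packets;
};

int main(void)
{
	struct hw_counter gprc = { .value = 1500 };	/* packets counted so far */
	struct queue_stats stats = { 0 };

	stats.rx_packets += read_then_clear(&gprc);	/* first poll: +1500 */
	gprc.value = 250;				/* new traffic arrives */
	stats.rx_packets += read_then_clear(&gprc);	/* second poll: +250 */

	printf("rx_packets = %llu\n", (unsigned long long)stats.rx_packets);
	return 0;
}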
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h
new file mode 100755
index 00000000..e51e7c4e
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h
@@ -0,0 +1,46 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IGB_VMDQ_H_
+#define _IGB_VMDQ_H_
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+int igb_vmdq_open(struct net_device *dev);
+int igb_vmdq_close(struct net_device *dev);
+netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev);
+struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev);
+void igb_vmdq_set_rx_mode(struct net_device *dev);
+int igb_vmdq_set_mac(struct net_device *dev, void *addr);
+int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu);
+void igb_vmdq_tx_timeout(struct net_device *dev);
+void igb_vmdq_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp);
+void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+void igb_vmdq_set_ethtool_ops(struct net_device *netdev);
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+#endif /* _IGB_VMDQ_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c
new file mode 100755
index 00000000..bde3a83c
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.c
@@ -0,0 +1,1482 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "kcompat.h"
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+/* From lib/vsprintf.c */
+#include <asm/div64.h>
+
+static int skip_atoi(const char **s)
+{
+ int i=0;
+
+ while (isdigit(**s))
+ i = i*10 + *((*s)++) - '0';
+ return i;
+}
+
+#define _kc_ZEROPAD 1 /* pad with zero */
+#define _kc_SIGN 2 /* unsigned/signed long */
+#define _kc_PLUS 4 /* show plus */
+#define _kc_SPACE 8 /* space if plus */
+#define _kc_LEFT 16 /* left justified */
+#define _kc_SPECIAL 32 /* 0x */
+#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
+{
+ char c,sign,tmp[66];
+ const char *digits;
+ const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+ const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ int i;
+
+ digits = (type & _kc_LARGE) ? large_digits : small_digits;
+ if (type & _kc_LEFT)
+ type &= ~_kc_ZEROPAD;
+ if (base < 2 || base > 36)
+ return 0;
+ c = (type & _kc_ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & _kc_SIGN) {
+ if (num < 0) {
+ sign = '-';
+ num = -num;
+ size--;
+ } else if (type & _kc_PLUS) {
+ sign = '+';
+ size--;
+ } else if (type & _kc_SPACE) {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & _kc_SPECIAL) {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++]='0';
+ else while (num != 0)
+ tmp[i++] = digits[do_div(num,base)];
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
+ while(size-->0) {
+ if (buf <= end)
+ *buf = ' ';
+ ++buf;
+ }
+ }
+ if (sign) {
+ if (buf <= end)
+ *buf = sign;
+ ++buf;
+ }
+ if (type & _kc_SPECIAL) {
+ if (base==8) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ } else if (base==16) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ if (buf <= end)
+ *buf = digits[33];
+ ++buf;
+ }
+ }
+ if (!(type & _kc_LEFT)) {
+ while (size-- > 0) {
+ if (buf <= end)
+ *buf = c;
+ ++buf;
+ }
+ }
+ while (i < precision--) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ }
+ while (i-- > 0) {
+ if (buf <= end)
+ *buf = tmp[i];
+ ++buf;
+ }
+ while (size-- > 0) {
+ if (buf <= end)
+ *buf = ' ';
+ ++buf;
+ }
+ return buf;
+}
+
+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ int len;
+ unsigned long long num;
+ int i, base;
+ char *str, *end, c;
+ const char *s;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+ number of chars from string */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+ /* 'z' support added 23/7/1999 S.H. */
+ /* 'z' changed to 'Z' --davidm 1/25/99 */
+
+ str = buf;
+ end = buf + size - 1;
+
+ if (end < buf - 1) {
+ end = ((void *) -1);
+ size = end - buf + 1;
+ }
+
+ for (; *fmt ; ++fmt) {
+ if (*fmt != '%') {
+ if (str <= end)
+ *str = *fmt;
+ ++str;
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+ repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt) {
+ case '-': flags |= _kc_LEFT; goto repeat;
+ case '+': flags |= _kc_PLUS; goto repeat;
+ case ' ': flags |= _kc_SPACE; goto repeat;
+ case '#': flags |= _kc_SPECIAL; goto repeat;
+ case '0': flags |= _kc_ZEROPAD; goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (isdigit(*fmt))
+ field_width = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg(args, int);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= _kc_LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if (isdigit(*fmt))
+ precision = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg(args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
+ qualifier = *fmt;
+ ++fmt;
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt) {
+ case 'c':
+ if (!(flags & _kc_LEFT)) {
+ while (--field_width > 0) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ }
+ c = (unsigned char) va_arg(args, int);
+ if (str <= end)
+ *str = c;
+ ++str;
+ while (--field_width > 0) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ continue;
+
+ case 's':
+ s = va_arg(args, char *);
+ if (!s)
+ s = "<NULL>";
+
+ len = strnlen(s, precision);
+
+ if (!(flags & _kc_LEFT)) {
+ while (len < field_width--) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ }
+ for (i = 0; i < len; ++i) {
+ if (str <= end)
+ *str = *s;
+ ++str; ++s;
+ }
+ while (len < field_width--) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ continue;
+
+ case 'p':
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ flags |= _kc_ZEROPAD;
+ }
+ str = number(str, end,
+ (unsigned long) va_arg(args, void *),
+ 16, field_width, precision, flags);
+ continue;
+
+
+ case 'n':
+ /* FIXME:
+ * What does C99 say about the overflow case here? */
+ if (qualifier == 'l') {
+ long * ip = va_arg(args, long *);
+ *ip = (str - buf);
+ } else if (qualifier == 'Z') {
+ size_t * ip = va_arg(args, size_t *);
+ *ip = (str - buf);
+ } else {
+ int * ip = va_arg(args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ case '%':
+ if (str <= end)
+ *str = '%';
+ ++str;
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'X':
+ flags |= _kc_LARGE;
+ case 'x':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= _kc_SIGN;
+ case 'u':
+ break;
+
+ default:
+ if (str <= end)
+ *str = '%';
+ ++str;
+ if (*fmt) {
+ if (str <= end)
+ *str = *fmt;
+ ++str;
+ } else {
+ --fmt;
+ }
+ continue;
+ }
+ if (qualifier == 'L')
+ num = va_arg(args, long long);
+ else if (qualifier == 'l') {
+ num = va_arg(args, unsigned long);
+ if (flags & _kc_SIGN)
+ num = (signed long) num;
+ } else if (qualifier == 'Z') {
+ num = va_arg(args, size_t);
+ } else if (qualifier == 'h') {
+ num = (unsigned short) va_arg(args, int);
+ if (flags & _kc_SIGN)
+ num = (signed short) num;
+ } else {
+ num = va_arg(args, unsigned int);
+ if (flags & _kc_SIGN)
+ num = (signed int) num;
+ }
+ str = number(str, end, num, base,
+ field_width, precision, flags);
+ }
+ if (str <= end)
+ *str = '\0';
+ else if (size > 0)
+ /* don't write out a null byte if the buf size is zero */
+ *end = '\0';
+ /* the trailing null byte doesn't count towards the total
+ * ++str;
+ */
+ return str-buf;
+}
+
+int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = _kc_vsnprintf(buf,size,fmt,args);
+ va_end(args);
+ return i;
+}
+#endif /* < 2.4.8 */
+
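_kc_snprintf() above, like _kc_scnprintf() further down, is a thin variadic wrapper: it collects its arguments into a va_list and hands them to a vsnprintf-style worker. The snippet below shows the same wrapper pattern using the standard C library's vsnprintf; my_snprintf is just an illustrative name.

#include <stdarg.h>
#include <stdio.h>

/* Variadic front end forwarding to a va_list-based worker,
 * the same shape as _kc_snprintf() above. */
static int my_snprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);
	return n;
}

int main(void)
{
	char buf[32];

	my_snprintf(buf, sizeof(buf), "queue %d: %s", 3, "ok");
	puts(buf);	/* queue 3: ok */
	return 0;
}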
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#if defined(CONFIG_HIGHMEM)
+
+#ifndef PCI_DRAM_OFFSET
+#define PCI_DRAM_OFFSET 0
+#endif
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
+ PCI_DRAM_OFFSET);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ return pci_map_single(dev, (void *)page_address(page) + offset, size,
+ direction);
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+void
+_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
+ int direction)
+{
+ return pci_unmap_single(dev, dma_addr, size, direction);
+}
+
+#endif /* 2.4.13 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+int
+_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
+{
+ if (!pci_dma_supported(dev, mask))
+ return -EIO;
+ dev->dma_mask = mask;
+ return 0;
+}
+
+int
+_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (pci_resource_len(dev, i) == 0)
+ continue;
+
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+ pci_release_regions(dev);
+ return -EBUSY;
+ }
+ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+ pci_release_regions(dev);
+ return -EBUSY;
+ }
+ }
+ }
+ return 0;
+}
+
+void
+_kc_pci_release_regions(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (pci_resource_len(dev, i) == 0)
+ continue;
+
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO)
+ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+
+ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+ }
+}
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+struct net_device *
+_kc_alloc_etherdev(int sizeof_priv)
+{
+ struct net_device *dev;
+ int alloc_size;
+
+ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
+ dev = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ if (sizeof_priv)
+ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
+ dev->name[0] = '\0';
+ ether_setup(dev);
+
+ return dev;
+}
+
+int
+_kc_is_valid_ether_addr(u8 *addr)
+{
+ const char zaddr[6] = { 0, };
+
+ return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
+}
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+int
+_kc_pci_set_power_state(struct pci_dev *dev, int state)
+{
+ return 0;
+}
+
+int
+_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
+{
+ return 0;
+}
+
+#endif /* 2.4.6 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+ int off, int size)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ frag->page = page;
+ frag->page_offset = off;
+ frag->size = size;
+ skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+/*
+ * Original Copyright:
+ * find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ffs(tmp);
+}
+
+size_t _kc_strlcpy(char *dest, const char *src, size_t size)
+{
+ size_t ret = strlen(src);
+
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ memcpy(dest, src, len);
+ dest[len] = '\0';
+ }
+ return ret;
+}
+
+#ifndef do_div
+#if BITS_PER_LONG == 32
+uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
+{
+ uint64_t rem = *n;
+ uint64_t b = base;
+ uint64_t res, d = 1;
+ uint32_t high = rem >> 32;
+
+ /* Reduce the thing a bit first */
+ res = 0;
+ if (high >= base) {
+ high /= base;
+ res = (uint64_t) high << 32;
+ rem -= (uint64_t) (high*base) << 32;
+ }
+
+ while ((int64_t)b > 0 && b < rem) {
+ b = b+b;
+ d = d+d;
+ }
+
+ do {
+ if (rem >= b) {
+ rem -= b;
+ res += d;
+ }
+ b >>= 1;
+ d >>= 1;
+ } while (d);
+
+ *n = res;
+ return rem;
+}
+#endif /* BITS_PER_LONG == 32 */
+#endif /* do_div */
+#endif /* 2.6.0 => 2.4.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+ return (i >= size) ? (size - 1) : i;
+}
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+char *_kc_kstrdup(const char *s, unsigned int gfp)
+{
+ size_t len;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ len = strlen(s) + 1;
+ buf = kmalloc(len, gfp);
+ if (buf)
+ memcpy(buf, s, len);
+ return buf;
+}
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+void *_kc_kzalloc(size_t size, int flags)
+{
+ void *ret = kmalloc(size, flags);
+ if (ret)
+ memset(ret, 0, size);
+ return ret;
+}
+#endif /* <= 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+int _kc_skb_pad(struct sk_buff *skb, int pad)
+{
+ int ntail;
+
+ /* If the skbuff is non linear tailroom is always zero.. */
+ if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+ memset(skb->data+skb->len, 0, pad);
+ return 0;
+ }
+
+ ntail = skb->data_len + pad - (skb->end - skb->tail);
+ if (likely(skb_cloned(skb) || ntail > 0)) {
+ if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
+ goto free_skb;
+ }
+
+#ifdef MAX_SKB_FRAGS
+ if (skb_is_nonlinear(skb) &&
+ !__pskb_pull_tail(skb, skb->data_len))
+ goto free_skb;
+
+#endif
+ memset(skb->data + skb->len, 0, pad);
+ return 0;
+
+free_skb:
+ kfree_skb(skb);
+ return -ENOMEM;
+}
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+int _kc_pci_save_state(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int size = PCI_CONFIG_SPACE_LEN, i;
+ u16 pcie_cap_offset, pcie_link_status;
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+ /* no ->dev for 2.4 kernels */
+ WARN_ON(pdev->dev.driver_data == NULL);
+#endif
+ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap_offset) {
+ if (!pci_read_config_word(pdev,
+ pcie_cap_offset + PCIE_LINK_STATUS,
+ &pcie_link_status))
+ size = PCIE_CONFIG_SPACE_LEN;
+ }
+ pci_config_space_ich8lan();
+#ifdef HAVE_PCI_ERS
+ if (adapter->config_space == NULL)
+#else
+ WARN_ON(adapter->config_space != NULL);
+#endif
+ adapter->config_space = kmalloc(size, GFP_KERNEL);
+ if (!adapter->config_space) {
+ printk(KERN_ERR "Out of memory in pci_save_state\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < (size / 4); i++)
+ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
+ return 0;
+}
+
+void _kc_pci_restore_state(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int size = PCI_CONFIG_SPACE_LEN, i;
+ u16 pcie_cap_offset;
+ u16 pcie_link_status;
+
+ if (adapter->config_space != NULL) {
+ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap_offset &&
+ !pci_read_config_word(pdev,
+ pcie_cap_offset + PCIE_LINK_STATUS,
+ &pcie_link_status))
+ size = PCIE_CONFIG_SPACE_LEN;
+
+ pci_config_space_ich8lan();
+ for (i = 0; i < (size / 4); i++)
+ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
+#ifndef HAVE_PCI_ERS
+ kfree(adapter->config_space);
+ adapter->config_space = NULL;
+#endif
+ }
+}
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+void _kc_free_netdev(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+
+ if (adapter->config_space != NULL)
+ kfree(adapter->config_space);
+#ifdef CONFIG_SYSFS
+ if (netdev->reg_state == NETREG_UNINITIALIZED) {
+ kfree((char *)netdev - netdev->padded);
+ } else {
+ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
+ netdev->reg_state = NETREG_RELEASED;
+ class_device_put(&netdev->class_dev);
+ }
+#else
+ kfree((char *)netdev - netdev->padded);
+#endif
+}
+#endif
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
+{
+ void *p;
+
+ p = kzalloc(len, gfp);
+ if (p)
+ memcpy(p, src, len);
+ return p;
+}
+#endif /* <= 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev)
+{
+ return ((struct adapter_struct *)netdev_priv(netdev))->pdev;
+}
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+/* hexdump code taken from lib/hexdump.c */
+static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+ int groupsize, unsigned char *linebuf,
+ size_t linebuflen, bool ascii)
+{
+ const u8 *ptr = buf;
+ u8 ch;
+ int j, lx = 0;
+ int ascii_column;
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ if (!len)
+ goto nil;
+ if (len > rowsize) /* limit to one line at a time */
+ len = rowsize;
+ if ((len % groupsize) != 0) /* no mixed size output */
+ groupsize = 1;
+
+ switch (groupsize) {
+ case 8: {
+ const u64 *ptr8 = buf;
+ int ngroups = len / groupsize;
+
+ for (j = 0; j < ngroups; j++)
+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+ "%s%16.16llx", j ? " " : "",
+ (unsigned long long)*(ptr8 + j));
+ ascii_column = 17 * ngroups + 2;
+ break;
+ }
+
+ case 4: {
+ const u32 *ptr4 = buf;
+ int ngroups = len / groupsize;
+
+ for (j = 0; j < ngroups; j++)
+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+ "%s%8.8x", j ? " " : "", *(ptr4 + j));
+ ascii_column = 9 * ngroups + 2;
+ break;
+ }
+
+ case 2: {
+ const u16 *ptr2 = buf;
+ int ngroups = len / groupsize;
+
+ for (j = 0; j < ngroups; j++)
+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+ "%s%4.4x", j ? " " : "", *(ptr2 + j));
+ ascii_column = 5 * ngroups + 2;
+ break;
+ }
+
+ default:
+ for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
+ ch = ptr[j];
+ linebuf[lx++] = hex_asc(ch >> 4);
+ linebuf[lx++] = hex_asc(ch & 0x0f);
+ linebuf[lx++] = ' ';
+ }
+ if (j)
+ lx--;
+
+ ascii_column = 3 * rowsize + 2;
+ break;
+ }
+ if (!ascii)
+ goto nil;
+
+ while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
+ linebuf[lx++] = ' ';
+ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
+ linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
+ : '.';
+nil:
+ linebuf[lx++] = '\0';
+}
+
+void _kc_print_hex_dump(const char *level,
+ const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ const u8 *ptr = buf;
+ int i, linelen, remaining = len;
+ unsigned char linebuf[200];
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ for (i = 0; i < len; i += rowsize) {
+ linelen = min(remaining, rowsize);
+ remaining -= rowsize;
+ _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+ linebuf, sizeof(linebuf), ascii);
+
+ switch (prefix_type) {
+ case DUMP_PREFIX_ADDRESS:
+ printk("%s%s%*p: %s\n", level, prefix_str,
+ (int)(2 * sizeof(void *)), ptr + i, linebuf);
+ break;
+ case DUMP_PREFIX_OFFSET:
+ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
+ break;
+ default:
+ printk("%s%s%s\n", level, prefix_str, linebuf);
+ break;
+ }
+ }
+}
+
+#ifdef HAVE_I2C_SUPPORT
+struct i2c_client *
+_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+{
+ struct i2c_client *client;
+ int status;
+
+ client = kzalloc(sizeof *client, GFP_KERNEL);
+ if (!client)
+ return NULL;
+
+ client->adapter = adap;
+
+ client->dev.platform_data = info->platform_data;
+
+ client->flags = info->flags;
+ client->addr = info->addr;
+
+ strlcpy(client->name, info->type, sizeof(client->name));
+
+ /* Check for address business */
+ status = i2c_check_addr(adap, client->addr);
+ if (status)
+ goto out_err;
+
+ client->dev.parent = &client->adapter->dev;
+ client->dev.bus = &i2c_bus_type;
+
+ status = i2c_attach_client(client);
+ if (status)
+ goto out_err;
+
+ dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
+ client->name, dev_name(&client->dev));
+
+ return client;
+
+out_err:
+ dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
+ "(%d)\n", client->name, client->addr, status);
+ kfree(client);
+ return NULL;
+}
+#endif /* HAVE_I2C_SUPPORT */
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifdef NAPI
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
+{
+ struct adapter_q_vector *q_vector = container_of(napi,
+ struct adapter_q_vector,
+ napi);
+ return &q_vector->poll_dev;
+}
+
+int __kc_adapter_clean(struct net_device *netdev, int *budget)
+{
+ int work_done;
+ int work_to_do = min(*budget, netdev->quota);
+ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
+ struct napi_struct *napi = netdev->priv;
+ work_done = napi->poll(napi, work_to_do);
+ *budget -= work_done;
+ netdev->quota -= work_done;
+ return (work_done >= work_to_do) ? 1 : 0;
+}
+#endif /* NAPI */
+#endif /* <= 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 link_state;
+ int pos;
+
+ if (!parent)
+ return;
+
+ pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ if (pos) {
+ pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
+ link_state &= ~state;
+ pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
+ }
+}
+#endif /* < 2.6.26 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_stop_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_stop_subqueue(netdev, i);
+}
+void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_wake_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_wake_subqueue(netdev, i);
+}
+void _kc_netif_tx_start_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_start_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_start_subqueue(netdev, i);
+}
+#endif /* HAVE_TX_MQ */
+
+#ifndef __WARN_printf
+void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
+{
+ va_list args;
+
+ printk(KERN_WARNING "------------[ cut here ]------------\n");
+ printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, line);
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+
+ dump_stack();
+}
+#endif /* __WARN_printf */
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+
+int
+_kc_pci_prepare_to_sleep(struct pci_dev *dev)
+{
+ pci_power_t target_state;
+ int error;
+
+ target_state = pci_choose_state(dev, PMSG_SUSPEND);
+
+ pci_enable_wake(dev, target_state, true);
+
+ error = pci_set_power_state(dev, target_state);
+
+ if (error)
+ pci_enable_wake(dev, target_state, false);
+
+ return error;
+}
+
+int
+_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+ int err;
+
+ err = pci_enable_wake(dev, PCI_D3cold, enable);
+ if (err)
+ goto out;
+
+ err = pci_enable_wake(dev, PCI_D3hot, enable);
+
+out:
+ return err;
+}
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
+{
+ u16 old_cmd, cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
+ if (enable)
+ cmd = old_cmd | PCI_COMMAND_MASTER;
+ else
+ cmd = old_cmd & ~PCI_COMMAND_MASTER;
+ if (cmd != old_cmd) {
+ dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
+ enable ? "enabling" : "disabling");
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) )
+ pdev->is_busmaster = enable;
+#endif
+}
+
+void _kc_pci_clear_master(struct pci_dev *dev)
+{
+ __kc_pci_set_master(dev, false);
+}
+#endif /* < 2.6.29 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+int _kc_pci_num_vf(struct pci_dev *dev)
+{
+ int num_vf = 0;
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *vfdev;
+
+ /* loop through all ethernet devices starting at PF dev */
+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == dev)
+ num_vf++;
+
+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
+ }
+
+#endif
+ return num_vf;
+}
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 2.6.34 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+#ifdef HAVE_TX_MQ
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+ unsigned int real_num = dev->real_num_tx_queues;
+ struct Qdisc *qdisc;
+ int i;
+
+ if (unlikely(txq > dev->num_tx_queues))
+ ;
+ else if (txq > real_num)
+ dev->real_num_tx_queues = txq;
+ else if ( txq < real_num) {
+ dev->real_num_tx_queues = txq;
+ for (i = txq; i < dev->num_tx_queues; i++) {
+ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ if (qdisc) {
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+ }
+ }
+}
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#endif /* HAVE_TX_MQ */
+
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count)
+{
+ loff_t pos = *ppos;
+ size_t res;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= available || !count)
+ return 0;
+ if (count > available - pos)
+ count = available - pos;
+ res = copy_from_user(to + pos, from, count);
+ if (res == count)
+ return -EFAULT;
+ count -= res;
+ *ppos = pos + count;
+ return count;
+}
+
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+static const u32 _kc_flags_dup_features =
+ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
+
+u32 _kc_ethtool_op_get_flags(struct net_device *dev)
+{
+ return dev->features & _kc_flags_dup_features;
+}
+
+int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
+{
+ if (data & ~supported)
+ return -EINVAL;
+
+ dev->features = ((dev->features & ~_kc_flags_dup_features) |
+ (data & _kc_flags_dup_features));
+ return 0;
+}
+#endif /* < 2.6.36 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+
+
+
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+#endif /* < 2.6.39 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+ int off, int size, unsigned int truesize)
+{
+ skb_fill_page_desc(skb, i, page, off, size);
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += truesize;
+}
+
+int _kc_simple_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+#endif /* < 3.4.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) && \
+ !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5))
+static inline int __kc_pcie_cap_version(struct pci_dev *dev)
+{
+ int pos;
+ u16 reg16;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return 0;
+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+ return reg16 & PCI_EXP_FLAGS_VERS;
+}
+
+static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
+{
+ return true;
+}
+
+static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return __kc_pcie_cap_version(dev) > 1 ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_ENDPOINT ||
+ type == PCI_EXP_TYPE_LEG_END;
+}
+
+static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+ int pos;
+ u16 pcie_flags_reg;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return 0;
+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg);
+
+ return __kc_pcie_cap_version(dev) > 1 ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ (type == PCI_EXP_TYPE_DOWNSTREAM &&
+ pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
+}
+
+static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return __kc_pcie_cap_version(dev) > 1 ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_RC_EC;
+}
+
+static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
+{
+ if (!pci_is_pcie(dev))
+ return false;
+
+ switch (pos) {
+ case PCI_EXP_FLAGS_TYPE:
+ return true;
+ case PCI_EXP_DEVCAP:
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVSTA:
+ return __kc_pcie_cap_has_devctl(dev);
+ case PCI_EXP_LNKCAP:
+ case PCI_EXP_LNKCTL:
+ case PCI_EXP_LNKSTA:
+ return __kc_pcie_cap_has_lnkctl(dev);
+ case PCI_EXP_SLTCAP:
+ case PCI_EXP_SLTCTL:
+ case PCI_EXP_SLTSTA:
+ return __kc_pcie_cap_has_sltctl(dev);
+ case PCI_EXP_RTCTL:
+ case PCI_EXP_RTCAP:
+ case PCI_EXP_RTSTA:
+ return __kc_pcie_cap_has_rtctl(dev);
+ case PCI_EXP_DEVCAP2:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCAP2:
+ case PCI_EXP_LNKCTL2:
+ case PCI_EXP_LNKSTA2:
+ return __kc_pcie_cap_version(dev) > 1;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Note that these accessor functions are only for the "PCI Express
+ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
+ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
+ */
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
+{
+ int ret;
+
+ *val = 0;
+ if (pos & 1)
+ return -EINVAL;
+
+ if (__kc_pcie_capability_reg_implemented(dev, pos)) {
+ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
+ /*
+ * Reset *val to 0 if pci_read_config_word() fails, it may
+ * have been written as 0xFFFF if hardware error happens
+ * during pci_read_config_word().
+ */
+ if (ret)
+ *val = 0;
+ return ret;
+ }
+
+ /*
+ * For Functions that do not implement the Slot Capabilities,
+ * Slot Status, and Slot Control registers, these spaces must
+ * be hardwired to 0b, with the exception of the Presence Detect
+ * State bit in the Slot Status register of Downstream Ports,
+ * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
+ */
+ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ *val = PCI_EXP_SLTSTA_PDS;
+ }
+
+ return 0;
+}
+
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
+{
+ if (pos & 1)
+ return -EINVAL;
+
+ if (!__kc_pcie_capability_reg_implemented(dev, pos))
+ return 0;
+
+ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
+}
+
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ u16 clear, u16 set)
+{
+ int ret;
+ u16 val;
+
+ ret = __kc_pcie_capability_read_word(dev, pos, &val);
+ if (!ret) {
+ val &= ~clear;
+ val |= set;
+ ret = __kc_pcie_capability_write_word(dev, pos, val);
+ }
+
+ return ret;
+}
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) && \
+ !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) */
+#endif /* < 3.7.0 */
+
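__kc_pcie_capability_clear_and_set_word() above is a plain read-modify-write: fetch the 16-bit word, drop the bits named in 'clear', add the bits named in 'set', and write the result back. The fragment below performs the same bit manipulation on an ordinary variable; the starting value and bit positions are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* Same clear-then-set ordering as the kcompat helper, on a plain value. */
static uint16_t clear_and_set_word(uint16_t val, uint16_t clear, uint16_t set)
{
	val &= ~clear;
	val |= set;
	return val;
}

int main(void)
{
	uint16_t word = 0x0043;

	word = clear_and_set_word(word, 0x0003, 0x0040);
	printf("0x%04x\n", word);	/* 0x0040 */
	return 0;
}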
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
+#endif /* 3.9.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifdef CONFIG_PCI_IOV
+int __kc_pci_vfs_assigned(struct pci_dev *dev)
+{
+ unsigned int vfs_assigned = 0;
+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
+ int pos;
+ struct pci_dev *vfdev;
+ unsigned short dev_id;
+
+ /* only search if we are a PF */
+ if (!dev->is_physfn)
+ return 0;
+
+ /* find SR-IOV capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
+
+ /*
+ * determine the device ID for the VFs, the vendor ID will be the
+ * same as the PF so there is no need to check for that one
+ */
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(dev->vendor, dev_id, NULL);
+ while (vfdev) {
+ /*
+ * It is considered assigned if it is a virtual function with
+ * our dev as the physical function and the assigned bit is set
+ */
+ if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
+ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+ vfs_assigned++;
+
+ vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
+ }
+
+#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
+ return vfs_assigned;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* 3.10.0 */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
new file mode 100755
index 00000000..1213cc61
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
@@ -0,0 +1,3884 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+/* NAPI enable/disable flags here */
+#define NAPI
+
+#define adapter_struct igb_adapter
+#define adapter_q_vector igb_q_vector
+#define NAPI
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#else
+#endif /* NAPI */
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT
+#endif
+#endif /* DISABLE_PACKET_SPLIT */
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+struct msix_entry {
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
+#endif
+#undef pci_enable_msi
+#define pci_enable_msi(a) -ENOTSUPP
+#undef pci_disable_msi
+#define pci_disable_msi(a) do {} while (0)
+#undef pci_enable_msix
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#undef pci_disable_msix
+#define pci_disable_msix(a) do {} while (0)
+#define msi_remove_pci_irq_vectors(a) do {} while (0)
+#endif /* CONFIG_PCI_MSI */
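With CONFIG_PCI_MSI absent, the defines above turn pci_enable_msi() and pci_enable_msix() into stubs that simply return -ENOTSUPP, so a probe path that tries MSI-X, then MSI, then legacy interrupts always lands on legacy. A tiny stand-alone model of that fallback order, with invented try_* helpers:

#include <stdio.h>

#define ENOTSUPP 524	/* matches the kernel's "operation is not supported" */

/* Stand-ins for the stubbed-out pci_enable_msix()/pci_enable_msi(). */
static int try_msix(void) { return -ENOTSUPP; }
static int try_msi(void)  { return -ENOTSUPP; }

int main(void)
{
	const char *mode = "legacy";

	if (try_msix() == 0)
		mode = "msi-x";
	else if (try_msi() == 0)
		mode = "msi";
	printf("interrupt mode: %s\n", mode);	/* legacy */
	return 0;
}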
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#else
+#define _Bool char
+#endif
+
+/* kernels less than 2.4.14 don't have this */
+#ifndef ETH_P_8021Q
+#define ETH_P_8021Q 0x8100
+#endif
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef PCIE_LINK_STATE_L0S
+#define PCIE_LINK_STATE_L0S 1
+#endif
+#ifndef PCIE_LINK_STATE_L1
+#define PCIE_LINK_STATE_L1 2
+#endif
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#define free_netdev(x) kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* if we do not have the infrastructure to detect if skb_header is cloned
+ just return false in all cases */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef NETIF_F_GRO
+#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
+ vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
+#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
+#endif
+
+#ifndef NETIF_F_SCTP_CSUM
+#define NETIF_F_SCTP_CSUM 0
+#endif
+
+#ifndef NETIF_F_LRO
+#define NETIF_F_LRO (1 << 15)
+#endif
+
+#ifndef NETIF_F_NTUPLE
+#define NETIF_F_NTUPLE (1 << 27)
+#endif
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef MII_RESV1
+#define MII_RESV1 0x17 /* Reserved... */
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef node_online
+#define node_online(node) ((node) == 0)
+#endif
+
+#ifndef num_online_cpus
+#define num_online_cpus() smp_num_cpus
+#endif
+
+#ifndef cpu_online
+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef DECLARE_BITMAP
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
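+/* Example (illustrative): DECLARE_BITMAP(node_mask, 128) expands to
+ *   long node_mask[BITS_TO_LONGS(128)];
+ * i.e. a 2-element array on a 64-bit kernel (BITS_PER_LONG == 64). */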
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#if !defined(IXGBE_DCA) && !defined(IGB_DCA)
+#define dca_get_tag(b) 0
+#define dca_add_requester(a) -1
+#define dca_remove_requester(b) do { } while(0)
+#define DCA_PROVIDER_ADD 0x0001
+#define DCA_PROVIDER_REMOVE 0x0002
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a,b) dca_get_tag(b)
+#endif
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#if defined(__i386__) || defined(__x86_64__)
+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+#endif
+
+/* taken from 2.6.24 definition in linux/kernel.h */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
+#endif
+
+#ifdef IS_ENABLED
+#undef IS_ENABLED
+#undef __ARG_PLACEHOLDER_1
+#undef config_enabled
+#undef _config_enabled
+#undef __config_enabled
+#undef ___config_enabled
+#endif
+
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...) val
+
+#define IS_ENABLED(option) \
+ (config_enabled(option) || config_enabled(option##_MODULE))
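+/* How the trick above works (illustrative note): when CONFIG_FOO is defined
+ * to 1, _config_enabled(1) pastes to __ARG_PLACEHOLDER_1, which expands to
+ * "0,", so ___config_enabled(0, 1, 0) selects 1.  When CONFIG_FOO is
+ * undefined or defined to anything else, no extra argument is injected and
+ * the result is 0.  IS_ENABLED(CONFIG_FOO) therefore reports both built-in
+ * (CONFIG_FOO=1) and modular (CONFIG_FOO_MODULE=1) configurations. */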
+
+#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
+struct _kc_vlan_ethhdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+#define vlan_ethhdr _kc_vlan_ethhdr
+struct _kc_vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+#define vlan_hdr _kc_vlan_hdr
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#define vlan_tx_tag_present(_skb) 0
+#define vlan_tx_tag_get(_skb) 0
+#endif
+#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
+
+#ifndef VLAN_PRIO_SHIFT
+#define VLAN_PRIO_SHIFT 13
+#endif
+
+
+#ifndef __GFP_COLD
+#define __GFP_COLD 0
+#endif
+
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
+/*****************************************************************************/
+/* Installations with an ethtool version that lacks eeprom, adapter id, or
+ * statistics support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+ u32 cmd;
+ char driver[32];
+ char version[32];
+ char fw_version[32];
+ char bus_info[32];
+ char reserved1[32];
+ char reserved2[16];
+ u32 n_stats;
+ u32 testinfo_len;
+ u32 eedump_len;
+ u32 regdump_len;
+};
+
+struct ethtool_stats {
+ u32 cmd;
+ u32 n_stats;
+ u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+ ETH_SS_TEST = 0,
+ ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+ u32 cmd; /* ETHTOOL_GSTRINGS */
+ u32 string_set; /* string set id e.g. ETH_SS_TEST, etc. */
+ u32 len; /* number of strings in the string set */
+ u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+ ETH_TEST_FL_OFFLINE = (1 << 0),
+ ETH_TEST_FL_FAILED = (1 << 1),
+};
+struct ethtool_test {
+ u32 cmd;
+ u32 flags;
+ u32 reserved;
+ u32 len;
+ u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+ u32 cmd;
+ u32 magic;
+ u32 offset;
+ u32 len;
+ u8 data[0];
+};
+
+struct ethtool_value {
+ u32 cmd;
+ u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GWOL
+#define ETHTOOL_GWOL 0x5
+#define ETHTOOL_SWOL 0x6
+#define SOPASS_MAX 6
+struct ethtool_wolinfo {
+ u32 cmd;
+ u32 supported;
+ u32 wolopts;
+ u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
+};
+#endif /* ETHTOOL_GWOL */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+ u32 cmd;
+ u32 version; /* driver-specific, indicates different chips/revs */
+ u32 len; /* bytes */
+ u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+ u32 cmd; /* ETHTOOL_{G,S}COALESCE */
+
+ /* How many usecs to delay an RX interrupt after
+ * a packet arrives. If 0, only rx_max_coalesced_frames
+ * is used.
+ */
+ u32 rx_coalesce_usecs;
+
+ /* How many packets to delay an RX interrupt after
+ * a packet arrives. If 0, only rx_coalesce_usecs is
+ * used. It is illegal to set both usecs and max frames
+ * to zero as this would cause RX interrupts to never be
+ * generated.
+ */
+ u32 rx_max_coalesced_frames;
+
+ /* Same as above two parameters, except that these values
+ * apply while an IRQ is being serviced by the host. Not
+ * all cards support this feature and the values are ignored
+ * in that case.
+ */
+ u32 rx_coalesce_usecs_irq;
+ u32 rx_max_coalesced_frames_irq;
+
+ /* How many usecs to delay a TX interrupt after
+ * a packet is sent. If 0, only tx_max_coalesced_frames
+ * is used.
+ */
+ u32 tx_coalesce_usecs;
+
+ /* How many packets to delay a TX interrupt after
+ * a packet is sent. If 0, only tx_coalesce_usecs is
+ * used. It is illegal to set both usecs and max frames
+ * to zero as this would cause TX interrupts to never be
+ * generated.
+ */
+ u32 tx_max_coalesced_frames;
+
+ /* Same as above two parameters, except that these values
+ * apply while an IRQ is being serviced by the host. Not
+ * all cards support this feature and the values are ignored
+ * in that case.
+ */
+ u32 tx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames_irq;
+
+ /* How many usecs to delay in-memory statistics
+ * block updates. Some drivers do not have an in-memory
+ * statistic block, and in such cases this value is ignored.
+ * This value must not be zero.
+ */
+ u32 stats_block_coalesce_usecs;
+
+ /* Adaptive RX/TX coalescing is an algorithm implemented by
+ * some drivers to improve latency under low packet rates and
+ * improve throughput under high packet rates. Some drivers
+ * only implement one of RX or TX adaptive coalescing. Anything
+ * not implemented by the driver causes these values to be
+ * silently ignored.
+ */
+ u32 use_adaptive_rx_coalesce;
+ u32 use_adaptive_tx_coalesce;
+
+ /* When the packet rate (measured in packets per second)
+ * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+ * used.
+ */
+ u32 pkt_rate_low;
+ u32 rx_coalesce_usecs_low;
+ u32 rx_max_coalesced_frames_low;
+ u32 tx_coalesce_usecs_low;
+ u32 tx_max_coalesced_frames_low;
+
+ /* When the packet rate is below pkt_rate_high but above
+ * pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
+
+ /* When the packet rate (measured in packets per second)
+ * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+ * used.
+ */
+ u32 pkt_rate_high;
+ u32 rx_coalesce_usecs_high;
+ u32 rx_max_coalesced_frames_high;
+ u32 tx_coalesce_usecs_high;
+ u32 tx_max_coalesced_frames_high;
+
+ /* How often to do adaptive coalescing packet rate sampling,
+ * measured in seconds. Must not be zero.
+ */
+ u32 rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
+
+ /* Read only attributes. These indicate the maximum number
+ * of pending RX/TX ring entries the driver will allow the
+ * user to set.
+ */
+ u32 rx_max_pending;
+ u32 rx_mini_max_pending;
+ u32 rx_jumbo_max_pending;
+ u32 tx_max_pending;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ u32 rx_pending;
+ u32 rx_mini_pending;
+ u32 rx_jumbo_pending;
+ u32 tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
+
+ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+ * being true) the user may set 'autoneg' here non-zero to have the
+ * pause parameters be auto-negotiated too. In such a case, the
+ * {rx,tx}_pause values below determine what capabilities are
+ * advertised.
+ *
+ * If 'autoneg' is zero or the link is not being auto-negotiated,
+ * then {rx,tx}_pause force the driver to use/not-use pause
+ * flow control.
+ */
+ u32 autoneg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
+ * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
+ * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN 32
+#endif
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+
+#ifndef AX_RELEASE_CODE
+#define AX_RELEASE_CODE 0
+#endif
+
+#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3)
+#endif
+
+#ifndef RHEL_RELEASE_CODE
+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
+#define RHEL_RELEASE_CODE 0
+#endif
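+/* Example (illustrative): RHEL_RELEASE_VERSION(6,3) evaluates to
+ * (6 << 8) + 3 == 0x0603, so release checks reduce to plain integer
+ * comparisons such as
+ *   #if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))
+ */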
+
+/* SuSE version macro is the same as Linux kernel version */
+#ifndef SLE_VERSION
+#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
+#endif
+#ifdef CONFIG_SUSE_KERNEL
+#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
+/* SLES11 GA is 2.6.27 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,61)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)))
+/* SLES11 SP3 is at least 3.0.61+ based */
+#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
+#endif /* CONFIG_SUSE_KERNEL */
+#ifndef SLE_VERSION_CODE
+#define SLE_VERSION_CODE 0
+#endif /* SLE_VERSION_CODE */
+
+/* Ubuntu release and kernel codes must be specified from Makefile */
+#ifndef UBUNTU_RELEASE_VERSION
+#define UBUNTU_RELEASE_VERSION(a,b) (((a) * 100) + (b))
+#endif
+#ifndef UBUNTU_KERNEL_VERSION
+#define UBUNTU_KERNEL_VERSION(a,b,c,abi,upload) (((a) << 40) + ((b) << 32) + ((c) << 24) + ((abi) << 8) + (upload))
+#endif
+#ifndef UBUNTU_RELEASE_CODE
+#define UBUNTU_RELEASE_CODE 0
+#endif
+#ifndef UBUNTU_KERNEL_CODE
+#define UBUNTU_KERNEL_CODE 0
+#endif
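+/* Example (illustrative): with the values passed in from the Makefile,
+ * UBUNTU_RELEASE_VERSION(14,4) == 1404, and
+ * UBUNTU_KERNEL_VERSION(3,13,0,30,54) packs the kernel version, ABI and
+ * upload number into one 64-bit constant, so both can be compared
+ * numerically. */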
+
+#ifdef __KLOCWORK__
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+#endif /* __KLOCWORK__ */
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+extern void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+extern int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+/* Basic mode status register. */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+ ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
+#endif
+
+/*****************************************************************************/
+/* 2.4.6 => 2.4.3 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+#ifndef pci_set_power_state
+#define pci_set_power_state _kc_pci_set_power_state
+extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
+#endif
+
+#ifndef pci_enable_wake
+#define pci_enable_wake _kc_pci_enable_wake
+extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
+#endif
+
+#ifndef pci_disable_device
+#define pci_disable_device _kc_pci_disable_device
+extern void _kc_pci_disable_device(struct pci_dev *pdev);
+#endif
+
+/* PCI PM entry point syntax changed, so don't support suspend/resume */
+#undef CONFIG_PM
+
+#endif /* 2.4.6 => 2.4.3 */
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+ PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+ ~PCI_COMMAND_INVALIDATE);
+#endif
+
+/*****************************************************************************/
+/* 2.4.10 => 2.4.9 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
+
+/**************************************/
+/* MODULE API */
+
+#ifndef MODULE_LICENSE
+ #define MODULE_LICENSE(X)
+#endif
+
+/**************************************/
+/* OTHER */
+
+#undef min
+#define min(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x < _y ? _x : _y; })
+
+#undef max
+#define max(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x > _y ? _x : _y; })
+
+#define min_t(type,x,y) ({ \
+ type _x = (x); \
+ type _y = (y); \
+ _x < _y ? _x : _y; })
+
+#define max_t(type,x,y) ({ \
+ type _x = (x); \
+ type _y = (y); \
+ _x > _y ? _x : _y; })
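+/* Usage note (illustrative): the statement-expression forms above evaluate
+ * each argument only once, and the "(void) (&_x == &_y)" in min()/max()
+ * provokes a compiler warning when the two arguments have different types;
+ * min_t()/max_t() sidestep that by casting both sides,
+ * e.g. min_t(u32, skb->len, 64). */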
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+#endif
+
+#ifndef ____cacheline_aligned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_aligned_in_smp ____cacheline_aligned
+#else
+#define ____cacheline_aligned_in_smp
+#endif /* CONFIG_SMP */
+#endif
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
+#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
+extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
+#else /* 2.4.8 => 2.4.9 */
+extern int snprintf(char * buf, size_t size, const char *fmt, ...);
+extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#endif
+#endif /* 2.4.10 => 2.4.9 */
+
+
+/*****************************************************************************/
+/* 2.4.12 => 2.4.10 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
+#ifndef HAVE_NETIF_MSG
+#define HAVE_NETIF_MSG 1
+enum {
+ NETIF_MSG_DRV = 0x0001,
+ NETIF_MSG_PROBE = 0x0002,
+ NETIF_MSG_LINK = 0x0004,
+ NETIF_MSG_TIMER = 0x0008,
+ NETIF_MSG_IFDOWN = 0x0010,
+ NETIF_MSG_IFUP = 0x0020,
+ NETIF_MSG_RX_ERR = 0x0040,
+ NETIF_MSG_TX_ERR = 0x0080,
+ NETIF_MSG_TX_QUEUED = 0x0100,
+ NETIF_MSG_INTR = 0x0200,
+ NETIF_MSG_TX_DONE = 0x0400,
+ NETIF_MSG_RX_STATUS = 0x0800,
+ NETIF_MSG_PKTDATA = 0x1000,
+ NETIF_MSG_HW = 0x2000,
+ NETIF_MSG_WOL = 0x4000,
+};
+
+#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
+#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
+#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
+#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
+#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
+#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
+#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
+#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
+#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
+#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
+#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
+#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
+#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
+#endif /* !HAVE_NETIF_MSG */
+#endif /* 2.4.12 => 2.4.10 */
+
+/*****************************************************************************/
+/* 2.4.13 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#ifndef virt_to_page
+ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
+#endif
+
+#ifndef pci_map_page
+#define pci_map_page _kc_pci_map_page
+extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
+#endif
+
+#ifndef pci_unmap_page
+#define pci_unmap_page _kc_pci_unmap_page
+extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
+#endif
+
+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
+
+#undef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0xffffffff
+#undef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffff
+
+/**************************************/
+/* OTHER */
+
+#ifndef cpu_relax
+#define cpu_relax() rep_nop()
+#endif
+
+struct vlan_ethhdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ unsigned short h_vlan_proto;
+ unsigned short h_vlan_TCI;
+ unsigned short h_vlan_encapsulated_proto;
+};
+#endif /* 2.4.13 => 2.4.12 */
+
+/*****************************************************************************/
+/* 2.4.17 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
+
+#ifndef __devexit_p
+ #define __devexit_p(x) &(x)
+#endif
+
+#else
+ /* For kernel 3.8 and later these are not defined - so undefine and redefine them as no-ops */
+ #undef __devexit_p
+ #undef __devexit
+ #undef __devinit
+ #undef __devinitdata
+ #define __devexit_p(x) &(x)
+ #define __devexit
+ #define __devinit
+ #define __devinitdata
+
+#endif /* 2.4.17 => 2.4.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
+#define NETIF_MSG_HW 0x2000
+#define NETIF_MSG_WOL 0x4000
+
+#ifndef netif_msg_hw
+#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
+#endif
+#ifndef netif_msg_wol
+#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
+#endif
+#endif /* 2.4.18 */
+
+/*****************************************************************************/
+
+/*****************************************************************************/
+/* 2.4.20 => 2.4.19 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
+
+/* we won't support NAPI on less than 2.4.20 */
+#ifdef NAPI
+#undef NAPI
+#endif
+
+#endif /* 2.4.20 => 2.4.19 */
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#define pci_name(x) ((x)->slot_name)
+
+#ifndef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full (1 << 12)
+#endif
+#ifndef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#endif
+#endif
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#ifndef IGB_NO_LRO
+#define IGB_NO_LRO
+#endif
+#endif
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* 2.4.23 => 2.4.22 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
+/*****************************************************************************/
+#ifdef NAPI
+#ifndef netif_poll_disable
+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
+static inline void _kc_netif_poll_disable(struct net_device *netdev)
+{
+ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
+ /* No hurry */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(1);
+ }
+}
+#endif
+#ifndef netif_poll_enable
+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
+static inline void _kc_netif_poll_enable(struct net_device *netdev)
+{
+ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
+}
+#endif
+#endif /* NAPI */
+#ifndef netif_tx_disable
+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
+static inline void _kc_netif_tx_disable(struct net_device *dev)
+{
+ spin_lock_bh(&dev->xmit_lock);
+ netif_stop_queue(dev);
+ spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+#else /* 2.4.23 => 2.4.22 */
+#define HAVE_SCTP
+#endif /* 2.4.23 => 2.4.22 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
+#define ETHTOOL_OPS_COMPAT
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) )
+#define __user
+#endif /* < 2.4.27 */
+
+/*****************************************************************************/
+/* 2.5.71 => 2.4.x */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
+#define sk_protocol protocol
+#define pci_get_device pci_find_device
+#endif /* 2.5.71 => 2.4.x */
+
+/*****************************************************************************/
+/* < 2.4.27 or 2.6.0 <= 2.6.5 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
+
+#ifndef netif_msg_init
+#define netif_msg_init _kc_netif_msg_init
+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+ /* use default */
+ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+ return default_msg_enable_bits;
+ if (debug_value == 0) /* no output */
+ return 0;
+ /* set low N bits */
+ return (1 << debug_value) -1;
+}
+#endif
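+/* Example (illustrative): netif_msg_init(debug, NETIF_MSG_DRV |
+ * NETIF_MSG_PROBE | NETIF_MSG_LINK) returns the default bits when the
+ * module's "debug" parameter is negative, 0 when it is 0, and the low N bits
+ * otherwise (debug == 3 gives 0x7). */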
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#include <linux/rtnetlink.h>
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+/*
+ * Most of the dma compat code is copied/modified from the 2.4.37
+ * /include/linux/libata-compat.h header file
+ */
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+enum dma_data_direction {
+ DMA_BIDIRECTIONAL = 0,
+ DMA_TO_DEVICE = 1,
+ DMA_FROM_DEVICE = 2,
+ DMA_NONE = 3,
+};
+
+struct device {
+ struct pci_dev pdev;
+};
+
+static inline struct pci_dev *to_pci_dev (struct device *dev)
+{
+ return (struct pci_dev *) dev;
+}
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+ return (struct device *) pdev;
+}
+
+#define pdev_printk(lvl, pdev, fmt, args...) \
+ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
+#define dev_err(dev, fmt, args...) \
+ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
+#define dev_info(dev, fmt, args...) \
+ pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
+#define dev_warn(dev, fmt, args...) \
+ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...) \
+ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
+#define dev_dbg(dev, fmt, args...) \
+ pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
+
+/* NOTE: dangerous! we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+ pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+ pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+ pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_sg(dev, sg, nents, dir) \
+ pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+ pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev,a,b,c) \
+ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+ pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+ pci_set_dma_mask(to_pci_dev(dev),(mask))
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = NULL;
+ n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
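+/* Minimal usage sketch (illustrative) for the hlist backport above:
+ *   HLIST_HEAD(bucket);
+ *   struct hlist_node n;
+ *   INIT_HLIST_NODE(&n);
+ *   hlist_add_head(&n, &bucket);
+ *   hlist_del(&n);
+ */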
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+ return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.23 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#ifndef read_barrier_depends
+#define read_barrier_depends() rmb()
+#endif
+
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu() do { } while(0)
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
+#endif
+
+#define dma_set_coherent_mask(dev,mask) 1
+
+#undef dev_put
+#define dev_put(dev) __dev_put(dev)
+
+#ifndef skb_fill_page_desc
+#define skb_fill_page_desc _kc_skb_fill_page_desc
+extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
+#endif
+
+#undef ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+#ifndef page_count
+#define page_count(p) atomic_read(&(p)->count)
+#endif
+
+#ifdef MAX_NUMNODES
+#undef MAX_NUMNODES
+#endif
+#define MAX_NUMNODES 1
+
+/* find_first_bit and find_next_bit are not defined for most
+ * 2.4 kernels (except for the redhat 2.4.21 kernels)
+ */
+#include <linux/bitops.h>
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#undef find_next_bit
+#define find_next_bit _kc_find_next_bit
+extern unsigned long _kc_find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset);
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
+
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+ if (strchr(dev->name, '%'))
+ return "(unregistered net_device)";
+ return dev->name;
+}
+#define netdev_name(netdev) _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#ifndef strlcpy
+#define strlcpy _kc_strlcpy
+extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
+#endif /* strlcpy */
+
+#ifndef do_div
+#if BITS_PER_LONG == 64
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+ })
+#elif BITS_PER_LONG == 32
+extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ if (likely(((n) >> 32) == 0)) { \
+ __rem = (uint32_t)(n) % __base; \
+ (n) = (uint32_t)(n) / __base; \
+ } else \
+ __rem = _kc__div64_32(&(n), __base); \
+ __rem; \
+ })
+#else /* BITS_PER_LONG == ?? */
+# error do_div() does not yet support the C64
+#endif /* BITS_PER_LONG */
+#endif /* do_div */
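+/* Usage note (illustrative): do_div() divides in place and evaluates to the
+ * remainder, e.g.
+ *   u64 ns = 123456789;
+ *   u32 rem = do_div(ns, 1000);   -> ns == 123456, rem == 789
+ * On 32-bit kernels the slower _kc__div64_32() path is only taken when the
+ * dividend no longer fits in 32 bits. */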
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC 1000000000L
+#endif
+
+#undef HAVE_I2C_SUPPORT
+#else /* 2.6.0 */
+#if IS_ENABLED(CONFIG_I2C_ALGOBIT) && \
+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,9)))
+#define HAVE_I2C_SUPPORT
+#endif /* IS_ENABLED(CONFIG_I2C_ALGOBIT) */
+
+#endif /* 2.6.0 => 2.5.28 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+ pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define dma_sync_single_for_cpu dma_sync_single
+#define dma_sync_single_for_device dma_sync_single
+#define dma_sync_single_range_for_cpu dma_sync_single_range
+#define dma_sync_single_range_for_device dma_sync_single_range
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return dma_addr == 0;
+}
+#endif
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
+#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+#define random_ether_addr _kc_random_ether_addr
+static inline void _kc_random_ether_addr(u8 *addr)
+{
+ get_random_bytes(addr, ETH_ALEN);
+ addr[0] &= 0xfe; /* clear multicast */
+ addr[0] |= 0x02; /* set local assignment */
+}
+#define page_to_nid(x) 0
+
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+ return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+
+#ifndef __force
+#define __force
+#endif
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define PCI_EXP_FLAGS 2 /* Capabilities register */
+#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
+#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
+#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
+#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
+#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
+#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
+#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
+#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_DEVSTA 10 /* Device Status */
+#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout((x * HZ)/1000 + 2); \
+ } while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include <net/dsfield.h>
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC 1000L
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+ return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return m * (HZ / MSEC_PER_SEC);
+#else
+ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
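+/* Example (illustrative): with HZ == 100 the first branch is used, so
+ * _kc_msecs_to_jiffies(25) rounds up to 3 jiffies (30 ms) and
+ * _kc_jiffies_to_msecs(3) returns 30. */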
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+ while (timeout && !signal_pending(current)) {
+ __set_current_state(TASK_INTERRUPTIBLE);
+ timeout = schedule_timeout(timeout);
+ }
+ return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#endif
+#ifndef __be32
+#define __be32 u32
+#endif
+#ifndef __be64
+#define __be64 u64
+#endif
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct vlan_ethhdr *)skb->mac.raw;
+}
+
+/* Wake-On-Lan options. */
+#define WAKE_PHY (1 << 0)
+#define WAKE_UCAST (1 << 1)
+#define WAKE_MCAST (1 << 2)
+#define WAKE_BCAST (1 << 3)
+#define WAKE_ARP (1 << 4)
+#define WAKE_MAGIC (1 << 5)
+#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
+
+#define skb_header_pointer _kc_skb_header_pointer
+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
+ int offset, int len, void *buffer)
+{
+ int hlen = skb_headlen(skb);
+
+ if (hlen - offset >= len)
+ return skb->data + offset;
+
+#ifdef MAX_SKB_FRAGS
+ if (skb_copy_bits(skb, offset, buffer, len) < 0)
+ return NULL;
+
+ return buffer;
+#else
+ return NULL;
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+}
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm) \
+ static struct kparam_array __param_arr_##name \
+ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+ sizeof(array[0]), array }; \
+ module_param_call(name, param_array_set, param_array_get, \
+ &__param_arr_##name, perm)
+#endif /* module_param_array_named */
+/*
+ * num_online_nodes() is broken for all < 2.6.10 kernels. This is needed to
+ * support the Node module parameter of ixgbe.
+ */
+#undef num_online_nodes
+#define num_online_nodes(n) 1
+extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
+#undef node_online_map
+#define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
+#define PCI_D0 0
+#define PCI_D1 1
+#define PCI_D2 2
+#define PCI_D3hot 3
+#define PCI_D3cold 4
+typedef int pci_power_t;
+#define pci_choose_state(pdev,state) state
+#define PMSG_SUSPEND 3
+#define PCI_EXP_LNKCTL 16
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#define KC_USEC_PER_SEC 1000000L
+#define usecs_to_jiffies _kc_usecs_to_jiffies
+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+ return (KC_USEC_PER_SEC / HZ) * j;
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
+#else
+ return (j * KC_USEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
+{
+ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+ return m * (HZ / KC_USEC_PER_SEC);
+#else
+ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
+#endif
+}
+
+#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTSTA 32 /* Root Status */
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
+/* Link partner ability register. */
+#define LPA_PAUSE_CAP 0x0400 /* Can pause */
+#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
+/* 1000BASE-T Status register */
+#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
+#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
+
+#ifndef is_zero_ether_addr
+#define is_zero_ether_addr _kc_is_zero_ether_addr
+static inline int _kc_is_zero_ether_addr(const u8 *addr)
+{
+ return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+#endif /* is_zero_ether_addr */
+#ifndef is_multicast_ether_addr
+#define is_multicast_ether_addr _kc_is_multicast_ether_addr
+static inline int _kc_is_multicast_ether_addr(const u8 *addr)
+{
+ return addr[0] & 0x01;
+}
+#endif /* is_multicast_ether_addr */
+#endif /* < 2.6.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+#ifndef kstrdup
+#define kstrdup _kc_kstrdup
+extern char *_kc_kstrdup(const char *s, unsigned int gfp);
+#endif
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS 0x0f /* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+
+#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Asym_Pause (1 << 14)
+#define ADVERTISED_Pause (1 << 13)
+#define ADVERTISED_Asym_Pause (1 << 14)
+
+#if (!(RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
+#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
+#define gfp_t unsigned
+#else
+typedef unsigned gfp_t;
+#endif
+#endif /* !RHEL4.3->RHEL5.0 */
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
+#ifdef CONFIG_X86_64
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \
+ dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \
+ dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
+#endif
+#endif
+#endif /* < 2.6.14 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
+#ifndef vmalloc_node
+#define vmalloc_node(a,b) vmalloc(a)
+#endif /* vmalloc_node*/
+
+#define setup_timer(_timer, _function, _data) \
+do { \
+ (_timer)->function = _function; \
+ (_timer)->data = _data; \
+ init_timer(_timer); \
+} while (0)
+#ifndef device_can_wakeup
+#define device_can_wakeup(dev) (1)
+#endif
+#ifndef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) do{}while(0)
+#endif
+#ifndef device_init_wakeup
+#define device_init_wakeup(dev,val) do {} while (0)
+#endif
+static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
+{
+ const u16 *a = (const u16 *) addr1;
+ const u16 *b = (const u16 *) addr2;
+
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+}
+#undef compare_ether_addr
+#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
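+/* Usage note (illustrative): like the upstream helper, this compares the
+ * addresses as three 16-bit words and returns non-zero when they differ, so
+ * callers test "if (compare_ether_addr(a, b))" for a mismatch.  Both
+ * addresses are assumed to be at least 16-bit aligned. */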
+#endif /* < 2.6.15 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
+#undef DEFINE_MUTEX
+#define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
+#define mutex_lock(x) down_interruptible(x)
+#define mutex_unlock(x) up(x)
+
+#ifndef ____cacheline_internodealigned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
+#else
+#define ____cacheline_internodealigned_in_smp
+#endif /* CONFIG_SMP */
+#endif /* ____cacheline_internodealigned_in_smp */
+#undef HAVE_PCI_ERS
+#else /* 2.6.16 and above */
+#undef HAVE_PCI_ERS
+#define HAVE_PCI_ERS
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) )
+#ifdef device_can_wakeup
+#undef device_can_wakeup
+#endif /* device_can_wakeup */
+#define device_can_wakeup(dev) 1
+#endif /* SLE_VERSION(10,4,0) */
+#endif /* < 2.6.16 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef dev_notice
+#define dev_notice(dev, fmt, args...) \
+ dev_printk(KERN_NOTICE, dev, fmt, ## args)
+#endif
+
+#ifndef first_online_node
+#define first_online_node 0
+#endif
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD 16
+#endif
+#endif /* < 2.6.17 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef IRQF_PROBE_SHARED
+#ifdef SA_PROBEIRQ
+#define IRQF_PROBE_SHARED SA_PROBEIRQ
+#else
+#define IRQF_PROBE_SHARED 0
+#endif
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef FIELD_SIZEOF
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#endif
+
+#ifndef skb_is_gso
+#ifdef NETIF_F_TSO
+#define skb_is_gso _kc_skb_is_gso
+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_size;
+}
+#else
+#define skb_is_gso(a) 0
+#endif
+#endif
+
+#ifndef resource_size_t
+#define resource_size_t unsigned long
+#endif
+
+#ifdef skb_pad
+#undef skb_pad
+#endif
+#define skb_pad(x,y) _kc_skb_pad(x, y)
+int _kc_skb_pad(struct sk_buff *skb, int pad);
+#ifdef skb_padto
+#undef skb_padto
+#endif
+#define skb_padto(x,y) _kc_skb_padto(x, y)
+static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+ if(likely(size >= len))
+ return 0;
+ return _kc_skb_pad(skb, len - size);
+}
+
+#ifndef DECLARE_PCI_UNMAP_ADDR
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+ dma_addr_t ADDR_NAME
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+ u32 LEN_NAME
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+ ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+ ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+ (((PTR)->LEN_NAME) = (VAL))
+#endif /* DECLARE_PCI_UNMAP_ADDR */
+#endif /* < 2.6.18 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
+#define i_private u.generic_ip
+#endif /* >= RHEL 5.0 */
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#if (!((RHEL_RELEASE_CODE && \
+ ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#ifdef IXGBE_FCOE
+#undef CONFIG_FCOE
+#undef CONFIG_FCOE_MODULE
+#endif /* IXGBE_FCOE */
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#endif /* >= 2.5.x */
+{
+ irq_handler_t new_handler = (irq_handler_t) handler;
+ return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
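+/* Note (illustrative): this wrapper lets callers register a two-argument
+ * (post-2.6.19 style) handler on kernels whose request_irq() still expects
+ * the three-argument (int, void *, struct pt_regs *) prototype; the cast
+ * simply ignores the trailing pt_regs argument. */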
+
+#define irq_handler_t new_handler_t
+/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan() do {} while(0)
+#undef pci_save_state
+extern int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+extern void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+extern void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
+{
+ return 0;
+}
+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
+
+extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#include <linux/string.h>
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->pending = 0; \
+ (_work)->func = (void (*)(void *))_func; \
+ (_work)->data = _work; \
+ init_timer(&(_work)->timer); \
+} while (0)
+#endif
+
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev) \
+ PCI_VENDOR_ID_##ven, (dev), \
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#define HAVE_EARLY_VMALLOC_NODE
+#define dev_to_node(dev) -1
+#undef set_dev_node
+/* remove compiler warning with b=b, for unused variable */
+#define set_dev_node(a, b) do { (b) = (b); } while(0)
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+#endif
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+static inline __wsum csum_unfold(__sum16 n)
+{
+ return (__force __wsum)n;
+}
+#endif
+
+#else /* < 2.6.20 */
+#define HAVE_DEVICE_NUMA_NODE
+#endif /* < 2.6.20 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#define NETDEV_CLASS_DEV
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev) \
+ do { \
+ if (vg) vg->vlan_devices[id] = dev; \
+ } while (0)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define pci_channel_offline(pdev) (pdev->error_state && \
+ pdev->error_state != pci_channel_io_normal)
+#define pci_request_selected_regions(pdev, bars, name) \
+ pci_request_regions(pdev, name)
+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
+
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
+extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
+#define netdev_to_dev(netdev) \
+ pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
+#else
+static inline struct device *netdev_to_dev(struct net_device *netdev)
+{
+ return &netdev->dev;
+}
+
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_reset_tail_pointer(skb) \
+ do { \
+ skb->tail = skb->data; \
+ } while (0)
+#define skb_set_tail_pointer(skb, offset) \
+ do { \
+ skb->tail = skb->data + offset; \
+ } while (0)
+#define skb_copy_to_linear_data(skb, from, len) \
+ memcpy(skb->data, from, len)
+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
+ memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#define cancel_work_sync(x) flush_scheduled_work()
+#ifndef udp_hdr
+#define udp_hdr _udp_hdr
+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
+{
+ return (struct udphdr *)skb_transport_header(skb);
+}
+#endif
+
+#ifdef cpu_to_be16
+#undef cpu_to_be16
+#endif
+#define cpu_to_be16(x) __constant_htons(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
+enum {
+ DUMP_PREFIX_NONE,
+ DUMP_PREFIX_ADDRESS,
+ DUMP_PREFIX_OFFSET
+};
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
+#ifndef hex_asc
+#define hex_asc(x) "0123456789abcdef"[x]
+#endif
+#include <linux/ctype.h>
+extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
+ _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
+#ifndef ADVERTISED_2500baseX_Full
+#define ADVERTISED_2500baseX_Full (1 << 15)
+#endif
+#ifndef SUPPORTED_2500baseX_Full
+#define SUPPORTED_2500baseX_Full (1 << 15)
+#endif
+
+#ifdef HAVE_I2C_SUPPORT
+#include <linux/i2c.h>
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
+struct i2c_board_info {
+ char driver_name[KOBJ_NAME_LEN];
+ char type[I2C_NAME_SIZE];
+ unsigned short flags;
+ unsigned short addr;
+ void *platform_data;
+};
+#define I2C_BOARD_INFO(driver, dev_addr) .driver_name = (driver),\
+ .addr = (dev_addr)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define i2c_new_device(adap, info) _kc_i2c_new_device(adap, info)
+extern struct i2c_client *
+_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
+#endif /* HAVE_I2C_SUPPORT */
+
+#else /* 2.6.22 */
+#define ETH_TYPE_TRANS_SETS_DEV
+#define HAVE_NETDEV_STATS_IN_NETDEV
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#undef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do { } while (0)
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#define netif_subqueue_stopped(_a, _b) 0
+#ifndef PTR_ALIGN
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#endif
+
+#ifndef CONFIG_PM_SLEEP
+#define CONFIG_PM_SLEEP CONFIG_PM
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
+#define HAVE_ETHTOOL_GET_PERM_ADDR
+#endif /* 2.6.14 through 2.6.22 */
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifndef ETH_FLAG_LRO
+#define ETH_FLAG_LRO NETIF_F_LRO
+#endif
+
+/* if GRO is supported then the napi struct must already exist */
+#ifndef NETIF_F_GRO
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+ /* used to look up the real NAPI polling routine */
+ int (*poll)(struct napi_struct *, int);
+ struct net_device *dev;
+ int weight;
+};
+#endif
+
+#ifdef NAPI
+extern int __kc_adapter_clean(struct net_device *, int *);
+extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+ do { \
+ struct napi_struct *__napi = (_napi); \
+ struct net_device *poll_dev = napi_to_poll_dev(__napi); \
+ poll_dev->poll = &(__kc_adapter_clean); \
+ poll_dev->priv = (_napi); \
+ poll_dev->weight = (_weight); \
+ set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
+ set_bit(__LINK_STATE_START, &poll_dev->state);\
+ dev_hold(poll_dev); \
+ __napi->poll = &(_poll); \
+ __napi->weight = (_weight); \
+ __napi->dev = (_netdev); \
+ } while (0)
+#define netif_napi_del(_napi) \
+ do { \
+ struct net_device *poll_dev = napi_to_poll_dev(_napi); \
+ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
+ dev_put(poll_dev); \
+ memset(poll_dev, 0, sizeof(struct net_device));\
+ } while (0)
+#define napi_schedule_prep(_napi) \
+ (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
+#define napi_schedule(_napi) \
+ do { \
+ if (napi_schedule_prep(_napi)) \
+ __netif_rx_schedule(napi_to_poll_dev(_napi)); \
+ } while (0)
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#ifdef CONFIG_SMP
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+ struct net_device *dev = napi_to_poll_dev(n);
+
+ while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+ /* No hurry. */
+ msleep(1);
+ }
+}
+#else
+#define napi_synchronize(n) barrier()
+#endif /* CONFIG_SMP */
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+#ifndef NETIF_F_GRO
+#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
+#else
+#define napi_complete(_napi) \
+ do { \
+ napi_gro_flush(_napi); \
+ netif_rx_complete(napi_to_poll_dev(_napi)); \
+ } while (0)
+#endif /* NETIF_F_GRO */
+#else /* NAPI */
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+ do { \
+ struct napi_struct *__napi = _napi; \
+ _netdev->poll = &(_poll); \
+ _netdev->weight = (_weight); \
+ __napi->poll = &(_poll); \
+ __napi->weight = (_weight); \
+ __napi->dev = (_netdev); \
+ } while (0)
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
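+/*
+ * Usage sketch for the NAPI shim above (foo_poll, foo_clean_rx and the
+ * adapter layout are hypothetical, for illustration only): a driver written
+ * against the napi_struct API keeps its usual call sites, and the macros
+ * above route them to the legacy net_device polling fields.
+ *
+ *	static int foo_poll(struct napi_struct *napi, int budget)
+ *	{
+ *		int work_done = foo_clean_rx(napi, budget);
+ *
+ *		if (work_done < budget)
+ *			napi_complete(napi);
+ *		return work_done;
+ *	}
+ *
+ *	netif_napi_add(netdev, &adapter->napi, foo_poll, 64);
+ */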
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
+#endif
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT ""
+#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+ printk(KERN_ERR fmt, ##arg)
+#endif
+#else /* < 2.6.24 */
+#define HAVE_ETHTOOL_GET_SSET_COUNT
+#define HAVE_NETDEV_NAPI_LIST
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#include <linux/pm_qos_params.h>
+#else /* >= 3.2.0 */
+#include <linux/pm_qos.h>
+#endif /* else >= 3.2.0 */
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY 1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+ set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+ remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+ modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE -1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
+ if (value != PM_QOS_DEFAULT_VALUE) { \
+ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+ pci_name(adapter->pdev)); \
+ } \
+}
+
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
+#endif /* DEFINE_PCI_DEVICE_TABLE */
+
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#ifndef IGB_PROCFS
+#define IGB_PROCFS
+#endif /* IGB_PROCFS */
+#endif /* >= 2.6.0 */
+
+#else /* < 2.6.25 */
+
+
+#if IS_ENABLED(CONFIG_HWMON)
+#ifndef IGB_HWMON
+#define IGB_HWMON
+#endif /* IGB_HWMON */
+#endif /* CONFIG_HWMON */
+
+#endif /* < 2.6.25 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifndef clamp_t
+#define clamp_t(type, val, min, max) ({ \
+ type __val = (val); \
+ type __min = (min); \
+ type __max = (max); \
+ __val = __val < __min ? __min : __val; \
+ __val > __max ? __max : __val; })
+#endif /* clamp_t */
+#undef kzalloc_node
+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
+
+extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
+#else /* < 2.6.26 */
+#include <linux/pci-aspm.h>
+#define HAVE_NETDEV_VLAN_FEATURES
+#ifndef PCI_EXP_LNKCAP_ASPMS
+#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#endif /* PCI_EXP_LNKCAP_ASPMS */
+#endif /* < 2.6.26 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+ __u32 speed)
+{
+ ep->speed = (__u16)speed;
+ /* ep->speed_hi = (__u16)(speed >> 16); */
+}
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+
+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
+{
+ /* no speed_hi before 2.6.27, and probably no need for it yet */
+ return (__u32)ep->speed;
+}
+#define ethtool_cmd_speed _kc_ethtool_cmd_speed
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
+#define ANCIENT_PM 1
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
+ defined(CONFIG_PM_SLEEP))
+#define NEWER_PM 1
+#endif
+#if defined(ANCIENT_PM) || defined(NEWER_PM)
+#undef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) \
+ do { \
+ u16 pmc = 0; \
+ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
+ if (pm) { \
+ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
+ &pmc); \
+ } \
+ (dev)->power.can_wakeup = !!(pmc >> 11); \
+ (dev)->power.should_wakeup = (val && (pmc >> 11)); \
+ } while (0)
+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
+#endif /* 2.6.15 through 2.6.26 */
+#ifndef netif_napi_del
+#define netif_napi_del(_a) do {} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list)
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifdef dma_mapping_error
+#undef dma_mapping_error
+#endif
+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#define HAVE_TX_MQ
+#endif
+
+#ifdef HAVE_TX_MQ
+extern void _kc_netif_tx_stop_all_queues(struct net_device *);
+extern void _kc_netif_tx_wake_all_queues(struct net_device *);
+extern void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev,_qi) do { \
+ if (netif_is_multiqueue((_ndev))) \
+ netif_stop_subqueue((_ndev), (_qi)); \
+ else \
+ netif_stop_queue((_ndev)); \
+ } while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev,_qi) do { \
+ if (netif_is_multiqueue((_ndev))) \
+ netif_start_subqueue((_ndev), (_qi)); \
+ else \
+ netif_start_queue((_ndev)); \
+ } while (0)
+#else /* HAVE_TX_MQ */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#else
+#define netif_tx_start_all_queues(a) do {} while (0)
+#endif
+#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
+#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
+#endif /* HAVE_TX_MQ */
+#ifndef NETIF_F_MULTI_QUEUE
+#define NETIF_F_MULTI_QUEUE 0
+#define netif_is_multiqueue(a) 0
+#define netif_wake_subqueue(a, b)
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef __WARN_printf
+extern void __kc_warn_slowpath(const char *file, const int line,
+ const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
+#endif /* __WARN_printf */
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf(format); \
+ unlikely(__ret_warn_on); \
+})
+#endif /* WARN */
+#undef HAVE_IXGBE_DEBUG_FS
+#undef HAVE_IGB_DEBUG_FS
+#else /* < 2.6.27 */
+#define HAVE_TX_MQ
+#define HAVE_NETDEV_SELECT_QUEUE
+#ifdef CONFIG_DEBUG_FS
+#define HAVE_IXGBE_DEBUG_FS
+#define HAVE_IGB_DEBUG_FS
+#endif /* CONFIG_DEBUG_FS */
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
+ pci_resource_len(pdev, bar))
+#define pci_wake_from_d3 _kc_pci_wake_from_d3
+#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
+extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
+extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
+#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
+#ifndef __skb_queue_head_init
+static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
+#endif
+
+#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+#ifndef swap
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+#endif
+#define pci_request_selected_regions_exclusive(pdev, bars, name) \
+ pci_request_selected_regions(pdev, bars, name)
+#ifndef CONFIG_NR_CPUS
+#define CONFIG_NR_CPUS 1
+#endif /* CONFIG_NR_CPUS */
+#ifndef pcie_aspm_enabled
+#define pcie_aspm_enabled() (1)
+#endif /* pcie_aspm_enabled */
+
+#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
+
+#ifndef pci_clear_master
+extern void _kc_pci_clear_master(struct pci_dev *dev);
+#define pci_clear_master(dev) _kc_pci_clear_master(dev)
+#endif
+
+#ifndef PCI_EXP_LNKCTL_ASPMC
+#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
+#endif
+#else /* < 2.6.29 */
+#ifndef HAVE_NET_DEVICE_OPS
+#define HAVE_NET_DEVICE_OPS
+#endif
+#ifdef CONFIG_DCB
+#define HAVE_PFC_MODE_ENABLE
+#endif /* CONFIG_DCB */
+#endif /* < 2.6.29 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
+#define skb_rx_queue_recorded(a) false
+#define skb_get_rx_queue(a) 0
+#define skb_record_rx_queue(a, b) do {} while (0)
+#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
+#ifndef CONFIG_PCI_IOV
+#undef pci_enable_sriov
+#define pci_enable_sriov(a, b) -ENOTSUPP
+#undef pci_disable_sriov
+#define pci_disable_sriov(a) do {} while (0)
+#endif /* CONFIG_PCI_IOV */
+#ifndef pr_cont
+#define pr_cont(fmt, ...) \
+ printk(KERN_CONT fmt, ##__VA_ARGS__)
+#endif /* pr_cont */
+static inline void _kc_synchronize_irq(unsigned int a)
+{
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+ synchronize_irq();
+#else /* < 2.5.28 */
+ synchronize_irq(a);
+#endif /* < 2.5.28 */
+}
+#undef synchronize_irq
+#define synchronize_irq(a) _kc_synchronize_irq(a)
+
+#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+
+#else /* < 2.6.30 */
+#define HAVE_ASPM_QUIRKS
+#endif /* < 2.6.30 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
+#define ETH_P_1588 0x88F7
+#define ETH_P_FIP 0x8914
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc_count)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(uclist, dev) \
+ for (uclist = dev->uc_list; uclist; uclist = uclist->next)
+#endif
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#ifndef MDIO_PHY_ID_PRTAD
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#endif
+#ifndef MDIO_PHY_ID_DEVAD
+#define MDIO_PHY_ID_DEVAD 0x001f
+#endif
+#ifndef skb_dst
+#define skb_dst(s) ((s)->dst)
+#endif
+
+#ifndef SUPPORTED_1000baseKX_Full
+#define SUPPORTED_1000baseKX_Full (1 << 17)
+#endif
+#ifndef SUPPORTED_10000baseKX4_Full
+#define SUPPORTED_10000baseKX4_Full (1 << 18)
+#endif
+#ifndef SUPPORTED_10000baseKR_Full
+#define SUPPORTED_10000baseKR_Full (1 << 19)
+#endif
+
+#ifndef ADVERTISED_1000baseKX_Full
+#define ADVERTISED_1000baseKX_Full (1 << 17)
+#endif
+#ifndef ADVERTISED_10000baseKX4_Full
+#define ADVERTISED_10000baseKX4_Full (1 << 18)
+#endif
+#ifndef ADVERTISED_10000baseKR_Full
+#define ADVERTISED_10000baseKR_Full (1 << 19)
+#endif
+
+#else /* < 2.6.31 */
+#ifndef HAVE_NETDEV_STORAGE_ADDRESS
+#define HAVE_NETDEV_STORAGE_ADDRESS
+#endif
+#ifndef HAVE_NETDEV_HW_ADDR
+#define HAVE_NETDEV_HW_ADDR
+#endif
+#ifndef HAVE_TRANS_START_IN_QUEUE
+#define HAVE_TRANS_START_IN_QUEUE
+#endif
+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
+#define HAVE_INCLUDE_LINUX_MDIO_H
+#endif
+#endif /* < 2.6.31 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
+#undef netdev_tx_t
+#define netdev_tx_t int
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef NETIF_F_FCOE_MTU
+#define NETIF_F_FCOE_MTU (1 << 26)
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline int _kc_pm_runtime_get_sync()
+{
+ return 1;
+}
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync()
+#else /* 2.6.0 => 2.6.32 */
+static inline int _kc_pm_runtime_get_sync(struct device *dev)
+{
+ return 1;
+}
+#ifndef pm_runtime_get_sync
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev)
+#endif
+#endif /* 2.6.0 => 2.6.32 */
+#ifndef pm_runtime_put
+#define pm_runtime_put(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_put_sync
+#define pm_runtime_put_sync(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_resume
+#define pm_runtime_resume(dev) do {} while (0)
+#endif
+#ifndef pm_schedule_suspend
+#define pm_schedule_suspend(dev, t) do {} while (0)
+#endif
+#ifndef pm_runtime_set_suspended
+#define pm_runtime_set_suspended(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_disable
+#define pm_runtime_disable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_put_noidle
+#define pm_runtime_put_noidle(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_set_active
+#define pm_runtime_set_active(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_enable
+#define pm_runtime_enable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_get_noresume
+#define pm_runtime_get_noresume(dev) do {} while (0)
+#endif
+#else /* < 2.6.32 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+#define HAVE_NETDEV_OPS_FCOE_ENABLE
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_OPS_GETAPP
+#define HAVE_DCBNL_OPS_GETAPP
+#endif
+#endif /* CONFIG_DCB */
+#include <linux/pm_runtime.h>
+/* IOV bad DMA target workarounds require at least this kernel rev for support */
+#define HAVE_PCIE_TYPE
+#endif /* < 2.6.32 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#ifndef pci_pcie_cap
+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
+#endif
+#ifndef IPV4_FLOW
+#define IPV4_FLOW 0x10
+#endif /* IPV4_FLOW */
+#ifndef IPV6_FLOW
+#define IPV6_FLOW 0x11
+#endif /* IPV6_FLOW */
+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
+#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* RHEL6 or SLES11 SP1 */
+#ifndef __percpu
+#define __percpu
+#endif /* __percpu */
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
+#if ((RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
+#undef DEFINE_DMA_UNMAP_LEN
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
+#undef dma_unmap_addr
+#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
+#undef dma_unmap_addr_set
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
+#undef dma_unmap_len
+#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
+#undef dma_unmap_len_set
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
+#endif /* !CONFIG_X86_32 && !CONFIG_NEED_DMA_MAP_STATE */
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \
+ ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))))
+static inline bool pci_is_pcie(struct pci_dev *dev)
+{
+ return !!pci_pcie_cap(dev);
+}
+#endif /* RHEL_RELEASE_CODE */
+
+#ifndef __always_unused
+#define __always_unused __attribute__((__unused__))
+#endif
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((__unused__))
+#endif
+
+#if (!(RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))))
+#define sk_tx_queue_get(_sk) (-1)
+#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0)
+#endif /* !(RHEL >= 6.2) */
+
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#define HAVE_ETHTOOL_GET_TS_INFO
+#endif /* RHEL >= 6.4 && RHEL < 7.0 */
+
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NETDEV_OPS_EXT_FDB
+#endif /* RHEL >= 6.5 && RHEL < 7.0 */
+
+#else /* < 2.6.33 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* < 2.6.33 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#ifndef pci_num_vf
+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
+extern int _kc_pci_num_vf(struct pci_dev *dev);
+#endif
+#endif /* RHEL_RELEASE_CODE */
+
+#ifndef ETH_FLAG_NTUPLE
+#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
+#endif
+
+#ifndef netdev_mc_count
+#define netdev_mc_count(dev) ((dev)->mc_count)
+#endif
+#ifndef netdev_mc_empty
+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_mc_addr
+#define netdev_for_each_mc_addr(mclist, dev) \
+ for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#endif
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc.count)
+#endif
+#ifndef netdev_uc_empty
+#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(ha, dev) \
+ list_for_each_entry(ha, &dev->uc.list, list)
+#endif
+#ifndef dma_set_coherent_mask
+#define dma_set_coherent_mask(dev,mask) \
+ pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
+#endif
+#ifndef pci_dev_run_wake
+#define pci_dev_run_wake(pdev) (0)
+#endif
+
+/* netdev logging taken from include/linux/netdevice.h */
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+ if (dev->reg_state != NETREG_REGISTERED)
+ return "(unregistered net_device)";
+ return dev->name;
+}
+#define netdev_name(netdev) _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#undef netdev_printk
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#define netdev_printk(level, netdev, format, args...) \
+do { \
+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
+ printk(level "%s: " format, pci_name(pdev), ##args); \
+} while(0)
+#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define netdev_printk(level, netdev, format, args...) \
+do { \
+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
+ struct device *dev = pci_dev_to_dev(pdev); \
+ dev_printk(level, dev, "%s: " format, \
+ netdev_name(netdev), ##args); \
+} while(0)
+#else /* 2.6.21 => 2.6.34 */
+#define netdev_printk(level, netdev, format, args...) \
+ dev_printk(level, (netdev)->dev.parent, \
+ "%s: " format, \
+ netdev_name(netdev), ##args)
+#endif /* <2.6.0 <2.6.21 <2.6.34 */
+#undef netdev_emerg
+#define netdev_emerg(dev, format, args...) \
+ netdev_printk(KERN_EMERG, dev, format, ##args)
+#undef netdev_alert
+#define netdev_alert(dev, format, args...) \
+ netdev_printk(KERN_ALERT, dev, format, ##args)
+#undef netdev_crit
+#define netdev_crit(dev, format, args...) \
+ netdev_printk(KERN_CRIT, dev, format, ##args)
+#undef netdev_err
+#define netdev_err(dev, format, args...) \
+ netdev_printk(KERN_ERR, dev, format, ##args)
+#undef netdev_warn
+#define netdev_warn(dev, format, args...) \
+ netdev_printk(KERN_WARNING, dev, format, ##args)
+#undef netdev_notice
+#define netdev_notice(dev, format, args...) \
+ netdev_printk(KERN_NOTICE, dev, format, ##args)
+#undef netdev_info
+#define netdev_info(dev, format, args...) \
+ netdev_printk(KERN_INFO, dev, format, ##args)
+#undef netdev_dbg
+#if defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+do { \
+ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
+ netdev_name(__dev), ##args); \
+} while (0)
+#else /* DEBUG */
+#define netdev_dbg(__dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+ 0; \
+})
+#endif /* DEBUG */
+
+#undef netif_printk
+#define netif_printk(priv, type, level, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_printk(level, (dev), fmt, ##args); \
+} while (0)
+
+#undef netif_emerg
+#define netif_emerg(priv, type, dev, fmt, args...) \
+ netif_level(emerg, priv, type, dev, fmt, ##args)
+#undef netif_alert
+#define netif_alert(priv, type, dev, fmt, args...) \
+ netif_level(alert, priv, type, dev, fmt, ##args)
+#undef netif_crit
+#define netif_crit(priv, type, dev, fmt, args...) \
+ netif_level(crit, priv, type, dev, fmt, ##args)
+#undef netif_err
+#define netif_err(priv, type, dev, fmt, args...) \
+ netif_level(err, priv, type, dev, fmt, ##args)
+#undef netif_warn
+#define netif_warn(priv, type, dev, fmt, args...) \
+ netif_level(warn, priv, type, dev, fmt, ##args)
+#undef netif_notice
+#define netif_notice(priv, type, dev, fmt, args...) \
+ netif_level(notice, priv, type, dev, fmt, ##args)
+#undef netif_info
+#define netif_info(priv, type, dev, fmt, args...) \
+ netif_level(info, priv, type, dev, fmt, ##args)
+#undef netif_dbg
+#define netif_dbg(priv, type, dev, fmt, args...) \
+ netif_level(dbg, priv, type, dev, fmt, ##args)
+
+#ifdef SET_SYSTEM_SLEEP_PM_OPS
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#endif
+
+#ifndef for_each_set_bit
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit */
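+/*
+ * Usage sketch for the for_each_set_bit() fallback above (MAX_QUEUES,
+ * active_queues and foo_enable_queue are hypothetical names): walk only
+ * the bits that are set in a bitmap, e.g. the indices of active queues.
+ *
+ *	DECLARE_BITMAP(active_queues, MAX_QUEUES);
+ *	unsigned int i;
+ *
+ *	for_each_set_bit(i, active_queues, MAX_QUEUES)
+ *		foo_enable_queue(adapter, i);
+ */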
+
+#ifndef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
+#define dma_unmap_addr pci_unmap_addr
+#define dma_unmap_addr_set pci_unmap_addr_set
+#define dma_unmap_len pci_unmap_len
+#define dma_unmap_len_set pci_unmap_len_set
+#endif /* DEFINE_DMA_UNMAP_ADDR */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3))
+#ifdef IGB_HWMON
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define sysfs_attr_init(attr) \
+ do { \
+ static struct lock_class_key __key; \
+ (attr)->key = &__key; \
+ } while (0)
+#else
+#define sysfs_attr_init(attr) do {} while (0)
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* IGB_HWMON */
+#endif /* RHEL_RELEASE_CODE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline bool _kc_pm_runtime_suspended()
+{
+ return false;
+}
+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended()
+#else /* 2.6.0 => 2.6.34 */
+static inline bool _kc_pm_runtime_suspended(struct device *dev)
+{
+ return false;
+}
+#ifndef pm_runtime_suspended
+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev)
+#endif
+#endif /* 2.6.0 => 2.6.34 */
+
+#else /* < 2.6.34 */
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#ifndef HAVE_SET_RX_MODE
+#define HAVE_SET_RX_MODE
+#endif
+
+#endif /* < 2.6.34 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count);
+#define simple_write_to_buffer _kc_simple_write_to_buffer
+
+#ifndef numa_node_id
+#define numa_node_id() 0
+#endif
+#ifdef HAVE_TX_MQ
+#include <net/sch_generic.h>
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
+#define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
+#define netif_set_real_num_tx_queues(_netdev, _count) \
+ do { \
+ (_netdev)->egress_subqueue_count = _count; \
+ } while (0)
+#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
+#else /* HAVE_TX_MQ */
+#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
+#endif /* HAVE_TX_MQ */
+#ifndef ETH_FLAG_RXHASH
+#define ETH_FLAG_RXHASH (1<<28)
+#endif /* ETH_FLAG_RXHASH */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
+#define HAVE_IRQ_AFFINITY_HINT
+#endif
+#else /* < 2.6.35 */
+#define HAVE_PM_QOS_REQUEST_LIST
+#define HAVE_IRQ_AFFINITY_HINT
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
+#define ethtool_op_set_flags _kc_ethtool_op_set_flags
+extern u32 _kc_ethtool_op_get_flags(struct net_device *);
+#define ethtool_op_get_flags _kc_ethtool_op_get_flags
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef NET_IP_ALIGN
+#undef NET_IP_ALIGN
+#endif
+#define NET_IP_ALIGN 0
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+#ifdef NET_SKB_PAD
+#undef NET_SKB_PAD
+#endif
+
+#if (L1_CACHE_BYTES > 32)
+#define NET_SKB_PAD L1_CACHE_BYTES
+#else
+#define NET_SKB_PAD 32
+#endif
+
+static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
+ unsigned int length)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
+ if (skb) {
+#if (NET_IP_ALIGN + NET_SKB_PAD)
+ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+#endif
+ skb->dev = dev;
+ }
+ return skb;
+}
+
+#ifdef netdev_alloc_skb_ip_align
+#undef netdev_alloc_skb_ip_align
+#endif
+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
+
+#undef netif_level
+#define netif_level(level, priv, type, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_##level(dev, fmt, ##args); \
+} while (0)
+
+#undef usleep_range
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#define u64_stats_update_begin(a) do { } while(0)
+#define u64_stats_update_end(a) do { } while(0)
+#define u64_stats_fetch_begin(a) do { } while(0)
+#define u64_stats_fetch_retry_bh(a) (0)
+#define u64_stats_fetch_begin_bh(a) (0)
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
+#define HAVE_8021P_SUPPORT
+#endif
+
+#else /* < 2.6.36 */
+
+
+#define HAVE_PM_QOS_REQUEST_ACTIVE
+#define HAVE_8021P_SUPPORT
+#define HAVE_NDO_GET_STATS64
+#endif /* < 2.6.36 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#ifndef netif_set_real_num_rx_queues
+static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev,
+ unsigned int rxq)
+{
+ return 0;
+}
+#define netif_set_real_num_rx_queues(dev, rxq) \
+ __kc_netif_set_real_num_rx_queues((dev), (rxq))
+#endif
+#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
+#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
+#endif
+#ifndef VLAN_N_VID
+#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN
+#endif /* VLAN_N_VID */
+#ifndef ETH_FLAG_TXVLAN
+#define ETH_FLAG_TXVLAN (1 << 7)
+#endif /* ETH_FLAG_TXVLAN */
+#ifndef ETH_FLAG_RXVLAN
+#define ETH_FLAG_RXVLAN (1 << 8)
+#endif /* ETH_FLAG_RXVLAN */
+
+static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
+{
+ WARN_ON(skb->ip_summed != CHECKSUM_NONE);
+}
+#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
+
+static inline void *_kc_vzalloc_node(unsigned long size, int node)
+{
+ void *addr = vmalloc_node(size, node);
+ if (addr)
+ memset(addr, 0, size);
+ return addr;
+}
+#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
+
+static inline void *_kc_vzalloc(unsigned long size)
+{
+ void *addr = vmalloc(size);
+ if (addr)
+ memset(addr, 0, size);
+ return addr;
+}
+#define vzalloc(_size) _kc_vzalloc(_size)
+
+#ifndef vlan_get_protocol
+static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb)
+{
+ if (vlan_tx_tag_present(skb) ||
+ skb->protocol != cpu_to_be16(ETH_P_8021Q))
+ return skb->protocol;
+
+ if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
+ return 0;
+
+ return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
+}
+#define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb)
+#endif
+#ifdef HAVE_HW_TIME_STAMP
+#define SKBTX_HW_TSTAMP (1 << 0)
+#define SKBTX_IN_PROGRESS (1 << 2)
+#define SKB_SHARED_TX_IS_UNION
+#endif
+
+#ifndef device_wakeup_enable
+#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true)
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
+#ifndef HAVE_VLAN_RX_REGISTER
+#define HAVE_VLAN_RX_REGISTER
+#endif
+#endif /* > 2.4.18 */
+#endif /* < 2.6.37 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define skb_checksum_start_offset(skb) skb_transport_offset(skb)
+#else /* 2.6.22 -> 2.6.37 */
+static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
+{
+ return skb->csum_start - skb_headroom(skb);
+}
+#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
+#endif /* 2.6.22 -> 2.6.37 */
+#ifdef CONFIG_DCB
+#ifndef IEEE_8021QAZ_MAX_TCS
+#define IEEE_8021QAZ_MAX_TCS 8
+#endif
+#ifndef DCB_CAP_DCBX_HOST
+#define DCB_CAP_DCBX_HOST 0x01
+#endif
+#ifndef DCB_CAP_DCBX_LLD_MANAGED
+#define DCB_CAP_DCBX_LLD_MANAGED 0x02
+#endif
+#ifndef DCB_CAP_DCBX_VER_CEE
+#define DCB_CAP_DCBX_VER_CEE 0x04
+#endif
+#ifndef DCB_CAP_DCBX_VER_IEEE
+#define DCB_CAP_DCBX_VER_IEEE 0x08
+#endif
+#ifndef DCB_CAP_DCBX_STATIC
+#define DCB_CAP_DCBX_STATIC 0x10
+#endif
+#endif /* CONFIG_DCB */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))
+#define CONFIG_XPS
+#endif /* RHEL_RELEASE_VERSION(6,2) */
+#endif /* < 2.6.38 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#ifndef NETIF_F_RXCSUM
+#define NETIF_F_RXCSUM (1 << 29)
+#endif
+#ifndef skb_queue_reverse_walk_safe
+#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
+ for (skb = (queue)->prev, tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
+#endif
+#else /* < 2.6.39 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif
+#ifndef HAVE_SETUP_TC
+#define HAVE_SETUP_TC
+#endif
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#endif
+#endif /* CONFIG_DCB */
+#ifndef HAVE_NDO_SET_FEATURES
+#define HAVE_NDO_SET_FEATURES
+#endif
+#endif /* < 2.6.39 */
+
+/*****************************************************************************/
+/* use < 2.6.40 because a Fedora 15 kernel update bumped the kernel version
+ * to 2.6.40.x and back-ported 3.0 features such as set_phys_id for ethtool.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
+#ifdef ETHTOOL_GRXRINGS
+#ifndef FLOW_EXT
+#define FLOW_EXT 0x80000000
+union _kc_ethtool_flow_union {
+ struct ethtool_tcpip4_spec tcp_ip4_spec;
+ struct ethtool_usrip4_spec usr_ip4_spec;
+ __u8 hdata[60];
+};
+struct _kc_ethtool_flow_ext {
+ __be16 vlan_etype;
+ __be16 vlan_tci;
+ __be32 data[2];
+};
+struct _kc_ethtool_rx_flow_spec {
+ __u32 flow_type;
+ union _kc_ethtool_flow_union h_u;
+ struct _kc_ethtool_flow_ext h_ext;
+ union _kc_ethtool_flow_union m_u;
+ struct _kc_ethtool_flow_ext m_ext;
+ __u64 ring_cookie;
+ __u32 location;
+};
+#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
+#endif /* FLOW_EXT */
+#endif
+
+#define pci_disable_link_state_locked pci_disable_link_state
+
+#ifndef PCI_LTR_VALUE_MASK
+#define PCI_LTR_VALUE_MASK 0x000003ff
+#endif
+#ifndef PCI_LTR_SCALE_MASK
+#define PCI_LTR_SCALE_MASK 0x00001c00
+#endif
+#ifndef PCI_LTR_SCALE_SHIFT
+#define PCI_LTR_SCALE_SHIFT 10
+#endif
+
+#else /* < 2.6.40 */
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#endif /* < 2.6.40 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) )
+#define USE_LEGACY_PM_SUPPORT
+#endif /* < 3.0.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#ifndef __netdev_alloc_skb_ip_align
+#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
+#endif /* __netdev_alloc_skb_ip_align */
+#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
+#define dcb_ieee_delapp(dev, app) 0
+#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
+
+/* 1000BASE-T Control register */
+#define CTL1000_AS_MASTER 0x0800
+#define CTL1000_ENABLE_MASTER 0x1000
+
+#else /* < 3.1.0 */
+#ifndef HAVE_DCBNL_IEEE_DELAPP
+#define HAVE_DCBNL_IEEE_DELAPP
+#endif
+#endif /* < 3.1.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#ifdef ETHTOOL_GRXRINGS
+#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+#endif /* ETHTOOL_GRXRINGS */
+
+#ifndef skb_frag_size
+#define skb_frag_size(frag) _kc_skb_frag_size(frag)
+static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
+{
+ return frag->size;
+}
+#endif /* skb_frag_size */
+
+#ifndef skb_frag_size_sub
+#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
+static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+ frag->size -= delta;
+}
+#endif /* skb_frag_size_sub */
+
+#ifndef skb_frag_page
+#define skb_frag_page(frag) _kc_skb_frag_page(frag)
+static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
+{
+ return frag->page;
+}
+#endif /* skb_frag_page */
+
+#ifndef skb_frag_address
+#define skb_frag_address(frag) _kc_skb_frag_address(frag)
+static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
+{
+ return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+#endif /* skb_frag_address */
+
+#ifndef skb_frag_dma_map
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#include <linux/dma-mapping.h>
+#endif
+#define skb_frag_dma_map(dev,frag,offset,size,dir) \
+ _kc_skb_frag_dma_map(dev,frag,offset,size,dir)
+static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
+ const skb_frag_t *frag,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_map_page(dev, skb_frag_page(frag),
+ frag->page_offset + offset, size, dir);
+}
+#endif /* skb_frag_dma_map */
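+/*
+ * Usage sketch for the skb_frag_dma_map() fallback above (tx_ring and the
+ * dma_error label are hypothetical): mapping one paged fragment of an skb
+ * for device DMA reads the same on old and new kernels once this wrapper
+ * is in place.
+ *
+ *	const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+ *	dma_addr_t dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
+ *					  skb_frag_size(frag), DMA_TO_DEVICE);
+ *	if (dma_mapping_error(tx_ring->dev, dma))
+ *		goto dma_error;
+ */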
+
+#ifndef __skb_frag_unref
+#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
+static inline void __kc_skb_frag_unref(skb_frag_t *frag)
+{
+ put_page(skb_frag_page(frag));
+}
+#endif /* __skb_frag_unref */
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN -1
+#endif
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN 0xff
+#endif
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#endif
+#endif
+#else /* < 3.2.0 */
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_VF_SPOOFCHK_CONFIGURE
+#endif
+#endif /* < 3.2.0 */
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2))
+#undef ixgbe_get_netdev_tc_txq
+#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
+#endif
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
+typedef u32 kni_netdev_features_t;
+#undef PCI_EXP_TYPE_RC_EC
+#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
+#ifndef CONFIG_BQL
+#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0)
+#define netdev_completed_queue(_n, _p, _b) do {} while (0)
+#define netdev_tx_sent_queue(_q, _b) do {} while (0)
+#define netdev_sent_queue(_n, _b) do {} while (0)
+#define netdev_tx_reset_queue(_q) do {} while (0)
+#define netdev_reset_queue(_n) do {} while (0)
+#endif
+#else /* ! < 3.3.0 */
+typedef netdev_features_t kni_netdev_features_t;
+#define HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef ETHTOOL_SRXNTUPLE
+#undef ETHTOOL_SRXNTUPLE
+#endif
+#endif /* < 3.3.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+#ifndef NETIF_F_RXFCS
+#define NETIF_F_RXFCS 0
+#endif /* NETIF_F_RXFCS */
+#ifndef NETIF_F_RXALL
+#define NETIF_F_RXALL 0
+#endif /* NETIF_F_RXALL */
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define NUMTCS_RETURNS_U8
+
+int _kc_simple_open(struct inode *inode, struct file *file);
+#define simple_open _kc_simple_open
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
+
+
+#ifndef skb_add_rx_frag
+#define skb_add_rx_frag _kc_skb_add_rx_frag
+extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *,
+ int, int, unsigned int);
+#endif
+#ifdef NET_ADDR_RANDOM
+#define eth_hw_addr_random(N) do { \
+ random_ether_addr(N->dev_addr); \
+ N->addr_assign_type |= NET_ADDR_RANDOM; \
+ } while (0)
+#else /* NET_ADDR_RANDOM */
+#define eth_hw_addr_random(N) random_ether_addr(N->dev_addr)
+#endif /* NET_ADDR_RANDOM */
+#else /* < 3.4.0 */
+#include <linux/kconfig.h>
+#endif /* >= 3.4.0 */
+
+/*****************************************************************************/
+#if defined(E1000E_PTP) || defined(IGB_PTP) || defined(IXGBE_PTP) || defined(I40E_PTP)
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+#define HAVE_PTP_1588_CLOCK
+#else
+#error Cannot enable PTP Hardware Clock support due to a pre-3.0 kernel version or CONFIG_PTP_1588_CLOCK not enabled in the kernel
+#endif /* >= 3.0.0 && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* E1000E_PTP || IGB_PTP || IXGBE_PTP || I40E_PTP */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+#define skb_tx_timestamp(skb) do {} while (0)
+static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+ return !compare_ether_addr(addr1, addr2);
+}
+#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
+#else
+#define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
+#endif /* < 3.5.0 */
+
+/*****************************************************************************/
+#include <linux/mdio.h>
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) )
+#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
+
+#ifndef MDIO_EEE_100TX
+#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */
+#endif
+#ifndef MDIO_EEE_1000T
+#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */
+#endif
+#ifndef MDIO_EEE_10GT
+#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */
+#endif
+#ifndef MDIO_EEE_1000KX
+#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKX4
+#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKR
+#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */
+#endif
+#endif /* < 3.6.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#ifndef ADVERTISED_40000baseKR4_Full
+/* these defines were all added in one commit, so it should be safe
+ * to trigger activation on a single define
+ */
+#define SUPPORTED_40000baseKR4_Full (1 << 23)
+#define SUPPORTED_40000baseCR4_Full (1 << 24)
+#define SUPPORTED_40000baseSR4_Full (1 << 25)
+#define SUPPORTED_40000baseLR4_Full (1 << 26)
+#define ADVERTISED_40000baseKR4_Full (1 << 23)
+#define ADVERTISED_40000baseCR4_Full (1 << 24)
+#define ADVERTISED_40000baseSR4_Full (1 << 25)
+#define ADVERTISED_40000baseLR4_Full (1 << 26)
+#endif
+
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+ u32 supported = 0;
+
+ if (eee_cap & MDIO_EEE_100TX)
+ supported |= SUPPORTED_100baseT_Full;
+ if (eee_cap & MDIO_EEE_1000T)
+ supported |= SUPPORTED_1000baseT_Full;
+ if (eee_cap & MDIO_EEE_10GT)
+ supported |= SUPPORTED_10000baseT_Full;
+ if (eee_cap & MDIO_EEE_1000KX)
+ supported |= SUPPORTED_1000baseKX_Full;
+ if (eee_cap & MDIO_EEE_10GKX4)
+ supported |= SUPPORTED_10000baseKX4_Full;
+ if (eee_cap & MDIO_EEE_10GKR)
+ supported |= SUPPORTED_10000baseKR_Full;
+
+ return supported;
+}
+#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
+ __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
+
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+ u32 adv = 0;
+
+ if (eee_adv & MDIO_EEE_100TX)
+ adv |= ADVERTISED_100baseT_Full;
+ if (eee_adv & MDIO_EEE_1000T)
+ adv |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & MDIO_EEE_10GT)
+ adv |= ADVERTISED_10000baseT_Full;
+ if (eee_adv & MDIO_EEE_1000KX)
+ adv |= ADVERTISED_1000baseKX_Full;
+ if (eee_adv & MDIO_EEE_10GKX4)
+ adv |= ADVERTISED_10000baseKX4_Full;
+ if (eee_adv & MDIO_EEE_10GKR)
+ adv |= ADVERTISED_10000baseKR_Full;
+
+ return adv;
+}
+#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \
+ __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv)
+
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+ u16 reg = 0;
+
+ if (adv & ADVERTISED_100baseT_Full)
+ reg |= MDIO_EEE_100TX;
+ if (adv & ADVERTISED_1000baseT_Full)
+ reg |= MDIO_EEE_1000T;
+ if (adv & ADVERTISED_10000baseT_Full)
+ reg |= MDIO_EEE_10GT;
+ if (adv & ADVERTISED_1000baseKX_Full)
+ reg |= MDIO_EEE_1000KX;
+ if (adv & ADVERTISED_10000baseKX4_Full)
+ reg |= MDIO_EEE_10GKX4;
+ if (adv & ADVERTISED_10000baseKR_Full)
+ reg |= MDIO_EEE_10GKR;
+
+ return reg;
+}
+#define ethtool_adv_to_mmd_eee_adv_t(adv) \
+ __kc_ethtool_adv_to_mmd_eee_adv_t(adv)
+
+#ifndef pci_pcie_type
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+static inline u8 pci_pcie_type(struct pci_dev *pdev)
+{
+ int pos;
+ u16 reg16;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (!pos)
+ BUG();
+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+#else /* < 2.6.24 */
+#define pci_pcie_type(x) (x)->pcie_type
+#endif /* < 2.6.24 */
+#endif /* pci_pcie_type */
+
+#define ptp_clock_register(caps, args...) ptp_clock_register(caps)
+
+#ifndef PCI_EXP_LNKSTA2
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
+#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v)
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
+#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v)
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
+#define pcie_capability_clear_and_set_word(d,p,c,s) \
+ __kc_pcie_capability_clear_and_set_word(d,p,c,s)
+
+#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
+
+static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
+ u16 clear)
+{
+ return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
+}
+#endif /* !PCI_EXP_LNKSTA2 */
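+/*
+ * Usage sketch for the pcie_capability_*() fallbacks above (pdev is a
+ * hypothetical struct pci_dev pointer, and the register/mask choice is
+ * only an example): reading the Link Control register and clearing its
+ * ASPM control bits works the same with the backport as with the 3.7+
+ * kernel helpers.
+ *
+ *	u16 lnkctl;
+ *
+ *	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
+ *	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
+ *				   PCI_EXP_LNKCTL_ASPMC);
+ */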
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define USE_CONST_DEV_UC_CHAR
+#endif
+
+#else /* >= 3.7.0 */
+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
+#define USE_CONST_DEV_UC_CHAR
+#endif /* >= 3.7.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
+#ifndef PCI_EXP_LNKCTL_ASPM_L0S
+#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L1
+#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
+#endif
+#define HAVE_CONFIG_HOTPLUG
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
+ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) &&\
+ !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5))
+static inline bool is_link_local_ether_addr(const u8 *addr)
+{
+ __be16 *a = (__be16 *)addr;
+ static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+ static const __be16 m = cpu_to_be16(0xfff0);
+
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+}
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
+#else /* >= 3.8.0 */
+#ifndef __devinit
+#define __devinit
+#define HAVE_ENCAP_CSUM_OFFLOAD
+#endif
+
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+
+#ifndef __devexit
+#define __devexit
+#endif
+
+#ifndef __devexit_p
+#define __devexit_p
+#endif
+
+#ifndef HAVE_SRIOV_CONFIGURE
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#define HAVE_BRIDGE_ATTRIBS
+#ifndef BRIDGE_MODE_VEB
+#define BRIDGE_MODE_VEB 0 /* Default loopback mode */
+#endif /* BRIDGE_MODE_VEB */
+#ifndef BRIDGE_MODE_VEPA
+#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */
+#endif /* BRIDGE_MODE_VEPA */
+#endif /* >= 3.8.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
+
+#undef hlist_entry
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#undef hlist_entry_safe
+#define hlist_entry_safe(ptr, type, member) \
+ (ptr) ? hlist_entry(ptr, type, member) : NULL
+
+#undef hlist_for_each_entry
+#define hlist_for_each_entry(pos, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#undef hlist_for_each_entry_safe
+#define hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
+ pos && ({ n = pos->member.next; 1; }); \
+ pos = hlist_entry_safe(n, typeof(*pos), member))
+
+#ifdef CONFIG_XPS
+extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16);
+#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx))
+#else /* CONFIG_XPS */
+#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0)
+#endif /* CONFIG_XPS */
+
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#define _kc_hashrnd 0xd631614b /* not so random hash salt */
+extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+#define __netdev_pick_tx __kc_netdev_pick_tx
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#else
+#define HAVE_BRIDGE_FILTER
+#define USE_DEFAULT_FDB_DEL_DUMP
+#endif /* < 3.9.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifdef CONFIG_PCI_IOV
+extern int __kc_pci_vfs_assigned(struct pci_dev *dev);
+#else
+static inline int __kc_pci_vfs_assigned(struct pci_dev *dev)
+{
+ return 0;
+}
+#endif
+#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
+
+#ifndef VLAN_TX_COOKIE_MAGIC
+static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
+ u16 vlan_tci)
+{
+#ifdef VLAN_TAG_PRESENT
+ vlan_tci |= VLAN_TAG_PRESENT;
+#endif
+ skb->vlan_tci = vlan_tci;
+ return skb;
+}
+#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
+ __kc__vlan_hwaccel_put_tag(skb, vlan_tci)
+#endif
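+/*
+ * Usage sketch (skb and vlan_tci are hypothetical locals): a receive path
+ * written for the 3.10+ three-argument form still compiles on older
+ * kernels because the shim above simply drops the protocol argument and
+ * stores the tag.
+ *
+ *	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ *	netif_receive_skb(skb);
+ */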
+
+#else /* >= 3.10.0 */
+#define HAVE_ENCAP_TSO_OFFLOAD
+#endif /* >= 3.10.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)))
+#if (!(UBUNTU_KERNEL_CODE >= UBUNTU_KERNEL_VERSION(3,13,0,30,54) \
+ && (UBUNTU_RELEASE_CODE == UBUNTU_RELEASE_VERSION(12,4) \
+ || UBUNTU_RELEASE_CODE == UBUNTU_RELEASE_VERSION(14,4))))
+#ifdef NETIF_F_RXHASH
+#define PKT_HASH_TYPE_L3 0
+static inline void
+skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type)
+{
+ skb->rxhash = hash;
+}
+#endif /* NETIF_F_RXHASH */
+#endif /* < 3.13.0-30.54 (Ubuntu 14.04) */
+#endif /* < RHEL7 */
+#endif /* < 3.14.0 */
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) )
+#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
+#define HAVE_VF_MIN_MAX_TXRATE 1
+#endif /* >= 3.16.0 */
+
+#endif /* _KCOMPAT_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c
new file mode 100755
index 00000000..3adf8696
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/igb/kcompat_ethtool.c
@@ -0,0 +1,1172 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * net/core/ethtool.c - Ethtool ioctl handler
+ * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
+ *
+ * This file is where we call all the ethtool_ops commands to get
+ * the information ethtool needs. We fall back to calling do_ioctl()
+ * for drivers which haven't been converted to ethtool_ops yet.
+ *
+ * It's GPL, stupid.
+ *
+ * Modification by sfeldma@pobox.com to work as backward compat
+ * solution for pre-ethtool_ops kernels.
+ * - copied struct ethtool_ops from ethtool.h
+ * - defined SET_ETHTOOL_OPS
+ * - put in some #ifndef NETIF_F_xxx wrappers
+ * - changes refs to dev->ethtool_ops to ethtool_ops
+ * - changed dev_ethtool to ethtool_ioctl
+ * - remove EXPORT_SYMBOL()s
+ * - added _kc_ prefix in built-in ethtool_op_xxx ops.
+ */
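+
+/*
+ * Illustrative sketch (not part of the upstream file; the function names are
+ * made up). Under this compat scheme a driver keeps the usual registration
+ * pattern, and the redefined SET_ETHTOOL_OPS below simply stores the table
+ * in the file-scope ethtool_ops pointer that ethtool_ioctl() consults:
+ *
+ *	static struct ethtool_ops example_ethtool_ops = {
+ *		.get_drvinfo = example_get_drvinfo,
+ *		.get_link    = ethtool_op_get_link,
+ *	};
+ *
+ *	SET_ETHTOOL_OPS(netdev, &example_ethtool_ops);
+ */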
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <asm/uaccess.h>
+
+#include "kcompat.h"
+
+#undef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full (1 << 12)
+#undef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#undef SPEED_10000
+#define SPEED_10000 10000
+
+#undef ethtool_ops
+#define ethtool_ops _kc_ethtool_ops
+
+struct _kc_ethtool_ops {
+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ int (*set_settings)(struct net_device *, struct ethtool_cmd *);
+ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
+ int (*get_regs_len)(struct net_device *);
+ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
+ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
+ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
+ u32 (*get_msglevel)(struct net_device *);
+ void (*set_msglevel)(struct net_device *, u32);
+ int (*nway_reset)(struct net_device *);
+ u32 (*get_link)(struct net_device *);
+ int (*get_eeprom_len)(struct net_device *);
+ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
+ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
+ void (*get_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
+ int (*set_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
+ u32 (*get_rx_csum)(struct net_device *);
+ int (*set_rx_csum)(struct net_device *, u32);
+ u32 (*get_tx_csum)(struct net_device *);
+ int (*set_tx_csum)(struct net_device *, u32);
+ u32 (*get_sg)(struct net_device *);
+ int (*set_sg)(struct net_device *, u32);
+ u32 (*get_tso)(struct net_device *);
+ int (*set_tso)(struct net_device *, u32);
+ int (*self_test_count)(struct net_device *);
+ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
+ void (*get_strings)(struct net_device *, u32 stringset, u8 *);
+ int (*phys_id)(struct net_device *, u32);
+ int (*get_stats_count)(struct net_device *);
+ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *,
+ u64 *);
+} *ethtool_ops = NULL;
+
+#undef SET_ETHTOOL_OPS
+#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops))
+
+/*
+ * Some useful ethtool_ops methods that are device independent. If we find that
+ * all drivers want to do the same thing here, we can turn these into dev_()
+ * function calls.
+ */
+
+#undef ethtool_op_get_link
+#define ethtool_op_get_link _kc_ethtool_op_get_link
+u32 _kc_ethtool_op_get_link(struct net_device *dev)
+{
+ return netif_carrier_ok(dev) ? 1 : 0;
+}
+
+#undef ethtool_op_get_tx_csum
+#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum
+u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev)
+{
+#ifdef NETIF_F_IP_CSUM
+ return (dev->features & NETIF_F_IP_CSUM) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_tx_csum
+#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum
+int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_IP_CSUM
+ if (data)
+#ifdef NETIF_F_IPV6_CSUM
+ dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ else
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+#else
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ dev->features &= ~NETIF_F_IP_CSUM;
+#endif
+#endif
+
+ return 0;
+}
+
+#undef ethtool_op_get_sg
+#define ethtool_op_get_sg _kc_ethtool_op_get_sg
+u32 _kc_ethtool_op_get_sg(struct net_device *dev)
+{
+#ifdef NETIF_F_SG
+ return (dev->features & NETIF_F_SG) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_sg
+#define ethtool_op_set_sg _kc_ethtool_op_set_sg
+int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_SG
+ if (data)
+ dev->features |= NETIF_F_SG;
+ else
+ dev->features &= ~NETIF_F_SG;
+#endif
+
+ return 0;
+}
+
+#undef ethtool_op_get_tso
+#define ethtool_op_get_tso _kc_ethtool_op_get_tso
+u32 _kc_ethtool_op_get_tso(struct net_device *dev)
+{
+#ifdef NETIF_F_TSO
+ return (dev->features & NETIF_F_TSO) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_tso
+#define ethtool_op_set_tso _kc_ethtool_op_set_tso
+int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_TSO
+ if (data)
+ dev->features |= NETIF_F_TSO;
+ else
+ dev->features &= ~NETIF_F_TSO;
+#endif
+
+ return 0;
+}
+
+/* Handlers for each ethtool command */
+
+static int ethtool_get_settings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_cmd cmd = { ETHTOOL_GSET };
+ int err;
+
+ if (!ethtool_ops->get_settings)
+ return -EOPNOTSUPP;
+
+ err = ethtool_ops->get_settings(dev, &cmd);
+ if (err < 0)
+ return err;
+
+ if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_settings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_cmd cmd;
+
+ if (!ethtool_ops->set_settings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ return ethtool_ops->set_settings(dev, &cmd);
+}
+
+static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_drvinfo info;
+ struct ethtool_ops *ops = ethtool_ops;
+
+ if (!ops->get_drvinfo)
+ return -EOPNOTSUPP;
+
+ memset(&info, 0, sizeof(info));
+ info.cmd = ETHTOOL_GDRVINFO;
+ ops->get_drvinfo(dev, &info);
+
+ if (ops->self_test_count)
+ info.testinfo_len = ops->self_test_count(dev);
+ if (ops->get_stats_count)
+ info.n_stats = ops->get_stats_count(dev);
+ if (ops->get_regs_len)
+ info.regdump_len = ops->get_regs_len(dev);
+ if (ops->get_eeprom_len)
+ info.eedump_len = ops->get_eeprom_len(dev);
+
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_regs(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_regs regs;
+ struct ethtool_ops *ops = ethtool_ops;
+ void *regbuf;
+ int reglen, ret;
+
+ if (!ops->get_regs || !ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&regs, useraddr, sizeof(regs)))
+ return -EFAULT;
+
+ reglen = ops->get_regs_len(dev);
+ if (regs.len > reglen)
+ regs.len = reglen;
+
+ regbuf = kmalloc(reglen, GFP_USER);
+ if (!regbuf)
+ return -ENOMEM;
+
+ ops->get_regs(dev, &regs, regbuf);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &regs, sizeof(regs)))
+ goto out;
+ useraddr += offsetof(struct ethtool_regs, data);
+ if (copy_to_user(useraddr, regbuf, reglen))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(regbuf);
+ return ret;
+}
+
+static int ethtool_get_wol(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+
+ if (!ethtool_ops->get_wol)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_wol(dev, &wol);
+
+ if (copy_to_user(useraddr, &wol, sizeof(wol)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_wol(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_wolinfo wol;
+
+ if (!ethtool_ops->set_wol)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&wol, useraddr, sizeof(wol)))
+ return -EFAULT;
+
+ return ethtool_ops->set_wol(dev, &wol);
+}
+
+static int ethtool_get_msglevel(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GMSGLVL };
+
+ if (!ethtool_ops->get_msglevel)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_msglevel(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_msglevel(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_msglevel)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ ethtool_ops->set_msglevel(dev, edata.data);
+ return 0;
+}
+
+static int ethtool_nway_reset(struct net_device *dev)
+{
+ if (!ethtool_ops->nway_reset)
+ return -EOPNOTSUPP;
+
+ return ethtool_ops->nway_reset(dev);
+}
+
+static int ethtool_get_link(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GLINK };
+
+ if (!ethtool_ops->get_link)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_link(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_eeprom(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->get_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(eeprom.len, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
+ goto out;
+
+ ret = ops->get_eeprom(dev, &eeprom, data);
+ if (ret)
+ goto out;
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
+ goto out;
+ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_set_eeprom(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->set_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(eeprom.len, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
+ goto out;
+
+ ret = ops->set_eeprom(dev, &eeprom, data);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
+ ret = -EFAULT;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_coalesce(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
+
+ if (!ethtool_ops->get_coalesce)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_coalesce(dev, &coalesce);
+
+ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_coalesce(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_coalesce coalesce;
+
+ if (!ethtool_ops->set_coalesce)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
+ return -EFAULT;
+
+ return ethtool_ops->set_coalesce(dev, &coalesce);
+}
+
+static int ethtool_get_ringparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
+
+ if (!ethtool_ops->get_ringparam)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_ringparam(dev, &ringparam);
+
+ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_ringparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_ringparam ringparam;
+
+ if (!ethtool_ops->set_ringparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
+ return -EFAULT;
+
+ return ethtool_ops->set_ringparam(dev, &ringparam);
+}
+
+static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
+
+ if (!ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_pauseparam(dev, &pauseparam);
+
+ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_pauseparam pauseparam;
+
+ if (!ethtool_ops->set_pauseparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
+ return -EFAULT;
+
+ return ethtool_ops->set_pauseparam(dev, &pauseparam);
+}
+
+static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GRXCSUM };
+
+ if (!ethtool_ops->get_rx_csum)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_rx_csum(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_rx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ ethtool_ops->set_rx_csum(dev, edata.data);
+ return 0;
+}
+
+static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GTXCSUM };
+
+ if (!ethtool_ops->get_tx_csum)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_tx_csum(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_tx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_tx_csum(dev, edata.data);
+}
+
+static int ethtool_get_sg(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GSG };
+
+ if (!ethtool_ops->get_sg)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_sg(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_sg(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_sg)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_sg(dev, edata.data);
+}
+
+static int ethtool_get_tso(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GTSO };
+
+ if (!ethtool_ops->get_tso)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_tso(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_tso(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_tso)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_tso(dev, edata.data);
+}
+
+static int ethtool_self_test(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_test test;
+ struct ethtool_ops *ops = ethtool_ops;
+ u64 *data;
+ int ret;
+
+ if (!ops->self_test || !ops->self_test_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&test, useraddr, sizeof(test)))
+ return -EFAULT;
+
+ test.len = ops->self_test_count(dev);
+ data = kmalloc(test.len * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->self_test(dev, &test, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &test, sizeof(test)))
+ goto out;
+ useraddr += sizeof(test);
+ if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_strings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_gstrings gstrings;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->get_strings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
+ return -EFAULT;
+
+ switch (gstrings.string_set) {
+ case ETH_SS_TEST:
+ if (!ops->self_test_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->self_test_count(dev);
+ break;
+ case ETH_SS_STATS:
+ if (!ops->get_stats_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->get_stats_count(dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_strings(dev, gstrings.string_set, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+ goto out;
+ useraddr += sizeof(gstrings);
+ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_phys_id(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_value id;
+
+ if (!ethtool_ops->phys_id)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&id, useraddr, sizeof(id)))
+ return -EFAULT;
+
+ return ethtool_ops->phys_id(dev, id.data);
+}
+
+static int ethtool_get_stats(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_stats stats;
+ struct ethtool_ops *ops = ethtool_ops;
+ u64 *data;
+ int ret;
+
+ if (!ops->get_ethtool_stats || !ops->get_stats_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&stats, useraddr, sizeof(stats)))
+ return -EFAULT;
+
+ stats.n_stats = ops->get_stats_count(dev);
+ data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_ethtool_stats(dev, &stats, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &stats, sizeof(stats)))
+ goto out;
+ useraddr += sizeof(stats);
+ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+/* The main entry point in this file. Called from net/core/dev.c */
+
+#define ETHTOOL_OPS_COMPAT
+int ethtool_ioctl(struct ifreq *ifr)
+{
+ struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
+ void *useraddr = (void *) ifr->ifr_data;
+ u32 ethcmd;
+
+ /*
+ * XXX: This can be pushed down into the ethtool_* handlers that
+ * need it. Keep existing behavior for the moment.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!dev || !netif_device_present(dev))
+ return -ENODEV;
+
+ if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GSET:
+ return ethtool_get_settings(dev, useraddr);
+ case ETHTOOL_SSET:
+ return ethtool_set_settings(dev, useraddr);
+ case ETHTOOL_GDRVINFO:
+ return ethtool_get_drvinfo(dev, useraddr);
+ case ETHTOOL_GREGS:
+ return ethtool_get_regs(dev, useraddr);
+ case ETHTOOL_GWOL:
+ return ethtool_get_wol(dev, useraddr);
+ case ETHTOOL_SWOL:
+ return ethtool_set_wol(dev, useraddr);
+ case ETHTOOL_GMSGLVL:
+ return ethtool_get_msglevel(dev, useraddr);
+ case ETHTOOL_SMSGLVL:
+ return ethtool_set_msglevel(dev, useraddr);
+ case ETHTOOL_NWAY_RST:
+ return ethtool_nway_reset(dev);
+ case ETHTOOL_GLINK:
+ return ethtool_get_link(dev, useraddr);
+ case ETHTOOL_GEEPROM:
+ return ethtool_get_eeprom(dev, useraddr);
+ case ETHTOOL_SEEPROM:
+ return ethtool_set_eeprom(dev, useraddr);
+ case ETHTOOL_GCOALESCE:
+ return ethtool_get_coalesce(dev, useraddr);
+ case ETHTOOL_SCOALESCE:
+ return ethtool_set_coalesce(dev, useraddr);
+ case ETHTOOL_GRINGPARAM:
+ return ethtool_get_ringparam(dev, useraddr);
+ case ETHTOOL_SRINGPARAM:
+ return ethtool_set_ringparam(dev, useraddr);
+ case ETHTOOL_GPAUSEPARAM:
+ return ethtool_get_pauseparam(dev, useraddr);
+ case ETHTOOL_SPAUSEPARAM:
+ return ethtool_set_pauseparam(dev, useraddr);
+ case ETHTOOL_GRXCSUM:
+ return ethtool_get_rx_csum(dev, useraddr);
+ case ETHTOOL_SRXCSUM:
+ return ethtool_set_rx_csum(dev, useraddr);
+ case ETHTOOL_GTXCSUM:
+ return ethtool_get_tx_csum(dev, useraddr);
+ case ETHTOOL_STXCSUM:
+ return ethtool_set_tx_csum(dev, useraddr);
+ case ETHTOOL_GSG:
+ return ethtool_get_sg(dev, useraddr);
+ case ETHTOOL_SSG:
+ return ethtool_set_sg(dev, useraddr);
+ case ETHTOOL_GTSO:
+ return ethtool_get_tso(dev, useraddr);
+ case ETHTOOL_STSO:
+ return ethtool_set_tso(dev, useraddr);
+ case ETHTOOL_TEST:
+ return ethtool_self_test(dev, useraddr);
+ case ETHTOOL_GSTRINGS:
+ return ethtool_get_strings(dev, useraddr);
+ case ETHTOOL_PHYS_ID:
+ return ethtool_phys_id(dev, useraddr);
+ case ETHTOOL_GSTATS:
+ return ethtool_get_stats(dev, useraddr);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+#define mii_if_info _kc_mii_if_info
+struct _kc_mii_if_info {
+ int phy_id;
+ int advertising;
+ int phy_id_mask;
+ int reg_num_mask;
+
+ unsigned int full_duplex : 1; /* is full duplex? */
+ unsigned int force_media : 1; /* is autoneg. disabled? */
+
+ struct net_device *dev;
+ int (*mdio_read) (struct net_device *dev, int phy_id, int location);
+ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
+};
+
+struct ethtool_cmd;
+struct mii_ioctl_data;
+
+#undef mii_link_ok
+#define mii_link_ok _kc_mii_link_ok
+#undef mii_nway_restart
+#define mii_nway_restart _kc_mii_nway_restart
+#undef mii_ethtool_gset
+#define mii_ethtool_gset _kc_mii_ethtool_gset
+#undef mii_ethtool_sset
+#define mii_ethtool_sset _kc_mii_ethtool_sset
+#undef mii_check_link
+#define mii_check_link _kc_mii_check_link
+extern int _kc_mii_link_ok (struct mii_if_info *mii);
+extern int _kc_mii_nway_restart (struct mii_if_info *mii);
+extern int _kc_mii_ethtool_gset(struct mii_if_info *mii,
+ struct ethtool_cmd *ecmd);
+extern int _kc_mii_ethtool_sset(struct mii_if_info *mii,
+ struct ethtool_cmd *ecmd);
+extern void _kc_mii_check_link (struct mii_if_info *mii);
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
+#undef generic_mii_ioctl
+#define generic_mii_ioctl _kc_generic_mii_ioctl
+extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+ struct mii_ioctl_data *mii_data, int cmd,
+ unsigned int *duplex_changed);
+#endif /* > 2.4.6 */
+
+
+struct _kc_pci_dev_ext {
+ struct pci_dev *dev;
+ void *pci_drvdata;
+ struct pci_driver *driver;
+};
+
+struct _kc_net_dev_ext {
+ struct net_device *dev;
+ unsigned int carrier;
+};
+
+
+/**************************************/
+/* mii support */
+
+int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+ struct net_device *dev = mii->dev;
+ u32 advert, bmcr, lpa, nego;
+
+ ecmd->supported =
+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
+
+ /* only supports twisted-pair */
+ ecmd->port = PORT_MII;
+
+ /* only supports internal transceiver */
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ /* this isn't fully supported at higher layers */
+ ecmd->phy_address = mii->phy_id;
+
+ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+ if (advert & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (advert & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (advert & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (advert & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
+ if (bmcr & BMCR_ANENABLE) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+
+ nego = mii_nway_result(advert & lpa);
+ if (nego == LPA_100FULL || nego == LPA_100HALF)
+ ecmd->speed = SPEED_100;
+ else
+ ecmd->speed = SPEED_10;
+ if (nego == LPA_100FULL || nego == LPA_10FULL) {
+ ecmd->duplex = DUPLEX_FULL;
+ mii->full_duplex = 1;
+ } else {
+ ecmd->duplex = DUPLEX_HALF;
+ mii->full_duplex = 0;
+ }
+ } else {
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+ ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+
+ return 0;
+}
+
+int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+ struct net_device *dev = mii->dev;
+
+ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->port != PORT_MII)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+ if (ecmd->phy_address != mii->phy_id)
+ return -EINVAL;
+ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ /* ignore supported, maxtxpkt, maxrxpkt */
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ u32 bmcr, advert, tmp;
+
+ if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full)) == 0)
+ return -EINVAL;
+
+ /* advertise only what has been requested */
+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ tmp |= ADVERTISE_10HALF;
+ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ tmp |= ADVERTISE_10FULL;
+ if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ tmp |= ADVERTISE_100HALF;
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ tmp |= ADVERTISE_100FULL;
+ if (advert != tmp) {
+ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
+ mii->advertising = tmp;
+ }
+
+ /* turn on autonegotiation, and force a renegotiate */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
+
+ mii->force_media = 0;
+ } else {
+ u32 bmcr, tmp;
+
+ /* turn off auto negotiation, set speed and duplexity */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (ecmd->speed == SPEED_100)
+ tmp |= BMCR_SPEED100;
+ if (ecmd->duplex == DUPLEX_FULL) {
+ tmp |= BMCR_FULLDPLX;
+ mii->full_duplex = 1;
+ } else
+ mii->full_duplex = 0;
+ if (bmcr != tmp)
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
+
+ mii->force_media = 1;
+ }
+ return 0;
+}
+
+int _kc_mii_link_ok (struct mii_if_info *mii)
+{
+ /* first, a dummy read, needed to latch some MII phys */
+ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
+ if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
+ return 1;
+ return 0;
+}
+
+int _kc_mii_nway_restart (struct mii_if_info *mii)
+{
+ int bmcr;
+ int r = -EINVAL;
+
+ /* if autoneg is off, it's an error */
+ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+
+ if (bmcr & BMCR_ANENABLE) {
+ bmcr |= BMCR_ANRESTART;
+ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
+ r = 0;
+ }
+
+ return r;
+}
+
+void _kc_mii_check_link (struct mii_if_info *mii)
+{
+ int cur_link = mii_link_ok(mii);
+ int prev_link = netif_carrier_ok(mii->dev);
+
+ if (cur_link && !prev_link)
+ netif_carrier_on(mii->dev);
+ else if (prev_link && !cur_link)
+ netif_carrier_off(mii->dev);
+}
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
+int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+ struct mii_ioctl_data *mii_data, int cmd,
+ unsigned int *duplex_chg_out)
+{
+ int rc = 0;
+ unsigned int duplex_changed = 0;
+
+ if (duplex_chg_out)
+ *duplex_chg_out = 0;
+
+ mii_data->phy_id &= mii_if->phy_id_mask;
+ mii_data->reg_num &= mii_if->reg_num_mask;
+
+ switch(cmd) {
+ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */
+ case SIOCGMIIPHY:
+ mii_data->phy_id = mii_if->phy_id;
+ /* fall through */
+
+ case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */
+ case SIOCGMIIREG:
+ mii_data->val_out =
+ mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
+ mii_data->reg_num);
+ break;
+
+ case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */
+ case SIOCSMIIREG: {
+ u16 val = mii_data->val_in;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (mii_data->phy_id == mii_if->phy_id) {
+ switch(mii_data->reg_num) {
+ case MII_BMCR: {
+ unsigned int new_duplex = 0;
+ if (val & (BMCR_RESET|BMCR_ANENABLE))
+ mii_if->force_media = 0;
+ else
+ mii_if->force_media = 1;
+ if (mii_if->force_media &&
+ (val & BMCR_FULLDPLX))
+ new_duplex = 1;
+ if (mii_if->full_duplex != new_duplex) {
+ duplex_changed = 1;
+ mii_if->full_duplex = new_duplex;
+ }
+ break;
+ }
+ case MII_ADVERTISE:
+ mii_if->advertising = val;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+
+ mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
+ mii_data->reg_num, val);
+ break;
+ }
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
+ *duplex_chg_out = 1;
+
+ return rc;
+}
+#endif /* > 2.4.6 */
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/COPYING b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/COPYING
new file mode 100755
index 00000000..5f297e5b
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/COPYING
@@ -0,0 +1,339 @@
+
+"This software program is licensed subject to the GNU General Public License
+(GPL). Version 2, June 1991, available at
+<http://www.fsf.org/copyleft/gpl.html>"
+
+GNU General Public License
+
+Version 2, June 1991
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+
+Everyone is permitted to copy and distribute verbatim copies of this license
+document, but changing it is not allowed.
+
+Preamble
+
+The licenses for most software are designed to take away your freedom to
+share and change it. By contrast, the GNU General Public License is intended
+to guarantee your freedom to share and change free software--to make sure
+the software is free for all its users. This General Public License applies
+to most of the Free Software Foundation's software and to any other program
+whose authors commit to using it. (Some other Free Software Foundation
+software is covered by the GNU Library General Public License instead.) You
+can apply it to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our
+General Public Licenses are designed to make sure that you have the freedom
+to distribute copies of free software (and charge for this service if you
+wish), that you receive source code or can get it if you want it, that you
+can change the software or use pieces of it in new free programs; and that
+you know you can do these things.
+
+To protect your rights, we need to make restrictions that forbid anyone to
+deny you these rights or to ask you to surrender the rights. These
+restrictions translate to certain responsibilities for you if you distribute
+copies of the software, or if you modify it.
+
+For example, if you distribute copies of such a program, whether gratis or
+for a fee, you must give the recipients all the rights that you have. You
+must make sure that they, too, receive or can get the source code. And you
+must show them these terms so they know their rights.
+
+We protect your rights with two steps: (1) copyright the software, and (2)
+offer you this license which gives you legal permission to copy, distribute
+and/or modify the software.
+
+Also, for each author's protection and ours, we want to make certain that
+everyone understands that there is no warranty for this free software. If
+the software is modified by someone else and passed on, we want its
+recipients to know that what they have is not the original, so that any
+problems introduced by others will not reflect on the original authors'
+reputations.
+
+Finally, any free program is threatened constantly by software patents. We
+wish to avoid the danger that redistributors of a free program will
+individually obtain patent licenses, in effect making the program
+proprietary. To prevent this, we have made it clear that any patent must be
+licensed for everyone's free use or not licensed at all.
+
+The precise terms and conditions for copying, distribution and modification
+follow.
+
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. This License applies to any program or other work which contains a notice
+ placed by the copyright holder saying it may be distributed under the
+ terms of this General Public License. The "Program", below, refers to any
+ such program or work, and a "work based on the Program" means either the
+ Program or any derivative work under copyright law: that is to say, a
+ work containing the Program or a portion of it, either verbatim or with
+ modifications and/or translated into another language. (Hereinafter,
+ translation is included without limitation in the term "modification".)
+ Each licensee is addressed as "you".
+
+ Activities other than copying, distribution and modification are not
+ covered by this License; they are outside its scope. The act of running
+ the Program is not restricted, and the output from the Program is covered
+ only if its contents constitute a work based on the Program (independent
+ of having been made by running the Program). Whether that is true depends
+ on what the Program does.
+
+1. You may copy and distribute verbatim copies of the Program's source code
+ as you receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice and
+ disclaimer of warranty; keep intact all the notices that refer to this
+ License and to the absence of any warranty; and give any other recipients
+ of the Program a copy of this License along with the Program.
+
+ You may charge a fee for the physical act of transferring a copy, and you
+ may at your option offer warranty protection in exchange for a fee.
+
+2. You may modify your copy or copies of the Program or any portion of it,
+ thus forming a work based on the Program, and copy and distribute such
+ modifications or work under the terms of Section 1 above, provided that
+ you also meet all of these conditions:
+
+ * a) You must cause the modified files to carry prominent notices stating
+ that you changed the files and the date of any change.
+
+ * b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any part
+ thereof, to be licensed as a whole at no charge to all third parties
+ under the terms of this License.
+
+ * c) If the modified program normally reads commands interactively when
+ run, you must cause it, when started running for such interactive
+ use in the most ordinary way, to print or display an announcement
+ including an appropriate copyright notice and a notice that there is
+ no warranty (or else, saying that you provide a warranty) and that
+ users may redistribute the program under these conditions, and
+ telling the user how to view a copy of this License. (Exception: if
+ the Program itself is interactive but does not normally print such
+ an announcement, your work based on the Program is not required to
+ print an announcement.)
+
+ These requirements apply to the modified work as a whole. If identifiable
+ sections of that work are not derived from the Program, and can be
+ reasonably considered independent and separate works in themselves, then
+ this License, and its terms, do not apply to those sections when you
+ distribute them as separate works. But when you distribute the same
+ sections as part of a whole which is a work based on the Program, the
+ distribution of the whole must be on the terms of this License, whose
+ permissions for other licensees extend to the entire whole, and thus to
+ each and every part regardless of who wrote it.
+
+ Thus, it is not the intent of this section to claim rights or contest
+ your rights to work written entirely by you; rather, the intent is to
+ exercise the right to control the distribution of derivative or
+ collective works based on the Program.
+
+ In addition, mere aggregation of another work not based on the Program
+ with the Program (or with a work based on the Program) on a volume of a
+ storage or distribution medium does not bring the other work under the
+ scope of this License.
+
+3. You may copy and distribute the Program (or a work based on it, under
+ Section 2) in object code or executable form under the terms of Sections
+ 1 and 2 above provided that you also do one of the following:
+
+ * a) Accompany it with the complete corresponding machine-readable source
+ code, which must be distributed under the terms of Sections 1 and 2
+ above on a medium customarily used for software interchange; or,
+
+ * b) Accompany it with a written offer, valid for at least three years,
+ to give any third party, for a charge no more than your cost of
+ physically performing source distribution, a complete machine-
+ readable copy of the corresponding source code, to be distributed
+ under the terms of Sections 1 and 2 above on a medium customarily
+ used for software interchange; or,
+
+ * c) Accompany it with the information you received as to the offer to
+ distribute corresponding source code. (This alternative is allowed
+ only for noncommercial distribution and only if you received the
+ program in object code or executable form with such an offer, in
+ accord with Subsection b above.)
+
+ The source code for a work means the preferred form of the work for
+ making modifications to it. For an executable work, complete source code
+ means all the source code for all modules it contains, plus any
+ associated interface definition files, plus the scripts used to control
+ compilation and installation of the executable. However, as a special
+ exception, the source code distributed need not include anything that is
+ normally distributed (in either source or binary form) with the major
+ components (compiler, kernel, and so on) of the operating system on which
+ the executable runs, unless that component itself accompanies the
+ executable.
+
+ If distribution of executable or object code is made by offering access
+ to copy from a designated place, then offering equivalent access to copy
+ the source code from the same place counts as distribution of the source
+ code, even though third parties are not compelled to copy the source
+ along with the object code.
+
+4. You may not copy, modify, sublicense, or distribute the Program except as
+ expressly provided under this License. Any attempt otherwise to copy,
+ modify, sublicense or distribute the Program is void, and will
+ automatically terminate your rights under this License. However, parties
+ who have received copies, or rights, from you under this License will not
+ have their licenses terminated so long as such parties remain in full
+ compliance.
+
+5. You are not required to accept this License, since you have not signed
+ it. However, nothing else grants you permission to modify or distribute
+ the Program or its derivative works. These actions are prohibited by law
+ if you do not accept this License. Therefore, by modifying or
+ distributing the Program (or any work based on the Program), you
+ indicate your acceptance of this License to do so, and all its terms and
+ conditions for copying, distributing or modifying the Program or works
+ based on it.
+
+6. Each time you redistribute the Program (or any work based on the
+ Program), the recipient automatically receives a license from the
+ original licensor to copy, distribute or modify the Program subject to
+ these terms and conditions. You may not impose any further restrictions
+ on the recipients' exercise of the rights granted herein. You are not
+ responsible for enforcing compliance by third parties to this License.
+
+7. If, as a consequence of a court judgment or allegation of patent
+ infringement or for any other reason (not limited to patent issues),
+ conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot distribute
+ so as to satisfy simultaneously your obligations under this License and
+ any other pertinent obligations, then as a consequence you may not
+ distribute the Program at all. For example, if a patent license would
+ not permit royalty-free redistribution of the Program by all those who
+ receive copies directly or indirectly through you, then the only way you
+ could satisfy both it and this License would be to refrain entirely from
+ distribution of the Program.
+
+ If any portion of this section is held invalid or unenforceable under any
+ particular circumstance, the balance of the section is intended to apply
+ and the section as a whole is intended to apply in other circumstances.
+
+ It is not the purpose of this section to induce you to infringe any
+ patents or other property right claims or to contest validity of any
+ such claims; this section has the sole purpose of protecting the
+ integrity of the free software distribution system, which is implemented
+ by public license practices. Many people have made generous contributions
+ to the wide range of software distributed through that system in
+ reliance on consistent application of that system; it is up to the
+ author/donor to decide if he or she is willing to distribute software
+ through any other system and a licensee cannot impose that choice.
+
+ This section is intended to make thoroughly clear what is believed to be
+ a consequence of the rest of this License.
+
+8. If the distribution and/or use of the Program is restricted in certain
+ countries either by patents or by copyrighted interfaces, the original
+ copyright holder who places the Program under this License may add an
+ explicit geographical distribution limitation excluding those countries,
+ so that distribution is permitted only in or among countries not thus
+ excluded. In such case, this License incorporates the limitation as if
+ written in the body of this License.
+
+9. The Free Software Foundation may publish revised and/or new versions of
+ the General Public License from time to time. Such new versions will be
+ similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the Program
+ specifies a version number of this License which applies to it and "any
+ later version", you have the option of following the terms and
+ conditions either of that version or of any later version published by
+ the Free Software Foundation. If the Program does not specify a version
+ number of this License, you may choose any version ever published by the
+ Free Software Foundation.
+
+10. If you wish to incorporate parts of the Program into other free programs
+ whose distribution conditions are different, write to the author to ask
+ for permission. For software which is copyrighted by the Free Software
+ Foundation, write to the Free Software Foundation; we sometimes make
+ exceptions for this. Our decision will be guided by the two goals of
+ preserving the free status of all derivatives of our free software and
+ of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+ FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+ OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+ PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
+ EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH
+ YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
+ NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+ REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR
+ DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL
+ DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
+ (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED
+ INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF
+ THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR
+ OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it free
+software which everyone can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to
+attach them to the start of each source file to most effectively convey the
+exclusion of warranty; and each file should have at least the "copyright"
+line and a pointer to where the full notice is found.
+
+one line to give the program's name and an idea of what it does.
+Copyright (C) yyyy name of author
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59
+Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this when
+it starts in an interactive mode:
+
+Gnomovision version 69, Copyright (C) year name of author Gnomovision comes
+with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free
+software, and you are welcome to redistribute it under certain conditions;
+type 'show c' for details.
+
+The hypothetical commands 'show w' and 'show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may be
+called something other than 'show w' and 'show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+'Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+signature of Ty Coon, 1 April 1989
+Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General Public
+License instead of this License.
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h
new file mode 100755
index 00000000..222c2c71
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h
@@ -0,0 +1,925 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_H_
+#define _IXGBE_H_
+
+#ifndef IXGBE_NO_LRO
+#include <net/tcp.h>
+#endif
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#ifdef HAVE_IRQ_AFFINITY_HINT
+#include <linux/cpumask.h>
+#endif /* HAVE_IRQ_AFFINITY_HINT */
+#include <linux/vmalloc.h>
+
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#define IXGBE_DCA
+#include <linux/dca.h>
+#endif
+#include "ixgbe_dcb.h"
+
+#include "kcompat.h"
+
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#define IXGBE_FCOE
+#include "ixgbe_fcoe.h"
+#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+
+#if defined(CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE)
+#define HAVE_IXGBE_PTP
+#endif
+
+#include "ixgbe_api.h"
+
+#define PFX "ixgbe: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+ __func__ , ## args)))
+
+/* TX/RX descriptor defines */
+#define IXGBE_DEFAULT_TXD 512
+#define IXGBE_DEFAULT_TX_WORK 256
+#define IXGBE_MAX_TXD 4096
+#define IXGBE_MIN_TXD 64
+
+#define IXGBE_DEFAULT_RXD 512
+#define IXGBE_DEFAULT_RX_WORK 256
+#define IXGBE_MAX_RXD 4096
+#define IXGBE_MIN_RXD 64
+
+
+/* flow control */
+#define IXGBE_MIN_FCRTL 0x40
+#define IXGBE_MAX_FCRTL 0x7FF80
+#define IXGBE_MIN_FCRTH 0x600
+#define IXGBE_MAX_FCRTH 0x7FFF0
+#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
+#define IXGBE_MIN_FCPAUSE 0
+#define IXGBE_MAX_FCPAUSE 0xFFFF
+
+/* Supported Rx Buffer Sizes */
+#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+#define IXGBE_RXBUFFER_1536 1536
+#define IXGBE_RXBUFFER_2K 2048
+#define IXGBE_RXBUFFER_3K 3072
+#define IXGBE_RXBUFFER_4K 4096
+#define IXGBE_RXBUFFER_7K 7168
+#define IXGBE_RXBUFFER_8K 8192
+#define IXGBE_RXBUFFER_15K 15360
+#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
+#define IXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+
+/*
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
+ * this adds up to 512 bytes of extra data meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define IXGBE_TX_FLAGS_CSUM (u32)(1)
+#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1)
+#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2)
+#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
+#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
+#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
+#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
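+
+/*
+ * Illustrative note (not part of the upstream header): the 802.1Q TCI lives
+ * in the upper 16 bits of tx_flags, so packing the tag and recovering the
+ * priority field look like:
+ *
+ *	tx_flags |= (u32)vlan_tci << IXGBE_TX_FLAGS_VLAN_SHIFT;
+ *	prio = (tx_flags & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >>
+ *	       IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
+ */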
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+
+#define IXGBE_MAX_VF_MC_ENTRIES 30
+#define IXGBE_MAX_VF_FUNCTIONS 64
+#define IXGBE_MAX_VFTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define IXGBE_MAX_PF_MACVLANS 15
+#define IXGBE_82599_VF_DEVICE_ID 0x10ED
+#define IXGBE_X540_VF_DEVICE_ID 0x1515
+
+#ifdef CONFIG_PCI_IOV
+#define VMDQ_P(p) ((p) + adapter->num_vfs)
+#else
+#define VMDQ_P(p) (p)
+#endif
+
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ u32 current_counter = IXGBE_READ_REG(hw, reg); \
+ if (current_counter < last_counter) \
+ counter += 0x100000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFFF00000000LL; \
+ counter |= current_counter; \
+ }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
+ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ counter += 0x1000000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFF000000000LL; \
+ counter |= current_counter; \
+ }
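+
+/*
+ * Illustrative usage (not part of the upstream header; the register macros
+ * named here are assumptions). Both helpers expect a local "hw" pointer and
+ * fold the rollover-prone hardware counters into 64-bit software copies,
+ * e.g. in a per-VF statistics update:
+ *
+ *	UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(vf), last->gprc, stats->gprc);
+ *	UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(vf), IXGBE_PVFGORC_MSB(vf),
+ *				last->gorc, stats->gorc);
+ */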
+
+struct vf_stats {
+ u64 gprc;
+ u64 gorc;
+ u64 gptc;
+ u64 gotc;
+ u64 mprc;
+};
+
+struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 default_vf_vlan_id;
+ u16 vlans_enabled;
+ bool clear_to_send;
+ struct vf_stats vfstats;
+ struct vf_stats last_vfstats;
+ struct vf_stats saved_rst_vfstats;
+ bool pf_set_mac;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ u16 tx_rate;
+ u16 vlan_count;
+ u8 spoofchk_enabled;
+ struct pci_dev *vfdev;
+};
+
+struct vf_macvlans {
+ struct list_head l;
+ int vf;
+ bool free;
+ bool is_macvlan;
+ u8 vf_macvlan[ETH_ALEN];
+};
+
+#ifndef IXGBE_NO_LRO
+#define IXGBE_LRO_MAX 32 /*Maximum number of LRO descriptors*/
+#define IXGBE_LRO_GLOBAL 10
+
+struct ixgbe_lro_stats {
+ u32 flushed;
+ u32 coal;
+};
+
+/*
+ * ixgbe_lro_header - header format to be aggregated by LRO
+ * @iph: IP header without options
+ * @tcp: TCP header
+ * @ts: Optional TCP timestamp data in TCP options
+ *
+ * This structure relies on the check above that verifies that the header
+ * is IPv4 and does not contain any options.
+ */
+struct ixgbe_lrohdr {
+ struct iphdr iph;
+ struct tcphdr th;
+ __be32 ts[0];
+};
+
+struct ixgbe_lro_list {
+ struct sk_buff_head active;
+ struct ixgbe_lro_stats stats;
+};
+
+#endif /* IXGBE_NO_LRO */
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#ifdef MAX_SKB_FRAGS
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#else
+#define DESC_NEEDED 4
+#endif
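+
+/*
+ * Illustrative arithmetic (not part of the upstream header): with 4 KiB
+ * pages, TXD_USE_COUNT(PAGE_SIZE) == DIV_ROUND_UP(4096, 16384) == 1, so
+ * DESC_NEEDED becomes MAX_SKB_FRAGS + 4; with the typical MAX_SKB_FRAGS of
+ * 17 that reserves 21 descriptors for a worst-case frame plus context and
+ * tail overhead.
+ */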
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgbe_tx_buffer {
+ union ixgbe_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ __be16 protocol;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+};
+
+struct ixgbe_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+ struct page *page;
+ unsigned int page_offset;
+#endif
+};
+
+struct ixgbe_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct ixgbe_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_done_old;
+};
+
+struct ixgbe_rx_queue_stats {
+ u64 rsc_count;
+ u64 rsc_flush;
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+ u64 csum_err;
+};
+
+enum ixgbe_ring_state_t {
+ __IXGBE_TX_FDIR_INIT_DONE,
+ __IXGBE_TX_DETECT_HANG,
+ __IXGBE_HANG_CHECK_ARMED,
+ __IXGBE_RX_RSC_ENABLED,
+#ifndef HAVE_NDO_SET_FEATURES
+ __IXGBE_RX_CSUM_ENABLED,
+#endif
+ __IXGBE_RX_CSUM_UDP_ZERO_ERR,
+#ifdef IXGBE_FCOE
+ __IXGBE_RX_FCOE_BUFSZ,
+#endif
+};
+
+#define check_for_tx_hang(ring) \
+ test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#ifndef IXGBE_NO_HW_RSC
+#define ring_is_rsc_enabled(ring) \
+ test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#else
+#define ring_is_rsc_enabled(ring) false
+#endif
+#define set_ring_rsc_enabled(ring) \
+ set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+ clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define netdev_ring(ring) (ring->netdev)
+#define ring_queue_index(ring) (ring->queue_index)
+
+
+struct ixgbe_ring {
+ struct ixgbe_ring *next; /* pointer to next ring in q_vector */
+ struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
+ struct net_device *netdev; /* netdev ring belongs to */
+ struct device *dev; /* device for DMA mapping */
+ void *desc; /* descriptor ring memory */
+ union {
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ struct ixgbe_rx_buffer *rx_buffer_info;
+ };
+ unsigned long state;
+ u8 __iomem *tail;
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
+
+ u16 count; /* number of descriptors */
+
+ u8 queue_index; /* needed for multiqueue queue management */
+ u8 reg_idx; /* holds the special value that gets
+ * the hardware register offset
+ * associated with this ring, which is
+ * different for DCB and RSS modes
+ */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ union {
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+ u16 rx_buf_len;
+#else
+ u16 next_to_alloc;
+#endif
+ struct {
+ u8 atr_sample_rate;
+ u8 atr_count;
+ };
+ };
+
+ u8 dcb_tc;
+ struct ixgbe_queue_stats stats;
+ union {
+ struct ixgbe_tx_queue_stats tx_stats;
+ struct ixgbe_rx_queue_stats rx_stats;
+ };
+} ____cacheline_internodealigned_in_smp;
+
+enum ixgbe_ring_f_enum {
+ RING_F_NONE = 0,
+ RING_F_VMDQ, /* SR-IOV uses the same ring feature */
+ RING_F_RSS,
+ RING_F_FDIR,
+#ifdef IXGBE_FCOE
+ RING_F_FCOE,
+#endif /* IXGBE_FCOE */
+ RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
+#define IXGBE_MAX_DCB_INDICES 8
+#define IXGBE_MAX_RSS_INDICES 16
+#define IXGBE_MAX_VMDQ_INDICES 64
+#define IXGBE_MAX_FDIR_INDICES 64
+#ifdef IXGBE_FCOE
+#define IXGBE_MAX_FCOE_INDICES 8
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#else
+#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
+#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
+#endif /* IXGBE_FCOE */
+struct ixgbe_ring_feature {
+ int indices;
+ int mask;
+};
+
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+/*
+ * FCoE requires Rx buffers larger than 2200 bytes. On systems with pages
+ * smaller than 8 KB the default buffer (half a page) is too small, so the
+ * page order is doubled for FCoE-enabled Rx queues to provide a full-page
+ * buffer.
+ */
+#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+ return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+}
+#else
+#define ixgbe_rx_pg_order(_ring) 0
+#endif
+#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
+#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
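+/*
+ * Worked example (added for clarity, not in the upstream sources): on a
+ * 4 KB page system a normal ring uses page order 0, so ixgbe_rx_pg_size()
+ * is 4096 and ixgbe_rx_bufsz() is 2048. An FCoE ring with
+ * __IXGBE_RX_FCOE_BUFSZ set bumps the order to 1, giving an 8192-byte page
+ * and a 4096-byte buffer, which satisfies the >2200-byte FCoE requirement.
+ */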
+
+#endif
+struct ixgbe_ring_container {
+ struct ixgbe_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 work_limit; /* total work allowed per interrupt */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+/* iterator for handling rings in ring container */
+#define ixgbe_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
+#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
+ ? 8 : 1)
+#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbe_q_vector {
+ struct ixgbe_adapter *adapter;
+ int cpu; /* CPU for DCA */
+ u16 v_idx; /* index of q_vector within array, also used for
+ * finding the bit in EICR and friends that
+ * represents the vector for this ring */
+ u16 itr; /* Interrupt throttle rate written to EITR */
+ struct ixgbe_ring_container rx, tx;
+
+#ifdef CONFIG_IXGBE_NAPI
+ struct napi_struct napi;
+#endif
+#ifndef HAVE_NETDEV_NAPI_LIST
+ struct net_device poll_dev;
+#endif
+#ifdef HAVE_IRQ_AFFINITY_HINT
+ cpumask_t affinity_mask;
+#endif
+#ifndef IXGBE_NO_LRO
+ struct ixgbe_lro_list lrolist; /* LRO list for queue vector*/
+#endif
+ int numa_node;
+ char name[IFNAMSIZ + 9];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+/*
+ * Microsecond values for various ITR rates, shifted left by 2 to fit the
+ * ITR register, whose lowest 3 bits are reserved as 0.
+ */
+#define IXGBE_MIN_RSC_ITR 24
+#define IXGBE_100K_ITR 40
+#define IXGBE_20K_ITR 200
+#define IXGBE_16K_ITR 248
+#define IXGBE_10K_ITR 400
+#define IXGBE_8K_ITR 500
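+/*
+ * Illustrative arithmetic (added for clarity, not in the upstream sources):
+ * an interrupt rate of 20K ints/sec corresponds to a 50 us interval, and
+ * 50 << 2 = 200, the IXGBE_20K_ITR value above. Likewise 100K -> 10 us ->
+ * 40, and 10K -> 100 us -> 400.
+ */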
+
+/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
+static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
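+/*
+ * Illustrative usage (added for clarity, not in the upstream sources): the
+ * Rx clean-up path typically stops once the Descriptor Done bit is no
+ * longer set, e.g.
+ *
+ *     if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+ *             break;
+ *
+ * where IXGBE_RXD_STAT_DD is defined in ixgbe_type.h.
+ */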
+
+/* ixgbe_desc_unused - calculate if we have unused descriptors */
+static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
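+/*
+ * Worked example (added for clarity, not in the upstream sources): with a
+ * 512-entry ring, next_to_clean = 10 and next_to_use = 500, the ring holds
+ * 490 in-flight descriptors and ixgbe_desc_unused() returns
+ * 512 + 10 - 500 - 1 = 21; one slot is always kept empty so that a full
+ * ring can be distinguished from an empty one.
+ */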
+
+#define IXGBE_RX_DESC(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
+#define IXGBE_TX_DESC(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
+#define IXGBE_TX_CTXTDESC(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+#ifdef IXGBE_FCOE
+/* use 3K as the baby jumbo frame size for FCoE */
+#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
+#endif /* IXGBE_FCOE */
+
+#define TCP_TIMER_VECTOR 0
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR)
+
+#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64
+#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16
+
+struct ixgbe_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 queue;
+ u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT 0x1
+#define IXGBE_MAC_STATE_MODIFIED 0x2
+#define IXGBE_MAC_STATE_IN_USE 0x4
+
+#ifdef IXGBE_PROCFS
+struct ixgbe_therm_proc_data {
+ struct ixgbe_hw *hw;
+ struct ixgbe_thermal_diode_data *sensor_data;
+};
+
+#endif /* IXGBE_PROCFS */
+
+/*
+ * Only for array allocations in our adapter struct. On 82598, there will be
+ * unused entries in the array, but that's not a big deal. Also, in 82599,
+ * we can actually assign 64 queue vectors based on our extended-extended
+ * interrupt registers. This is different than 82598, which is limited to 16.
+ */
+#define MAX_MSIX_Q_VECTORS IXGBE_MAX_MSIX_Q_VECTORS_82599
+#define MAX_MSIX_COUNT IXGBE_MAX_MSIX_VECTORS_82599
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
+/* default to trying for four seconds */
+#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+
+/* board specific private data structure */
+struct ixgbe_adapter {
+#ifdef NETIF_F_HW_VLAN_TX
+#ifdef HAVE_VLAN_RX_REGISTER
+ struct vlan_group *vlgrp; /* must be first, see ixgbe_receive_skb */
+#else
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#endif
+#endif /* NETIF_F_HW_VLAN_TX */
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ unsigned long state;
+
+ /* Some features need tri-state capability,
+ * thus the additional *_CAPABLE flags.
+ */
+ u32 flags;
+#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0)
+#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
+#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
+#ifndef IXGBE_NO_LLI
+#define IXGBE_FLAG_LLI_PUSH (u32)(1 << 4)
+#endif
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 8)
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 9)
+#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 10)
+#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 11)
+#else
+#define IXGBE_FLAG_DCA_ENABLED (u32)0
+#define IXGBE_FLAG_DCA_CAPABLE (u32)0
+#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)0
+#endif
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 12)
+#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 13)
+#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 14)
+#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 15)
+#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 16)
+#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 18)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 19)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 20)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 21)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 22)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 23)
+#ifdef IXGBE_FCOE
+#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 24)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 25)
+#endif /* IXGBE_FCOE */
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 26)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 27)
+#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 28)
+#define IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 29)
+#define IXGBE_FLAG_SRIOV_L2LOOPBACK_ENABLE (u32)(1 << 30)
+#define IXGBE_FLAG_RX_BB_CAPABLE (u32)(1 << 31)
+
+ u32 flags2;
+#ifndef IXGBE_NO_HW_RSC
+#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
+#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
+#else
+#define IXGBE_FLAG2_RSC_CAPABLE 0
+#define IXGBE_FLAG2_RSC_ENABLED 0
+#endif
+#define IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE (u32)(1 << 2)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 4)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 5)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 6)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 7)
+#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 8)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 9)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 10)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 11)
+#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 12)
+
+ /* Tx fast path data */
+ int num_tx_queues;
+ u16 tx_itr_setting;
+ u16 tx_work_limit;
+
+ /* Rx fast path data */
+ int num_rx_queues;
+ u16 rx_itr_setting;
+ u16 rx_work_limit;
+
+ /* TX */
+ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
+
+ u64 restart_queue;
+ u64 lsc_int;
+ u32 tx_timeout_count;
+
+ /* RX */
+ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 rsc_total_count;
+ u64 rsc_total_flush;
+ u64 non_eop_descs;
+#ifndef CONFIG_IXGBE_NAPI
+ u64 rx_dropped_backlog; /* count drops from rx intr handler */
+#endif
+ u32 alloc_rx_page_failed;
+ u32 alloc_rx_buff_failed;
+
+ struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+
+#ifdef HAVE_DCBNL_IEEE
+ struct ieee_pfc *ixgbe_ieee_pfc;
+ struct ieee_ets *ixgbe_ieee_ets;
+#endif
+ struct ixgbe_dcb_config dcb_cfg;
+ struct ixgbe_dcb_config temp_dcb_cfg;
+ u8 dcb_set_bitmap;
+ u8 dcbx_cap;
+#ifndef HAVE_MQPRIO
+ u8 tc;
+#endif
+ enum ixgbe_fc_mode last_lfc_mode;
+
+ int num_msix_vectors;
+ int max_msix_q_vectors; /* true count of q_vectors for device */
+ struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+ struct msix_entry *msix_entries;
+
+#ifndef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats net_stats;
+#endif
+#ifndef IXGBE_NO_LRO
+ struct ixgbe_lro_stats lro_stats;
+#endif
+
+#ifdef ETHTOOL_TEST
+ u32 test_icr;
+ struct ixgbe_ring test_tx_ring;
+ struct ixgbe_ring test_rx_ring;
+#endif
+
+ /* structs defined in ixgbe_hw.h */
+ struct ixgbe_hw hw;
+ u16 msg_enable;
+ struct ixgbe_hw_stats stats;
+#ifndef IXGBE_NO_LLI
+ u32 lli_port;
+ u32 lli_size;
+ u32 lli_etype;
+ u32 lli_vlan_pri;
+#endif /* IXGBE_NO_LLI */
+
+ u32 *config_space;
+ u64 tx_busy;
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
+
+ u32 link_speed;
+ bool link_up;
+ unsigned long link_check_timeout;
+
+ struct timer_list service_timer;
+ struct work_struct service_task;
+
+ struct hlist_head fdir_filter_list;
+ unsigned long fdir_overflow; /* number of times ATR was backed off */
+ union ixgbe_atr_input fdir_mask;
+ int fdir_filter_count;
+ u32 fdir_pballoc;
+ u32 atr_sample_rate;
+ spinlock_t fdir_perfect_lock;
+
+#ifdef IXGBE_FCOE
+ struct ixgbe_fcoe fcoe;
+#endif /* IXGBE_FCOE */
+ u32 wol;
+
+ u16 bd_number;
+
+ char eeprom_id[32];
+ u16 eeprom_cap;
+ bool netdev_registered;
+ u32 interrupt_event;
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+ u32 led_reg;
+#endif
+
+ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
+ int vf_rate_link_speed;
+ struct vf_macvlans vf_mvs;
+ struct vf_macvlans *mv_list;
+#ifdef CONFIG_PCI_IOV
+ u32 timer_event_accumulator;
+ u32 vferr_refcount;
+#endif
+ struct ixgbe_mac_addr *mac_table;
+#ifdef IXGBE_SYSFS
+ struct kobject *info_kobj;
+ struct kobject *therm_kobj[IXGBE_MAX_SENSORS];
+#else /* IXGBE_SYSFS */
+#ifdef IXGBE_PROCFS
+ struct proc_dir_entry *eth_dir;
+ struct proc_dir_entry *info_dir;
+ struct proc_dir_entry *therm_dir[IXGBE_MAX_SENSORS];
+ struct ixgbe_therm_proc_data therm_data[IXGBE_MAX_SENSORS];
+#endif /* IXGBE_PROCFS */
+#endif /* IXGBE_SYSFS */
+};
+
+struct ixgbe_fdir_filter {
+ struct hlist_node fdir_node;
+ union ixgbe_atr_input filter;
+ u16 sw_idx;
+ u16 action;
+};
+
+enum ixgbe_state_t {
+ __IXGBE_TESTING,
+ __IXGBE_RESETTING,
+ __IXGBE_DOWN,
+ __IXGBE_SERVICE_SCHED,
+ __IXGBE_IN_SFP_INIT,
+};
+
+struct ixgbe_cb {
+#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+ union { /* Union defining head/tail partner */
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ };
+#endif
+ dma_addr_t dma;
+#ifndef IXGBE_NO_LRO
+ __be32 tsecr; /* timestamp echo response */
+ u32 tsval; /* timestamp value in host order */
+ u32 next_seq; /* next expected sequence number */
+ u16 free; /* 65521 minus total size */
+ u16 mss; /* size of data portion of packet */
+#endif /* IXGBE_NO_LRO */
+#ifdef HAVE_VLAN_RX_REGISTER
+ u16 vid; /* VLAN tag */
+#endif
+ u16 append_cnt; /* number of skb's appended */
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+ bool page_released;
+#endif
+};
+#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
+
+#ifdef IXGBE_SYSFS
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
+#endif /* IXGBE_SYSFS */
+#ifdef IXGBE_PROCFS
+void ixgbe_procfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_procfs_init(struct ixgbe_adapter *adapter);
+int ixgbe_procfs_topdir_init(void);
+void ixgbe_procfs_topdir_exit(void);
+#endif /* IXGBE_PROCFS */
+
+extern struct dcbnl_rtnl_ops dcbnl_ops;
+extern int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max);
+
+extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
+
+/* needed by ixgbe_main.c */
+extern int ixgbe_validate_mac_addr(u8 *mc_addr);
+extern void ixgbe_check_options(struct ixgbe_adapter *adapter);
+extern void ixgbe_assign_netdev_ops(struct net_device *netdev);
+
+/* needed by ixgbe_ethtool.c */
+extern char ixgbe_driver_name[];
+extern const char ixgbe_driver_version[];
+
+extern void ixgbe_up(struct ixgbe_adapter *adapter);
+extern void ixgbe_down(struct ixgbe_adapter *adapter);
+extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
+extern void ixgbe_reset(struct ixgbe_adapter *adapter);
+extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
+extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,
+ struct ixgbe_ring *);
+extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,
+ struct ixgbe_ring *);
+extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern bool ixgbe_is_ixgbe(struct pci_dev *pcidev);
+extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
+ struct ixgbe_adapter *,
+ struct ixgbe_ring *);
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+ struct ixgbe_tx_buffer *);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
+extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *);
+extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *);
+extern void ixgbe_set_rx_mode(struct net_device *netdev);
+extern int ixgbe_write_mc_addr_list(struct net_device *netdev);
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+#ifdef IXGBE_FCOE
+extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+#endif /* IXGBE_FCOE */
+extern void ixgbe_do_reset(struct net_device *netdev);
+extern void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *);
+extern void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter);
+extern void ixgbe_vlan_stripping_disable(struct ixgbe_adapter *adapter);
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+#endif
+
+#ifdef IXGBE_FCOE
+extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ u8 *hdr_len);
+extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
+extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */
+extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
+extern int ixgbe_fcoe_enable(struct net_device *netdev);
+extern int ixgbe_fcoe_disable(struct net_device *netdev);
+#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_OPS_GETAPP
+extern u8 ixgbe_fcoe_getapp(struct net_device *netdev);
+#endif /* HAVE_DCBNL_OPS_GETAPP */
+extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
+#endif /* CONFIG_DCB */
+#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
+extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+#endif
+#endif /* IXGBE_FCOE */
+
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_IEEE
+s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame);
+#endif /* HAVE_DCBNL_IEEE */
+#endif /* CONFIG_DCB */
+
+extern void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring);
+extern int ixgbe_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd);
+extern int ixgbe_write_uc_addr_list(struct ixgbe_adapter *adapter,
+ struct net_device *netdev, unsigned int vfn);
+extern void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+extern int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
+extern int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
+extern int ixgbe_available_rars(struct ixgbe_adapter *adapter);
+#ifndef HAVE_VLAN_RX_REGISTER
+extern void ixgbe_vlan_mode(struct net_device *, u32);
+#endif
+#ifndef ixgbe_get_netdev_tc_txq
+#define ixgbe_get_netdev_tc_txq(dev, tc) (&dev->tc_to_txq[tc])
+#endif
+extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
+#endif /* _IXGBE_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c
new file mode 100755
index 00000000..24015844
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c
@@ -0,0 +1,1296 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_82598.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete);
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy);
+
+/**
+ * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for the 82598 should be in the range of 50us to 50ms;
+ * however, the hardware default for these parts is 500us to 1ms, which is
+ * less than the 10ms recommended by the PCIe spec. To address this, the
+ * value is increased to either 10ms to 250ms for capability version 1
+ * config, or 16ms to 55ms for version 2.
+ **/
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
+{
+ u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
+ u16 pcie_devctl2;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /*
+ * if the capabilities version is type 1 we can write the
+ * timeout of 10ms to 250ms through the GCR register
+ */
+ if (!(gcr & IXGBE_GCR_CAP_VER2)) {
+ gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /*
+ * for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+ pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+ pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
+ IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
+}
+
+/**
+ * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for 82598.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ ret_val = ixgbe_init_phy_ops_generic(hw);
+ ret_val = ixgbe_init_ops_generic(hw);
+
+ /* PHY */
+ phy->ops.init = &ixgbe_init_phy_ops_82598;
+
+ /* MAC */
+ mac->ops.start_hw = &ixgbe_start_hw_82598;
+ mac->ops.reset_hw = &ixgbe_reset_hw_82598;
+ mac->ops.get_media_type = &ixgbe_get_media_type_82598;
+ mac->ops.get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_82598;
+ mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
+ mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
+ mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
+ mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
+ mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+ mac->ops.set_vlvf = NULL;
+ mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
+
+ /* Flow Control */
+ mac->ops.fc_enable = &ixgbe_fc_enable_82598;
+
+ mac->mcft_size = 128;
+ mac->vft_size = 128;
+ mac->num_rar_entries = 16;
+ mac->rx_pb_size = 512;
+ mac->max_tx_queues = 32;
+ mac->max_rx_queues = 64;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ /* SFP+ Module */
+ phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
+
+ /* Link */
+ mac->ops.check_link = &ixgbe_check_mac_link_82598;
+ mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+ mac->ops.flap_tx_laser = NULL;
+ mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
+ mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = NULL;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 list_offset, data_offset;
+
+ /* Identify the PHY */
+ phy->ops.identify(hw);
+
+ /* Overwrite the link function pointers if copper PHY */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_generic;
+ }
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
+ phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.get_firmware_version =
+ &ixgbe_get_phy_firmware_version_tnx;
+ break;
+ case ixgbe_phy_nl:
+ phy->ops.reset = &ixgbe_reset_phy_nl;
+
+ /* Call SFP+ identify routine to get the SFP+ module type */
+ ret_val = phy->ops.identify_sfp(hw);
+ if (ret_val != 0)
+ goto out;
+ else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
+ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Check to see if SFP+ module is supported */
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
+ &list_offset,
+ &data_offset);
+ if (ret_val != 0) {
+ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function.
+ * Disables relaxed ordering, then sets the PCIe completion timeout.
+ *
+ **/
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+{
+ u32 regval;
+ u32 i;
+ s32 ret_val = 0;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+
+ /* Disable relaxed ordering */
+ for (i = 0; ((i < hw->mac.max_tx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+ }
+
+ for (i = 0; ((i < hw->mac.max_rx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ /* set the completion timeout for interface */
+ if (ret_val == 0)
+ ixgbe_set_pcie_completion_timeout(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_82598 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = 0;
+ u32 autoc = 0;
+
+ /*
+ * Determine link capabilities based on the stored value of AUTOC,
+ * which represents EEPROM defaults. If AUTOC value has not been
+ * stored, use the current register value.
+ */
+ if (hw->mac.orig_link_settings_stored)
+ autoc = hw->mac.orig_autoc;
+ else
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_1G_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_AN:
+ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ default:
+ status = IXGBE_ERR_LINK_SETUP;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_media_type_82598 - Determines media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ media_type = ixgbe_media_type_copper;
+ goto out;
+ default:
+ break;
+ }
+
+ /* Media type for I82598 is based on device ID */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ case IXGBE_DEV_ID_82598_BX:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ media_type = ixgbe_media_type_cx4;
+ break;
+ case IXGBE_DEV_ID_82598AT:
+ case IXGBE_DEV_ID_82598AT2:
+ media_type = ixgbe_media_type_copper;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+out:
+ return media_type;
+}
+
+/**
+ * ixgbe_fc_enable_82598 - Enable flow control
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to the current settings.
+ **/
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 fctrl_reg;
+ u32 rmcs_reg;
+ u32 reg;
+ u32 fcrtl, fcrth;
+ u32 link_speed = 0;
+ int i;
+ bool link_up;
+
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * On 82598, having Rx flow control enabled causes resets while
+ * operating at 1G, so if it is on, turn it off once link_speed is
+ * known. For more details see the 82598 Specification Update.
+ */
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ hw->fc.requested_mode = ixgbe_fc_tx_pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ hw->fc.requested_mode = ixgbe_fc_none;
+ break;
+ default:
+ /* no change */
+ break;
+ }
+ }
+
+ /* Negotiate the fc mode to use */
+ ixgbe_fc_autoneg(hw);
+
+ /* Disable any previous flow control settings */
+ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+
+ rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+
+ /*
+ * The possible values of fc.current_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.current_mode) {
+ case ixgbe_fc_none:
+ /*
+ * Flow control is disabled by software override or autoneg.
+ * The code below will actually disable it in the HW.
+ */
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
+ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+ break;
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ fctrl_reg |= IXGBE_FCTRL_DPF;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+ }
+
+ }
+
+ /* Configure pause time (2 TCs per register) */
+ reg = hw->fc.pause_time * 0x00010001;
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_start_mac_link_82598 - Configures MAC link settings
+ * @hw: pointer to hardware structure
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ **/
+static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
+{
+ u32 autoc_reg;
+ u32 links_reg;
+ u32 i;
+ s32 status = 0;
+
+ /* Restart link */
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_AN ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ links_reg = 0; /* Just in case Autoneg time = 0 */
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msleep(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ hw_dbg(hw, "Autonegotiation did not complete.\n");
+ }
+ }
+ }
+
+ /* Add delay to filter out noise during initial link setup */
+ msleep(50);
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_link_ready - Function looks for phy link
+ * @hw: pointer to hardware structure
+ *
+ * Function indicates success when the phy link is available. If the phy is
+ * not ready within 5 seconds of the MAC indicating link, it returns an error.
+ **/
+static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+ u32 timeout;
+ u16 an_reg;
+
+ if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+ return 0;
+
+ for (timeout = 0;
+ timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
+
+ if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
+ (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
+ break;
+
+ msleep(100);
+ }
+
+ if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+ hw_dbg(hw, "Link was indicated but link is down\n");
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_check_mac_link_82598 - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true is link is up, false otherwise
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete)
+{
+ u32 links_reg;
+ u32 i;
+ u16 link_reg, adapt_comp_reg;
+
+ /*
+ * SERDES PHY requires us to read link status from undocumented
+ * register 0xC79F. Bit 0 set indicates link is up/ready; clear
+ * indicates link down. 0xC00C is read to check that the XAUI lanes
+ * are active. Bit 0 clear indicates active; set indicates inactive.
+ */
+ if (hw->phy.type == ixgbe_phy_nl) {
+ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
+ &adapt_comp_reg);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if ((link_reg & 1) &&
+ ((adapt_comp_reg & 1) == 0)) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msleep(100);
+ hw->phy.ops.read_reg(hw, 0xC79F,
+ IXGBE_TWINAX_DEV,
+ &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC00C,
+ IXGBE_TWINAX_DEV,
+ &adapt_comp_reg);
+ }
+ } else {
+ if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if (*link_up == false)
+ goto out;
+ }
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msleep(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if (links_reg & IXGBE_LINKS_SPEED)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
+ (ixgbe_validate_link_ready(hw) != 0))
+ *link_up = false;
+
+out:
+ return 0;
+}
+
+/**
+ * ixgbe_setup_mac_link_82598 - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc = curr_autoc;
+ u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+
+ /* Check to see if speed passed in is supported. */
+ ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+ speed &= link_capabilities;
+
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+ status = IXGBE_ERR_LINK_SETUP;
+
+ /* Set KX4/KX support according to speed requested */
+ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ autoc |= IXGBE_AUTOC_KX4_SUPP;
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ autoc |= IXGBE_AUTOC_KX_SUPP;
+ if (autoc != curr_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ }
+
+ if (status == 0) {
+ /*
+ * Setup and restart the link based on the new values in
+ * ixgbe_hw This will write the AUTOC register based on the new
+ * stored values
+ */
+ status = ixgbe_start_mac_link_82598(hw,
+ autoneg_wait_to_complete);
+ }
+
+ return status;
+}
+
+
+/**
+ * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ * Sets the link speed in the AUTOC register in the MAC and restarts link.
+ **/
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+
+ /* Setup the PHY according to input speed */
+ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ /* Set up MAC */
+ ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_82598 - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts, performing a PHY reset, and performing a link (MAC)
+ * reset.
+ **/
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ s32 phy_status = 0;
+ u32 ctrl;
+ u32 gheccr;
+ u32 i;
+ u32 autoc;
+ u8 analog_val;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != 0)
+ goto reset_hw_out;
+
+ /*
+ * Power up the Atlas Tx lanes if they are currently powered down.
+ * Atlas Tx lanes are powered down for MAC loopback tests, but
+ * they are not automatically restored on reset.
+ */
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+ if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
+ /* Enable Tx Atlas so packets can be transmitted again */
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+ analog_val);
+ }
+
+ /* Reset PHY */
+ if (hw->phy.reset_disable == false) {
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Init PHY and function pointers, perform SFP setup */
+ phy_status = hw->phy.ops.init(hw);
+ if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+ if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto mac_reset_top;
+
+ hw->phy.ops.reset(hw);
+ }
+
+mac_reset_top:
+ /*
+ * Issue global reset to the MAC. This needs to be a SW reset.
+ * If link reset is used, it might reset the MAC when mng is using it
+ */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST))
+ break;
+ }
+ if (ctrl & IXGBE_CTRL_RST) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+ msleep(50);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
+ gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+ IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
+
+ /*
+ * Store the original AUTOC value if it has not been
+ * stored off yet. Otherwise restore the stored original
+ * AUTOC value since the reset operation sets it back to defaults.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_link_settings_stored = true;
+ } else if (autoc != hw->mac.orig_autoc) {
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table
+ */
+ hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+ if (phy_status != 0)
+ status = phy_status;
+
+ return status;
+}
+
+/**
+ * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq set index
+ **/
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+ return 0;
+}
+
+/**
+ * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
+ **/
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ if (rar_high & IXGBE_RAH_VIND_MASK) {
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_vfta_82598 - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFTA
+ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ u32 regindex;
+ u32 bitindex;
+ u32 bits;
+ u32 vftabyte;
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /* Determine 32-bit word position in array */
+ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
+
+ /* Determine the location of the (VMD) queue index */
+ vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+ bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
+
+ /* Set the nibble for VMD queue index */
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
+ bits &= (~(0x0F << bitindex));
+ bits |= (vind << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
+
+ /* Determine the location of the bit for this VLAN id */
+ bitindex = vlan & 0x1F; /* lower five bits */
+
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+ if (vlan_on)
+ /* Turn on this VLAN id */
+ bits |= (1 << bitindex);
+ else
+ /* Turn off this VLAN id */
+ bits &= ~(1 << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+
+ return 0;
+}
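+/*
+ * Worked example (added for clarity, not in the upstream sources): for
+ * VLAN id 100, regindex = 100 >> 5 = 3 and bitindex = 100 & 0x1F = 4, so
+ * bit 4 of VFTA[3] controls the filter. The VMDq nibble lands in
+ * VFTAVIND(byte (100 >> 3) & 0x3 = 0, reg 3) at nibble offset
+ * (100 & 0x7) << 2 = 16.
+ */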
+
+/**
+ * ixgbe_clear_vfta_82598 - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+{
+ u32 offset;
+ u32 vlanbyte;
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+ 0);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to Atlas analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ u32 atlas_ctl;
+
+ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(10);
+ atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+ *val = (u8)atlas_ctl;
+
+ return 0;
+}
+
+/**
+ * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: atlas register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ u32 atlas_ctl;
+
+ atlas_ctl = (reg << 8) | val;
+ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(10);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs an 8 bit read operation to the SFP module's EEPROM over the I2C
+ * interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ s32 status = 0;
+ u16 sfp_addr = 0;
+ u16 sfp_data = 0;
+ u16 sfp_stat = 0;
+ u32 i;
+
+ if (hw->phy.type == ixgbe_phy_nl) {
+ /*
+ * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
+ * 0xC30D. These registers are used to talk to the SFP+
+ * module's EEPROM through the SDA/SCL (I2C) interface.
+ */
+ sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+ sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
+ hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ sfp_addr);
+
+ /* Poll status */
+ for (i = 0; i < 100; i++) {
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &sfp_stat);
+ sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
+ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
+ break;
+ msleep(10);
+ }
+
+ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
+ hw_dbg(hw, "EEPROM read did not pass.\n");
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ /* Read data */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+
+ *eeprom_data = (u8)(sfp_data >> 8);
+ } else {
+ status = IXGBE_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u16 ext_ability = 0;
+
+ hw->phy.ops.identify(hw);
+
+ /* Copper PHY must be checked before AUTOC LMS to determine correct
+ * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_cu_unknown:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_AN:
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ else
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+ break;
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else /* XAUI */
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ break;
+ case IXGBE_AUTOC_LMS_KX4_AN:
+ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->phy.type == ixgbe_phy_nl) {
+ hw->phy.ops.identify_sfp(hw);
+
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_da_cu:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case ixgbe_sfp_type_sr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ break;
+ case ixgbe_sfp_type_lr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ break;
+ }
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ break;
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ break;
+ }
+
+out:
+ return physical_layer;
+}
+
+/**
+ * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
+ * port devices.
+ * @hw: pointer to the HW structure
+ *
+ * Calls the common function and corrects an issue with some single-port
+ * devices that enable LAN1 but not LAN0.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u16 pci_gen = 0;
+ u16 pci_ctrl2 = 0;
+
+ ixgbe_set_lan_id_multi_port_pcie(hw);
+
+ /* check if LAN0 is disabled */
+ hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
+ if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
+
+ hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
+
+ /* if LAN0 is completely disabled force function to 0 */
+ if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
+
+ bus->func = 0;
+ }
+ }
+}
+
+/**
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy)
+{
+ u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
+ u8 i = 0;
+
+ if (!num_pb)
+ return;
+
+ /* Setup Rx packet buffer sizes */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* Setup the first four at 80KB */
+ rxpktsize = IXGBE_RXPBSIZE_80KB;
+ for (; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Setup the last four at 48KB...don't re-init i */
+ rxpktsize = IXGBE_RXPBSIZE_48KB;
+ /* Fall Through */
+ case PBA_STRATEGY_EQUAL:
+ default:
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ }
+
+ /* Setup Tx packet buffer sizes */
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
+
+ return;
+}
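+
+/*
+ * Illustrative arithmetic, not part of the original driver sources: assuming
+ * IXGBE_MAX_PACKET_BUFFERS is 8 for this device (the define is not shown in
+ * this file), PBA_STRATEGY_WEIGHTED gives the first four Rx buffers 80KB each
+ * and the remaining four 48KB each, i.e. 4*80 + 4*48 = 512KB of Rx packet
+ * buffer in total, while every Tx buffer is programmed to 40KB.
+ */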
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h
new file mode 100755
index 00000000..c6abb020
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h
@@ -0,0 +1,44 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_82598_H_
+#define _IXGBE_82598_H_
+
+u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
+#endif /* _IXGBE_82598_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c
new file mode 100755
index 00000000..1ad4b769
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c
@@ -0,0 +1,2314 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_82599.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+ u16 offset, u16 *data);
+static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ /* enable the laser control functions for SFP+ fiber */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+ mac->ops.disable_tx_laser =
+ &ixgbe_disable_tx_laser_multispeed_fiber;
+ mac->ops.enable_tx_laser =
+ &ixgbe_enable_tx_laser_multispeed_fiber;
+ mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
+
+ } else {
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+ /* Set up dual speed SFP+ support */
+ mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+ } else {
+ if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
+ (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+ hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+ !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
+ } else {
+ mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+ }
+ }
+}
+
+/**
+ * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u32 esdp;
+
+ if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
+ /* Store flag indicating I2C bus access control unit. */
+ hw->phy.qsfp_shared_i2c_bus = TRUE;
+
+ /* Initialize access to QSFP+ I2C bus */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0_DIR;
+ esdp &= ~IXGBE_ESDP_SDP1_DIR;
+ esdp &= ~IXGBE_ESDP_SDP0;
+ esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
+ esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
+ phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
+ }
+ /* Identify the PHY or SFP module */
+ ret_val = phy->ops.identify(hw);
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto init_phy_ops_out;
+
+ /* Setup function pointers based on detected SFP module and speeds */
+ ixgbe_init_mac_link_ops_82599(hw);
+ if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
+ hw->phy.ops.reset = NULL;
+
+ /* If copper media, overwrite with copper function pointers */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_generic;
+ }
+
+ /* Set necessary function pointers based on phy type */
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
+ phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.get_firmware_version =
+ &ixgbe_get_phy_firmware_version_tnx;
+ break;
+ default:
+ break;
+ }
+init_phy_ops_out:
+ return ret_val;
+}
+
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 reg_anlp1 = 0;
+ u32 i = 0;
+ u16 list_offset, data_offset, data_value;
+
+ if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
+ ixgbe_init_mac_link_ops_82599(hw);
+
+ hw->phy.ops.reset = NULL;
+
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+ &data_offset);
+ if (ret_val != 0)
+ goto setup_sfp_out;
+
+ /* PHY config will finish before releasing the semaphore */
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != 0) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto setup_sfp_out;
+ }
+
+ hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+ while (data_value != 0xffff) {
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+ }
+
+ /* Release the semaphore */
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /* Delay obtaining semaphore again to allow FW access */
+ msleep(hw->eeprom.semaphore_delay);
+
+ /* Now restart DSP by setting Restart_AN and clearing LMS */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
+ IXGBE_AUTOC_AN_RESTART));
+
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ msleep(4);
+ reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+ if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+ hw_dbg(hw, "sfp module setup not complete\n");
+ ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ goto setup_sfp_out;
+ }
+
+ /* Restart DSP by setting Restart_AN and return to SFI mode */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
+ IXGBE_AUTOC_AN_RESTART));
+ }
+
+setup_sfp_out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for 82599.
+ * Does not touch the hardware.
+ **/
+
+s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val;
+
+ ixgbe_init_phy_ops_generic(hw);
+ ret_val = ixgbe_init_ops_generic(hw);
+
+ /* PHY */
+ phy->ops.identify = &ixgbe_identify_phy_82599;
+ phy->ops.init = &ixgbe_init_phy_ops_82599;
+
+ /* MAC */
+ mac->ops.reset_hw = &ixgbe_reset_hw_82599;
+ mac->ops.get_media_type = &ixgbe_get_media_type_82599;
+ mac->ops.get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_82599;
+ mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
+ mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
+ mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
+ mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
+ mac->ops.start_hw = &ixgbe_start_hw_82599;
+ mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
+ mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
+ mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
+ mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+ mac->rar_highwater = 1;
+ mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
+ mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
+ mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
+ mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
+ mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+
+ /* Link */
+ mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
+ mac->ops.check_link = &ixgbe_check_mac_link_generic;
+ mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
+ ixgbe_init_mac_link_ops_82599(hw);
+
+ mac->mcft_size = 128;
+ mac->vft_size = 128;
+ mac->num_rar_entries = 128;
+ mac->rx_pb_size = 512;
+ mac->max_tx_queues = 128;
+ mac->max_rx_queues = 128;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+ IXGBE_FWSM_MODE_MASK) ? true : false;
+
+ //hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+
+ /* EEPROM */
+ eeprom->ops.read = &ixgbe_read_eeprom_82599;
+ eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+
+ mac->ops.get_thermal_sensor_data =
+ &ixgbe_get_thermal_sensor_data_generic;
+ mac->ops.init_thermal_sensor_thresh =
+ &ixgbe_init_thermal_sensor_thresh_generic;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_82599 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @negotiation: true when autoneg or autotry is enabled
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *negotiation)
+{
+ s32 status = 0;
+ u32 autoc = 0;
+
+ /* Check if 1G SFP module. */
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = true;
+ goto out;
+ }
+
+ /*
+ * Determine link capabilities based on the stored value of AUTOC,
+ * which represents EEPROM defaults. If AUTOC value has not
+ * been stored, use the current register values.
+ */
+ if (hw->mac.orig_link_settings_stored)
+ autoc = hw->mac.orig_autoc;
+ else
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *negotiation = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_1G_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_SERIAL:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *negotiation = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_KX_KR:
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_SGMII_1G_100M:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
+ *negotiation = false;
+ break;
+
+ default:
+ status = IXGBE_ERR_LINK_SETUP;
+ goto out;
+ break;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = true;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_get_media_type_82599 - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ media_type = ixgbe_media_type_copper;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599EN_SFP:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_82599_CX4:
+ media_type = ixgbe_media_type_cx4;
+ break;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ media_type = ixgbe_media_type_copper;
+ break;
+ case IXGBE_DEV_ID_82599_LS:
+ media_type = ixgbe_media_type_fiber_lco;
+ break;
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ media_type = ixgbe_media_type_fiber_qsfp;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+out:
+ return media_type;
+}
+
+/**
+ * ixgbe_start_mac_link_82599 - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ **/
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
+{
+ u32 autoc_reg;
+ u32 links_reg = 0;
+ u32 i;
+ s32 status = 0;
+
+ /* Restart link */
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msleep(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ hw_dbg(hw, "Autoneg did not complete.\n");
+ }
+ }
+ }
+
+ /* Add delay to filter out noise during initial link setup */
+ msleep(50);
+
+ return status;
+}
+
+/**
+ * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively shutting down the Tx
+ * laser on the PHY, effectively halting physical link.
+ **/
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Disable tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(100);
+}
+
+/**
+ * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively turning on the Tx
+ * laser on the PHY, effectively starting physical link.
+ **/
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Enable tx laser; allow 100ms to light up */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(100);
+}
+
+/**
+ * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support,
+ * it sets autotry_restart to true to indicate that we need to
+ * initiate a new autotry session with the link partner. To do
+ * so, we set the speed then disable and re-enable the tx laser, to
+ * alert the link partner that it also needs to restart autotry on its
+ * end. This is consistent with true clause 37 autoneg, which also
+ * involves a loss of signal.
+ **/
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ if (hw->mac.autotry_restart) {
+ ixgbe_disable_tx_laser_multispeed_fiber(hw);
+ ixgbe_enable_tx_laser_multispeed_fiber(hw);
+ hw->mac.autotry_restart = false;
+ }
+}
+
+/**
+ * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 speedcnt = 0;
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ u32 i = 0;
+ bool link_up = false;
+ bool negotiation;
+
+ /* Mask off requested but non-supported speeds */
+ status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
+ if (status != 0)
+ return status;
+
+ speed &= link_speed;
+
+ /*
+ * Try each speed one by one, highest priority first. We do this in
+ * software because 10gb fiber doesn't support speed autonegotiation.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ speedcnt++;
+ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != 0)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Allow module to change analog characteristics (1G->10G) */
+ msleep(40);
+
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ return status;
+
+ /* Flap the tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msleep(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != 0)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ speedcnt++;
+ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != 0)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Allow module to change analog characteristics (10G->1G) */
+ msleep(40);
+
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ return status;
+
+ /* Flap the tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /* Wait for the link partner to also set speed */
+ msleep(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != 0)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+
+ /*
+ * We didn't get link. Configure back to the highest speed we tried
+ * (if there was more than one). We call ourselves back with just the
+ * single highest speed that the user requested.
+ */
+ if (speedcnt > 1)
+ status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+ highest_link_speed, autoneg, autoneg_wait_to_complete);
+
+out:
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Implements the Intel SmartSpeed algorithm.
+ **/
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ s32 i, j;
+ bool link_up = false;
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ /*
+ * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
+ * autoneg advertisement if link is unable to be established at the
+ * highest negotiated rate. This can sometimes happen due to integrity
+ * issues with the physical media connection.
+ */
+
+ /* First, try to get link with full advertisement */
+ hw->phy.smart_speed_active = false;
+ for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
+ * Table 9 in the AN MAS.
+ */
+ for (i = 0; i < 5; i++) {
+ msleep(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up,
+ false);
+ if (status != 0)
+ goto out;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ /*
+ * We didn't get link. If we advertised KR plus one of KX4/KX
+ * (or BX4/BX), then disable KR and try again.
+ */
+ if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
+ ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
+ goto out;
+
+ /* Turn SmartSpeed on to disable KR support */
+ hw->phy.smart_speed_active = true;
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. 600ms will allow for
+ * the AN link_fail_inhibit_timer as well as for multiple cycles of
+ * parallel detect, both 10g and 1g. This allows for the maximum
+ * connect attempts as defined in the AN MAS table 73-7.
+ */
+ for (i = 0; i < 6; i++) {
+ msleep(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != 0)
+ goto out;
+
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Turn SmartSpeed back off. */
+ hw->phy.smart_speed_active = false;
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+
+out:
+ if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+ hw_dbg(hw, "Smartspeed has downgraded the link speed "
+ "from the maximum advertised\n");
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_82599 - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ u32 start_autoc = autoc;
+ u32 orig_autoc = 0;
+ u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+ u32 links_reg = 0;
+ u32 i;
+ ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+
+ /* Check to see if speed passed in is supported. */
+ status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+ if (status != 0)
+ goto out;
+
+ speed &= link_capabilities;
+
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
+ status = IXGBE_ERR_LINK_SETUP;
+ goto out;
+ }
+
+ /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
+ if (hw->mac.orig_link_settings_stored)
+ orig_autoc = hw->mac.orig_autoc;
+ else
+ orig_autoc = autoc;
+
+ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ /* Set KX4/KX/KR support according to speed requested */
+ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+ autoc |= IXGBE_AUTOC_KX4_SUPP;
+ if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
+ (hw->phy.smart_speed_active == false))
+ autoc |= IXGBE_AUTOC_KR_SUPP;
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ autoc |= IXGBE_AUTOC_KX_SUPP;
+ } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
+ (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+ link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+ /* Switch from 1G SFI to 10G SFI if requested */
+ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
+ (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
+ autoc &= ~IXGBE_AUTOC_LMS_MASK;
+ autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
+ }
+ } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
+ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+ /* Switch from 10G SFI to 1G SFI if requested */
+ if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+ (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
+ autoc &= ~IXGBE_AUTOC_LMS_MASK;
+ if (autoneg)
+ autoc |= IXGBE_AUTOC_LMS_1G_AN;
+ else
+ autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
+ }
+ }
+
+ if (autoc != start_autoc) {
+ /* Restart link */
+ autoc |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg =
+ IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msleep(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status =
+ IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ hw_dbg(hw, "Autoneg did not complete.\n");
+ }
+ }
+ }
+
+ /* Add delay to filter out noise during initial link setup */
+ msleep(50);
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ * Restarts link on PHY and MAC based on settings passed in.
+ **/
+static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+
+ /* Setup the PHY according to input speed */
+ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ /* Set up MAC */
+ ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_82599 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
+ * reset.
+ **/
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+{
+// ixgbe_link_speed link_speed;
+ s32 status = 0;
+// u32 ctrl, i, autoc, autoc2;
+// bool link_up = false;
+
+#if 0
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != 0)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Identify PHY and related function pointers */
+ status = hw->phy.ops.init(hw);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+
+ /* Setup SFP module if there is one present. */
+ if (hw->phy.sfp_setup_needed) {
+ status = hw->mac.ops.setup_sfp(hw);
+ hw->phy.sfp_setup_needed = false;
+ }
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+
+ /* Reset PHY */
+ if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
+ hw->phy.ops.reset(hw);
+
+mac_reset_top:
+ /*
+ * Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ ctrl = IXGBE_CTRL_LNK_RST;
+ if (!hw->force_full_reset) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up)
+ ctrl = IXGBE_CTRL_RST;
+ }
+
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+ msleep(50);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /*
+ * Store the original AUTOC/AUTOC2 values if they have not been
+ * stored off yet. Otherwise restore the stored original
+ * values since the reset operation sets back to defaults.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_autoc2 = autoc2;
+ hw->mac.orig_link_settings_stored = true;
+ } else {
+ if (autoc != hw->mac.orig_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
+ IXGBE_AUTOC_AN_RESTART));
+
+ if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
+ (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
+ autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
+ autoc2 |= (hw->mac.orig_autoc2 &
+ IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ }
+ }
+#endif
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+//reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+{
+ int i;
+ u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
+
+ /*
+ * Before starting reinitialization process,
+ * FDIRCMD.CMD must be zero.
+ */
+ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+ if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ IXGBE_FDIRCMD_CMD_MASK))
+ break;
+ udelay(10);
+ }
+ if (i >= IXGBE_FDIRCMD_CMD_POLL) {
+ hw_dbg(hw, "Flow Director previous command isn't complete, "
+ "aborting table re-initialization.\n");
+ return IXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
+ IXGBE_WRITE_FLUSH(hw);
+ /*
+ * The 82599 adapter's flow director init flow cannot be restarted.
+ * Work around the 82599 silicon errata by performing the following
+ * steps before re-writing the FDIRCTRL control register with the same
+ * value:
+ * - write 1 to bit 8 of FDIRCMD register &
+ * - write 0 to bit 8 of FDIRCMD register
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ /*
+ * Clear FDIR Hash register to clear any leftover hashes
+ * waiting to be programmed.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll init-done after we write FDIRCTRL register */
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ udelay(10);
+ }
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+ hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
+ return IXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ /* Clear FDIR statistics registers (read to clear) */
+ IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+
+ return 0;
+}
+
+/**
+ * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ int i;
+
+ /* Prime the keys for hashing */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiply these timings by 4 if under full Rx load
+ *
+ * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msleep(1);
+ }
+
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+ hw_dbg(hw, "Flow Director poll time exceeded!\n");
+}
+
+/**
+ * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Turn perfect match filtering on
+ * Report hash in RSS field of Rx wb descriptor
+ * Initialize the drop queue
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 (0x4 * 16) filters are left
+ */
+ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
+ IXGBE_FDIRCTRL_REPORT_STATUS |
+ (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
+ (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+ return 0;
+}
+
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+ (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ common_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ sig_hash ^= lo_hash_dword << (16 - n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0);
+
+/**
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @input: unique input dword
+ * @common: compressed common input dword
+ *
+ * This function is almost identical to the function above but contains
+ * several optimizations such as unwinding all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
+ **/
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common)
+{
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = IXGBE_NTOHL(input.dword);
+
+ /* generate common hash dword */
+ hi_hash_dword = IXGBE_NTOHL(common.dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to the lo hash dword; we had to
+ * delay this because bit 0 of the stream should be processed without
+ * the VLAN bits, so we do not apply them until after bit 0 has been
+ * processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+ sig_hash ^= common_hash << 16;
+ sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+ /* return completed signature hash */
+ return sig_hash ^ bucket_hash;
+}
+
+/**
+ * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @common: compressed common input dword
+ * @queue: queue index to direct traffic to
+ **/
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue)
+{
+ u64 fdirhashcmd;
+ u32 fdircmd;
+
+ /*
+ * Get the flow_type in order to program FDIRCMD properly
+ * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+ */
+ switch (input.formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TCPV6:
+ case IXGBE_ATR_FLOW_TYPE_UDPV6:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV6:
+ break;
+ default:
+ hw_dbg(hw, " Error on flow type input\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+ /*
+ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+ * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ */
+ fdirhashcmd = (u64)fdircmd << 32;
+ fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
+ IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+
+ hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
+ return 0;
+}
+
+#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+} while (0);
+
+/**
+ * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
+ * @atr_input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes. First it applies the input_mask
+ * to the atr_input, resulting in a cleaned-up atr_input data stream.
+ * Secondly it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *input_mask)
+{
+
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 bucket_hash = 0;
+
+ /* Apply masks to input data */
+ input->dword_stream[0] &= input_mask->dword_stream[0];
+ input->dword_stream[1] &= input_mask->dword_stream[1];
+ input->dword_stream[2] &= input_mask->dword_stream[2];
+ input->dword_stream[3] &= input_mask->dword_stream[3];
+ input->dword_stream[4] &= input_mask->dword_stream[4];
+ input->dword_stream[5] &= input_mask->dword_stream[5];
+ input->dword_stream[6] &= input_mask->dword_stream[6];
+ input->dword_stream[7] &= input_mask->dword_stream[7];
+ input->dword_stream[8] &= input_mask->dword_stream[8];
+ input->dword_stream[9] &= input_mask->dword_stream[9];
+ input->dword_stream[10] &= input_mask->dword_stream[10];
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
+
+ /* generate common hash dword */
+ hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
+ input->dword_stream[2] ^
+ input->dword_stream[3] ^
+ input->dword_stream[4] ^
+ input->dword_stream[5] ^
+ input->dword_stream[6] ^
+ input->dword_stream[7] ^
+ input->dword_stream[8] ^
+ input->dword_stream[9] ^
+ input->dword_stream[10]);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to the lo hash dword; we had to
+ * delay this because bit 0 of the stream should be processed without
+ * the VLAN bits, so we do not apply them until after bit 0 has been
+ * processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+
+ /*
+ * Limit hash to 13 bits since max bucket count is 8K.
+ * Store result at the end of the input stream.
+ */
+ input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
+/**
+ * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects bit 2,
+ * and so on. In order to generate a correctly swapped value we need to bit
+ * swap the mask, and that is what this function accomplishes.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
+{
+ u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
+ mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+ mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
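+
+/*
+ * Worked example (illustrative only, not part of the original driver): if the
+ * combined 32-bit mask is 0x00008000 before the swap stages above, the 1-, 2-
+ * and 4-bit swaps reverse the bits within each byte and the final byte swap
+ * reverses the bytes within each 16-bit half, so each half-word ends up fully
+ * bit-reversed and the function returns 0x00000001.
+ */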
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian. As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+ IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
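+
+/*
+ * Worked example (illustrative only, not part of the original driver):
+ * IXGBE_STORE_AS_BE32 reverses the byte order of a 32-bit value, so
+ * IXGBE_STORE_AS_BE32(0x12345678) evaluates to 0x78563412.
+ */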
+
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask)
+{
+ /* mask IPv6 since it is currently not supported */
+ u32 fdirm = IXGBE_FDIRM_DIPv6;
+ u32 fdirtcpm;
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->formatted.bkt_hash)
+ hw_dbg(hw, " bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRM and verify partial masks */
+ switch (input_mask->formatted.vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_POOL;
+ case 0x7F:
+ break;
+ default:
+ hw_dbg(hw, " Error on vm pool mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_L4P;
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ hw_dbg(hw, " Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ case IXGBE_ATR_L4TYPE_MASK:
+ break;
+ default:
+ hw_dbg(hw, " Error on flow type mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
+ case 0x0000:
+ /* mask VLAN ID, fall through to mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0x0FFF:
+ /* mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ break;
+ case 0xE000:
+ /* mask VLAN ID only, fall through */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0xEFFF:
+ /* no VLAN fields masked */
+ break;
+ default:
+ hw_dbg(hw, " Error on VLAN mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+ case 0x0000:
+ /* Mask Flex Bytes, fall through */
+ fdirm |= IXGBE_FDIRM_FLEX;
+ case 0xFFFF:
+ break;
+ default:
+ hw_dbg(hw, " Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+ /* store source and destination IP masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ ~input_mask->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ ~input_mask->formatted.dst_ip[0]);
+
+ return 0;
+}
+
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue)
+{
+ u32 fdirport, fdirvlan, fdirhash, fdircmd;
+
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+ input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+ input->formatted.src_ip[1]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.src_ip[2]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+ /* record source and destination port (little-endian)*/
+ fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+ /* record vlan (little-endian) and flex_bytes(big-endian) */
+ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ if (queue == IXGBE_FDIR_DROP_QUEUE)
+ fdircmd |= IXGBE_FDIRCMD_DROP;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+ return 0;
+}
+
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id)
+{
+ u32 fdirhash;
+ u32 fdircmd = 0;
+ u32 retry_count;
+ s32 err = 0;
+
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /* flush hash to HW */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Query if filter is present */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+ for (retry_count = 10; retry_count; retry_count--) {
+ /* allow 10us for query to process */
+ udelay(10);
+ /* verify query completed successfully */
+ fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ break;
+ }
+
+ if (!retry_count)
+ err = IXGBE_ERR_FDIR_REINIT_FAILED;
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
+ * @hw: pointer to hardware structure
+ * @input: input bitstream
+ * @input_mask: mask for the input bitstream
+ * @soft_id: software index for the filters
+ * @queue: queue index to direct traffic to
+ *
+ * Note that the caller to this function must lock before calling, since the
+ * hardware writes must be protected from one another.
+ **/
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ union ixgbe_atr_input *input_mask,
+ u16 soft_id, u8 queue)
+{
+ s32 err = IXGBE_ERR_CONFIG;
+
+ /*
+ * Check flow_type formatting, and bail out before we touch the hardware
+ * if there's a configuration issue
+ */
+ switch (input->formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
+ if (input->formatted.dst_port || input->formatted.src_port) {
+ hw_dbg(hw, " Error on src/dst port\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ break;
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ if (input->formatted.dst_port || input->formatted.src_port) {
+ hw_dbg(hw, " Error on src/dst port\n");
+ return IXGBE_ERR_CONFIG;
+ }
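+ /* fall through */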
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+ break;
+ default:
+ hw_dbg(hw, " Error on flow type input\n");
+ return err;
+ }
+
+ /* program input mask into the HW */
+ err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
+ if (err)
+ return err;
+
+ /* apply mask and compute/store hash */
+ ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
+
+ /* program filters to filter memory */
+ return ixgbe_fdir_write_perfect_filter_82599(hw, input,
+ soft_id, queue);
+}
+
+/**
+ * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to Omer analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ u32 core_ctl;
+
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
+ (reg << 8));
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(10);
+ core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
+ *val = (u8)core_ctl;
+
+ return 0;
+}
+
+/**
+ * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to write
+ * @val: value to write
+ *
+ * Performs write operation to Omer analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ u32 core_ctl;
+
+ core_ctl = (reg << 8) | val;
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(10);
+
+ return 0;
+}
+
+/**
+ * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function and the
+ * generation 2 (gen2) start_hw function, then performs revision-specific
+ * operations, if any.
+ **/
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != 0)
+ goto out;
+
+ ret_val = ixgbe_start_hw_gen2(hw);
+ if (ret_val != 0)
+ goto out;
+
+ /* We need to run link autotry after the driver loads */
+ hw->mac.autotry_restart = true;
+
+ if (ret_val == 0)
+ ret_val = ixgbe_verify_fw_version_82599(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_82599 - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ * If PHY already detected, maintains current PHY type in hw struct,
+ * otherwise executes the PHY detection routine.
+ **/
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+
+ /* Detect PHY if it is not already known - returns success if already detected. */
+ status = ixgbe_identify_phy_generic(hw);
+ if (status != 0) {
+ /* 82599 10GBASE-T requires an external PHY */
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+ goto out;
+ else
+ status = ixgbe_identify_module_generic(hw);
+ }
+
+ /* Set PHY type none if no PHY detected */
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.type = ixgbe_phy_none;
+ status = 0;
+ }
+
+ /* Return error if SFP module has been detected but is not supported */
+ if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+ u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u16 ext_ability = 0;
+ u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
+
+ hw->phy.ops.identify(hw);
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_cu_unknown:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_AN:
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
+ IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+ goto out;
+ } else
+ /* SFI mode so read SFP module */
+ goto sfp_check;
+ break;
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
+ goto out;
+ break;
+ case IXGBE_AUTOC_LMS_10G_SERIAL:
+ if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ goto out;
+ } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
+ goto sfp_check;
+ break;
+ case IXGBE_AUTOC_LMS_KX4_KX_KR:
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ goto out;
+ break;
+ default:
+ goto out;
+ break;
+ }
+
+sfp_check:
+ /* SFP check must be done last since DA modules are sometimes used to
+ * test KR mode - we need to id KR mode correctly before SFP module.
+ * Call identify_sfp because the pluggable module may have changed */
+ hw->phy.ops.identify_sfp(hw);
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ goto out;
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_sfp_active_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+ break;
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+ break;
+ default:
+ break;
+ }
+
+out:
+ return physical_layer;
+}
+
+/**
+ * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit for 82599
+ **/
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+{
+
+ /*
+ * Workaround for 82599 silicon errata when enabling the Rx datapath.
+ * If traffic is incoming before we enable the Rx unit, it could hang
+ * the Rx DMA unit. Therefore, make sure the security engine is
+ * completely disabled prior to enabling the Rx unit.
+ */
+
+ hw->mac.ops.disable_sec_rx_path(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+
+ hw->mac.ops.enable_sec_rx_path(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_verify_fw_version_82599 - verify fw version for 82599
+ * @hw: pointer to hardware structure
+ *
+ * Verifies that the installed firmware version is 0.6 or higher
+ * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+ *
+ * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+ * if the FW version is not supported.
+ **/
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM_VERSION;
+ u16 fw_offset, fw_ptp_cfg_offset;
+ u16 fw_version = 0;
+
+ /* firmware check is only necessary for SFI devices */
+ if (hw->phy.media_type != ixgbe_media_type_fiber) {
+ status = 0;
+ goto fw_version_out;
+ }
+
+ /* get the offset to the Firmware Module block */
+ hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+ if ((fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the offset to the Pass Through Patch Configuration block */
+ hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+ &fw_ptp_cfg_offset);
+
+ if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the firmware version */
+ hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+ IXGBE_FW_PATCH_VERSION_4), &fw_version);
+
+ if (fw_version > 0x5)
+ status = 0;
+
+fw_version_out:
+ return status;
+}
+
+/**
+ * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if the LESM FW module is present and enabled. Otherwise
+ * returns false. Smart Speed must be disabled if LESM FW module is enabled.
+ **/
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+{
+ bool lesm_enabled = false;
+ u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
+ s32 status;
+
+ /* get the offset to the Firmware Module block */
+ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+ if ((status != 0) ||
+ (fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto out;
+
+ /* get the offset to the LESM Parameters block */
+ status = hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_LESM_PARAMETERS_PTR),
+ &fw_lesm_param_offset);
+
+ if ((status != 0) ||
+ (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
+ goto out;
+
+ /* get the lesm state word */
+ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
+ IXGBE_FW_LESM_STATE_1),
+ &fw_lesm_state);
+
+ if ((status == 0) &&
+ (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
+ lesm_enabled = true;
+
+out:
+ return lesm_enabled;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Retrieves 16 bit word(s) read from EEPROM
+ **/
+static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD otherwise use bit bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
+ data);
+ else
+ ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
+ words,
+ data);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_eeprom_82599 - Read EEPROM word using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM
+ **/
+static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+ u16 offset, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD otherwise use bit bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_generic(hw, offset, data);
+ else
+ ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: I2C bus address of the device to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ msleep(5);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource,"
+ " acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
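+
+/*
+ * Note on the shared-bus handling above and in the write path below: when
+ * the QSFP module's I2C bus is shared with firmware, the driver requests
+ * ownership by raising SDP0, polls (up to roughly one second, in 5 ms
+ * steps) for firmware to grant the bus via SDP1, performs the transfer,
+ * and then releases the bus by clearing SDP0 again.
+ */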
+
+/**
+ * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: I2C bus address of the device to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ msleep(5);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource,"
+ " acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h
new file mode 100755
index 00000000..02be92ab
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h
@@ -0,0 +1,58 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_82599_H_
+#define _IXGBE_82599_H_
+
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+#endif /* _IXGBE_82599_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c
new file mode 100755
index 00000000..9a0a43e6
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c
@@ -0,0 +1,1158 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+
+/**
+ * ixgbe_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers and assign the MAC type and PHY code.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The ixgbe_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /*
+ * Set the mac type
+ */
+ ixgbe_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ status = ixgbe_init_ops_82598(hw);
+ break;
+ case ixgbe_mac_82599EB:
+ status = ixgbe_init_ops_82599(hw);
+ break;
+ case ixgbe_mac_X540:
+ status = ixgbe_init_ops_X540(hw);
+ break;
+ default:
+ status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+
+ return status;
+}
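+
+/*
+ * Illustrative usage (hypothetical, not part of the original driver): a
+ * consumer typically zeroes the ixgbe_hw structure, fills in the mapped
+ * register base and the PCI identification fields, and only then calls
+ * into the shared code:
+ *
+ *	struct ixgbe_hw hw;
+ *
+ *	memset(&hw, 0, sizeof(hw));
+ *	hw.hw_addr = pci_iomap(pdev, 0, 0);
+ *	hw.vendor_id = pdev->vendor;
+ *	hw.device_id = pdev->device;
+ *	hw.subsystem_vendor_id = pdev->subsystem_vendor;
+ *	hw.subsystem_device_id = pdev->subsystem_device;
+ *	hw.revision_id = pdev->revision;
+ *	if (ixgbe_init_shared_code(&hw) == 0)
+ *		ixgbe_init_hw(&hw);
+ */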
+
+/**
+ * ixgbe_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+
+ if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ case IXGBE_DEV_ID_82598_BX:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AT:
+ case IXGBE_DEV_ID_82598AT2:
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
+ hw->mac.type = ixgbe_mac_82598EB;
+ break;
+ case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ case IXGBE_DEV_ID_82599EN_SFP:
+ case IXGBE_DEV_ID_82599_CX4:
+ case IXGBE_DEV_ID_82599_LS:
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ hw->mac.type = ixgbe_mac_82599EB;
+ break;
+ case IXGBE_DEV_ID_X540T:
+ hw->mac.type = ixgbe_mac_X540;
+ break;
+ default:
+ ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+ } else {
+ ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "ixgbe_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, ret_val);
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_hw - Initialize the hardware
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting and then starting the hardware
+ **/
+s32 ixgbe_init_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
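+
+/*
+ * ixgbe_init_hw() above and the wrappers below all follow the same pattern:
+ * they dispatch through the operation pointers installed by
+ * ixgbe_init_ops_82598/82599/X540() via ixgbe_call_func(), and return
+ * IXGBE_NOT_IMPLEMENTED when the current MAC does not provide the
+ * requested operation.
+ */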
+
+/**
+ * ixgbe_reset_hw - Performs a hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts, performs a PHY reset, and performs a MAC reset
+ **/
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_start_hw - Prepares hardware for Rx/Tx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type,
+ * clears all on chip counters, initializes receive address registers,
+ * multicast table, VLAN filter table, calls routine to setup link and
+ * flow control settings, and leaves transmit and receive units disabled
+ * and uninitialized.
+ **/
+s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_hw_cntrs - Clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware.
+ * Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_media_type - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
+ ixgbe_media_type_unknown);
+}
+
+/**
+ * ixgbe_get_mac_addr - Get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from the first Receive Address Register
+ * (RAR0). A reset of the adapter must have been performed prior to calling
+ * this function in order for the MAC address to have been loaded from the
+ * EEPROM into RAR0.
+ **/
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
+ (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_san_mac_addr - Get SAN MAC address
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ **/
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
+ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_san_mac_addr - Write a SAN MAC address
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Writes a SAN MAC address to the EEPROM.
+ **/
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
+ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_device_caps - Get additional device capabilities
+ * @hw: pointer to hardware structure
+ * @device_caps: the EEPROM word for device capabilities
+ *
+ * Reads the extra device capabilities from the EEPROM
+ **/
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
+ (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function reads the alternative SAN MAC address block in the EEPROM
+ * to check for alternative WWNN/WWPN prefix support.
+ **/
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix,
+ (hw, wwnn_prefix, wwpn_prefix),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM
+ * @hw: pointer to hardware structure
+ * @bs: the fcoe boot status
+ *
+ * This function will read the FCOE boot status from the iSCSI FCOE block.
+ **/
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status,
+ (hw, bs),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_bus_info - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_num_of_tx_queues - Get Tx queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
+{
+ return hw->mac.max_tx_queues;
+}
+
+/**
+ * ixgbe_get_num_of_rx_queues - Get Rx queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of receive queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
+{
+ return hw->mac.max_rx_queues;
+}
+
+/**
+ * ixgbe_stop_adapter - Disable Rx/Tx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ * ixgbe_identify_phy - Get PHY type
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_phy - Perform a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ if (ixgbe_identify_phy(hw) != 0)
+ status = IXGBE_ERR_PHY;
+ }
+
+ if (status == 0) {
+ status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version - Get the PHY firmware version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to firmware version
+ **/
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
+{
+ s32 status = 0;
+
+ status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
+ (hw, firmware_version),
+ IXGBE_NOT_IMPLEMENTED);
+ return status;
+}
+
+/**
+ * ixgbe_read_phy_reg - Read PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
+{
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+
+ return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_phy_reg - Write PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data)
+{
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+
+ return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_phy_link - Restart PHY autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restarts PHY autonegotiation and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_check_phy_link - Determine link and speed status
+ * @hw: pointer to hardware structure
+ *
+ * Reads a PHY register to determine if link is up and the current speed for
+ * the PHY.
+ **/
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
+ link_up), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_phy_link_speed - Set auto advertise
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ *
+ * Sets the auto advertised capabilities
+ **/
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
+ autoneg, autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_check_link - Get link and speed status
+ * @hw: pointer to hardware structure
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
+ link_up, link_up_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_tx_laser - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * Disables the Tx laser on SFI optics, if the MAC provides the operation.
+ **/
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.disable_tx_laser)
+ hw->mac.ops.disable_tx_laser(hw);
+}
+
+/**
+ * ixgbe_enable_tx_laser - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * Enables the Tx laser on SFI optics, if the MAC provides the operation.
+ **/
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_tx_laser)
+ hw->mac.ops.enable_tx_laser(hw);
+}
+
+/**
+ * ixgbe_flap_tx_laser - flap Tx laser to start autotry process
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support, flap the
+ * Tx laser to alert the link partner to restart the autotry process on
+ * its end.
+ **/
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.flap_tx_laser)
+ hw->mac.ops.flap_tx_laser(hw);
+}
+
+/**
+ * ixgbe_setup_link - Set link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ *
+ * Configures link settings. Restarts the link.
+ * Performs autonegotiation if needed.
+ **/
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
+ autoneg, autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_link_capabilities - Returns link capabilities
+ * @hw: pointer to hardware structure
+ *
+ * Determines the link capabilities of the current configuration.
+ **/
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
+ speed, autoneg), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_led_on - Turn on LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ *
+ * Turns on the software controllable LEDs.
+ **/
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_led_off - Turn off LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ *
+ * Turns off the software controllable LEDs.
+ **/
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_blink_led_start - Blink LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Blink LED based on index.
+ **/
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_blink_led_stop - Stop blinking LEDs
+ * @hw: pointer to hardware structure
+ *
+ * Stop blinking LED based on index.
+ **/
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_eeprom_params - Initialize EEPROM parameters
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ * ixgbe_write_eeprom - Write word to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not
+ * called after this function, the EEPROM will most likely contain an
+ * invalid checksum.
+ **/
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word(s) to be written to the EEPROM
+ * @words: number of words
+ *
+ * Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not
+ * called after this function, the EEPROM will most likely contain an
+ * invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer,
+ (hw, offset, words, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_eeprom - Read word from EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM
+ **/
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit word(s) from EEPROM
+ * @words: number of words
+ *
+ * Reads 16 bit word(s) from EEPROM
+ **/
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer,
+ (hw, offset, words, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum
+ **/
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
+ (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_eeprom_update_checksum - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_insert_mac_addr - Find a RAR for this mac address
+ * @hw: pointer to hardware structure
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq pool to assign
+ *
+ * Puts an ethernet address into a receive address register, or
+ * finds the RAR that it is already in; adds to the pool list.
+ **/
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
+ (hw, addr, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_rar - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set"
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
+ enable_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_rar - Clear Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to clear
+ *
+ * Clears an ethernet address from a receive address register.
+ **/
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vmdq - Associate a VMDq index with a receive address
+ * @hw: pointer to hardware structure
+ * @rar: receive address register index to associate with VMDq index
+ * @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+
+}
+
+/**
+ * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address
+ * @hw: pointer to hardware structure
+ * @vmdq: VMDq default pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac,
+ (hw, vmdq), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
+ * @hw: pointer to hardware structure
+ * @rar: receive address register index to disassociate with VMDq index
+ * @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_rx_addrs - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
+ * @hw: pointer to hardware structure
+ **/
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
+{
+ return hw->mac.num_rar_entries;
+}
+
+/**
+ * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
+ * @hw: pointer to hardware structure
+ * @addr_list: the list of new unicast (secondary) addresses
+ * @addr_count: number of addresses
+ * @func: iterator function to walk the address list
+ *
+ * The given list replaces any existing list. Clears the secondary addrs from
+ * receive address registers. Uses unused receive address registers for the
+ * first secondary addresses, and falls back to promiscuous mode as needed.
+ **/
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
+ addr_list, addr_count, func),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+ * @func: iterator function to walk the multicast address list
+ *
+ * The given list replaces any existing list. Clears the MC addrs from receive
+ * address registers and the multicast table. Uses unused receive address
+ * registers for the first multicast addresses, and hashes the rest into the
+ * multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr func,
+ bool clear)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
+ mc_addr_list, mc_addr_count, func, clear),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_enable_mc - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_mc - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vfta - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFTA
+ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
+ vlan_on), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vlvf - Set VLAN Pool Filter
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ * should be changed
+ *
+ * Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+ bool *vfta_changed)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
+ vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_fc_enable - Enable flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configures the flow control settings based on SW configuration.
+ **/
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
+ * @hw: pointer to hardware structure
+ * @maj: driver major number to be sent to firmware
+ * @min: driver minor number to be sent to firmware
+ * @build: driver build number to be sent to firmware
+ * @ver: driver version number to be sent to firmware
+ **/
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 ver)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
+ build, ver), IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map
+ **/
+s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_analog_reg8 - Reads 8 bit analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to the analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
+ val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_analog_reg8 - Writes 8 bit analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
+ val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the Unicast Table Arrays to zero on device load. This
+ * is part of the Rx init addr execution path.
+ **/
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 *data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset,
+ dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_i2c_byte - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface
+ * at a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset,
+ dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
+ u8 byte_offset, u8 eeprom_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom,
+ (hw, byte_offset, eeprom_data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
+ (hw, byte_offset, eeprom_data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_supported_physical_layer - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
+ (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
+}
+
+/**
+ * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics
+ * @hw: pointer to hardware structure
+ * @regval: bitfield to write to the Rx DMA register
+ *
+ * Enables the Rx DMA unit of the device.
+ **/
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
+ (hw, regval), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_sec_rx_path - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path.
+ **/
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path,
+ (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_enable_sec_rx_path - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path,
+ (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
+ (hw, mask), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_release_swfw_semaphore - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through SW_FW_SYNC register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
+{
+ if (hw->mac.ops.release_swfw_sync)
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h
new file mode 100755
index 00000000..a6ab30d2
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_API_H_
+#define _IXGBE_API_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data);
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data);
+
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up);
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq);
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr func,
+ bool clear);
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed);
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 ver);
+s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id);
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask,
+ u16 soft_id,
+ u8 queue);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask);
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common);
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 *data);
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 data);
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix);
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
+
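+/*
+ * Illustrative usage sketch (not part of the upstream header): a hedged
+ * outline of a typical call order for this shared-code API. The field names
+ * hw_addr/device_id and the surrounding setup are assumptions; only the
+ * prototypes above are authoritative.
+ *
+ *	struct ixgbe_hw hw = { 0 };
+ *	hw.hw_addr   = mapped_bar0;           // assumed BAR0 mapping
+ *	hw.device_id = pci_dev->device;       // assumed PCI device id
+ *
+ *	if (ixgbe_init_shared_code(&hw) == 0 &&   // selects 82598/82599/X540 ops
+ *	    ixgbe_init_hw(&hw) == 0) {            // reset_hw + start_hw
+ *		u8 mac[6];
+ *		ixgbe_get_mac_addr(&hw, mac);
+ *	}
+ */
+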
+#endif /* _IXGBE_API_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c
new file mode 100755
index 00000000..a0a0046e
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c
@@ -0,0 +1,4083 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_api.h"
+
+static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
+static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+ u16 count);
+static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
+static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
+
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset);
+static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset);
+
+/**
+ * ixgbe_init_ops_generic - Inits function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ **/
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ /* EEPROM */
+ eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
+ /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
+ if (eec & IXGBE_EEC_PRES) {
+ eeprom->ops.read = &ixgbe_read_eerd_generic;
+ eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
+ } else {
+ eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+ eeprom->ops.read_buffer =
+ &ixgbe_read_eeprom_buffer_bit_bang_generic;
+ }
+ eeprom->ops.write = &ixgbe_write_eeprom_generic;
+ eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
+ eeprom->ops.validate_checksum =
+ &ixgbe_validate_eeprom_checksum_generic;
+ eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
+ eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
+
+ /* MAC */
+ mac->ops.init_hw = &ixgbe_init_hw_generic;
+ mac->ops.reset_hw = NULL;
+ mac->ops.start_hw = &ixgbe_start_hw_generic;
+ mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
+ mac->ops.get_media_type = NULL;
+ mac->ops.get_supported_physical_layer = NULL;
+ mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
+ mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
+ mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
+ mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
+ mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
+ mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
+ mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
+
+ /* LEDs */
+ mac->ops.led_on = &ixgbe_led_on_generic;
+ mac->ops.led_off = &ixgbe_led_off_generic;
+ mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
+ mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_rar = &ixgbe_set_rar_generic;
+ mac->ops.clear_rar = &ixgbe_clear_rar_generic;
+ mac->ops.insert_mac_addr = NULL;
+ mac->ops.set_vmdq = NULL;
+ mac->ops.clear_vmdq = NULL;
+ mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
+ mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
+ mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
+ mac->ops.enable_mc = &ixgbe_enable_mc_generic;
+ mac->ops.disable_mc = &ixgbe_disable_mc_generic;
+ mac->ops.clear_vfta = NULL;
+ mac->ops.set_vfta = NULL;
+ mac->ops.set_vlvf = NULL;
+ mac->ops.init_uta_tables = NULL;
+
+ /* Flow Control */
+ mac->ops.fc_enable = &ixgbe_fc_enable_generic;
+
+ /* Link */
+ mac->ops.get_link_capabilities = NULL;
+ mac->ops.setup_link = NULL;
+ mac->ops.check_link = NULL;
+
+ return 0;
+}
+
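+/*
+ * Illustrative note (assumption, not upstream text): the ops left NULL above
+ * (reset_hw, get_media_type, setup_link, ...) are expected to be overridden
+ * by the MAC-specific init routines, roughly along these lines:
+ *
+ *	s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
+ *	{
+ *		s32 ret = ixgbe_init_ops_generic(hw);
+ *		hw->mac.ops.reset_hw   = &ixgbe_reset_hw_82599;       // illustrative name
+ *		hw->mac.ops.setup_link = &ixgbe_setup_mac_link_82599; // illustrative name
+ *		return ret;
+ *	}
+ */
+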
+/**
+ * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ * control
+ * @hw: pointer to hardware structure
+ *
+ * There are several phys that do not support autoneg flow control. This
+ * function checks the device id to see if the associated phy supports
+ * autoneg flow control.
+ **/
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X540T:
+ return 0;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ return 0;
+ default:
+ return IXGBE_ERR_FC_NOT_SUPPORTED;
+ }
+}
+
+/**
+ * ixgbe_setup_fc - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 reg = 0, reg_bp = 0;
+ u16 reg_cu = 0;
+
+ /*
+ * Validate the requested mode. Strict IEEE mode does not allow
+ * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
+ */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /*
+ * Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do fc autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_backplane:
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ break;
+ case ixgbe_media_type_copper:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ reg |= IXGBE_PCS1GANA_ASM_PAUSE;
+ reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
+ reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
+ } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+ reg_cu |= IXGBE_TAF_ASM_PAUSE;
+ reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
+ }
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE;
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
+ break;
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+
+ if (hw->mac.type != ixgbe_mac_X540) {
+ /*
+ * Enable auto-negotiation between the MAC & PHY;
+ * the MAC will advertise clause 37 flow control.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+ /* Disable AN timeout */
+ if (hw->fc.strict_ieee)
+ reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+ hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ }
+
+ /*
+ * AUTOC restart handles negotiation of 1G and 10G on backplane
+ * and copper. There is no need to set the PCS1GCTL register.
+ *
+ */
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+ } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+ (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
+ }
+
+ hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+out:
+ return ret_val;
+}
+
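+/*
+ * Illustrative note (assumption, not upstream text): a caller would normally
+ * pick the flow control mode before hardware init so ixgbe_setup_fc() can
+ * advertise it, e.g.:
+ *
+ *	hw->fc.requested_mode = ixgbe_fc_full;   // symmetric Rx+Tx pause
+ *	hw->fc.strict_ieee    = false;
+ *	ixgbe_init_hw(hw);                       // reset_hw + start_hw -> setup_fc
+ */
+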
+/**
+ * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type,
+ * clearing all on-chip counters, initializing the receive address registers,
+ * multicast table and VLAN filter table, calling the routine to set up link
+ * and flow control settings, and leaving the transmit and receive units
+ * disabled and uninitialized.
+ **/
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+{
+ s32 ret_val;
+ u32 ctrl_ext;
+
+ /* Set the media type */
+ hw->phy.media_type = hw->mac.ops.get_media_type(hw);
+
+ /* PHY ops initialization must be done in reset_hw() */
+
+ /* Clear the VLAN filter table */
+ hw->mac.ops.clear_vfta(hw);
+
+ /* Clear statistics registers */
+ hw->mac.ops.clear_hw_cntrs(hw);
+
+ /* Set No Snoop Disable */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Setup flow control */
+ ret_val = ixgbe_setup_fc(hw);
+ if (ret_val != 0)
+ goto out;
+
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_start_hw_gen2 - Init sequence for common device family
+ * @hw: pointer to hw structure
+ *
+ * Performs the init sequence common to the second generation
+ * of 10 GbE devices.
+ * Devices in the second generation:
+ * 82599
+ * X540
+ **/
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 regval;
+
+ /* Clear the rate limiters */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
+ }
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Disable relaxed ordering */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+ }
+
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_hw_generic - Generic hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the hardware by resetting the hardware, filling the bus info
+ * structure and media type, clearing all on-chip counters, initializing the
+ * receive address registers, multicast table and VLAN filter table, calling
+ * the routine to set up link and flow control settings, and leaving the
+ * transmit and receive units disabled and uninitialized.
+ **/
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /* Reset the hardware */
+ status = hw->mac.ops.reset_hw(hw);
+
+ if (status == 0) {
+ /* Start the HW */
+ status = hw->mac.ops.start_hw(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware
+ * Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+{
+ u16 i = 0;
+
+ IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+ IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+ IXGBE_READ_REG(hw, IXGBE_ERRBC);
+ IXGBE_READ_REG(hw, IXGBE_MSPDC);
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
+ IXGBE_READ_REG(hw, IXGBE_MLFC);
+ IXGBE_READ_REG(hw, IXGBE_MRFC);
+ IXGBE_READ_REG(hw, IXGBE_RLEC);
+ IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ }
+
+ for (i = 0; i < 8; i++) {
+ IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ }
+ }
+ if (hw->mac.type >= ixgbe_mac_82599EB)
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+ IXGBE_READ_REG(hw, IXGBE_PRC64);
+ IXGBE_READ_REG(hw, IXGBE_PRC127);
+ IXGBE_READ_REG(hw, IXGBE_PRC255);
+ IXGBE_READ_REG(hw, IXGBE_PRC511);
+ IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ IXGBE_READ_REG(hw, IXGBE_PRC1522);
+ IXGBE_READ_REG(hw, IXGBE_GPRC);
+ IXGBE_READ_REG(hw, IXGBE_BPRC);
+ IXGBE_READ_REG(hw, IXGBE_MPRC);
+ IXGBE_READ_REG(hw, IXGBE_GPTC);
+ IXGBE_READ_REG(hw, IXGBE_GORCL);
+ IXGBE_READ_REG(hw, IXGBE_GORCH);
+ IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ IXGBE_READ_REG(hw, IXGBE_RUC);
+ IXGBE_READ_REG(hw, IXGBE_RFC);
+ IXGBE_READ_REG(hw, IXGBE_ROC);
+ IXGBE_READ_REG(hw, IXGBE_RJC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+ IXGBE_READ_REG(hw, IXGBE_TORL);
+ IXGBE_READ_REG(hw, IXGBE_TORH);
+ IXGBE_READ_REG(hw, IXGBE_TPR);
+ IXGBE_READ_REG(hw, IXGBE_TPT);
+ IXGBE_READ_REG(hw, IXGBE_PTC64);
+ IXGBE_READ_REG(hw, IXGBE_PTC127);
+ IXGBE_READ_REG(hw, IXGBE_PTC255);
+ IXGBE_READ_REG(hw, IXGBE_PTC511);
+ IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ IXGBE_READ_REG(hw, IXGBE_MPTC);
+ IXGBE_READ_REG(hw, IXGBE_BPTC);
+ for (i = 0; i < 16; i++) {
+ IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ }
+ }
+
+ if (hw->mac.type == ixgbe_mac_X540) {
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ if (pba_num == NULL) {
+ hw_dbg(hw, "PBA string buffer was null\n");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+
+ /*
+ * If data is not the pointer guard, the PBA must be in the legacy format,
+ * which means pba_ptr is actually our second data word for the PBA number
+ * and we can decode it into an ASCII string.
+ */
+ if (data != IXGBE_PBANUM_PTR_GUARD) {
+ hw_dbg(hw, "NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ hw_dbg(hw, "PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (data >> 12) & 0xF;
+ pba_num[1] = (data >> 8) & 0xF;
+ pba_num[2] = (data >> 4) & 0xF;
+ pba_num[3] = data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return 0;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ hw_dbg(hw, "NVM PBA number section invalid length\n");
+ return IXGBE_ERR_PBA_SECTION;
+ }
+
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ hw_dbg(hw, "PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return 0;
+}
+
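+/*
+ * Worked example for the legacy (non-string) PBA format above, derived from
+ * the nibble-unpacking code with made-up values: data = 0x1234 and
+ * pba_ptr = 0x56AB produce the characters 1,2,3,4,5,6,'-',0,A,B, i.e. the
+ * string "123456-0AB" (10 characters plus the terminating NUL, hence the
+ * 11 byte minimum buffer).
+ */
+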
+/**
+ * ixgbe_get_mac_addr_generic - Generic get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from first Receive Address Register (RAR0)
+ * A reset of the adapter must be performed prior to calling this function
+ * in order for the MAC address to have been loaded from the EEPROM into RAR0
+ **/
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
+ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
+
+ for (i = 0; i < 4; i++)
+ mac_addr[i] = (u8)(rar_low >> (i*8));
+
+ for (i = 0; i < 2; i++)
+ mac_addr[i+4] = (u8)(rar_high >> (i*8));
+
+ return 0;
+}
+
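+/*
+ * Worked example (made-up register values): if RAL(0) reads 0xDDCCBBAA and
+ * the low 16 bits of RAH(0) read 0xFFEE, the bytes are unpacked least
+ * significant first, yielding the MAC address AA:BB:CC:DD:EE:FF.
+ */
+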
+/**
+ * ixgbe_get_bus_info_generic - Generic set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u16 link_status;
+
+ hw->bus.type = ixgbe_bus_type_pci_express;
+
+ /* Get the negotiated link width and speed from PCI config space */
+ link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+
+ switch (link_status & IXGBE_PCI_LINK_WIDTH) {
+ case IXGBE_PCI_LINK_WIDTH_1:
+ hw->bus.width = ixgbe_bus_width_pcie_x1;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_2:
+ hw->bus.width = ixgbe_bus_width_pcie_x2;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_4:
+ hw->bus.width = ixgbe_bus_width_pcie_x4;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_8:
+ hw->bus.width = ixgbe_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = ixgbe_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & IXGBE_PCI_LINK_SPEED) {
+ case IXGBE_PCI_LINK_SPEED_2500:
+ hw->bus.speed = ixgbe_bus_speed_2500;
+ break;
+ case IXGBE_PCI_LINK_SPEED_5000:
+ hw->bus.speed = ixgbe_bus_speed_5000;
+ break;
+ case IXGBE_PCI_LINK_SPEED_8000:
+ hw->bus.speed = ixgbe_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = ixgbe_bus_speed_unknown;
+ break;
+ }
+
+ mac->ops.set_lan_id(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
+ bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
+ bus->lan_id = bus->func;
+
+ /* check for a port swap */
+ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ if (reg & IXGBE_FACTPS_LFS)
+ bus->func ^= 0x1;
+}
+
+/**
+ * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+{
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Disable the receive unit */
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
+
+ /* Clear interrupt mask to stop interrupts from being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ IXGBE_READ_REG(hw, IXGBE_EICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ reg_val |= IXGBE_RXDCTL_SWFLSH;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
+ }
+
+ /* flush all queues disables */
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(2);
+
+ /*
+ * Prevent the PCI-E bus from hanging by disabling PCI-E master
+ * access and verifying there are no pending requests
+ */
+ return ixgbe_disable_pcie_master(hw);
+}
+
+/**
+ * ixgbe_led_on_generic - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ **/
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /* To turn on the LED, set mode to ON. */
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_led_off_generic - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ **/
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /* To turn off the LED, set mode to OFF. */
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->type = ixgbe_eeprom_none;
+ /* Set default semaphore delay to 10ms which is a well
+ * tested value */
+ eeprom->semaphore_delay = 10;
+ /* Clear EEPROM page size, it will be initialized as needed */
+ eeprom->word_page_size = 0;
+
+ /*
+ * Check for EEPROM present first.
+ * If not present leave as none
+ */
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (eec & IXGBE_EEC_PRES) {
+ eeprom->type = ixgbe_eeprom_spi;
+
+ /*
+ * SPI EEPROM is assumed here. This code would need to
+ * change if a future EEPROM is not SPI.
+ */
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ }
+
+ if (eec & IXGBE_EEC_ADDR_SIZE)
+ eeprom->address_bits = 16;
+ else
+ eeprom->address_bits = 8;
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
+ "%d\n", eeprom->type, eeprom->word_size,
+ eeprom->address_bits);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to write
+ * @words: number of word(s)
+ * @data: 16 bit word(s) to write to EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM through the bit-bang method
+ **/
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status = 0;
+ u16 i, count;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ /*
+ * The EEPROM page size cannot be queried from the chip, so we detect it
+ * lazily. It is only worth doing when writing a large buffer.
+ */
+ if ((hw->eeprom.word_page_size == 0) &&
+ (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
+ ixgbe_detect_eeprom_page_size_generic(hw, offset);
+
+ /*
+ * We cannot hold the synchronization semaphores for too long without
+ * starving other entities. However, it is more efficient to write in
+ * bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != 0)
+ break;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of word(s)
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word;
+ u16 page_size;
+ u16 i;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+
+ /* Prepare the EEPROM for writing */
+ status = ixgbe_acquire_eeprom(hw);
+
+ if (status == 0) {
+ if (ixgbe_ready_eeprom(hw) != 0) {
+ ixgbe_release_eeprom(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ }
+
+ if (status == 0) {
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode ) */
+ ixgbe_shift_out_eeprom_bits(hw,
+ IXGBE_EEPROM_WREN_OPCODE_SPI,
+ IXGBE_EEPROM_OPCODE_BITS);
+
+ ixgbe_standby_eeprom(hw);
+
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ page_size = hw->eeprom.word_page_size;
+
+ /* Send the data in bursts via SPI */
+ do {
+ word = data[i];
+ word = (word >> 8) | (word << 8);
+ ixgbe_shift_out_eeprom_bits(hw, word, 16);
+
+ if (page_size == 0)
+ break;
+
+ /* do not wrap around page */
+ if (((offset + i) & (page_size - 1)) ==
+ (page_size - 1))
+ break;
+ } while (++i < words);
+
+ ixgbe_standby_eeprom(hw);
+ msleep(10);
+ }
+ /* Done with writing - release the EEPROM */
+ ixgbe_release_eeprom(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit word(s) from EEPROM
+ * @words: number of word(s)
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status = 0;
+ u16 i, count;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ /*
+ * We cannot hold the synchronization semaphores for too long without
+ * starving other entities. However, it is more efficient to read in
+ * bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != 0)
+ break;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @words: number of word(s)
+ * @data: read 16 bit word(s) from EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word_in;
+ u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 i;
+
+ /* Prepare the EEPROM for reading */
+ status = ixgbe_acquire_eeprom(hw);
+
+ if (status == 0) {
+ if (ixgbe_ready_eeprom(hw) != 0) {
+ ixgbe_release_eeprom(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ }
+
+ if (status == 0) {
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ /* Read the data. */
+ word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+
+ /* End this read operation */
+ ixgbe_release_eeprom(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
+{
+ s32 status;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of word(s)
+ * @data: 16 bit word(s) from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ u32 eerd;
+ s32 status = 0;
+ u32 i;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+ IXGBE_EEPROM_RW_REG_START;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
+
+ if (status == 0) {
+ data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
+ IXGBE_EEPROM_RW_REG_DATA);
+ } else {
+ hw_dbg(hw, "Eeprom read timed out\n");
+ goto out;
+ }
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be used as a scratch pad
+ *
+ * Discovers the EEPROM page size by writing marching data at the given offset.
+ * This function is called only when we are writing a new large buffer
+ * at the given offset, so the data would be overwritten anyway.
+ **/
+static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset)
+{
+ u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
+ s32 status = 0;
+ u16 i;
+
+ for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
+ data[i] = i;
+
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
+ IXGBE_EEPROM_PAGE_SIZE_MAX, data);
+ hw->eeprom.word_page_size = 0;
+ if (status != 0)
+ goto out;
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+ if (status != 0)
+ goto out;
+
+ /*
+ * When writing in burst more than the actual page size
+ * EEPROM address wraps around current page.
+ */
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
+
+ hw_dbg(hw, "Detected EEPROM page size = %d words.",
+ hw->eeprom.word_page_size);
+out:
+ return status;
+}
+
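+/*
+ * Worked example for the page size detection above (made-up numbers, and
+ * assuming IXGBE_EEPROM_PAGE_SIZE_MAX is 128): on a part whose real page size
+ * is 32 words, writing the marching pattern 0..127 wraps within each page, so
+ * reading back the first word returns the last value that wrapped onto it,
+ * here 96; the code then infers word_page_size = 128 - 96 = 32.
+ */
+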
+/**
+ * ixgbe_read_eerd_generic - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
+}
+
+/**
+ * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of word(s)
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ u32 eewr;
+ s32 status = 0;
+ u16 i;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
+ IXGBE_EEPROM_RW_REG_START;
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != 0) {
+ hw_dbg(hw, "Eeprom write EEWR timed out\n");
+ goto out;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != 0) {
+ hw_dbg(hw, "Eeprom write EEWR timed out\n");
+ goto out;
+ }
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
+}
+
+/**
+ * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
+ * @hw: pointer to hardware structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
+ * read or write is done respectively.
+ **/
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
+ if (ee_reg == IXGBE_NVM_POLL_READ)
+ reg = IXGBE_READ_REG(hw, IXGBE_EERD);
+ else
+ reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
+
+ if (reg & IXGBE_EEPROM_RW_REG_DONE) {
+ status = 0;
+ break;
+ }
+ udelay(5);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ *
+ * Prepares EEPROM for access using bit-bang method. This function should
+ * be called before issuing a command to the EEPROM.
+ **/
+static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u32 eec;
+ u32 i;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
+ != 0)
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ if (status == 0) {
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ /* Request EEPROM Access */
+ eec |= IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+ for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (eec & IXGBE_EEC_GNT)
+ break;
+ udelay(5);
+ }
+
+ /* Release if grant not acquired */
+ if (!(eec & IXGBE_EEC_GNT)) {
+ eec &= ~IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ hw_dbg(hw, "Could not acquire EEPROM grant\n");
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ status = IXGBE_ERR_EEPROM;
+ }
+
+ /* Setup EEPROM for Read/Write */
+ if (status == 0) {
+ /* Clear CS and SK */
+ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(1);
+ }
+ }
+ return status;
+}
+
+/**
+ * ixgbe_get_eeprom_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
+ **/
+static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = 0;
+ break;
+ }
+ udelay(50);
+ }
+
+ if (i == timeout) {
+ hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
+ "not granted.\n");
+ /*
+ * this release is particularly important because our attempts
+ * above to get the semaphore may have succeeded, and if there
+ * was a timeout, we should unconditionally clear the semaphore
+ * bits to free the driver to make progress
+ */
+ ixgbe_release_eeprom_semaphore(hw);
+
+ udelay(50);
+ /*
+ * one last try
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI))
+ status = 0;
+ }
+
+ /* Now get the semaphore between SW/FW through the SWESMBI bit */
+ if (status == 0) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+ /* Set the SW EEPROM semaphore bit to request access */
+ swsm |= IXGBE_SWSM_SWESMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+ /*
+ * If we set the bit successfully then we got the
+ * semaphore.
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (swsm & IXGBE_SWSM_SWESMBI)
+ break;
+
+ udelay(50);
+ }
+
+ /*
+ * Release semaphores and return error if SW EEPROM semaphore
+ * was not granted because we don't have access to the EEPROM
+ */
+ if (i >= timeout) {
+ hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
+ "not granted.\n");
+ ixgbe_release_eeprom_semaphore(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ } else {
+ hw_dbg(hw, "Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_eeprom_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+ /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
+ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ready_eeprom - Polls for EEPROM ready
+ * @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u16 i;
+ u8 spi_stat_reg;
+
+ /*
+ * Read "Status Register" repeatedly until the LSB is cleared. The
+ * EEPROM will signal that the command has been completed by clearing
+ * bit 0 of the internal status register. If it's not cleared within
+ * 5 milliseconds, then error out.
+ */
+ for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
+ ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
+ IXGBE_EEPROM_OPCODE_BITS);
+ spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
+ if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ ixgbe_standby_eeprom(hw);
+ }
+
+ /*
+ * On some parts, SPI write time could vary from 0-20mSec on 3.3V
+ * devices (and only 0-5mSec on 5V devices)
+ */
+ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
+ hw_dbg(hw, "SPI EEPROM Status error\n");
+ status = IXGBE_ERR_EEPROM;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
+{
+ u32 eec;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ /* Toggle CS to flush commands */
+ eec |= IXGBE_EEC_CS;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(1);
+ eec &= ~IXGBE_EEC_CS;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(1);
+}
+
+/**
+ * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
+ * @hw: pointer to hardware structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ **/
+static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+ u16 count)
+{
+ u32 eec;
+ u32 mask;
+ u32 i;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ /*
+ * Mask is used to shift "count" bits of "data" out to the EEPROM
+ * one bit at a time. Determine the starting bit based on count
+ */
+ mask = 0x01 << (count - 1);
+
+ for (i = 0; i < count; i++) {
+ /*
+ * A "1" is shifted out to the EEPROM by setting bit "DI" to a
+ * "1", and then raising and then lowering the clock (the SK
+ * bit controls the clock input to the EEPROM). A "0" is
+ * shifted out to the EEPROM by setting "DI" to "0" and then
+ * raising and then lowering the clock.
+ */
+ if (data & mask)
+ eec |= IXGBE_EEC_DI;
+ else
+ eec &= ~IXGBE_EEC_DI;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+
+ udelay(1);
+
+ ixgbe_raise_eeprom_clk(hw, &eec);
+ ixgbe_lower_eeprom_clk(hw, &eec);
+
+ /*
+ * Shift mask to signify next bit of data to shift in to the
+ * EEPROM
+ */
+ mask = mask >> 1;
+ }
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eec &= ~IXGBE_EEC_DI;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
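+/*
+ * Worked example for the shift-out routine above (made-up values): with
+ * data = 0xA5 and count = 8, mask starts at 0x80 and the bits are clocked out
+ * most significant first: 1,0,1,0,0,1,0,1.
+ */
+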
+/**
+ * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to hardware structure
+ * @count: number of bits to shift in
+ **/
+static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
+{
+ u32 eec;
+ u32 i;
+ u16 data = 0;
+
+ /*
+ * In order to read a register from the EEPROM, we need to shift
+ * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
+ * the clock input to the EEPROM (setting the SK bit), and then reading
+ * the value of the "DO" bit. During this "shifting in" process the
+ * "DI" bit should always be clear.
+ */
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
+
+ for (i = 0; i < count; i++) {
+ data = data << 1;
+ ixgbe_raise_eeprom_clk(hw, &eec);
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ eec &= ~(IXGBE_EEC_DI);
+ if (eec & IXGBE_EEC_DO)
+ data |= 1;
+
+ ixgbe_lower_eeprom_clk(hw, &eec);
+ }
+
+ return data;
+}
+
+/**
+ * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
+ * @hw: pointer to hardware structure
+ * @eec: EEC register's current value
+ **/
+static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+ /*
+ * Raise the clock input to the EEPROM
+ * (setting the SK bit), then delay
+ */
+ *eec = *eec | IXGBE_EEC_SK;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(1);
+}
+
+/**
+ * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
+ * @hw: pointer to hardware structure
+ * @eec: EEC register's current value
+ **/
+static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+ /*
+ * Lower the clock input to the EEPROM (clearing the SK bit), then
+ * delay
+ */
+ *eec = *eec & ~IXGBE_EEC_SK;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(1);
+}
+
+/**
+ * ixgbe_release_eeprom - Release EEPROM, release semaphores
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
+{
+ u32 eec;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ eec |= IXGBE_EEC_CS; /* Pull CS high */
+ eec &= ~IXGBE_EEC_SK; /* Lower SCK */
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+
+ udelay(1);
+
+ /* Stop requesting EEPROM access */
+ eec &= ~IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ /* Delay before attempt to obtain semaphore again to allow FW access */
+ msleep(hw->eeprom.semaphore_delay);
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ **/
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+ u16 i;
+ u16 j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+
+ /* Include 0x0-0x3F in the checksum */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+ if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+
+ /* Include all data from pointers except for the fw pointer */
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ hw->eeprom.ops.read(hw, i, &pointer);
+
+ /* Make sure the pointer seems valid */
+ if (pointer != 0xFFFF && pointer != 0) {
+ hw->eeprom.ops.read(hw, pointer, &length);
+
+ if (length != 0xFFFF && length != 0) {
+ for (j = pointer+1; j <= pointer+length; j++) {
+ hw->eeprom.ops.read(hw, j, &word);
+ checksum += word;
+ }
+ }
+ }
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return checksum;
+}
+
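+/*
+ * The checksum convention above: the stored word is chosen so that the sum of
+ * all checksummed words plus the checksum word itself equals IXGBE_EEPROM_SUM,
+ * i.e. checksum_word = IXGBE_EEPROM_SUM - sum(words), modulo 2^16. Validation
+ * therefore recomputes the sum and compares it with the word stored at offset
+ * IXGBE_EEPROM_CHECKSUM.
+ */
+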
+/**
+ * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status == 0) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+
+ /*
+ * Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+ } else {
+ hw_dbg(hw, "EEPROM read failed\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum;
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status == 0) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+ status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
+ } else {
+ hw_dbg(hw, "EEPROM read failed\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_mac_addr - Validate MAC address
+ * @mac_addr: pointer to MAC address.
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+s32 ixgbe_validate_mac_addr(u8 *mac_addr)
+{
+ s32 status = 0;
+
+ /* Make sure it is not a multicast address */
+ if (IXGBE_IS_MULTICAST(mac_addr)) {
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ /* Not a broadcast address */
+ } else if (IXGBE_IS_BROADCAST(mac_addr)) {
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ /* Reject the zero address */
+ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ }
+ return status;
+}
+
+/**
+ * ixgbe_set_rar_generic - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ u32 rar_low, rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ /* setup VMDq pool selection before this RAR gets enabled */
+ hw->mac.ops.set_vmdq(hw, index, vmdq);
+
+ /*
+ * HW expects these in little endian so we reverse the byte
+ * order from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] |
+ ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) |
+ ((u32)addr[3] << 24));
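+ /*
+ * For example, an address of 00:1b:21:7e:30:40 packs into
+ * rar_low = 0x7e211b00 and the low 16 bits of rar_high = 0x4030.
+ */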
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+ rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+
+ if (enable_addr != 0)
+ rar_high |= IXGBE_RAH_AV;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+ return 0;
+}
+
+/**
+ * ixgbe_clear_rar_generic - Remove Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ *
+ * Clears an ethernet address from a receive address register.
+ **/
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /*
+ * If the current mac address is valid, assume it is a software override
+ * to the permanent address.
+ * Otherwise, use the permanent address from the eeprom.
+ */
+ if (ixgbe_validate_mac_addr(hw->mac.addr) ==
+ IXGBE_ERR_INVALID_MAC_ADDR) {
+ /* Get the MAC address from the RAR0 for later reference */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
+ hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
+ hw->mac.addr[4], hw->mac.addr[5]);
+ } else {
+ /* Setup the receive address. */
+ hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
+ hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
+ hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
+ hw->mac.addr[4], hw->mac.addr[5]);
+
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for RAR 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+ }
+ hw->addr_ctrl.overflow_promisc = 0;
+
+ hw->addr_ctrl.rar_used_count = 1;
+
+ /* Zero out the other receive addresses. */
+ hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
+ for (i = 1; i < rar_entries; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+ }
+
+ /* Clear the MTA */
+ hw->addr_ctrl.mta_in_use = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ hw_dbg(hw, " Clearing MTA\n");
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+ ixgbe_init_uta_tables(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_add_uc_addr - Adds a secondary unicast address.
+ * @hw: pointer to hardware structure
+ * @addr: new address
+ * @vmdq: VMDq pool the address is associated with
+ *
+ * Adds the address to an unused receive address register, or puts the
+ * controller into promiscuous mode when no register is free.
+ **/
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ u32 rar_entries = hw->mac.num_rar_entries;
+ u32 rar;
+
+ hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ /*
+ * Place this address in the RAR if there is room,
+ * else put the controller into promiscuous mode
+ */
+ if (hw->addr_ctrl.rar_used_count < rar_entries) {
+ rar = hw->addr_ctrl.rar_used_count;
+ hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+ hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
+ hw->addr_ctrl.rar_used_count++;
+ } else {
+ hw->addr_ctrl.overflow_promisc++;
+ }
+
+ hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
+}
+
+/**
+ * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
+ * @hw: pointer to hardware structure
+ * @addr_list: the list of new addresses
+ * @addr_count: number of addresses
+ * @next: iterator function to walk the address list
+ *
+ * The given list replaces any existing list. Clears the secondary addrs from
+ * receive address registers. Uses unused receive address registers for the
+ * first secondary addresses, and falls back to promiscuous mode as needed.
+ *
+ * Drivers using secondary unicast addresses must set user_set_promisc when
+ * manually putting the device into promiscuous mode.
+ **/
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr next)
+{
+ u8 *addr;
+ u32 i;
+ u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
+ u32 uc_addr_in_use;
+ u32 fctrl;
+ u32 vmdq;
+
+ /*
+ * Clear accounting of old secondary address list,
+ * don't count RAR[0]
+ */
+ uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
+ hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
+ hw->addr_ctrl.overflow_promisc = 0;
+
+ /* Zero out the other receive addresses */
+ hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use+1);
+ for (i = 0; i < uc_addr_in_use; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
+ }
+
+ /* Add the new addresses */
+ for (i = 0; i < addr_count; i++) {
+ hw_dbg(hw, " Adding the secondary addresses:\n");
+ addr = next(hw, &addr_list, &vmdq);
+ ixgbe_add_uc_addr(hw, addr, vmdq);
+ }
+
+ if (hw->addr_ctrl.overflow_promisc) {
+ /* enable promisc if not already in overflow or set by user */
+ if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+ hw_dbg(hw, " Entering address overflow promisc mode\n");
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_UPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ }
+ } else {
+ /* only disable if set by overflow, not by user */
+ if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+ hw_dbg(hw, " Leaving address overflow promisc mode\n");
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl &= ~IXGBE_FCTRL_UPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ }
+ }
+
+ hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
+ return 0;
+}
+
+/**
+ * ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts 12 bits from a multicast address to determine which bit-vector
+ * to set in the multicast table. The hardware uses the same 12 bits of
+ * incoming rx multicast addresses to determine the bit-vector to check in
+ * the MTA. Which of the 4 possible 12-bit combinations the hardware uses is
+ * selected by the MO field of MCSTCTRL, which is set during initialization
+ * to mc_filter_type.
+ **/
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ hw_dbg(hw, "MC filter type param set incorrectly\n");
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * ixgbe_set_mta - Set bit-vector in multicast table
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address to hash into the table
+ *
+ * Sets the bit-vector in the multicast table.
+ **/
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector;
+ u32 vector_bit;
+ u32 vector_reg;
+
+ hw->addr_ctrl.mta_in_use++;
+
+ vector = ixgbe_mta_vector(hw, mc_addr);
+ hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
+
+ /*
+ * The MTA is a register array of 128 32-bit registers. It is treated
+ * like an array of 4096 bits. We want to set bit
+ * BitArray[vector_value]. So we figure out what register the bit is
+ * in, read it, OR in the new bit, then write back the new value. The
+ * register is determined by the upper 7 bits of the vector value and
+ * the bit within that register is determined by the lower 5 bits of
+ * the value.
+ */
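+ /* For example, vector 0x92C selects bit 12 of MTA register 0x49. */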
+ vector_reg = (vector >> 5) & 0x7F;
+ vector_bit = vector & 0x1F;
+ hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+/**
+ * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+ * @next: iterator function to walk the multicast address list
+ * @clear: flag, when set clears the table beforehand
+ *
+ * When the clear flag is set, the given list replaces any existing list.
+ * Hashes the given addresses into the multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
+{
+ u32 i;
+ u32 vmdq;
+
+ /*
+ * Set the new number of MC addresses that we are being requested to
+ * use.
+ */
+ hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+ hw->addr_ctrl.mta_in_use = 0;
+
+ /* Clear mta_shadow */
+ if (clear) {
+ hw_dbg(hw, " Clearing MTA\n");
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+ }
+
+ /* Update mta_shadow */
+ for (i = 0; i < mc_addr_count; i++) {
+ hw_dbg(hw, " Adding the multicast addresses:\n");
+ ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+ }
+
+ /* Enable mta */
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+ hw->mac.mta_shadow[i]);
+
+ if (hw->addr_ctrl.mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+
+ hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
+ return 0;
+}
+
+/**
+ * ixgbe_enable_mc_generic - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+ if (a->mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
+ hw->mac.mc_filter_type);
+
+ return 0;
+}
+
+/**
+ * ixgbe_disable_mc_generic - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+ if (a->mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ return 0;
+}
+
+/**
+ * ixgbe_fc_enable_generic - Enable flow control
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to the current settings.
+ **/
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 mflcn_reg, fccfg_reg;
+ u32 reg;
+ u32 fcrtl, fcrth;
+ int i;
+
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
+ /* Negotiate the fc mode to use */
+ ixgbe_fc_autoneg(hw);
+
+ /* Disable any previous flow control settings */
+ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+
+ fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
+
+ /*
+ * The possible values of fc.current_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.current_mode) {
+ case ixgbe_fc_none:
+ /*
+ * Flow control is disabled by software override or autoneg.
+ * The code below will actually disable it in the HW.
+ */
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ mflcn_reg |= IXGBE_MFLCN_RFCE;
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ mflcn_reg |= IXGBE_MFLCN_RFCE;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+ break;
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ mflcn_reg |= IXGBE_MFLCN_DPF;
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
+
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+ fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the maximum FCRTH value. This allows the Tx
+ * switch to function even under heavy Rx workloads.
+ */
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
+ }
+
+ /* Configure pause time (2 TCs per register) */
+ reg = hw->fc.pause_time * 0x00010001;
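+ /*
+ * Multiplying by 0x00010001 replicates the 16-bit pause time into both
+ * halves of the register, programming two traffic classes per write.
+ */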
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_negotiate_fc - Negotiate flow control
+ * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
+ **/
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+ if ((!(adv_reg)) || (!(lp_reg)))
+ return IXGBE_ERR_FC_NOT_NEGOTIATED;
+
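+ /*
+ * Resolution handled below mirrors the standard PAUSE resolution rules:
+ * local sym & partner sym -> full (or rx-only if that was requested),
+ * local asm-only & partner sym+asm -> tx pause only,
+ * local sym+asm & partner asm-only -> rx pause only,
+ * anything else -> no flow control.
+ */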
+ if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+ /*
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ hw_dbg(hw, "Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
+ }
+ } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+ } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw_dbg(hw, "Flow Control = NONE.\n");
+ }
+ return 0;
+}
+
+/**
+ * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to the autonegotiation result on 1 gig fiber.
+ **/
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+ u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+ /*
+ * On multispeed fiber at 1g, bail out if
+ * - link is up but AN did not complete, or if
+ * - link is up and AN completed but timed out
+ */
+
+ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
+ goto out;
+
+ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+ ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+ pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE,
+ IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+ u32 links2, anlp1_reg, autoc_reg, links;
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+ /*
+ * On backplane, bail out if
+ * - backplane autoneg was not completed, or if
+ * - we are 82599 and link partner is not AN enabled
+ */
+ links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
+ goto out;
+
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
+ goto out;
+ }
+ /*
+ * Read the 10g AN autoc and LP ability registers and resolve
+ * local flow control settings accordingly
+ */
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+ ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+ anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+ IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+ u16 technology_ability_reg = 0;
+ u16 lp_technology_ability_reg = 0;
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &technology_ability_reg);
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &lp_technology_ability_reg);
+
+ return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+ (u32)lp_technology_ability_reg,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ * ixgbe_fc_autoneg - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Compares our advertised flow control capabilities to those advertised by
+ * our link partner, and determines the proper flow control mode to use.
+ **/
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /*
+ * AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg)
+ goto out;
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up)
+ goto out;
+
+ switch (hw->phy.media_type) {
+ /* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber:
+ if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+ ret_val = ixgbe_fc_autoneg_fiber(hw);
+ break;
+
+ /* Autoneg flow control on backplane adapters */
+ case ixgbe_media_type_backplane:
+ ret_val = ixgbe_fc_autoneg_backplane(hw);
+ break;
+
+ /* Autoneg flow control on copper adapters */
+ case ixgbe_media_type_copper:
+ if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+ ret_val = ixgbe_fc_autoneg_copper(hw);
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ if (ret_val == 0) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * @hw: pointer to hardware structure
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests. Returns IXGBE_ERR_MASTER_REQUESTS_PENDING if the master disable
+ * bit has not caused the master requests to be disabled, otherwise returns 0
+ * to signify that master requests are disabled.
+ **/
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u32 i;
+
+ /* Always set this bit to ensure any future transactions are blocked */
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
+
+ /* Exit if master requests are blocked */
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto out;
+
+ /* Poll for master request bit to clear */
+ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ udelay(100);
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto out;
+ }
+
+ /*
+ * Two consecutive resets are required via CTRL.RST per datasheet
+ * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new master requests from
+ * being issued by our device. We then must wait 1usec or more for any
+ * remaining completions from the PCIe bus to trickle in, and then reset
+ * again to clear out any effects they may have had on our device.
+ */
+ hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+ hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+
+ /*
+ * Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ udelay(100);
+ if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
+ IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ goto out;
+ }
+
+ hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
+ status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 gssr;
+ u32 swmask = mask;
+ u32 fwmask = mask << 5;
+ s32 timeout = 200;
+
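+ /*
+ * Each resource has a software-owned bit (swmask) and a firmware-owned
+ * bit 5 positions higher (fwmask); the resource is free only when
+ * neither owner has its bit set in GSSR.
+ */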
+ while (timeout) {
+ /*
+ * SW EEPROM semaphore bit is used for access to all
+ * SW_FW_SYNC/GSSR bits (not just EEPROM)
+ */
+ if (ixgbe_get_eeprom_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+ if (!(gssr & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask) or other software
+ * thread currently using resource (swmask)
+ */
+ ixgbe_release_eeprom_semaphore(hw);
+ msleep(5);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
+ gssr |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+ ixgbe_release_eeprom_semaphore(hw);
+ return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 gssr;
+ u32 swmask = mask;
+
+ ixgbe_get_eeprom_semaphore(hw);
+
+ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+ gssr &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+ ixgbe_release_eeprom_semaphore(hw);
+}
+
+/**
+ * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECRX_POLL 40
+
+ int i;
+ int secrxreg;
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+ if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+ break;
+ else
+ /* Use interrupt-safe sleep just in case */
+ udelay(1000);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECRX_POLL)
+ hw_dbg(hw, "Rx unit being enabled before security "
+ "path fully disabled. Continuing with init.\n");
+
+ return 0;
+}
+
+/**
+ * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+ int secrxreg;
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit
+ **/
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+
+ return 0;
+}
+
+/**
+ * ixgbe_blink_led_start_generic - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ **/
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+{
+ ixgbe_link_speed speed = 0;
+ bool link_up = 0;
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /*
+ * Link must be up to auto-blink the LEDs;
+ * Force it if link is down.
+ */
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+
+ if (!link_up) {
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ autoc_reg |= IXGBE_AUTOC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(10);
+ }
+
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ **/
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ autoc_reg &= ~IXGBE_AUTOC_FLU;
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg &= ~IXGBE_LED_BLINK(index);
+ led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_offset: SAN MAC address offset
+ *
+ * This function will read the EEPROM location for the SAN MAC address
+ * pointer, and returns the value at that location. This is used in both
+ * get and set mac_addr routines.
+ **/
+static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
+{
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available.
+ */
+ hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ * set_lan_id() is called by identify_sfp(), but this cannot be relied
+ * upon for non-SFP connections, so we must call it here.
+ **/
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available. If they're not, no point in calling set_lan_id() here.
+ */
+ ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+
+ if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+ /*
+ * No addresses available in this EEPROM. It's not an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+
+ goto san_mac_addr_out;
+ }
+
+ /* make sure we know which port we need to program */
+ hw->mac.ops.set_lan_id(hw);
+ /* apply the port offset to the address offset */
+ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ for (i = 0; i < 3; i++) {
+ hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+ san_mac_addr[i * 2] = (u8)(san_mac_data);
+ san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+ san_mac_offset++;
+ }
+
+san_mac_addr_out:
+ return 0;
+}
+
+/**
+ * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Write a SAN MAC address to the EEPROM.
+ **/
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ s32 status = 0;
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+
+ /* Look for SAN mac address pointer. If not defined, return */
+ ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+
+ if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+ status = IXGBE_ERR_NO_SAN_ADDR_PTR;
+ goto san_mac_addr_out;
+ }
+
+ /* Make sure we know which port we need to write */
+ hw->mac.ops.set_lan_id(hw);
+ /* Apply the port offset to the address offset */
+ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+
+ for (i = 0; i < 3; i++) {
+ san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
+ san_mac_data |= (u16)(san_mac_addr[i * 2]);
+ hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
+ san_mac_offset++;
+ }
+
+san_mac_addr_out:
+ return status;
+}
+
+/**
+ * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
+ * @hw: pointer to hardware structure
+ *
+ * Read PCIe configuration space, and get the MSI-X vector count from
+ * the capabilities table.
+ **/
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+{
+ u16 msix_count = 1;
+ u16 max_msix_count;
+ u16 pcie_offset;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
+ default:
+ return msix_count;
+ }
+
+ msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
+ msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+ /* MSI-X count is zero-based in HW */
+ msix_count++;
+
+ if (msix_count > max_msix_count)
+ msix_count = max_msix_count;
+
+ return msix_count;
+}
+
+/**
+ * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
+ * @hw: pointer to hardware structure
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq pool to assign
+ *
+ * Puts an ethernet address into a receive address register, or
+ * finds the RAR it is already in, and adds it to the pool list.
+ **/
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
+ u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
+ u32 rar;
+ u32 rar_low, rar_high;
+ u32 addr_low, addr_high;
+
+ /* swap bytes for HW little endian */
+ addr_low = addr[0] | (addr[1] << 8)
+ | (addr[2] << 16)
+ | (addr[3] << 24);
+ addr_high = addr[4] | (addr[5] << 8);
+
+ /*
+ * Either find the mac_id in rar or find the first empty space.
+ * rar_highwater points to just after the highest currently used
+ * rar in order to shorten the search. It grows when we add a new
+ * rar to the top.
+ */
+ for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+
+ if (((IXGBE_RAH_AV & rar_high) == 0)
+ && first_empty_rar == NO_EMPTY_RAR_FOUND) {
+ first_empty_rar = rar;
+ } else if ((rar_high & 0xFFFF) == addr_high) {
+ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
+ if (rar_low == addr_low)
+ break; /* found it already in the rars */
+ }
+ }
+
+ if (rar < hw->mac.rar_highwater) {
+ /* already there so just add to the pool bits */
+ ixgbe_set_vmdq(hw, rar, vmdq);
+ } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
+ /* stick it into first empty RAR slot we found */
+ rar = first_empty_rar;
+ ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+ } else if (rar == hw->mac.rar_highwater) {
+ /* add it to the top of the list and inc the highwater mark */
+ ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+ hw->mac.rar_highwater++;
+ } else if (rar >= hw->mac.num_rar_entries) {
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+ }
+
+ /*
+ * If we found rar[0], make sure the default pool bit (we use pool 0)
+ * remains cleared to be sure default pool packets will get delivered
+ */
+ if (rar == 0)
+ ixgbe_clear_vmdq(hw, rar, 0);
+
+ return rar;
+}
+
+/**
+ * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to disassociate
+ * @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar_lo, mpsar_hi;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+
+ if (!mpsar_lo && !mpsar_hi)
+ goto done;
+
+ if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+ if (mpsar_lo) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ mpsar_lo = 0;
+ }
+ if (mpsar_hi) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ mpsar_hi = 0;
+ }
+ } else if (vmdq < 32) {
+ mpsar_lo &= ~(1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
+ } else {
+ mpsar_hi &= ~(1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
+ }
+
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ hw->mac.ops.clear_rar(hw, rar);
+done:
+ return 0;
+}
+
+/**
+ * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
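+ /*
+ * Each RAR has a 64-bit pool bitmap split across MPSAR_LO (pools 0-31)
+ * and MPSAR_HI (pools 32-63); for example, pool 37 maps to bit 5 of
+ * MPSAR_HI.
+ */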
+ if (vmdq < 32) {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar |= 1 << vmdq;
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+ } else {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ mpsar |= 1 << (vmdq - 32);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
+ }
+ return 0;
+}
+
+/**
+ * ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ *
+ * This function should only be invoked in IOV mode. In IOV mode the default
+ * pool is the next pool after the number of VFs advertised, not pool 0; the
+ * MPSAR table needs to be updated for the SAN_MAC RAR
+ * [hw->mac.san_mac_rar_index].
+ **/
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+{
+ u32 mpsar;
+ u32 rar = hw->mac.san_mac_rar_index;
+
+ if (vmdq < 32) {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar |= 1 << vmdq;
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+ } else {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ mpsar |= 1 << (vmdq - 32);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+{
+ int i;
+
+ hw_dbg(hw, " Clearing UTA\n");
+
+ for (i = 0; i < 128; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+
+ return 0;
+}
+
+/**
+ * ixgbe_find_vlvf_slot - find the VLAN id or the first empty slot
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ *
+ * Returns the VLVF index where this VLAN id should be placed
+ **/
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+{
+ u32 bits = 0;
+ u32 first_empty_slot = 0;
+ s32 regindex;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /*
+ * Search for the vlan id in the VLVF entries. Save off the first empty
+ * slot found along the way
+ */
+ for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+ if (!bits && !(first_empty_slot))
+ first_empty_slot = regindex;
+ else if ((bits & 0x0FFF) == vlan)
+ break;
+ }
+
+ /*
+ * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
+ * in the VLVF. Else use the first empty VLVF register for this
+ * vlan id.
+ */
+ if (regindex >= IXGBE_VLVF_ENTRIES) {
+ if (first_empty_slot)
+ regindex = first_empty_slot;
+ else {
+ hw_dbg(hw, "No space in VLVF.\n");
+ regindex = IXGBE_ERR_NO_SPACE;
+ }
+ }
+
+ return regindex;
+}
+
+/**
+ * ixgbe_set_vfta_generic - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ s32 regindex;
+ u32 bitindex;
+ u32 vfta;
+ u32 targetbit;
+ s32 ret_val = 0;
+ bool vfta_changed = false;
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /*
+ * this is a 2 part operation - first the VFTA, then the
+ * VLVF and VLVFB if VT Mode is set
+ * We don't write the VFTA until we know the VLVF part succeeded.
+ */
+
+ /* Part 1
+ * The VFTA is a bitstring made up of 128 32-bit registers
+ * that enable the particular VLAN id, much like the MTA:
+ * bits[11-5]: which register
+ * bits[4-0]: which bit in the register
+ */
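+ /* For example, VLAN id 100 maps to bit 4 of VFTA register 3. */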
+ regindex = (vlan >> 5) & 0x7F;
+ bitindex = vlan & 0x1F;
+ targetbit = (1 << bitindex);
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+
+ if (vlan_on) {
+ if (!(vfta & targetbit)) {
+ vfta |= targetbit;
+ vfta_changed = true;
+ }
+ } else {
+ if ((vfta & targetbit)) {
+ vfta &= ~targetbit;
+ vfta_changed = true;
+ }
+ }
+
+ /* Part 2
+ * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
+ */
+ ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
+ &vfta_changed);
+ if (ret_val != 0)
+ return ret_val;
+
+ if (vfta_changed)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ * should be changed
+ *
+ * Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed)
+{
+ u32 vt;
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /* If VT Mode is set
+ * Either vlan_on
+ * make sure the vlan is in VLVF
+ * set the vind bit in the matching VLVFB
+ * Or !vlan_on
+ * clear the pool bit and possibly the vind
+ */
+ vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (vt & IXGBE_VT_CTL_VT_ENABLE) {
+ s32 vlvf_index;
+ u32 bits;
+
+ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
+ if (vlvf_index < 0)
+ return vlvf_index;
+
+ if (vlan_on) {
+ /* set the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
+ bits |= (1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2),
+ bits);
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits |= (1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1),
+ bits);
+ }
+ } else {
+ /* clear the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
+ bits &= ~(1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits &= ~(1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
+ }
+ }
+
+ /*
+ * If there are still bits set in the VLVFB registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the VFTA entry bit.
+ * If the caller has requested that we clear the VFTA
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ if ((!vlan_on) && (vfta_changed != NULL)) {
+ /* someone wants to clear the vfta entry
+ * but some pools/VFs are still using it.
+ * Ignore it. */
+ *vfta_changed = false;
+ }
+ } else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+ u32 offset;
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_check_mac_link_generic - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ u32 links_reg, links_orig;
+ u32 i;
+
+ /* clear the old state */
+ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ if (links_orig != links_reg) {
+ hw_dbg(hw, "LINKS changed from %08X to %08X\n",
+ links_orig, links_reg);
+ }
+
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msleep(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_1G_82599)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_100_82599)
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function will read the EEPROM from the alternative SAN MAC address
+ * block to check the support for the alternative WWNN/WWPN prefix support.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+ &alt_san_mac_blk_offset);
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ hw->eeprom.ops.read(hw, offset, &caps);
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+ return 0;
+}
+
+/**
+ * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
+ * @hw: pointer to hardware structure
+ * @bs: the fcoe boot status
+ *
+ * This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
+{
+ u16 offset, caps, flags;
+ s32 status;
+
+ /* clear output first */
+ *bs = ixgbe_fcoe_bootstatus_unavailable;
+
+ /* check if FCOE IBA block is present */
+ offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
+ status = hw->eeprom.ops.read(hw, offset, &caps);
+ if (status != 0)
+ goto out;
+
+ if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
+ goto out;
+
+ /* check if iSCSI FCOE block is populated */
+ status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
+ if (status != 0)
+ goto out;
+
+ if ((offset == 0) || (offset == 0xFFFF))
+ goto out;
+
+ /* read fcoe flags in iSCSI FCOE block */
+ offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
+ status = hw->eeprom.ops.read(hw, offset, &flags);
+ if (status != 0)
+ goto out;
+
+ if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
+ *bs = ixgbe_fcoe_bootstatus_enabled;
+ else
+ *bs = ixgbe_fcoe_bootstatus_disabled;
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for anti-spoofing
+ * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ *
+ **/
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+{
+ int j;
+ int pf_target_reg = pf >> 3;
+ int pf_target_shift = pf % 8;
+ u32 pfvfspoof = 0;
+
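+ /*
+ * Each PFVFSPOOF register carries the MAC anti-spoof enable bits for 8
+ * pools; for example, a PF in pool 10 corresponds to bit 2 of
+ * PFVFSPOOF(1).
+ */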
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (enable)
+ pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
+
+ /*
+ * PFVFSPOOF register array is size 8 with 8 bits assigned to
+ * MAC anti-spoof enables in each register array element.
+ */
+ for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+ /* If not enabling anti-spoofing then done */
+ if (!enable)
+ return;
+
+ /*
+ * The PF should be allowed to spoof so that it can support
+ * emulation mode NICs. Reset the bit assigned to the PF
+ */
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
+ pfvfspoof ^= (1 << pf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ *
+ **/
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
+ u32 pfvfspoof;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_get_device_caps_generic - Get additional device capabilities
+ * @hw: pointer to hardware structure
+ * @device_caps: the EEPROM word with the extra device capabilities
+ *
+ * This function will read the EEPROM location for the device capabilities,
+ * and return the word through device_caps.
+ **/
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+{
+ hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
+
+ return 0;
+}
+
+/**
+ * ixgbe_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to the buffer to checksum
+ * @length: number of bytes over which to calculate the checksum
+ *
+ * Calculates the checksum of the buffer over the specified length and
+ * returns the calculated value.
+ **/
+static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ if (!buffer)
+ return 0;
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
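+ /*
+ * Return the value that makes the bytes, including this checksum byte,
+ * sum to zero modulo 256.
+ */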
+ return (u8) (0 - sum);
+}
+
+/**
+ * ixgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ *
+ * Communicates with the manageability block. On success return 0
+ * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ **/
+static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length)
+{
+ u32 hicr, i, bi;
+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ u8 buf_len, dword_len;
+
+ s32 ret_val = 0;
+
+ if (length == 0 || length & 0x3 ||
+ length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ hw_dbg(hw, "Buffer length failure.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if ((hicr & IXGBE_HICR_EN) == 0) {
+ hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs */
+ dword_len = length >> 2;
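+ /* e.g. a 12-byte command is written as three 32-bit FLEX_MNG words */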
+
+ /*
+ * The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < dword_len; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ i, IXGBE_CPU_TO_LE32(buffer[i]));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
+
+ for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if (!(hicr & IXGBE_HICR_C))
+ break;
+ msleep(1);
+ }
+
+ /* Check command successful completion. */
+ if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+ (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
+ hw_dbg(hw, "Command has failed with no status valid.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs */
+ dword_len = hdr_size >> 2;
+
+ /* first pull in the header so we know the buffer length */
+ for (bi = 0; bi < dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ }
+
+ /* If there is anything in the data position, pull it in */
+ buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
+ if (buf_len == 0)
+ goto out;
+
+ if (length < (buf_len + hdr_size)) {
+ hw_dbg(hw, "Buffer not large enough for reply message.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
+
+ /* Pull in the rest of the buffer (bi is where we left off)*/
+ for (; bi <= dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ *
+ * Sends driver version number to firmware through the manageability
+ * block. On success return 0
+ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub)
+{
+ struct ixgbe_hic_drv_info fw_cmd;
+ int i;
+ s32 ret_val = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
+ != 0) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+ fw_cmd.pad = 0;
+ fw_cmd.pad2 = 0;
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd));
+ if (ret_val != 0)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = 0;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+out:
+ return ret_val;
+}
+
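For illustration only (not part of this commit): a minimal sketch of how a driver might report its version through the routine above during bring-up; the 3.9.17-0 version numbers are placeholders.

static void example_report_driver_version(struct ixgbe_hw *hw)
{
	s32 err;

	/* placeholder version numbers: major 3, minor 9, build 17, sub 0 */
	err = ixgbe_set_fw_drv_ver_generic(hw, 3, 9, 17, 0);
	if (err == IXGBE_ERR_SWFW_SYNC)
		hw_dbg(hw, "could not acquire the SW/FW semaphore\n");
	else if (err == IXGBE_ERR_HOST_INTERFACE_COMMAND)
		hw_dbg(hw, "driver info command rejected by firmware\n");
}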
+/**
+ * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy)
+{
+ u32 pbsize = hw->mac.rx_pb_size;
+ int i = 0;
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ /* Reserve headroom */
+ pbsize -= headroom;
+
+ if (!num_pb)
+ num_pb = 1;
+
+ /* Divide remaining packet buffer space amongst the number of packet
+ * buffers requested using supplied strategy.
+ */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* The ixgbe_dcb_pba_80_48 strategy weights the first half of the
+ * packet buffers with 5/8 of the packet buffer space.
+ */
+ rxpktsize = (pbsize * 5) / (num_pb * 4);
+ pbsize -= rxpktsize * (num_pb / 2);
+ rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
+ for (; i < (num_pb / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Fall through to configure remaining packet buffers */
+ case PBA_STRATEGY_EQUAL:
+ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+ for (; i < num_pb; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ default:
+ break;
+ }
+
+ /* Only support an equally distributed Tx packet buffer strategy. */
+ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+ txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_pb; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+
+ /* Clear unused TCs, if any, to zero buffer size */
+ for (; i < IXGBE_MAX_PB; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+}
+
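For illustration only (not part of this commit): the arithmetic behind PBA_STRATEGY_WEIGHTED, assuming a 512 KB Rx packet buffer, no headroom and eight packet buffers; the numbers reproduce the 80 KB / 48 KB split that ixgbe_dcb_pba_80_48 refers to.

static void example_weighted_pba_split(void)
{
	u32 pbsize = 512;			/* KB, assumed for illustration */
	int num_pb = 8;
	u32 big, small;

	big = (pbsize * 5) / (num_pb * 4);	/* 512 * 5 / 32 = 80 KB, TC0-3 */
	pbsize -= big * (num_pb / 2);		/* 512 - 320 = 192 KB remain  */
	small = pbsize / (num_pb - num_pb / 2);	/* 192 / 4    = 48 KB, TC4-7 */

	(void)big;
	(void)small;
}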
+/**
+ * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
+ * @hw: pointer to the hardware structure
+ *
+ * The 82599 and X540 MACs can experience issues if TX work is still pending
+ * when a reset occurs. This function prevents this by flushing the PCIe
+ * buffers on the system.
+ **/
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
+{
+ u32 gcr_ext, hlreg0;
+
+ /*
+ * If double reset is not requested then all transactions should
+ * already be clear and as such there is no work to do
+ */
+ if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+ return;
+
+ /*
+ * Set loopback enable to prevent any transmits from being sent
+ * should the link come up. This assumes that the RXCTRL.RXEN bit
+ * has already been cleared.
+ */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+
+ /* initiate cleaning flow for buffers in the PCIe transaction layer */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
+ gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
+
+ /* Flush all writes and allow 20usec for all transactions to clear */
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(20);
+
+ /* restore previous register values */
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+}
+
+static const u8 ixgbe_emc_temp_data[4] = {
+ IXGBE_EMC_INTERNAL_DATA,
+ IXGBE_EMC_DIODE1_DATA,
+ IXGBE_EMC_DIODE2_DATA,
+ IXGBE_EMC_DIODE3_DATA
+};
+static const u8 ixgbe_emc_therm_limit[4] = {
+ IXGBE_EMC_INTERNAL_THERM_LIMIT,
+ IXGBE_EMC_DIODE1_THERM_LIMIT,
+ IXGBE_EMC_DIODE2_THERM_LIMIT,
+ IXGBE_EMC_DIODE3_THERM_LIMIT
+};
+
+/**
+ * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Reads the thermal sensor data via I2C and stores it in
+ * hw->mac.thermal_sensor_data.
+ **/
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 i;
+ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ /* Only support thermal sensors attached to 82599 physical port 0 */
+ if ((hw->mac.type != ixgbe_mac_82599EB) ||
+ (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
+ status = IXGBE_NOT_IMPLEMENTED;
+ goto out;
+ }
+
+ status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
+ if (status)
+ goto out;
+
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
+ status = IXGBE_NOT_IMPLEMENTED;
+ goto out;
+ }
+
+ status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
+ if (status)
+ goto out;
+
+ if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
+ != IXGBE_ETS_TYPE_EMC) {
+ status = IXGBE_NOT_IMPLEMENTED;
+ goto out;
+ }
+
+ num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+ if (num_sensors > IXGBE_MAX_SENSORS)
+ num_sensors = IXGBE_MAX_SENSORS;
+
+ for (i = 0; i < num_sensors; i++) {
+ status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
+ &ets_sensor);
+ if (status)
+ goto out;
+
+ sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+ IXGBE_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+ IXGBE_ETS_DATA_LOC_SHIFT);
+
+ if (sensor_location != 0) {
+ status = hw->phy.ops.read_i2c_byte(hw,
+ ixgbe_emc_temp_data[sensor_index],
+ IXGBE_I2C_THERMAL_SENSOR_ADDR,
+ &data->sensor[i].temp);
+ if (status)
+ goto out;
+ }
+ }
+out:
+ return status;
+}
+
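For illustration only (not part of this commit): a sketch of reading back the temperatures the call above stores in hw->mac.thermal_sensor_data, assuming the sensor array is sized by IXGBE_MAX_SENSORS.

static void example_dump_thermal_data(struct ixgbe_hw *hw)
{
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
	u8 i;

	if (ixgbe_get_thermal_sensor_data_generic(hw) != 0)
		return;

	for (i = 0; i < IXGBE_MAX_SENSORS; i++)
		hw_dbg(hw, "thermal sensor %d: %d\n", i, data->sensor[i].temp);
}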
+/**
+ * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map and
+ * saves off the threshold and location values into mac.thermal_sensor_data.
+ **/
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 low_thresh_delta;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 therm_limit;
+ u8 i;
+ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
+
+ /* Only support thermal sensors attached to 82599 physical port 0 */
+ if ((hw->mac.type != ixgbe_mac_82599EB) ||
+ (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+ return IXGBE_NOT_IMPLEMENTED;
+
+ hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+ return IXGBE_NOT_IMPLEMENTED;
+
+ hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
+ if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
+ != IXGBE_ETS_TYPE_EMC)
+ return IXGBE_NOT_IMPLEMENTED;
+
+ low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
+ IXGBE_ETS_LTHRES_DELTA_SHIFT);
+ num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+
+ for (i = 0; i < num_sensors; i++) {
+ hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
+ sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+ IXGBE_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+ IXGBE_ETS_DATA_LOC_SHIFT);
+ therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
+
+ hw->phy.ops.write_i2c_byte(hw,
+ ixgbe_emc_therm_limit[sensor_index],
+ IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
+
+ if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
+ data->sensor[i].location = sensor_location;
+ data->sensor[i].caution_thresh = therm_limit;
+ data->sensor[i].max_op_thresh = therm_limit -
+ low_thresh_delta;
+ }
+ }
+ return status;
+}
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h
new file mode 100755
index 00000000..9bd6f534
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h
@@ -0,0 +1,140 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_COMMON_H_
+#define _IXGBE_COMMON_H_
+
+#include "ixgbe_type.h"
+
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data);
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count,
+ ixgbe_mc_addr_itr func, bool clear);
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+
+s32 ixgbe_validate_mac_addr(u8 *mac_addr);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed);
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
+
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix);
+
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy);
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 ver);
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define IXGBE_EMC_INTERNAL_DATA 0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
+#define IXGBE_EMC_DIODE1_DATA 0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
+#define IXGBE_EMC_DIODE2_DATA 0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
+#define IXGBE_EMC_DIODE3_DATA 0x2A
+#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
+
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+#endif /* _IXGBE_COMMON_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h
new file mode 100755
index 00000000..a6690451
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_DCB_H_
+#define _IXGBE_DCB_H_
+
+
+#include "ixgbe_type.h"
+
+/* DCB defines */
+/* DCB credit calculation defines */
+#define IXGBE_DCB_CREDIT_QUANTUM 64
+#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */
+#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/
+#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL)
+
+/* 513 for 32KB TSO packet */
+#define IXGBE_DCB_MIN_TSO_CREDIT \
+ ((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1)
+
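For illustration only (not part of this commit): the 513 noted above follows directly from the credit quantum, and since only integer literals are involved it can be checked at preprocessing time.

/* (32 * 1024) / 64 = 512 credits; the define adds one, giving 513 */
#if IXGBE_DCB_MIN_TSO_CREDIT != 513
#error "unexpected minimum TSO credit"
#endif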
+/* DCB configuration defines */
+#define IXGBE_DCB_MAX_USER_PRIORITY 8
+#define IXGBE_DCB_MAX_BW_GROUP 8
+#define IXGBE_DCB_BW_PERCENT 100
+
+#define IXGBE_DCB_TX_CONFIG 0
+#define IXGBE_DCB_RX_CONFIG 1
+
+/* DCB capability defines */
+#define IXGBE_DCB_PG_SUPPORT 0x00000001
+#define IXGBE_DCB_PFC_SUPPORT 0x00000002
+#define IXGBE_DCB_BCN_SUPPORT 0x00000004
+#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008
+#define IXGBE_DCB_GSP_SUPPORT 0x00000010
+
+struct ixgbe_dcb_support {
+ u32 capabilities; /* DCB capabilities */
+
+ /* Each bit represents a number of TCs configurable in the hw.
+ * If 8 traffic classes can be configured, the value is 0x80. */
+ u8 traffic_classes;
+ u8 pfc_traffic_classes;
+};
+
+enum ixgbe_dcb_tsa {
+ ixgbe_dcb_tsa_ets = 0,
+ ixgbe_dcb_tsa_group_strict_cee,
+ ixgbe_dcb_tsa_strict
+};
+
+/* Traffic class bandwidth allocation per direction */
+struct ixgbe_dcb_tc_path {
+ u8 bwg_id; /* Bandwidth Group (BWG) ID */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 link_percent; /* % of link bandwidth */
+ u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
+ u16 data_credits_refill; /* Credit refill amount in 64B granularity */
+ u16 data_credits_max; /* Max credits for a configured packet buffer
+ * in 64B granularity. */
+ enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
+};
+
+enum ixgbe_dcb_pfc {
+ ixgbe_dcb_pfc_disabled = 0,
+ ixgbe_dcb_pfc_enabled,
+ ixgbe_dcb_pfc_enabled_txonly,
+ ixgbe_dcb_pfc_enabled_rxonly
+};
+
+/* Traffic class configuration */
+struct ixgbe_dcb_tc_config {
+ struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
+ enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */
+
+ u16 desc_credits_max; /* For Tx Descriptor arbitration */
+ u8 tc; /* Traffic class (TC) */
+};
+
+enum ixgbe_dcb_pba {
+ /* PBA[0-7] each use 64KB FIFO */
+ ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
+ /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
+ ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
+};
+
+struct ixgbe_dcb_num_tcs {
+ u8 pg_tcs;
+ u8 pfc_tcs;
+};
+
+struct ixgbe_dcb_config {
+ struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ struct ixgbe_dcb_support support;
+ struct ixgbe_dcb_num_tcs num_tcs;
+ u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */
+ bool pfc_mode_enable;
+ bool round_robin_enable;
+
+ enum ixgbe_dcb_pba rx_pba_cfg;
+
+ u32 dcb_cfg_version; /* Not used...OS-specific? */
+ u32 link_speed; /* For bandwidth allocation validation purpose */
+ bool vt_mode;
+};
+
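For illustration only (not part of this commit): a minimal sketch of seeding an ixgbe_dcb_config before handing it to the CEE helpers below, assuming memset and IXGBE_DCB_MAX_TRAFFIC_CLASS are available via the included headers.

static void example_init_dcb_config(struct ixgbe_dcb_config *cfg)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->rx_pba_cfg = ixgbe_dcb_pba_equal;	/* equal 64KB Rx buffers */
	cfg->pfc_mode_enable = false;
	cfg->num_tcs.pg_tcs = 8;
	cfg->num_tcs.pfc_tcs = 8;
}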
+/* DCB driver APIs */
+
+/* DCB rule checking */
+s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *);
+
+/* DCB credits calculation */
+s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int);
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *, u32, u8);
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *);
+s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+
+/* DCB unpack routines */
+void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *);
+void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *);
+
+/* DCB initialization */
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *);
+s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+#endif /* _IXGBE_DCB_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c
new file mode 100755
index 00000000..11472bd3
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c
@@ -0,0 +1,2901 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbe */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#ifdef SIOCETHTOOL
+#include <asm/uaccess.h>
+
+#include "ixgbe.h"
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+#ifdef ETHTOOL_OPS_COMPAT
+#include "kcompat_ethtool.c"
+#endif
+#ifdef ETHTOOL_GSTATS
+struct ixgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define IXGBE_NETDEV_STAT(_net_stat) { \
+ .stat_string = #_net_stat, \
+ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+ .stat_offset = offsetof(struct net_device_stats, _net_stat) \
+}
+static const struct ixgbe_stats ixgbe_gstrings_net_stats[] = {
+ IXGBE_NETDEV_STAT(rx_packets),
+ IXGBE_NETDEV_STAT(tx_packets),
+ IXGBE_NETDEV_STAT(rx_bytes),
+ IXGBE_NETDEV_STAT(tx_bytes),
+ IXGBE_NETDEV_STAT(rx_errors),
+ IXGBE_NETDEV_STAT(tx_errors),
+ IXGBE_NETDEV_STAT(rx_dropped),
+ IXGBE_NETDEV_STAT(tx_dropped),
+ IXGBE_NETDEV_STAT(multicast),
+ IXGBE_NETDEV_STAT(collisions),
+ IXGBE_NETDEV_STAT(rx_over_errors),
+ IXGBE_NETDEV_STAT(rx_crc_errors),
+ IXGBE_NETDEV_STAT(rx_frame_errors),
+ IXGBE_NETDEV_STAT(rx_fifo_errors),
+ IXGBE_NETDEV_STAT(rx_missed_errors),
+ IXGBE_NETDEV_STAT(tx_aborted_errors),
+ IXGBE_NETDEV_STAT(tx_carrier_errors),
+ IXGBE_NETDEV_STAT(tx_fifo_errors),
+ IXGBE_NETDEV_STAT(tx_heartbeat_errors),
+};
+
+#define IXGBE_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbe_adapter, _stat), \
+ .stat_offset = offsetof(struct ixgbe_adapter, _stat) \
+}
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+ IXGBE_STAT("rx_pkts_nic", stats.gprc),
+ IXGBE_STAT("tx_pkts_nic", stats.gptc),
+ IXGBE_STAT("rx_bytes_nic", stats.gorc),
+ IXGBE_STAT("tx_bytes_nic", stats.gotc),
+ IXGBE_STAT("lsc_int", lsc_int),
+ IXGBE_STAT("tx_busy", tx_busy),
+ IXGBE_STAT("non_eop_descs", non_eop_descs),
+#ifndef CONFIG_IXGBE_NAPI
+ IXGBE_STAT("rx_dropped_backlog", rx_dropped_backlog),
+#endif
+ IXGBE_STAT("broadcast", stats.bprc),
+ IXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]) ,
+ IXGBE_STAT("tx_timeout_count", tx_timeout_count),
+ IXGBE_STAT("tx_restart_queue", restart_queue),
+ IXGBE_STAT("rx_long_length_errors", stats.roc),
+ IXGBE_STAT("rx_short_length_errors", stats.ruc),
+ IXGBE_STAT("tx_flow_control_xon", stats.lxontxc),
+ IXGBE_STAT("rx_flow_control_xon", stats.lxonrxc),
+ IXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc),
+ IXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc),
+ IXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+ IXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
+ IXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+#ifndef IXGBE_NO_LRO
+ IXGBE_STAT("lro_aggregated", lro_stats.coal),
+ IXGBE_STAT("lro_flushed", lro_stats.flushed),
+#endif /* IXGBE_NO_LRO */
+ IXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources),
+ IXGBE_STAT("hw_rsc_aggregated", rsc_total_count),
+ IXGBE_STAT("hw_rsc_flushed", rsc_total_flush),
+#ifdef HAVE_TX_MQ
+ IXGBE_STAT("fdir_match", stats.fdirmatch),
+ IXGBE_STAT("fdir_miss", stats.fdirmiss),
+ IXGBE_STAT("fdir_overflow", fdir_overflow),
+#endif /* HAVE_TX_MQ */
+#ifdef IXGBE_FCOE
+ IXGBE_STAT("fcoe_bad_fccrc", stats.fccrc),
+ IXGBE_STAT("fcoe_last_errors", stats.fclast),
+ IXGBE_STAT("rx_fcoe_dropped", stats.fcoerpdc),
+ IXGBE_STAT("rx_fcoe_packets", stats.fcoeprc),
+ IXGBE_STAT("rx_fcoe_dwords", stats.fcoedwrc),
+ IXGBE_STAT("fcoe_noddp", stats.fcoe_noddp),
+ IXGBE_STAT("fcoe_noddp_ext_buff", stats.fcoe_noddp_ext_buff),
+ IXGBE_STAT("tx_fcoe_packets", stats.fcoeptc),
+ IXGBE_STAT("tx_fcoe_dwords", stats.fcoedwtc),
+#endif /* IXGBE_FCOE */
+ IXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+ IXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ IXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ IXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+};
+
+#define IXGBE_QUEUE_STATS_LEN \
+ ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
+ ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+ (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBE_NETDEV_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_net_stats)
+#define IXGBE_PB_STATS_LEN ( \
+ (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
+ IXGBE_FLAG_DCB_ENABLED) ? \
+ (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+ / sizeof(u64) : 0)
+#define IXGBE_VF_STATS_LEN \
+ ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_vfs) * \
+ (sizeof(struct vf_stats) / sizeof(u64)))
+#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
+ IXGBE_NETDEV_STATS_LEN + \
+ IXGBE_PB_STATS_LEN + \
+ IXGBE_QUEUE_STATS_LEN + \
+ IXGBE_VF_STATS_LEN)
+
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+int ixgbe_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = 0;
+ bool link_up;
+
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ if ((hw->phy.media_type == ixgbe_media_type_copper) ||
+ (hw->phy.multispeed_fiber)) {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+
+ ecmd->advertising = ADVERTISED_Autoneg;
+ if (hw->phy.autoneg_advertised) {
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ } else {
+ /*
+ * Default advertised modes in case
+ * phy.autoneg_advertised isn't set.
+ */
+ ecmd->advertising |= (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full);
+ if (hw->mac.type == ixgbe_mac_X540)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ }
+
+ if (hw->phy.media_type == ixgbe_media_type_copper) {
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ }
+ } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ /* Set as FIBRE until SERDES defined in kernel */
+ if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
+ || (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ } else {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ }
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ }
+
+#ifdef HAVE_ETHTOOL_SFP_DISPLAY_PORT
+ /* Get PHY type */
+ switch (adapter->hw.phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
+ case ixgbe_phy_cu_unknown:
+ /* Copper 10GBASE-T */
+ ecmd->port = PORT_TP;
+ break;
+ case ixgbe_phy_qt:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case ixgbe_phy_nl:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ switch (adapter->hw.phy.sfp_type) {
+ /* SFP+ devices, further checking needed */
+ case ixgbe_sfp_type_da_cu:
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ ecmd->port = PORT_DA;
+ break;
+ case ixgbe_sfp_type_sr:
+ case ixgbe_sfp_type_lr:
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case ixgbe_sfp_type_not_present:
+ ecmd->port = PORT_NONE;
+ break;
+ case ixgbe_sfp_type_1g_cu_core0:
+ case ixgbe_sfp_type_1g_cu_core1:
+ ecmd->port = PORT_TP;
+ ecmd->supported = SUPPORTED_TP;
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+ break;
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ ecmd->port = PORT_FIBRE;
+ ecmd->supported = SUPPORTED_FIBRE;
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE);
+ break;
+ case ixgbe_sfp_type_unknown:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
+ }
+ break;
+ case ixgbe_phy_xaui:
+ ecmd->port = PORT_NONE;
+ break;
+ case ixgbe_phy_unknown:
+ case ixgbe_phy_generic:
+ case ixgbe_phy_sfp_unsupported:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
+ }
+#endif
+
+ if (!in_interrupt()) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ } else {
+ /*
+ * this case is a special workaround for RHEL5 bonding
+ * that calls this routine from interrupt context
+ */
+ link_speed = adapter->link_speed;
+ link_up = adapter->link_up;
+ }
+
+ if (link_up) {
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ecmd->speed = SPEED_10000;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ecmd->speed = SPEED_1000;
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
+ ecmd->speed = SPEED_100;
+ break;
+ default:
+ break;
+ }
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ return 0;
+}
+
+static int ixgbe_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 advertised, old;
+ s32 err = 0;
+
+ if ((hw->phy.media_type == ixgbe_media_type_copper) ||
+ (hw->phy.multispeed_fiber)) {
+ /*
+ * this function does not support duplex forcing, but can
+ * limit the advertising of the adapter to the specified speed
+ */
+ if (ecmd->autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (ecmd->advertising & ~ecmd->supported)
+ return -EINVAL;
+
+ old = hw->phy.autoneg_advertised;
+ advertised = 0;
+ if (ecmd->advertising & ADVERTISED_10000baseT_Full)
+ advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ if (old == advertised)
+ return err;
+ /* this sets the link speed and restarts auto-neg */
+ hw->mac.autotry_restart = true;
+ err = hw->mac.ops.setup_link(hw, advertised, true, true);
+ if (err) {
+ e_info(probe, "setup link failed with code %d\n", err);
+ hw->mac.ops.setup_link(hw, old, true, true);
+ }
+ }
+ return err;
+}
+
+static void ixgbe_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (hw->fc.disable_fc_autoneg)
+ pause->autoneg = 0;
+ else
+ pause->autoneg = 1;
+
+ if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == ixgbe_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int ixgbe_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fc_info fc = hw->fc;
+
+ /* 82598 does not support link flow control with DCB enabled */
+ if ((hw->mac.type == ixgbe_mac_82598EB) &&
+ (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return -EINVAL;
+
+ fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
+
+ if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
+ fc.requested_mode = ixgbe_fc_full;
+ else if (pause->rx_pause)
+ fc.requested_mode = ixgbe_fc_rx_pause;
+ else if (pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_tx_pause;
+ else
+ fc.requested_mode = ixgbe_fc_none;
+
+ /* if the settings changed, update hw->fc and apply the new configuration */
+ if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
+ hw->fc = fc;
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+
+ return 0;
+}
+
+static u32 ixgbe_get_msglevel(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int ixgbe_get_regs_len(struct net_device *netdev)
+{
+#define IXGBE_REGS_LEN 1129
+ return IXGBE_REGS_LEN * sizeof(u32);
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
+
+
+static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u8 i;
+
+ printk(KERN_DEBUG "ixgbe_get_regs_1\n");
+ memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
+ printk(KERN_DEBUG "ixgbe_get_regs_2 0x%p\n", hw->hw_addr);
+
+ regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ printk(KERN_DEBUG "ixgbe_get_regs_3\n");
+ regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
+ regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
+ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
+ regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
+
+ printk(KERN_DEBUG "ixgbe_get_regs_4\n");
+
+ /* NVM Register */
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
+ regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
+ regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
+ regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
+ regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
+ regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
+ regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
+ regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
+ regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
+
+ /* Interrupt */
+ /* don't read EICR because it can clear interrupt causes; instead
+ * read EICS, which is a shadow register and doesn't clear EICR */
+ regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
+ regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
+ regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
+ regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
+ regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
+ regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
+ regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
+ regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
+ regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
+ regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
+ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
+ regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
+
+ /* Flow Control */
+ regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
+ regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
+ regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
+ regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
+ regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
+ for (i = 0; i < 8; i++) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ regs_buff[35 + i] = IXGBE_READ_REG(hw,
+ IXGBE_FCRTL_82599(i));
+ regs_buff[43 + i] = IXGBE_READ_REG(hw,
+ IXGBE_FCRTH_82599(i));
+ break;
+ default:
+ break;
+ }
+ }
+ regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
+ regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
+
+ /* Receive DMA */
+ for (i = 0; i < 64; i++)
+ regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
+ for (i = 0; i < 64; i++)
+ regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
+ for (i = 0; i < 64; i++)
+ regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
+ for (i = 0; i < 64; i++)
+ regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
+ for (i = 0; i < 64; i++)
+ regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
+ for (i = 0; i < 64; i++)
+ regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ for (i = 0; i < 8; i++)
+ regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
+
+ /* Receive */
+ regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ for (i = 0; i < 16; i++)
+ regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+ regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
+ regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
+ regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
+ for (i = 0; i < 8; i++)
+ regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
+ regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
+
+ /* Transmit */
+ for (i = 0; i < 32; i++)
+ regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
+ regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
+ for (i = 0; i < 16; i++)
+ regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+ regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
+ for (i = 0; i < 8; i++)
+ regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
+ regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
+
+ /* Wake Up */
+ regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
+ regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
+ regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
+ regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
+ regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
+ regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
+ regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
+ regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
+ regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
+
+ /* DCB */
+ regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+ regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
+ regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+ for (i = 0; i < 8; i++)
+ regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+
+ /* Statistics */
+ regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
+ regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
+ regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
+ regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
+ for (i = 0; i < 8; i++)
+ regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
+ regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
+ regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
+ regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
+ regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
+ regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
+ regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
+ regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
+ for (i = 0; i < 8; i++)
+ regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
+ for (i = 0; i < 8; i++)
+ regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
+ for (i = 0; i < 8; i++)
+ regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
+ for (i = 0; i < 8; i++)
+ regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
+ regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
+ regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
+ regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
+ regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
+ regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
+ regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
+ regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
+ regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
+ regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
+ regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
+ regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
+ regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
+ for (i = 0; i < 8; i++)
+ regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
+ regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
+ regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
+ regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
+ regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
+ regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
+ regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
+ regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
+ regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
+ regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
+ regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
+ regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
+ regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
+ regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
+ regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
+ regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
+ regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
+ regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
+ regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
+ regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
+ for (i = 0; i < 16; i++)
+ regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
+ for (i = 0; i < 16; i++)
+ regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
+ for (i = 0; i < 16; i++)
+ regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
+ for (i = 0; i < 16; i++)
+ regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
+
+ /* MAC */
+ regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
+ regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+ regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
+ regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
+ regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+ regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
+ regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
+ regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
+ regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
+ regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
+ regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
+ regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
+ regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
+ regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
+ regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
+ regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
+ regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
+ regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
+ regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
+ regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
+ regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
+ regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
+ regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
+ regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+
+ /* Diagnostic */
+ regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
+ for (i = 0; i < 8; i++)
+ regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
+ regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
+ for (i = 0; i < 4; i++)
+ regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
+ regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
+ regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
+ for (i = 0; i < 8; i++)
+ regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
+ regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
+ for (i = 0; i < 4; i++)
+ regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
+ regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
+ regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
+ regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
+ regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
+ regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
+ regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
+ regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
+ regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
+ regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
+ regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
+ regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
+ for (i = 0; i < 8; i++)
+ regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
+ regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
+ regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
+ regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
+ regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
+ regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
+ regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
+ regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
+ regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
+ regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
+
+ /* 82599 X540 specific registers */
+ regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+}
+
+static int ixgbe_get_eeprom_len(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ return adapter->hw.eeprom.word_size * 2;
+}
+
+static int ixgbe_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ int first_word, last_word, eeprom_len;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_len = last_word - first_word + 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ret_val = ixgbe_read_eeprom_buffer(hw, first_word, eeprom_len,
+ eeprom_buff);
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < eeprom_len; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int ixgbe_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word, ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EINVAL;
+
+ max_len = hw->eeprom.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /*
+ * need read/modify/write of first changed EEPROM word
+ * only the second byte of the word is being modified
+ */
+ ret_val = ixgbe_read_eeprom(hw, first_word, &eeprom_buff[0]);
+ if (ret_val)
+ goto err;
+
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+ /*
+ * need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
+ ret_val = ixgbe_read_eeprom(hw, last_word,
+ &eeprom_buff[last_word - first_word]);
+ if (ret_val)
+ goto err;
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ cpu_to_le16s(&eeprom_buff[i]);
+
+ ret_val = ixgbe_write_eeprom_buffer(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+
+ /* Update the checksum */
+ if (ret_val == 0)
+ ixgbe_update_eeprom_checksum(hw);
+
+err:
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
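For illustration only (not part of this commit): the word-boundary bookkeeping used above, worked through for an assumed request of offset = 3 and len = 4 bytes.

static void example_eeprom_word_bounds(void)
{
	int offset = 3, len = 4;			/* assumed request */
	int first_word = offset >> 1;			/* = 1 */
	int last_word = (offset + len - 1) >> 1;	/* = 3 */
	int rmw_first = offset & 1;			/* odd start: RMW word 1 */
	int rmw_last = (offset + len) & 1;		/* odd end: RMW word 3 */

	(void)first_word;
	(void)last_word;
	(void)rmw_first;
	(void)rmw_last;
}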
+static void ixgbe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+
+ strlcpy(drvinfo->version, ixgbe_driver_version,
+ sizeof(drvinfo->version));
+
+ strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+ sizeof(drvinfo->fw_version));
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+
+ drvinfo->n_stats = IXGBE_STATS_LEN;
+ drvinfo->testinfo_len = IXGBE_TEST_LEN;
+ drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
+}
+
+static void ixgbe_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ ring->rx_max_pending = IXGBE_MAX_RXD;
+ ring->tx_max_pending = IXGBE_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int ixgbe_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_ring *tx_ring = NULL, *rx_ring = NULL;
+ u32 new_rx_count, new_tx_count;
+ int i, err = 0;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ IXGBE_MIN_TXD, IXGBE_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ IXGBE_MIN_RXD, IXGBE_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count))
+ return 0;
+
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
+
+ /* alloc updated Tx resources */
+ if (new_tx_count != adapter->tx_ring_count) {
+ tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ /* clone ring and setup updated count */
+ tx_ring[i] = *adapter->tx_ring[i];
+ tx_ring[i].count = new_tx_count;
+ err = ixgbe_setup_tx_resources(&tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbe_free_tx_resources(&tx_ring[i]);
+ }
+
+ vfree(tx_ring);
+ tx_ring = NULL;
+
+ goto clear_reset;
+ }
+ }
+ }
+
+ /* alloc updated Rx resources */
+ if (new_rx_count != adapter->rx_ring_count) {
+ rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
+ if (!rx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ /* clone ring and setup updated count */
+ rx_ring[i] = *adapter->rx_ring[i];
+ rx_ring[i].count = new_rx_count;
+ err = ixgbe_setup_rx_resources(&rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbe_free_rx_resources(&rx_ring[i]);
+ }
+
+ vfree(rx_ring);
+ rx_ring = NULL;
+
+ goto clear_reset;
+ }
+ }
+ }
+
+ /* bring interface down to prepare for update */
+ ixgbe_down(adapter);
+
+ /* Tx */
+ if (tx_ring) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
+ *adapter->tx_ring[i] = tx_ring[i];
+ }
+ adapter->tx_ring_count = new_tx_count;
+
+ vfree(tx_ring);
+ tx_ring = NULL;
+ }
+
+ /* Rx */
+ if (rx_ring) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
+ *adapter->rx_ring[i] = rx_ring[i];
+ }
+ adapter->rx_ring_count = new_rx_count;
+
+ vfree(rx_ring);
+ rx_ring = NULL;
+ }
+
+ /* restore interface using new values */
+ ixgbe_up(adapter);
+
+clear_reset:
+ /* free Tx resources if Rx error is encountered */
+ if (tx_ring) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbe_free_tx_resources(&tx_ring[i]);
+ vfree(tx_ring);
+ }
+
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+ return err;
+}
+
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+static int ixgbe_get_stats_count(struct net_device *netdev)
+{
+ return IXGBE_STATS_LEN;
+}
+
+#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
+static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return IXGBE_TEST_LEN;
+ case ETH_SS_STATS:
+ return IXGBE_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+static void ixgbe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats *net_stats = &netdev->stats;
+#else
+ struct net_device_stats *net_stats = &adapter->net_stats;
+#endif
+ u64 *queue_stat;
+ int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
+ int i, j, k;
+ char *p;
+
+ printk(KERN_DEBUG "ixgbe_stats 0\n");
+ ixgbe_update_stats(adapter);
+ printk(KERN_DEBUG "ixgbe_stats 1\n");
+
+ for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) {
+ p = (char *)net_stats + ixgbe_gstrings_net_stats[i].stat_offset;
+ data[i] = (ixgbe_gstrings_net_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < IXGBE_GLOBAL_STATS_LEN; j++, i++) {
+ p = (char *)adapter + ixgbe_gstrings_stats[j].stat_offset;
+ data[i] = (ixgbe_gstrings_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ printk(KERN_DEBUG "ixgbe_stats 2\n");
+#ifdef NO_VNIC
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
+ for (k = 0; k < stat_count; k++)
+ data[i + k] = queue_stat[k];
+ i += k;
+ }
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
+ for (k = 0; k < stat_count; k++)
+ data[i + k] = queue_stat[k];
+ i += k;
+ }
+ printk(KERN_DEBUG "ixgbe_stats 3\n");
+#endif
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxontxc[j];
+ data[i++] = adapter->stats.pxofftxc[j];
+ }
+ for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxonrxc[j];
+ data[i++] = adapter->stats.pxoffrxc[j];
+ }
+ }
+ printk(KERN_DEBUG "ixgbe_stats 4\n");
+ stat_count = sizeof(struct vf_stats) / sizeof(u64);
+ for (j = 0; j < adapter->num_vfs; j++) {
+ queue_stat = (u64 *)&adapter->vfinfo[j].vfstats;
+ for (k = 0; k < stat_count; k++)
+ data[i + k] = queue_stat[k];
+ queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats;
+ for (k = 0; k < stat_count; k++)
+ data[i + k] += queue_stat[k];
+ i += k;
+ }
+}
+
+static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *ixgbe_gstrings_test,
+ IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) {
+ memcpy(p, ixgbe_gstrings_net_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+ sprintf(p, "tx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
+ sprintf(p, "rx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ for (i = 0; i < adapter->num_vfs; i++) {
+ sprintf(p, "VF %d Rx Packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "VF %d Rx Bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "VF %d Tx Packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "VF %d Tx Bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "VF %d MC Packets", i);
+ p += ETH_GSTRING_LEN;
+ }
+ /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+}
+
+static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link_up;
+ u32 link_speed = 0;
+ *data = 0;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+ if (link_up)
+ return *data;
+ else
+ *data = 1;
+ return *data;
+}
+
+/* ethtool register test data */
+struct ixgbe_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default 82599 register test */
+static struct ixgbe_reg_test reg_test_82599[] = {
+ { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+ { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+ { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
+ { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* default 82598 register test */
+static struct ixgbe_reg_test reg_test_82598[] = {
+ { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+ { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* Enable all four RX queues before testing. */
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ /* RDH is read-only for 82598, only test RDT. */
+ { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
+ { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
+ { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
+ { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
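+/* Register pattern test helper (descriptive comment added for clarity):
+ * write each test pattern (masked by W) to register R, read it back, and
+ * verify the value against (pattern & W & M); the original register value
+ * is restored either way, and on mismatch *data records the failing
+ * register offset and the test returns 1.
+ */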
+#define REG_PATTERN_TEST(R, M, W) \
+{ \
+ u32 pat, val, before; \
+ const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if (val != (_test[pat] & W & M)) { \
+ e_err(drv, "pattern test reg %04X failed: got " \
+ "0x%08X expected 0x%08X\n", \
+ R, val, (_test[pat] & W & M)); \
+ *data = R; \
+ writel(before, adapter->hw.hw_addr + R); \
+ return 1; \
+ } \
+ writel(before, adapter->hw.hw_addr + R); \
+ } \
+}
+
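+/* Set/check test helper (descriptive comment added for clarity): write
+ * (W & M) to register R, read it back, and verify the masked value matches;
+ * the original register value is restored, and on mismatch *data records
+ * the failing register offset.
+ */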
+#define REG_SET_AND_CHECK(R, M, W) \
+{ \
+ u32 val, before; \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((W & M), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if ((W & M) != (val & M)) { \
+ e_err(drv, "set/check reg %04X test failed: got 0x%08X " \
+ "expected 0x%08X\n", R, (val & M), (W & M)); \
+ *data = R; \
+ writel(before, (adapter->hw.hw_addr + R)); \
+ return 1; \
+ } \
+ writel(before, (adapter->hw.hw_addr + R)); \
+}
+
+static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct ixgbe_reg_test *test;
+ u32 value, status_before, status_after;
+ u32 i, toggle;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ toggle = 0x7FFFF3FF;
+ test = reg_test_82598;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ toggle = 0x7FFFF30F;
+ test = reg_test_82599;
+ break;
+ default:
+ *data = 1;
+ return 1;
+ break;
+ }
+
+ /*
+ * Because the status register is such a special case,
+ * we handle it separately from the rest of the register
+ * tests. Some bits are read-only, some toggle, and some
+ * are writeable on newer MACs.
+ */
+ status_before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
+ value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
+ status_after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
+ if (value != status_after) {
+ e_err(drv, "failed STATUS register test got: "
+ "0x%08X expected: 0x%08X\n", status_after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, status_before);
+
+ /*
+ * Perform the remainder of the register test, looping through
+ * the test table until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * 0x40));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return 0;
+}
+
+static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL))
+ *data = 1;
+ else
+ *data = 0;
+ return *data;
+}
+
+static irqreturn_t ixgbe_test_intr(int irq, void *data)
+{
+ struct net_device *netdev = (struct net_device *) data;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
+
+ return IRQ_HANDLED;
+}
+
+static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 mask, i = 0, shared_int = true;
+ u32 irq = adapter->pdev->irq;
+
+ *data = 0;
+
+ /* Hook up test interrupt handler just for this test */
+ if (adapter->msix_entries) {
+ /* NOTE: we don't test MSI-X interrupts here, yet */
+ return 0;
+ } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+ shared_int = false;
+ if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
+ netdev)) {
+ *data = 1;
+ return -1;
+ }
+ } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
+ netdev->name, netdev)) {
+ shared_int = false;
+ } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
+ netdev->name, netdev)) {
+ *data = 1;
+ return -1;
+ }
+ e_info(hw, "testing %s interrupt\n",
+ (shared_int ? "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ usleep_range(10000, 20000);
+
+ /* Test each interrupt */
+ for (; i < 10; i++) {
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (!shared_int) {
+ /*
+ * Disable the interrupts to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /*
+ * Enable the interrupt to be reported in the cause
+ * register and then force the same interrupt and see
+ * if one gets posted. If an interrupt was not posted
+ * to the bus, the test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ usleep_range(10000, 20000);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+ /*
+ * Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ usleep_range(10000, 20000);
+
+ /* Unhook test interrupt handler */
+ free_irq(irq, netdev);
+
+ return *data;
+}
+
+
+
+static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg_data;
+
+ /* X540 needs to set the MACC.FLU bit to force link up */
+ if (adapter->hw.mac.type == ixgbe_mac_X540) {
+ reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
+ reg_data |= IXGBE_MACC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
+ }
+
+ /* right now we only support MAC loopback in the driver */
+ reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ /* Setup MAC loopback */
+ reg_data |= IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
+
+ reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
+
+ reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ reg_data &= ~IXGBE_AUTOC_LMS_MASK;
+ reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
+ IXGBE_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
+
+ /* Disable Atlas Tx lanes; re-enabled in reset path */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ u8 atlas;
+
+ ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
+ ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
+
+ ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+ ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
+
+ ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+ ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
+
+ ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+ ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
+ }
+
+ return 0;
+}
+
+static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
+{
+ u32 reg_data;
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ reg_data &= ~IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+}
+
+
+
+
+
+
+static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+
+ //*data = ixgbe_setup_desc_rings(adapter);
+ //if (*data)
+ // goto out;
+ *data = ixgbe_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+ //*data = ixgbe_run_loopback_test(adapter);
+ ixgbe_loopback_cleanup(adapter);
+
+err_loopback:
+ //ixgbe_free_desc_rings(adapter);
+//out:
+ return *data;
+
+}
+
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+static int ixgbe_diag_test_count(struct net_device *netdev)
+{
+ return IXGBE_TEST_LEN;
+}
+
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+static void ixgbe_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IXGBE_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ e_info(hw, "offline testing starting\n");
+
+ /* Link test is performed before the hardware reset so autoneg
+ * doesn't interfere with the test result */
+ if (ixgbe_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ int i;
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (adapter->vfinfo[i].clear_to_send) {
+ e_warn(drv, "Please take active VFS "
+ "offline and restart the "
+ "adapter before running NIC "
+ "diagnostics\n");
+ data[0] = 1;
+ data[1] = 1;
+ data[2] = 1;
+ data[3] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__IXGBE_TESTING,
+ &adapter->state);
+ goto skip_ol_tests;
+ }
+ }
+ }
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbe_reset(adapter);
+
+ e_info(hw, "register testing starting\n");
+ if (ixgbe_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+ e_info(hw, "eeprom testing starting\n");
+ if (ixgbe_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+ e_info(hw, "interrupt testing starting\n");
+ if (ixgbe_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* If SRIOV or VMDq is enabled then skip MAC
+ * loopback diagnostic. */
+ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED)) {
+ e_info(hw, "skip MAC loopback diagnostic in VT mode\n");
+ data[3] = 0;
+ goto skip_loopback;
+ }
+
+ ixgbe_reset(adapter);
+ e_info(hw, "loopback testing starting\n");
+ if (ixgbe_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+skip_loopback:
+ ixgbe_reset(adapter);
+
+ clear_bit(__IXGBE_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ e_info(hw, "online testing starting\n");
+ /* Online tests */
+ if (ixgbe_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ clear_bit(__IXGBE_TESTING, &adapter->state);
+ }
+skip_ol_tests:
+ msleep_interruptible(4 * 1000);
+}
+
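+/* Returns 0 when this device/subdevice combination supports Wake-on-LAN;
+ * returns 1 (clearing wol->supported where appropriate) when WOL must be
+ * excluded for this part.
+ */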
+static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
+ struct ethtool_wolinfo *wol)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int retval = 1;
+ u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
+
+ /* WOL not supported except for the following */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_SFP:
+ /* Only these subdevices support WOL */
+ switch (hw->subsystem_device_id) {
+ case IXGBE_SUBDEV_ID_82599_560FLR:
+ /* only support first port */
+ if (hw->bus.func != 0) {
+ wol->supported = 0;
+ break;
+ }
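+ /* fall through */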
+ case IXGBE_SUBDEV_ID_82599_SFP:
+ retval = 0;
+ break;
+ default:
+ wol->supported = 0;
+ break;
+ }
+ break;
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (hw->subsystem_device_id ==
+ IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+ wol->supported = 0;
+ break;
+ }
+ retval = 0;
+ break;
+ case IXGBE_DEV_ID_82599_KX4:
+ retval = 0;
+ break;
+ case IXGBE_DEV_ID_X540T:
+ /* check EEPROM to see if WOL is enabled */
+ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+ ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+ (hw->bus.func == 0))) {
+ retval = 0;
+ break;
+ }
+
+ /* All others not supported */
+ wol->supported = 0;
+ break;
+ default:
+ wol->supported = 0;
+ }
+ return retval;
+}
+
+static void ixgbe_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ if (ixgbe_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return;
+
+ if (adapter->wol & IXGBE_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & IXGBE_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & IXGBE_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & IXGBE_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+
+ if (ixgbe_wol_exclusion(adapter, wol))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= IXGBE_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= IXGBE_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= IXGBE_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= IXGBE_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int ixgbe_nway_reset(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+
+ return 0;
+}
+
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+static int ixgbe_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ return 2;
+
+ case ETHTOOL_ID_ON:
+ hw->mac.ops.led_on(hw, IXGBE_LED_ON);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ hw->mac.ops.led_off(hw, IXGBE_LED_ON);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore LED settings */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
+ break;
+ }
+
+ return 0;
+}
+#else
+static int ixgbe_phys_id(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ u32 i;
+
+ if (!data || data > 300)
+ data = 300;
+
+ for (i = 0; i < (data * 1000); i += 400) {
+ ixgbe_led_on(hw, IXGBE_LED_ON);
+ msleep_interruptible(200);
+ ixgbe_led_off(hw, IXGBE_LED_ON);
+ msleep_interruptible(200);
+ }
+
+ /* Restore LED settings */
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+
+ return 0;
+}
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+
+static int ixgbe_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+#ifndef CONFIG_IXGBE_NAPI
+ ec->rx_max_coalesced_frames_irq = adapter->rx_work_limit;
+#endif /* CONFIG_IXGBE_NAPI */
+ /* only valid if in constant ITR mode */
+ if (adapter->rx_itr_setting <= 1)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+ /* if in mixed tx/rx queues per vector mode, report only rx settings */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ return 0;
+
+ /* only valid if in constant ITR mode */
+ if (adapter->tx_itr_setting <= 1)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+
+ return 0;
+}
+
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+#ifdef NO_VNIC
+static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* nothing to do if LRO or RSC are not enabled */
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
+ !(netdev->features & NETIF_F_LRO))
+ return false;
+
+ /* check the feature flag value and enable RSC if necessary */
+ if (adapter->rx_itr_setting == 1 ||
+ adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ e_info(probe, "rx-usecs value high enough "
+ "to re-enable RSC\n");
+ return true;
+ }
+ /* if interrupt rate is too high then disable RSC */
+ } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+#ifdef IXGBE_NO_LRO
+ e_info(probe, "rx-usecs set too low, disabling RSC\n");
+#else
+ e_info(probe, "rx-usecs set too low, "
+ "falling back to software LRO\n");
+#endif
+ return true;
+ }
+ return false;
+}
+#endif
+
+static int ixgbe_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+#ifdef NO_VNIC
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_q_vector *q_vector;
+ int i;
+ int num_vectors;
+ u16 tx_itr_param, rx_itr_param;
+ bool need_reset = false;
+
+ /* don't accept tx specific changes if we've got mixed RxTx vectors */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
+ && ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ if (ec->tx_max_coalesced_frames_irq)
+ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+
+#ifndef CONFIG_IXGBE_NAPI
+ if (ec->rx_max_coalesced_frames_irq)
+ adapter->rx_work_limit = ec->rx_max_coalesced_frames_irq;
+
+#endif
+ if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
+ (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs > 1)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+
+ if (adapter->rx_itr_setting == 1)
+ rx_itr_param = IXGBE_20K_ITR;
+ else
+ rx_itr_param = adapter->rx_itr_setting;
+
+ if (ec->tx_coalesce_usecs > 1)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (adapter->tx_itr_setting == 1)
+ tx_itr_param = IXGBE_10K_ITR;
+ else
+ tx_itr_param = adapter->tx_itr_setting;
+
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_vectors = 1;
+
+ for (i = 0; i < num_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+ q_vector->rx.work_limit = adapter->rx_work_limit;
+ if (q_vector->tx.count && !q_vector->rx.count)
+ /* tx only */
+ q_vector->itr = tx_itr_param;
+ else
+ /* rx only or mixed */
+ q_vector->itr = rx_itr_param;
+ ixgbe_write_eitr(q_vector);
+ }
+
+ /*
+ * do the reset here at the end to make sure the EITR==0 case is
+ * handled correctly w.r.t. stopping Tx and changing TXDCTL.WTHRESH;
+ * the reset also locks in the RSC enable/disable, which requires it
+ */
+ if (need_reset)
+ ixgbe_do_reset(netdev);
+#endif
+ return 0;
+}
+
+#ifndef HAVE_NDO_SET_FEATURES
+static u32 ixgbe_get_rx_csum(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_ring *ring = adapter->rx_ring[0];
+ return test_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
+}
+
+static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ if (data)
+ set_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
+ else
+ clear_bit(__IXGBE_RX_CSUM_ENABLED, &ring->state);
+ }
+
+ /* LRO and RSC both depend on RX checksum to function */
+ if (!data && (netdev->features & NETIF_F_LRO)) {
+ netdev->features &= ~NETIF_F_LRO;
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ ixgbe_do_reset(netdev);
+ }
+ }
+
+ return 0;
+}
+
+static u32 ixgbe_get_tx_csum(struct net_device *netdev)
+{
+ return (netdev->features & NETIF_F_IP_CSUM) != 0;
+}
+
+static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ u32 feature_list;
+
+#ifdef NETIF_F_IPV6_CSUM
+ feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+#else
+ feature_list = NETIF_F_IP_CSUM;
+#endif
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ feature_list |= NETIF_F_SCTP_CSUM;
+ break;
+ default:
+ break;
+ }
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
+
+ return 0;
+}
+
+#ifdef NETIF_F_TSO
+static int ixgbe_set_tso(struct net_device *netdev, u32 data)
+{
+ if (data) {
+ netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+ netdev->features |= NETIF_F_TSO6;
+#endif
+ } else {
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+#ifdef NETIF_F_HW_VLAN_TX
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ /* disable TSO on all VLANs if they're present */
+ if (adapter->vlgrp) {
+ int i;
+ struct net_device *v_netdev;
+ for (i = 0; i < VLAN_N_VID; i++) {
+ v_netdev =
+ vlan_group_get_device(adapter->vlgrp, i);
+ if (v_netdev) {
+ v_netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+ v_netdev->features &= ~NETIF_F_TSO6;
+#endif
+ vlan_group_set_device(adapter->vlgrp, i,
+ v_netdev);
+ }
+ }
+ }
+#endif
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+ netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+ netdev->features &= ~NETIF_F_TSO6;
+#endif
+ }
+ return 0;
+}
+
+#endif /* NETIF_F_TSO */
+#ifdef ETHTOOL_GFLAGS
+static int ixgbe_set_flags(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
+ u32 changed = netdev->features ^ data;
+ bool need_reset = false;
+ int rc;
+
+#ifndef HAVE_VLAN_RX_REGISTER
+ if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+ !(data & ETH_FLAG_RXVLAN))
+ return -EINVAL;
+
+#endif
+#ifdef NETIF_F_RXHASH
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
+ supported_flags |= ETH_FLAG_RXHASH;
+#endif
+#ifdef IXGBE_NO_LRO
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+#endif
+ supported_flags |= ETH_FLAG_LRO;
+
+#ifdef ETHTOOL_GRXRINGS
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ supported_flags |= ETH_FLAG_NTUPLE;
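+ /* fall through */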
+ default:
+ break;
+ }
+
+#endif
+ rc = ethtool_op_set_flags(netdev, data, supported_flags);
+ if (rc)
+ return rc;
+
+#ifndef HAVE_VLAN_RX_REGISTER
+ if (changed & ETH_FLAG_RXVLAN)
+ ixgbe_vlan_mode(netdev, netdev->features);
+
+#endif
+ /* if state changes we need to update adapter->flags and reset */
+ if (!(netdev->features & NETIF_F_LRO)) {
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ need_reset = true;
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ if (adapter->rx_itr_setting == 1 ||
+ adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ need_reset = true;
+ } else if (changed & ETH_FLAG_LRO) {
+#ifdef IXGBE_NO_LRO
+ e_info(probe, "rx-usecs set too low, "
+ "disabling RSC\n");
+#else
+ e_info(probe, "rx-usecs set too low, "
+ "falling back to software LRO\n");
+#endif
+ }
+ }
+
+#ifdef ETHTOOL_GRXRINGS
+ /*
+ * Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if (!(netdev->features & NETIF_F_NTUPLE)) {
+ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+ /* turn off Flow Director, set ATR and reset */
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ need_reset = true;
+ }
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ /* turn off ATR, enable perfect filters and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ need_reset = true;
+ }
+
+#endif /* ETHTOOL_GRXRINGS */
+ if (need_reset)
+ ixgbe_do_reset(netdev);
+
+ return 0;
+}
+
+#endif /* ETHTOOL_GFLAGS */
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GRXRINGS
+static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ union ixgbe_atr_input *mask = &adapter->fdir_mask;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *rule = NULL;
+
+ /* report total rule count */
+ cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->sw_idx)
+ return -EINVAL;
+
+ /* fill out the flow spec entry */
+
+ /* set flow type field */
+ switch (rule->filter.formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ fsp->flow_type = TCP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ fsp->flow_type = UDP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ fsp->flow_type = SCTP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ fsp->flow_type = IP_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
+ fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
+ fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
+ fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
+ fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
+ fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
+ fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
+ fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
+ fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
+ fsp->flow_type |= FLOW_EXT;
+
+ /* record action */
+ if (rule->action == IXGBE_FDIR_DROP_QUEUE)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->action;
+
+ return 0;
+}
+
+static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *rule;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* if RSS is disabled then report no hashing */
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ return 0;
+
+ /* Report default options for RSS on ixgbe */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
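+ /* fall through */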
+ case UDP_V4_FLOW:
+ if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
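+ /* fall through */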
+ case UDP_V6_FLOW:
+ if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+ void *rule_locs)
+#else
+ u32 *rule_locs)
+#endif
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->fdir_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = ixgbe_get_ethtool_fdir_all(adapter, cmd,
+ (u32 *)rule_locs);
+ break;
+ case ETHTOOL_GRXFH:
+ ret = ixgbe_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
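+/* Insert, replace, or (when input is NULL) delete the Flow Director filter
+ * at sw_idx in the driver's sorted filter list, erasing the stale hardware
+ * filter when needed.
+ */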
+static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ixgbe_fdir_filter *input,
+ u16 sw_idx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct hlist_node *node, *node2, *parent;
+ struct ixgbe_fdir_filter *rule;
+ int err = -EINVAL;
+
+ parent = NULL;
+ rule = NULL;
+
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = node;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && (rule->sw_idx == sw_idx)) {
+ if (!input || (rule->filter.formatted.bkt_hash !=
+ input->filter.formatted.bkt_hash)) {
+ err = ixgbe_fdir_erase_perfect_filter_82599(hw,
+ &rule->filter,
+ sw_idx);
+ }
+
+ hlist_del(&rule->fdir_node);
+ kfree(rule);
+ adapter->fdir_filter_count--;
+ }
+
+ /*
+ * If no input was provided this was a delete; err is 0 if a rule was
+ * successfully found and removed from the list, else -EINVAL.
+ */
+ if (!input)
+ return err;
+
+ /* initialize node and set software index */
+ INIT_HLIST_NODE(&input->fdir_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_after(parent, &input->fdir_node);
+ else
+ hlist_add_head(&input->fdir_node,
+ &adapter->fdir_filter_list);
+
+ /* update counts */
+ adapter->fdir_filter_count++;
+
+ return 0;
+}
+
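+/* Translate an ethtool flow spec into the ixgbe ATR flow type; returns 1 on
+ * success, 0 for unsupported flow types.
+ */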
+static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
+ u8 *flow_type)
+{
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case UDP_V4_FLOW:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case SCTP_V4_FLOW:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case IPPROTO_UDP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case IPPROTO_SCTP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case 0:
+ if (!fsp->m_u.usr_ip4_spec.proto) {
+ *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+ break;
+ }
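+ /* fall through */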
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fdir_filter *input;
+ union ixgbe_atr_input mask;
+ int err;
+
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ return -EOPNOTSUPP;
+
+ /*
+ * Don't allow programming if the action is a queue greater than
+ * the number of online Rx queues.
+ */
+ if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+ (fsp->ring_cookie >= adapter->num_rx_queues))
+ return -EINVAL;
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
+ e_err(drv, "Location out of range\n");
+ return -EINVAL;
+ }
+
+ input = kzalloc(sizeof(*input), GFP_ATOMIC);
+ if (!input)
+ return -ENOMEM;
+
+ memset(&mask, 0, sizeof(union ixgbe_atr_input));
+
+ /* set SW index */
+ input->sw_idx = fsp->location;
+
+ /* record flow type */
+ if (!ixgbe_flowspec_to_flow_type(fsp,
+ &input->filter.formatted.flow_type)) {
+ e_err(drv, "Unrecognized flow type\n");
+ goto err_out;
+ }
+
+ mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+
+ if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+ mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+ /* Copy input into formatted structures */
+ input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+
+ if (fsp->flow_type & FLOW_EXT) {
+ input->filter.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->h_ext.data[1]);
+ mask.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->m_ext.data[1]);
+ input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
+ mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
+ input->filter.formatted.flex_bytes =
+ fsp->h_ext.vlan_etype;
+ mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
+ }
+
+ /* determine if we need to drop or route the packet */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ input->action = IXGBE_FDIR_DROP_QUEUE;
+ else
+ input->action = fsp->ring_cookie;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ if (hlist_empty(&adapter->fdir_filter_list)) {
+ /* save mask and program input mask into HW */
+ memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
+ err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+ if (err) {
+ e_err(drv, "Error writing mask\n");
+ goto err_out_w_lock;
+ }
+ } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+ e_err(drv, "Only one mask supported per port\n");
+ goto err_out_w_lock;
+ }
+
+ /* apply mask and compute/store hash */
+ ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+
+ /* program filters to filter memory */
+ err = ixgbe_fdir_write_perfect_filter_82599(hw,
+ &input->filter, input->sw_idx,
+ (input->action == IXGBE_FDIR_DROP_QUEUE) ?
+ IXGBE_FDIR_DROP_QUEUE :
+ adapter->rx_ring[input->action]->reg_idx);
+ if (err)
+ goto err_out_w_lock;
+
+ ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ kfree(input);
+ return err;
+err_out_w_lock:
+ spin_unlock(&adapter->fdir_perfect_lock);
+err_out:
+ kfree(input);
+ return -EINVAL;
+}
+
+static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+ err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, (u16)(fsp->location));
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return err;
+}
+
+#ifdef ETHTOOL_SRXNTUPLE
+/*
+ * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid
+ * a null pointer dereference: those kernels assumed that if the
+ * NETIF_F_NTUPLE flag was defined, this function was present.
+ */
+static int ixgbe_set_rx_ntuple(struct net_device *dev,
+ struct ethtool_rx_ntuple *cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
+ IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags2 = adapter->flags2;
+
+ /*
+ * RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags2 != adapter->flags2) {
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+
+ if ((flags2 & UDP_RSS_FLAGS) &&
+ !(adapter->flags2 & UDP_RSS_FLAGS))
+ e_warn(drv, "enabling UDP RSS: fragmented packets"
+ " may arrive out of order to the stack above\n");
+
+ adapter->flags2 = flags2;
+
+ /* Perform hash on these packet types */
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
+ | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+ | IXGBE_MRQC_RSS_FIELD_IPV6
+ | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
+ IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
+static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_SRXFH:
+ ret = ixgbe_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#endif /* ETHTOOL_GRXRINGS */
+//static
+struct ethtool_ops ixgbe_ethtool_ops = {
+ .get_settings = ixgbe_get_settings,
+ .set_settings = ixgbe_set_settings,
+ .get_drvinfo = ixgbe_get_drvinfo,
+ .get_regs_len = ixgbe_get_regs_len,
+ .get_regs = ixgbe_get_regs,
+ .get_wol = ixgbe_get_wol,
+ .set_wol = ixgbe_set_wol,
+ .nway_reset = ixgbe_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = ixgbe_get_eeprom_len,
+ .get_eeprom = ixgbe_get_eeprom,
+ .set_eeprom = ixgbe_set_eeprom,
+ .get_ringparam = ixgbe_get_ringparam,
+ .set_ringparam = ixgbe_set_ringparam,
+ .get_pauseparam = ixgbe_get_pauseparam,
+ .set_pauseparam = ixgbe_set_pauseparam,
+ .get_msglevel = ixgbe_get_msglevel,
+ .set_msglevel = ixgbe_set_msglevel,
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+ .self_test_count = ixgbe_diag_test_count,
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+ .self_test = ixgbe_diag_test,
+ .get_strings = ixgbe_get_strings,
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+ .set_phys_id = ixgbe_set_phys_id,
+#else
+ .phys_id = ixgbe_phys_id,
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+ .get_stats_count = ixgbe_get_stats_count,
+#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
+ .get_sset_count = ixgbe_get_sset_count,
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+ .get_ethtool_stats = ixgbe_get_ethtool_stats,
+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
+ .get_perm_addr = ethtool_op_get_perm_addr,
+#endif
+ .get_coalesce = ixgbe_get_coalesce,
+ .set_coalesce = ixgbe_set_coalesce,
+#ifndef HAVE_NDO_SET_FEATURES
+ .get_rx_csum = ixgbe_get_rx_csum,
+ .set_rx_csum = ixgbe_set_rx_csum,
+ .get_tx_csum = ixgbe_get_tx_csum,
+ .set_tx_csum = ixgbe_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ixgbe_set_tso,
+#endif
+#ifdef ETHTOOL_GFLAGS
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ixgbe_set_flags,
+#endif
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GRXRINGS
+ .get_rxnfc = ixgbe_get_rxnfc,
+ .set_rxnfc = ixgbe_set_rxnfc,
+#ifdef ETHTOOL_SRXNTUPLE
+ .set_rx_ntuple = ixgbe_set_rx_ntuple,
+#endif
+#endif
+};
+
+void ixgbe_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
+}
+#endif /* SIOCETHTOOL */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h
new file mode 100755
index 00000000..cad28622
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h
@@ -0,0 +1,91 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_FCOE_H
+#define _IXGBE_FCOE_H
+
+#ifdef IXGBE_FCOE
+
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fcoe.h>
+
+/* shift bits within STAT for FCSTAT */
+#define IXGBE_RXDADV_FCSTAT_SHIFT 4
+
+/* ddp user buffer */
+#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */
+#define IXGBE_FCPTR_ALIGN 16
+#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
+#define IXGBE_FCBUFF_4KB 0x0
+#define IXGBE_FCBUFF_8KB 0x1
+#define IXGBE_FCBUFF_16KB 0x2
+#define IXGBE_FCBUFF_64KB 0x3
+#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */
+#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */
+#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */
+
+/* Default traffic class to use for FCoE */
+#define IXGBE_FCOE_DEFTC 3
+
+/* fcerr */
+#define IXGBE_FCERR_BADCRC 0x00100000
+#define IXGBE_FCERR_EOFSOF 0x00200000
+#define IXGBE_FCERR_NOFIRST 0x00300000
+#define IXGBE_FCERR_OOOSEQ 0x00400000
+#define IXGBE_FCERR_NODMA 0x00500000
+#define IXGBE_FCERR_PKTLOST 0x00600000
+
+/* FCoE DDP for target mode */
+#define __IXGBE_FCOE_TARGET 1
+
+struct ixgbe_fcoe_ddp {
+ int len;
+ u32 err;
+ unsigned int sgc;
+ struct scatterlist *sgl;
+ dma_addr_t udp;
+ u64 *udl;
+ struct pci_pool *pool;
+};
+
+struct ixgbe_fcoe {
+ struct pci_pool **pool;
+ atomic_t refcnt;
+ spinlock_t lock;
+ struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+ unsigned char *extra_ddp_buffer;
+ dma_addr_t extra_ddp_buffer_dma;
+ u64 __percpu *pcpu_noddp;
+ u64 __percpu *pcpu_noddp_ext_buff;
+ unsigned long mode;
+ u8 tc;
+ u8 up;
+ u8 up_set;
+};
+#endif /* IXGBE_FCOE */
+
+#endif /* _IXGBE_FCOE_H */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c
new file mode 100755
index 00000000..cb569068
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c
@@ -0,0 +1,2975 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+#ifdef NETIF_F_TSO
+#include <net/checksum.h>
+#ifdef NETIF_F_TSO6
+#include <net/ip6_checksum.h>
+#endif
+#endif
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+
+#include "ixgbe.h"
+
+#undef CONFIG_DCA
+#undef CONFIG_DCA_MODULE
+
+char ixgbe_driver_name[] = "ixgbe";
+static const char ixgbe_driver_string[] =
+ "Intel(R) 10 Gigabit PCI Express Network Driver";
+#define DRV_HW_PERF
+
+#ifndef CONFIG_IXGBE_NAPI
+#define DRIVERNAPI
+#else
+#define DRIVERNAPI "-NAPI"
+#endif
+
+#define FPGA
+
+#define VMDQ_TAG
+
+#define MAJ 3
+#define MIN 9
+#define BUILD 17
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+ __stringify(BUILD) DRIVERNAPI DRV_HW_PERF FPGA VMDQ_TAG
+const char ixgbe_driver_version[] = DRV_VERSION;
+static const char ixgbe_copyright[] =
+ "Copyright (c) 1999-2012 Intel Corporation.";
+
+/* ixgbe_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP)},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP)},
+ /* required last entry */
+ {0, }
+};
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
+ void *p);
+static struct notifier_block dca_notifier = {
+ .notifier_call = ixgbe_notify_dca,
+ .next = NULL,
+ .priority = 0
+};
+
+#endif
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+
+static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ /* Let firmware take over control of h/w */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+ ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+}
+
+#ifdef NO_VNIC
+static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+ ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+}
+#endif
+
+
+static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ int i;
+ u32 data;
+
+ if ((hw->fc.current_mode != ixgbe_fc_full) &&
+ (hw->fc.current_mode != ixgbe_fc_rx_pause))
+ return;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ break;
+ default:
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ }
+ hwstats->lxoffrxc += data;
+
+ /* refill credits (no tx hang) if we received xoff */
+ if (!data)
+ return;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ clear_bit(__IXGBE_HANG_CHECK_ARMED,
+ &adapter->tx_ring[i]->state);
+}
+
+static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u32 xoff[8] = {0};
+ int i;
+ bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
+
+#ifdef HAVE_DCBNL_IEEE
+ if (adapter->ixgbe_ieee_pfc)
+ pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
+
+#endif
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
+ ixgbe_update_xoff_rx_lfc(adapter);
+ return;
+ }
+
+ /* update stats for each tc, only valid with PFC enabled */
+ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ break;
+ default:
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ }
+ hwstats->pxoffrxc[i] += xoff[i];
+ }
+
+ /* disarm tx queues that have received xoff frames */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ u8 tc = tx_ring->dcb_tc;
+
+ if ((tc <= 7) && (xoff[tc]))
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+}
+
+
+
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+
+
+
+#ifdef HAVE_8021P_SUPPORT
+/**
+ * ixgbe_vlan_stripping_disable - helper to disable vlan tag stripping
+ * @adapter: driver data
+ */
+void ixgbe_vlan_stripping_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl;
+ int i;
+
+ /* leave vlan tag stripping enabled for DCB */
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ return;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl &= ~IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ u8 reg_idx = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ vlnctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+#endif
+/**
+ * ixgbe_vlan_stripping_enable - helper to enable vlan tag stripping
+ * @adapter: driver data
+ */
+void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl;
+ int i;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl |= IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ u8 reg_idx = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ vlnctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+#ifdef HAVE_VLAN_RX_REGISTER
+void ixgbe_vlan_mode(struct net_device *netdev, struct vlan_group *grp)
+#else
+void ixgbe_vlan_mode(struct net_device *netdev, u32 features)
+#endif
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+#ifdef HAVE_8021P_SUPPORT
+ bool enable;
+#endif
+#ifdef HAVE_VLAN_RX_REGISTER
+
+ //if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ // ixgbe_irq_disable(adapter);
+
+ adapter->vlgrp = grp;
+
+ //if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ // ixgbe_irq_enable(adapter, true, true);
+#endif
+#ifdef HAVE_8021P_SUPPORT
+#ifdef HAVE_VLAN_RX_REGISTER
+ enable = (grp || (adapter->flags & IXGBE_FLAG_DCB_ENABLED));
+#else
+ enable = !!(features & NETIF_F_HW_VLAN_RX);
+#endif
+ if (enable)
+ /* enable VLAN tag insert/strip */
+ ixgbe_vlan_stripping_enable(adapter);
+ else
+ /* disable VLAN tag insert/strip */
+ ixgbe_vlan_stripping_disable(adapter);
+
+#endif
+}
+
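+/* Descriptive note added for clarity (not part of the original driver
+ * comments): this is the iterator callback handed to
+ * hw->mac.ops.update_mc_addr_list(). Each call returns the current multicast
+ * address, advances *mc_addr_ptr to the next list entry (or NULL at the end
+ * of the list) and reports adapter->num_vfs as the VMDq pool for the entry.
+ */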
+static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
+{
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+ struct netdev_hw_addr *mc_ptr;
+#else
+ struct dev_mc_list *mc_ptr;
+#endif
+ struct ixgbe_adapter *adapter = hw->back;
+ u8 *addr = *mc_addr_ptr;
+
+ *vmdq = adapter->num_vfs;
+
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+ mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]);
+ if (mc_ptr->list.next) {
+ struct netdev_hw_addr *ha;
+
+ ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list);
+ *mc_addr_ptr = ha->addr;
+ }
+#else
+ mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+ if (mc_ptr->next)
+ *mc_addr_ptr = mc_ptr->next->dmi_addr;
+#endif
+ else
+ *mc_addr_ptr = NULL;
+
+ return addr;
+}
+
+/**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+ struct netdev_hw_addr *ha;
+#endif
+ u8 *addr_list = NULL;
+ int addr_count = 0;
+
+ if (!hw->mac.ops.update_mc_addr_list)
+ return -ENOMEM;
+
+ if (!netif_running(netdev))
+ return 0;
+
+
+ hw->mac.ops.update_mc_addr_list(hw, NULL, 0,
+ ixgbe_addr_list_itr, true);
+
+ if (!netdev_mc_empty(netdev)) {
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+ ha = list_first_entry(&netdev->mc.list,
+ struct netdev_hw_addr, list);
+ addr_list = ha->addr;
+#else
+ addr_list = netdev->mc_list->dmi_addr;
+#endif
+ addr_count = netdev_mc_count(netdev);
+
+ hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+ ixgbe_addr_list_itr, false);
+ }
+
+#ifdef CONFIG_PCI_IOV
+ //ixgbe_restore_vf_multicasts(adapter);
+#endif
+ return addr_count;
+}
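+
+/* Usage note added for clarity: callers follow the return convention in the
+ * kernel-doc above -- ixgbe_set_rx_mode() below falls back to multicast
+ * promiscuous mode (FCTRL.MPE/VMOLR.MPE) on a negative return and only sets
+ * VMOLR.ROMPE when at least one address was actually written.
+ */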
+
+
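+/* Note added for clarity: unlike ixgbe_sync_mac_table() below, this variant
+ * reprograms every RAR entry from the software MAC table regardless of the
+ * MODIFIED flag -- in-use entries are written with set_rar() and unused ones
+ * are cleared.
+ */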
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) {
+ hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ } else {
+ hw->mac.ops.clear_rar(hw, i);
+ }
+ }
+}
+
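+/* Note added for clarity: only entries marked IXGBE_MAC_STATE_MODIFIED are
+ * pushed to hardware here, and the flag is cleared afterwards; this is the
+ * incremental counterpart of ixgbe_full_sync_mac_table() above.
+ */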
+void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+ if (adapter->mac_table[i].state &
+ IXGBE_MAC_STATE_IN_USE) {
+ hw->mac.ops.set_rar(hw, i,
+ adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ } else {
+ hw->mac.ops.clear_rar(hw, i);
+ }
+ adapter->mac_table[i].state &=
+ ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+ }
+}
+
+int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, count = 0;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state == 0)
+ count++;
+ }
+ return count;
+}
+
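+/* Note added for clarity: the helper below places @addr in the first free
+ * RAR slot, marks it MODIFIED | IN_USE and syncs it to hardware. It returns
+ * the table index used, 0 for an all-zero address, or -ENOMEM when no free
+ * entry is left.
+ */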
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return 0;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ continue;
+ adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+ IXGBE_MAC_STATE_IN_USE);
+ memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
+ adapter->mac_table[i].queue = queue;
+ ixgbe_sync_mac_table(adapter);
+ return i;
+ }
+ return -ENOMEM;
+}
+
+void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ }
+ ixgbe_sync_mac_table(adapter);
+}
+
+void ixgbe_del_mac_filter_by_index(struct ixgbe_adapter *adapter, int index)
+{
+ adapter->mac_table[index].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[index].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[index].addr, 0, ETH_ALEN);
+ adapter->mac_table[index].queue = 0;
+ ixgbe_sync_mac_table(adapter);
+}
+
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8* addr, u16 queue)
+{
+ /* search table for addr, if found, set to 0 and sync */
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (is_zero_ether_addr(addr))
+ return 0;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+ adapter->mac_table[i].queue == queue) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ ixgbe_sync_mac_table(adapter);
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+#ifdef HAVE_SET_RX_MODE
+/**
+ * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+int ixgbe_write_uc_addr_list(struct ixgbe_adapter *adapter,
+ struct net_device *netdev, unsigned int vfn)
+{
+ int count = 0;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev)) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+ struct netdev_hw_addr *ha;
+#else
+ struct dev_mc_list *ha;
+#endif
+ netdev_for_each_uc_addr(ha, netdev) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+ ixgbe_del_mac_filter(adapter, ha->addr, (u16)vfn);
+ ixgbe_add_mac_filter(adapter, ha->addr, (u16)vfn);
+#else
+ ixgbe_del_mac_filter(adapter, ha->da_addr, (u16)vfn);
+ ixgbe_add_mac_filter(adapter, ha->da_addr, (u16)vfn);
+#endif
+ count++;
+ }
+ }
+ return count;
+}
+
+#endif
+/**
+ * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast/multicast
+ * address list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast and
+ * promiscuous mode.
+ **/
+void ixgbe_set_rx_mode(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ u32 vlnctrl;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
+ /* set all bits that we expect to always be set */
+ fctrl |= IXGBE_FCTRL_BAM;
+ fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
+ fctrl |= IXGBE_FCTRL_PMCF;
+
+ /* clear the bits we are changing the status of */
+ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+
+ if (netdev->flags & IFF_PROMISC) {
+ hw->addr_ctrl.user_set_promisc = true;
+ fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else {
+ /*
+ * Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = ixgbe_write_mc_addr_list(netdev);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else if (count) {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ }
+ }
+#ifdef NETIF_F_HW_VLAN_TX
+ /* enable hardware vlan filtering */
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+#endif
+ hw->addr_ctrl.user_set_promisc = false;
+#ifdef HAVE_SET_RX_MODE
+ /*
+ * Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ count = ixgbe_write_uc_addr_list(adapter, netdev,
+ adapter->num_vfs);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_UPE;
+ vmolr |= IXGBE_VMOLR_ROPE;
+ }
+#endif
+ }
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+ ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+
+
+
+
+
+
+
+/* Additional bittime to account for IXGBE framing */
+#define IXGBE_ETH_FRAMING 20
+
+/*
+ * ixgbe_hpbthresh - calculate high water mark for flow control
+ *
+ * @adapter: board private structure to calculate for
+ * @pb: packet buffer to calculate
+ */
+static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *dev = adapter->netdev;
+ int link, tc, kb, marker;
+ u32 dv_id, rx_pba;
+
+ /* Calculate max LAN frame size */
+ tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
+
+#ifdef IXGBE_FCOE
+ /* FCoE traffic class uses FCOE jumbo frames */
+ if (dev->features & NETIF_F_FCOE_MTU) {
+ int fcoe_pb = 0;
+
+ fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+
+ if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ }
+#endif
+
+ /* Calculate delay value for device */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ dv_id = IXGBE_DV_X540(link, tc);
+ break;
+ default:
+ dv_id = IXGBE_DV(link, tc);
+ break;
+ }
+
+ /* Loopback switch introduces additional latency */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ dv_id += IXGBE_B2BT(tc);
+
+ /* Delay value is calculated in bit times convert to KB */
+ kb = IXGBE_BT2KB(dv_id);
+ rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
+
+ marker = rx_pba - kb;
+
+ /* It is possible that the packet buffer is not large enough
+ * to provide the required headroom. In this case warn the
+ * user and do the best we can.
+ */
+ if (marker < 0) {
+ e_warn(drv, "Packet Buffer(%i) can not provide enough "
+ "headroom to support flow control. "
+ "Decrease MTU or number of traffic classes\n", pb);
+ marker = tc + 1;
+ }
+
+ return marker;
+}
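+
+/* Worked example added for clarity (assumes a standard 1500-byte MTU): the
+ * max LAN frame above is link = tc = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4)
+ * + IXGBE_ETH_FRAMING (20) = 1538 bytes. The delay value IXGBE_DV(link, tc)
+ * is computed in bit times, converted to KB with IXGBE_BT2KB(), and the
+ * high water mark is the packet buffer size (RXPBSIZE in KB) minus that
+ * headroom.
+ */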
+
+/*
+ * ixgbe_lpbthresh - calculate low water mark for flow control
+ *
+ * @adapter: board private structure to calculate for
+ * @pb: packet buffer to calculate
+ */
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *dev = adapter->netdev;
+ int tc;
+ u32 dv_id;
+
+ /* Calculate max LAN frame size */
+ tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+#ifdef IXGBE_FCOE
+ /* FCoE traffic class uses FCOE jumbo frames */
+ if (dev->features & NETIF_F_FCOE_MTU) {
+ int fcoe_pb = 0;
+
+ fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+
+ if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ }
+#endif
+
+ /* Calculate delay value for device */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ dv_id = IXGBE_LOW_DV_X540(tc);
+ break;
+ default:
+ dv_id = IXGBE_LOW_DV(tc);
+ break;
+ }
+
+ /* Delay value is calculated in bit times convert to KB */
+ return IXGBE_BT2KB(dv_id);
+}
+
+/*
+ * ixgbe_pbthresh_setup - calculate and setup high low water marks
+ */
+static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int num_tc = netdev_get_num_tc(adapter->netdev);
+ int i;
+
+ if (!num_tc)
+ num_tc = 1;
+ if (num_tc > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+ num_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+
+ for (i = 0; i < num_tc; i++) {
+ hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+ hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
+
+ /* Low water marks must not be larger than high water marks */
+ if (hw->fc.low_water[i] > hw->fc.high_water[i])
+ hw->fc.low_water[i] = 0;
+ }
+
+ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++)
+ hw->fc.high_water[i] = 0;
+}
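+
+/* Note added for clarity: when the computed low water mark exceeds the high
+ * water mark (e.g. a packet buffer too small for the current MTU), the low
+ * mark is forced to 0 rather than swapped, effectively disabling the low
+ * threshold for that traffic class.
+ */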
+
+
+
+#ifdef NO_VNIC
+static void ixgbe_configure(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbe_configure_pb(adapter);
+ ixgbe_configure_dcb(adapter);
+
+ ixgbe_set_rx_mode(adapter->netdev);
+#ifdef NETIF_F_HW_VLAN_TX
+ ixgbe_restore_vlan(adapter);
+#endif
+
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+
+ if (adapter->hw.mac.type != ixgbe_mac_82598EB)
+ hw->mac.ops.disable_sec_rx_path(hw);
+
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ ixgbe_init_fdir_signature_82599(&adapter->hw,
+ adapter->fdir_pballoc);
+ } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+ ixgbe_init_fdir_perfect_82599(&adapter->hw,
+ adapter->fdir_pballoc);
+ ixgbe_fdir_filter_restore(adapter);
+ }
+
+ if (adapter->hw.mac.type != ixgbe_mac_82598EB)
+ hw->mac.ops.enable_sec_rx_path(hw);
+
+ ixgbe_configure_virtualization(adapter);
+
+ ixgbe_configure_tx(adapter);
+ ixgbe_configure_rx(adapter);
+}
+#endif
+
+static bool ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+ switch (hw->phy.type) {
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_sfp_ftl_active:
+ return true;
+ case ixgbe_phy_nl:
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+/**
+ * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
+ * @adapter: board private structure
+ *
+ * On a reset we need to clear out the VF stats or accounting gets
+ * messed up because they're not clear on read.
+ **/
+void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ adapter->vfinfo[i].last_vfstats.gprc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gprc +=
+ adapter->vfinfo[i].vfstats.gprc;
+ adapter->vfinfo[i].vfstats.gprc = 0;
+ adapter->vfinfo[i].last_vfstats.gptc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gptc +=
+ adapter->vfinfo[i].vfstats.gptc;
+ adapter->vfinfo[i].vfstats.gptc = 0;
+ adapter->vfinfo[i].last_vfstats.gorc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gorc +=
+ adapter->vfinfo[i].vfstats.gorc;
+ adapter->vfinfo[i].vfstats.gorc = 0;
+ adapter->vfinfo[i].last_vfstats.gotc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gotc +=
+ adapter->vfinfo[i].vfstats.gotc;
+ adapter->vfinfo[i].vfstats.gotc = 0;
+ adapter->vfinfo[i].last_vfstats.mprc =
+ IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.mprc +=
+ adapter->vfinfo[i].vfstats.mprc;
+ adapter->vfinfo[i].vfstats.mprc = 0;
+ }
+}
+
+
+
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
+{
+#ifdef NO_VNIC
+ WARN_ON(in_interrupt());
+ /* put off any impending NetWatchDogTimeout */
+ adapter->netdev->trans_start = jiffies;
+
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ ixgbe_down(adapter);
+ /*
+ * If SR-IOV enabled then wait a bit before bringing the adapter
+ * back up to give the VFs time to respond to the reset. The
+ * two second wait is based upon the watchdog timer cycle in
+ * the VF driver.
+ */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ msleep(2000);
+ ixgbe_up(adapter);
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+#endif
+}
+
+void ixgbe_up(struct ixgbe_adapter *adapter)
+{
+ /* hardware has been reset, we need to reload some things */
+ //ixgbe_configure(adapter);
+
+ //ixgbe_up_complete(adapter);
+}
+
+void ixgbe_reset(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ /* lock SFP init bit to prevent race conditions with the watchdog */
+ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ usleep_range(1000, 2000);
+
+ /* clear all SFP and link config related flags while holding SFP_INIT */
+ adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
+ IXGBE_FLAG2_SFP_NEEDS_RESET);
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+
+ err = hw->mac.ops.init_hw(hw);
+ switch (err) {
+ case 0:
+ case IXGBE_ERR_SFP_NOT_PRESENT:
+ case IXGBE_ERR_SFP_NOT_SUPPORTED:
+ break;
+ case IXGBE_ERR_MASTER_REQUESTS_PENDING:
+ e_dev_err("master disable timed out\n");
+ break;
+ case IXGBE_ERR_EEPROM_VERSION:
+ /* We are running on a pre-production device, log a warning */
+ e_dev_warn("This device is a pre-production adapter/LOM. "
+ "Please be aware there may be issues associated "
+ "with your hardware. If you are experiencing "
+ "problems please contact your Intel or hardware "
+ "representative who provided you with this "
+ "hardware.\n");
+ break;
+ default:
+ e_dev_err("Hardware Error: %d\n", err);
+ }
+
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+
+ ixgbe_flush_sw_mac_table(adapter);
+ memcpy(&adapter->mac_table[0].addr, hw->mac.perm_addr,
+ netdev->addr_len);
+ adapter->mac_table[0].queue = adapter->num_vfs;
+ adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+ IXGBE_MAC_STATE_IN_USE);
+ hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+ adapter->mac_table[0].queue,
+ IXGBE_RAH_AV);
+}
+
+
+
+
+
+
+void ixgbe_down(struct ixgbe_adapter *adapter)
+{
+#ifdef NO_VNIC
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rxctrl;
+ int i;
+
+ /* signal that we are down to the interrupt handler */
+ set_bit(__IXGBE_DOWN, &adapter->state);
+
+ /* disable receives */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+ /* disable all enabled rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
+ usleep_range(10000, 20000);
+
+ netif_tx_stop_all_queues(netdev);
+
+ /* call carrier off first to avoid false dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ ixgbe_irq_disable(adapter);
+
+ ixgbe_napi_disable_all(adapter);
+
+ adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
+ IXGBE_FLAG2_RESET_REQUESTED);
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+
+ del_timer_sync(&adapter->service_timer);
+
+ if (adapter->num_vfs) {
+ /* Clear EITR Select mapping */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+
+ /* Mark all the VFs as inactive */
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].clear_to_send = 0;
+
+ /* ping all the active vfs to let them know we are going down */
+ ixgbe_ping_all_vfs(adapter);
+
+ /* Disable all VFTE/VFRE TX/RX */
+ ixgbe_disable_tx_rx(adapter);
+ }
+
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
+
+ /* Disable the Tx DMA engine on 82599 and X540 */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
+ (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+ ~IXGBE_DMATXCTL_TE));
+ break;
+ default:
+ break;
+ }
+
+#ifdef HAVE_PCI_ERS
+ if (!pci_channel_offline(adapter->pdev))
+#endif
+ ixgbe_reset(adapter);
+ /* power down the optics */
+ if ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB)))
+ ixgbe_disable_tx_laser(hw);
+
+ ixgbe_clean_all_tx_rings(adapter);
+ ixgbe_clean_all_rx_rings(adapter);
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ /* since we reset the hardware DCA settings were cleared */
+ ixgbe_setup_dca(adapter);
+#endif
+
+#endif /* NO_VNIC */
+}
+
+#ifndef NO_VNIC
+
+#undef IXGBE_FCOE
+
+/* Artificial max queue cap per traffic class in DCB mode */
+#define DCB_QUEUE_CAP 8
+
+/**
+ * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device
+ * @adapter: board private structure to initialize
+ *
+ * When DCB (Data Center Bridging) is enabled, allocate queues for
+ * each traffic class. If multiqueue isn't available, then abort DCB
+ * initialization.
+ *
+ * This function handles all combinations of DCB, RSS, and FCoE.
+ *
+ **/
+static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+ int tcs;
+#ifdef HAVE_MQPRIO
+ int rss_i, i, offset = 0;
+ struct net_device *dev = adapter->netdev;
+
+ /* Map queue offset and counts onto allocated tx queues */
+ tcs = netdev_get_num_tc(dev);
+
+ if (!tcs)
+ return false;
+
+ rss_i = min_t(int, dev->num_tx_queues / tcs, num_online_cpus());
+
+ if (rss_i > DCB_QUEUE_CAP)
+ rss_i = DCB_QUEUE_CAP;
+
+ for (i = 0; i < tcs; i++) {
+ netdev_set_tc_queue(dev, i, rss_i, offset);
+ offset += rss_i;
+ }
+
+ adapter->num_tx_queues = rss_i * tcs;
+ adapter->num_rx_queues = rss_i * tcs;
+
+#ifdef IXGBE_FCOE
+ /* FCoE enabled queues require special configuration indexed
+ * by feature specific indices and mask. Here we map FCoE
+ * indices onto the DCB queue pairs allowing FCoE to own
+ * configuration later.
+ */
+
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ struct ixgbe_ring_feature *f;
+ int tc;
+ u8 prio_tc[IXGBE_DCB_MAX_USER_PRIORITY] = {0};
+
+ ixgbe_dcb_unpack_map_cee(&adapter->dcb_cfg,
+ IXGBE_DCB_TX_CONFIG,
+ prio_tc);
+ tc = prio_tc[adapter->fcoe.up];
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+ f->indices = min_t(int, rss_i, f->indices);
+ f->mask = rss_i * tc;
+ }
+#endif /* IXGBE_FCOE */
+#else
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return false;
+
+ /* Enable one Queue per traffic class */
+ tcs = adapter->tc;
+ if (!tcs)
+ return false;
+
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ struct ixgbe_ring_feature *f;
+ int tc = netdev_get_prio_tc_map(adapter->netdev,
+ adapter->fcoe.up);
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+
+ /*
+ * We have at most 8 queues for FCoE, where 8 is the
+ * FCoE redirection table size. We must also share
+ * ring resources with network traffic so if FCoE TC is
+ * 4 or greater and we are in 8 TC mode we can only use
+ * 7 queues.
+ */
+ if ((tcs > 4) && (tc >= 4) && (f->indices > 7))
+ f->indices = 7;
+
+ f->indices = min_t(int, num_online_cpus(), f->indices);
+ f->mask = tcs;
+
+ adapter->num_rx_queues = f->indices + tcs;
+ adapter->num_tx_queues = f->indices + tcs;
+
+ return true;
+ }
+
+#endif /* IXGBE_FCOE */
+ adapter->num_rx_queues = tcs;
+ adapter->num_tx_queues = tcs;
+#endif /* HAVE_MQPRIO */
+
+ return true;
+}
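+
+/* Illustrative sketch added for clarity (the queue counts are assumptions,
+ * not values from this file): with HAVE_MQPRIO, 4 traffic classes, 16 online
+ * CPUs and a netdev exposing 64 tx queues, rss_i = min(64 / 4, 16) = 16 and
+ * is then capped to DCB_QUEUE_CAP (8), so each TC gets 8 queue pairs at
+ * offsets 0, 8, 16, 24 and num_tx_queues = num_rx_queues = 8 * 4 = 32.
+ */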
+
+/**
+ * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices
+ * @adapter: board private structure to initialize
+ *
+ * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues
+ * and VM pools where appropriate. If RSS is available, then also try and
+ * enable RSS and map accordingly.
+ *
+ **/
+static bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter)
+{
+ int vmdq_i = adapter->ring_feature[RING_F_VMDQ].indices;
+ int vmdq_m = 0;
+ int rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ unsigned long i;
+ int rss_shift;
+ bool ret = false;
+
+
+ switch (adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+ | IXGBE_FLAG_DCB_ENABLED
+ | IXGBE_FLAG_VMDQ_ENABLED)) {
+
+ case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED):
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ vmdq_i = min((int)IXGBE_MAX_VMDQ_INDICES, vmdq_i);
+ if (vmdq_i > 32)
+ rss_i = 2;
+ else
+ rss_i = 4;
+ i = rss_i;
+ rss_shift = find_first_bit(&i, sizeof(i) * 8);
+ vmdq_m = ((IXGBE_MAX_VMDQ_INDICES - 1) <<
+ rss_shift) & (MAX_RX_QUEUES - 1);
+ break;
+ default:
+ break;
+ }
+ adapter->num_rx_queues = vmdq_i * rss_i;
+ adapter->num_tx_queues = min((int)MAX_TX_QUEUES, vmdq_i * rss_i);
+ ret = true;
+ break;
+
+ case (IXGBE_FLAG_VMDQ_ENABLED):
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ vmdq_m = (IXGBE_MAX_VMDQ_INDICES - 1) << 1;
+ break;
+ default:
+ break;
+ }
+ adapter->num_rx_queues = vmdq_i;
+ adapter->num_tx_queues = vmdq_i;
+ ret = true;
+ break;
+
+ default:
+ ret = false;
+ goto vmdq_queues_out;
+ }
+
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+ adapter->num_rx_pools = vmdq_i;
+ adapter->num_rx_queues_per_pool = adapter->num_rx_queues /
+ vmdq_i;
+ } else {
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+ }
+ /* save the mask for later use */
+ adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+vmdq_queues_out:
+ return ret;
+}
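+
+/* Illustrative sketch added for clarity (assumes IXGBE_MAX_VMDQ_INDICES and
+ * MAX_TX_QUEUES allow it): on an 82599 with RSS and VMDq both enabled and 64
+ * VMDq pools requested, vmdq_i = 64 > 32 so rss_i drops to 2, giving
+ * num_rx_queues = 64 * 2 = 128 and num_rx_queues_per_pool = 2; with VMDq
+ * alone each pool gets a single queue pair instead.
+ */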
+
+/**
+ * ixgbe_set_rss_queues: Allocate queues for RSS
+ * @adapter: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring_feature *f;
+
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ return false;
+ }
+
+ /* set mask for 16 queue limit of RSS */
+ f = &adapter->ring_feature[RING_F_RSS];
+ f->mask = 0xF;
+
+ /*
+ * Use Flow Director in addition to RSS to ensure the best
+ * distribution of flows across cores, even when an FDIR flow
+ * isn't matched.
+ */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ f = &adapter->ring_feature[RING_F_FDIR];
+
+ f->indices = min_t(int, num_online_cpus(), f->indices);
+ f->mask = 0;
+ }
+
+ adapter->num_rx_queues = f->indices;
+#ifdef HAVE_TX_MQ
+ adapter->num_tx_queues = f->indices;
+#endif
+
+ return true;
+}
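+
+/* Note added for clarity: the 0xF mask set above encodes the 16-queue RSS
+ * limit mentioned in the code, while the actual queue counts come from the
+ * RING_F_RSS indices (one queue per online CPU where possible).
+ */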
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
+ * @adapter: board private structure to initialize
+ *
+ * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
+ * The ring feature mask is not used as a mask for FCoE, as it can take any 8
+ * rx queues out of the max number of rx queues; instead, it is used as the
+ * index of the first rx queue used by FCoE.
+ *
+ **/
+static bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring_feature *f;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;
+
+ ixgbe_set_rss_queues(adapter);
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+ f->indices = min_t(int, num_online_cpus(), f->indices);
+
+ /* adding FCoE queues */
+ f->mask = adapter->num_rx_queues;
+ adapter->num_rx_queues += f->indices;
+ adapter->num_tx_queues += f->indices;
+
+ return true;
+}
+
+#endif /* IXGBE_FCOE */
+/*
+ * ixgbe_set_num_queues: Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine. The order here is very
+ * important, starting with the largest set of features turned on at once,
+ * and ending with the smallest set of features. This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+
+ if (ixgbe_set_vmdq_queues(adapter))
+ return;
+
+ if (ixgbe_set_dcb_queues(adapter))
+ return;
+
+#ifdef IXGBE_FCOE
+ if (ixgbe_set_fcoe_queues(adapter))
+ return;
+
+#endif /* IXGBE_FCOE */
+ ixgbe_set_rss_queues(adapter);
+}
+
+#endif
+
+
+/**
+ * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbe_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ err = ixgbe_init_shared_code(hw);
+ if (err) {
+ e_err(probe, "init_shared_code failed: %d\n", err);
+ goto out;
+ }
+ adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
+ hw->mac.num_rar_entries,
+ GFP_ATOMIC);
+ /* Set capability flags */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ adapter->flags |= IXGBE_FLAG_MSI_CAPABLE |
+ IXGBE_FLAG_MSIX_CAPABLE |
+ IXGBE_FLAG_MQ_CAPABLE |
+ IXGBE_FLAG_RSS_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
+#endif
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_CAPABLE;
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
+
+ if (hw->device_id == IXGBE_DEV_ID_82598AT)
+ adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
+
+ adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598;
+ break;
+ case ixgbe_mac_X540:
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+ case ixgbe_mac_82599EB:
+ adapter->flags |= IXGBE_FLAG_MSI_CAPABLE |
+ IXGBE_FLAG_MSIX_CAPABLE |
+ IXGBE_FLAG_MQ_CAPABLE |
+ IXGBE_FLAG_RSS_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
+#endif
+ adapter->flags |= IXGBE_FLAG_SRIOV_CAPABLE;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+#ifdef IXGBE_FCOE
+ adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+ adapter->ring_feature[RING_F_FCOE].indices = 0;
+#ifdef CONFIG_DCB
+ /* Default traffic class to use for FCoE */
+ adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
+ adapter->fcoe.up = IXGBE_FCOE_DEFTC;
+ adapter->fcoe.up_set = IXGBE_FCOE_DEFTC;
+#endif
+#endif
+ if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+#ifndef IXGBE_NO_SMART_SPEED
+ hw->phy.smart_speed = ixgbe_smart_speed_on;
+#else
+ hw->phy.smart_speed = ixgbe_smart_speed_off;
+#endif
+ adapter->max_msix_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599;
+ default:
+ break;
+ }
+
+ /* n-tuple support exists, always init our spinlock */
+ //spin_lock_init(&adapter->fdir_perfect_lock);
+
+ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) {
+ int j;
+ struct ixgbe_dcb_tc_config *tc;
+ int dcb_i = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+
+
+ adapter->dcb_cfg.num_tcs.pg_tcs = dcb_i;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = dcb_i;
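+ /* Worked example added for clarity: with dcb_i == 8 each TC gets
+ * 100 / 8 = 12 percent and TC0 also picks up the remainder
+ * (100 % 8 = 4), so the percentages sum to exactly 100.
+ */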
+ for (j = 0; j < dcb_i; j++) {
+ tc = &adapter->dcb_cfg.tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = 0;
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100 / dcb_i;
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = 0;
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100 / dcb_i;
+ tc->pfc = ixgbe_dcb_pfc_disabled;
+ if (j == 0) {
+ /* total bandwidth across all TCs must add up to 100 */
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent +=
+ 100 % dcb_i;
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent +=
+ 100 % dcb_i;
+ }
+ }
+
+ /* Initialize default user to priority mapping, UPx->TC0 */
+ tc = &adapter->dcb_cfg.tc_config[0];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+
+ adapter->dcb_cfg.bw_percentage[IXGBE_DCB_TX_CONFIG][0] = 100;
+ adapter->dcb_cfg.bw_percentage[IXGBE_DCB_RX_CONFIG][0] = 100;
+ adapter->dcb_cfg.rx_pba_cfg = ixgbe_dcb_pba_equal;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.round_robin_enable = false;
+ adapter->dcb_set_bitmap = 0x00;
+#ifdef CONFIG_DCB
+ adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+#endif /* CONFIG_DCB */
+
+ if (hw->mac.type == ixgbe_mac_X540) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 4;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
+ }
+ }
+#ifdef CONFIG_DCB
+ /* XXX does this need to be initialized even w/o DCB? */
+ //memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+ // sizeof(adapter->temp_dcb_cfg));
+
+#endif
+ //if (hw->mac.type == ixgbe_mac_82599EB ||
+ // hw->mac.type == ixgbe_mac_X540)
+ // hw->mbx.ops.init_params(hw);
+
+ /* default flow control settings */
+ hw->fc.requested_mode = ixgbe_fc_full;
+ hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
+
+ adapter->last_lfc_mode = hw->fc.current_mode;
+ ixgbe_pbthresh_setup(adapter);
+ hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
+ hw->fc.send_xon = true;
+ hw->fc.disable_fc_autoneg = false;
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
+ adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
+ adapter->rx_work_limit = IXGBE_DEFAULT_RX_WORK;
+
+ set_bit(__IXGBE_DOWN, &adapter->state);
+out:
+ return err;
+}
+
+/**
+ * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ //int orig_node = dev_to_node(dev);
+ int numa_node = -1;
+ int size;
+
+ size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
+
+ if (tx_ring->q_vector)
+ numa_node = tx_ring->q_vector->numa_node;
+
+ tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ //set_dev_node(dev, numa_node);
+ //tx_ring->desc = dma_alloc_coherent(dev,
+ // tx_ring->size,
+ // &tx_ring->dma,
+ // GFP_KERNEL);
+ //set_dev_node(dev, orig_node);
+ //if (!tx_ring->desc)
+ // tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ // &tx_ring->dma, GFP_KERNEL);
+ //if (!tx_ring->desc)
+ // goto err;
+
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+ return -ENOMEM;
+}
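+
+/* Sizing example added for clarity (assumes the default of 512 descriptors
+ * and the 16-byte advanced Tx descriptor): tx_ring->size = 512 * 16 = 8192
+ * bytes, already a multiple of 4K, so ALIGN() leaves it unchanged.
+ */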
+
+/**
+ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
+ if (!err)
+ continue;
+ e_err(probe, "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ //int orig_node = dev_to_node(dev);
+ int numa_node = -1;
+ int size;
+
+ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+
+ if (rx_ring->q_vector)
+ numa_node = rx_ring->q_vector->numa_node;
+
+ rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+#ifdef NO_VNIC
+ set_dev_node(dev, numa_node);
+ rx_ring->desc = dma_alloc_coherent(dev,
+ rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL);
+ set_dev_node(dev, orig_node);
+ if (!rx_ring->desc)
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!rx_ring->desc)
+ goto err;
+
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+ ixgbe_init_rx_page_offset(rx_ring);
+
+#endif
+
+#endif /* NO_VNIC */
+ return 0;
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
+ if (!err)
+ continue;
+ e_err(probe, "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
+{
+ //ixgbe_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ //dma_free_coherent(tx_ring->dev, tx_ring->size,
+ // tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tx_ring[i]->desc)
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
+ * ixgbe_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
+{
+ //ixgbe_clean_rx_ring(rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ //dma_free_coherent(rx_ring->dev, rx_ring->size,
+ // rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i]->desc)
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
+}
+
+
+/**
+ * ixgbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+//static
+int ixgbe_open(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__IXGBE_TESTING, &adapter->state))
+ return -EBUSY;
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = ixgbe_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = ixgbe_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+#ifdef NO_VNIC
+ ixgbe_configure(adapter);
+
+ err = ixgbe_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ ixgbe_up_complete(adapter);
+
+err_req_irq:
+#else
+ return 0;
+#endif
+err_setup_rx:
+ ixgbe_free_all_rx_resources(adapter);
+err_setup_tx:
+ ixgbe_free_all_tx_resources(adapter);
+ ixgbe_reset(adapter);
+
+ return err;
+}
+
+/**
+ * ixgbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+//static
+int ixgbe_close(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ //ixgbe_down(adapter);
+ //ixgbe_free_irq(adapter);
+
+ //ixgbe_fdir_filter_exit(adapter);
+
+ //ixgbe_free_all_tx_resources(adapter);
+ //ixgbe_free_all_rx_resources(adapter);
+
+ ixgbe_release_hw_control(adapter);
+
+ return 0;
+}
+
+
+
+
+
+/**
+ * ixgbe_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+//static
+struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ /* update the stats data */
+ ixgbe_update_stats(adapter);
+
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ /* only return the current stats */
+ return &netdev->stats;
+#else
+ /* only return the current stats */
+ return &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+}
+
+/**
+ * ixgbe_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbe_update_stats(struct ixgbe_adapter *adapter)
+{
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats *net_stats = &adapter->netdev->stats;
+#else
+ struct net_device_stats *net_stats = &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u64 total_mpc = 0;
+ u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
+ u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
+#ifndef IXGBE_NO_LRO
+ u32 flushed = 0, coal = 0;
+ int num_q_vectors = 1;
+#endif
+#ifdef IXGBE_FCOE
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ unsigned int cpu;
+ u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
+#endif /* IXGBE_FCOE */
+
+ printk(KERN_DEBUG "ixgbe_update_stats, tx_queues=%d, rx_queues=%d\n",
+ adapter->num_tx_queues, adapter->num_rx_queues);
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return;
+
+#ifndef IXGBE_NO_LRO
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+#endif
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ u64 rsc_count = 0;
+ u64 rsc_flush = 0;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
+ }
+ adapter->rsc_total_count = rsc_count;
+ adapter->rsc_total_flush = rsc_flush;
+ }
+
+#ifndef IXGBE_NO_LRO
+ for (i = 0; i < num_q_vectors; i++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+ if (!q_vector)
+ continue;
+ flushed += q_vector->lrolist.stats.flushed;
+ coal += q_vector->lrolist.stats.coal;
+ }
+ adapter->lro_stats.flushed = flushed;
+ adapter->lro_stats.coal = coal;
+
+#endif
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ hw_csum_rx_error += rx_ring->rx_stats.csum_err;
+ bytes += rx_ring->stats.bytes;
+ packets += rx_ring->stats.packets;
+
+ }
+ adapter->non_eop_descs = non_eop_descs;
+ adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ adapter->hw_csum_rx_error = hw_csum_rx_error;
+ net_stats->rx_bytes = bytes;
+ net_stats->rx_packets = packets;
+
+ bytes = 0;
+ packets = 0;
+ /* gather some stats to the adapter struct that are per queue */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ restart_queue += tx_ring->tx_stats.restart_queue;
+ tx_busy += tx_ring->tx_stats.tx_busy;
+ bytes += tx_ring->stats.bytes;
+ packets += tx_ring->stats.packets;
+ }
+ adapter->restart_queue = restart_queue;
+ adapter->tx_busy = tx_busy;
+ net_stats->tx_bytes = bytes;
+ net_stats->tx_packets = packets;
+
+ hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+
+ /* 8 register reads */
+ for (i = 0; i < 8; i++) {
+ /* for packet buffers not used, the register should read 0 */
+ mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+ missed_rx += mpc;
+ hwstats->mpc[i] += mpc;
+ total_mpc += hwstats->mpc[i];
+ hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* 16 register reads */
+ for (i = 0; i < 16; i++) {
+ hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ if ((hw->mac.type == ixgbe_mac_82599EB) ||
+ (hw->mac.type == ixgbe_mac_X540)) {
+ hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
+ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
+ }
+ }
+
+ hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+ /* work around hardware counting issue */
+ hwstats->gprc -= missed_rx;
+
+ ixgbe_update_xoff_received(adapter);
+
+ /* 82598 hardware only has a 32 bit counter in the high register */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ break;
+ case ixgbe_mac_X540:
+ /* OS2BMC stats are X540 only */
+ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
+ hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
+ hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
+ hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
+ case ixgbe_mac_82599EB:
+ for (i = 0; i < 16; i++)
+ adapter->hw_rx_no_dma_resources +=
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+ IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+ IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+#ifdef HAVE_TX_MQ
+ hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+#endif /* HAVE_TX_MQ */
+#ifdef IXGBE_FCOE
+ hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+ hwstats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
+ hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+ hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+ hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+ hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+ hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+ /* Add up per-CPU counters for total DDP alloc failures */
+ if (fcoe && fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+ for_each_possible_cpu(cpu) {
+ fcoe_noddp_counts_sum +=
+ *per_cpu_ptr(fcoe->pcpu_noddp, cpu);
+ fcoe_noddp_ext_buff_counts_sum +=
+ *per_cpu_ptr(fcoe->
+ pcpu_noddp_ext_buff, cpu);
+ }
+ }
+ hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
+ hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
+
+#endif /* IXGBE_FCOE */
+ break;
+ default:
+ break;
+ }
+ bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
+ hwstats->bprc += bprc;
+ hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ hwstats->mprc -= bprc;
+ hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+ hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+ hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+ hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+ hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+ hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+ hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+ lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+ hwstats->lxontxc += lxon;
+ lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ hwstats->lxofftxc += lxoff;
+ hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+ hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+ /*
+ * 82598 errata - tx of flow control packets is included in tx counters
+ */
+ xon_off_tot = lxon + lxoff;
+ hwstats->gptc -= xon_off_tot;
+ hwstats->mptc -= xon_off_tot;
+ hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
+ hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+ hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+ hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+ hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+ hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+ hwstats->ptc64 -= xon_off_tot;
+ hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+ hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+ hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+ hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+ /* Fill out the OS statistics structure */
+ net_stats->multicast = hwstats->mprc;
+
+ /* Rx Errors */
+ net_stats->rx_errors = hwstats->crcerrs +
+ hwstats->rlec;
+ net_stats->rx_dropped = 0;
+ net_stats->rx_length_errors = hwstats->rlec;
+ net_stats->rx_crc_errors = hwstats->crcerrs;
+ net_stats->rx_missed_errors = total_mpc;
+
+ /*
+ * VF Stats Collection - skip while resetting because these
+ * are not clear on read and otherwise you'll sometimes get
+ * crazy values.
+ */
+ if (!test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ for (i = 0; i < adapter->num_vfs; i++) {
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i), \
+ adapter->vfinfo[i].last_vfstats.gprc, \
+ adapter->vfinfo[i].vfstats.gprc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i), \
+ adapter->vfinfo[i].last_vfstats.gptc, \
+ adapter->vfinfo[i].vfstats.gptc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i), \
+ IXGBE_PVFGORC_MSB(i), \
+ adapter->vfinfo[i].last_vfstats.gorc, \
+ adapter->vfinfo[i].vfstats.gorc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i), \
+ IXGBE_PVFGOTC_MSB(i), \
+ adapter->vfinfo[i].last_vfstats.gotc, \
+ adapter->vfinfo[i].vfstats.gotc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i), \
+ adapter->vfinfo[i].last_vfstats.mprc, \
+ adapter->vfinfo[i].vfstats.mprc);
+ }
+ }
+}
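The UPDATE_VF_COUNTER_32bit/36bit invocations above accumulate per-VF counters whose 36-bit variants are split across an LSB and an MSB register. A minimal sketch of that accumulation, assuming the hardware value wraps modulo 2^36; the helper name and its standalone form are illustrative and not part of this patch:

#include <linux/types.h>

/* Accumulate a 36-bit hardware counter into a running 64-bit software
 * counter, tolerating wrap-around between samples. 'lsb'/'msb' are the
 * values already read from the LSB/MSB registers.
 */
static inline void sketch_update_counter_36bit(u64 *last, u64 *counter,
						u32 lsb, u32 msb)
{
	u64 cur = (((u64)msb & 0xF) << 32) | lsb;	/* 36-bit sample */
	u64 delta = (cur - *last) & 0xFFFFFFFFFULL;	/* modulo 2^36 */

	*last = cur;
	*counter += delta;
}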
+
+
+#ifdef NO_VNIC
+
+/**
+ * ixgbe_watchdog_update_link - update the link status
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+ bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
+
+ if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
+ return;
+
+ if (hw->mac.ops.check_link) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ } else {
+ /* always assume link is up, if no check link function */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
+
+#ifdef HAVE_DCBNL_IEEE
+ if (adapter->ixgbe_ieee_pfc)
+ pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
+
+#endif
+ if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
+ hw->mac.ops.fc_enable(hw);
+ //ixgbe_set_rx_drop_en(adapter);
+ }
+
+ if (link_up ||
+ time_after(jiffies, (adapter->link_check_timeout +
+ IXGBE_TRY_LINK_TIMEOUT))) {
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+}
+#endif
+
+
+
+#ifdef NO_VNIC
+
+/**
+ * ixgbe_service_task - manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_service_task(struct work_struct *work)
+{
+ //struct ixgbe_adapter *adapter = container_of(work,
+ // struct ixgbe_adapter,
+ // service_task);
+
+ //ixgbe_reset_subtask(adapter);
+ //ixgbe_sfp_detection_subtask(adapter);
+ //ixgbe_sfp_link_config_subtask(adapter);
+ //ixgbe_check_overtemp_subtask(adapter);
+ //ixgbe_watchdog_subtask(adapter);
+#ifdef HAVE_TX_MQ
+ //ixgbe_fdir_reinit_subtask(adapter);
+#endif
+ //ixgbe_check_hang_subtask(adapter);
+
+ //ixgbe_service_event_complete(adapter);
+}
+
+
+
+
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+ IXGBE_TXD_CMD_RS)
+
+
+/**
+ * ixgbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_set_mac(struct net_device *netdev, void *p)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+ int ret;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ixgbe_del_mac_filter(adapter, hw->mac.addr,
+ adapter->num_vfs);
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+
+ /* set the correct pool for the new PF MAC address in entry 0 */
+ ret = ixgbe_add_mac_filter(adapter, hw->mac.addr,
+ adapter->num_vfs);
+ return (ret > 0 ? 0 : ret);
+}
+
+
+/**
+ * ixgbe_ioctl - handle device-specific ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: ioctl command
+ **/
+static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+#ifdef ETHTOOL_OPS_COMPAT
+ case SIOCETHTOOL:
+ return ethtool_ioctl(ifr);
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif /* NO_VNIC */
+
+
+void ixgbe_do_reset(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+}
+
+
+
+
+
+
+/**
+ * ixgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+//static
+int ixgbe_kni_probe(struct pci_dev *pdev,
+ struct net_device **lad_dev)
+{
+ size_t count;
+ struct net_device *netdev;
+ struct ixgbe_adapter *adapter = NULL;
+ struct ixgbe_hw *hw = NULL;
+ static int cards_found;
+ int i, err;
+ u16 offset;
+ u16 eeprom_verh, eeprom_verl, eeprom_cfg_blkh, eeprom_cfg_blkl;
+ u32 etrack_id;
+ u16 build, major, patch;
+ char *info_string, *i_s_var;
+ u8 part_str[IXGBE_PBANUM_LENGTH];
+ enum ixgbe_mac_type mac_type = ixgbe_mac_unknown;
+#ifdef HAVE_TX_MQ
+ unsigned int indices = num_possible_cpus();
+#endif /* HAVE_TX_MQ */
+#ifdef IXGBE_FCOE
+ u16 device_caps;
+#endif
+ u16 wol_cap;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+
+#ifdef NO_VNIC
+ err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM), ixgbe_driver_name);
+ if (err) {
+ dev_err(pci_dev_to_dev(pdev),
+ "pci_request_selected_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+#endif
+
+ /*
+ * The mac_type is needed before the adapter is set up,
+ * so rather than maintain two devID -> MAC tables we dummy up
+ * an ixgbe_hw struct and use ixgbe_set_mac_type.
+ */
+ hw = vmalloc(sizeof(struct ixgbe_hw));
+ if (!hw) {
+ pr_info("Unable to allocate memory for early mac "
+ "check\n");
+ } else {
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ ixgbe_set_mac_type(hw);
+ mac_type = hw->mac.type;
+ vfree(hw);
+ }
+
+#ifdef NO_VNIC
+ /*
+ * Workaround for silicon errata on 82598. Disable L0s in the PCI switch
+ * port to which the 82598 is connected to prevent duplicate
+ * completions caused by L0s. We need the mac type so that we only
+ * do this on 82598 devices; ixgbe_set_mac_type does this for us if
+ * we set its device ID.
+ */
+ if (mac_type == ixgbe_mac_82598EB)
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+#endif
+
+#ifdef HAVE_TX_MQ
+#ifdef CONFIG_DCB
+#ifdef HAVE_MQPRIO
+ indices *= IXGBE_DCB_MAX_TRAFFIC_CLASS;
+#else
+ indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
+#endif /* HAVE_MQPRIO */
+#endif /* CONFIG_DCB */
+
+ if (mac_type == ixgbe_mac_82598EB)
+ indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
+ else
+ indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
+
+#ifdef IXGBE_FCOE
+ indices += min_t(unsigned int, num_possible_cpus(),
+ IXGBE_MAX_FCOE_INDICES);
+#endif
+ netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
+#else /* HAVE_TX_MQ */
+ netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
+#endif /* HAVE_TX_MQ */
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ //pci_set_drvdata(pdev, adapter);
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+#ifdef HAVE_PCI_ERS
+ /*
+ * call save state here in standalone driver because it relies on
+ * adapter struct to exist, and needs to call netdev_priv
+ */
+ pci_save_state(pdev);
+
+#endif
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+ //ixgbe_assign_netdev_ops(netdev);
+ ixgbe_set_ethtool_ops(netdev);
+
+ strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+
+ adapter->bd_number = cards_found;
+
+ /* setup the private structure */
+ err = ixgbe_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* Make it possible for the adapter to be woken up via WOL */
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * check_options must be called before setup_link to set up
+ * hw->fc completely
+ */
+ //ixgbe_check_options(adapter);
+
+#ifndef NO_VNIC
+ /* reset_hw fills in the perm_addr as well */
+ hw->phy.reset_if_overtemp = true;
+ err = hw->mac.ops.reset_hw(hw);
+ hw->phy.reset_if_overtemp = false;
+ if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
+ hw->mac.type == ixgbe_mac_82598EB) {
+ err = 0;
+ } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ e_dev_err("failed to load because an unsupported SFP+ "
+ "module type was detected.\n");
+ e_dev_err("Reload the driver after installing a supported "
+ "module.\n");
+ goto err_sw_init;
+ } else if (err) {
+ e_dev_err("HW Init failed: %d\n", err);
+ goto err_sw_init;
+ }
+#endif
+
+ //if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ // ixgbe_probe_vf(adapter);
+
+
+#ifdef MAX_SKB_FRAGS
+ netdev->features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM;
+
+#ifdef NETIF_F_IPV6_CSUM
+ netdev->features |= NETIF_F_IPV6_CSUM;
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+ netdev->features |= NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX;
+#endif
+#ifdef NETIF_F_TSO
+ netdev->features |= NETIF_F_TSO;
+#endif /* NETIF_F_TSO */
+#ifdef NETIF_F_TSO6
+ netdev->features |= NETIF_F_TSO6;
+#endif /* NETIF_F_TSO6 */
+#ifdef NETIF_F_RXHASH
+ netdev->features |= NETIF_F_RXHASH;
+#endif /* NETIF_F_RXHASH */
+
+#ifdef HAVE_NDO_SET_FEATURES
+ netdev->features |= NETIF_F_RXCSUM;
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+
+ /* give us the option of enabling RSC/LRO later */
+#ifdef IXGBE_NO_LRO
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+#endif
+ netdev->hw_features |= NETIF_F_LRO;
+
+#else
+#ifdef NETIF_F_GRO
+
+ /* this is only needed on kernels prior to 2.6.39 */
+ netdev->features |= NETIF_F_GRO;
+#endif /* NETIF_F_GRO */
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+ /* set this bit last since it cannot be part of hw_features */
+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
+#endif
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ netdev->features |= NETIF_F_SCTP_CSUM;
+#ifdef HAVE_NDO_SET_FEATURES
+ netdev->hw_features |= NETIF_F_SCTP_CSUM |
+ NETIF_F_NTUPLE;
+#endif
+ break;
+ default:
+ break;
+ }
+
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+ netdev->vlan_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+ /*
+ * If perfect filters were enabled in check_options(), enable them
+ * on the netdevice too.
+ */
+ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ netdev->features |= NETIF_F_NTUPLE;
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ /* clear n-tuple support in the netdev unconditionally */
+ netdev->features &= ~NETIF_F_NTUPLE;
+ }
+
+#ifdef NETIF_F_RXHASH
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ netdev->features &= ~NETIF_F_RXHASH;
+
+#endif /* NETIF_F_RXHASH */
+ if (netdev->features & NETIF_F_LRO) {
+ if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+ ((adapter->rx_itr_setting == 1) ||
+ (adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR))) {
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ } else if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
+#ifdef IXGBE_NO_LRO
+ e_info(probe, "InterruptThrottleRate set too high, "
+ "disabling RSC\n");
+#else
+ e_info(probe, "InterruptThrottleRate set too high, "
+ "falling back to software LRO\n");
+#endif
+ }
+ }
+#ifdef CONFIG_DCB
+ //netdev->dcbnl_ops = &dcbnl_ops;
+#endif
+
+#ifdef IXGBE_FCOE
+#ifdef NETIF_F_FSO
+ if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+ ixgbe_get_device_caps(hw, &device_caps);
+ if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) {
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+ e_info(probe, "FCoE offload feature is not available. "
+ "Disabling FCoE offload feature\n");
+ }
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+ else {
+ adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
+ adapter->ring_feature[RING_F_FCOE].indices =
+ IXGBE_FCRETA_SIZE;
+ netdev->features |= NETIF_F_FSO |
+ NETIF_F_FCOE_CRC |
+ NETIF_F_FCOE_MTU;
+ netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+ }
+#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+ netdev->vlan_features |= NETIF_F_FSO |
+ NETIF_F_FCOE_CRC |
+ NETIF_F_FCOE_MTU;
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+ }
+#endif /* NETIF_F_FSO */
+#endif /* IXGBE_FCOE */
+
+#endif /* MAX_SKB_FRAGS */
+ /* make sure the EEPROM is good */
+ if (hw->eeprom.ops.validate_checksum &&
+ (hw->eeprom.ops.validate_checksum(hw, NULL) < 0)) {
+ e_dev_err("The EEPROM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_sw_init;
+ }
+
+ memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+#ifdef ETHTOOL_GPERMADDR
+ memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
+
+ if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
+ e_dev_err("invalid MAC address\n");
+ err = -EIO;
+ goto err_sw_init;
+ }
+#else
+ if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
+ e_dev_err("invalid MAC address\n");
+ err = -EIO;
+ goto err_sw_init;
+ }
+#endif
+ memcpy(&adapter->mac_table[0].addr, hw->mac.perm_addr,
+ netdev->addr_len);
+ adapter->mac_table[0].queue = adapter->num_vfs;
+ adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+ IXGBE_MAC_STATE_IN_USE);
+ hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+ adapter->mac_table[0].queue,
+ IXGBE_RAH_AV);
+
+ //setup_timer(&adapter->service_timer, &ixgbe_service_timer,
+ // (unsigned long) adapter);
+
+ //INIT_WORK(&adapter->service_task, ixgbe_service_task);
+ //clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
+
+ //err = ixgbe_init_interrupt_scheme(adapter);
+ //if (err)
+ // goto err_sw_init;
+
+ //adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ ixgbe_set_num_queues(adapter);
+
+ adapter->wol = 0;
+ /* WOL not supported for all but the following */
+ switch (pdev->device) {
+ case IXGBE_DEV_ID_82599_SFP:
+ /* Only these subdevices support WOL */
+ switch (pdev->subsystem_device) {
+ case IXGBE_SUBDEV_ID_82599_560FLR:
+ /* only support first port */
+ if (hw->bus.func != 0)
+ break;
+ case IXGBE_SUBDEV_ID_82599_SFP:
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ }
+ break;
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ case IXGBE_DEV_ID_82599_KX4:
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ case IXGBE_DEV_ID_X540T:
+ /* Check eeprom to see if it is enabled */
+ ixgbe_read_eeprom(hw, 0x2c, &adapter->eeprom_cap);
+ wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
+
+ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+ ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+ (hw->bus.func == 0)))
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ }
+ //device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+
+ /*
+ * Save off the EEPROM version number and Option ROM version which
+ * together make a unique identifier for the EEPROM
+ */
+ ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
+ ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
+
+ etrack_id = (eeprom_verh << 16) | eeprom_verl;
+
+ ixgbe_read_eeprom(hw, 0x17, &offset);
+
+ /* Make sure offset to SCSI block is valid */
+ if (!(offset == 0x0) && !(offset == 0xffff)) {
+ ixgbe_read_eeprom(hw, offset + 0x84, &eeprom_cfg_blkh);
+ ixgbe_read_eeprom(hw, offset + 0x83, &eeprom_cfg_blkl);
+
+ /* Only display the Option ROM version if it exists */
+ if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
+ major = eeprom_cfg_blkl >> 8;
+ build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
+ patch = eeprom_cfg_blkh & 0x00ff;
+
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x, %d.%d.%d", etrack_id, major, build,
+ patch);
+ } else {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x", etrack_id);
+ }
+ } else {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x", etrack_id);
+ }
+
+ /* reset the hardware with the new settings */
+ err = hw->mac.ops.start_hw(hw);
+ if (err == IXGBE_ERR_EEPROM_VERSION) {
+ /* We are running on a pre-production device, log a warning */
+ e_dev_warn("This device is a pre-production adapter/LOM. "
+ "Please be aware there may be issues associated "
+ "with your hardware. If you are experiencing "
+ "problems please contact your Intel or hardware "
+ "representative who provided you with this "
+ "hardware.\n");
+ }
+ /* pick up the PCI bus settings for reporting later */
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+ strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
+ *lad_dev = netdev;
+
+ adapter->netdev_registered = true;
+#ifdef NO_VNIC
+ /* power down the optics */
+ if ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB)))
+ ixgbe_disable_tx_laser(hw);
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+ /* keep stopping all the transmit queues for older kernels */
+ netif_tx_stop_all_queues(netdev);
+#endif
+
+ /* print all messages at the end so that we use our eth%d name */
+ /* print bus type/speed/width info */
+ e_dev_info("(PCI Express:%s:%s) ",
+ (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
+ hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
+ "Unknown"),
+ (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
+ hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
+ hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
+ "Unknown"));
+
+ /* print the MAC address */
+ for (i = 0; i < 6; i++)
+ pr_cont("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
+ /* First try to read PBA as a string */
+ err = ixgbe_read_pba_string(hw, part_str, IXGBE_PBANUM_LENGTH);
+ if (err)
+ strlcpy(part_str, "Unknown", sizeof(part_str));
+ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+ e_info(probe, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, hw->phy.sfp_type, part_str);
+ else
+ e_info(probe, "MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, part_str);
+
+ if (((hw->bus.speed == ixgbe_bus_speed_2500) &&
+ (hw->bus.width <= ixgbe_bus_width_pcie_x4)) ||
+ (hw->bus.width <= ixgbe_bus_width_pcie_x2)) {
+ e_dev_warn("PCI-Express bandwidth available for this "
+ "card is not sufficient for optimal "
+ "performance.\n");
+ e_dev_warn("For optimal performance a x8 PCI-Express "
+ "slot is required.\n");
+ }
+
+#define INFO_STRING_LEN 255
+ info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
+ if (!info_string) {
+ e_err(probe, "allocation for info string failed\n");
+ goto no_info_string;
+ }
+ count = 0;
+ i_s_var = info_string;
+ count += snprintf(i_s_var, INFO_STRING_LEN, "Enabled Features: ");
+
+ i_s_var = info_string + count;
+ count += snprintf(i_s_var, (INFO_STRING_LEN - count),
+ "RxQ: %d TxQ: %d ", adapter->num_rx_queues,
+ adapter->num_tx_queues);
+ i_s_var = info_string + count;
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ count += snprintf(i_s_var, INFO_STRING_LEN - count, "FCoE ");
+ i_s_var = info_string + count;
+ }
+#endif
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ count += snprintf(i_s_var, INFO_STRING_LEN - count,
+ "FdirHash ");
+ i_s_var = info_string + count;
+ }
+ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+ count += snprintf(i_s_var, INFO_STRING_LEN - count,
+ "FdirPerfect ");
+ i_s_var = info_string + count;
+ }
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ count += snprintf(i_s_var, INFO_STRING_LEN - count, "DCB ");
+ i_s_var = info_string + count;
+ }
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ count += snprintf(i_s_var, INFO_STRING_LEN - count, "RSS ");
+ i_s_var = info_string + count;
+ }
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+ count += snprintf(i_s_var, INFO_STRING_LEN - count, "DCA ");
+ i_s_var = info_string + count;
+ }
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ count += snprintf(i_s_var, INFO_STRING_LEN - count, "RSC ");
+ i_s_var = info_string + count;
+ }
+#ifndef IXGBE_NO_LRO
+ else if (netdev->features & NETIF_F_LRO) {
+ count += snprintf(i_s_var, INFO_STRING_LEN - count, "LRO ");
+ i_s_var = info_string + count;
+ }
+#endif
+
+ BUG_ON(i_s_var > (info_string + INFO_STRING_LEN));
+ /* end features printing */
+ e_info(probe, "%s\n", info_string);
+ kfree(info_string);
+no_info_string:
+
+ /* firmware requires blank driver version */
+ ixgbe_set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF);
+
+#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN)
+ /* add san mac addr to netdev */
+ //ixgbe_add_sanmac_netdev(netdev);
+
+#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */
+ e_info(probe, "Intel(R) 10 Gigabit Network Connection\n");
+ cards_found++;
+
+#ifdef IXGBE_SYSFS
+ //if (ixgbe_sysfs_init(adapter))
+ // e_err(probe, "failed to allocate sysfs resources\n");
+#else
+#ifdef IXGBE_PROCFS
+ //if (ixgbe_procfs_init(adapter))
+ // e_err(probe, "failed to allocate procfs resources\n");
+#endif /* IXGBE_PROCFS */
+#endif /* IXGBE_SYSFS */
+
+ return 0;
+
+//err_register:
+ //ixgbe_clear_interrupt_scheme(adapter);
+ //ixgbe_release_hw_control(adapter);
+err_sw_init:
+ adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
+ if (adapter->mac_table)
+ kfree(adapter->mac_table);
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ //pci_release_selected_regions(pdev,
+ // pci_select_bars(pdev, IORESOURCE_MEM));
+//err_pci_reg:
+//err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ixgbe_kni_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbe_kni_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+void ixgbe_kni_remove(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+}
+
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
+{
+ u16 value;
+ struct ixgbe_adapter *adapter = hw->back;
+
+ pci_read_config_word(adapter->pdev, reg, &value);
+ return value;
+}
+
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+
+ pci_write_config_word(adapter->pdev, reg, value);
+}
+
+void ewarn(struct ixgbe_hw *hw, const char *st, u32 status)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+
+ netif_warn(adapter, drv, adapter->netdev, "%s", st);
+}
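These three helpers are the OS glue that the shared ixgbe code reaches through the IXGBE_READ_PCIE_WORD/IXGBE_WRITE_PCIE_WORD and EWARN macros declared in ixgbe_osdep.h below. A minimal usage sketch, assuming hw->back points at a live adapter; the 0xA8 offset and the bit being set are purely illustrative and not taken from the driver:

/* Read-modify-write of a PCI configuration word through the osdep shims. */
static void sketch_set_cfg_bit(struct ixgbe_hw *hw)
{
	u16 word = IXGBE_READ_PCIE_WORD(hw, 0xA8);	/* illustrative offset */

	word |= 0x0001;					/* illustrative bit */
	IXGBE_WRITE_PCIE_WORD(hw, 0xA8, word);
}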
+
+
+
+
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h
new file mode 100755
index 00000000..124f00de
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,105 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACKs are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
+#endif /* _IXGBE_MBX_H_ */
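The message-type constants above pack a command code in the low bits, ACK/NACK/CTS flags in the high bits, and optional extra info in bits 23:16. A small sketch of how a reply word built from these definitions could be decoded; the function is illustrative and is not part of the mailbox API declared above:

/* Decode a mailbox reply: returns 0 if the peer ACKed the request,
 * -1 otherwise; '*info' receives the extra-info field (bits 23:16).
 */
static int sketch_decode_mbx_reply(u32 msg, u32 *info)
{
	*info = (msg & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;

	if (msg & IXGBE_VT_MSGTYPE_ACK)
		return 0;
	return -1;		/* NACK, or no response flag set */
}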
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h
new file mode 100755
index 00000000..d161600b
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h
@@ -0,0 +1,132 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS independent part of ixgbe
+ * includes register access macros
+ */
+
+#ifndef _IXGBE_OSDEP_H_
+#define _IXGBE_OSDEP_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/sched.h>
+#include "kcompat.h"
+
+
+#ifndef msleep
+#define msleep(x) do { if (in_interrupt()) { \
+ /* Don't mdelay in interrupt context! */ \
+ BUG(); \
+ } else { \
+ msleep(x); \
+ } } while (0)
+
+#endif
+
+#undef ASSERT
+
+#ifdef DBG
+#define hw_dbg(hw, S, A...) printk(KERN_DEBUG S, ## A)
+#else
+#define hw_dbg(hw, S, A...) do {} while (0)
+#endif
+
+#define e_dev_info(format, arg...) \
+ dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_dev_warn(format, arg...) \
+ dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_dev_err(format, arg...) \
+ dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_dev_notice(format, arg...) \
+ dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg)
+#define e_info(msglvl, format, arg...) \
+ netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_err(msglvl, format, arg...) \
+ netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_warn(msglvl, format, arg...) \
+ netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_crit(msglvl, format, arg...) \
+ netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
+
+
+#ifdef DBG
+#define IXGBE_WRITE_REG(a, reg, value) do {\
+ switch (reg) { \
+ case IXGBE_EIMS: \
+ case IXGBE_EIMC: \
+ case IXGBE_EIAM: \
+ case IXGBE_EIAC: \
+ case IXGBE_EICR: \
+ case IXGBE_EICS: \
+ printk("%s: Reg - 0x%05X, value - 0x%08X\n", __func__, \
+ reg, (u32)(value)); \
+ default: \
+ break; \
+ } \
+ writel((value), ((a)->hw_addr + (reg))); \
+} while (0)
+#else
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#endif
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + (reg) + ((offset) << 2)))
+
+#ifndef writeq
+#define writeq(val, addr) do { writel((u32) (val), addr); \
+ writel((u32) (val >> 32), (addr + 4)); \
+ } while (0)
+#endif
+
+#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+
+#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+struct ixgbe_hw;
+extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
+extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
+extern void ewarn(struct ixgbe_hw *hw, const char *str, u32 status);
+
+#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word
+#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word
+#define IXGBE_EEPROM_GRANT_ATTEMPS 100
+#define IXGBE_HTONL(_i) htonl(_i)
+#define IXGBE_NTOHL(_i) ntohl(_i)
+#define IXGBE_NTOHS(_i) ntohs(_i)
+#define IXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i)
+#define IXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i)
+#define EWARN(H, W, S) ewarn(H, W, S)
+
+#endif /* _IXGBE_OSDEP_H_ */
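A short sketch of the access pattern these macros exist for: read-modify-write a device register, then flush the posted write by reading IXGBE_STATUS via IXGBE_WRITE_FLUSH. IXGBE_CTRL_EXT is assumed to be defined in ixgbe_type.h, and the bit value is illustrative:

static void sketch_rmw_register(struct ixgbe_hw *hw)
{
	u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);

	reg |= 0x00000001;			/* illustrative bit */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
	IXGBE_WRITE_FLUSH(hw);			/* push the posted write out */
}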
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c
new file mode 100755
index 00000000..e3f5275e
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c
@@ -0,0 +1,1847 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+static void ixgbe_i2c_start(struct ixgbe_hw *hw);
+static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
+static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
+static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
+static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
+static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
+static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
+static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
+static bool ixgbe_get_i2c_data(u32 *i2cctl);
+
+/**
+ * ixgbe_init_phy_ops_generic - Inits PHY function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ **/
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ /* PHY */
+ phy->ops.identify = &ixgbe_identify_phy_generic;
+ phy->ops.reset = &ixgbe_reset_phy_generic;
+ phy->ops.read_reg = &ixgbe_read_phy_reg_generic;
+ phy->ops.write_reg = &ixgbe_write_phy_reg_generic;
+ phy->ops.setup_link = &ixgbe_setup_phy_link_generic;
+ phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
+ phy->ops.check_link = NULL;
+ phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
+ phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
+ phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
+ phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
+ phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
+ phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
+ phy->ops.identify_sfp = &ixgbe_identify_module_generic;
+ phy->sfp_type = ixgbe_sfp_type_unknown;
+ phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
+ return 0;
+}
+
+/**
+ * ixgbe_identify_phy_generic - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 phy_addr;
+ u16 ext_ability = 0;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+ if (ixgbe_validate_phy_addr(hw, phy_addr)) {
+ hw->phy.addr = phy_addr;
+ ixgbe_get_phy_id(hw);
+ hw->phy.type =
+ ixgbe_get_phy_type_from_id(hw->phy.id);
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &ext_ability);
+ if (ext_ability &
+ (IXGBE_MDIO_PHY_10GBASET_ABILITY |
+ IXGBE_MDIO_PHY_1000BASET_ABILITY))
+ hw->phy.type =
+ ixgbe_phy_cu_unknown;
+ else
+ hw->phy.type =
+ ixgbe_phy_generic;
+ }
+
+ status = 0;
+ break;
+ }
+ }
+ /* clear value if nothing found */
+ if (status != 0)
+ hw->phy.addr = 0;
+ } else {
+ status = 0;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_phy_addr - Determines whether a PHY address is valid
+ * @hw: pointer to hardware structure
+ * @phy_addr: PHY address to check
+ **/
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
+{
+ u16 phy_id = 0;
+ bool valid = false;
+
+ hw->phy.addr = phy_addr;
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
+
+ if (phy_id != 0xFFFF && phy_id != 0x0)
+ valid = true;
+
+ return valid;
+}
+
+/**
+ * ixgbe_get_phy_id - Get the PHY ID
+ * @hw: pointer to hardware structure
+ *
+ **/
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 phy_id_high = 0;
+ u16 phy_id_low = 0;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &phy_id_high);
+
+ if (status == 0) {
+ hw->phy.id = (u32)(phy_id_high << 16);
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &phy_id_low);
+ hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
+ hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_type_from_id - Get the phy type
+ * @phy_id: PHY ID to translate into a PHY type
+ *
+ **/
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
+{
+ enum ixgbe_phy_type phy_type;
+
+ switch (phy_id) {
+ case TN1010_PHY_ID:
+ phy_type = ixgbe_phy_tn;
+ break;
+ case X540_PHY_ID:
+ phy_type = ixgbe_phy_aq;
+ break;
+ case QT2022_PHY_ID:
+ phy_type = ixgbe_phy_qt;
+ break;
+ case ATH_PHY_ID:
+ phy_type = ixgbe_phy_nl;
+ break;
+ default:
+ phy_type = ixgbe_phy_unknown;
+ break;
+ }
+
+ hw_dbg(hw, "phy type found is %d\n", phy_type);
+ return phy_type;
+}
+
+/**
+ * ixgbe_reset_phy_generic - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u16 ctrl = 0;
+ s32 status = 0;
+
+ if (hw->phy.type == ixgbe_phy_unknown)
+ status = ixgbe_identify_phy_generic(hw);
+
+ if (status != 0 || hw->phy.type == ixgbe_phy_none)
+ goto out;
+
+ /* Don't reset PHY if it's shut down due to overtemp. */
+ if (!hw->phy.reset_if_overtemp &&
+ (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+ goto out;
+
+ /*
+ * Perform soft PHY reset to the PHY_XS.
+ * This will cause a soft reset to the PHY
+ */
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ IXGBE_MDIO_PHY_XS_RESET);
+
+ /*
+ * Poll for reset bit to self-clear indicating reset is complete.
+ * Some PHYs could take up to 3 seconds to complete and need about
+ * 1.7 usec delay after the reset is complete.
+ */
+ for (i = 0; i < 30; i++) {
+ msleep(100);
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
+ if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
+ udelay(2);
+ break;
+ }
+ }
+
+ if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "PHY reset polling failed to complete.\n");
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ u32 command;
+ u32 i;
+ u32 data;
+ s32 status = 0;
+ u16 gssr;
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ gssr = IXGBE_GSSR_PHY1_SM;
+ else
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ if (status == 0) {
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address command did not complete.\n");
+ status = IXGBE_ERR_PHY;
+ }
+
+ if (status == 0) {
+ /*
+ * Address cycle complete, setup and write the read
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY read command didn't complete\n");
+ status = IXGBE_ERR_PHY;
+ } else {
+ /*
+ * Read operation is complete. Get the data
+ * from MSRWD
+ */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)(data);
+ }
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ }
+
+ return status;
+}
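Both the address cycle and the data cycle above follow the same pattern: write a command word to IXGBE_MSCA, then poll every 10 usec for up to IXGBE_MDIO_COMMAND_TIMEOUT iterations until the IXGBE_MSCA_MDI_COMMAND bit self-clears. A sketch of that poll factored into a helper; the driver keeps the loops inline, and the helper name here is illustrative:

/* Wait for the MDI command bit to self-clear; returns 0 on completion,
 * IXGBE_ERR_PHY if the command is still pending after the timeout.
 */
static s32 sketch_poll_mdi_done(struct ixgbe_hw *hw)
{
	u32 i, command;

	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);
		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			return 0;
	}
	return IXGBE_ERR_PHY;
}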
+
+/**
+ * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 command;
+ u32 i;
+ s32 status = 0;
+ u16 gssr;
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ gssr = IXGBE_GSSR_PHY1_SM;
+ else
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ if (status == 0) {
+ /* Put the data in the MDI single read and write data register*/
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address cmd didn't complete\n");
+ status = IXGBE_ERR_PHY;
+ }
+
+ if (status == 0) {
+ /*
+ * Address cycle complete, setup and write the write
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address cmd didn't complete\n");
+ status = IXGBE_ERR_PHY;
+ }
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_generic - Set and restart autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restarts autonegotiation on the PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ ixgbe_link_speed speed;
+
+ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ /* Set or unset auto-negotiation 1G advertisement */
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL) {
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
+ IXGBE_MII_100BASE_T_ADVERTISE_HALF);
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ /* Restart PHY autonegotiation and wait for completion */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+ autoneg_reg |= IXGBE_MII_RESTART;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+ /* Wait for autonegotiation to finish */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ udelay(10);
+ /* Read the autonegotiation status */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
+ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
+ break;
+ }
+
+ if (time_out == max_time_out) {
+ status = IXGBE_ERR_LINK_SETUP;
+ hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ **/
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+
+ /*
+ * Clear autoneg_advertised and set new values based on input link
+ * speed.
+ */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ /* Setup link based on the new speed settings */
+ hw->phy.ops.setup_link(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the supported link capabilities by reading the PHY speed ability register.
+ **/
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_ERR_LINK_SETUP;
+ u16 speed_ability;
+
+ *speed = 0;
+ *autoneg = true;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &speed_ability);
+
+ if (status == 0) {
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
+ *speed |= IXGBE_LINK_SPEED_100_FULL;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_check_phy_link_tnx - Determine link and speed status
+ * @hw: pointer to hardware structure
+ *
+ * Reads the VS1 register to determine if link is up and the current speed for
+ * the PHY.
+ **/
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ s32 status = 0;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 phy_link = 0;
+ u16 phy_speed = 0;
+ u16 phy_data = 0;
+
+ /* Initialize speed and link to default case */
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /*
+ * Check current speed and link status of the PHY register.
+ * This is a vendor specific register and may have to
+ * be changed for other copper PHYs.
+ */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ udelay(10);
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &phy_data);
+ phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+ phy_speed = phy_data &
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+ if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
+ *link_up = true;
+ if (phy_speed ==
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_tnx - Set and restart autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restarts autonegotiation on the PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ ixgbe_link_speed speed;
+
+ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ /* Set or unset auto-negotiation 1G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL) {
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ /* Restart PHY autonegotiation and wait for completion */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+ autoneg_reg |= IXGBE_MII_RESTART;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+ /* Wait for autonegotiation to finish */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ udelay(10);
+ /* Read the autonegotiation status */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
+ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
+ break;
+ }
+
+ if (time_out == max_time_out) {
+ status = IXGBE_ERR_LINK_SETUP;
+ hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status = 0;
+
+ status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ firmware_version);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status = 0;
+
+ status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ firmware_version);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_phy_nl - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+{
+ u16 phy_offset, control, eword, edata, block_crc;
+ bool end_data = false;
+ u16 list_offset, data_offset;
+ u16 phy_data = 0;
+ s32 ret_val = 0;
+ u32 i;
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+
+ /* reset the PHY and poll for completion */
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ (phy_data | IXGBE_MDIO_PHY_XS_RESET));
+
+ for (i = 0; i < 100; i++) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
+ break;
+ msleep(10);
+ }
+
+ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
+ hw_dbg(hw, "PHY reset did not complete.\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+
+ /* Get init offsets */
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+ &data_offset);
+ if (ret_val != 0)
+ goto out;
+
+ ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
+ data_offset++;
+ while (!end_data) {
+ /*
+ * Read control word from PHY init contents offset
+ */
+ ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+ control = (eword & IXGBE_CONTROL_MASK_NL) >>
+ IXGBE_CONTROL_SHIFT_NL;
+ edata = eword & IXGBE_DATA_MASK_NL;
+ switch (control) {
+ case IXGBE_DELAY_NL:
+ data_offset++;
+ hw_dbg(hw, "DELAY: %d MS\n", edata);
+ msleep(edata);
+ break;
+ case IXGBE_DATA_NL:
+ hw_dbg(hw, "DATA:\n");
+ data_offset++;
+ hw->eeprom.ops.read(hw, data_offset++,
+ &phy_offset);
+ for (i = 0; i < edata; i++) {
+ hw->eeprom.ops.read(hw, data_offset, &eword);
+ hw->phy.ops.write_reg(hw, phy_offset,
+ IXGBE_TWINAX_DEV, eword);
+ hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
+ phy_offset);
+ data_offset++;
+ phy_offset++;
+ }
+ break;
+ case IXGBE_CONTROL_NL:
+ data_offset++;
+ hw_dbg(hw, "CONTROL:\n");
+ if (edata == IXGBE_CONTROL_EOL_NL) {
+ hw_dbg(hw, "EOL\n");
+ end_data = true;
+ } else if (edata == IXGBE_CONTROL_SOL_NL) {
+ hw_dbg(hw, "SOL\n");
+ } else {
+ hw_dbg(hw, "Bad control value\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+ break;
+ default:
+ hw_dbg(hw, "Bad control type\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_identify_module_generic - Identifies module type
+ * @hw: pointer to hardware structure
+ *
+ * Determines HW type and calls appropriate function.
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ status = ixgbe_identify_sfp_module_generic(hw);
+ break;
+
+ case ixgbe_media_type_fiber_qsfp:
+ status = ixgbe_identify_qsfp_module_generic(hw);
+ break;
+
+ default:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the SFP module and assigns appropriate PHY type.
+ **/
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u8 cable_tech = 0;
+ u8 cable_spec = 0;
+ u16 enforce_sfp = 0;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_IDENTIFIER,
+ &identifier);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES,
+ &comp_codes_1g);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES,
+ &comp_codes_10g);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_CABLE_TECHNOLOGY,
+ &cable_tech);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ /* ID Module
+ * =========
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ * 3 SFP_DA_CORE0 - 82599-specific
+ * 4 SFP_DA_CORE1 - 82599-specific
+ * 5 SFP_SR/LR_CORE0 - 82599-specific
+ * 6 SFP_SR/LR_CORE1 - 82599-specific
+ * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
+ * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
+ * 9 SFP_1g_cu_CORE0 - 82599-specific
+ * 10 SFP_1g_cu_CORE1 - 82599-specific
+ * 11 SFP_1g_sx_CORE0 - 82599-specific
+ * 12 SFP_1g_sx_CORE1 - 82599-specific
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ } else if (hw->mac.type == ixgbe_mac_82599EB) {
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_cu_core1;
+ } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
+ hw->phy.ops.read_i2c_eeprom(
+ hw, IXGBE_SFF_CABLE_SPEC_COMP,
+ &cable_spec);
+ if (cable_spec &
+ IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_unknown;
+ }
+ } else if (comp_codes_10g &
+ (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_srlr_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_srlr_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core1;
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ }
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the SFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
+ /* Determine PHY vendor */
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = identifier;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ vendor_oui =
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+ switch (vendor_oui) {
+ case IXGBE_SFF_VENDOR_OUI_TYCO:
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_tyco;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_FTL:
+ if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_ftl_active;
+ else
+ hw->phy.type = ixgbe_phy_sfp_ftl;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_AVAGO:
+ hw->phy.type = ixgbe_phy_sfp_avago;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_INTEL:
+ hw->phy.type = ixgbe_phy_sfp_intel;
+ break;
+ default:
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_unknown;
+ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_active_unknown;
+ else
+ hw->phy.type = ixgbe_phy_sfp_unknown;
+ break;
+ }
+ }
+
+ /* Allow any DA cable vendor */
+ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+ IXGBE_SFF_DA_ACTIVE_CABLE)) {
+ status = 0;
+ goto out;
+ }
+
+ /* Verify supported 1G SFP modules */
+ if (comp_codes_10g == 0 &&
+ !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Anything else 82598-based is supported */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ status = 0;
+ goto out;
+ }
+
+ ixgbe_get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+ !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
+ /* Make sure we're a supported PHY type */
+ if (hw->phy.type == ixgbe_phy_sfp_intel) {
+ status = 0;
+ } else {
+ if (hw->allow_unsupported_sfp == true) {
+ EWARN(hw, "WARNING: Intel (R) Network "
+ "Connections are quality tested "
+ "using Intel (R) Ethernet Optics."
+ " Using untested modules is not "
+ "supported and may cause unstable"
+ " operation or damage to the "
+ "module or the adapter. Intel "
+ "Corporation is not responsible "
+ "for any harm caused by using "
+ "untested modules.\n", status);
+ status = 0;
+ } else {
+ hw_dbg(hw, "SFP+ module not supported\n");
+ hw->phy.type =
+ ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ status = 0;
+ }
+ }
+
+out:
+ return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+ }
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+/**
+ * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the QSFP module and assigns appropriate PHY type
+ **/
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ }
+
+ return status;
+}
+
+
+/**
+ * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
+ * @hw: pointer to hardware structure
+ * @list_offset: offset to the SFP ID list
+ * @data_offset: offset to the SFP data block
+ *
+ * Checks the MAC's EEPROM to see if it supports a given SFP+ module type;
+ * if so, it returns the offsets to the PHY init sequence block.
+ **/
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset)
+{
+ u16 sfp_id;
+ u16 sfp_type = hw->phy.sfp_type;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+
+ if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
+ (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ /*
+ * Limiting active cables and 1G Phys must be initialized as
+ * SR modules
+ */
+ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core0)
+ sfp_type = ixgbe_sfp_type_srlr_core0;
+ else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core1)
+ sfp_type = ixgbe_sfp_type_srlr_core1;
+
+ /* Read offset to PHY init contents */
+ hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
+
+ if ((!*list_offset) || (*list_offset == 0xFFFF))
+ return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+
+ /* Shift offset to first ID word */
+ (*list_offset)++;
+
+ /*
+ * Find the matching SFP ID in the EEPROM
+ * and program the init sequence
+ */
+ hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
+
+ while (sfp_id != IXGBE_PHY_INIT_END_NL) {
+ if (sfp_id == sfp_type) {
+ (*list_offset)++;
+ hw->eeprom.ops.read(hw, *list_offset, data_offset);
+ if ((!*data_offset) || (*data_offset == 0xFFFF)) {
+ hw_dbg(hw, "SFP+ module not supported\n");
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
+ break;
+ }
+ } else {
+ (*list_offset) += 2;
+ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+ return IXGBE_ERR_PHY;
+ }
+ }
+
+ if (sfp_id == IXGBE_PHY_INIT_END_NL) {
+ hw_dbg(hw, "No matching SFP+ module found\n");
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data)
+{
+ return hw->phy.ops.write_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ s32 status = 0;
+ u32 max_retry = 10;
+ u32 retry = 0;
+ u16 swfw_mask = 0;
+ bool nack = 1;
+ *data = 0;
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ do {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+ != 0) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto read_byte_out;
+ }
+
+ ixgbe_i2c_start(hw);
+
+ /* Device Address and write indication */
+ status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ ixgbe_i2c_start(hw);
+
+ /* Device Address and read indication */
+ status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_clock_in_i2c_byte(hw, data);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_bit(hw, nack);
+ if (status != 0)
+ goto fail;
+
+ ixgbe_i2c_stop(hw);
+ break;
+
+fail:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msleep(100);
+ ixgbe_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ hw_dbg(hw, "I2C byte read error - Retrying.\n");
+ else
+ hw_dbg(hw, "I2C byte read error.\n");
+
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+read_byte_out:
+ return status;
+}
+
+/**
+ * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ s32 status = 0;
+ u32 max_retry = 1;
+ u32 retry = 0;
+ u16 swfw_mask = 0;
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto write_byte_out;
+ }
+
+ do {
+ ixgbe_i2c_start(hw);
+
+ status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, data);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ ixgbe_i2c_stop(hw);
+ break;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ hw_dbg(hw, "I2C byte write error - Retrying.\n");
+ else
+ hw_dbg(hw, "I2C byte write error.\n");
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
+ return status;
+}
+
+/**
+ * ixgbe_i2c_start - Sets I2C start condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C start condition (High -> Low on SDA while SCL is High)
+ **/
+static void ixgbe_i2c_start(struct ixgbe_hw *hw)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+ /* Start condition must begin with data and clock high */
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for start condition (4.7us) */
+ udelay(IXGBE_I2C_T_SU_STA);
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 0);
+
+ /* Hold time for start condition (4us) */
+ udelay(IXGBE_I2C_T_HD_STA);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ udelay(IXGBE_I2C_T_LOW);
+
+}
+
+/**
+ * ixgbe_i2c_stop - Sets I2C stop condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ **/
+static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+ /* Stop condition must begin with data low and clock high */
+ ixgbe_set_i2c_data(hw, &i2cctl, 0);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for stop condition (4us) */
+ udelay(IXGBE_I2C_T_SU_STO);
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+ /* bus free time between stop and start (4.7us) */
+ udelay(IXGBE_I2C_T_BUF);
+}
+
+/**
+ * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte to clock in
+ *
+ * Clocks in one byte data via I2C data/clock
+ **/
+static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
+{
+ s32 i;
+ bool bit = 0;
+
+ for (i = 7; i >= 0; i--) {
+ ixgbe_clock_in_i2c_bit(hw, &bit);
+ *data |= bit << i;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte clocked out
+ *
+ * Clocks out one byte data via I2C data/clock
+ **/
+static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
+{
+ s32 status = 0;
+ s32 i;
+ u32 i2cctl;
+ bool bit = 0;
+
+ for (i = 7; i >= 0; i--) {
+ bit = (data >> i) & 0x1;
+ status = ixgbe_clock_out_i2c_bit(hw, bit);
+
+ if (status != 0)
+ break;
+ }
+
+ /* Release SDA line (set high) */
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ i2cctl |= IXGBE_I2C_DATA_OUT;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_i2c_ack - Polls for I2C ACK
+ * @hw: pointer to hardware structure
+ *
+ * Clocks in/out one bit via I2C data/clock
+ **/
+static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u32 i = 0;
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 timeout = 10;
+ bool ack = 1;
+
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+
+ /* Minimum high period of clock is 4us */
+ udelay(IXGBE_I2C_T_HIGH);
+
+ /* Poll for ACK. Note that ACK in I2C spec is
+ * transition from 1 to 0 */
+ for (i = 0; i < timeout; i++) {
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ ack = ixgbe_get_i2c_data(&i2cctl);
+
+ udelay(1);
+ if (ack == 0)
+ break;
+ }
+
+ if (ack == 1) {
+ hw_dbg(hw, "I2C ack was not received.\n");
+ status = IXGBE_ERR_I2C;
+ }
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ udelay(IXGBE_I2C_T_LOW);
+
+ return status;
+}
+
+/**
+ * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: read data value
+ *
+ * Clocks in one bit via I2C data/clock
+ **/
+static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ udelay(IXGBE_I2C_T_HIGH);
+
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ *data = ixgbe_get_i2c_data(&i2cctl);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ udelay(IXGBE_I2C_T_LOW);
+
+ return 0;
+}
+
+/**
+ * ixgbe_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: data value to write
+ *
+ * Clocks out one bit via I2C data/clock
+ **/
+static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+{
+ s32 status;
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+ status = ixgbe_set_i2c_data(hw, &i2cctl, data);
+ if (status == 0) {
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ udelay(IXGBE_I2C_T_HIGH);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us.
+ * This also takes care of the data hold time.
+ */
+ udelay(IXGBE_I2C_T_LOW);
+ } else {
+ status = IXGBE_ERR_I2C;
+ hw_dbg(hw, "I2C data was not set to %X\n", data);
+ }
+
+ return status;
+}
+/**
+ * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Raises the I2C clock line '0'->'1'
+ **/
+static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+ u32 i = 0;
+ u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
+ u32 i2cctl_r = 0;
+
+ for (i = 0; i < timeout; i++) {
+ *i2cctl |= IXGBE_I2C_CLK_OUT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ /* SCL rise time (1000ns) */
+ udelay(IXGBE_I2C_T_RISE);
+
+ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ if (i2cctl_r & IXGBE_I2C_CLK_IN)
+ break;
+ }
+}
+
+/**
+ * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Lowers the I2C clock line '1'->'0'
+ **/
+static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+
+ *i2cctl &= ~IXGBE_I2C_CLK_OUT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* SCL fall time (300ns) */
+ udelay(IXGBE_I2C_T_FALL);
+}
+
+/**
+ * ixgbe_set_i2c_data - Sets the I2C data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ * @data: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ **/
+static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+{
+ s32 status = 0;
+
+ if (data)
+ *i2cctl |= IXGBE_I2C_DATA_OUT;
+ else
+ *i2cctl &= ~IXGBE_I2C_DATA_OUT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+ udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
+
+ /* Verify data was set correctly */
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ if (data != ixgbe_get_i2c_data(i2cctl)) {
+ status = IXGBE_ERR_I2C;
+ hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_i2c_data - Reads the I2C SDA data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ **/
+static bool ixgbe_get_i2c_data(u32 *i2cctl)
+{
+ bool data;
+
+ if (*i2cctl & IXGBE_I2C_DATA_IN)
+ data = 1;
+ else
+ data = 0;
+
+ return data;
+}
+
+/**
+ * ixgbe_i2c_bus_clear - Clears the I2C bus
+ * @hw: pointer to hardware structure
+ *
+ * Clears the I2C bus by sending nine clock pulses.
+ * Used when data line is stuck low.
+ **/
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i;
+
+ ixgbe_i2c_start(hw);
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+ for (i = 0; i < 9; i++) {
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Min high period of clock is 4us */
+ udelay(IXGBE_I2C_T_HIGH);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Min low period of clock is 4.7us */
+ udelay(IXGBE_I2C_T_LOW);
+ }
+
+ ixgbe_i2c_start(hw);
+
+ /* Put the i2c bus back to default state */
+ ixgbe_i2c_stop(hw);
+}
+
+/**
+ * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
+ * @hw: pointer to hardware structure
+ *
+ * Checks if the LASI temp alarm status was triggered due to overtemp
+ **/
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u16 phy_data = 0;
+
+ if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
+ goto out;
+
+ /* Check that the LASI temp alarm status was triggered */
+ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
+
+ if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
+ goto out;
+
+ status = IXGBE_ERR_OVERTEMP;
+out:
+ return status;
+}
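For reference, the vendor OUI that ixgbe_identify_sfp_module_generic() above compares against the IXGBE_SFF_VENDOR_OUI_* constants is assembled from the three SFF-8472 EEPROM bytes at offsets 0x25-0x27 using the byte-shift values declared in ixgbe_phy.h. A minimal stand-alone sketch of that assembly (illustrative only, not part of this patch; the byte values are hypothetical):

#include <stdio.h>

/* Shift values mirror IXGBE_SFF_VENDOR_OUI_BYTE{0,1,2}_SHIFT (24/16/8) */
#define OUI_BYTE0_SHIFT 24
#define OUI_BYTE1_SHIFT 16
#define OUI_BYTE2_SHIFT 8

int main(void)
{
	/* Hypothetical bytes as read from SFF EEPROM offsets 0x25..0x27 */
	unsigned char oui_bytes[3] = { 0x00, 0x1B, 0x21 };
	unsigned int vendor_oui;

	vendor_oui = ((unsigned int)oui_bytes[0] << OUI_BYTE0_SHIFT) |
		     ((unsigned int)oui_bytes[1] << OUI_BYTE1_SHIFT) |
		     ((unsigned int)oui_bytes[2] << OUI_BYTE2_SHIFT);

	/* Prints 0x001B2100, which matches IXGBE_SFF_VENDOR_OUI_INTEL */
	printf("vendor_oui = 0x%08X\n", vendor_oui);
	return 0;
}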
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h
new file mode 100755
index 00000000..bbe5a9e3
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h
@@ -0,0 +1,137 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_PHY_H_
+#define _IXGBE_PHY_H_
+
+#include "ixgbe_type.h"
+#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+
+/* EEPROM byte offsets */
+#define IXGBE_SFF_IDENTIFIER 0x0
+#define IXGBE_SFF_IDENTIFIER_SFP 0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
+#define IXGBE_SFF_1GBE_COMP_CODES 0x6
+#define IXGBE_SFF_10GBE_COMP_CODES 0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+
+/* Bitmasks */
+#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_I2C_EEPROM_READ_MASK 0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+
+/* Flow control defines */
+#define IXGBE_TAF_SYM_PAUSE 0x400
+#define IXGBE_TAF_ASM_PAUSE 0x800
+
+/* Bit-shift macros */
+#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
+#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
+#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
+#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
+#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define IXGBE_I2C_T_HD_STA 4
+#define IXGBE_I2C_T_LOW 5
+#define IXGBE_I2C_T_HIGH 4
+#define IXGBE_I2C_T_SU_STA 5
+#define IXGBE_I2C_T_HD_DATA 5
+#define IXGBE_I2C_T_SU_DATA 1
+#define IXGBE_I2C_T_RISE 1
+#define IXGBE_I2C_T_FALL 1
+#define IXGBE_I2C_T_SU_STO 4
+#define IXGBE_I2C_T_BUF 5
+
+#define IXGBE_TN_LASI_STATUS_REG 0x9005
+#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+
+/* PHY specific */
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up);
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset);
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data);
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
+#endif /* _IXGBE_PHY_H_ */
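The generic I2C EEPROM helpers declared above are the building blocks used by the SFP identification path in ixgbe_phy.c. A minimal sketch of how they might be called to probe a module's identity bytes (illustrative only; it assumes a fully initialized struct ixgbe_hw with phy.ops.read_i2c_byte wired up and would have to be built inside the driver with these headers included; dump_sfp_identity is a hypothetical helper, not part of this patch):

/* Hypothetical helper, mirroring the first steps of
 * ixgbe_identify_sfp_module_generic(); error checks on the last two
 * reads are elided for brevity.
 */
static s32 dump_sfp_identity(struct ixgbe_hw *hw)
{
	u8 identifier = 0, comp_codes_10g = 0, cable_tech = 0;
	s32 status;

	status = ixgbe_read_i2c_eeprom_generic(hw, IXGBE_SFF_IDENTIFIER,
	                                       &identifier);
	if (status)
		return status;
	if (identifier != IXGBE_SFF_IDENTIFIER_SFP)
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	ixgbe_read_i2c_eeprom_generic(hw, IXGBE_SFF_10GBE_COMP_CODES,
	                              &comp_codes_10g);
	ixgbe_read_i2c_eeprom_generic(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
	                              &cable_tech);

	hw_dbg(hw, "id=0x%x 10g_codes=0x%x cable_tech=0x%x\n",
	       identifier, comp_codes_10g, cable_tech);
	return 0;
}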
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_sriov.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_sriov.h
new file mode 100755
index 00000000..b1cc9d04
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,74 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#ifndef _IXGBE_SRIOV_H_
+#define _IXGBE_SRIOV_H_
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf);
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
+void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_msg_task(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr);
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+#ifdef IFLA_VF_MAX
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
+ u8 qos);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+#endif
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
+#endif
+void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
+#ifdef CONFIG_PCI_IOV
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter);
+#endif
+int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
+#ifdef IFLA_VF_MAX
+void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+#endif /* IFLA_VF_MAX */
+void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+
+/*
+ * These are defined in ixgbe_type.h on behalf of the VF driver
+ * but we need them here unwrapped for the PF driver.
+ */
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_X540_VF 0x1515
+
+#endif /* _IXGBE_SRIOV_H_ */
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h
new file mode 100755
index 00000000..6b21c879
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h
@@ -0,0 +1,3254 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_TYPE_H_
+#define _IXGBE_TYPE_H_
+
+#include "ixgbe_osdep.h"
+
+
+/* Vendor ID */
+#define IXGBE_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82598 0x10B6
+#define IXGBE_DEV_ID_82598_BX 0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
+#define IXGBE_DEV_ID_82598AT 0x10C8
+#define IXGBE_DEV_ID_82598AT2 0x150B
+#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
+#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
+#define IXGBE_DEV_ID_82599_KX4 0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
+#define IXGBE_DEV_ID_82599_KR 0x1517
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_82599_CX4 0x10F9
+#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
+#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
+#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
+#define IXGBE_DEV_ID_82599EN_SFP 0x1557
+#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
+#define IXGBE_DEV_ID_82599_LS 0x154F
+#define IXGBE_DEV_ID_X540T 0x1528
+
+/* General Registers */
+#define IXGBE_CTRL 0x00000
+#define IXGBE_STATUS 0x00008
+#define IXGBE_CTRL_EXT 0x00018
+#define IXGBE_ESDP 0x00020
+#define IXGBE_EODSDP 0x00028
+#define IXGBE_I2CCTL 0x00028
+#define IXGBE_PHY_GPIO 0x00028
+#define IXGBE_MAC_GPIO 0x00030
+#define IXGBE_PHYINT_STATUS0 0x00100
+#define IXGBE_PHYINT_STATUS1 0x00104
+#define IXGBE_PHYINT_STATUS2 0x00108
+#define IXGBE_LEDCTL 0x00200
+#define IXGBE_FRTIMER 0x00048
+#define IXGBE_TCPTIMER 0x0004C
+#define IXGBE_CORESPARE 0x00600
+#define IXGBE_EXVET 0x05078
+
+/* NVM Registers */
+#define IXGBE_EEC 0x10010
+#define IXGBE_EERD 0x10014
+#define IXGBE_EEWR 0x10018
+#define IXGBE_FLA 0x1001C
+#define IXGBE_EEMNGCTL 0x10110
+#define IXGBE_EEMNGDATA 0x10114
+#define IXGBE_FLMNGCTL 0x10118
+#define IXGBE_FLMNGDATA 0x1011C
+#define IXGBE_FLMNGCNT 0x10120
+#define IXGBE_FLOP 0x1013C
+#define IXGBE_GRC 0x10200
+#define IXGBE_SRAMREL 0x10210
+#define IXGBE_PHYDBG 0x10218
+
+/* General Receive Control */
+#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
+
+#define IXGBE_VPDDIAG0 0x10204
+#define IXGBE_VPDDIAG1 0x10208
+
+/* I2CCTL Bit Masks */
+#define IXGBE_I2C_CLK_IN 0x00000001
+#define IXGBE_I2C_CLK_OUT 0x00000002
+#define IXGBE_I2C_DATA_IN 0x00000004
+#define IXGBE_I2C_DATA_OUT 0x00000008
+#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
+
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define IXGBE_EMC_INTERNAL_DATA 0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
+#define IXGBE_EMC_DIODE1_DATA 0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
+#define IXGBE_EMC_DIODE2_DATA 0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
+
+#define IXGBE_MAX_SENSORS 3
+
+struct ixgbe_thermal_diode_data {
+ u8 location;
+ u8 temp;
+ u8 caution_thresh;
+ u8 max_op_thresh;
+};
+
+struct ixgbe_thermal_sensor_data {
+ struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
+};
+
+/* Interrupt Registers */
+#define IXGBE_EICR 0x00800
+#define IXGBE_EICS 0x00808
+#define IXGBE_EIMS 0x00880
+#define IXGBE_EIMC 0x00888
+#define IXGBE_EIAC 0x00810
+#define IXGBE_EIAM 0x00890
+#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
+/* 82599 EITR is only 12 bits, with the lower 3 always zero */
+/*
+ * 82598 EITR is 16 bits but set the limits based on the max
+ * supported by all ixgbe hardware
+ */
+#define IXGBE_MAX_INT_RATE 488281
+#define IXGBE_MIN_INT_RATE 956
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
+#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
+ (0x012300 + (((_i) - 24) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
+#define IXGBE_EITR_LLI_MOD 0x00008000
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
+#define IXGBE_EITRSEL 0x00894
+#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
+#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
+#define IXGBE_GPIE 0x00898
+
+/* Flow Control Registers */
+#define IXGBE_FCADBUL 0x03210
+#define IXGBE_FCADBUH 0x03214
+#define IXGBE_FCAMACL 0x04328
+#define IXGBE_FCAMACH 0x0432C
+#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_PFCTOP 0x03008
+#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTV 0x032A0
+#define IXGBE_FCCFG 0x03D00
+#define IXGBE_TFCS 0x0CE00
+
+/* Receive DMA Registers */
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+ (0x0D000 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+ (0x0D004 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+ (0x0D008 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+ (0x0D010 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+ (0x0D018 + (((_i) - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+ (0x0D028 + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+ (0x0D02C + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCDBU 0x03028
+#define IXGBE_RDDCC 0x02F20
+#define IXGBE_RXMEMWRAP 0x03190
+#define IXGBE_STARCTRL 0x03024
+/*
+ * Split and Replication Receive Control Registers
+ * 00-15 : 0x02100 + n*4
+ * 16-63 : 0x01014 + n*0x40
+ * 64-127: 0x0D014 + (n-64)*0x40
+ */
+#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (0x0D014 + (((_i) - 64) * 0x40))))
+/*
+ * Rx DCA Control Register:
+ * 00-15 : 0x02200 + n*4
+ * 16-63 : 0x0100C + n*0x40
+ * 64-127: 0x0D00C + (n-64)*0x40
+ */
+#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+ (0x0D00C + (((_i) - 64) * 0x40))))
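/*
 * Illustration (not part of this patch): how split-range macros such as
 * IXGBE_SRRCTL(_i) and IXGBE_DCA_RXCTRL(_i) above resolve a queue index to
 * a register offset. Stand-alone user-space sketch with hypothetical queue
 * indices.
 */
#include <stdio.h>

static unsigned int srrctl_offset(unsigned int i)
{
	if (i <= 15)
		return 0x02100 + i * 4;           /* queues 0-15   */
	if (i < 64)
		return 0x01014 + i * 0x40;        /* queues 16-63  */
	return 0x0D014 + (i - 64) * 0x40;         /* queues 64-127 */
}

int main(void)
{
	printf("SRRCTL(10)  = 0x%05X\n", srrctl_offset(10));  /* 0x02128 */
	printf("SRRCTL(40)  = 0x%05X\n", srrctl_offset(40));  /* 0x01A14 */
	printf("SRRCTL(100) = 0x%05X\n", srrctl_offset(100)); /* 0x0D914 */
	return 0;
}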
+#define IXGBE_RDRXCTL 0x02F00
+#define IXGBE_RDRXCTL_RSC_PUSH 0x80
+/* 8 of these 0x03C00 - 0x03C1C */
+#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
+#define IXGBE_RXCTRL 0x03000
+#define IXGBE_DROPEN 0x03D04
+#define IXGBE_RXPBSIZE_SHIFT 10
+
+/* Receive Registers */
+#define IXGBE_RXCSUM 0x05000
+#define IXGBE_RFCTL 0x05008
+#define IXGBE_DRECCCTL 0x02F08
+#define IXGBE_DRECCCTL_DISABLE 0
+#define IXGBE_DRECCCTL2 0x02F8C
+
+/* Multicast Table Array - 128 entries */
+#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
+#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x0A204 + ((_i) * 8)))
+#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
+#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
+/* Packet split receive type */
+#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
+ (0x0EA00 + ((_i) * 4)))
+/* array of 4096 1-bit vlan filters */
+#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
+/* array of 4096 4-bit vlan vmdq indices */
+#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define IXGBE_FCTRL 0x05080
+#define IXGBE_VLNCTRL 0x05088
+#define IXGBE_MCSTCTRL 0x05090
+#define IXGBE_MRQC 0x05818
+#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
+#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
+#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
+#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
+#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */
+#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
+#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */
+#define IXGBE_RQTC 0x0EC70
+#define IXGBE_MTQC 0x08120
+#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_VT_CTL 0x051B0
+#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
+/* 64 Mailboxes, 16 DW each */
+#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i)))
+#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
+#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
+#define IXGBE_QDE 0x2F04
+#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */
+#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
+#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
+#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
+#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
+#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
+#define IXGBE_RXFECCERR0 0x051B8
+#define IXGBE_LLITHRESH 0x0EC90
+#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIRVP 0x05AC0
+#define IXGBE_VMD_CTL 0x0581C
+#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
+#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+
+/* Flow Director registers */
+#define IXGBE_FDIRCTRL 0x0EE00
+#define IXGBE_FDIRHKEY 0x0EE68
+#define IXGBE_FDIRSKEY 0x0EE6C
+#define IXGBE_FDIRDIP4M 0x0EE3C
+#define IXGBE_FDIRSIP4M 0x0EE40
+#define IXGBE_FDIRTCPM 0x0EE44
+#define IXGBE_FDIRUDPM 0x0EE48
+#define IXGBE_FDIRIP6M 0x0EE74
+#define IXGBE_FDIRM 0x0EE70
+
+/* Flow Director Stats registers */
+#define IXGBE_FDIRFREE 0x0EE38
+#define IXGBE_FDIRLEN 0x0EE4C
+#define IXGBE_FDIRUSTAT 0x0EE50
+#define IXGBE_FDIRFSTAT 0x0EE54
+#define IXGBE_FDIRMATCH 0x0EE58
+#define IXGBE_FDIRMISS 0x0EE5C
+
+/* Flow Director Programming registers */
+#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
+#define IXGBE_FDIRIPSA 0x0EE18
+#define IXGBE_FDIRIPDA 0x0EE1C
+#define IXGBE_FDIRPORT 0x0EE20
+#define IXGBE_FDIRVLAN 0x0EE24
+#define IXGBE_FDIRHASH 0x0EE28
+#define IXGBE_FDIRCMD 0x0EE2C
+
+/* Transmit DMA registers */
+#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/
+#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
+#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40))
+#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define IXGBE_DTXCTL 0x07E00
+
+#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_PFDTXGSWC 0x08220
+#define IXGBE_DTXMXSZRQ 0x08100
+#define IXGBE_DTXTCPFLGL 0x04A88
+#define IXGBE_DTXTCPFLGH 0x04A8C
+#define IXGBE_LBDRPEN 0x0CA00
+#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
+
+#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
+#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
+#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
+
+/* Anti-spoofing defines */
+#define IXGBE_SPOOF_MACAS_MASK 0xFF
+#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_PFVFSPOOF_REG_COUNT 8
+/* 16 of these (0-15) */
+#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4))
+/* Tx DCA Control register : 128 of these (0-127) */
+#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
+#define IXGBE_TIPG 0x0CB00
+#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_MNGTXMAP 0x0CD10
+#define IXGBE_TIPG_FIBER_DEFAULT 3
+#define IXGBE_TXPBSIZE_SHIFT 10
+
+/* Wake up registers */
+#define IXGBE_WUC 0x05800
+#define IXGBE_WUFC 0x05808
+#define IXGBE_WUS 0x05810
+#define IXGBE_IPAV 0x05838
+#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
+#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
+
+#define IXGBE_WUPL 0x05900
+#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */
+/* Ext Flexible Host Filter Table */
+#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100))
+
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
+#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
+#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
+#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
+#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
+
+/* Wake Up Filter Control */
+#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+/* Mask for Ext. flex filters */
+#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000
+#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
+#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+
+/* Wake Up Status */
+#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
+#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
+#define IXGBE_WUS_EX IXGBE_WUFC_EX
+#define IXGBE_WUS_MC IXGBE_WUFC_MC
+#define IXGBE_WUS_BC IXGBE_WUFC_BC
+#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
+#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
+#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
+#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
+#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
+#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
+#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
+#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
+#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
+#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
+#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
+
+/* Wake Up Packet Length */
+#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
+
+/* DCB registers */
+#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8
+#define IXGBE_RMCS 0x03D00
+#define IXGBE_DPMCS 0x07F40
+#define IXGBE_PDPMCS 0x0CD00
+#define IXGBE_RUPPBMR 0x050A0
+#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+
+
+/* Security Control Registers */
+#define IXGBE_SECTXCTRL 0x08800
+#define IXGBE_SECTXSTAT 0x08804
+#define IXGBE_SECTXBUFFAF 0x08808
+#define IXGBE_SECTXMINIFG 0x08810
+#define IXGBE_SECRXCTRL 0x08D00
+#define IXGBE_SECRXSTAT 0x08D04
+
+/* Security Bit Fields and Masks */
+#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001
+#define IXGBE_SECTXCTRL_TX_DIS 0x00000002
+#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
+
+#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
+#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002
+
+#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
+#define IXGBE_SECRXCTRL_RX_DIS 0x00000002
+
+#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
+#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002
+
+/* LinkSec (MacSec) Registers */
+#define IXGBE_LSECTXCAP 0x08A00
+#define IXGBE_LSECRXCAP 0x08F00
+#define IXGBE_LSECTXCTRL 0x08A04
+#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */
+#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */
+#define IXGBE_LSECTXSA 0x08A10
+#define IXGBE_LSECTXPN0 0x08A14
+#define IXGBE_LSECTXPN1 0x08A18
+#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECRXCTRL 0x08F04
+#define IXGBE_LSECRXSCL 0x08F08
+#define IXGBE_LSECRXSCH 0x08F0C
+#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
+#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */
+#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */
+#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */
+#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */
+#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */
+#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */
+#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */
+#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */
+#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */
+#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */
+#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */
+#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */
+#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */
+#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */
+#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */
+#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
+#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
+#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */
+#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */
+
+/* LinkSec (MacSec) Bit Fields and Masks */
+#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECTXCAP_SUM_SHIFT 16
+#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECRXCAP_SUM_SHIFT 16
+
+#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003
+#define IXGBE_LSECTXCTRL_DISABLE 0x0
+#define IXGBE_LSECTXCTRL_AUTH 0x1
+#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2
+#define IXGBE_LSECTXCTRL_AISCI 0x00000020
+#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8
+
+#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C
+#define IXGBE_LSECRXCTRL_EN_SHIFT 2
+#define IXGBE_LSECRXCTRL_DISABLE 0x0
+#define IXGBE_LSECRXCTRL_CHECK 0x1
+#define IXGBE_LSECRXCTRL_STRICT 0x2
+#define IXGBE_LSECRXCTRL_DROP 0x3
+#define IXGBE_LSECRXCTRL_PLSH 0x00000040
+#define IXGBE_LSECRXCTRL_RP 0x00000080
+#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+
+/* IpSec Registers */
+#define IXGBE_IPSTXIDX 0x08900
+#define IXGBE_IPSTXSALT 0x08904
+#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXIDX 0x08E00
+#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSPI 0x08E14
+#define IXGBE_IPSRXIPIDX 0x08E18
+#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSALT 0x08E2C
+#define IXGBE_IPSRXMOD 0x08E30
+
+#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
+
+/* DCB registers */
+#define IXGBE_RTRPCS 0x02430
+#define IXGBE_RTTDCS 0x04900
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
+#define IXGBE_RTTPCS 0x0CD00
+#define IXGBE_RTRUP2TC 0x03020
+#define IXGBE_RTTUP2TC 0x0C800
+#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDQSEL 0x04904
+#define IXGBE_RTTDT1C 0x04908
+#define IXGBE_RTTDT1S 0x0490C
+#define IXGBE_RTTDTECC 0x04990
+#define IXGBE_RTTDTECC_NO_BCN 0x00000100
+
+#define IXGBE_RTTBCNRC 0x04984
+#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
+#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
+#define IXGBE_RTTBCNRC_RF_INT_MASK \
+ (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+#define IXGBE_RTTBCNRM 0x04980
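
/*
 * Editorial sketch, not part of the original patch: the rate factor in
 * RTTBCNRC is a fixed-point divisor with 14 fractional bits, so a per-queue
 * rate limit is typically encoded as (link speed / target rate) scaled by
 * 2^14. The hypothetical helper below only composes the register value from
 * the masks above (driver u32 typedef assumed); programming it into the
 * selected queue is assumed to happen elsewhere.
 */
static inline u32 ixgbe_compose_rttbcnrc(u32 link_mbps, u32 tx_rate_mbps)
{
    /* caller is assumed to ensure tx_rate_mbps != 0 */
    u32 bcnrc_val = (link_mbps << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate_mbps;

    bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | IXGBE_RTTBCNRC_RF_DEC_MASK;
    bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;  /* enable rate scheduling */
    return bcnrc_val;
}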
+
+/* FCoE DMA Context Registers */
+#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */
+#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
+#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
+#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
+#define IXGBE_FCBUFF_OFFSET_SHIFT 16
+#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
+#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
+/* FCoE SOF/EOF */
+#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
+/* FCoE Filter Context Registers */
+#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+/* FCoE Receive Control */
+#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
+#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
+/* FCoE Redirection */
+#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */
+#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */
+#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
+#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
+#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */
+#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
+#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+
+/* Stats registers */
+#define IXGBE_CRCERRS 0x04000
+#define IXGBE_ILLERRC 0x04004
+#define IXGBE_ERRBC 0x04008
+#define IXGBE_MSPDC 0x04010
+#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
+#define IXGBE_MLFC 0x04034
+#define IXGBE_MRFC 0x04038
+#define IXGBE_RLEC 0x04040
+#define IXGBE_LXONTXC 0x03F60
+#define IXGBE_LXONRXC 0x0CF60
+#define IXGBE_LXOFFTXC 0x03F68
+#define IXGBE_LXOFFRXC 0x0CF68
+#define IXGBE_LXONRXCNT 0x041A4
+#define IXGBE_LXOFFRXCNT 0x041A8
+#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
+#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
+#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
+#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
+#define IXGBE_PRC64 0x0405C
+#define IXGBE_PRC127 0x04060
+#define IXGBE_PRC255 0x04064
+#define IXGBE_PRC511 0x04068
+#define IXGBE_PRC1023 0x0406C
+#define IXGBE_PRC1522 0x04070
+#define IXGBE_GPRC 0x04074
+#define IXGBE_BPRC 0x04078
+#define IXGBE_MPRC 0x0407C
+#define IXGBE_GPTC 0x04080
+#define IXGBE_GORCL 0x04088
+#define IXGBE_GORCH 0x0408C
+#define IXGBE_GOTCL 0x04090
+#define IXGBE_GOTCH 0x04094
+#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
+#define IXGBE_RUC 0x040A4
+#define IXGBE_RFC 0x040A8
+#define IXGBE_ROC 0x040AC
+#define IXGBE_RJC 0x040B0
+#define IXGBE_MNGPRC 0x040B4
+#define IXGBE_MNGPDC 0x040B8
+#define IXGBE_MNGPTC 0x0CF90
+#define IXGBE_TORL 0x040C0
+#define IXGBE_TORH 0x040C4
+#define IXGBE_TPR 0x040D0
+#define IXGBE_TPT 0x040D4
+#define IXGBE_PTC64 0x040D8
+#define IXGBE_PTC127 0x040DC
+#define IXGBE_PTC255 0x040E0
+#define IXGBE_PTC511 0x040E4
+#define IXGBE_PTC1023 0x040E8
+#define IXGBE_PTC1522 0x040EC
+#define IXGBE_MPTC 0x040F0
+#define IXGBE_BPTC 0x040F4
+#define IXGBE_XEC 0x04120
+#define IXGBE_SSVPC 0x08780
+
+#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
+ (0x08600 + ((_i) * 4)))
+#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
+
+#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */
+#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */
+#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */
+#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */
+#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
+#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
+#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */
+#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */
+#define IXGBE_O2BGPTC 0x041C4
+#define IXGBE_O2BSPC 0x087B0
+#define IXGBE_B2OSPC 0x041C0
+#define IXGBE_B2OGPRC 0x02F90
+#define IXGBE_BUPRC 0x04180
+#define IXGBE_BMPRC 0x04184
+#define IXGBE_BBPRC 0x04188
+#define IXGBE_BUPTC 0x0418C
+#define IXGBE_BMPTC 0x04190
+#define IXGBE_BBPTC 0x04194
+#define IXGBE_BCRCERRS 0x04198
+#define IXGBE_BXONRXC 0x0419C
+#define IXGBE_BXOFFRXC 0x041E0
+#define IXGBE_BXONTXC 0x041E4
+#define IXGBE_BXOFFTXC 0x041E8
+#define IXGBE_PCRC8ECL 0x0E810
+#define IXGBE_PCRC8ECH 0x0E811
+#define IXGBE_PCRC8ECH_MASK 0x1F
+#define IXGBE_LDPCECL 0x0E820
+#define IXGBE_LDPCECH 0x0E821
+
+/* Management */
+#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MANC 0x05820
+#define IXGBE_MFVAL 0x05824
+#define IXGBE_MANC2H 0x05860
+#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MIPAF 0x058B0
+#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
+#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_LSWFW 0x15014
+#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
+#define IXGBE_BMCIPVAL 0x05060
+#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001
+#define IXGBE_BMCIP_IPADDR_VALID 0x00000002
+
+/* Management Bit Fields and Masks */
+#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */
+#define IXGBE_MANC_EN_BMC2OS_SHIFT 28
+
+/* Firmware Semaphore Register */
+#define IXGBE_FWSM_MODE_MASK 0xE
+
+/* ARC Subsystem registers */
+#define IXGBE_HICR 0x15F00
+#define IXGBE_FWSTS 0x15F0C
+#define IXGBE_HSMC0R 0x15F04
+#define IXGBE_HSMC1R 0x15F08
+#define IXGBE_SWSR 0x15F10
+#define IXGBE_HFDR 0x15FE8
+#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */
+
+#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define IXGBE_HICR_C 0x02
+#define IXGBE_HICR_SV 0x04 /* Status Validity */
+#define IXGBE_HICR_FW_RESET_ENABLE 0x40
+#define IXGBE_HICR_FW_RESET 0x80
+
+/* PCI-E registers */
+#define IXGBE_GCR 0x11000
+#define IXGBE_GTV 0x11004
+#define IXGBE_FUNCTAG 0x11008
+#define IXGBE_GLT 0x1100C
+#define IXGBE_PCIEPIPEADR 0x11004
+#define IXGBE_PCIEPIPEDAT 0x11008
+#define IXGBE_GSCL_1 0x11010
+#define IXGBE_GSCL_2 0x11014
+#define IXGBE_GSCL_3 0x11018
+#define IXGBE_GSCL_4 0x1101C
+#define IXGBE_GSCN_0 0x11020
+#define IXGBE_GSCN_1 0x11024
+#define IXGBE_GSCN_2 0x11028
+#define IXGBE_GSCN_3 0x1102C
+#define IXGBE_FACTPS 0x10150
+#define IXGBE_PCIEANACTL 0x11040
+#define IXGBE_SWSM 0x10140
+#define IXGBE_FWSM 0x10148
+#define IXGBE_GSSR 0x10160
+#define IXGBE_MREVID 0x11064
+#define IXGBE_DCA_ID 0x11070
+#define IXGBE_DCA_CTRL 0x11074
+#define IXGBE_SWFW_SYNC IXGBE_GSSR
+
+/* PCI-E registers 82599-Specific */
+#define IXGBE_GCR_EXT 0x11050
+#define IXGBE_GSCL_5_82599 0x11030
+#define IXGBE_GSCL_6_82599 0x11034
+#define IXGBE_GSCL_7_82599 0x11038
+#define IXGBE_GSCL_8_82599 0x1103C
+#define IXGBE_PHYADR_82599 0x11040
+#define IXGBE_PHYDAT_82599 0x11044
+#define IXGBE_PHYCTL_82599 0x11048
+#define IXGBE_PBACLR_82599 0x11068
+#define IXGBE_CIAA_82599 0x11088
+#define IXGBE_CIAD_82599 0x1108C
+#define IXGBE_PICAUSE 0x110B0
+#define IXGBE_PIENA 0x110B8
+#define IXGBE_CDQ_MBR_82599 0x110B4
+#define IXGBE_PCIESPARE 0x110BC
+#define IXGBE_MISC_REG_82599 0x110F0
+#define IXGBE_ECC_CTRL_0_82599 0x11100
+#define IXGBE_ECC_CTRL_1_82599 0x11104
+#define IXGBE_ECC_STATUS_82599 0x110E0
+#define IXGBE_BAR_CTRL_82599 0x110F4
+
+/* PCI Express Control */
+#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
+#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define IXGBE_GCR_CAP_VER2 0x00040000
+
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+ IXGBE_GCR_EXT_VT_MODE_64)
+/* Time Sync Registers */
+#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
+#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
+#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */
+#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */
+#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */
+#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */
+#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */
+#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */
+#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */
+#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
+#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
+#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
+#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
+#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
+#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */
+#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */
+#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
+#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
+#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
+#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
+#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
+#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
+#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
+#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
+
+/* Diagnostic Registers */
+#define IXGBE_RDSTATCTL 0x02C20
+#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN 0x02F08
+#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE 0x02F20
+#define IXGBE_RDMAM 0x02F30
+#define IXGBE_RDMAD 0x02F34
+#define IXGBE_TDSTATCTL 0x07C20
+#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
+#define IXGBE_TDHMPN 0x07F08
+#define IXGBE_TDHMPN2 0x082FC
+#define IXGBE_TXDESCIC 0x082CC
+#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
+#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
+#define IXGBE_TDPROBE 0x07F20
+#define IXGBE_TXBUFCTRL 0x0C600
+#define IXGBE_TXBUFDATA0 0x0C610
+#define IXGBE_TXBUFDATA1 0x0C614
+#define IXGBE_TXBUFDATA2 0x0C618
+#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_RXBUFCTRL 0x03600
+#define IXGBE_RXBUFDATA0 0x03610
+#define IXGBE_RXBUFDATA1 0x03614
+#define IXGBE_RXBUFDATA2 0x03618
+#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_RFVAL 0x050A4
+#define IXGBE_MDFTC1 0x042B8
+#define IXGBE_MDFTC2 0x042C0
+#define IXGBE_MDFTFIFO1 0x042C4
+#define IXGBE_MDFTFIFO2 0x042C8
+#define IXGBE_MDFTS 0x042CC
+#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
+#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
+#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
+#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
+#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
+#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
+#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
+#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
+#define IXGBE_PCIEECCCTL 0x1106C
+#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
+#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
+#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
+#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-317C*/
+#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
+#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
+#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
+#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C17C*/
+#define IXGBE_PCIEECCCTL0 0x11100
+#define IXGBE_PCIEECCCTL1 0x11104
+#define IXGBE_RXDBUECC 0x03F70
+#define IXGBE_TXDBUECC 0x0CF70
+#define IXGBE_RXDBUEST 0x03F74
+#define IXGBE_TXDBUEST 0x0CF74
+#define IXGBE_PBTXECC 0x0C300
+#define IXGBE_PBRXECC 0x03300
+#define IXGBE_GHECCR 0x110B0
+
+/* MAC Registers */
+#define IXGBE_PCS1GCFIG 0x04200
+#define IXGBE_PCS1GLCTL 0x04208
+#define IXGBE_PCS1GLSTA 0x0420C
+#define IXGBE_PCS1GDBG0 0x04210
+#define IXGBE_PCS1GDBG1 0x04214
+#define IXGBE_PCS1GANA 0x04218
+#define IXGBE_PCS1GANLP 0x0421C
+#define IXGBE_PCS1GANNP 0x04220
+#define IXGBE_PCS1GANLPNP 0x04224
+#define IXGBE_HLREG0 0x04240
+#define IXGBE_HLREG1 0x04244
+#define IXGBE_PAP 0x04248
+#define IXGBE_MACA 0x0424C
+#define IXGBE_APAE 0x04250
+#define IXGBE_ARD 0x04254
+#define IXGBE_AIS 0x04258
+#define IXGBE_MSCA 0x0425C
+#define IXGBE_MSRWD 0x04260
+#define IXGBE_MLADD 0x04264
+#define IXGBE_MHADD 0x04268
+#define IXGBE_MAXFRS 0x04268
+#define IXGBE_TREG 0x0426C
+#define IXGBE_PCSS1 0x04288
+#define IXGBE_PCSS2 0x0428C
+#define IXGBE_XPCSS 0x04290
+#define IXGBE_MFLCN 0x04294
+#define IXGBE_SERDESC 0x04298
+#define IXGBE_MACS 0x0429C
+#define IXGBE_AUTOC 0x042A0
+#define IXGBE_LINKS 0x042A4
+#define IXGBE_LINKS2 0x04324
+#define IXGBE_AUTOC2 0x042A8
+#define IXGBE_AUTOC3 0x042AC
+#define IXGBE_ANLP1 0x042B0
+#define IXGBE_ANLP2 0x042B4
+#define IXGBE_MACC 0x04330
+#define IXGBE_ATLASCTL 0x04800
+#define IXGBE_MMNGC 0x042D0
+#define IXGBE_ANLPNP1 0x042D4
+#define IXGBE_ANLPNP2 0x042D8
+#define IXGBE_KRPCSFC 0x042E0
+#define IXGBE_KRPCSS 0x042E4
+#define IXGBE_FECS1 0x042E8
+#define IXGBE_FECS2 0x042EC
+#define IXGBE_SMADARCTL 0x14F10
+#define IXGBE_MPVC 0x04318
+#define IXGBE_SGMIIC 0x04314
+
+/* Statistics Registers */
+#define IXGBE_RXNFGPC 0x041B0
+#define IXGBE_RXNFGBCL 0x041B4
+#define IXGBE_RXNFGBCH 0x041B8
+#define IXGBE_RXDGPC 0x02F50
+#define IXGBE_RXDGBCL 0x02F54
+#define IXGBE_RXDGBCH 0x02F58
+#define IXGBE_RXDDGPC 0x02F5C
+#define IXGBE_RXDDGBCL 0x02F60
+#define IXGBE_RXDDGBCH 0x02F64
+#define IXGBE_RXLPBKGPC 0x02F68
+#define IXGBE_RXLPBKGBCL 0x02F6C
+#define IXGBE_RXLPBKGBCH 0x02F70
+#define IXGBE_RXDLPBKGPC 0x02F74
+#define IXGBE_RXDLPBKGBCL 0x02F78
+#define IXGBE_RXDLPBKGBCH 0x02F7C
+#define IXGBE_TXDGPC 0x087A0
+#define IXGBE_TXDGBCL 0x087A4
+#define IXGBE_TXDGBCH 0x087A8
+
+#define IXGBE_RXDSTATCTRL 0x02F40
+
+/* Copper Pond 2 link timeout */
+#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
+
+/* Omer CORECTL */
+#define IXGBE_CORECTL 0x014F00
+/* BARCTRL */
+#define IXGBE_BARCTRL 0x110F4
+#define IXGBE_BARCTRL_FLSIZE 0x0700
+#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
+#define IXGBE_BARCTRL_CSRSIZE 0x2000
+
+/* RSCCTL Bit Masks */
+#define IXGBE_RSCCTL_RSCEN 0x01
+#define IXGBE_RSCCTL_MAXDESC_1 0x00
+#define IXGBE_RSCCTL_MAXDESC_4 0x04
+#define IXGBE_RSCCTL_MAXDESC_8 0x08
+#define IXGBE_RSCCTL_MAXDESC_16 0x0C
+
+/* RSCDBU Bit Masks */
+#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
+#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
+
+/* RDRXCTL Bit Masks */
+#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */
+#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_MVMEN 0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
+#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
+#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
+#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */
+
+/* RQTC Bit Masks and Shifts */
+#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
+#define IXGBE_RQTC_TC0_MASK (0x7 << 0)
+#define IXGBE_RQTC_TC1_MASK (0x7 << 4)
+#define IXGBE_RQTC_TC2_MASK (0x7 << 8)
+#define IXGBE_RQTC_TC3_MASK (0x7 << 12)
+#define IXGBE_RQTC_TC4_MASK (0x7 << 16)
+#define IXGBE_RQTC_TC5_MASK (0x7 << 20)
+#define IXGBE_RQTC_TC6_MASK (0x7 << 24)
+#define IXGBE_RQTC_TC7_MASK (0x7 << 28)
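
/*
 * Editorial sketch, not part of the original patch: each traffic class owns a
 * 3-bit field in RQTC at shift 4 * tc, so the per-TC value is extracted with
 * the shift macro above. Hypothetical helper, driver u32 typedef assumed.
 */
static inline u32 ixgbe_rqtc_get_tc(u32 rqtc, unsigned int tc)
{
    /* e.g. tc = 3 uses IXGBE_RQTC_TC3_MASK, i.e. 0x7 << 12 */
    return (rqtc >> IXGBE_RQTC_SHIFT_TC(tc)) & 0x7;
}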
+
+/* PSRTYPE.RQPL Bit masks and shift */
+#define IXGBE_PSRTYPE_RQPL_MASK 0x7
+#define IXGBE_PSRTYPE_RQPL_SHIFT 29
+
+/* CTRL Bit Masks */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
+
+/* FACTPS */
+#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
+
+/* MHADD Bit Masks */
+#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT 16
+
+/* Extended Device Control */
+#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
+#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+
+/* Direct Cache Access (DCA) definitions */
+#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
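
/*
 * Editorial sketch, not part of the original patch: on 82599 the DCA target
 * CPU sits in the top byte of DCA_RXCTRL, with the descriptor/header/payload
 * enables as individual bits. A hypothetical composition of the per-queue
 * value (driver u8/u32 typedefs assumed) could look like:
 */
static inline u32 ixgbe_compose_dca_rxctrl_82599(u32 rxctrl, u8 cpu)
{
    rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
    rxctrl |= (u32)cpu << IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
    rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN |
              IXGBE_DCA_RXCTRL_HEAD_DCA_EN |
              IXGBE_DCA_RXCTRL_DATA_DCA_EN;
    return rxctrl;
}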
+
+/* MSCA Bit Masks */
+#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT 0
+#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type (new prot) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot) */
+#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift */
+#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */
+#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */
+#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc)*/
+#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */
+#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */
+#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */
+
+/* MSRWD bit masks */
+#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT 16
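
/*
 * Editorial sketch, not part of the original patch: a "new protocol"
 * (clause 45) MDIO read is driven through MSCA and the result is returned in
 * the upper half of MSRWD. The hypothetical helpers below only compose the
 * command word and decode the data (driver u16/u32 typedefs assumed); the
 * register accesses and MDI_COMMAND completion polling are not shown.
 */
static inline u32 ixgbe_msca_read_cmd(u16 reg_addr, u32 dev_type, u32 phy_addr)
{
    return ((u32)reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
           (dev_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
           (phy_addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
           IXGBE_MSCA_READ | IXGBE_MSCA_NEW_PROTOCOL | IXGBE_MSCA_MDI_COMMAND;
}

static inline u16 ixgbe_msrwd_read_data(u32 msrwd)
{
    return (u16)((msrwd & IXGBE_MSRWD_READ_DATA_MASK) >>
                 IXGBE_MSRWD_READ_DATA_SHIFT);
}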
+
+/* Atlas registers */
+#define IXGBE_ATLAS_PDN_LPBK 0x24
+#define IXGBE_ATLAS_PDN_10G 0xB
+#define IXGBE_ATLAS_PDN_1G 0xC
+#define IXGBE_ATLAS_PDN_AN 0xD
+
+/* Atlas bit masks */
+#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000
+#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10
+#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
+
+/* Omer bit masks */
+#define IXGBE_CORECTL_WRITE_CMD 0x00010000
+
+/* Device Type definitions for new protocol MDIO commands */
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
+#define IXGBE_TWINAX_DEV 1
+
+#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
+
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
+
+#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
+#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
+#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
+#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
+#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
+#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
+#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
+#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
+#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */
+#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */
+#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
+#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
+#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+
+#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+
+/* MII clause 22/28 definitions */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
+
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
+#define IXGBE_MII_RESTART 0x200
+#define IXGBE_MII_AUTONEG_COMPLETE 0x20
+#define IXGBE_MII_AUTONEG_LINK_UP 0x04
+#define IXGBE_MII_AUTONEG_REG 0x0
+
+#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
+#define IXGBE_MAX_PHY_ADDR 32
+
+/* PHY IDs*/
+#define TN1010_PHY_ID 0x00A19410
+#define TNX_FW_REV 0xB
+#define X540_PHY_ID 0x01540200
+#define AQ_FW_REV 0x20
+#define QT2022_PHY_ID 0x0043A400
+#define ATH_PHY_ID 0x03429050
+
+/* PHY Types */
+#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+
+/* Special PHY Init Routine */
+#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
+#define IXGBE_PHY_INIT_END_NL 0xFFFF
+#define IXGBE_CONTROL_MASK_NL 0xF000
+#define IXGBE_DATA_MASK_NL 0x0FFF
+#define IXGBE_CONTROL_SHIFT_NL 12
+#define IXGBE_DELAY_NL 0
+#define IXGBE_DATA_NL 1
+#define IXGBE_CONTROL_NL 0x000F
+#define IXGBE_CONTROL_EOL_NL 0x0FFF
+#define IXGBE_CONTROL_SOL_NL 0x0000
+
+/* General purpose Interrupt Enable */
+#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
+#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME 0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
+#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
+#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
+#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
+#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
+
+/* Packet Buffer Initialization */
+#define IXGBE_MAX_PACKET_BUFFERS 8
+
+#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
+#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
+#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */
+#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
+
+#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+#define IXGBE_MAX_PB 8
+
+/* Packet buffer allocation strategies */
+enum {
+ PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL
+ PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED
+};
+
+/* Transmit Flow Control status */
+#define IXGBE_TFCS_TXOFF 0x00000001
+#define IXGBE_TFCS_TXOFF0 0x00000100
+#define IXGBE_TFCS_TXOFF1 0x00000200
+#define IXGBE_TFCS_TXOFF2 0x00000400
+#define IXGBE_TFCS_TXOFF3 0x00000800
+#define IXGBE_TFCS_TXOFF4 0x00001000
+#define IXGBE_TFCS_TXOFF5 0x00002000
+#define IXGBE_TFCS_TXOFF6 0x00004000
+#define IXGBE_TFCS_TXOFF7 0x00008000
+
+/* TCP Timer */
+#define IXGBE_TCPTIMER_KS 0x00000100
+#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200
+#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400
+#define IXGBE_TCPTIMER_LOOP 0x00000800
+#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
+
+/* HLREG0 Bit Masks */
+#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */
+#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */
+#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */
+#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */
+#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */
+#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */
+#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */
+#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */
+#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */
+#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */
+#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */
+#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */
+#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */
+#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */
+#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */
+
+/* VMD_CTL bitmasks */
+#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001
+#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
+
+/* VT_CTL bitmasks */
+#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
+#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
+#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
+#define IXGBE_VT_CTL_POOL_SHIFT 7
+#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
+
+/* VMOLR bitmasks */
+#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
+#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
+#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
+#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */
+#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */
+
+/* VFRE bitmask */
+#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* RDHMPN and TDHMPN bitmasks */
+#define IXGBE_RDHMPN_RDICADDR 0x007FF800
+#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
+#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
+#define IXGBE_TDHMPN_TDICADDR 0x003FF800
+#define IXGBE_TDHMPN_TDICRDREQ 0x00800000
+#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
+
+#define IXGBE_RDMAM_MEM_SEL_SHIFT 13
+#define IXGBE_RDMAM_DWORD_SHIFT 9
+#define IXGBE_RDMAM_DESC_COMP_FIFO 1
+#define IXGBE_RDMAM_DFC_CMD_FIFO 2
+#define IXGBE_RDMAM_RSC_HEADER_ADDR 3
+#define IXGBE_RDMAM_TCN_STATUS_RAM 4
+#define IXGBE_RDMAM_WB_COLL_FIFO 5
+#define IXGBE_RDMAM_QSC_CNT_RAM 6
+#define IXGBE_RDMAM_QSC_FCOE_RAM 7
+#define IXGBE_RDMAM_QSC_QUEUE_CNT 8
+#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA
+#define IXGBE_RDMAM_QSC_RSC_RAM 0xB
+#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135
+#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4
+#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48
+#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4
+#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256
+#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9
+#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8
+#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4
+#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64
+#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4
+#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512
+#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8
+#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32
+#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8
+
+#define IXGBE_TXDESCIC_READY 0x80000000
+
+/* Receive Checksum Control */
+#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* FCRTL Bit Masks */
+#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
+#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
+
+/* PAP bit masks*/
+#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
+
+/* RMCS Bit Masks */
+#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RMCS_RAC 0x00000004
+/* Deficit Fixed Prio ena */
+#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC
+#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */
+#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
+#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
+
+/* FCCFG Bit Masks */
+#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */
+#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */
+#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */
+#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
+#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
+#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
+#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
+#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
+#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */
+#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
+#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
+#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
+#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_LSC | \
+ IXGBE_EIMS_TCP_TIMER | \
+ IXGBE_EIMS_OTHER)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
+#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
+#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */
+#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */
+#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */
+#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */
+#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */
+#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */
+#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */
+#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */
+
+#define IXGBE_MAX_FTQF_FILTERS 128
+#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003
+#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000
+#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001
+#define IXGBE_FTQF_PROTOCOL_SCTP 2
+#define IXGBE_FTQF_PRIORITY_MASK 0x00000007
+#define IXGBE_FTQF_PRIORITY_SHIFT 2
+#define IXGBE_FTQF_POOL_MASK 0x0000003F
+#define IXGBE_FTQF_POOL_SHIFT 8
+#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F
+#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25
+#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E
+#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D
+#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B
+#define IXGBE_FTQF_DEST_PORT_MASK 0x17
+#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F
+#define IXGBE_FTQF_POOL_MASK_EN 0x40000000
+#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000
+
+/* Interrupt clear mask */
+#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_REG_NUM 25
+#define IXGBE_IVAR_REG_NUM_82599 64
+#define IXGBE_IVAR_TXRX_ENTRY 96
+#define IXGBE_IVAR_RX_ENTRY 64
+#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i))
+#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i))
+#define IXGBE_IVAR_TX_ENTRY 32
+
+#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */
+#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */
+
+#define IXGBE_MSIX_VECTOR(_i) (0 + (_i))
+
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+/* ETYPE Queue Filter/Select Bit Masks */
+#define IXGBE_MAX_ETQF_FILTERS 8
+#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
+#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
+#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
+#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
+#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+
+#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
+#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
+#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */
+#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */
+
+/*
+ * ETQF filter list: one static filter per filter consumer. This is
+ * to avoid filter collisions later. Add new filters
+ * here!!
+ *
+ * Current filters:
+ * EAPOL 802.1x (0x888e): Filter 0
+ * FCoE (0x8906): Filter 2
+ * 1588 (0x88f7): Filter 3
+ * FIP (0x8914): Filter 4
+ */
+#define IXGBE_ETQF_FILTER_EAPOL 0
+#define IXGBE_ETQF_FILTER_FCOE 2
+#define IXGBE_ETQF_FILTER_1588 3
+#define IXGBE_ETQF_FILTER_FIP 4
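
/*
 * Editorial sketch, not part of the original patch: with the static filter
 * assignment above, the 1588 EtherType filter (index IXGBE_ETQF_FILTER_1588)
 * would typically be built from an ETQF value matching EtherType 0x88f7 and
 * an ETQS value steering hits to one Rx queue. The ETQF/ETQS register offsets
 * are defined elsewhere in this header; this hypothetical helper only shows
 * the bit-field composition (driver u32 typedef assumed).
 */
static inline void ixgbe_compose_1588_filter(u32 rx_queue, u32 *etqf, u32 *etqs)
{
    *etqf = 0x88F7 | IXGBE_ETQF_1588 | IXGBE_ETQF_FILTER_EN;
    *etqs = (rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) | IXGBE_ETQS_QUEUE_EN;
}
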
+/* VLAN Control Bit Masks */
+#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
+#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
+#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */
+#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
+#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
+
+/* VLAN pool filtering masks */
+#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
+#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
+
+/* STATUS Bit Masks */
+#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
+#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */
+
+#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
+#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
+
+/* ESDP Bit Masks */
+#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
+#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
+#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
+#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
+#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */
+#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
+
+
+/* LEDCTL Bit Masks */
+#define IXGBE_LED_IVRT_BASE 0x00000040
+#define IXGBE_LED_BLINK_BASE 0x00000080
+#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
+#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
+#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
+#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
+#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
+#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+
+/* LED modes */
+#define IXGBE_LED_LINK_UP 0x0
+#define IXGBE_LED_LINK_10G 0x1
+#define IXGBE_LED_MAC 0x2
+#define IXGBE_LED_FILTER 0x3
+#define IXGBE_LED_LINK_ACTIVE 0x4
+#define IXGBE_LED_LINK_1G 0x5
+#define IXGBE_LED_ON 0xE
+#define IXGBE_LED_OFF 0xF
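
/*
 * Editorial sketch, not part of the original patch: LEDCTL carries one 8-bit
 * block per LED, so the macros above expand to per-LED masks, e.g.
 * IXGBE_LED_MODE_MASK(1) == 0x00000F00 and IXGBE_LED_BLINK(2) == 0x00800000.
 * Forcing LED "idx" to a steady on state could be composed as below
 * (hypothetical helper, driver u32 typedef assumed).
 */
static inline u32 ixgbe_ledctl_force_on(u32 ledctl, unsigned int idx)
{
    ledctl &= ~IXGBE_LED_MODE_MASK(idx);
    ledctl |= (u32)IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(idx);
    ledctl &= ~IXGBE_LED_BLINK(idx);  /* steady on, no blink */
    return ledctl;
}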
+
+/* AUTOC Bit Masks */
+#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
+#define IXGBE_AUTOC_KX4_SUPP 0x80000000
+#define IXGBE_AUTOC_KX_SUPP 0x40000000
+#define IXGBE_AUTOC_PAUSE 0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
+#define IXGBE_AUTOC_RF 0x08000000
+#define IXGBE_AUTOC_PD_TMR 0x06000000
+#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
+#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
+#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
+#define IXGBE_AUTOC_FECA 0x00040000
+#define IXGBE_AUTOC_FECR 0x00020000
+#define IXGBE_AUTOC_KR_SUPP 0x00010000
+#define IXGBE_AUTOC_AN_RESTART 0x00001000
+#define IXGBE_AUTOC_FLU 0x00000001
+#define IXGBE_AUTOC_LMS_SHIFT 13
+#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
+#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
+#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
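
/*
 * Editorial sketch, not part of the original patch: the link mode select and
 * the 10G serial PMA/PMD type are bit fields in AUTOC/AUTOC2, so link setup
 * code typically extracts them with the masks above before deciding how to
 * (re)negotiate. Hypothetical helper, driver u32 typedef assumed:
 */
static inline int ixgbe_autoc_is_10g_sfi(u32 autoc, u32 autoc2)
{
    return (autoc & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_10G_SERIAL &&
           (autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK) ==
                IXGBE_AUTOC2_10G_SFI;
}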
+
+#define IXGBE_MACC_FLU 0x00000001
+#define IXGBE_MACC_FSV_10G 0x00030000
+#define IXGBE_MACC_FS 0x00040000
+#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+
+/* LINKS Bit Masks */
+#define IXGBE_LINKS_KX_AN_COMP 0x80000000
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+#define IXGBE_LINKS_MODE 0x18000000
+#define IXGBE_LINKS_RX_MODE 0x06000000
+#define IXGBE_LINKS_TX_MODE 0x01800000
+#define IXGBE_LINKS_XGXS_EN 0x00400000
+#define IXGBE_LINKS_SGMII_EN 0x02000000
+#define IXGBE_LINKS_PCS_1G_EN 0x00200000
+#define IXGBE_LINKS_1G_AN_EN 0x00100000
+#define IXGBE_LINKS_KX_AN_IDLE 0x00080000
+#define IXGBE_LINKS_1G_SYNC 0x00040000
+#define IXGBE_LINKS_10G_ALIGN 0x00020000
+#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
+#define IXGBE_LINKS_TL_FAULT 0x00001000
+#define IXGBE_LINKS_SIGNAL 0x00000F00
+
+#define IXGBE_LINKS_SPEED_82599 0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
+#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
+#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+
+#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
+
+/* PCS1GLSTA Bit Masks */
+#define IXGBE_PCS1GLSTA_LINK_OK 1
+#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
+#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000
+#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000
+#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000
+#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
+#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000
+
+#define IXGBE_PCS1GANA_SYM_PAUSE 0x80
+#define IXGBE_PCS1GANA_ASM_PAUSE 0x100
+
+/* PCS1GLCTL Bit Masks */
+#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg timeout ena */
+#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1
+#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20
+#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
+#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
+#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
+
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE 0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE 0x0400
+#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
+
+/* SW Semaphore Register bitmasks */
+#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
+
+/* SW_FW_SYNC/GSSR definitions */
+#define IXGBE_GSSR_EEP_SM 0x0001
+#define IXGBE_GSSR_PHY0_SM 0x0002
+#define IXGBE_GSSR_PHY1_SM 0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_SW_MNG_SM 0x0400
+
+/* FW Status register bitmask */
+#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
+
+/* EEC Register */
+#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
+#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */
+#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */
+#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */
+#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */
+#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define IXGBE_EEC_FWE_SHIFT 4
+#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */
+#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
+#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
+#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
+#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
+/* EEPROM Addressing bits based on type (0-small, 1-large) */
+#define IXGBE_EEC_ADDR_SIZE 0x00000400
+#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
+#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
+
+#define IXGBE_EEC_SIZE_SHIFT 11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
+#define IXGBE_EEPROM_OPCODE_BITS 8
+
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
+/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define IXGBE_EEPROM_CHECKSUM 0x3F
+#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_PCIE_ANALOG_PTR 0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR 0x09
+#define IXGBE_CORE1_PTR 0x0A
+#define IXGBE_MAC0_PTR 0x0B
+#define IXGBE_MAC1_PTR 0x0C
+#define IXGBE_CSR0_CONFIG_PTR 0x0D
+#define IXGBE_CSR1_CONFIG_PTR 0x0E
+#define IXGBE_FW_PTR 0x0F
+#define IXGBE_PBANUM0_PTR 0x15
+#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_ALT_MAC_ADDR_PTR 0x37
+#define IXGBE_FREE_SPACE_PTR 0X3E
+
+/* External Thermal Sensor Config */
+#define IXGBE_ETS_CFG 0x26
+#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0
+#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6
+#define IXGBE_ETS_TYPE_MASK 0x0038
+#define IXGBE_ETS_TYPE_SHIFT 3
+#define IXGBE_ETS_TYPE_EMC 0x000
+#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007
+#define IXGBE_ETS_DATA_LOC_MASK 0x3C00
+#define IXGBE_ETS_DATA_LOC_SHIFT 10
+#define IXGBE_ETS_DATA_INDEX_MASK 0x0300
+#define IXGBE_ETS_DATA_INDEX_SHIFT 8
+#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF
+
+#define IXGBE_SAN_MAC_ADDR_PTR 0x28
+#define IXGBE_DEVICE_CAPS 0x2C
+#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
+#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
+#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
+#define IXGBE_MAX_MSIX_VECTORS_82598 0x13
+
+/* MSI-X capability fields masks */
+#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
+
+/* Legacy EEPROM word offsets */
+#define IXGBE_ISCSI_BOOT_CAPS 0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
+
+/* EEPROM Commands - SPI */
+#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
+/* EEPROM reset Write Enable latch */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+
+/* EEPROM Read Register */
+#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */
+#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* words rd in burst */
+#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */
+
+#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
+#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */
+#endif
+
+#ifndef IXGBE_EERD_EEWR_ATTEMPTS
+/* Number of 5 microsecond waits for EERD read and
+ * EEWR write to complete */
+#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+#endif
+
+#ifndef IXGBE_FLUDONE_ATTEMPTS
+/* # attempts we wait for flush update to complete */
+#define IXGBE_FLUDONE_ATTEMPTS 20000
+#endif
+
+#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
+#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
+
+#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
+#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
+#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
+#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
+#define IXGBE_FW_LESM_STATE_1 0x1
+#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
+#define IXGBE_FW_PATCH_VERSION_4 0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */
+
+#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */
+#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */
+#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */
+
+/* PCI Bus Info */
+#define IXGBE_PCI_DEVICE_STATUS 0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
+#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
+#define IXGBE_PCI_LINK_WIDTH 0x3F0
+#define IXGBE_PCI_LINK_WIDTH_1 0x10
+#define IXGBE_PCI_LINK_WIDTH_2 0x20
+#define IXGBE_PCI_LINK_WIDTH_4 0x40
+#define IXGBE_PCI_LINK_WIDTH_8 0x80
+#define IXGBE_PCI_LINK_SPEED 0xF
+#define IXGBE_PCI_LINK_SPEED_2500 0x1
+#define IXGBE_PCI_LINK_SPEED_5000 0x2
+#define IXGBE_PCI_LINK_SPEED_8000 0x3
+#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
+#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
+
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+
+/* Check whether an address is multicast. This is a little-endian-specific check. */
+#define IXGBE_IS_MULTICAST(Address) \
+ (bool)(((u8 *)(Address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define IXGBE_IS_BROADCAST(Address) \
+ ((((u8 *)(Address))[0] == ((u8)0xff)) && \
+ (((u8 *)(Address))[1] == ((u8)0xff)))
+
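+/*
+ * Illustrative sketch only (not upstream driver code, names are
+ * hypothetical): how the address-class macros above might be used to
+ * classify the destination MAC of a received frame. IXGBE_IS_BROADCAST()
+ * only checks the first two bytes, so it is a quick filter rather than a
+ * full ff:ff:ff:ff:ff:ff comparison.
+ */
+#if 0
+static inline int example_addr_class(u8 *dst_mac)
+{
+	if (IXGBE_IS_BROADCAST(dst_mac))
+		return 2;	/* broadcast (first two bytes are 0xff) */
+	if (IXGBE_IS_MULTICAST(dst_mac))
+		return 1;	/* multicast (I/G bit set in first byte) */
+	return 0;		/* unicast */
+}
+#endif
+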
+/* RAH */
+#define IXGBE_RAH_VIND_MASK 0x003C0000
+#define IXGBE_RAH_VIND_SHIFT 18
+#define IXGBE_RAH_AV 0x80000000
+#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+/* Header split receive */
+#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
+#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
+#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_RSC_DIS 0x00000010
+#define IXGBE_RFCTL_NFSW_DIS 0x00000040
+#define IXGBE_RFCTL_NFSR_DIS 0x00000080
+#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
+#define IXGBE_RFCTL_NFS_VER_SHIFT 8
+#define IXGBE_RFCTL_NFS_VER_2 0
+#define IXGBE_RFCTL_NFS_VER_3 1
+#define IXGBE_RFCTL_NFS_VER_4 2
+#define IXGBE_RFCTL_IPV6_DIS 0x00000400
+#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800
+#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000
+#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000
+#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
+/* Enable short packet padding to 64 bytes */
+#define IXGBE_TX_PAD_ENABLE 0x00000400
+#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
+/* This allows for 16K packets + 4k for vlan */
+#define IXGBE_MAX_FRAME_SZ 0x40040000
+
+#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */
+#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* X540 supported only */
+#define IXGBE_RXDCTL_RLPML_EN 0x00008000
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+
+#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
+
+#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */
+
+#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF
+#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00
+#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01
+#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02
+#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03
+#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04
+
+#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00
+#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000
+#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100
+#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200
+#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300
+#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800
+#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900
+#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00
+#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00
+#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00
+#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
+
+#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
+#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
+#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
+#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
+#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
+/* Receive Priority Flow Control Enable */
+#define IXGBE_FCTRL_RPFCE 0x00004000
+#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
+#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */
+#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
+#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
+#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
+#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */
+#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */
+
+/* Multiple Receive Queue Control */
+#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
+#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */
+#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */
+#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */
+#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */
+#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */
+#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */
+#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
+#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
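+
+/*
+ * Illustrative sketch only (hypothetical define, not part of the driver):
+ * a typical MRQC programming value that enables RSS and hashes over IPv4,
+ * IPv4/TCP, IPv6 and IPv6/TCP headers using the field defines above.
+ */
+#if 0
+#define EXAMPLE_MRQC_RSS_IP_TCP	(IXGBE_MRQC_RSSEN | \
+				 IXGBE_MRQC_RSS_FIELD_IPV4 | \
+				 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | \
+				 IXGBE_MRQC_RSS_FIELD_IPV6 | \
+				 IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
+#endif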
+
+/* Queue Drop Enable */
+#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_IDX_MASK 0x00007F00
+#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_WRITE 0x00010000
+#define IXGBE_QDE_READ 0x00020000
+
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+/* Multiple Transmit Queue Command Register */
+#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
+#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
+#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
+#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
+#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */
+#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */
+#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
+#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* buffer size in 1 KB units */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
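+
+/*
+ * Illustrative sketch only (hypothetical helper, not upstream code): the
+ * BSIZEPKT field is programmed in 1 KB units, so an Rx buffer length in
+ * bytes is shifted right by IXGBE_SRRCTL_BSIZEPKT_SHIFT, e.g. 2048 bytes
+ * becomes 2.
+ */
+#if 0
+static inline u32 example_srrctl_onebuf(u32 rx_buf_len_bytes)
+{
+	u32 srrctl = rx_buf_len_bytes >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+	srrctl &= IXGBE_SRRCTL_BSIZEPKT_MASK;
+	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+	return srrctl;
+}
+#endif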
+
+#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
+#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
+
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor. */
+#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
+
+/* Security Processing bit Indication */
+#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
+
+/* Masks to determine if packets should be dropped due to frame errors */
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE
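+
+/*
+ * Illustrative sketch only (hypothetical helper): a legacy Rx descriptor
+ * would normally be dropped when any bit covered by
+ * IXGBE_RXD_ERR_FRAME_ERR_MASK is set in its errors byte.
+ */
+#if 0
+static inline int example_rx_desc_has_frame_err(u8 rx_errors)
+{
+	return (rx_errors & IXGBE_RXD_ERR_FRAME_ERR_MASK) != 0;
+}
+#endif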
+
+/* Multicast bit mask */
+#define IXGBE_MCSTCTRL_MFE 0x4
+
+/* The number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Vlan-specific macros */
+#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
+#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P)))
+#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P)))
+#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P)))
+#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P)))
+#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P)))
+#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P)))
+#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P)))
+#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P)))
+#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P)))
+#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P)))
+#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P)))
+#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P)))
+#define IXGBE_PVTEITR(P) (((P) < 24) ? (0x00820 + ((P) * 4)) : \
+ (0x012300 + (((P) - 24) * 4)))
+#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P)))
+#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P)))
+#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P)))
+#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P)))
+#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \
+ : (0x0D000 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \
+ : (0x0D004 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \
+ : (0x0D008 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \
+ : (0x0D010 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \
+ : (0x0D018 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \
+ : (0x0D028 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \
+ : (0x0D014 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P)))
+#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P)))
+#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P)))
+#define IXGBE_PVFTTDLEN(P) (0x06008 + (0x40 * (P)))
+#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
+#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? (0x0100C + (0x40 * (P))) \
+ : (0x0D00C + (0x40 * ((P) - 64))))
+#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P)))
+#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x)))
+#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x)))
+#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x)))
+#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x)))
+#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x)))
+#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x)))
+#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
+
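+/*
+ * Illustrative sketch only (hypothetical values): with 4 Tx queues per
+ * pool, the head write-back address register for VF 3, queue 1 resolves
+ * through IXGBE_PVFTDWBAL(4 * 3 + 1), i.e. offset 0x06038 + 0x40 * 13.
+ */
+#if 0
+static inline u32 example_vf_tdwbal_offset(u32 q_per_pool, u32 vf, u32 q)
+{
+	return IXGBE_PVFTDWBALn(q_per_pool, vf, q);
+}
+#endif
+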
+/* Little Endian defines */
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+
+/* Big Endian defines */
+#ifndef __be16
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+#endif
+
+enum ixgbe_fdir_pballoc_type {
+ IXGBE_FDIR_PBALLOC_NONE = 0,
+ IXGBE_FDIR_PBALLOC_64K = 1,
+ IXGBE_FDIR_PBALLOC_128K = 2,
+ IXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+/* Flow Director register values */
+#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
+#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
+#define IXGBE_FDIRM_VLANID 0x00000001
+#define IXGBE_FDIRM_VLANP 0x00000002
+#define IXGBE_FDIRM_POOL 0x00000004
+#define IXGBE_FDIRM_L4P 0x00000008
+#define IXGBE_FDIRM_FLEX 0x00000010
+#define IXGBE_FDIRM_DIPv6 0x00000020
+
+#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT 0
+#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT 16
+#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
+#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
+#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
+#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
+
+#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
+#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004
+#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
+#define IXGBE_FDIRCMD_IPV6 0x00000080
+#define IXGBE_FDIRCMD_CLEARHT 0x00000100
+#define IXGBE_FDIRCMD_DROP 0x00000200
+#define IXGBE_FDIRCMD_INT 0x00000400
+#define IXGBE_FDIRCMD_LAST 0x00000800
+#define IXGBE_FDIRCMD_COLLISION 0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
+#define IXGBE_FDIR_INIT_DONE_POLL 10
+#define IXGBE_FDIRCMD_CMD_POLL 10
+
+#define IXGBE_FDIR_DROP_QUEUE 127
+
+#define IXGBE_STATUS_OVERHEATING_BIT 20 /* STATUS overtemp bit num */
+
+/* Manageability Host Interface defines */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+
+/* Host Interface Command Structures */
+
+struct ixgbe_hic_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
+
+struct ixgbe_hic_drv_info {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ u8 pad; /* end spacing to ensure length is mult. of dword */
+ u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+
+/* Transmit Descriptor - Legacy */
+struct ixgbe_legacy_tx_desc {
+ u64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 vlan;
+ } fields;
+ } upper;
+};
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor - Legacy */
+struct ixgbe_legacy_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 vlan;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
+#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
+#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */
+#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
+#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
+#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st&Last TSO-full iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800
+#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
+#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
+#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */
+#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+/* Autonegotiation advertised speeds */
+typedef u32 ixgbe_autoneg_advertised;
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_100_FULL 0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+
+
+/* Physical layer type */
+typedef u32 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000
+
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from 802.1bb Annex O
+ */
+
+/* BitTimes (BT) conversion */
+#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
+#define IXGBE_B2BT(BT) (BT * 8)
+
+/* Calculate Delay to respond to PFC */
+#define IXGBE_PFC_D 672
+
+/* Calculate Cable Delay */
+#define IXGBE_CABLE_DC 5556 /* Delay Copper */
+#define IXGBE_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */
+#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */
+#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */
+
+#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC)
+
+/* Calculate Interface Delay 82598, 82599 */
+#define IXGBE_PHY_D 12800
+#define IXGBE_MAC_D 4096
+#define IXGBE_XAUI_D (2 * 1024)
+
+#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define IXGBE_HD 6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define IXGBE_PCI_DELAY 10000
+
+/* Calculate X540 delay value in bit times */
+#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID_X540) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
+
+/* Calculate 82599, 82598 delay value in bit times */
+#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
+
+/* Calculate low threshold delay values */
+#define IXGBE_LOW_DV_X540(_max_frame_tc) \
+ (2 * IXGBE_B2BT(_max_frame_tc) + \
+ (36 * IXGBE_PCI_DELAY / 25) + 1)
+#define IXGBE_LOW_DV(_max_frame_tc) \
+ (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
+
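+/*
+ * Illustrative sketch only (hypothetical helper, not the driver's own
+ * threshold code): the delay macros above yield bit times; IXGBE_BT2KB()
+ * rounds that up to KB, the unit used for the Rx packet buffer flow
+ * control thresholds. Here the same max frame size is assumed for the
+ * link and the traffic class.
+ */
+#if 0
+static inline u32 example_fc_high_water_kb_82599(u32 max_frame)
+{
+	return IXGBE_BT2KB(IXGBE_DV(max_frame, max_frame));
+}
+#endif
+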
+/* Software ATR hash keys */
+#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_MASK 0x3
+#define IXGBE_ATR_L4TYPE_UDP 0x1
+#define IXGBE_ATR_L4TYPE_TCP 0x2
+#define IXGBE_ATR_L4TYPE_SCTP 0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+enum ixgbe_atr_flow_type {
+ IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
+
+/* Flow Director ATR input struct. */
+union ixgbe_atr_input {
+ /*
+ * Byte layout in order, all values with MSB first:
+ *
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
+ * vlan_id - 2 bytes
+	 * dst_ip - 16 bytes
+	 * src_ip - 16 bytes
+ * src_port - 2 bytes
+ * dst_port - 2 bytes
+ * flex_bytes - 2 bytes
+ * bkt_hash - 2 bytes
+ */
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 bkt_hash;
+ } formatted;
+ __be32 dword_stream[11];
+};
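+
+/*
+ * Illustrative sketch only (hypothetical helper): populating the formatted
+ * view of the ATR input for a TCP/IPv4 flow. The cpu_to_be*() conversions
+ * produce the big-endian layout described in the comment above; field
+ * values here are examples only.
+ */
+#if 0
+static inline void example_fill_atr_tcpv4(union ixgbe_atr_input *in,
+					  u32 src_ip, u32 dst_ip,
+					  u16 src_port, u16 dst_port)
+{
+	memset(in, 0, sizeof(*in));
+	in->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+	in->formatted.src_ip[0] = cpu_to_be32(src_ip);
+	in->formatted.dst_ip[0] = cpu_to_be32(dst_ip);
+	in->formatted.src_port = cpu_to_be16(src_port);
+	in->formatted.dst_port = cpu_to_be16(dst_port);
+}
+#endif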
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
+};
+
+
+/*
+ * Unavailable: The FCoE Boot Option ROM is not present in the flash.
+ * Disabled: Present; boot order is not set for any targets on the port.
+ * Enabled: Present; boot order is set for at least one target on the port.
+ */
+enum ixgbe_fcoe_boot_status {
+ ixgbe_fcoe_bootstatus_disabled = 0,
+ ixgbe_fcoe_bootstatus_enabled = 1,
+ ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
+};
+
+enum ixgbe_eeprom_type {
+ ixgbe_eeprom_uninitialized = 0,
+ ixgbe_eeprom_spi,
+ ixgbe_flash,
+ ixgbe_eeprom_none /* No NVM support */
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82598EB,
+ ixgbe_mac_82599EB,
+ ixgbe_mac_X540,
+ ixgbe_num_macs
+};
+
+enum ixgbe_phy_type {
+ ixgbe_phy_unknown = 0,
+ ixgbe_phy_none,
+ ixgbe_phy_tn,
+ ixgbe_phy_aq,
+ ixgbe_phy_cu_unknown,
+ ixgbe_phy_qt,
+ ixgbe_phy_xaui,
+ ixgbe_phy_nl,
+ ixgbe_phy_sfp_passive_tyco,
+ ixgbe_phy_sfp_passive_unknown,
+ ixgbe_phy_sfp_active_unknown,
+ ixgbe_phy_sfp_avago,
+ ixgbe_phy_sfp_ftl,
+ ixgbe_phy_sfp_ftl_active,
+ ixgbe_phy_sfp_unknown,
+ ixgbe_phy_sfp_intel,
+	ixgbe_phy_sfp_unsupported, /* Enforce bit set with unsupported module */
+ ixgbe_phy_generic
+};
+
+/*
+ * SFP+ module type IDs:
+ *
+ * ID Module Type
+ * =============
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ * 3 SFP_DA_CU_CORE0 - 82599-specific
+ * 4 SFP_DA_CU_CORE1 - 82599-specific
+ * 5 SFP_SR/LR_CORE0 - 82599-specific
+ * 6 SFP_SR/LR_CORE1 - 82599-specific
+ */
+enum ixgbe_sfp_type {
+ ixgbe_sfp_type_da_cu = 0,
+ ixgbe_sfp_type_sr = 1,
+ ixgbe_sfp_type_lr = 2,
+ ixgbe_sfp_type_da_cu_core0 = 3,
+ ixgbe_sfp_type_da_cu_core1 = 4,
+ ixgbe_sfp_type_srlr_core0 = 5,
+ ixgbe_sfp_type_srlr_core1 = 6,
+ ixgbe_sfp_type_da_act_lmt_core0 = 7,
+ ixgbe_sfp_type_da_act_lmt_core1 = 8,
+ ixgbe_sfp_type_1g_cu_core0 = 9,
+ ixgbe_sfp_type_1g_cu_core1 = 10,
+ ixgbe_sfp_type_1g_sx_core0 = 11,
+ ixgbe_sfp_type_1g_sx_core1 = 12,
+ ixgbe_sfp_type_not_present = 0xFFFE,
+ ixgbe_sfp_type_unknown = 0xFFFF
+};
+
+enum ixgbe_media_type {
+ ixgbe_media_type_unknown = 0,
+ ixgbe_media_type_fiber,
+ ixgbe_media_type_fiber_qsfp,
+ ixgbe_media_type_fiber_lco,
+ ixgbe_media_type_copper,
+ ixgbe_media_type_backplane,
+ ixgbe_media_type_cx4,
+ ixgbe_media_type_virtual
+};
+
+/* Flow Control Settings */
+enum ixgbe_fc_mode {
+ ixgbe_fc_none = 0,
+ ixgbe_fc_rx_pause,
+ ixgbe_fc_tx_pause,
+ ixgbe_fc_full,
+ ixgbe_fc_default
+};
+
+/* Smart Speed Settings */
+#define IXGBE_SMARTSPEED_MAX_RETRIES 3
+enum ixgbe_smart_speed {
+ ixgbe_smart_speed_auto = 0,
+ ixgbe_smart_speed_on,
+ ixgbe_smart_speed_off
+};
+
+/* PCI bus types */
+enum ixgbe_bus_type {
+ ixgbe_bus_type_unknown = 0,
+ ixgbe_bus_type_pci,
+ ixgbe_bus_type_pcix,
+ ixgbe_bus_type_pci_express,
+ ixgbe_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum ixgbe_bus_speed {
+ ixgbe_bus_speed_unknown = 0,
+ ixgbe_bus_speed_33 = 33,
+ ixgbe_bus_speed_66 = 66,
+ ixgbe_bus_speed_100 = 100,
+ ixgbe_bus_speed_120 = 120,
+ ixgbe_bus_speed_133 = 133,
+ ixgbe_bus_speed_2500 = 2500,
+ ixgbe_bus_speed_5000 = 5000,
+ ixgbe_bus_speed_8000 = 8000,
+ ixgbe_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum ixgbe_bus_width {
+ ixgbe_bus_width_unknown = 0,
+ ixgbe_bus_width_pcie_x1 = 1,
+ ixgbe_bus_width_pcie_x2 = 2,
+ ixgbe_bus_width_pcie_x4 = 4,
+ ixgbe_bus_width_pcie_x8 = 8,
+ ixgbe_bus_width_32 = 32,
+ ixgbe_bus_width_64 = 64,
+ ixgbe_bus_width_reserved
+};
+
+struct ixgbe_addr_filter_info {
+ u32 num_mc_addrs;
+ u32 rar_used_count;
+ u32 mta_in_use;
+ u32 overflow_promisc;
+ bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct ixgbe_bus_info {
+ enum ixgbe_bus_speed speed;
+ enum ixgbe_bus_width width;
+ enum ixgbe_bus_type type;
+
+ u16 func;
+ u16 lan_id;
+};
+
+/* Flow control parameters */
+struct ixgbe_fc_info {
+ u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */
+ u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */
+ u16 pause_time; /* Flow Control Pause timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ bool disable_fc_autoneg; /* Do not autonegotiate FC */
+ bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+ enum ixgbe_fc_mode current_mode; /* FC mode in effect */
+ enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+struct ixgbe_hw_stats {
+ u64 crcerrs;
+ u64 illerrc;
+ u64 errbc;
+ u64 mspdc;
+ u64 mpctotal;
+ u64 mpc[8];
+ u64 mlfc;
+ u64 mrfc;
+ u64 rlec;
+ u64 lxontxc;
+ u64 lxonrxc;
+ u64 lxofftxc;
+ u64 lxoffrxc;
+ u64 pxontxc[8];
+ u64 pxonrxc[8];
+ u64 pxofftxc[8];
+ u64 pxoffrxc[8];
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc[8];
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mngprc;
+ u64 mngpdc;
+ u64 mngptc;
+ u64 tor;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 xec;
+ u64 qprc[16];
+ u64 qptc[16];
+ u64 qbrc[16];
+ u64 qbtc[16];
+ u64 qprdc[16];
+ u64 pxon2offc[8];
+ u64 fdirustat_add;
+ u64 fdirustat_remove;
+ u64 fdirfstat_fadd;
+ u64 fdirfstat_fremove;
+ u64 fdirmatch;
+ u64 fdirmiss;
+ u64 fccrc;
+ u64 fclast;
+ u64 fcoerpdc;
+ u64 fcoeprc;
+ u64 fcoeptc;
+ u64 fcoedwrc;
+ u64 fcoedwtc;
+ u64 fcoe_noddp;
+ u64 fcoe_noddp_ext_buff;
+ u64 ldpcec;
+ u64 pcrc8ec;
+ u64 b2ospc;
+ u64 b2ogprc;
+ u64 o2bgptc;
+ u64 o2bspc;
+};
+
+/* forward declaration */
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+
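+/*
+ * Illustrative sketch only (hypothetical iterator, not upstream code):
+ * shows the calling convention expected from an ixgbe_mc_addr_itr - return
+ * the current 6-byte address, report its pool in *vmdq and advance
+ * *mc_addr_ptr to the next entry of a flat address array.
+ */
+#if 0
+static u8 *example_mc_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
+{
+	u8 *addr = *mc_addr_ptr;
+
+	*vmdq = 0;	/* default pool */
+	*mc_addr_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
+	return addr;
+}
+#endif
+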
+/* Function pointer table */
+struct ixgbe_eeprom_operations {
+ s32 (*init_params)(struct ixgbe_hw *);
+ s32 (*read)(struct ixgbe_hw *, u16, u16 *);
+ s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ s32 (*write)(struct ixgbe_hw *, u16, u16);
+ s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
+ s32 (*update_checksum)(struct ixgbe_hw *);
+ u16 (*calc_checksum)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+ s32 (*reset_hw)(struct ixgbe_hw *);
+ s32 (*start_hw)(struct ixgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
+ s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+ s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *);
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+ void (*set_lan_id)(struct ixgbe_hw *);
+ s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+ s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+ s32 (*setup_sfp)(struct ixgbe_hw *);
+ s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ s32 (*disable_sec_rx_path)(struct ixgbe_hw *);
+ s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
+ s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
+ void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+
+ /* Link */
+ void (*disable_tx_laser)(struct ixgbe_hw *);
+ void (*enable_tx_laser)(struct ixgbe_hw *);
+ void (*flap_tx_laser)(struct ixgbe_hw *);
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ bool *);
+
+ /* Packet Buffer manipulation */
+ void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int);
+
+ /* LED */
+ s32 (*led_on)(struct ixgbe_hw *, u32);
+ s32 (*led_off)(struct ixgbe_hw *, u32);
+ s32 (*blink_led_start)(struct ixgbe_hw *, u32);
+ s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+ s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
+ s32 (*clear_rar)(struct ixgbe_hw *, u32);
+ s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
+ s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
+ s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*init_rx_addrs)(struct ixgbe_hw *);
+ s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+ ixgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+ ixgbe_mc_addr_itr, bool clear);
+ s32 (*enable_mc)(struct ixgbe_hw *);
+ s32 (*disable_mc)(struct ixgbe_hw *);
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+ s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *);
+ s32 (*init_uta_tables)(struct ixgbe_hw *);
+ void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
+
+ /* Flow Control */
+ s32 (*fc_enable)(struct ixgbe_hw *);
+
+ /* Manageability interface */
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+ s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
+ s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+};
+
+struct ixgbe_phy_operations {
+ s32 (*identify)(struct ixgbe_hw *);
+ s32 (*identify_sfp)(struct ixgbe_hw *);
+ s32 (*init)(struct ixgbe_hw *);
+ s32 (*reset)(struct ixgbe_hw *);
+ s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+ s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ s32 (*setup_link)(struct ixgbe_hw *);
+ s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
+ bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+ s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
+ s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
+ s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ void (*i2c_bus_clear)(struct ixgbe_hw *);
+ s32 (*check_overtemp)(struct ixgbe_hw *);
+};
+
+struct ixgbe_eeprom_info {
+ struct ixgbe_eeprom_operations ops;
+ enum ixgbe_eeprom_type type;
+ u32 semaphore_delay;
+ u16 word_size;
+ u16 address_bits;
+ u16 word_page_size;
+};
+
+#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+struct ixgbe_mac_info {
+ struct ixgbe_mac_operations ops;
+ enum ixgbe_mac_type type;
+ u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ /* prefix for World Wide Node Name (WWNN) */
+ u16 wwnn_prefix;
+ /* prefix for World Wide Port Name (WWPN) */
+ u16 wwpn_prefix;
+#define IXGBE_MAX_MTA 128
+ u32 mta_shadow[IXGBE_MAX_MTA];
+ s32 mc_filter_type;
+ u32 mcft_size;
+ u32 vft_size;
+ u32 num_rar_entries;
+ u32 rar_highwater;
+ u32 rx_pb_size;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 orig_autoc;
+ u8 san_mac_rar_index;
+ u32 orig_autoc2;
+ u16 max_msix_vectors;
+ bool arc_subsystem_valid;
+ bool orig_link_settings_stored;
+ bool autotry_restart;
+ u8 flags;
+ struct ixgbe_thermal_sensor_data thermal_sensor_data;
+};
+
+struct ixgbe_phy_info {
+ struct ixgbe_phy_operations ops;
+ enum ixgbe_phy_type type;
+ u32 addr;
+ u32 id;
+ enum ixgbe_sfp_type sfp_type;
+ bool sfp_setup_needed;
+ u32 revision;
+ enum ixgbe_media_type media_type;
+ bool reset_disable;
+ ixgbe_autoneg_advertised autoneg_advertised;
+ enum ixgbe_smart_speed smart_speed;
+ bool smart_speed_active;
+ bool multispeed_fiber;
+ bool reset_if_overtemp;
+ bool qsfp_shared_i2c_bus;
+};
+
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+ void (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *, u16);
+ s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+ s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 udelay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+struct ixgbe_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+ struct ixgbe_mac_info mac;
+ struct ixgbe_addr_filter_info addr_ctrl;
+ struct ixgbe_fc_info fc;
+ struct ixgbe_phy_info phy;
+ struct ixgbe_eeprom_info eeprom;
+ struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ bool adapter_stopped;
+ bool force_full_reset;
+ bool allow_unsupported_sfp;
+};
+
+#define ixgbe_call_func(hw, func, params, error) \
+ (func != NULL) ? func params : error
+
+
+/* Error Codes */
+#define IXGBE_ERR_EEPROM -1
+#define IXGBE_ERR_EEPROM_CHECKSUM -2
+#define IXGBE_ERR_PHY -3
+#define IXGBE_ERR_CONFIG -4
+#define IXGBE_ERR_PARAM -5
+#define IXGBE_ERR_MAC_TYPE -6
+#define IXGBE_ERR_UNKNOWN_PHY -7
+#define IXGBE_ERR_LINK_SETUP -8
+#define IXGBE_ERR_ADAPTER_STOPPED -9
+#define IXGBE_ERR_INVALID_MAC_ADDR -10
+#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
+#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
+#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
+#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
+#define IXGBE_ERR_RESET_FAILED -15
+#define IXGBE_ERR_SWFW_SYNC -16
+#define IXGBE_ERR_PHY_ADDR_INVALID -17
+#define IXGBE_ERR_I2C -18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
+#define IXGBE_ERR_SFP_NOT_PRESENT -20
+#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
+#define IXGBE_ERR_FDIR_REINIT_FAILED -23
+#define IXGBE_ERR_EEPROM_VERSION -24
+#define IXGBE_ERR_NO_SPACE -25
+#define IXGBE_ERR_OVERTEMP -26
+#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED -28
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define IXGBE_ERR_PBA_SECTION -31
+#define IXGBE_ERR_INVALID_ARGUMENT -32
+#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
+#define IXGBE_ERR_OUT_OF_MEM -34
+
+#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
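+
+/*
+ * Illustrative sketch only (hypothetical wrapper): ixgbe_call_func() above
+ * guards an optional ops function pointer and returns the supplied error
+ * code when the operation is not implemented for this MAC type.
+ */
+#if 0
+static inline s32 example_led_on(struct ixgbe_hw *hw, u32 index)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+#endif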
+
+#define UNREFERENCED_XPARAMETER
+
+#endif /* _IXGBE_TYPE_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c
new file mode 100755
index 00000000..efffe6f6
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c
@@ -0,0 +1,938 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+/**
+ * ixgbe_init_ops_X540 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for X540.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val;
+
+ ret_val = ixgbe_init_phy_ops_generic(hw);
+ ret_val = ixgbe_init_ops_generic(hw);
+
+
+ /* EEPROM */
+ eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
+ eeprom->ops.read = &ixgbe_read_eerd_X540;
+ eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
+ eeprom->ops.write = &ixgbe_write_eewr_X540;
+ eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
+ eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
+ eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
+ eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
+
+ /* PHY */
+ phy->ops.init = &ixgbe_init_phy_ops_generic;
+ phy->ops.reset = NULL;
+
+ /* MAC */
+ mac->ops.reset_hw = &ixgbe_reset_hw_X540;
+ mac->ops.get_media_type = &ixgbe_get_media_type_X540;
+ mac->ops.get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_X540;
+ mac->ops.read_analog_reg8 = NULL;
+ mac->ops.write_analog_reg8 = NULL;
+ mac->ops.start_hw = &ixgbe_start_hw_X540;
+ mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
+ mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+ mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
+ mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
+ mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
+ mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
+ mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+ mac->rar_highwater = 1;
+ mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
+ mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
+ mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
+ mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+
+ /* Link */
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_generic;
+ mac->ops.setup_link = &ixgbe_setup_mac_link_X540;
+ mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
+ mac->ops.check_link = &ixgbe_check_mac_link_generic;
+
+ mac->mcft_size = 128;
+ mac->vft_size = 128;
+ mac->num_rar_entries = 128;
+ mac->rx_pb_size = 384;
+ mac->max_tx_queues = 128;
+ mac->max_rx_queues = 128;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ /*
+ * FWSM register
+ * ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+ IXGBE_FWSM_MODE_MASK) ? true : false;
+
+ //hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+
+ /* LEDs */
+ mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
+ mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_X540 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_media_type_X540 - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+ return ixgbe_media_type_copper;
+}
+
+/**
+ * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ **/
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+}
+
+/**
+ * ixgbe_reset_hw_X540 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ **/
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+
+	/*
+	 * Userland DPDK takes ownership of the device. The kernel driver
+	 * here is used only as a simple path for ethtool and will not
+	 * actually reset the device.
+	 */
+#if 0
+ u32 ctrl, i;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != 0)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+mac_reset_top:
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+ msleep(100);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+#endif
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+//reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function and the
+ * generation-specific start_hw function (ixgbe_start_hw_gen2), then
+ * performs revision-specific operations, if any.
+ **/
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != 0)
+ goto out;
+
+ ret_val = ixgbe_start_hw_gen2(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u16 ext_ability = 0;
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+ status = ixgbe_read_eerd_generic(hw, offset, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads one or more 16 bit words from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+ status = ixgbe_read_eerd_buffer_generic(hw, offset,
+ words, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+ status = ixgbe_write_eewr_generic(hw, offset, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes one or more 16 bit words to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+ status = ixgbe_write_eewr_buffer_generic(hw, offset,
+ words, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * This function does not use synchronization for EERD and EEWR. It can
+ * be used internally by functions that already hold the semaphore through
+ * ixgbe_acquire_swfw_sync_X540.
+ **/
+u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ u16 i;
+ u16 j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+
+ /*
+ * Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores here. Instead use
+ * ixgbe_read_eerd_generic
+ */
+
+ /* Include 0x0-0x3E in the checksum (word 0x3F holds the checksum itself) */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+ if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+
+ /*
+ * Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (pointer + length) >= hw->eeprom.word_size)
+ continue;
+
+ for (j = pointer+1; j <= pointer+length; j++) {
+ if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+ }
+
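+ /*
+ * The full EEPROM image is expected to sum to IXGBE_EEPROM_SUM, so the
+ * value returned here is the complement that makes the words add up.
+ */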
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return checksum;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+ u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ goto out;
+ }
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+
+ /*
+ * Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+
+ /*
+ * Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+out:
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After the EEPROM has been written to shadow RAM using the EEWR register,
+ * software calculates the checksum, writes it to the EEPROM, and instructs
+ * the hardware to update the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum;
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ return status;
+ }
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+
+ /*
+ * Do not use hw->eeprom.ops.write because we do not want to
+ * take the synchronization semaphores twice here.
+ */
+ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
+
+ if (status == 0)
+ status = ixgbe_update_flash_X540(hw);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Sets FLUP (bit 23) of the EEC register to instruct the hardware to copy the
+ * EEPROM from shadow RAM to the flash device.
+ **/
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+ u32 flup;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_ERR_EEPROM) {
+ hw_dbg(hw, "Flash update time out\n");
+ goto out;
+ }
+
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == 0)
+ hw_dbg(hw, "Flash update complete\n");
+ else
+ hw_dbg(hw, "Flash update time out\n");
+
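+ /*
+ * Revision A0 parts need a second flash update cycle when the
+ * second NVM sector is marked valid (SEC1VAL).
+ */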
+ if (hw->revision_id == 0) {
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ if (flup & IXGBE_EEC_SEC1VAL) {
+ flup |= IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ }
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == 0)
+ hw_dbg(hw, "Flash update complete\n");
+ else
+ hw_dbg(hw, "Flash update time out\n");
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ * @hw: pointer to hardware structure
+ *
+ * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ * flash update is done.
+ **/
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (reg & IXGBE_EEC_FLUDONE) {
+ status = 0;
+ break;
+ }
+ udelay(5);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the SW_FW_SYNC register for
+ * the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
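+ /* in SW_FW_SYNC the FW ownership bits sit 5 positions above the SW bits */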
+ u32 fwmask = mask << 5;
+ u32 hwmask = 0;
+ u32 timeout = 200;
+ u32 i;
+ s32 ret_val = 0;
+
+ if (swmask == IXGBE_GSSR_EEP_SM)
+ hwmask = IXGBE_GSSR_FLASH_SM;
+
+ /* SW only mask doesn't have FW bit pair */
+ if (swmask == IXGBE_GSSR_SW_MNG_SM)
+ fwmask = 0;
+
+ for (i = 0; i < timeout; i++) {
+ /*
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msleep(5);
+ goto out;
+ } else {
+ /*
+ * Firmware currently using resource (fwmask), hardware
+ * currently using resource (hwmask), or other software
+ * thread currently using resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msleep(5);
+ }
+ }
+
+ /* Failed to get SW only semaphore */
+ if (swmask == IXGBE_GSSR_SW_MNG_SM) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ /* If the resource is not released by the FW/HW, the SW can assume that
+ * the FW/HW is malfunctioning. In that case the SW should set the SW
+ * bit(s) of the requested resource(s) while ignoring the corresponding
+ * FW/HW bits in the SW_FW_SYNC register.
+ */
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (swfw_sync & (fwmask | hwmask)) {
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msleep(5);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the SW_FW_SYNC register
+ * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+
+ ixgbe_get_swfw_sync_semaphore(hw);
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msleep(5);
+}
+
+/**
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so SW/FW can gain control of shared resources
+ **/
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = 0;
+ break;
+ }
+ udelay(50);
+ }
+
+ /* Now get the semaphore between SW/FW through the REGSMP bit */
+ if (status == 0) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swsm & IXGBE_SWFW_REGSMP))
+ break;
+
+ udelay(50);
+ }
+
+ /*
+ * Release semaphores and return error if SW NVM semaphore
+ * was not granted because we don't have access to the EEPROM
+ */
+ if (i >= timeout) {
+ hw_dbg(hw, "REGSMP Software NVM semaphore not "
+ "granted.\n");
+ ixgbe_release_swfw_sync_semaphore(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ } else {
+ hw_dbg(hw, "Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm &= ~IXGBE_SWFW_REGSMP;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_blink_led_start_X540 - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /*
+ * Link should be up in order for the blink bit in the LED control
+ * register to work. Force link and speed in the MAC if link is down.
+ * This will be reversed when we stop the blinking.
+ */
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (link_up == false) {
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ }
+ /* Set the LED to LINK_UP + BLINK. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+
+ /* Restore the LED to its default value. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+ ledctl_reg &= ~IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+
+ /* Unforce link and speed in the MAC. */
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h
new file mode 100755
index 00000000..77e8952d
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h
@@ -0,0 +1,58 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
+u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+#endif /* _IXGBE_X540_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c
new file mode 100755
index 00000000..ca9f4224
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c
@@ -0,0 +1,1246 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include "kcompat.h"
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+/* From lib/vsprintf.c */
+#include <asm/div64.h>
+
+static int skip_atoi(const char **s)
+{
+ int i=0;
+
+ while (isdigit(**s))
+ i = i*10 + *((*s)++) - '0';
+ return i;
+}
+
+#define _kc_ZEROPAD 1 /* pad with zero */
+#define _kc_SIGN 2 /* unsigned/signed long */
+#define _kc_PLUS 4 /* show plus */
+#define _kc_SPACE 8 /* space if plus */
+#define _kc_LEFT 16 /* left justified */
+#define _kc_SPECIAL 32 /* 0x */
+#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
+{
+ char c,sign,tmp[66];
+ const char *digits;
+ const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+ const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ int i;
+
+ digits = (type & _kc_LARGE) ? large_digits : small_digits;
+ if (type & _kc_LEFT)
+ type &= ~_kc_ZEROPAD;
+ if (base < 2 || base > 36)
+ return 0;
+ c = (type & _kc_ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & _kc_SIGN) {
+ if (num < 0) {
+ sign = '-';
+ num = -num;
+ size--;
+ } else if (type & _kc_PLUS) {
+ sign = '+';
+ size--;
+ } else if (type & _kc_SPACE) {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & _kc_SPECIAL) {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++]='0';
+ else while (num != 0)
+ tmp[i++] = digits[do_div(num,base)];
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
+ while(size-->0) {
+ if (buf <= end)
+ *buf = ' ';
+ ++buf;
+ }
+ }
+ if (sign) {
+ if (buf <= end)
+ *buf = sign;
+ ++buf;
+ }
+ if (type & _kc_SPECIAL) {
+ if (base==8) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ } else if (base==16) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ if (buf <= end)
+ *buf = digits[33];
+ ++buf;
+ }
+ }
+ if (!(type & _kc_LEFT)) {
+ while (size-- > 0) {
+ if (buf <= end)
+ *buf = c;
+ ++buf;
+ }
+ }
+ while (i < precision--) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ }
+ while (i-- > 0) {
+ if (buf <= end)
+ *buf = tmp[i];
+ ++buf;
+ }
+ while (size-- > 0) {
+ if (buf <= end)
+ *buf = ' ';
+ ++buf;
+ }
+ return buf;
+}
+
+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ int len;
+ unsigned long long num;
+ int i, base;
+ char *str, *end, c;
+ const char *s;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+ number of chars from string */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+ /* 'z' support added 23/7/1999 S.H. */
+ /* 'z' changed to 'Z' --davidm 1/25/99 */
+
+ str = buf;
+ end = buf + size - 1;
+
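+ /* guard against buf + size wrapping past the top of the address space */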
+ if (end < buf - 1) {
+ end = ((void *) -1);
+ size = end - buf + 1;
+ }
+
+ for (; *fmt ; ++fmt) {
+ if (*fmt != '%') {
+ if (str <= end)
+ *str = *fmt;
+ ++str;
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+ repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt) {
+ case '-': flags |= _kc_LEFT; goto repeat;
+ case '+': flags |= _kc_PLUS; goto repeat;
+ case ' ': flags |= _kc_SPACE; goto repeat;
+ case '#': flags |= _kc_SPECIAL; goto repeat;
+ case '0': flags |= _kc_ZEROPAD; goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (isdigit(*fmt))
+ field_width = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg(args, int);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= _kc_LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if (isdigit(*fmt))
+ precision = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg(args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
+ qualifier = *fmt;
+ ++fmt;
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt) {
+ case 'c':
+ if (!(flags & _kc_LEFT)) {
+ while (--field_width > 0) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ }
+ c = (unsigned char) va_arg(args, int);
+ if (str <= end)
+ *str = c;
+ ++str;
+ while (--field_width > 0) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ continue;
+
+ case 's':
+ s = va_arg(args, char *);
+ if (!s)
+ s = "<NULL>";
+
+ len = strnlen(s, precision);
+
+ if (!(flags & _kc_LEFT)) {
+ while (len < field_width--) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ }
+ for (i = 0; i < len; ++i) {
+ if (str <= end)
+ *str = *s;
+ ++str; ++s;
+ }
+ while (len < field_width--) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ continue;
+
+ case 'p':
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ flags |= _kc_ZEROPAD;
+ }
+ str = number(str, end,
+ (unsigned long) va_arg(args, void *),
+ 16, field_width, precision, flags);
+ continue;
+
+
+ case 'n':
+ /* FIXME:
+ * What does C99 say about the overflow case here? */
+ if (qualifier == 'l') {
+ long * ip = va_arg(args, long *);
+ *ip = (str - buf);
+ } else if (qualifier == 'Z') {
+ size_t * ip = va_arg(args, size_t *);
+ *ip = (str - buf);
+ } else {
+ int * ip = va_arg(args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ case '%':
+ if (str <= end)
+ *str = '%';
+ ++str;
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'X':
+ flags |= _kc_LARGE;
+ case 'x':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= _kc_SIGN;
+ case 'u':
+ break;
+
+ default:
+ if (str <= end)
+ *str = '%';
+ ++str;
+ if (*fmt) {
+ if (str <= end)
+ *str = *fmt;
+ ++str;
+ } else {
+ --fmt;
+ }
+ continue;
+ }
+ if (qualifier == 'L')
+ num = va_arg(args, long long);
+ else if (qualifier == 'l') {
+ num = va_arg(args, unsigned long);
+ if (flags & _kc_SIGN)
+ num = (signed long) num;
+ } else if (qualifier == 'Z') {
+ num = va_arg(args, size_t);
+ } else if (qualifier == 'h') {
+ num = (unsigned short) va_arg(args, int);
+ if (flags & _kc_SIGN)
+ num = (signed short) num;
+ } else {
+ num = va_arg(args, unsigned int);
+ if (flags & _kc_SIGN)
+ num = (signed int) num;
+ }
+ str = number(str, end, num, base,
+ field_width, precision, flags);
+ }
+ if (str <= end)
+ *str = '\0';
+ else if (size > 0)
+ /* don't write out a null byte if the buf size is zero */
+ *end = '\0';
+ /* the trailing null byte doesn't count towards the total
+ * ++str;
+ */
+ return str-buf;
+}
+
+int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = _kc_vsnprintf(buf,size,fmt,args);
+ va_end(args);
+ return i;
+}
+#endif /* < 2.4.8 */
+
+
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifdef CONFIG_PCI_IOV
+int __kc_pci_vfs_assigned(struct pci_dev *dev)
+{
+ unsigned int vfs_assigned = 0;
+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
+ int pos;
+ struct pci_dev *vfdev;
+ unsigned short dev_id;
+
+ /* only search if we are a PF */
+ if (!dev->is_physfn)
+ return 0;
+
+ /* find SR-IOV capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
+
+ /*
+ * determine the device ID for the VFs; the vendor ID will be the
+ * same as the PF, so there is no need to check for that one
+ */
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(dev->vendor, dev_id, NULL);
+ while (vfdev) {
+ /*
+ * It is considered assigned if it is a virtual function with
+ * our dev as the physical function and the assigned bit is set
+ */
+ if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
+ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+ vfs_assigned++;
+
+ vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
+ }
+
+#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
+ return vfs_assigned;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* 3.10.0 */
+
+
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#if defined(CONFIG_HIGHMEM)
+
+#ifndef PCI_DRAM_OFFSET
+#define PCI_DRAM_OFFSET 0
+#endif
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
+ PCI_DRAM_OFFSET);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ return pci_map_single(dev, (void *)page_address(page) + offset, size,
+ direction);
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+void
+_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
+ int direction)
+{
+ return pci_unmap_single(dev, dma_addr, size, direction);
+}
+
+#endif /* 2.4.13 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+int
+_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
+{
+ if (!pci_dma_supported(dev, mask))
+ return -EIO;
+ dev->dma_mask = mask;
+ return 0;
+}
+
+int
+_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
+{
+ int i;
+
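+ /* walk the six standard PCI BARs and claim each implemented region */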
+ for (i = 0; i < 6; i++) {
+ if (pci_resource_len(dev, i) == 0)
+ continue;
+
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+ pci_release_regions(dev);
+ return -EBUSY;
+ }
+ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+ pci_release_regions(dev);
+ return -EBUSY;
+ }
+ }
+ }
+ return 0;
+}
+
+void
+_kc_pci_release_regions(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (pci_resource_len(dev, i) == 0)
+ continue;
+
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO)
+ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+
+ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+ }
+}
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+struct net_device *
+_kc_alloc_etherdev(int sizeof_priv)
+{
+ struct net_device *dev;
+ int alloc_size;
+
+ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
+ dev = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ if (sizeof_priv)
+ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
+ dev->name[0] = '\0';
+ ether_setup(dev);
+
+ return dev;
+}
+
+int
+_kc_is_valid_ether_addr(u8 *addr)
+{
+ const char zaddr[6] = { 0, };
+
+ return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
+}
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+int
+_kc_pci_set_power_state(struct pci_dev *dev, int state)
+{
+ return 0;
+}
+
+int
+_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
+{
+ return 0;
+}
+
+#endif /* 2.4.6 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+ int off, int size)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ frag->page = page;
+ frag->page_offset = off;
+ frag->size = size;
+ skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+/*
+ * Original Copyright:
+ * find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ffs(tmp);
+}
+
+size_t _kc_strlcpy(char *dest, const char *src, size_t size)
+{
+ size_t ret = strlen(src);
+
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ memcpy(dest, src, len);
+ dest[len] = '\0';
+ }
+ return ret;
+}
+
+#endif /* 2.6.0 => 2.4.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+ return (i >= size) ? (size - 1) : i;
+}
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+char *_kc_kstrdup(const char *s, unsigned int gfp)
+{
+ size_t len;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ len = strlen(s) + 1;
+ buf = kmalloc(len, gfp);
+ if (buf)
+ memcpy(buf, s, len);
+ return buf;
+}
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+void *_kc_kzalloc(size_t size, int flags)
+{
+ void *ret = kmalloc(size, flags);
+ if (ret)
+ memset(ret, 0, size);
+ return ret;
+}
+#endif /* <= 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+int _kc_skb_pad(struct sk_buff *skb, int pad)
+{
+ int ntail;
+
+ /* If the skbuff is non-linear, tailroom is always zero. */
+ if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+ memset(skb->data+skb->len, 0, pad);
+ return 0;
+ }
+
+ ntail = skb->data_len + pad - (skb->end - skb->tail);
+ if (likely(skb_cloned(skb) || ntail > 0)) {
+ if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
+ goto free_skb;
+ }
+
+#ifdef MAX_SKB_FRAGS
+ if (skb_is_nonlinear(skb) &&
+ !__pskb_pull_tail(skb, skb->data_len))
+ goto free_skb;
+
+#endif
+ memset(skb->data + skb->len, 0, pad);
+ return 0;
+
+free_skb:
+ kfree_skb(skb);
+ return -ENOMEM;
+}
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+int _kc_pci_save_state(struct pci_dev *pdev)
+{
+ struct adapter_struct *adapter = pci_get_drvdata(pdev);
+ int size = PCI_CONFIG_SPACE_LEN, i;
+ u16 pcie_cap_offset, pcie_link_status;
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+ /* no ->dev for 2.4 kernels */
+ WARN_ON(pdev->dev.driver_data == NULL);
+#endif
+ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
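+ /* PCIe devices expose the larger extended config space, so save all of it */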
+ if (pcie_cap_offset) {
+ if (!pci_read_config_word(pdev,
+ pcie_cap_offset + PCIE_LINK_STATUS,
+ &pcie_link_status))
+ size = PCIE_CONFIG_SPACE_LEN;
+ }
+ pci_config_space_ich8lan();
+#ifdef HAVE_PCI_ERS
+ if (adapter->config_space == NULL)
+#else
+ WARN_ON(adapter->config_space != NULL);
+#endif
+ adapter->config_space = kmalloc(size, GFP_KERNEL);
+ if (!adapter->config_space) {
+ printk(KERN_ERR "Out of memory in pci_save_state\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < (size / 4); i++)
+ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
+ return 0;
+}
+
+void _kc_pci_restore_state(struct pci_dev *pdev)
+{
+ struct adapter_struct *adapter = pci_get_drvdata(pdev);
+ int size = PCI_CONFIG_SPACE_LEN, i;
+ u16 pcie_cap_offset;
+ u16 pcie_link_status;
+
+ if (adapter->config_space != NULL) {
+ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap_offset &&
+ !pci_read_config_word(pdev,
+ pcie_cap_offset + PCIE_LINK_STATUS,
+ &pcie_link_status))
+ size = PCIE_CONFIG_SPACE_LEN;
+
+ pci_config_space_ich8lan();
+ for (i = 0; i < (size / 4); i++)
+ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
+#ifndef HAVE_PCI_ERS
+ kfree(adapter->config_space);
+ adapter->config_space = NULL;
+#endif
+ }
+}
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+void _kc_free_netdev(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+
+ if (adapter->config_space != NULL)
+ kfree(adapter->config_space);
+#ifdef CONFIG_SYSFS
+ if (netdev->reg_state == NETREG_UNINITIALIZED) {
+ kfree((char *)netdev - netdev->padded);
+ } else {
+ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
+ netdev->reg_state = NETREG_RELEASED;
+ class_device_put(&netdev->class_dev);
+ }
+#else
+ kfree((char *)netdev - netdev->padded);
+#endif
+}
+#endif
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
+{
+ void *p;
+
+ p = kzalloc(len, gfp);
+ if (p)
+ memcpy(p, src, len);
+ return p;
+}
+#endif /* <= 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+/* hexdump code taken from lib/hexdump.c */
+static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+ int groupsize, unsigned char *linebuf,
+ size_t linebuflen, bool ascii)
+{
+ const u8 *ptr = buf;
+ u8 ch;
+ int j, lx = 0;
+ int ascii_column;
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ if (!len)
+ goto nil;
+ if (len > rowsize) /* limit to one line at a time */
+ len = rowsize;
+ if ((len % groupsize) != 0) /* no mixed size output */
+ groupsize = 1;
+
+ switch (groupsize) {
+ case 8: {
+ const u64 *ptr8 = buf;
+ int ngroups = len / groupsize;
+
+ for (j = 0; j < ngroups; j++)
+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+ "%s%16.16llx", j ? " " : "",
+ (unsigned long long)*(ptr8 + j));
+ ascii_column = 17 * ngroups + 2;
+ break;
+ }
+
+ case 4: {
+ const u32 *ptr4 = buf;
+ int ngroups = len / groupsize;
+
+ for (j = 0; j < ngroups; j++)
+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+ "%s%8.8x", j ? " " : "", *(ptr4 + j));
+ ascii_column = 9 * ngroups + 2;
+ break;
+ }
+
+ case 2: {
+ const u16 *ptr2 = buf;
+ int ngroups = len / groupsize;
+
+ for (j = 0; j < ngroups; j++)
+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+ "%s%4.4x", j ? " " : "", *(ptr2 + j));
+ ascii_column = 5 * ngroups + 2;
+ break;
+ }
+
+ default:
+ for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
+ ch = ptr[j];
+ linebuf[lx++] = hex_asc(ch >> 4);
+ linebuf[lx++] = hex_asc(ch & 0x0f);
+ linebuf[lx++] = ' ';
+ }
+ if (j)
+ lx--;
+
+ ascii_column = 3 * rowsize + 2;
+ break;
+ }
+ if (!ascii)
+ goto nil;
+
+ while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
+ linebuf[lx++] = ' ';
+ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
+ linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
+ : '.';
+nil:
+ linebuf[lx++] = '\0';
+}
+
+void _kc_print_hex_dump(const char *level,
+ const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ const u8 *ptr = buf;
+ int i, linelen, remaining = len;
+ unsigned char linebuf[200];
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ for (i = 0; i < len; i += rowsize) {
+ linelen = min(remaining, rowsize);
+ remaining -= rowsize;
+ _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+ linebuf, sizeof(linebuf), ascii);
+
+ switch (prefix_type) {
+ case DUMP_PREFIX_ADDRESS:
+ printk("%s%s%*p: %s\n", level, prefix_str,
+ (int)(2 * sizeof(void *)), ptr + i, linebuf);
+ break;
+ case DUMP_PREFIX_OFFSET:
+ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
+ break;
+ default:
+ printk("%s%s%s\n", level, prefix_str, linebuf);
+ break;
+ }
+ }
+}
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+int ixgbe_dcb_netlink_register(void)
+{
+ return 0;
+}
+
+int ixgbe_dcb_netlink_unregister(void)
+{
+ return 0;
+}
+
+int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
+{
+ return 0;
+}
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifdef NAPI
+struct net_device *napi_to_poll_dev(struct napi_struct *napi)
+{
+ struct adapter_q_vector *q_vector = container_of(napi,
+ struct adapter_q_vector,
+ napi);
+ return &q_vector->poll_dev;
+}
+
+int __kc_adapter_clean(struct net_device *netdev, int *budget)
+{
+ int work_done;
+ int work_to_do = min(*budget, netdev->quota);
+ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
+ struct napi_struct *napi = netdev->priv;
+ work_done = napi->poll(napi, work_to_do);
+ *budget -= work_done;
+ netdev->quota -= work_done;
+ return (work_done >= work_to_do) ? 1 : 0;
+}
+#endif /* NAPI */
+#endif /* <= 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 link_state;
+ int pos;
+
+ if (!parent)
+ return;
+
+ pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ if (pos) {
+ pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
+ link_state &= ~state;
+ pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
+ }
+}
+#endif /* < 2.6.26 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_stop_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_stop_subqueue(netdev, i);
+}
+void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_wake_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_wake_subqueue(netdev, i);
+}
+void _kc_netif_tx_start_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_start_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_start_subqueue(netdev, i);
+}
+#endif /* HAVE_TX_MQ */
+
+#ifndef __WARN_printf
+void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
+{
+ va_list args;
+
+ printk(KERN_WARNING "------------[ cut here ]------------\n");
+ printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, line);
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+
+ dump_stack();
+}
+#endif /* __WARN_printf */
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+
+int
+_kc_pci_prepare_to_sleep(struct pci_dev *dev)
+{
+ pci_power_t target_state;
+ int error;
+
+ target_state = pci_choose_state(dev, PMSG_SUSPEND);
+
+ pci_enable_wake(dev, target_state, true);
+
+ error = pci_set_power_state(dev, target_state);
+
+ if (error)
+ pci_enable_wake(dev, target_state, false);
+
+ return error;
+}
+
+int
+_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+ int err;
+
+ err = pci_enable_wake(dev, PCI_D3cold, enable);
+ if (err)
+ goto out;
+
+ err = pci_enable_wake(dev, PCI_D3hot, enable);
+
+out:
+ return err;
+}
+#endif /* < 2.6.28 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+ int off, int size)
+{
+ skb_fill_page_desc(skb, i, page, off, size);
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += size;
+}
+#endif /* < 3.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#include <net/ip.h>
+static u32 _kc_simple_tx_hashrnd;
+static u32 _kc_simple_tx_hashrnd_initialized;
+
+u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
+{
+ u32 addr1, addr2, ports;
+ u32 hash, ihl;
+ u8 ip_proto = 0;
+
+ if (unlikely(!_kc_simple_tx_hashrnd_initialized)) {
+ get_random_bytes(&_kc_simple_tx_hashrnd, 4);
+ _kc_simple_tx_hashrnd_initialized = 1;
+ }
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
+ ip_proto = ip_hdr(skb)->protocol;
+ addr1 = ip_hdr(skb)->saddr;
+ addr2 = ip_hdr(skb)->daddr;
+ ihl = ip_hdr(skb)->ihl;
+ break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case htons(ETH_P_IPV6):
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
+ addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
+ ihl = (40 >> 2);
+ break;
+#endif
+ default:
+ return 0;
+ }
+
+
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_DCCP:
+ case IPPROTO_ESP:
+ case IPPROTO_AH:
+ case IPPROTO_SCTP:
+ case IPPROTO_UDPLITE:
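+ /* the first 32 bits after the IP header hold the src/dst ports */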
+ ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
+ break;
+
+ default:
+ ports = 0;
+ break;
+ }
+
+ hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd);
+
+ return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
+}
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#endif /* < 2.6.30 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+#ifdef HAVE_TX_MQ
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+ unsigned int real_num = dev->real_num_tx_queues;
+ struct Qdisc *qdisc;
+ int i;
+
+ if (unlikely(txq > dev->num_tx_queues))
+ ;
+ else if (txq > real_num)
+ dev->real_num_tx_queues = txq;
+ else if (txq < real_num) {
+ dev->real_num_tx_queues = txq;
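+ /* when shrinking, reset the qdiscs on the queues being removed */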
+ for (i = txq; i < dev->num_tx_queues; i++) {
+ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ if (qdisc) {
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+ }
+ }
+}
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#endif /* HAVE_TX_MQ */
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+static const u32 _kc_flags_dup_features =
+ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
+
+u32 _kc_ethtool_op_get_flags(struct net_device *dev)
+{
+ return dev->features & _kc_flags_dup_features;
+}
+
+int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
+{
+ if (data & ~supported)
+ return -EINVAL;
+
+ dev->features = ((dev->features & ~_kc_flags_dup_features) |
+ (data & _kc_flags_dup_features));
+ return 0;
+}
+#endif /* < 2.6.36 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+u8 _kc_netdev_get_num_tc(struct net_device *dev)
+{
+ struct adapter_struct *kc_adapter = netdev_priv(dev);
+ if (kc_adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ return kc_adapter->tc;
+ else
+ return 0;
+}
+
+u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up)
+{
+ struct adapter_struct *kc_adapter = netdev_priv(dev);
+ int tc;
+ u8 map;
+
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
+ map = kc_adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
+
+ if (map & (1 << up))
+ return tc;
+ }
+
+ return 0;
+}
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+#endif /* < 2.6.39 */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h
new file mode 100755
index 00000000..e327d659
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h
@@ -0,0 +1,3143 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+/* NAPI enable/disable flags here */
+/* enable NAPI for ixgbe by default */
+#undef CONFIG_IXGBE_NAPI
+#define CONFIG_IXGBE_NAPI
+#define NAPI
+#ifdef CONFIG_IXGBE_NAPI
+#undef NAPI
+#define NAPI
+#endif /* CONFIG_IXGBE_NAPI */
+#ifdef IXGBE_NAPI
+#undef NAPI
+#define NAPI
+#endif /* IXGBE_NAPI */
+#ifdef IXGBE_NO_NAPI
+#undef NAPI
+#endif /* IXGBE_NO_NAPI */
+
+#define adapter_struct ixgbe_adapter
+#define adapter_q_vector ixgbe_q_vector
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#ifndef CONFIG_IXGBE_NAPI
+#define CONFIG_IXGBE_NAPI
+#endif
+#else
+#undef CONFIG_IXGBE_NAPI
+#endif /* NAPI */
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+#define CONFIG_IXGBE_DISABLE_PACKET_SPLIT
+#endif
+#endif /* DISABLE_PACKET_SPLIT */
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+struct msix_entry {
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
+#endif
+#undef pci_enable_msi
+#define pci_enable_msi(a) -ENOTSUPP
+#undef pci_disable_msi
+#define pci_disable_msi(a) do {} while (0)
+#undef pci_enable_msix
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#undef pci_disable_msix
+#define pci_disable_msix(a) do {} while (0)
+#define msi_remove_pci_irq_vectors(a) do {} while (0)
+#endif /* CONFIG_PCI_MSI */
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#else
+#define _Bool char
+#endif
+
+/* kernels less than 2.4.14 don't have this */
+#ifndef ETH_P_8021Q
+#define ETH_P_8021Q 0x8100
+#endif
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef PCIE_LINK_STATE_L0S
+#define PCIE_LINK_STATE_L0S 1
+#endif
+#ifndef PCIE_LINK_STATE_L1
+#define PCIE_LINK_STATE_L1 2
+#endif
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#define free_netdev(x) kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* if we do not have the infrastructure to detect whether skb_header is cloned,
+   just return false in all cases */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef NETIF_F_GRO
+#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
+ vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
+#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
+#endif
+
+#ifndef NETIF_F_SCTP_CSUM
+#define NETIF_F_SCTP_CSUM 0
+#endif
+
+#ifndef NETIF_F_LRO
+#define NETIF_F_LRO (1 << 15)
+#endif
+
+#ifndef NETIF_F_NTUPLE
+#define NETIF_F_NTUPLE (1 << 27)
+#endif
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef MII_RESV1
+#define MII_RESV1 0x17 /* Reserved... */
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef node_online
+#define node_online(node) ((node) == 0)
+#endif
+
+#ifndef num_online_cpus
+#define num_online_cpus() smp_num_cpus
+#endif
+
+#ifndef cpu_online
+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef DECLARE_BITMAP
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#if !defined(IXGBE_DCA) && !defined(IGB_DCA)
+#define dca_get_tag(b) 0
+#define dca_add_requester(a) -1
+#define dca_remove_requester(b) do { } while(0)
+#define DCA_PROVIDER_ADD 0x0001
+#define DCA_PROVIDER_REMOVE 0x0002
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a,b) dca_get_tag(b)
+#endif
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#if defined(__i386__) || defined(__x86_64__)
+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+#endif
+
+/* taken from 2.6.24 definition in linux/kernel.h */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
+#endif
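+/* e.g. IS_ALIGNED(0x1000, 256) evaluates to 1 and IS_ALIGNED(9, 4) to 0 */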
+
+#ifndef NETIF_F_HW_VLAN_TX
+struct _kc_vlan_ethhdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+#define vlan_ethhdr _kc_vlan_ethhdr
+struct _kc_vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+#define vlan_hdr _kc_vlan_hdr
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#define vlan_tx_tag_present(_skb) 0
+#define vlan_tx_tag_get(_skb) 0
+#endif
+#endif
+
+#ifndef VLAN_PRIO_SHIFT
+#define VLAN_PRIO_SHIFT 13
+#endif
+
+
+#ifndef __GFP_COLD
+#define __GFP_COLD 0
+#endif
+
+/*****************************************************************************/
+/* Installations with an ethtool version that lacks eeprom, adapter id, or
+ * statistics support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+ u32 cmd;
+ char driver[32];
+ char version[32];
+ char fw_version[32];
+ char bus_info[32];
+ char reserved1[32];
+ char reserved2[16];
+ u32 n_stats;
+ u32 testinfo_len;
+ u32 eedump_len;
+ u32 regdump_len;
+};
+
+struct ethtool_stats {
+ u32 cmd;
+ u32 n_stats;
+ u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+ ETH_SS_TEST = 0,
+ ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+ u32 cmd; /* ETHTOOL_GSTRINGS */
+ u32 string_set; /* string set id, e.g. ETH_SS_TEST, etc. */
+ u32 len; /* number of strings in the string set */
+ u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+ ETH_TEST_FL_OFFLINE = (1 << 0),
+ ETH_TEST_FL_FAILED = (1 << 1),
+};
+struct ethtool_test {
+ u32 cmd;
+ u32 flags;
+ u32 reserved;
+ u32 len;
+ u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+ u32 cmd;
+ u32 magic;
+ u32 offset;
+ u32 len;
+ u8 data[0];
+};
+
+struct ethtool_value {
+ u32 cmd;
+ u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GWOL
+#define ETHTOOL_GWOL 0x5
+#define ETHTOOL_SWOL 0x6
+#define SOPASS_MAX 6
+struct ethtool_wolinfo {
+ u32 cmd;
+ u32 supported;
+ u32 wolopts;
+ u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
+};
+#endif /* ETHTOOL_GWOL */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+ u32 cmd;
+ u32 version; /* driver-specific, indicates different chips/revs */
+ u32 len; /* bytes */
+ u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+ u32 cmd; /* ETHTOOL_{G,S}COALESCE */
+
+ /* How many usecs to delay an RX interrupt after
+ * a packet arrives. If 0, only rx_max_coalesced_frames
+ * is used.
+ */
+ u32 rx_coalesce_usecs;
+
+ /* How many packets to delay an RX interrupt after
+ * a packet arrives. If 0, only rx_coalesce_usecs is
+ * used. It is illegal to set both usecs and max frames
+ * to zero as this would cause RX interrupts to never be
+ * generated.
+ */
+ u32 rx_max_coalesced_frames;
+
+ /* Same as above two parameters, except that these values
+ * apply while an IRQ is being serviced by the host. Not
+ * all cards support this feature and the values are ignored
+ * in that case.
+ */
+ u32 rx_coalesce_usecs_irq;
+ u32 rx_max_coalesced_frames_irq;
+
+ /* How many usecs to delay a TX interrupt after
+ * a packet is sent. If 0, only tx_max_coalesced_frames
+ * is used.
+ */
+ u32 tx_coalesce_usecs;
+
+ /* How many packets to delay a TX interrupt after
+ * a packet is sent. If 0, only tx_coalesce_usecs is
+ * used. It is illegal to set both usecs and max frames
+ * to zero as this would cause TX interrupts to never be
+ * generated.
+ */
+ u32 tx_max_coalesced_frames;
+
+ /* Same as above two parameters, except that these values
+ * apply while an IRQ is being serviced by the host. Not
+ * all cards support this feature and the values are ignored
+ * in that case.
+ */
+ u32 tx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames_irq;
+
+ /* How many usecs to delay in-memory statistics
+ * block updates. Some drivers do not have an in-memory
+ * statistic block, and in such cases this value is ignored.
+ * This value must not be zero.
+ */
+ u32 stats_block_coalesce_usecs;
+
+ /* Adaptive RX/TX coalescing is an algorithm implemented by
+ * some drivers to improve latency under low packet rates and
+ * improve throughput under high packet rates. Some drivers
+ * only implement one of RX or TX adaptive coalescing. Anything
+ * not implemented by the driver causes these values to be
+ * silently ignored.
+ */
+ u32 use_adaptive_rx_coalesce;
+ u32 use_adaptive_tx_coalesce;
+
+ /* When the packet rate (measured in packets per second)
+ * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+ * used.
+ */
+ u32 pkt_rate_low;
+ u32 rx_coalesce_usecs_low;
+ u32 rx_max_coalesced_frames_low;
+ u32 tx_coalesce_usecs_low;
+ u32 tx_max_coalesced_frames_low;
+
+ /* When the packet rate is below pkt_rate_high but above
+ * pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
+
+ /* When the packet rate (measured in packets per second)
+ * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+ * used.
+ */
+ u32 pkt_rate_high;
+ u32 rx_coalesce_usecs_high;
+ u32 rx_max_coalesced_frames_high;
+ u32 tx_coalesce_usecs_high;
+ u32 tx_max_coalesced_frames_high;
+
+ /* How often to do adaptive coalescing packet rate sampling,
+ * measured in seconds. Must not be zero.
+ */
+ u32 rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
+
+ /* Read only attributes. These indicate the maximum number
+ * of pending RX/TX ring entries the driver will allow the
+ * user to set.
+ */
+ u32 rx_max_pending;
+ u32 rx_mini_max_pending;
+ u32 rx_jumbo_max_pending;
+ u32 tx_max_pending;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ u32 rx_pending;
+ u32 rx_mini_pending;
+ u32 rx_jumbo_pending;
+ u32 tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
+
+ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+ * being true) the user may set 'autoneg' here non-zero to have the
+ * pause parameters be auto-negotiated too. In such a case, the
+ * {rx,tx}_pause values below determine what capabilities are
+ * advertised.
+ *
+ * If 'autoneg' is zero or the link is not being auto-negotiated,
+ * then {rx,tx}_pause force the driver to use/not-use pause
+ * flow control.
+ */
+ u32 autoneg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
+ * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
+ * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN 32
+#endif
+
+#ifndef RHEL_RELEASE_CODE
+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
+#define RHEL_RELEASE_CODE 0
+#endif
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+#ifndef AX_RELEASE_CODE
+#define AX_RELEASE_CODE 0
+#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
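+/* e.g. RHEL_RELEASE_VERSION(5,4) packs to 0x0504, so the distro checks later
+ * in this header reduce to plain integer comparisons against the matching
+ * *_RELEASE_CODE value */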
+
+/* SuSE version macro is the same as Linux kernel version */
+#ifndef SLE_VERSION
+#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
+#endif
+#ifndef SLE_VERSION_CODE
+#ifdef CONFIG_SUSE_KERNEL
+/* SLES11 GA is 2.6.27 based */
+#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
+#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
+#else
+#define SLE_VERSION_CODE 0
+#endif
+#else /* CONFIG_SUSE_KERNEL */
+#define SLE_VERSION_CODE 0
+#endif /* CONFIG_SUSE_KERNEL */
+#endif /* SLE_VERSION_CODE */
+
+#ifdef __KLOCWORK__
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+#endif /* __KLOCWORK__ */
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+extern void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+extern int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+/* Basic mode status register. */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+ ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
+#endif
+
+/*****************************************************************************/
+/* 2.4.6 => 2.4.3 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+#ifndef pci_set_power_state
+#define pci_set_power_state _kc_pci_set_power_state
+extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
+#endif
+
+#ifndef pci_enable_wake
+#define pci_enable_wake _kc_pci_enable_wake
+extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
+#endif
+
+#ifndef pci_disable_device
+#define pci_disable_device _kc_pci_disable_device
+extern void _kc_pci_disable_device(struct pci_dev *pdev);
+#endif
+
+/* PCI PM entry point syntax changed, so don't support suspend/resume */
+#undef CONFIG_PM
+
+#endif /* 2.4.6 => 2.4.3 */
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+ PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+ ~PCI_COMMAND_INVALIDATE);
+#endif
+
+/*****************************************************************************/
+/* 2.4.10 => 2.4.9 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
+
+/**************************************/
+/* MODULE API */
+
+#ifndef MODULE_LICENSE
+ #define MODULE_LICENSE(X)
+#endif
+
+/**************************************/
+/* OTHER */
+
+#undef min
+#define min(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x < _y ? _x : _y; })
+
+#undef max
+#define max(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x > _y ? _x : _y; })
+
+#define min_t(type,x,y) ({ \
+ type _x = (x); \
+ type _y = (y); \
+ _x < _y ? _x : _y; })
+
+#define max_t(type,x,y) ({ \
+ type _x = (x); \
+ type _y = (y); \
+ _x > _y ? _x : _y; })
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+#endif
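+/* the _safe variant caches the next pointer in 'n', so the current entry may
+ * be deleted from the list inside the loop body */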
+
+#ifndef ____cacheline_aligned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_aligned_in_smp ____cacheline_aligned
+#else
+#define ____cacheline_aligned_in_smp
+#endif /* CONFIG_SMP */
+#endif
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
+#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
+extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
+#else /* 2.4.8 => 2.4.9 */
+extern int snprintf(char * buf, size_t size, const char *fmt, ...);
+extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#endif
+#endif /* 2.4.10 => 2.4.9 */
+
+
+/*****************************************************************************/
+/* 2.4.12 => 2.4.10 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
+#ifndef HAVE_NETIF_MSG
+#define HAVE_NETIF_MSG 1
+enum {
+ NETIF_MSG_DRV = 0x0001,
+ NETIF_MSG_PROBE = 0x0002,
+ NETIF_MSG_LINK = 0x0004,
+ NETIF_MSG_TIMER = 0x0008,
+ NETIF_MSG_IFDOWN = 0x0010,
+ NETIF_MSG_IFUP = 0x0020,
+ NETIF_MSG_RX_ERR = 0x0040,
+ NETIF_MSG_TX_ERR = 0x0080,
+ NETIF_MSG_TX_QUEUED = 0x0100,
+ NETIF_MSG_INTR = 0x0200,
+ NETIF_MSG_TX_DONE = 0x0400,
+ NETIF_MSG_RX_STATUS = 0x0800,
+ NETIF_MSG_PKTDATA = 0x1000,
+ NETIF_MSG_HW = 0x2000,
+ NETIF_MSG_WOL = 0x4000,
+};
+
+#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
+#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
+#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
+#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
+#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
+#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
+#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
+#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
+#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
+#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
+#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
+#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
+#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
+#endif /* !HAVE_NETIF_MSG */
+#endif /* 2.4.12 => 2.4.10 */
+
+/*****************************************************************************/
+/* 2.4.13 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#ifndef virt_to_page
+ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
+#endif
+
+#ifndef pci_map_page
+#define pci_map_page _kc_pci_map_page
+extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
+#endif
+
+#ifndef pci_unmap_page
+#define pci_unmap_page _kc_pci_unmap_page
+extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
+#endif
+
+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
+
+#undef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0xffffffff
+#undef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffff
+
+/**************************************/
+/* OTHER */
+
+#ifndef cpu_relax
+#define cpu_relax() rep_nop()
+#endif
+
+struct vlan_ethhdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ unsigned short h_vlan_proto;
+ unsigned short h_vlan_TCI;
+ unsigned short h_vlan_encapsulated_proto;
+};
+#endif /* 2.4.13 => 2.4.12 */
+
+/*****************************************************************************/
+/* 2.4.17 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
+
+#ifndef __devexit_p
+ #define __devexit_p(x) &(x)
+#endif
+
+#endif /* 2.4.17 => 2.4.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
+#define NETIF_MSG_HW 0x2000
+#define NETIF_MSG_WOL 0x4000
+
+#ifndef netif_msg_hw
+#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
+#endif
+#ifndef netif_msg_wol
+#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
+#endif
+#endif /* 2.4.18 */
+
+/*****************************************************************************/
+
+/*****************************************************************************/
+/* 2.4.20 => 2.4.19 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
+
+/* we won't support NAPI on less than 2.4.20 */
+#ifdef NAPI
+#undef NAPI
+#undef CONFIG_IXGBE_NAPI
+#endif
+
+#endif /* 2.4.20 => 2.4.19 */
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#define pci_name(x) ((x)->slot_name)
+#endif
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#ifndef IXGBE_NO_LRO
+/* Don't enable LRO for these legacy kernels */
+#define IXGBE_NO_LRO
+#endif
+#endif
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* 2.4.23 => 2.4.22 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
+/*****************************************************************************/
+#ifdef NAPI
+#ifndef netif_poll_disable
+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
+static inline void _kc_netif_poll_disable(struct net_device *netdev)
+{
+ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
+ /* No hurry */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(1);
+ }
+}
+#endif
+#ifndef netif_poll_enable
+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
+static inline void _kc_netif_poll_enable(struct net_device *netdev)
+{
+ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
+}
+#endif
+#endif /* NAPI */
+#ifndef netif_tx_disable
+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
+static inline void _kc_netif_tx_disable(struct net_device *dev)
+{
+ spin_lock_bh(&dev->xmit_lock);
+ netif_stop_queue(dev);
+ spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+#else /* 2.4.23 => 2.4.22 */
+#define HAVE_SCTP
+#endif /* 2.4.23 => 2.4.22 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
+#define ETHTOOL_OPS_COMPAT
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.5.71 => 2.4.x */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
+#define sk_protocol protocol
+#define pci_get_device pci_find_device
+#endif /* 2.5.71 => 2.4.x */
+
+/*****************************************************************************/
+/* < 2.4.27, or 2.6.0 <= version < 2.6.5 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
+
+#ifndef netif_msg_init
+#define netif_msg_init _kc_netif_msg_init
+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+ /* use default */
+ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+ return default_msg_enable_bits;
+ if (debug_value == 0) /* no output */
+ return 0;
+ /* set low N bits */
+ return (1 << debug_value) -1;
+}
+#endif
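+/* e.g. netif_msg_init(3, NETIF_MSG_DRV | NETIF_MSG_PROBE) returns 0x7 (the
+ * low three bits), while a negative debug value returns the supplied
+ * defaults unchanged */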
+
+#endif /* < 2.4.27, or 2.6.0 <= version < 2.6.5 */
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#include <linux/rtnetlink.h>
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+/*
+ * Most of the dma compat code is copied/modified from the 2.4.37
+ * /include/linux/libata-compat.h header file
+ */
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+enum dma_data_direction {
+ DMA_BIDIRECTIONAL = 0,
+ DMA_TO_DEVICE = 1,
+ DMA_FROM_DEVICE = 2,
+ DMA_NONE = 3,
+};
+
+struct device {
+ struct pci_dev pdev;
+};
+
+static inline struct pci_dev *to_pci_dev (struct device *dev)
+{
+ return (struct pci_dev *) dev;
+}
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+ return (struct device *) pdev;
+}
+
+#define pdev_printk(lvl, pdev, fmt, args...) \
+ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
+#define dev_err(dev, fmt, args...) \
+ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
+#define dev_info(dev, fmt, args...) \
+ pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
+#define dev_warn(dev, fmt, args...) \
+ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+
+/* NOTE: dangerous! we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+ pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+ pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+ pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_sync_single(dev,a,b,c) \
+ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+ pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+ pci_set_dma_mask(to_pci_dev(dev),(mask))
+
+/* hlist_* code - doubly linked lists */
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = NULL;
+ n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each_entry(tpos, pos, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ n = pos->next; 1; }) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = n)
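+
+/* pprev in these open-coded hlist helpers points at the previous node's
+ * 'next' field (or at the head), which is what lets hlist_del() unlink a
+ * node without walking the list from its head */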
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+ return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+static inline void _kc_synchronize_irq(void)
+{
+ synchronize_irq();
+}
+#undef synchronize_irq
+#define synchronize_irq(X) _kc_synchronize_irq()
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.23 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu() do { } while(0)
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
+
+#define dma_set_coherent_mask(dev,mask) 1
+
+#undef dev_put
+#define dev_put(dev) __dev_put(dev)
+
+#ifndef skb_fill_page_desc
+#define skb_fill_page_desc _kc_skb_fill_page_desc
+extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
+#endif
+
+#undef ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
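+/* power-of-two round-up, e.g. ALIGN(5, 4) == 8 and ALIGN(8, 4) == 8 */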
+
+#ifndef page_count
+#define page_count(p) atomic_read(&(p)->count)
+#endif
+
+#ifdef MAX_NUMNODES
+#undef MAX_NUMNODES
+#endif
+#define MAX_NUMNODES 1
+
+/* find_first_bit and find_next_bit are not defined for most
+ * 2.4 kernels (except for the redhat 2.4.21 kernels)
+ */
+#include <linux/bitops.h>
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#undef find_next_bit
+#define find_next_bit _kc_find_next_bit
+extern unsigned long _kc_find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset);
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
+
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+ if (strchr(dev->name, '%'))
+ return "(unregistered net_device)";
+ return dev->name;
+}
+#define netdev_name(netdev) _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#ifndef strlcpy
+#define strlcpy _kc_strlcpy
+extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
+#endif /* strlcpy */
+
+#endif /* 2.6.0 => 2.5.28 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define dma_sync_single_for_cpu dma_sync_single
+#define dma_sync_single_for_device dma_sync_single
+#define dma_sync_single_range_for_cpu dma_sync_single_range
+#define dma_sync_single_range_for_device dma_sync_single_range
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return dma_addr == 0;
+}
+#endif
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
+#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+#define random_ether_addr _kc_random_ether_addr
+static inline void _kc_random_ether_addr(u8 *addr)
+{
+ get_random_bytes(addr, ETH_ALEN);
+ addr[0] &= 0xfe; /* clear multicast */
+ addr[0] |= 0x02; /* set local assignment */
+}
+#define page_to_nid(x) 0
+
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+ return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+
+#ifndef __force
+#define __force
+#endif
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout((x * HZ)/1000 + 2); \
+ } while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include <net/dsfield.h>
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC 1000L
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+ return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return m * (HZ / MSEC_PER_SEC);
+#else
+ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+ while (timeout && !signal_pending(current)) {
+ __set_current_state(TASK_INTERRUPTIBLE);
+ timeout = schedule_timeout(timeout);
+ }
+ return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#endif
+#ifndef __be32
+#define __be32 u32
+#endif
+#ifndef __be64
+#define __be64 u64
+#endif
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct vlan_ethhdr *)skb->mac.raw;
+}
+
+/* Wake-On-Lan options. */
+#define WAKE_PHY (1 << 0)
+#define WAKE_UCAST (1 << 1)
+#define WAKE_MCAST (1 << 2)
+#define WAKE_BCAST (1 << 3)
+#define WAKE_ARP (1 << 4)
+#define WAKE_MAGIC (1 << 5)
+#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
+
+#define skb_header_pointer _kc_skb_header_pointer
+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
+ int offset, int len, void *buffer)
+{
+ int hlen = skb_headlen(skb);
+
+ if (hlen - offset >= len)
+ return skb->data + offset;
+
+#ifdef MAX_SKB_FRAGS
+ if (skb_copy_bits(skb, offset, buffer, len) < 0)
+ return NULL;
+
+ return buffer;
+#else
+ return NULL;
+#endif
+
+}
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm) \
+ static struct kparam_array __param_arr_##name \
+ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+ sizeof(array[0]), array }; \
+ module_param_call(name, param_array_set, param_array_get, \
+ &__param_arr_##name, perm)
+#endif /* module_param_array_named */
+/*
+ * num_online_nodes() is broken for all < 2.6.10 kernels.  This is needed to
+ * support the Node module parameter of ixgbe.
+ */
+#undef num_online_nodes
+#define num_online_nodes(n) 1
+extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
+#undef node_online_map
+#define node_online_map _kcompat_node_online_map
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
+#define PCI_D0 0
+#define PCI_D1 1
+#define PCI_D2 2
+#define PCI_D3hot 3
+#define PCI_D3cold 4
+typedef int pci_power_t;
+#define pci_choose_state(pdev,state) state
+#define PMSG_SUSPEND 3
+#define PCI_EXP_LNKCTL 16
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#define KC_USEC_PER_SEC 1000000L
+#define usecs_to_jiffies _kc_usecs_to_jiffies
+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+ return (KC_USEC_PER_SEC / HZ) * j;
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
+#else
+ return (j * KC_USEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
+{
+ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+ return m * (HZ / KC_USEC_PER_SEC);
+#else
+ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
+#endif
+}
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#ifndef is_zero_ether_addr
+#define is_zero_ether_addr _kc_is_zero_ether_addr
+static inline int _kc_is_zero_ether_addr(const u8 *addr)
+{
+ return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+#endif /* is_zero_ether_addr */
+#ifndef is_multicast_ether_addr
+#define is_multicast_ether_addr _kc_is_multicast_ether_addr
+static inline int _kc_is_multicast_ether_addr(const u8 *addr)
+{
+ return addr[0] & 0x01;
+}
+#endif /* is_multicast_ether_addr */
+#endif /* < 2.6.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+#ifndef kstrdup
+#define kstrdup _kc_kstrdup
+extern char *_kc_kstrdup(const char *s, unsigned int gfp);
+#endif
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS 0x0f /* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+
+#define ADVERTISED_Pause (1 << 13)
+#define ADVERTISED_Asym_Pause (1 << 14)
+
+#if (!(RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
+#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
+#define gfp_t unsigned
+#else
+typedef unsigned gfp_t;
+#endif
+#endif /* !RHEL4.3->RHEL5.0 */
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
+#ifdef CONFIG_X86_64
+#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
+ dma_sync_single_for_cpu(dev, dma_handle, size, dir)
+#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
+ dma_sync_single_for_device(dev, dma_handle, size, dir)
+#endif
+#endif
+#endif /* < 2.6.14 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
+#ifndef vmalloc_node
+#define vmalloc_node(a,b) vmalloc(a)
+#endif /* vmalloc_node*/
+
+#define setup_timer(_timer, _function, _data) \
+do { \
+ (_timer)->function = _function; \
+ (_timer)->data = _data; \
+ init_timer(_timer); \
+} while (0)
+#ifndef device_can_wakeup
+#define device_can_wakeup(dev) (1)
+#endif
+#ifndef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) do{}while(0)
+#endif
+#ifndef device_init_wakeup
+#define device_init_wakeup(dev,val) do {} while (0)
+#endif
+static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
+{
+ const u16 *a = (const u16 *) addr1;
+ const u16 *b = (const u16 *) addr2;
+
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+}
+#undef compare_ether_addr
+#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
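+/* like the mainline helper, this returns 0 when the two MAC addresses are
+ * equal and non-zero otherwise; the u16 loads assume both addresses are
+ * 16-bit aligned */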
+#endif /* < 2.6.15 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
+#undef DEFINE_MUTEX
+#define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
+#define mutex_lock(x) down_interruptible(x)
+#define mutex_unlock(x) up(x)
+
+#ifndef ____cacheline_internodealigned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
+#else
+#define ____cacheline_internodealigned_in_smp
+#endif /* CONFIG_SMP */
+#endif /* ____cacheline_internodealigned_in_smp */
+#undef HAVE_PCI_ERS
+#else /* 2.6.16 and above */
+#undef HAVE_PCI_ERS
+#define HAVE_PCI_ERS
+#endif /* < 2.6.16 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef first_online_node
+#define first_online_node 0
+#endif
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD 16
+#endif
+#endif /* < 2.6.17 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef IRQF_PROBE_SHARED
+#ifdef SA_PROBEIRQ
+#define IRQF_PROBE_SHARED SA_PROBEIRQ
+#else
+#define IRQF_PROBE_SHARED 0
+#endif
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef FIELD_SIZEOF
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#endif
+
+#ifndef skb_is_gso
+#ifdef NETIF_F_TSO
+#define skb_is_gso _kc_skb_is_gso
+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_size;
+}
+#else
+#define skb_is_gso(a) 0
+#endif
+#endif
+
+#ifndef resource_size_t
+#define resource_size_t unsigned long
+#endif
+
+#ifdef skb_pad
+#undef skb_pad
+#endif
+#define skb_pad(x,y) _kc_skb_pad(x, y)
+int _kc_skb_pad(struct sk_buff *skb, int pad);
+#ifdef skb_padto
+#undef skb_padto
+#endif
+#define skb_padto(x,y) _kc_skb_padto(x, y)
+static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+ if(likely(size >= len))
+ return 0;
+ return _kc_skb_pad(skb, len - size);
+}
+
+#ifndef DECLARE_PCI_UNMAP_ADDR
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+ dma_addr_t ADDR_NAME
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+ u32 LEN_NAME
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+ ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+ ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+ (((PTR)->LEN_NAME) = (VAL))
+#endif /* DECLARE_PCI_UNMAP_ADDR */
+#endif /* < 2.6.18 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#if (!((RHEL_RELEASE_CODE && \
+ ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))) || \
+ (AX_RELEASE_CODE && AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#undef CONFIG_FCOE
+#undef CONFIG_FCOE_MODULE
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#endif /* >= 2.5.x */
+{
+ irq_handler_t new_handler = (irq_handler_t) handler;
+ return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
+
+#define irq_handler_t new_handler_t
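+/* The wrapper above lets driver code use the two-argument IRQ handler
+ * signature on older kernels: the handler is cast to the kernel's legacy
+ * three-argument irq_handler_t, and the extra pt_regs argument is simply
+ * never referenced by the handler. */
+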
+/* pci_restore_state and pci_save_state handle MSI/PCIE from 2.6.19 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan() do {} while(0)
+#undef pci_save_state
+extern int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+extern void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+extern void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
+{
+ return 0;
+}
+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
+
+extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#include <linux/string.h>
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->pending = 0; \
+ (_work)->func = (void (*)(void *))_func; \
+ (_work)->data = _work; \
+ init_timer(&(_work)->timer); \
+} while (0)
+#endif
+
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev) \
+ PCI_VENDOR_ID_##ven, (dev), \
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#define HAVE_EARLY_VMALLOC_NODE
+#define dev_to_node(dev) -1
+#undef set_dev_node
+/* self-assign b to silence the unused-variable compiler warning */
+#define set_dev_node(a, b) do { (b) = (b); } while(0)
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+#endif
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+static inline __wsum csum_unfold(__sum16 n)
+{
+ return (__force __wsum)n;
+}
+#endif
+
+#else /* < 2.6.20 */
+#define HAVE_DEVICE_NUMA_NODE
+#endif /* < 2.6.20 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#define NETDEV_CLASS_DEV
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev) \
+ do { \
+ if (vg) vg->vlan_devices[id] = dev; \
+ } while (0)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define pci_channel_offline(pdev) (pdev->error_state && \
+ pdev->error_state != pci_channel_io_normal)
+#define pci_request_selected_regions(pdev, bars, name) \
+ pci_request_regions(pdev, name)
+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_reset_tail_pointer(skb) \
+ do { \
+ skb->tail = skb->data; \
+ } while (0)
+#define skb_copy_to_linear_data(skb, from, len) \
+ memcpy(skb->data, from, len)
+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
+ memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#define cancel_work_sync(x) flush_scheduled_work()
+#ifndef udp_hdr
+#define udp_hdr _udp_hdr
+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
+{
+ return (struct udphdr *)skb_transport_header(skb);
+}
+#endif
+
+#ifdef cpu_to_be16
+#undef cpu_to_be16
+#endif
+#define cpu_to_be16(x) __constant_htons(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
+enum {
+ DUMP_PREFIX_NONE,
+ DUMP_PREFIX_ADDRESS,
+ DUMP_PREFIX_OFFSET
+};
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
+#ifndef hex_asc
+#define hex_asc(x) "0123456789abcdef"[x]
+#endif
+#include <linux/ctype.h>
+extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
+ _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
+#else /* 2.6.22 */
+#define ETH_TYPE_TRANS_SETS_DEV
+#define HAVE_NETDEV_STATS_IN_NETDEV
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#define netif_subqueue_stopped(_a, _b) 0
+#ifndef PTR_ALIGN
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#endif
+
+#ifndef CONFIG_PM_SLEEP
+#define CONFIG_PM_SLEEP CONFIG_PM
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
+#define HAVE_ETHTOOL_GET_PERM_ADDR
+#endif /* 2.6.14 through 2.6.22 */
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifndef ETH_FLAG_LRO
+#define ETH_FLAG_LRO NETIF_F_LRO
+#endif
+
+/* if GRO is supported then the napi struct must already exist */
+#ifndef NETIF_F_GRO
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+ /* used to look up the real NAPI polling routine */
+ int (*poll)(struct napi_struct *, int);
+ struct net_device *dev;
+ int weight;
+};
+#endif
+
+#ifdef NAPI
+extern int __kc_adapter_clean(struct net_device *, int *);
+extern struct net_device *napi_to_poll_dev(struct napi_struct *napi);
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+ do { \
+ struct napi_struct *__napi = (_napi); \
+ struct net_device *poll_dev = napi_to_poll_dev(__napi); \
+ poll_dev->poll = &(__kc_adapter_clean); \
+ poll_dev->priv = (_napi); \
+ poll_dev->weight = (_weight); \
+ set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
+ set_bit(__LINK_STATE_START, &poll_dev->state);\
+ dev_hold(poll_dev); \
+ __napi->poll = &(_poll); \
+ __napi->weight = (_weight); \
+ __napi->dev = (_netdev); \
+ } while (0)
+#define netif_napi_del(_napi) \
+ do { \
+ struct net_device *poll_dev = napi_to_poll_dev(_napi); \
+ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
+ dev_put(poll_dev); \
+ memset(poll_dev, 0, sizeof(struct net_device));\
+ } while (0)
+#define napi_schedule_prep(_napi) \
+ (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
+#define napi_schedule(_napi) \
+ do { \
+ if (napi_schedule_prep(_napi)) \
+ __netif_rx_schedule(napi_to_poll_dev(_napi)); \
+ } while (0)
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+#ifndef NETIF_F_GRO
+#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
+#else
+#define napi_complete(_napi) \
+ do { \
+ napi_gro_flush(_napi); \
+ netif_rx_complete(napi_to_poll_dev(_napi)); \
+ } while (0)
+#endif /* NETIF_F_GRO */
+#else /* NAPI */
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+ do { \
+ struct napi_struct *__napi = _napi; \
+ _netdev->poll = &(_poll); \
+ _netdev->weight = (_weight); \
+ __napi->poll = &(_poll); \
+ __napi->weight = (_weight); \
+ __napi->dev = (_netdev); \
+ } while (0)
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
+#endif
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT ""
+#endif
+#else /* < 2.6.24 */
+#define HAVE_ETHTOOL_GET_SSET_COUNT
+#define HAVE_NETDEV_NAPI_LIST
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#include <linux/pm_qos_params.h>
+#else /* >= 3.2.0 */
+#include <linux/pm_qos.h>
+#endif /* else >= 3.2.0 */
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY 1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+ set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+ remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+ modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE -1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
+ if (value != PM_QOS_DEFAULT_VALUE) { \
+ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+ pci_name(adapter->pdev)); \
+ } \
+}
+
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
+#endif /* DEFINE_PCI_DEVICE_TABLE */
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#ifndef IXGBE_PROCFS
+#define IXGBE_PROCFS
+#endif /* IXGBE_PROCFS */
+#endif /* >= 2.6.0 */
+
+
+#else /* < 2.6.25 */
+
+#ifndef IXGBE_SYSFS
+#define IXGBE_SYSFS
+#endif /* IXGBE_SYSFS */
+
+
+#endif /* < 2.6.25 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifndef clamp_t
+#define clamp_t(type, val, min, max) ({ \
+ type __val = (val); \
+ type __min = (min); \
+ type __max = (max); \
+ __val = __val < __min ? __min : __val; \
+ __val > __max ? __max : __val; })
+#endif /* clamp_t */
+#ifdef NETIF_F_TSO
+#ifdef NETIF_F_TSO6
+#define netif_set_gso_max_size(_netdev, size) \
+ do { \
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { \
+ _netdev->features &= ~NETIF_F_TSO; \
+ _netdev->features &= ~NETIF_F_TSO6; \
+ } else { \
+ _netdev->features |= NETIF_F_TSO; \
+ _netdev->features |= NETIF_F_TSO6; \
+ } \
+ } while (0)
+#else /* NETIF_F_TSO6 */
+#define netif_set_gso_max_size(_netdev, size) \
+ do { \
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
+ _netdev->features &= ~NETIF_F_TSO; \
+ else \
+ _netdev->features |= NETIF_F_TSO; \
+ } while (0)
+#endif /* NETIF_F_TSO6 */
+#else
+#define netif_set_gso_max_size(_netdev, size) do {} while (0)
+#endif /* NETIF_F_TSO */
+#undef kzalloc_node
+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
+
+extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
+#else /* < 2.6.26 */
+#include <linux/pci-aspm.h>
+#define HAVE_NETDEV_VLAN_FEATURES
+#endif /* < 2.6.26 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+ __u32 speed)
+{
+ ep->speed = (__u16)speed;
+ /* ep->speed_hi = (__u16)(speed >> 16); */
+}
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+
+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
+{
+ /* no speed_hi before 2.6.27, and probably no need for it yet */
+ return (__u32)ep->speed;
+}
+#define ethtool_cmd_speed _kc_ethtool_cmd_speed
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
+#define ANCIENT_PM 1
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
+ defined(CONFIG_PM_SLEEP))
+#define NEWER_PM 1
+#endif
+#if defined(ANCIENT_PM) || defined(NEWER_PM)
+#undef device_set_wakeup_enable
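+/*
+ * PCI PM capability: bits 15:11 of the PMC register form the PME_Support
+ * field, so a non-zero (pmc >> 11) means the function can signal PME
+ * (i.e. wake the system) from at least one power state.
+ */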
+#define device_set_wakeup_enable(dev, val) \
+ do { \
+ u16 pmc = 0; \
+ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
+ if (pm) { \
+ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
+ &pmc); \
+ } \
+ (dev)->power.can_wakeup = !!(pmc >> 11); \
+ (dev)->power.should_wakeup = (val && (pmc >> 11)); \
+ } while (0)
+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
+#endif /* 2.6.15 through 2.6.27 */
+#ifndef netif_napi_del
+#define netif_napi_del(_a) do {} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list);
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifdef dma_mapping_error
+#undef dma_mapping_error
+#endif
+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#define HAVE_TX_MQ
+#endif
+
+#ifdef HAVE_TX_MQ
+extern void _kc_netif_tx_stop_all_queues(struct net_device *);
+extern void _kc_netif_tx_wake_all_queues(struct net_device *);
+extern void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev,_qi) do { \
+ if (netif_is_multiqueue((_ndev))) \
+ netif_stop_subqueue((_ndev), (_qi)); \
+ else \
+ netif_stop_queue((_ndev)); \
+ } while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev,_qi) do { \
+ if (netif_is_multiqueue((_ndev))) \
+ netif_start_subqueue((_ndev), (_qi)); \
+ else \
+ netif_start_queue((_ndev)); \
+ } while (0)
+#else /* HAVE_TX_MQ */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#else
+#define netif_tx_start_all_queues(a) do {} while (0)
+#endif
+#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
+#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
+#endif /* HAVE_TX_MQ */
+#ifndef NETIF_F_MULTI_QUEUE
+#define NETIF_F_MULTI_QUEUE 0
+#define netif_is_multiqueue(a) 0
+#define netif_wake_subqueue(a, b)
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef __WARN_printf
+extern void __kc_warn_slowpath(const char *file, const int line,
+ const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
+#endif /* __WARN_printf */
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf(format); \
+ unlikely(__ret_warn_on); \
+})
+#endif /* WARN */
+#else /* < 2.6.27 */
+#define HAVE_TX_MQ
+#define HAVE_NETDEV_SELECT_QUEUE
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
+ pci_resource_len(pdev, bar))
+#define pci_wake_from_d3 _kc_pci_wake_from_d3
+#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
+extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
+extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
+#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
+#ifndef __skb_queue_head_init
+static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
+#endif
+#endif /* < 2.6.28 */
+
+#ifndef skb_add_rx_frag
+#define skb_add_rx_frag _kc_skb_add_rx_frag
+extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+#ifndef swap
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+#endif
+#define pci_request_selected_regions_exclusive(pdev, bars, name) \
+ pci_request_selected_regions(pdev, bars, name)
+#ifndef CONFIG_NR_CPUS
+#define CONFIG_NR_CPUS 1
+#endif /* CONFIG_NR_CPUS */
+#ifndef pcie_aspm_enabled
+#define pcie_aspm_enabled() (1)
+#endif /* pcie_aspm_enabled */
+#else /* < 2.6.29 */
+#ifndef HAVE_NET_DEVICE_OPS
+#define HAVE_NET_DEVICE_OPS
+#endif
+#ifdef CONFIG_DCB
+#define HAVE_PFC_MODE_ENABLE
+#endif /* CONFIG_DCB */
+#endif /* < 2.6.29 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
+#define skb_rx_queue_recorded(a) false
+#define skb_get_rx_queue(a) 0
+#undef CONFIG_FCOE
+#undef CONFIG_FCOE_MODULE
+extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb);
+#define skb_tx_hash(n, s) _kc_skb_tx_hash(n, s)
+#define skb_record_rx_queue(a, b) do {} while (0)
+#ifndef CONFIG_PCI_IOV
+#undef pci_enable_sriov
+#define pci_enable_sriov(a, b) -ENOTSUPP
+#undef pci_disable_sriov
+#define pci_disable_sriov(a) do {} while (0)
+#endif /* CONFIG_PCI_IOV */
+#ifndef pr_cont
+#define pr_cont(fmt, ...) \
+ printk(KERN_CONT fmt, ##__VA_ARGS__)
+#endif /* pr_cont */
+#else
+#define HAVE_ASPM_QUIRKS
+#endif /* < 2.6.30 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
+#define ETH_P_1588 0x88F7
+#define ETH_P_FIP 0x8914
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc_count)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(uclist, dev) \
+ for (uclist = dev->uc_list; uclist; uclist = uclist->next)
+#endif
+#else
+#ifndef HAVE_NETDEV_STORAGE_ADDRESS
+#define HAVE_NETDEV_STORAGE_ADDRESS
+#endif
+#ifndef HAVE_NETDEV_HW_ADDR
+#define HAVE_NETDEV_HW_ADDR
+#endif
+#ifndef HAVE_TRANS_START_IN_QUEUE
+#define HAVE_TRANS_START_IN_QUEUE
+#endif
+#endif /* < 2.6.31 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
+#undef netdev_tx_t
+#define netdev_tx_t int
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef NETIF_F_FCOE_MTU
+#define NETIF_F_FCOE_MTU (1 << 26)
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+
+#ifndef pm_runtime_get_sync
+#define pm_runtime_get_sync(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_put
+#define pm_runtime_put(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_put_sync
+#define pm_runtime_put_sync(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_resume
+#define pm_runtime_resume(dev) do {} while (0)
+#endif
+#ifndef pm_schedule_suspend
+#define pm_schedule_suspend(dev, t) do {} while (0)
+#endif
+#ifndef pm_runtime_set_suspended
+#define pm_runtime_set_suspended(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_disable
+#define pm_runtime_disable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_put_noidle
+#define pm_runtime_put_noidle(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_set_active
+#define pm_runtime_set_active(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_enable
+#define pm_runtime_enable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_get_noresume
+#define pm_runtime_get_noresume(dev) do {} while (0)
+#endif
+#else /* < 2.6.32 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+#define HAVE_NETDEV_OPS_FCOE_ENABLE
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_OPS_GETAPP
+#define HAVE_DCBNL_OPS_GETAPP
+#endif
+#endif /* CONFIG_DCB */
+#include <linux/pm_runtime.h>
+/* IOV bad DMA target workarounds require at least this kernel revision */
+#define HAVE_PCIE_TYPE
+#endif /* < 2.6.32 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#ifndef pci_pcie_cap
+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
+#endif
+#ifndef IPV4_FLOW
+#define IPV4_FLOW 0x10
+#endif /* IPV4_FLOW */
+#ifndef IPV6_FLOW
+#define IPV6_FLOW 0x11
+#endif /* IPV6_FLOW */
+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
+#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* RHEL6 or SLES11 SP1 */
+#ifndef __percpu
+#define __percpu
+#endif /* __percpu */
+#else /* < 2.6.33 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#define HAVE_ETHTOOL_SFP_DISPLAY_PORT
+#endif /* < 2.6.33 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#ifndef ETH_FLAG_NTUPLE
+#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
+#endif
+
+#ifndef netdev_mc_count
+#define netdev_mc_count(dev) ((dev)->mc_count)
+#endif
+#ifndef netdev_mc_empty
+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_mc_addr
+#define netdev_for_each_mc_addr(mclist, dev) \
+ for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#endif
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc.count)
+#endif
+#ifndef netdev_uc_empty
+#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(ha, dev) \
+ list_for_each_entry(ha, &dev->uc.list, list)
+#endif
+#ifndef dma_set_coherent_mask
+#define dma_set_coherent_mask(dev,mask) \
+ pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
+#endif
+#ifndef pci_dev_run_wake
+#define pci_dev_run_wake(pdev) (0)
+#endif
+
+/* netdev logging taken from include/linux/netdevice.h */
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+ if (dev->reg_state != NETREG_REGISTERED)
+ return "(unregistered net_device)";
+ return dev->name;
+}
+#define netdev_name(netdev) _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#undef netdev_printk
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#define netdev_printk(level, netdev, format, args...) \
+do { \
+ struct adapter_struct *kc_adapter = netdev_priv(netdev);\
+ struct pci_dev *pdev = kc_adapter->pdev; \
+ printk("%s %s: " format, level, pci_name(pdev), \
+ ##args); \
+} while(0)
+#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define netdev_printk(level, netdev, format, args...) \
+do { \
+ struct adapter_struct *kc_adapter = netdev_priv(netdev);\
+ struct pci_dev *pdev = kc_adapter->pdev; \
+ struct device *dev = pci_dev_to_dev(pdev); \
+ dev_printk(level, dev, "%s: " format, \
+ netdev_name(netdev), ##args); \
+} while(0)
+#else /* 2.6.21 => 2.6.34 */
+#define netdev_printk(level, netdev, format, args...) \
+ dev_printk(level, (netdev)->dev.parent, \
+ "%s: " format, \
+ netdev_name(netdev), ##args)
+#endif /* <2.6.0 <2.6.21 <2.6.34 */
+#undef netdev_emerg
+#define netdev_emerg(dev, format, args...) \
+ netdev_printk(KERN_EMERG, dev, format, ##args)
+#undef netdev_alert
+#define netdev_alert(dev, format, args...) \
+ netdev_printk(KERN_ALERT, dev, format, ##args)
+#undef netdev_crit
+#define netdev_crit(dev, format, args...) \
+ netdev_printk(KERN_CRIT, dev, format, ##args)
+#undef netdev_err
+#define netdev_err(dev, format, args...) \
+ netdev_printk(KERN_ERR, dev, format, ##args)
+#undef netdev_warn
+#define netdev_warn(dev, format, args...) \
+ netdev_printk(KERN_WARNING, dev, format, ##args)
+#undef netdev_notice
+#define netdev_notice(dev, format, args...) \
+ netdev_printk(KERN_NOTICE, dev, format, ##args)
+#undef netdev_info
+#define netdev_info(dev, format, args...) \
+ netdev_printk(KERN_INFO, dev, format, ##args)
+#undef netdev_dbg
+#if defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+do { \
+ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
+ netdev_name(__dev), ##args); \
+} while (0)
+#else /* DEBUG */
+#define netdev_dbg(__dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+ 0; \
+})
+#endif /* DEBUG */
+
+#undef netif_printk
+#define netif_printk(priv, type, level, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_printk(level, (dev), fmt, ##args); \
+} while (0)
+
+#undef netif_emerg
+#define netif_emerg(priv, type, dev, fmt, args...) \
+ netif_level(emerg, priv, type, dev, fmt, ##args)
+#undef netif_alert
+#define netif_alert(priv, type, dev, fmt, args...) \
+ netif_level(alert, priv, type, dev, fmt, ##args)
+#undef netif_crit
+#define netif_crit(priv, type, dev, fmt, args...) \
+ netif_level(crit, priv, type, dev, fmt, ##args)
+#undef netif_err
+#define netif_err(priv, type, dev, fmt, args...) \
+ netif_level(err, priv, type, dev, fmt, ##args)
+#undef netif_warn
+#define netif_warn(priv, type, dev, fmt, args...) \
+ netif_level(warn, priv, type, dev, fmt, ##args)
+#undef netif_notice
+#define netif_notice(priv, type, dev, fmt, args...) \
+ netif_level(notice, priv, type, dev, fmt, ##args)
+#undef netif_info
+#define netif_info(priv, type, dev, fmt, args...) \
+ netif_level(info, priv, type, dev, fmt, ##args)
+
+#ifdef SET_SYSTEM_SLEEP_PM_OPS
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#endif
+
+#ifndef for_each_set_bit
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit */
+
+#ifndef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
+#define dma_unmap_addr pci_unmap_addr
+#define dma_unmap_addr_set pci_unmap_addr_set
+#define dma_unmap_len pci_unmap_len
+#define dma_unmap_len_set pci_unmap_len_set
+#endif /* DEFINE_DMA_UNMAP_ADDR */
+#else /* < 2.6.34 */
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#ifndef HAVE_SET_RX_MODE
+#define HAVE_SET_RX_MODE
+#endif
+
+#endif /* < 2.6.34 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+#ifndef numa_node_id
+#define numa_node_id() 0
+#endif
+#ifdef HAVE_TX_MQ
+#include <net/sch_generic.h>
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
+#define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues
+#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
+#define netif_set_real_num_tx_queues(_netdev, _count) \
+ do { \
+ (_netdev)->egress_subqueue_count = _count; \
+ } while (0)
+#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
+#else
+#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
+#endif /* HAVE_TX_MQ */
+#ifndef ETH_FLAG_RXHASH
+#define ETH_FLAG_RXHASH (1<<28)
+#endif /* ETH_FLAG_RXHASH */
+#else /* < 2.6.35 */
+#define HAVE_PM_QOS_REQUEST_LIST
+#define HAVE_IRQ_AFFINITY_HINT
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
+#define ethtool_op_set_flags _kc_ethtool_op_set_flags
+extern u32 _kc_ethtool_op_get_flags(struct net_device *);
+#define ethtool_op_get_flags _kc_ethtool_op_get_flags
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef NET_IP_ALIGN
+#undef NET_IP_ALIGN
+#endif
+#define NET_IP_ALIGN 0
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+#ifdef NET_SKB_PAD
+#undef NET_SKB_PAD
+#endif
+
+#if (L1_CACHE_BYTES > 32)
+#define NET_SKB_PAD L1_CACHE_BYTES
+#else
+#define NET_SKB_PAD 32
+#endif
+
+static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
+ unsigned int length)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
+ if (skb) {
+#if (NET_IP_ALIGN + NET_SKB_PAD)
+ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+#endif
+ skb->dev = dev;
+ }
+ return skb;
+}
+
+#ifdef netdev_alloc_skb_ip_align
+#undef netdev_alloc_skb_ip_align
+#endif
+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
+
+#undef netif_level
+#define netif_level(level, priv, type, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_##level(dev, fmt, ##args); \
+} while (0)
+
+#undef usleep_range
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#else /* < 2.6.36 */
+#define HAVE_PM_QOS_REQUEST_ACTIVE
+#define HAVE_8021P_SUPPORT
+#define HAVE_NDO_GET_STATS64
+#endif /* < 2.6.36 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
+#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
+#endif
+#ifndef VLAN_N_VID
+#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN
+#endif /* VLAN_N_VID */
+#ifndef ETH_FLAG_TXVLAN
+#define ETH_FLAG_TXVLAN (1 << 7)
+#endif /* ETH_FLAG_TXVLAN */
+#ifndef ETH_FLAG_RXVLAN
+#define ETH_FLAG_RXVLAN (1 << 8)
+#endif /* ETH_FLAG_RXVLAN */
+
+static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
+{
+ WARN_ON(skb->ip_summed != CHECKSUM_NONE);
+}
+#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
+
+static inline void *_kc_vzalloc_node(unsigned long size, int node)
+{
+ void *addr = vmalloc_node(size, node);
+ if (addr)
+ memset(addr, 0, size);
+ return addr;
+}
+#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
+
+static inline void *_kc_vzalloc(unsigned long size)
+{
+ void *addr = vmalloc(size);
+ if (addr)
+ memset(addr, 0, size);
+ return addr;
+}
+#define vzalloc(_size) _kc_vzalloc(_size)
+
+#ifndef vlan_get_protocol
+static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb)
+{
+ if (vlan_tx_tag_present(skb) ||
+ skb->protocol != cpu_to_be16(ETH_P_8021Q))
+ return skb->protocol;
+
+ if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
+ return 0;
+
+ return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
+}
+#define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb)
+#endif
+#ifdef HAVE_HW_TIME_STAMP
+#define SKBTX_HW_TSTAMP (1 << 0)
+#define SKBTX_IN_PROGRESS (1 << 2)
+#define SKB_SHARED_TX_IS_UNION
+#endif
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
+#ifndef HAVE_VLAN_RX_REGISTER
+#define HAVE_VLAN_RX_REGISTER
+#endif
+#endif /* > 2.4.18 */
+#endif /* < 2.6.37 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define skb_checksum_start_offset(skb) skb_transport_offset(skb)
+#else /* 2.6.22 -> 2.6.37 */
+static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
+{
+ return skb->csum_start - skb_headroom(skb);
+}
+#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
+#endif /* 2.6.22 -> 2.6.37 */
+#ifdef CONFIG_DCB
+#ifndef IEEE_8021QAZ_MAX_TCS
+#define IEEE_8021QAZ_MAX_TCS 8
+#endif
+#ifndef DCB_CAP_DCBX_HOST
+#define DCB_CAP_DCBX_HOST 0x01
+#endif
+#ifndef DCB_CAP_DCBX_LLD_MANAGED
+#define DCB_CAP_DCBX_LLD_MANAGED 0x02
+#endif
+#ifndef DCB_CAP_DCBX_VER_CEE
+#define DCB_CAP_DCBX_VER_CEE 0x04
+#endif
+#ifndef DCB_CAP_DCBX_VER_IEEE
+#define DCB_CAP_DCBX_VER_IEEE 0x08
+#endif
+#ifndef DCB_CAP_DCBX_STATIC
+#define DCB_CAP_DCBX_STATIC 0x10
+#endif
+#endif /* CONFIG_DCB */
+#else /* < 2.6.38 */
+#endif /* < 2.6.38 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#ifndef skb_queue_reverse_walk_safe
+#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
+ for (skb = (queue)->prev, tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
+#endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+extern u8 _kc_netdev_get_num_tc(struct net_device *dev);
+#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev)
+extern u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up);
+#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up)
+#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0)
+#else /* RHEL6.1 or greater */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif /* HAVE_MQPRIO */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#ifndef IEEE_8021QAZ_TSA_STRICT
+#define IEEE_8021QAZ_TSA_STRICT 0
+#endif
+#ifndef IEEE_8021QAZ_TSA_ETS
+#define IEEE_8021QAZ_TSA_ETS 2
+#endif
+#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
+#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
+#endif
+#endif
+#endif /* CONFIG_DCB */
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+#else /* < 2.6.39 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif
+#ifndef HAVE_SETUP_TC
+#define HAVE_SETUP_TC
+#endif
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#endif
+#endif /* CONFIG_DCB */
+#ifndef HAVE_NDO_SET_FEATURES
+#define HAVE_NDO_SET_FEATURES
+#endif
+#endif /* < 2.6.39 */
+
+/*****************************************************************************/
+/* use < 2.6.40 because of a Fedora 15 kernel update where they
+ * updated the kernel version to 2.6.40.x and they back-ported 3.0 features
+ * like set_phys_id for ethtool.
+ */
+#undef ETHTOOL_GRXRINGS
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
+#ifdef ETHTOOL_GRXRINGS
+#ifndef FLOW_EXT
+#define FLOW_EXT 0x80000000
+union _kc_ethtool_flow_union {
+ struct ethtool_tcpip4_spec tcp_ip4_spec;
+ struct ethtool_usrip4_spec usr_ip4_spec;
+ __u8 hdata[60];
+};
+struct _kc_ethtool_flow_ext {
+ __be16 vlan_etype;
+ __be16 vlan_tci;
+ __be32 data[2];
+};
+struct _kc_ethtool_rx_flow_spec {
+ __u32 flow_type;
+ union _kc_ethtool_flow_union h_u;
+ struct _kc_ethtool_flow_ext h_ext;
+ union _kc_ethtool_flow_union m_u;
+ struct _kc_ethtool_flow_ext m_ext;
+ __u64 ring_cookie;
+ __u32 location;
+};
+#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
+#endif /* FLOW_EXT */
+#endif
+
+#define pci_disable_link_state_locked pci_disable_link_state
+
+#ifndef PCI_LTR_VALUE_MASK
+#define PCI_LTR_VALUE_MASK 0x000003ff
+#endif
+#ifndef PCI_LTR_SCALE_MASK
+#define PCI_LTR_SCALE_MASK 0x00001c00
+#endif
+#ifndef PCI_LTR_SCALE_SHIFT
+#define PCI_LTR_SCALE_SHIFT 10
+#endif
+
+#else /* < 2.6.40 */
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#endif /* < 2.6.40 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#ifndef __netdev_alloc_skb_ip_align
+#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
+#endif /* __netdev_alloc_skb_ip_align */
+#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
+#define dcb_ieee_delapp(dev, app) 0
+#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
+#else /* < 3.1.0 */
+#ifndef HAVE_DCBNL_IEEE_DELAPP
+#define HAVE_DCBNL_IEEE_DELAPP
+#endif
+#endif /* < 3.1.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#ifdef ETHTOOL_GRXRINGS
+#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+#endif /* ETHTOOL_GRXRINGS */
+
+#ifndef skb_frag_size
+#define skb_frag_size(frag) _kc_skb_frag_size(frag)
+static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
+{
+ return frag->size;
+}
+#endif /* skb_frag_size */
+
+#ifndef skb_frag_size_sub
+#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
+static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+ frag->size -= delta;
+}
+#endif /* skb_frag_size_sub */
+
+#ifndef skb_frag_page
+#define skb_frag_page(frag) _kc_skb_frag_page(frag)
+static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
+{
+ return frag->page;
+}
+#endif /* skb_frag_page */
+
+#ifndef skb_frag_address
+#define skb_frag_address(frag) _kc_skb_frag_address(frag)
+static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
+{
+ return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+#endif /* skb_frag_address */
+
+#ifndef skb_frag_dma_map
+#define skb_frag_dma_map(dev,frag,offset,size,dir) \
+ _kc_skb_frag_dma_map(dev,frag,offset,size,dir)
+static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
+ const skb_frag_t *frag,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_map_page(dev, skb_frag_page(frag),
+ frag->page_offset + offset, size, dir);
+}
+#endif /* skb_frag_dma_map */
+
+#ifndef __skb_frag_unref
+#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
+static inline void __kc_skb_frag_unref(skb_frag_t *frag)
+{
+ put_page(skb_frag_page(frag));
+}
+#endif /* __skb_frag_unref */
+#else /* < 3.2.0 */
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_VF_SPOOFCHK_CONFIGURE
+#endif
+#endif /* < 3.2.0 */
+
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#undef ixgbe_get_netdev_tc_txq
+#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
+typedef u32 kni_netdev_features_t;
+#else /* ! < 3.3.0 */
+typedef netdev_features_t kni_netdev_features_t;
+#define HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef ETHTOOL_SRXNTUPLE
+#undef ETHTOOL_SRXNTUPLE
+#endif
+#endif /* < 3.3.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+#ifndef NETIF_F_RXFCS
+#define NETIF_F_RXFCS 0
+#endif /* NETIF_F_RXFCS */
+#ifndef NETIF_F_RXALL
+#define NETIF_F_RXALL 0
+#endif /* NETIF_F_RXALL */
+
+#define NUMTCS_RETURNS_U8
+
+
+#endif /* < 3.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+ return !compare_ether_addr(addr1, addr2);
+}
+#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
+#else
+#define HAVE_FDB_OPS
+#endif /* < 3.5.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) )
+#define NETIF_F_HW_VLAN_TX NETIF_F_HW_VLAN_CTAG_TX
+#define NETIF_F_HW_VLAN_RX NETIF_F_HW_VLAN_CTAG_RX
+#define NETIF_F_HW_VLAN_FILTER NETIF_F_HW_VLAN_CTAG_FILTER
+#endif /* >= 3.10.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifdef CONFIG_PCI_IOV
+extern int __kc_pci_vfs_assigned(struct pci_dev *dev);
+#else
+static inline int __kc_pci_vfs_assigned(struct pci_dev *dev)
+{
+ return 0;
+}
+#endif
+#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
+
+#endif
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) )
+#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
+#endif /* >= 3.16.0 */
+
+#endif /* _KCOMPAT_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_dev.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_dev.h
new file mode 100755
index 00000000..e79e4721
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_dev.h
@@ -0,0 +1,150 @@
+/*-
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ */
+
+#ifndef _KNI_DEV_H_
+#define _KNI_DEV_H_
+
+#include <linux/if.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+#ifdef RTE_KNI_VHOST
+#include <net/sock.h>
+#endif
+
+#include <exec-env/rte_kni_common.h>
+#define KNI_KTHREAD_RESCHEDULE_INTERVAL 5 /* us */
+
+/**
+ * A structure describing the private information for a kni device.
+ */
+
+struct kni_dev {
+ /* kni list */
+ struct list_head list;
+
+ struct net_device_stats stats;
+ int status;
+ uint16_t group_id; /* Group ID of a group of KNI devices */
+ unsigned core_id; /* Core ID to bind */
+ char name[RTE_KNI_NAMESIZE]; /* Network device name */
+ struct task_struct *pthread;
+
+ /* wait queue for req/resp */
+ wait_queue_head_t wq;
+ struct mutex sync_lock;
+
+ /* PCI device id */
+ uint16_t device_id;
+
+ /* kni device */
+ struct net_device *net_dev;
+ struct net_device *lad_dev;
+ struct pci_dev *pci_dev;
+
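+ /* The queues below are rte_kni_fifo rings shared with the user-space
+ * DPDK application; the kernel-side pointers are obtained by translating
+ * the physical addresses supplied in the create ioctl (see kni_misc.c). */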
+ /* queue for packets to be sent out */
+ void *tx_q;
+
+ /* queue for the packets received */
+ void *rx_q;
+
+ /* queue of allocated mbufs that can be used to hold sk_buff data */
+ void *alloc_q;
+
+ /* free queue for the mbufs to be freed */
+ void *free_q;
+
+ /* request queue */
+ void *req_q;
+
+ /* response queue */
+ void *resp_q;
+
+ void *sync_kva;
+ void *sync_va;
+
+ void *mbuf_kva;
+ void *mbuf_va;
+
+ /* mbuf size */
+ unsigned mbuf_size;
+
+ /* synchro for request processing */
+ unsigned long synchro;
+
+#ifdef RTE_KNI_VHOST
+ struct kni_vhost_queue* vhost_queue;
+ volatile enum {
+ BE_STOP = 0x1,
+ BE_START = 0x2,
+ BE_FINISH = 0x4,
+ } vq_status;
+#endif
+};
+
+#define KNI_ERR(args...) printk(KERN_DEBUG "KNI: Error: " args)
+#define KNI_PRINT(args...) printk(KERN_DEBUG "KNI: " args)
+#ifdef RTE_KNI_KO_DEBUG
+ #define KNI_DBG(args...) printk(KERN_DEBUG "KNI: " args)
+#else
+ #define KNI_DBG(args...)
+#endif
+
+#ifdef RTE_KNI_VHOST
+unsigned int
+kni_poll(struct file *file, struct socket *sock, poll_table * wait);
+int kni_chk_vhost_rx(struct kni_dev *kni);
+int kni_vhost_init(struct kni_dev *kni);
+int kni_vhost_backend_release(struct kni_dev *kni);
+
+struct kni_vhost_queue {
+ struct sock sk;
+ struct socket *sock;
+ int vnet_hdr_sz;
+ struct kni_dev *kni;
+ int sockfd;
+ unsigned int flags;
+ struct sk_buff* cache;
+ struct rte_kni_fifo* fifo;
+};
+
+#endif
+
+#ifdef RTE_KNI_VHOST_DEBUG_RX
+ #define KNI_DBG_RX(args...) printk(KERN_DEBUG "KNI RX: " args)
+#else
+ #define KNI_DBG_RX(args...)
+#endif
+
+#ifdef RTE_KNI_VHOST_DEBUG_TX
+ #define KNI_DBG_TX(args...) printk(KERN_DEBUG "KNI TX: " args)
+#else
+ #define KNI_DBG_TX(args...)
+#endif
+
+#endif
+
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_ethtool.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_ethtool.c
new file mode 100755
index 00000000..06b6d463
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_ethtool.c
@@ -0,0 +1,217 @@
+/*-
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include "kni_dev.h"
+
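+/*
+ * All ethtool handlers below simply pass the request through to the bound
+ * "lad" net_device (the real PF netdev probed by the embedded igb/ixgbe
+ * code in kni_misc.c), so ethtool keeps working on a KNI interface.
+ */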
+static int
+kni_check_if_running(struct net_device *dev)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ if (priv->lad_dev)
+ return 0;
+ else
+ return -EOPNOTSUPP;
+}
+
+static void
+kni_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ priv->lad_dev->ethtool_ops->get_drvinfo(priv->lad_dev, info);
+}
+
+static int
+kni_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->get_settings(priv->lad_dev, ecmd);
+}
+
+static int
+kni_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->set_settings(priv->lad_dev, ecmd);
+}
+
+static void
+kni_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ priv->lad_dev->ethtool_ops->get_wol(priv->lad_dev, wol);
+}
+
+static int
+kni_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->set_wol(priv->lad_dev, wol);
+}
+
+static int
+kni_nway_reset(struct net_device *dev)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->nway_reset(priv->lad_dev);
+}
+
+static int
+kni_get_eeprom_len(struct net_device *dev)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->get_eeprom_len(priv->lad_dev);
+}
+
+static int
+kni_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->get_eeprom(priv->lad_dev, eeprom,
+ bytes);
+}
+
+static int
+kni_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->set_eeprom(priv->lad_dev, eeprom,
+ bytes);
+}
+
+static void
+kni_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ priv->lad_dev->ethtool_ops->get_ringparam(priv->lad_dev, ring);
+}
+
+static int
+kni_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->set_ringparam(priv->lad_dev, ring);
+}
+
+static void
+kni_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ priv->lad_dev->ethtool_ops->get_pauseparam(priv->lad_dev, pause);
+}
+
+static int
+kni_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->set_pauseparam(priv->lad_dev,
+ pause);
+}
+
+static u32
+kni_get_msglevel(struct net_device *dev)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->get_msglevel(priv->lad_dev);
+}
+
+static void
+kni_set_msglevel(struct net_device *dev, u32 data)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ priv->lad_dev->ethtool_ops->set_msglevel(priv->lad_dev, data);
+}
+
+static int
+kni_get_regs_len(struct net_device *dev)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->get_regs_len(priv->lad_dev);
+}
+
+static void
+kni_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ priv->lad_dev->ethtool_ops->get_regs(priv->lad_dev, regs, p);
+}
+
+static void
+kni_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ priv->lad_dev->ethtool_ops->get_strings(priv->lad_dev, stringset,
+ data);
+}
+
+static int
+kni_get_sset_count(struct net_device *dev, int sset)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ return priv->lad_dev->ethtool_ops->get_sset_count(priv->lad_dev, sset);
+}
+
+static void
+kni_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct kni_dev *priv = netdev_priv(dev);
+ priv->lad_dev->ethtool_ops->get_ethtool_stats(priv->lad_dev, stats,
+ data);
+}
+
+struct ethtool_ops kni_ethtool_ops = {
+ .begin = kni_check_if_running,
+ .get_drvinfo = kni_get_drvinfo,
+ .get_settings = kni_get_settings,
+ .set_settings = kni_set_settings,
+ .get_regs_len = kni_get_regs_len,
+ .get_regs = kni_get_regs,
+ .get_wol = kni_get_wol,
+ .set_wol = kni_set_wol,
+ .nway_reset = kni_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = kni_get_eeprom_len,
+ .get_eeprom = kni_get_eeprom,
+ .set_eeprom = kni_set_eeprom,
+ .get_ringparam = kni_get_ringparam,
+ .set_ringparam = kni_set_ringparam,
+ .get_pauseparam = kni_get_pauseparam,
+ .set_pauseparam = kni_set_pauseparam,
+ .get_msglevel = kni_get_msglevel,
+ .set_msglevel = kni_set_msglevel,
+ .get_strings = kni_get_strings,
+ .get_sset_count = kni_get_sset_count,
+ .get_ethtool_stats = kni_get_ethtool_stats,
+};
+
+void
+kni_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &kni_ethtool_ops;
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_fifo.h b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_fifo.h
new file mode 100755
index 00000000..3ea750e2
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_fifo.h
@@ -0,0 +1,108 @@
+/*-
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ */
+
+#ifndef _KNI_FIFO_H_
+#define _KNI_FIFO_H_
+
+#include <exec-env/rte_kni_common.h>
+
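+/*
+ * These FIFOs are lockless single-producer/single-consumer rings shared
+ * between the kernel and the DPDK application.  fifo->len must be a power
+ * of two (the indices are masked with len - 1) and one slot is always left
+ * empty so that a full ring can be told apart from an empty one.
+ */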
+/**
+ * Adds num elements into the fifo. Returns the number actually written
+ */
+static inline unsigned
+kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
+{
+ unsigned i = 0;
+ unsigned fifo_write = fifo->write;
+ unsigned fifo_read = fifo->read;
+ unsigned new_write = fifo_write;
+
+ for (i = 0; i < num; i++) {
+ new_write = (new_write + 1) & (fifo->len - 1);
+
+ if (new_write == fifo_read)
+ break;
+ fifo->buffer[fifo_write] = data[i];
+ fifo_write = new_write;
+ }
+ fifo->write = fifo_write;
+
+ return i;
+}
+
+/**
+ * Gets up to num elements from the fifo. Returns the number actually read
+ */
+static inline unsigned
+kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
+{
+ unsigned i = 0;
+ unsigned new_read = fifo->read;
+ unsigned fifo_write = fifo->write;
+
+ for (i = 0; i < num; i++) {
+ if (new_read == fifo_write)
+ break;
+
+ data[i] = fifo->buffer[new_read];
+ new_read = (new_read + 1) & (fifo->len - 1);
+ }
+ fifo->read = new_read;
+
+ return i;
+}
+
+/**
+ * Gets the number of elements currently in the fifo
+ */
+static inline unsigned
+kni_fifo_count(struct rte_kni_fifo *fifo)
+{
+ return (fifo->len + fifo->write - fifo->read) & ( fifo->len - 1);
+}
+
+/**
+ * Gets the number of free slots available in the fifo
+ */
+static inline unsigned
+kni_fifo_free_count(struct rte_kni_fifo *fifo)
+{
+ return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+}
+
+#ifdef RTE_KNI_VHOST
+/**
+ * Initializes the kni fifo structure
+ */
+static inline void
+kni_fifo_init(struct rte_kni_fifo *fifo, unsigned size)
+{
+ fifo->write = 0;
+ fifo->read = 0;
+ fifo->len = size;
+ fifo->elem_size = sizeof(void *);
+}
+#endif
+
+#endif /* _KNI_FIFO_H_ */
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_misc.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_misc.c
new file mode 100755
index 00000000..868b3254
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_misc.c
@@ -0,0 +1,606 @@
+/*-
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/rwsem.h>
+
+#include <exec-env/rte_kni_common.h>
+#include "kni_dev.h"
+#include <rte_config.h>
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Kernel Module for managing kni devices");
+
+#define KNI_RX_LOOP_NUM 1000
+
+#define KNI_MAX_DEVICES 32
+
+extern void kni_net_rx(struct kni_dev *kni);
+extern void kni_net_init(struct net_device *dev);
+extern void kni_net_config_lo_mode(char *lo_str);
+extern void kni_net_poll_resp(struct kni_dev *kni);
+extern void kni_set_ethtool_ops(struct net_device *netdev);
+
+extern int ixgbe_kni_probe(struct pci_dev *pdev, struct net_device **lad_dev);
+extern void ixgbe_kni_remove(struct pci_dev *pdev);
+extern int igb_kni_probe(struct pci_dev *pdev, struct net_device **lad_dev);
+extern void igb_kni_remove(struct pci_dev *pdev);
+
+static int kni_open(struct inode *inode, struct file *file);
+static int kni_release(struct inode *inode, struct file *file);
+static int kni_ioctl(struct inode *inode, unsigned int ioctl_num,
+ unsigned long ioctl_param);
+static int kni_compat_ioctl(struct inode *inode, unsigned int ioctl_num,
+ unsigned long ioctl_param);
+static int kni_dev_remove(struct kni_dev *dev);
+
+static int __init kni_parse_kthread_mode(void);
+
+/* KNI processing for single kernel thread mode */
+static int kni_thread_single(void *unused);
+/* KNI processing for multiple kernel thread mode */
+static int kni_thread_multiple(void *param);
+
+static struct file_operations kni_fops = {
+ .owner = THIS_MODULE,
+ .open = kni_open,
+ .release = kni_release,
+ .unlocked_ioctl = (void *)kni_ioctl,
+ .compat_ioctl = (void *)kni_compat_ioctl,
+};
+
+static struct miscdevice kni_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = KNI_DEVICE,
+ .fops = &kni_fops,
+};
+
+/* loopback mode */
+static char *lo_mode = NULL;
+
+/* Kernel thread mode */
+static char *kthread_mode = NULL;
+static unsigned multiple_kthread_on = 0;
+
+#define KNI_DEV_IN_USE_BIT_NUM 0 /* Bit number for device in use */
+
+static volatile unsigned long device_in_use; /* device in use flag */
+static struct task_struct *kni_kthread;
+
+/* kni list lock */
+static DECLARE_RWSEM(kni_list_lock);
+
+/* kni list */
+static struct list_head kni_list_head = LIST_HEAD_INIT(kni_list_head);
+
+static int __init
+kni_init(void)
+{
+ KNI_PRINT("######## DPDK kni module loading ########\n");
+
+ if (kni_parse_kthread_mode() < 0) {
+ KNI_ERR("Invalid parameter for kthread_mode\n");
+ return -EINVAL;
+ }
+
+ if (misc_register(&kni_misc) != 0) {
+ KNI_ERR("Misc registration failed\n");
+ return -EPERM;
+ }
+
+ /* Clear the bit of device in use */
+ clear_bit(KNI_DEV_IN_USE_BIT_NUM, &device_in_use);
+
+ /* Configure the lo mode according to the input parameter */
+ kni_net_config_lo_mode(lo_mode);
+
+ KNI_PRINT("######## DPDK kni module loaded ########\n");
+
+ return 0;
+}
+
+static void __exit
+kni_exit(void)
+{
+ misc_deregister(&kni_misc);
+ KNI_PRINT("####### DPDK kni module unloaded #######\n");
+}
+
+static int __init
+kni_parse_kthread_mode(void)
+{
+ if (!kthread_mode)
+ return 0;
+
+ if (strcmp(kthread_mode, "single") == 0)
+ return 0;
+ else if (strcmp(kthread_mode, "multiple") == 0)
+ multiple_kthread_on = 1;
+ else
+ return -1;
+
+ return 0;
+}
+
+static int
+kni_open(struct inode *inode, struct file *file)
+{
+ /* kni device can be opened by one user only, test and set bit */
+ if (test_and_set_bit(KNI_DEV_IN_USE_BIT_NUM, &device_in_use))
+ return -EBUSY;
+
+ /* Create kernel thread for single mode */
+ if (multiple_kthread_on == 0) {
+ KNI_PRINT("Single kernel thread for all KNI devices\n");
+ /* Create kernel thread for RX */
+ kni_kthread = kthread_run(kni_thread_single, NULL,
+ "kni_single");
+ if (IS_ERR(kni_kthread)) {
+ KNI_ERR("Unable to create kernel threaed\n");
+ return PTR_ERR(kni_kthread);
+ }
+ } else
+ KNI_PRINT("Multiple kernel thread mode enabled\n");
+
+ KNI_PRINT("/dev/kni opened\n");
+
+ return 0;
+}
+
+static int
+kni_release(struct inode *inode, struct file *file)
+{
+ struct kni_dev *dev, *n;
+
+ /* Stop kernel thread for single mode */
+ if (multiple_kthread_on == 0) {
+ /* Stop kernel thread */
+ kthread_stop(kni_kthread);
+ kni_kthread = NULL;
+ }
+
+ down_write(&kni_list_lock);
+ list_for_each_entry_safe(dev, n, &kni_list_head, list) {
+ /* Stop kernel thread for multiple mode */
+ if (multiple_kthread_on && dev->pthread != NULL) {
+ kthread_stop(dev->pthread);
+ dev->pthread = NULL;
+ }
+
+#ifdef RTE_KNI_VHOST
+ kni_vhost_backend_release(dev);
+#endif
+ kni_dev_remove(dev);
+ list_del(&dev->list);
+ }
+ up_write(&kni_list_lock);
+
+ /* Clear the bit of device in use */
+ clear_bit(KNI_DEV_IN_USE_BIT_NUM, &device_in_use);
+
+ KNI_PRINT("/dev/kni closed\n");
+
+ return 0;
+}
+
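+/*
+ * Single-thread mode: one kernel thread makes KNI_RX_LOOP_NUM passes over
+ * every registered KNI device, servicing RX and pending requests, and then
+ * sleeps for KNI_KTHREAD_RESCHEDULE_INTERVAL microseconds.
+ */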
+static int
+kni_thread_single(void *unused)
+{
+ int j;
+ struct kni_dev *dev, *n;
+
+ while (!kthread_should_stop()) {
+ down_read(&kni_list_lock);
+ for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
+ list_for_each_entry_safe(dev, n,
+ &kni_list_head, list) {
+#ifdef RTE_KNI_VHOST
+ kni_chk_vhost_rx(dev);
+#else
+ kni_net_rx(dev);
+#endif
+ kni_net_poll_resp(dev);
+ }
+ }
+ up_read(&kni_list_lock);
+ /* reschedule out for a while */
+ schedule_timeout_interruptible(usecs_to_jiffies( \
+ KNI_KTHREAD_RESCHEDULE_INTERVAL));
+ }
+
+ return 0;
+}
+
+static int
+kni_thread_multiple(void *param)
+{
+ int j;
+ struct kni_dev *dev = (struct kni_dev *)param;
+
+ while (!kthread_should_stop()) {
+ for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
+#ifdef RTE_KNI_VHOST
+ kni_chk_vhost_rx(dev);
+#else
+ kni_net_rx(dev);
+#endif
+ kni_net_poll_resp(dev);
+ }
+ schedule_timeout_interruptible(usecs_to_jiffies( \
+ KNI_KTHREAD_RESCHEDULE_INTERVAL));
+ }
+
+ return 0;
+}
+
+static int
+kni_dev_remove(struct kni_dev *dev)
+{
+ if (!dev)
+ return -ENODEV;
+
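+ /*
+ * rte_pci_dev_ids.h is used as an X-macro here: with
+ * RTE_PCI_DEV_ID_DECL_IGB/IXGBE defined as "case (dev):", including it
+ * expands to a case label for every supported device ID, routing the
+ * removal to the matching embedded driver.
+ */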
+ switch (dev->device_id) {
+ #define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) case (dev):
+ #include <rte_pci_dev_ids.h>
+ igb_kni_remove(dev->pci_dev);
+ break;
+ #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) case (dev):
+ #include <rte_pci_dev_ids.h>
+ ixgbe_kni_remove(dev->pci_dev);
+ break;
+ default:
+ break;
+ }
+
+ if (dev->net_dev) {
+ unregister_netdev(dev->net_dev);
+ free_netdev(dev->net_dev);
+ }
+
+ return 0;
+}
+
+static int
+kni_check_param(struct kni_dev *kni, struct rte_kni_device_info *dev)
+{
+ if (!kni || !dev)
+ return -1;
+
+ /* Check if network name has been used */
+ if (!strncmp(kni->name, dev->name, RTE_KNI_NAMESIZE)) {
+ KNI_ERR("KNI name %s duplicated\n", dev->name);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+kni_ioctl_create(unsigned int ioctl_num, unsigned long ioctl_param)
+{
+ int ret;
+ struct rte_kni_device_info dev_info;
+ struct pci_dev *pci = NULL;
+ struct pci_dev *found_pci = NULL;
+ struct net_device *net_dev = NULL;
+ struct net_device *lad_dev = NULL;
+ struct kni_dev *kni, *dev, *n;
+ struct net *net;
+
+ printk(KERN_INFO "KNI: Creating kni...\n");
+ /* Check the buffer size, to avoid warning */
+ if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
+ return -EINVAL;
+
+ /* Copy kni info from user space */
+ ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
+ if (ret) {
+ KNI_ERR("copy_from_user in kni_ioctl_create");
+ return -EIO;
+ }
+
+ /**
+ * Check if the cpu core id is valid for binding,
+ * for multiple kernel thread mode.
+ */
+ if (multiple_kthread_on && dev_info.force_bind &&
+ !cpu_online(dev_info.core_id)) {
+ KNI_ERR("cpu %u is not online\n", dev_info.core_id);
+ return -EINVAL;
+ }
+
+ /* Check if it has been created */
+ down_read(&kni_list_lock);
+ list_for_each_entry_safe(dev, n, &kni_list_head, list) {
+ if (kni_check_param(dev, &dev_info) < 0) {
+ up_read(&kni_list_lock);
+ return -EINVAL;
+ }
+ }
+ up_read(&kni_list_lock);
+
+ net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
+#ifdef NET_NAME_UNKNOWN
+ NET_NAME_UNKNOWN,
+#endif
+ kni_net_init);
+ if (net_dev == NULL) {
+ KNI_ERR("error allocating device \"%s\"\n", dev_info.name);
+ return -EBUSY;
+ }
+
+ net = get_net_ns_by_pid(current->pid);
+ if (IS_ERR(net)) {
+ free_netdev(net_dev);
+ return PTR_ERR(net);
+ }
+ dev_net_set(net_dev, net);
+ put_net(net);
+
+ kni = netdev_priv(net_dev);
+
+ kni->net_dev = net_dev;
+ kni->group_id = dev_info.group_id;
+ kni->core_id = dev_info.core_id;
+ strncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);
+
+ /* Translate user space info into kernel space info */
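+ /* (The rings were allocated by the user-space DPDK application in
+ * physically contiguous memory; phys_to_virt() yields the kernel virtual
+ * addresses of that same memory, so both sides operate on shared FIFOs.) */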
+ kni->tx_q = phys_to_virt(dev_info.tx_phys);
+ kni->rx_q = phys_to_virt(dev_info.rx_phys);
+ kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
+ kni->free_q = phys_to_virt(dev_info.free_phys);
+
+ kni->req_q = phys_to_virt(dev_info.req_phys);
+ kni->resp_q = phys_to_virt(dev_info.resp_phys);
+ kni->sync_va = dev_info.sync_va;
+ kni->sync_kva = phys_to_virt(dev_info.sync_phys);
+
+ kni->mbuf_kva = phys_to_virt(dev_info.mbuf_phys);
+ kni->mbuf_va = dev_info.mbuf_va;
+
+#ifdef RTE_KNI_VHOST
+ kni->vhost_queue = NULL;
+ kni->vq_status = BE_STOP;
+#endif
+ kni->mbuf_size = dev_info.mbuf_size;
+
+ KNI_PRINT("tx_phys: 0x%016llx, tx_q addr: 0x%p\n",
+ (unsigned long long) dev_info.tx_phys, kni->tx_q);
+ KNI_PRINT("rx_phys: 0x%016llx, rx_q addr: 0x%p\n",
+ (unsigned long long) dev_info.rx_phys, kni->rx_q);
+ KNI_PRINT("alloc_phys: 0x%016llx, alloc_q addr: 0x%p\n",
+ (unsigned long long) dev_info.alloc_phys, kni->alloc_q);
+ KNI_PRINT("free_phys: 0x%016llx, free_q addr: 0x%p\n",
+ (unsigned long long) dev_info.free_phys, kni->free_q);
+ KNI_PRINT("req_phys: 0x%016llx, req_q addr: 0x%p\n",
+ (unsigned long long) dev_info.req_phys, kni->req_q);
+ KNI_PRINT("resp_phys: 0x%016llx, resp_q addr: 0x%p\n",
+ (unsigned long long) dev_info.resp_phys, kni->resp_q);
+ KNI_PRINT("mbuf_phys: 0x%016llx, mbuf_kva: 0x%p\n",
+ (unsigned long long) dev_info.mbuf_phys, kni->mbuf_kva);
+ KNI_PRINT("mbuf_va: 0x%p\n", dev_info.mbuf_va);
+ KNI_PRINT("mbuf_size: %u\n", kni->mbuf_size);
+
+ KNI_DBG("PCI: %02x:%02x.%02x %04x:%04x\n",
+ dev_info.bus,
+ dev_info.devid,
+ dev_info.function,
+ dev_info.vendor_id,
+ dev_info.device_id);
+
+ pci = pci_get_device(dev_info.vendor_id, dev_info.device_id, NULL);
+
+ /* Support Ethtool */
+ while (pci) {
+ KNI_PRINT("pci_bus: %02x:%02x:%02x \n",
+ pci->bus->number,
+ PCI_SLOT(pci->devfn),
+ PCI_FUNC(pci->devfn));
+
+ if ((pci->bus->number == dev_info.bus) &&
+ (PCI_SLOT(pci->devfn) == dev_info.devid) &&
+ (PCI_FUNC(pci->devfn) == dev_info.function)) {
+ found_pci = pci;
+ switch (dev_info.device_id) {
+ #define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) case (dev):
+ #include <rte_pci_dev_ids.h>
+ ret = igb_kni_probe(found_pci, &lad_dev);
+ break;
+ #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) \
+ case (dev):
+ #include <rte_pci_dev_ids.h>
+ ret = ixgbe_kni_probe(found_pci, &lad_dev);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+
+ KNI_DBG("PCI found: pci=0x%p, lad_dev=0x%p\n",
+ pci, lad_dev);
+ if (ret == 0) {
+ kni->lad_dev = lad_dev;
+ kni_set_ethtool_ops(kni->net_dev);
+ } else {
+ KNI_ERR("Device not supported by ethtool");
+ kni->lad_dev = NULL;
+ }
+
+ kni->pci_dev = found_pci;
+ kni->device_id = dev_info.device_id;
+ break;
+ }
+ pci = pci_get_device(dev_info.vendor_id,
+ dev_info.device_id, pci);
+ }
+ if (pci)
+ pci_dev_put(pci);
+
+ ret = register_netdev(net_dev);
+ if (ret) {
+ KNI_ERR("error %i registering device \"%s\"\n",
+ ret, dev_info.name);
+ kni_dev_remove(kni);
+ return -ENODEV;
+ }
+
+#ifdef RTE_KNI_VHOST
+ kni_vhost_init(kni);
+#endif
+
+ /**
+ * Create a new kernel thread for multiple mode, set its core affinity,
+ * and finally wake it up.
+ */
+ if (multiple_kthread_on) {
+ kni->pthread = kthread_create(kni_thread_multiple,
+ (void *)kni,
+ "kni_%s", kni->name);
+ if (IS_ERR(kni->pthread)) {
+ kni_dev_remove(kni);
+ return -ECANCELED;
+ }
+ if (dev_info.force_bind)
+ kthread_bind(kni->pthread, kni->core_id);
+ wake_up_process(kni->pthread);
+ }
+
+ down_write(&kni_list_lock);
+ list_add(&kni->list, &kni_list_head);
+ up_write(&kni_list_lock);
+
+ return 0;
+}
+
+static int
+kni_ioctl_release(unsigned int ioctl_num, unsigned long ioctl_param)
+{
+ int ret = -EINVAL;
+ struct kni_dev *dev, *n;
+ struct rte_kni_device_info dev_info;
+
+ if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
+ return -EINVAL;
+
+ ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
+ if (ret) {
+ KNI_ERR("copy_from_user in kni_ioctl_release");
+ return -EIO;
+ }
+
+ /* Release the network device according to its name */
+ if (strlen(dev_info.name) == 0)
+ return ret;
+
+ down_write(&kni_list_lock);
+ list_for_each_entry_safe(dev, n, &kni_list_head, list) {
+ if (strncmp(dev->name, dev_info.name, RTE_KNI_NAMESIZE) != 0)
+ continue;
+
+ if (multiple_kthread_on && dev->pthread != NULL) {
+ kthread_stop(dev->pthread);
+ dev->pthread = NULL;
+ }
+
+#ifdef RTE_KNI_VHOST
+ kni_vhost_backend_release(dev);
+#endif
+ kni_dev_remove(dev);
+ list_del(&dev->list);
+ ret = 0;
+ break;
+ }
+ up_write(&kni_list_lock);
+ printk(KERN_INFO "KNI: %s release kni named %s\n",
+ (ret == 0 ? "Successfully" : "Unsuccessfully"), dev_info.name);
+
+ return ret;
+}
+
+static int
+kni_ioctl(struct inode *inode,
+ unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ int ret = -EINVAL;
+
+ KNI_DBG("IOCTL num=0x%0x param=0x%0lx \n", ioctl_num, ioctl_param);
+
+ /*
+ * Switch according to the ioctl called
+ */
+ switch (_IOC_NR(ioctl_num)) {
+ case _IOC_NR(RTE_KNI_IOCTL_TEST):
+ /* For test only, not used */
+ break;
+ case _IOC_NR(RTE_KNI_IOCTL_CREATE):
+ ret = kni_ioctl_create(ioctl_num, ioctl_param);
+ break;
+ case _IOC_NR(RTE_KNI_IOCTL_RELEASE):
+ ret = kni_ioctl_release(ioctl_num, ioctl_param);
+ break;
+ default:
+ KNI_DBG("IOCTL default \n");
+ break;
+ }
+
+ return ret;
+}
+
+static int
+kni_compat_ioctl(struct inode *inode,
+ unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ /* 32 bits app on 64 bits OS to be supported later */
+ KNI_PRINT("Not implemented.\n");
+
+ return -EINVAL;
+}
+
+module_init(kni_init);
+module_exit(kni_exit);
+
+module_param(lo_mode, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(lo_mode,
+"KNI loopback mode (default=lo_mode_none):\n"
+" lo_mode_none Kernel loopback disabled\n"
+" lo_mode_fifo Enable kernel loopback with fifo\n"
+" lo_mode_fifo_skb Enable kernel loopback with fifo and skb buffer\n"
+"\n"
+);
+
+module_param(kthread_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(kthread_mode,
+"Kernel thread mode (default=single):\n"
+" single Single kernel thread mode enabled.\n"
+" multiple Multiple kernel thread mode enabled.\n"
+"\n"
+);
+
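For reference: these two parameters fix the loopback behaviour and the threading model at load time. Assuming the module builds as rte_kni.ko, as in stock DPDK, a load line such as "insmod rte_kni.ko lo_mode=lo_mode_fifo kthread_mode=multiple" selects the fifo loopback path and one kernel thread per KNI device (the kni_thread_multiple() path shown above), while the defaults keep loopback disabled and use a single shared thread.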
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_net.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_net.c
new file mode 100755
index 00000000..dd95db5b
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_net.c
@@ -0,0 +1,687 @@
+/*-
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ */
+
+/*
+ * This code is inspired from the book "Linux Device Drivers" by
+ * Alessandro Rubini and Jonathan Corbet, published by O'Reilly & Associates
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+
+#include <rte_config.h>
+#include <exec-env/rte_kni_common.h>
+#include <kni_fifo.h>
+#include "kni_dev.h"
+
+#define WD_TIMEOUT 5 /* jiffies */
+
+#define MBUF_BURST_SZ 32
+
+#define KNI_WAIT_RESPONSE_TIMEOUT 300 /* 3 seconds */
+
+/* typedef for rx function */
+typedef void (*kni_net_rx_t)(struct kni_dev *kni);
+
+static int kni_net_tx(struct sk_buff *skb, struct net_device *dev);
+static void kni_net_rx_normal(struct kni_dev *kni);
+static void kni_net_rx_lo_fifo(struct kni_dev *kni);
+static void kni_net_rx_lo_fifo_skb(struct kni_dev *kni);
+static int kni_net_process_request(struct kni_dev *kni,
+ struct rte_kni_request *req);
+
+/* kni rx function pointer, defaulting to normal rx */
+static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
+
+/*
+ * Open and close
+ */
+static int
+kni_net_open(struct net_device *dev)
+{
+ int ret;
+ struct rte_kni_request req;
+ struct kni_dev *kni = netdev_priv(dev);
+
+ if (kni->lad_dev)
+ memcpy(dev->dev_addr, kni->lad_dev->dev_addr, ETH_ALEN);
+ else
+ /*
+		 * Generate a random MAC address. eth_random_addr() is the
+		 * newer kernel name for this helper.
+ */
+ random_ether_addr(dev->dev_addr);
+
+ netif_start_queue(dev);
+
+ memset(&req, 0, sizeof(req));
+ req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;
+
+ /* Setting if_up to non-zero means up */
+ req.if_up = 1;
+ ret = kni_net_process_request(kni, &req);
+
+ return (ret == 0 ? req.result : ret);
+}
+
+static int
+kni_net_release(struct net_device *dev)
+{
+ int ret;
+ struct rte_kni_request req;
+ struct kni_dev *kni = netdev_priv(dev);
+
+ netif_stop_queue(dev); /* can't transmit any more */
+
+ memset(&req, 0, sizeof(req));
+ req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;
+
+ /* Setting if_up to 0 means down */
+ req.if_up = 0;
+ ret = kni_net_process_request(kni, &req);
+
+ return (ret == 0 ? req.result : ret);
+}
+
+/*
+ * Configuration changes (passed on by ifconfig)
+ */
+static int
+kni_net_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP) /* can't act on a running interface */
+ return -EBUSY;
+
+ /* ignore other fields */
+ return 0;
+}
+
+/*
+ * RX: normal working mode
+ */
+static void
+kni_net_rx_normal(struct kni_dev *kni)
+{
+ unsigned ret;
+ uint32_t len;
+ unsigned i, num, num_rq, num_fq;
+ struct rte_kni_mbuf *kva;
+ struct rte_kni_mbuf *va[MBUF_BURST_SZ];
+ void * data_kva;
+
+ struct sk_buff *skb;
+ struct net_device *dev = kni->net_dev;
+
+ /* Get the number of entries in rx_q */
+ num_rq = kni_fifo_count(kni->rx_q);
+
+ /* Get the number of free entries in free_q */
+ num_fq = kni_fifo_free_count(kni->free_q);
+
+ /* Calculate the number of entries to dequeue in rx_q */
+ num = min(num_rq, num_fq);
+ num = min(num, (unsigned)MBUF_BURST_SZ);
+
+ /* Return if no entry in rx_q and no free entry in free_q */
+ if (num == 0)
+ return;
+
+ /* Burst dequeue from rx_q */
+ ret = kni_fifo_get(kni->rx_q, (void **)va, num);
+ if (ret == 0)
+ return; /* Failing should not happen */
+
+ /* Transfer received packets to netif */
+ for (i = 0; i < num; i++) {
+ kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
+ len = kva->data_len;
+ data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
+ + kni->mbuf_kva;
+
+ skb = dev_alloc_skb(len + 2);
+ if (!skb) {
+ KNI_ERR("Out of mem, dropping pkts\n");
+ /* Update statistics */
+ kni->stats.rx_dropped++;
+ }
+ else {
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, len), data_kva, len);
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Call netif interface */
+ netif_rx(skb);
+
+ /* Update statistics */
+ kni->stats.rx_bytes += len;
+ kni->stats.rx_packets++;
+ }
+ }
+
+ /* Burst enqueue mbufs into free_q */
+ ret = kni_fifo_put(kni->free_q, (void **)va, num);
+ if (ret != num)
+ /* Failing should not happen */
+ KNI_ERR("Fail to enqueue entries into free_q\n");
+}
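A minimal sketch (not part of the patch) of the address translation the RX loops above repeat inline; it assumes, as the code does, that the mbuf pool is mapped contiguously in both the user-space and kernel address spaces, so a single constant offset converts an mbuf pointer from one view to the other:

    /* Illustrative helper only; mbuf_va and mbuf_kva mirror the fields of
     * struct kni_dev used in the loops above. */
    static inline void *
    kni_user_va_to_kva(void *user_va, void *mbuf_va, void *mbuf_kva)
    {
        /* same arithmetic as kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva */
        return (char *)mbuf_kva + ((char *)user_va - (char *)mbuf_va);
    }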
+
+/*
+ * RX: loopback with enqueue/dequeue fifos.
+ */
+static void
+kni_net_rx_lo_fifo(struct kni_dev *kni)
+{
+ unsigned ret;
+ uint32_t len;
+ unsigned i, num, num_rq, num_tq, num_aq, num_fq;
+ struct rte_kni_mbuf *kva;
+ struct rte_kni_mbuf *va[MBUF_BURST_SZ];
+ void * data_kva;
+
+ struct rte_kni_mbuf *alloc_kva;
+ struct rte_kni_mbuf *alloc_va[MBUF_BURST_SZ];
+ void *alloc_data_kva;
+
+ /* Get the number of entries in rx_q */
+ num_rq = kni_fifo_count(kni->rx_q);
+
+	/* Get the number of free entries in tx_q */
+ num_tq = kni_fifo_free_count(kni->tx_q);
+
+ /* Get the number of entries in alloc_q */
+ num_aq = kni_fifo_count(kni->alloc_q);
+
+ /* Get the number of free entries in free_q */
+ num_fq = kni_fifo_free_count(kni->free_q);
+
+ /* Calculate the number of entries to be dequeued from rx_q */
+ num = min(num_rq, num_tq);
+ num = min(num, num_aq);
+ num = min(num, num_fq);
+ num = min(num, (unsigned)MBUF_BURST_SZ);
+
+ /* Return if no entry to dequeue from rx_q */
+ if (num == 0)
+ return;
+
+ /* Burst dequeue from rx_q */
+ ret = kni_fifo_get(kni->rx_q, (void **)va, num);
+ if (ret == 0)
+ return; /* Failing should not happen */
+
+ /* Dequeue entries from alloc_q */
+ ret = kni_fifo_get(kni->alloc_q, (void **)alloc_va, num);
+ if (ret) {
+ num = ret;
+ /* Copy mbufs */
+ for (i = 0; i < num; i++) {
+ kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
+ len = kva->pkt_len;
+ data_kva = kva->buf_addr + kva->data_off -
+ kni->mbuf_va + kni->mbuf_kva;
+
+ alloc_kva = (void *)alloc_va[i] - kni->mbuf_va +
+ kni->mbuf_kva;
+ alloc_data_kva = alloc_kva->buf_addr +
+ alloc_kva->data_off - kni->mbuf_va +
+ kni->mbuf_kva;
+ memcpy(alloc_data_kva, data_kva, len);
+ alloc_kva->pkt_len = len;
+ alloc_kva->data_len = len;
+
+ kni->stats.tx_bytes += len;
+ kni->stats.rx_bytes += len;
+ }
+
+ /* Burst enqueue mbufs into tx_q */
+ ret = kni_fifo_put(kni->tx_q, (void **)alloc_va, num);
+ if (ret != num)
+ /* Failing should not happen */
+ KNI_ERR("Fail to enqueue mbufs into tx_q\n");
+ }
+
+ /* Burst enqueue mbufs into free_q */
+ ret = kni_fifo_put(kni->free_q, (void **)va, num);
+ if (ret != num)
+ /* Failing should not happen */
+ KNI_ERR("Fail to enqueue mbufs into free_q\n");
+
+ /**
+	 * Update statistics; enqueue/dequeue failures cannot happen here,
+	 * as all queue counts were checked above.
+ */
+ kni->stats.tx_packets += num;
+ kni->stats.rx_packets += num;
+}
+
+/*
+ * RX: loopback with enqueue/dequeue fifos and sk buffer copies.
+ */
+static void
+kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
+{
+ unsigned ret;
+ uint32_t len;
+ unsigned i, num_rq, num_fq, num;
+ struct rte_kni_mbuf *kva;
+ struct rte_kni_mbuf *va[MBUF_BURST_SZ];
+ void * data_kva;
+
+ struct sk_buff *skb;
+ struct net_device *dev = kni->net_dev;
+
+ /* Get the number of entries in rx_q */
+ num_rq = kni_fifo_count(kni->rx_q);
+
+ /* Get the number of free entries in free_q */
+ num_fq = kni_fifo_free_count(kni->free_q);
+
+ /* Calculate the number of entries to dequeue from rx_q */
+ num = min(num_rq, num_fq);
+ num = min(num, (unsigned)MBUF_BURST_SZ);
+
+ /* Return if no entry to dequeue from rx_q */
+ if (num == 0)
+ return;
+
+ /* Burst dequeue mbufs from rx_q */
+ ret = kni_fifo_get(kni->rx_q, (void **)va, num);
+ if (ret == 0)
+ return;
+
+ /* Copy mbufs to sk buffer and then call tx interface */
+ for (i = 0; i < num; i++) {
+ kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
+ len = kva->data_len;
+ data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
+ kni->mbuf_kva;
+
+ skb = dev_alloc_skb(len + 2);
+ if (skb == NULL)
+ KNI_ERR("Out of mem, dropping pkts\n");
+ else {
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, len), data_kva, len);
+ skb->dev = dev;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ dev_kfree_skb(skb);
+ }
+
+ /* Simulate real usage, allocate/copy skb twice */
+ skb = dev_alloc_skb(len + 2);
+ if (skb == NULL) {
+ KNI_ERR("Out of mem, dropping pkts\n");
+ kni->stats.rx_dropped++;
+ }
+ else {
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, len), data_kva, len);
+ skb->dev = dev;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ kni->stats.rx_bytes += len;
+ kni->stats.rx_packets++;
+
+ /* call tx interface */
+ kni_net_tx(skb, dev);
+ }
+ }
+
+ /* enqueue all the mbufs from rx_q into free_q */
+ ret = kni_fifo_put(kni->free_q, (void **)&va, num);
+ if (ret != num)
+ /* Failing should not happen */
+ KNI_ERR("Fail to enqueue mbufs into free_q\n");
+}
+
+/* rx interface */
+void
+kni_net_rx(struct kni_dev *kni)
+{
+ /**
+	 * No NULL check is needed here, as the function pointer
+	 * always holds a default value.
+ */
+ (*kni_net_rx_func)(kni);
+}
+
+/*
+ * Transmit a packet (called by the kernel)
+ */
+#ifdef RTE_KNI_VHOST
+static int
+kni_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct kni_dev *kni = netdev_priv(dev);
+
+ dev_kfree_skb(skb);
+ kni->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+#else
+static int
+kni_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ int len = 0;
+ unsigned ret;
+ struct kni_dev *kni = netdev_priv(dev);
+ struct rte_kni_mbuf *pkt_kva = NULL;
+ struct rte_kni_mbuf *pkt_va = NULL;
+
+ dev->trans_start = jiffies; /* save the timestamp */
+
+ /* Check if the length of skb is less than mbuf size */
+ if (skb->len > kni->mbuf_size)
+ goto drop;
+
+ /**
+ * Check if it has at least one free entry in tx_q and
+ * one entry in alloc_q.
+ */
+ if (kni_fifo_free_count(kni->tx_q) == 0 ||
+ kni_fifo_count(kni->alloc_q) == 0) {
+ /**
+		 * If there is no free entry in tx_q or no entry in alloc_q,
+		 * drop the skb and return.
+ */
+ goto drop;
+ }
+
+ /* dequeue a mbuf from alloc_q */
+ ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
+ if (likely(ret == 1)) {
+ void *data_kva;
+
+ pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
+ data_kva = pkt_kva->buf_addr + pkt_kva->data_off - kni->mbuf_va
+ + kni->mbuf_kva;
+
+ len = skb->len;
+ memcpy(data_kva, skb->data, len);
+ if (unlikely(len < ETH_ZLEN)) {
+ memset(data_kva + len, 0, ETH_ZLEN - len);
+ len = ETH_ZLEN;
+ }
+ pkt_kva->pkt_len = len;
+ pkt_kva->data_len = len;
+
+ /* enqueue mbuf into tx_q */
+ ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
+ if (unlikely(ret != 1)) {
+ /* Failing should not happen */
+ KNI_ERR("Fail to enqueue mbuf into tx_q\n");
+ goto drop;
+ }
+ } else {
+ /* Failing should not happen */
+ KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
+ goto drop;
+ }
+
+ /* Free skb and update statistics */
+ dev_kfree_skb(skb);
+ kni->stats.tx_bytes += len;
+ kni->stats.tx_packets++;
+
+ return NETDEV_TX_OK;
+
+drop:
+ /* Free skb and update statistics */
+ dev_kfree_skb(skb);
+ kni->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+#endif
+
+/*
+ * Deal with a transmit timeout.
+ */
+static void
+kni_net_tx_timeout (struct net_device *dev)
+{
+ struct kni_dev *kni = netdev_priv(dev);
+
+ KNI_DBG("Transmit timeout at %ld, latency %ld\n", jiffies,
+ jiffies - dev->trans_start);
+
+ kni->stats.tx_errors++;
+ netif_wake_queue(dev);
+ return;
+}
+
+/*
+ * Ioctl commands
+ */
+static int
+kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ KNI_DBG("kni_net_ioctl %d\n",
+ ((struct kni_dev *)netdev_priv(dev))->group_id);
+
+ return 0;
+}
+
+static int
+kni_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int ret;
+ struct rte_kni_request req;
+ struct kni_dev *kni = netdev_priv(dev);
+
+ KNI_DBG("kni_net_change_mtu new mtu %d to be set\n", new_mtu);
+
+ memset(&req, 0, sizeof(req));
+ req.req_id = RTE_KNI_REQ_CHANGE_MTU;
+ req.new_mtu = new_mtu;
+ ret = kni_net_process_request(kni, &req);
+ if (ret == 0 && req.result == 0)
+ dev->mtu = new_mtu;
+
+ return (ret == 0 ? req.result : ret);
+}
+
+/*
+ * Check whether the user-space application has posted a response message
+ */
+void
+kni_net_poll_resp(struct kni_dev *kni)
+{
+ if (kni_fifo_count(kni->resp_q))
+ wake_up_interruptible(&kni->wq);
+}
+
+/*
+ * Send a request to the user-space application and wait for its response.
+ */
+static int
+kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
+{
+ int ret = -1;
+ void *resp_va;
+ unsigned num;
+ int ret_val;
+
+ if (!kni || !req) {
+ KNI_ERR("No kni instance or request\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&kni->sync_lock);
+
+ /* Construct data */
+ memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
+ num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
+ if (num < 1) {
+ KNI_ERR("Cannot send to req_q\n");
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ ret_val = wait_event_interruptible_timeout(kni->wq,
+ kni_fifo_count(kni->resp_q), 3 * HZ);
+ if (signal_pending(current) || ret_val <= 0) {
+ ret = -ETIME;
+ goto fail;
+ }
+ num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
+ if (num != 1 || resp_va != kni->sync_va) {
+ /* This should never happen */
+ KNI_ERR("No data in resp_q\n");
+ ret = -ENODATA;
+ goto fail;
+ }
+
+ memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
+ ret = 0;
+
+fail:
+ mutex_unlock(&kni->sync_lock);
+ return ret;
+}
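In other words, the request path is a synchronous rendezvous over shared memory: the request is copied into the shared sync buffer, the sync_va token is pushed onto req_q, and the caller sleeps for up to 3 * HZ jiffies (three seconds) until the user-space side writes its result into the same buffer and pushes the identical token back on resp_q; the resp_va == sync_va check then guards against a stale or mismatched response.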
+
+/*
+ * Return statistics to the caller
+ */
+static struct net_device_stats *
+kni_net_stats(struct net_device *dev)
+{
+ struct kni_dev *kni = netdev_priv(dev);
+ return &kni->stats;
+}
+
+/*
+ * Fill the eth header
+ */
+static int
+kni_net_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
+{
+ struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
+
+ memcpy(eth->h_source, saddr ? saddr : dev->dev_addr, dev->addr_len);
+ memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len);
+ eth->h_proto = htons(type);
+
+ return (dev->hard_header_len);
+}
+
+
+/*
+ * Re-fill the eth header
+ */
+static int
+kni_net_rebuild_header(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct ethhdr *eth = (struct ethhdr *) skb->data;
+
+ memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
+ memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);
+
+ return 0;
+}
+
+/**
+ * kni_net_set_mac - Change the Ethernet Address of the KNI NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int kni_net_set_mac(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+ if (!is_valid_ether_addr((unsigned char *)(addr->sa_data)))
+ return -EADDRNOTAVAIL;
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ return 0;
+}
+
+static const struct header_ops kni_net_header_ops = {
+ .create = kni_net_header,
+ .rebuild = kni_net_rebuild_header,
+ .cache = NULL, /* disable caching */
+};
+
+static const struct net_device_ops kni_net_netdev_ops = {
+ .ndo_open = kni_net_open,
+ .ndo_stop = kni_net_release,
+ .ndo_set_config = kni_net_config,
+ .ndo_start_xmit = kni_net_tx,
+ .ndo_change_mtu = kni_net_change_mtu,
+ .ndo_do_ioctl = kni_net_ioctl,
+ .ndo_get_stats = kni_net_stats,
+ .ndo_tx_timeout = kni_net_tx_timeout,
+ .ndo_set_mac_address = kni_net_set_mac,
+};
+
+void
+kni_net_init(struct net_device *dev)
+{
+ struct kni_dev *kni = netdev_priv(dev);
+
+ KNI_DBG("kni_net_init\n");
+
+ init_waitqueue_head(&kni->wq);
+ mutex_init(&kni->sync_lock);
+
+ ether_setup(dev); /* assign some of the fields */
+ dev->netdev_ops = &kni_net_netdev_ops;
+ dev->header_ops = &kni_net_header_ops;
+ dev->watchdog_timeo = WD_TIMEOUT;
+}
+
+void
+kni_net_config_lo_mode(char *lo_str)
+{
+ if (!lo_str) {
+ KNI_PRINT("loopback disabled");
+ return;
+ }
+
+ if (!strcmp(lo_str, "lo_mode_none"))
+ KNI_PRINT("loopback disabled");
+ else if (!strcmp(lo_str, "lo_mode_fifo")) {
+ KNI_PRINT("loopback mode=lo_mode_fifo enabled");
+ kni_net_rx_func = kni_net_rx_lo_fifo;
+ } else if (!strcmp(lo_str, "lo_mode_fifo_skb")) {
+ KNI_PRINT("loopback mode=lo_mode_fifo_skb enabled");
+ kni_net_rx_func = kni_net_rx_lo_fifo_skb;
+ } else
+ KNI_PRINT("Incognizant parameter, loopback disabled");
+}
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_vhost.c b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_vhost.c
new file mode 100755
index 00000000..7141f833
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/kni/kni_vhost.c
@@ -0,0 +1,811 @@
+/*-
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <net/sock.h>
+#include <linux/virtio_net.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/nsproxy.h>
+#include <linux/sched.h>
+#include <linux/if_tun.h>
+#include <linux/version.h>
+
+#include "compat.h"
+#include "kni_dev.h"
+#include "kni_fifo.h"
+
+#define RX_BURST_SZ 4
+
+extern void put_unused_fd(unsigned int fd);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+extern struct file*
+sock_alloc_file(struct socket *sock,
+ int flags, const char *dname);
+
+extern int get_unused_fd_flags(unsigned flags);
+
+extern void fd_install(unsigned int fd, struct file *file);
+
+static int kni_sock_map_fd(struct socket *sock)
+{
+ struct file *file;
+ int fd = get_unused_fd_flags(0);
+ if (fd < 0)
+ return fd;
+
+ file = sock_alloc_file(sock, 0, NULL);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ return PTR_ERR(file);
+ }
+ fd_install(fd, file);
+ return fd;
+}
+#else
+#define kni_sock_map_fd(s) sock_map_fd(s, 0)
+#endif
+
+static struct proto kni_raw_proto = {
+ .name = "kni_vhost",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct kni_vhost_queue),
+};
+
+static inline int
+kni_vhost_net_tx(struct kni_dev *kni, struct iovec *iov,
+ unsigned offset, unsigned len)
+{
+ struct rte_kni_mbuf *pkt_kva = NULL;
+ struct rte_kni_mbuf *pkt_va = NULL;
+ int ret;
+
+ KNI_DBG_TX("tx offset=%d, len=%d, iovlen=%d\n",
+ offset, len, (int)iov->iov_len);
+
+ /**
+ * Check if it has at least one free entry in tx_q and
+ * one entry in alloc_q.
+ */
+ if (kni_fifo_free_count(kni->tx_q) == 0 ||
+ kni_fifo_count(kni->alloc_q) == 0) {
+ /**
+		 * If there is no free entry in tx_q or no entry in alloc_q,
+		 * drop the packet and return.
+ */
+ goto drop;
+ }
+
+ /* dequeue a mbuf from alloc_q */
+ ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
+ if (likely(ret == 1)) {
+ void *data_kva;
+
+ pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
+ data_kva = pkt_kva->buf_addr + pkt_kva->data_off
+ - kni->mbuf_va + kni->mbuf_kva;
+
+ memcpy_fromiovecend(data_kva, iov, offset, len);
+ if (unlikely(len < ETH_ZLEN)) {
+ memset(data_kva + len, 0, ETH_ZLEN - len);
+ len = ETH_ZLEN;
+ }
+ pkt_kva->pkt_len = len;
+ pkt_kva->data_len = len;
+
+ /* enqueue mbuf into tx_q */
+ ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
+ if (unlikely(ret != 1)) {
+ /* Failing should not happen */
+ KNI_ERR("Fail to enqueue mbuf into tx_q\n");
+ goto drop;
+ }
+ } else {
+ /* Failing should not happen */
+ KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
+ goto drop;
+ }
+
+ /* update statistics */
+ kni->stats.tx_bytes += len;
+ kni->stats.tx_packets++;
+
+ return 0;
+
+drop:
+ /* update statistics */
+ kni->stats.tx_dropped++;
+
+ return 0;
+}
+
+static inline int
+kni_vhost_net_rx(struct kni_dev *kni, struct iovec *iov,
+ unsigned offset, unsigned len)
+{
+ uint32_t pkt_len;
+ struct rte_kni_mbuf *kva;
+ struct rte_kni_mbuf *va;
+ void * data_kva;
+ struct sk_buff *skb;
+ struct kni_vhost_queue *q = kni->vhost_queue;
+
+ if (unlikely(q == NULL))
+ return 0;
+
+ /* ensure at least one entry in free_q */
+ if (unlikely(kni_fifo_free_count(kni->free_q) == 0))
+ return 0;
+
+ skb = skb_dequeue(&q->sk.sk_receive_queue);
+ if (unlikely(skb == NULL))
+ return 0;
+
+ kva = (struct rte_kni_mbuf*)skb->data;
+
+ /* free skb to cache */
+ skb->data = NULL;
+ if (unlikely(1 != kni_fifo_put(q->fifo, (void **)&skb, 1)))
+ /* Failing should not happen */
+ KNI_ERR("Fail to enqueue entries into rx cache fifo\n");
+
+ pkt_len = kva->data_len;
+ if (unlikely(pkt_len > len))
+ goto drop;
+
+ KNI_DBG_RX("rx offset=%d, len=%d, pkt_len=%d, iovlen=%d\n",
+ offset, len, pkt_len, (int)iov->iov_len);
+
+ data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va + kni->mbuf_kva;
+ if (unlikely(memcpy_toiovecend(iov, data_kva, offset, pkt_len)))
+ goto drop;
+
+ /* Update statistics */
+ kni->stats.rx_bytes += pkt_len;
+ kni->stats.rx_packets++;
+
+ /* enqueue mbufs into free_q */
+ va = (void*)kva - kni->mbuf_kva + kni->mbuf_va;
+ if (unlikely(1 != kni_fifo_put(kni->free_q, (void **)&va, 1)))
+ /* Failing should not happen */
+ KNI_ERR("Fail to enqueue entries into free_q\n");
+
+ KNI_DBG_RX("receive done %d\n", pkt_len);
+
+ return pkt_len;
+
+drop:
+ /* Update drop statistics */
+ kni->stats.rx_dropped++;
+
+ return 0;
+}
+
+static unsigned int
+kni_sock_poll(struct file *file, struct socket *sock, poll_table * wait)
+{
+ struct kni_vhost_queue *q =
+ container_of(sock->sk, struct kni_vhost_queue, sk);
+ struct kni_dev *kni;
+ unsigned int mask = 0;
+
+ if (unlikely(q == NULL || q->kni == NULL))
+ return POLLERR;
+
+ kni = q->kni;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
+ KNI_DBG("start kni_poll on group %d, wq 0x%16llx\n",
+ kni->group_id, (uint64_t)sock->wq);
+#else
+ KNI_DBG("start kni_poll on group %d, wait at 0x%16llx\n",
+ kni->group_id, (uint64_t)&sock->wait);
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
+ poll_wait(file, &sock->wq->wait, wait);
+#else
+ poll_wait(file, &sock->wait, wait);
+#endif
+
+ if (kni_fifo_count(kni->rx_q) > 0)
+ mask |= POLLIN | POLLRDNORM;
+
+ if (sock_writeable(&q->sk) ||
+ (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock->flags) &&
+ sock_writeable(&q->sk)))
+ mask |= POLLOUT | POLLWRNORM;
+
+ return mask;
+}
+
+static inline void
+kni_vhost_enqueue(struct kni_dev *kni, struct kni_vhost_queue *q,
+ struct sk_buff *skb, struct rte_kni_mbuf *va)
+{
+ struct rte_kni_mbuf *kva;
+
+ kva = (void *)(va) - kni->mbuf_va + kni->mbuf_kva;
+ (skb)->data = (unsigned char*)kva;
+ (skb)->len = kva->data_len;
+ skb_queue_tail(&q->sk.sk_receive_queue, skb);
+}
+
+static inline void
+kni_vhost_enqueue_burst(struct kni_dev *kni, struct kni_vhost_queue *q,
+ struct sk_buff **skb, struct rte_kni_mbuf **va)
+{
+ int i;
+ for (i = 0; i < RX_BURST_SZ; skb++, va++, i++)
+ kni_vhost_enqueue(kni, q, *skb, *va);
+}
+
+int
+kni_chk_vhost_rx(struct kni_dev *kni)
+{
+ struct kni_vhost_queue *q = kni->vhost_queue;
+ unsigned nb_in, nb_mbuf, nb_skb;
+ const unsigned BURST_MASK = RX_BURST_SZ - 1;
+ unsigned nb_burst, nb_backlog, i;
+ struct sk_buff *skb[RX_BURST_SZ];
+ struct rte_kni_mbuf *va[RX_BURST_SZ];
+
+ if (unlikely(BE_STOP & kni->vq_status)) {
+ kni->vq_status |= BE_FINISH;
+ return 0;
+ }
+
+ if (unlikely(q == NULL))
+ return 0;
+
+ nb_skb = kni_fifo_count(q->fifo);
+ nb_mbuf = kni_fifo_count(kni->rx_q);
+
+ nb_in = min(nb_mbuf, nb_skb);
+ nb_in = min(nb_in, (unsigned)RX_BURST_SZ);
+ nb_burst = (nb_in & ~BURST_MASK);
+ nb_backlog = (nb_in & BURST_MASK);
+
+	/* enqueue into the skb queue in RX_BURST_SZ-sized bulks */
+ if (0 != nb_burst) {
+ if (unlikely(RX_BURST_SZ != kni_fifo_get(
+ kni->rx_q, (void **)&va,
+ RX_BURST_SZ)))
+ goto except;
+
+ if (unlikely(RX_BURST_SZ != kni_fifo_get(
+ q->fifo, (void **)&skb,
+ RX_BURST_SZ)))
+ goto except;
+
+ kni_vhost_enqueue_burst(kni, q, skb, va);
+ }
+
+	/* handle the leftover entries one by one */
+ for (i = 0; i < nb_backlog; ++i) {
+ if (unlikely(1 != kni_fifo_get(
+ kni->rx_q,(void **)&va, 1)))
+ goto except;
+
+ if (unlikely(1 != kni_fifo_get(
+ q->fifo, (void **)&skb, 1)))
+ goto except;
+
+ kni_vhost_enqueue(kni, q, *skb, *va);
+ }
+
+	/* On-demand wake up */
+ if ((nb_in == RX_BURST_SZ) || (nb_skb == 0) ||
+ ((nb_mbuf < RX_BURST_SZ) && (nb_mbuf != 0))) {
+ wake_up_interruptible_poll(sk_sleep(&q->sk),
+ POLLIN | POLLRDNORM | POLLRDBAND);
+ KNI_DBG_RX("RX CHK KICK nb_mbuf %d, nb_skb %d, nb_in %d\n",
+ nb_mbuf, nb_skb, nb_in);
+ }
+
+ return 0;
+
+except:
+ /* Failing should not happen */
+ KNI_ERR("Fail to enqueue fifo, it shouldn't happen \n");
+ BUG_ON(1);
+
+ return 0;
+}
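A small stand-alone sketch (not part of the patch) of the burst/backlog split computed above: because RX_BURST_SZ is a power of two, masking splits any ready count into a full-burst portion pulled with one bulk fifo get and a remainder handled one entry at a time.

    #include <stdio.h>

    #define RX_BURST_SZ 4

    int main(void)
    {
        const unsigned burst_mask = RX_BURST_SZ - 1;
        unsigned nb_in = 7;                        /* example: 7 packets ready */
        unsigned nb_burst = nb_in & ~burst_mask;   /* 4 -> one bulk fifo get */
        unsigned nb_backlog = nb_in & burst_mask;  /* 3 -> three single gets */

        printf("burst=%u backlog=%u\n", nb_burst, nb_backlog);
        return 0;
    }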
+
+static int
+kni_sock_sndmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct kni_vhost_queue *q =
+ container_of(sock->sk, struct kni_vhost_queue, sk);
+ int vnet_hdr_len = 0;
+ unsigned long len = total_len;
+
+ if (unlikely(q == NULL || q->kni == NULL))
+ return 0;
+
+ KNI_DBG_TX("kni_sndmsg len %ld, flags 0x%08x, nb_iov %d\n",
+ len, q->flags, (int)m->msg_iovlen);
+
+#ifdef RTE_KNI_VHOST_VNET_HDR_EN
+ if (likely(q->flags & IFF_VNET_HDR)) {
+ vnet_hdr_len = q->vnet_hdr_sz;
+ if (unlikely(len < vnet_hdr_len))
+ return -EINVAL;
+ len -= vnet_hdr_len;
+ }
+#endif
+
+ if (unlikely(len < ETH_HLEN + q->vnet_hdr_sz))
+ return -EINVAL;
+
+ return kni_vhost_net_tx(q->kni, m->msg_iov, vnet_hdr_len, len);
+}
+
+static int
+kni_sock_rcvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t len, int flags)
+{
+ int vnet_hdr_len = 0;
+ int pkt_len = 0;
+ struct kni_vhost_queue *q =
+ container_of(sock->sk, struct kni_vhost_queue, sk);
+ static struct virtio_net_hdr
+ __attribute__ ((unused)) vnet_hdr = {
+ .flags = 0,
+ .gso_type = VIRTIO_NET_HDR_GSO_NONE
+ };
+
+ if (unlikely(q == NULL || q->kni == NULL))
+ return 0;
+
+#ifdef RTE_KNI_VHOST_VNET_HDR_EN
+ if (likely(q->flags & IFF_VNET_HDR)) {
+ vnet_hdr_len = q->vnet_hdr_sz;
+ if ((len -= vnet_hdr_len) < 0)
+ return -EINVAL;
+ }
+#endif
+
+ if (unlikely(0 == (pkt_len = kni_vhost_net_rx(q->kni,
+ m->msg_iov, vnet_hdr_len, len))))
+ return 0;
+
+#ifdef RTE_KNI_VHOST_VNET_HDR_EN
+ /* no need to copy hdr when no pkt received */
+ if (unlikely(memcpy_toiovecend(m->msg_iov,
+ (void *)&vnet_hdr, 0, vnet_hdr_len)))
+ return -EFAULT;
+#endif
+ KNI_DBG_RX("kni_rcvmsg expect_len %ld, flags 0x%08x, pkt_len %d\n",
+ (unsigned long)len, q->flags, pkt_len);
+
+ return (pkt_len + vnet_hdr_len);
+}
+
+/* dummy tap-like ioctl */
+static int
+kni_sock_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct ifreq __user *ifr = argp;
+ unsigned int __user *up = argp;
+ struct kni_vhost_queue *q =
+ container_of(sock->sk, struct kni_vhost_queue, sk);
+ struct kni_dev *kni;
+ unsigned int u;
+ int __user *sp = argp;
+ int s;
+ int ret;
+
+ KNI_DBG("tap ioctl cmd 0x%08x\n", cmd);
+
+ switch (cmd) {
+ case TUNSETIFF:
+ KNI_DBG("TUNSETIFF\n");
+ /* ignore the name, just look at flags */
+ if (get_user(u, &ifr->ifr_flags))
+ return -EFAULT;
+
+ ret = 0;
+ if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
+ ret = -EINVAL;
+ else
+ q->flags = u;
+
+ return ret;
+
+ case TUNGETIFF:
+ KNI_DBG("TUNGETIFF\n");
+ rcu_read_lock_bh();
+ kni = rcu_dereference_bh(q->kni);
+ if (kni)
+ dev_hold(kni->net_dev);
+ rcu_read_unlock_bh();
+
+ if (!kni)
+ return -ENOLINK;
+
+ ret = 0;
+ if (copy_to_user(&ifr->ifr_name, kni->net_dev->name, IFNAMSIZ) ||
+ put_user(q->flags, &ifr->ifr_flags))
+ ret = -EFAULT;
+ dev_put(kni->net_dev);
+ return ret;
+
+ case TUNGETFEATURES:
+ KNI_DBG("TUNGETFEATURES\n");
+ u = IFF_TAP | IFF_NO_PI;
+#ifdef RTE_KNI_VHOST_VNET_HDR_EN
+ u |= IFF_VNET_HDR;
+#endif
+ if (put_user(u, up))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETSNDBUF:
+ KNI_DBG("TUNSETSNDBUF\n");
+ if (get_user(u, up))
+ return -EFAULT;
+
+ q->sk.sk_sndbuf = u;
+ return 0;
+
+ case TUNGETVNETHDRSZ:
+ s = q->vnet_hdr_sz;
+ if (put_user(s, sp))
+ return -EFAULT;
+ KNI_DBG("TUNGETVNETHDRSZ %d\n", s);
+ return 0;
+
+ case TUNSETVNETHDRSZ:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s < (int)sizeof(struct virtio_net_hdr))
+ return -EINVAL;
+
+ KNI_DBG("TUNSETVNETHDRSZ %d\n", s);
+ q->vnet_hdr_sz = s;
+ return 0;
+
+ case TUNSETOFFLOAD:
+ KNI_DBG("TUNSETOFFLOAD %lx\n", arg);
+#ifdef RTE_KNI_VHOST_VNET_HDR_EN
+		/* no offloads supported yet */
+ if (!(q->flags & IFF_VNET_HDR))
+ return -EINVAL;
+
+ return 0;
+#else
+ return -EINVAL;
+#endif
+
+ default:
+ KNI_DBG("NOT SUPPORT\n");
+ return -EINVAL;
+ }
+}
+
+static int
+kni_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ /* 32 bits app on 64 bits OS to be supported later */
+ KNI_PRINT("Not implemented.\n");
+
+ return -EINVAL;
+}
+
+#define KNI_VHOST_WAIT_WQ_SAFE() \
+do { \
+ while ((BE_FINISH | BE_STOP) == kni->vq_status) \
+ msleep(1); \
+}while(0) \
+
+
+static int
+kni_sock_release(struct socket *sock)
+{
+ struct kni_vhost_queue *q =
+ container_of(sock->sk, struct kni_vhost_queue, sk);
+ struct kni_dev *kni;
+
+ if (q == NULL)
+ return 0;
+
+ if (NULL != (kni = q->kni)) {
+ kni->vq_status = BE_STOP;
+ KNI_VHOST_WAIT_WQ_SAFE();
+ kni->vhost_queue = NULL;
+ q->kni = NULL;
+ }
+
+ if (q->sockfd != -1)
+ q->sockfd = -1;
+
+ sk_set_socket(&q->sk, NULL);
+ sock->sk = NULL;
+
+ sock_put(&q->sk);
+
+ KNI_DBG("dummy sock release done\n");
+
+ return 0;
+}
+
+int
+kni_sock_getname (struct socket *sock,
+ struct sockaddr *addr,
+ int *sockaddr_len, int peer)
+{
+ KNI_DBG("dummy sock getname\n");
+ ((struct sockaddr_ll*)addr)->sll_family = AF_PACKET;
+ return 0;
+}
+
+static const struct proto_ops kni_socket_ops = {
+ .getname = kni_sock_getname,
+ .sendmsg = kni_sock_sndmsg,
+ .recvmsg = kni_sock_rcvmsg,
+ .release = kni_sock_release,
+ .poll = kni_sock_poll,
+ .ioctl = kni_sock_ioctl,
+ .compat_ioctl = kni_sock_compat_ioctl,
+};
+
+static void
+kni_sk_write_space(struct sock *sk)
+{
+ wait_queue_head_t *wqueue;
+
+ if (!sock_writeable(sk) ||
+ !test_and_clear_bit(SOCK_ASYNC_NOSPACE,
+ &sk->sk_socket->flags))
+ return;
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_poll(
+ wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
+}
+
+static void
+kni_sk_destruct(struct sock *sk)
+{
+ struct kni_vhost_queue *q =
+ container_of(sk, struct kni_vhost_queue, sk);
+
+ if (!q)
+ return;
+
+ /* make sure there's no packet in buffer */
+ while (skb_dequeue(&sk->sk_receive_queue) != NULL)
+ ;
+
+ mb();
+
+ if (q->fifo != NULL) {
+ kfree(q->fifo);
+ q->fifo = NULL;
+ }
+
+ if (q->cache != NULL) {
+ kfree(q->cache);
+ q->cache = NULL;
+ }
+}
+
+static int
+kni_vhost_backend_init(struct kni_dev *kni)
+{
+ struct kni_vhost_queue *q;
+ struct net *net = current->nsproxy->net_ns;
+ int err, i, sockfd;
+ struct rte_kni_fifo *fifo;
+ struct sk_buff *elem;
+
+ if (kni->vhost_queue != NULL)
+ return -1;
+
+ if (!(q = (struct kni_vhost_queue *)sk_alloc(
+ net, AF_UNSPEC, GFP_KERNEL, &kni_raw_proto)))
+ return -ENOMEM;
+
+ err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);
+ if (err)
+ goto free_sk;
+
+ sockfd = kni_sock_map_fd(q->sock);
+ if (sockfd < 0) {
+ err = sockfd;
+ goto free_sock;
+ }
+
+ /* cache init */
+ q->cache = (struct sk_buff*)
+ kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(struct sk_buff),
+ GFP_KERNEL);
+ if (!q->cache)
+ goto free_fd;
+
+ fifo = (struct rte_kni_fifo*)
+ kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(void *)
+ + sizeof(struct rte_kni_fifo), GFP_KERNEL);
+ if (!fifo)
+ goto free_cache;
+
+ kni_fifo_init(fifo, RTE_KNI_VHOST_MAX_CACHE_SIZE);
+
+ for (i = 0; i < RTE_KNI_VHOST_MAX_CACHE_SIZE; i++) {
+ elem = &q->cache[i];
+ kni_fifo_put(fifo, (void**)&elem, 1);
+ }
+ q->fifo = fifo;
+
+ /* store sockfd in vhost_queue */
+ q->sockfd = sockfd;
+
+ /* init socket */
+ q->sock->type = SOCK_RAW;
+ q->sock->state = SS_CONNECTED;
+ q->sock->ops = &kni_socket_ops;
+ sock_init_data(q->sock, &q->sk);
+
+ /* init sock data */
+ q->sk.sk_write_space = kni_sk_write_space;
+ q->sk.sk_destruct = kni_sk_destruct;
+ q->flags = IFF_NO_PI | IFF_TAP;
+ q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
+#ifdef RTE_KNI_VHOST_VNET_HDR_EN
+ q->flags |= IFF_VNET_HDR;
+#endif
+
+ /* bind kni_dev with vhost_queue */
+ q->kni = kni;
+ kni->vhost_queue = q;
+
+ wmb();
+
+ kni->vq_status = BE_START;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
+ KNI_DBG("backend init sockfd=%d, sock->wq=0x%16llx,"
+ "sk->sk_wq=0x%16llx",
+ q->sockfd, (uint64_t)q->sock->wq,
+ (uint64_t)q->sk.sk_wq);
+#else
+ KNI_DBG("backend init sockfd=%d, sock->wait at 0x%16llx,"
+ "sk->sk_sleep=0x%16llx",
+ q->sockfd, (uint64_t)&q->sock->wait,
+ (uint64_t)q->sk.sk_sleep);
+#endif
+
+ return 0;
+
+free_cache:
+ kfree(q->cache);
+ q->cache = NULL;
+
+free_fd:
+ put_unused_fd(sockfd);
+
+free_sock:
+ q->kni = NULL;
+ kni->vhost_queue = NULL;
+ kni->vq_status |= BE_FINISH;
+ sock_release(q->sock);
+ q->sock->ops = NULL;
+ q->sock = NULL;
+
+free_sk:
+ sk_free((struct sock*)q);
+
+ return err;
+}
+
+/* kni vhost sock sysfs */
+static ssize_t
+show_sock_fd(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *net_dev = container_of(dev, struct net_device, dev);
+ struct kni_dev *kni = netdev_priv(net_dev);
+ int sockfd = -1;
+ if (kni->vhost_queue != NULL)
+ sockfd = kni->vhost_queue->sockfd;
+ return snprintf(buf, 10, "%d\n", sockfd);
+}
+
+static ssize_t
+show_sock_en(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *net_dev = container_of(dev, struct net_device, dev);
+ struct kni_dev *kni = netdev_priv(net_dev);
+ return snprintf(buf, 10, "%u\n", (kni->vhost_queue == NULL ? 0 : 1));
+}
+
+static ssize_t
+set_sock_en(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct net_device *net_dev = container_of(dev, struct net_device, dev);
+ struct kni_dev *kni = netdev_priv(net_dev);
+ unsigned long en;
+ int err = 0;
+
+ if (0 != kstrtoul(buf, 0, &en))
+ return -EINVAL;
+
+ if (en)
+ err = kni_vhost_backend_init(kni);
+
+ return err ? err : count;
+}
+
+static DEVICE_ATTR(sock_fd, S_IRUGO | S_IRUSR, show_sock_fd, NULL);
+static DEVICE_ATTR(sock_en, S_IRUGO | S_IWUSR, show_sock_en, set_sock_en);
+static struct attribute *dev_attrs[] = {
+ &dev_attr_sock_fd.attr,
+ &dev_attr_sock_en.attr,
+ NULL,
+};
+
+static const struct attribute_group dev_attr_grp = {
+ .attrs = dev_attrs,
+};
+
+int
+kni_vhost_backend_release(struct kni_dev *kni)
+{
+ struct kni_vhost_queue *q = kni->vhost_queue;
+
+ if (q == NULL)
+ return 0;
+
+	/* detach from kni */
+ q->kni = NULL;
+
+ KNI_DBG("release backend done\n");
+
+ return 0;
+}
+
+int
+kni_vhost_init(struct kni_dev *kni)
+{
+ struct net_device *dev = kni->net_dev;
+
+ if (sysfs_create_group(&dev->dev.kobj, &dev_attr_grp))
+ sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
+
+ kni->vq_status = BE_STOP;
+
+ KNI_DBG("kni_vhost_init done\n");
+
+ return 0;
+}
+
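The sysfs group registered here exposes the two attributes defined above on the KNI net_device. Assuming the usual sysfs layout for network devices, writing 1 to sock_en under /sys/class/net/<ifname>/ triggers kni_vhost_backend_init(), and reading sock_fd afterwards returns the file descriptor of the backing raw socket, or -1 while the backend is disabled.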
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/Makefile b/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/Makefile
new file mode 100755
index 00000000..9d22fb97
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/Makefile
@@ -0,0 +1,56 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# module name and path
+#
+MODULE = rte_dom0_mm
+
+#
+# CFLAGS
+#
+MODULE_CFLAGS += -I$(SRCDIR) --param max-inline-insns-single=50
+MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
+MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
+MODULE_CFLAGS += -Wall -Werror
+
+# this lib needs main eal
+DEPDIRS-y += lib/librte_eal/linuxapp/eal
+
+#
+# all sources are stored in SRCS-y
+#
+
+SRCS-y += dom0_mm_misc.c
+
+include $(RTE_SDK)/mk/rte.module.mk
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/compat.h b/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/compat.h
new file mode 100755
index 00000000..e6eb97f2
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/compat.h
@@ -0,0 +1,15 @@
+/*
+ * Minimal wrappers to allow compiling xen_dom0 on older kernels.
+ */
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && \
+ (!(defined(RHEL_RELEASE_CODE) && \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
+
+#define kstrtoul strict_strtoul
+
+#endif /* < 2.6.39 */
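The fallback above packs the RHEL major and minor numbers into a single integer so releases can be compared numerically; for example RHEL_RELEASE_VERSION(6, 4) evaluates to (6 << 8) + 4 = 1540, which is the value the RHEL_RELEASE_CODE comparison relies on.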
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h b/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h
new file mode 100755
index 00000000..a9dd0d26
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h
@@ -0,0 +1,107 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _DOM0_MM_DEV_H_
+#define _DOM0_MM_DEV_H_
+
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <exec-env/rte_dom0_common.h>
+
+#define NUM_MEM_CTX 256 /**< Maximum number of memory contexts. */
+#define MAX_EXCHANGE_FAIL_TIME 5 /**< Maximum number of allowed exchange failures. */
+#define MAX_MEMBLOCK_SIZE (2 * DOM0_MEMBLOCK_SIZE)
+#define MAX_NUM_ORDER (DOM0_CONTIG_NUM_ORDER + 1)
+#define SIZE_PER_BLOCK 2 /**< Size of each memory block (2 MB). */
+
+/**
+ * A structure describing the private information for a dom0 device.
+ */
+struct dom0_mm_dev {
+ struct miscdevice miscdev;
+ uint8_t fail_times;
+ uint32_t used_memsize;
+ uint32_t num_mem_ctx;
+ uint32_t config_memsize;
+ uint32_t num_bigblock;
+ struct dom0_mm_data *mm_data[NUM_MEM_CTX];
+ struct mutex data_lock;
+};
+
+struct dom0_mm_data{
+ uint32_t refcnt;
+	uint32_t num_memseg; /**< Number of memory segments. */
+	uint32_t mem_size; /**< Size of the requested memory. */
+
+ char name[DOM0_NAME_MAX];
+
+ /** Store global memory block IDs used by an instance */
+ uint32_t block_num[DOM0_NUM_MEMBLOCK];
+
+ /** Store memory block information.*/
+ struct memblock_info block_info[DOM0_NUM_MEMBLOCK];
+
+ /** Store memory segment information.*/
+ struct memseg_info seg_info[DOM0_NUM_MEMSEG];
+};
+
+#define XEN_ERR(args...) printk(KERN_DEBUG "XEN_DOM0: Error: " args)
+#define XEN_PRINT(args...) printk(KERN_DEBUG "XEN_DOM0: " args)
+#endif
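For scale: SIZE_PER_BLOCK is expressed in MB, so with the default rsv_memsize of 2048 MB used later in dom0_mm_misc.c the reservation is tracked as 1024 blocks of 2 MB each, and MAX_MEMBLOCK_SIZE covers the double-sized blocks (4 MB, assuming DOM0_MEMBLOCK_SIZE is the same 2 MB unit) that dom0_memory_free() later releases in pairs.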
diff --git a/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c b/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c
new file mode 100755
index 00000000..543bf574
--- /dev/null
+++ b/src/dpdk_lib18/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c
@@ -0,0 +1,781 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/version.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/xen-ops.h>
+#include <xen/interface/memory.h>
+
+#include <rte_config.h>
+#include <exec-env/rte_dom0_common.h>
+
+#include "compat.h"
+#include "dom0_mm_dev.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Kernel Module for supporting DPDK running on Xen Dom0");
+
+static struct dom0_mm_dev dom0_dev;
+static struct kobject *dom0_kobj = NULL;
+
+static struct memblock_info *rsv_mm_info;
+
+/* Default reserved memory size (2048 MB). */
+static uint32_t rsv_memsize = 2048;
+
+static int dom0_open(struct inode *inode, struct file *file);
+static int dom0_release(struct inode *inode, struct file *file);
+static int dom0_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param);
+static int dom0_mmap(struct file *file, struct vm_area_struct *vma);
+static int dom0_memory_free(uint32_t size);
+static int dom0_memory_release(struct dom0_mm_data *mm_data);
+
+static const struct file_operations data_fops = {
+ .owner = THIS_MODULE,
+ .open = dom0_open,
+ .release = dom0_release,
+ .mmap = dom0_mmap,
+ .unlocked_ioctl = (void *)dom0_ioctl,
+};
+
+static ssize_t
+show_memsize_rsvd(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, 10, "%u\n", dom0_dev.used_memsize);
+}
+
+static ssize_t
+show_memsize(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, 10, "%u\n", dom0_dev.config_memsize);
+}
+
+static ssize_t
+store_memsize(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err = 0;
+ unsigned long mem_size;
+
+ if (0 != kstrtoul(buf, 0, &mem_size))
+ return -EINVAL;
+
+ mutex_lock(&dom0_dev.data_lock);
+ if (0 == mem_size) {
+ err = -EINVAL;
+ goto fail;
+ } else if (mem_size > (rsv_memsize - dom0_dev.used_memsize)) {
+ XEN_ERR("configure memory size fail\n");
+ err = -EINVAL;
+ goto fail;
+ } else
+ dom0_dev.config_memsize = mem_size;
+
+fail:
+ mutex_unlock(&dom0_dev.data_lock);
+ return err ? err : count;
+}
+
+static DEVICE_ATTR(memsize, S_IRUGO | S_IWUSR, show_memsize, store_memsize);
+static DEVICE_ATTR(memsize_rsvd, S_IRUGO, show_memsize_rsvd, NULL);
+
+static struct attribute *dev_attrs[] = {
+ &dev_attr_memsize.attr,
+ &dev_attr_memsize_rsvd.attr,
+ NULL,
+};
+
+/* the memory size unit is MB */
+static const struct attribute_group dev_attr_grp = {
+ .name = "memsize-mB",
+ .attrs = dev_attrs,
+};
+
+
+static void
+sort_viraddr(struct memblock_info *mb, int cnt)
+{
+ int i,j;
+ uint64_t tmp_pfn;
+ uint64_t tmp_viraddr;
+
+	/* sort blocks by pfn, keeping virtual addresses paired */
+ for(i = 0; i < cnt; i ++) {
+ for(j = cnt - 1; j > i; j--) {
+ if(mb[j].pfn < mb[j - 1].pfn) {
+ tmp_pfn = mb[j - 1].pfn;
+ mb[j - 1].pfn = mb[j].pfn;
+ mb[j].pfn = tmp_pfn;
+
+ tmp_viraddr = mb[j - 1].vir_addr;
+ mb[j - 1].vir_addr = mb[j].vir_addr;
+ mb[j].vir_addr = tmp_viraddr;
+ }
+ }
+ }
+}
+
+static int
+dom0_find_memdata(const char * mem_name)
+{
+ unsigned i;
+ int idx = -1;
+ for(i = 0; i< NUM_MEM_CTX; i++) {
+ if(dom0_dev.mm_data[i] == NULL)
+ continue;
+ if (!strncmp(dom0_dev.mm_data[i]->name, mem_name,
+ sizeof(char) * DOM0_NAME_MAX)) {
+ idx = i;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+static int
+dom0_find_mempos(void)
+{
+ unsigned i;
+ int idx = -1;
+
+ for(i = 0; i< NUM_MEM_CTX; i++) {
+ if(dom0_dev.mm_data[i] == NULL){
+ idx = i;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+static int
+dom0_memory_release(struct dom0_mm_data *mm_data)
+{
+ int idx;
+ uint32_t num_block, block_id;
+
+ /* each memory block is 2M */
+ num_block = mm_data->mem_size / SIZE_PER_BLOCK;
+ if (num_block == 0)
+ return -EINVAL;
+
+ /* reset global memory data */
+ idx = dom0_find_memdata(mm_data->name);
+ if (idx >= 0) {
+ dom0_dev.used_memsize -= mm_data->mem_size;
+ dom0_dev.mm_data[idx] = NULL;
+ dom0_dev.num_mem_ctx--;
+ }
+
+	/* mark these memory blocks as free again */
+ for (idx = 0; idx < num_block; idx++) {
+ block_id = mm_data->block_num[idx];
+ rsv_mm_info[block_id].used = 0;
+ }
+
+ memset(mm_data, 0, sizeof(struct dom0_mm_data));
+ vfree(mm_data);
+ return 0;
+}
+
+static int
+dom0_memory_free(uint32_t rsv_size)
+{
+ uint64_t vstart, vaddr;
+ uint32_t i, num_block, size;
+
+ if (!xen_pv_domain())
+ return -1;
+
+ /* each memory block is 2M */
+ num_block = rsv_size / SIZE_PER_BLOCK;
+ if (num_block == 0)
+ return -EINVAL;
+
+ /* free all memory blocks of size 4M and destroy their contiguous regions */
+ for (i = 0; i < dom0_dev.num_bigblock * 2; i += 2) {
+ vstart = rsv_mm_info[i].vir_addr;
+ if (vstart) {
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+ if (rsv_mm_info[i].exchange_flag)
+ xen_destroy_contiguous_region(vstart,
+ DOM0_CONTIG_NUM_ORDER);
+ if (rsv_mm_info[i + 1].exchange_flag)
+ xen_destroy_contiguous_region(vstart +
+ DOM0_MEMBLOCK_SIZE,
+ DOM0_CONTIG_NUM_ORDER);
+ #else
+ if (rsv_mm_info[i].exchange_flag)
+ xen_destroy_contiguous_region(rsv_mm_info[i].pfn
+ * PAGE_SIZE,
+ DOM0_CONTIG_NUM_ORDER);
+ if (rsv_mm_info[i + 1].exchange_flag)
+ xen_destroy_contiguous_region(rsv_mm_info[i].pfn
+ * PAGE_SIZE + DOM0_MEMBLOCK_SIZE,
+ DOM0_CONTIG_NUM_ORDER);
+ #endif
+
+ size = DOM0_MEMBLOCK_SIZE * 2;
+ vaddr = vstart;
+ while (size > 0) {
+ ClearPageReserved(virt_to_page(vaddr));
+ vaddr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ free_pages(vstart, MAX_NUM_ORDER);
+ }
+ }
+
+ /* free all remaining 2M memory blocks and destroy their contiguous regions */
+ for (; i < num_block; i++) {
+ vstart = rsv_mm_info[i].vir_addr;
+ if (vstart) {
+ if (rsv_mm_info[i].exchange_flag)
+ xen_destroy_contiguous_region(vstart,
+ DOM0_CONTIG_NUM_ORDER);
+
+ size = DOM0_MEMBLOCK_SIZE;
+ vaddr = vstart;
+ while (size > 0) {
+ ClearPageReserved(virt_to_page(vaddr));
+ vaddr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ free_pages(vstart, DOM0_CONTIG_NUM_ORDER);
+ }
+ }
+
+ memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);
+ vfree(rsv_mm_info);
+ rsv_mm_info = NULL;
+
+ return 0;
+}
+
+static void
+find_free_memory(uint32_t count, struct dom0_mm_data *mm_data)
+{
+ uint32_t i = 0;
+ uint32_t j = 0;
+
+ while ((i < count) && (j < rsv_memsize / SIZE_PER_BLOCK)) {
+ if (rsv_mm_info[j].used == 0) {
+ mm_data->block_info[i].pfn = rsv_mm_info[j].pfn;
+ mm_data->block_info[i].vir_addr =
+ rsv_mm_info[j].vir_addr;
+ mm_data->block_info[i].mfn = rsv_mm_info[j].mfn;
+ mm_data->block_info[i].exchange_flag =
+ rsv_mm_info[j].exchange_flag;
+ mm_data->block_num[i] = j;
+ rsv_mm_info[j].used = 1;
+ i++;
+ }
+ j++;
+ }
+}
+
+/**
+ * Find all memory segments in which physical addresses are contiguous.
+ */
+static void
+find_memseg(int count, struct dom0_mm_data * mm_data)
+{
+ int i = 0;
+ int j, k, idx = 0;
+ uint64_t zone_len, pfn, num_block;
+
+ while(i < count) {
+ if (mm_data->block_info[i].exchange_flag == 0) {
+ i++;
+ continue;
+ }
+ k = 0;
+ pfn = mm_data->block_info[i].pfn;
+ mm_data->seg_info[idx].pfn = pfn;
+ mm_data->seg_info[idx].mfn[k] = mm_data->block_info[i].mfn;
+
+ for (j = i + 1; j < count; j++) {
+
+ /* ignore memory blocks whose MFN exchange failed */
+ if (mm_data->block_info[j].exchange_flag == 0)
+ break;
+
+ if (mm_data->block_info[j].pfn !=
+ (mm_data->block_info[j - 1].pfn +
+ DOM0_MEMBLOCK_SIZE / PAGE_SIZE))
+ break;
+ ++k;
+ mm_data->seg_info[idx].mfn[k] = mm_data->block_info[j].mfn;
+ }
+
+ num_block = j - i;
+ zone_len = num_block * DOM0_MEMBLOCK_SIZE;
+ mm_data->seg_info[idx].size = zone_len;
+
+ XEN_PRINT("memseg id=%d, size=0x%llx\n", idx, zone_len);
+ i += num_block;
+ idx++;
+ if (idx == DOM0_NUM_MEMSEG)
+ break;
+ }
+ mm_data->num_memseg = idx;
+}
+
+static int
+dom0_memory_reserve(uint32_t rsv_size)
+{
+ uint64_t pfn, vstart, vaddr;
+ uint32_t i, num_block, size, allocated_size = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ dma_addr_t dma_handle;
+#endif
+
+ /* 2M as memory block */
+ num_block = rsv_size / SIZE_PER_BLOCK;
+
+ rsv_mm_info = vmalloc(sizeof(struct memblock_info) * num_block);
+ if (!rsv_mm_info) {
+ XEN_ERR("Unable to allocate device memory information\n");
+ return -ENOMEM;
+ }
+ memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);
+
+ /* first, try to allocate 4M at a time */
+ for (i = 0; i < num_block; i += 2) {
+ vstart = (unsigned long)
+ __get_free_pages(GFP_ATOMIC, MAX_NUM_ORDER);
+ if (vstart == 0)
+ break;
+
+ dom0_dev.num_bigblock = i / 2 + 1;
+ allocated_size = SIZE_PER_BLOCK * (i + 2);
+
+ /* size of 4M */
+ size = DOM0_MEMBLOCK_SIZE * 2;
+
+ vaddr = vstart;
+ while (size > 0) {
+ SetPageReserved(virt_to_page(vaddr));
+ vaddr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+
+ pfn = virt_to_pfn(vstart);
+ rsv_mm_info[i].pfn = pfn;
+ rsv_mm_info[i].vir_addr = vstart;
+ rsv_mm_info[i + 1].pfn =
+ pfn + DOM0_MEMBLOCK_SIZE / PAGE_SIZE;
+ rsv_mm_info[i + 1].vir_addr =
+ vstart + DOM0_MEMBLOCK_SIZE;
+ }
+
+ /* if 4M allocation failed, fall back to allocating 2M at a time */
+ for (; i < num_block; i++) {
+ vstart = (unsigned long)
+ __get_free_pages(GFP_ATOMIC, DOM0_CONTIG_NUM_ORDER);
+ if (vstart == 0) {
+ XEN_ERR("allocate memory fail.\n");
+ dom0_memory_free(allocated_size);
+ return -ENOMEM;
+ }
+
+ allocated_size += SIZE_PER_BLOCK;
+
+ size = DOM0_MEMBLOCK_SIZE;
+ vaddr = vstart;
+ while (size > 0) {
+ SetPageReserved(virt_to_page(vaddr));
+ vaddr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ pfn = virt_to_pfn(vstart);
+ rsv_mm_info[i].pfn = pfn;
+ rsv_mm_info[i].vir_addr = vstart;
+ }
+
+ sort_viraddr(rsv_mm_info, num_block);
+
+ for (i = 0; i< num_block; i++) {
+
+ /*
+ * This API exchanges MFNs to obtain a block of contiguous
+ * physical addresses; the maximum block size is 2M.
+ */
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+ if (xen_create_contiguous_region(rsv_mm_info[i].vir_addr,
+ DOM0_CONTIG_NUM_ORDER, 0) == 0) {
+ #else
+ if (xen_create_contiguous_region(rsv_mm_info[i].pfn * PAGE_SIZE,
+ DOM0_CONTIG_NUM_ORDER, 0, &dma_handle) == 0) {
+ #endif
+ rsv_mm_info[i].exchange_flag = 1;
+ rsv_mm_info[i].mfn =
+ pfn_to_mfn(rsv_mm_info[i].pfn);
+ rsv_mm_info[i].used = 0;
+ } else {
+ XEN_ERR("exchange memeory fail\n");
+ rsv_mm_info[i].exchange_flag = 0;
+ dom0_dev.fail_times++;
+ if (dom0_dev.fail_times > MAX_EXCHANGE_FAIL_TIME) {
+ dom0_memory_free(rsv_size);
+ return -EFAULT;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+dom0_prepare_memsegs(struct memory_info *meminfo, struct dom0_mm_data *mm_data)
+{
+ uint32_t num_block;
+ int idx;
+
+ /* check if there is a free name buffer */
+ memcpy(mm_data->name, meminfo->name, DOM0_NAME_MAX);
+ mm_data->name[DOM0_NAME_MAX - 1] = '\0';
+ idx = dom0_find_mempos();
+ if (idx < 0)
+ return -1;
+
+ num_block = meminfo->size / SIZE_PER_BLOCK;
+ /* find free memory and new memory segments*/
+ find_free_memory(num_block, mm_data);
+ find_memseg(num_block, mm_data);
+
+ /* update private memory data */
+ mm_data->refcnt++;
+ mm_data->mem_size = meminfo->size;
+
+ /* update global memory data */
+ dom0_dev.mm_data[idx] = mm_data;
+ dom0_dev.num_mem_ctx++;
+ dom0_dev.used_memsize += mm_data->mem_size;
+
+ return 0;
+}
+
+static int
+dom0_check_memory (struct memory_info *meminfo)
+{
+ int idx;
+ uint64_t mem_size;
+
+ /* round memory size to the next even number. */
+ if (meminfo->size % 2)
+ ++meminfo->size;
+
+ mem_size = meminfo->size;
+ if (dom0_dev.num_mem_ctx > NUM_MEM_CTX) {
+ XEN_ERR("Memory data space is full in Dom0 driver\n");
+ return -1;
+ }
+ idx = dom0_find_memdata(meminfo->name);
+ if (idx >= 0) {
+ XEN_ERR("Memory data name %s has already exsited in Dom0 driver.\n",
+ meminfo->name);
+ return -1;
+ }
+ if ((dom0_dev.used_memsize + mem_size) > rsv_memsize) {
+ XEN_ERR("Total size can't be larger than reserved size.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int __init
+dom0_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (rsv_memsize > DOM0_CONFIG_MEMSIZE) {
+ XEN_ERR("The reserved memory size cannot be greater than %d\n",
+ DOM0_CONFIG_MEMSIZE);
+ return -EINVAL;
+ }
+
+ /* Setup the misc device */
+ dom0_dev.miscdev.minor = MISC_DYNAMIC_MINOR;
+ dom0_dev.miscdev.name = "dom0_mm";
+ dom0_dev.miscdev.fops = &data_fops;
+
+ /* register misc char device */
+ if (misc_register(&dom0_dev.miscdev) != 0) {
+ XEN_ERR("Misc device registration failed\n");
+ return -EPERM;
+ }
+
+ mutex_init(&dom0_dev.data_lock);
+ dom0_kobj = kobject_create_and_add("dom0-mm", mm_kobj);
+
+ if (!dom0_kobj) {
+ XEN_ERR("dom0-mm object creation failed\n");
+ misc_deregister(&dom0_dev.miscdev);
+ return -ENOMEM;
+ }
+
+ if (sysfs_create_group(dom0_kobj, &dev_attr_grp)) {
+ kobject_put(dom0_kobj);
+ misc_deregister(&dom0_dev.miscdev);
+ return -EPERM;
+ }
+
+ if (dom0_memory_reserve(rsv_memsize) < 0) {
+ sysfs_remove_group(dom0_kobj, &dev_attr_grp);
+ kobject_put(dom0_kobj);
+ misc_deregister(&dom0_dev.miscdev);
+ return -ENOMEM;
+ }
+
+ XEN_PRINT("####### DPDK Xen Dom0 module loaded #######\n");
+
+ return 0;
+}
+
+static void __exit
+dom0_exit(void)
+{
+ if (rsv_mm_info != NULL)
+ dom0_memory_free(rsv_memsize);
+
+ sysfs_remove_group(dom0_kobj, &dev_attr_grp);
+ kobject_put(dom0_kobj);
+ misc_deregister(&dom0_dev.miscdev);
+
+ XEN_PRINT("####### DPDK Xen Dom0 module unloaded #######\n");
+}
+
+static int
+dom0_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+
+ XEN_PRINT(KERN_INFO "/dev/dom0_mm opened\n");
+ return 0;
+}
+
+static int
+dom0_release(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+ struct dom0_mm_data *mm_data = file->private_data;
+
+ if (mm_data == NULL)
+ return ret;
+
+ mutex_lock(&dom0_dev.data_lock);
+ if (--mm_data->refcnt == 0)
+ ret = dom0_memory_release(mm_data);
+ mutex_unlock(&dom0_dev.data_lock);
+
+ file->private_data = NULL;
+ XEN_PRINT(KERN_INFO "/dev/dom0_mm closed\n");
+ return ret;
+}
+
+static int
+dom0_mmap(struct file *file, struct vm_area_struct *vm)
+{
+ int status = 0;
+ uint32_t idx = vm->vm_pgoff;
+ uint64_t pfn, size = vm->vm_end - vm->vm_start;
+ struct dom0_mm_data *mm_data = file->private_data;
+
+ if(mm_data == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dom0_dev.data_lock);
+ if (idx >= mm_data->num_memseg) {
+ mutex_unlock(&dom0_dev.data_lock);
+ return -EINVAL;
+ }
+
+ if (size > mm_data->seg_info[idx].size){
+ mutex_unlock(&dom0_dev.data_lock);
+ return -EINVAL;
+ }
+
+ XEN_PRINT("mmap memseg idx =%d,size = 0x%llx\n", idx, size);
+
+ pfn = mm_data->seg_info[idx].pfn;
+ mutex_unlock(&dom0_dev.data_lock);
+
+ status = remap_pfn_range(vm, vm->vm_start, pfn, size, PAGE_SHARED);
+
+ return status;
+}
+static int
+dom0_ioctl(struct file *file,
+ unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ int idx, ret;
+ char name[DOM0_NAME_MAX] = {0};
+ struct memory_info meminfo;
+ struct dom0_mm_data *mm_data = file->private_data;
+
+ XEN_PRINT("IOCTL num=0x%0x param=0x%0lx \n", ioctl_num, ioctl_param);
+
+ /**
+ * Switch according to the ioctl called
+ */
+ switch (_IOC_NR(ioctl_num)) {
+ case _IOC_NR(RTE_DOM0_IOCTL_PREPARE_MEMSEG):
+ ret = copy_from_user(&meminfo, (void *)ioctl_param,
+ sizeof(struct memory_info));
+ if (ret)
+ return -EFAULT;
+
+ if (mm_data != NULL) {
+ XEN_ERR("Cannot create memory segment for the same"
+ " file descriptor\n");
+ return -EINVAL;
+ }
+
+ /* Allocate private data */
+ mm_data = vmalloc(sizeof(struct dom0_mm_data));
+ if (!mm_data) {
+ XEN_ERR("Unable to allocate device private data\n");
+ return -ENOMEM;
+ }
+ memset(mm_data, 0, sizeof(struct dom0_mm_data));
+
+ mutex_lock(&dom0_dev.data_lock);
+ /* check whether we can allocate the requested memory */
+ if (dom0_check_memory(&meminfo) < 0) {
+ mutex_unlock(&dom0_dev.data_lock);
+ vfree(mm_data);
+ return -EINVAL;
+ }
+
+ /* allocate memory and create memory segments */
+ if (dom0_prepare_memsegs(&meminfo, mm_data) < 0) {
+ XEN_ERR("create memory segment fail.\n");
+ mutex_unlock(&dom0_dev.data_lock);
+ return -EIO;
+ }
+
+ file->private_data = mm_data;
+ mutex_unlock(&dom0_dev.data_lock);
+ break;
+
+ /* support multiple processes in terms of memory mapping */
+ case _IOC_NR(RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG):
+ ret = copy_from_user(name, (void *)ioctl_param,
+ sizeof(char) * DOM0_NAME_MAX);
+ if (ret)
+ return -EFAULT;
+
+ mutex_lock(&dom0_dev.data_lock);
+ idx = dom0_find_memdata(name);
+ if (idx < 0) {
+ mutex_unlock(&dom0_dev.data_lock);
+ return -EINVAL;
+ }
+
+ mm_data = dom0_dev.mm_data[idx];
+ mm_data->refcnt++;
+ file->private_data = mm_data;
+ mutex_unlock(&dom0_dev.data_lock);
+ break;
+
+ case _IOC_NR(RTE_DOM0_IOCTL_GET_NUM_MEMSEG):
+ ret = copy_to_user((void *)ioctl_param, &mm_data->num_memseg,
+ sizeof(int));
+ if (ret)
+ return -EFAULT;
+ break;
+
+ case _IOC_NR(RTE_DOM0_IOCTL_GET_MEMSEG_INFO):
+ ret = copy_to_user((void *)ioctl_param,
+ &mm_data->seg_info[0],
+ sizeof(struct memseg_info) *
+ mm_data->num_memseg);
+ if (ret)
+ return -EFAULT;
+ break;
+ default:
+ XEN_PRINT("IOCTL default \n");
+ break;
+ }
+
+ return 0;
+}
+
+module_init(dom0_init);
+module_exit(dom0_exit);
+
+module_param(rsv_memsize, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rsv_memsize, "Xen-dom0 reserved memory size(MB).\n");
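
For orientation, the sketch below shows how a user-space process might drive the /dev/dom0_mm device implemented above: prepare a named memory context, query the resulting segments, and mmap the first one (dom0_mmap() reads the segment index from the mmap page offset). This is a minimal sketch, assuming the ioctl numbers and the memory_info/memseg_info layouts come from the shared DPDK header (rte_dom0_common.h); the function name and the "memseg-example" context name are invented for illustration.

    /*
     * Illustrative user-space sketch only -- not part of the patch above.
     * Assumes RTE_DOM0_IOCTL_* and struct memory_info/memseg_info are taken
     * from the shared DPDK header (rte_dom0_common.h).
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int dom0_map_first_segment(void)
    {
        struct memory_info meminfo;               /* name + size (MB), see dom0_check_memory() */
        struct memseg_info seg[DOM0_NUM_MEMSEG];
        int fd, num_seg = 0;
        void *base;

        fd = open("/dev/dom0_mm", O_RDWR);
        if (fd < 0)
            return -1;

        memset(&meminfo, 0, sizeof(meminfo));
        strncpy(meminfo.name, "memseg-example", DOM0_NAME_MAX - 1);
        meminfo.size = 128;                        /* MB; rounded to even by the driver */

        /* carve the reserved 2M blocks into contiguous segments */
        if (ioctl(fd, RTE_DOM0_IOCTL_PREPARE_MEMSEG, &meminfo) < 0)
            goto fail;
        if (ioctl(fd, RTE_DOM0_IOCTL_GET_NUM_MEMSEG, &num_seg) < 0 || num_seg == 0)
            goto fail;
        if (ioctl(fd, RTE_DOM0_IOCTL_GET_MEMSEG_INFO, seg) < 0)
            goto fail;

        /* the page offset is interpreted as the segment index, so offset 0 maps segment 0 */
        base = mmap(NULL, seg[0].size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (base == MAP_FAILED)
            goto fail;

        printf("%d segments, segment 0 (%llu bytes) mapped at %p\n",
               num_seg, (unsigned long long)seg[0].size, base);
        close(fd);
        return 0;
    fail:
        close(fd);
        return -1;
    }
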
diff --git a/src/dpdk_lib18/librte_ether/Makefile b/src/dpdk_lib18/librte_ether/Makefile
new file mode 100755
index 00000000..a461c312
--- /dev/null
+++ b/src/dpdk_lib18/librte_ether/Makefile
@@ -0,0 +1,54 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = libethdev.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+SRCS-y += rte_ethdev.c
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_ether.h
+SYMLINK-y-include += rte_ethdev.h
+SYMLINK-y-include += rte_eth_ctrl.h
+
+# this lib depends upon:
+DEPDIRS-y += lib/librte_eal lib/librte_mempool lib/librte_ring lib/librte_mbuf
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_ether/rte_eth_ctrl.h b/src/dpdk_lib18/librte_ether/rte_eth_ctrl.h
new file mode 100755
index 00000000..642adb76
--- /dev/null
+++ b/src/dpdk_lib18/librte_ether/rte_eth_ctrl.h
@@ -0,0 +1,458 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETH_CTRL_H_
+#define _RTE_ETH_CTRL_H_
+
+/**
+ * @file
+ *
+ * Ethernet device features and related data structures used
+ * by control APIs should be defined in this file.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Feature filter types
+ */
+enum rte_filter_type {
+ RTE_ETH_FILTER_NONE = 0,
+ RTE_ETH_FILTER_MACVLAN,
+ RTE_ETH_FILTER_ETHERTYPE,
+ RTE_ETH_FILTER_TUNNEL,
+ RTE_ETH_FILTER_FDIR,
+ RTE_ETH_FILTER_MAX
+};
+
+/**
+ * Generic operations on filters
+ */
+enum rte_filter_op {
+ RTE_ETH_FILTER_NOP = 0,
+ /**< used to check whether the filter type is supported */
+ RTE_ETH_FILTER_ADD, /**< add filter entry */
+ RTE_ETH_FILTER_UPDATE, /**< update filter entry */
+ RTE_ETH_FILTER_DELETE, /**< delete filter entry */
+ RTE_ETH_FILTER_FLUSH, /**< flush all entries */
+ RTE_ETH_FILTER_GET, /**< get filter entry */
+ RTE_ETH_FILTER_SET, /**< configurations */
+ RTE_ETH_FILTER_INFO, /**< retrieve information */
+ RTE_ETH_FILTER_STATS, /**< retrieve statistics */
+ RTE_ETH_FILTER_OP_MAX
+};
+
+/**
+ * MAC filter type
+ */
+enum rte_mac_filter_type {
+ RTE_MAC_PERFECT_MATCH = 1, /**< exact match of MAC addr. */
+ RTE_MACVLAN_PERFECT_MATCH,
+ /**< exact match of MAC addr and VLAN ID. */
+ RTE_MAC_HASH_MATCH, /**< hash match of MAC addr. */
+ RTE_MACVLAN_HASH_MATCH,
+ /**< hash match of MAC addr and exact match of VLAN ID. */
+};
+
+/**
+ * MAC filter info
+ */
+struct rte_eth_mac_filter {
+ uint8_t is_vf; /**< 1 for VF, 0 for port dev */
+ uint16_t dst_id; /**< VF ID, available when is_vf is 1*/
+ enum rte_mac_filter_type filter_type; /**< MAC filter type */
+ struct ether_addr mac_addr;
+};
+
+/**
+ * Define all structures for Ethertype Filter type.
+ */
+
+#define RTE_ETHTYPE_FLAGS_MAC 0x0001 /**< If set, compare mac */
+#define RTE_ETHTYPE_FLAGS_DROP 0x0002 /**< If set, drop packet when match */
+
+/**
+ * A structure used to define the ethertype filter entry
+ * to support RTE_ETH_FILTER_ETHERTYPE with RTE_ETH_FILTER_ADD,
+ * RTE_ETH_FILTER_DELETE and RTE_ETH_FILTER_GET operations.
+ */
+struct rte_eth_ethertype_filter {
+ struct ether_addr mac_addr; /**< Mac address to match. */
+ uint16_t ether_type; /**< Ether type to match */
+ uint16_t flags; /**< Flags from RTE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /**< Queue to assign matching packets to */
+};
+
+/**
+ * Tunneled type.
+ */
+enum rte_eth_tunnel_type {
+ RTE_TUNNEL_TYPE_NONE = 0,
+ RTE_TUNNEL_TYPE_VXLAN,
+ RTE_TUNNEL_TYPE_GENEVE,
+ RTE_TUNNEL_TYPE_TEREDO,
+ RTE_TUNNEL_TYPE_NVGRE,
+ RTE_TUNNEL_TYPE_MAX,
+};
+
+/**
+ * filter type of tunneling packet
+ */
+#define ETH_TUNNEL_FILTER_OMAC 0x01 /**< filter by outer MAC addr */
+#define ETH_TUNNEL_FILTER_OIP 0x02 /**< filter by outer IP Addr */
+#define ETH_TUNNEL_FILTER_TENID 0x04 /**< filter by tenant ID */
+#define ETH_TUNNEL_FILTER_IMAC 0x08 /**< filter by inner MAC addr */
+#define ETH_TUNNEL_FILTER_IVLAN 0x10 /**< filter by inner VLAN ID */
+#define ETH_TUNNEL_FILTER_IIP 0x20 /**< filter by inner IP addr */
+
+#define RTE_TUNNEL_FILTER_IMAC_IVLAN (ETH_TUNNEL_FILTER_IMAC | \
+ ETH_TUNNEL_FILTER_IVLAN)
+#define RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID (ETH_TUNNEL_FILTER_IMAC | \
+ ETH_TUNNEL_FILTER_IVLAN | \
+ ETH_TUNNEL_FILTER_TENID)
+#define RTE_TUNNEL_FILTER_IMAC_TENID (ETH_TUNNEL_FILTER_IMAC | \
+ ETH_TUNNEL_FILTER_TENID)
+#define RTE_TUNNEL_FILTER_OMAC_TENID_IMAC (ETH_TUNNEL_FILTER_OMAC | \
+ ETH_TUNNEL_FILTER_TENID | \
+ ETH_TUNNEL_FILTER_IMAC)
+
+/**
+ * Select IPv4 or IPv6 for tunnel filters.
+ */
+enum rte_tunnel_iptype {
+ RTE_TUNNEL_IPTYPE_IPV4 = 0, /**< IPv4. */
+ RTE_TUNNEL_IPTYPE_IPV6, /**< IPv6. */
+};
+
+/**
+ * Tunneling Packet filter configuration.
+ */
+struct rte_eth_tunnel_filter_conf {
+ struct ether_addr *outer_mac; /**< Outer MAC address filter. */
+ struct ether_addr *inner_mac; /**< Inner MAC address filter. */
+ uint16_t inner_vlan; /**< Inner VLAN filter. */
+ enum rte_tunnel_iptype ip_type; /**< IP address type. */
+ union {
+ uint32_t ipv4_addr; /**< IPv4 source address to match. */
+ uint32_t ipv6_addr[4]; /**< IPv6 source address to match. */
+ } ip_addr; /**< IPv4/IPv6 source address to match (union of above). */
+
+ uint16_t filter_type; /**< Filter type. */
+ enum rte_eth_tunnel_type tunnel_type; /**< Tunnel Type. */
+ uint32_t tenant_id; /**< Tenant number. */
+ uint16_t queue_id; /**< Queue number. */
+};
+
+#define RTE_ETH_FDIR_MAX_FLEXLEN 16 /**< Max length of flexbytes. */
+
+/**
+ * Flow type
+ */
+enum rte_eth_flow_type {
+ RTE_ETH_FLOW_TYPE_NONE = 0,
+ RTE_ETH_FLOW_TYPE_UDPV4,
+ RTE_ETH_FLOW_TYPE_TCPV4,
+ RTE_ETH_FLOW_TYPE_SCTPV4,
+ RTE_ETH_FLOW_TYPE_IPV4_OTHER,
+ RTE_ETH_FLOW_TYPE_FRAG_IPV4,
+ RTE_ETH_FLOW_TYPE_UDPV6,
+ RTE_ETH_FLOW_TYPE_TCPV6,
+ RTE_ETH_FLOW_TYPE_SCTPV6,
+ RTE_ETH_FLOW_TYPE_IPV6_OTHER,
+ RTE_ETH_FLOW_TYPE_FRAG_IPV6,
+ RTE_ETH_FLOW_TYPE_MAX = 64,
+};
+
+/**
+ * A structure used to define the input for IPV4 flow
+ */
+struct rte_eth_ipv4_flow {
+ uint32_t src_ip; /**< IPv4 source address to match. */
+ uint32_t dst_ip; /**< IPv4 destination address to match. */
+};
+
+/**
+ * A structure used to define the input for IPV4 UDP flow
+ */
+struct rte_eth_udpv4_flow {
+ struct rte_eth_ipv4_flow ip; /**< IPv4 fields to match. */
+ uint16_t src_port; /**< UDP source port to match. */
+ uint16_t dst_port; /**< UDP destination port to match. */
+};
+
+/**
+ * A structure used to define the input for IPV4 TCP flow
+ */
+struct rte_eth_tcpv4_flow {
+ struct rte_eth_ipv4_flow ip; /**< IPv4 fields to match. */
+ uint16_t src_port; /**< TCP source port to match. */
+ uint16_t dst_port; /**< TCP destination port to match. */
+};
+
+/**
+ * A structure used to define the input for IPV4 SCTP flow
+ */
+struct rte_eth_sctpv4_flow {
+ struct rte_eth_ipv4_flow ip; /**< IPv4 fields to match. */
+ uint32_t verify_tag; /**< Verify tag to match */
+};
+
+/**
+ * A structure used to define the input for IPV6 flow
+ */
+struct rte_eth_ipv6_flow {
+ uint32_t src_ip[4]; /**< IPv6 source address to match. */
+ uint32_t dst_ip[4]; /**< IPv6 destination address to match. */
+};
+
+/**
+ * A structure used to define the input for IPV6 UDP flow
+ */
+struct rte_eth_udpv6_flow {
+ struct rte_eth_ipv6_flow ip; /**< IPv6 fields to match. */
+ uint16_t src_port; /**< UDP source port to match. */
+ uint16_t dst_port; /**< UDP destination port to match. */
+};
+
+/**
+ * A structure used to define the input for IPV6 TCP flow
+ */
+struct rte_eth_tcpv6_flow {
+ struct rte_eth_ipv6_flow ip; /**< IPv6 fields to match. */
+ uint16_t src_port; /**< TCP source port to match. */
+ uint16_t dst_port; /**< TCP destination port to match. */
+};
+
+/**
+ * A structure used to define the input for IPV6 SCTP flow
+ */
+struct rte_eth_sctpv6_flow {
+ struct rte_eth_ipv6_flow ip; /**< IPv6 fields to match. */
+ uint32_t verify_tag; /**< Verify tag to match */
+};
+
+/**
+ * A union containing the inputs for all flow types
+ */
+union rte_eth_fdir_flow {
+ struct rte_eth_udpv4_flow udp4_flow;
+ struct rte_eth_tcpv4_flow tcp4_flow;
+ struct rte_eth_sctpv4_flow sctp4_flow;
+ struct rte_eth_ipv4_flow ip4_flow;
+ struct rte_eth_udpv6_flow udp6_flow;
+ struct rte_eth_tcpv6_flow tcp6_flow;
+ struct rte_eth_sctpv6_flow sctp6_flow;
+ struct rte_eth_ipv6_flow ipv6_flow;
+};
+
+/**
+ * A structure used to contain the extended input of a flow
+ */
+struct rte_eth_fdir_flow_ext {
+ uint16_t vlan_tci;
+ uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+ /**< Filled with the flexible payload bytes to match. */
+};
+
+/**
+ * A structure used to define the input for a flow director filter entry
+ */
+struct rte_eth_fdir_input {
+ enum rte_eth_flow_type flow_type; /**< Type of flow */
+ union rte_eth_fdir_flow flow;
+ /**< Flow fields to match, dependent on flow_type */
+ uint8_t ttl;
+ struct rte_eth_fdir_flow_ext flow_ext;
+ /**< Additional fields to match */
+};
+
+/**
+ * Behavior to be taken if an FDIR filter matches
+ */
+enum rte_eth_fdir_behavior {
+ RTE_ETH_FDIR_ACCEPT = 0,
+ RTE_ETH_FDIR_REJECT,
+};
+
+/**
+ * Flow director report status
+ * It defines what will be reported if FDIR entry is matched.
+ */
+enum rte_eth_fdir_status {
+ RTE_ETH_FDIR_NO_REPORT_STATUS = 0, /**< Report nothing. */
+ RTE_ETH_FDIR_REPORT_ID, /**< Only report FD ID. */
+ RTE_ETH_FDIR_REPORT_ID_FLEX_4, /**< Report FD ID and 4 flex bytes. */
+ RTE_ETH_FDIR_REPORT_FLEX_8, /**< Report 8 flex bytes. */
+};
+
+/**
+ * A structure used to define the action to take when an FDIR packet filter matches.
+ */
+struct rte_eth_fdir_action {
+ uint16_t rx_queue; /**< Queue assigned to if FDIR match. */
+ enum rte_eth_fdir_behavior behavior; /**< Behavior to be taken */
+ enum rte_eth_fdir_status report_status; /**< Status report option */
+ uint8_t flex_off;
+ /**< If report_status is RTE_ETH_FDIR_REPORT_ID_FLEX_4 or
+ RTE_ETH_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+ flex bytes start from in flexible payload. */
+};
+
+/**
+ * A structure used to define the flow director filter entry for the filter_ctrl API.
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct rte_eth_fdir_filter {
+ uint32_t soft_id;
+ /**< ID; a unique value is required when dealing with an FDIR entry */
+ struct rte_eth_fdir_input input; /**< Input set */
+ struct rte_eth_fdir_action action; /**< Action to take on match */
+};
+
+/**
+ * Payload type
+ */
+enum rte_eth_payload_type {
+ RTE_ETH_PAYLOAD_UNKNOWN = 0,
+ RTE_ETH_L2_PAYLOAD,
+ RTE_ETH_L3_PAYLOAD,
+ RTE_ETH_L4_PAYLOAD,
+ RTE_ETH_PAYLOAD_MAX = 8,
+};
+
+/**
+ * A structure used to select bytes extracted from the protocol layers to
+ * flexible payload for filter
+ */
+struct rte_eth_flex_payload_cfg {
+ enum rte_eth_payload_type type; /**< Payload type */
+ uint16_t src_offset[RTE_ETH_FDIR_MAX_FLEXLEN];
+ /**< Offset in bytes from the beginning of the packet's payload;
+ src_offset[i] indicates the offset of flex byte i in the original
+ packet payload. This value should be less than
+ flex_payload_limit in struct rte_eth_fdir_info. */
+};
+
+/**
+ * A structure used to define FDIR masks for flexible payload
+ * for each flow type
+ */
+struct rte_eth_fdir_flex_mask {
+ enum rte_eth_flow_type flow_type; /**< Flow type */
+ uint8_t mask[RTE_ETH_FDIR_MAX_FLEXLEN];
+ /**< Mask for the whole flexible payload */
+};
+
+/**
+ * A structure used to define all flexible payload related settings,
+ * including the flex payload and the flex mask
+ */
+struct rte_eth_fdir_flex_conf {
+ uint16_t nb_payloads; /**< The number of following payload cfg */
+ uint16_t nb_flexmasks; /**< The number of following mask */
+ struct rte_eth_flex_payload_cfg flex_set[RTE_ETH_PAYLOAD_MAX];
+ /**< Flex payload configuration for each payload type */
+ struct rte_eth_fdir_flex_mask flex_mask[RTE_ETH_FLOW_TYPE_MAX];
+ /**< Flex mask configuration for each flow type */
+};
+
+/**
+ * Flow Director setting modes: none, signature or perfect.
+ */
+enum rte_fdir_mode {
+ RTE_FDIR_MODE_NONE = 0, /**< Disable FDIR support. */
+ RTE_FDIR_MODE_SIGNATURE, /**< Enable FDIR signature filter mode. */
+ RTE_FDIR_MODE_PERFECT, /**< Enable FDIR perfect filter mode. */
+};
+
+/**
+ * A structure used to get the information of flow director filter.
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_INFO operation.
+ * It includes the mode, flexible payload configuration information,
+ * capabilities, supported flow types, and flexible payload characteristics.
+ * It can be retrieved to help tailor the configuration to a specific device.
+ */
+struct rte_eth_fdir_info {
+ enum rte_fdir_mode mode; /**< Flow director mode */
+ struct rte_eth_fdir_flex_conf flex_conf;
+ /**< Flex payload configuration information */
+ uint32_t guarant_spc; /**< Guaranteed spaces.*/
+ uint32_t best_spc; /**< Best effort spaces.*/
+ uint32_t flow_types_mask[RTE_ETH_FLOW_TYPE_MAX / sizeof(uint32_t)];
+ /**< Bit mask for every supported flow type. */
+ uint32_t max_flexpayload; /**< Total flex payload in bytes. */
+ uint32_t flex_payload_unit;
+ /**< Flexible payload unit in bytes. Size and alignments of all flex
+ payload segments should be multiples of this value. */
+ uint32_t max_flex_payload_segment_num;
+ /**< Max number of flexible payload continuous segments.
+ Each segment should be a multiple of flex_payload_unit.*/
+ uint16_t flex_payload_limit;
+ /**< Maximum src_offset in bytes allowed. It indicates that
+ src_offset[i] in struct rte_eth_flex_payload_cfg should be
+ less than this value. */
+ uint32_t flex_bitmask_unit;
+ /**< Flex bitmask unit in bytes. Size of flex bitmasks should
+ be a multiple of this value. */
+ uint32_t max_flex_bitmask_num;
+ /**< Max supported size of flex bitmasks in flex_bitmask_unit */
+};
+
+/**
+ * A structure used to define the statistics of flow director.
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_STATS operation.
+ */
+struct rte_eth_fdir_stats {
+ uint32_t collision; /**< Number of filters with collision. */
+ uint32_t free; /**< Number of free filters. */
+ uint32_t maxhash;
+ /**< The lookup hash value of the added filter that updated the value
+ of the MAXLEN field */
+ uint32_t maxlen; /**< Longest linked list of filters. */
+ uint64_t add; /**< Number of added filters. */
+ uint64_t remove; /**< Number of removed filters. */
+ uint64_t f_add; /**< Number of failed added filters. */
+ uint64_t f_remove; /**< Number of failed removed filters. */
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ETH_CTRL_H_ */
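
As a usage illustration of the ethertype filter defined in this header, the sketch below adds a rule that steers ARP frames (ethertype 0x0806) on a port to RX queue 1. It assumes the generic entry point rte_eth_dev_filter_ctrl() declared in rte_ethdev.h; error handling is left to the caller.

    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_eth_ctrl.h>

    /* Sketch: steer ARP frames on 'port_id' to RX queue 1 via the filter_ctrl API. */
    static int add_arp_ethertype_filter(uint8_t port_id)
    {
        struct rte_eth_ethertype_filter filter;

        memset(&filter, 0, sizeof(filter));
        filter.ether_type = 0x0806;   /* ARP */
        filter.flags = 0;             /* no MAC compare, do not drop on match */
        filter.queue = 1;             /* deliver matching packets to queue 1 */

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
                                       RTE_ETH_FILTER_ADD, &filter);
    }
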
diff --git a/src/dpdk_lib18/librte_ether/rte_ethdev.c b/src/dpdk_lib18/librte_ether/rte_ethdev.c
new file mode 100755
index 00000000..38cf247e
--- /dev/null
+++ b/src/dpdk_lib18/librte_ether/rte_ethdev.c
@@ -0,0 +1,3271 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_errno.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "rte_ether.h"
+#include "rte_ethdev.h"
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#define PMD_DEBUG_TRACE(fmt, args...) do { \
+ RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
+ } while (0)
+#else
+#define PMD_DEBUG_TRACE(fmt, args...)
+#endif
+
+/* Macros for restricting functions to the primary process only */
+#define PROC_PRIMARY_OR_ERR_RET(retval) do { \
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
+ PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
+ return (retval); \
+ } \
+} while(0)
+#define PROC_PRIMARY_OR_RET() do { \
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
+ PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
+ return; \
+ } \
+} while(0)
+
+/* Macros to check for invalid function pointers in the dev_ops structure */
+#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
+ if ((func) == NULL) { \
+ PMD_DEBUG_TRACE("Function not supported\n"); \
+ return (retval); \
+ } \
+} while(0)
+#define FUNC_PTR_OR_RET(func) do { \
+ if ((func) == NULL) { \
+ PMD_DEBUG_TRACE("Function not supported\n"); \
+ return; \
+ } \
+} while(0)
+
+static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
+struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
+static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
+static uint8_t nb_ports = 0;
+
+/* spinlock for eth device callbacks */
+static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* store statistics names and their offsets in the stats structure */
+struct rte_eth_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+static struct rte_eth_xstats_name_off rte_stats_strings[] = {
+ {"rx_packets", offsetof(struct rte_eth_stats, ipackets)},
+ {"tx_packets", offsetof(struct rte_eth_stats, opackets)},
+ {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)},
+ {"tx_bytes", offsetof(struct rte_eth_stats, obytes)},
+ {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
+ {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
+ {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)},
+ {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)},
+ {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
+ {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)},
+ {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)},
+ {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)},
+ {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)},
+ {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)},
+ {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)},
+ {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)},
+};
+#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
+
+static struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
+ {"rx_packets", offsetof(struct rte_eth_stats, q_ipackets)},
+ {"rx_bytes", offsetof(struct rte_eth_stats, q_ibytes)},
+};
+#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
+ sizeof(rte_rxq_stats_strings[0]))
+
+static struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
+ {"tx_packets", offsetof(struct rte_eth_stats, q_opackets)},
+ {"tx_bytes", offsetof(struct rte_eth_stats, q_obytes)},
+ {"tx_errors", offsetof(struct rte_eth_stats, q_errors)},
+};
+#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
+ sizeof(rte_txq_stats_strings[0]))
+
+
+/**
+ * The user application callback description.
+ *
+ * It contains callback address to be registered by user application,
+ * the pointer to the parameters for callback, and the event type.
+ */
+struct rte_eth_dev_callback {
+ TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
+ rte_eth_dev_cb_fn cb_fn; /**< Callback address */
+ void *cb_arg; /**< Parameter for callback */
+ enum rte_eth_event_type event; /**< Interrupt event type */
+ uint32_t active; /**< Callback is executing */
+};
+
+enum {
+ STAT_QMAP_TX = 0,
+ STAT_QMAP_RX
+};
+
+static inline void
+rte_eth_dev_data_alloc(void)
+{
+ const unsigned flags = 0;
+ const struct rte_memzone *mz;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY){
+ mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
+ RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
+ rte_socket_id(), flags);
+ } else
+ mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
+ if (mz == NULL)
+ rte_panic("Cannot allocate memzone for ethernet port data\n");
+
+ rte_eth_dev_data = mz->addr;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ memset(rte_eth_dev_data, 0,
+ RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
+}
+
+static struct rte_eth_dev *
+rte_eth_dev_allocated(const char *name)
+{
+ unsigned i;
+
+ for (i = 0; i < nb_ports; i++) {
+ if (strcmp(rte_eth_devices[i].data->name, name) == 0)
+ return &rte_eth_devices[i];
+ }
+ return NULL;
+}
+
+struct rte_eth_dev *
+rte_eth_dev_allocate(const char *name)
+{
+ struct rte_eth_dev *eth_dev;
+
+ if (nb_ports == RTE_MAX_ETHPORTS) {
+ PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
+ return NULL;
+ }
+
+ if (rte_eth_dev_data == NULL)
+ rte_eth_dev_data_alloc();
+
+ if (rte_eth_dev_allocated(name) != NULL) {
+ PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n", name);
+ return NULL;
+ }
+
+ eth_dev = &rte_eth_devices[nb_ports];
+ eth_dev->data = &rte_eth_dev_data[nb_ports];
+ snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
+ eth_dev->data->port_id = nb_ports++;
+ return eth_dev;
+}
+
+static int
+rte_eth_dev_init(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ struct eth_driver *eth_drv;
+ struct rte_eth_dev *eth_dev;
+ char ethdev_name[RTE_ETH_NAME_MAX_LEN];
+
+ int diag;
+
+ eth_drv = (struct eth_driver *)pci_drv;
+
+ /* Create unique Ethernet device name using PCI address */
+ snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "%d:%d.%d",
+ pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+ eth_dev = rte_eth_dev_allocate(ethdev_name);
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY){
+ eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
+ eth_drv->dev_private_size,
+ RTE_CACHE_LINE_SIZE);
+ if (eth_dev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private port data\n");
+ }
+ eth_dev->pci_dev = pci_dev;
+ eth_dev->driver = eth_drv;
+ eth_dev->data->rx_mbuf_alloc_failed = 0;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(eth_dev->callbacks));
+
+ /*
+ * Set the default MTU.
+ */
+ eth_dev->data->mtu = ETHER_MTU;
+
+ /* Invoke PMD device initialization function */
+ diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
+ if (diag == 0)
+ return (0);
+
+ PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)"
+ " failed\n", pci_drv->name,
+ (unsigned) pci_dev->id.vendor_id,
+ (unsigned) pci_dev->id.device_id);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+ nb_ports--;
+ return diag;
+}
+
+/**
+ * Register an Ethernet [Poll Mode] driver.
+ *
+ * Function invoked by the initialization function of an Ethernet driver
+ * to simultaneously register itself as a PCI driver and as an Ethernet
+ * Poll Mode Driver.
+ * Invokes the rte_eal_pci_register() function to register the *pci_drv*
+ * structure embedded in the *eth_drv* structure, after having stored the
+ * address of the rte_eth_dev_init() function in the *devinit* field of
+ * the *pci_drv* structure.
+ * During the PCI probing phase, the rte_eth_dev_init() function is
+ * invoked for each PCI [Ethernet device] matching the embedded PCI
+ * identifiers provided by the driver.
+ */
+void
+rte_eth_driver_register(struct eth_driver *eth_drv)
+{
+ eth_drv->pci_drv.devinit = rte_eth_dev_init;
+ rte_eal_pci_register(&eth_drv->pci_drv);
+}
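
To make the registration path above concrete, a PMD typically embeds its rte_pci_driver inside a struct eth_driver and hands it to rte_eth_driver_register() from its init hook. The sketch below uses invented my_pmd_* names; only rte_eth_driver_register() and the eth_driver fields referenced by rte_eth_dev_init() above are taken from this file.

    /* Hypothetical PMD registration sketch -- the my_pmd_* names are invented. */
    #include <rte_pci.h>
    #include <rte_ethdev.h>

    struct my_pmd_adapter { int placeholder; };    /* per-device private state */

    static struct rte_pci_id my_pmd_pci_ids[] = {
        /* { RTE_PCI_DEVICE(vendor_id, device_id) }, ... */
        { .vendor_id = 0 },                        /* sentinel */
    };

    static int
    my_pmd_dev_init(struct eth_driver *eth_drv, struct rte_eth_dev *eth_dev)
    {
        (void)eth_drv;
        (void)eth_dev;                             /* set up rx/tx burst functions here */
        return 0;
    }

    static struct eth_driver rte_my_pmd = {
        .pci_drv = {
            .name = "rte_my_pmd",
            .id_table = my_pmd_pci_ids,
            .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
        .eth_dev_init = my_pmd_dev_init,           /* invoked once per matching device */
        .dev_private_size = sizeof(struct my_pmd_adapter),
    };

    /* Called from the PMD's init function; PCI probing then invokes
     * rte_eth_dev_init() above for every device matching my_pmd_pci_ids. */
    static void my_pmd_register(void)
    {
        rte_eth_driver_register(&rte_my_pmd);
    }
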
+
+int
+rte_eth_dev_socket_id(uint8_t port_id)
+{
+ if (port_id >= nb_ports)
+ return -1;
+ return rte_eth_devices[port_id].pci_dev->numa_node;
+}
+
+uint8_t
+rte_eth_dev_count(void)
+{
+ return (nb_ports);
+}
+
+static int
+rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = dev->data->nb_rx_queues;
+ void **rxq;
+ unsigned i;
+
+ if (dev->data->rx_queues == NULL) { /* first time configuration */
+ dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
+ sizeof(dev->data->rx_queues[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (dev->data->rx_queues == NULL) {
+ dev->data->nb_rx_queues = 0;
+ return -(ENOMEM);
+ }
+ } else { /* re-configure */
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+
+ rxq = dev->data->rx_queues;
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->rx_queue_release)(rxq[i]);
+ rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL)
+ return -(ENOMEM);
+
+ if (nb_queues > old_nb_queues)
+ memset(rxq + old_nb_queues, 0,
+ sizeof(rxq[0]) * (nb_queues - old_nb_queues));
+
+ dev->data->rx_queues = rxq;
+
+ }
+ dev->data->nb_rx_queues = nb_queues;
+ return (0);
+}
+
+int
+rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
+
+ return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
+
+}
+
+int
+rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
+
+ return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
+
+}
+
+int
+rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (tx_queue_id >= dev->data->nb_tx_queues) {
+ PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ return -EINVAL;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
+
+ return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
+
+}
+
+int
+rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (tx_queue_id >= dev->data->nb_tx_queues) {
+ PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ return -EINVAL;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
+
+ return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
+
+}
+
+static int
+rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = dev->data->nb_tx_queues;
+ void **txq;
+ unsigned i;
+
+ if (dev->data->tx_queues == NULL) { /* first time configuration */
+ dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
+ sizeof(dev->data->tx_queues[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (dev->data->tx_queues == NULL) {
+ dev->data->nb_tx_queues = 0;
+ return -(ENOMEM);
+ }
+ } else { /* re-configure */
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+
+ txq = dev->data->tx_queues;
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->tx_queue_release)(txq[i]);
+ txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return -(ENOMEM);
+
+ if (nb_queues > old_nb_queues)
+ memset(txq + old_nb_queues, 0,
+ sizeof(txq[0]) * (nb_queues - old_nb_queues));
+
+ dev->data->tx_queues = txq;
+
+ }
+ dev->data->nb_tx_queues = nb_queues;
+ return (0);
+}
+
+static int
+rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
+ const struct rte_eth_conf *dev_conf)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+ if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+ /* check multi-queue mode */
+ if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
+ (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
+ (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
+ (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
+ /* SRIOV only works in VMDq-enabled mode */
+ PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
+ " SRIOV active, "
+ "wrong VMDQ mq_mode rx %u tx %u\n",
+ port_id,
+ dev_conf->rxmode.mq_mode,
+ dev_conf->txmode.mq_mode);
+ return (-EINVAL);
+ }
+
+ switch (dev_conf->rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_RSS:
+ case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
+ PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
+ " SRIOV active, "
+ "unsupported VMDQ mq_mode rx %u\n",
+ port_id, dev_conf->rxmode.mq_mode);
+ return (-EINVAL);
+ default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
+ /* if no mq mode is configured, use the default scheme */
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+ break;
+ }
+
+ switch (dev_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ /* DCB VMDQ in SRIOV mode, not implemented yet */
+ PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
+ " SRIOV active, "
+ "unsupported VMDQ mq_mode tx %u\n",
+ port_id, dev_conf->txmode.mq_mode);
+ return (-EINVAL);
+ default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+ /* if no mq mode is configured, use the default scheme */
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+ if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+ break;
+ }
+
+ /* check valid queue number */
+ if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+ (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
+ "queue number must less equal to %d\n",
+ port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+ return (-EINVAL);
+ }
+ } else {
+ /* For VMDq+DCB mode, check our configuration before we go further */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_conf *conf;
+
+ if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
+ "!= %d\n",
+ port_id, ETH_VMDQ_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
+ if (! (conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
+ "nb_queue_pools must be %d or %d\n",
+ port_id, ETH_16_POOLS, ETH_32_POOLS);
+ return (-EINVAL);
+ }
+ }
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+ if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
+ "!= %d\n",
+ port_id, ETH_VMDQ_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
+ if (! (conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
+ "nb_queue_pools != %d or nb_queue_pools "
+ "!= %d\n",
+ port_id, ETH_16_POOLS, ETH_32_POOLS);
+ return (-EINVAL);
+ }
+ }
+
+ /* For DCB mode check our configuration before we go further */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ const struct rte_eth_dcb_rx_conf *conf;
+
+ if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
+ "!= %d\n",
+ port_id, ETH_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
+ if (! (conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
+ "nb_tcs != %d or nb_tcs "
+ "!= %d\n",
+ port_id, ETH_4_TCS, ETH_8_TCS);
+ return (-EINVAL);
+ }
+ }
+
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ const struct rte_eth_dcb_tx_conf *conf;
+
+ if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
+ "!= %d\n",
+ port_id, ETH_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
+ if (! (conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
+ "nb_tcs != %d or nb_tcs "
+ "!= %d\n",
+ port_id, ETH_4_TCS, ETH_8_TCS);
+ return (-EINVAL);
+ }
+ }
+ }
+ return 0;
+}
+
+int
+rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
+ const struct rte_eth_conf *dev_conf)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ int diag;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-EINVAL);
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+ if (dev->data->dev_started) {
+ PMD_DEBUG_TRACE(
+ "port %d must be stopped to allow configuration\n", port_id);
+ return (-EBUSY);
+ }
+
+ /*
+ * Check that the numbers of RX and TX queues are not greater
+ * than the maximum number of RX and TX queues supported by the
+ * configured device.
+ */
+ (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+ if (nb_rx_q > dev_info.max_rx_queues) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
+ port_id, nb_rx_q, dev_info.max_rx_queues);
+ return (-EINVAL);
+ }
+ if (nb_rx_q == 0) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
+ return (-EINVAL);
+ }
+
+ if (nb_tx_q > dev_info.max_tx_queues) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
+ port_id, nb_tx_q, dev_info.max_tx_queues);
+ return (-EINVAL);
+ }
+ if (nb_tx_q == 0) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
+ return (-EINVAL);
+ }
+
+ /* Copy the dev_conf parameter into the dev structure */
+ memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+
+ /*
+ * If link state interrupt is enabled, check that the
+ * device supports it.
+ */
+ if (dev_conf->intr_conf.lsc == 1) {
+ const struct rte_pci_driver *pci_drv = &dev->driver->pci_drv;
+
+ if (!(pci_drv->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
+ PMD_DEBUG_TRACE("driver %s does not support lsc\n",
+ pci_drv->name);
+ return (-EINVAL);
+ }
+ }
+
+ /*
+ * If jumbo frames are enabled, check that the maximum RX packet
+ * length is supported by the configured device.
+ */
+ if (dev_conf->rxmode.jumbo_frame == 1) {
+ if (dev_conf->rxmode.max_rx_pkt_len >
+ dev_info.max_rx_pktlen) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
+ " > max valid value %u\n",
+ port_id,
+ (unsigned)dev_conf->rxmode.max_rx_pkt_len,
+ (unsigned)dev_info.max_rx_pktlen);
+ return (-EINVAL);
+ }
+ else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
+ " < min valid value %u\n",
+ port_id,
+ (unsigned)dev_conf->rxmode.max_rx_pkt_len,
+ (unsigned)ETHER_MIN_LEN);
+ return (-EINVAL);
+ }
+ } else {
+ if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
+ dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
+ /* Use default value */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ ETHER_MAX_LEN;
+ }
+
+ /* multiple queue mode checking */
+ diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
+ if (diag != 0) {
+ PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
+ port_id, diag);
+ return diag;
+ }
+
+ /*
+ * Setup new number of RX/TX queues and reconfigure device.
+ */
+ diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
+ if (diag != 0) {
+ PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
+ port_id, diag);
+ return diag;
+ }
+
+ diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
+ if (diag != 0) {
+ PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
+ port_id, diag);
+ rte_eth_dev_rx_queue_config(dev, 0);
+ return diag;
+ }
+
+ diag = (*dev->dev_ops->dev_configure)(dev);
+ if (diag != 0) {
+ PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
+ port_id, diag);
+ rte_eth_dev_rx_queue_config(dev, 0);
+ rte_eth_dev_tx_queue_config(dev, 0);
+ return diag;
+ }
+
+ return 0;
+}
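
To put these checks in context, the usual single-queue bring-up from an application is configure, then per-queue setup, then start. A minimal sketch follows, assuming 'pool' is an mbuf mempool the caller created earlier and that the descriptor counts are arbitrary placeholders.

    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    /* Sketch: bring up 'port_id' with one RX and one TX queue. */
    static int port_bringup(uint8_t port_id, struct rte_mempool *pool)
    {
        struct rte_eth_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        conf.rxmode.mq_mode = ETH_MQ_RX_NONE;      /* single queue, no RSS/DCB */

        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret < 0)
            return ret;

        ret = rte_eth_rx_queue_setup(port_id, 0, 128,
                                     rte_eth_dev_socket_id(port_id), NULL, pool);
        if (ret < 0)
            return ret;

        ret = rte_eth_tx_queue_setup(port_id, 0, 256,
                                     rte_eth_dev_socket_id(port_id), NULL);
        if (ret < 0)
            return ret;

        return rte_eth_dev_start(port_id);
    }
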
+
+static void
+rte_eth_dev_config_restore(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct ether_addr addr;
+ uint16_t i;
+ uint32_t pool = 0;
+
+ dev = &rte_eth_devices[port_id];
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active)
+ pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
+
+ /* replay MAC address configuration */
+ for (i = 0; i < dev_info.max_mac_addrs; i++) {
+ addr = dev->data->mac_addrs[i];
+
+ /* skip zero address */
+ if (is_zero_ether_addr(&addr))
+ continue;
+
+ /* add address to the hardware */
+ if (*dev->dev_ops->mac_addr_add &&
+ dev->data->mac_pool_sel[i] & (1ULL << pool))
+ (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
+ else {
+ PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
+ port_id);
+ /* exit the loop but not return an error */
+ break;
+ }
+ }
+
+ /* replay promiscuous configuration */
+ if (rte_eth_promiscuous_get(port_id) == 1)
+ rte_eth_promiscuous_enable(port_id);
+ else if (rte_eth_promiscuous_get(port_id) == 0)
+ rte_eth_promiscuous_disable(port_id);
+
+ /* replay allmulticast configuration */
+ if (rte_eth_allmulticast_get(port_id) == 1)
+ rte_eth_allmulticast_enable(port_id);
+ else if (rte_eth_allmulticast_get(port_id) == 0)
+ rte_eth_allmulticast_disable(port_id);
+}
+
+int
+rte_eth_dev_start(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+ int diag;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
+ return (-EINVAL);
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+ if (dev->data->dev_started != 0) {
+ PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ " already started\n",
+ port_id);
+ return (0);
+ }
+
+ diag = (*dev->dev_ops->dev_start)(dev);
+ if (diag == 0)
+ dev->data->dev_started = 1;
+ else
+ return diag;
+
+ rte_eth_dev_config_restore(port_id);
+
+ return 0;
+}
+
+void
+rte_eth_dev_stop(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_RET();
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%" PRIu8 "\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+ if (dev->data->dev_started == 0) {
+ PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ " already stopped\n",
+ port_id);
+ return;
+ }
+
+ dev->data->dev_started = 0;
+ (*dev->dev_ops->dev_stop)(dev);
+}
+
+int
+rte_eth_dev_set_link_up(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
+ return (*dev->dev_ops->dev_set_link_up)(dev);
+}
+
+int
+rte_eth_dev_set_link_down(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -EINVAL;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
+ return (*dev->dev_ops->dev_set_link_down)(dev);
+}
+
+void
+rte_eth_dev_close(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_RET();
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
+ dev->data->dev_started = 0;
+ (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ int ret;
+ uint32_t mbp_buf_size;
+ struct rte_eth_dev *dev;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_eth_dev_info dev_info;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-EINVAL);
+ }
+ dev = &rte_eth_devices[port_id];
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ return (-EINVAL);
+ }
+
+ if (dev->data->dev_started) {
+ PMD_DEBUG_TRACE(
+ "port %d must be stopped to allow configuration\n", port_id);
+ return -EBUSY;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
+
+ /*
+ * Check the size of the mbuf data buffer.
+ * This value must be provided in the private data of the memory pool.
+ * First check that the memory pool has a valid private data.
+ */
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
+ PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
+ mp->name, (int) mp->private_data_size,
+ (int) sizeof(struct rte_pktmbuf_pool_private));
+ return (-ENOSPC);
+ }
+ mbp_priv = rte_mempool_get_priv(mp);
+ mbp_buf_size = mbp_priv->mbuf_data_room_size;
+
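+ /* The mbuf data room must cover the headroom plus the smallest RX
+ * buffer the device can accept (dev_info.min_rx_bufsize). */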
+ if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
+ PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
+ "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
+ "=%d)\n",
+ mp->name,
+ (int)mbp_buf_size,
+ (int)(RTE_PKTMBUF_HEADROOM +
+ dev_info.min_rx_bufsize),
+ (int)RTE_PKTMBUF_HEADROOM,
+ (int)dev_info.min_rx_bufsize);
+ return (-EINVAL);
+ }
+
+ if (rx_conf == NULL)
+ rx_conf = &dev_info.default_rxconf;
+
+ ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
+ socket_id, rx_conf, mp);
+ if (!ret) {
+ if (!dev->data->min_rx_buf_size ||
+ dev->data->min_rx_buf_size > mbp_buf_size)
+ dev->data->min_rx_buf_size = mbp_buf_size;
+ }
+
+ return ret;
+}
+
+int
+rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ /* This function is only safe when called from the primary process
+ * in a multi-process setup*/
+ PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+ if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-EINVAL);
+ }
+ dev = &rte_eth_devices[port_id];
+ if (tx_queue_id >= dev->data->nb_tx_queues) {
+ PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ return (-EINVAL);
+ }
+
+ if (dev->data->dev_started) {
+ PMD_DEBUG_TRACE(
+ "port %d must be stopped to allow configuration\n", port_id);
+ return -EBUSY;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ if (tx_conf == NULL)
+ tx_conf = &dev_info.default_txconf;
+
+ return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
+ socket_id, tx_conf);
+}
+
+void
+rte_eth_promiscuous_enable(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
+ (*dev->dev_ops->promiscuous_enable)(dev);
+ dev->data->promiscuous = 1;
+}
+
+void
+rte_eth_promiscuous_disable(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
+ dev->data->promiscuous = 0;
+ (*dev->dev_ops->promiscuous_disable)(dev);
+}
+
+int
+rte_eth_promiscuous_get(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -1;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ return dev->data->promiscuous;
+}
+
+void
+rte_eth_allmulticast_enable(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
+ (*dev->dev_ops->allmulticast_enable)(dev);
+ dev->data->all_multicast = 1;
+}
+
+void
+rte_eth_allmulticast_disable(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
+ dev->data->all_multicast = 0;
+ (*dev->dev_ops->allmulticast_disable)(dev);
+}
+
+int
+rte_eth_allmulticast_get(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -1;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ return dev->data->all_multicast;
+}
+
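+/*
+ * Read the 64-bit link status from dev->data into *link using an atomic
+ * compare-and-set, so the copy is done in one indivisible 64-bit access.
+ */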
+static inline int
+rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+void
+rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ rte_eth_dev_atomic_read_link_status(dev, eth_link);
+ else {
+ FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
+ (*dev->dev_ops->link_update)(dev, 1);
+ *eth_link = dev->data->dev_link;
+ }
+}
+
+void
+rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ rte_eth_dev_atomic_read_link_status(dev, eth_link);
+ else {
+ FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
+ (*dev->dev_ops->link_update)(dev, 0);
+ *eth_link = dev->data->dev_link;
+ }
+}
+
+void
+rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+ memset(stats, 0, sizeof(*stats));
+
+ FUNC_PTR_OR_RET(*dev->dev_ops->stats_get);
+ (*dev->dev_ops->stats_get)(dev, stats);
+ stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+}
+
+void
+rte_eth_stats_reset(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
+ (*dev->dev_ops->stats_reset)(dev);
+}
+
+/* retrieve ethdev extended statistics */
+int
+rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
+ unsigned n)
+{
+ struct rte_eth_stats eth_stats;
+ struct rte_eth_dev *dev;
+ unsigned count, i, q;
+ uint64_t val;
+ char *stats_ptr;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -1;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ /* implemented by the driver */
+ if (dev->dev_ops->xstats_get != NULL)
+ return (*dev->dev_ops->xstats_get)(dev, xstats, n);
+
+ /* else, return generic statistics */
+ count = RTE_NB_STATS;
+ count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
+ count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
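+ /* If the caller's array is too small, return the number of entries
+ * required so it can retry with a larger buffer. */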
+ if (n < count)
+ return count;
+
+ /* now fill the xstats structure */
+
+ count = 0;
+ memset(&eth_stats, 0, sizeof(eth_stats));
+ rte_eth_stats_get(port_id, &eth_stats);
+
+ /* global stats */
+ for (i = 0; i < RTE_NB_STATS; i++) {
+ stats_ptr = (char *)&eth_stats + rte_stats_strings[i].offset;
+ val = *(uint64_t *)stats_ptr;
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "%s", rte_stats_strings[i].name);
+ xstats[count++].value = val;
+ }
+
+ /* per-rxq stats */
+ for (q = 0; q < dev->data->nb_rx_queues; q++) {
+ for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
+ stats_ptr = (char *)&eth_stats;
+ stats_ptr += rte_rxq_stats_strings[i].offset;
+ stats_ptr += q * sizeof(uint64_t);
+ val = *(uint64_t *)stats_ptr;
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "rx_queue_%u_%s", q,
+ rte_rxq_stats_strings[i].name);
+ xstats[count++].value = val;
+ }
+ }
+
+ /* per-txq stats */
+ for (q = 0; q < dev->data->nb_tx_queues; q++) {
+ for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
+ stats_ptr = (char *)&eth_stats;
+ stats_ptr += rte_txq_stats_strings[i].offset;
+ stats_ptr += q * sizeof(uint64_t);
+ val = *(uint64_t *)stats_ptr;
+ snprintf(xstats[count].name, sizeof(xstats[count].name),
+ "tx_queue_%u_%s", q,
+ rte_txq_stats_strings[i].name);
+ xstats[count++].value = val;
+ }
+ }
+
+ return count;
+}
+
+/* reset ethdev extended statistics */
+void
+rte_eth_xstats_reset(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ /* implemented by the driver */
+ if (dev->dev_ops->xstats_reset != NULL) {
+ (*dev->dev_ops->xstats_reset)(dev);
+ return;
+ }
+
+ /* fallback to default */
+ rte_eth_stats_reset(port_id);
+}
+
+static int
+set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
+ uint8_t is_rx)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
+ return (*dev->dev_ops->queue_stats_mapping_set)
+ (dev, queue_id, stat_idx, is_rx);
+}
+
+
+int
+rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
+ uint8_t stat_idx)
+{
+ return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
+ STAT_QMAP_TX);
+}
+
+
+int
+rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
+ uint8_t stat_idx)
+{
+ return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
+ STAT_QMAP_RX);
+}
+
+
+void
+rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+
+ FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
+ (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+ dev_info->pci_dev = dev->pci_dev;
+ if (dev->driver)
+ dev_info->driver_name = dev->driver->pci_drv.name;
+}
+
+void
+rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return;
+ }
+ dev = &rte_eth_devices[port_id];
+ ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
+}
+
+
+int
+rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ *mtu = dev->data->mtu;
+ return 0;
+}
+
+int
+rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
+{
+ int ret;
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
+
+ ret = (*dev->dev_ops->mtu_set)(dev, mtu);
+ if (!ret)
+ dev->data->mtu = mtu;
+
+ return ret;
+}
+
+int
+rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ dev = &rte_eth_devices[port_id];
+ if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
+ PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
+ return (-ENOSYS);
+ }
+
+ if (vlan_id > 4095) {
+ PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
+ port_id, (unsigned) vlan_id);
+ return (-EINVAL);
+ }
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
+ (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
+ return (0);
+}
+
+int
+rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
+ return (-EINVAL);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+ (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
+
+ return (0);
+}
+
+int
+rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
+ (*dev->dev_ops->vlan_tpid_set)(dev, tpid);
+
+ return (0);
+}
+
+int
+rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
+{
+ struct rte_eth_dev *dev;
+ int ret = 0;
+ int mask = 0;
+ int cur, org = 0;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ /* Check which options were changed by the application */
+ cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
+ org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
+ if (cur != org){
+ dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
+ mask |= ETH_VLAN_STRIP_MASK;
+ }
+
+ cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
+ org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
+ if (cur != org){
+ dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
+ mask |= ETH_VLAN_FILTER_MASK;
+ }
+
+ cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
+ org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
+ if (cur != org){
+ dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
+ mask |= ETH_VLAN_EXTEND_MASK;
+ }
+
+ /* no change */
+ if (mask == 0)
+ return ret;
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
+ (*dev->dev_ops->vlan_offload_set)(dev, mask);
+
+ return ret;
+}
+
+int
+rte_eth_dev_get_vlan_offload(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+ int ret = 0;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ ret |= ETH_VLAN_STRIP_OFFLOAD ;
+
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ ret |= ETH_VLAN_FILTER_OFFLOAD ;
+
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ ret |= ETH_VLAN_EXTEND_OFFLOAD ;
+
+ return ret;
+}
+
+int
+rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
+ (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
+
+ return 0;
+}
+
+int
+rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_filter,
+ uint8_t queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
+ PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+ port_id, dev->data->dev_conf.fdir_conf.mode);
+ return (-ENOSYS);
+ }
+
+ if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+ || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+ && (fdir_filter->port_src || fdir_filter->port_dst)) {
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+ "None l4type, source & destinations ports " \
+ "should be null!\n");
+ return (-EINVAL);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
+ return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
+ queue);
+}
+
+int
+rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_filter,
+ uint8_t queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
+ PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+ port_id, dev->data->dev_conf.fdir_conf.mode);
+ return (-ENOSYS);
+ }
+
+ if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+ || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+ && (fdir_filter->port_src || fdir_filter->port_dst)) {
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+ "None l4type, source & destinations ports " \
+ "should be null!\n");
+ return (-EINVAL);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
+ return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter,
+ queue);
+
+}
+
+int
+rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_filter)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
+ PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+ port_id, dev->data->dev_conf.fdir_conf.mode);
+ return (-ENOSYS);
+ }
+
+ if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+ || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+ && (fdir_filter->port_src || fdir_filter->port_dst)) {
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+ "None l4type source & destinations ports " \
+ "should be null!\n");
+ return (-EINVAL);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
+ return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter);
+}
+
+int
+rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (! (dev->data->dev_conf.fdir_conf.mode)) {
+ PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
+ return (-ENOSYS);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
+
+ (*dev->dev_ops->fdir_infos_get)(dev, fdir);
+ return (0);
+}
+
+int
+rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_filter,
+ uint16_t soft_id, uint8_t queue,
+ uint8_t drop)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+ port_id, dev->data->dev_conf.fdir_conf.mode);
+ return (-ENOSYS);
+ }
+
+ if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+ || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+ && (fdir_filter->port_src || fdir_filter->port_dst)) {
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+ "None l4type, source & destinations ports " \
+ "should be null!\n");
+ return (-EINVAL);
+ }
+
+ /* For now IPv6 is not supported with perfect filter */
+ //if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
+ // return (-ENOTSUP);
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
+ return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
+ soft_id, queue,
+ drop);
+}
+
+int
+rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_filter,
+ uint16_t soft_id, uint8_t queue,
+ uint8_t drop)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+ port_id, dev->data->dev_conf.fdir_conf.mode);
+ return (-ENOSYS);
+ }
+
+ if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+ || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+ && (fdir_filter->port_src || fdir_filter->port_dst)) {
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+ "None l4type, source & destinations ports " \
+ "should be null!\n");
+ return (-EINVAL);
+ }
+
+ /* For now IPv6 is not supported with perfect filter */
+ if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
+ return (-ENOTSUP);
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
+ return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
+ soft_id, queue, drop);
+}
+
+int
+rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_filter,
+ uint16_t soft_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+ port_id, dev->data->dev_conf.fdir_conf.mode);
+ return (-ENOSYS);
+ }
+
+ if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+ || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+ && (fdir_filter->port_src || fdir_filter->port_dst)) {
+ PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+ "None l4type, source & destinations ports " \
+ "should be null!\n");
+ return (-EINVAL);
+ }
+
+ /* For now IPv6 is not supported with perfect filter */
+ if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
+ return (-ENOTSUP);
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
+ return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
+ soft_id);
+}
+
+int
+rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (! (dev->data->dev_conf.fdir_conf.mode)) {
+ PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
+ return (-ENOSYS);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
+ return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
+}
+
+int
+rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
+ memset(fc_conf, 0, sizeof(*fc_conf));
+ return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
+}
+
+int
+rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
+ PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
+ return (-EINVAL);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
+ return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
+}
+
+int
+rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
+ PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
+ return (-EINVAL);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ /* High water, low water validation are device specific */
+ if (*dev->dev_ops->priority_flow_ctrl_set)
+ return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
+ return (-ENOTSUP);
+}
+
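+/*
+ * reta_conf describes the RSS redirection table as 64-entry groups:
+ * reta_size must be a multiple of RTE_RETA_GROUP_SIZE and at least one
+ * group mask bit must be set.
+ */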
+static inline int
+rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint16_t i, num;
+
+ if (!reta_conf)
+ return -EINVAL;
+
+ if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
+ PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
+ RTE_RETA_GROUP_SIZE);
+ return -EINVAL;
+ }
+
+ num = reta_size / RTE_RETA_GROUP_SIZE;
+ for (i = 0; i < num; i++) {
+ if (reta_conf[i].mask)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size,
+ uint8_t max_rxq)
+{
+ uint16_t i, idx, shift;
+
+ if (!reta_conf)
+ return -EINVAL;
+
+ if (max_rxq == 0) {
+ PMD_DEBUG_TRACE("No receive queue is available\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if ((reta_conf[idx].mask & (1ULL << shift)) &&
+ (reta_conf[idx].reta[shift] >= max_rxq)) {
+ PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
+ "the maximum rxq index: %u\n", idx, shift,
+ reta_conf[idx].reta[shift], max_rxq);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_eth_dev_rss_reta_update(uint8_t port_id,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct rte_eth_dev *dev;
+ int ret;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ /* Check mask bits */
+ ret = rte_eth_check_reta_mask(reta_conf, reta_size);
+ if (ret < 0)
+ return ret;
+
+ dev = &rte_eth_devices[port_id];
+
+ /* Check entry value */
+ ret = rte_eth_check_reta_entry(reta_conf, reta_size,
+ dev->data->nb_rx_queues);
+ if (ret < 0)
+ return ret;
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
+ return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
+}
+
+int
+rte_eth_dev_rss_reta_query(uint8_t port_id,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct rte_eth_dev *dev;
+ int ret;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ /* Check mask bits */
+ ret = rte_eth_check_reta_mask(reta_conf, reta_size);
+ if (ret < 0)
+ return ret;
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
+ return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
+}
+
+int
+rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *dev;
+ uint16_t rss_hash_protos;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ rss_hash_protos = rss_conf->rss_hf;
+ if ((rss_hash_protos != 0) &&
+ ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
+ PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
+ rss_hash_protos);
+ return (-EINVAL);
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
+ return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
+}
+
+int
+rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
+ return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
+}
+
+int
+rte_eth_dev_udp_tunnel_add(uint8_t port_id,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ if (udp_tunnel == NULL) {
+ PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
+ return -EINVAL;
+ }
+
+ if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+ PMD_DEBUG_TRACE("Invalid tunnel type\n");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_add, -ENOTSUP);
+ return (*dev->dev_ops->udp_tunnel_add)(dev, udp_tunnel);
+}
+
+int
+rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ if (udp_tunnel == NULL) {
+ PMD_DEBUG_TRACE("Invalid udp_tunnel parametr\n");
+ return -EINVAL;
+ }
+
+ if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+ PMD_DEBUG_TRACE("Invalid tunnel type\n");
+ return -EINVAL;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_del, -ENOTSUP);
+ return (*dev->dev_ops->udp_tunnel_del)(dev, udp_tunnel);
+}
+
+int
+rte_eth_led_on(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
+ return ((*dev->dev_ops->dev_led_on)(dev));
+}
+
+int
+rte_eth_led_off(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
+ return ((*dev->dev_ops->dev_led_off)(dev));
+}
+
+/*
+ * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
+ * an empty spot.
+ */
+static inline int
+get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ unsigned i;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ for (i = 0; i < dev_info.max_mac_addrs; i++)
+ if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
+ return i;
+
+ return -1;
+}
+
+static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
+
+int
+rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
+ uint32_t pool)
+{
+ struct rte_eth_dev *dev;
+ int index;
+ uint64_t pool_mask;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
+
+ if (is_zero_ether_addr(addr)) {
+ PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+ port_id);
+ return (-EINVAL);
+ }
+ if (pool >= ETH_64_POOLS) {
+ PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1);
+ return (-EINVAL);
+ }
+
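+ /* Reuse the slot if the address is already programmed; otherwise take
+ * the first free (all-zero) entry in the MAC address array. */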
+ index = get_mac_addr_index(port_id, addr);
+ if (index < 0) {
+ index = get_mac_addr_index(port_id, &null_mac_addr);
+ if (index < 0) {
+ PMD_DEBUG_TRACE("port %d: MAC address array full\n",
+ port_id);
+ return (-ENOSPC);
+ }
+ } else {
+ pool_mask = dev->data->mac_pool_sel[index];
+
+ /* If both the MAC address and the pool are already there, do nothing */
+ if (pool_mask & (1ULL << pool))
+ return 0;
+ }
+
+ /* Update NIC */
+ (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
+
+ /* Update address in NIC data structure */
+ ether_addr_copy(addr, &dev->data->mac_addrs[index]);
+
+ /* Update pool bitmap in NIC data structure */
+ dev->data->mac_pool_sel[index] |= (1ULL << pool);
+
+ return 0;
+}
+
+int
+rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
+{
+ struct rte_eth_dev *dev;
+ int index;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
+
+ index = get_mac_addr_index(port_id, addr);
+ if (index == 0) {
+ PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
+ return (-EADDRINUSE);
+ } else if (index < 0)
+ return 0; /* Do nothing if address wasn't found */
+
+ /* Update NIC */
+ (*dev->dev_ops->mac_addr_remove)(dev, index);
+
+ /* Update address in NIC data structure */
+ ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
+
+ /* reset pool bitmap */
+ dev->data->mac_pool_sel[index] = 0;
+
+ return 0;
+}
+
+int
+rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
+ uint16_t rx_mode, uint8_t on)
+{
+ uint16_t num_vfs;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n",
+ port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ num_vfs = dev_info.max_vfs;
+ if (vf > num_vfs) {
+ PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
+ return (-EINVAL);
+ }
+ if (rx_mode == 0) {
+ PMD_DEBUG_TRACE("set VF RX mode: mode mask cannot be zero\n");
+ return (-EINVAL);
+ }
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
+ return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
+}
+
+/*
+ * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
+ * an empty spot.
+ */
+static inline int
+get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ unsigned i;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if (!dev->data->hash_mac_addrs)
+ return -1;
+
+ for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
+ if (memcmp(addr, &dev->data->hash_mac_addrs[i],
+ ETHER_ADDR_LEN) == 0)
+ return i;
+
+ return -1;
+}
+
+int
+rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
+ uint8_t on)
+{
+ int index;
+ int ret;
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
+ port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ if (is_zero_ether_addr(addr)) {
+ PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+ port_id);
+ return (-EINVAL);
+ }
+
+ index = get_hash_mac_addr_index(port_id, addr);
+ /* Check if it's already there, and do nothing */
+ if ((index >= 0) && (on))
+ return 0;
+
+ if (index < 0) {
+ if (!on) {
+ PMD_DEBUG_TRACE("port %d: the MAC address was not"
+ "set in UTA\n", port_id);
+ return (-EINVAL);
+ }
+
+ index = get_hash_mac_addr_index(port_id, &null_mac_addr);
+ if (index < 0) {
+ PMD_DEBUG_TRACE("port %d: MAC address array full\n",
+ port_id);
+ return (-ENOSPC);
+ }
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
+ ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
+ if (ret == 0) {
+ /* Update address in NIC data structure */
+ if (on)
+ ether_addr_copy(addr,
+ &dev->data->hash_mac_addrs[index]);
+ else
+ ether_addr_copy(&null_mac_addr,
+ &dev->data->hash_mac_addrs[index]);
+ }
+
+ return ret;
+}
+
+int
+rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n",
+ port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
+ return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
+}
+
+int
+rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on)
+{
+ uint16_t num_vfs;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ num_vfs = dev_info.max_vfs;
+ if (vf > num_vfs) {
+ PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
+ return (-EINVAL);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
+ return (*dev->dev_ops->set_vf_rx)(dev, vf,on);
+}
+
+int
+rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on)
+{
+ uint16_t num_vfs;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ num_vfs = dev_info.max_vfs;
+ if (vf > num_vfs) {
+ PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
+ return (-EINVAL);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
+ return (*dev->dev_ops->set_vf_tx)(dev, vf,on);
+}
+
+int
+rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
+ uint64_t vf_mask,uint8_t vlan_on)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n",
+ port_id);
+ return (-ENODEV);
+ }
+ dev = &rte_eth_devices[port_id];
+
+ if (vlan_id > ETHER_MAX_VLAN_ID) {
+ PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
+ vlan_id);
+ return (-EINVAL);
+ }
+ if (vf_mask == 0) {
+ PMD_DEBUG_TRACE("VF VLAN filter: pool_mask cannot be 0\n");
+ return (-EINVAL);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
+ return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
+ vf_mask,vlan_on);
+}
+
+int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
+ uint16_t tx_rate)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_link link;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("set queue rate limit:invalid port id=%d\n",
+ port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+ link = dev->data->dev_link;
+
+ if (queue_idx > dev_info.max_tx_queues) {
+ PMD_DEBUG_TRACE("set queue rate limit:port %d: "
+ "invalid queue id=%d\n", port_id, queue_idx);
+ return -EINVAL;
+ }
+
+ if (tx_rate > link.link_speed) {
+ PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
+ "bigger than link speed= %d\n",
+ tx_rate, link.link_speed);
+ return -EINVAL;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
+ return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
+}
+
+int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
+ uint64_t q_msk)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_link link;
+
+ if (q_msk == 0)
+ return 0;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("set VF rate limit:invalid port id=%d\n",
+ port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+ link = dev->data->dev_link;
+
+ if (vf > dev_info.max_vfs) {
+ PMD_DEBUG_TRACE("set VF rate limit:port %d: "
+ "invalid vf id=%d\n", port_id, vf);
+ return -EINVAL;
+ }
+
+ if (tx_rate > link.link_speed) {
+ PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
+ "bigger than link speed= %d\n",
+ tx_rate, link.link_speed);
+ return -EINVAL;
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
+ return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
+}
+
+int
+rte_eth_mirror_rule_set(uint8_t port_id,
+ struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if (mirror_conf->rule_type_mask == 0) {
+ PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
+ return (-EINVAL);
+ }
+
+ if (mirror_conf->dst_pool >= ETH_64_POOLS) {
+ PMD_DEBUG_TRACE("Invalid dst pool, pool id must"
+ "be 0-%d\n",ETH_64_POOLS - 1);
+ return (-EINVAL);
+ }
+
+ if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) &&
+ (mirror_conf->pool_mask == 0)) {
+ PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not"
+ "be 0.\n");
+ return (-EINVAL);
+ }
+
+ if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE) {
+ PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
+ ETH_VMDQ_NUM_MIRROR_RULE - 1);
+ return (-EINVAL);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
+
+ return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
+}
+
+int
+rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if (rule_id >= ETH_VMDQ_NUM_MIRROR_RULE) {
+ PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n",
+ ETH_VMDQ_NUM_MIRROR_RULE-1);
+ return (-EINVAL);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
+
+ return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
+}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+uint16_t
+rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return 0;
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
+ if (queue_id >= dev->data->nb_rx_queues) {
+ PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+ return 0;
+ }
+ return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
+ rx_pkts, nb_pkts);
+}
+
+uint16_t
+rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return 0;
+ }
+ dev = &rte_eth_devices[port_id];
+
+ FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
+ if (queue_id >= dev->data->nb_tx_queues) {
+ PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+ return 0;
+ }
+ return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
+ tx_pkts, nb_pkts);
+}
+
+uint32_t
+rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return 0;
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
+ return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+}
+
+int
+rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
+ return (*dev->dev_ops->rx_descriptor_done)( \
+ dev->data->rx_queues[queue_id], offset);
+}
+#endif
+
+int
+rte_eth_dev_callback_register(uint8_t port_id,
+ enum rte_eth_event_type event,
+ rte_eth_dev_cb_fn cb_fn, void *cb_arg)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_callback *user_cb;
+
+ if (!cb_fn)
+ return (-EINVAL);
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-EINVAL);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ rte_spinlock_lock(&rte_eth_dev_cb_lock);
+
+ TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
+ if (user_cb->cb_fn == cb_fn &&
+ user_cb->cb_arg == cb_arg &&
+ user_cb->event == event) {
+ break;
+ }
+ }
+
+ /* create a new callback. */
+ if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+ sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
+ user_cb->cb_fn = cb_fn;
+ user_cb->cb_arg = cb_arg;
+ user_cb->event = event;
+ TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
+ }
+
+ rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ return ((user_cb == NULL) ? -ENOMEM : 0);
+}
+
+int
+rte_eth_dev_callback_unregister(uint8_t port_id,
+ enum rte_eth_event_type event,
+ rte_eth_dev_cb_fn cb_fn, void *cb_arg)
+{
+ int ret;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_callback *cb, *next;
+
+ if (!cb_fn)
+ return (-EINVAL);
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-EINVAL);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ rte_spinlock_lock(&rte_eth_dev_cb_lock);
+
+ ret = 0;
+ for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {
+
+ next = TAILQ_NEXT(cb, next);
+
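+ /* A stored cb_arg of (void *)-1 acts as a wildcard: such an entry is
+ * removed regardless of the cb_arg passed by the caller. */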
+ if (cb->cb_fn != cb_fn || cb->event != event ||
+ (cb->cb_arg != (void *)-1 &&
+ cb->cb_arg != cb_arg))
+ continue;
+
+ /*
+ * if this callback is not executing right now,
+ * then remove it.
+ */
+ if (cb->active == 0) {
+ TAILQ_REMOVE(&(dev->callbacks), cb, next);
+ rte_free(cb);
+ } else {
+ ret = -EAGAIN;
+ }
+ }
+
+ rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ return (ret);
+}
+
+void
+_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event)
+{
+ struct rte_eth_dev_callback *cb_lst;
+ struct rte_eth_dev_callback dev_cb;
+
+ rte_spinlock_lock(&rte_eth_dev_cb_lock);
+ TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
+ if (cb_lst->cb_fn == NULL || cb_lst->event != event)
+ continue;
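+ /* Take a private copy and mark the entry active so that
+ * callback_unregister() will not free it, then release the lock
+ * while the user callback runs. */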
+ dev_cb = *cb_lst;
+ cb_lst->active = 1;
+ rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
+ dev_cb.cb_arg);
+ rte_spinlock_lock(&rte_eth_dev_cb_lock);
+ cb_lst->active = 0;
+ }
+ rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+}
+#ifdef RTE_NIC_BYPASS
+int rte_eth_dev_bypass_init(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ PMD_DEBUG_TRACE("Invalid port device\n");
+ return (-ENODEV);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
+ (*dev->dev_ops->bypass_init)(dev);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ PMD_DEBUG_TRACE("Invalid port device\n");
+ return (-ENODEV);
+ }
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
+ (*dev->dev_ops->bypass_state_show)(dev, state);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ PMD_DEBUG_TRACE("Invalid port device\n");
+ return (-ENODEV);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
+ (*dev->dev_ops->bypass_state_set)(dev, new_state);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ PMD_DEBUG_TRACE("Invalid port device\n");
+ return (-ENODEV);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
+ (*dev->dev_ops->bypass_event_show)(dev, event, state);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ PMD_DEBUG_TRACE("Invalid port device\n");
+ return (-ENODEV);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
+ (*dev->dev_ops->bypass_event_set)(dev, event, state);
+ return 0;
+}
+
+int
+rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ PMD_DEBUG_TRACE("Invalid port device\n");
+ return (-ENODEV);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
+ (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ PMD_DEBUG_TRACE("Invalid port device\n");
+ return (-ENODEV);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
+ (*dev->dev_ops->bypass_ver_show)(dev, ver);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ PMD_DEBUG_TRACE("Invalid port device\n");
+ return (-ENODEV);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
+ (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
+ return 0;
+}
+
+int
+rte_eth_dev_bypass_wd_reset(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if ((dev= &rte_eth_devices[port_id]) == NULL) {
+ PMD_DEBUG_TRACE("Invalid port device\n");
+ return (-ENODEV);
+ }
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
+ (*dev->dev_ops->bypass_wd_reset)(dev);
+ return 0;
+}
+#endif
+
+int
+rte_eth_dev_add_syn_filter(uint8_t port_id,
+ struct rte_syn_filter *filter, uint16_t rx_queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_syn_filter, -ENOTSUP);
+ return (*dev->dev_ops->add_syn_filter)(dev, filter, rx_queue);
+}
+
+int
+rte_eth_dev_remove_syn_filter(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_syn_filter, -ENOTSUP);
+ return (*dev->dev_ops->remove_syn_filter)(dev);
+}
+
+int
+rte_eth_dev_get_syn_filter(uint8_t port_id,
+ struct rte_syn_filter *filter, uint16_t *rx_queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (filter == NULL || rx_queue == NULL)
+ return -EINVAL;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_syn_filter, -ENOTSUP);
+ return (*dev->dev_ops->get_syn_filter)(dev, filter, rx_queue);
+}
+
+int
+rte_eth_dev_add_ethertype_filter(uint8_t port_id, uint16_t index,
+ struct rte_ethertype_filter *filter, uint16_t rx_queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+ if (filter->ethertype == ETHER_TYPE_IPv4 ||
+ filter->ethertype == ETHER_TYPE_IPv6) {
+ PMD_DEBUG_TRACE("IPv4 and IPv6 are not supported"
+ " in ethertype filter\n");
+ return -EINVAL;
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_ethertype_filter, -ENOTSUP);
+ return (*dev->dev_ops->add_ethertype_filter)(dev, index,
+ filter, rx_queue);
+}
+
+int
+rte_eth_dev_remove_ethertype_filter(uint8_t port_id, uint16_t index)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_ethertype_filter, -ENOTSUP);
+ return (*dev->dev_ops->remove_ethertype_filter)(dev, index);
+}
+
+int
+rte_eth_dev_get_ethertype_filter(uint8_t port_id, uint16_t index,
+ struct rte_ethertype_filter *filter, uint16_t *rx_queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (filter == NULL || rx_queue == NULL)
+ return -EINVAL;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_ethertype_filter, -ENOTSUP);
+ return (*dev->dev_ops->get_ethertype_filter)(dev, index,
+ filter, rx_queue);
+}
+
+int
+rte_eth_dev_add_2tuple_filter(uint8_t port_id, uint16_t index,
+ struct rte_2tuple_filter *filter, uint16_t rx_queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+ if (filter->protocol != IPPROTO_TCP &&
+ filter->tcp_flags != 0) {
+ PMD_DEBUG_TRACE("TCP flags are 0x%x, but the protocol"
+ " is not TCP\n",
+ filter->tcp_flags);
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_2tuple_filter, -ENOTSUP);
+ return (*dev->dev_ops->add_2tuple_filter)(dev, index, filter, rx_queue);
+}
+
+int
+rte_eth_dev_remove_2tuple_filter(uint8_t port_id, uint16_t index)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_2tuple_filter, -ENOTSUP);
+ return (*dev->dev_ops->remove_2tuple_filter)(dev, index);
+}
+
+int
+rte_eth_dev_get_2tuple_filter(uint8_t port_id, uint16_t index,
+ struct rte_2tuple_filter *filter, uint16_t *rx_queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (filter == NULL || rx_queue == NULL)
+ return -EINVAL;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_2tuple_filter, -ENOTSUP);
+ return (*dev->dev_ops->get_2tuple_filter)(dev, index, filter, rx_queue);
+}
+
+int
+rte_eth_dev_add_5tuple_filter(uint8_t port_id, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t rx_queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ if (filter->protocol != IPPROTO_TCP &&
+ filter->tcp_flags != 0) {
+ PMD_DEBUG_TRACE("TCP flags are 0x%x, but the protocol"
+ " is not TCP\n",
+ filter->tcp_flags);
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_5tuple_filter, -ENOTSUP);
+ return (*dev->dev_ops->add_5tuple_filter)(dev, index, filter, rx_queue);
+}
+
+int
+rte_eth_dev_remove_5tuple_filter(uint8_t port_id, uint16_t index)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_5tuple_filter, -ENOTSUP);
+ return (*dev->dev_ops->remove_5tuple_filter)(dev, index);
+}
+
+int
+rte_eth_dev_get_5tuple_filter(uint8_t port_id, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t *rx_queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (filter == NULL || rx_queue == NULL)
+ return -EINVAL;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_5tuple_filter, -ENOTSUP);
+ return (*dev->dev_ops->get_5tuple_filter)(dev, index, filter,
+ rx_queue);
+}
+
+int
+rte_eth_dev_add_flex_filter(uint8_t port_id, uint16_t index,
+ struct rte_flex_filter *filter, uint16_t rx_queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_flex_filter, -ENOTSUP);
+ return (*dev->dev_ops->add_flex_filter)(dev, index, filter, rx_queue);
+}
+
+int
+rte_eth_dev_remove_flex_filter(uint8_t port_id, uint16_t index)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->remove_flex_filter, -ENOTSUP);
+ return (*dev->dev_ops->remove_flex_filter)(dev, index);
+}
+
+int
+rte_eth_dev_get_flex_filter(uint8_t port_id, uint16_t index,
+ struct rte_flex_filter *filter, uint16_t *rx_queue)
+{
+ struct rte_eth_dev *dev;
+
+ if (filter == NULL || rx_queue == NULL)
+ return -EINVAL;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_flex_filter, -ENOTSUP);
+ return (*dev->dev_ops->get_flex_filter)(dev, index, filter,
+ rx_queue);
+}
+
+int
+rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
+ return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
+ RTE_ETH_FILTER_NOP, NULL);
+}
+
+int
+rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return -ENODEV;
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
+ return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
+}
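+
+/*
+ * Editor's illustration (not part of the imported DPDK sources): a minimal
+ * sketch of how an application might use the 2tuple wrapper defined above.
+ * example_steer_http() is a hypothetical helper; it assumes <string.h>,
+ * <netinet/in.h> and <rte_byteorder.h> are available and that port_id refers
+ * to a valid, started port whose PMD implements this filter type.
+ */
+static int
+example_steer_http(uint8_t port_id, uint16_t rx_queue)
+{
+ struct rte_2tuple_filter f;
+
+ memset(&f, 0, sizeof(f));
+ f.dst_port = rte_cpu_to_be_16(80); /* match TCP destination port 80 */
+ f.protocol = IPPROTO_TCP;
+ f.priority = 1; /* used when several filters match */
+
+ /* filter index 0; -ENOTSUP is returned when the PMD lacks the op */
+ return rte_eth_dev_add_2tuple_filter(port_id, 0, &f, rx_queue);
+}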
diff --git a/src/dpdk_lib18/librte_ether/rte_ethdev.h b/src/dpdk_lib18/librte_ether/rte_ethdev.h
new file mode 100755
index 00000000..ce0528f5
--- /dev/null
+++ b/src/dpdk_lib18/librte_ether/rte_ethdev.h
@@ -0,0 +1,3759 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETHDEV_H_
+#define _RTE_ETHDEV_H_
+
+/**
+ * @file
+ *
+ * RTE Ethernet Device API
+ *
+ * The Ethernet Device API is composed of two parts:
+ *
+ * - The application-oriented Ethernet API that includes functions to setup
+ * an Ethernet device (configure it, setup its RX and TX queues and start it),
+ * to get its MAC address, the speed and the status of its physical link,
+ * to receive and to transmit packets, and so on.
+ *
+ * - The driver-oriented Ethernet API that exports a function allowing
+ * an Ethernet Poll Mode Driver (PMD) to simultaneously register itself as
+ * an Ethernet device driver and as a PCI driver for a set of matching PCI
+ * [Ethernet] devices classes.
+ *
+ * By default, all the functions of the Ethernet Device API exported by a PMD
+ * are lock-free functions that are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target object. For instance,
+ * the receive function of a PMD cannot be invoked in parallel on two logical
+ * cores to poll the same RX queue [of the same port]. Of course, this function
+ * can be invoked in parallel by different logical cores on different RX queues.
+ * It is the responsibility of the upper level application to enforce this rule.
+ *
+ * If needed, parallel accesses by multiple logical cores to shared queues
+ * shall be explicitly protected by dedicated inline lock-aware functions
+ * built on top of their corresponding lock-free functions of the PMD API.
+ *
+ * In all functions of the Ethernet API, the Ethernet device is
+ * designated by an integer >= 0 named the device port identifier.
+ *
+ * At the Ethernet driver level, Ethernet devices are represented by a generic
+ * data structure of type *rte_eth_dev*.
+ *
+ * Ethernet devices are dynamically registered during the PCI probing phase
+ * performed at EAL initialization time.
+ * When an Ethernet device is being probed, an *rte_eth_dev* structure and
+ * a new port identifier are allocated for that device. Then, the eth_dev_init()
+ * function supplied by the Ethernet driver matching the probed PCI
+ * device is invoked to properly initialize the device.
+ *
+ * The role of the device init function consists of resetting the hardware,
+ * checking access to Non-volatile Memory (NVM), reading the MAC address
+ * from NVM etc.
+ *
+ * If the device init operation is successful, the correspondence between
+ * the port identifier assigned to the new device and its associated
+ * *rte_eth_dev* structure is effectively registered.
+ * Otherwise, both the *rte_eth_dev* structure and the port identifier are
+ * freed.
+ *
+ * The functions exported by the application Ethernet API to setup a device
+ * designated by its port identifier must be invoked in the following order:
+ * - rte_eth_dev_configure()
+ * - rte_eth_tx_queue_setup()
+ * - rte_eth_rx_queue_setup()
+ * - rte_eth_dev_start()
+ *
+ * Then, the network application can invoke, in any order, the functions
+ * exported by the Ethernet API to get the MAC address of a given device, to
+ * get the speed and the status of a device physical link, to receive/transmit
+ * [burst of] packets, and so on.
+ *
+ * If the application wants to change the configuration (i.e. call
+ * rte_eth_dev_configure(), rte_eth_tx_queue_setup(), or
+ * rte_eth_rx_queue_setup()), it must call rte_eth_dev_stop() first to stop the
+ * device and then do the reconfiguration before calling rte_eth_dev_start()
+ * again. The transmit and receive functions should not be invoked when the
+ * device is stopped.
+ *
+ * Please note that some configuration is not stored between calls to
+ * rte_eth_dev_stop()/rte_eth_dev_start(). The following configuration will
+ * be retained:
+ *
+ * - flow control settings
+ * - receive mode configuration (promiscuous mode, hardware checksum mode,
+ * RSS/VMDQ settings etc.)
+ * - VLAN filtering configuration
+ * - MAC addresses supplied to MAC address array
+ * - flow director filtering mode (but not filtering rules)
+ * - NIC queue statistics mappings
+ *
+ * Any other configuration will not be stored and will need to be re-entered
+ * after a call to rte_eth_dev_start().
+ *
+ * Finally, a network application can close an Ethernet device by invoking the
+ * rte_eth_dev_close() function.
+ *
+ * Each function of the application Ethernet API invokes a specific function
+ * of the PMD that controls the target device designated by its port
+ * identifier.
+ * For this purpose, all device-specific functions of an Ethernet driver are
+ * supplied through a set of pointers contained in a generic structure of type
+ * *eth_dev_ops*.
+ * The address of the *eth_dev_ops* structure is stored in the *rte_eth_dev*
+ * structure by the device init function of the Ethernet driver, which is
+ * invoked during the PCI probing phase, as explained earlier.
+ *
+ * In other words, each function of the Ethernet API simply retrieves the
+ * *rte_eth_dev* structure associated with the device port identifier and
+ * performs an indirect invocation of the corresponding driver function
+ * supplied in the *eth_dev_ops* structure of the *rte_eth_dev* structure.
+ *
+ * For performance reasons, the address of the burst-oriented RX and TX
+ * functions of the Ethernet driver are not contained in the *eth_dev_ops*
+ * structure. Instead, they are directly stored at the beginning of the
+ * *rte_eth_dev* structure to avoid an extra indirect memory access during
+ * their invocation.
+ *
+ * RTE ethernet device drivers do not use interrupts for transmitting or
+ * receiving. Instead, Ethernet drivers export Poll-Mode receive and transmit
+ * functions to applications.
+ * Both receive and transmit functions are packet-burst oriented to minimize
+ * their cost per packet through the following optimizations:
+ *
+ * - Sharing among multiple packets the incompressible cost of the
+ * invocation of receive/transmit functions.
+ *
+ * - Enabling receive/transmit functions to take advantage of burst-oriented
+ * hardware features (L1 cache, prefetch instructions, NIC head/tail
+ * registers) to minimize the number of CPU cycles per packet, for instance,
+ * by avoiding useless read memory accesses to ring descriptors, or by
+ * systematically using arrays of pointers that exactly fit L1 cache line
+ * boundaries and sizes.
+ *
+ * The burst-oriented receive function does not provide any error notification,
+ * to avoid the corresponding overhead. As a hint, the upper-level application
+ * might check the status of the device link when the receive function of the
+ * driver systematically returns 0 for a given number of tries.
+ */
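+
+/*
+ * Editor's illustration (not part of the imported DPDK sources): a minimal
+ * sketch of the setup order described above, meant to live in application
+ * code. example_port_init() is a hypothetical helper; the queue size (512) is
+ * arbitrary, <string.h> is assumed for memset(), the mempool is assumed to be
+ * created by the caller, and NULL selects the driver's default queue config.
+ */
+static int
+example_port_init(uint8_t port_id, struct rte_mempool *mb_pool)
+{
+ struct rte_eth_conf port_conf;
+ int ret;
+
+ memset(&port_conf, 0, sizeof(port_conf));
+ ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf); /* 1 RX + 1 TX queue */
+ if (ret < 0)
+  return ret;
+ ret = rte_eth_tx_queue_setup(port_id, 0, 512,
+   rte_eth_dev_socket_id(port_id), NULL);
+ if (ret < 0)
+  return ret;
+ ret = rte_eth_rx_queue_setup(port_id, 0, 512,
+   rte_eth_dev_socket_id(port_id), NULL, mb_pool);
+ if (ret < 0)
+  return ret;
+ return rte_eth_dev_start(port_id); /* RX/TX may be invoked after this */
+}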
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_mbuf.h>
+#include "rte_ether.h"
+#include "rte_eth_ctrl.h"
+
+/**
+ * A structure used to retrieve statistics for an Ethernet port.
+ */
+struct rte_eth_stats {
+ uint64_t ipackets; /**< Total number of successfully received packets. */
+ uint64_t opackets; /**< Total number of successfully transmitted packets.*/
+ uint64_t ibytes; /**< Total number of successfully received bytes. */
+ uint64_t obytes; /**< Total number of successfully transmitted bytes. */
+ uint64_t imissed; /**< Total of RX missed packets (e.g. full FIFO). */
+ uint64_t ibadcrc; /**< Total of RX packets with CRC error. */
+ uint64_t ibadlen; /**< Total of RX packets with bad length. */
+ uint64_t ierrors; /**< Total number of erroneous received packets. */
+ uint64_t oerrors; /**< Total number of failed transmitted packets. */
+ uint64_t imcasts; /**< Total number of multicast received packets. */
+ uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */
+ uint64_t fdirmatch; /**< Total number of RX packets matching a filter. */
+ uint64_t fdirmiss; /**< Total number of RX packets not matching any filter. */
+ uint64_t tx_pause_xon; /**< Total nb. of XON pause frame sent. */
+ uint64_t rx_pause_xon; /**< Total nb. of XON pause frame received. */
+ uint64_t tx_pause_xoff; /**< Total nb. of XOFF pause frame sent. */
+ uint64_t rx_pause_xoff; /**< Total nb. of XOFF pause frame received. */
+ uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+ /**< Total number of queue RX packets. */
+ uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+ /**< Total number of queue TX packets. */
+ uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+ /**< Total number of successfully received queue bytes. */
+ uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+ /**< Total number of successfully transmitted queue bytes. */
+ uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+ /**< Total number of queue packets received that are dropped. */
+ uint64_t ilbpackets;
+ /**< Total number of good packets received from loopback, VF only. */
+ uint64_t olbpackets;
+ /**< Total number of good packets transmitted to loopback, VF only. */
+ uint64_t ilbbytes;
+ /**< Total number of good bytes received from loopback, VF only. */
+ uint64_t olbbytes;
+ /**< Total number of good bytes transmitted to loopback, VF only. */
+};
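+
+/*
+ * Editor's illustration (not part of the imported DPDK sources): reading the
+ * counters above with rte_eth_stats_get(), which is declared further down in
+ * this header. example_dump_basic_stats() is a hypothetical helper and
+ * assumes <stdio.h> and <inttypes.h> are included.
+ */
+static void
+example_dump_basic_stats(uint8_t port_id)
+{
+ struct rte_eth_stats st;
+
+ rte_eth_stats_get(port_id, &st);
+ printf("port %u: rx %" PRIu64 " pkts, tx %" PRIu64 " pkts, "
+  "rx errors %" PRIu64 ", no-mbuf drops %" PRIu64 "\n",
+  port_id, st.ipackets, st.opackets, st.ierrors, st.rx_nombuf);
+}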
+
+/**
+ * A structure used to retrieve link-level information of an Ethernet port.
+ */
+struct rte_eth_link {
+ uint16_t link_speed; /**< ETH_LINK_SPEED_[10, 100, 1000, 10000] */
+ uint16_t link_duplex; /**< ETH_LINK_[HALF_DUPLEX, FULL_DUPLEX] */
+ uint8_t link_status : 1; /**< 1 -> link up, 0 -> link down */
+}__attribute__((aligned(8))); /**< aligned for atomic64 read/write */
+
+#define ETH_LINK_SPEED_AUTONEG 0 /**< Auto-negotiate link speed. */
+#define ETH_LINK_SPEED_10 10 /**< 10 megabits/second. */
+#define ETH_LINK_SPEED_100 100 /**< 100 megabits/second. */
+#define ETH_LINK_SPEED_1000 1000 /**< 1 gigabits/second. */
+#define ETH_LINK_SPEED_10000 10000 /**< 10 gigabits/second. */
+#define ETH_LINK_SPEED_10G 10000 /**< alias of 10 gigabits/second. */
+#define ETH_LINK_SPEED_20G 20000 /**< 20 gigabits/second. */
+#define ETH_LINK_SPEED_40G 40000 /**< 40 gigabits/second. */
+
+#define ETH_LINK_AUTONEG_DUPLEX 0 /**< Auto-negotiate duplex. */
+#define ETH_LINK_HALF_DUPLEX 1 /**< Half-duplex connection. */
+#define ETH_LINK_FULL_DUPLEX 2 /**< Full-duplex connection. */
+
+/**
+ * A structure used to configure the ring threshold registers of an RX/TX
+ * queue for an Ethernet port.
+ */
+struct rte_eth_thresh {
+ uint8_t pthresh; /**< Ring prefetch threshold. */
+ uint8_t hthresh; /**< Ring host threshold. */
+ uint8_t wthresh; /**< Ring writeback threshold. */
+};
+
+/**
+ * Simple flags are used for rte_eth_conf.rxmode.mq_mode.
+ */
+#define ETH_MQ_RX_RSS_FLAG 0x1
+#define ETH_MQ_RX_DCB_FLAG 0x2
+#define ETH_MQ_RX_VMDQ_FLAG 0x4
+
+/**
+ * A set of values to identify what method is to be used to route
+ * packets to multiple queues.
+ */
+enum rte_eth_rx_mq_mode {
+ /** None of DCB, RSS or VMDQ mode. */
+ ETH_MQ_RX_NONE = 0,
+
+ /** For RX side, only RSS is on */
+ ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
+ /** For RX side, only DCB is on. */
+ ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
+ /** Both DCB and RSS enable */
+ ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
+
+ /** Only VMDQ, no RSS nor DCB */
+ ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
+ /** RSS mode with VMDQ */
+ ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
+ /** Use VMDQ+DCB to route traffic to queues */
+ ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
+ /** Enable both VMDQ and DCB in VMDq */
+ ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
+ ETH_MQ_RX_VMDQ_FLAG,
+};
+
+/**
+ * for rx mq mode backward compatible
+ */
+#define ETH_RSS ETH_MQ_RX_RSS
+#define VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
+#define ETH_DCB_RX ETH_MQ_RX_DCB
+
+/**
+ * A set of values to identify what method is to be used to transmit
+ * packets using multi-TCs.
+ */
+enum rte_eth_tx_mq_mode {
+ ETH_MQ_TX_NONE = 0, /**< It is in neither DCB nor VT mode. */
+ ETH_MQ_TX_DCB, /**< For TX side, only DCB is on. */
+ ETH_MQ_TX_VMDQ_DCB, /**< For TX side, both DCB and VT are on. */
+ ETH_MQ_TX_VMDQ_ONLY, /**< Only VT on, no DCB */
+};
+
+/**
+ * for tx mq mode backward compatible
+ */
+#define ETH_DCB_NONE ETH_MQ_TX_NONE
+#define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
+#define ETH_DCB_TX ETH_MQ_TX_DCB
+
+/**
+ * A structure used to configure the RX features of an Ethernet port.
+ */
+struct rte_eth_rxmode {
+ /** The multi-queue packet distribution mode to be used, e.g. RSS. */
+ enum rte_eth_rx_mq_mode mq_mode;
+ uint32_t max_rx_pkt_len; /**< Only used if jumbo_frame enabled. */
+ uint16_t split_hdr_size; /**< hdr buf size (header_split enabled).*/
+ uint8_t header_split : 1, /**< Header Split enable. */
+ hw_ip_checksum : 1, /**< IP/UDP/TCP checksum offload enable. */
+ hw_vlan_filter : 1, /**< VLAN filter enable. */
+ hw_vlan_strip : 1, /**< VLAN strip enable. */
+ hw_vlan_extend : 1, /**< Extended VLAN enable. */
+ jumbo_frame : 1, /**< Jumbo Frame Receipt enable. */
+ hw_strip_crc : 1, /**< Enable CRC stripping by hardware. */
+ enable_scatter : 1; /**< Enable scatter packets rx handler */
+};
+
+/**
+ * A structure used to configure the Receive Side Scaling (RSS) feature
+ * of an Ethernet port.
+ * If not NULL, the *rss_key* pointer of the *rss_conf* structure points
+ * to an array holding the RSS key to use for hashing specific header
+ * fields of received packets. The length of this array should be indicated
+ * by *rss_key_len* below. Otherwise, a default random hash key is used by
+ * the device driver.
+ *
+ * The *rss_key_len* field of the *rss_conf* structure indicates the length
+ * in bytes of the array pointed by *rss_key*. To be compatible, this length
+ * will be checked in i40e only. Others assume 40 bytes to be used as before.
+ *
+ * The *rss_hf* field of the *rss_conf* structure indicates the different
+ * types of IPv4/IPv6 packets to which the RSS hashing must be applied.
+ * Supplying an *rss_hf* equal to zero disables the RSS feature.
+ */
+struct rte_eth_rss_conf {
+ uint8_t *rss_key; /**< If not NULL, 40-byte hash key. */
+ uint8_t rss_key_len; /**< hash key length in bytes. */
+ uint64_t rss_hf; /**< Hash functions to apply - see below. */
+};
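+
+/*
+ * Editor's illustration (not part of the imported DPDK sources): an RSS
+ * configuration that relies on the driver's default random key, as described
+ * above. The ETH_RSS_IP flag used here is defined a few lines further down in
+ * this header; such a structure would typically be plugged into
+ * rte_eth_conf.rx_adv_conf.rss_conf or passed to rte_eth_dev_rss_hash_update().
+ */
+static const struct rte_eth_rss_conf example_rss_conf = {
+ .rss_key = NULL, /* NULL -> driver uses a default random key */
+ .rss_key_len = 0, /* ignored when rss_key is NULL */
+ .rss_hf = ETH_RSS_IP, /* hash on IPv4/IPv6 header fields */
+};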
+
+/* Supported RSS offloads */
+/* for 1G & 10G */
+#define ETH_RSS_IPV4_SHIFT 0
+#define ETH_RSS_IPV4_TCP_SHIFT 1
+#define ETH_RSS_IPV6_SHIFT 2
+#define ETH_RSS_IPV6_EX_SHIFT 3
+#define ETH_RSS_IPV6_TCP_SHIFT 4
+#define ETH_RSS_IPV6_TCP_EX_SHIFT 5
+#define ETH_RSS_IPV4_UDP_SHIFT 6
+#define ETH_RSS_IPV6_UDP_SHIFT 7
+#define ETH_RSS_IPV6_UDP_EX_SHIFT 8
+/* for 40G only */
+#define ETH_RSS_NONF_IPV4_UDP_SHIFT 31
+#define ETH_RSS_NONF_IPV4_TCP_SHIFT 33
+#define ETH_RSS_NONF_IPV4_SCTP_SHIFT 34
+#define ETH_RSS_NONF_IPV4_OTHER_SHIFT 35
+#define ETH_RSS_FRAG_IPV4_SHIFT 36
+#define ETH_RSS_NONF_IPV6_UDP_SHIFT 41
+#define ETH_RSS_NONF_IPV6_TCP_SHIFT 43
+#define ETH_RSS_NONF_IPV6_SCTP_SHIFT 44
+#define ETH_RSS_NONF_IPV6_OTHER_SHIFT 45
+#define ETH_RSS_FRAG_IPV6_SHIFT 46
+#define ETH_RSS_FCOE_OX_SHIFT 48
+#define ETH_RSS_FCOE_RX_SHIFT 49
+#define ETH_RSS_FCOE_OTHER_SHIFT 50
+#define ETH_RSS_L2_PAYLOAD_SHIFT 63
+
+/* for 1G & 10G */
+#define ETH_RSS_IPV4 (1 << ETH_RSS_IPV4_SHIFT)
+#define ETH_RSS_IPV4_TCP (1 << ETH_RSS_IPV4_TCP_SHIFT)
+#define ETH_RSS_IPV6 (1 << ETH_RSS_IPV6_SHIFT)
+#define ETH_RSS_IPV6_EX (1 << ETH_RSS_IPV6_EX_SHIFT)
+#define ETH_RSS_IPV6_TCP (1 << ETH_RSS_IPV6_TCP_SHIFT)
+#define ETH_RSS_IPV6_TCP_EX (1 << ETH_RSS_IPV6_TCP_EX_SHIFT)
+#define ETH_RSS_IPV4_UDP (1 << ETH_RSS_IPV4_UDP_SHIFT)
+#define ETH_RSS_IPV6_UDP (1 << ETH_RSS_IPV6_UDP_SHIFT)
+#define ETH_RSS_IPV6_UDP_EX (1 << ETH_RSS_IPV6_UDP_EX_SHIFT)
+/* for 40G only */
+#define ETH_RSS_NONF_IPV4_UDP (1ULL << ETH_RSS_NONF_IPV4_UDP_SHIFT)
+#define ETH_RSS_NONF_IPV4_TCP (1ULL << ETH_RSS_NONF_IPV4_TCP_SHIFT)
+#define ETH_RSS_NONF_IPV4_SCTP (1ULL << ETH_RSS_NONF_IPV4_SCTP_SHIFT)
+#define ETH_RSS_NONF_IPV4_OTHER (1ULL << ETH_RSS_NONF_IPV4_OTHER_SHIFT)
+#define ETH_RSS_FRAG_IPV4 (1ULL << ETH_RSS_FRAG_IPV4_SHIFT)
+#define ETH_RSS_NONF_IPV6_UDP (1ULL << ETH_RSS_NONF_IPV6_UDP_SHIFT)
+#define ETH_RSS_NONF_IPV6_TCP (1ULL << ETH_RSS_NONF_IPV6_TCP_SHIFT)
+#define ETH_RSS_NONF_IPV6_SCTP (1ULL << ETH_RSS_NONF_IPV6_SCTP_SHIFT)
+#define ETH_RSS_NONF_IPV6_OTHER (1ULL << ETH_RSS_NONF_IPV6_OTHER_SHIFT)
+#define ETH_RSS_FRAG_IPV6 (1ULL << ETH_RSS_FRAG_IPV6_SHIFT)
+/* FCOE relevant should not be used */
+#define ETH_RSS_FCOE_OX (1ULL << ETH_RSS_FCOE_OX_SHIFT)
+#define ETH_RSS_FCOE_RX (1ULL << ETH_RSS_FCOE_RX_SHIFT)
+#define ETH_RSS_FCOE_OTHER (1ULL << ETH_RSS_FCOE_OTHER_SHIFT)
+#define ETH_RSS_L2_PAYLOAD (1ULL << ETH_RSS_L2_PAYLOAD_SHIFT)
+
+#define ETH_RSS_IP ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONF_IPV4_OTHER | \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONF_IPV6_OTHER | \
+ ETH_RSS_FRAG_IPV6)
+#define ETH_RSS_UDP ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_IPV4_UDP | \
+ ETH_RSS_IPV6_UDP | \
+ ETH_RSS_IPV6_UDP_EX | \
+ ETH_RSS_NONF_IPV4_UDP | \
+ ETH_RSS_NONF_IPV6_UDP)
+/**< Mask of valid RSS hash protocols */
+#define ETH_RSS_PROTO_MASK ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_IPV4_TCP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV4_UDP | \
+ ETH_RSS_IPV6_UDP | \
+ ETH_RSS_IPV6_UDP_EX | \
+ ETH_RSS_NONF_IPV4_UDP | \
+ ETH_RSS_NONF_IPV4_TCP | \
+ ETH_RSS_NONF_IPV4_SCTP | \
+ ETH_RSS_NONF_IPV4_OTHER | \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONF_IPV6_UDP | \
+ ETH_RSS_NONF_IPV6_TCP | \
+ ETH_RSS_NONF_IPV6_SCTP | \
+ ETH_RSS_NONF_IPV6_OTHER | \
+ ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_L2_PAYLOAD)
+
+/*
+ * Definitions used for redirection table entry size.
+ * Some RSS RETA sizes may not be supported by some drivers, check the
+ * documentation or the description of relevant functions for more details.
+ */
+#define ETH_RSS_RETA_SIZE_64 64
+#define ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_512 512
+#define RTE_RETA_GROUP_SIZE 64
+
+/* Definitions used for VMDQ and DCB functionality */
+#define ETH_VMDQ_MAX_VLAN_FILTERS 64 /**< Maximum nb. of VMDQ vlan filters. */
+#define ETH_DCB_NUM_USER_PRIORITIES 8 /**< Maximum nb. of DCB priorities. */
+#define ETH_VMDQ_DCB_NUM_QUEUES 128 /**< Maximum nb. of VMDQ DCB queues. */
+#define ETH_DCB_NUM_QUEUES 128 /**< Maximum nb. of DCB queues. */
+
+/* DCB capability defines */
+#define ETH_DCB_PG_SUPPORT 0x00000001 /**< Priority Group(ETS) support. */
+#define ETH_DCB_PFC_SUPPORT 0x00000002 /**< Priority Flow Control support. */
+
+/* Definitions used for VLAN Offload functionality */
+#define ETH_VLAN_STRIP_OFFLOAD 0x0001 /**< VLAN Strip On/Off */
+#define ETH_VLAN_FILTER_OFFLOAD 0x0002 /**< VLAN Filter On/Off */
+#define ETH_VLAN_EXTEND_OFFLOAD 0x0004 /**< VLAN Extend On/Off */
+
+/* Definitions used for mask VLAN setting */
+#define ETH_VLAN_STRIP_MASK 0x0001 /**< VLAN Strip setting mask */
+#define ETH_VLAN_FILTER_MASK 0x0002 /**< VLAN Filter setting mask*/
+#define ETH_VLAN_EXTEND_MASK 0x0004 /**< VLAN Extend setting mask*/
+#define ETH_VLAN_ID_MAX 0x0FFF /**< VLAN ID is in lower 12 bits*/
+
+/* Definitions used for receive MAC address */
+#define ETH_NUM_RECEIVE_MAC_ADDR 128 /**< Maximum nb. of receive mac addr. */
+
+/* Definitions used for unicast hash */
+#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array. */
+
+/* Definitions used for VMDQ pool rx mode setting */
+#define ETH_VMDQ_ACCEPT_UNTAG 0x0001 /**< accept untagged packets. */
+#define ETH_VMDQ_ACCEPT_HASH_MC 0x0002 /**< accept packets in multicast table. */
+#define ETH_VMDQ_ACCEPT_HASH_UC 0x0004 /**< accept packets in unicast table. */
+#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008 /**< accept broadcast packets. */
+#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010 /**< multicast promiscuous. */
+
+/* Definitions used for VMDQ mirror rules setting */
+#define ETH_VMDQ_NUM_MIRROR_RULE 4 /**< Maximum nb. of mirror rules. */
+
+#define ETH_VMDQ_POOL_MIRROR 0x0001 /**< Virtual Pool Mirroring. */
+#define ETH_VMDQ_UPLINK_MIRROR 0x0002 /**< Uplink Port Mirroring. */
+#define ETH_VMDQ_DOWNLIN_MIRROR 0x0004 /**< Downlink Port Mirroring. */
+#define ETH_VMDQ_VLAN_MIRROR 0x0008 /**< VLAN Mirroring. */
+
+/**
+ * A structure used to configure VLAN traffic mirror of an Ethernet port.
+ */
+struct rte_eth_vlan_mirror {
+ uint64_t vlan_mask; /**< mask for valid VLAN ID. */
+ uint16_t vlan_id[ETH_VMDQ_MAX_VLAN_FILTERS];
+ /** VLAN ID list for vlan mirror. */
+};
+
+/**
+ * A structure used to configure traffic mirror of an Ethernet port.
+ */
+struct rte_eth_vmdq_mirror_conf {
+ uint8_t rule_type_mask; /**< Mirroring rule type mask we want to set */
+ uint8_t dst_pool; /**< Destination pool for this mirror rule. */
+ uint64_t pool_mask; /**< Bitmap of pool for pool mirroring */
+ struct rte_eth_vlan_mirror vlan; /**< VLAN ID setting for VLAN mirroring */
+};
+
+/**
+ * A structure used to configure 64 entries of Redirection Table of the
+ * Receive Side Scaling (RSS) feature of an Ethernet port. To configure
+ * more than 64 entries supported by hardware, an array of this structure
+ * is needed.
+ */
+struct rte_eth_rss_reta_entry64 {
+ uint64_t mask;
+ /**< Mask bits indicate which entries need to be updated/queried. */
+ uint8_t reta[RTE_RETA_GROUP_SIZE];
+ /**< Group of 64 redirection table entries. */
+};
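+
+/*
+ * Editor's illustration (not part of the imported DPDK sources): filling a
+ * 128-entry redirection table spread over 4 RX queues, using the 64-entry
+ * groups described above. rte_eth_dev_rss_reta_update() is declared further
+ * down in this header; <string.h> is assumed for memset() and the queue count
+ * is an arbitrary example value.
+ */
+static int
+example_spread_reta(uint8_t port_id)
+{
+ struct rte_eth_rss_reta_entry64 reta[ETH_RSS_RETA_SIZE_128 / RTE_RETA_GROUP_SIZE];
+ uint16_t i;
+
+ memset(reta, 0, sizeof(reta));
+ for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
+  /* mark the entry as valid and map it to one of 4 RX queues */
+  reta[i / RTE_RETA_GROUP_SIZE].mask |= 1ULL << (i % RTE_RETA_GROUP_SIZE);
+  reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = i % 4;
+ }
+ return rte_eth_dev_rss_reta_update(port_id, reta, ETH_RSS_RETA_SIZE_128);
+}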
+
+/**
+ * This enum indicates the possible number of traffic classes
+ * in DCB configurations
+ */
+enum rte_eth_nb_tcs {
+ ETH_4_TCS = 4, /**< 4 TCs with DCB. */
+ ETH_8_TCS = 8 /**< 8 TCs with DCB. */
+};
+
+/**
+ * This enum indicates the possible number of queue pools
+ * in VMDQ configurations.
+ */
+enum rte_eth_nb_pools {
+ ETH_8_POOLS = 8, /**< 8 VMDq pools. */
+ ETH_16_POOLS = 16, /**< 16 VMDq pools. */
+ ETH_32_POOLS = 32, /**< 32 VMDq pools. */
+ ETH_64_POOLS = 64 /**< 64 VMDq pools. */
+};
+
+/* This structure may be extended in future. */
+struct rte_eth_dcb_rx_conf {
+ enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
+ uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
+ /**< Possible DCB queues, 4 or 8. */
+};
+
+struct rte_eth_vmdq_dcb_tx_conf {
+ enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
+ uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
+ /**< Possible DCB queues, 4 or 8. */
+};
+
+struct rte_eth_dcb_tx_conf {
+ enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
+ uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
+ /**< Possible DCB queues, 4 or 8. */
+};
+
+struct rte_eth_vmdq_tx_conf {
+ enum rte_eth_nb_pools nb_queue_pools; /**< VMDq mode, 64 pools. */
+};
+
+/**
+ * A structure used to configure the VMDQ+DCB feature
+ * of an Ethernet port.
+ *
+ * Using this feature, packets are routed to a pool of queues, based
+ * on the vlan id in the vlan tag, and then to a specific queue within
+ * that pool, using the user priority vlan tag field.
+ *
+ * A default pool may be used, if desired, to route all traffic which
+ * does not match the vlan filter rules.
+ */
+struct rte_eth_vmdq_dcb_conf {
+ enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools */
+ uint8_t enable_default_pool; /**< If non-zero, use a default pool */
+ uint8_t default_pool; /**< The default pool, if applicable */
+ uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
+ struct {
+ uint16_t vlan_id; /**< The vlan id of the received frame */
+ uint64_t pools; /**< Bitmask of pools for packet rx */
+ } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
+ uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
+ /**< Selects a queue in a pool */
+};
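+
+/*
+ * Editor's illustration (not part of the imported DPDK sources): filling the
+ * structure above so that VLAN 100 is routed to pool 0 and VLAN 200 to pool 1,
+ * with the user-priority field selecting the queue inside each pool. The VLAN
+ * IDs, pool numbers and priority mapping are arbitrary example values.
+ */
+static void
+example_fill_vmdq_dcb(struct rte_eth_vmdq_dcb_conf *cfg)
+{
+ uint8_t prio;
+
+ cfg->nb_queue_pools = ETH_16_POOLS;
+ cfg->enable_default_pool = 0; /* VLANs without a map entry are dropped */
+ cfg->default_pool = 0;
+ cfg->nb_pool_maps = 2;
+ cfg->pool_map[0].vlan_id = 100;
+ cfg->pool_map[0].pools = 1ULL << 0; /* VLAN 100 -> pool 0 */
+ cfg->pool_map[1].vlan_id = 200;
+ cfg->pool_map[1].pools = 1ULL << 1; /* VLAN 200 -> pool 1 */
+ for (prio = 0; prio < ETH_DCB_NUM_USER_PRIORITIES; prio++)
+  cfg->dcb_queue[prio] = prio % ETH_4_TCS; /* priority -> queue in pool */
+}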
+
+struct rte_eth_vmdq_rx_conf {
+ enum rte_eth_nb_pools nb_queue_pools; /**< VMDq only mode, 8 or 64 pools */
+ uint8_t enable_default_pool; /**< If non-zero, use a default pool */
+ uint8_t default_pool; /**< The default pool, if applicable */
+ uint8_t enable_loop_back; /**< Enable VT loop back */
+ uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
+ uint32_t rx_mode; /**< Flags from ETH_VMDQ_ACCEPT_* */
+ struct {
+ uint16_t vlan_id; /**< The vlan id of the received frame */
+ uint64_t pools; /**< Bitmask of pools for packet rx */
+ } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
+};
+
+/**
+ * A structure used to configure the TX features of an Ethernet port.
+ */
+struct rte_eth_txmode {
+ enum rte_eth_tx_mq_mode mq_mode; /**< TX multi-queues mode. */
+
+ /* For i40e specifically */
+ uint16_t pvid;
+ uint8_t hw_vlan_reject_tagged : 1,
+ /**< If set, reject sending out tagged pkts */
+ hw_vlan_reject_untagged : 1,
+ /**< If set, reject sending out untagged pkts */
+ hw_vlan_insert_pvid : 1;
+ /**< If set, enable port based VLAN insertion */
+};
+
+/**
+ * A structure used to configure an RX ring of an Ethernet port.
+ */
+struct rte_eth_rxconf {
+ struct rte_eth_thresh rx_thresh; /**< RX ring threshold registers. */
+ uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
+ uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
+ uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
+};
+
+#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< nb_segs=1 for all mbufs */
+#define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002 /**< refcnt can be ignored */
+#define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004 /**< all bufs come from same mempool */
+#define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100 /**< disable VLAN offload */
+#define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200 /**< disable SCTP checksum offload */
+#define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400 /**< disable UDP checksum offload */
+#define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800 /**< disable TCP checksum offload */
+#define ETH_TXQ_FLAGS_NOOFFLOADS \
+ (ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
+ ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
+/**
+ * A structure used to configure a TX ring of an Ethernet port.
+ */
+struct rte_eth_txconf {
+ struct rte_eth_thresh tx_thresh; /**< TX ring threshold registers. */
+ uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
+ uint16_t tx_free_thresh; /**< Drives the freeing of TX buffers. */
+ uint32_t txq_flags; /**< Set flags for the Tx queue */
+ uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
+};
+
+/**
+ * This enum indicates the flow control mode
+ */
+enum rte_eth_fc_mode {
+ RTE_FC_NONE = 0, /**< Disable flow control. */
+ RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
+ RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
+ RTE_FC_FULL /**< Enable flow control on both side. */
+};
+
+/**
+ * A structure used to configure Ethernet flow control parameter.
+ * These parameters will be configured into the register of the NIC.
+ * Please refer to the corresponding data sheet for proper value.
+ */
+struct rte_eth_fc_conf {
+ uint32_t high_water; /**< High threshold value to trigger XOFF */
+ uint32_t low_water; /**< Low threshold value to trigger XON */
+ uint16_t pause_time; /**< Pause quota in the Pause frame */
+ uint16_t send_xon; /**< Whether an XON frame needs to be sent. */
+ enum rte_eth_fc_mode mode; /**< Link flow control mode */
+ uint8_t mac_ctrl_frame_fwd; /**< Forward MAC control frames */
+ uint8_t autoneg; /**< Use Pause autoneg */
+};
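+
+/*
+ * Editor's illustration (not part of the imported DPDK sources): enabling full
+ * link-level flow control with rte_eth_dev_flow_ctrl_set(), which is declared
+ * further down in this header. The threshold and pause-time values are
+ * arbitrary examples; proper values are NIC specific (see its data sheet).
+ */
+static int
+example_enable_flow_ctrl(uint8_t port_id)
+{
+ struct rte_eth_fc_conf fc = {
+  .mode = RTE_FC_FULL, /* generate and honour PAUSE frames */
+  .high_water = 1024,
+  .low_water = 512,
+  .pause_time = 0x680,
+  .send_xon = 1,
+ };
+
+ return rte_eth_dev_flow_ctrl_set(port_id, &fc);
+}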
+
+/**
+ * A structure used to configure Ethernet priority flow control parameter.
+ * These parameters will be configured into the register of the NIC.
+ * Please refer to the corresponding data sheet for proper value.
+ */
+struct rte_eth_pfc_conf {
+ struct rte_eth_fc_conf fc; /**< General flow control parameter. */
+ uint8_t priority; /**< VLAN User Priority. */
+};
+
+/**
+ * Memory space that can be configured to store Flow Director filters
+ * in the board memory.
+ */
+enum rte_fdir_pballoc_type {
+ RTE_FDIR_PBALLOC_64K = 0, /**< 64k. */
+ RTE_FDIR_PBALLOC_128K, /**< 128k. */
+ RTE_FDIR_PBALLOC_256K, /**< 256k. */
+};
+
+/**
+ * Select report mode of FDIR hash information in RX descriptors.
+ */
+enum rte_fdir_status_mode {
+ RTE_FDIR_NO_REPORT_STATUS = 0, /**< Never report FDIR hash. */
+ RTE_FDIR_REPORT_STATUS, /**< Only report FDIR hash for matching pkts. */
+ RTE_FDIR_REPORT_STATUS_ALWAYS, /**< Always report FDIR hash. */
+};
+
+/**
+ * A structure used to configure the Flow Director (FDIR) feature
+ * of an Ethernet port.
+ *
+ * If mode is RTE_FDIR_DISABLE, the pballoc value is ignored.
+ */
+struct rte_fdir_conf {
+ enum rte_fdir_mode mode; /**< Flow Director mode. */
+ enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
+ enum rte_fdir_status_mode status; /**< How to report FDIR hash. */
+ /** Offset of flexbytes field in RX packets (in 16-bit word units). */
+ uint8_t flexbytes_offset;
+ /** RX queue of packets matching a "drop" filter in perfect mode. */
+ uint8_t drop_queue;
+ struct rte_eth_fdir_flex_conf flex_conf;
+ /**< Flex payload configuration. */
+};
+
+/**
+ * UDP tunneling configuration.
+ */
+struct rte_eth_udp_tunnel {
+ uint16_t udp_port;
+ uint8_t prot_type;
+};
+
+/**
+ * Possible l4type of FDIR filters.
+ */
+enum rte_l4type {
+ RTE_FDIR_L4TYPE_NONE = 0, /**< None. */
+ RTE_FDIR_L4TYPE_UDP, /**< UDP. */
+ RTE_FDIR_L4TYPE_TCP, /**< TCP. */
+ RTE_FDIR_L4TYPE_SCTP, /**< SCTP. */
+};
+
+/**
+ * Select IPv4 or IPv6 FDIR filters.
+ */
+enum rte_iptype {
+ RTE_FDIR_IPTYPE_IPV4 = 0, /**< IPv4. */
+ RTE_FDIR_IPTYPE_IPV6, /**< IPv6. */
+};
+
+/**
+ * A structure used to define a FDIR packet filter.
+ */
+struct rte_fdir_filter {
+ uint16_t flex_bytes; /**< Flex bytes value to match. */
+ uint16_t vlan_id; /**< VLAN ID value to match, 0 otherwise. */
+ uint16_t port_src; /**< Source port to match, 0 otherwise. */
+ uint16_t port_dst; /**< Destination port to match, 0 otherwise. */
+ union {
+ uint32_t ipv4_addr; /**< IPv4 source address to match. */
+ uint32_t ipv6_addr[4]; /**< IPv6 source address to match. */
+ } ip_src; /**< IPv4/IPv6 source address to match (union of above). */
+ union {
+ uint32_t ipv4_addr; /**< IPv4 destination address to match. */
+ uint32_t ipv6_addr[4]; /**< IPv6 destination address to match */
+ } ip_dst; /**< IPv4/IPv6 destination address to match (union of above). */
+ enum rte_l4type l4type; /**< l4type to match: NONE/UDP/TCP/SCTP. */
+ enum rte_iptype iptype; /**< IP packet type to match: IPv4 or IPv6. */
+};
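+
+/*
+ * Editor's illustration (not part of the imported DPDK sources): defining a
+ * perfect-mode flow director filter for TCP traffic to 192.0.2.1:80 and
+ * steering it to RX queue 2. rte_eth_dev_fdir_add_perfect_filter() is declared
+ * further down in this header; the address, port, queue and soft_id values are
+ * arbitrary examples, assumed to be supplied in big-endian order, and
+ * <string.h>/<rte_byteorder.h> are assumed to be included.
+ */
+static int
+example_add_fdir_perfect(uint8_t port_id)
+{
+ struct rte_fdir_filter f;
+
+ memset(&f, 0, sizeof(f));
+ f.iptype = RTE_FDIR_IPTYPE_IPV4;
+ f.l4type = RTE_FDIR_L4TYPE_TCP;
+ f.ip_dst.ipv4_addr = rte_cpu_to_be_32(0xC0000201); /* 192.0.2.1 */
+ f.port_dst = rte_cpu_to_be_16(80);
+
+ /* soft_id 1, RX queue 2, drop = 0 (accept matching packets) */
+ return rte_eth_dev_fdir_add_perfect_filter(port_id, &f, 1, 2, 0);
+}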
+
+/**
+ * A structure used to configure FDIR masks that are used by the device
+ * to match the various fields of RX packet headers.
+ * @note The only_ip_flow field has the opposite meaning compared to other
+ * masks!
+ */
+struct rte_fdir_masks {
+ /** When set to 1, packet l4type is \b NOT relevant in filters, and
+ source and destination port masks must be set to zero. */
+ uint8_t only_ip_flow;
+ /** If set to 1, vlan_id is relevant in filters. */
+ uint8_t vlan_id;
+ /** If set to 1, vlan_prio is relevant in filters. */
+ uint8_t vlan_prio;
+ /** If set to 1, flexbytes is relevant in filters. */
+ uint8_t flexbytes;
+ /** If set to 1, set the IPv6 masks. Otherwise set the IPv4 masks. */
+ uint8_t set_ipv6_mask;
+ /** When set to 1, comparison of destination IPv6 address with IP6AT
+ registers is meaningful. */
+ uint8_t comp_ipv6_dst;
+ /** Mask of Destination IPv4 Address. All bits set to 1 define the
+ relevant bits to use in the destination address of an IPv4 packet
+ when matching it against FDIR filters. */
+ uint32_t dst_ipv4_mask;
+ /** Mask of Source IPv4 Address. All bits set to 1 define
+ the relevant bits to use in the source address of an IPv4 packet
+ when matching it against FDIR filters. */
+ uint32_t src_ipv4_mask;
+ /** Mask of Destination IPv6 Address. All bits set to 1 define the
+ relevant BYTES to use in the destination address of an IPv6 packet
+ when matching it against FDIR filters. */
+ uint16_t dst_ipv6_mask;
+ /** Mask of Source IPv6 Address. All bits set to 1 define the
+ relevant BYTES to use in the source address of an IPv6 packet
+ when matching it against FDIR filters. */
+ uint16_t src_ipv6_mask;
+ /** Mask of Source Port. All bits set to 1 define the relevant
+ bits to use in the source port of an IP packets when matching it
+ against FDIR filters. */
+ uint16_t src_port_mask;
+ /** Mask of Destination Port. All bits set to 1 define the relevant
+ bits to use in the destination port of an IP packet when matching it
+ against FDIR filters. */
+ uint16_t dst_port_mask;
+};
+
+/**
+ * A structure used to report the status of the flow director filters in use.
+ */
+struct rte_eth_fdir {
+ /** Number of filters with collision indication. */
+ uint16_t collision;
+ /** Number of free (non programmed) filters. */
+ uint16_t free;
+ /** The Lookup hash value of the added filter that updated the value
+ of the MAXLEN field */
+ uint16_t maxhash;
+ /** Longest linked list of filters in the table. */
+ uint8_t maxlen;
+ /** Number of added filters. */
+ uint64_t add;
+ /** Number of removed filters. */
+ uint64_t remove;
+ /** Number of failed added filters (no more space in device). */
+ uint64_t f_add;
+ /** Number of failed removed filters. */
+ uint64_t f_remove;
+};
+
+/**
+ * A structure used to enable/disable specific device interrupts.
+ */
+struct rte_intr_conf {
+ /** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
+ uint16_t lsc;
+};
+
+/**
+ * A structure used to configure an Ethernet port.
+ * Depending upon the RX multi-queue mode, extra advanced
+ * configuration settings may be needed.
+ */
+struct rte_eth_conf {
+ uint16_t link_speed;
+ /**< ETH_LINK_SPEED_10[0|00|000], or 0 for autonegotiation */
+ uint16_t link_duplex;
+ /**< ETH_LINK_[HALF_DUPLEX|FULL_DUPLEX], or 0 for autonegotiation */
+ struct rte_eth_rxmode rxmode; /**< Port RX configuration. */
+ struct rte_eth_txmode txmode; /**< Port TX configuration. */
+ uint32_t lpbk_mode; /**< Loopback operation mode. By default the value
+ is 0, meaning the loopback mode is disabled.
+ Read the datasheet of given ethernet controller
+ for details. The possible values of this field
+ are defined in implementation of each driver. */
+ struct {
+ struct rte_eth_rss_conf rss_conf; /**< Port RSS configuration */
+ struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
+ /**< Port vmdq+dcb configuration. */
+ struct rte_eth_dcb_rx_conf dcb_rx_conf;
+ /**< Port dcb RX configuration. */
+ struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
+ /**< Port vmdq RX configuration. */
+ } rx_adv_conf; /**< Port RX filtering configuration. */
+ union {
+ struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
+ /**< Port vmdq+dcb TX configuration. */
+ struct rte_eth_dcb_tx_conf dcb_tx_conf;
+ /**< Port dcb TX configuration. */
+ struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
+ /**< Port vmdq TX configuration. */
+ } tx_adv_conf; /**< Port TX DCB configuration (union). */
+ /** Currently, Priority Flow Control (PFC) is supported; if DCB with PFC
+ is needed, this variable must be set to ETH_DCB_PFC_SUPPORT. */
+ uint32_t dcb_capability_en;
+ struct rte_fdir_conf fdir_conf; /**< FDIR configuration. */
+ struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
+};
+
+/**
+ * A structure used to retrieve the contextual information of
+ * an Ethernet device, such as the controlling driver of the device,
+ * its PCI context, etc...
+ */
+
+/**
+ * RX offload capabilities of a device.
+ */
+#define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
+#define DEV_RX_OFFLOAD_IPV4_CKSUM 0x00000002
+#define DEV_RX_OFFLOAD_UDP_CKSUM 0x00000004
+#define DEV_RX_OFFLOAD_TCP_CKSUM 0x00000008
+#define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
+
+/**
+ * TX offload capabilities of a device.
+ */
+#define DEV_TX_OFFLOAD_VLAN_INSERT 0x00000001
+#define DEV_TX_OFFLOAD_IPV4_CKSUM 0x00000002
+#define DEV_TX_OFFLOAD_UDP_CKSUM 0x00000004
+#define DEV_TX_OFFLOAD_TCP_CKSUM 0x00000008
+#define DEV_TX_OFFLOAD_SCTP_CKSUM 0x00000010
+#define DEV_TX_OFFLOAD_TCP_TSO 0x00000020
+#define DEV_TX_OFFLOAD_UDP_TSO 0x00000040
+
+struct rte_eth_dev_info {
+ struct rte_pci_device *pci_dev; /**< Device PCI information. */
+ const char *driver_name; /**< Device Driver name. */
+ unsigned int if_index; /**< Index to bound host interface, or 0 if none.
+ Use if_indextoname() to translate into an interface name. */
+ uint32_t min_rx_bufsize; /**< Minimum size of RX buffer. */
+ uint32_t max_rx_pktlen; /**< Maximum configurable length of RX pkt. */
+ uint16_t max_rx_queues; /**< Maximum number of RX queues. */
+ uint16_t max_tx_queues; /**< Maximum number of TX queues. */
+ uint32_t max_mac_addrs; /**< Maximum number of MAC addresses. */
+ uint32_t max_hash_mac_addrs;
+ /** Maximum number of hash MAC addresses for MTA and UTA. */
+ uint16_t max_vfs; /**< Maximum number of VFs. */
+ uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
+ uint32_t rx_offload_capa; /**< Device RX offload capabilities. */
+ uint32_t tx_offload_capa; /**< Device TX offload capabilities. */
+ uint16_t reta_size;
+ /**< Device redirection table size, the total number of entries. */
+ struct rte_eth_rxconf default_rxconf; /**< Default RX configuration */
+ struct rte_eth_txconf default_txconf; /**< Default TX configuration */
+ uint16_t vmdq_queue_base; /**< First queue ID for VMDQ pools. */
+ uint16_t vmdq_queue_num; /**< Queue number for VMDQ pools. */
+ uint16_t vmdq_pool_base; /**< First ID of VMDQ pools. */
+};
+
+/** Maximum name length for extended statistics counters */
+#define RTE_ETH_XSTATS_NAME_SIZE 64
+
+/**
+ * An Ethernet device extended statistic structure
+ *
+ * This structure is used by ethdev->eth_xstats_get() to provide
+ * statistics that are not provided in the generic rte_eth_stats
+ * structure.
+ */
+struct rte_eth_xstats {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint64_t value;
+};
+
+struct rte_eth_dev;
+
+struct rte_eth_dev_callback;
+/** @internal Structure to keep track of registered callbacks */
+TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
+
+#define TCP_UGR_FLAG 0x20
+#define TCP_ACK_FLAG 0x10
+#define TCP_PSH_FLAG 0x08
+#define TCP_RST_FLAG 0x04
+#define TCP_SYN_FLAG 0x02
+#define TCP_FIN_FLAG 0x01
+#define TCP_FLAG_ALL 0x3F
+
+/**
+ * A structure used to define an ethertype filter.
+ */
+struct rte_ethertype_filter {
+ uint16_t ethertype; /**< little endian. */
+ uint8_t priority_en; /**< compare priority enable. */
+ uint8_t priority;
+};
+
+/**
+ * A structure used to define a SYN filter.
+ */
+struct rte_syn_filter {
+ uint8_t hig_pri; /**< 1 means higher priority than 2tuple, 5tuple
+ and flex filters, 0 means lower priority. */
+};
+
+/**
+ * A structure used to define a 2tuple filter.
+ */
+struct rte_2tuple_filter {
+ uint16_t dst_port; /**< big endian. */
+ uint8_t protocol;
+ uint8_t tcp_flags;
+ uint16_t priority; /**< used when more than one filter matches. */
+ uint8_t dst_port_mask:1, /**< if mask is 1b, the field is not compared. */
+ protocol_mask:1;
+};
+
+/**
+ * A structure used to define a flex filter.
+ */
+struct rte_flex_filter {
+ uint16_t len;
+ uint32_t dwords[32]; /**< flex bytes in big endian. */
+ uint8_t mask[16]; /**< if mask bit is 1b, do not compare
+ corresponding byte in dwords. */
+ uint8_t priority;
+};
+
+/**
+ * A structure used to define a 5tuple filter.
+ */
+struct rte_5tuple_filter {
+ uint32_t dst_ip; /**< destination IP address in big endian. */
+ uint32_t src_ip; /**< source IP address in big endian. */
+ uint16_t dst_port; /**< destination port in big endian. */
+ uint16_t src_port; /**< source port in big endian. */
+ uint8_t protocol; /**< l4 protocol. */
+ uint8_t tcp_flags; /**< tcp flags. */
+ uint16_t priority; /**< seven levels (001b-111b), 111b is highest,
+ used when more than one filter matches. */
+ uint8_t dst_ip_mask:1, /**< if mask is 1b, do not compare dst ip. */
+ src_ip_mask:1, /**< if mask is 1b, do not compare src ip. */
+ dst_port_mask:1, /**< if mask is 1b, do not compare dst port. */
+ src_port_mask:1, /**< if mask is 1b, do not compare src port. */
+ protocol_mask:1; /**< if mask is 1b, do not compare protocol. */
+};
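+
+/*
+ * Editor's illustration (not part of the imported DPDK sources): filling a
+ * 5tuple filter that matches only the destination IP and port (source fields
+ * masked out), suitable for rte_eth_dev_add_5tuple_filter() as exported by the
+ * library. example_fill_5tuple() is a hypothetical helper; it assumes
+ * <string.h>, <netinet/in.h> and <rte_byteorder.h>, and the address, port and
+ * priority values are arbitrary examples.
+ */
+static void
+example_fill_5tuple(struct rte_5tuple_filter *f)
+{
+ memset(f, 0, sizeof(*f));
+ f->dst_ip = rte_cpu_to_be_32(0x0A000001); /* 10.0.0.1 */
+ f->dst_port = rte_cpu_to_be_16(443);
+ f->protocol = IPPROTO_TCP;
+ f->priority = 7; /* 111b: highest of the seven levels */
+ f->src_ip_mask = 1; /* 1 -> do not compare source IP */
+ f->src_port_mask = 1; /* 1 -> do not compare source port */
+}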
+
+/*
+ * Definitions of all functions exported by an Ethernet driver through the
+ * the generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev*
+ * structure associated with an Ethernet device.
+ */
+
+typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev);
+/**< @internal Ethernet device configuration. */
+
+typedef int (*eth_dev_start_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to start a configured Ethernet device. */
+
+typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to stop a configured Ethernet device. */
+
+typedef int (*eth_dev_set_link_up_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to link up a configured Ethernet device. */
+
+typedef int (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to link down a configured Ethernet device. */
+
+typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to close a configured Ethernet device. */
+
+typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to enable the RX promiscuous mode of an Ethernet device. */
+
+typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to disable the RX promiscuous mode of an Ethernet device. */
+
+typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev);
+/**< @internal Enable the receipt of all multicast packets by an Ethernet device. */
+
+typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev);
+/**< @internal Disable the receipt of all multicast packets by an Ethernet device. */
+
+typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
+ int wait_to_complete);
+/**< @internal Get link speed, duplex mode and state (up/down) of an Ethernet device. */
+
+typedef void (*eth_stats_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_stats *igb_stats);
+/**< @internal Get global I/O statistics of an Ethernet device. */
+
+typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);
+/**< @internal Reset global I/O statistics of an Ethernet device to 0. */
+
+typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_xstats *stats, unsigned n);
+/**< @internal Get extended stats of an Ethernet device. */
+
+typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);
+/**< @internal Reset extended stats of an Ethernet device. */
+
+typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
+/**< @internal Set a queue statistics mapping for a tx/rx queue of an Ethernet device. */
+
+typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+/**< @internal Get specific information about an Ethernet device. */
+
+typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+/**< @internal Start rx and tx of a queue of an Ethernet device. */
+
+typedef int (*eth_queue_stop_t)(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+/**< @internal Stop rx and tx of a queue of an Ethernet device. */
+
+typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+/**< @internal Set up a receive queue of an Ethernet device. */
+
+typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+/**< @internal Setup a transmit queue of an Ethernet device. */
+
+typedef void (*eth_queue_release_t)(void *queue);
+/**< @internal Release memory resources allocated by given RX/TX queue. */
+
+typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+/**< @internal Get the number of available descriptors on a receive queue of an Ethernet device. */
+
+typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
+/**< @internal Check the DD bit of a specific RX descriptor. */
+
+typedef int (*mtu_set_t)(struct rte_eth_dev *dev, uint16_t mtu);
+/**< @internal Set MTU. */
+
+typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
+ uint16_t vlan_id,
+ int on);
+/**< @internal filtering of a VLAN Tag Identifier by an Ethernet device. */
+
+typedef void (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
+ uint16_t tpid);
+/**< @internal set the outer VLAN-TPID by an Ethernet device. */
+
+typedef void (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
+/**< @internal set VLAN offload function by an Ethernet device. */
+
+typedef int (*vlan_pvid_set_t)(struct rte_eth_dev *dev,
+ uint16_t vlan_id,
+ int on);
+/**< @internal set port based TX VLAN insertion by an Ethernet device. */
+
+typedef void (*vlan_strip_queue_set_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ int on);
+/**< @internal Enable/disable VLAN stripping on a queue of an Ethernet device. */
+
+typedef uint16_t (*eth_rx_burst_t)(void *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+/**< @internal Retrieve input packets from a receive queue of an Ethernet device. */
+
+typedef uint16_t (*eth_tx_burst_t)(void *txq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+/**< @internal Send output packets on a transmit queue of an Ethernet device. */
+
+typedef int (*fdir_add_signature_filter_t)(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_ftr,
+ uint8_t rx_queue);
+/**< @internal Setup a new signature filter rule on an Ethernet device */
+
+typedef int (*fdir_update_signature_filter_t)(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_ftr,
+ uint8_t rx_queue);
+/**< @internal Update a signature filter rule on an Ethernet device */
+
+typedef int (*fdir_remove_signature_filter_t)(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_ftr);
+/**< @internal Remove a signature filter rule on an Ethernet device */
+
+typedef void (*fdir_infos_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_fdir *fdir);
+/**< @internal Get information about fdir status */
+
+typedef int (*fdir_add_perfect_filter_t)(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_ftr,
+ uint16_t soft_id, uint8_t rx_queue,
+ uint8_t drop);
+/**< @internal Setup a new perfect filter rule on an Ethernet device */
+
+typedef int (*fdir_update_perfect_filter_t)(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_ftr,
+ uint16_t soft_id, uint8_t rx_queue,
+ uint8_t drop);
+/**< @internal Update a perfect filter rule on an Ethernet device */
+
+typedef int (*fdir_remove_perfect_filter_t)(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_ftr,
+ uint16_t soft_id);
+/**< @internal Remove a perfect filter rule on an Ethernet device */
+
+typedef int (*fdir_set_masks_t)(struct rte_eth_dev *dev,
+ struct rte_fdir_masks *fdir_masks);
+/**< @internal Setup flow director masks on an Ethernet device */
+
+typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+/**< @internal Get current flow control parameter on an Ethernet device */
+
+typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+/**< @internal Setup flow control parameter on an Ethernet device */
+
+typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,
+ struct rte_eth_pfc_conf *pfc_conf);
+/**< @internal Setup priority flow control parameter on an Ethernet device */
+
+typedef int (*reta_update_t)(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+/**< @internal Update RSS redirection table on an Ethernet device */
+
+typedef int (*reta_query_t)(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+/**< @internal Query RSS redirection table on an Ethernet device */
+
+typedef int (*rss_hash_update_t)(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+/**< @internal Update RSS hash configuration of an Ethernet device */
+
+typedef int (*rss_hash_conf_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+/**< @internal Get current RSS hash configuration of an Ethernet device */
+
+typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev);
+/**< @internal Turn on SW controllable LED on an Ethernet device */
+
+typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);
+/**< @internal Turn off SW controllable LED on an Ethernet device */
+
+typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);
+/**< @internal Remove MAC address from receive address register */
+
+typedef void (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t vmdq);
+/**< @internal Set a MAC address in the Receive Address Register. */
+
+typedef int (*eth_uc_hash_table_set_t)(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint8_t on);
+/**< @internal Set a Unicast Hash bitmap */
+
+typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,
+ uint8_t on);
+/**< @internal Set all Unicast Hash bitmap */
+
+typedef int (*eth_set_vf_rx_mode_t)(struct rte_eth_dev *dev,
+ uint16_t vf,
+ uint16_t rx_mode,
+ uint8_t on);
+/**< @internal Set a VF receive mode */
+
+typedef int (*eth_set_vf_rx_t)(struct rte_eth_dev *dev,
+ uint16_t vf,
+ uint8_t on);
+/**< @internal Enable or disable a VF receive */
+
+typedef int (*eth_set_vf_tx_t)(struct rte_eth_dev *dev,
+ uint16_t vf,
+ uint8_t on);
+/**< @internal Enable or disable a VF transmit */
+
+typedef int (*eth_set_vf_vlan_filter_t)(struct rte_eth_dev *dev,
+ uint16_t vlan,
+ uint64_t vf_mask,
+ uint8_t vlan_on);
+/**< @internal Set VF VLAN pool filter */
+
+typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t tx_rate);
+/**< @internal Set queue TX rate */
+
+typedef int (*eth_set_vf_rate_limit_t)(struct rte_eth_dev *dev,
+ uint16_t vf,
+ uint16_t tx_rate,
+ uint64_t q_msk);
+/**< @internal Set VF TX rate */
+
+typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
+ struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ uint8_t rule_id,
+ uint8_t on);
+/**< @internal Add a traffic mirroring rule on an Ethernet device */
+
+typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,
+ uint8_t rule_id);
+/**< @internal Remove a traffic mirroring rule on an Ethernet device */
+
+typedef int (*eth_udp_tunnel_add_t)(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp);
+/**< @internal Add tunneling UDP info */
+
+typedef int (*eth_udp_tunnel_del_t)(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp);
+/**< @internal Delete tunneling UDP info */
+
+
+#ifdef RTE_NIC_BYPASS
+
+enum {
+ RTE_BYPASS_MODE_NONE,
+ RTE_BYPASS_MODE_NORMAL,
+ RTE_BYPASS_MODE_BYPASS,
+ RTE_BYPASS_MODE_ISOLATE,
+ RTE_BYPASS_MODE_NUM,
+};
+
+#define RTE_BYPASS_MODE_VALID(x) \
+ ((x) > RTE_BYPASS_MODE_NONE && (x) < RTE_BYPASS_MODE_NUM)
+
+enum {
+ RTE_BYPASS_EVENT_NONE,
+ RTE_BYPASS_EVENT_START,
+ RTE_BYPASS_EVENT_OS_ON = RTE_BYPASS_EVENT_START,
+ RTE_BYPASS_EVENT_POWER_ON,
+ RTE_BYPASS_EVENT_OS_OFF,
+ RTE_BYPASS_EVENT_POWER_OFF,
+ RTE_BYPASS_EVENT_TIMEOUT,
+ RTE_BYPASS_EVENT_NUM
+};
+
+#define RTE_BYPASS_EVENT_VALID(x) \
+ ((x) > RTE_BYPASS_EVENT_NONE && (x) < RTE_BYPASS_MODE_NUM)
+
+enum {
+ RTE_BYPASS_TMT_OFF, /* timeout disabled. */
+ RTE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
+ RTE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */
+ RTE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */
+ RTE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */
+ RTE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */
+ RTE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */
+ RTE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */
+ RTE_BYPASS_TMT_NUM
+};
+
+#define RTE_BYPASS_TMT_VALID(x) \
+ ((x) == RTE_BYPASS_TMT_OFF || \
+ ((x) > RTE_BYPASS_TMT_OFF && (x) < RTE_BYPASS_TMT_NUM))
+
+typedef void (*bypass_init_t)(struct rte_eth_dev *dev);
+typedef int32_t (*bypass_state_set_t)(struct rte_eth_dev *dev, uint32_t *new_state);
+typedef int32_t (*bypass_state_show_t)(struct rte_eth_dev *dev, uint32_t *state);
+typedef int32_t (*bypass_event_set_t)(struct rte_eth_dev *dev, uint32_t state, uint32_t event);
+typedef int32_t (*bypass_event_show_t)(struct rte_eth_dev *dev, uint32_t event_shift, uint32_t *event);
+typedef int32_t (*bypass_wd_timeout_set_t)(struct rte_eth_dev *dev, uint32_t timeout);
+typedef int32_t (*bypass_wd_timeout_show_t)(struct rte_eth_dev *dev, uint32_t *wd_timeout);
+typedef int32_t (*bypass_ver_show_t)(struct rte_eth_dev *dev, uint32_t *ver);
+typedef int32_t (*bypass_wd_reset_t)(struct rte_eth_dev *dev);
+#endif
+
+typedef int (*eth_add_syn_filter_t)(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t rx_queue);
+/**< @internal add syn filter rule on an Ethernet device */
+
+typedef int (*eth_remove_syn_filter_t)(struct rte_eth_dev *dev);
+/**< @internal remove syn filter rule on an Ethernet device */
+
+typedef int (*eth_get_syn_filter_t)(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t *rx_queue);
+/**< @internal Get syn filter rule on an Ethernet device */
+
+typedef int (*eth_add_ethertype_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_ethertype_filter *filter,
+ uint16_t rx_queue);
+/**< @internal Setup a new ethertype filter rule on an Ethernet device */
+
+typedef int (*eth_remove_ethertype_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index);
+/**< @internal Remove an ethertype filter rule on an Ethernet device */
+
+typedef int (*eth_get_ethertype_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_ethertype_filter *filter,
+ uint16_t *rx_queue);
+/**< @internal Get an ethertype filter rule on an Ethernet device */
+
+typedef int (*eth_add_2tuple_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_2tuple_filter *filter,
+ uint16_t rx_queue);
+/**< @internal Setup a new 2tuple filter rule on an Ethernet device */
+
+typedef int (*eth_remove_2tuple_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index);
+/**< @internal Remove a 2tuple filter rule on an Ethernet device */
+
+typedef int (*eth_get_2tuple_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_2tuple_filter *filter,
+ uint16_t *rx_queue);
+/**< @internal Get a 2tuple filter rule on an Ethernet device */
+
+typedef int (*eth_add_5tuple_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_5tuple_filter *filter,
+ uint16_t rx_queue);
+/**< @internal Setup a new 5tuple filter rule on an Ethernet device */
+
+typedef int (*eth_remove_5tuple_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index);
+/**< @internal Remove a 5tuple filter rule on an Ethernet device */
+
+typedef int (*eth_get_5tuple_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_5tuple_filter *filter,
+ uint16_t *rx_queue);
+/**< @internal Get a 5tuple filter rule on an Ethernet device */
+
+typedef int (*eth_add_flex_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_flex_filter *filter,
+ uint16_t rx_queue);
+/**< @internal Setup a new flex filter rule on an Ethernet device */
+
+typedef int (*eth_remove_flex_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index);
+/**< @internal Remove a flex filter rule on an Ethernet device */
+
+typedef int (*eth_get_flex_filter_t)(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_flex_filter *filter,
+ uint16_t *rx_queue);
+/**< @internal Get a flex filter rule on an Ethernet device */
+
+typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+/**< @internal Take operations to assigned filter type on an Ethernet device */
+
+/**
+ * @internal A structure containing the functions exported by an Ethernet driver.
+ */
+struct eth_dev_ops {
+ eth_dev_configure_t dev_configure; /**< Configure device. */
+ eth_dev_start_t dev_start; /**< Start device. */
+ eth_dev_stop_t dev_stop; /**< Stop device. */
+ eth_dev_set_link_up_t dev_set_link_up; /**< Device link up. */
+ eth_dev_set_link_down_t dev_set_link_down; /**< Device link down. */
+ eth_dev_close_t dev_close; /**< Close device. */
+ eth_promiscuous_enable_t promiscuous_enable; /**< Promiscuous ON. */
+ eth_promiscuous_disable_t promiscuous_disable;/**< Promiscuous OFF. */
+ eth_allmulticast_enable_t allmulticast_enable;/**< RX multicast ON. */
+ eth_allmulticast_disable_t allmulticast_disable;/**< RX multicast OFF. */
+ eth_link_update_t link_update; /**< Get device link state. */
+ eth_stats_get_t stats_get; /**< Get generic device statistics. */
+ eth_stats_reset_t stats_reset; /**< Reset generic device statistics. */
+ eth_xstats_get_t xstats_get; /**< Get extended device statistics. */
+ eth_xstats_reset_t xstats_reset; /**< Reset extended device statistics. */
+ eth_queue_stats_mapping_set_t queue_stats_mapping_set;
+ /**< Configure per queue stat counter mapping. */
+ eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
+ mtu_set_t mtu_set; /**< Set MTU. */
+ vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */
+ vlan_tpid_set_t vlan_tpid_set; /**< Outer VLAN TPID Setup. */
+ vlan_strip_queue_set_t vlan_strip_queue_set; /**< VLAN Stripping on queue. */
+ vlan_offload_set_t vlan_offload_set; /**< Set VLAN Offload. */
+ vlan_pvid_set_t vlan_pvid_set; /**< Set port based TX VLAN insertion */
+ eth_queue_start_t rx_queue_start;/**< Start RX for a queue.*/
+ eth_queue_stop_t rx_queue_stop;/**< Stop RX for a queue.*/
+ eth_queue_start_t tx_queue_start;/**< Start TX for a queue.*/
+ eth_queue_stop_t tx_queue_stop;/**< Stop TX for a queue.*/
+ eth_rx_queue_setup_t rx_queue_setup;/**< Set up device RX queue.*/
+ eth_queue_release_t rx_queue_release;/**< Release RX queue.*/
+ eth_rx_queue_count_t rx_queue_count; /**< Get Rx queue count. */
+ eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit */
+ eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue.*/
+ eth_queue_release_t tx_queue_release;/**< Release TX queue.*/
+ eth_dev_led_on_t dev_led_on; /**< Turn on LED. */
+ eth_dev_led_off_t dev_led_off; /**< Turn off LED. */
+ flow_ctrl_get_t flow_ctrl_get; /**< Get flow control. */
+ flow_ctrl_set_t flow_ctrl_set; /**< Setup flow control. */
+ priority_flow_ctrl_set_t priority_flow_ctrl_set; /**< Setup priority flow control.*/
+ eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address */
+ eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address */
+ eth_uc_hash_table_set_t uc_hash_table_set; /**< Set Unicast Table Array */
+ eth_uc_all_hash_table_set_t uc_all_hash_table_set; /**< Set Unicast hash bitmap */
+ eth_mirror_rule_set_t mirror_rule_set; /**< Add a traffic mirror rule.*/
+ eth_mirror_rule_reset_t mirror_rule_reset; /**< reset a traffic mirror rule.*/
+ eth_set_vf_rx_mode_t set_vf_rx_mode; /**< Set VF RX mode */
+ eth_set_vf_rx_t set_vf_rx; /**< enable/disable a VF receive */
+ eth_set_vf_tx_t set_vf_tx; /**< enable/disable a VF transmit */
+ eth_set_vf_vlan_filter_t set_vf_vlan_filter; /**< Set VF VLAN filter */
+ eth_udp_tunnel_add_t udp_tunnel_add;
+ eth_udp_tunnel_del_t udp_tunnel_del;
+ eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue rate limit */
+ eth_set_vf_rate_limit_t set_vf_rate_limit; /**< Set VF rate limit */
+
+ /** Add a signature filter. */
+ fdir_add_signature_filter_t fdir_add_signature_filter;
+ /** Update a signature filter. */
+ fdir_update_signature_filter_t fdir_update_signature_filter;
+ /** Remove a signature filter. */
+ fdir_remove_signature_filter_t fdir_remove_signature_filter;
+ /** Get information about FDIR status. */
+ fdir_infos_get_t fdir_infos_get;
+ /** Add a perfect filter. */
+ fdir_add_perfect_filter_t fdir_add_perfect_filter;
+ /** Update a perfect filter. */
+ fdir_update_perfect_filter_t fdir_update_perfect_filter;
+ /** Remove a perfect filter. */
+ fdir_remove_perfect_filter_t fdir_remove_perfect_filter;
+ /** Setup masks for FDIR filtering. */
+ fdir_set_masks_t fdir_set_masks;
+ /** Update redirection table. */
+ reta_update_t reta_update;
+ /** Query redirection table. */
+ reta_query_t reta_query;
+ /* bypass control */
+#ifdef RTE_NIC_BYPASS
+ bypass_init_t bypass_init;
+ bypass_state_set_t bypass_state_set;
+ bypass_state_show_t bypass_state_show;
+ bypass_event_set_t bypass_event_set;
+ bypass_event_show_t bypass_event_show;
+ bypass_wd_timeout_set_t bypass_wd_timeout_set;
+ bypass_wd_timeout_show_t bypass_wd_timeout_show;
+ bypass_ver_show_t bypass_ver_show;
+ bypass_wd_reset_t bypass_wd_reset;
+#endif
+
+ /** Configure RSS hash protocols. */
+ rss_hash_update_t rss_hash_update;
+ /** Get current RSS hash configuration. */
+ rss_hash_conf_get_t rss_hash_conf_get;
+ eth_add_syn_filter_t add_syn_filter; /**< add syn filter. */
+ eth_remove_syn_filter_t remove_syn_filter; /**< remove syn filter. */
+ eth_get_syn_filter_t get_syn_filter; /**< get syn filter. */
+ eth_add_ethertype_filter_t add_ethertype_filter; /**< add ethertype filter. */
+ eth_remove_ethertype_filter_t remove_ethertype_filter; /**< remove ethertype filter. */
+ eth_get_ethertype_filter_t get_ethertype_filter; /**< get ethertype filter. */
+ eth_add_2tuple_filter_t add_2tuple_filter; /**< add 2tuple filter. */
+ eth_remove_2tuple_filter_t remove_2tuple_filter; /**< remove 2tuple filter. */
+ eth_get_2tuple_filter_t get_2tuple_filter; /**< get 2tuple filter. */
+ eth_add_5tuple_filter_t add_5tuple_filter; /**< add 5tuple filter. */
+ eth_remove_5tuple_filter_t remove_5tuple_filter; /**< remove 5tuple filter. */
+ eth_get_5tuple_filter_t get_5tuple_filter; /**< get 5tuple filter. */
+ eth_add_flex_filter_t add_flex_filter; /**< add flex filter. */
+ eth_remove_flex_filter_t remove_flex_filter; /**< remove flex filter. */
+ eth_get_flex_filter_t get_flex_filter; /**< get flex filter. */
+ eth_filter_ctrl_t filter_ctrl; /**< common filter control*/
+};
+
+/**
+ * @internal
+ * The generic data structure associated with each ethernet device.
+ *
+ * Pointers to burst-oriented packet receive and transmit functions are
+ * located at the beginning of the structure, along with the pointer to
+ * where all the data elements for the particular device are stored in shared
+ * memory. This split allows the function pointer and driver data to be per-
+ * process, while the actual configuration data for the device is shared.
+ */
+struct rte_eth_dev {
+ eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
+ eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
+ struct rte_eth_dev_data *data; /**< Pointer to device data */
+ const struct eth_driver *driver;/**< Driver for this device */
+ struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */
+ struct rte_pci_device *pci_dev; /**< PCI info. supplied by probing */
+ struct rte_eth_dev_cb_list callbacks; /**< User application callbacks */
+};
+
+struct rte_eth_dev_sriov {
+ uint8_t active; /**< SRIOV is active with 16, 32 or 64 pools */
+ uint8_t nb_q_per_pool; /**< rx queue number per pool */
+ uint16_t def_vmdq_idx; /**< Default pool num used for PF */
+ uint16_t def_pool_q_idx; /**< Default pool queue start reg index */
+};
+#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
+
+#define RTE_ETH_NAME_MAX_LEN (32)
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each ethernet device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_eth_dev_data {
+ char name[RTE_ETH_NAME_MAX_LEN]; /**< Unique identifier name */
+
+ void **rx_queues; /**< Array of pointers to RX queues. */
+ void **tx_queues; /**< Array of pointers to TX queues. */
+ uint16_t nb_rx_queues; /**< Number of RX queues. */
+ uint16_t nb_tx_queues; /**< Number of TX queues. */
+
+ struct rte_eth_dev_sriov sriov; /**< SRIOV data */
+
+ void *dev_private; /**< PMD-specific private data */
+
+ struct rte_eth_link dev_link;
+ /**< Link-level information & status */
+
+ struct rte_eth_conf dev_conf; /**< Configuration applied to device. */
+ uint16_t mtu; /**< Maximum Transmission Unit. */
+
+ uint32_t min_rx_buf_size;
+ /**< Common rx buffer size handled by all queues */
+
+ uint64_t rx_mbuf_alloc_failed; /**< RX ring mbuf allocation failures. */
+ struct ether_addr* mac_addrs;/**< Device Ethernet Link address. */
+ uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR];
+ /** bitmap array of associating Ethernet MAC addresses to pools */
+ struct ether_addr* hash_mac_addrs;
+ /** Device Ethernet MAC addresses of hash filtering. */
+ uint8_t port_id; /**< Device [external] port identifier. */
+ uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
+ scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */
+ all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
+ dev_started : 1; /**< Device state: STARTED(1) / STOPPED(0). */
+};
+
+/**
+ * @internal
+ * The pool of *rte_eth_dev* structures. The size of the pool
+ * is configured at compile-time in the <rte_ethdev.c> file.
+ */
+extern struct rte_eth_dev rte_eth_devices[];
+
+/**
+ * Get the total number of Ethernet devices that have been successfully
+ * initialized by the [matching] Ethernet driver during the PCI probing phase.
+ * All devices whose port identifier is in the range
+ * [0, rte_eth_dev_count() - 1] can be operated on by network applications.
+ *
+ * @return
+ * - The total number of usable Ethernet devices.
+ */
+extern uint8_t rte_eth_dev_count(void);
+
+/**
+ * Function primarily for internal use by dummy drivers, e.g. the ring-based
+ * driver.
+ * Allocates a new ethdev slot for an Ethernet device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name Unique identifier name for each Ethernet device
+ * @return
+ * - Slot in the rte_eth_devices array for a new device;
+ */
+struct rte_eth_dev *rte_eth_dev_allocate(const char *name);
+
+struct eth_driver;
+/**
+ * @internal
+ * Initialization function of an Ethernet driver invoked for each matching
+ * Ethernet PCI device detected during the PCI probing phase.
+ *
+ * @param eth_drv
+ * The pointer to the [matching] Ethernet driver structure supplied by
+ * the PMD when it registered itself.
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure
+ * associated with the matching device and which has been [automatically]
+ * allocated in the *rte_eth_devices* array.
+ * The *eth_dev* structure is supplied to the driver initialization function
+ * with the following fields already initialized:
+ *
+ * - *pci_dev*: Holds the pointers to the *rte_pci_device* structure which
+ * contains the generic PCI information of the matching device.
+ *
+ * - *dev_private*: Holds a pointer to the device private data structure.
+ *
+ * - *mtu*: Contains the default Ethernet maximum transmission unit (1500).
+ *
+ * - *port_id*: Contains the port index of the device (actually the index
+ * of the *eth_dev* structure in the *rte_eth_devices* array).
+ *
+ * @return
+ * - 0: Success, the device is properly initialized by the driver.
+ * In particular, the driver MUST have set up the *dev_ops* pointer
+ * of the *eth_dev* structure.
+ * - <0: Error code of the device initialization failure.
+ */
+typedef int (*eth_dev_init_t)(struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev);
+
+/**
+ * @internal
+ * The structure associated with a PMD Ethernet driver.
+ *
+ * Each Ethernet driver acts as a PCI driver and is represented by a generic
+ * *eth_driver* structure that holds:
+ *
+ * - An *rte_pci_driver* structure (which must be the first field).
+ *
+ * - The *eth_dev_init* function invoked for each matching PCI device.
+ *
+ * - The size of the private data to allocate for each matching device.
+ */
+struct eth_driver {
+ struct rte_pci_driver pci_drv; /**< The PMD is also a PCI driver. */
+ eth_dev_init_t eth_dev_init; /**< Device init function. */
+ unsigned int dev_private_size; /**< Size of device private data. */
+};
+
+/**
+ * @internal
+ * A function invoked by the initialization function of an Ethernet driver
+ * to simultaneously register itself as a PCI driver and as an Ethernet
+ * Poll Mode Driver (PMD).
+ *
+ * @param eth_drv
+ * The pointer to the *eth_driver* structure associated with
+ * the Ethernet driver.
+ */
+extern void rte_eth_driver_register(struct eth_driver *eth_drv);
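+
+/*
+ * A minimal registration sketch: a hypothetical PMD fills an eth_driver with
+ * its PCI driver description, its per-device init callback and the size of
+ * its private data, then registers itself. The my_pmd_* names below are
+ * placeholders, not part of this API:
+ *
+ *   static int my_pmd_dev_init(struct eth_driver *eth_drv,
+ *                              struct rte_eth_dev *eth_dev)
+ *   {
+ *       eth_dev->dev_ops = &my_pmd_ops;       // mandatory on success
+ *       eth_dev->rx_pkt_burst = my_pmd_rx_burst;
+ *       eth_dev->tx_pkt_burst = my_pmd_tx_burst;
+ *       return 0;
+ *   }
+ *
+ *   static struct eth_driver my_pmd_driver = {
+ *       .pci_drv = { .name = "rte_my_pmd", .id_table = my_pmd_pci_ids },
+ *       .eth_dev_init = my_pmd_dev_init,
+ *       .dev_private_size = sizeof(struct my_pmd_private),
+ *   };
+ *
+ *   rte_eth_driver_register(&my_pmd_driver);
+ */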
+
+/**
+ * Configure an Ethernet device.
+ * This function must be invoked first before any other function in the
+ * Ethernet API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device to configure.
+ * @param nb_rx_queue
+ * The number of receive queues to set up for the Ethernet device.
+ * @param nb_tx_queue
+ * The number of transmit queues to set up for the Ethernet device.
+ * @param eth_conf
+ * The pointer to the configuration data to be used for the Ethernet device.
+ * The *rte_eth_conf* structure includes:
+ * - the hardware offload features to activate, with dedicated fields for
+ * each statically configurable offload hardware feature provided by
+ * Ethernet devices, such as IP checksum or VLAN tag stripping for
+ * example.
+ * - the Receive Side Scaling (RSS) configuration when using multiple RX
+ * queues per port.
+ *
+ * Embedding all configuration information in a single data structure
+ * is the most flexible method, as it allows new features to be added
+ * without changing the syntax of the API.
+ * @return
+ * - 0: Success, device configured.
+ * - <0: Error code returned by the driver configuration function.
+ */
+extern int rte_eth_dev_configure(uint8_t port_id,
+ uint16_t nb_rx_queue,
+ uint16_t nb_tx_queue,
+ const struct rte_eth_conf *eth_conf);
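+
+/*
+ * A minimal configuration sketch, assuming one RX and one TX queue and that
+ * an all-zero rte_eth_conf (no special offloads, no RSS) is acceptable:
+ *
+ *   struct rte_eth_conf port_conf;
+ *
+ *   memset(&port_conf, 0, sizeof(port_conf));
+ *   if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
+ *       rte_exit(EXIT_FAILURE, "cannot configure port %d\n", port_id);
+ */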
+
+/**
+ * Allocate and set up a receive queue for an Ethernet device.
+ *
+ * The function allocates a contiguous block of memory for *nb_rx_desc*
+ * receive descriptors from a memory zone associated with *socket_id*
+ * and initializes each receive descriptor with a network buffer allocated
+ * from the memory pool *mb_pool*.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param rx_queue_id
+ * The index of the receive queue to set up.
+ * The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param nb_rx_desc
+ * The number of receive descriptors to allocate for the receive ring.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in case of NUMA.
+ * The value can be *SOCKET_ID_ANY* if there is no NUMA constraint for
+ * the DMA memory allocated for the receive descriptors of the ring.
+ * @param rx_conf
+ * The pointer to the configuration data to be used for the receive queue.
+ * NULL value is allowed, in which case default RX configuration
+ * will be used.
+ * The *rx_conf* structure contains an *rx_thresh* structure with the values
+ * of the Prefetch, Host, and Write-Back threshold registers of the receive
+ * ring.
+ * @param mb_pool
+ * The pointer to the memory pool from which to allocate *rte_mbuf* network
+ * memory buffers to populate each descriptor of the receive ring.
+ * @return
+ * - 0: Success, receive queue correctly set up.
+ * - -EINVAL: The size of network buffers which can be allocated from the
+ * memory pool does not fit the various buffer sizes allowed by the
+ * device controller.
+ * - -ENOMEM: Unable to allocate the receive ring descriptors or to
+ * allocate network memory buffers from the memory pool when
+ * initializing receive descriptors.
+ */
+extern int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
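+
+/*
+ * A usage sketch, assuming *mbuf_pool* is an rte_mempool of packet buffers
+ * created beforehand on the same NUMA socket as the port:
+ *
+ *   int ret = rte_eth_rx_queue_setup(port_id, 0, 128,
+ *                                    rte_eth_dev_socket_id(port_id),
+ *                                    NULL,   // NULL selects the default RX configuration
+ *                                    mbuf_pool);
+ *   if (ret < 0)
+ *       rte_exit(EXIT_FAILURE, "RX queue setup failed: %d\n", ret);
+ */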
+
+/**
+ * Allocate and set up a transmit queue for an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param tx_queue_id
+ * The index of the transmit queue to set up.
+ * The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param nb_tx_desc
+ * The number of transmit descriptors to allocate for the transmit ring.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in case of NUMA.
+ * Its value can be *SOCKET_ID_ANY* if there is no NUMA constraint for
+ * the DMA memory allocated for the transmit descriptors of the ring.
+ * @param tx_conf
+ * The pointer to the configuration data to be used for the transmit queue.
+ * NULL value is allowed, in which case default TX configuration
+ * will be used.
+ * The *tx_conf* structure contains the following data:
+ * - The *tx_thresh* structure with the values of the Prefetch, Host, and
+ * Write-Back threshold registers of the transmit ring.
+ * When setting the Write-Back threshold to a value greater than zero, the
+ * *tx_rs_thresh* value should be explicitly set to one.
+ * - The *tx_free_thresh* value indicates the [minimum] number of network
+ * buffers that must be pending in the transmit ring to trigger their
+ * [implicit] freeing by the driver transmit function.
+ * - The *tx_rs_thresh* value indicates the [minimum] number of transmit
+ * descriptors that must be pending in the transmit ring before setting the
+ * RS bit on a descriptor by the driver transmit function.
+ * The *tx_rs_thresh* value should be less than or equal to the
+ * *tx_free_thresh* value, and both of them should be less than
+ * *nb_tx_desc* - 3.
+ * - The *txq_flags* member contains flags to pass to the TX queue setup
+ * function to configure the behavior of the TX queue. This should be set
+ * to 0 if no special configuration is required.
+ *
+ * Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces
+ * the transmit function to use default values.
+ * @return
+ * - 0: Success, the transmit queue is correctly set up.
+ * - -ENOMEM: Unable to allocate the transmit ring descriptors.
+ */
+extern int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
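+
+/*
+ * A matching TX queue sketch; passing NULL for tx_conf requests the default
+ * TX configuration, including the threshold values described above:
+ *
+ *   int ret = rte_eth_tx_queue_setup(port_id, 0, 512,
+ *                                    rte_eth_dev_socket_id(port_id),
+ *                                    NULL);
+ *   if (ret < 0)
+ *       rte_exit(EXIT_FAILURE, "TX queue setup failed: %d\n", ret);
+ */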
+
+/*
+ * Return the NUMA socket to which an Ethernet device is connected
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device
+ * @return
+ * The NUMA socket id to which the Ethernet device is connected or
+ * a default of zero if the socket could not be determined.
+ * -1 is returned if the port_id value is out of range.
+ */
+extern int rte_eth_dev_socket_id(uint8_t port_id);
+
+/*
+ * Allocate mbufs from the mempool, set up the DMA physical addresses
+ * and then start RX for the specified queue of a port. It is used
+ * when the rx_deferred_start flag of the specified queue is true.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device
+ * @param rx_queue_id
+ * The index of the rx queue to update the ring.
+ * The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @return
+ * - 0: Success, the receive queue is correctly started.
+ * - -EINVAL: The port_id or the queue_id is out of range.
+ * - -ENOTSUP: The function is not supported by the PMD.
+ */
+extern int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
+
+/*
+ * Stop specified RX queue of a port
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device
+ * @param rx_queue_id
+ * The index of the rx queue to update the ring.
+ * The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @return
+ * - 0: Success, the receive queue is correctly stopped.
+ * - -EINVAL: The port_id or the queue_id is out of range.
+ * - -ENOTSUP: The function is not supported by the PMD.
+ */
+extern int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
+
+/*
+ * Start TX for the specified queue of a port. It is used when the
+ * tx_deferred_start flag of the specified queue is true.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device
+ * @param tx_queue_id
+ * The index of the tx queue to update the ring.
+ * The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @return
+ * - 0: Success, the transmit queue is correctly started.
+ * - -EINVAL: The port_id or the queue_id is out of range.
+ * - -ENOTSUP: The function is not supported by the PMD.
+ */
+extern int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);
+
+/*
+ * Stop specified TX queue of a port
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device
+ * @param tx_queue_id
+ * The index of the tx queue to update the ring.
+ * The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @return
+ * - 0: Success, the transmit queue is correctly stopped.
+ * - -EINVAL: The port_id or the queue_id is out of range.
+ * - -ENOTSUP: The function is not supported by the PMD.
+ */
+extern int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id);
+
+
+
+/**
+ * Start an Ethernet device.
+ *
+ * The device start step is the last one and consists of setting the configured
+ * offload features and in starting the transmit and the receive units of the
+ * device.
+ * On success, all basic functions exported by the Ethernet API (link status,
+ * receive/transmit, and so on) can be invoked.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @return
+ * - 0: Success, Ethernet device started.
+ * - <0: Error code of the driver device start function.
+ */
+extern int rte_eth_dev_start(uint8_t port_id);
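+
+/*
+ * A typical bring-up sketch once the port and its queues are configured;
+ * enabling promiscuous mode and checking the link are optional steps shown
+ * only for illustration:
+ *
+ *   struct rte_eth_link link;
+ *
+ *   if (rte_eth_dev_start(port_id) < 0)
+ *       rte_exit(EXIT_FAILURE, "cannot start port %d\n", port_id);
+ *   rte_eth_promiscuous_enable(port_id);
+ *   rte_eth_link_get_nowait(port_id, &link);
+ *   if (!link.link_status)
+ *       printf("port %d: link is down\n", port_id);
+ */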
+
+/**
+ * Stop an Ethernet device. The device can be restarted with a call to
+ * rte_eth_dev_start()
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ */
+extern void rte_eth_dev_stop(uint8_t port_id);
+
+
+/**
+ * Link up an Ethernet device.
+ *
+ * Setting the device link up re-enables the device RX/TX functionality
+ * after it has previously been set link down.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @return
+ * - 0: Success, Ethernet device linked up.
+ * - <0: Error code of the driver device link up function.
+ */
+extern int rte_eth_dev_set_link_up(uint8_t port_id);
+
+/**
+ * Link down an Ethernet device.
+ * On success, the device RX/TX functionality is disabled, and it can be
+ * re-enabled with a call to
+ * rte_eth_dev_set_link_up()
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ */
+extern int rte_eth_dev_set_link_down(uint8_t port_id);
+
+/**
+ * Close an Ethernet device. The device cannot be restarted!
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ */
+extern void rte_eth_dev_close(uint8_t port_id);
+
+/**
+ * Enable receipt in promiscuous mode for an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ */
+extern void rte_eth_promiscuous_enable(uint8_t port_id);
+
+/**
+ * Disable receipt in promiscuous mode for an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ */
+extern void rte_eth_promiscuous_disable(uint8_t port_id);
+
+/**
+ * Return the value of promiscuous mode for an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (1) if promiscuous is enabled
+ * - (0) if promiscuous is disabled.
+ * - (-1) on error
+ */
+extern int rte_eth_promiscuous_get(uint8_t port_id);
+
+/**
+ * Enable the receipt of any multicast frame by an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ */
+extern void rte_eth_allmulticast_enable(uint8_t port_id);
+
+/**
+ * Disable the receipt of all multicast frames by an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ */
+extern void rte_eth_allmulticast_disable(uint8_t port_id);
+
+/**
+ * Return the value of allmulticast mode for an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (1) if allmulticast is enabled
+ * - (0) if allmulticast is disabled.
+ * - (-1) on error
+ */
+extern int rte_eth_allmulticast_get(uint8_t port_id);
+
+/**
+ * Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX
+ * or FULL-DUPLEX) of the physical link of an Ethernet device. It might
+ * wait up to 9 seconds before returning.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param link
+ * A pointer to an *rte_eth_link* structure to be filled with
+ * the status, the speed and the mode of the Ethernet device link.
+ */
+extern void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link);
+
+/**
+ * Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX
+ * or FULL-DUPLEX) of the physical link of an Ethernet device. It is a no-wait
+ * version of rte_eth_link_get().
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param link
+ * A pointer to an *rte_eth_link* structure to be filled with
+ * the status, the speed and the mode of the Ethernet device link.
+ */
+extern void rte_eth_link_get_nowait(uint8_t port_id,
+ struct rte_eth_link *link);
+
+/**
+ * Retrieve the general I/O statistics of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param stats
+ * A pointer to a structure of type *rte_eth_stats* to be filled with
+ * the values of device counters for the following set of statistics:
+ * - *ipackets* with the total of successfully received packets.
+ * - *opackets* with the total of successfully transmitted packets.
+ * - *ibytes* with the total of successfully received bytes.
+ * - *obytes* with the total of successfully transmitted bytes.
+ * - *ierrors* with the total of erroneous received packets.
+ * - *oerrors* with the total of failed transmitted packets.
+ */
+extern void rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
+
+/**
+ * Reset the general I/O statistics of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ */
+extern void rte_eth_stats_reset(uint8_t port_id);
+
+/**
+ * Retrieve extended statistics of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param xstats
+ * A pointer to a table of structure of type *rte_eth_xstats*
+ * to be filled with device statistics names and values.
+ * This parameter can be set to NULL if n is 0.
+ * @param n
+ * The size of the stats table, which should be large enough to store
+ * all the statistics of the device.
+ * @return
+ * - positive value lower or equal to n: success. The return value
+ * is the number of entries filled in the stats table.
+ * - positive value higher than n: error, the given statistics table
+ * is too small. The return value corresponds to the size that should
+ * be given to succeed. The entries in the table are not valid and
+ * shall not be used by the caller.
+ * - negative value on error (invalid port id)
+ */
+extern int rte_eth_xstats_get(uint8_t port_id,
+ struct rte_eth_xstats *xstats, unsigned n);
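+
+/*
+ * A retrieval sketch following the return-value contract above: ask for the
+ * required table size with n = 0, then allocate and fetch. It assumes each
+ * rte_eth_xstats entry carries a *name* string and a *value* counter:
+ *
+ *   int n = rte_eth_xstats_get(port_id, NULL, 0);
+ *   if (n > 0) {
+ *       struct rte_eth_xstats *xs = malloc(n * sizeof(*xs));
+ *       int ret = rte_eth_xstats_get(port_id, xs, n);
+ *       for (int i = 0; i < ret; i++)
+ *           printf("%s: %" PRIu64 "\n", xs[i].name, xs[i].value);
+ *       free(xs);
+ *   }
+ */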
+
+/**
+ * Reset extended statistics of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ */
+extern void rte_eth_xstats_reset(uint8_t port_id);
+
+/**
+ * Set a mapping for the specified transmit queue to the specified per-queue
+ * statistics counter.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param tx_queue_id
+ * The index of the transmit queue for which a queue stats mapping is required.
+ * The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param stat_idx
+ * The per-queue packet statistics functionality number that the transmit
+ * queue is to be assigned.
+ * The value must be in the range [0, RTE_MAX_ETHPORT_QUEUE_STATS_MAPS - 1].
+ * @return
+ * Zero if successful. Non-zero otherwise.
+ */
+extern int rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id,
+ uint16_t tx_queue_id,
+ uint8_t stat_idx);
+
+/**
+ * Set a mapping for the specified receive queue to the specified per-queue
+ * statistics counter.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param rx_queue_id
+ * The index of the receive queue for which a queue stats mapping is required.
+ * The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param stat_idx
+ * The per-queue packet statistics functionality number that the receive
+ * queue is to be assigned.
+ * The value must be in the range [0, RTE_MAX_ETHPORT_QUEUE_STATS_MAPS - 1].
+ * @return
+ * Zero if successful. Non-zero otherwise.
+ */
+extern int rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id,
+ uint16_t rx_queue_id,
+ uint8_t stat_idx);
+
+/**
+ * Retrieve the Ethernet address of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param mac_addr
+ * A pointer to a structure of type *ether_addr* to be filled with
+ * the Ethernet address of the Ethernet device.
+ */
+extern void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
+
+/**
+ * Retrieve the contextual information of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param dev_info
+ * A pointer to a structure of type *rte_eth_dev_info* to be filled with
+ * the contextual information of the Ethernet device.
+ */
+extern void rte_eth_dev_info_get(uint8_t port_id,
+ struct rte_eth_dev_info *dev_info);
+
+/**
+ * Retrieve the MTU of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param mtu
+ * A pointer to a uint16_t where the retrieved MTU is to be stored.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port_id* invalid.
+ */
+extern int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
+
+/**
+ * Change the MTU of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param mtu
+ * A uint16_t for the MTU to be applied.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if operation is not supported.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if *mtu* invalid.
+ */
+extern int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
+
+/**
+ * Enable/Disable hardware filtering by an Ethernet device of received
+ * VLAN packets tagged with a given VLAN Tag Identifier.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param vlan_id
+ * The VLAN Tag Identifier whose filtering must be enabled or disabled.
+ * @param on
+ * If > 0, enable VLAN filtering of VLAN packets tagged with *vlan_id*.
+ * Otherwise, disable VLAN filtering of VLAN packets tagged with *vlan_id*.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware-assisted VLAN filtering is not configured.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOSYS) if VLAN filtering on *port_id* disabled.
+ * - (-EINVAL) if *vlan_id* > 4095.
+ */
+extern int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id , int on);
+
+/**
+ * Enable/Disable hardware VLAN stripping on an RX queue of an Ethernet device.
+ * 82599/X540/X550 NICs support VLAN stripping at the RX queue level.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param rx_queue_id
+ * The index of the receive queue on which VLAN stripping is to be enabled or disabled.
+ * The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param on
+ * If 1, Enable VLAN Stripping of the receive queue of the Ethernet port.
+ * If 0, Disable VLAN Stripping of the receive queue of the Ethernet port.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware-assisted VLAN stripping is not configured.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if *rx_queue_id* invalid.
+ */
+extern int rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id,
+ uint16_t rx_queue_id, int on);
+
+/**
+ * Set the Outer VLAN Ether Type of an Ethernet device so that it can be
+ * inserted into the VLAN header. This is a register setup available on some
+ * Intel NICs, but not all; please check the data sheet for availability.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param tag_type
+ * The Tag Protocol ID
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware-assisted VLAN TPID setup is not supported.
+ * - (-ENODEV) if *port_id* invalid.
+ */
+extern int rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tag_type);
+
+/**
+ * Set the VLAN offload configuration on an Ethernet device.
+ * Enabling/Disabling Extended VLAN is a register setup available on some
+ * Intel NICs, but not all; please check the data sheet for availability.
+ * Enabling/Disabling VLAN stripping can be done per RX queue on certain NICs,
+ * but here the configuration is applied at the port level.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param offload_mask
+ * The VLAN offload bit mask; the following flags can be combined with "OR":
+ * ETH_VLAN_STRIP_OFFLOAD
+ * ETH_VLAN_FILTER_OFFLOAD
+ * ETH_VLAN_EXTEND_OFFLOAD
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware-assisted VLAN filtering is not configured.
+ * - (-ENODEV) if *port_id* invalid.
+ */
+extern int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);
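+
+/*
+ * For example, to turn on both VLAN stripping and VLAN filtering on a port
+ * in a single call:
+ *
+ *   rte_eth_dev_set_vlan_offload(port_id,
+ *           ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
+ */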
+
+/**
+ * Read VLAN Offload configuration from an Ethernet device
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (>0) if successful. Bit mask to indicate
+ * ETH_VLAN_STRIP_OFFLOAD
+ * ETH_VLAN_FILTER_OFFLOAD
+ * ETH_VLAN_EXTEND_OFFLOAD
+ * - (-ENODEV) if *port_id* invalid.
+ */
+extern int rte_eth_dev_get_vlan_offload(uint8_t port_id);
+
+/**
+ * Set port based TX VLAN insertion on or off.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param pvid
+ * Port based TX VLAN identifier together with user priority.
+ * @param on
+ * Turn on or off the port based TX VLAN insertion.
+ *
+ * @return
+ * - (0) if successful.
+ * - negative if failed.
+ */
+extern int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
+
+/**
+ *
+ * Retrieve a burst of input packets from a receive queue of an Ethernet
+ * device. The retrieved packets are stored in *rte_mbuf* structures whose
+ * pointers are supplied in the *rx_pkts* array.
+ *
+ * The rte_eth_rx_burst() function loops, parsing the RX ring of the
+ * receive queue, up to *nb_pkts* packets, and for each completed RX
+ * descriptor in the ring, it performs the following operations:
+ *
+ * - Initialize the *rte_mbuf* data structure associated with the
+ * RX descriptor according to the information provided by the NIC into
+ * that RX descriptor.
+ *
+ * - Store the *rte_mbuf* data structure into the next entry of the
+ * *rx_pkts* array.
+ *
+ * - Replenish the RX descriptor with a new *rte_mbuf* buffer
+ * allocated from the memory pool associated with the receive queue at
+ * initialization time.
+ *
+ * When retrieving an input packet that was scattered by the controller
+ * into multiple receive descriptors, the rte_eth_rx_burst() function
+ * appends the associated *rte_mbuf* buffers to the first buffer of the
+ * packet.
+ *
+ * The rte_eth_rx_burst() function returns the number of packets
+ * actually retrieved, which is the number of *rte_mbuf* data structures
+ * effectively supplied into the *rx_pkts* array.
+ * A return value equal to *nb_pkts* indicates that the RX queue contained
+ * at least *rx_pkts* packets, and this is likely to signify that other
+ * received packets remain in the input queue. Applications implementing
+ * a "retrieve as many received packets as possible" policy can check this
+ * specific case and keep invoking the rte_eth_rx_burst() function until
+ * a value less than *nb_pkts* is returned.
+ *
+ * This receive method has the following advantages:
+ *
+ * - It allows a run-to-completion network stack engine to retrieve and
+ * to immediately process received packets in a fast burst-oriented
+ * approach, avoiding the overhead of unnecessary intermediate packet
+ * queue/dequeue operations.
+ *
+ * - Conversely, it also allows an asynchronous-oriented processing
+ * method to retrieve bursts of received packets and to immediately
+ * queue them for further parallel processing by another logical core,
+ * for instance. However, instead of having received packets being
+ * individually queued by the driver, this approach allows the invoker
+ * of the rte_eth_rx_burst() function to queue a burst of retrieved
+ * packets at a time and therefore dramatically reduce the cost of
+ * enqueue/dequeue operations per packet.
+ *
+ * - It allows the rte_eth_rx_burst() function of the driver to take
+ * advantage of burst-oriented hardware features (CPU cache,
+ * prefetch instructions, and so on) to minimize the number of CPU
+ * cycles per packet.
+ *
+ * To summarize, the proposed receive API enables many
+ * burst-oriented optimizations in both synchronous and asynchronous
+ * packet processing environments with no overhead in both cases.
+ *
+ * The rte_eth_rx_burst() function does not provide any error
+ * notification to avoid the corresponding overhead. As a hint, the
+ * upper-level application might check the status of the device link if it
+ * is systematically returned a 0 value for a given number of tries.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the receive queue from which to retrieve input packets.
+ * The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param rx_pkts
+ * The address of an array of pointers to *rte_mbuf* structures that
+ * must be large enough to store *nb_pkts* pointers in it.
+ * @param nb_pkts
+ * The maximum number of packets to retrieve.
+ * @return
+ * The number of packets actually retrieved, which is the number
+ * of pointers to *rte_mbuf* structures effectively supplied to the
+ * *rx_pkts* array.
+ */
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+extern uint16_t rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+#else
+static inline uint16_t
+rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct rte_eth_dev *dev;
+
+ dev = &rte_eth_devices[port_id];
+ return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id], rx_pkts, nb_pkts);
+}
+#endif
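+
+/*
+ * A minimal polling-loop sketch built on the contract described above: keep
+ * calling rte_eth_rx_burst() and hand every retrieved mbuf to an
+ * application-defined handler (handle_packet() is a placeholder, not part of
+ * this API):
+ *
+ *   struct rte_mbuf *pkts[32];
+ *
+ *   for (;;) {
+ *       uint16_t nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
+ *       for (uint16_t i = 0; i < nb_rx; i++)
+ *           handle_packet(pkts[i]);
+ *   }
+ */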
+
+/**
+ * Get the number of used descriptors in a specific queue
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The queue id on the specific port.
+ * @return
+ * The number of used descriptors in the specific queue.
+ */
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+extern uint32_t rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id);
+#else
+static inline uint32_t
+rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ dev = &rte_eth_devices[port_id];
+ return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+}
+#endif
+
+/**
+ * Check if the DD bit of the specific RX descriptor in the queue has been set
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The queue id on the specific port.
+ * @param offset
+ * The offset of the descriptor ID from tail.
+ * @return
+ * - (1) if the specific DD bit is set.
+ * - (0) if the specific DD bit is not set.
+ * - (-ENODEV) if *port_id* invalid.
+ */
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+extern int rte_eth_rx_descriptor_done(uint8_t port_id,
+ uint16_t queue_id,
+ uint16_t offset);
+#else
+static inline int
+rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
+{
+ struct rte_eth_dev *dev;
+
+ dev = &rte_eth_devices[port_id];
+ return (*dev->dev_ops->rx_descriptor_done)( \
+ dev->data->rx_queues[queue_id], offset);
+}
+#endif
+
+/**
+ * Send a burst of output packets on a transmit queue of an Ethernet device.
+ *
+ * The rte_eth_tx_burst() function is invoked to transmit output packets
+ * on the output queue *queue_id* of the Ethernet device designated by its
+ * *port_id*.
+ * The *nb_pkts* parameter is the number of packets to send which are
+ * supplied in the *tx_pkts* array of *rte_mbuf* structures.
+ * The rte_eth_tx_burst() function loops, sending *nb_pkts* packets,
+ * up to the number of transmit descriptors available in the TX ring of the
+ * transmit queue.
+ * For each packet to send, the rte_eth_tx_burst() function performs
+ * the following operations:
+ *
+ * - Pick up the next available descriptor in the transmit ring.
+ *
+ * - Free the network buffer previously sent with that descriptor, if any.
+ *
+ * - Initialize the transmit descriptor with the information provided
+ * in the *rte_mbuf* data structure.
+ *
+ * In the case of a segmented packet composed of a list of *rte_mbuf* buffers,
+ * the rte_eth_tx_burst() function uses several transmit descriptors
+ * of the ring.
+ *
+ * The rte_eth_tx_burst() function returns the number of packets it
+ * actually sent. A return value equal to *nb_pkts* means that all packets
+ * have been sent, and this is likely to signify that other output packets
+ * could be immediately transmitted again. Applications that implement a
+ * "send as many packets to transmit as possible" policy can check this
+ * specific case and keep invoking the rte_eth_tx_burst() function until
+ * a value less than *nb_pkts* is returned.
+ *
+ * It is the responsibility of the rte_eth_tx_burst() function to
+ * transparently free the memory buffers of packets previously sent.
+ * This feature is driven by the *tx_free_thresh* value supplied to the
+ * rte_eth_tx_queue_setup() function at transmit queue configuration time.
+ * When the number of previously sent packets reaches the "minimum transmit
+ * packets to free" threshold, the rte_eth_tx_burst() function must
+ * [attempt to] free the *rte_mbuf* buffers of those packets whose
+ * transmission was effectively completed.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the transmit queue through which output packets must be
+ * sent.
+ * The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param tx_pkts
+ * The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
+ * which contain the output packets.
+ * @param nb_pkts
+ * The maximum number of packets to transmit.
+ * @return
+ * The number of output packets actually stored in transmit descriptors of
+ * the transmit ring. The return value can be less than the value of the
+ * *tx_pkts* parameter when the transmit ring is full or has been filled up.
+ */
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+extern uint16_t rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+#else
+static inline uint16_t
+rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct rte_eth_dev *dev;
+
+ dev = &rte_eth_devices[port_id];
+ return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
+}
+#endif
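+
+/*
+ * A transmit sketch that handles the partial-send case described above:
+ * packets the driver did not accept (e.g. because the TX ring is full) are
+ * freed by the caller instead of being leaked:
+ *
+ *   uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_pkts);
+ *   while (nb_tx < nb_pkts)
+ *       rte_pktmbuf_free(pkts[nb_tx++]);
+ */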
+
+/**
+ * Setup a new signature filter rule on an Ethernet device
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param fdir_filter
+ * The pointer to the fdir filter structure describing the signature filter
+ * rule.
+ * The *rte_fdir_filter* structure includes the values of the different fields
+ * to match: source and destination IP addresses, vlan id, flexbytes, source
+ * and destination ports, and so on.
+ * @param rx_queue
+ * The index of the RX queue where to store RX packets matching the added
+ * signature filter defined in fdir_filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flow director mode.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOSYS) if the FDIR mode is not configured in signature mode
+ * on *port_id*.
+ * - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_filter,
+ uint8_t rx_queue);
+
+/**
+ * Update a signature filter rule on an Ethernet device.
+ * If the rule doesn't exist, it is created.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param fdir_ftr
+ * The pointer to the structure describing the signature filter rule.
+ * The *rte_fdir_filter* structure includes the values of the different fields
+ * to match: source and destination IP addresses, vlan id, flexbytes, source
+ * and destination ports, and so on.
+ * @param rx_queue
+ * The index of the RX queue where to store RX packets matching the added
+ * signature filter defined in fdir_ftr.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flow director mode.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOSYS) if the flow director mode is not configured in signature mode
+ * on *port_id*.
+ * - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_ftr,
+ uint8_t rx_queue);
+
+/**
+ * Remove a signature filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param fdir_ftr
+ * The pointer to the structure describing the signature filter rule.
+ * The *rte_fdir_filter* structure includes the values of the different fields
+ * to match: source and destination IP addresses, vlan id, flexbytes, source
+ * and destination ports, and so on.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flow director mode.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOSYS) if the flow director mode is not configured in signature mode
+ * on *port_id*.
+ * - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_ftr);
+
+/**
+ * Retrieve the flow director information of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param fdir
+ * A pointer to a structure of type *rte_eth_dev_fdir* to be filled with
+ * the flow director information of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flow director mode.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOSYS) if the flow director mode is not configured on *port_id*.
+ */
+int rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir);
+
+/**
+ * Add a new perfect filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param fdir_filter
+ * The pointer to the structure describing the perfect filter rule.
+ * The *rte_fdir_filter* structure includes the values of the different fields
+ * to match: source and destination IP addresses, vlan id, flexbytes, source
+ * and destination ports, and so on.
+ * IPv6 addresses are not supported.
+ * @param soft_id
+ * The 16-bit value supplied in the field hash.fdir.id of mbuf for RX
+ * packets matching the perfect filter.
+ * @param rx_queue
+ * The index of the RX queue where to store RX packets matching the added
+ * perfect filter defined in fdir_filter.
+ * @param drop
+ * If drop is set to 1, matching RX packets are stored into the RX drop
+ * queue defined in the rte_fdir_conf.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flow director mode.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOSYS) if the flow director mode is not configured in perfect mode
+ * on *port_id*.
+ * - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_filter,
+ uint16_t soft_id, uint8_t rx_queue,
+ uint8_t drop);
+
+/**
+ * Update a perfect filter rule on an Ethernet device.
+ * If the rule doesn't exist, it is created.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param fdir_filter
+ * The pointer to the structure describing the perfect filter rule.
+ * The *rte_fdir_filter* structure includes the values of the different fields
+ * to match: source and destination IP addresses, vlan id, flexbytes, source
+ * and destination ports, and so on.
+ * IPv6 addresses are not supported.
+ * @param soft_id
+ * The 16-bit value supplied in the field hash.fdir.id of mbuf for RX
+ * packets matching the perfect filter.
+ * @param rx_queue
+ * The index of the RX queue where to store RX packets matching the added
+ * perfect filter defined in fdir_filter.
+ * @param drop
+ * If drop is set to 1, matching RX packets are stored into the RX drop
+ * queue defined in the rte_fdir_conf.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flow director mode.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOSYS) if the flow director mode is not configured in perfect mode
+ * on *port_id*.
+ * - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_filter,
+ uint16_t soft_id, uint8_t rx_queue,
+ uint8_t drop);
+
+/**
+ * Remove a perfect filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param fdir_filter
+ * The pointer to the structure describing the perfect filter rule.
+ * The *rte_fdir_filter* structure includes the values of the different fields
+ * to match: source and destination IP addresses, vlan id, flexbytes, source
+ * and destination ports, and so on.
+ * IPv6 addresses are not supported.
+ * @param soft_id
+ * The soft_id value provided when adding/updating the removed filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flow director mode.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOSYS) if the flow director mode is not configured in perfect mode
+ * on *port_id*.
+ * - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
+ struct rte_fdir_filter *fdir_filter,
+ uint16_t soft_id);
+/**
+ * Configure globally the masks for flow director mode for an Ethernet device.
+ * For example, the device can match packets with only the first 24 bits of
+ * the IPv4 source address.
+ *
+ * The following fields can be masked: IPv4 addresses and L4 port numbers.
+ * The following fields can be either enabled or disabled completely for the
+ * matching functionality: VLAN ID tag; VLAN Priority + CFI bit; Flexible 2-byte
+ * tuple.
+ * IPv6 masks are not supported.
+ *
+ * All filters must comply with the masks previously configured.
+ * For example, with a mask equal to 255.255.255.0 for the source IPv4 address,
+ * all IPv4 filters must be created with a source IPv4 address that fits the
+ * "X.X.X.0" format.
+ *
+ * This function flushes all filters that have been previously added in
+ * the device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param fdir_mask
+ * The pointer to the fdir mask structure describing relevant headers fields
+ * and relevant bits to use when matching packets addresses and ports.
+ * IPv6 masks are not supported.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flow director mode.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOSYS) if the flow director mode is not configured in perfect
+ * mode on *port_id*.
+ * - (-EINVAL) if the fdir_mask information is not correct.
+ */
+int rte_eth_dev_fdir_set_masks(uint8_t port_id,
+ struct rte_fdir_masks *fdir_mask);
+
+/**
+ * The Ethernet device event type for interrupt callbacks; more types may be added in the future.
+ */
+enum rte_eth_event_type {
+ RTE_ETH_EVENT_UNKNOWN, /**< unknown event type */
+ RTE_ETH_EVENT_INTR_LSC, /**< lsc interrupt event */
+ RTE_ETH_EVENT_MAX /**< max value of this enum */
+};
+
+typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
+ enum rte_eth_event_type event, void *cb_arg);
+/**< user application callback to be registered for interrupts */
+
+
+
+/**
+ * Register a callback function for specific port id.
+ *
+ * @param port_id
+ * Port id.
+ * @param event
+ * The event of interest.
+ * @param cb_fn
+ * User supplied callback function to be called.
+ * @param cb_arg
+ * Pointer to the parameters for the registered callback.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int rte_eth_dev_callback_register(uint8_t port_id,
+ enum rte_eth_event_type event,
+ rte_eth_dev_cb_fn cb_fn, void *cb_arg);
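+
+/*
+ * A registration sketch for link-status-change notifications; the callback
+ * body (re-reading the link with rte_eth_link_get_nowait()) is only one
+ * example of what an application might do:
+ *
+ *   static void lsc_event_cb(uint8_t port_id, enum rte_eth_event_type event,
+ *                            void *cb_arg)
+ *   {
+ *       struct rte_eth_link link;
+ *
+ *       (void)event; (void)cb_arg;
+ *       rte_eth_link_get_nowait(port_id, &link);
+ *       printf("port %d link %s\n", port_id, link.link_status ? "up" : "down");
+ *   }
+ *
+ *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
+ *                                 lsc_event_cb, NULL);
+ */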
+
+/**
+ * Unregister a callback function for specific port id.
+ *
+ * @param port_id
+ * Port id.
+ * @param event
+ * The event of interest.
+ * @param cb_fn
+ * User supplied callback function to be called.
+ * @param cb_arg
+ * Pointer to the parameters for the registered callback. A value of -1
+ * means to remove all callbacks registered with the same callback address
+ * and the same event.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int rte_eth_dev_callback_unregister(uint8_t port_id,
+ enum rte_eth_event_type event,
+ rte_eth_dev_cb_fn cb_fn, void *cb_arg);
+
+/**
+ * @internal Executes all the user application registered callbacks for
+ * the specific device. It is for DPDK internal use only. User
+ * applications should not call it directly.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param event
+ * Eth device interrupt event type.
+ *
+ * @return
+ * void
+ */
+void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event);
+
+/**
+ * Turn on the LED on the Ethernet device.
+ * This function turns on the LED on the Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if underlying hardware OR driver doesn't support
+ * that operation.
+ * - (-ENODEV) if *port_id* invalid.
+ */
+int rte_eth_led_on(uint8_t port_id);
+
+/**
+ * Turn off the LED on the Ethernet device.
+ * This function turns off the LED on the Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if underlying hardware OR driver doesn't support
+ * that operation.
+ * - (-ENODEV) if *port_id* invalid.
+ */
+int rte_eth_led_off(uint8_t port_id);
+
+/**
+ * Get current status of the Ethernet link flow control for Ethernet device
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param fc_conf
+ * The pointer to the structure where to store the flow control parameters.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flow control.
+ * - (-ENODEV) if *port_id* invalid.
+ */
+int rte_eth_dev_flow_ctrl_get(uint8_t port_id,
+ struct rte_eth_fc_conf *fc_conf);
+
+/**
+ * Configure the Ethernet link flow control for Ethernet device
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param fc_conf
+ * The pointer to the structure of the flow control parameters.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flow control mode.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter
+ * - (-EIO) if flow control setup failure
+ */
+int rte_eth_dev_flow_ctrl_set(uint8_t port_id,
+ struct rte_eth_fc_conf *fc_conf);
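+
+/*
+ * Illustrative usage sketch: read the current flow control settings, switch
+ * to full (RX and TX) pause-frame mode and write the configuration back.
+ * RTE_FC_FULL is assumed to be a value of the rte_eth_fc_mode enum defined
+ * earlier in this header; error handling is elided.
+ *
+ *   struct rte_eth_fc_conf fc_conf;
+ *
+ *   rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
+ *   fc_conf.mode = RTE_FC_FULL;
+ *   rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
+ */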
+
+/**
+ * Configure the Ethernet priority flow control under DCB environment
+ * for Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param pfc_conf
+ * The pointer to the structure of the priority flow control parameters.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support priority flow control mode.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter
+ * - (-EIO) if flow control setup failure
+ */
+int rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id,
+ struct rte_eth_pfc_conf *pfc_conf);
+
+/**
+ * Add a MAC address to an internal array of addresses used to enable whitelist
+ * filtering to accept packets only if the destination MAC address matches.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac_addr
+ * The MAC address to add.
+ * @param pool
+ * VMDq pool index to associate address with (if VMDq is enabled). If VMDq is
+ * not enabled, this should be set to 0.
+ * @return
+ * - (0) if successfully added or *mac_addr* was already added.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port* is invalid.
+ * - (-ENOSPC) if no more MAC addresses can be added.
+ * - (-EINVAL) if MAC address is invalid.
+ */
+int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
+ uint32_t pool);
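+
+/*
+ * Illustrative usage sketch: accept frames for one extra unicast MAC address
+ * on a port that does not use VMDq (pool 0). The address bytes are made up
+ * for the example.
+ *
+ *   struct ether_addr extra = {
+ *       .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
+ *   };
+ *
+ *   if (rte_eth_dev_mac_addr_add(port_id, &extra, 0) != 0)
+ *       printf("could not add the MAC address\n");
+ */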
+
+/**
+ * Remove a MAC address from the internal array of addresses.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac_addr
+ * MAC address to remove.
+ * @return
+ * - (0) if successful, or *mac_addr* didn't exist.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EADDRINUSE) if attempting to remove the default MAC address
+ */
+int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);
+
+/**
+ * Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reta_conf
+ * RETA to update.
+ * @param reta_size
+ * Redirection table size. The table size can be queried by
+ * rte_eth_dev_info_get().
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_rss_reta_update(uint8_t port,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
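+
+/*
+ * Illustrative usage sketch: spread the whole redirection table evenly over
+ * the first two RX queues. It assumes the rte_eth_rss_reta_entry64 layout
+ * (64-entry groups selected by a per-group mask) and RTE_RETA_GROUP_SIZE
+ * defined earlier in this header, and that the table holds at most 512
+ * entries; the real size is taken from rte_eth_dev_info_get().
+ *
+ *   struct rte_eth_dev_info dev_info;
+ *   struct rte_eth_rss_reta_entry64 reta_conf[8];  // 8 * 64 = 512 entries
+ *   uint16_t i;
+ *
+ *   rte_eth_dev_info_get(port, &dev_info);
+ *   memset(reta_conf, 0, sizeof(reta_conf));
+ *   for (i = 0; i < dev_info.reta_size; i++) {
+ *       reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
+ *           1ULL << (i % RTE_RETA_GROUP_SIZE);
+ *       reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
+ *           i % 2;
+ *   }
+ *   rte_eth_dev_rss_reta_update(port, reta_conf, dev_info.reta_size);
+ */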
+
+ /**
+ * Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reta_conf
+ * RETA to query.
+ * @param reta_size
+ * Redirection table size. The table size can be queried by
+ * rte_eth_dev_info_get().
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_rss_reta_query(uint8_t port,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
+ /**
+ * Update the unicast hash table for receiving packets with the given destination
+ * MAC address. Such packets are routed to all VFs whose RX mode accepts packets
+ * that match the unicast hash table.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param addr
+ * Unicast MAC address.
+ * @param on
+ * 1 - Set a unicast hash bit for receiving packets with the MAC address.
+ * 0 - Clear a unicast hash bit.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_uc_hash_table_set(uint8_t port, struct ether_addr *addr,
+ uint8_t on);
+
+ /**
+ * Update all unicast hash bitmaps for receiving packets with any unicast
+ * Ethernet MAC address. Such packets are routed to all VFs whose RX mode
+ * accepts packets that match the unicast hash table.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Set all unicast hash bitmaps for receiving all the Ethernet
+ * MAC addresses
+ * 0 - Clear all unicast hash bitmaps
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_uc_all_hash_table_set(uint8_t port, uint8_t on);
+
+ /**
+ * Set RX L2 Filtering mode of a VF of an Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param rx_mode
+ * The RX mode mask: one or more of accepting untagged packets, packets that
+ * match the PFUTA table, broadcast promiscuous and multicast promiscuous.
+ * Build rx_mode from ETH_VMDQ_ACCEPT_UNTAG, ETH_VMDQ_ACCEPT_HASH_UC,
+ * ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST.
+ * @param on
+ * 1 - Enable a VF RX mode.
+ * 0 - Disable a VF RX mode.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mode,
+ uint8_t on);
+
+/**
+* Enable or disable TX traffic from a VF of the Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic transmit.
+* 0 - Disable a VF traffic transmit.
+* @return
+* - (0) if successful.
+* - (-ENODEV) if *port_id* invalid.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_eth_dev_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable or disable RX traffic to a VF of an Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic receive.
+* 0 - Disable a VF traffic receive.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_eth_dev_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable or disable, for the given VFs of an Ethernet device, hardware
+* filtering of received VLAN packets tagged with a given VLAN Tag Identifier.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vlan_id
+* The VLAN Tag Identifier whose filtering must be enabled or disabled.
+* @param vf_mask
+* Bitmap listing which VFs participate in the VLAN filtering.
+* @param vlan_on
+* 1 - Enable VFs VLAN filtering.
+* 0 - Disable VFs VLAN filtering.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_eth_dev_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+ uint64_t vf_mask,
+ uint8_t vlan_on);
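+
+/*
+ * Illustrative usage sketch: let VFs 0 and 1 receive packets tagged with
+ * VLAN 100. The VF numbers and the VLAN ID are made up for the example.
+ *
+ *   uint64_t vf_mask = (1ULL << 0) | (1ULL << 1);
+ *
+ *   rte_eth_dev_set_vf_vlan_filter(port, 100, vf_mask, 1);
+ */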
+
+/**
+ * Set a traffic mirroring rule on an Ethernet device
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param mirror_conf
+ * The pointer to the traffic mirroring structure describing the mirroring rule.
+ * The *rte_eth_vmdq_mirror_conf* structure includes the type of mirroring rule,
+ * the destination pool and the rule value when VLAN or pool mirroring is enabled.
+ *
+ * @param rule_id
+ * The index of the traffic mirroring rule; up to four separate rules are supported.
+ * @param on
+ * 1 - Enable a mirroring rule.
+ * 0 - Disable a mirroring rule.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the mr_conf information is not correct.
+ */
+int rte_eth_mirror_rule_set(uint8_t port_id,
+ struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ uint8_t rule_id,
+ uint8_t on);
+
+/**
+ * Reset a traffic mirroring rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param rule_id
+ * The index of the traffic mirroring rule; up to four separate rules are supported.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_mirror_rule_reset(uint8_t port_id,
+ uint8_t rule_id);
+
+/**
+ * Set the rate limitation for a queue on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_idx
+ * The queue id.
+ * @param tx_rate
+ * The tx rate allocated from the total link speed for this queue.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
+ uint16_t tx_rate);
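+
+/*
+ * Illustrative usage sketch: cap the transmit rate of TX queue 0. The rate
+ * unit (assumed here to be Mbps) depends on the driver, so check the PMD
+ * documentation before relying on it.
+ *
+ *   rte_eth_set_queue_rate_limit(port_id, 0, 100);  // queue 0, 100 (Mbps assumed)
+ */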
+
+/**
+ * Set the rate limitation for a vf on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param tx_rate
+ * The tx rate allocated from the total link speed for this VF id.
+ * @param q_msk
+ * The mask of queues on which to set the rate.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+
+/**
+ * Initialize bypass logic. This function needs to be called before
+ * executing any other bypass API.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_bypass_init(uint8_t port);
+
+/**
+ * Return bypass state.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param state
+ * The returned bypass state.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_bypass_state_show(uint8_t port, uint32_t *state);
+
+/**
+ * Set bypass state
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param new_state
+ * The bypass state to set.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_bypass_state_set(uint8_t port, uint32_t *new_state);
+
+/**
+ * Return bypass state when given event occurs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param event
+ * The bypass event
+ * - (1) Main power on (power button is pushed)
+ * - (2) Auxiliary power on (power supply is being plugged)
+ * - (3) Main power off (system shutdown and power supply is left plugged in)
+ * - (4) Auxiliary power off (power supply is being unplugged)
+ * - (5) Display or set the watchdog timer
+ * @param state
+ * The bypass state when given event occurred.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_bypass_event_show(uint8_t port, uint32_t event, uint32_t *state);
+
+/**
+ * Set bypass state when given event occurs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param event
+ * The bypass event
+ * - (1) Main power on (power button is pushed)
+ * - (2) Auxiliary power on (power supply is being plugged)
+ * - (3) Main power off (system shutdown and power supply is left plugged in)
+ * - (4) Auxiliary power off (power supply is being unplugged)
+ * - (5) Display or set the watchdog timer
+ * @param state
+ * The assigned state when given event occurs.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_bypass_event_store(uint8_t port, uint32_t event, uint32_t state);
+
+/**
+ * Set bypass watchdog timeout count.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param timeout
+ * The timeout to be set.
+ * - (0) 0 seconds (timer is off)
+ * - (1) 1.5 seconds
+ * - (2) 2 seconds
+ * - (3) 3 seconds
+ * - (4) 4 seconds
+ * - (5) 8 seconds
+ * - (6) 16 seconds
+ * - (7) 32 seconds
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_wd_timeout_store(uint8_t port, uint32_t timeout);
+
+/**
+ * Get bypass firmware version.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param ver
+ * The firmware version
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_bypass_ver_show(uint8_t port, uint32_t *ver);
+
+/**
+ * Return bypass watchdog timeout in seconds
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param wd_timeout
+ * The returned watchdog timeout. A value of "0" means the timer has expired.
+ * - (0) 0 seconds (timer is off)
+ * - (1) 1.5 seconds
+ * - (2) 2 seconds
+ * - (3) 3 seconds
+ * - (4) 4 seconds
+ * - (5) 8 seconds
+ * - (6) 16 seconds
+ * - (7) 32 seconds
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);
+
+/**
+ * Reset bypass watchdog timer
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_bypass_wd_reset(uint8_t port);
+
+ /**
+ * Configuration of Receive Side Scaling hash computation of Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param rss_conf
+ * The new configuration to use for RSS hash computation on the port.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if port identifier is invalid.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_rss_hash_update(uint8_t port_id,
+ struct rte_eth_rss_conf *rss_conf);
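+
+/*
+ * Illustrative usage sketch: keep the current hash key but restrict the RSS
+ * hash to IPv4 and IPv6 headers. The rss_hf field and the ETH_RSS_* flags are
+ * assumed to be defined earlier in this header, and a NULL rss_key is assumed
+ * to leave the key unchanged; rte_eth_dev_rss_hash_conf_get() is declared
+ * just below.
+ *
+ *   struct rte_eth_rss_conf rss_conf = { .rss_key = NULL };
+ *
+ *   rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
+ *   rss_conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6;
+ *   rte_eth_dev_rss_hash_update(port_id, &rss_conf);
+ */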
+
+ /**
+ * Retrieve current configuration of Receive Side Scaling hash computation
+ * of Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param rss_conf
+ * Where to store the current RSS hash configuration of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if port identifier is invalid.
+ * - (-ENOTSUP) if hardware doesn't support RSS.
+ */
+int
+rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
+ struct rte_eth_rss_conf *rss_conf);
+
+ /**
+ * Add a UDP tunneling port to an Ethernet device so that a specific type of
+ * tunneling packet can be filtered by its UDP port number.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param tunnel_udp
+ * UDP tunneling configuration.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if port identifier is invalid.
+ * - (-ENOTSUP) if hardware doesn't support tunnel type.
+ */
+int
+rte_eth_dev_udp_tunnel_add(uint8_t port_id,
+ struct rte_eth_udp_tunnel *tunnel_udp);
+
+ /**
+ * Delete the UDP tunneling port configuration of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param tunnel_udp
+ * UDP tunneling configuration.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if port identifier is invalid.
+ * - (-ENOTSUP) if hardware doesn't support tunnel type.
+ */
+int
+rte_eth_dev_udp_tunnel_delete(uint8_t port_id,
+ struct rte_eth_udp_tunnel *tunnel_udp);
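+
+/*
+ * Illustrative usage sketch: ask the NIC to recognise VXLAN frames on the
+ * IANA-assigned UDP port 4789. The rte_eth_udp_tunnel field names and the
+ * RTE_TUNNEL_TYPE_VXLAN value are assumptions for this example.
+ *
+ *   struct rte_eth_udp_tunnel tunnel = {
+ *       .udp_port  = 4789,
+ *       .prot_type = RTE_TUNNEL_TYPE_VXLAN,
+ *   };
+ *
+ *   rte_eth_dev_udp_tunnel_add(port_id, &tunnel);
+ */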
+
+/**
+ * Add a SYN filter.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param rx_queue
+ * The index of RX queue where to store RX packets matching the syn filter.
+ * @param filter
+ * The pointer to the structure describing the syn filter rule.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_add_syn_filter(uint8_t port_id,
+ struct rte_syn_filter *filter, uint16_t rx_queue);
+
+/**
+ * Remove the SYN filter.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_remove_syn_filter(uint8_t port_id);
+
+/**
+ * Get the SYN filter.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param filter
+ * The pointer to the structure describing the syn filter.
+ * @param rx_queue
+ * A pointer to get the queue index of syn filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_eth_dev_get_syn_filter(uint8_t port_id,
+ struct rte_syn_filter *filter, uint16_t *rx_queue);
+
+/**
+ * Add a new ethertype filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of ethertype filter.
+ * @param filter
+ * The pointer to the structure describing the ethertype filter rule.
+ * The *rte_ethertype_filter* structure includes the values of the different
+ * fields to match: ethertype and the priority in the VLAN tag.
+ * The VLAN tag priority is not supported on e1000 devices.
+ * @param rx_queue
+ * The index of the RX queue where to store RX packets matching the added
+ * ethertype filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support ethertype filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ */
+int rte_eth_dev_add_ethertype_filter(uint8_t port_id, uint16_t index,
+ struct rte_ethertype_filter *filter, uint16_t rx_queue);
+
+/**
+ * Remove an ethertype filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of ethertype filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support ethertype filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ */
+int rte_eth_dev_remove_ethertype_filter(uint8_t port_id,
+ uint16_t index);
+
+/**
+ * Get an ethertype filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of ethertype filter.
+ * @param filter
+ * A pointer to a structure of type *rte_ethertype_filter* to be filled with
+ * the information of the Ethertype filter.
+ * @param rx_queue
+ * A pointer to get the queue index.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support ethertype filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ * - (-ENOENT) if no enabled filter in this index.
+ */
+int rte_eth_dev_get_ethertype_filter(uint8_t port_id, uint16_t index,
+ struct rte_ethertype_filter *filter, uint16_t *rx_queue);
+
+/**
+ * Add a new 2tuple filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of 2tuple filter.
+ * @param filter
+ * The pointer to the structure describing the 2tuple filter rule.
+ * The *rte_2tuple_filter* structure includes the values of the different
+ * fields to match: protocol, dst_port and
+ * tcp_flags if the protocol is tcp type.
+ * @param rx_queue
+ * The index of the RX queue where to store RX packets matching the added
+ * 2tuple filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support 2tuple filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ */
+int rte_eth_dev_add_2tuple_filter(uint8_t port_id, uint16_t index,
+ struct rte_2tuple_filter *filter, uint16_t rx_queue);
+
+/**
+ * Remove a 2tuple filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of 2tuple filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support 2tuple filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ */
+int rte_eth_dev_remove_2tuple_filter(uint8_t port_id, uint16_t index);
+
+/**
+ * Get a 2tuple filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of 2tuple filter.
+ * @param filter
+ * A pointer to a structure of type *rte_2tuple_filter* to be filled with
+ * the information of the 2tuple filter.
+ * @param rx_queue
+ * A pointer to get the queue index.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support 2tuple filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ * - (-ENOENT) if no enabled filter in this index.
+ */
+int rte_eth_dev_get_2tuple_filter(uint8_t port_id, uint16_t index,
+ struct rte_2tuple_filter *filter, uint16_t *rx_queue);
+
+/**
+ * Add a new 5tuple filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of 5tuple filter.
+ * @param filter
+ * The pointer to the structure describing the 5tuple filter rule.
+ * The *rte_5tuple_filter* structure includes the values of the different
+ * fields to match: destination/source IP addresses, destination/source ports,
+ * protocol and the relative masks.
+ * @param rx_queue
+ * The index of the RX queue where to store RX packets matching the added
+ * 5tuple filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support 5tuple filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ */
+int rte_eth_dev_add_5tuple_filter(uint8_t port_id, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t rx_queue);
+
+/**
+ * Remove a 5tuple filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of 5tuple filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support 5tuple filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ */
+int rte_eth_dev_remove_5tuple_filter(uint8_t port_id, uint16_t index);
+
+/**
+ * Get a 5tuple filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of 5tuple filter.
+ * @param filter
+ * A pointer to a structure of type *rte_5tuple_filter* to be filled with
+ * the information of the 5tuple filter.
+ * @param rx_queue
+ * A pointer to get the queue index.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support 5tuple filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ */
+int rte_eth_dev_get_5tuple_filter(uint8_t port_id, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t *rx_queue);
+
+/**
+ * Add a new flex filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of flex filter.
+ * @param filter
+ * The pointer to the structure describing the flex filter rule.
+ * The *rte_flex_filter* structure includes the values of the different fields
+ * to match: the dwords (the first len bytes of the packet) and the relative masks.
+ * @param rx_queue
+ * The index of the RX queue where to store RX packets matching the added
+ * flex filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flex filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ * - (-ENOENT) if no enabled filter in this index.
+ */
+int rte_eth_dev_add_flex_filter(uint8_t port_id, uint16_t index,
+ struct rte_flex_filter *filter, uint16_t rx_queue);
+
+/**
+ * Remove a flex filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of flex filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flex filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ */
+int rte_eth_dev_remove_flex_filter(uint8_t port_id, uint16_t index);
+
+/**
+ * Get a flex filter rule on an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param index
+ * The identifier of flex filter.
+ * @param filter
+ * A pointer to a structure of type *rte_flex_filter* to be filled with
+ * the information of the flex filter.
+ * @param rx_queue
+ * A pointer to get the queue index.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support flex filter.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if the filter information is not correct.
+ * - (-ENOENT) if no enabled filter in this index.
+ */
+int rte_eth_dev_get_flex_filter(uint8_t port_id, uint16_t index,
+ struct rte_flex_filter *filter, uint16_t *rx_queue);
+
+/**
+ * Check whether the filter type is supported on an Ethernet device.
+ * All the supported filter types are defined in 'rte_eth_ctrl.h'.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param filter_type
+ * Filter type.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this filter type.
+ * - (-ENODEV) if *port_id* invalid.
+ */
+int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type);
+
+/**
+ * Perform an operation on the assigned filter type of an Ethernet device.
+ * All the supported operations and filter types are defined in 'rte_eth_ctrl.h'.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Type of operation.
+ * @param arg
+ * A pointer to arguments defined specifically for the operation.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port_id* invalid.
+ * - other values depend on the implementation of the specific operation.
+ */
+int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg);
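+
+/*
+ * Illustrative usage sketch: probe for flow-director support before issuing
+ * an operation through the generic filtering API. The RTE_ETH_FILTER_FDIR,
+ * RTE_ETH_FILTER_ADD and struct rte_eth_fdir_filter names are assumed to come
+ * from rte_eth_ctrl.h; the entry itself would be filled in by the caller.
+ *
+ *   struct rte_eth_fdir_filter fdir_entry;  // filled in by the caller
+ *
+ *   if (rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR) == 0)
+ *       rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+ *                               RTE_ETH_FILTER_ADD, &fdir_entry);
+ */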
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ETHDEV_H_ */
diff --git a/src/dpdk_lib18/librte_ether/rte_ether.h b/src/dpdk_lib18/librte_ether/rte_ether.h
new file mode 100755
index 00000000..7e7d22cc
--- /dev/null
+++ b/src/dpdk_lib18/librte_ether/rte_ether.h
@@ -0,0 +1,340 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETHER_H_
+#define _RTE_ETHER_H_
+
+/**
+ * @file
+ *
+ * Ethernet Helpers in RTE
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include <rte_memcpy.h>
+#include <rte_random.h>
+
+#define ETHER_ADDR_LEN 6 /**< Length of Ethernet address. */
+#define ETHER_TYPE_LEN 2 /**< Length of Ethernet type field. */
+#define ETHER_CRC_LEN 4 /**< Length of Ethernet CRC. */
+#define ETHER_HDR_LEN \
+ (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN) /**< Length of Ethernet header. */
+#define ETHER_MIN_LEN 64 /**< Minimum frame len, including CRC. */
+#define ETHER_MAX_LEN 1518 /**< Maximum frame len, including CRC. */
+#define ETHER_MTU \
+ (ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN) /**< Ethernet MTU. */
+
+#define ETHER_MAX_VLAN_FRAME_LEN \
+ (ETHER_MAX_LEN + 4) /**< Maximum VLAN frame length, including CRC. */
+
+#define ETHER_MAX_JUMBO_FRAME_LEN \
+ 0x3F00 /**< Maximum Jumbo frame length, including CRC. */
+
+#define ETHER_MAX_VLAN_ID 4095 /**< Maximum VLAN ID. */
+
+#define ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. */
+
+/**
+ * Ethernet address:
+ * A universally administered address is uniquely assigned to a device by its
+ * manufacturer. The first three octets (in transmission order) contain the
+ * Organizationally Unique Identifier (OUI). The following three (MAC-48 and
+ * EUI-48) octets are assigned by that organization with the only constraint
+ * of uniqueness.
+ * A locally administered address is assigned to a device by a network
+ * administrator and does not contain OUIs.
+ * See http://standards.ieee.org/regauth/groupmac/tutorial.html
+ */
+struct ether_addr {
+ uint8_t addr_bytes[ETHER_ADDR_LEN]; /**< Address bytes in transmission order */
+} __attribute__((__packed__));
+
+#define ETHER_LOCAL_ADMIN_ADDR 0x02 /**< Locally assigned Eth. address. */
+#define ETHER_GROUP_ADDR 0x01 /**< Multicast or broadcast Eth. address. */
+
+/**
+ * Check if two Ethernet addresses are the same.
+ *
+ * @param ea1
+ * A pointer to the first ether_addr structure containing
+ * the ethernet address.
+ * @param ea2
+ * A pointer to the second ether_addr structure containing
+ * the ethernet address.
+ *
+ * @return
+ * True (1) if the given two ethernet address are the same;
+ * False (0) otherwise.
+ */
+static inline int is_same_ether_addr(const struct ether_addr *ea1,
+ const struct ether_addr *ea2)
+{
+ int i;
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ if (ea1->addr_bytes[i] != ea2->addr_bytes[i])
+ return 0;
+ return 1;
+}
+
+/**
+ * Check if an Ethernet address is filled with zeros.
+ *
+ * @param ea
+ * A pointer to an ether_addr structure containing the ethernet address
+ * to check.
+ * @return
+ * True (1) if the given ethernet address is filled with zeros;
+ * false (0) otherwise.
+ */
+static inline int is_zero_ether_addr(const struct ether_addr *ea)
+{
+ int i;
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ if (ea->addr_bytes[i] != 0x00)
+ return 0;
+ return 1;
+}
+
+/**
+ * Check if an Ethernet address is a unicast address.
+ *
+ * @param ea
+ * A pointer to an ether_addr structure containing the ethernet address
+ * to check.
+ * @return
+ * True (1) if the given ethernet address is a unicast address;
+ * false (0) otherwise.
+ */
+static inline int is_unicast_ether_addr(const struct ether_addr *ea)
+{
+ return ((ea->addr_bytes[0] & ETHER_GROUP_ADDR) == 0);
+}
+
+/**
+ * Check if an Ethernet address is a multicast address.
+ *
+ * @param ea
+ * A pointer to an ether_addr structure containing the ethernet address
+ * to check.
+ * @return
+ * True (1) if the given ethernet address is a multicast address;
+ * false (0) otherwise.
+ */
+static inline int is_multicast_ether_addr(const struct ether_addr *ea)
+{
+ return (ea->addr_bytes[0] & ETHER_GROUP_ADDR);
+}
+
+/**
+ * Check if an Ethernet address is a broadcast address.
+ *
+ * @param ea
+ * A pointer to an ether_addr structure containing the ethernet address
+ * to check.
+ * @return
+ * True (1) if the given ethernet address is a broadcast address;
+ * false (0) otherwise.
+ */
+static inline int is_broadcast_ether_addr(const struct ether_addr *ea)
+{
+ const uint16_t *ea_words = (const uint16_t *)ea;
+
+ return (ea_words[0] == 0xFFFF && ea_words[1] == 0xFFFF &&
+ ea_words[2] == 0xFFFF);
+}
+
+/**
+ * Check if an Ethernet address is a universally assigned address.
+ *
+ * @param ea
+ * A pointer to an ether_addr structure containing the ethernet address
+ * to check.
+ * @return
+ * True (1) if the given ethernet address is a universally assigned address;
+ * false (0) otherwise.
+ */
+static inline int is_universal_ether_addr(const struct ether_addr *ea)
+{
+ return ((ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) == 0);
+}
+
+/**
+ * Check if an Ethernet address is a locally assigned address.
+ *
+ * @param ea
+ * A pointer to an ether_addr structure containing the ethernet address
+ * to check.
+ * @return
+ * True (1) if the given ethernet address is a locally assigned address;
+ * false (0) otherwise.
+ */
+static inline int is_local_admin_ether_addr(const struct ether_addr *ea)
+{
+ return ((ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) != 0);
+}
+
+/**
+ * Check if an Ethernet address is a valid address. Checks that the address is a
+ * unicast address and is not filled with zeros.
+ *
+ * @param ea
+ * A pointer to an ether_addr structure containing the ethernet address
+ * to check.
+ * @return
+ * True (1) if the given ethernet address is valid;
+ * false (0) otherwise.
+ */
+static inline int is_valid_assigned_ether_addr(const struct ether_addr *ea)
+{
+ return (is_unicast_ether_addr(ea) && (! is_zero_ether_addr(ea)));
+}
+
+/**
+ * Generate a random Ethernet address that is locally administered
+ * and not multicast.
+ * @param addr
+ * A pointer to Ethernet address.
+ */
+static inline void eth_random_addr(uint8_t *addr)
+{
+ uint64_t rand = rte_rand();
+ uint8_t *p = (uint8_t*)&rand;
+
+ rte_memcpy(addr, p, ETHER_ADDR_LEN);
+ addr[0] &= ~ETHER_GROUP_ADDR; /* clear multicast bit */
+ addr[0] |= ETHER_LOCAL_ADMIN_ADDR; /* set local assignment bit */
+}
+
+/**
+ * Fast copy an Ethernet address.
+ *
+ * @param ea_from
+ * A pointer to an ether_addr structure holding the Ethernet address to copy.
+ * @param ea_to
+ * A pointer to an ether_addr structure into which to copy the Ethernet address.
+ */
+static inline void ether_addr_copy(const struct ether_addr *ea_from,
+ struct ether_addr *ea_to)
+{
+#ifdef __INTEL_COMPILER
+ uint16_t *from_words = (uint16_t *)(ea_from->addr_bytes);
+ uint16_t *to_words = (uint16_t *)(ea_to->addr_bytes);
+
+ to_words[0] = from_words[0];
+ to_words[1] = from_words[1];
+ to_words[2] = from_words[2];
+#else
+ /*
+ * Use the common way, because of a strange gcc warning.
+ */
+ *ea_to = *ea_from;
+#endif
+}
+
+#define ETHER_ADDR_FMT_SIZE 18
+/**
+ * Format a 48-bit Ethernet address in the pattern xx:xx:xx:xx:xx:xx.
+ *
+ * @param buf
+ * A pointer to the buffer that will contain the formatted MAC address.
+ * @param size
+ * The format buffer size.
+ * @param eth_addr
+ * A pointer to the ether_addr structure to format.
+ */
+static inline void
+ether_format_addr(char *buf, uint16_t size,
+ const struct ether_addr *eth_addr)
+{
+ snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+ eth_addr->addr_bytes[0],
+ eth_addr->addr_bytes[1],
+ eth_addr->addr_bytes[2],
+ eth_addr->addr_bytes[3],
+ eth_addr->addr_bytes[4],
+ eth_addr->addr_bytes[5]);
+}
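+
+/*
+ * Illustrative usage sketch combining the helpers above: generate a locally
+ * administered unicast address and print it.
+ *
+ *   struct ether_addr mac;
+ *   char buf[ETHER_ADDR_FMT_SIZE];
+ *
+ *   eth_random_addr(mac.addr_bytes);
+ *   ether_format_addr(buf, sizeof(buf), &mac);
+ *   printf("using MAC %s\n", buf);
+ */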
+
+/**
+ * Ethernet header: Contains the destination address, source address
+ * and frame type.
+ */
+struct ether_hdr {
+ struct ether_addr d_addr; /**< Destination address. */
+ struct ether_addr s_addr; /**< Source address. */
+ uint16_t ether_type; /**< Frame type. */
+} __attribute__((__packed__));
+
+/**
+ * Ethernet VLAN Header.
+ * Contains the 16-bit VLAN Tag Control Identifier and the Ethernet type
+ * of the encapsulated frame.
+ */
+struct vlan_hdr {
+ uint16_t vlan_tci; /**< Priority (3) + CFI (1) + Identifier Code (12) */
+ uint16_t eth_proto;/**< Ethernet type of encapsulated frame. */
+} __attribute__((__packed__));
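+
+/*
+ * Illustrative usage sketch: look at the start of a frame and skip a single
+ * 802.1Q tag if present. The frame pointer and the length checks are left to
+ * the caller; rte_be_to_cpu_16() (assumed to come from rte_byteorder.h)
+ * converts the network-order type fields, and the ETHER_TYPE_* constants are
+ * defined further down in this file.
+ *
+ *   const struct ether_hdr *eth = (const struct ether_hdr *)frame;
+ *   uint16_t ether_type = rte_be_to_cpu_16(eth->ether_type);
+ *   const void *l3 = eth + 1;
+ *
+ *   if (ether_type == ETHER_TYPE_VLAN) {
+ *       const struct vlan_hdr *vh = (const struct vlan_hdr *)(eth + 1);
+ *       ether_type = rte_be_to_cpu_16(vh->eth_proto);
+ *       l3 = vh + 1;
+ *   }
+ *   if (ether_type == ETHER_TYPE_IPv4) {
+ *       // l3 now points at the IPv4 header
+ *   }
+ */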
+
+/**
+ * VXLAN protocol header.
+ * Contains the 8-bit flag, 24-bit VXLAN Network Identifier and
+ * Reserved fields (24 bits and 8 bits)
+ */
+struct vxlan_hdr {
+ uint32_t vx_flags; /**< flag (8) + Reserved (24). */
+ uint32_t vx_vni; /**< VNI (24) + Reserved (8). */
+} __attribute__((__packed__));
+
+/* Ethernet frame types */
+#define ETHER_TYPE_IPv4 0x0800 /**< IPv4 Protocol. */
+#define ETHER_TYPE_IPv6 0x86DD /**< IPv6 Protocol. */
+#define ETHER_TYPE_ARP 0x0806 /**< Arp Protocol. */
+#define ETHER_TYPE_RARP 0x8035 /**< Reverse Arp Protocol. */
+#define ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */
+#define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */
+#define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */
+
+#define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr))
+/**< VXLAN tunnel header length. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ETHER_H_ */
diff --git a/src/dpdk_lib18/librte_hash/Makefile b/src/dpdk_lib18/librte_hash/Makefile
new file mode 100755
index 00000000..95e4c09c
--- /dev/null
+++ b/src/dpdk_lib18/librte_hash/Makefile
@@ -0,0 +1,53 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_hash.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_HASH) := rte_hash.c
+SRCS-$(CONFIG_RTE_LIBRTE_HASH) += rte_fbk_hash.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include := rte_hash.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_hash_crc.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_jhash.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_fbk_hash.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_HASH) += lib/librte_eal lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_hash/rte_fbk_hash.c b/src/dpdk_lib18/librte_hash/rte_fbk_hash.c
new file mode 100755
index 00000000..421e1cdd
--- /dev/null
+++ b/src/dpdk_lib18/librte_hash/rte_fbk_hash.c
@@ -0,0 +1,240 @@
+/**
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+
+#include <sys/queue.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_common.h>
+#include <rte_per_lcore.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_log.h>
+#include <rte_spinlock.h>
+
+#include "rte_fbk_hash.h"
+
+TAILQ_HEAD(rte_fbk_hash_list, rte_tailq_entry);
+
+/**
+ * Performs a lookup for an existing hash table, and returns a pointer to
+ * the table if found.
+ *
+ * @param name
+ * Name of the hash table to find
+ *
+ * @return
+ * pointer to hash table structure or NULL on error.
+ */
+struct rte_fbk_hash_table *
+rte_fbk_hash_find_existing(const char *name)
+{
+ struct rte_fbk_hash_table *h = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_fbk_hash_list *fbk_hash_list;
+
+ /* check that we have an initialised tail queue */
+ if ((fbk_hash_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_FBK_HASH,
+ rte_fbk_hash_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_FOREACH(te, fbk_hash_list, next) {
+ h = (struct rte_fbk_hash_table *) te->data;
+ if (strncmp(name, h->name, RTE_FBK_HASH_NAMESIZE) == 0)
+ break;
+ }
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+ return h;
+}
+
+/**
+ * Create a new hash table for use with four byte keys.
+ *
+ * @param params
+ * Parameters used in creation of hash table.
+ *
+ * @return
+ * Pointer to hash table structure that is used in future hash table
+ * operations, or NULL on error.
+ */
+struct rte_fbk_hash_table *
+rte_fbk_hash_create(const struct rte_fbk_hash_params *params)
+{
+ struct rte_fbk_hash_table *ht = NULL;
+ struct rte_tailq_entry *te;
+ char hash_name[RTE_FBK_HASH_NAMESIZE];
+ const uint32_t mem_size =
+ sizeof(*ht) + (sizeof(ht->t[0]) * params->entries);
+ uint32_t i;
+ struct rte_fbk_hash_list *fbk_hash_list;
+
+ /* check that we have an initialised tail queue */
+ if ((fbk_hash_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_FBK_HASH,
+ rte_fbk_hash_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ /* Error checking of parameters. */
+ if ((!rte_is_power_of_2(params->entries)) ||
+ (!rte_is_power_of_2(params->entries_per_bucket)) ||
+ (params->entries == 0) ||
+ (params->entries_per_bucket == 0) ||
+ (params->entries_per_bucket > params->entries) ||
+ (params->entries > RTE_FBK_HASH_ENTRIES_MAX) ||
+ (params->entries_per_bucket > RTE_FBK_HASH_ENTRIES_PER_BUCKET_MAX)){
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ snprintf(hash_name, sizeof(hash_name), "FBK_%s", params->name);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* guarantee there's no existing */
+ TAILQ_FOREACH(te, fbk_hash_list, next) {
+ ht = (struct rte_fbk_hash_table *) te->data;
+ if (strncmp(params->name, ht->name, RTE_FBK_HASH_NAMESIZE) == 0)
+ break;
+ }
+ if (te != NULL)
+ goto exit;
+
+ te = rte_zmalloc("FBK_HASH_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, HASH, "Failed to allocate tailq entry\n");
+ goto exit;
+ }
+
+ /* Allocate memory for table. */
+ ht = (struct rte_fbk_hash_table *)rte_zmalloc_socket(hash_name, mem_size,
+ 0, params->socket_id);
+ if (ht == NULL) {
+ RTE_LOG(ERR, HASH, "Failed to allocate fbk hash table\n");
+ rte_free(te);
+ goto exit;
+ }
+
+ /* Set up hash table context. */
+ snprintf(ht->name, sizeof(ht->name), "%s", params->name);
+ ht->entries = params->entries;
+ ht->entries_per_bucket = params->entries_per_bucket;
+ ht->used_entries = 0;
+ ht->bucket_mask = (params->entries / params->entries_per_bucket) - 1;
+ for (ht->bucket_shift = 0, i = 1;
+ (params->entries_per_bucket & i) == 0;
+ ht->bucket_shift++, i <<= 1)
+ ; /* empty loop body */
+
+ if (params->hash_func != NULL) {
+ ht->hash_func = params->hash_func;
+ ht->init_val = params->init_val;
+ }
+ else {
+ ht->hash_func = RTE_FBK_HASH_FUNC_DEFAULT;
+ ht->init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT;
+ }
+
+ te->data = (void *) ht;
+
+ TAILQ_INSERT_TAIL(fbk_hash_list, te, next);
+
+exit:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return ht;
+}
+
+/**
+ * Free all memory used by a hash table.
+ *
+ * @param ht
+ * Hash table to deallocate.
+ */
+void
+rte_fbk_hash_free(struct rte_fbk_hash_table *ht)
+{
+ struct rte_tailq_entry *te;
+ struct rte_fbk_hash_list *fbk_hash_list;
+
+ if (ht == NULL)
+ return;
+
+ /* check that we have an initialised tail queue */
+ if ((fbk_hash_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_FBK_HASH,
+ rte_fbk_hash_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return;
+ }
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find out tailq entry */
+ TAILQ_FOREACH(te, fbk_hash_list, next) {
+ if (te->data == (void *) ht)
+ break;
+ }
+
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(fbk_hash_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ rte_free(ht);
+ rte_free(te);
+}
+
diff --git a/src/dpdk_lib18/librte_hash/rte_fbk_hash.h b/src/dpdk_lib18/librte_hash/rte_fbk_hash.h
new file mode 100755
index 00000000..3d229bf9
--- /dev/null
+++ b/src/dpdk_lib18/librte_hash/rte_fbk_hash.h
@@ -0,0 +1,397 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_FBK_HASH_H_
+#define _RTE_FBK_HASH_H_
+
+/**
+ * @file
+ *
+ * This is a hash table implementation for four byte keys (fbk).
+ *
+ * Note that the return value of the add function should always be checked as,
+ * if a bucket is full, the key is not added even if there is space in other
+ * buckets. This keeps the lookup function very simple and therefore fast.
+ */
+
+#include <stdint.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#ifndef RTE_FBK_HASH_FUNC_DEFAULT
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#include <rte_hash_crc.h>
+/** Default four-byte key hash function if none is specified. */
+#define RTE_FBK_HASH_FUNC_DEFAULT rte_hash_crc_4byte
+#else
+#include <rte_jhash.h>
+#define RTE_FBK_HASH_FUNC_DEFAULT rte_jhash_1word
+#endif
+#endif
+
+#ifndef RTE_FBK_HASH_INIT_VAL_DEFAULT
+/** Initialising value used when calculating hash. */
+#define RTE_FBK_HASH_INIT_VAL_DEFAULT 0xFFFFFFFF
+#endif
+
+/** The maximum number of entries in the hash table that is supported. */
+#define RTE_FBK_HASH_ENTRIES_MAX (1 << 20)
+
+/** The maximum number of entries in each bucket that is supported. */
+#define RTE_FBK_HASH_ENTRIES_PER_BUCKET_MAX 256
+
+/** Maximum size of string for naming the hash. */
+#define RTE_FBK_HASH_NAMESIZE 32
+
+/** Type of function that can be used for calculating the hash value. */
+typedef uint32_t (*rte_fbk_hash_fn)(uint32_t key, uint32_t init_val);
+
+/** Parameters used when creating four-byte key hash table. */
+struct rte_fbk_hash_params {
+ const char *name; /**< Name of the hash table. */
+ uint32_t entries; /**< Total number of entries. */
+ uint32_t entries_per_bucket; /**< Number of entries in a bucket. */
+ int socket_id; /**< Socket to allocate memory on. */
+ rte_fbk_hash_fn hash_func; /**< The hash function. */
+ uint32_t init_val; /**< For initialising hash function. */
+};
+
+/** Individual entry in the four-byte key hash table. */
+union rte_fbk_hash_entry {
+ uint64_t whole_entry; /**< For accessing entire entry. */
+ struct {
+ uint16_t is_entry; /**< Non-zero if entry is active. */
+ uint16_t value; /**< Value returned by lookup. */
+ uint32_t key; /**< Key used to find value. */
+ } entry; /**< For accessing each entry part. */
+};
+
+
+/** The four-byte key hash table structure. */
+struct rte_fbk_hash_table {
+ char name[RTE_FBK_HASH_NAMESIZE]; /**< Name of the hash. */
+ uint32_t entries; /**< Total number of entries. */
+ uint32_t entries_per_bucket; /**< Number of entries in a bucket. */
+ uint32_t used_entries; /**< How many entries are used. */
+ uint32_t bucket_mask; /**< To find which bucket the key is in. */
+ uint32_t bucket_shift; /**< Convert bucket to table offset. */
+ rte_fbk_hash_fn hash_func; /**< The hash function. */
+ uint32_t init_val; /**< For initialising hash function. */
+
+ /** A flat table of all buckets. */
+ union rte_fbk_hash_entry t[0];
+};
+
+/**
+ * Find the offset into hash table of the bucket containing a particular key.
+ *
+ * @param ht
+ * Pointer to hash table.
+ * @param key
+ * Key to calculate bucket for.
+ * @return
+ * Offset into hash table.
+ */
+static inline uint32_t
+rte_fbk_hash_get_bucket(const struct rte_fbk_hash_table *ht, uint32_t key)
+{
+ return (ht->hash_func(key, ht->init_val) & ht->bucket_mask) <<
+ ht->bucket_shift;
+}
+
+/**
+ * Add a key to an existing hash table with bucket id.
+ * This operation is not multi-thread safe
+ * and should only be called from one thread.
+ *
+ * @param ht
+ * Hash table to add the key to.
+ * @param key
+ * Key to add to the hash table.
+ * @param value
+ * Value to associate with key.
+ * @param bucket
+ * Bucket to associate with key.
+ * @return
+ * 0 if ok, or negative value on error.
+ */
+static inline int
+rte_fbk_hash_add_key_with_bucket(struct rte_fbk_hash_table *ht,
+ uint32_t key, uint16_t value, uint32_t bucket)
+{
+ /*
+ * The writing of a new value to the hash table is done as a single
+ * 64bit operation. This should help prevent individual entries being
+ * corrupted due to race conditions, but it's still possible to
+ * overwrite entries that have just been made valid.
+ */
+ const uint64_t new_entry = ((uint64_t)(key) << 32) |
+ ((uint64_t)(value) << 16) |
+ 1; /* 1 = is_entry bit. */
+ uint32_t i;
+
+ for (i = 0; i < ht->entries_per_bucket; i++) {
+ /* Set entry if unused. */
+ if (! ht->t[bucket + i].entry.is_entry) {
+ ht->t[bucket + i].whole_entry = new_entry;
+ ht->used_entries++;
+ return 0;
+ }
+ /* Change value if key already exists. */
+ if (ht->t[bucket + i].entry.key == key) {
+ ht->t[bucket + i].entry.value = value;
+ return 0;
+ }
+ }
+
+ return -ENOSPC; /* No space in bucket. */
+}
+
+/**
+ * Add a key to an existing hash table. This operation is not multi-thread safe
+ * and should only be called from one thread.
+ *
+ * @param ht
+ * Hash table to add the key to.
+ * @param key
+ * Key to add to the hash table.
+ * @param value
+ * Value to associate with key.
+ * @return
+ * 0 if ok, or negative value on error.
+ */
+static inline int
+rte_fbk_hash_add_key(struct rte_fbk_hash_table *ht,
+ uint32_t key, uint16_t value)
+{
+ return rte_fbk_hash_add_key_with_bucket(ht,
+ key, value, rte_fbk_hash_get_bucket(ht, key));
+}
+
+/**
+ * Remove a key with a given bucket id from an existing hash table.
+ * This operation is not multi-thread
+ * safe and should only be called from one thread.
+ *
+ * @param ht
+ * Hash table to remove the key from.
+ * @param key
+ * Key to remove from the hash table.
+ * @param bucket
+ * Bucket id associated with the key.
+ * @return
+ * 0 if ok, or negative value on error.
+ */
+static inline int
+rte_fbk_hash_delete_key_with_bucket(struct rte_fbk_hash_table *ht,
+ uint32_t key, uint32_t bucket)
+{
+ uint32_t last_entry = ht->entries_per_bucket - 1;
+ uint32_t i, j;
+
+ for (i = 0; i < ht->entries_per_bucket; i++) {
+ if (ht->t[bucket + i].entry.key == key) {
+ /* Find last key in bucket. */
+ for (j = ht->entries_per_bucket - 1; j > i; j-- ) {
+ if (! ht->t[bucket + j].entry.is_entry) {
+ last_entry = j - 1;
+ }
+ }
+ /*
+ * Move the last key to the deleted key's position, and
+ * delete the last key. last_entry and i may be the same, but
+ * it doesn't matter.
+ */
+ ht->t[bucket + i].whole_entry =
+ ht->t[bucket + last_entry].whole_entry;
+ ht->t[bucket + last_entry].whole_entry = 0;
+
+ ht->used_entries--;
+ return 0;
+ }
+ }
+
+ return -ENOENT; /* Key didn't exist. */
+}
+
+/**
+ * Remove a key from an existing hash table. This operation is not multi-thread
+ * safe and should only be called from one thread.
+ *
+ * @param ht
+ * Hash table to remove the key from.
+ * @param key
+ * Key to remove from the hash table.
+ * @return
+ * 0 if ok, or negative value on error.
+ */
+static inline int
+rte_fbk_hash_delete_key(struct rte_fbk_hash_table *ht, uint32_t key)
+{
+ return rte_fbk_hash_delete_key_with_bucket(ht,
+ key, rte_fbk_hash_get_bucket(ht, key));
+}
+
+/**
+ * Find a key in the hash table with a given bucket id.
+ * This operation is multi-thread safe.
+ *
+ * @param ht
+ * Hash table to look in.
+ * @param key
+ * Key to find.
+ * @param bucket
+ *   Bucket associated with the key.
+ * @return
+ * The value that was associated with the key, or negative value on error.
+ */
+static inline int
+rte_fbk_hash_lookup_with_bucket(const struct rte_fbk_hash_table *ht,
+ uint32_t key, uint32_t bucket)
+{
+ union rte_fbk_hash_entry current_entry;
+ uint32_t i;
+
+ for (i = 0; i < ht->entries_per_bucket; i++) {
+ /* Single read of entry, which should be atomic. */
+ current_entry.whole_entry = ht->t[bucket + i].whole_entry;
+ if (! current_entry.entry.is_entry) {
+ return -ENOENT; /* Error once we hit an empty field. */
+ }
+ if (current_entry.entry.key == key) {
+ return current_entry.entry.value;
+ }
+ }
+ return -ENOENT; /* Key didn't exist. */
+}
+
+/**
+ * Find a key in the hash table. This operation is multi-thread safe.
+ *
+ * @param ht
+ * Hash table to look in.
+ * @param key
+ * Key to find.
+ * @return
+ * The value that was associated with the key, or negative value on error.
+ */
+static inline int
+rte_fbk_hash_lookup(const struct rte_fbk_hash_table *ht, uint32_t key)
+{
+ return rte_fbk_hash_lookup_with_bucket(ht,
+ key, rte_fbk_hash_get_bucket(ht, key));
+}
+
+/**
+ * Delete all entries in a hash table. This operation is not multi-thread
+ * safe and should only be called from one thread.
+ *
+ * @param ht
+ * Hash table to delete entries in.
+ */
+static inline void
+rte_fbk_hash_clear_all(struct rte_fbk_hash_table *ht)
+{
+ memset(ht->t, 0, sizeof(ht->t[0]) * ht->entries);
+ ht->used_entries = 0;
+}
+
+/**
+ * Find what fraction of entries are being used.
+ *
+ * @param ht
+ * Hash table to find how many entries are being used in.
+ * @return
+ * Load factor of the hash table, or negative value on error.
+ */
+static inline double
+rte_fbk_hash_get_load_factor(struct rte_fbk_hash_table *ht)
+{
+ return (double)ht->used_entries / (double)ht->entries;
+}
+
+/**
+ * Perform a lookup for an existing hash table, and return a pointer to
+ * the table if found.
+ *
+ * @param name
+ *   Name of the hash table to find.
+ *
+ * @return
+ *   Pointer to hash table structure or NULL on error with rte_errno
+ * set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ */
+struct rte_fbk_hash_table *rte_fbk_hash_find_existing(const char *name);
+
+/**
+ * Create a new hash table for use with four byte keys.
+ *
+ * @param params
+ * Parameters used in creation of hash table.
+ *
+ * @return
+ * Pointer to hash table structure that is used in future hash table
+ * operations, or NULL on error with rte_errno set appropriately.
+ * Possible rte_errno error values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - E_RTE_NO_TAILQ - no tailq list could be got for the fbk hash table list
+ * - EINVAL - invalid parameter value passed to function
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_fbk_hash_table *
+rte_fbk_hash_create(const struct rte_fbk_hash_params *params);
+
+/**
+ * Free all memory used by a hash table.
+ * Has no effect on hash tables allocated in memory zones.
+ *
+ * @param ht
+ * Hash table to deallocate.
+ */
+void rte_fbk_hash_free(struct rte_fbk_hash_table *ht);
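+
+/*
+ * Illustrative usage sketch for the four-byte-key hash declared in this file.
+ * The rte_fbk_hash_params field names follow the parameter structure defined
+ * earlier in this header; the table name, sizes, key and value below are
+ * arbitrary example values.
+ *
+ *    struct rte_fbk_hash_params params = {
+ *        .name = "example_fbk",
+ *        .entries = 1024,
+ *        .entries_per_bucket = 4,
+ *        .socket_id = 0,
+ *    };
+ *    struct rte_fbk_hash_table *ht = rte_fbk_hash_create(&params);
+ *
+ *    if (ht != NULL) {
+ *        rte_fbk_hash_add_key(ht, 0x0a000001, 42);
+ *        int val = rte_fbk_hash_lookup(ht, 0x0a000001);  // 42, or -ENOENT
+ *        rte_fbk_hash_delete_key(ht, 0x0a000001);
+ *        rte_fbk_hash_free(ht);
+ *    }
+ */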
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FBK_HASH_H_ */
diff --git a/src/dpdk_lib18/librte_hash/rte_hash.c b/src/dpdk_lib18/librte_hash/rte_hash.c
new file mode 100755
index 00000000..ba827d25
--- /dev/null
+++ b/src/dpdk_lib18/librte_hash/rte_hash.c
@@ -0,0 +1,483 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
+#include <rte_log.h>
+#include <rte_memcpy.h>
+#include <rte_prefetch.h>
+#include <rte_branch_prediction.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_log.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>
+
+#include "rte_hash.h"
+
+
+TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
+
+/* Macro to enable/disable run-time checking of function parameters */
+#if defined(RTE_LIBRTE_HASH_DEBUG)
+#define RETURN_IF_TRUE(cond, retval) do { \
+ if (cond) return (retval); \
+} while (0)
+#else
+#define RETURN_IF_TRUE(cond, retval)
+#endif
+
+/* Hash function used if none is specified */
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#include <rte_hash_crc.h>
+#define DEFAULT_HASH_FUNC rte_hash_crc
+#else
+#include <rte_jhash.h>
+#define DEFAULT_HASH_FUNC rte_jhash
+#endif
+
+/* Signature bucket size is a multiple of this value */
+#define SIG_BUCKET_ALIGNMENT 16
+
+/* Stored key size is a multiple of this value */
+#define KEY_ALIGNMENT 16
+
+/* The high bit is always set in real signatures */
+#define NULL_SIGNATURE 0
+
+/* Returns a pointer to the first signature in specified bucket. */
+static inline hash_sig_t *
+get_sig_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index)
+{
+ return (hash_sig_t *)
+ &(h->sig_tbl[bucket_index * h->sig_tbl_bucket_size]);
+}
+
+/* Returns a pointer to the first key in specified bucket. */
+static inline uint8_t *
+get_key_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index)
+{
+ return (uint8_t *) &(h->key_tbl[bucket_index * h->bucket_entries *
+ h->key_tbl_key_size]);
+}
+
+/* Returns a pointer to a key at a specific position in a specified bucket. */
+static inline void *
+get_key_from_bucket(const struct rte_hash *h, uint8_t *bkt, uint32_t pos)
+{
+ return (void *) &bkt[pos * h->key_tbl_key_size];
+}
+
+/* Does integer division with rounding-up of result. */
+static inline uint32_t
+div_roundup(uint32_t numerator, uint32_t denominator)
+{
+ return (numerator + denominator - 1) / denominator;
+}
+
+/* Increases a size (if needed) to a multiple of alignment. */
+static inline uint32_t
+align_size(uint32_t val, uint32_t alignment)
+{
+ return alignment * div_roundup(val, alignment);
+}
+
+/* Returns the index into the bucket of the first occurrence of a signature. */
+static inline int
+find_first(uint32_t sig, const uint32_t *sig_bucket, uint32_t num_sigs)
+{
+ uint32_t i;
+ for (i = 0; i < num_sigs; i++) {
+ if (sig == sig_bucket[i])
+ return i;
+ }
+ return -1;
+}
+
+struct rte_hash *
+rte_hash_find_existing(const char *name)
+{
+ struct rte_hash *h = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_hash_list *hash_list;
+
+ /* check that we have an initialised tail queue */
+ if ((hash_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_HASH, rte_hash_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_FOREACH(te, hash_list, next) {
+ h = (struct rte_hash *) te->data;
+ if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
+ break;
+ }
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+ return h;
+}
+
+struct rte_hash *
+rte_hash_create(const struct rte_hash_parameters *params)
+{
+ struct rte_hash *h = NULL;
+ struct rte_tailq_entry *te;
+ uint32_t num_buckets, sig_bucket_size, key_size,
+ hash_tbl_size, sig_tbl_size, key_tbl_size, mem_size;
+ char hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_list *hash_list;
+
+ /* check that we have an initialised tail queue */
+ if ((hash_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_HASH, rte_hash_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ /* Check for valid parameters */
+ if ((params == NULL) ||
+ (params->entries > RTE_HASH_ENTRIES_MAX) ||
+ (params->bucket_entries > RTE_HASH_BUCKET_ENTRIES_MAX) ||
+ (params->entries < params->bucket_entries) ||
+ !rte_is_power_of_2(params->entries) ||
+ !rte_is_power_of_2(params->bucket_entries) ||
+ (params->key_len == 0) ||
+ (params->key_len > RTE_HASH_KEY_LENGTH_MAX)) {
+ rte_errno = EINVAL;
+ RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
+ return NULL;
+ }
+
+ snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
+
+ /* Calculate hash dimensions */
+ num_buckets = params->entries / params->bucket_entries;
+ sig_bucket_size = align_size(params->bucket_entries *
+ sizeof(hash_sig_t), SIG_BUCKET_ALIGNMENT);
+ key_size = align_size(params->key_len, KEY_ALIGNMENT);
+
+ hash_tbl_size = align_size(sizeof(struct rte_hash), RTE_CACHE_LINE_SIZE);
+ sig_tbl_size = align_size(num_buckets * sig_bucket_size,
+ RTE_CACHE_LINE_SIZE);
+ key_tbl_size = align_size(num_buckets * key_size *
+ params->bucket_entries, RTE_CACHE_LINE_SIZE);
+
+ /* Total memory required for hash context */
+ mem_size = hash_tbl_size + sig_tbl_size + key_tbl_size;
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+	/* guarantee there is no existing table with the same name */
+ TAILQ_FOREACH(te, hash_list, next) {
+ h = (struct rte_hash *) te->data;
+ if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
+ break;
+ }
+ if (te != NULL)
+ goto exit;
+
+ te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
+ goto exit;
+ }
+
+ h = (struct rte_hash *)rte_zmalloc_socket(hash_name, mem_size,
+ RTE_CACHE_LINE_SIZE, params->socket_id);
+ if (h == NULL) {
+ RTE_LOG(ERR, HASH, "memory allocation failed\n");
+ rte_free(te);
+ goto exit;
+ }
+
+ /* Setup hash context */
+ snprintf(h->name, sizeof(h->name), "%s", params->name);
+ h->entries = params->entries;
+ h->bucket_entries = params->bucket_entries;
+ h->key_len = params->key_len;
+ h->hash_func_init_val = params->hash_func_init_val;
+ h->num_buckets = num_buckets;
+ h->bucket_bitmask = h->num_buckets - 1;
+ h->sig_msb = 1 << (sizeof(hash_sig_t) * 8 - 1);
+ h->sig_tbl = (uint8_t *)h + hash_tbl_size;
+ h->sig_tbl_bucket_size = sig_bucket_size;
+ h->key_tbl = h->sig_tbl + sig_tbl_size;
+ h->key_tbl_key_size = key_size;
+ h->hash_func = (params->hash_func == NULL) ?
+ DEFAULT_HASH_FUNC : params->hash_func;
+
+ te->data = (void *) h;
+
+ TAILQ_INSERT_TAIL(hash_list, te, next);
+
+exit:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return h;
+}
+
+void
+rte_hash_free(struct rte_hash *h)
+{
+ struct rte_tailq_entry *te;
+ struct rte_hash_list *hash_list;
+
+ if (h == NULL)
+ return;
+
+ /* check that we have an initialised tail queue */
+ if ((hash_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_HASH, rte_hash_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return;
+ }
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find out tailq entry */
+ TAILQ_FOREACH(te, hash_list, next) {
+ if (te->data == (void *) h)
+ break;
+ }
+
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(hash_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ rte_free(h);
+ rte_free(te);
+}
+
+static inline int32_t
+__rte_hash_add_key_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig)
+{
+ hash_sig_t *sig_bucket;
+ uint8_t *key_bucket;
+ uint32_t bucket_index, i;
+ int32_t pos;
+
+ /* Get the hash signature and bucket index */
+ sig |= h->sig_msb;
+ bucket_index = sig & h->bucket_bitmask;
+ sig_bucket = get_sig_tbl_bucket(h, bucket_index);
+ key_bucket = get_key_tbl_bucket(h, bucket_index);
+
+ /* Check if key is already present in the hash */
+ for (i = 0; i < h->bucket_entries; i++) {
+ if ((sig == sig_bucket[i]) &&
+ likely(memcmp(key, get_key_from_bucket(h, key_bucket, i),
+ h->key_len) == 0)) {
+ return bucket_index * h->bucket_entries + i;
+ }
+ }
+
+	/* Check if there is a free slot within the bucket to add the new key */
+ pos = find_first(NULL_SIGNATURE, sig_bucket, h->bucket_entries);
+
+ if (unlikely(pos < 0))
+ return -ENOSPC;
+
+ /* Add the new key to the bucket */
+ sig_bucket[pos] = sig;
+ rte_memcpy(get_key_from_bucket(h, key_bucket, pos), key, h->key_len);
+ return bucket_index * h->bucket_entries + pos;
+}
+
+int32_t
+rte_hash_add_key_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_add_key_with_hash(h, key, sig);
+}
+
+int32_t
+rte_hash_add_key(const struct rte_hash *h, const void *key)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key));
+}
+
+static inline int32_t
+__rte_hash_del_key_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig)
+{
+ hash_sig_t *sig_bucket;
+ uint8_t *key_bucket;
+ uint32_t bucket_index, i;
+
+ /* Get the hash signature and bucket index */
+ sig = sig | h->sig_msb;
+ bucket_index = sig & h->bucket_bitmask;
+ sig_bucket = get_sig_tbl_bucket(h, bucket_index);
+ key_bucket = get_key_tbl_bucket(h, bucket_index);
+
+ /* Check if key is already present in the hash */
+ for (i = 0; i < h->bucket_entries; i++) {
+ if ((sig == sig_bucket[i]) &&
+ likely(memcmp(key, get_key_from_bucket(h, key_bucket, i),
+ h->key_len) == 0)) {
+ sig_bucket[i] = NULL_SIGNATURE;
+ return bucket_index * h->bucket_entries + i;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int32_t
+rte_hash_del_key_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_del_key_with_hash(h, key, sig);
+}
+
+int32_t
+rte_hash_del_key(const struct rte_hash *h, const void *key)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
+}
+
+static inline int32_t
+__rte_hash_lookup_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig)
+{
+ hash_sig_t *sig_bucket;
+ uint8_t *key_bucket;
+ uint32_t bucket_index, i;
+
+ /* Get the hash signature and bucket index */
+ sig |= h->sig_msb;
+ bucket_index = sig & h->bucket_bitmask;
+ sig_bucket = get_sig_tbl_bucket(h, bucket_index);
+ key_bucket = get_key_tbl_bucket(h, bucket_index);
+
+ /* Check if key is already present in the hash */
+ for (i = 0; i < h->bucket_entries; i++) {
+ if ((sig == sig_bucket[i]) &&
+ likely(memcmp(key, get_key_from_bucket(h, key_bucket, i),
+ h->key_len) == 0)) {
+ return bucket_index * h->bucket_entries + i;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int32_t
+rte_hash_lookup_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_lookup_with_hash(h, key, sig);
+}
+
+int32_t
+rte_hash_lookup(const struct rte_hash *h, const void *key)
+{
+ RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+ return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key));
+}
+
+int
+rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+ uint32_t num_keys, int32_t *positions)
+{
+ uint32_t i, j, bucket_index;
+ hash_sig_t sigs[RTE_HASH_LOOKUP_BULK_MAX];
+
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (positions == NULL)), -EINVAL);
+
+ /* Get the hash signature and bucket index */
+ for (i = 0; i < num_keys; i++) {
+ sigs[i] = h->hash_func(keys[i], h->key_len,
+ h->hash_func_init_val) | h->sig_msb;
+ bucket_index = sigs[i] & h->bucket_bitmask;
+
+ /* Pre-fetch relevant buckets */
+ rte_prefetch1((void *) get_sig_tbl_bucket(h, bucket_index));
+ rte_prefetch1((void *) get_key_tbl_bucket(h, bucket_index));
+ }
+
+ /* Check if key is already present in the hash */
+ for (i = 0; i < num_keys; i++) {
+ bucket_index = sigs[i] & h->bucket_bitmask;
+ hash_sig_t *sig_bucket = get_sig_tbl_bucket(h, bucket_index);
+ uint8_t *key_bucket = get_key_tbl_bucket(h, bucket_index);
+
+ positions[i] = -ENOENT;
+
+ for (j = 0; j < h->bucket_entries; j++) {
+ if ((sigs[i] == sig_bucket[j]) &&
+ likely(memcmp(keys[i],
+ get_key_from_bucket(h, key_bucket, j),
+ h->key_len) == 0)) {
+ positions[i] = bucket_index *
+ h->bucket_entries + j;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_hash/rte_hash.h b/src/dpdk_lib18/librte_hash/rte_hash.h
new file mode 100755
index 00000000..2ecaf1ad
--- /dev/null
+++ b/src/dpdk_lib18/librte_hash/rte_hash.h
@@ -0,0 +1,310 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_HASH_H_
+#define _RTE_HASH_H_
+
+/**
+ * @file
+ *
+ * RTE Hash Table
+ */
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Maximum size of hash table that can be created. */
+#define RTE_HASH_ENTRIES_MAX (1 << 26)
+
+/** Maximum bucket size that can be created. */
+#define RTE_HASH_BUCKET_ENTRIES_MAX 16
+
+/** Maximum length of key that can be used. */
+#define RTE_HASH_KEY_LENGTH_MAX 64
+
+/** Max number of keys that can be searched for using rte_hash_lookup_multi. */
+#define RTE_HASH_LOOKUP_BULK_MAX 16
+#define RTE_HASH_LOOKUP_MULTI_MAX RTE_HASH_LOOKUP_BULK_MAX
+
+/** Max number of characters in hash name. */
+#define RTE_HASH_NAMESIZE 32
+
+/** Signature of key that is stored internally. */
+typedef uint32_t hash_sig_t;
+
+/** Type of function that can be used for calculating the hash value. */
+typedef uint32_t (*rte_hash_function)(const void *key, uint32_t key_len,
+ uint32_t init_val);
+
+/**
+ * Parameters used when creating the hash table. The total table entries and
+ * bucket entries must be a power of 2.
+ */
+struct rte_hash_parameters {
+ const char *name; /**< Name of the hash. */
+ uint32_t entries; /**< Total hash table entries. */
+ uint32_t bucket_entries; /**< Bucket entries. */
+ uint32_t key_len; /**< Length of hash key. */
+ rte_hash_function hash_func; /**< Function used to calculate hash. */
+ uint32_t hash_func_init_val; /**< Init value used by hash_func. */
+ int socket_id; /**< NUMA Socket ID for memory. */
+};
+
+/** A hash table structure. */
+struct rte_hash {
+ char name[RTE_HASH_NAMESIZE]; /**< Name of the hash. */
+ uint32_t entries; /**< Total table entries. */
+ uint32_t bucket_entries; /**< Bucket entries. */
+ uint32_t key_len; /**< Length of hash key. */
+ rte_hash_function hash_func; /**< Function used to calculate hash. */
+ uint32_t hash_func_init_val; /**< Init value used by hash_func. */
+ uint32_t num_buckets; /**< Number of buckets in table. */
+ uint32_t bucket_bitmask; /**< Bitmask for getting bucket index
+ from hash signature. */
+ hash_sig_t sig_msb; /**< MSB is always set in valid signatures. */
+ uint8_t *sig_tbl; /**< Flat array of hash signature buckets. */
+ uint32_t sig_tbl_bucket_size; /**< Signature buckets may be padded for
+ alignment reasons, and this is the
+ bucket size used by sig_tbl. */
+ uint8_t *key_tbl; /**< Flat array of key value buckets. */
+ uint32_t key_tbl_key_size; /**< Keys may be padded for alignment
+ reasons, and this is the key size
+ used by key_tbl. */
+};
+
+/**
+ * Create a new hash table.
+ *
+ * @param params
+ * Parameters used to create and initialise the hash table.
+ * @return
+ * Pointer to hash table structure that is used in future hash table
+ * operations, or NULL on error, with error code set in rte_errno.
+ * Possible rte_errno errors include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - E_RTE_NO_TAILQ - no tailq list could be got for the hash table list
+ * - ENOENT - missing entry
+ * - EINVAL - invalid parameter passed to function
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_hash *
+rte_hash_create(const struct rte_hash_parameters *params);
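+
+/*
+ * Illustrative sketch of filling in struct rte_hash_parameters (defined above)
+ * and creating a table; the name, sizes and key length are example values.
+ * Leaving hash_func as NULL selects the library's default hash function.
+ *
+ *    struct rte_hash_parameters params = {
+ *        .name = "example_hash",
+ *        .entries = 1024,              // must be a power of 2
+ *        .bucket_entries = 4,          // must be a power of 2
+ *        .key_len = sizeof(uint64_t),
+ *        .hash_func = NULL,
+ *        .hash_func_init_val = 0,
+ *        .socket_id = 0,
+ *    };
+ *    struct rte_hash *h = rte_hash_create(&params);
+ *    if (h == NULL)
+ *        ; // inspect rte_errno for the failure reason
+ */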
+
+
+/**
+ * Find an existing hash table object and return a pointer to it.
+ *
+ * @param name
+ * Name of the hash table as passed to rte_hash_create()
+ * @return
+ * Pointer to hash table or NULL if object not found
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - ENOENT - value not available for return
+ */
+struct rte_hash *
+rte_hash_find_existing(const char *name);
+
+/**
+ * De-allocate all memory used by hash table.
+ * @param h
+ * Hash table to free
+ */
+void
+rte_hash_free(struct rte_hash *h);
+
+/**
+ * Add a key to an existing hash table. This operation is not multi-thread safe
+ * and should only be called from one thread.
+ *
+ * @param h
+ * Hash table to add the key to.
+ * @param key
+ * Key to add to the hash table.
+ * @return
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOSPC if there is no space in the hash for this key.
+ * - A positive value that can be used by the caller as an offset into an
+ * array of user data. This value is unique for this key.
+ */
+int32_t
+rte_hash_add_key(const struct rte_hash *h, const void *key);
+
+/**
+ * Add a key to an existing hash table. This operation is not multi-thread safe
+ * and should only be called from one thread.
+ *
+ * @param h
+ * Hash table to add the key to.
+ * @param key
+ * Key to add to the hash table.
+ * @param sig
+ * Hash value to add to the hash table.
+ * @return
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOSPC if there is no space in the hash for this key.
+ * - A positive value that can be used by the caller as an offset into an
+ * array of user data. This value is unique for this key.
+ */
+int32_t
+rte_hash_add_key_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig);
+
+/**
+ * Remove a key from an existing hash table. This operation is not multi-thread
+ * safe and should only be called from one thread.
+ *
+ * @param h
+ * Hash table to remove the key from.
+ * @param key
+ * Key to remove from the hash table.
+ * @return
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOENT if the key is not found.
+ * - A positive value that can be used by the caller as an offset into an
+ * array of user data. This value is unique for this key, and is the same
+ * value that was returned when the key was added.
+ */
+int32_t
+rte_hash_del_key(const struct rte_hash *h, const void *key);
+
+/**
+ * Remove a key from an existing hash table. This operation is not multi-thread
+ * safe and should only be called from one thread.
+ *
+ * @param h
+ * Hash table to remove the key from.
+ * @param key
+ * Key to remove from the hash table.
+ * @param sig
+ * Hash value to remove from the hash table.
+ * @return
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOENT if the key is not found.
+ * - A positive value that can be used by the caller as an offset into an
+ * array of user data. This value is unique for this key, and is the same
+ * value that was returned when the key was added.
+ */
+int32_t
+rte_hash_del_key_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig);
+
+
+/**
+ * Find a key in the hash table. This operation is multi-thread safe.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param key
+ * Key to find.
+ * @return
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOENT if the key is not found.
+ * - A positive value that can be used by the caller as an offset into an
+ * array of user data. This value is unique for this key, and is the same
+ * value that was returned when the key was added.
+ */
+int32_t
+rte_hash_lookup(const struct rte_hash *h, const void *key);
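+
+/*
+ * Illustrative sketch of using the returned position as an index into a
+ * caller-owned array of user data; the key, object and array are example
+ * values.
+ *
+ *    uint64_t key = 1234;
+ *    void *user_data[1024];            // sized to match params.entries
+ *    int32_t pos = rte_hash_add_key(h, &key);
+ *    if (pos >= 0)
+ *        user_data[pos] = obj;         // store the object for this key
+ *    pos = rte_hash_lookup(h, &key);
+ *    if (pos >= 0)
+ *        obj = user_data[pos];         // retrieve it again on lookup
+ */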
+
+/**
+ * Find a key in the hash table. This operation is multi-thread safe.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param key
+ * Key to find.
+ * @param sig
+ * Hash value to find.
+ * @return
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOENT if the key is not found.
+ * - A positive value that can be used by the caller as an offset into an
+ * array of user data. This value is unique for this key, and is the same
+ * value that was returned when the key was added.
+ */
+int32_t
+rte_hash_lookup_with_hash(const struct rte_hash *h,
+ const void *key, hash_sig_t sig);
+
+
+/**
+ * Calculate a hash value for a key. This operation is not multi-process safe.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param key
+ * Key to find.
+ * @return
+ * - hash value
+ */
+static inline hash_sig_t
+rte_hash_hash(const struct rte_hash *h, const void *key)
+{
+ /* calc hash result by key */
+ return h->hash_func(key, h->key_len, h->hash_func_init_val);
+}
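+
+/*
+ * Illustrative sketch: the signature can be computed once with rte_hash_hash()
+ * and reused with the *_with_hash variants to avoid hashing the key twice.
+ *
+ *    hash_sig_t sig = rte_hash_hash(h, &key);
+ *    int32_t pos = rte_hash_add_key_with_hash(h, &key, sig);
+ *    // later, look the same key up again without re-hashing:
+ *    pos = rte_hash_lookup_with_hash(h, &key, sig);
+ */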
+
+#define rte_hash_lookup_multi rte_hash_lookup_bulk
+/**
+ * Find multiple keys in the hash table. This operation is multi-thread safe.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param keys
+ * A pointer to a list of keys to look for.
+ * @param num_keys
+ *   How many keys are in the keys list (at most RTE_HASH_LOOKUP_BULK_MAX).
+ * @param positions
+ * Output containing a list of values, corresponding to the list of keys that
+ * can be used by the caller as an offset into an array of user data. These
+ * values are unique for each key, and are the same values that were returned
+ * when each key was added. If a key in the list was not found, then -ENOENT
+ * will be the value.
+ * @return
+ * -EINVAL if there's an error, otherwise 0.
+ */
+int
+rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+ uint32_t num_keys, int32_t *positions);
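+
+/*
+ * Illustrative bulk lookup sketch; the keys, count and user_data array are
+ * example values.
+ *
+ *    const void *keys[4] = { &k0, &k1, &k2, &k3 };
+ *    int32_t positions[4];
+ *    if (rte_hash_lookup_bulk(h, keys, 4, positions) == 0) {
+ *        for (int i = 0; i < 4; i++) {
+ *            if (positions[i] != -ENOENT)
+ *                obj = user_data[positions[i]];
+ *        }
+ *    }
+ */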
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_HASH_H_ */
diff --git a/src/dpdk_lib18/librte_hash/rte_hash_crc.h b/src/dpdk_lib18/librte_hash/rte_hash_crc.h
new file mode 100755
index 00000000..b48b0db1
--- /dev/null
+++ b/src/dpdk_lib18/librte_hash/rte_hash_crc.h
@@ -0,0 +1,110 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_HASH_CRC_H_
+#define _RTE_HASH_CRC_H_
+
+/**
+ * @file
+ *
+ * RTE CRC Hash
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <nmmintrin.h>
+
+/**
+ * Use a single crc32 instruction to perform a hash on a 4-byte value.
+ *
+ * @param data
+ * Data to perform hash on.
+ * @param init_val
+ * Value to initialise hash generator.
+ * @return
+ * 32bit calculated hash value.
+ */
+static inline uint32_t
+rte_hash_crc_4byte(uint32_t data, uint32_t init_val)
+{
+ return _mm_crc32_u32(init_val, data);
+}
+
+/**
+ * Use the crc32 instruction to perform a hash.
+ *
+ * @param data
+ * Data to perform hash on.
+ * @param data_len
+ * How many bytes to use to calculate hash value.
+ * @param init_val
+ * Value to initialise hash generator.
+ * @return
+ * 32bit calculated hash value.
+ */
+static inline uint32_t
+rte_hash_crc(const void *data, uint32_t data_len, uint32_t init_val)
+{
+ unsigned i;
+ uint32_t temp = 0;
+ const uint32_t *p32 = (const uint32_t *)data;
+
+ for (i = 0; i < data_len / 4; i++) {
+ init_val = rte_hash_crc_4byte(*p32++, init_val);
+ }
+
+ switch (3 - (data_len & 0x03)) {
+ case 0:
+ temp |= *((const uint8_t *)p32 + 2) << 16;
+ /* Fallthrough */
+ case 1:
+ temp |= *((const uint8_t *)p32 + 1) << 8;
+ /* Fallthrough */
+ case 2:
+ temp |= *((const uint8_t *)p32);
+ init_val = rte_hash_crc_4byte(temp, init_val);
+ default:
+ break;
+ }
+
+ return init_val;
+}
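+
+/*
+ * Illustrative sketch: hashing an arbitrary structure with the CRC32-based
+ * hash; the key layout and seed below are example values.
+ *
+ *    struct flow_key { uint32_t src, dst; uint16_t sport, dport; };
+ *    struct flow_key key = { 1, 2, 3, 4 };
+ *    uint32_t sig = rte_hash_crc(&key, sizeof(key), 0xdeadbeef);
+ */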
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_HASH_CRC_H_ */
diff --git a/src/dpdk_lib18/librte_hash/rte_jhash.h b/src/dpdk_lib18/librte_hash/rte_jhash.h
new file mode 100755
index 00000000..a4bf5a1b
--- /dev/null
+++ b/src/dpdk_lib18/librte_hash/rte_jhash.h
@@ -0,0 +1,253 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_JHASH_H
+#define _RTE_JHASH_H
+
+/**
+ * @file
+ *
+ * jhash functions.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose. It has no warranty.
+ *
+ * $FreeBSD$
+ */
+
+/** @internal Internal function. NOTE: Arguments are modified. */
+#define __rte_jhash_mix(a, b, c) do { \
+ a -= b; a -= c; a ^= (c>>13); \
+ b -= c; b -= a; b ^= (a<<8); \
+ c -= a; c -= b; c ^= (b>>13); \
+ a -= b; a -= c; a ^= (c>>12); \
+ b -= c; b -= a; b ^= (a<<16); \
+ c -= a; c -= b; c ^= (b>>5); \
+ a -= b; a -= c; a ^= (c>>3); \
+ b -= c; b -= a; b ^= (a<<10); \
+ c -= a; c -= b; c ^= (b>>15); \
+} while (0)
+
+/** The golden ratio: an arbitrary value. */
+#define RTE_JHASH_GOLDEN_RATIO 0x9e3779b9
+
+/**
+ * The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
+ * the input key.
+ *
+ * @param key
+ * Key to calculate hash of.
+ * @param length
+ * Length of key in bytes.
+ * @param initval
+ * Initialising value of hash.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash(const void *key, uint32_t length, uint32_t initval)
+{
+ uint32_t a, b, c, len;
+ const uint8_t *k = (const uint8_t *)key;
+ const uint32_t *k32 = (const uint32_t *)key;
+
+ len = length;
+ a = b = RTE_JHASH_GOLDEN_RATIO;
+ c = initval;
+
+ while (len >= 12) {
+ a += k32[0];
+ b += k32[1];
+ c += k32[2];
+
+ __rte_jhash_mix(a,b,c);
+
+ k += (3 * sizeof(uint32_t)), k32 += 3;
+ len -= (3 * sizeof(uint32_t));
+ }
+
+ c += length;
+ switch (len) {
+ case 11: c += ((uint32_t)k[10] << 24);
+ case 10: c += ((uint32_t)k[9] << 16);
+ case 9 : c += ((uint32_t)k[8] << 8);
+ case 8 : b += ((uint32_t)k[7] << 24);
+ case 7 : b += ((uint32_t)k[6] << 16);
+ case 6 : b += ((uint32_t)k[5] << 8);
+ case 5 : b += k[4];
+ case 4 : a += ((uint32_t)k[3] << 24);
+ case 3 : a += ((uint32_t)k[2] << 16);
+ case 2 : a += ((uint32_t)k[1] << 8);
+ case 1 : a += k[0];
+ default: break;
+ };
+
+ __rte_jhash_mix(a,b,c);
+
+ return c;
+}
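+
+/*
+ * Illustrative sketch; the buffer and seed are example values.
+ *
+ *    const char buf[] = "example key";
+ *    uint32_t h1 = rte_jhash(buf, sizeof(buf), 0);
+ *
+ *    // keys that are a whole number of 32-bit words can use rte_jhash2()
+ *    // below, which skips the byte-wise tail handling:
+ *    uint32_t words[3] = { 1, 2, 3 };
+ *    uint32_t h2 = rte_jhash2(words, 3, 0);
+ */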
+
+/**
+ * A special optimized version that handles one or more uint32_t values.
+ * The length parameter here is the number of uint32_t values in the key.
+ *
+ * @param k
+ * Key to calculate hash of.
+ * @param length
+ * Length of key in units of 4 bytes.
+ * @param initval
+ * Initialising value of hash.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash2(const uint32_t *k, uint32_t length, uint32_t initval)
+{
+ uint32_t a, b, c, len;
+
+ a = b = RTE_JHASH_GOLDEN_RATIO;
+ c = initval;
+ len = length;
+
+ while (len >= 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __rte_jhash_mix(a, b, c);
+ k += 3; len -= 3;
+ }
+
+ c += length * 4;
+
+ switch (len) {
+ case 2 : b += k[1];
+ case 1 : a += k[0];
+ default: break;
+ };
+
+ __rte_jhash_mix(a,b,c);
+
+ return c;
+}
+
+
+/**
+ * A special ultra-optimized version that knows it is hashing exactly
+ * 3 words.
+ *
+ * @param a
+ *   First word to calculate hash of.
+ * @param b
+ *   Second word to calculate hash of.
+ * @param c
+ *   Third word to calculate hash of.
+ * @param initval
+ * Initialising value of hash.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
+{
+ a += RTE_JHASH_GOLDEN_RATIO;
+ b += RTE_JHASH_GOLDEN_RATIO;
+ c += initval;
+
+ __rte_jhash_mix(a, b, c);
+
+ /*
+ * NOTE: In particular the "c += length; __rte_jhash_mix(a,b,c);"
+ * normally done at the end is not done here.
+ */
+ return c;
+}
+
+/**
+ * A special ultra-optimized version that knows it is hashing exactly
+ * 2 words.
+ *
+ * @param a
+ *   First word to calculate hash of.
+ * @param b
+ *   Second word to calculate hash of.
+ * @param initval
+ * Initialising value of hash.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash_2words(uint32_t a, uint32_t b, uint32_t initval)
+{
+ return rte_jhash_3words(a, b, 0, initval);
+}
+
+/**
+ * A special ultra-optimized version that knows it is hashing exactly
+ * 1 word.
+ *
+ * @param a
+ *   Word to calculate hash of.
+ * @param initval
+ * Initialising value of hash.
+ * @return
+ * Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash_1word(uint32_t a, uint32_t initval)
+{
+ return rte_jhash_3words(a, 0, 0, initval);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_JHASH_H */
diff --git a/src/dpdk_lib18/librte_ip_frag/Makefile b/src/dpdk_lib18/librte_ip_frag/Makefile
new file mode 100755
index 00000000..8c00d39c
--- /dev/null
+++ b/src/dpdk_lib18/librte_ip_frag/Makefile
@@ -0,0 +1,59 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_ip_frag.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+#source files
+ifeq ($(CONFIG_RTE_MBUF_REFCNT),y)
+SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += rte_ipv4_fragmentation.c
+SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += rte_ipv6_fragmentation.c
+else
+$(info WARNING: Fragmentation feature is disabled because it needs MBUF_REFCNT.)
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += rte_ipv4_reassembly.c
+SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += rte_ipv6_reassembly.c
+SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += rte_ip_frag_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += ip_frag_internal.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_IP_FRAG)-include += rte_ip_frag.h
+
+
+# this library depends on rte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_mempool lib/librte_ether
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_ip_frag/ip_frag_common.h b/src/dpdk_lib18/librte_ip_frag/ip_frag_common.h
new file mode 100755
index 00000000..210f409d
--- /dev/null
+++ b/src/dpdk_lib18/librte_ip_frag/ip_frag_common.h
@@ -0,0 +1,192 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IP_FRAG_COMMON_H_
+#define _IP_FRAG_COMMON_H_
+
+#include "rte_ip_frag.h"
+
+/* logging macros. */
+#ifdef RTE_LIBRTE_IP_FRAG_DEBUG
+
+#define IP_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
+
+#define IP_FRAG_ASSERT(exp) \
+if (!(exp)) { \
+ rte_panic("function %s, line%d\tassert \"" #exp "\" failed\n", \
+ __func__, __LINE__); \
+}
+#else
+#define IP_FRAG_LOG(lvl, fmt, args...) do {} while(0)
+#define IP_FRAG_ASSERT(exp) do {} while (0)
+#endif /* IP_FRAG_DEBUG */
+
+#define IPV4_KEYLEN 1
+#define IPV6_KEYLEN 4
+
+/* helper macros */
+#define IP_FRAG_MBUF2DR(dr, mb) ((dr)->row[(dr)->cnt++] = (mb))
+
+#define IPv6_KEY_BYTES(key) \
+ (key)[0], (key)[1], (key)[2], (key)[3]
+#define IPv6_KEY_BYTES_FMT \
+ "%08" PRIx64 "%08" PRIx64 "%08" PRIx64 "%08" PRIx64
+
+/* internal functions declarations */
+struct rte_mbuf * ip_frag_process(struct ip_frag_pkt *fp,
+ struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb,
+ uint16_t ofs, uint16_t len, uint16_t more_frags);
+
+struct ip_frag_pkt * ip_frag_find(struct rte_ip_frag_tbl *tbl,
+ struct rte_ip_frag_death_row *dr,
+ const struct ip_frag_key *key, uint64_t tms);
+
+struct ip_frag_pkt * ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
+ const struct ip_frag_key *key, uint64_t tms,
+ struct ip_frag_pkt **free, struct ip_frag_pkt **stale);
+
+/* these functions need to be declared here as ip_frag_process relies on them */
+struct rte_mbuf * ipv4_frag_reassemble(const struct ip_frag_pkt *fp);
+struct rte_mbuf * ipv6_frag_reassemble(const struct ip_frag_pkt *fp);
+
+
+
+/*
+ * misc frag key functions
+ */
+
+/* check if key is empty */
+static inline int
+ip_frag_key_is_empty(const struct ip_frag_key * key)
+{
+ uint32_t i;
+ for (i = 0; i < key->key_len; i++)
+ if (key->src_dst[i] != 0)
+ return 0;
+ return 1;
+}
+
+/* empty the key */
+static inline void
+ip_frag_key_invalidate(struct ip_frag_key * key)
+{
+ uint32_t i;
+ for (i = 0; i < key->key_len; i++)
+ key->src_dst[i] = 0;
+}
+
+/* compare two keys; returns zero when they are equal */
+static inline int
+ip_frag_key_cmp(const struct ip_frag_key * k1, const struct ip_frag_key * k2)
+{
+ uint32_t i, val;
+ val = k1->id ^ k2->id;
+ for (i = 0; i < k1->key_len; i++)
+ val |= k1->src_dst[i] ^ k2->src_dst[i];
+ return val;
+}
+
+/*
+ * misc fragment functions
+ */
+
+/* put fragment on death row */
+static inline void
+ip_frag_free(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr)
+{
+ uint32_t i, k;
+
+ k = dr->cnt;
+ for (i = 0; i != fp->last_idx; i++) {
+ if (fp->frags[i].mb != NULL) {
+ dr->row[k++] = fp->frags[i].mb;
+ fp->frags[i].mb = NULL;
+ }
+ }
+
+ fp->last_idx = 0;
+ dr->cnt = k;
+}
+
+/* if the key has been invalidated, remove the entry from the LRU list */
+static inline void
+ip_frag_inuse(struct rte_ip_frag_tbl *tbl, const struct ip_frag_pkt *fp)
+{
+ if (ip_frag_key_is_empty(&fp->key)) {
+ TAILQ_REMOVE(&tbl->lru, fp, lru);
+ tbl->use_entries--;
+ }
+}
+
+/* reset the fragment */
+static inline void
+ip_frag_reset(struct ip_frag_pkt *fp, uint64_t tms)
+{
+ static const struct ip_frag zero_frag = {
+ .ofs = 0,
+ .len = 0,
+ .mb = NULL,
+ };
+
+ fp->start = tms;
+ fp->total_size = UINT32_MAX;
+ fp->frag_size = 0;
+ fp->last_idx = IP_MIN_FRAG_NUM;
+ fp->frags[IP_LAST_FRAG_IDX] = zero_frag;
+ fp->frags[IP_FIRST_FRAG_IDX] = zero_frag;
+}
+
+/* chain two mbufs */
+static inline void
+ip_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
+{
+ struct rte_mbuf *ms;
+
+ /* adjust start of the last fragment data. */
+ rte_pktmbuf_adj(mp, (uint16_t)(mp->l2_len + mp->l3_len));
+
+ /* chain two fragments. */
+ ms = rte_pktmbuf_lastseg(mn);
+ ms->next = mp;
+
+ /* accumulate number of segments and total length. */
+ mn->nb_segs = (uint8_t)(mn->nb_segs + mp->nb_segs);
+ mn->pkt_len += mp->pkt_len;
+
+ /* reset pkt_len and nb_segs for chained fragment. */
+ mp->pkt_len = mp->data_len;
+ mp->nb_segs = 1;
+}
+
+
+#endif /* _IP_FRAG_COMMON_H_ */
diff --git a/src/dpdk_lib18/librte_ip_frag/ip_frag_internal.c b/src/dpdk_lib18/librte_ip_frag/ip_frag_internal.c
new file mode 100755
index 00000000..a2c645bf
--- /dev/null
+++ b/src/dpdk_lib18/librte_ip_frag/ip_frag_internal.c
@@ -0,0 +1,418 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+
+#include <rte_jhash.h>
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#include <rte_hash_crc.h>
+#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+
+#include "ip_frag_common.h"
+
+#define PRIME_VALUE 0xeaad8405
+
+#define IP_FRAG_TBL_POS(tbl, sig) \
+ ((tbl)->pkt + ((sig) & (tbl)->entry_mask))
+
+#ifdef RTE_LIBRTE_IP_FRAG_TBL_STAT
+#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) ((s)->f += (v))
+#else
+#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) do {} while (0)
+#endif /* IP_FRAG_TBL_STAT */
+
+/* local frag table helper functions */
+static inline void
+ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
+ struct ip_frag_pkt *fp)
+{
+ ip_frag_free(fp, dr);
+ ip_frag_key_invalidate(&fp->key);
+ TAILQ_REMOVE(&tbl->lru, fp, lru);
+ tbl->use_entries--;
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, del_num, 1);
+}
+
+static inline void
+ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
+ const struct ip_frag_key *key, uint64_t tms)
+{
+ fp->key = key[0];
+ ip_frag_reset(fp, tms);
+ TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
+ tbl->use_entries++;
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, add_num, 1);
+}
+
+static inline void
+ip_frag_tbl_reuse(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
+ struct ip_frag_pkt *fp, uint64_t tms)
+{
+ ip_frag_free(fp, dr);
+ ip_frag_reset(fp, tms);
+ TAILQ_REMOVE(&tbl->lru, fp, lru);
+ TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, reuse_num, 1);
+}
+
+
+static inline void
+ipv4_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
+{
+ uint32_t v;
+ const uint32_t *p;
+
+ p = (const uint32_t *)&key->src_dst;
+
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+ v = rte_hash_crc_4byte(p[0], PRIME_VALUE);
+ v = rte_hash_crc_4byte(p[1], v);
+ v = rte_hash_crc_4byte(key->id, v);
+#else
+
+ v = rte_jhash_3words(p[0], p[1], key->id, PRIME_VALUE);
+#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+
+ *v1 = v;
+ *v2 = (v << 7) + (v >> 14);
+}
+
+static inline void
+ipv6_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
+{
+ uint32_t v;
+ const uint32_t *p;
+
+ p = (const uint32_t *) &key->src_dst;
+
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+ v = rte_hash_crc_4byte(p[0], PRIME_VALUE);
+ v = rte_hash_crc_4byte(p[1], v);
+ v = rte_hash_crc_4byte(p[2], v);
+ v = rte_hash_crc_4byte(p[3], v);
+ v = rte_hash_crc_4byte(p[4], v);
+ v = rte_hash_crc_4byte(p[5], v);
+ v = rte_hash_crc_4byte(p[6], v);
+ v = rte_hash_crc_4byte(p[7], v);
+ v = rte_hash_crc_4byte(key->id, v);
+#else
+
+ v = rte_jhash_3words(p[0], p[1], p[2], PRIME_VALUE);
+ v = rte_jhash_3words(p[3], p[4], p[5], v);
+ v = rte_jhash_3words(p[6], p[7], key->id, v);
+#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+
+ *v1 = v;
+ *v2 = (v << 7) + (v >> 14);
+}
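+
+/*
+ * Note: both hash helpers above derive the second signature (*v2) from the
+ * first with a shift-and-add, so every flow maps to two candidate buckets;
+ * ip_frag_lookup() below probes both when searching for an entry.
+ */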
+
+struct rte_mbuf *
+ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
+ struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags)
+{
+ uint32_t idx;
+
+ fp->frag_size += len;
+
+ /* this is the first fragment. */
+ if (ofs == 0) {
+ idx = (fp->frags[IP_FIRST_FRAG_IDX].mb == NULL) ?
+ IP_FIRST_FRAG_IDX : UINT32_MAX;
+
+ /* this is the last fragment. */
+ } else if (more_frags == 0) {
+ fp->total_size = ofs + len;
+ idx = (fp->frags[IP_LAST_FRAG_IDX].mb == NULL) ?
+ IP_LAST_FRAG_IDX : UINT32_MAX;
+
+ /* this is the intermediate fragment. */
+ } else if ((idx = fp->last_idx) <
+ sizeof (fp->frags) / sizeof (fp->frags[0])) {
+ fp->last_idx++;
+ }
+
+ /*
+	 * erroneous packet: either exceeded the max allowed number of fragments,
+	 * or a duplicate first/last fragment was encountered.
+ */
+ if (idx >= sizeof (fp->frags) / sizeof (fp->frags[0])) {
+
+ /* report an error. */
+ if (fp->key.key_len == IPV4_KEYLEN)
+ IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
+ "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
+ "total_size: %u, frag_size: %u, last_idx: %u\n"
+ "first fragment: ofs: %u, len: %u\n"
+ "last fragment: ofs: %u, len: %u\n\n",
+ __func__, __LINE__,
+ fp, fp->key.src_dst[0], fp->key.id,
+ fp->total_size, fp->frag_size, fp->last_idx,
+ fp->frags[IP_FIRST_FRAG_IDX].ofs,
+ fp->frags[IP_FIRST_FRAG_IDX].len,
+ fp->frags[IP_LAST_FRAG_IDX].ofs,
+ fp->frags[IP_LAST_FRAG_IDX].len);
+ else
+ IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
+ "ipv4_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
+ "total_size: %u, frag_size: %u, last_idx: %u\n"
+ "first fragment: ofs: %u, len: %u\n"
+ "last fragment: ofs: %u, len: %u\n\n",
+ __func__, __LINE__,
+ fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id,
+ fp->total_size, fp->frag_size, fp->last_idx,
+ fp->frags[IP_FIRST_FRAG_IDX].ofs,
+ fp->frags[IP_FIRST_FRAG_IDX].len,
+ fp->frags[IP_LAST_FRAG_IDX].ofs,
+ fp->frags[IP_LAST_FRAG_IDX].len);
+
+ /* free all fragments, invalidate the entry. */
+ ip_frag_free(fp, dr);
+ ip_frag_key_invalidate(&fp->key);
+ IP_FRAG_MBUF2DR(dr, mb);
+
+ return (NULL);
+ }
+
+ fp->frags[idx].ofs = ofs;
+ fp->frags[idx].len = len;
+ fp->frags[idx].mb = mb;
+
+ mb = NULL;
+
+ /* not all fragments are collected yet. */
+ if (likely (fp->frag_size < fp->total_size)) {
+ return (mb);
+
+ /* if we collected all fragments, then try to reassemble. */
+ } else if (fp->frag_size == fp->total_size &&
+ fp->frags[IP_FIRST_FRAG_IDX].mb != NULL) {
+ if (fp->key.key_len == IPV4_KEYLEN)
+ mb = ipv4_frag_reassemble(fp);
+ else
+ mb = ipv6_frag_reassemble(fp);
+ }
+
+	/* erroneous set of fragments. */
+ if (mb == NULL) {
+
+ /* report an error. */
+ if (fp->key.key_len == IPV4_KEYLEN)
+ IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
+ "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
+ "total_size: %u, frag_size: %u, last_idx: %u\n"
+ "first fragment: ofs: %u, len: %u\n"
+ "last fragment: ofs: %u, len: %u\n\n",
+ __func__, __LINE__,
+ fp, fp->key.src_dst[0], fp->key.id,
+ fp->total_size, fp->frag_size, fp->last_idx,
+ fp->frags[IP_FIRST_FRAG_IDX].ofs,
+ fp->frags[IP_FIRST_FRAG_IDX].len,
+ fp->frags[IP_LAST_FRAG_IDX].ofs,
+ fp->frags[IP_LAST_FRAG_IDX].len);
+ else
+ IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
+ "ipv4_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
+ "total_size: %u, frag_size: %u, last_idx: %u\n"
+ "first fragment: ofs: %u, len: %u\n"
+ "last fragment: ofs: %u, len: %u\n\n",
+ __func__, __LINE__,
+ fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id,
+ fp->total_size, fp->frag_size, fp->last_idx,
+ fp->frags[IP_FIRST_FRAG_IDX].ofs,
+ fp->frags[IP_FIRST_FRAG_IDX].len,
+ fp->frags[IP_LAST_FRAG_IDX].ofs,
+ fp->frags[IP_LAST_FRAG_IDX].len);
+
+ /* free associated resources. */
+ ip_frag_free(fp, dr);
+ }
+
+ /* we are done with that entry, invalidate it. */
+ ip_frag_key_invalidate(&fp->key);
+ return (mb);
+}
+
+
+/*
+ * Find an entry in the table for the corresponding fragment.
+ * If such entry is not present, then allocate a new one.
+ * If the entry is stale, then free and reuse it.
+ */
+struct ip_frag_pkt *
+ip_frag_find(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
+ const struct ip_frag_key *key, uint64_t tms)
+{
+ struct ip_frag_pkt *pkt, *free, *stale, *lru;
+ uint64_t max_cycles;
+
+ /*
+	 * Actually the two lines below are redundant;
+	 * they are here just to make gcc 4.6 happy.
+ */
+ free = NULL;
+ stale = NULL;
+ max_cycles = tbl->max_cycles;
+
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, find_num, 1);
+
+ if ((pkt = ip_frag_lookup(tbl, key, tms, &free, &stale)) == NULL) {
+
+		/* timed-out entry, free and invalidate it */
+ if (stale != NULL) {
+ ip_frag_tbl_del(tbl, dr, stale);
+ free = stale;
+
+ /*
+ * we found a free entry, but the table already holds
+ * its maximum number of in-use entries: evict the
+ * oldest (LRU) entry if it has timed out, otherwise
+ * report that there is no space left.
+ */
+ } else if (free != NULL &&
+ tbl->max_entries <= tbl->use_entries) {
+ lru = TAILQ_FIRST(&tbl->lru);
+ if (max_cycles + lru->start < tms) {
+ ip_frag_tbl_del(tbl, dr, lru);
+ } else {
+ free = NULL;
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat,
+ fail_nospace, 1);
+ }
+ }
+
+ /* found a free entry to reuse. */
+ if (free != NULL) {
+ ip_frag_tbl_add(tbl, free, key, tms);
+ pkt = free;
+ }
+
+ /*
+ * we found the flow, but it is already timed out,
+ * so free associated resources, reposition it in the LRU list,
+ * and reuse it.
+ */
+ } else if (max_cycles + pkt->start < tms) {
+ ip_frag_tbl_reuse(tbl, dr, pkt, tms);
+ }
+
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, fail_total, (pkt == NULL));
+
+ tbl->last = pkt;
+ return (pkt);
+}
+
+struct ip_frag_pkt *
+ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
+ const struct ip_frag_key *key, uint64_t tms,
+ struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
+{
+ struct ip_frag_pkt *p1, *p2;
+ struct ip_frag_pkt *empty, *old;
+ uint64_t max_cycles;
+ uint32_t i, assoc, sig1, sig2;
+
+ empty = NULL;
+ old = NULL;
+
+ max_cycles = tbl->max_cycles;
+ assoc = tbl->bucket_entries;
+
+ if (tbl->last != NULL && ip_frag_key_cmp(key, &tbl->last->key) == 0)
+ return (tbl->last);
+
+ /* different hashing methods for IPv4 and IPv6 */
+ if (key->key_len == IPV4_KEYLEN)
+ ipv4_frag_hash(key, &sig1, &sig2);
+ else
+ ipv6_frag_hash(key, &sig1, &sig2);
+
+ p1 = IP_FRAG_TBL_POS(tbl, sig1);
+ p2 = IP_FRAG_TBL_POS(tbl, sig2);
+
+ for (i = 0; i != assoc; i++) {
+ if (p1->key.key_len == IPV4_KEYLEN)
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
+ "tbl: %p, max_entries: %u, use_entries: %u\n"
+ "ipv6_frag_pkt line0: %p, index: %u from %u\n"
+ "key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
+ __func__, __LINE__,
+ tbl, tbl->max_entries, tbl->use_entries,
+ p1, i, assoc,
+ p1[i].key.src_dst[0], p1[i].key.id, p1[i].start);
+ else
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
+ "tbl: %p, max_entries: %u, use_entries: %u\n"
+ "ipv6_frag_pkt line0: %p, index: %u from %u\n"
+ "key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64 "\n",
+ __func__, __LINE__,
+ tbl, tbl->max_entries, tbl->use_entries,
+ p1, i, assoc,
+ IPv6_KEY_BYTES(p1[i].key.src_dst), p1[i].key.id, p1[i].start);
+
+ if (ip_frag_key_cmp(key, &p1[i].key) == 0)
+ return (p1 + i);
+ else if (ip_frag_key_is_empty(&p1[i].key))
+ empty = (empty == NULL) ? (p1 + i) : empty;
+ else if (max_cycles + p1[i].start < tms)
+ old = (old == NULL) ? (p1 + i) : old;
+
+ if (p2->key.key_len == IPV4_KEYLEN)
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
+ "tbl: %p, max_entries: %u, use_entries: %u\n"
+ "ipv6_frag_pkt line1: %p, index: %u from %u\n"
+ "key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
+ __func__, __LINE__,
+ tbl, tbl->max_entries, tbl->use_entries,
+ p2, i, assoc,
+ p2[i].key.src_dst[0], p2[i].key.id, p2[i].start);
+ else
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
+ "tbl: %p, max_entries: %u, use_entries: %u\n"
+ "ipv6_frag_pkt line1: %p, index: %u from %u\n"
+ "key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64 "\n",
+ __func__, __LINE__,
+ tbl, tbl->max_entries, tbl->use_entries,
+ p2, i, assoc,
+ IPv6_KEY_BYTES(p2[i].key.src_dst), p2[i].key.id, p2[i].start);
+
+ if (ip_frag_key_cmp(key, &p2[i].key) == 0)
+ return (p2 + i);
+ else if (ip_frag_key_is_empty(&p2[i].key))
+ empty = (empty == NULL) ? (p2 + i) : empty;
+ else if (max_cycles + p2[i].start < tms)
+ old = (old == NULL) ? (p2 + i) : old;
+ }
+
+ *free = empty;
+ *stale = old;
+ return (NULL);
+}
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ip_frag.h b/src/dpdk_lib18/librte_ip_frag/rte_ip_frag.h
new file mode 100755
index 00000000..3989a5a8
--- /dev/null
+++ b/src/dpdk_lib18/librte_ip_frag/rte_ip_frag.h
@@ -0,0 +1,353 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_IP_FRAG_H_
+#define _RTE_IP_FRAG_H_
+
+/**
+ * @file
+ * RTE IP Fragmentation and Reassembly
+ *
+ * Implementation of IP packet fragmentation and reassembly.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+
+enum {
+ IP_LAST_FRAG_IDX, /**< index of last fragment */
+ IP_FIRST_FRAG_IDX, /**< index of first fragment */
+ IP_MIN_FRAG_NUM, /**< minimum number of fragments */
+ IP_MAX_FRAG_NUM = RTE_LIBRTE_IP_FRAG_MAX_FRAG,
+ /**< maximum number of fragments per packet */
+};
+
+/** @internal fragmented mbuf */
+struct ip_frag {
+ uint16_t ofs; /**< offset into the packet */
+ uint16_t len; /**< length of fragment */
+ struct rte_mbuf *mb; /**< fragment mbuf */
+};
+
+/** @internal <src addr, dst addr, id> to uniquely identify fragmented datagram. */
+struct ip_frag_key {
+ uint64_t src_dst[4]; /**< src and dst addresses; only the first 8 bytes are used for IPv4 */
+ uint32_t id; /**< fragmentation ID from the IP header */
+ uint32_t key_len; /**< src/dst key length */
+};
+
+/*
+ * @internal Fragmented packet to reassemble.
+ * First two entries in the frags[] array are for the last and first fragments.
+ */
+struct ip_frag_pkt {
+ TAILQ_ENTRY(ip_frag_pkt) lru; /**< LRU list */
+ struct ip_frag_key key; /**< fragmentation key */
+ uint64_t start; /**< creation timestamp */
+ uint32_t total_size; /**< expected reassembled size */
+ uint32_t frag_size; /**< size of fragments received */
+ uint32_t last_idx; /**< index of next entry to fill */
+ struct ip_frag frags[IP_MAX_FRAG_NUM]; /**< fragments */
+} __rte_cache_aligned;
+
+#define IP_FRAG_DEATH_ROW_LEN 32 /**< death row size (in packets) */
+
+/** mbuf death row (packets to be freed) */
+struct rte_ip_frag_death_row {
+ uint32_t cnt; /**< number of mbufs currently on death row */
+ struct rte_mbuf *row[IP_FRAG_DEATH_ROW_LEN * (IP_MAX_FRAG_NUM + 1)];
+ /**< mbufs to be freed */
+};
+
+TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal fragments tailq */
+
+/** fragmentation table statistics */
+struct ip_frag_tbl_stat {
+ uint64_t find_num; /**< total # of find/insert attempts. */
+ uint64_t add_num; /**< # of add ops. */
+ uint64_t del_num; /**< # of del ops. */
+ uint64_t reuse_num; /**< # of reuse (del/add) ops. */
+ uint64_t fail_total; /**< total # of add failures. */
+ uint64_t fail_nospace; /**< # of 'no space' add failures. */
+} __rte_cache_aligned;
+
+/** fragmentation table */
+struct rte_ip_frag_tbl {
+ uint64_t max_cycles; /**< ttl for table entries. */
+ uint32_t entry_mask; /**< hash value mask. */
+ uint32_t max_entries; /**< max entries allowed. */
+ uint32_t use_entries; /**< entries in use. */
+ uint32_t bucket_entries; /**< hash associativity. */
+ uint32_t nb_entries; /**< total size of the table. */
+ uint32_t nb_buckets; /**< num of associativity lines. */
+ struct ip_frag_pkt *last; /**< last used entry. */
+ struct ip_pkt_list lru; /**< LRU list for table entries. */
+ struct ip_frag_tbl_stat stat; /**< statistics counters. */
+ struct ip_frag_pkt pkt[0]; /**< hash table. */
+};
+
+/** IPv6 fragment extension header */
+struct ipv6_extension_fragment {
+ uint8_t next_header; /**< Next header type */
+ uint8_t reserved1; /**< Reserved */
+ union {
+ struct {
+ uint16_t frag_offset:13; /**< Offset from the start of the packet */
+ uint16_t reserved2:2; /**< Reserved */
+ uint16_t more_frags:1;
+ /**< 1 if more fragments left, 0 if last fragment */
+ };
+ uint16_t frag_data;
+ /**< union of all fragmentation data */
+ };
+ uint32_t id; /**< Packet ID */
+} __attribute__((__packed__));
+
+
+
+/*
+ * Create a new IP fragmentation table.
+ *
+ * @param bucket_num
+ * Number of buckets in the hash table.
+ * @param bucket_entries
+ * Number of entries per bucket (e.g. hash associativity).
+ * Should be a power of two.
+ * @param max_entries
+ * Maximum number of entries that could be stored in the table.
+ * The value should be less than or equal to bucket_num * bucket_entries.
+ * @param max_cycles
+ * Maximum TTL in cycles for each fragmented packet.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in the case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there are no NUMA constraints.
+ * @return
+ * A pointer to the newly allocated fragmentation table on success, NULL on error.
+ */
+struct rte_ip_frag_tbl * rte_ip_frag_table_create(uint32_t bucket_num,
+ uint32_t bucket_entries, uint32_t max_entries,
+ uint64_t max_cycles, int socket_id);
+
+/*
+ * Free allocated IP fragmentation table.
+ *
+ * @param tbl
+ * Fragmentation table to free.
+ */
+static inline void
+rte_ip_frag_table_destroy( struct rte_ip_frag_tbl *tbl)
+{
+ rte_free(tbl);
+}
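+
+/*
+ * Usage sketch (illustrative only, not part of this patch): allocate a table
+ * sized for up to 4096 * 16 concurrently tracked fragmented flows with a
+ * roughly 100 ms per-entry TTL, and release it on shutdown. The bucket count,
+ * associativity and TTL below are assumed example values, not recommendations.
+ *
+ *	uint64_t frag_cycles = (rte_get_tsc_hz() / MS_PER_S) * 100;
+ *	struct rte_ip_frag_tbl *frag_tbl = rte_ip_frag_table_create(4096, 16,
+ *		4096 * 16, frag_cycles, SOCKET_ID_ANY);
+ *
+ *	if (frag_tbl == NULL)
+ *		rte_exit(EXIT_FAILURE, "cannot create IP fragmentation table\n");
+ *	...
+ *	rte_ip_frag_table_destroy(frag_tbl);
+ *
+ * MS_PER_S (1000) is assumed to be defined by the application.
+ */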
+
+#ifdef RTE_MBUF_REFCNT
+/**
+ * This function implements the fragmentation of IPv6 packets.
+ *
+ * @param pkt_in
+ * The input packet.
+ * @param pkts_out
+ * Array storing the output fragments.
+ * @param nb_pkts_out
+ * Size of the pkts_out array, i.e. the maximum number of output fragments.
+ * @param mtu_size
+ * Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv6
+ * datagrams. This value includes the size of the IPv6 header.
+ * @param pool_direct
+ * MBUF pool used for allocating direct buffers for the output fragments.
+ * @param pool_indirect
+ * MBUF pool used for allocating indirect buffers for the output fragments.
+ * @return
+ * Upon successful completion - number of output fragments placed
+ * in the pkts_out array.
+ * Otherwise - (-1) * errno.
+ */
+int32_t
+rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out,
+ uint16_t mtu_size,
+ struct rte_mempool *pool_direct,
+ struct rte_mempool *pool_indirect);
+#endif
+
+/*
+ * This function implements reassembly of fragmented IPv6 packets.
+ * Incoming mbuf should have its l2_len/l3_len fields set up correctly.
+ *
+ * @param tbl
+ * Table where to lookup/add the fragmented packet.
+ * @param dr
+ * Death row to free buffers to
+ * @param mb
+ * Incoming mbuf with IPv6 fragment.
+ * @param tms
+ * Fragment arrival timestamp.
+ * @param ip_hdr
+ * Pointer to the IPv6 header.
+ * @param frag_hdr
+ * Pointer to the IPv6 fragment extension header.
+ * @return
+ * Pointer to mbuf for reassembled packet, or NULL if:
+ * - an error occurred.
+ * - not all fragments of the packet are collected yet.
+ */
+struct rte_mbuf *rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
+ struct rte_ip_frag_death_row *dr,
+ struct rte_mbuf *mb, uint64_t tms, struct ipv6_hdr *ip_hdr,
+ struct ipv6_extension_fragment *frag_hdr);
+
+/*
+ * Return a pointer to the packet's fragment header, if found.
+ * It only looks at the extension header that's right after the fixed IPv6
+ * header, and doesn't follow the whole chain of extension headers.
+ *
+ * @param hdr
+ * Pointer to the IPv6 header.
+ * @return
+ * Pointer to the IPv6 fragment extension header, or NULL if it's not
+ * present.
+ */
+static inline struct ipv6_extension_fragment *
+rte_ipv6_frag_get_ipv6_fragment_header(struct ipv6_hdr *hdr)
+{
+ if (hdr->proto == IPPROTO_FRAGMENT) {
+ return (struct ipv6_extension_fragment *) ++hdr;
+ }
+ else
+ return NULL;
+}
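+
+/*
+ * Usage sketch (illustrative only, not part of this patch): typical RX-side
+ * handling of a potentially fragmented IPv6 packet. 'frag_tbl', 'death_row'
+ * and the mbuf 'm' (with l2_len already set) are assumed to exist in the
+ * caller; the timestamp comes from rte_rdtsc().
+ *
+ *	struct ipv6_hdr *ip6_hdr = (struct ipv6_hdr *)
+ *		(rte_pktmbuf_mtod(m, char *) + m->l2_len);
+ *	struct ipv6_extension_fragment *frag_hdr =
+ *		rte_ipv6_frag_get_ipv6_fragment_header(ip6_hdr);
+ *
+ *	if (frag_hdr != NULL) {
+ *		m->l3_len = sizeof(*ip6_hdr) + sizeof(*frag_hdr);
+ *		m = rte_ipv6_frag_reassemble_packet(frag_tbl, &death_row,
+ *			m, rte_rdtsc(), ip6_hdr, frag_hdr);
+ *		if (m == NULL)
+ *			return;
+ *	}
+ *	rte_ip_frag_free_death_row(&death_row, 3);
+ *
+ * A NULL return means the datagram is not complete yet (or was dropped), so
+ * there is nothing to forward; rte_ip_frag_free_death_row() is typically
+ * called once per RX burst rather than once per packet.
+ */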
+
+#ifdef RTE_MBUF_REFCNT
+/**
+ * IPv4 fragmentation.
+ *
+ * This function implements the fragmentation of IPv4 packets.
+ *
+ * @param pkt_in
+ * The input packet.
+ * @param pkts_out
+ * Array storing the output fragments.
+ * @param nb_pkts_out
+ * Size of the pkts_out array, i.e. the maximum number of output fragments.
+ * @param mtu_size
+ * Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ * datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ * MBUF pool used for allocating direct buffers for the output fragments.
+ * @param pool_indirect
+ * MBUF pool used for allocating indirect buffers for the output fragments.
+ * @return
+ * Upon successful completion - number of output fragments placed
+ * in the pkts_out array.
+ * Otherwise - (-1) * errno.
+ */
+int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out, uint16_t mtu_size,
+ struct rte_mempool *pool_direct,
+ struct rte_mempool *pool_indirect);
+#endif
+
+/*
+ * This function implements reassembly of fragmented IPv4 packets.
+ * Incoming mbuf should have its l2_len/l3_len fields set up correctly.
+ *
+ * @param tbl
+ * Table where to lookup/add the fragmented packet.
+ * @param dr
+ * Death row to free buffers to
+ * @param mb
+ * Incoming mbuf with IPv4 fragment.
+ * @param tms
+ * Fragment arrival timestamp.
+ * @param ip_hdr
+ * Pointer to the IPV4 header inside the fragment.
+ * @return
+ * Pointer to mbuf for reassembled packet, or NULL if:
+ * - an error occurred.
+ * - not all fragments of the packet are collected yet.
+ */
+struct rte_mbuf * rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
+ struct rte_ip_frag_death_row *dr,
+ struct rte_mbuf *mb, uint64_t tms, struct ipv4_hdr *ip_hdr);
+
+/*
+ * Check if the IPv4 packet is fragmented
+ *
+ * @param hdr
+ * IPv4 header of the packet
+ * @return
+ * 1 if fragmented, 0 if not fragmented
+ */
+static inline int
+rte_ipv4_frag_pkt_is_fragmented(const struct ipv4_hdr * hdr) {
+ uint16_t flag_offset, ip_flag, ip_ofs;
+
+ flag_offset = rte_be_to_cpu_16(hdr->fragment_offset);
+ ip_ofs = (uint16_t)(flag_offset & IPV4_HDR_OFFSET_MASK);
+ ip_flag = (uint16_t)(flag_offset & IPV4_HDR_MF_FLAG);
+
+ return ip_flag != 0 || ip_ofs != 0;
+}
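+
+/*
+ * Usage sketch (illustrative only, not part of this patch): RX-side IPv4
+ * reassembly built from the calls above. 'frag_tbl', 'death_row' and the
+ * mbuf 'm' (l2_len set to the Ethernet header size) are assumed to exist in
+ * the caller; the IPv4 header is assumed to carry no options.
+ *
+ *	struct ipv4_hdr *ip_hdr = (struct ipv4_hdr *)
+ *		(rte_pktmbuf_mtod(m, char *) + m->l2_len);
+ *
+ *	if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr)) {
+ *		m->l3_len = sizeof(*ip_hdr);
+ *		m = rte_ipv4_frag_reassemble_packet(frag_tbl, &death_row,
+ *			m, rte_rdtsc(), ip_hdr);
+ *		if (m == NULL)
+ *			return;
+ *	}
+ *
+ * A NULL return means either an error or that not all fragments have arrived
+ * yet; in both cases the mbuf is already owned by the table/death row and
+ * must not be freed again by the caller.
+ */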
+
+/*
+ * Free mbufs on a given death row.
+ *
+ * @param dr
+ * Death row to free mbufs in.
+ * @param prefetch
+ * How many buffers to prefetch before freeing.
+ */
+void rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
+ uint32_t prefetch);
+
+
+/*
+ * Dump fragmentation table statistics to file.
+ *
+ * @param f
+ * File to dump statistics to
+ * @param tbl
+ * Fragmentation table to dump statistics from
+ */
+void
+rte_ip_frag_table_statistics_dump(FILE * f, const struct rte_ip_frag_tbl *tbl);
+
+#endif /* _RTE_IP_FRAG_H_ */
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ip_frag_common.c b/src/dpdk_lib18/librte_ip_frag/rte_ip_frag_common.c
new file mode 100755
index 00000000..c982d8cc
--- /dev/null
+++ b/src/dpdk_lib18/librte_ip_frag/rte_ip_frag_common.c
@@ -0,0 +1,139 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <stdio.h>
+
+#include <rte_memory.h>
+#include <rte_log.h>
+
+#include "ip_frag_common.h"
+
+#define IP_FRAG_HASH_FNUM 2
+
+/* free mbufs from death row */
+void
+rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
+ uint32_t prefetch)
+{
+ uint32_t i, k, n;
+
+ k = RTE_MIN(prefetch, dr->cnt);
+ n = dr->cnt;
+
+ for (i = 0; i != k; i++)
+ rte_prefetch0(dr->row[i]);
+
+ for (i = 0; i != n - k; i++) {
+ rte_prefetch0(dr->row[i + k]);
+ rte_pktmbuf_free(dr->row[i]);
+ }
+
+ for (; i != n; i++)
+ rte_pktmbuf_free(dr->row[i]);
+
+ dr->cnt = 0;
+}
+
+/* create fragmentation table */
+struct rte_ip_frag_tbl *
+rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
+ uint32_t max_entries, uint64_t max_cycles, int socket_id)
+{
+ struct rte_ip_frag_tbl *tbl;
+ size_t sz;
+ uint64_t nb_entries;
+
+ nb_entries = rte_align32pow2(bucket_num);
+ nb_entries *= bucket_entries;
+ nb_entries *= IP_FRAG_HASH_FNUM;
+
+ /* check input parameters. */
+ if (rte_is_power_of_2(bucket_entries) == 0 ||
+ nb_entries > UINT32_MAX || nb_entries == 0 ||
+ nb_entries < max_entries) {
+ RTE_LOG(ERR, USER1, "%s: invalid input parameter\n", __func__);
+ return (NULL);
+ }
+
+ sz = sizeof (*tbl) + nb_entries * sizeof (tbl->pkt[0]);
+ if ((tbl = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
+ socket_id)) == NULL) {
+ RTE_LOG(ERR, USER1,
+ "%s: allocation of %zu bytes at socket %d failed do\n",
+ __func__, sz, socket_id);
+ return (NULL);
+ }
+
+ RTE_LOG(INFO, USER1, "%s: allocated of %zu bytes at socket %d\n",
+ __func__, sz, socket_id);
+
+ tbl->max_cycles = max_cycles;
+ tbl->max_entries = max_entries;
+ tbl->nb_entries = (uint32_t)nb_entries;
+ tbl->nb_buckets = bucket_num;
+ tbl->bucket_entries = bucket_entries;
+ tbl->entry_mask = (tbl->nb_entries - 1) & ~(tbl->bucket_entries - 1);
+
+ TAILQ_INIT(&(tbl->lru));
+ return (tbl);
+}
+
+/* dump frag table statistics to file */
+void
+rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl)
+{
+ uint64_t fail_total, fail_nospace;
+
+ fail_total = tbl->stat.fail_total;
+ fail_nospace = tbl->stat.fail_nospace;
+
+ fprintf(f, "max entries:\t%u;\n"
+ "entries in use:\t%u;\n"
+ "finds/inserts:\t%" PRIu64 ";\n"
+ "entries added:\t%" PRIu64 ";\n"
+ "entries deleted by timeout:\t%" PRIu64 ";\n"
+ "entries reused by timeout:\t%" PRIu64 ";\n"
+ "total add failures:\t%" PRIu64 ";\n"
+ "add no-space failures:\t%" PRIu64 ";\n"
+ "add hash-collisions failures:\t%" PRIu64 ";\n",
+ tbl->max_entries,
+ tbl->use_entries,
+ tbl->stat.find_num,
+ tbl->stat.add_num,
+ tbl->stat.del_num,
+ tbl->stat.reuse_num,
+ fail_total,
+ fail_nospace,
+ fail_total - fail_nospace);
+}
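+
+/*
+ * Usage sketch (illustrative only, not part of this patch): the counters can
+ * be printed on demand, e.g. from a signal handler or a periodic stats
+ * routine, assuming 'frag_tbl' is the table created by the application:
+ *
+ *	rte_ip_frag_table_statistics_dump(stdout, frag_tbl);
+ */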
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ipv4_fragmentation.c b/src/dpdk_lib18/librte_ip_frag/rte_ipv4_fragmentation.c
new file mode 100755
index 00000000..a4ed9238
--- /dev/null
+++ b/src/dpdk_lib18/librte_ip_frag/rte_ipv4_fragmentation.c
@@ -0,0 +1,209 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <errno.h>
+
+#include <rte_memcpy.h>
+#include <rte_mempool.h>
+#include <rte_debug.h>
+
+#include "ip_frag_common.h"
+
+/* Fragment Offset */
+#define IPV4_HDR_DF_SHIFT 14
+#define IPV4_HDR_MF_SHIFT 13
+#define IPV4_HDR_FO_SHIFT 3
+
+#define IPV4_HDR_DF_MASK (1 << IPV4_HDR_DF_SHIFT)
+#define IPV4_HDR_MF_MASK (1 << IPV4_HDR_MF_SHIFT)
+
+#define IPV4_HDR_FO_MASK ((1 << IPV4_HDR_FO_SHIFT) - 1)
+
+static inline void __fill_ipv4hdr_frag(struct ipv4_hdr *dst,
+ const struct ipv4_hdr *src, uint16_t len, uint16_t fofs,
+ uint16_t dofs, uint32_t mf)
+{
+ rte_memcpy(dst, src, sizeof(*dst));
+ fofs = (uint16_t)(fofs + (dofs >> IPV4_HDR_FO_SHIFT));
+ fofs = (uint16_t)(fofs | mf << IPV4_HDR_MF_SHIFT);
+ dst->fragment_offset = rte_cpu_to_be_16(fofs);
+ dst->total_length = rte_cpu_to_be_16(len);
+ dst->hdr_checksum = 0;
+}
+
+static inline void __free_fragments(struct rte_mbuf *mb[], uint32_t num)
+{
+ uint32_t i;
+ for (i = 0; i != num; i++)
+ rte_pktmbuf_free(mb[i]);
+}
+
+/**
+ * IPv4 fragmentation.
+ *
+ * This function implements the fragmentation of IPv4 packets.
+ *
+ * @param pkt_in
+ * The input packet.
+ * @param pkts_out
+ * Array storing the output fragments.
+ * @param mtu_size
+ * Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ * datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ * MBUF pool used for allocating direct buffers for the output fragments.
+ * @param pool_indirect
+ * MBUF pool used for allocating indirect buffers for the output fragments.
+ * @return
+ * Upon successful completion - number of output fragments placed
+ * in the pkts_out array.
+ * Otherwise - (-1) * <errno>.
+ */
+int32_t
+rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out,
+ uint16_t mtu_size,
+ struct rte_mempool *pool_direct,
+ struct rte_mempool *pool_indirect)
+{
+ struct rte_mbuf *in_seg = NULL;
+ struct ipv4_hdr *in_hdr;
+ uint32_t out_pkt_pos, in_seg_data_pos;
+ uint32_t more_in_segs;
+ uint16_t fragment_offset, flag_offset, frag_size;
+
+ frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));
+
+ /* Fragment size should be a multiple of 8. */
+ IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
+
+ in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
+ flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
+
+ /* If Don't Fragment flag is set */
+ if (unlikely ((flag_offset & IPV4_HDR_DF_MASK) != 0))
+ return -ENOTSUP;
+
+ /* Check that pkts_out is big enough to hold all fragments */
+ if (unlikely(frag_size * nb_pkts_out <
+ (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv4_hdr))))
+ return -EINVAL;
+
+ in_seg = pkt_in;
+ in_seg_data_pos = sizeof(struct ipv4_hdr);
+ out_pkt_pos = 0;
+ fragment_offset = 0;
+
+ more_in_segs = 1;
+ while (likely(more_in_segs)) {
+ struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
+ uint32_t more_out_segs;
+ struct ipv4_hdr *out_hdr;
+
+ /* Allocate direct buffer */
+ out_pkt = rte_pktmbuf_alloc(pool_direct);
+ if (unlikely(out_pkt == NULL)) {
+ __free_fragments(pkts_out, out_pkt_pos);
+ return -ENOMEM;
+ }
+
+ /* Reserve space for the IP header that will be built later */
+ out_pkt->data_len = sizeof(struct ipv4_hdr);
+ out_pkt->pkt_len = sizeof(struct ipv4_hdr);
+
+ out_seg_prev = out_pkt;
+ more_out_segs = 1;
+ while (likely(more_out_segs && more_in_segs)) {
+ struct rte_mbuf *out_seg = NULL;
+ uint32_t len;
+
+ /* Allocate indirect buffer */
+ out_seg = rte_pktmbuf_alloc(pool_indirect);
+ if (unlikely(out_seg == NULL)) {
+ rte_pktmbuf_free(out_pkt);
+ __free_fragments(pkts_out, out_pkt_pos);
+ return -ENOMEM;
+ }
+ out_seg_prev->next = out_seg;
+ out_seg_prev = out_seg;
+
+ /* Prepare indirect buffer */
+ rte_pktmbuf_attach(out_seg, in_seg);
+ len = mtu_size - out_pkt->pkt_len;
+ if (len > (in_seg->data_len - in_seg_data_pos)) {
+ len = in_seg->data_len - in_seg_data_pos;
+ }
+ out_seg->data_off = in_seg->data_off + in_seg_data_pos;
+ out_seg->data_len = (uint16_t)len;
+ out_pkt->pkt_len = (uint16_t)(len +
+ out_pkt->pkt_len);
+ out_pkt->nb_segs += 1;
+ in_seg_data_pos += len;
+
+ /* Current output packet (i.e. fragment) done ? */
+ if (unlikely(out_pkt->pkt_len >= mtu_size))
+ more_out_segs = 0;
+
+ /* Current input segment done ? */
+ if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+ in_seg = in_seg->next;
+ in_seg_data_pos = 0;
+
+ if (unlikely(in_seg == NULL))
+ more_in_segs = 0;
+ }
+ }
+
+ /* Build the IP header */
+
+ out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv4_hdr *);
+
+ __fill_ipv4hdr_frag(out_hdr, in_hdr,
+ (uint16_t)out_pkt->pkt_len,
+ flag_offset, fragment_offset, more_in_segs);
+
+ fragment_offset = (uint16_t)(fragment_offset +
+ out_pkt->pkt_len - sizeof(struct ipv4_hdr));
+
+ out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
+ out_pkt->l3_len = sizeof(struct ipv4_hdr);
+
+ /* Write the fragment to the output list */
+ pkts_out[out_pkt_pos] = out_pkt;
+ out_pkt_pos ++;
+ }
+
+ return out_pkt_pos;
+}
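+
+/*
+ * Usage sketch (illustrative only, not part of this patch): TX-side
+ * fragmentation of an oversized IPv4 datagram. The mbuf 'm' is assumed to
+ * start at the IPv4 header (L2 header already stripped); 'direct_pool' and
+ * 'indirect_pool' are application-owned mempools and MAX_FRAGS is an assumed
+ * application constant.
+ *
+ *	struct rte_mbuf *frags[MAX_FRAGS];
+ *	int32_t nf = rte_ipv4_fragment_packet(m, frags, MAX_FRAGS,
+ *		mtu_size, direct_pool, indirect_pool);
+ *
+ *	if (nf < 0) {
+ *		rte_pktmbuf_free(m);
+ *		return;
+ *	}
+ *
+ * On success the caller typically frees the original 'm', prepends a fresh L2
+ * header to each of the 'nf' fragments and transmits them; the IPv4 checksum
+ * is recomputed in software or offloaded via PKT_TX_IP_CKSUM, which this
+ * function already sets on every fragment.
+ */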
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ipv4_reassembly.c b/src/dpdk_lib18/librte_ip_frag/rte_ipv4_reassembly.c
new file mode 100755
index 00000000..0b8ceebd
--- /dev/null
+++ b/src/dpdk_lib18/librte_ip_frag/rte_ipv4_reassembly.c
@@ -0,0 +1,183 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+
+#include <rte_debug.h>
+#include <rte_tailq.h>
+
+#include "ip_frag_common.h"
+
+/*
+ * Reassemble fragments into one packet.
+ */
+struct rte_mbuf *
+ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
+{
+ struct ipv4_hdr *ip_hdr;
+ struct rte_mbuf *m, *prev;
+ uint32_t i, n, ofs, first_len;
+
+ first_len = fp->frags[IP_FIRST_FRAG_IDX].len;
+ n = fp->last_idx - 1;
+
+ /* start from the last fragment. */
+ m = fp->frags[IP_LAST_FRAG_IDX].mb;
+ ofs = fp->frags[IP_LAST_FRAG_IDX].ofs;
+
+ while (ofs != first_len) {
+
+ prev = m;
+
+ for (i = n; i != IP_FIRST_FRAG_IDX && ofs != first_len; i--) {
+
+ /* previous fragment found. */
+ if(fp->frags[i].ofs + fp->frags[i].len == ofs) {
+
+ ip_frag_chain(fp->frags[i].mb, m);
+
+ /* update our last fragment and offset. */
+ m = fp->frags[i].mb;
+ ofs = fp->frags[i].ofs;
+ }
+ }
+
+ /* error - hole in the packet. */
+ if (m == prev) {
+ return (NULL);
+ }
+ }
+
+ /* chain with the first fragment. */
+ ip_frag_chain(fp->frags[IP_FIRST_FRAG_IDX].mb, m);
+ m = fp->frags[IP_FIRST_FRAG_IDX].mb;
+
+ /* update mbuf fields for reassembled packet. */
+ m->ol_flags |= PKT_TX_IP_CKSUM;
+
+ /* update ipv4 header for the reassembled packet */
+ ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
+ m->l2_len);
+
+ ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
+ m->l3_len));
+ ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
+ rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
+ ip_hdr->hdr_checksum = 0;
+
+ return (m);
+}
+
+/*
+ * Process new mbuf with fragment of IPV4 packet.
+ * Incoming mbuf should have its l2_len/l3_len fields set up correctly.
+ * @param tbl
+ * Table where to lookup/add the fragmented packet.
+ * @param mb
+ * Incoming mbuf with IPV4 fragment.
+ * @param tms
+ * Fragment arrival timestamp.
+ * @param ip_hdr
+ * Pointer to the IPV4 header inside the fragment.
+ * @return
+ * Pointer to mbuf for reassembled packet, or NULL if:
+ * - an error occurred.
+ * - not all fragments of the packet are collected yet.
+ */
+struct rte_mbuf *
+rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
+ struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
+ struct ipv4_hdr *ip_hdr)
+{
+ struct ip_frag_pkt *fp;
+ struct ip_frag_key key;
+ const uint64_t *psd;
+ uint16_t ip_len;
+ uint16_t flag_offset, ip_ofs, ip_flag;
+
+ flag_offset = rte_be_to_cpu_16(ip_hdr->fragment_offset);
+ ip_ofs = (uint16_t)(flag_offset & IPV4_HDR_OFFSET_MASK);
+ ip_flag = (uint16_t)(flag_offset & IPV4_HDR_MF_FLAG);
+
+ psd = (uint64_t *)&ip_hdr->src_addr;
+ /* use first 8 bytes only */
+ key.src_dst[0] = psd[0];
+ key.id = ip_hdr->packet_id;
+ key.key_len = IPV4_KEYLEN;
+
+ ip_ofs *= IPV4_HDR_OFFSET_UNITS;
+ ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
+ mb->l3_len);
+
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
+ "mbuf: %p, tms: %" PRIu64
+ ", key: <%" PRIx64 ", %#x>, ofs: %u, len: %u, flags: %#x\n"
+ "tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, "
+ "max_entries: %u, use_entries: %u\n\n",
+ __func__, __LINE__,
+ mb, tms, key.src_dst[0], key.id, ip_ofs, ip_len, ip_flag,
+ tbl, tbl->max_cycles, tbl->entry_mask, tbl->max_entries,
+ tbl->use_entries);
+
+ /* try to find/add entry into the fragment's table. */
+ if ((fp = ip_frag_find(tbl, dr, &key, tms)) == NULL) {
+ IP_FRAG_MBUF2DR(dr, mb);
+ return (NULL);
+ }
+
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
+ "tbl: %p, max_entries: %u, use_entries: %u\n"
+ "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64
+ ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
+ __func__, __LINE__,
+ tbl, tbl->max_entries, tbl->use_entries,
+ fp, fp->key.src_dst[0], fp->key.id, fp->start,
+ fp->total_size, fp->frag_size, fp->last_idx);
+
+
+ /* process the fragmented packet. */
+ mb = ip_frag_process(fp, dr, mb, ip_ofs, ip_len, ip_flag);
+ ip_frag_inuse(tbl, fp);
+
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
+ "mbuf: %p\n"
+ "tbl: %p, max_entries: %u, use_entries: %u\n"
+ "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64
+ ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
+ __func__, __LINE__, mb,
+ tbl, tbl->max_entries, tbl->use_entries,
+ fp, fp->key.src_dst[0], fp->key.id, fp->start,
+ fp->total_size, fp->frag_size, fp->last_idx);
+
+ return (mb);
+}
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ipv6_fragmentation.c b/src/dpdk_lib18/librte_ip_frag/rte_ipv6_fragmentation.c
new file mode 100755
index 00000000..4ffcc7c6
--- /dev/null
+++ b/src/dpdk_lib18/librte_ip_frag/rte_ipv6_fragmentation.c
@@ -0,0 +1,215 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <errno.h>
+
+#include <rte_memcpy.h>
+
+#include "ip_frag_common.h"
+
+/**
+ * @file
+ * RTE IPv6 Fragmentation
+ *
+ * Implementation of IPv6 fragmentation.
+ *
+ */
+
+/* Fragment Extension Header */
+#define IPV6_HDR_MF_SHIFT 0
+#define IPV6_HDR_FO_SHIFT 3
+#define IPV6_HDR_MF_MASK (1 << IPV6_HDR_MF_SHIFT)
+#define IPV6_HDR_FO_MASK ((1 << IPV6_HDR_FO_SHIFT) - 1)
+
+static inline void
+__fill_ipv6hdr_frag(struct ipv6_hdr *dst,
+ const struct ipv6_hdr *src, uint16_t len, uint16_t fofs,
+ uint32_t mf)
+{
+ struct ipv6_extension_fragment *fh;
+
+ rte_memcpy(dst, src, sizeof(*dst));
+ dst->payload_len = rte_cpu_to_be_16(len);
+ dst->proto = IPPROTO_FRAGMENT;
+
+ fh = (struct ipv6_extension_fragment *) ++dst;
+ fh->next_header = src->proto;
+ fh->reserved1 = 0;
+ fh->frag_offset = rte_cpu_to_be_16(fofs);
+ fh->reserved2 = 0;
+ fh->more_frags = rte_cpu_to_be_16(mf);
+ fh->id = 0;
+}
+
+static inline void
+__free_fragments(struct rte_mbuf *mb[], uint32_t num)
+{
+ uint32_t i;
+ for (i = 0; i < num; i++)
+ rte_pktmbuf_free(mb[i]);
+}
+
+/**
+ * IPv6 fragmentation.
+ *
+ * This function implements the fragmentation of IPv6 packets.
+ *
+ * @param pkt_in
+ * The input packet.
+ * @param pkts_out
+ * Array storing the output fragments.
+ * @param mtu_size
+ * Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv6
+ * datagrams. This value includes the size of the IPv6 header.
+ * @param pool_direct
+ * MBUF pool used for allocating direct buffers for the output fragments.
+ * @param pool_indirect
+ * MBUF pool used for allocating indirect buffers for the output fragments.
+ * @return
+ * Upon successful completion - number of output fragments placed
+ * in the pkts_out array.
+ * Otherwise - (-1) * <errno>.
+ */
+int32_t
+rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out,
+ uint16_t mtu_size,
+ struct rte_mempool *pool_direct,
+ struct rte_mempool *pool_indirect)
+{
+ struct rte_mbuf *in_seg = NULL;
+ struct ipv6_hdr *in_hdr;
+ uint32_t out_pkt_pos, in_seg_data_pos;
+ uint32_t more_in_segs;
+ uint16_t fragment_offset, frag_size;
+
+ frag_size = (uint16_t)(mtu_size - sizeof(struct ipv6_hdr));
+
+ /* Fragment size should be a multiple of 8. */
+ IP_FRAG_ASSERT((frag_size & IPV6_HDR_FO_MASK) == 0);
+
+ /* Check that pkts_out is big enough to hold all fragments */
+ if (unlikely (frag_size * nb_pkts_out <
+ (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv6_hdr))))
+ return (-EINVAL);
+
+ in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv6_hdr *);
+
+ in_seg = pkt_in;
+ in_seg_data_pos = sizeof(struct ipv6_hdr);
+ out_pkt_pos = 0;
+ fragment_offset = 0;
+
+ more_in_segs = 1;
+ while (likely(more_in_segs)) {
+ struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
+ uint32_t more_out_segs;
+ struct ipv6_hdr *out_hdr;
+
+ /* Allocate direct buffer */
+ out_pkt = rte_pktmbuf_alloc(pool_direct);
+ if (unlikely(out_pkt == NULL)) {
+ __free_fragments(pkts_out, out_pkt_pos);
+ return (-ENOMEM);
+ }
+
+ /* Reserve space for the IP header that will be built later */
+ out_pkt->data_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
+ out_pkt->pkt_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
+
+ out_seg_prev = out_pkt;
+ more_out_segs = 1;
+ while (likely(more_out_segs && more_in_segs)) {
+ struct rte_mbuf *out_seg = NULL;
+ uint32_t len;
+
+ /* Allocate indirect buffer */
+ out_seg = rte_pktmbuf_alloc(pool_indirect);
+ if (unlikely(out_seg == NULL)) {
+ rte_pktmbuf_free(out_pkt);
+ __free_fragments(pkts_out, out_pkt_pos);
+ return (-ENOMEM);
+ }
+ out_seg_prev->next = out_seg;
+ out_seg_prev = out_seg;
+
+ /* Prepare indirect buffer */
+ rte_pktmbuf_attach(out_seg, in_seg);
+ len = mtu_size - out_pkt->pkt_len;
+ if (len > (in_seg->data_len - in_seg_data_pos)) {
+ len = in_seg->data_len - in_seg_data_pos;
+ }
+ out_seg->data_off = in_seg->data_off + in_seg_data_pos;
+ out_seg->data_len = (uint16_t)len;
+ out_pkt->pkt_len = (uint16_t)(len +
+ out_pkt->pkt_len);
+ out_pkt->nb_segs += 1;
+ in_seg_data_pos += len;
+
+ /* Current output packet (i.e. fragment) done ? */
+ if (unlikely(out_pkt->pkt_len >= mtu_size)) {
+ more_out_segs = 0;
+ }
+
+ /* Current input segment done ? */
+ if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+ in_seg = in_seg->next;
+ in_seg_data_pos = 0;
+
+ if (unlikely(in_seg == NULL)) {
+ more_in_segs = 0;
+ }
+ }
+ }
+
+ /* Build the IP header */
+
+ out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv6_hdr *);
+
+ __fill_ipv6hdr_frag(out_hdr, in_hdr,
+ (uint16_t) out_pkt->pkt_len - sizeof(struct ipv6_hdr),
+ fragment_offset, more_in_segs);
+
+ fragment_offset = (uint16_t)(fragment_offset +
+ out_pkt->pkt_len - sizeof(struct ipv6_hdr)
+ - sizeof(struct ipv6_extension_fragment));
+
+ /* Write the fragment to the output list */
+ pkts_out[out_pkt_pos] = out_pkt;
+ out_pkt_pos ++;
+ }
+
+ return (out_pkt_pos);
+}
diff --git a/src/dpdk_lib18/librte_ip_frag/rte_ipv6_reassembly.c b/src/dpdk_lib18/librte_ip_frag/rte_ipv6_reassembly.c
new file mode 100755
index 00000000..71cf721c
--- /dev/null
+++ b/src/dpdk_lib18/librte_ip_frag/rte_ipv6_reassembly.c
@@ -0,0 +1,222 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+
+#include <rte_memcpy.h>
+
+#include "ip_frag_common.h"
+
+/**
+ * @file
+ * IPv6 Reassembly
+ *
+ * Implementation of IPv6 reassembly.
+ *
+ */
+
+static inline void
+ip_frag_memmove(char *dst, char *src, int len)
+{
+ int i;
+
+ /* go backwards to make sure we don't overwrite anything important */
+ for (i = len - 1; i >= 0; i--)
+ dst[i] = src[i];
+}
+
+/*
+ * Reassemble fragments into one packet.
+ */
+struct rte_mbuf *
+ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
+{
+ struct ipv6_hdr *ip_hdr;
+ struct ipv6_extension_fragment *frag_hdr;
+ struct rte_mbuf *m, *prev;
+ uint32_t i, n, ofs, first_len;
+ uint32_t last_len, move_len, payload_len;
+
+ first_len = fp->frags[IP_FIRST_FRAG_IDX].len;
+ n = fp->last_idx - 1;
+
+ /* start from the last fragment. */
+ m = fp->frags[IP_LAST_FRAG_IDX].mb;
+ ofs = fp->frags[IP_LAST_FRAG_IDX].ofs;
+ last_len = fp->frags[IP_LAST_FRAG_IDX].len;
+
+ payload_len = ofs + last_len;
+
+ while (ofs != first_len) {
+
+ prev = m;
+
+ for (i = n; i != IP_FIRST_FRAG_IDX && ofs != first_len; i--) {
+
+ /* previous fragment found. */
+ if (fp->frags[i].ofs + fp->frags[i].len == ofs) {
+
+ ip_frag_chain(fp->frags[i].mb, m);
+
+ /* update our last fragment and offset. */
+ m = fp->frags[i].mb;
+ ofs = fp->frags[i].ofs;
+ }
+ }
+
+ /* error - hole in the packet. */
+ if (m == prev) {
+ return NULL;
+ }
+ }
+
+ /* chain with the first fragment. */
+ ip_frag_chain(fp->frags[IP_FIRST_FRAG_IDX].mb, m);
+ m = fp->frags[IP_FIRST_FRAG_IDX].mb;
+
+ /* update mbuf fields for reassembled packet. */
+ m->ol_flags |= PKT_TX_IP_CKSUM;
+
+ /* update ipv6 header for the reassembled datagram */
+ ip_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(m, uint8_t *) +
+ m->l2_len);
+
+ ip_hdr->payload_len = rte_cpu_to_be_16(payload_len);
+
+ /*
+ * remove fragmentation header. note that per RFC2460, we need to update
+ * the last non-fragmentable header with the "next header" field to contain
+ * type of the first fragmentable header, but we currently don't support
+ * other headers, so we assume there are no other headers and thus update
+ * the main IPv6 header instead.
+ */
+ move_len = m->l2_len + m->l3_len - sizeof(*frag_hdr);
+ frag_hdr = (struct ipv6_extension_fragment *) (ip_hdr + 1);
+ ip_hdr->proto = frag_hdr->next_header;
+
+ ip_frag_memmove(rte_pktmbuf_mtod(m, char*) + sizeof(*frag_hdr),
+ rte_pktmbuf_mtod(m, char*), move_len);
+
+ rte_pktmbuf_adj(m, sizeof(*frag_hdr));
+
+ return m;
+}
+
+/*
+ * Process new mbuf with fragment of IPV6 datagram.
+ * Incoming mbuf should have its l2_len/l3_len fields set up correctly.
+ * @param tbl
+ * Table where to lookup/add the fragmented packet.
+ * @param mb
+ * Incoming mbuf with IPV6 fragment.
+ * @param tms
+ * Fragment arrival timestamp.
+ * @param ip_hdr
+ * Pointer to the IPV6 header.
+ * @param frag_hdr
+ * Pointer to the IPV6 fragment extension header.
+ * @return
+ * Pointer to mbuf for reassembled packet, or NULL if:
+ * - an error occurred.
+ * - not all fragments of the packet are collected yet.
+ */
+#define MORE_FRAGS(x) (((x) & 0x100) >> 8)
+#define FRAG_OFFSET(x) (rte_cpu_to_be_16(x) >> 3)
+struct rte_mbuf *
+rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
+ struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
+ struct ipv6_hdr *ip_hdr, struct ipv6_extension_fragment *frag_hdr)
+{
+ struct ip_frag_pkt *fp;
+ struct ip_frag_key key;
+ uint16_t ip_len, ip_ofs;
+
+ rte_memcpy(&key.src_dst[0], ip_hdr->src_addr, 16);
+ rte_memcpy(&key.src_dst[2], ip_hdr->dst_addr, 16);
+
+ key.id = frag_hdr->id;
+ key.key_len = IPV6_KEYLEN;
+
+ ip_ofs = FRAG_OFFSET(frag_hdr->frag_data) * 8;
+
+ /*
+ * as per RFC2460, payload length contains all extension headers as well.
+ * since we don't support anything but frag headers, this is what we remove
+ * from the payload len.
+ */
+ ip_len = rte_be_to_cpu_16(ip_hdr->payload_len) - sizeof(*frag_hdr);
+
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
+ "mbuf: %p, tms: %" PRIu64
+ ", key: <" IPv6_KEY_BYTES_FMT ", %#x>, ofs: %u, len: %u, flags: %#x\n"
+ "tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, "
+ "max_entries: %u, use_entries: %u\n\n",
+ __func__, __LINE__,
+ mb, tms, IPv6_KEY_BYTES(key.src_dst), key.id, ip_ofs, ip_len, frag_hdr->more_frags,
+ tbl, tbl->max_cycles, tbl->entry_mask, tbl->max_entries,
+ tbl->use_entries);
+
+ /* try to find/add entry into the fragment's table. */
+ fp = ip_frag_find(tbl, dr, &key, tms);
+ if (fp == NULL) {
+ IP_FRAG_MBUF2DR(dr, mb);
+ return NULL;
+ }
+
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
+ "tbl: %p, max_entries: %u, use_entries: %u\n"
+ "ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64
+ ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
+ __func__, __LINE__,
+ tbl, tbl->max_entries, tbl->use_entries,
+ fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id, fp->start,
+ fp->total_size, fp->frag_size, fp->last_idx);
+
+
+ /* process the fragmented packet. */
+ mb = ip_frag_process(fp, dr, mb, ip_ofs, ip_len,
+ MORE_FRAGS(frag_hdr->frag_data));
+ ip_frag_inuse(tbl, fp);
+
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
+ "mbuf: %p\n"
+ "tbl: %p, max_entries: %u, use_entries: %u\n"
+ "ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64
+ ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
+ __func__, __LINE__, mb,
+ tbl, tbl->max_entries, tbl->use_entries,
+ fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id, fp->start,
+ fp->total_size, fp->frag_size, fp->last_idx);
+
+ return mb;
+}
diff --git a/src/dpdk_lib18/librte_ivshmem/Makefile b/src/dpdk_lib18/librte_ivshmem/Makefile
new file mode 100755
index 00000000..536814c9
--- /dev/null
+++ b/src/dpdk_lib18/librte_ivshmem/Makefile
@@ -0,0 +1,48 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_ivshmem.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_IVSHMEM) := rte_ivshmem.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_IVSHMEM)-include := rte_ivshmem.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IVSHMEM) += lib/librte_mempool
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.c b/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.c
new file mode 100755
index 00000000..7ca55edb
--- /dev/null
+++ b/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.c
@@ -0,0 +1,884 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fcntl.h>
+#include <limits.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_eal_memconfig.h>
+#include <rte_memory.h>
+#include <rte_ivshmem.h>
+#include <rte_string_fns.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_spinlock.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "rte_ivshmem.h"
+
+#define IVSHMEM_CONFIG_FILE_FMT "/var/run/.dpdk_ivshmem_metadata_%s"
+#define IVSHMEM_QEMU_CMD_LINE_HEADER_FMT "-device ivshmem,size=%" PRIu64 "M,shm=fd%s"
+#define IVSHMEM_QEMU_CMD_FD_FMT ":%s:0x%" PRIx64 ":0x%" PRIx64
+#define IVSHMEM_QEMU_CMDLINE_BUFSIZE 1024
+#define IVSHMEM_MAX_PAGES (1 << 12)
+#define adjacent(x,y) (((x).phys_addr+(x).len)==(y).phys_addr)
+#define METADATA_SIZE_ALIGNED \
+ (RTE_ALIGN_CEIL(sizeof(struct rte_ivshmem_metadata),pagesz))
+
+#define GET_PAGEMAP_ADDR(in,addr,dlm,err) \
+{ \
+ char *end; \
+ errno = 0; \
+ addr = strtoull((in), &end, 16); \
+ if (errno != 0 || *end != (dlm)) { \
+ RTE_LOG(ERR, EAL, err); \
+ goto error; \
+ } \
+ (in) = end + 1; \
+}
+
+static int pagesz;
+
+struct memseg_cache_entry {
+ char filepath[PATH_MAX];
+ uint64_t offset;
+ uint64_t len;
+};
+
+struct ivshmem_config {
+ struct rte_ivshmem_metadata * metadata;
+ struct memseg_cache_entry memseg_cache[IVSHMEM_MAX_PAGES];
+ /**< account for multiple files per segment case */
+ struct flock lock;
+ rte_spinlock_t sl;
+};
+
+static struct ivshmem_config
+ivshmem_global_config[RTE_LIBRTE_IVSHMEM_MAX_METADATA_FILES];
+
+static rte_spinlock_t global_cfg_sl;
+
+static struct ivshmem_config *
+get_config_by_name(const char * name)
+{
+ struct rte_ivshmem_metadata * config;
+ unsigned i;
+
+ for (i = 0; i < RTE_DIM(ivshmem_global_config); i++) {
+ config = ivshmem_global_config[i].metadata;
+ if (config == NULL)
+ return NULL;
+ if (strncmp(name, config->name, IVSHMEM_NAME_LEN) == 0)
+ return &ivshmem_global_config[i];
+ }
+
+ return NULL;
+}
+
+static int
+overlap(const struct rte_memzone * s1, const struct rte_memzone * s2)
+{
+ uint64_t start1, end1, start2, end2;
+
+ start1 = s1->addr_64;
+ end1 = s1->addr_64 + s1->len;
+ start2 = s2->addr_64;
+ end2 = s2->addr_64 + s2->len;
+
+ if (start1 >= start2 && start1 < end2)
+ return 1;
+ if (start2 >= start1 && start2 < end1)
+ return 1;
+
+ return 0;
+}
+
+static struct rte_memzone *
+get_memzone_by_addr(const void * addr)
+{
+ struct rte_memzone * tmp, * mz;
+ struct rte_mem_config * mcfg;
+ int i;
+
+ mcfg = rte_eal_get_configuration()->mem_config;
+ mz = NULL;
+
+ /* find memzone for the ring */
+ for (i = 0; i < RTE_MAX_MEMZONE; i++) {
+ tmp = &mcfg->memzone[i];
+
+ if (tmp->addr_64 == (uint64_t) addr) {
+ mz = tmp;
+ break;
+ }
+ }
+
+ return mz;
+}
+
+static int
+entry_compare(const void * a, const void * b)
+{
+ const struct rte_ivshmem_metadata_entry * e1 =
+ (const struct rte_ivshmem_metadata_entry*) a;
+ const struct rte_ivshmem_metadata_entry * e2 =
+ (const struct rte_ivshmem_metadata_entry*) b;
+
+ /* move unallocated zones to the end */
+ if (e1->mz.addr == NULL && e2->mz.addr == NULL)
+ return 0;
+ if (e1->mz.addr == 0)
+ return 1;
+ if (e2->mz.addr == 0)
+ return -1;
+
+ return e1->mz.phys_addr > e2->mz.phys_addr;
+}
+
+/* fills hugepage cache entry for a given start virt_addr */
+static int
+get_hugefile_by_virt_addr(uint64_t virt_addr, struct memseg_cache_entry * e)
+{
+ uint64_t start_addr, end_addr;
+ char *start,*path_end;
+ char buf[PATH_MAX*2];
+ FILE *f;
+
+ start = NULL;
+ path_end = NULL;
+ start_addr = 0;
+
+ memset(e->filepath, 0, sizeof(e->filepath));
+
+ /* open /proc/self/maps */
+ f = fopen("/proc/self/maps", "r");
+ if (f == NULL) {
+ RTE_LOG(ERR, EAL, "cannot open /proc/self/maps!\n");
+ return -1;
+ }
+
+ /* parse maps */
+ while (fgets(buf, sizeof(buf), f) != NULL) {
+
+ /* get endptr to end of start addr */
+ start = buf;
+
+ GET_PAGEMAP_ADDR(start,start_addr,'-',
+ "Cannot find start address in maps!\n");
+
+ /* if start address is bigger than our address, skip */
+ if (start_addr > virt_addr)
+ continue;
+
+ GET_PAGEMAP_ADDR(start,end_addr,' ',
+ "Cannot find end address in maps!\n");
+
+ /* if end address is less than our address, skip */
+ if (end_addr <= virt_addr)
+ continue;
+
+ /* find where the path starts */
+ start = strstr(start, "/");
+
+ if (start == NULL)
+ continue;
+
+ /* at this point, we know that this is our map.
+ * now let's find the file */
+ path_end = strstr(start, "\n");
+ break;
+ }
+
+ if (path_end == NULL) {
+ RTE_LOG(ERR, EAL, "Hugefile path not found!\n");
+ goto error;
+ }
+
+ /* calculate offset and copy the file path */
+ snprintf(e->filepath, RTE_PTR_DIFF(path_end, start) + 1, "%s", start);
+
+ e->offset = virt_addr - start_addr;
+
+ fclose(f);
+
+ return 0;
+error:
+ fclose(f);
+ return -1;
+}
+
+/*
+ * This is a complex function. What it does is the following:
+ * 1. Goes through metadata and gets list of hugepages involved
+ * 2. Sorts the hugepages by size (1G first)
+ * 3. Goes through metadata again and writes correct offsets
+ * 4. Goes through pages and finds out their filenames, offsets etc.
+ */
+static int
+build_config(struct rte_ivshmem_metadata * metadata)
+{
+ struct rte_ivshmem_metadata_entry * e_local;
+ struct memseg_cache_entry * ms_local;
+ struct rte_memseg pages[IVSHMEM_MAX_PAGES];
+ struct rte_ivshmem_metadata_entry *entry;
+ struct memseg_cache_entry * c_entry, * prev_entry;
+ struct ivshmem_config * config;
+ unsigned i, j, mz_iter, ms_iter;
+ uint64_t biggest_len;
+ int biggest_idx;
+
+ /* return error if we try to use an unknown config file */
+ config = get_config_by_name(metadata->name);
+ if (config == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", metadata->name);
+ goto fail_e;
+ }
+
+ memset(pages, 0, sizeof(pages));
+
+ e_local = malloc(sizeof(config->metadata->entry));
+ if (e_local == NULL)
+ goto fail_e;
+ ms_local = malloc(sizeof(config->memseg_cache));
+ if (ms_local == NULL)
+ goto fail_ms;
+
+
+ /* make local copies before doing anything */
+ memcpy(e_local, config->metadata->entry, sizeof(config->metadata->entry));
+ memcpy(ms_local, config->memseg_cache, sizeof(config->memseg_cache));
+
+ qsort(e_local, RTE_DIM(config->metadata->entry), sizeof(struct rte_ivshmem_metadata_entry),
+ entry_compare);
+
+ /* first pass - collect all huge pages */
+ for (mz_iter = 0; mz_iter < RTE_DIM(config->metadata->entry); mz_iter++) {
+
+ entry = &e_local[mz_iter];
+
+ uint64_t start_addr = RTE_ALIGN_FLOOR(entry->mz.addr_64,
+ entry->mz.hugepage_sz);
+ uint64_t offset = entry->mz.addr_64 - start_addr;
+ uint64_t len = RTE_ALIGN_CEIL(entry->mz.len + offset,
+ entry->mz.hugepage_sz);
+
+ if (entry->mz.addr_64 == 0 || start_addr == 0 || len == 0)
+ continue;
+
+ int start_page;
+
+ /* find first unused page - mz are phys_addr sorted so we don't have to
+ * look out for holes */
+ for (i = 0; i < RTE_DIM(pages); i++) {
+
+ /* skip if we already have this page */
+ if (pages[i].addr_64 == start_addr) {
+ start_addr += entry->mz.hugepage_sz;
+ len -= entry->mz.hugepage_sz;
+ continue;
+ }
+ /* we found a new page */
+ else if (pages[i].addr_64 == 0) {
+ start_page = i;
+ break;
+ }
+ }
+ if (i == RTE_DIM(pages)) {
+ RTE_LOG(ERR, EAL, "Cannot find unused page!\n");
+ goto fail;
+ }
+
+ /* populate however many pages the memzone has */
+ for (i = start_page; i < RTE_DIM(pages) && len != 0; i++) {
+
+ pages[i].addr_64 = start_addr;
+ pages[i].len = entry->mz.hugepage_sz;
+ start_addr += entry->mz.hugepage_sz;
+ len -= entry->mz.hugepage_sz;
+ }
+ /* if there's still length left */
+ if (len != 0) {
+ RTE_LOG(ERR, EAL, "Not enough space for pages!\n");
+ goto fail;
+ }
+ }
+
+ /* second pass - sort pages by size */
+ for (i = 0; i < RTE_DIM(pages); i++) {
+
+ if (pages[i].addr == NULL)
+ break;
+
+ biggest_len = 0;
+ biggest_idx = -1;
+
+ /*
+ * browse all entries starting at 'i', and find the
+ * entry with the largest length
+ */
+ for (j=i; j< RTE_DIM(pages); j++) {
+ if (pages[j].addr == NULL)
+ break;
+ if (biggest_len == 0 ||
+ pages[j].len > biggest_len) {
+ biggest_len = pages[j].len;
+ biggest_idx = j;
+ }
+ }
+
+ /* should not happen */
+ if (biggest_idx == -1) {
+ RTE_LOG(ERR, EAL, "Error sorting by size!\n");
+ goto fail;
+ }
+ if (i != (unsigned) biggest_idx) {
+ struct rte_memseg tmp;
+
+ memcpy(&tmp, &pages[biggest_idx], sizeof(struct rte_memseg));
+
+ /* we don't want to break contiguousness, so instead of just
+ * swapping segments, we move all the preceding segments to the
+ * right and then put the old segment @ biggest_idx in place of
+ * segment @ i */
+ for (j = biggest_idx - 1; j >= i; j--) {
+ memcpy(&pages[j+1], &pages[j], sizeof(struct rte_memseg));
+ memset(&pages[j], 0, sizeof(struct rte_memseg));
+ }
+
+ /* put old biggest segment to its new place */
+ memcpy(&pages[i], &tmp, sizeof(struct rte_memseg));
+ }
+ }
+
+ /* third pass - write correct offsets */
+ for (mz_iter = 0; mz_iter < RTE_DIM(config->metadata->entry); mz_iter++) {
+
+ uint64_t offset = 0;
+
+ entry = &e_local[mz_iter];
+
+ if (entry->mz.addr_64 == 0)
+ break;
+
+ /* find page for current memzone */
+ for (i = 0; i < RTE_DIM(pages); i++) {
+ /* we found our page */
+ if (entry->mz.addr_64 >= pages[i].addr_64 &&
+ entry->mz.addr_64 < pages[i].addr_64 + pages[i].len) {
+ entry->offset = (entry->mz.addr_64 - pages[i].addr_64) +
+ offset;
+ break;
+ }
+ offset += pages[i].len;
+ }
+ if (i == RTE_DIM(pages)) {
+ RTE_LOG(ERR, EAL, "Page not found!\n");
+ goto fail;
+ }
+ }
+
+ ms_iter = 0;
+ prev_entry = NULL;
+
+ /* fourth pass - create proper memseg cache */
+ for (i = 0; i < RTE_DIM(pages) &&
+ ms_iter <= RTE_DIM(config->memseg_cache); i++) {
+ if (pages[i].addr_64 == 0)
+ break;
+
+
+ if (ms_iter == RTE_DIM(pages)) {
+ RTE_LOG(ERR, EAL, "The universe has collapsed!\n");
+ goto fail;
+ }
+
+ c_entry = &ms_local[ms_iter];
+ c_entry->len = pages[i].len;
+
+ if (get_hugefile_by_virt_addr(pages[i].addr_64, c_entry) < 0)
+ goto fail;
+
+ /* if previous entry has the same filename and is contiguous,
+ * clear current entry and increase previous entry's length
+ */
+ if (prev_entry != NULL &&
+ strncmp(c_entry->filepath, prev_entry->filepath,
+ sizeof(c_entry->filepath)) == 0 &&
+ prev_entry->offset + prev_entry->len == c_entry->offset) {
+ prev_entry->len += pages[i].len;
+ memset(c_entry, 0, sizeof(struct memseg_cache_entry));
+ }
+ else {
+ prev_entry = c_entry;
+ ms_iter++;
+ }
+ }
+
+ /* update current configuration with new valid data */
+ memcpy(config->metadata->entry, e_local, sizeof(config->metadata->entry));
+ memcpy(config->memseg_cache, ms_local, sizeof(config->memseg_cache));
+
+ free(ms_local);
+ free(e_local);
+
+ return 0;
+fail:
+ free(ms_local);
+fail_ms:
+ free(e_local);
+fail_e:
+ return -1;
+}
+
+static int
+add_memzone_to_metadata(const struct rte_memzone * mz,
+ struct ivshmem_config * config)
+{
+ struct rte_ivshmem_metadata_entry * entry;
+ unsigned i;
+
+ rte_spinlock_lock(&config->sl);
+
+ /* find free slot in this config */
+ for (i = 0; i < RTE_DIM(config->metadata->entry); i++) {
+ entry = &config->metadata->entry[i];
+
+ if (entry->mz.addr_64 != 0 && overlap(mz, &entry->mz)) {
+ RTE_LOG(ERR, EAL, "Overlapping memzones!\n");
+ goto fail;
+ }
+
+ /* if addr is zero, this metadata slot is free */
+ if (entry->mz.addr_64 == 0) {
+ RTE_LOG(DEBUG, EAL, "Adding memzone '%s' at %p to metadata %s\n",
+ mz->name, mz->addr, config->metadata->name);
+ memcpy(&entry->mz, mz, sizeof(struct rte_memzone));
+
+ /* rebuild the config for this metadata */
+ if (build_config(config->metadata) < 0)
+ goto fail;
+
+ break;
+ }
+ }
+
+ /* if we reached the maximum, that means we have no place in config */
+ if (i == RTE_DIM(config->metadata->entry)) {
+ RTE_LOG(ERR, EAL, "No space left in IVSHMEM metadata %s!\n",
+ config->metadata->name);
+ goto fail;
+ }
+
+ rte_spinlock_unlock(&config->sl);
+ return 0;
+fail:
+ rte_spinlock_unlock(&config->sl);
+ return -1;
+}
+
+static int
+add_ring_to_metadata(const struct rte_ring * r,
+ struct ivshmem_config * config)
+{
+ struct rte_memzone * mz;
+
+ mz = get_memzone_by_addr(r);
+
+ if (!mz) {
+ RTE_LOG(ERR, EAL, "Cannot find memzone for ring!\n");
+ return -1;
+ }
+
+ return add_memzone_to_metadata(mz, config);
+}
+
+static int
+add_mempool_to_metadata(const struct rte_mempool * mp,
+ struct ivshmem_config * config)
+{
+ struct rte_memzone * mz;
+ int ret;
+
+ mz = get_memzone_by_addr(mp);
+ ret = 0;
+
+ if (!mz) {
+ RTE_LOG(ERR, EAL, "Cannot find memzone for mempool!\n");
+ return -1;
+ }
+
+ /* mempool consists of memzone and ring */
+ ret = add_memzone_to_metadata(mz, config);
+ if (ret < 0)
+ return -1;
+
+ return add_ring_to_metadata(mp->ring, config);
+}
+
+int
+rte_ivshmem_metadata_add_ring(const struct rte_ring * r, const char * name)
+{
+ struct ivshmem_config * config;
+
+ if (name == NULL || r == NULL)
+ return -1;
+
+ config = get_config_by_name(name);
+
+ if (config == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
+ return -1;
+ }
+
+ return add_ring_to_metadata(r, config);
+}
+
+int
+rte_ivshmem_metadata_add_memzone(const struct rte_memzone * mz, const char * name)
+{
+ struct ivshmem_config * config;
+
+ if (name == NULL || mz == NULL)
+ return -1;
+
+ config = get_config_by_name(name);
+
+ if (config == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
+ return -1;
+ }
+
+ return add_memzone_to_metadata(mz, config);
+}
+
+int
+rte_ivshmem_metadata_add_mempool(const struct rte_mempool * mp, const char * name)
+{
+ struct ivshmem_config * config;
+
+ if (name == NULL || mp == NULL)
+ return -1;
+
+ config = get_config_by_name(name);
+
+ if (config == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
+ return -1;
+ }
+
+ return add_mempool_to_metadata(mp, config);
+}
+
+static inline void
+ivshmem_config_path(char *buffer, size_t bufflen, const char *name)
+{
+ snprintf(buffer, bufflen, IVSHMEM_CONFIG_FILE_FMT, name);
+}
+
+
+
+static inline
+void *ivshmem_metadata_create(const char *name, size_t size,
+ struct flock *lock)
+{
+ int retval, fd;
+ void *metadata_addr;
+ char pathname[PATH_MAX];
+
+ ivshmem_config_path(pathname, sizeof(pathname), name);
+
+ fd = open(pathname, O_RDWR | O_CREAT, 0660);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open '%s'\n", pathname);
+ return NULL;
+ }
+
+ size = METADATA_SIZE_ALIGNED;
+
+ retval = fcntl(fd, F_SETLK, lock);
+ if (retval < 0){
+ close(fd);
+ RTE_LOG(ERR, EAL, "Cannot create lock on '%s'. Is another "
+ "process using it?\n", pathname);
+ return NULL;
+ }
+
+ retval = ftruncate(fd, size);
+ if (retval < 0){
+ close(fd);
+ RTE_LOG(ERR, EAL, "Cannot resize '%s'\n", pathname);
+ return NULL;
+ }
+
+ metadata_addr = mmap(NULL, size,
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+
+ if (metadata_addr == MAP_FAILED){
+ RTE_LOG(ERR, EAL, "Cannot mmap memory for '%s'\n", pathname);
+
+ /* we don't care if we can't unlock */
+ fcntl(fd, F_UNLCK, lock);
+ close(fd);
+
+ return NULL;
+ }
+
+ return metadata_addr;
+}
+
+int rte_ivshmem_metadata_create(const char *name)
+{
+ struct ivshmem_config * ivshmem_config;
+ unsigned index;
+
+ if (pagesz == 0)
+ pagesz = getpagesize();
+
+ if (name == NULL)
+ return -1;
+
+ rte_spinlock_lock(&global_cfg_sl);
+
+ for (index = 0; index < RTE_DIM(ivshmem_global_config); index++) {
+ if (ivshmem_global_config[index].metadata == NULL) {
+ ivshmem_config = &ivshmem_global_config[index];
+ break;
+ }
+ }
+
+ if (index == RTE_DIM(ivshmem_global_config)) {
+ RTE_LOG(ERR, EAL, "Cannot create more ivshmem config files. "
+ "Maximum has been reached\n");
+ rte_spinlock_unlock(&global_cfg_sl);
+ return -1;
+ }
+
+ ivshmem_config->lock.l_type = F_WRLCK;
+ ivshmem_config->lock.l_whence = SEEK_SET;
+
+ ivshmem_config->lock.l_start = 0;
+ ivshmem_config->lock.l_len = METADATA_SIZE_ALIGNED;
+
+ ivshmem_global_config[index].metadata = ((struct rte_ivshmem_metadata *)
+ ivshmem_metadata_create(
+ name,
+ sizeof(struct rte_ivshmem_metadata),
+ &ivshmem_config->lock));
+
+ if (ivshmem_global_config[index].metadata == NULL) {
+ rte_spinlock_unlock(&global_cfg_sl);
+ return -1;
+ }
+
+ /* Metadata setup */
+ memset(ivshmem_config->metadata, 0, sizeof(struct rte_ivshmem_metadata));
+ ivshmem_config->metadata->magic_number = IVSHMEM_MAGIC;
+ snprintf(ivshmem_config->metadata->name,
+ sizeof(ivshmem_config->metadata->name), "%s", name);
+
+ rte_spinlock_unlock(&global_cfg_sl);
+
+ return 0;
+}
+
+int
+rte_ivshmem_metadata_cmdline_generate(char *buffer, unsigned size, const char *name)
+{
+ const struct memseg_cache_entry * ms_cache, *entry;
+ struct ivshmem_config * config;
+ char cmdline[IVSHMEM_QEMU_CMDLINE_BUFSIZE], *cmdline_ptr;
+ char cfg_file_path[PATH_MAX];
+ unsigned remaining_len, tmplen, iter;
+ uint64_t shared_mem_size, zero_size, total_size;
+
+ if (buffer == NULL || name == NULL)
+ return -1;
+
+ config = get_config_by_name(name);
+
+ if (config == NULL) {
+ RTE_LOG(ERR, EAL, "Config %s not found!\n", name);
+ return -1;
+ }
+
+ rte_spinlock_lock(&config->sl);
+
+ /* prepare metadata file path */
+ snprintf(cfg_file_path, sizeof(cfg_file_path), IVSHMEM_CONFIG_FILE_FMT,
+ config->metadata->name);
+
+ ms_cache = config->memseg_cache;
+
+ cmdline_ptr = cmdline;
+ remaining_len = sizeof(cmdline);
+
+ shared_mem_size = 0;
+ iter = 0;
+
+ while ((ms_cache[iter].len != 0) && (iter < RTE_DIM(config->metadata->entry))) {
+
+ entry = &ms_cache[iter];
+
+ /* Offset and sizes within the current pathname */
+ tmplen = snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
+ entry->filepath, entry->offset, entry->len);
+
+ shared_mem_size += entry->len;
+
+ cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
+ remaining_len -= tmplen;
+
+ if (remaining_len == 0) {
+ RTE_LOG(ERR, EAL, "Command line too long!\n");
+ rte_spinlock_unlock(&config->sl);
+ return -1;
+ }
+
+ iter++;
+ }
+
+ total_size = rte_align64pow2(shared_mem_size + METADATA_SIZE_ALIGNED);
+ zero_size = total_size - shared_mem_size - METADATA_SIZE_ALIGNED;
+
+ /* add /dev/zero to command-line to fill the space */
+ tmplen = snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
+ "/dev/zero",
+ (uint64_t)0x0,
+ zero_size);
+
+ cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
+ remaining_len -= tmplen;
+
+ if (remaining_len == 0) {
+ RTE_LOG(ERR, EAL, "Command line too long!\n");
+ rte_spinlock_unlock(&config->sl);
+ return -1;
+ }
+
+ /* add metadata file to the end of command-line */
+ tmplen = snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
+ cfg_file_path,
+ (uint64_t)0x0,
+ METADATA_SIZE_ALIGNED);
+
+ cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
+ remaining_len -= tmplen;
+
+ if (remaining_len == 0) {
+ RTE_LOG(ERR, EAL, "Command line too long!\n");
+ rte_spinlock_unlock(&config->sl);
+ return -1;
+ }
+
+ /* if current length of the command line is bigger than the buffer supplied
+ * by the user, or if command-line is bigger than what IVSHMEM accepts */
+ if ((sizeof(cmdline) - remaining_len) > size) {
+ RTE_LOG(ERR, EAL, "Buffer is too short!\n");
+ rte_spinlock_unlock(&config->sl);
+ return -1;
+ }
+ /* complete the command-line */
+ snprintf(buffer, size,
+ IVSHMEM_QEMU_CMD_LINE_HEADER_FMT,
+ total_size >> 20,
+ cmdline);
+
+ rte_spinlock_unlock(&config->sl);
+
+ return 0;
+}
+
+void
+rte_ivshmem_metadata_dump(FILE *f, const char *name)
+{
+ unsigned i = 0;
+ struct ivshmem_config * config;
+ struct rte_ivshmem_metadata_entry *entry;
+#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
+ uint64_t addr;
+ uint64_t end, hugepage_sz;
+ struct memseg_cache_entry e;
+#endif
+
+ if (name == NULL)
+ return;
+
+ /* return error if we try to use an unknown config file */
+ config = get_config_by_name(name);
+ if (config == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
+ return;
+ }
+
+ rte_spinlock_lock(&config->sl);
+
+ entry = &config->metadata->entry[0];
+
+ while (entry->mz.addr != NULL && i < RTE_DIM(config->metadata->entry)) {
+
+ fprintf(f, "Entry %u: name:<%-20s>, phys:0x%-15lx, len:0x%-15lx, "
+ "virt:%-15p, off:0x%-15lx\n",
+ i,
+ entry->mz.name,
+ entry->mz.phys_addr,
+ entry->mz.len,
+ entry->mz.addr,
+ entry->offset);
+ i++;
+
+#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
+ fprintf(f, "\tHugepage files:\n");
+
+ hugepage_sz = entry->mz.hugepage_sz;
+ addr = RTE_ALIGN_FLOOR(entry->mz.addr_64, hugepage_sz);
+ end = addr + RTE_ALIGN_CEIL(entry->mz.len + (entry->mz.addr_64 - addr),
+ hugepage_sz);
+
+ for (; addr < end; addr += hugepage_sz) {
+ memset(&e, 0, sizeof(e));
+
+ get_hugefile_by_virt_addr(addr, &e);
+
+ fprintf(f, "\t0x%"PRIx64 "-0x%" PRIx64 " offset: 0x%" PRIx64 " %s\n",
+ addr, addr + hugepage_sz, e.offset, e.filepath);
+ }
+#endif
+ entry++;
+ }
+
+ rte_spinlock_unlock(&config->sl);
+}
diff --git a/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.h b/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.h
new file mode 100755
index 00000000..a5d36d6b
--- /dev/null
+++ b/src/dpdk_lib18/librte_ivshmem/rte_ivshmem.h
@@ -0,0 +1,165 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_IVSHMEM_H_
+#define RTE_IVSHMEM_H_
+
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+
+/**
+ * @file
+ *
+ * The RTE IVSHMEM interface provides functions to create metadata files
+ * describing memory segments to be shared via QEMU IVSHMEM.
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define IVSHMEM_MAGIC 0x0BADC0DE
+#define IVSHMEM_NAME_LEN 32
+
+/**
+ * Structure that holds IVSHMEM shared metadata entry.
+ */
+struct rte_ivshmem_metadata_entry {
+ struct rte_memzone mz; /**< shared memzone */
+ uint64_t offset; /**< offset of memzone within IVSHMEM device */
+};
+
+/**
+ * Structure that holds IVSHMEM metadata.
+ */
+struct rte_ivshmem_metadata {
+ int magic_number; /**< magic number */
+ char name[IVSHMEM_NAME_LEN]; /**< name of the metadata file */
+ struct rte_ivshmem_metadata_entry entry[RTE_LIBRTE_IVSHMEM_MAX_ENTRIES];
+ /**< metadata entries */
+};
+
+/**
+ * Creates a metadata file with a given name
+ *
+ * @param name
+ * Name of metadata file to be created
+ *
+ * @return
+ * - On success, zero
+ * - On failure, a negative value
+ */
+int rte_ivshmem_metadata_create(const char * name);
+
+/**
+ * Adds a memzone to a specific metadata file
+ *
+ * @param mz
+ * Memzone to be added
+ * @param md_name
+ * Name of metadata file for the memzone to be added to
+ *
+ * @return
+ * - On success, zero
+ * - On failure, a negative value
+ */
+int rte_ivshmem_metadata_add_memzone(const struct rte_memzone * mz,
+ const char * md_name);
+
+/**
+ * Adds a ring descriptor to a specific metadata file
+ *
+ * @param r
+ * Ring descriptor to be added
+ * @param md_name
+ * Name of metadata file for the ring to be added to
+ *
+ * @return
+ * - On success, zero
+ * - On failure, a negative value
+ */
+int rte_ivshmem_metadata_add_ring(const struct rte_ring * r,
+ const char * md_name);
+
+/**
+ * Adds a mempool to a specific metadata file
+ *
+ * @param mp
+ * Mempool to be added
+ * @param md_name
+ * Name of metadata file for the mempool to be added to
+ *
+ * @return
+ * - On success, zero
+ * - On failure, a negative value
+ */
+int rte_ivshmem_metadata_add_mempool(const struct rte_mempool * mp,
+ const char * md_name);
+
+
+/**
+ * Generates the QEMU command-line for the IVSHMEM device for a given metadata file.
+ * This function is to be called after all the objects have been added.
+ *
+ * @param buffer
+ * Buffer to be filled with the command line arguments.
+ * @param size
+ * Size of the buffer.
+ * @param name
+ * Name of metadata file to generate QEMU command-line parameters for
+ *
+ * @return
+ * - On success, zero
+ * - On failure, a negative value
+ */
+int rte_ivshmem_metadata_cmdline_generate(char *buffer, unsigned size,
+ const char *name);
+
+
+/**
+ * Dump all metadata entries from a given metadata file to an output file.
+ *
+ * @param f
+ * A pointer to a file for output
+ * @param name
+ * Name of the metadata file to be dumped.
+ */
+void rte_ivshmem_metadata_dump(FILE *f, const char *name);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_IVSHMEM_H_ */
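The API above is intended to be used in a fixed order: create the metadata file, register the objects to share, then generate the QEMU command line. A minimal host-side sketch of that sequence follows; the metadata name "md0", the ring pointer and the command-line buffer size are illustrative assumptions, not values defined by the library.

#include <stdio.h>
#include <rte_ring.h>
#include <rte_ivshmem.h>

/* Sketch only: share one existing ring with a guest over IVSHMEM. */
static int
share_ring_with_guest(struct rte_ring *r)
{
	char cmdline[2048];	/* buffer size is an assumption */

	if (rte_ivshmem_metadata_create("md0") < 0)
		return -1;
	if (rte_ivshmem_metadata_add_ring(r, "md0") < 0)
		return -1;
	/* must be called only after all objects have been added */
	if (rte_ivshmem_metadata_cmdline_generate(cmdline, sizeof(cmdline), "md0") < 0)
		return -1;
	printf("%s\n", cmdline);	/* appended to the QEMU invocation */
	rte_ivshmem_metadata_dump(stdout, "md0");
	return 0;
}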
diff --git a/src/dpdk_lib18/librte_kni/Makefile b/src/dpdk_lib18/librte_kni/Makefile
new file mode 100755
index 00000000..52673040
--- /dev/null
+++ b/src/dpdk_lib18/librte_kni/Makefile
@@ -0,0 +1,49 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_kni.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -fno-strict-aliasing
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_KNI) := rte_kni.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_KNI)-include := rte_kni.h
+
+# this lib needs eal, mbuf and ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_eal lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_ether
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_kni/rte_kni.c b/src/dpdk_lib18/librte_kni/rte_kni.c
new file mode 100755
index 00000000..fdb75094
--- /dev/null
+++ b/src/dpdk_lib18/librte_kni/rte_kni.c
@@ -0,0 +1,747 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_EXEC_ENV_LINUXAPP
+#error "KNI is not supported"
+#endif
+
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_kni.h>
+#include <rte_memzone.h>
+#include <exec-env/rte_kni_common.h>
+#include "rte_kni_fifo.h"
+
+#define MAX_MBUF_BURST_NUM 32
+
+/* Maximum number of ring entries */
+#define KNI_FIFO_COUNT_MAX 1024
+#define KNI_FIFO_SIZE (KNI_FIFO_COUNT_MAX * sizeof(void *) + \
+ sizeof(struct rte_kni_fifo))
+
+#define KNI_REQUEST_MBUF_NUM_MAX 32
+
+#define KNI_MEM_CHECK(cond) do { if (cond) goto kni_fail; } while (0)
+
+/**
+ * KNI context
+ */
+struct rte_kni {
+ char name[RTE_KNI_NAMESIZE]; /**< KNI interface name */
+ uint16_t group_id; /**< Group ID of KNI devices */
+ uint32_t slot_id; /**< KNI pool slot ID */
+ struct rte_mempool *pktmbuf_pool; /**< pkt mbuf mempool */
+ unsigned mbuf_size; /**< mbuf size */
+
+ struct rte_kni_fifo *tx_q; /**< TX queue */
+ struct rte_kni_fifo *rx_q; /**< RX queue */
+ struct rte_kni_fifo *alloc_q; /**< Allocated mbufs queue */
+ struct rte_kni_fifo *free_q; /**< To be freed mbufs queue */
+
+ /* For request & response */
+ struct rte_kni_fifo *req_q; /**< Request queue */
+ struct rte_kni_fifo *resp_q; /**< Response queue */
+ void * sync_addr; /**< Req/Resp Mem address */
+
+ struct rte_kni_ops ops; /**< operations for request */
+ uint8_t in_use : 1; /**< kni in use */
+};
+
+enum kni_ops_status {
+ KNI_REQ_NO_REGISTER = 0,
+ KNI_REQ_REGISTERED,
+};
+
+/**
+ * KNI memzone pool slot
+ */
+struct rte_kni_memzone_slot {
+ uint32_t id;
+ uint8_t in_use : 1; /**< slot in use */
+
+ /* Memzones */
+ const struct rte_memzone *m_ctx; /**< KNI ctx */
+ const struct rte_memzone *m_tx_q; /**< TX queue */
+ const struct rte_memzone *m_rx_q; /**< RX queue */
+ const struct rte_memzone *m_alloc_q; /**< Allocated mbufs queue */
+ const struct rte_memzone *m_free_q; /**< To be freed mbufs queue */
+ const struct rte_memzone *m_req_q; /**< Request queue */
+ const struct rte_memzone *m_resp_q; /**< Response queue */
+ const struct rte_memzone *m_sync_addr;
+
+ /* Free linked list */
+ struct rte_kni_memzone_slot *next; /**< Next slot link.list */
+};
+
+/**
+ * KNI memzone pool
+ */
+struct rte_kni_memzone_pool {
+ uint8_t initialized : 1; /**< Global KNI pool init flag */
+
+ uint32_t max_ifaces; /**< Max. num of KNI ifaces */
+ struct rte_kni_memzone_slot *slots; /**< Pool slots */
+ rte_spinlock_t mutex; /**< alloc/release mutex */
+
+ /* Free memzone slots linked-list */
+ struct rte_kni_memzone_slot *free; /**< First empty slot */
+ struct rte_kni_memzone_slot *free_tail; /**< Last empty slot */
+};
+
+
+static void kni_free_mbufs(struct rte_kni *kni);
+static void kni_allocate_mbufs(struct rte_kni *kni);
+
+static volatile int kni_fd = -1;
+static struct rte_kni_memzone_pool kni_memzone_pool = {
+ .initialized = 0,
+};
+
+static const struct rte_memzone *
+kni_memzone_reserve(const char *name, size_t len, int socket_id,
+ unsigned flags)
+{
+ const struct rte_memzone *mz = rte_memzone_lookup(name);
+
+ if (mz == NULL)
+ mz = rte_memzone_reserve(name, len, socket_id, flags);
+
+ return mz;
+}
+
+/* Pool mgmt */
+static struct rte_kni_memzone_slot*
+kni_memzone_pool_alloc(void)
+{
+ struct rte_kni_memzone_slot *slot;
+
+ rte_spinlock_lock(&kni_memzone_pool.mutex);
+
+ if (!kni_memzone_pool.free) {
+ rte_spinlock_unlock(&kni_memzone_pool.mutex);
+ return NULL;
+ }
+
+ slot = kni_memzone_pool.free;
+ kni_memzone_pool.free = slot->next;
+ slot->in_use = 1;
+
+ if (!kni_memzone_pool.free)
+ kni_memzone_pool.free_tail = NULL;
+
+ rte_spinlock_unlock(&kni_memzone_pool.mutex);
+
+ return slot;
+}
+
+static void
+kni_memzone_pool_release(struct rte_kni_memzone_slot *slot)
+{
+ rte_spinlock_lock(&kni_memzone_pool.mutex);
+
+ if (kni_memzone_pool.free)
+ kni_memzone_pool.free_tail->next = slot;
+ else
+ kni_memzone_pool.free = slot;
+
+ kni_memzone_pool.free_tail = slot;
+ slot->next = NULL;
+ slot->in_use = 0;
+
+ rte_spinlock_unlock(&kni_memzone_pool.mutex);
+}
+
+
+/* Shall be called before any allocation happens */
+void
+rte_kni_init(unsigned int max_kni_ifaces)
+{
+ uint32_t i;
+ struct rte_kni_memzone_slot *it;
+ const struct rte_memzone *mz;
+#define OBJNAMSIZ 32
+ char obj_name[OBJNAMSIZ];
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+
+ if (max_kni_ifaces == 0) {
+ RTE_LOG(ERR, KNI, "Invalid number of max_kni_ifaces %d\n",
+ max_kni_ifaces);
+ rte_panic("Unable to initialize KNI\n");
+ }
+
+ /* Check FD and open */
+ if (kni_fd < 0) {
+ kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
+ if (kni_fd < 0)
+ rte_panic("Can not open /dev/%s\n", KNI_DEVICE);
+ }
+
+ /* Allocate slot objects */
+ kni_memzone_pool.slots = (struct rte_kni_memzone_slot *)
+ rte_malloc(NULL,
+ sizeof(struct rte_kni_memzone_slot) *
+ max_kni_ifaces,
+ 0);
+ KNI_MEM_CHECK(kni_memzone_pool.slots == NULL);
+
+ /* Initialize general pool variables */
+ kni_memzone_pool.initialized = 1;
+ kni_memzone_pool.max_ifaces = max_kni_ifaces;
+ kni_memzone_pool.free = &kni_memzone_pool.slots[0];
+ rte_spinlock_init(&kni_memzone_pool.mutex);
+
+ /* Pre-allocate all memzones of all the slots; panic on error */
+ for (i = 0; i < max_kni_ifaces; i++) {
+
+ /* Recover current slot */
+ it = &kni_memzone_pool.slots[i];
+ it->id = i;
+
+ /* Allocate KNI context */
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%d", i);
+ mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
+ SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(mz == NULL);
+ it->m_ctx = mz;
+
+ /* TX RING */
+ snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", i);
+ mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
+ SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(mz == NULL);
+ it->m_tx_q = mz;
+
+ /* RX RING */
+ snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", i);
+ mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
+ SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(mz == NULL);
+ it->m_rx_q = mz;
+
+ /* ALLOC RING */
+ snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", i);
+ mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
+ SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(mz == NULL);
+ it->m_alloc_q = mz;
+
+ /* FREE RING */
+ snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", i);
+ mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
+ SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(mz == NULL);
+ it->m_free_q = mz;
+
+ /* Request RING */
+ snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", i);
+ mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
+ SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(mz == NULL);
+ it->m_req_q = mz;
+
+ /* Response RING */
+ snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", i);
+ mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
+ SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(mz == NULL);
+ it->m_resp_q = mz;
+
+ /* Req/Resp sync mem area */
+ snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", i);
+ mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
+ SOCKET_ID_ANY, 0);
+ KNI_MEM_CHECK(mz == NULL);
+ it->m_sync_addr = mz;
+
+ if ((i+1) == max_kni_ifaces) {
+ it->next = NULL;
+ kni_memzone_pool.free_tail = it;
+ } else
+ it->next = &kni_memzone_pool.slots[i+1];
+ }
+
+ return;
+
+kni_fail:
+ rte_panic("Unable to allocate memory for max_kni_ifaces:%d. Increase the amount of hugepages memory\n",
+ max_kni_ifaces);
+}
+
+/* It is deprecated and just for backward compatibility */
+struct rte_kni *
+rte_kni_create(uint8_t port_id,
+ unsigned mbuf_size,
+ struct rte_mempool *pktmbuf_pool,
+ struct rte_kni_ops *ops)
+{
+ struct rte_kni_conf conf;
+ struct rte_eth_dev_info info;
+
+ memset(&info, 0, sizeof(info));
+ memset(&conf, 0, sizeof(conf));
+ rte_eth_dev_info_get(port_id, &info);
+
+ snprintf(conf.name, sizeof(conf.name), "vEth%u", port_id);
+ conf.addr = info.pci_dev->addr;
+ conf.id = info.pci_dev->id;
+ conf.group_id = (uint16_t)port_id;
+ conf.mbuf_size = mbuf_size;
+
+ /* Save the port id for request handling */
+ ops->port_id = port_id;
+
+ return rte_kni_alloc(pktmbuf_pool, &conf, ops);
+}
+
+struct rte_kni *
+rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
+ const struct rte_kni_conf *conf,
+ struct rte_kni_ops *ops)
+{
+ int ret;
+ struct rte_kni_device_info dev_info;
+ struct rte_kni *ctx;
+ char intf_name[RTE_KNI_NAMESIZE];
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ struct rte_kni_memzone_slot *slot = NULL;
+
+ if (!pktmbuf_pool || !conf || !conf->name[0])
+ return NULL;
+
+ /* Check if KNI subsystem has been initialized */
+ if (kni_memzone_pool.initialized != 1) {
+ RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
+ return NULL;
+ }
+
+ /* Get an available slot from the pool */
+ slot = kni_memzone_pool_alloc();
+ if (!slot) {
+ RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unusued ones.\n",
+ kni_memzone_pool.max_ifaces);
+ return NULL;
+ }
+
+ /* Recover ctx */
+ ctx = slot->m_ctx->addr;
+ snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);
+
+ if (ctx->in_use) {
+ RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
+ return NULL;
+ }
+ memset(ctx, 0, sizeof(struct rte_kni));
+ if (ops)
+ memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));
+
+ memset(&dev_info, 0, sizeof(dev_info));
+ dev_info.bus = conf->addr.bus;
+ dev_info.devid = conf->addr.devid;
+ dev_info.function = conf->addr.function;
+ dev_info.vendor_id = conf->id.vendor_id;
+ dev_info.device_id = conf->id.device_id;
+ dev_info.core_id = conf->core_id;
+ dev_info.force_bind = conf->force_bind;
+ dev_info.group_id = conf->group_id;
+ dev_info.mbuf_size = conf->mbuf_size;
+
+ snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
+ snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);
+
+ RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
+ dev_info.bus, dev_info.devid, dev_info.function,
+ dev_info.vendor_id, dev_info.device_id);
+ /* TX RING */
+ mz = slot->m_tx_q;
+ ctx->tx_q = mz->addr;
+ kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
+ dev_info.tx_phys = mz->phys_addr;
+
+ /* RX RING */
+ mz = slot->m_rx_q;
+ ctx->rx_q = mz->addr;
+ kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
+ dev_info.rx_phys = mz->phys_addr;
+
+ /* ALLOC RING */
+ mz = slot->m_alloc_q;
+ ctx->alloc_q = mz->addr;
+ kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
+ dev_info.alloc_phys = mz->phys_addr;
+
+ /* FREE RING */
+ mz = slot->m_free_q;
+ ctx->free_q = mz->addr;
+ kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
+ dev_info.free_phys = mz->phys_addr;
+
+ /* Request RING */
+ mz = slot->m_req_q;
+ ctx->req_q = mz->addr;
+ kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
+ dev_info.req_phys = mz->phys_addr;
+
+ /* Response RING */
+ mz = slot->m_resp_q;
+ ctx->resp_q = mz->addr;
+ kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
+ dev_info.resp_phys = mz->phys_addr;
+
+ /* Req/Resp sync mem area */
+ mz = slot->m_sync_addr;
+ ctx->sync_addr = mz->addr;
+ dev_info.sync_va = mz->addr;
+ dev_info.sync_phys = mz->phys_addr;
+
+
+ /* MBUF mempool */
+ snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME,
+ pktmbuf_pool->name);
+ mz = rte_memzone_lookup(mz_name);
+ KNI_MEM_CHECK(mz == NULL);
+ dev_info.mbuf_va = mz->addr;
+ dev_info.mbuf_phys = mz->phys_addr;
+ ctx->pktmbuf_pool = pktmbuf_pool;
+ ctx->group_id = conf->group_id;
+ ctx->slot_id = slot->id;
+ ctx->mbuf_size = conf->mbuf_size;
+
+ ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
+ KNI_MEM_CHECK(ret < 0);
+
+ ctx->in_use = 1;
+
+ return ctx;
+
+kni_fail:
+ if (slot)
+ kni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);
+
+ return NULL;
+}
+
+static void
+kni_free_fifo(struct rte_kni_fifo *fifo)
+{
+ int ret;
+ struct rte_mbuf *pkt;
+
+ do {
+ ret = kni_fifo_get(fifo, (void **)&pkt, 1);
+ if (ret)
+ rte_pktmbuf_free(pkt);
+ } while (ret);
+}
+
+int
+rte_kni_release(struct rte_kni *kni)
+{
+ struct rte_kni_device_info dev_info;
+ uint32_t slot_id;
+
+ if (!kni || !kni->in_use)
+ return -1;
+
+ snprintf(dev_info.name, sizeof(dev_info.name), "%s", kni->name);
+ if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
+ RTE_LOG(ERR, KNI, "Fail to release kni device\n");
+ return -1;
+ }
+
+ /* mbufs in all fifos should be released, except request/response */
+ kni_free_fifo(kni->tx_q);
+ kni_free_fifo(kni->rx_q);
+ kni_free_fifo(kni->alloc_q);
+ kni_free_fifo(kni->free_q);
+
+ slot_id = kni->slot_id;
+
+ /* Memset the KNI struct */
+ memset(kni, 0, sizeof(struct rte_kni));
+
+ /* Release memzone */
+ if (slot_id > kni_memzone_pool.max_ifaces) {
+ rte_panic("KNI pool: corrupted slot ID: %d, max: %d\n",
+ slot_id, kni_memzone_pool.max_ifaces);
+ }
+ kni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);
+
+ return 0;
+}
+
+int
+rte_kni_handle_request(struct rte_kni *kni)
+{
+ unsigned ret;
+ struct rte_kni_request *req;
+
+ if (kni == NULL)
+ return -1;
+
+ /* Get request mbuf */
+ ret = kni_fifo_get(kni->req_q, (void **)&req, 1);
+ if (ret != 1)
+ return 0; /* It is OK if there is no request mbuf to get */
+
+ if (req != kni->sync_addr) {
+ rte_panic("Wrong req pointer %p\n", req);
+ }
+
+ /* Analyze the request and call the relevant actions for it */
+ switch (req->req_id) {
+ case RTE_KNI_REQ_CHANGE_MTU: /* Change MTU */
+ if (kni->ops.change_mtu)
+ req->result = kni->ops.change_mtu(kni->ops.port_id,
+ req->new_mtu);
+ break;
+ case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */
+ if (kni->ops.config_network_if)
+ req->result = kni->ops.config_network_if(\
+ kni->ops.port_id, req->if_up);
+ break;
+ default:
+ RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id);
+ req->result = -EINVAL;
+ break;
+ }
+
+ /* Construct response mbuf and put it back to resp_q */
+ ret = kni_fifo_put(kni->resp_q, (void **)&req, 1);
+ if (ret != 1) {
+ RTE_LOG(ERR, KNI, "Fail to put the muf back to resp_q\n");
+ return -1; /* It is an error of can't putting the mbuf back */
+ }
+
+ return 0;
+}
+
+unsigned
+rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
+{
+ unsigned ret = kni_fifo_put(kni->rx_q, (void **)mbufs, num);
+
+ /* Get mbufs from free_q and then free them */
+ kni_free_mbufs(kni);
+
+ return ret;
+}
+
+unsigned
+rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
+{
+ unsigned ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);
+
+ /* Allocate mbufs and then put them into alloc_q */
+ kni_allocate_mbufs(kni);
+
+ return ret;
+}
+
+static void
+kni_free_mbufs(struct rte_kni *kni)
+{
+ int i, ret;
+ struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
+
+ ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
+ if (likely(ret > 0)) {
+ for (i = 0; i < ret; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
+}
+
+static void
+kni_allocate_mbufs(struct rte_kni *kni)
+{
+ int i, ret;
+ struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
+
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
+ offsetof(struct rte_kni_mbuf, pool));
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_addr) !=
+ offsetof(struct rte_kni_mbuf, buf_addr));
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) !=
+ offsetof(struct rte_kni_mbuf, next));
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
+ offsetof(struct rte_kni_mbuf, data_off));
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_kni_mbuf, data_len));
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_kni_mbuf, pkt_len));
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_kni_mbuf, ol_flags));
+
+ /* Check if pktmbuf pool has been configured */
+ if (kni->pktmbuf_pool == NULL) {
+ RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
+ return;
+ }
+
+ for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
+ pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
+ if (unlikely(pkts[i] == NULL)) {
+ /* Out of memory */
+ RTE_LOG(ERR, KNI, "Out of memory\n");
+ break;
+ }
+ }
+
+ /* No pkt mbuf allocated */
+ if (i <= 0)
+ return;
+
+ ret = kni_fifo_put(kni->alloc_q, (void **)pkts, i);
+
+ /* Check if any mbufs not put into alloc_q, and then free them */
+ if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
+ int j;
+
+ for (j = ret; j < i; j++)
+ rte_pktmbuf_free(pkts[j]);
+ }
+}
+
+/* It is deprecated and just for backward compatibility */
+uint8_t
+rte_kni_get_port_id(struct rte_kni *kni)
+{
+ if (!kni)
+ return ~0x0;
+
+ return kni->ops.port_id;
+}
+
+struct rte_kni *
+rte_kni_get(const char *name)
+{
+ uint32_t i;
+ struct rte_kni_memzone_slot *it;
+ struct rte_kni *kni;
+
+ /* Note: could be improved perf-wise if necessary */
+ for (i = 0; i < kni_memzone_pool.max_ifaces; i++) {
+ it = &kni_memzone_pool.slots[i];
+ if (it->in_use == 0)
+ continue;
+ kni = it->m_ctx->addr;
+ if (strncmp(kni->name, name, RTE_KNI_NAMESIZE) == 0)
+ return kni;
+ }
+
+ return NULL;
+}
+
+/*
+ * It is deprecated and just for backward compatibility.
+ */
+struct rte_kni *
+rte_kni_info_get(uint8_t port_id)
+{
+ char name[RTE_MEMZONE_NAMESIZE];
+
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return NULL;
+
+ snprintf(name, RTE_MEMZONE_NAMESIZE, "vEth%u", port_id);
+
+ return rte_kni_get(name);
+}
+
+static enum kni_ops_status
+kni_check_request_register(struct rte_kni_ops *ops)
+{
+ /* check if KNI request ops has been registered*/
+ if( NULL == ops )
+ return KNI_REQ_NO_REGISTER;
+
+ if((NULL == ops->change_mtu) && (NULL == ops->config_network_if))
+ return KNI_REQ_NO_REGISTER;
+
+ return KNI_REQ_REGISTERED;
+}
+
+int
+rte_kni_register_handlers(struct rte_kni *kni,struct rte_kni_ops *ops)
+{
+ enum kni_ops_status req_status;
+
+ if (NULL == ops) {
+ RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n");
+ return -1;
+ }
+
+ if (NULL == kni) {
+ RTE_LOG(ERR, KNI, "Invalid kni info.\n");
+ return -1;
+ }
+
+ req_status = kni_check_request_register(&kni->ops);
+ if ( KNI_REQ_REGISTERED == req_status) {
+ RTE_LOG(ERR, KNI, "The KNI request operation has already registered.\n");
+ return -1;
+ }
+
+ memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
+ return 0;
+}
+
+int
+rte_kni_unregister_handlers(struct rte_kni *kni)
+{
+ if (NULL == kni) {
+ RTE_LOG(ERR, KNI, "Invalid kni info.\n");
+ return -1;
+ }
+
+ kni->ops.change_mtu = NULL;
+ kni->ops.config_network_if = NULL;
+ return 0;
+}
+void
+rte_kni_close(void)
+{
+ if (kni_fd < 0)
+ return;
+
+ close(kni_fd);
+ kni_fd = -1;
+}
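Taken together, rte_kni_tx_burst() feeds the kernel side through rx_q and drains free_q, while rte_kni_rx_burst() drains tx_q and refills alloc_q. A hedged sketch of the usual polling loop pairing a KNI interface with a physical port is shown below; port 0, queue 0 and the burst size of 32 are illustrative assumptions.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_kni.h>

/* Sketch only: bidirectional forwarding between port 0 and a KNI interface. */
static void
kni_forward_loop(struct rte_kni *kni)
{
	struct rte_mbuf *pkts[32];
	unsigned nb, sent;

	for (;;) {
		/* NIC -> kernel: tx_burst also frees mbufs returned on free_q */
		nb = rte_eth_rx_burst(0, 0, pkts, 32);
		sent = rte_kni_tx_burst(kni, pkts, nb);
		while (sent < nb)
			rte_pktmbuf_free(pkts[sent++]);

		/* kernel -> NIC: rx_burst also refills alloc_q for the kernel module */
		nb = rte_kni_rx_burst(kni, pkts, 32);
		sent = rte_eth_tx_burst(0, 0, pkts, nb);
		while (sent < nb)
			rte_pktmbuf_free(pkts[sent++]);

		/* service MTU change / link up-down requests from the kernel */
		rte_kni_handle_request(kni);
	}
}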
diff --git a/src/dpdk_lib18/librte_kni/rte_kni.h b/src/dpdk_lib18/librte_kni/rte_kni.h
new file mode 100755
index 00000000..815b8e2b
--- /dev/null
+++ b/src/dpdk_lib18/librte_kni/rte_kni.h
@@ -0,0 +1,306 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_KNI_H_
+#define _RTE_KNI_H_
+
+/**
+ * @file
+ * RTE KNI
+ *
+ * The KNI library provides the ability to create and destroy kernel NIC
+ * interfaces that may be used by the RTE application to receive/transmit
+ * packets from/to Linux kernel net interfaces.
+ *
+ * This library provides two APIs: one to burst-receive packets from KNI
+ * interfaces, and one to burst-transmit packets to KNI interfaces.
+ */
+
+#include <rte_pci.h>
+#include <rte_mbuf.h>
+
+#include <exec-env/rte_kni_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct rte_kni;
+
+/**
+ * Structure which has the function pointers for KNI interface.
+ */
+struct rte_kni_ops {
+ uint8_t port_id; /* Port ID */
+
+ /* Pointer to function of changing MTU */
+ int (*change_mtu)(uint8_t port_id, unsigned new_mtu);
+
+ /* Pointer to function of configuring network interface */
+ int (*config_network_if)(uint8_t port_id, uint8_t if_up);
+};
+
+/**
+ * Structure for configuring KNI device.
+ */
+struct rte_kni_conf {
+ /*
+ * KNI name, which will be used for the corresponding network device.
+ * Keep the name as short as possible, as it will be part of the
+ * memzone name.
+ */
+ char name[RTE_KNI_NAMESIZE];
+ uint32_t core_id; /* Core ID to bind kernel thread on */
+ uint16_t group_id; /* Group ID */
+ unsigned mbuf_size; /* mbuf size */
+ struct rte_pci_addr addr;
+ struct rte_pci_id id;
+
+ uint8_t force_bind : 1; /* Flag to bind kernel thread */
+};
+
+/**
+ * Initialize and preallocate KNI subsystem
+ *
+ * This function is to be executed on the MASTER lcore only, after EAL
+ * initialization and before any KNI interface is allocated
+ *
+ * @param max_kni_ifaces
+ * The maximum number of KNI interfaces that can coexist concurrently
+ */
+extern void rte_kni_init(unsigned int max_kni_ifaces);
+
+
+/**
+ * Allocate a KNI interface according to the port id, mbuf size, mbuf pool,
+ * configurations and callbacks for kernel requests. The KNI interface created
+ * in kernel space is the net interface that a traditional Linux application
+ * talks to.
+ *
+ * rte_kni_alloc() shall not be called before rte_kni_init() has been
+ * called. rte_kni_alloc() is thread safe.
+ *
+ * @param pktmbuf_pool
+ * The mempool for allocating mbufs for packets.
+ * @param conf
+ * The pointer to the configurations of the KNI device.
+ * @param ops
+ * The pointer to the callbacks for the KNI kernel requests.
+ *
+ * @return
+ * - The pointer to the context of a KNI interface.
+ * - NULL indicates an error.
+ */
+extern struct rte_kni *rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
+ const struct rte_kni_conf *conf,
+ struct rte_kni_ops *ops);
+
+/**
+ * It creates a KNI device for a specific port.
+ *
+ * Note: It is deprecated and just for backward compatibility.
+ *
+ * @param port_id
+ * Port ID.
+ * @param mbuf_size
+ * mbuf size.
+ * @param pktmbuf_pool
+ * The mempool for allocating mbufs for packets.
+ * @param ops
+ * The pointer to the callbacks for the KNI kernel requests.
+ *
+ * @return
+ * - The pointer to the context of a KNI interface.
+ * - NULL indicates an error.
+ */
+extern struct rte_kni *rte_kni_create(uint8_t port_id,
+ unsigned mbuf_size,
+ struct rte_mempool *pktmbuf_pool,
+ struct rte_kni_ops *ops) \
+ __attribute__ ((deprecated));
+
+/**
+ * Release the KNI interface according to the context. It will also release the
+ * paired KNI interface in kernel space. All processing on the specific KNI
+ * context needs to be stopped before calling this interface.
+ *
+ * rte_kni_release is thread safe.
+ *
+ * @param kni
+ * The pointer to the context of an existent KNI interface.
+ *
+ * @return
+ * - 0 indicates success.
+ * - negative value indicates failure.
+ */
+extern int rte_kni_release(struct rte_kni *kni);
+
+/**
+ * It is used to handle the request mbufs sent from kernel space: it analyzes
+ * each request and calls the relevant action for it, then constructs the
+ * response mbuf and puts it back into resp_q.
+ *
+ * @param kni
+ * The pointer to the context of an existent KNI interface.
+ *
+ * @return
+ * - 0
+ * - negative value indicates failure.
+ */
+extern int rte_kni_handle_request(struct rte_kni *kni);
+
+/**
+ * Retrieve a burst of packets from a KNI interface. The retrieved packets are
+ * stored in rte_mbuf structures whose pointers are supplied in the array of
+ * mbufs, and the maximum number is indicated by num. It handles the freeing of
+ * the mbufs in the free queue of the KNI interface.
+ *
+ * @param kni
+ * The KNI interface context.
+ * @param mbufs
+ * The array to store the pointers of mbufs.
+ * @param num
+ * The maximum number per burst.
+ *
+ * @return
+ * The actual number of packets retrieved.
+ */
+extern unsigned rte_kni_rx_burst(struct rte_kni *kni,
+ struct rte_mbuf **mbufs, unsigned num);
+
+/**
+ * Send a burst of packets to a KNI interface. The packets to be sent out are
+ * stored in rte_mbuf structures whose pointers are supplied in the array of
+ * mbufs, and the maximum number is indicated by num. It handles allocating
+ * the mbufs for the KNI interface's alloc queue.
+ *
+ * @param kni
+ * The KNI interface context.
+ * @param mbufs
+ * The array to store the pointers of mbufs.
+ * @param num
+ * The maximum number per burst.
+ *
+ * @return
+ * The actual number of packets sent.
+ */
+extern unsigned rte_kni_tx_burst(struct rte_kni *kni,
+ struct rte_mbuf **mbufs, unsigned num);
+
+/**
+ * Get the port id from the KNI interface.
+ *
+ * Note: It is deprecated and just for backward compatibility.
+ *
+ * @param kni
+ * The KNI interface context.
+ *
+ * @return
+ * On success: The port id.
+ * On failure: ~0x0
+ */
+extern uint8_t rte_kni_get_port_id(struct rte_kni *kni) \
+ __attribute__ ((deprecated));
+
+/**
+ * Get the KNI context by its name.
+ *
+ * @param name
+ * pointer to the KNI device name.
+ *
+ * @return
+ * On success: Pointer to KNI interface.
+ * On failure: NULL.
+ */
+extern struct rte_kni *rte_kni_get(const char *name);
+
+/**
+ * Get the KNI context of the specific port.
+ *
+ * Note: It is deprecated and just for backward compatibility.
+ *
+ * @param port_id
+ * the port id.
+ *
+ * @return
+ * On success: Pointer to KNI interface.
+ * On failure: NULL
+ */
+extern struct rte_kni *rte_kni_info_get(uint8_t port_id) \
+ __attribute__ ((deprecated));
+
+/**
+ * Register KNI request handling for a specified port; it can be called
+ * by either the master process or a slave process.
+ *
+ * @param kni
+ * pointer to struct rte_kni.
+ * @param ops
+ * pointer to struct rte_kni_ops.
+ *
+ * @return
+ * On success: 0
+ * On failure: -1
+ */
+extern int rte_kni_register_handlers(struct rte_kni *kni,
+ struct rte_kni_ops *ops);
+
+/**
+ * Unregister KNI request handling for a specified port.
+ *
+ * @param kni
+ * pointer to struct rte_kni.
+ *
+ * @return
+ * On success: 0
+ * On failure: -1
+ */
+extern int rte_kni_unregister_handlers(struct rte_kni *kni);
+
+/**
+ * Close the KNI device.
+ *
+ * @param void
+ *
+ * @return
+ * void
+ */
+extern void rte_kni_close(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_KNI_H_ */
+
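A minimal setup sketch for this API, assuming EAL is already initialized and a pktmbuf pool has been created; the interface name "vEth0", the mbuf size and the no-op callbacks are illustrative assumptions.

#include <stdio.h>
#include <string.h>
#include <rte_mempool.h>
#include <rte_kni.h>

/* Illustrative request callbacks; a real application reconfigures the port here. */
static int my_change_mtu(uint8_t port_id, unsigned new_mtu)
{ (void)port_id; (void)new_mtu; return 0; }
static int my_config_network_if(uint8_t port_id, uint8_t if_up)
{ (void)port_id; (void)if_up; return 0; }

static struct rte_kni *
kni_setup(struct rte_mempool *pktmbuf_pool)
{
	struct rte_kni_conf conf;
	struct rte_kni_ops ops;

	rte_kni_init(1);			/* preallocate one slot, MASTER lcore only */

	memset(&conf, 0, sizeof(conf));
	snprintf(conf.name, sizeof(conf.name), "vEth0");	/* name is an assumption */
	conf.group_id = 0;
	conf.mbuf_size = 2048;			/* illustrative mbuf size */

	memset(&ops, 0, sizeof(ops));
	ops.port_id = 0;
	ops.change_mtu = my_change_mtu;
	ops.config_network_if = my_config_network_if;

	/* returns NULL on error; pair with rte_kni_release() and rte_kni_close() */
	return rte_kni_alloc(pktmbuf_pool, &conf, &ops);
}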
diff --git a/src/dpdk_lib18/librte_kni/rte_kni_fifo.h b/src/dpdk_lib18/librte_kni/rte_kni_fifo.h
new file mode 100755
index 00000000..8cb85873
--- /dev/null
+++ b/src/dpdk_lib18/librte_kni/rte_kni_fifo.h
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+
+/**
+ * Initializes the kni fifo structure
+ */
+static void
+kni_fifo_init(struct rte_kni_fifo *fifo, unsigned size)
+{
+ /* Ensure size is power of 2 */
+ if (size & (size - 1))
+ rte_panic("KNI fifo size must be power of 2\n");
+
+ fifo->write = 0;
+ fifo->read = 0;
+ fifo->len = size;
+ fifo->elem_size = sizeof(void *);
+}
+
+/**
+ * Adds num elements into the fifo. Returns the number actually written.
+ */
+static inline unsigned
+kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
+{
+ unsigned i = 0;
+ unsigned fifo_write = fifo->write;
+ unsigned fifo_read = fifo->read;
+ unsigned new_write = fifo_write;
+
+ for (i = 0; i < num; i++) {
+ new_write = (new_write + 1) & (fifo->len - 1);
+
+ if (new_write == fifo_read)
+ break;
+ fifo->buffer[fifo_write] = data[i];
+ fifo_write = new_write;
+ }
+ fifo->write = fifo_write;
+ return i;
+}
+
+/**
+ * Get up to num elements from the fifo. Returns the number actually read.
+ */
+static inline unsigned
+kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
+{
+ unsigned i = 0;
+ unsigned new_read = fifo->read;
+ unsigned fifo_write = fifo->write;
+ for (i = 0; i < num; i++) {
+ if (new_read == fifo_write)
+ break;
+
+ data[i] = fifo->buffer[new_read];
+ new_read = (new_read + 1) & (fifo->len - 1);
+ }
+ fifo->read = new_read;
+ return i;
+}
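Because kni_fifo_init() forces the length to a power of two, advancing an index reduces to a single mask, and the put path never lets write catch up to read, so one slot always stays empty. A standalone illustration of that index arithmetic follows (it reproduces only the arithmetic, not the shared-memory layout used by the kernel module).

#include <stdio.h>

/* Sketch only: the fifo can hold at most len - 1 elements. */
int main(void)
{
	unsigned len = 8, read = 0, write = 0, stored = 0, i;

	for (i = 0; i < 10; i++) {
		unsigned next = (write + 1) & (len - 1);	/* wraps at len */
		if (next == read)		/* full: one slot stays empty */
			break;
		write = next;
		stored++;
	}
	printf("capacity = %u (len - 1)\n", stored);	/* prints 7 */
	return 0;
}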
diff --git a/src/dpdk_lib18/librte_kvargs/Makefile b/src/dpdk_lib18/librte_kvargs/Makefile
new file mode 100755
index 00000000..b09359a5
--- /dev/null
+++ b/src/dpdk_lib18/librte_kvargs/Makefile
@@ -0,0 +1,51 @@
+# BSD LICENSE
+#
+# Copyright 2014 6WIND S.A.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# - Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# - Neither the name of 6WIND S.A. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+# OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_kvargs.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_KVARGS) := rte_kvargs.c
+
+# install includes
+INCS := rte_kvargs.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_KVARGS)-include := $(INCS)
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_KVARGS) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_kvargs/rte_kvargs.c b/src/dpdk_lib18/librte_kvargs/rte_kvargs.c
new file mode 100755
index 00000000..8bc1e461
--- /dev/null
+++ b/src/dpdk_lib18/librte_kvargs/rte_kvargs.c
@@ -0,0 +1,208 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+#include <stdlib.h>
+
+#include <rte_log.h>
+#include <rte_string_fns.h>
+
+#include "rte_kvargs.h"
+
+/*
+ * Receive a string with a list of arguments following the pattern
+ * key=value,key=value,... and insert them into the list.
+ * strtok_r() is used, so the params string is copied so that it can be modified.
+ */
+static int
+rte_kvargs_tokenize(struct rte_kvargs *kvlist, const char *params)
+{
+ unsigned i;
+ char *str;
+ char *ctx1 = NULL;
+ char *ctx2 = NULL;
+
+ /* Copy the const char *params to a modifiable string
+ * to pass to strtok_r()
+ */
+ kvlist->str = strdup(params);
+ if (kvlist->str == NULL) {
+ RTE_LOG(ERR, PMD, "Cannot parse arguments: not enough memory\n");
+ return -1;
+ }
+
+ /* browse each key/value pair and add it in kvlist */
+ str = kvlist->str;
+ while ((str = strtok_r(str, RTE_KVARGS_PAIRS_DELIM, &ctx1)) != NULL) {
+
+ i = kvlist->count;
+ if (i >= RTE_KVARGS_MAX) {
+ RTE_LOG(ERR, PMD, "Cannot parse arguments: list full\n");
+ return -1;
+ }
+
+ kvlist->pairs[i].key = strtok_r(str, RTE_KVARGS_KV_DELIM, &ctx2);
+ kvlist->pairs[i].value = strtok_r(NULL, RTE_KVARGS_KV_DELIM, &ctx2);
+ if (kvlist->pairs[i].key == NULL || kvlist->pairs[i].value == NULL) {
+ RTE_LOG(ERR, PMD,
+ "Cannot parse arguments: wrong key or value\n"
+ "params=<%s>\n", params);
+ return -1;
+ }
+
+ kvlist->count++;
+ str = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * Determine whether a key is valid or not by looking
+ * into a list of valid keys.
+ */
+static int
+is_valid_key(const char *valid[], const char *key_match)
+{
+ const char **valid_ptr;
+
+ for (valid_ptr = valid; *valid_ptr != NULL; valid_ptr++) {
+ if (strcmp(key_match, *valid_ptr) == 0)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Determine whether all keys are valid or not by looking
+ * into a list of valid keys.
+ */
+static int
+check_for_valid_keys(struct rte_kvargs *kvlist,
+ const char *valid[])
+{
+ unsigned i, ret;
+ struct rte_kvargs_pair *pair;
+
+ for (i = 0; i < kvlist->count; i++) {
+ pair = &kvlist->pairs[i];
+ ret = is_valid_key(valid, pair->key);
+ if (!ret) {
+ RTE_LOG(ERR, PMD,
+ "Error parsing device, invalid key <%s>\n",
+ pair->key);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Return the number of times a given arg_name exists in the key/value list.
+ * E.g. given a list = { rx = 0, rx = 1, tx = 2 } the number of args for
+ * arg "rx" will be 2.
+ */
+unsigned
+rte_kvargs_count(const struct rte_kvargs *kvlist, const char *key_match)
+{
+ const struct rte_kvargs_pair *pair;
+ unsigned i, ret;
+
+ ret = 0;
+ for (i = 0; i < kvlist->count; i++) {
+ pair = &kvlist->pairs[i];
+ if (key_match == NULL || strcmp(pair->key, key_match) == 0)
+ ret++;
+ }
+
+ return ret;
+}
+
+/*
+ * For each matching key, call the given handler function.
+ */
+int
+rte_kvargs_process(const struct rte_kvargs *kvlist,
+ const char *key_match,
+ arg_handler_t handler,
+ void *opaque_arg)
+{
+ const struct rte_kvargs_pair *pair;
+ unsigned i;
+
+ for (i = 0; i < kvlist->count; i++) {
+ pair = &kvlist->pairs[i];
+ if (key_match == NULL || strcmp(pair->key, key_match) == 0) {
+ if ((*handler)(pair->key, pair->value, opaque_arg) < 0)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/* free the rte_kvargs structure */
+void
+rte_kvargs_free(struct rte_kvargs *kvlist)
+{
+ if (kvlist->str != NULL)
+ free(kvlist->str);
+ free(kvlist);
+}
+
+/*
+ * Parse the arguments "key=value;key=value;..." string and return
+ * an allocated structure that contains a key/value list. Also
+ * check if only valid keys were used.
+ */
+struct rte_kvargs *
+rte_kvargs_parse(const char *args, const char *valid_keys[])
+{
+ struct rte_kvargs *kvlist;
+
+ kvlist = malloc(sizeof(*kvlist));
+ if (kvlist == NULL)
+ return NULL;
+ memset(kvlist, 0, sizeof(*kvlist));
+
+ if (rte_kvargs_tokenize(kvlist, args) < 0) {
+ rte_kvargs_free(kvlist);
+ return NULL;
+ }
+
+ if (valid_keys != NULL && check_for_valid_keys(kvlist, valid_keys) < 0) {
+ rte_kvargs_free(kvlist);
+ return NULL;
+ }
+
+ return kvlist;
+}
diff --git a/src/dpdk_lib18/librte_kvargs/rte_kvargs.h b/src/dpdk_lib18/librte_kvargs/rte_kvargs.h
new file mode 100755
index 00000000..ef4efabf
--- /dev/null
+++ b/src/dpdk_lib18/librte_kvargs/rte_kvargs.h
@@ -0,0 +1,155 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_KVARGS_H_
+#define _RTE_KVARGS_H_
+
+/**
+ * @file
+ * RTE Argument parsing
+ *
+ * This module can be used to parse arguments whose format is
+ * key1=value1,key2=value2,key3=value3,...
+ *
+ * The same key can appear several times with the same or a different
+ * value. Indeed, the arguments are stored as a list of key/value
+ * associations and not as a dictionary.
+ *
+ * This file provides some helpers that are especially used by virtual
+ * ethernet devices at initialization for argument parsing.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Maximum number of key/value associations */
+#define RTE_KVARGS_MAX 32
+
+/** separator character used between each pair */
+#define RTE_KVARGS_PAIRS_DELIM ","
+
+/** separator character used between key and value */
+#define RTE_KVARGS_KV_DELIM "="
+
+/** Type of callback function used by rte_kvargs_process() */
+typedef int (*arg_handler_t)(const char *key, const char *value, void *opaque);
+
+/** A key/value association */
+struct rte_kvargs_pair {
+ char *key; /**< the name (key) of the association */
+ char *value; /**< the value associated to that key */
+};
+
+/** Store a list of key/value associations */
+struct rte_kvargs {
+ char *str; /**< copy of the argument string */
+ unsigned count; /**< number of entries in the list */
+ struct rte_kvargs_pair pairs[RTE_KVARGS_MAX]; /**< list of key/values */
+};
+
+/**
+ * Allocate a rte_kvargs and store key/value associations from a string
+ *
+ * The function allocates and fills a rte_kvargs structure from a given
+ * string whose format is key1=value1,key2=value2,...
+ *
+ * The structure can be freed with rte_kvargs_free().
+ *
+ * @param args
+ * The input string containing the key/value associations
+ * @param valid_keys
+ * A list of valid keys (table of const char *, the last must be NULL).
+ * This argument is ignored if NULL
+ *
+ * @return
+ * - A pointer to an allocated rte_kvargs structure on success
+ * - NULL on error
+ */
+struct rte_kvargs *rte_kvargs_parse(const char *args, const char *valid_keys[]);
+
+/**
+ * Free a rte_kvargs structure
+ *
+ * Free a rte_kvargs structure previously allocated with
+ * rte_kvargs_parse().
+ *
+ * @param kvlist
+ * The rte_kvargs structure
+ */
+void rte_kvargs_free(struct rte_kvargs *kvlist);
+
+/**
+ * Call a handler function for each key/value matching the key
+ *
+ * For each key/value association that matches the given key, call the
+ * handler function, passing the key, its associated value, and the extra
+ * opaque argument.
+ *
+ * @param kvlist
+ * The rte_kvargs structure
+ * @param key_match
+ * The key on which the handler should be called, or NULL to process handler
+ * on all associations
+ * @param handler
+ * The function to call for each matching key
+ * @param opaque_arg
+ * A pointer passed unchanged to the handler
+ *
+ * @return
+ * - 0 on success
+ * - Negative on error
+ */
+int rte_kvargs_process(const struct rte_kvargs *kvlist,
+ const char *key_match, arg_handler_t handler, void *opaque_arg);
+
+/**
+ * Count the number of associations matching the given key
+ *
+ * @param kvlist
+ * The rte_kvargs structure
+ * @param key_match
+ * The key that should match, or NULL to count all associations
+
+ *
+ * The number of entries
+ */
+unsigned rte_kvargs_count(const struct rte_kvargs *kvlist,
+ const char *key_match);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
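
A minimal usage sketch for the API above (the device string and the "iface"/"queues" keys are made up for illustration, and this assumes a normal DPDK build so that rte_kvargs.h is on the include path):

    #include <stdio.h>
    #include <rte_kvargs.h>

    /* Handler invoked by rte_kvargs_process() for every matching pair. */
    static int print_kv(const char *key, const char *value, void *opaque)
    {
        (void)opaque;
        printf("%s -> %s\n", key, value);
        return 0;                          /* negative aborts processing */
    }

    static int parse_example(void)
    {
        static const char *valid[] = { "iface", "queues", NULL };
        struct rte_kvargs *kvlist;

        kvlist = rte_kvargs_parse("iface=eth0,queues=4", valid);
        if (kvlist == NULL)
            return -1;                     /* malformed string or bad key */

        printf("queues given %u time(s)\n",
               rte_kvargs_count(kvlist, "queues"));
        rte_kvargs_process(kvlist, NULL, print_kv, NULL); /* NULL = all keys */

        rte_kvargs_free(kvlist);
        return 0;
    }
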
diff --git a/src/dpdk_lib18/librte_lpm/Makefile b/src/dpdk_lib18/librte_lpm/Makefile
new file mode 100755
index 00000000..fa94163f
--- /dev/null
+++ b/src/dpdk_lib18/librte_lpm/Makefile
@@ -0,0 +1,49 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_lpm.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) := rte_lpm.c rte_lpm6.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_LPM)-include := rte_lpm.h rte_lpm6.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_LPM) += lib/librte_eal lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_lpm/rte_lpm.c b/src/dpdk_lib18/librte_lpm/rte_lpm.c
new file mode 100755
index 00000000..983e04b1
--- /dev/null
+++ b/src/dpdk_lib18/librte_lpm/rte_lpm.c
@@ -0,0 +1,1017 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>
+
+#include "rte_lpm.h"
+
+TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
+
+#define MAX_DEPTH_TBL24 24
+
+enum valid_flag {
+ INVALID = 0,
+ VALID
+};
+
+/* Macro to enable/disable run-time checks. */
+#if defined(RTE_LIBRTE_LPM_DEBUG)
+#include <rte_debug.h>
+#define VERIFY_DEPTH(depth) do { \
+ if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH)) \
+ rte_panic("LPM: Invalid depth (%u) at line %d", \
+ (unsigned)(depth), __LINE__); \
+} while (0)
+#else
+#define VERIFY_DEPTH(depth)
+#endif
+
+/*
+ * Converts a given depth value to its corresponding mask value.
+ *
+ * depth (IN) : range = 1 - 32
+ * mask (OUT) : 32bit mask
+ */
+static uint32_t __attribute__((pure))
+depth_to_mask(uint8_t depth)
+{
+ VERIFY_DEPTH(depth);
+
+ /* To calculate a mask start with a 1 on the left hand side and right
+ * shift while populating the left hand side with 1's
+ */
+ return (int)0x80000000 >> (depth - 1);
+}
+
+/*
+ * Converts given depth value to its corresponding range value.
+ */
+static inline uint32_t __attribute__((pure))
+depth_to_range(uint8_t depth)
+{
+ VERIFY_DEPTH(depth);
+
+ /*
+ * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
+ */
+ if (depth <= MAX_DEPTH_TBL24)
+ return 1 << (MAX_DEPTH_TBL24 - depth);
+
+ /* Else if depth is greater than 24 */
+ return (1 << (RTE_LPM_MAX_DEPTH - depth));
+}
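
A few hand-checked values for these two helpers (an illustrative sketch; it would need <assert.h> and has to sit in this translation unit, since both helpers are static):

    assert(depth_to_mask(8)   == 0xFF000000);
    assert(depth_to_range(8)  == (1 << 16));  /* a /8 spans 65536 tbl24 entries */
    assert(depth_to_mask(24)  == 0xFFFFFF00);
    assert(depth_to_range(24) == 1);          /* a /24 spans one tbl24 entry */
    assert(depth_to_mask(32)  == 0xFFFFFFFF);
    assert(depth_to_range(32) == 1);          /* a /32 spans one tbl8 entry */
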
+
+/*
+ * Find an existing lpm table and return a pointer to it.
+ */
+struct rte_lpm *
+rte_lpm_find_existing(const char *name)
+{
+ struct rte_lpm *l = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_lpm_list *lpm_list;
+
+ /* check that we have an initialised tail queue */
+ if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
+ rte_lpm_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_FOREACH(te, lpm_list, next) {
+ l = (struct rte_lpm *) te->data;
+ if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
+ break;
+ }
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ return l;
+}
+
+/*
+ * Allocates memory for LPM object
+ */
+struct rte_lpm *
+rte_lpm_create(const char *name, int socket_id, int max_rules,
+ __rte_unused int flags)
+{
+ char mem_name[RTE_LPM_NAMESIZE];
+ struct rte_lpm *lpm = NULL;
+ struct rte_tailq_entry *te;
+ uint32_t mem_size;
+ struct rte_lpm_list *lpm_list;
+
+ /* check that we have an initialised tail queue */
+ if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
+ rte_lpm_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
+ RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);
+
+ /* Check user arguments. */
+ if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
+
+ /* Determine the amount of memory to allocate. */
+ mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* guarantee there's no existing lpm with the same name */
+ TAILQ_FOREACH(te, lpm_list, next) {
+ lpm = (struct rte_lpm *) te->data;
+ if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
+ break;
+ }
+ if (te != NULL)
+ goto exit;
+
+ /* allocate tailq entry */
+ te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+ goto exit;
+ }
+
+ /* Allocate memory to store the LPM data structures. */
+ lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (lpm == NULL) {
+ RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+ rte_free(te);
+ goto exit;
+ }
+
+ /* Save user arguments. */
+ lpm->max_rules = max_rules;
+ snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+
+ te->data = (void *) lpm;
+
+ TAILQ_INSERT_TAIL(lpm_list, te, next);
+
+exit:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return lpm;
+}
+
+/*
+ * Deallocates memory for given LPM table.
+ */
+void
+rte_lpm_free(struct rte_lpm *lpm)
+{
+ struct rte_lpm_list *lpm_list;
+ struct rte_tailq_entry *te;
+
+ /* Check user arguments. */
+ if (lpm == NULL)
+ return;
+
+ /* check that we have an initialised tail queue */
+ if ((lpm_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return;
+ }
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find our tailq entry */
+ TAILQ_FOREACH(te, lpm_list, next) {
+ if (te->data == (void *) lpm)
+ break;
+ }
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(lpm_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ rte_free(lpm);
+ rte_free(te);
+}
+
+/*
+ * Adds a rule to the rule table.
+ *
+ * NOTE: The rule table is split into 32 groups. Each group contains rules that
+ * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
+ * prefixes with a depth of 1, etc.). In the following code (depth - 1) is used
+ * as the group index because, even though the depth range is 1 - 32, groups
+ * are indexed in the rule table from 0 - 31.
+ * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
+ */
+static inline int32_t
+rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+ uint8_t next_hop)
+{
+ uint32_t rule_gindex, rule_index, last_rule;
+ int i;
+
+ VERIFY_DEPTH(depth);
+
+ /* Scan through rule group to see if rule already exists. */
+ if (lpm->rule_info[depth - 1].used_rules > 0) {
+
+ /* rule_gindex stands for rule group index. */
+ rule_gindex = lpm->rule_info[depth - 1].first_rule;
+ /* Initialise rule_index to point to start of rule group. */
+ rule_index = rule_gindex;
+ /* Last rule = Last used rule in this rule group. */
+ last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+
+ for (; rule_index < last_rule; rule_index++) {
+
+ /* If rule already exists update its next_hop and return. */
+ if (lpm->rules_tbl[rule_index].ip == ip_masked) {
+ lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+ return rule_index;
+ }
+ }
+ } else {
+ /* Calculate the position in which the rule will be stored. */
+ rule_index = 0;
+
+ for (i = depth - 1; i > 0; i--) {
+ if (lpm->rule_info[i - 1].used_rules > 0) {
+ rule_index = lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules;
+ break;
+ }
+ }
+ if (rule_index == lpm->max_rules)
+ return -ENOSPC;
+
+ lpm->rule_info[depth - 1].first_rule = rule_index;
+ }
+
+ /* Make room for the new rule in the array. */
+ for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
+ if (lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
+ return -ENOSPC;
+
+ if (lpm->rule_info[i - 1].used_rules > 0) {
+ lpm->rules_tbl[lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules]
+ = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
+ lpm->rule_info[i - 1].first_rule++;
+ }
+ }
+
+ /* Add the new rule. */
+ lpm->rules_tbl[rule_index].ip = ip_masked;
+ lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+ /* Increment the used rules counter for this rule group. */
+ lpm->rule_info[depth - 1].used_rules++;
+
+ return rule_index;
+}
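
A hand-worked illustration of the grouping described above (not part of the library): adding 10.0.0.0/8, then 10.1.0.0/16, then 11.0.0.0/8 leaves the table laid out as follows, with the /16 group shifted up one slot to make room for the second /8 rule:

    /*
     *   rules_tbl[0] = 10.0.0.0   (depth 8)
     *   rules_tbl[1] = 11.0.0.0   (depth 8)
     *   rules_tbl[2] = 10.1.0.0   (depth 16)
     *
     *   rule_info[7]  = { first_rule = 0, used_rules = 2 }   -- depth  8 group
     *   rule_info[15] = { first_rule = 2, used_rules = 1 }   -- depth 16 group
     */
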
+
+/*
+ * Delete a rule from the rule table.
+ * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
+ */
+static inline void
+rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
+{
+ int i;
+
+ VERIFY_DEPTH(depth);
+
+ lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
+ + lpm->rule_info[depth - 1].used_rules - 1];
+
+ for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
+ if (lpm->rule_info[i].used_rules > 0) {
+ lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
+ lpm->rules_tbl[lpm->rule_info[i].first_rule + lpm->rule_info[i].used_rules - 1];
+ lpm->rule_info[i].first_rule--;
+ }
+ }
+
+ lpm->rule_info[depth - 1].used_rules--;
+}
+
+/*
+ * Finds a rule in rule table.
+ * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
+ */
+static inline int32_t
+rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
+{
+ uint32_t rule_gindex, last_rule, rule_index;
+
+ VERIFY_DEPTH(depth);
+
+ rule_gindex = lpm->rule_info[depth - 1].first_rule;
+ last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+
+ /* Scan used rules at given depth to find rule. */
+ for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
+ /* If rule is found return the rule index. */
+ if (lpm->rules_tbl[rule_index].ip == ip_masked)
+ return (rule_index);
+ }
+
+ /* If rule is not found return -E_RTE_NO_TAILQ. */
+ return -E_RTE_NO_TAILQ;
+}
+
+/*
+ * Find, clean and allocate a tbl8.
+ */
+static inline int32_t
+tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
+{
+ uint32_t tbl8_gindex; /* tbl8 group index. */
+ struct rte_lpm_tbl8_entry *tbl8_entry;
+
+ /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
+ for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
+ tbl8_gindex++) {
+ tbl8_entry = &tbl8[tbl8_gindex *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+ /* If a free tbl8 group is found clean it and set as VALID. */
+ if (!tbl8_entry->valid_group) {
+ memset(&tbl8_entry[0], 0,
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
+ sizeof(tbl8_entry[0]));
+
+ tbl8_entry->valid_group = VALID;
+
+ /* Return group index for allocated tbl8 group. */
+ return tbl8_gindex;
+ }
+ }
+
+ /* If there are no tbl8 groups free then return error. */
+ return -ENOSPC;
+}
+
+static inline void
+tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+{
+ /* Set tbl8 group invalid. */
+ tbl8[tbl8_group_start].valid_group = INVALID;
+}
+
+static inline int32_t
+add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+ uint8_t next_hop)
+{
+ uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
+
+ /* Calculate the index into Table24. */
+ tbl24_index = ip >> 8;
+ tbl24_range = depth_to_range(depth);
+
+ for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+ /*
+ * Set the tbl24 entry if it is invalid, or if it is a valid,
+ * non-extended entry of lower or equal depth.
+ */
+ if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
+ lpm->tbl24[i].depth <= depth)) {
+
+ struct rte_lpm_tbl24_entry new_tbl24_entry = {
+ { .next_hop = next_hop, },
+ .valid = VALID,
+ .ext_entry = 0,
+ .depth = depth,
+ };
+
+ /* Setting tbl24 entry in one go to avoid race
+ * conditions */
+ lpm->tbl24[i] = new_tbl24_entry;
+
+ continue;
+ }
+
+ /* If tbl24 entry is valid and extended calculate the index
+ * into tbl8. */
+ tbl8_index = lpm->tbl24[i].tbl8_gindex *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+ for (j = tbl8_index; j < tbl8_group_end; j++) {
+ if (!lpm->tbl8[j].valid ||
+ lpm->tbl8[j].depth <= depth) {
+ struct rte_lpm_tbl8_entry new_tbl8_entry = {
+ .valid = VALID,
+ .valid_group = VALID,
+ .depth = depth,
+ .next_hop = next_hop,
+ };
+
+ /*
+ * Setting tbl8 entry in one go to avoid race
+ * conditions
+ */
+ lpm->tbl8[j] = new_tbl8_entry;
+
+ continue;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static inline int32_t
+add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+ uint8_t next_hop)
+{
+ uint32_t tbl24_index;
+ int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
+ tbl8_range, i;
+
+ tbl24_index = (ip_masked >> 8);
+ tbl8_range = depth_to_range(depth);
+
+ if (!lpm->tbl24[tbl24_index].valid) {
+ /* Search for a free tbl8 group. */
+ tbl8_group_index = tbl8_alloc(lpm->tbl8);
+
+ /* Check tbl8 allocation was successful. */
+ if (tbl8_group_index < 0) {
+ return tbl8_group_index;
+ }
+
+ /* Find index into tbl8 and range. */
+ tbl8_index = (tbl8_group_index *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
+ (ip_masked & 0xFF);
+
+ /* Set tbl8 entry. */
+ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+ lpm->tbl8[i].depth = depth;
+ lpm->tbl8[i].next_hop = next_hop;
+ lpm->tbl8[i].valid = VALID;
+ }
+
+ /*
+ * Update tbl24 entry to point to new tbl8 entry. Note: The
+ * ext_flag and tbl8_index need to be updated simultaneously,
+ * so assign whole structure in one go
+ */
+
+ struct rte_lpm_tbl24_entry new_tbl24_entry = {
+ { .tbl8_gindex = (uint8_t)tbl8_group_index, },
+ .valid = VALID,
+ .ext_entry = 1,
+ .depth = 0,
+ };
+
+ lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+ }/* If valid entry but not extended calculate the index into Table8. */
+ else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
+ /* Search for free tbl8 group. */
+ tbl8_group_index = tbl8_alloc(lpm->tbl8);
+
+ if (tbl8_group_index < 0) {
+ return tbl8_group_index;
+ }
+
+ tbl8_group_start = tbl8_group_index *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl8_group_end = tbl8_group_start +
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+ /* Populate new tbl8 with tbl24 value. */
+ for (i = tbl8_group_start; i < tbl8_group_end; i++) {
+ lpm->tbl8[i].valid = VALID;
+ lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
+ lpm->tbl8[i].next_hop =
+ lpm->tbl24[tbl24_index].next_hop;
+ }
+
+ tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+ /* Insert new rule into the tbl8 entry. */
+ for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
+ if (!lpm->tbl8[i].valid ||
+ lpm->tbl8[i].depth <= depth) {
+ lpm->tbl8[i].valid = VALID;
+ lpm->tbl8[i].depth = depth;
+ lpm->tbl8[i].next_hop = next_hop;
+
+ continue;
+ }
+ }
+
+ /*
+ * Update tbl24 entry to point to new tbl8 entry. Note: The
+ * ext_flag and tbl8_index need to be updated simultaneously,
+ * so assign whole structure in one go.
+ */
+
+ struct rte_lpm_tbl24_entry new_tbl24_entry = {
+ { .tbl8_gindex = (uint8_t)tbl8_group_index, },
+ .valid = VALID,
+ .ext_entry = 1,
+ .depth = 0,
+ };
+
+ lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+ }
+ else { /*
+ * If it is a valid, extended entry, calculate the index into tbl8.
+ */
+ tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+ tbl8_group_start = tbl8_group_index *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+
+ if (!lpm->tbl8[i].valid ||
+ lpm->tbl8[i].depth <= depth) {
+ struct rte_lpm_tbl8_entry new_tbl8_entry = {
+ .valid = VALID,
+ .depth = depth,
+ .next_hop = next_hop,
+ .valid_group = lpm->tbl8[i].valid_group,
+ };
+
+ /*
+ * Setting tbl8 entry in one go to avoid race
+ * condition
+ */
+ lpm->tbl8[i] = new_tbl8_entry;
+
+ continue;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Add a route
+ */
+int
+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+ uint8_t next_hop)
+{
+ int32_t rule_index, status = 0;
+ uint32_t ip_masked;
+
+ /* Check user arguments. */
+ if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+ return -EINVAL;
+
+ ip_masked = ip & depth_to_mask(depth);
+
+ /* Add the rule to the rule table. */
+ rule_index = rule_add(lpm, ip_masked, depth, next_hop);
+
+ /* If there is no space available for the new rule, return an error. */
+ if (rule_index < 0) {
+ return rule_index;
+ }
+
+ if (depth <= MAX_DEPTH_TBL24) {
+ status = add_depth_small(lpm, ip_masked, depth, next_hop);
+ }
+ else { /* If depth > MAX_DEPTH_TBL24 */
+ status = add_depth_big(lpm, ip_masked, depth, next_hop);
+
+ /*
+ * If add fails due to exhaustion of tbl8 extensions delete
+ * rule that was added to rule table.
+ */
+ if (status < 0) {
+ rule_delete(lpm, rule_index, depth);
+
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Look for a rule in the high-level rules table
+ */
+int
+rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+uint8_t *next_hop)
+{
+ uint32_t ip_masked;
+ int32_t rule_index;
+
+ /* Check user arguments. */
+ if ((lpm == NULL) ||
+ (next_hop == NULL) ||
+ (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+ return -EINVAL;
+
+ /* Look for the rule using rule_find. */
+ ip_masked = ip & depth_to_mask(depth);
+ rule_index = rule_find(lpm, ip_masked, depth);
+
+ if (rule_index >= 0) {
+ *next_hop = lpm->rules_tbl[rule_index].next_hop;
+ return 1;
+ }
+
+ /* If rule is not found return 0. */
+ return 0;
+}
+
+static inline int32_t
+find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
+{
+ int32_t rule_index;
+ uint32_t ip_masked;
+ uint8_t prev_depth;
+
+ for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
+ ip_masked = ip & depth_to_mask(prev_depth);
+
+ rule_index = rule_find(lpm, ip_masked, prev_depth);
+
+ if (rule_index >= 0) {
+ *sub_rule_depth = prev_depth;
+ return rule_index;
+ }
+ }
+
+ return -1;
+}
+
+static inline int32_t
+delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
+ uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+{
+ uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
+
+ /* Calculate the range and index into Table24. */
+ tbl24_range = depth_to_range(depth);
+ tbl24_index = (ip_masked >> 8);
+
+ /*
+ * Firstly check the sub_rule_index. A -1 indicates no replacement rule
+ * and a positive number indicates a sub_rule_index.
+ */
+ if (sub_rule_index < 0) {
+ /*
+ * If no replacement rule exists then invalidate entries
+ * associated with this rule.
+ */
+ for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+
+ if (lpm->tbl24[i].ext_entry == 0 &&
+ lpm->tbl24[i].depth <= depth ) {
+ lpm->tbl24[i].valid = INVALID;
+ }
+ else {
+ /*
+ * If TBL24 entry is extended, then there has
+ * to be a rule with depth >= 25 in the
+ * associated TBL8 group.
+ */
+
+ tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+ tbl8_index = tbl8_group_index *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+ for (j = tbl8_index; j < (tbl8_index +
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
+
+ if (lpm->tbl8[j].depth <= depth)
+ lpm->tbl8[j].valid = INVALID;
+ }
+ }
+ }
+ }
+ else {
+ /*
+ * If a replacement rule exists then modify entries
+ * associated with this rule.
+ */
+
+ struct rte_lpm_tbl24_entry new_tbl24_entry = {
+ {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
+ .valid = VALID,
+ .ext_entry = 0,
+ .depth = sub_rule_depth,
+ };
+
+ struct rte_lpm_tbl8_entry new_tbl8_entry = {
+ .valid = VALID,
+ .depth = sub_rule_depth,
+ .next_hop = lpm->rules_tbl
+ [sub_rule_index].next_hop,
+ };
+
+ for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+
+ if (lpm->tbl24[i].ext_entry == 0 &&
+ lpm->tbl24[i].depth <= depth ) {
+ lpm->tbl24[i] = new_tbl24_entry;
+ }
+ else {
+ /*
+ * If TBL24 entry is extended, then there has
+ * to be a rule with depth >= 25 in the
+ * associated TBL8 group.
+ */
+
+ tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+ tbl8_index = tbl8_group_index *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+ for (j = tbl8_index; j < (tbl8_index +
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
+
+ if (lpm->tbl8[j].depth <= depth)
+ lpm->tbl8[j] = new_tbl8_entry;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Checks if table 8 group can be recycled.
+ *
+ * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
+ * Return of -EINVAL means tbl8 is empty and thus can be recycled
+ * Return of value > -1 means tbl8 is in use but has all the same values and
+ * thus can be recycled
+ */
+static inline int32_t
+tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+{
+ uint32_t tbl8_group_end, i;
+ tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+ /*
+ * Check the first entry of the given tbl8. If it is invalid we know
+ * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
+ * (As they would affect all entries in a tbl8) and thus this table
+ * can not be recycled.
+ */
+ if (tbl8[tbl8_group_start].valid) {
+ /*
+ * If first entry is valid check if the depth is less than 24
+ * and if so check the rest of the entries to verify that they
+ * are all of this depth.
+ */
+ if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
+ for (i = (tbl8_group_start + 1); i < tbl8_group_end;
+ i++) {
+
+ if (tbl8[i].depth !=
+ tbl8[tbl8_group_start].depth) {
+
+ return -EEXIST;
+ }
+ }
+ /* If all entries are the same return the tbl8 index */
+ return tbl8_group_start;
+ }
+
+ return -EEXIST;
+ }
+ /*
+ * If the first entry is invalid check if the rest of the entries in
+ * the tbl8 are invalid.
+ */
+ for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
+ if (tbl8[i].valid)
+ return -EEXIST;
+ }
+ /* If no valid entries are found then return -EINVAL. */
+ return -EINVAL;
+}
+
+static inline int32_t
+delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
+ uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+{
+ uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
+ tbl8_range, i;
+ int32_t tbl8_recycle_index;
+
+ /*
+ * Calculate the index into tbl24 and range. Note: All depths larger
+ * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
+ */
+ tbl24_index = ip_masked >> 8;
+
+ /* Calculate the index into tbl8 and range. */
+ tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+ tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+ tbl8_range = depth_to_range(depth);
+
+ if (sub_rule_index < 0) {
+ /*
+ * Loop through the range of entries on tbl8 for which the
+ * rule_to_delete must be removed or modified.
+ */
+ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+ if (lpm->tbl8[i].depth <= depth)
+ lpm->tbl8[i].valid = INVALID;
+ }
+ }
+ else {
+ /* Set new tbl8 entry. */
+ struct rte_lpm_tbl8_entry new_tbl8_entry = {
+ .valid = VALID,
+ .depth = sub_rule_depth,
+ .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
+ .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
+ };
+
+ /*
+ * Loop through the range of entries on tbl8 for which the
+ * rule_to_delete must be modified.
+ */
+ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+ if (lpm->tbl8[i].depth <= depth)
+ lpm->tbl8[i] = new_tbl8_entry;
+ }
+ }
+
+ /*
+ * Check if there are any valid entries in this tbl8 group. If all
+ * tbl8 entries are invalid we can free the tbl8 and invalidate the
+ * associated tbl24 entry.
+ */
+
+ tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);
+
+ if (tbl8_recycle_index == -EINVAL){
+ /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ lpm->tbl24[tbl24_index].valid = 0;
+ tbl8_free(lpm->tbl8, tbl8_group_start);
+ }
+ else if (tbl8_recycle_index > -1) {
+ /* Update tbl24 entry. */
+ struct rte_lpm_tbl24_entry new_tbl24_entry = {
+ { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
+ .valid = VALID,
+ .ext_entry = 0,
+ .depth = lpm->tbl8[tbl8_recycle_index].depth,
+ };
+
+ /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ tbl8_free(lpm->tbl8, tbl8_group_start);
+ }
+
+ return 0;
+}
+
+/*
+ * Deletes a rule
+ */
+int
+rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+{
+ int32_t rule_to_delete_index, sub_rule_index;
+ uint32_t ip_masked;
+ uint8_t sub_rule_depth;
+ /*
+ * Check input arguments. Note: IP must be a positive integer of 32
+ * bits in length therefore it need not be checked.
+ */
+ if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
+ return -EINVAL;
+ }
+
+ ip_masked = ip & depth_to_mask(depth);
+
+ /*
+ * Find the index of the input rule, that needs to be deleted, in the
+ * rule table.
+ */
+ rule_to_delete_index = rule_find(lpm, ip_masked, depth);
+
+ /*
+ * Check if rule_to_delete_index was found. If no rule was found the
+ * function rule_find returns -E_RTE_NO_TAILQ.
+ */
+ if (rule_to_delete_index < 0)
+ return -E_RTE_NO_TAILQ;
+
+ /* Delete the rule from the rule table. */
+ rule_delete(lpm, rule_to_delete_index, depth);
+
+ /*
+ * Find rule to replace the rule_to_delete. If there is no rule to
+ * replace the rule_to_delete we return -1 and invalidate the table
+ * entries associated with this rule.
+ */
+ sub_rule_depth = 0;
+ sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
+
+ /*
+ * If the input depth value is less than 25 use function
+ * delete_depth_small otherwise use delete_depth_big.
+ */
+ if (depth <= MAX_DEPTH_TBL24) {
+ return delete_depth_small(lpm, ip_masked, depth,
+ sub_rule_index, sub_rule_depth);
+ }
+ else { /* If depth > MAX_DEPTH_TBL24 */
+ return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth);
+ }
+}
+
+/*
+ * Delete all rules from the LPM table.
+ */
+void
+rte_lpm_delete_all(struct rte_lpm *lpm)
+{
+ /* Zero rule information. */
+ memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
+
+ /* Zero tbl24. */
+ memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
+
+ /* Zero tbl8. */
+ memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
+
+ /* Delete all rules from the rules table. */
+ memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
+}
+
diff --git a/src/dpdk_lib18/librte_lpm/rte_lpm.h b/src/dpdk_lib18/librte_lpm/rte_lpm.h
new file mode 100755
index 00000000..62d7736e
--- /dev/null
+++ b/src/dpdk_lib18/librte_lpm/rte_lpm.h
@@ -0,0 +1,472 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_LPM_H_
+#define _RTE_LPM_H_
+
+/**
+ * @file
+ * RTE Longest Prefix Match (LPM)
+ */
+
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_common.h>
+#include <rte_common_vect.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Max number of characters in LPM name. */
+#define RTE_LPM_NAMESIZE 32
+
+/** @deprecated Possible location to allocate memory. This was for last
+ * parameter of rte_lpm_create(), but is now redundant. The LPM table is always
+ * allocated in memory using librte_malloc which uses a memzone. */
+#define RTE_LPM_HEAP 0
+
+/** @deprecated Possible location to allocate memory. This was for last
+ * parameter of rte_lpm_create(), but is now redundant. The LPM table is always
+ * allocated in memory using librte_malloc which uses a memzone. */
+#define RTE_LPM_MEMZONE 1
+
+/** Maximum depth value possible for IPv4 LPM. */
+#define RTE_LPM_MAX_DEPTH 32
+
+/** @internal Total number of tbl24 entries. */
+#define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)
+
+/** @internal Number of entries in a tbl8 group. */
+#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
+
+/** @internal Total number of tbl8 groups in the tbl8. */
+#define RTE_LPM_TBL8_NUM_GROUPS 256
+
+/** @internal Total number of tbl8 entries. */
+#define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
+
+/** @internal Macro to enable/disable run-time checks. */
+#if defined(RTE_LIBRTE_LPM_DEBUG)
+#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
+ if (cond) return (retval); \
+} while (0)
+#else
+#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
+#endif
+
+/** @internal bitmask with valid and ext_entry/valid_group fields set */
+#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300
+
+/** Bitmask used to indicate successful lookup */
+#define RTE_LPM_LOOKUP_SUCCESS 0x0100
+
+/** @internal Tbl24 entry structure. */
+struct rte_lpm_tbl24_entry {
+ /* Stores next hop or group index (i.e. gindex) into tbl8. */
+ union {
+ uint8_t next_hop;
+ uint8_t tbl8_gindex;
+ };
+ /* Using single uint8_t to store 3 values. */
+ uint8_t valid :1; /**< Validation flag. */
+ uint8_t ext_entry :1; /**< External entry. */
+ uint8_t depth :6; /**< Rule depth. */
+};
+
+/** @internal Tbl8 entry structure. */
+struct rte_lpm_tbl8_entry {
+ uint8_t next_hop; /**< next hop. */
+ /* Using single uint8_t to store 3 values. */
+ uint8_t valid :1; /**< Validation flag. */
+ uint8_t valid_group :1; /**< Group validation flag. */
+ uint8_t depth :6; /**< Rule depth. */
+};
+
+/** @internal Rule structure. */
+struct rte_lpm_rule {
+ uint32_t ip; /**< Rule IP address. */
+ uint8_t next_hop; /**< Rule next hop. */
+};
+
+/** @internal Contains metadata about the rules table. */
+struct rte_lpm_rule_info {
+ uint32_t used_rules; /**< Used rules so far. */
+ uint32_t first_rule; /**< Indexes the first rule of a given depth. */
+};
+
+/** @internal LPM structure. */
+struct rte_lpm {
+ /* LPM metadata. */
+ char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
+ int mem_location; /**< @deprecated @see RTE_LPM_HEAP and RTE_LPM_MEMZONE. */
+ uint32_t max_rules; /**< Max. balanced rules per lpm. */
+ struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
+
+ /* LPM Tables. */
+ struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
+ __rte_cache_aligned; /**< LPM tbl24 table. */
+ struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
+ __rte_cache_aligned; /**< LPM tbl8 table. */
+ struct rte_lpm_rule rules_tbl[0] \
+ __rte_cache_aligned; /**< LPM rules. */
+};
+
+/**
+ * Create an LPM object.
+ *
+ * @param name
+ * LPM object name
+ * @param socket_id
+ * NUMA socket ID for LPM table memory allocation
+ * @param max_rules
+ * Maximum number of LPM rules that can be added
+ * @param flags
+ * This parameter is currently unused
+ * @return
+ * Handle to LPM object on success, NULL otherwise with rte_errno set
+ * to an appropriate values. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - E_RTE_NO_TAILQ - no tailq list could be got for the lpm object list
+ * - EINVAL - invalid parameter passed to function
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_lpm *
+rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
+
+/**
+ * Find an existing LPM object and return a pointer to it.
+ *
+ * @param name
+ * Name of the lpm object as passed to rte_lpm_create()
+ * @return
+ * Pointer to lpm object or NULL if object not found with rte_errno
+ * set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ */
+struct rte_lpm *
+rte_lpm_find_existing(const char *name);
+
+/**
+ * Free an LPM object.
+ *
+ * @param lpm
+ * LPM object handle
+ * @return
+ * None
+ */
+void
+rte_lpm_free(struct rte_lpm *lpm);
+
+/**
+ * Add a rule to the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be added to the LPM table
+ * @param depth
+ * Depth of the rule to be added to the LPM table
+ * @param next_hop
+ * Next hop of the rule to be added to the LPM table
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
+
+/**
+ * Check if a rule is present in the LPM table,
+ * and provide its next hop if it is.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be searched
+ * @param depth
+ * Depth of the rule to searched
+ * @param next_hop
+ * Next hop of the rule (valid only if it is found)
+ * @return
+ * 1 if the rule exists, 0 if it does not, a negative value on failure
+ */
+int
+rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+uint8_t *next_hop);
+
+/**
+ * Delete a rule from the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be deleted from the LPM table
+ * @param depth
+ * Depth of the rule to be deleted from the LPM table
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
+
+/**
+ * Delete all rules from the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ */
+void
+rte_lpm_delete_all(struct rte_lpm *lpm);
+
+/**
+ * Lookup an IP into the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP to be looked up in the LPM table
+ * @param next_hop
+ * Next hop of the most specific rule found for IP (valid on lookup hit only)
+ * @return
+ * -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
+ */
+static inline int
+rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
+{
+ unsigned tbl24_index = (ip >> 8);
+ uint16_t tbl_entry;
+
+ /* DEBUG: Check user input arguments. */
+ RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
+
+ /* Copy tbl24 entry */
+ tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];
+
+ /* Copy tbl8 entry (only if needed) */
+ if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+
+ unsigned tbl8_index = (uint8_t)ip +
+ ((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+
+ tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+ }
+
+ *next_hop = (uint8_t)tbl_entry;
+ return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
+}
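
Putting the header together, a minimal control-plane sketch (assuming the EAL has already been initialised; the name, rule count, and addresses are arbitrary illustration values):

    #include <rte_lpm.h>

    static void lpm_example(void)
    {
        struct rte_lpm *lpm;
        uint8_t next_hop;
        uint32_t dst = (10u << 24) | (1u << 16) | (1u << 8) | 7;  /* 10.1.1.7 */

        lpm = rte_lpm_create("example", SOCKET_ID_ANY, 1024, 0);
        if (lpm == NULL)
            return;

        /* Route 10.1.0.0/16 via next hop 3. */
        rte_lpm_add(lpm, (10u << 24) | (1u << 16), 16, 3);

        /* Longest-prefix match: returns 0 on a hit and fills next_hop (3 here). */
        if (rte_lpm_lookup(lpm, dst, &next_hop) == 0) {
            /* forward using next_hop */
        }

        rte_lpm_free(lpm);
    }
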
+
+/**
+ * Lookup multiple IP addresses in an LPM table. This may be implemented as a
+ * macro, so the address of the function should not be used.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ips
+ * Array of IPs to be looked up in the LPM table
+ * @param next_hops
+ * Next hop of the most specific rule found for IP (valid on lookup hit only).
+ * This is an array of two byte values. The most significant byte in each
+ * value says whether the lookup was successful (bitmask
+ * RTE_LPM_LOOKUP_SUCCESS is set). The least significant byte is the
+ * actual next hop.
+ * @param n
+ * Number of elements in ips (and next_hops) array to lookup. This should be a
+ * compile time constant, and divisible by 8 for best performance.
+ * @return
+ * -EINVAL for incorrect arguments, otherwise 0
+ */
+#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
+ rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
+
+static inline int
+rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
+ uint16_t * next_hops, const unsigned n)
+{
+ unsigned i;
+ unsigned tbl24_indexes[n];
+
+ /* DEBUG: Check user input arguments. */
+ RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
+ (next_hops == NULL)), -EINVAL);
+
+ for (i = 0; i < n; i++) {
+ tbl24_indexes[i] = ips[i] >> 8;
+ }
+
+ for (i = 0; i < n; i++) {
+ /* Simply copy tbl24 entry to output */
+ next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];
+
+ /* Overwrite output with tbl8 entry if needed */
+ if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+
+ unsigned tbl8_index = (uint8_t)ips[i] +
+ ((uint8_t)next_hops[i] *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+
+ next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+ }
+ }
+ return 0;
+}
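
A short sketch of the bulk path (lpm is assumed to exist and forward()/drop() are hypothetical helpers; the point is that the RTE_LPM_LOOKUP_SUCCESS bit in each 16-bit result separates hits from misses, and the low byte carries the next hop):

    uint32_t ips[8] = { 0 };   /* fill with eight addresses to classify */
    uint16_t hops[8];
    unsigned k;

    rte_lpm_lookup_bulk(lpm, ips, hops, 8);

    for (k = 0; k < 8; k++) {
        if (hops[k] & RTE_LPM_LOOKUP_SUCCESS)
            forward(ips[k], (uint8_t)hops[k]);   /* low byte = next hop */
        else
            drop(ips[k]);                        /* no matching rule */
    }
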
+
+/* Mask four results. */
+#define RTE_LPM_MASKX4_RES UINT64_C(0x00ff00ff00ff00ff)
+
+/**
+ * Lookup four IP addresses in an LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * Four IPs to be looked up in the LPM table
+ * @param hop
+ * Next hop of the most specific rule found for IP (valid on lookup hit only).
+ * This is a 4-element array of two-byte values.
+ * If the lookup was successful for the given IP, then the least significant
+ * byte of the corresponding element is the actual next hop and the most
+ * significant byte is zero.
+ * If the lookup for the given IP failed, then the corresponding element
+ * contains the default value; see the description of the next parameter.
+ * @param defv
+ * Default value to populate into corresponding element of hop[] array,
+ * if lookup would fail.
+ */
+static inline void
+rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
+ uint16_t defv)
+{
+ __m128i i24;
+ rte_xmm_t i8;
+ uint16_t tbl[4];
+ uint64_t idx, pt;
+
+ const __m128i mask8 =
+ _mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);
+
+ /*
+ * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 4 LPM entries
+ * as one 64-bit value (0x0300030003000300).
+ */
+ const uint64_t mask_xv =
+ ((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
+ (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
+ (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
+ (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);
+
+ /*
+ * RTE_LPM_LOOKUP_SUCCESS for 4 LPM entries
+ * as one 64-bit value (0x0100010001000100).
+ */
+ const uint64_t mask_v =
+ ((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
+ (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
+ (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
+ (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);
+
+ /* get 4 indexes for tbl24[]. */
+ i24 = _mm_srli_epi32(ip, CHAR_BIT);
+
+ /* extract values from tbl24[] */
+ idx = _mm_cvtsi128_si64(i24);
+ i24 = _mm_srli_si128(i24, sizeof(uint64_t));
+
+ tbl[0] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
+ tbl[1] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+
+ idx = _mm_cvtsi128_si64(i24);
+
+ tbl[2] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
+ tbl[3] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+
+ /* get 4 indexes for tbl8[]. */
+ i8.m = _mm_and_si128(ip, mask8);
+
+ pt = (uint64_t)tbl[0] |
+ (uint64_t)tbl[1] << 16 |
+ (uint64_t)tbl[2] << 32 |
+ (uint64_t)tbl[3] << 48;
+
+ /* search successfully finished for all 4 IP addresses. */
+ if (likely((pt & mask_xv) == mask_v)) {
+ uintptr_t ph = (uintptr_t)hop;
+ *(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
+ return;
+ }
+
+ if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+ i8.u32[0] = i8.u32[0] +
+ (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl[0] = *(const uint16_t *)&lpm->tbl8[i8.u32[0]];
+ }
+ if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+ i8.u32[1] = i8.u32[1] +
+ (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl[1] = *(const uint16_t *)&lpm->tbl8[i8.u32[1]];
+ }
+ if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+ i8.u32[2] = i8.u32[2] +
+ (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl[2] = *(const uint16_t *)&lpm->tbl8[i8.u32[2]];
+ }
+ if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+ i8.u32[3] = i8.u32[3] +
+ (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl[3] = *(const uint16_t *)&lpm->tbl8[i8.u32[3]];
+ }
+
+ hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
+ hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
+ hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
+ hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
+}
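
And a hedged sketch of the four-at-a-time variant (SSE2 intrinsics from <emmintrin.h>; lpm and the four addresses are assumed to exist, and 255 is an arbitrary "no route" marker):

    uint32_t a0, a1, a2, a3;   /* four IPv4 addresses in host byte order */
    uint16_t hop[4];
    __m128i ip4;

    /* _mm_set_epi32() lists lanes from most to least significant,
     * so a0 lands in lane 0 and is reported in hop[0]. */
    ip4 = _mm_set_epi32(a3, a2, a1, a0);

    /* Misses come back as the default value rather than an error code. */
    rte_lpm_lookupx4(lpm, ip4, hop, 255);
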
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_LPM_H_ */
diff --git a/src/dpdk_lib18/librte_lpm/rte_lpm6.c b/src/dpdk_lib18/librte_lpm/rte_lpm6.c
new file mode 100755
index 00000000..42e6d800
--- /dev/null
+++ b/src/dpdk_lib18/librte_lpm/rte_lpm6.c
@@ -0,0 +1,892 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_memcpy.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>
+
+#include "rte_lpm6.h"
+
+#define RTE_LPM6_TBL24_NUM_ENTRIES (1 << 24)
+#define RTE_LPM6_TBL8_GROUP_NUM_ENTRIES 256
+#define RTE_LPM6_TBL8_MAX_NUM_GROUPS (1 << 21)
+
+#define RTE_LPM6_VALID_EXT_ENTRY_BITMASK 0xA0000000
+#define RTE_LPM6_LOOKUP_SUCCESS 0x20000000
+#define RTE_LPM6_TBL8_BITMASK 0x001FFFFF
+
+#define ADD_FIRST_BYTE 3
+#define LOOKUP_FIRST_BYTE 4
+#define BYTE_SIZE 8
+#define BYTES2_SIZE 16
+
+#define lpm6_tbl8_gindex next_hop
+
+/** Flags for setting an entry as valid/invalid. */
+enum valid_flag {
+ INVALID = 0,
+ VALID
+};
+
+TAILQ_HEAD(rte_lpm6_list, rte_tailq_entry);
+
+/** Tbl entry structure. It is the same for both tbl24 and tbl8 */
+struct rte_lpm6_tbl_entry {
+ uint32_t next_hop: 21; /**< Next hop / next table to be checked. */
+ uint32_t depth :8; /**< Rule depth. */
+
+ /* Flags. */
+ uint32_t valid :1; /**< Validation flag. */
+ uint32_t valid_group :1; /**< Group validation flag. */
+ uint32_t ext_entry :1; /**< External entry. */
+};
+
+/** Rules tbl entry structure. */
+struct rte_lpm6_rule {
+ uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
+ uint8_t next_hop; /**< Rule next hop. */
+ uint8_t depth; /**< Rule depth. */
+};
+
+/** LPM6 structure. */
+struct rte_lpm6 {
+ /* LPM metadata. */
+ char name[RTE_LPM6_NAMESIZE]; /**< Name of the lpm. */
+ uint32_t max_rules; /**< Max number of rules. */
+ uint32_t used_rules; /**< Used rules so far. */
+ uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */
+ uint32_t next_tbl8; /**< Next tbl8 to be used. */
+
+ /* LPM Tables. */
+ struct rte_lpm6_rule *rules_tbl; /**< LPM rules. */
+ struct rte_lpm6_tbl_entry tbl24[RTE_LPM6_TBL24_NUM_ENTRIES]
+ __rte_cache_aligned; /**< LPM tbl24 table. */
+ struct rte_lpm6_tbl_entry tbl8[0]
+ __rte_cache_aligned; /**< LPM tbl8 table. */
+};
+
+/*
+ * Takes an array of uint8_t (IPv6 address) and masks it using the depth.
+ * It leaves the most significant 'depth' bits of the address untouched
+ * and sets the remaining bits to 0.
+ */
+static inline void
+mask_ip(uint8_t *ip, uint8_t depth)
+{
+ int16_t part_depth, mask;
+ int i;
+
+ part_depth = depth;
+
+ for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) {
+ if (part_depth < BYTE_SIZE && part_depth >= 0) {
+ mask = (uint16_t)(~(UINT8_MAX >> part_depth));
+ ip[i] = (uint8_t)(ip[i] & mask);
+ } else if (part_depth < 0) {
+ ip[i] = 0;
+ }
+ part_depth -= BYTE_SIZE;
+ }
+}
+
+/*
+ * Allocates memory for LPM object
+ */
+struct rte_lpm6 *
+rte_lpm6_create(const char *name, int socket_id,
+ const struct rte_lpm6_config *config)
+{
+ char mem_name[RTE_LPM6_NAMESIZE];
+ struct rte_lpm6 *lpm = NULL;
+ struct rte_tailq_entry *te;
+ uint64_t mem_size, rules_size;
+ struct rte_lpm6_list *lpm_list;
+
+ /* Check that we have an initialised tail queue */
+ if ((lpm_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM6, rte_lpm6_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ RTE_BUILD_BUG_ON(sizeof(struct rte_lpm6_tbl_entry) != sizeof(uint32_t));
+
+ /* Check user arguments. */
+ if ((name == NULL) || (socket_id < -1) || (config == NULL) ||
+ (config->max_rules == 0) ||
+ config->number_tbl8s > RTE_LPM6_TBL8_MAX_NUM_GROUPS) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
+
+ /* Determine the amount of memory to allocate. */
+ mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *
+ RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
+ rules_size = sizeof(struct rte_lpm6_rule) * config->max_rules;
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+	/* Guarantee there's no existing entry with the same name */
+ TAILQ_FOREACH(te, lpm_list, next) {
+ lpm = (struct rte_lpm6 *) te->data;
+ if (strncmp(name, lpm->name, RTE_LPM6_NAMESIZE) == 0)
+ break;
+ }
+ if (te != NULL)
+ goto exit;
+
+ /* allocate tailq entry */
+ te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, LPM, "Failed to allocate tailq entry!\n");
+ goto exit;
+ }
+
+ /* Allocate memory to store the LPM data structures. */
+ lpm = (struct rte_lpm6 *)rte_zmalloc_socket(mem_name, (size_t)mem_size,
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (lpm == NULL) {
+ RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+ rte_free(te);
+ goto exit;
+ }
+
+ lpm->rules_tbl = (struct rte_lpm6_rule *)rte_zmalloc_socket(NULL,
+ (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (lpm->rules_tbl == NULL) {
+ RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+ rte_free(lpm);
+ rte_free(te);
+ goto exit;
+ }
+
+ /* Save user arguments. */
+ lpm->max_rules = config->max_rules;
+ lpm->number_tbl8s = config->number_tbl8s;
+ snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+
+ te->data = (void *) lpm;
+
+ TAILQ_INSERT_TAIL(lpm_list, te, next);
+
+exit:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return lpm;
+}
+
+/*
+ * Find an existing lpm table and return a pointer to it.
+ */
+struct rte_lpm6 *
+rte_lpm6_find_existing(const char *name)
+{
+ struct rte_lpm6 *l = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_lpm6_list *lpm_list;
+
+ /* Check that we have an initialised tail queue */
+ if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM6,
+ rte_lpm6_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_FOREACH(te, lpm_list, next) {
+ l = (struct rte_lpm6 *) te->data;
+ if (strncmp(name, l->name, RTE_LPM6_NAMESIZE) == 0)
+ break;
+ }
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ return l;
+}
+
+/*
+ * Deallocates memory for given LPM table.
+ */
+void
+rte_lpm6_free(struct rte_lpm6 *lpm)
+{
+ struct rte_lpm6_list *lpm_list;
+ struct rte_tailq_entry *te;
+
+ /* Check user arguments. */
+ if (lpm == NULL)
+ return;
+
+ /* check that we have an initialised tail queue */
+ if ((lpm_list =
+	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM6, rte_lpm6_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return;
+ }
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find our tailq entry */
+ TAILQ_FOREACH(te, lpm_list, next) {
+ if (te->data == (void *) lpm)
+ break;
+ }
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(lpm_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ rte_free(lpm);
+ rte_free(te);
+}
+
+/*
+ * Checks if a rule already exists in the rules table and updates
+ * the nexthop if so. Otherwise it adds a new rule if enough space is available.
+ */
+static inline int32_t
+rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t next_hop, uint8_t depth)
+{
+ uint32_t rule_index;
+
+ /* Scan through rule list to see if rule already exists. */
+ for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {
+
+ /* If rule already exists update its next_hop and return. */
+ if ((memcmp (lpm->rules_tbl[rule_index].ip, ip,
+ RTE_LPM6_IPV6_ADDR_SIZE) == 0) &&
+ lpm->rules_tbl[rule_index].depth == depth) {
+ lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+ return rule_index;
+ }
+ }
+
+ /*
+ * If rule does not exist check if there is space to add a new rule to
+ * this rule group. If there is no space return error.
+ */
+ if (lpm->used_rules == lpm->max_rules) {
+ return -ENOSPC;
+ }
+
+ /* If there is space for the new rule add it. */
+ rte_memcpy(lpm->rules_tbl[rule_index].ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
+ lpm->rules_tbl[rule_index].next_hop = next_hop;
+ lpm->rules_tbl[rule_index].depth = depth;
+
+ /* Increment the used rules counter for this rule group. */
+ lpm->used_rules++;
+
+ return rule_index;
+}
+
+/*
+ * Function that expands a rule across the data structure when a less-generic
+ * one has been added before. It assures that every possible combination of bits
+ * in the IP address returns a match.
+ */
+static void
+expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t depth,
+ uint8_t next_hop)
+{
+ uint32_t tbl8_group_end, tbl8_gindex_next, j;
+
+ tbl8_group_end = tbl8_gindex + RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
+
+ struct rte_lpm6_tbl_entry new_tbl8_entry = {
+ .valid = VALID,
+ .valid_group = VALID,
+ .depth = depth,
+ .next_hop = next_hop,
+ .ext_entry = 0,
+ };
+
+ for (j = tbl8_gindex; j < tbl8_group_end; j++) {
+ if (!lpm->tbl8[j].valid || (lpm->tbl8[j].ext_entry == 0
+ && lpm->tbl8[j].depth <= depth)) {
+
+ lpm->tbl8[j] = new_tbl8_entry;
+
+ } else if (lpm->tbl8[j].ext_entry == 1) {
+
+ tbl8_gindex_next = lpm->tbl8[j].lpm6_tbl8_gindex
+ * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
+ expand_rule(lpm, tbl8_gindex_next, depth, next_hop);
+ }
+ }
+}
+
+/*
+ * Partially adds a new route to the data structure (tbl24+tbl8s).
+ * It returns 0 on success, a negative number on failure, or 1 if
+ * the process needs to be continued by calling the function again.
+ */
+static inline int
+add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
+ struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip, uint8_t bytes,
+ uint8_t first_byte, uint8_t depth, uint8_t next_hop)
+{
+ uint32_t tbl_index, tbl_range, tbl8_group_start, tbl8_group_end, i;
+ int32_t tbl8_gindex;
+ int8_t bitshift;
+ uint8_t bits_covered;
+
+ /*
+ * Calculate index to the table based on the number and position
+ * of the bytes being inspected in this step.
+ */
+ tbl_index = 0;
+ for (i = first_byte; i < (uint32_t)(first_byte + bytes); i++) {
+ bitshift = (int8_t)((bytes - i)*BYTE_SIZE);
+
+ if (bitshift < 0) bitshift = 0;
+ tbl_index = tbl_index | ip[i-1] << bitshift;
+ }
+
+ /* Number of bits covered in this step */
+ bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE);
+
+ /*
+	 * If depth is not greater than this number (i.e. this is the last step),
+ * expand the rule across the relevant positions in the table.
+ */
+ if (depth <= bits_covered) {
+ tbl_range = 1 << (bits_covered - depth);
+
+ for (i = tbl_index; i < (tbl_index + tbl_range); i++) {
+ if (!tbl[i].valid || (tbl[i].ext_entry == 0 &&
+ tbl[i].depth <= depth)) {
+
+ struct rte_lpm6_tbl_entry new_tbl_entry = {
+ .next_hop = next_hop,
+ .depth = depth,
+ .valid = VALID,
+ .valid_group = VALID,
+ .ext_entry = 0,
+ };
+
+ tbl[i] = new_tbl_entry;
+
+ } else if (tbl[i].ext_entry == 1) {
+
+ /*
+ * If tbl entry is valid and extended calculate the index
+ * into next tbl8 and expand the rule across the data structure.
+ */
+ tbl8_gindex = tbl[i].lpm6_tbl8_gindex *
+ RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
+ expand_rule(lpm, tbl8_gindex, depth, next_hop);
+ }
+ }
+
+ return 0;
+ }
+ /*
+ * If this is not the last step just fill one position
+ * and calculate the index to the next table.
+ */
+ else {
+ /* If it's invalid a new tbl8 is needed */
+ if (!tbl[tbl_index].valid) {
+ if (lpm->next_tbl8 < lpm->number_tbl8s)
+ tbl8_gindex = (lpm->next_tbl8)++;
+ else
+ return -ENOSPC;
+
+ struct rte_lpm6_tbl_entry new_tbl_entry = {
+ .lpm6_tbl8_gindex = tbl8_gindex,
+ .depth = 0,
+ .valid = VALID,
+ .valid_group = VALID,
+ .ext_entry = 1,
+ };
+
+ tbl[tbl_index] = new_tbl_entry;
+ }
+ /*
+	 * If it's valid but not extended, the rule that was stored
+	 * here needs to be moved to the next table.
+ */
+ else if (tbl[tbl_index].ext_entry == 0) {
+ /* Search for free tbl8 group. */
+ if (lpm->next_tbl8 < lpm->number_tbl8s)
+ tbl8_gindex = (lpm->next_tbl8)++;
+ else
+ return -ENOSPC;
+
+ tbl8_group_start = tbl8_gindex *
+ RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
+ tbl8_group_end = tbl8_group_start +
+ RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
+
+ /* Populate new tbl8 with tbl value. */
+ for (i = tbl8_group_start; i < tbl8_group_end; i++) {
+ lpm->tbl8[i].valid = VALID;
+ lpm->tbl8[i].depth = tbl[tbl_index].depth;
+ lpm->tbl8[i].next_hop = tbl[tbl_index].next_hop;
+ lpm->tbl8[i].ext_entry = 0;
+ }
+
+ /*
+ * Update tbl entry to point to new tbl8 entry. Note: The
+ * ext_flag and tbl8_index need to be updated simultaneously,
+ * so assign whole structure in one go.
+ */
+ struct rte_lpm6_tbl_entry new_tbl_entry = {
+ .lpm6_tbl8_gindex = tbl8_gindex,
+ .depth = 0,
+ .valid = VALID,
+ .valid_group = VALID,
+ .ext_entry = 1,
+ };
+
+ tbl[tbl_index] = new_tbl_entry;
+ }
+
+ *tbl_next = &(lpm->tbl8[tbl[tbl_index].lpm6_tbl8_gindex *
+ RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]);
+ }
+
+ return 1;
+}
+
+/*
+ * Add a route
+ */
+int
+rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint8_t next_hop)
+{
+ struct rte_lpm6_tbl_entry *tbl;
+ struct rte_lpm6_tbl_entry *tbl_next;
+ int32_t rule_index;
+ int status;
+ uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
+ int i;
+
+ /* Check user arguments. */
+ if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
+ return -EINVAL;
+
+ /* Copy the IP and mask it to avoid modifying user's input data. */
+ memcpy(masked_ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
+ mask_ip(masked_ip, depth);
+
+ /* Add the rule to the rule table. */
+ rule_index = rule_add(lpm, masked_ip, next_hop, depth);
+
+ /* If there is no space available for new rule return error. */
+ if (rule_index < 0) {
+ return rule_index;
+ }
+
+ /* Inspect the first three bytes through tbl24 on the first step. */
+ tbl = lpm->tbl24;
+ status = add_step (lpm, tbl, &tbl_next, masked_ip, ADD_FIRST_BYTE, 1,
+ depth, next_hop);
+ if (status < 0) {
+ rte_lpm6_delete(lpm, masked_ip, depth);
+
+ return status;
+ }
+
+ /*
+ * Inspect one by one the rest of the bytes until
+ * the process is completed.
+ */
+ for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) {
+ tbl = tbl_next;
+ status = add_step (lpm, tbl, &tbl_next, masked_ip, 1, (uint8_t)(i+1),
+ depth, next_hop);
+ if (status < 0) {
+ rte_lpm6_delete(lpm, masked_ip, depth);
+
+ return status;
+ }
+ }
+
+ return status;
+}
+
+/*
+ * Takes a pointer to a table entry and inspects one level.
+ * The function returns 0 on lookup success, -ENOENT if no match was found
+ * or 1 if the process needs to be continued by calling the function again.
+ */
+static inline int
+lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
+ const struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip,
+ uint8_t first_byte, uint8_t *next_hop)
+{
+ uint32_t tbl8_index, tbl_entry;
+
+ /* Take the integer value from the pointer. */
+ tbl_entry = *(const uint32_t *)tbl;
+
+ /* If it is valid and extended we calculate the new pointer to return. */
+ if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {
+
+ tbl8_index = ip[first_byte-1] +
+ ((tbl_entry & RTE_LPM6_TBL8_BITMASK) *
+ RTE_LPM6_TBL8_GROUP_NUM_ENTRIES);
+
+ *tbl_next = &lpm->tbl8[tbl8_index];
+
+ return 1;
+ } else {
+ /* If not extended then we can have a match. */
+ *next_hop = (uint8_t)tbl_entry;
+ return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT;
+ }
+}
+
+/*
+ * Looks up an IP
+ */
+int
+rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
+{
+ const struct rte_lpm6_tbl_entry *tbl;
+ const struct rte_lpm6_tbl_entry *tbl_next;
+ int status;
+ uint8_t first_byte;
+ uint32_t tbl24_index;
+
+ /* DEBUG: Check user input arguments. */
+ if ((lpm == NULL) || (ip == NULL) || (next_hop == NULL)) {
+ return -EINVAL;
+ }
+
+ first_byte = LOOKUP_FIRST_BYTE;
+ tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2];
+
+ /* Calculate pointer to the first entry to be inspected */
+ tbl = &lpm->tbl24[tbl24_index];
+
+ do {
+ /* Continue inspecting following levels until success or failure */
+ status = lookup_step(lpm, tbl, &tbl_next, ip, first_byte++, next_hop);
+ tbl = tbl_next;
+ } while (status == 1);
+
+ return status;
+}
+
+/*
+ * Looks up a group of IP addresses
+ */
+int
+rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+ int16_t * next_hops, unsigned n)
+{
+ unsigned i;
+ const struct rte_lpm6_tbl_entry *tbl;
+ const struct rte_lpm6_tbl_entry *tbl_next;
+ uint32_t tbl24_index;
+ uint8_t first_byte, next_hop;
+ int status;
+
+ /* DEBUG: Check user input arguments. */
+ if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL)) {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < n; i++) {
+ first_byte = LOOKUP_FIRST_BYTE;
+ tbl24_index = (ips[i][0] << BYTES2_SIZE) |
+ (ips[i][1] << BYTE_SIZE) | ips[i][2];
+
+ /* Calculate pointer to the first entry to be inspected */
+ tbl = &lpm->tbl24[tbl24_index];
+
+ do {
+ /* Continue inspecting following levels until success or failure */
+ status = lookup_step(lpm, tbl, &tbl_next, ips[i], first_byte++,
+ &next_hop);
+ tbl = tbl_next;
+ } while (status == 1);
+
+ if (status < 0)
+ next_hops[i] = -1;
+ else
+ next_hops[i] = next_hop;
+ }
+
+ return 0;
+}
+
+/*
+ * Finds a rule in rule table.
+ * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
+ */
+static inline int32_t
+rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
+{
+ uint32_t rule_index;
+
+ /* Scan used rules at given depth to find rule. */
+ for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {
+ /* If rule is found return the rule index. */
+ if ((memcmp (lpm->rules_tbl[rule_index].ip, ip,
+ RTE_LPM6_IPV6_ADDR_SIZE) == 0) &&
+ lpm->rules_tbl[rule_index].depth == depth) {
+
+ return rule_index;
+ }
+ }
+
+ /* If rule is not found return -ENOENT. */
+ return -ENOENT;
+}
+
+/*
+ * Look for a rule in the high-level rules table
+ */
+int
+rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+uint8_t *next_hop)
+{
+ uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
+ int32_t rule_index;
+
+ /* Check user arguments. */
+ if ((lpm == NULL) || next_hop == NULL || ip == NULL ||
+ (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
+ return -EINVAL;
+
+ /* Copy the IP and mask it to avoid modifying user's input data. */
+ memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
+ mask_ip(ip_masked, depth);
+
+ /* Look for the rule using rule_find. */
+ rule_index = rule_find(lpm, ip_masked, depth);
+
+ if (rule_index >= 0) {
+ *next_hop = lpm->rules_tbl[rule_index].next_hop;
+ return 1;
+ }
+
+ /* If rule is not found return 0. */
+ return 0;
+}
+
+/*
+ * Delete a rule from the rule table.
+ * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
+ */
+static inline void
+rule_delete(struct rte_lpm6 *lpm, int32_t rule_index)
+{
+ /*
+ * Overwrite redundant rule with last rule in group and decrement rule
+ * counter.
+ */
+ lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->used_rules-1];
+ lpm->used_rules--;
+}
+
+/*
+ * Deletes a rule
+ */
+int
+rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
+{
+ int32_t rule_to_delete_index;
+ uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
+ unsigned i;
+
+ /*
+ * Check input arguments.
+ */
+ if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) {
+ return -EINVAL;
+ }
+
+ /* Copy the IP and mask it to avoid modifying user's input data. */
+ memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
+ mask_ip(ip_masked, depth);
+
+ /*
+ * Find the index of the input rule, that needs to be deleted, in the
+ * rule table.
+ */
+ rule_to_delete_index = rule_find(lpm, ip_masked, depth);
+
+ /*
+ * Check if rule_to_delete_index was found. If no rule was found the
+ * function rule_find returns -ENOENT.
+ */
+ if (rule_to_delete_index < 0)
+ return rule_to_delete_index;
+
+ /* Delete the rule from the rule table. */
+ rule_delete(lpm, rule_to_delete_index);
+
+ /*
+	 * Set all the table entries to 0 (i.e. delete every rule
+	 * from the data structure).
+ */
+ lpm->next_tbl8 = 0;
+ memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
+ memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
+ * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
+
+ /*
+ * Add every rule again (except for the one that was removed from
+ * the rules table).
+ */
+ for (i = 0; i < lpm->used_rules; i++) {
+ rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
+ lpm->rules_tbl[i].next_hop);
+ }
+
+ return 0;
+}
+
+/*
+ * Deletes a group of rules
+ */
+int
+rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths, unsigned n)
+{
+ int32_t rule_to_delete_index;
+ uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
+ unsigned i;
+
+ /*
+ * Check input arguments.
+ */
+ if ((lpm == NULL) || (ips == NULL) || (depths == NULL)) {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < n; i++) {
+ /* Copy the IP and mask it to avoid modifying user's input data. */
+ memcpy(ip_masked, ips[i], RTE_LPM6_IPV6_ADDR_SIZE);
+ mask_ip(ip_masked, depths[i]);
+
+ /*
+ * Find the index of the input rule, that needs to be deleted, in the
+ * rule table.
+ */
+ rule_to_delete_index = rule_find(lpm, ip_masked, depths[i]);
+
+ /*
+ * Check if rule_to_delete_index was found. If no rule was found the
+ * function rule_find returns -ENOENT.
+ */
+ if (rule_to_delete_index < 0)
+ continue;
+
+ /* Delete the rule from the rule table. */
+ rule_delete(lpm, rule_to_delete_index);
+ }
+
+ /*
+	 * Set all the table entries to 0 (i.e. delete every rule
+	 * from the data structure).
+ */
+ lpm->next_tbl8 = 0;
+ memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
+ memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
+ * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
+
+ /*
+ * Add every rule again (except for the ones that were removed from
+ * the rules table).
+ */
+ for (i = 0; i < lpm->used_rules; i++) {
+ rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
+ lpm->rules_tbl[i].next_hop);
+ }
+
+ return 0;
+}
+
+/*
+ * Delete all rules from the LPM table.
+ */
+void
+rte_lpm6_delete_all(struct rte_lpm6 *lpm)
+{
+ /* Zero used rules counter. */
+ lpm->used_rules = 0;
+
+ /* Zero next tbl8 index. */
+ lpm->next_tbl8 = 0;
+
+ /* Zero tbl24. */
+ memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
+
+ /* Zero tbl8. */
+ memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) *
+ RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
+
+	/* Delete all rules from the rules table. */
+ memset(lpm->rules_tbl, 0, sizeof(struct rte_lpm6_rule) * lpm->max_rules);
+}
diff --git a/src/dpdk_lib18/librte_lpm/rte_lpm6.h b/src/dpdk_lib18/librte_lpm/rte_lpm6.h
new file mode 100755
index 00000000..4db810f9
--- /dev/null
+++ b/src/dpdk_lib18/librte_lpm/rte_lpm6.h
@@ -0,0 +1,228 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RTE_LPM6_H_
+#define _RTE_LPM6_H_
+
+/**
+ * @file
+ * RTE Longest Prefix Match for IPv6 (LPM6)
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define RTE_LPM6_MAX_DEPTH 128
+#define RTE_LPM6_IPV6_ADDR_SIZE 16
+/** Max number of characters in LPM name. */
+#define RTE_LPM6_NAMESIZE 32
+
+/** LPM structure. */
+struct rte_lpm6;
+
+/** LPM configuration structure. */
+struct rte_lpm6_config {
+ uint32_t max_rules; /**< Max number of rules. */
+ uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */
+ int flags; /**< This field is currently unused. */
+};
+
+/**
+ * Create an LPM object.
+ *
+ * @param name
+ * LPM object name
+ * @param socket_id
+ * NUMA socket ID for LPM table memory allocation
+ * @param config
+ * Structure containing the configuration
+ * @return
+ * Handle to LPM object on success, NULL otherwise with rte_errno set
+ *   to an appropriate value. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - E_RTE_NO_TAILQ - no tailq list could be got for the lpm object list
+ * - EINVAL - invalid parameter passed to function
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_lpm6 *
+rte_lpm6_create(const char *name, int socket_id,
+ const struct rte_lpm6_config *config);
+
+/**
+ * Find an existing LPM object and return a pointer to it.
+ *
+ * @param name
+ * Name of the lpm object as passed to rte_lpm6_create()
+ * @return
+ * Pointer to lpm object or NULL if object not found with rte_errno
+ * set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ */
+struct rte_lpm6 *
+rte_lpm6_find_existing(const char *name);
+
+/**
+ * Free an LPM object.
+ *
+ * @param lpm
+ * LPM object handle
+ * @return
+ * None
+ */
+void
+rte_lpm6_free(struct rte_lpm6 *lpm);
+
+/**
+ * Add a rule to the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be added to the LPM table
+ * @param depth
+ * Depth of the rule to be added to the LPM table
+ * @param next_hop
+ * Next hop of the rule to be added to the LPM table
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint8_t next_hop);
+
+/**
+ * Check if a rule is present in the LPM table,
+ * and provide its next hop if it is.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be searched
+ * @param depth
+ *   Depth of the rule to be searched
+ * @param next_hop
+ * Next hop of the rule (valid only if it is found)
+ * @return
+ * 1 if the rule exists, 0 if it does not, a negative value on failure
+ */
+int
+rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+uint8_t *next_hop);
+
+/**
+ * Delete a rule from the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be deleted from the LPM table
+ * @param depth
+ * Depth of the rule to be deleted from the LPM table
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth);
+
+/**
+ * Delete a set of rules from the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ips
+ * Array of IPs to be deleted from the LPM table
+ * @param depths
+ * Array of depths of the rules to be deleted from the LPM table
+ * @param n
+ * Number of rules to be deleted from the LPM table
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths, unsigned n);
+
+/**
+ * Delete all rules from the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ */
+void
+rte_lpm6_delete_all(struct rte_lpm6 *lpm);
+
+/**
+ * Lookup an IP into the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP to be looked up in the LPM table
+ * @param next_hop
+ * Next hop of the most specific rule found for IP (valid on lookup hit only)
+ * @return
+ * -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
+ */
+int
+rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop);
+
+/**
+ * Lookup multiple IP addresses in an LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ips
+ * Array of IPs to be looked up in the LPM table
+ * @param next_hops
+ * Next hop of the most specific rule found for IP (valid on lookup hit only).
+ * This is an array of two byte values. The next hop will be stored on
+ * each position on success; otherwise the position will be set to -1.
+ * @param n
+ * Number of elements in ips (and next_hops) array to lookup.
+ * @return
+ * -EINVAL for incorrect arguments, otherwise 0
+ */
+int
+rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+ int16_t * next_hops, unsigned n);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
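
Note that the bulk lookup declared above reports results per address: next_hops is an array of int16_t, and a position is set to -1 on a miss rather than failing the whole batch. A short sketch under that contract:

    #include <rte_lpm6.h>

    static void lpm6_bulk_sketch(const struct rte_lpm6 *lpm)
    {
            uint8_t ips[2][RTE_LPM6_IPV6_ADDR_SIZE] = {
                    { 0x20, 0x01, 0x0d, 0xb8 },   /* 2001:db8::       */
                    { 0xfe, 0x80 },               /* fe80:: (no rule) */
            };
            int16_t next_hops[2];

            if (rte_lpm6_lookup_bulk_func(lpm, ips, next_hops, 2) == 0) {
                    /* next_hops[i] holds the next hop, or -1 on a miss. */
            }
    }
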
diff --git a/src/dpdk_lib18/librte_malloc/Makefile b/src/dpdk_lib18/librte_malloc/Makefile
new file mode 100755
index 00000000..ba87e34b
--- /dev/null
+++ b/src/dpdk_lib18/librte_malloc/Makefile
@@ -0,0 +1,48 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_malloc.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_MALLOC) := rte_malloc.c malloc_elem.c malloc_heap.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_MALLOC)-include := rte_malloc.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_MALLOC) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_malloc/malloc_elem.c b/src/dpdk_lib18/librte_malloc/malloc_elem.c
new file mode 100755
index 00000000..ef26e472
--- /dev/null
+++ b/src/dpdk_lib18/librte_malloc/malloc_elem.c
@@ -0,0 +1,321 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+
+#include "malloc_elem.h"
+#include "malloc_heap.h"
+
+#define MIN_DATA_SIZE (RTE_CACHE_LINE_SIZE)
+
+/*
+ * initialise a general malloc_elem header structure
+ */
+void
+malloc_elem_init(struct malloc_elem *elem,
+ struct malloc_heap *heap, const struct rte_memzone *mz, size_t size)
+{
+ elem->heap = heap;
+ elem->mz = mz;
+ elem->prev = NULL;
+ memset(&elem->free_list, 0, sizeof(elem->free_list));
+ elem->state = ELEM_FREE;
+ elem->size = size;
+ elem->pad = 0;
+ set_header(elem);
+ set_trailer(elem);
+}
+
+/*
+ * initialise a dummy malloc_elem header for the end-of-memzone marker
+ */
+void
+malloc_elem_mkend(struct malloc_elem *elem, struct malloc_elem *prev)
+{
+ malloc_elem_init(elem, prev->heap, prev->mz, 0);
+ elem->prev = prev;
+	elem->state = ELEM_BUSY; /* mark busy so it's never merged */
+}
+
+/*
+ * calculate the starting point of where data of the requested size
+ * and alignment would fit in the current element. If the data doesn't
+ * fit, return NULL.
+ */
+static void *
+elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align)
+{
+ const uintptr_t end_pt = (uintptr_t)elem +
+ elem->size - MALLOC_ELEM_TRAILER_LEN;
+ const uintptr_t new_data_start = rte_align_floor_int((end_pt - size),align);
+ const uintptr_t new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;
+
+	/* if the new start point is before the existing start, it won't fit */
+ return (new_elem_start < (uintptr_t)elem) ? NULL : (void *)new_elem_start;
+}
+
+/*
+ * use elem_start_pt to determine if we can meet the size and
+ * alignment request from the current element
+ */
+int
+malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align)
+{
+ return elem_start_pt(elem, size, align) != NULL;
+}
+
+/*
+ * split an existing element into two smaller elements at the given
+ * split_pt parameter.
+ */
+static void
+split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)
+{
+ struct malloc_elem *next_elem = RTE_PTR_ADD(elem, elem->size);
+ const unsigned old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem;
+ const unsigned new_elem_size = elem->size - old_elem_size;
+
+ malloc_elem_init(split_pt, elem->heap, elem->mz, new_elem_size);
+ split_pt->prev = elem;
+ next_elem->prev = split_pt;
+ elem->size = old_elem_size;
+ set_trailer(elem);
+}
+
+/*
+ * Given an element size, compute its freelist index.
+ * We free an element into the freelist containing similarly-sized elements.
+ * We try to allocate elements starting with the freelist containing
+ * similarly-sized elements, and if necessary, we search freelists
+ * containing larger elements.
+ *
+ * Example element size ranges for a heap with five free lists:
+ * heap->free_head[0] - (0 , 2^8]
+ * heap->free_head[1] - (2^8 , 2^10]
+ * heap->free_head[2] - (2^10 ,2^12]
+ * heap->free_head[3] - (2^12, 2^14]
+ * heap->free_head[4] - (2^14, MAX_SIZE]
+ */
+size_t
+malloc_elem_free_list_index(size_t size)
+{
+#define MALLOC_MINSIZE_LOG2 8
+#define MALLOC_LOG2_INCREMENT 2
+
+ size_t log2;
+ size_t index;
+
+ if (size <= (1UL << MALLOC_MINSIZE_LOG2))
+ return 0;
+
+ /* Find next power of 2 >= size. */
+ log2 = sizeof(size) * 8 - __builtin_clzl(size-1);
+
+ /* Compute freelist index, based on log2(size). */
+ index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) /
+ MALLOC_LOG2_INCREMENT;
+
+ return (index <= RTE_HEAP_NUM_FREELISTS-1?
+ index: RTE_HEAP_NUM_FREELISTS-1);
+}
+
+/*
+ * Add the specified element to its heap's free list.
+ */
+void
+malloc_elem_free_list_insert(struct malloc_elem *elem)
+{
+ size_t idx = malloc_elem_free_list_index(elem->size - MALLOC_ELEM_HEADER_LEN);
+
+ elem->state = ELEM_FREE;
+ LIST_INSERT_HEAD(&elem->heap->free_head[idx], elem, free_list);
+}
+
+/*
+ * Remove the specified element from its heap's free list.
+ */
+static void
+elem_free_list_remove(struct malloc_elem *elem)
+{
+ LIST_REMOVE(elem, free_list);
+}
+
+/*
+ * reserve a block of data in an existing malloc_elem. If the malloc_elem
+ * is much larger than the data block requested, we split the element in two.
+ * This function is only called from malloc_heap_alloc so parameter checking
+ * is not done here, as it's done there previously.
+ */
+struct malloc_elem *
+malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align)
+{
+ struct malloc_elem *new_elem = elem_start_pt(elem, size, align);
+ const unsigned old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
+
+ if (old_elem_size < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE){
+ /* don't split it, pad the element instead */
+ elem->state = ELEM_BUSY;
+ elem->pad = old_elem_size;
+
+ /* put a dummy header in padding, to point to real element header */
+ if (elem->pad > 0){ /* pad will be at least 64-bytes, as everything
+ * is cache-line aligned */
+ new_elem->pad = elem->pad;
+ new_elem->state = ELEM_PAD;
+ new_elem->size = elem->size - elem->pad;
+ set_header(new_elem);
+ }
+ /* remove element from free list */
+ elem_free_list_remove(elem);
+
+ return new_elem;
+ }
+
+ /* we are going to split the element in two. The original element
+ * remains free, and the new element is the one allocated.
+ * Re-insert original element, in case its new size makes it
+ * belong on a different list.
+ */
+ elem_free_list_remove(elem);
+ split_elem(elem, new_elem);
+ new_elem->state = ELEM_BUSY;
+ malloc_elem_free_list_insert(elem);
+
+ return new_elem;
+}
+
+/*
+ * join two struct malloc_elem together. elem1 and elem2 must
+ * be contiguous in memory.
+ */
+static inline void
+join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
+{
+ struct malloc_elem *next = RTE_PTR_ADD(elem2, elem2->size);
+ elem1->size += elem2->size;
+ next->prev = elem1;
+}
+
+/*
+ * free a malloc_elem block by adding it to the free list. If the
+ * blocks either immediately before or immediately after newly freed block
+ * are also free, the blocks are merged together.
+ */
+int
+malloc_elem_free(struct malloc_elem *elem)
+{
+ if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
+ return -1;
+
+ rte_spinlock_lock(&(elem->heap->lock));
+ struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
+ if (next->state == ELEM_FREE){
+ /* remove from free list, join to this one */
+ elem_free_list_remove(next);
+ join_elem(elem, next);
+ }
+
+ /* check if previous element is free, if so join with it and return,
+ * need to re-insert in free list, as that element's size is changing
+ */
+ if (elem->prev != NULL && elem->prev->state == ELEM_FREE) {
+ elem_free_list_remove(elem->prev);
+ join_elem(elem->prev, elem);
+ malloc_elem_free_list_insert(elem->prev);
+ }
+ /* otherwise add ourselves to the free list */
+ else {
+ malloc_elem_free_list_insert(elem);
+ elem->pad = 0;
+ }
+ /* decrease heap's count of allocated elements */
+ elem->heap->alloc_count--;
+ rte_spinlock_unlock(&(elem->heap->lock));
+
+ return 0;
+}
+
+/*
+ * attempt to resize a malloc_elem by expanding into any free space
+ * immediately after it in memory.
+ */
+int
+malloc_elem_resize(struct malloc_elem *elem, size_t size)
+{
+ const size_t new_size = size + MALLOC_ELEM_OVERHEAD;
+ /* if we request a smaller size, then always return ok */
+ const size_t current_size = elem->size - elem->pad;
+ if (current_size >= new_size)
+ return 0;
+
+ struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
+ rte_spinlock_lock(&elem->heap->lock);
+	if (next->state != ELEM_FREE)
+ goto err_return;
+ if (current_size + next->size < new_size)
+ goto err_return;
+
+ /* we now know the element fits, so remove from free list,
+ * join the two
+ */
+ elem_free_list_remove(next);
+ join_elem(elem, next);
+
+ if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD){
+		/* now we have a big block together. Let's cut it down a bit, by splitting */
+ struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
+ split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
+ split_elem(elem, split_pt);
+ malloc_elem_free_list_insert(split_pt);
+ }
+ rte_spinlock_unlock(&elem->heap->lock);
+ return 0;
+
+err_return:
+ rte_spinlock_unlock(&elem->heap->lock);
+ return -1;
+}
diff --git a/src/dpdk_lib18/librte_malloc/malloc_elem.h b/src/dpdk_lib18/librte_malloc/malloc_elem.h
new file mode 100755
index 00000000..9790b1a0
--- /dev/null
+++ b/src/dpdk_lib18/librte_malloc/malloc_elem.h
@@ -0,0 +1,190 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MALLOC_ELEM_H_
+#define MALLOC_ELEM_H_
+
+#include <rte_memory.h>
+
+/* dummy definition of struct so we can use pointers to it in malloc_elem struct */
+struct malloc_heap;
+
+enum elem_state {
+ ELEM_FREE = 0,
+ ELEM_BUSY,
+ ELEM_PAD /* element is a padding-only header */
+};
+
+struct malloc_elem {
+ struct malloc_heap *heap;
+ struct malloc_elem *volatile prev; /* points to prev elem in memzone */
+ LIST_ENTRY(malloc_elem) free_list; /* list of free elements in heap */
+ const struct rte_memzone *mz;
+ volatile enum elem_state state;
+ uint32_t pad;
+ size_t size;
+#ifdef RTE_LIBRTE_MALLOC_DEBUG
+ uint64_t header_cookie; /* Cookie marking start of data */
+ /* trailer cookie at start + size */
+#endif
+} __rte_cache_aligned;
+
+#ifndef RTE_LIBRTE_MALLOC_DEBUG
+static const unsigned MALLOC_ELEM_TRAILER_LEN = 0;
+
+/* dummy function - just check if pointer is non-null */
+static inline int
+malloc_elem_cookies_ok(const struct malloc_elem *elem){ return elem != NULL; }
+
+/* dummy function - no header if malloc_debug is not enabled */
+static inline void
+set_header(struct malloc_elem *elem __rte_unused){ }
+
+/* dummy function - no trailer if malloc_debug is not enabled */
+static inline void
+set_trailer(struct malloc_elem *elem __rte_unused){ }
+
+
+#else
+static const unsigned MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
+
+#define MALLOC_HEADER_COOKIE 0xbadbadbadadd2e55ULL /**< Header cookie. */
+#define MALLOC_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/
+
+/* define macros to make referencing the header and trailer cookies easier */
+#define MALLOC_ELEM_TRAILER(elem) (*((uint64_t*)RTE_PTR_ADD(elem, \
+ elem->size - MALLOC_ELEM_TRAILER_LEN)))
+#define MALLOC_ELEM_HEADER(elem) (elem->header_cookie)
+
+static inline void
+set_header(struct malloc_elem *elem)
+{
+ if (elem != NULL)
+ MALLOC_ELEM_HEADER(elem) = MALLOC_HEADER_COOKIE;
+}
+
+static inline void
+set_trailer(struct malloc_elem *elem)
+{
+ if (elem != NULL)
+ MALLOC_ELEM_TRAILER(elem) = MALLOC_TRAILER_COOKIE;
+}
+
+/* check that the header and trailer cookies are set correctly */
+static inline int
+malloc_elem_cookies_ok(const struct malloc_elem *elem)
+{
+ return (elem != NULL &&
+ MALLOC_ELEM_HEADER(elem) == MALLOC_HEADER_COOKIE &&
+ MALLOC_ELEM_TRAILER(elem) == MALLOC_TRAILER_COOKIE);
+}
+
+#endif
+
+static const unsigned MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
+#define MALLOC_ELEM_OVERHEAD (MALLOC_ELEM_HEADER_LEN + MALLOC_ELEM_TRAILER_LEN)
+
+/*
+ * Given a pointer to the start of a memory block returned by malloc, get
+ * the actual malloc_elem header for that block.
+ */
+static inline struct malloc_elem *
+malloc_elem_from_data(const void *data)
+{
+ if (data == NULL)
+ return NULL;
+
+ struct malloc_elem *elem = RTE_PTR_SUB(data, MALLOC_ELEM_HEADER_LEN);
+ if (!malloc_elem_cookies_ok(elem))
+ return NULL;
+ return elem->state != ELEM_PAD ? elem: RTE_PTR_SUB(elem, elem->pad);
+}
+
+/*
+ * initialise a malloc_elem header
+ */
+void
+malloc_elem_init(struct malloc_elem *elem,
+ struct malloc_heap *heap,
+ const struct rte_memzone *mz,
+ size_t size);
+
+/*
+ * initialise a dummy malloc_elem header for the end-of-memzone marker
+ */
+void
+malloc_elem_mkend(struct malloc_elem *elem,
+ struct malloc_elem *prev_free);
+
+/*
+ * return true if the current malloc_elem can hold a block of data
+ * of the requested size and with the requested alignment
+ */
+int
+malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align);
+
+/*
+ * reserve a block of data in an existing malloc_elem. If the malloc_elem
+ * is much larger than the data block requested, we split the element in two.
+ */
+struct malloc_elem *
+malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align);
+
+/*
+ * free a malloc_elem block by adding it to the free list. If the
+ * blocks either immediately before or immediately after newly freed block
+ * are also free, the blocks are merged together.
+ */
+int
+malloc_elem_free(struct malloc_elem *elem);
+
+/*
+ * attempt to resize a malloc_elem by expanding into any free space
+ * immediately after it in memory.
+ */
+int
+malloc_elem_resize(struct malloc_elem *elem, size_t size);
+
+/*
+ * Given an element size, compute its freelist index.
+ */
+size_t
+malloc_elem_free_list_index(size_t size);
+
+/*
+ * Add element to its heap's free list.
+ */
+void
+malloc_elem_free_list_insert(struct malloc_elem *elem);
+
+#endif /* MALLOC_ELEM_H_ */
diff --git a/src/dpdk_lib18/librte_malloc/malloc_heap.c b/src/dpdk_lib18/librte_malloc/malloc_heap.c
new file mode 100755
index 00000000..95fcfecf
--- /dev/null
+++ b/src/dpdk_lib18/librte_malloc/malloc_heap.c
@@ -0,0 +1,210 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdint.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_string_fns.h>
+#include <rte_spinlock.h>
+#include <rte_memcpy.h>
+#include <rte_atomic.h>
+
+#include "malloc_elem.h"
+#include "malloc_heap.h"
+
+/* since the memzone size starts with a digit, it will appear unquoted in
+ * rte_config.h, so quote it so it can be passed to rte_str_to_size */
+#define MALLOC_MEMZONE_SIZE RTE_STR(RTE_MALLOC_MEMZONE_SIZE)
+
+/*
+ * returns the configuration setting for the memzone size as a size_t value
+ */
+static inline size_t
+get_malloc_memzone_size(void)
+{
+ return rte_str_to_size(MALLOC_MEMZONE_SIZE);
+}
+
+/*
+ * reserve an extra memory zone and make it available for use by a particular
+ * heap. This reserves the zone and sets a dummy malloc_elem header at the end
+ * to prevent overflow. The rest of the zone is added to the free list as a single
+ * large free block
+ */
+static int
+malloc_heap_add_memzone(struct malloc_heap *heap, size_t size, unsigned align)
+{
+ const unsigned mz_flags = 0;
+ const size_t block_size = get_malloc_memzone_size();
+ /* ensure the data we want to allocate will fit in the memzone */
+ const size_t min_size = size + align + MALLOC_ELEM_OVERHEAD * 2;
+ const struct rte_memzone *mz = NULL;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ unsigned numa_socket = heap - mcfg->malloc_heaps;
+
+ size_t mz_size = min_size;
+ if (mz_size < block_size)
+ mz_size = block_size;
+
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ snprintf(mz_name, sizeof(mz_name), "MALLOC_S%u_HEAP_%u",
+ numa_socket, heap->mz_count++);
+
+ /* try getting a block. if we fail and we don't need as big a block
+ * as given in the config, we can shrink our request and try again
+ */
+ do {
+ mz = rte_memzone_reserve(mz_name, mz_size, numa_socket,
+ mz_flags);
+ if (mz == NULL)
+ mz_size /= 2;
+ } while (mz == NULL && mz_size > min_size);
+ if (mz == NULL)
+ return -1;
+
+ /* allocate the memory block headers, one at end, one at start */
+ struct malloc_elem *start_elem = (struct malloc_elem *)mz->addr;
+ struct malloc_elem *end_elem = RTE_PTR_ADD(mz->addr,
+ mz_size - MALLOC_ELEM_OVERHEAD);
+ end_elem = RTE_PTR_ALIGN_FLOOR(end_elem, RTE_CACHE_LINE_SIZE);
+
+ const unsigned elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem;
+ malloc_elem_init(start_elem, heap, mz, elem_size);
+ malloc_elem_mkend(end_elem, start_elem);
+ malloc_elem_free_list_insert(start_elem);
+
+ /* increase heap total size by size of new memzone */
+ heap->total_size+=mz_size - MALLOC_ELEM_OVERHEAD;
+ return 0;
+}
+
+/*
+ * Iterates through the freelist for a heap to find a free element
+ * which can store data of the required size and with the requested alignment.
+ * Returns null on failure, or pointer to element on success.
+ */
+static struct malloc_elem *
+find_suitable_element(struct malloc_heap *heap, size_t size, unsigned align)
+{
+ size_t idx;
+ struct malloc_elem *elem;
+
+ for (idx = malloc_elem_free_list_index(size);
+ idx < RTE_HEAP_NUM_FREELISTS; idx++)
+ {
+ for (elem = LIST_FIRST(&heap->free_head[idx]);
+ !!elem; elem = LIST_NEXT(elem, free_list))
+ {
+ if (malloc_elem_can_hold(elem, size, align))
+ return elem;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Main function called by malloc to allocate a block of memory from the
+ * heap. It locks the free list, scans it, and adds a new memzone if the
+ * scan fails. Once the new memzone is added, it re-scans and should return
+ * the new element after releasing the lock.
+ */
+void *
+malloc_heap_alloc(struct malloc_heap *heap,
+ const char *type __attribute__((unused)), size_t size, unsigned align)
+{
+ size = RTE_CACHE_LINE_ROUNDUP(size);
+ align = RTE_CACHE_LINE_ROUNDUP(align);
+ rte_spinlock_lock(&heap->lock);
+ struct malloc_elem *elem = find_suitable_element(heap, size, align);
+ if (elem == NULL){
+ if ((malloc_heap_add_memzone(heap, size, align)) == 0)
+ elem = find_suitable_element(heap, size, align);
+ }
+
+ if (elem != NULL){
+ elem = malloc_elem_alloc(elem, size, align);
+ /* increase heap's count of allocated elements */
+ heap->alloc_count++;
+ }
+ rte_spinlock_unlock(&heap->lock);
+ return elem == NULL ? NULL : (void *)(&elem[1]);
+
+}
+
+/*
+ * Function to retrieve data for heap on given socket
+ */
+int
+malloc_heap_get_stats(const struct malloc_heap *heap,
+ struct rte_malloc_socket_stats *socket_stats)
+{
+ size_t idx;
+ struct malloc_elem *elem;
+
+ /* Initialise variables for heap */
+ socket_stats->free_count = 0;
+ socket_stats->heap_freesz_bytes = 0;
+ socket_stats->greatest_free_size = 0;
+
+ /* Iterate through free list */
+ for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
+ for (elem = LIST_FIRST(&heap->free_head[idx]);
+ !!elem; elem = LIST_NEXT(elem, free_list))
+ {
+ socket_stats->free_count++;
+ socket_stats->heap_freesz_bytes += elem->size;
+ if (elem->size > socket_stats->greatest_free_size)
+ socket_stats->greatest_free_size = elem->size;
+ }
+ }
+ /* Get stats on overall heap and allocated memory on this heap */
+ socket_stats->heap_totalsz_bytes = heap->total_size;
+ socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
+ socket_stats->heap_freesz_bytes);
+ socket_stats->alloc_count = heap->alloc_count;
+ return 0;
+}
+
diff --git a/src/dpdk_lib18/librte_malloc/malloc_heap.h b/src/dpdk_lib18/librte_malloc/malloc_heap.h
new file mode 100755
index 00000000..b4aec451
--- /dev/null
+++ b/src/dpdk_lib18/librte_malloc/malloc_heap.h
@@ -0,0 +1,65 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MALLOC_HEAP_H_
+#define MALLOC_HEAP_H_
+
+#include <rte_malloc.h>
+#include <rte_malloc_heap.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline unsigned
+malloc_get_numa_socket(void)
+{
+ return rte_socket_id();
+}
+
+void *
+malloc_heap_alloc(struct malloc_heap *heap, const char *type,
+ size_t size, unsigned align);
+
+int
+malloc_heap_get_stats(const struct malloc_heap *heap,
+ struct rte_malloc_socket_stats *socket_stats);
+
+int
+rte_eal_heap_memzone_init(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MALLOC_HEAP_H_ */
diff --git a/src/dpdk_lib18/librte_malloc/rte_malloc.c b/src/dpdk_lib18/librte_malloc/rte_malloc.c
new file mode 100755
index 00000000..b966fc7d
--- /dev/null
+++ b/src/dpdk_lib18/librte_malloc/rte_malloc.c
@@ -0,0 +1,261 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_branch_prediction.h>
+#include <rte_debug.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+
+#include <rte_malloc.h>
+#include "malloc_elem.h"
+#include "malloc_heap.h"
+
+
+/* Free the memory space back to heap */
+void rte_free(void *addr)
+{
+ if (addr == NULL) return;
+ if (malloc_elem_free(malloc_elem_from_data(addr)) < 0)
+ rte_panic("Fatal error: Invalid memory\n");
+}
+
+/*
+ * Allocate memory on specified heap.
+ */
+void *
+rte_malloc_socket(const char *type, size_t size, unsigned align, int socket_arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int socket, i;
+ void *ret;
+
+ /* return NULL if size is 0 or alignment is not power-of-2 */
+ if (size == 0 || !rte_is_power_of_2(align))
+ return NULL;
+
+ if (socket_arg == SOCKET_ID_ANY)
+ socket = malloc_get_numa_socket();
+ else
+ socket = socket_arg;
+
+ /* Check socket parameter */
+ if (socket >= RTE_MAX_NUMA_NODES)
+ return NULL;
+
+ ret = malloc_heap_alloc(&mcfg->malloc_heaps[socket], type,
+ size, align == 0 ? 1 : align);
+ if (ret != NULL || socket_arg != SOCKET_ID_ANY)
+ return ret;
+
+ /* try other heaps */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
+ /* we already tried this one */
+ if (i == socket)
+ continue;
+
+ ret = malloc_heap_alloc(&mcfg->malloc_heaps[i], type,
+ size, align == 0 ? 1 : align);
+ if (ret != NULL)
+ return ret;
+ }
+
+ return NULL;
+}
+
+/*
+ * Allocate memory on default heap.
+ */
+void *
+rte_malloc(const char *type, size_t size, unsigned align)
+{
+ return rte_malloc_socket(type, size, align, SOCKET_ID_ANY);
+}
+
+/*
+ * Allocate zero'd memory on specified heap.
+ */
+void *
+rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket)
+{
+ void *ptr = rte_malloc_socket(type, size, align, socket);
+
+ if (ptr != NULL)
+ memset(ptr, 0, size);
+ return ptr;
+}
+
+/*
+ * Allocate zero'd memory on default heap.
+ */
+void *
+rte_zmalloc(const char *type, size_t size, unsigned align)
+{
+ return rte_zmalloc_socket(type, size, align, SOCKET_ID_ANY);
+}
+
+/*
+ * Allocate zero'd memory for an array on the specified heap.
+ */
+void *
+rte_calloc_socket(const char *type, size_t num, size_t size, unsigned align, int socket)
+{
+ return rte_zmalloc_socket(type, num * size, align, socket);
+}
+
+/*
+ * Allocate zero'd memory for an array on the default heap.
+ */
+void *
+rte_calloc(const char *type, size_t num, size_t size, unsigned align)
+{
+ return rte_zmalloc(type, num * size, align);
+}
+
+/*
+ * Resize allocated memory.
+ */
+void *
+rte_realloc(void *ptr, size_t size, unsigned align)
+{
+ if (ptr == NULL)
+ return rte_malloc(NULL, size, align);
+
+ struct malloc_elem *elem = malloc_elem_from_data(ptr);
+ if (elem == NULL)
+ rte_panic("Fatal error: memory corruption detected\n");
+
+ size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);
+ /* check alignment matches first, and if ok, see if we can resize block */
+ if (RTE_PTR_ALIGN(ptr,align) == ptr &&
+ malloc_elem_resize(elem, size) == 0)
+ return ptr;
+
+ /* either alignment is off, or we have no room to expand,
+ * so move data. */
+ void *new_ptr = rte_malloc(NULL, size, align);
+ if (new_ptr == NULL)
+ return NULL;
+ const unsigned old_size = elem->size - MALLOC_ELEM_OVERHEAD;
+ rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
+ rte_free(ptr);
+
+ return new_ptr;
+}
+
+int
+rte_malloc_validate(const void *ptr, size_t *size)
+{
+ const struct malloc_elem *elem = malloc_elem_from_data(ptr);
+ if (!malloc_elem_cookies_ok(elem))
+ return -1;
+ if (size != NULL)
+ *size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
+ return 0;
+}
+
+/*
+ * Retrieve statistics for the heap on the given socket
+ */
+int
+rte_malloc_get_socket_stats(int socket,
+ struct rte_malloc_socket_stats *socket_stats)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+ if (socket >= RTE_MAX_NUMA_NODES || socket < 0)
+ return -1;
+
+ return malloc_heap_get_stats(&mcfg->malloc_heaps[socket], socket_stats);
+}
+
+/*
+ * Print stats on memory type. If type is NULL, info on all types is printed
+ */
+void
+rte_malloc_dump_stats(FILE *f, __rte_unused const char *type)
+{
+ unsigned int socket;
+ struct rte_malloc_socket_stats sock_stats;
+ /* Iterate through all initialised heaps */
+ for (socket=0; socket< RTE_MAX_NUMA_NODES; socket++) {
+ if ((rte_malloc_get_socket_stats(socket, &sock_stats) < 0))
+ continue;
+
+ fprintf(f, "Socket:%u\n", socket);
+ fprintf(f, "\tHeap_size:%zu,\n", sock_stats.heap_totalsz_bytes);
+ fprintf(f, "\tFree_size:%zu,\n", sock_stats.heap_freesz_bytes);
+ fprintf(f, "\tAlloc_size:%zu,\n", sock_stats.heap_allocsz_bytes);
+ fprintf(f, "\tGreatest_free_size:%zu,\n",
+ sock_stats.greatest_free_size);
+ fprintf(f, "\tAlloc_count:%u,\n",sock_stats.alloc_count);
+ fprintf(f, "\tFree_count:%u,\n", sock_stats.free_count);
+ }
+ return;
+}
+
+/*
+ * TODO: Set limit to memory that can be allocated to memory type
+ */
+int
+rte_malloc_set_limit(__rte_unused const char *type,
+ __rte_unused size_t max)
+{
+ return 0;
+}
+
+/*
+ * Return the physical address of a virtual address obtained through rte_malloc
+ */
+phys_addr_t
+rte_malloc_virt2phy(const void *addr)
+{
+ const struct malloc_elem *elem = malloc_elem_from_data(addr);
+ if (elem == NULL)
+ return 0;
+ return elem->mz->phys_addr + ((uintptr_t)addr - (uintptr_t)elem->mz->addr);
+}
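(Editorial note, not part of the patch.) A short usage sketch of the socket-aware allocation path and the stats dump above; the "flows" tag and socket id 1 are illustrative, and EAL initialisation is assumed.

    void *buf = rte_malloc_socket("flows", 4096, 64, 1);   /* prefer NUMA socket 1, 64-byte aligned */
    if (buf == NULL)
        buf = rte_malloc("flows", 4096, 64);                /* caller's socket first, then any other heap */
    rte_malloc_dump_stats(stdout, NULL);                    /* per-socket heap counters */
    rte_free(buf);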
diff --git a/src/dpdk_lib18/librte_malloc/rte_malloc.h b/src/dpdk_lib18/librte_malloc/rte_malloc.h
new file mode 100755
index 00000000..74bb78c7
--- /dev/null
+++ b/src/dpdk_lib18/librte_malloc/rte_malloc.h
@@ -0,0 +1,342 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MALLOC_H_
+#define _RTE_MALLOC_H_
+
+/**
+ * @file
+ * RTE Malloc. This library provides methods for dynamically allocating memory
+ * from hugepages.
+ */
+
+#include <stdio.h>
+#include <stddef.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Structure to hold heap statistics obtained from the rte_malloc_get_socket_stats() function.
+ */
+struct rte_malloc_socket_stats {
+ size_t heap_totalsz_bytes; /**< Total bytes on heap */
+ size_t heap_freesz_bytes; /**< Total free bytes on heap */
+ size_t greatest_free_size; /**< Size in bytes of largest free block */
+ unsigned free_count; /**< Number of free elements on heap */
+ unsigned alloc_count; /**< Number of allocated elements on heap */
+ size_t heap_allocsz_bytes; /**< Total allocated bytes on heap */
+};
+
+/**
+ * This function allocates memory from the huge-page area of memory. The memory
+ * is not cleared. In NUMA systems, the memory allocated resides on the same
+ * NUMA socket as the core that calls this function.
+ *
+ * @param type
+ * A string identifying the type of allocated objects (useful for debug
+ * purposes, such as identifying the cause of a memory leak). Can be NULL.
+ * @param size
+ * Size (in bytes) to be allocated.
+ * @param align
+ * If 0, the return is a pointer that is suitably aligned for any kind of
+ * variable (in the same manner as malloc()).
+ * Otherwise, the return is a pointer that is a multiple of *align*. In
+ * this case, it must be a power of two. (Minimum alignment is the
+ * cacheline size, i.e. 64-bytes)
+ * @return
+ * - NULL on error. Not enough memory, or invalid arguments (size is 0,
+ * align is not a power of two).
+ * - Otherwise, the pointer to the allocated object.
+ */
+void *
+rte_malloc(const char *type, size_t size, unsigned align);
+
+/**
+ * Allocate zero'ed memory from the heap.
+ *
+ * Equivalent to rte_malloc() except that the memory zone is
+ * initialised with zeros. In NUMA systems, the memory allocated resides on the
+ * same NUMA socket as the core that calls this function.
+ *
+ * @param type
+ * A string identifying the type of allocated objects (useful for debug
+ * purposes, such as identifying the cause of a memory leak). Can be NULL.
+ * @param size
+ * Size (in bytes) to be allocated.
+ * @param align
+ * If 0, the return is a pointer that is suitably aligned for any kind of
+ * variable (in the same manner as malloc()).
+ * Otherwise, the return is a pointer that is a multiple of *align*. In
+ * this case, it must obviously be a power of two. (Minimum alignment is the
+ * cacheline size, i.e. 64-bytes)
+ * @return
+ * - NULL on error. Not enough memory, or invalid arguments (size is 0,
+ * align is not a power of two).
+ * - Otherwise, the pointer to the allocated object.
+ */
+void *
+rte_zmalloc(const char *type, size_t size, unsigned align);
+
+/**
+ * Replacement function for calloc(), using huge-page memory. Memory area is
+ * initialised with zeros. In NUMA systems, the memory allocated resides on the
+ * same NUMA socket as the core that calls this function.
+ *
+ * @param type
+ * A string identifying the type of allocated objects (useful for debug
+ * purposes, such as identifying the cause of a memory leak). Can be NULL.
+ * @param num
+ * Number of elements to be allocated.
+ * @param size
+ * Size (in bytes) of a single element.
+ * @param align
+ * If 0, the return is a pointer that is suitably aligned for any kind of
+ * variable (in the same manner as malloc()).
+ * Otherwise, the return is a pointer that is a multiple of *align*. In
+ * this case, it must obviously be a power of two. (Minimum alignment is the
+ * cacheline size, i.e. 64-bytes)
+ * @return
+ * - NULL on error. Not enough memory, or invalid arguments (size is 0,
+ * align is not a power of two).
+ * - Otherwise, the pointer to the allocated object.
+ */
+void *
+rte_calloc(const char *type, size_t num, size_t size, unsigned align);
+
+/**
+ * Replacement function for realloc(), using huge-page memory. Reserved area
+ * memory is resized, preserving contents. In NUMA systems, the new area
+ * resides on the same NUMA socket as the old area.
+ *
+ * @param ptr
+ * Pointer to already allocated memory
+ * @param size
+ * Size (in bytes) of new area. If this is 0, memory is freed.
+ * @param align
+ * If 0, the return is a pointer that is suitably aligned for any kind of
+ * variable (in the same manner as malloc()).
+ * Otherwise, the return is a pointer that is a multiple of *align*. In
+ * this case, it must obviously be a power of two. (Minimum alignment is the
+ * cacheline size, i.e. 64-bytes)
+ * @return
+ * - NULL on error. Not enough memory, or invalid arguments (size is 0,
+ * align is not a power of two).
+ * - Otherwise, the pointer to the reallocated memory.
+ */
+void *
+rte_realloc(void *ptr, size_t size, unsigned align);
+
+/**
+ * This function allocates memory from the huge-page area of memory. The memory
+ * is not cleared.
+ *
+ * @param type
+ * A string identifying the type of allocated objects (useful for debug
+ * purposes, such as identifying the cause of a memory leak). Can be NULL.
+ * @param size
+ * Size (in bytes) to be allocated.
+ * @param align
+ * If 0, the return is a pointer that is suitably aligned for any kind of
+ * variable (in the same manner as malloc()).
+ * Otherwise, the return is a pointer that is a multiple of *align*. In
+ * this case, it must be a power of two. (Minimum alignment is the
+ * cacheline size, i.e. 64-bytes)
+ * @param socket
+ * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this function
+ * will behave the same as rte_malloc().
+ * @return
+ * - NULL on error. Not enough memory, or invalid arguments (size is 0,
+ * align is not a power of two).
+ * - Otherwise, the pointer to the allocated object.
+ */
+void *
+rte_malloc_socket(const char *type, size_t size, unsigned align, int socket);
+
+/**
+ * Allocate zero'ed memory from the heap.
+ *
+ * Equivalent to rte_malloc() except that the memory zone is
+ * initialised with zeros.
+ *
+ * @param type
+ * A string identifying the type of allocated objects (useful for debug
+ * purposes, such as identifying the cause of a memory leak). Can be NULL.
+ * @param size
+ * Size (in bytes) to be allocated.
+ * @param align
+ * If 0, the return is a pointer that is suitably aligned for any kind of
+ * variable (in the same manner as malloc()).
+ * Otherwise, the return is a pointer that is a multiple of *align*. In
+ * this case, it must obviously be a power of two. (Minimum alignment is the
+ * cacheline size, i.e. 64-bytes)
+ * @param socket
+ * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this function
+ * will behave the same as rte_zmalloc().
+ * @return
+ * - NULL on error. Not enough memory, or invalid arguments (size is 0,
+ * align is not a power of two).
+ * - Otherwise, the pointer to the allocated object.
+ */
+void *
+rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket);
+
+/**
+ * Replacement function for calloc(), using huge-page memory. Memory area is
+ * initialised with zeros.
+ *
+ * @param type
+ * A string identifying the type of allocated objects (useful for debug
+ * purposes, such as identifying the cause of a memory leak). Can be NULL.
+ * @param num
+ * Number of elements to be allocated.
+ * @param size
+ * Size (in bytes) of a single element.
+ * @param align
+ * If 0, the return is a pointer that is suitably aligned for any kind of
+ * variable (in the same manner as malloc()).
+ * Otherwise, the return is a pointer that is a multiple of *align*. In
+ * this case, it must obviously be a power of two. (Minimum alignment is the
+ * cacheline size, i.e. 64-bytes)
+ * @param socket
+ * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this function
+ * will behave the same as rte_calloc().
+ * @return
+ * - NULL on error. Not enough memory, or invalid arguments (size is 0,
+ * align is not a power of two).
+ * - Otherwise, the pointer to the allocated object.
+ */
+void *
+rte_calloc_socket(const char *type, size_t num, size_t size, unsigned align, int socket);
+
+/**
+ * Frees the memory space pointed to by the provided pointer.
+ *
+ * This pointer must have been returned by a previous call to
+ * rte_malloc(), rte_zmalloc(), rte_calloc() or rte_realloc(). The behaviour of
+ * rte_free() is undefined if the pointer does not match this requirement.
+ *
+ * If the pointer is NULL, the function does nothing.
+ *
+ * @param ptr
+ * The pointer to memory to be freed.
+ */
+void
+rte_free(void *ptr);
+
+/**
+ * If malloc debug is enabled, check a memory block for header
+ * and trailer markers to indicate that all is well with the block.
+ * If size is non-null, also return the size of the block.
+ *
+ * @param ptr
+ * pointer to the start of a data block, must have been returned
+ * by a previous call to rte_malloc(), rte_zmalloc(), rte_calloc()
+ * or rte_realloc()
+ * @param size
+ * if non-null, and memory block pointer is valid, returns the size
+ * of the memory block
+ * @return
+ * -1 on error, invalid pointer passed or header and trailer markers
+ * are missing or corrupted
+ * 0 on success
+ */
+int
+rte_malloc_validate(const void *ptr, size_t *size);
+
+/**
+ * Get heap statistics for the specified heap.
+ *
+ * @param socket
+ * An unsigned integer specifying the socket to get heap statistics for
+ * @param socket_stats
+ * A structure which provides memory to store statistics
+ * @return
+ *   - 0: Success. *socket_stats* is filled in.
+ *   - (-1): Error (invalid socket id).
+ */
+int
+rte_malloc_get_socket_stats(int socket,
+ struct rte_malloc_socket_stats *socket_stats);
+
+/**
+ * Dump statistics.
+ *
+ * Dump statistics for the specified memory type to the given file. If the type
+ * argument is NULL, all memory types are dumped.
+ *
+ * @param f
+ * A pointer to a file for output
+ * @param type
+ * A string identifying the type of objects to dump, or NULL
+ * to dump all objects.
+ */
+void
+rte_malloc_dump_stats(FILE *f, const char *type);
+
+/**
+ * Set the maximum amount of allocated memory for this type.
+ *
+ * This is not yet implemented
+ *
+ * @param type
+ * A string identifying the type of allocated objects.
+ * @param max
+ * The maximum amount of allocated bytes for this type.
+ * @return
+ * - 0: Success.
+ * - (-1): Error.
+ */
+int
+rte_malloc_set_limit(const char *type, size_t max);
+
+/**
+ * Return the physical address of a virtual address obtained through
+ * rte_malloc
+ *
+ * @param addr
+ *   Address obtained from a previous rte_malloc() call
+ * @return
+ *   0 on error (invalid pointer)
+ *   otherwise the physical address of the buffer
+ */
+phys_addr_t
+rte_malloc_virt2phy(const void *addr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MALLOC_H_ */
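(Editorial note, not part of the patch.) A sketch of the array/resize/validate API declared above; element counts are illustrative and EAL initialisation is assumed.

    uint32_t *tbl = rte_calloc("tbl", 1024, sizeof(uint32_t), 0);       /* zeroed hugepage-backed array */
    uint32_t *bigger = rte_realloc(tbl, 2048 * sizeof(uint32_t), 0);    /* grown in place when possible */
    if (bigger != NULL)
        tbl = bigger;
    size_t sz;
    if (tbl != NULL && rte_malloc_validate(tbl, &sz) == 0)
        printf("usable block size: %zu bytes\n", sz);
    rte_free(tbl);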
diff --git a/src/dpdk_lib18/librte_mbuf/Makefile b/src/dpdk_lib18/librte_mbuf/Makefile
new file mode 100755
index 00000000..9b45ba43
--- /dev/null
+++ b/src/dpdk_lib18/librte_mbuf/Makefile
@@ -0,0 +1,48 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_mbuf.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF)-include := rte_mbuf.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF) += lib/librte_eal lib/librte_mempool
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_mbuf/rte_mbuf.c b/src/dpdk_lib18/librte_mbuf/rte_mbuf.c
new file mode 100755
index 00000000..1b14e027
--- /dev/null
+++ b/src/dpdk_lib18/librte_mbuf/rte_mbuf.c
@@ -0,0 +1,252 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <ctype.h>
+#include <sys/queue.h>
+
+#include <rte_debug.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+#include <rte_hexdump.h>
+
+/*
+ * ctrlmbuf constructor, given as a callback function to
+ * rte_mempool_create()
+ */
+void
+rte_ctrlmbuf_init(struct rte_mempool *mp,
+ __attribute__((unused)) void *opaque_arg,
+ void *_m,
+ __attribute__((unused)) unsigned i)
+{
+ struct rte_mbuf *m = _m;
+ rte_pktmbuf_init(mp, opaque_arg, _m, i);
+ m->ol_flags |= CTRL_MBUF_FLAG;
+}
+
+/*
+ * pktmbuf pool constructor, given as a callback function to
+ * rte_mempool_create()
+ */
+void
+rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
+{
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint16_t roomsz;
+
+ mbp_priv = rte_mempool_get_priv(mp);
+ roomsz = (uint16_t)(uintptr_t)opaque_arg;
+
+ /* Use default data room size. */
+ if (0 == roomsz)
+ roomsz = 2048 + RTE_PKTMBUF_HEADROOM;
+
+ mbp_priv->mbuf_data_room_size = roomsz;
+}
+
+/*
+ * pktmbuf constructor, given as a callback function to
+ * rte_mempool_create().
+ * Set the fields of a packet mbuf to their default values.
+ */
+void
+rte_pktmbuf_init(struct rte_mempool *mp,
+ __attribute__((unused)) void *opaque_arg,
+ void *_m,
+ __attribute__((unused)) unsigned i)
+{
+ struct rte_mbuf *m = _m;
+ uint32_t buf_len = mp->elt_size - sizeof(struct rte_mbuf);
+
+ RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
+
+ memset(m, 0, mp->elt_size);
+
+ /* start of buffer is just after mbuf structure */
+ m->buf_addr = (char *)m + sizeof(struct rte_mbuf);
+ m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
+ sizeof(struct rte_mbuf);
+ m->buf_len = (uint16_t)buf_len;
+
+ /* keep some headroom between start of buffer and data */
+ m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
+
+ /* init some constant fields */
+ m->pool = mp;
+ m->nb_segs = 1;
+ m->port = 0xff;
+}
+
+/* do some sanity checks on a mbuf: panic if it fails */
+void
+rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
+{
+ const struct rte_mbuf *m_seg;
+ unsigned nb_segs;
+
+ if (m == NULL)
+ rte_panic("mbuf is NULL\n");
+
+ /* generic checks */
+ if (m->pool == NULL)
+ rte_panic("bad mbuf pool\n");
+ if (m->buf_physaddr == 0)
+ rte_panic("bad phys addr\n");
+ if (m->buf_addr == NULL)
+ rte_panic("bad virt addr\n");
+
+#ifdef RTE_MBUF_REFCNT
+ uint16_t cnt = rte_mbuf_refcnt_read(m);
+ if ((cnt == 0) || (cnt == UINT16_MAX))
+ rte_panic("bad ref cnt\n");
+#endif
+
+ /* nothing to check for sub-segments */
+ if (is_header == 0)
+ return;
+
+ nb_segs = m->nb_segs;
+ m_seg = m;
+ while (m_seg && nb_segs != 0) {
+ m_seg = m_seg->next;
+ nb_segs--;
+ }
+ if (nb_segs != 0)
+ rte_panic("bad nb_segs\n");
+}
+
+/* dump a mbuf on console */
+void
+rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
+{
+ unsigned int len;
+ unsigned nb_segs;
+
+ __rte_mbuf_sanity_check(m, 1);
+
+ fprintf(f, "dump mbuf at 0x%p, phys=%"PRIx64", buf_len=%u\n",
+ m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
+ fprintf(f, " pkt_len=%"PRIu32", ol_flags=%"PRIx64", nb_segs=%u, "
+ "in_port=%u\n", m->pkt_len, m->ol_flags,
+ (unsigned)m->nb_segs, (unsigned)m->port);
+ nb_segs = m->nb_segs;
+
+ while (m && nb_segs != 0) {
+ __rte_mbuf_sanity_check(m, 0);
+
+ fprintf(f, " segment at 0x%p, data=0x%p, data_len=%u\n",
+ m, rte_pktmbuf_mtod(m, void *), (unsigned)m->data_len);
+ len = dump_len;
+ if (len > m->data_len)
+ len = m->data_len;
+ if (len != 0)
+ rte_hexdump(f, NULL, rte_pktmbuf_mtod(m, void *), len);
+ dump_len -= len;
+ m = m->next;
+ nb_segs --;
+ }
+}
+
+/*
+ * Get the name of a RX offload flag. Must be kept synchronized with flag
+ * definitions in rte_mbuf.h.
+ */
+const char *rte_get_rx_ol_flag_name(uint64_t mask)
+{
+ switch (mask) {
+ case PKT_RX_VLAN_PKT: return "PKT_RX_VLAN_PKT";
+ case PKT_RX_RSS_HASH: return "PKT_RX_RSS_HASH";
+ case PKT_RX_FDIR: return "PKT_RX_FDIR";
+ case PKT_RX_L4_CKSUM_BAD: return "PKT_RX_L4_CKSUM_BAD";
+ case PKT_RX_IP_CKSUM_BAD: return "PKT_RX_IP_CKSUM_BAD";
+ /* case PKT_RX_EIP_CKSUM_BAD: return "PKT_RX_EIP_CKSUM_BAD"; */
+ /* case PKT_RX_OVERSIZE: return "PKT_RX_OVERSIZE"; */
+ /* case PKT_RX_HBUF_OVERFLOW: return "PKT_RX_HBUF_OVERFLOW"; */
+ /* case PKT_RX_RECIP_ERR: return "PKT_RX_RECIP_ERR"; */
+ /* case PKT_RX_MAC_ERR: return "PKT_RX_MAC_ERR"; */
+ case PKT_RX_IPV4_HDR: return "PKT_RX_IPV4_HDR";
+ case PKT_RX_IPV4_HDR_EXT: return "PKT_RX_IPV4_HDR_EXT";
+ case PKT_RX_IPV6_HDR: return "PKT_RX_IPV6_HDR";
+ case PKT_RX_IPV6_HDR_EXT: return "PKT_RX_IPV6_HDR_EXT";
+ case PKT_RX_IEEE1588_PTP: return "PKT_RX_IEEE1588_PTP";
+ case PKT_RX_IEEE1588_TMST: return "PKT_RX_IEEE1588_TMST";
+ case PKT_RX_TUNNEL_IPV4_HDR: return "PKT_RX_TUNNEL_IPV4_HDR";
+ case PKT_RX_TUNNEL_IPV6_HDR: return "PKT_RX_TUNNEL_IPV6_HDR";
+ default: return NULL;
+ }
+}
+
+/*
+ * Get the name of a TX offload flag. Must be kept synchronized with flag
+ * definitions in rte_mbuf.h.
+ */
+const char *rte_get_tx_ol_flag_name(uint64_t mask)
+{
+ switch (mask) {
+ case PKT_TX_VLAN_PKT: return "PKT_TX_VLAN_PKT";
+ case PKT_TX_IP_CKSUM: return "PKT_TX_IP_CKSUM";
+ case PKT_TX_TCP_CKSUM: return "PKT_TX_TCP_CKSUM";
+ case PKT_TX_SCTP_CKSUM: return "PKT_TX_SCTP_CKSUM";
+ case PKT_TX_UDP_CKSUM: return "PKT_TX_UDP_CKSUM";
+ case PKT_TX_IEEE1588_TMST: return "PKT_TX_IEEE1588_TMST";
+ case PKT_TX_UDP_TUNNEL_PKT: return "PKT_TX_UDP_TUNNEL_PKT";
+ case PKT_TX_TCP_SEG: return "PKT_TX_TCP_SEG";
+ case PKT_TX_IPV4: return "PKT_TX_IPV4";
+ case PKT_TX_IPV6: return "PKT_TX_IPV6";
+ case PKT_TX_OUTER_IP_CKSUM: return "PKT_TX_OUTER_IP_CKSUM";
+ case PKT_TX_OUTER_IPV4: return "PKT_TX_OUTER_IPV4";
+ case PKT_TX_OUTER_IPV6: return "PKT_TX_OUTER_IPV6";
+ default: return NULL;
+ }
+}
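(Editorial note, not part of the patch.) A sketch of using the flag-name helpers above to pretty-print the RX offload flags of a received mbuf `m`.

    uint64_t bit;
    for (bit = 1; bit != 0; bit <<= 1) {
        const char *name = rte_get_rx_ol_flag_name(m->ol_flags & bit);
        if ((m->ol_flags & bit) && name != NULL)
            printf("  %s\n", name);
    }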
diff --git a/src/dpdk_lib18/librte_mbuf/rte_mbuf.h b/src/dpdk_lib18/librte_mbuf/rte_mbuf.h
new file mode 100755
index 00000000..16059c66
--- /dev/null
+++ b/src/dpdk_lib18/librte_mbuf/rte_mbuf.h
@@ -0,0 +1,1133 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MBUF_H_
+#define _RTE_MBUF_H_
+
+/**
+ * @file
+ * RTE Mbuf
+ *
+ * The mbuf library provides the ability to create and destroy buffers
+ * that may be used by the RTE application to store message
+ * buffers. The message buffers are stored in a mempool, using the
+ * RTE mempool library.
+ *
+ * This library provides an API to allocate/free packet mbufs, which are
+ * used to carry network packets.
+ *
+ * To understand the concepts of packet buffers or mbufs, you
+ * should read "TCP/IP Illustrated, Volume 2: The Implementation,
+ * Addison-Wesley, 1995, ISBN 0-201-63354-X from Richard Stevens"
+ * http://www.kohala.com/start/tcpipiv2.html
+ */
+
+#include <stdint.h>
+#include <rte_mempool.h>
+#include <rte_memory.h>
+#include <rte_atomic.h>
+#include <rte_prefetch.h>
+#include <rte_branch_prediction.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* deprecated feature, renamed in RTE_MBUF_REFCNT */
+#pragma GCC poison RTE_MBUF_SCATTER_GATHER
+
+/*
+ * Packet Offload Feature Flags. They also carry packet type information.
+ * These bits are a critical, shared RX/TX resource; be cautious about any change.
+ *
+ * - RX flags start at bit position zero, and get added to the left of previous
+ * flags.
+ * - The most-significant 8 bits are reserved for generic mbuf flags
+ * - TX flags therefore start at bit position 55 (i.e. 63-8), and new flags get
+ * added to the right of the previously defined flags
+ *
+ * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
+ * rte_get_tx_ol_flag_name().
+ */
+#define PKT_RX_VLAN_PKT (1ULL << 0) /**< RX packet is a 802.1q VLAN packet. */
+#define PKT_RX_RSS_HASH (1ULL << 1) /**< RX packet with RSS hash result. */
+#define PKT_RX_FDIR (1ULL << 2) /**< RX packet with FDIR match indicate. */
+#define PKT_RX_L4_CKSUM_BAD (1ULL << 3) /**< L4 cksum of RX pkt. is not OK. */
+#define PKT_RX_IP_CKSUM_BAD (1ULL << 4) /**< IP cksum of RX pkt. is not OK. */
+#define PKT_RX_EIP_CKSUM_BAD (0ULL << 0) /**< External IP header checksum error. */
+#define PKT_RX_OVERSIZE (0ULL << 0) /**< Num of desc of an RX pkt oversize. */
+#define PKT_RX_HBUF_OVERFLOW (0ULL << 0) /**< Header buffer overflow. */
+#define PKT_RX_RECIP_ERR (0ULL << 0) /**< Hardware processing error. */
+#define PKT_RX_MAC_ERR (0ULL << 0) /**< MAC error. */
+#define PKT_RX_IPV4_HDR (1ULL << 5) /**< RX packet with IPv4 header. */
+#define PKT_RX_IPV4_HDR_EXT (1ULL << 6) /**< RX packet with extended IPv4 header. */
+#define PKT_RX_IPV6_HDR (1ULL << 7) /**< RX packet with IPv6 header. */
+#define PKT_RX_IPV6_HDR_EXT (1ULL << 8) /**< RX packet with extended IPv6 header. */
+#define PKT_RX_IEEE1588_PTP (1ULL << 9) /**< RX IEEE1588 L2 Ethernet PT Packet. */
+#define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet.*/
+#define PKT_RX_TUNNEL_IPV4_HDR (1ULL << 11) /**< RX tunnel packet with IPv4 header.*/
+#define PKT_RX_TUNNEL_IPV6_HDR (1ULL << 12) /**< RX tunnel packet with IPv6 header. */
+#define PKT_RX_FDIR_ID (1ULL << 13) /**< FD id reported if FDIR match. */
+#define PKT_RX_FDIR_FLX (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
+/* add new RX flags here */
+
+/* add new TX flags here */
+
+/**
+ * TCP segmentation offload. To enable this offload feature for a
+ * packet to be transmitted on hardware supporting TSO:
+ * - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
+ * PKT_TX_TCP_CKSUM)
+ * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
+ * - if it's IPv4, set the PKT_TX_IP_CKSUM flag and write the IP checksum
+ * to 0 in the packet
+ * - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz
+ * - calculate the pseudo header checksum without taking ip_len in account,
+ * and set it in the TCP header. Refer to rte_ipv4_phdr_cksum() and
+ * rte_ipv6_phdr_cksum() that can be used as helpers.
+ */
+#define PKT_TX_TCP_SEG (1ULL << 49)
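/*
 * Editorial sketch, not part of this patch: the TSO recipe above applied to a
 * TX mbuf `m` carrying an IPv4/TCP packet on a TSO-capable NIC. Header lengths
 * and the 1460-byte segment size are illustrative; check rte_ip.h for the
 * exact rte_ipv4_phdr_cksum() signature.
 *
 *   m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
 *   m->l2_len = sizeof(struct ether_hdr);
 *   m->l3_len = sizeof(struct ipv4_hdr);
 *   m->l4_len = sizeof(struct tcp_hdr);
 *   m->tso_segsz = 1460;
 *   ip_hdr->hdr_checksum = 0;                                   // recomputed by the NIC
 *   tcp_hdr->cksum = rte_ipv4_phdr_cksum(ip_hdr, m->ol_flags);  // pseudo-header cksum, without ip_len
 */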
+
+/** TX packet is a UDP tunneled packet. It must be specified when using
+ * outer checksum offload (PKT_TX_OUTER_IP_CKSUM) */
+#define PKT_TX_UDP_TUNNEL_PKT (1ULL << 50) /**< TX packet is an UDP tunneled packet */
+#define PKT_TX_IEEE1588_TMST (1ULL << 51) /**< TX IEEE1588 packet to timestamp. */
+
+/**
+ * Bits 52+53 used for L4 packet type with checksum enabled: 00: Reserved,
+ * 01: TCP checksum, 10: SCTP checksum, 11: UDP checksum. To use hardware
+ * L4 checksum offload, the user needs to:
+ * - fill l2_len and l3_len in mbuf
+ * - set the flags PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM
+ * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
+ * - calculate the pseudo header checksum and set it in the L4 header (only
+ * for TCP or UDP). See rte_ipv4_phdr_cksum() and rte_ipv6_phdr_cksum().
+ * For SCTP, set the crc field to 0.
+ */
+#define PKT_TX_L4_NO_CKSUM (0ULL << 52) /**< Disable L4 cksum of TX pkt. */
+#define PKT_TX_TCP_CKSUM (1ULL << 52) /**< TCP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_SCTP_CKSUM (2ULL << 52) /**< SCTP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_UDP_CKSUM (3ULL << 52) /**< UDP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_L4_MASK (3ULL << 52) /**< Mask for L4 cksum offload request. */
+
+#define PKT_TX_IP_CKSUM (1ULL << 54) /**< IP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_IPV4_CSUM PKT_TX_IP_CKSUM /**< Alias of PKT_TX_IP_CKSUM. */
+
+/** Packet is IPv4 without requiring IP checksum offload. */
+#define PKT_TX_IPV4 (1ULL << 55)
+
+/** Tell the NIC it's an IPv6 packet.*/
+#define PKT_TX_IPV6 (1ULL << 56)
+
+#define PKT_TX_VLAN_PKT (1ULL << 57) /**< TX packet is a 802.1q VLAN packet. */
+
+/** Outer IP checksum of TX packet, computed by NIC for tunneling packet.
+ * The tunnel type must also be specified, ex: PKT_TX_UDP_TUNNEL_PKT. */
+#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
+
+/** Packet is outer IPv4 without requiring IP checksum offload for tunneling packet. */
+#define PKT_TX_OUTER_IPV4 (1ULL << 59)
+
+/** Tell the NIC it's an outer IPv6 packet for tunneling packet */
+#define PKT_TX_OUTER_IPV6 (1ULL << 60)
+
+/* Use final bit of flags to indicate a control mbuf */
+#define CTRL_MBUF_FLAG (1ULL << 63) /**< Mbuf contains control data */
+
+/**
+ * Get the name of a RX offload flag
+ *
+ * @param mask
+ * The mask describing the flag.
+ * @return
+ * The name of this flag, or NULL if it's not a valid RX flag.
+ */
+const char *rte_get_rx_ol_flag_name(uint64_t mask);
+
+/**
+ * Get the name of a TX offload flag
+ *
+ * @param mask
+ * The mask describing the flag. Usually only one bit must be set.
+ * Several bits can be given if they belong to the same mask.
+ * Ex: PKT_TX_L4_MASK.
+ * @return
+ * The name of this flag, or NULL if it's not a valid TX flag.
+ */
+const char *rte_get_tx_ol_flag_name(uint64_t mask);
+
+/* define a set of marker types that can be used to refer to set points in the
+ * mbuf */
+typedef void *MARKER[0]; /**< generic marker for a point in a structure */
+typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
+typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
+ * with a single assignment */
+
+/**
+ * The generic rte_mbuf, containing a packet mbuf.
+ */
+struct rte_mbuf {
+ MARKER cacheline0;
+
+ void *buf_addr; /**< Virtual address of segment buffer. */
+ phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */
+
+ uint16_t buf_len; /**< Length of segment buffer. */
+
+ /* next 6 bytes are initialised on RX descriptor rearm */
+ MARKER8 rearm_data;
+ uint16_t data_off;
+
+ /**
+ * 16-bit Reference counter.
+ * It should only be accessed using the following functions:
+ * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
+ * rte_mbuf_refcnt_set(). The functionality of these functions (atomic,
+ * or non-atomic) is controlled by the CONFIG_RTE_MBUF_REFCNT_ATOMIC
+ * config option.
+ */
+ union {
+#ifdef RTE_MBUF_REFCNT
+ rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
+ uint16_t refcnt; /**< Non-atomically accessed refcnt */
+#endif
+ uint16_t refcnt_reserved; /**< Do not use this field */
+ };
+ uint8_t nb_segs; /**< Number of segments. */
+ uint8_t port; /**< Input port. */
+
+ uint64_t ol_flags; /**< Offload features. */
+
+ /* remaining bytes are set on RX when pulling packet from descriptor */
+ MARKER rx_descriptor_fields1;
+
+ /**
+	 * The packet type, which indicates the ordinary packet format and also
+	 * the tunneled packet format, i.e. each value represents a type of
+	 * packet.
+ */
+ uint16_t packet_type;
+
+ uint16_t data_len; /**< Amount of data in segment buffer. */
+ uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
+ uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */
+ uint16_t reserved;
+ union {
+ uint32_t rss; /**< RSS hash result if RSS enabled */
+ struct {
+ union {
+ struct {
+ uint16_t hash;
+ uint16_t id;
+ };
+ uint32_t lo;
+ /**< Second 4 flexible bytes */
+ };
+ uint32_t hi;
+ /**< First 4 flexible bytes or FD ID, dependent on
+ PKT_RX_FDIR_* flag in ol_flags. */
+ } fdir; /**< Filter identifier if FDIR enabled */
+ uint32_t sched; /**< Hierarchical scheduler */
+ uint32_t usr; /**< User defined tags. See @rte_distributor_process */
+ } hash; /**< hash information */
+
+ /* second cache line - fields only used in slow path or on TX */
+ MARKER cacheline1 __rte_cache_aligned;
+
+ union {
+ void *userdata; /**< Can be used for external metadata */
+ uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
+ };
+
+ struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
+ struct rte_mbuf *next; /**< Next segment of scattered packet. */
+
+ /* fields to support TX offloads */
+ union {
+ uint64_t tx_offload; /**< combined for easy fetch */
+ struct {
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size */
+
+ /* fields for TX offloading of tunnels */
+ uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr Length. */
+ uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr Length. */
+
+ /* uint64_t unused:8; */
+ };
+ };
+} __rte_cache_aligned;
+
+/**
+ * Given the buf_addr, returns the pointer to the corresponding mbuf.
+ */
+#define RTE_MBUF_FROM_BADDR(ba) (((struct rte_mbuf *)(ba)) - 1)
+
+/**
+ * Given the pointer to an mbuf, returns the address where its buf_addr
+ * should point.
+ */
+#define RTE_MBUF_TO_BADDR(mb) (((struct rte_mbuf *)(mb)) + 1)
+
+/**
+ * Returns TRUE if given mbuf is indirect, or FALSE otherwise.
+ */
+#define RTE_MBUF_INDIRECT(mb) (RTE_MBUF_FROM_BADDR((mb)->buf_addr) != (mb))
+
+/**
+ * Returns TRUE if given mbuf is direct, or FALSE otherwise.
+ */
+#define RTE_MBUF_DIRECT(mb) (RTE_MBUF_FROM_BADDR((mb)->buf_addr) == (mb))
+
+
+/**
+ * Private data in case of pktmbuf pool.
+ *
+ * A structure that contains some pktmbuf_pool-specific data that are
+ * appended after the mempool structure (in private data).
+ */
+struct rte_pktmbuf_pool_private {
+ uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf.*/
+};
+
+#ifdef RTE_LIBRTE_MBUF_DEBUG
+
+/** check mbuf type in debug mode */
+#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
+
+/** check mbuf type in debug mode if mbuf pointer is not null */
+#define __rte_mbuf_sanity_check_raw(m, is_h) do { \
+ if ((m) != NULL) \
+ rte_mbuf_sanity_check(m, is_h); \
+} while (0)
+
+/** MBUF asserts in debug mode */
+#define RTE_MBUF_ASSERT(exp) \
+if (!(exp)) { \
+ rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
+}
+
+#else /* RTE_LIBRTE_MBUF_DEBUG */
+
+/** check mbuf type in debug mode */
+#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
+
+/** check mbuf type in debug mode if mbuf pointer is not null */
+#define __rte_mbuf_sanity_check_raw(m, is_h) do { } while (0)
+
+/** MBUF asserts in debug mode */
+#define RTE_MBUF_ASSERT(exp) do { } while (0)
+
+#endif /* RTE_LIBRTE_MBUF_DEBUG */
+
+#ifdef RTE_MBUF_REFCNT
+#ifdef RTE_MBUF_REFCNT_ATOMIC
+
+/**
+ * Adds given value to an mbuf's refcnt and returns its new value.
+ * @param m
+ * Mbuf to update
+ * @param value
+ * Value to add/subtract
+ * @return
+ * Updated value
+ */
+static inline uint16_t
+rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
+{
+ return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
+}
+
+/**
+ * Reads the value of an mbuf's refcnt.
+ * @param m
+ * Mbuf to read
+ * @return
+ * Reference count number.
+ */
+static inline uint16_t
+rte_mbuf_refcnt_read(const struct rte_mbuf *m)
+{
+ return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
+}
+
+/**
+ * Sets an mbuf's refcnt to a defined value.
+ * @param m
+ * Mbuf to update
+ * @param new_value
+ * Value set
+ */
+static inline void
+rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
+{
+ rte_atomic16_set(&m->refcnt_atomic, new_value);
+}
+
+#else /* ! RTE_MBUF_REFCNT_ATOMIC */
+
+/**
+ * Adds given value to an mbuf's refcnt and returns its new value.
+ */
+static inline uint16_t
+rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
+{
+ m->refcnt = (uint16_t)(m->refcnt + value);
+ return m->refcnt;
+}
+
+/**
+ * Reads the value of an mbuf's refcnt.
+ */
+static inline uint16_t
+rte_mbuf_refcnt_read(const struct rte_mbuf *m)
+{
+ return m->refcnt;
+}
+
+/**
+ * Sets an mbuf's refcnt to the defined value.
+ */
+static inline void
+rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
+{
+ m->refcnt = new_value;
+}
+
+#endif /* RTE_MBUF_REFCNT_ATOMIC */
+
+/** Mbuf prefetch */
+#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
+ if ((m) != NULL) \
+ rte_prefetch0(m); \
+} while (0)
+
+#else /* ! RTE_MBUF_REFCNT */
+
+/** Mbuf prefetch */
+#define RTE_MBUF_PREFETCH_TO_FREE(m) do { } while(0)
+
+#define rte_mbuf_refcnt_set(m,v) do { } while(0)
+
+#endif /* RTE_MBUF_REFCNT */
+
+
+/**
+ * Sanity checks on an mbuf.
+ *
+ * Check the consistency of the given mbuf. The function will cause a
+ * panic if corruption is detected.
+ *
+ * @param m
+ * The mbuf to be checked.
+ * @param is_header
+ * True if the mbuf is a packet header, false if it is a sub-segment
+ * of a packet (in this case, some fields like nb_segs are not checked)
+ */
+void
+rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
+
+/**
+ * @internal Allocate a new mbuf from mempool *mp*.
+ * The use of that function is reserved for RTE internal needs.
+ * Please use rte_pktmbuf_alloc().
+ *
+ * @param mp
+ * The mempool from which mbuf is allocated.
+ * @return
+ * - The pointer to the new mbuf on success.
+ * - NULL if allocation failed.
+ */
+static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+ void *mb = NULL;
+ if (rte_mempool_get(mp, &mb) < 0)
+ return NULL;
+ m = (struct rte_mbuf *)mb;
+#ifdef RTE_MBUF_REFCNT
+ RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+ rte_mbuf_refcnt_set(m, 1);
+#endif /* RTE_MBUF_REFCNT */
+ return (m);
+}
+
+/**
+ * @internal Put mbuf back into its original mempool.
+ * The use of that function is reserved for RTE internal needs.
+ * Please use rte_pktmbuf_free().
+ *
+ * @param m
+ * The mbuf to be freed.
+ */
+static inline void __attribute__((always_inline))
+__rte_mbuf_raw_free(struct rte_mbuf *m)
+{
+#ifdef RTE_MBUF_REFCNT
+ RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+#endif /* RTE_MBUF_REFCNT */
+ rte_mempool_put(m->pool, m);
+}
+
+/* Operations on ctrl mbuf */
+
+/**
+ * The control mbuf constructor.
+ *
+ * This function initializes some fields in an mbuf structure that are
+ * not modified by the user once created (mbuf type, origin pool, buffer
+ * start address, and so on). This function is given as a callback function
+ * to rte_mempool_create() at pool creation time.
+ *
+ * @param mp
+ * The mempool from which the mbuf is allocated.
+ * @param opaque_arg
+ * A pointer that can be used by the user to retrieve useful information
+ * for mbuf initialization. This pointer comes from the ``init_arg``
+ * parameter of rte_mempool_create().
+ * @param m
+ * The mbuf to initialize.
+ * @param i
+ * The index of the mbuf in the pool table.
+ */
+void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
+ void *m, unsigned i);
+
+/**
+ * Allocate a new mbuf (type is ctrl) from mempool *mp*.
+ *
+ * This new mbuf is initialized with data pointing to the beginning of
+ * buffer, and with a length of zero.
+ *
+ * @param mp
+ * The mempool from which the mbuf is allocated.
+ * @return
+ * - The pointer to the new mbuf on success.
+ * - NULL if allocation failed.
+ */
+#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)
+
+/**
+ * Free a control mbuf back into its original mempool.
+ *
+ * @param m
+ * The control mbuf to be freed.
+ */
+#define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)
+
+/**
+ * A macro that returns the pointer to the carried data.
+ *
+ * The value that can be read or assigned.
+ *
+ * @param m
+ * The control mbuf.
+ */
+#define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)
+
+/**
+ * A macro that returns the length of the carried data.
+ *
+ * The value that can be read or assigned.
+ *
+ * @param m
+ * The control mbuf.
+ */
+#define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)
+
+/**
+ * Tests if an mbuf is a control mbuf
+ *
+ * @param m
+ * The mbuf to be tested
+ * @return
+ * - True (1) if the mbuf is a control mbuf
+ * - False(0) otherwise
+ */
+static inline int
+rte_is_ctrlmbuf(struct rte_mbuf *m)
+{
+ return (!!(m->ol_flags & CTRL_MBUF_FLAG));
+}
+
+/* Operations on pkt mbuf */
+
+/**
+ * The packet mbuf constructor.
+ *
+ * This function initializes some fields in the mbuf structure that are
+ * not modified by the user once created (origin pool, buffer start
+ * address, and so on). This function is given as a callback function to
+ * rte_mempool_create() at pool creation time.
+ *
+ * @param mp
+ * The mempool from which mbufs originate.
+ * @param opaque_arg
+ * A pointer that can be used by the user to retrieve useful information
+ * for mbuf initialization. This pointer comes from the ``init_arg``
+ * parameter of rte_mempool_create().
+ * @param m
+ * The mbuf to initialize.
+ * @param i
+ * The index of the mbuf in the pool table.
+ */
+void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
+ void *m, unsigned i);
+
+
+/**
+ * A packet mbuf pool constructor.
+ *
+ * This function initializes the mempool private data in the case of a
+ * pktmbuf pool. This private data is needed by the driver. The
+ * function is given as a callback function to rte_mempool_create() at
+ * pool creation. It can be extended by the user, for example, to
+ * provide another packet size.
+ *
+ * @param mp
+ * The mempool from which mbufs originate.
+ * @param opaque_arg
+ * A pointer that can be used by the user to retrieve useful information
+ * for mbuf initialization. This pointer comes from the ``init_arg``
+ * parameter of rte_mempool_create().
+ */
+void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
+
+/**
+ * Reset the fields of a packet mbuf to their default values.
+ *
+ * The given mbuf must have only one segment.
+ *
+ * @param m
+ *   The packet mbuf to be reset.
+ */
+static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
+{
+ m->next = NULL;
+ m->pkt_len = 0;
+ m->tx_offload = 0;
+ m->vlan_tci = 0;
+ m->nb_segs = 1;
+ m->port = 0xff;
+
+ m->ol_flags = 0;
+ m->packet_type = 0;
+ m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
+ RTE_PKTMBUF_HEADROOM : m->buf_len;
+
+ m->data_len = 0;
+ __rte_mbuf_sanity_check(m, 1);
+}
+
+/**
+ * Allocate a new mbuf from a mempool.
+ *
+ * This new mbuf contains one segment, which has a length of 0. The pointer
+ * to data is initialized to have some bytes of headroom in the buffer
+ * (if buffer size allows).
+ *
+ * @param mp
+ * The mempool from which the mbuf is allocated.
+ * @return
+ * - The pointer to the new mbuf on success.
+ * - NULL if allocation failed.
+ */
+static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+ if ((m = __rte_mbuf_raw_alloc(mp)) != NULL)
+ rte_pktmbuf_reset(m);
+ return (m);
+}
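/*
 * Editorial sketch, not part of this patch: creating a pktmbuf pool with the
 * constructors declared in this file and drawing one mbuf from it. The pool
 * name, element count and cache size are illustrative only.
 *
 *   struct rte_mempool *mp = rte_mempool_create("MBUF_POOL", 8192,
 *           2048 + RTE_PKTMBUF_HEADROOM + sizeof(struct rte_mbuf),
 *           32, sizeof(struct rte_pktmbuf_pool_private),
 *           rte_pktmbuf_pool_init, NULL,
 *           rte_pktmbuf_init, NULL,
 *           rte_socket_id(), 0);
 *   struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
 *   ...
 *   rte_pktmbuf_free(m);
 */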
+
+#ifdef RTE_MBUF_REFCNT
+
+/**
+ * Attach packet mbuf to another packet mbuf.
+ * After attachment, the attached mbuf is referred to as 'indirect',
+ * while the mbuf it is attached to is referred to as 'direct'.
+ * Currently not supported:
+ *  - attachment to an indirect mbuf (i.e. md must be direct),
+ *  - attachment of an already indirect mbuf (i.e. mi must be direct),
+ *  - attaching an mbuf (mi) that is in use by someone else,
+ *    e.g. its reference counter is greater than 1.
+ *
+ * @param mi
+ * The indirect packet mbuf.
+ * @param md
+ * The direct packet mbuf.
+ */
+
+static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
+{
+ RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(md) &&
+ RTE_MBUF_DIRECT(mi) &&
+ rte_mbuf_refcnt_read(mi) == 1);
+
+ rte_mbuf_refcnt_update(md, 1);
+ mi->buf_physaddr = md->buf_physaddr;
+ mi->buf_addr = md->buf_addr;
+ mi->buf_len = md->buf_len;
+
+ mi->next = md->next;
+ mi->data_off = md->data_off;
+ mi->data_len = md->data_len;
+ mi->port = md->port;
+ mi->vlan_tci = md->vlan_tci;
+ mi->tx_offload = md->tx_offload;
+ mi->hash = md->hash;
+
+ mi->next = NULL;
+ mi->pkt_len = mi->data_len;
+ mi->nb_segs = 1;
+ mi->ol_flags = md->ol_flags;
+ mi->packet_type = md->packet_type;
+
+ __rte_mbuf_sanity_check(mi, 1);
+ __rte_mbuf_sanity_check(md, 0);
+}
+
+/**
+ * Detach an indirect packet mbuf -
+ * - restore original mbuf address and length values.
+ * - reset pktmbuf data and data_len to their default values.
+ * All other fields of the given packet mbuf will be left intact.
+ *
+ * @param m
+ * The indirect attached packet mbuf.
+ */
+
+static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
+{
+ const struct rte_mempool *mp = m->pool;
+ void *buf = RTE_MBUF_TO_BADDR(m);
+ uint32_t buf_len = mp->elt_size - sizeof(*m);
+ m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof (*m);
+
+ m->buf_addr = buf;
+ m->buf_len = (uint16_t)buf_len;
+
+ m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
+ RTE_PKTMBUF_HEADROOM : m->buf_len;
+
+ m->data_len = 0;
+}
+
+#endif /* RTE_MBUF_REFCNT */
+
+
+static inline struct rte_mbuf* __attribute__((always_inline))
+__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
+{
+ __rte_mbuf_sanity_check(m, 0);
+
+#ifdef RTE_MBUF_REFCNT
+ if (likely (rte_mbuf_refcnt_read(m) == 1) ||
+ likely (rte_mbuf_refcnt_update(m, -1) == 0)) {
+ struct rte_mbuf *md = RTE_MBUF_FROM_BADDR(m->buf_addr);
+
+ rte_mbuf_refcnt_set(m, 0);
+
+ /* if this is an indirect mbuf, then
+ * - detach mbuf
+ * - free attached mbuf segment
+ */
+ if (unlikely (md != m)) {
+ rte_pktmbuf_detach(m);
+ if (rte_mbuf_refcnt_update(md, -1) == 0)
+ __rte_mbuf_raw_free(md);
+ }
+#endif
+ return(m);
+#ifdef RTE_MBUF_REFCNT
+ }
+ return (NULL);
+#endif
+}
+
+/**
+ * Free a segment of a packet mbuf into its original mempool.
+ *
+ * Free an mbuf, without parsing other segments in case of chained
+ * buffers.
+ *
+ * @param m
+ * The packet mbuf segment to be freed.
+ */
+static inline void __attribute__((always_inline))
+rte_pktmbuf_free_seg(struct rte_mbuf *m)
+{
+ if (likely(NULL != (m = __rte_pktmbuf_prefree_seg(m)))) {
+ m->next = NULL;
+ __rte_mbuf_raw_free(m);
+ }
+}
+
+/**
+ * Free a packet mbuf back into its original mempool.
+ *
+ * Free an mbuf, and all its segments in case of chained buffers. Each
+ * segment is added back into its original mempool.
+ *
+ * @param m
+ * The packet mbuf to be freed.
+ */
+static inline void rte_pktmbuf_free(struct rte_mbuf *m)
+{
+ struct rte_mbuf *m_next;
+
+ __rte_mbuf_sanity_check(m, 1);
+
+ while (m != NULL) {
+ m_next = m->next;
+ rte_pktmbuf_free_seg(m);
+ m = m_next;
+ }
+}
+
+#ifdef RTE_MBUF_REFCNT
+
+/**
+ * Creates a "clone" of the given packet mbuf.
+ *
+ * Walks through all segments of the given packet mbuf, and for each of them:
+ * - Creates a new packet mbuf from the given pool.
+ * - Attaches the newly created mbuf to the segment.
+ * Then updates pkt_len and nb_segs of the "clone" packet mbuf to match values
+ * from the original packet mbuf.
+ *
+ * @param md
+ * The packet mbuf to be cloned.
+ * @param mp
+ * The mempool from which the "clone" mbufs are allocated.
+ * @return
+ * - The pointer to the new "clone" mbuf on success.
+ * - NULL if allocation fails.
+ */
+static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
+ struct rte_mempool *mp)
+{
+ struct rte_mbuf *mc, *mi, **prev;
+ uint32_t pktlen;
+ uint8_t nseg;
+
+ if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
+ return (NULL);
+
+ mi = mc;
+ prev = &mi->next;
+ pktlen = md->pkt_len;
+ nseg = 0;
+
+ do {
+ nseg++;
+ rte_pktmbuf_attach(mi, md);
+ *prev = mi;
+ prev = &mi->next;
+ } while ((md = md->next) != NULL &&
+ (mi = rte_pktmbuf_alloc(mp)) != NULL);
+
+ *prev = NULL;
+ mc->nb_segs = nseg;
+ mc->pkt_len = pktlen;
+
+ /* Allocation of new indirect segment failed */
+ if (unlikely (mi == NULL)) {
+ rte_pktmbuf_free(mc);
+ return (NULL);
+ }
+
+ __rte_mbuf_sanity_check(mc, 1);
+ return (mc);
+}
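+
+/*
+ * Illustrative usage sketch (editor's addition, not part of the imported
+ * DPDK sources): share the payload of an existing packet without copying
+ * it.  'pkt' and 'indirect_pool' are hypothetical; the pool only needs to
+ * hold mbuf headers, since the payload stays in the original segments.
+ *
+ *   struct rte_mbuf *copy = rte_pktmbuf_clone(pkt, indirect_pool);
+ *   if (copy != NULL) {
+ *       // transmit or inspect 'copy'; freeing it returns the indirect
+ *       // headers to 'indirect_pool' and drops the reference counts of
+ *       // the original (direct) segments
+ *       rte_pktmbuf_free(copy);
+ *   }
+ */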
+
+/**
+ * Adds given value to the refcnt of all packet mbuf segments.
+ *
+ * Walks through all segments of given packet mbuf and for each of them
+ * invokes rte_mbuf_refcnt_update().
+ *
+ * @param m
+ * The packet mbuf whose refcnt is to be updated.
+ * @param v
+ * The value to add to the mbuf's segments refcnt.
+ */
+static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
+{
+ __rte_mbuf_sanity_check(m, 1);
+
+ do {
+ rte_mbuf_refcnt_update(m, v);
+ } while ((m = m->next) != NULL);
+}
+
+#endif /* RTE_MBUF_REFCNT */
+
+/**
+ * Get the headroom in a packet mbuf.
+ *
+ * @param m
+ * The packet mbuf.
+ * @return
+ * The length of the headroom.
+ */
+static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
+{
+ __rte_mbuf_sanity_check(m, 1);
+ return m->data_off;
+}
+
+/**
+ * Get the tailroom of a packet mbuf.
+ *
+ * @param m
+ * The packet mbuf.
+ * @return
+ * The length of the tailroom.
+ */
+static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
+{
+ __rte_mbuf_sanity_check(m, 1);
+ return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
+ m->data_len);
+}
+
+/**
+ * Get the last segment of the packet.
+ *
+ * @param m
+ * The packet mbuf.
+ * @return
+ * The last segment of the given mbuf.
+ */
+static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
+{
+ struct rte_mbuf *m2 = (struct rte_mbuf *)m;
+
+ __rte_mbuf_sanity_check(m, 1);
+ while (m2->next != NULL)
+ m2 = m2->next;
+ return m2;
+}
+
+/**
+ * A macro that points to the start of the data in the mbuf.
+ *
+ * The returned pointer is cast to type t. Before using this
+ * macro, the user must ensure that rte_pktmbuf_data_len(m) is large enough to
+ * read its data.
+ *
+ * @param m
+ * The packet mbuf.
+ * @param t
+ * The type to cast the result into.
+ */
+#define rte_pktmbuf_mtod(m, t) ((t)((char *)(m)->buf_addr + (m)->data_off))
+
+/**
+ * A macro that returns the length of the packet.
+ *
+ * The value can be read or assigned.
+ *
+ * @param m
+ * The packet mbuf.
+ */
+#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
+
+/**
+ * A macro that returns the length of the segment.
+ *
+ * The value can be read or assigned.
+ *
+ * @param m
+ * The packet mbuf.
+ */
+#define rte_pktmbuf_data_len(m) ((m)->data_len)
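+
+/*
+ * Illustrative usage sketch (editor's addition, not part of the imported
+ * DPDK sources): read a received mbuf 'm' (hypothetical) through the
+ * accessor macros above.
+ *
+ *   uint8_t  *data    = rte_pktmbuf_mtod(m, uint8_t *);   // first data byte
+ *   uint32_t  pkt_len = rte_pktmbuf_pkt_len(m);           // whole chain
+ *   uint16_t  seg_len = rte_pktmbuf_data_len(m);          // first segment only
+ */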
+
+/**
+ * Prepend len bytes to an mbuf data area.
+ *
+ * Returns a pointer to the new
+ * data start address. If there is not enough headroom in the first
+ * segment, the function will return NULL, without modifying the mbuf.
+ *
+ * @param m
+ * The pkt mbuf.
+ * @param len
+ * The amount of data to prepend (in bytes).
+ * @return
+ * A pointer to the start of the newly prepended data, or
+ * NULL if there is not enough headroom space in the first segment
+ */
+static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
+ uint16_t len)
+{
+ __rte_mbuf_sanity_check(m, 1);
+
+ if (unlikely(len > rte_pktmbuf_headroom(m)))
+ return NULL;
+
+ m->data_off -= len;
+ m->data_len = (uint16_t)(m->data_len + len);
+ m->pkt_len = (m->pkt_len + len);
+
+ return (char *)m->buf_addr + m->data_off;
+}
+
+/**
+ * Append len bytes to an mbuf.
+ *
+ * Append len bytes to an mbuf and return a pointer to the start address
+ * of the added data. If there is not enough tailroom in the last
+ * segment, the function will return NULL, without modifying the mbuf.
+ *
+ * @param m
+ * The packet mbuf.
+ * @param len
+ * The amount of data to append (in bytes).
+ * @return
+ * A pointer to the start of the newly appended data, or
+ * NULL if there is not enough tailroom space in the last segment
+ */
+static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
+{
+ void *tail;
+ struct rte_mbuf *m_last;
+
+ __rte_mbuf_sanity_check(m, 1);
+
+ m_last = rte_pktmbuf_lastseg(m);
+ if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
+ return NULL;
+
+ tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
+ m_last->data_len = (uint16_t)(m_last->data_len + len);
+ m->pkt_len = (m->pkt_len + len);
+ return (char*) tail;
+}
+
+/**
+ * Remove len bytes at the beginning of an mbuf.
+ *
+ * Returns a pointer to the start address of the new data area. If the
+ * length is greater than the length of the first segment, then the
+ * function will fail and return NULL, without modifying the mbuf.
+ *
+ * @param m
+ * The packet mbuf.
+ * @param len
+ * The amount of data to remove (in bytes).
+ * @return
+ * A pointer to the new start of the data.
+ */
+static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
+{
+ __rte_mbuf_sanity_check(m, 1);
+
+ if (unlikely(len > m->data_len))
+ return NULL;
+
+ m->data_len = (uint16_t)(m->data_len - len);
+ m->data_off += len;
+ m->pkt_len = (m->pkt_len - len);
+ return (char *)m->buf_addr + m->data_off;
+}
+
+/**
+ * Remove len bytes of data at the end of the mbuf.
+ *
+ * If the length is greater than the length of the last segment, the
+ * function will fail and return -1 without modifying the mbuf.
+ *
+ * @param m
+ * The packet mbuf.
+ * @param len
+ * The amount of data to remove (in bytes).
+ * @return
+ * - 0: On success.
+ * - -1: On error.
+ */
+static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
+{
+ struct rte_mbuf *m_last;
+
+ __rte_mbuf_sanity_check(m, 1);
+
+ m_last = rte_pktmbuf_lastseg(m);
+ if (unlikely(len > m_last->data_len))
+ return -1;
+
+ m_last->data_len = (uint16_t)(m_last->data_len - len);
+ m->pkt_len = (m->pkt_len - len);
+ return 0;
+}
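+
+/*
+ * Illustrative usage sketch (editor's addition, not part of the imported
+ * DPDK sources): grow and shrink a freshly allocated single-segment mbuf
+ * 'm' (hypothetical) with the four helpers above; real code must check
+ * every return value against the headroom/tailroom limits documented here.
+ *
+ *   char *payload = rte_pktmbuf_append(m, 64);   // 64 bytes at the tail
+ *   if (payload != NULL)
+ *       memset(payload, 0, 64);
+ *   char *hdr = rte_pktmbuf_prepend(m, 14);      // room for a 14-byte header
+ *   // ... fill 'hdr', send the packet ...
+ *   rte_pktmbuf_adj(m, 14);                      // strip the header again
+ *   rte_pktmbuf_trim(m, 64);                     // and drop the payload
+ */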
+
+/**
+ * Test if mbuf data is contiguous.
+ *
+ * @param m
+ * The packet mbuf.
+ * @return
+ * - 1, if all data is contiguous (one segment).
+ * - 0, if there are several segments.
+ */
+static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
+{
+ __rte_mbuf_sanity_check(m, 1);
+ return !!(m->nb_segs == 1);
+}
+
+/**
+ * Dump an mbuf structure to the console.
+ *
+ * Dump all fields for the given packet mbuf and all its associated
+ * segments (in the case of a chained buffer).
+ *
+ * @param f
+ * A pointer to a file for output
+ * @param m
+ * The packet mbuf.
+ * @param dump_len
+ * If dump_len != 0, also dump the "dump_len" first data bytes of
+ * the packet.
+ */
+void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MBUF_H_ */
diff --git a/src/dpdk_lib18/librte_mempool/Makefile b/src/dpdk_lib18/librte_mempool/Makefile
new file mode 100755
index 00000000..9939e100
--- /dev/null
+++ b/src/dpdk_lib18/librte_mempool/Makefile
@@ -0,0 +1,51 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_mempool.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
+ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_dom0_mempool.c
+endif
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
+
+# this lib needs eal, rte_ring and rte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += lib/librte_eal lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_mempool/rte_dom0_mempool.c b/src/dpdk_lib18/librte_mempool/rte_dom0_mempool.c
new file mode 100755
index 00000000..9ec68fb3
--- /dev/null
+++ b/src/dpdk_lib18/librte_mempool/rte_dom0_mempool.c
@@ -0,0 +1,134 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_atomic.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_spinlock.h>
+
+#include "rte_mempool.h"
+
+static void
+get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num,
+ uint32_t pg_sz, uint32_t memseg_id)
+{
+ uint32_t i;
+ uint64_t virt_addr, mfn_id;
+ struct rte_mem_config *mcfg;
+ uint32_t page_size = getpagesize();
+
+ /* get pointer to global configuration */
+ mcfg = rte_eal_get_configuration()->mem_config;
+ virt_addr = (uintptr_t)mcfg->memseg[memseg_id].addr;
+
+ for (i = 0; i != pg_num; i++) {
+ mfn_id = ((uintptr_t)va + i * pg_sz - virt_addr) / RTE_PGSIZE_2M;
+ pa[i] = mcfg->memseg[memseg_id].mfn[mfn_id] * page_size;
+ }
+}
+
+/* create the mempool for supporting Dom0 */
+struct rte_mempool *
+rte_dom0_mempool_create(const char *name, unsigned elt_num, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags)
+{
+ struct rte_mempool *mp = NULL;
+ phys_addr_t *pa;
+ char *va;
+ size_t sz;
+ uint32_t pg_num, pg_shift, pg_sz, total_size;
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+
+ pg_sz = RTE_PGSIZE_2M;
+
+ pg_shift = rte_bsf32(pg_sz);
+ total_size = rte_mempool_calc_obj_size(elt_size, flags, NULL);
+
+ /* calc max memory size and max number of pages needed. */
+ sz = rte_mempool_xmem_size(elt_num, total_size, pg_shift) +
+ RTE_PGSIZE_2M;
+ pg_num = sz >> pg_shift;
+
+ /* allocate the array that will hold the physical addresses of the pages. */
+ pa = calloc(pg_num, sizeof (*pa));
+ if (pa == NULL)
+ return mp;
+
+ snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME, name);
+ mz = rte_memzone_reserve(mz_name, sz, socket_id, mz_flags);
+ if (mz == NULL) {
+ free(pa);
+ return mp;
+ }
+
+ va = (char *)RTE_ALIGN_CEIL((uintptr_t)mz->addr, RTE_PGSIZE_2M);
+ /* extract physical mappings of the allocated memory. */
+ get_phys_map(va, pa, pg_num, pg_sz, mz->memseg_id);
+
+ mp = rte_mempool_xmem_create(name, elt_num, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags, va, pa, pg_num, pg_shift);
+
+ free(pa);
+
+ return (mp);
+}
diff --git a/src/dpdk_lib18/librte_mempool/rte_mempool.c b/src/dpdk_lib18/librte_mempool/rte_mempool.c
new file mode 100755
index 00000000..4cf6c25b
--- /dev/null
+++ b/src/dpdk_lib18/librte_mempool/rte_mempool.c
@@ -0,0 +1,901 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_spinlock.h>
+
+#include "rte_mempool.h"
+
+TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);
+
+#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
+
+/*
+ * return the greatest common divisor between a and b (fast algorithm)
+ *
+ */
+static unsigned get_gcd(unsigned a, unsigned b)
+{
+ unsigned c;
+
+ if (0 == a)
+ return b;
+ if (0 == b)
+ return a;
+
+ if (a < b) {
+ c = a;
+ a = b;
+ b = c;
+ }
+
+ while (b != 0) {
+ c = a % b;
+ a = b;
+ b = c;
+ }
+
+ return a;
+}
+
+/*
+ * Depending on the memory configuration, object addresses are spread
+ * between channels and ranks in RAM: the pool allocator will add
+ * padding between objects. This function returns the new size of the
+ * object.
+ */
+static unsigned optimize_object_size(unsigned obj_size)
+{
+ unsigned nrank, nchan;
+ unsigned new_obj_size;
+
+ /* get number of channels */
+ nchan = rte_memory_get_nchannel();
+ if (nchan == 0)
+ nchan = 1;
+
+ nrank = rte_memory_get_nrank();
+ if (nrank == 0)
+ nrank = 1;
+
+ /* process new object size */
+ new_obj_size = (obj_size + RTE_CACHE_LINE_MASK) / RTE_CACHE_LINE_SIZE;
+ while (get_gcd(new_obj_size, nrank * nchan) != 1)
+ new_obj_size++;
+ return new_obj_size * RTE_CACHE_LINE_SIZE;
+}
+
+static void
+mempool_add_elem(struct rte_mempool *mp, void *obj, uint32_t obj_idx,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg)
+{
+ struct rte_mempool **mpp;
+
+ obj = (char *)obj + mp->header_size;
+
+ /* set mempool ptr in header */
+ mpp = __mempool_from_obj(obj);
+ *mpp = mp;
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ __mempool_write_header_cookie(obj, 1);
+ __mempool_write_trailer_cookie(obj);
+#endif
+ /* call the initializer */
+ if (obj_init)
+ obj_init(mp, obj_init_arg, obj, obj_idx);
+
+ /* enqueue in ring */
+ rte_ring_sp_enqueue(mp->ring, obj);
+}
+
+uint32_t
+rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg)
+{
+ uint32_t i, j, k;
+ uint32_t pgn;
+ uintptr_t end, start, va;
+ uintptr_t pg_sz;
+
+ pg_sz = (uintptr_t)1 << pg_shift;
+ va = (uintptr_t)vaddr;
+
+ i = 0;
+ j = 0;
+
+ while (i != elt_num && j != pg_num) {
+
+ start = RTE_ALIGN_CEIL(va, align);
+ end = start + elt_sz;
+
+ pgn = (end >> pg_shift) - (start >> pg_shift);
+ pgn += j;
+
+ /* do we have enough space left for the next element. */
+ if (pgn >= pg_num)
+ break;
+
+ for (k = j;
+ k != pgn &&
+ paddr[k] + pg_sz == paddr[k + 1];
+ k++)
+ ;
+
+ /*
+ * if the next pgn chunks of memory are physically contiguous,
+ * use them to create the next element;
+ * otherwise, just skip that chunk and leave it unused.
+ */
+ if (k == pgn) {
+ if (obj_iter != NULL)
+ obj_iter(obj_iter_arg, (void *)start,
+ (void *)end, i);
+ va = end;
+ j = pgn;
+ i++;
+ } else {
+ va = RTE_ALIGN_CEIL((va + 1), pg_sz);
+ j++;
+ }
+ }
+
+ return (i);
+}
+
+/*
+ * Populate mempool with the objects.
+ */
+
+struct mempool_populate_arg {
+ struct rte_mempool *mp;
+ rte_mempool_obj_ctor_t *obj_init;
+ void *obj_init_arg;
+};
+
+static void
+mempool_obj_populate(void *arg, void *start, void *end, uint32_t idx)
+{
+ struct mempool_populate_arg *pa = arg;
+
+ mempool_add_elem(pa->mp, start, idx, pa->obj_init, pa->obj_init_arg);
+ pa->mp->elt_va_end = (uintptr_t)end;
+}
+
+static void
+mempool_populate(struct rte_mempool *mp, size_t num, size_t align,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg)
+{
+ uint32_t elt_sz;
+ struct mempool_populate_arg arg;
+
+ elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
+ arg.mp = mp;
+ arg.obj_init = obj_init;
+ arg.obj_init_arg = obj_init_arg;
+
+ mp->size = rte_mempool_obj_iter((void *)mp->elt_va_start,
+ num, elt_sz, align,
+ mp->elt_pa, mp->pg_num, mp->pg_shift,
+ mempool_obj_populate, &arg);
+}
+
+uint32_t
+rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
+ struct rte_mempool_objsz *sz)
+{
+ struct rte_mempool_objsz lsz;
+
+ sz = (sz != NULL) ? sz : &lsz;
+
+ /*
+ * In the header, we have at least the pointer to the pool, and
+ * optionally a 64-bit cookie.
+ */
+ sz->header_size = 0;
+ sz->header_size += sizeof(struct rte_mempool *); /* ptr to pool */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ sz->header_size += sizeof(uint64_t); /* cookie */
+#endif
+ if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
+ sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
+ RTE_CACHE_LINE_SIZE);
+
+ /* trailer contains the cookie in debug mode */
+ sz->trailer_size = 0;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ sz->trailer_size += sizeof(uint64_t); /* cookie */
+#endif
+ /* element size is 8 bytes-aligned at least */
+ sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
+
+ /* expand trailer to next cache line */
+ if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
+ sz->total_size = sz->header_size + sz->elt_size +
+ sz->trailer_size;
+ sz->trailer_size += ((RTE_CACHE_LINE_SIZE -
+ (sz->total_size & RTE_CACHE_LINE_MASK)) &
+ RTE_CACHE_LINE_MASK);
+ }
+
+ /*
+ * increase trailer to add padding between objects in order to
+ * spread them across memory channels/ranks
+ */
+ if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
+ unsigned new_size;
+ new_size = optimize_object_size(sz->header_size + sz->elt_size +
+ sz->trailer_size);
+ sz->trailer_size = new_size - sz->header_size - sz->elt_size;
+ }
+
+ if (! rte_eal_has_hugepages()) {
+ /*
+ * compute trailer size so that pool elements fit exactly in
+ * a standard page
+ */
+ int page_size = getpagesize();
+ int new_size = page_size - sz->header_size - sz->elt_size;
+ if (new_size < 0 || (unsigned int)new_size < sz->trailer_size) {
+ printf("When hugepages are disabled, pool objects "
+ "can't exceed PAGE_SIZE: %d + %d + %d > %d\n",
+ sz->header_size, sz->elt_size, sz->trailer_size,
+ page_size);
+ return 0;
+ }
+ sz->trailer_size = new_size;
+ }
+
+ /* this is the size of an object, including header and trailer */
+ sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
+
+ return (sz->total_size);
+}
+
+
+/*
+ * Calculate the maximum amount of memory required to store a given number of objects.
+ */
+size_t
+rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift)
+{
+ size_t n, pg_num, pg_sz, sz;
+
+ pg_sz = (size_t)1 << pg_shift;
+
+ if ((n = pg_sz / elt_sz) > 0) {
+ pg_num = (elt_num + n - 1) / n;
+ sz = pg_num << pg_shift;
+ } else {
+ sz = RTE_ALIGN_CEIL(elt_sz, pg_sz) * elt_num;
+ }
+
+ return (sz);
+}
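+
+/*
+ * Worked example (editor's addition, not part of the imported DPDK
+ * sources), with illustrative numbers: for 2 MB pages (pg_shift = 21) and
+ * elt_sz = 2176 bytes, one page holds n = 2097152 / 2176 = 963 objects,
+ * so elt_num = 10000 objects need pg_num = (10000 + 962) / 963 = 11 pages,
+ * i.e. sz = 11 << 21 = 23068672 bytes (22 MB).
+ */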
+
+/*
+ * Calculate how much memory would actually be required with the
+ * given memory footprint to store the required number of elements.
+ */
+static void
+mempool_lelem_iter(void *arg, __rte_unused void *start, void *end,
+ __rte_unused uint32_t idx)
+{
+ *(uintptr_t *)arg = (uintptr_t)end;
+}
+
+ssize_t
+rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+{
+ uint32_t n;
+ uintptr_t va, uv;
+ size_t pg_sz, usz;
+
+ pg_sz = (size_t)1 << pg_shift;
+ va = (uintptr_t)vaddr;
+ uv = va;
+
+ if ((n = rte_mempool_obj_iter(vaddr, elt_num, elt_sz, 1,
+ paddr, pg_num, pg_shift, mempool_lelem_iter,
+ &uv)) != elt_num) {
+ return (-n);
+ }
+
+ uv = RTE_ALIGN_CEIL(uv, pg_sz);
+ usz = uv - va;
+ return (usz);
+}
+
+/* create the mempool */
+struct rte_mempool *
+rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags)
+{
+#ifdef RTE_LIBRTE_XEN_DOM0
+ return (rte_dom0_mempool_create(name, n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags));
+#else
+ return (rte_mempool_xmem_create(name, n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags,
+ NULL, NULL, MEMPOOL_PG_NUM_DEFAULT, MEMPOOL_PG_SHIFT_MAX));
+#endif
+}
+
+/*
+ * Create the mempool over an already allocated chunk of memory.
+ * That external memory buffer can consist of physically disjoint pages.
+ * Setting vaddr to NULL makes the mempool fall back to the original
+ * behaviour and allocate space for the mempool and its elements as one
+ * big chunk of physically contiguous memory.
+ */
+struct rte_mempool *
+rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags, void *vaddr,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ char rg_name[RTE_RING_NAMESIZE];
+ struct rte_mempool *mp = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_ring *r;
+ const struct rte_memzone *mz;
+ size_t mempool_size;
+ int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+ int rg_flags = 0;
+ void *obj;
+ struct rte_mempool_objsz objsz;
+ void *startaddr;
+ int page_size = getpagesize();
+
+ /* compilation-time checks */
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
+ RTE_CACHE_LINE_MASK) != 0);
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
+ RTE_CACHE_LINE_MASK) != 0);
+#endif
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+#endif
+
+ /* check that we have an initialised tail queue */
+ if (RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
+ rte_mempool_list) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ /* asked cache too big */
+ if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* check that we have both VA and PA */
+ if (vaddr != NULL && paddr == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* Check that pg_num and pg_shift parameters are valid. */
+ if (pg_num < RTE_DIM(mp->elt_pa) || pg_shift > MEMPOOL_PG_SHIFT_MAX) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* "no cache align" implies "no spread" */
+ if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ flags |= MEMPOOL_F_NO_SPREAD;
+
+ /* ring flags */
+ if (flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ /* calculate mempool object sizes. */
+ if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);
+
+ /* allocate the ring that will be used to store objects */
+ /* Ring functions will return appropriate errors if we are
+ * running as a secondary process etc., so no checks made
+ * in this function for that condition */
+ snprintf(rg_name, sizeof(rg_name), RTE_MEMPOOL_MZ_FORMAT, name);
+ r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
+ if (r == NULL)
+ goto exit;
+
+ /*
+ * reserve a memory zone for this mempool: private data is
+ * cache-aligned
+ */
+ private_data_size = (private_data_size +
+ RTE_CACHE_LINE_MASK) & (~RTE_CACHE_LINE_MASK);
+
+ if (! rte_eal_has_hugepages()) {
+ /*
+ * expand private data size to a whole page, so that the
+ * first pool element will start on a new standard page
+ */
+ int head = sizeof(struct rte_mempool);
+ int new_size = (private_data_size + head) % page_size;
+ if (new_size) {
+ private_data_size += page_size - new_size;
+ }
+ }
+
+ /* try to allocate tailq entry */
+ te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
+ goto exit;
+ }
+
+ /*
+ * If user provided an external memory buffer, then use it to
+ * store mempool objects. Otherwise reserve memzone big enough to
+ * hold mempool header and metadata plus mempool objects.
+ */
+ mempool_size = MEMPOOL_HEADER_SIZE(mp, pg_num) + private_data_size;
+ if (vaddr == NULL)
+ mempool_size += (size_t)objsz.total_size * n;
+
+ if (! rte_eal_has_hugepages()) {
+ /*
+ * we want the memory pool to start on a page boundary,
+ * because pool elements crossing page boundaries would
+ * result in discontiguous physical addresses
+ */
+ mempool_size += page_size;
+ }
+
+ snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
+
+ mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
+
+ /*
+ * no more memory: in this case we lose the previously reserved
+ * space for the ring as we cannot free it
+ */
+ if (mz == NULL) {
+ rte_free(te);
+ goto exit;
+ }
+
+ if (rte_eal_has_hugepages()) {
+ startaddr = (void*)mz->addr;
+ } else {
+ /* align memory pool start address on a page boundary */
+ unsigned long addr = (unsigned long)mz->addr;
+ if (addr & (page_size - 1)) {
+ addr += page_size;
+ addr &= ~(page_size - 1);
+ }
+ startaddr = (void*)addr;
+ }
+
+ /* init the mempool structure */
+ mp = startaddr;
+ memset(mp, 0, sizeof(*mp));
+ snprintf(mp->name, sizeof(mp->name), "%s", name);
+ mp->phys_addr = mz->phys_addr;
+ mp->ring = r;
+ mp->size = n;
+ mp->flags = flags;
+ mp->elt_size = objsz.elt_size;
+ mp->header_size = objsz.header_size;
+ mp->trailer_size = objsz.trailer_size;
+ mp->cache_size = cache_size;
+ mp->cache_flushthresh = (uint32_t)
+ (cache_size * CACHE_FLUSHTHRESH_MULTIPLIER);
+ mp->private_data_size = private_data_size;
+
+ /* calculate address of the first element for contiguous mempool. */
+ obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
+ private_data_size;
+
+ /* populate address translation fields. */
+ mp->pg_num = pg_num;
+ mp->pg_shift = pg_shift;
+ mp->pg_mask = RTE_LEN2MASK(mp->pg_shift, typeof(mp->pg_mask));
+
+ /* mempool elements allocated together with mempool */
+ if (vaddr == NULL) {
+ mp->elt_va_start = (uintptr_t)obj;
+ mp->elt_pa[0] = mp->phys_addr +
+ (mp->elt_va_start - (uintptr_t)mp);
+
+ /* mempool elements in a separate chunk of memory. */
+ } else {
+ mp->elt_va_start = (uintptr_t)vaddr;
+ memcpy(mp->elt_pa, paddr, sizeof (mp->elt_pa[0]) * pg_num);
+ }
+
+ mp->elt_va_end = mp->elt_va_start;
+
+ /* call the initializer */
+ if (mp_init)
+ mp_init(mp, mp_init_arg);
+
+ mempool_populate(mp, n, 1, obj_init, obj_init_arg);
+
+ te->data = (void *) mp;
+
+ RTE_EAL_TAILQ_INSERT_TAIL(RTE_TAILQ_MEMPOOL, rte_mempool_list, te);
+
+exit:
+ rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
+
+ return mp;
+}
+
+/* Return the number of entries in the mempool */
+unsigned
+rte_mempool_count(const struct rte_mempool *mp)
+{
+ unsigned count;
+
+ count = rte_ring_count(mp->ring);
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ {
+ unsigned lcore_id;
+ if (mp->cache_size == 0)
+ return count;
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
+ count += mp->local_cache[lcore_id].len;
+ }
+#endif
+
+ /*
+ * due to a race condition (access to len is not locked), the
+ * total can be greater than size... so fix the result
+ */
+ if (count > mp->size)
+ return mp->size;
+ return count;
+}
+
+/* dump the cache status */
+static unsigned
+rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
+{
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ unsigned lcore_id;
+ unsigned count = 0;
+ unsigned cache_count;
+
+ fprintf(f, " cache infos:\n");
+ fprintf(f, " cache_size=%"PRIu32"\n", mp->cache_size);
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ cache_count = mp->local_cache[lcore_id].len;
+ fprintf(f, " cache_count[%u]=%u\n", lcore_id, cache_count);
+ count += cache_count;
+ }
+ fprintf(f, " total_cache_count=%u\n", count);
+ return count;
+#else
+ RTE_SET_USED(mp);
+ fprintf(f, " cache disabled\n");
+ return 0;
+#endif
+}
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+/* check cookies before and after objects */
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+struct mempool_audit_arg {
+ const struct rte_mempool *mp;
+ uintptr_t obj_end;
+ uint32_t obj_num;
+};
+
+static void
+mempool_obj_audit(void *arg, void *start, void *end, uint32_t idx)
+{
+ struct mempool_audit_arg *pa = arg;
+ void *obj;
+
+ obj = (char *)start + pa->mp->header_size;
+ pa->obj_end = (uintptr_t)end;
+ pa->obj_num = idx + 1;
+ __mempool_check_cookies(pa->mp, &obj, 1, 2);
+}
+
+static void
+mempool_audit_cookies(const struct rte_mempool *mp)
+{
+ uint32_t elt_sz, num;
+ struct mempool_audit_arg arg;
+
+ elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
+
+ arg.mp = mp;
+ arg.obj_end = mp->elt_va_start;
+ arg.obj_num = 0;
+
+ num = rte_mempool_obj_iter((void *)mp->elt_va_start,
+ mp->size, elt_sz, 1,
+ mp->elt_pa, mp->pg_num, mp->pg_shift,
+ mempool_obj_audit, &arg);
+
+ if (num != mp->size) {
+ rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) "
+ "iterated only over %u elements\n",
+ mp, mp->size, num);
+ } else if (arg.obj_end != mp->elt_va_end || arg.obj_num != mp->size) {
+ rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) "
+ "last callback va_end: %#tx (%#tx expected), "
+ "num of objects: %u (%u expected)\n",
+ mp, mp->size,
+ arg.obj_end, mp->elt_va_end,
+ arg.obj_num, mp->size);
+ }
+}
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic error "-Wcast-qual"
+#endif
+#else
+#define mempool_audit_cookies(mp) do {} while(0)
+#endif
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+/* check cookies before and after objects */
+static void
+mempool_audit_cache(const struct rte_mempool *mp)
+{
+ /* check cache size consistency */
+ unsigned lcore_id;
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (mp->local_cache[lcore_id].len > mp->cache_flushthresh) {
+ RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n",
+ lcore_id);
+ rte_panic("MEMPOOL: invalid cache len\n");
+ }
+ }
+}
+#else
+#define mempool_audit_cache(mp) do {} while(0)
+#endif
+
+
+/* check the consistency of mempool (size, cookies, ...) */
+void
+rte_mempool_audit(const struct rte_mempool *mp)
+{
+ mempool_audit_cache(mp);
+ mempool_audit_cookies(mp);
+
+ /* For case where mempool DEBUG is not set, and cache size is 0 */
+ RTE_SET_USED(mp);
+}
+
+/* dump the status of the mempool on the console */
+void
+rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
+{
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool_debug_stats sum;
+ unsigned lcore_id;
+#endif
+ unsigned common_count;
+ unsigned cache_count;
+
+ RTE_VERIFY(f != NULL);
+ RTE_VERIFY(mp != NULL);
+
+ fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
+ fprintf(f, " flags=%x\n", mp->flags);
+ fprintf(f, " ring=<%s>@%p\n", mp->ring->name, mp->ring);
+ fprintf(f, " phys_addr=0x%" PRIx64 "\n", mp->phys_addr);
+ fprintf(f, " size=%"PRIu32"\n", mp->size);
+ fprintf(f, " header_size=%"PRIu32"\n", mp->header_size);
+ fprintf(f, " elt_size=%"PRIu32"\n", mp->elt_size);
+ fprintf(f, " trailer_size=%"PRIu32"\n", mp->trailer_size);
+ fprintf(f, " total_obj_size=%"PRIu32"\n",
+ mp->header_size + mp->elt_size + mp->trailer_size);
+
+ fprintf(f, " private_data_size=%"PRIu32"\n", mp->private_data_size);
+ fprintf(f, " pg_num=%"PRIu32"\n", mp->pg_num);
+ fprintf(f, " pg_shift=%"PRIu32"\n", mp->pg_shift);
+ fprintf(f, " pg_mask=%#tx\n", mp->pg_mask);
+ fprintf(f, " elt_va_start=%#tx\n", mp->elt_va_start);
+ fprintf(f, " elt_va_end=%#tx\n", mp->elt_va_end);
+ fprintf(f, " elt_pa[0]=0x%" PRIx64 "\n", mp->elt_pa[0]);
+
+ if (mp->size != 0)
+ fprintf(f, " avg bytes/object=%#Lf\n",
+ (long double)(mp->elt_va_end - mp->elt_va_start) /
+ mp->size);
+
+ cache_count = rte_mempool_dump_cache(f, mp);
+ common_count = rte_ring_count(mp->ring);
+ if ((cache_count + common_count) > mp->size)
+ common_count = mp->size - cache_count;
+ fprintf(f, " common_pool_count=%u\n", common_count);
+
+ /* sum and dump statistics */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ memset(&sum, 0, sizeof(sum));
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ sum.put_bulk += mp->stats[lcore_id].put_bulk;
+ sum.put_objs += mp->stats[lcore_id].put_objs;
+ sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk;
+ sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
+ sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
+ sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs;
+ }
+ fprintf(f, " stats:\n");
+ fprintf(f, " put_bulk=%"PRIu64"\n", sum.put_bulk);
+ fprintf(f, " put_objs=%"PRIu64"\n", sum.put_objs);
+ fprintf(f, " get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
+ fprintf(f, " get_success_objs=%"PRIu64"\n", sum.get_success_objs);
+ fprintf(f, " get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
+ fprintf(f, " get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
+#else
+ fprintf(f, " no statistics available\n");
+#endif
+
+ rte_mempool_audit(mp);
+}
+
+/* dump the status of all mempools on the console */
+void
+rte_mempool_list_dump(FILE *f)
+{
+ const struct rte_mempool *mp = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_mempool_list *mempool_list;
+
+ if ((mempool_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);
+
+ TAILQ_FOREACH(te, mempool_list, next) {
+ mp = (struct rte_mempool *) te->data;
+ rte_mempool_dump(f, mp);
+ }
+
+ rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);
+}
+
+/* search a mempool from its name */
+struct rte_mempool *
+rte_mempool_lookup(const char *name)
+{
+ struct rte_mempool *mp = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_mempool_list *mempool_list;
+
+ if ((mempool_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);
+
+ TAILQ_FOREACH(te, mempool_list, next) {
+ mp = (struct rte_mempool *) te->data;
+ if (strncmp(name, mp->name, RTE_MEMPOOL_NAMESIZE) == 0)
+ break;
+ }
+
+ rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ return mp;
+}
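+
+/*
+ * Illustrative usage sketch (editor's addition, not part of the imported
+ * DPDK sources): a process retrieving, by name, a pool created elsewhere
+ * ("example_pool" is a hypothetical name).
+ *
+ *   struct rte_mempool *mp = rte_mempool_lookup("example_pool");
+ *   if (mp == NULL)
+ *       printf("example_pool not found (rte_errno=%d)\n", rte_errno);
+ */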
+
+void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *),
+ void *arg)
+{
+ struct rte_tailq_entry *te = NULL;
+ struct rte_mempool_list *mempool_list;
+
+ if ((mempool_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);
+
+ TAILQ_FOREACH(te, mempool_list, next) {
+ (*func)((struct rte_mempool *) te->data, arg);
+ }
+
+ rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);
+}
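+
+/*
+ * Illustrative usage sketch (editor's addition, not part of the imported
+ * DPDK sources): print the number of free entries of every registered
+ * mempool.  'dump_one' is a hypothetical callback matching the
+ * rte_mempool_walk() signature.
+ *
+ *   static void dump_one(const struct rte_mempool *mp, void *arg)
+ *   {
+ *       fprintf((FILE *)arg, "%s: %u entries free\n",
+ *               mp->name, rte_mempool_count(mp));
+ *   }
+ *
+ *   rte_mempool_walk(dump_one, stdout);
+ */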
diff --git a/src/dpdk_lib18/librte_mempool/rte_mempool.h b/src/dpdk_lib18/librte_mempool/rte_mempool.h
new file mode 100755
index 00000000..33146518
--- /dev/null
+++ b/src/dpdk_lib18/librte_mempool/rte_mempool.h
@@ -0,0 +1,1392 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMPOOL_H_
+#define _RTE_MEMPOOL_H_
+
+/**
+ * @file
+ * RTE Mempool.
+ *
+ * A memory pool is an allocator of fixed-size objects. It is
+ * identified by its name, and uses a ring to store free objects. It
+ * provides some other optional services, like a per-core object
+ * cache, and an alignment helper to ensure that objects are padded
+ * to spread them equally on all RAM channels, ranks, and so on.
+ *
+ * Objects owned by a mempool should never be added in another
+ * mempool. When an object is freed using rte_mempool_put() or
+ * equivalent, the object data is not modified; the user can save some
+ * meta-data in the object data and retrieve them when allocating a
+ * new object.
+ *
+ * Note: the mempool implementation is not preemptible. An lcore must
+ * not be interrupted by another task that uses the same mempool
+ * (because it uses a ring which is not preemptible). Also, mempool
+ * functions must not be used outside the DPDK environment: for
+ * example, in linuxapp environment, a thread that is not created by
+ * the EAL must not use mempools. This is due to the per-lcore cache
+ * that won't work as rte_lcore_id() will not return a correct value.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_lcore.h>
+#include <rte_memory.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
+#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
+#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+/**
+ * A structure that stores the mempool statistics (per-lcore).
+ */
+struct rte_mempool_debug_stats {
+ uint64_t put_bulk; /**< Number of puts. */
+ uint64_t put_objs; /**< Number of objects successfully put. */
+ uint64_t get_success_bulk; /**< Successful allocation number. */
+ uint64_t get_success_objs; /**< Objects successfully allocated. */
+ uint64_t get_fail_bulk; /**< Failed allocation number. */
+ uint64_t get_fail_objs; /**< Objects that failed to be allocated. */
+} __rte_cache_aligned;
+#endif
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+/**
+ * A structure that stores a per-core object cache.
+ */
+struct rte_mempool_cache {
+ unsigned len; /**< Cache len */
+ /*
+ * Cache is allocated to this size to allow it to overflow in certain
+ * cases to avoid needless emptying of cache.
+ */
+ void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
+} __rte_cache_aligned;
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
+struct rte_mempool_objsz {
+ uint32_t elt_size; /**< Size of an element. */
+ uint32_t header_size; /**< Size of header (before elt). */
+ uint32_t trailer_size; /**< Size of trailer (after elt). */
+ uint32_t total_size;
+ /**< Total size of an object (header + elt + trailer). */
+};
+
+#define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool name. */
+#define RTE_MEMPOOL_MZ_PREFIX "MP_"
+
+/* "MP_<name>" */
+#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+
+/* "<name>_MP_elt" */
+#define RTE_MEMPOOL_OBJ_NAME "%s_" RTE_MEMPOOL_MZ_PREFIX "elt"
+
+#else
+
+#define RTE_MEMPOOL_OBJ_NAME RTE_MEMPOOL_MZ_FORMAT
+
+#endif /* RTE_LIBRTE_XEN_DOM0 */
+
+#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)
+
+/** Mempool over one chunk of physically contiguous memory */
+#define MEMPOOL_PG_NUM_DEFAULT 1
+
+/**
+ * The RTE mempool structure.
+ */
+struct rte_mempool {
+ char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
+ struct rte_ring *ring; /**< Ring to store objects. */
+ phys_addr_t phys_addr; /**< Phys. addr. of mempool struct. */
+ int flags; /**< Flags of the mempool. */
+ uint32_t size; /**< Size of the mempool. */
+ uint32_t cache_size; /**< Size of per-lcore local cache. */
+ uint32_t cache_flushthresh;
+ /**< Threshold before we flush excess elements. */
+
+ uint32_t elt_size; /**< Size of an element. */
+ uint32_t header_size; /**< Size of header (before elt). */
+ uint32_t trailer_size; /**< Size of trailer (after elt). */
+
+ unsigned private_data_size; /**< Size of private data. */
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ /** Per-lcore local cache. */
+ struct rte_mempool_cache local_cache[RTE_MAX_LCORE];
+#endif
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ /** Per-lcore statistics. */
+ struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
+#endif
+
+ /* Address translation support, starts from next cache line. */
+
+ /** Number of elements in the elt_pa array. */
+ uint32_t pg_num __rte_cache_aligned;
+ uint32_t pg_shift; /**< LOG2 of the physical page size. */
+ uintptr_t pg_mask; /**< physical page mask value. */
+ uintptr_t elt_va_start;
+ /**< Virtual address of the first mempool object. */
+ uintptr_t elt_va_end;
+ /**< Virtual address of the <size + 1> mempool object. */
+ phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
+ /**< Array of physical pages addresses for the mempool objects buffer. */
+
+} __rte_cache_aligned;
+
+#define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread in memory. */
+#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
+#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
+#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
+
+/**
+ * @internal When debug is enabled, store some statistics.
+ * @param mp
+ * Pointer to the memory pool.
+ * @param name
+ * Name of the statistics field to increment in the memory pool.
+ * @param n
+ * Number of objects to add to the per-object statistics.
+ */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#define __MEMPOOL_STAT_ADD(mp, name, n) do { \
+ unsigned __lcore_id = rte_lcore_id(); \
+ mp->stats[__lcore_id].name##_objs += n; \
+ mp->stats[__lcore_id].name##_bulk += 1; \
+ } while(0)
+#else
+#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
+#endif
+
+/**
+ * Calculates size of the mempool header.
+ * @param mp
+ * Pointer to the memory pool.
+ * @param pgn
+ * Number of pages used to store mempool objects.
+ */
+#define MEMPOOL_HEADER_SIZE(mp, pgn) (sizeof(*(mp)) + \
+ RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
+ sizeof ((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))
+
+/**
+ * Returns TRUE if whole mempool is allocated in one contiguous block of memory.
+ */
+#define MEMPOOL_IS_CONTIG(mp) \
+ ((mp)->pg_num == MEMPOOL_PG_NUM_DEFAULT && \
+ (mp)->phys_addr == (mp)->elt_pa[0])
+
+/**
+ * @internal Get a pointer to a mempool pointer in the object header.
+ * @param obj
+ * Pointer to object.
+ * @return
+ * The pointer to the mempool from which the object was allocated.
+ */
+static inline struct rte_mempool **__mempool_from_obj(void *obj)
+{
+ struct rte_mempool **mpp;
+ unsigned off;
+
+ off = sizeof(struct rte_mempool *);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ off += sizeof(uint64_t);
+#endif
+ mpp = (struct rte_mempool **)((char *)obj - off);
+ return mpp;
+}
+
+/**
+ * Return a pointer to the mempool owning this object.
+ *
+ * @param obj
+ * An object that is owned by a pool. If this is not the case,
+ * the behavior is undefined.
+ * @return
+ * A pointer to the mempool structure.
+ */
+static inline const struct rte_mempool *rte_mempool_from_obj(void *obj)
+{
+ struct rte_mempool * const *mpp;
+ mpp = __mempool_from_obj(obj);
+ return *mpp;
+}
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+/* get header cookie value */
+static inline uint64_t __mempool_read_header_cookie(const void *obj)
+{
+ return *(const uint64_t *)((const char *)obj - sizeof(uint64_t));
+}
+
+/* get trailer cookie value */
+static inline uint64_t __mempool_read_trailer_cookie(void *obj)
+{
+ struct rte_mempool **mpp = __mempool_from_obj(obj);
+ return *(uint64_t *)((char *)obj + (*mpp)->elt_size);
+}
+
+/* write header cookie value */
+static inline void __mempool_write_header_cookie(void *obj, int free)
+{
+ uint64_t *cookie_p;
+ cookie_p = (uint64_t *)((char *)obj - sizeof(uint64_t));
+ if (free == 0)
+ *cookie_p = RTE_MEMPOOL_HEADER_COOKIE1;
+ else
+ *cookie_p = RTE_MEMPOOL_HEADER_COOKIE2;
+
+}
+
+/* write trailer cookie value */
+static inline void __mempool_write_trailer_cookie(void *obj)
+{
+ uint64_t *cookie_p;
+ struct rte_mempool **mpp = __mempool_from_obj(obj);
+ cookie_p = (uint64_t *)((char *)obj + (*mpp)->elt_size);
+ *cookie_p = RTE_MEMPOOL_TRAILER_COOKIE;
+}
+#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
+
+/**
+ * @internal Check and update cookies or panic.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param obj_table_const
+ * Pointer to a table of void * pointers (objects).
+ * @param n
+ * Index of object in object table.
+ * @param free
+ * - 0: object is supposed to be allocated, mark it as free
+ * - 1: object is supposed to be free, mark it as allocated
+ * - 2: just check that cookie is valid (free or allocated)
+ */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+static inline void __mempool_check_cookies(const struct rte_mempool *mp,
+ void * const *obj_table_const,
+ unsigned n, int free)
+{
+ uint64_t cookie;
+ void *tmp;
+ void *obj;
+ void **obj_table;
+
+ /* Force to drop the "const" attribute. This is done only when
+ * DEBUG is enabled */
+ tmp = (void *) obj_table_const;
+ obj_table = (void **) tmp;
+
+ while (n--) {
+ obj = obj_table[n];
+
+ if (rte_mempool_from_obj(obj) != mp)
+ rte_panic("MEMPOOL: object is owned by another "
+ "mempool\n");
+
+ cookie = __mempool_read_header_cookie(obj);
+
+ if (free == 0) {
+ if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
+ rte_log_set_history(0);
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+ obj, mp, cookie);
+ rte_panic("MEMPOOL: bad header cookie (put)\n");
+ }
+ __mempool_write_header_cookie(obj, 1);
+ }
+ else if (free == 1) {
+ if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
+ rte_log_set_history(0);
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+ obj, mp, cookie);
+ rte_panic("MEMPOOL: bad header cookie (get)\n");
+ }
+ __mempool_write_header_cookie(obj, 0);
+ }
+ else if (free == 2) {
+ if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
+ cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
+ rte_log_set_history(0);
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+ obj, mp, cookie);
+ rte_panic("MEMPOOL: bad header cookie (audit)\n");
+ }
+ }
+ cookie = __mempool_read_trailer_cookie(obj);
+ if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
+ rte_log_set_history(0);
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+ obj, mp, cookie);
+ rte_panic("MEMPOOL: bad trailer cookie\n");
+ }
+ }
+}
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic error "-Wcast-qual"
+#endif
+#else
+#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
+#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
+
+/**
+ * A mempool object iterator callback function.
+ */
+typedef void (*rte_mempool_obj_iter_t)(void * /*obj_iter_arg*/,
+ void * /*obj_start*/,
+ void * /*obj_end*/,
+ uint32_t /*obj_index */);
+
+/*
+ * Iterates across objects of the given size and alignment in the
+ * provided chunk of memory. The given memory buffer can consist of
+ * disjoint physical pages.
+ * For each object calls the provided callback (if any).
+ * Used to populate mempool, walk through all elements of the mempool,
+ * estimate how many elements of the given size could be created in the given
+ * memory buffer.
+ * @param vaddr
+ * Virtual address of the memory buffer.
+ * @param elt_num
+ * Maximum number of objects to iterate through.
+ * @param elt_sz
+ * Size of each object.
+ * @param paddr
+ * Array of physical addresses of the pages that comprise the given memory
+ * buffer.
+ * @param pg_num
+ * Number of elements in the paddr array.
+ * @param pg_shift
+ * LOG2 of the physical page size.
+ * @param obj_iter
+ * Object iterator callback function (could be NULL).
+ * @param obj_iter_arg
+ * User-defined parameter for the object iterator callback function.
+ *
+ * @return
+ * Number of objects iterated through.
+ */
+
+uint32_t rte_mempool_obj_iter(void *vaddr,
+ uint32_t elt_num, size_t elt_sz, size_t align,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg);
+
+/**
+ * An object constructor callback function for mempool.
+ *
+ * Arguments are the mempool, the opaque pointer given by the user in
+ * rte_mempool_create(), the pointer to the element and the index of
+ * the element in the pool.
+ */
+typedef void (rte_mempool_obj_ctor_t)(struct rte_mempool *, void *,
+ void *, unsigned);
+
+/**
+ * A mempool constructor callback function.
+ *
+ * Arguments are the mempool and the opaque pointer given by the user in
+ * rte_mempool_create().
+ */
+typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
+
+/**
+ * Creates a new mempool named *name* in memory.
+ *
+ * This function uses ``memzone_reserve()`` to allocate memory. The
+ * pool contains n elements of elt_size. Its size is set to n.
+ * All elements of the mempool are allocated together with the mempool header,
+ * in one physically contiguous chunk of memory.
+ *
+ * @param name
+ * The name of the mempool.
+ * @param n
+ * The number of elements in the mempool. The optimum size (in terms of
+ * memory usage) for a mempool is when n is a power of two minus one:
+ * n = (2^q - 1).
+ * @param elt_size
+ * The size of each element.
+ * @param cache_size
+ * If cache_size is non-zero, the rte_mempool library will try to
+ * limit the accesses to the common lockless pool, by maintaining a
+ * per-lcore object cache. This argument must be lower or equal to
+ * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ * cache_size to have "n modulo cache_size == 0": if this is
+ * not the case, some elements will always stay in the pool and will
+ * never be used. The access to the per-lcore table is of course
+ * faster than the multi-producer/consumer pool. The cache can be
+ * disabled if the cache_size argument is set to 0; it can be useful to
+ * avoid losing objects in cache. Note that even if not used, the
+ * memory space for cache is always reserved in a mempool structure,
+ * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * @param private_data_size
+ * The size of the private data appended after the mempool
+ * structure. This is useful for storing some private data after the
+ * mempool structure, as is done for rte_mbuf_pool for example.
+ * @param mp_init
+ * A function pointer that is called for initialization of the pool,
+ * before object initialization. The user can initialize the private
+ * data in this function if needed. This parameter can be NULL if
+ * not needed.
+ * @param mp_init_arg
+ * An opaque pointer to data that can be used in the mempool
+ * constructor function.
+ * @param obj_init
+ * A function pointer that is called for each object at
+ * initialization of the pool. The user can set some meta data in
+ * objects if needed. This parameter can be NULL if not needed.
+ * The obj_init() function takes the mempool pointer, the init_arg,
+ * the object pointer and the object number as parameters.
+ * @param obj_init_arg
+ * An opaque pointer to data that can be used as an argument for
+ * each call to the object constructor function.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in the case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * The *flags* argument is an OR of the following flags:
+ * - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
+ * between channels in RAM: the pool allocator will add padding
+ * between objects depending on the hardware configuration. See
+ * Memory alignment constraints for details. If this flag is set,
+ * the allocator will just align them to a cache line.
+ * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
+ * cache-aligned. This flag removes this constraint, and no
+ * padding will be present between objects. This flag implies
+ * MEMPOOL_F_NO_SPREAD.
+ * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
+ * when using rte_mempool_put() or rte_mempool_put_bulk() is
+ * "single-producer". Otherwise, it is "multi-producers".
+ * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
+ * when using rte_mempool_get() or rte_mempool_get_bulk() is
+ * "single-consumer". Otherwise, it is "multi-consumers".
+ * @return
+ * The pointer to the newly allocated mempool, on success. NULL on error
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
+ * - EINVAL - cache size provided is too large
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_mempool *
+rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags);
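+
+/**
+ * Illustrative usage sketch (editor's addition, not part of the upstream
+ * documentation): creating a pool of fixed-size objects. The pool name,
+ * element count, element size and cache size below are arbitrary example
+ * values.
+ *
+ * @code
+ * struct rte_mempool *mp;
+ *
+ * mp = rte_mempool_create("example_pool", 1023, 2048, 32, 0,
+ *                         NULL, NULL, NULL, NULL,
+ *                         SOCKET_ID_ANY, 0);
+ * if (mp == NULL)
+ *         rte_panic("cannot create example_pool\n");
+ * @endcode
+ */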
+
+/**
+ * Creates a new mempool named *name* in memory.
+ *
+ * This function uses ``memzone_reserve()`` to allocate memory. The
+ * pool contains n elements of elt_size. Its size is set to n.
+ * Depending on the input parameters, mempool elements can be either allocated
+ * together with the mempool header, or stored in an externally provided
+ * memory buffer. In the latter case, that external memory buffer can consist
+ * of a set of disjoint physical pages.
+ *
+ * @param name
+ * The name of the mempool.
+ * @param n
+ * The number of elements in the mempool. The optimum size (in terms of
+ * memory usage) for a mempool is when n is a power of two minus one:
+ * n = (2^q - 1).
+ * @param elt_size
+ * The size of each element.
+ * @param cache_size
+ * If cache_size is non-zero, the rte_mempool library will try to
+ * limit the accesses to the common lockless pool, by maintaining a
+ * per-lcore object cache. This argument must be lower or equal to
+ * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ * cache_size to have "n modulo cache_size == 0": if this is
+ * not the case, some elements will always stay in the pool and will
+ * never be used. The access to the per-lcore table is of course
+ * faster than the multi-producer/consumer pool. The cache can be
+ * disabled if the cache_size argument is set to 0; it can be useful to
+ * avoid losing objects in cache. Note that even if not used, the
+ * memory space for cache is always reserved in a mempool structure,
+ * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * @param private_data_size
+ * The size of the private data appended after the mempool
+ * structure. This is useful for storing some private data after the
+ * mempool structure, as is done for rte_mbuf_pool for example.
+ * @param mp_init
+ * A function pointer that is called for initialization of the pool,
+ * before object initialization. The user can initialize the private
+ * data in this function if needed. This parameter can be NULL if
+ * not needed.
+ * @param mp_init_arg
+ * An opaque pointer to data that can be used in the mempool
+ * constructor function.
+ * @param obj_init
+ * A function pointer that is called for each object at
+ * initialization of the pool. The user can set some meta data in
+ * objects if needed. This parameter can be NULL if not needed.
+ * The obj_init() function takes the mempool pointer, the init_arg,
+ * the object pointer and the object number as parameters.
+ * @param obj_init_arg
+ * An opaque pointer to data that can be used as an argument for
+ * each call to the object constructor function.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in the case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * The *flags* argument is an OR of the following flags:
+ * - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
+ * between channels in RAM: the pool allocator will add padding
+ * between objects depending on the hardware configuration. See
+ * Memory alignment constraints for details. If this flag is set,
+ * the allocator will just align them to a cache line.
+ * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
+ * cache-aligned. This flag removes this constraint, and no
+ * padding will be present between objects. This flag implies
+ * MEMPOOL_F_NO_SPREAD.
+ * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
+ * when using rte_mempool_put() or rte_mempool_put_bulk() is
+ * "single-producer". Otherwise, it is "multi-producers".
+ * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
+ * when using rte_mempool_get() or rte_mempool_get_bulk() is
+ * "single-consumer". Otherwise, it is "multi-consumers".
+ * @param vaddr
+ * Virtual address of the externally allocated memory buffer.
+ * Will be used to store mempool objects.
+ * @param paddr
+ * Array of physical addresses of the pages that comprise the given memory
+ * buffer.
+ * @param pg_num
+ * Number of elements in the paddr array.
+ * @param pg_shift
+ * LOG2 of the physical page size.
+ * @return
+ * The pointer to the newly allocated mempool, on success. NULL on error
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
+ * - EINVAL - cache size provided is too large
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_mempool *
+rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags, void *vaddr,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
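+
+/**
+ * Illustrative usage sketch (editor's addition): creating a mempool on top
+ * of an externally allocated buffer. The helper name and element parameters
+ * are assumptions; the buffer, its physical page array, page count and page
+ * shift are expected to be prepared by the caller, e.g. sized with
+ * rte_mempool_xmem_size() (documented later in this header).
+ *
+ * @code
+ * static struct rte_mempool *
+ * make_ext_pool(void *vaddr, const phys_addr_t paddr[],
+ *               uint32_t pg_num, uint32_t pg_shift)
+ * {
+ *         return rte_mempool_xmem_create("ext_pool", 1023, 2048, 32, 0,
+ *                                        NULL, NULL, NULL, NULL,
+ *                                        SOCKET_ID_ANY, 0,
+ *                                        vaddr, paddr, pg_num, pg_shift);
+ * }
+ * @endcode
+ */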
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+/**
+ * Creates a new mempool named *name* in memory on Xen Dom0.
+ *
+ * This function uses ``rte_mempool_xmem_create()`` to allocate memory. The
+ * pool contains n elements of elt_size. Its size is set to n.
+ * All elements of the mempool are allocated together with the mempool header,
+ * and the memory buffer can consist of a set of disjoint physical pages.
+ *
+ * @param name
+ * The name of the mempool.
+ * @param n
+ * The number of elements in the mempool. The optimum size (in terms of
+ * memory usage) for a mempool is when n is a power of two minus one:
+ * n = (2^q - 1).
+ * @param elt_size
+ * The size of each element.
+ * @param cache_size
+ * If cache_size is non-zero, the rte_mempool library will try to
+ * limit the accesses to the common lockless pool, by maintaining a
+ * per-lcore object cache. This argument must be lower or equal to
+ * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ * cache_size to have "n modulo cache_size == 0": if this is
+ * not the case, some elements will always stay in the pool and will
+ * never be used. The access to the per-lcore table is of course
+ * faster than the multi-producer/consumer pool. The cache can be
+ * disabled if the cache_size argument is set to 0; it can be useful to
+ * avoid losing objects in cache. Note that even if not used, the
+ * memory space for cache is always reserved in a mempool structure,
+ * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * @param private_data_size
+ * The size of the private data appended after the mempool
+ * structure. This is useful for storing some private data after the
+ * mempool structure, as is done for rte_mbuf_pool for example.
+ * @param mp_init
+ * A function pointer that is called for initialization of the pool,
+ * before object initialization. The user can initialize the private
+ * data in this function if needed. This parameter can be NULL if
+ * not needed.
+ * @param mp_init_arg
+ * An opaque pointer to data that can be used in the mempool
+ * constructor function.
+ * @param obj_init
+ * A function pointer that is called for each object at
+ * initialization of the pool. The user can set some meta data in
+ * objects if needed. This parameter can be NULL if not needed.
+ * The obj_init() function takes the mempool pointer, the init_arg,
+ * the object pointer and the object number as parameters.
+ * @param obj_init_arg
+ * An opaque pointer to data that can be used as an argument for
+ * each call to the object constructor function.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in the case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * The *flags* argument is an OR of the following flags:
+ * - MEMPOOL_F_NO_SPREAD: By default, object addresses are spread
+ * between channels in RAM: the pool allocator will add padding
+ * between objects depending on the hardware configuration. See
+ * Memory alignment constraints for details. If this flag is set,
+ * the allocator will just align them to a cache line.
+ * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
+ * cache-aligned. This flag removes this constraint, and no
+ * padding will be present between objects. This flag implies
+ * MEMPOOL_F_NO_SPREAD.
+ * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
+ * when using rte_mempool_put() or rte_mempool_put_bulk() is
+ * "single-producer". Otherwise, it is "multi-producers".
+ * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
+ * when using rte_mempool_get() or rte_mempool_get_bulk() is
+ * "single-consumer". Otherwise, it is "multi-consumers".
+ * @return
+ * The pointer to the newly allocated mempool, on success. NULL on error
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
+ * - EINVAL - cache size provided is too large
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_mempool *
+rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags);
+#endif
+
+/**
+ * Dump the status of the mempool to the console.
+ *
+ * @param f
+ * A pointer to a file for output
+ * @param mp
+ * A pointer to the mempool structure.
+ */
+void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);
+
+/**
+ * @internal Put several objects back in the mempool; used internally.
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to store back in the mempool, must be strictly
+ * positive.
+ * @param is_mp
+ * Mono-producer (0) or multi-producers (1).
+ */
+static inline void __attribute__((always_inline))
+__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n, int is_mp)
+{
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ struct rte_mempool_cache *cache;
+ uint32_t index;
+ void **cache_objs;
+ unsigned lcore_id = rte_lcore_id();
+ uint32_t cache_size = mp->cache_size;
+ uint32_t flushthresh = mp->cache_flushthresh;
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
+ /* increment stat now, adding to the mempool always succeeds */
+ __MEMPOOL_STAT_ADD(mp, put, n);
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ /* cache is not enabled or single producer */
+ if (unlikely(cache_size == 0 || is_mp == 0))
+ goto ring_enqueue;
+
+ /* Go straight to ring if put would overflow mem allocated for cache */
+ if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
+ goto ring_enqueue;
+
+ cache = &mp->local_cache[lcore_id];
+ cache_objs = &cache->objs[cache->len];
+
+ /*
+ * The cache follows the following algorithm
+ * 1. Add the objects to the cache
+ * 2. Anything greater than the cache min value (if it crosses the
+ * cache flush threshold) is flushed to the ring.
+ */
+
+ /* Add elements back into the cache */
+ for (index = 0; index < n; ++index, obj_table++)
+ cache_objs[index] = *obj_table;
+
+ cache->len += n;
+
+ if (cache->len >= flushthresh) {
+ rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
+ cache->len - cache_size);
+ cache->len = cache_size;
+ }
+
+ return;
+
+ring_enqueue:
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
+ /* push remaining objects in ring */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (is_mp) {
+ if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
+ rte_panic("cannot put objects in mempool\n");
+ }
+ else {
+ if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
+ rte_panic("cannot put objects in mempool\n");
+ }
+#else
+ if (is_mp)
+ rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
+ else
+ rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
+#endif
+}
+
+
+/**
+ * Put several objects back in the mempool (multi-producers safe).
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the mempool from the obj_table.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ __mempool_check_cookies(mp, obj_table, n, 0);
+ __mempool_put_bulk(mp, obj_table, n, 1);
+}
+
+/**
+ * Put several objects back in the mempool (NOT multi-producers safe).
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the mempool from obj_table.
+ */
+static inline void
+rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ __mempool_check_cookies(mp, obj_table, n, 0);
+ __mempool_put_bulk(mp, obj_table, n, 0);
+}
+
+/**
+ * Put several objects back in the mempool.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the mempool from obj_table.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ __mempool_check_cookies(mp, obj_table, n, 0);
+ __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
+}
+
+/**
+ * Put one object in the mempool (multi-producers safe).
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj
+ * A pointer to the object to be added.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
+{
+ rte_mempool_mp_put_bulk(mp, &obj, 1);
+}
+
+/**
+ * Put one object back in the mempool (NOT multi-producers safe).
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj
+ * A pointer to the object to be added.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
+{
+ rte_mempool_sp_put_bulk(mp, &obj, 1);
+}
+
+/**
+ * Put one object back in the mempool.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj
+ * A pointer to the object to be added.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_put(struct rte_mempool *mp, void *obj)
+{
+ rte_mempool_put_bulk(mp, &obj, 1);
+}
+
+/**
+ * @internal Get several objects from the mempool; used internally.
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to get, must be strictly positive.
+ * @param is_mc
+ * Mono-consumer (0) or multi-consumers (1).
+ * @return
+ * - >=0: Success; number of objects supplied.
+ * - <0: Error; code of ring dequeue function.
+ */
+static inline int __attribute__((always_inline))
+__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
+ unsigned n, int is_mc)
+{
+ int ret;
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ struct rte_mempool_cache *cache;
+ uint32_t index, len;
+ void **cache_objs;
+ unsigned lcore_id = rte_lcore_id();
+ uint32_t cache_size = mp->cache_size;
+
+ /* cache is not enabled or single consumer */
+ if (unlikely(cache_size == 0 || is_mc == 0 || n >= cache_size))
+ goto ring_dequeue;
+
+ cache = &mp->local_cache[lcore_id];
+ cache_objs = cache->objs;
+
+ /* Can this be satisfied from the cache? */
+ if (cache->len < n) {
+ /* No. Backfill the cache first, and then fill from it */
+ uint32_t req = n + (cache_size - cache->len);
+
+ /* How many do we require i.e. number to fill the cache + the request */
+ ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
+ if (unlikely(ret < 0)) {
+ /*
+ * In the off chance that we are buffer constrained,
+ * where we are not able to allocate cache + n, go to
+ * the ring directly. If that fails, we are truly out of
+ * buffers.
+ */
+ goto ring_dequeue;
+ }
+
+ cache->len += req;
+ }
+
+ /* Now fill in the response ... */
+ for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
+ *obj_table = cache_objs[len];
+
+ cache->len -= n;
+
+ __MEMPOOL_STAT_ADD(mp, get_success, n);
+
+ return 0;
+
+ring_dequeue:
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
+ /* get remaining objects from ring */
+ if (is_mc)
+ ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
+ else
+ ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
+
+ if (ret < 0)
+ __MEMPOOL_STAT_ADD(mp, get_fail, n);
+ else
+ __MEMPOOL_STAT_ADD(mp, get_success, n);
+
+ return ret;
+}
+
+/**
+ * Get several objects from the mempool (multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to get from mempool to obj_table.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ int ret;
+ ret = __mempool_get_bulk(mp, obj_table, n, 1);
+ if (ret == 0)
+ __mempool_check_cookies(mp, obj_table, n, 1);
+ return ret;
+}
+
+/**
+ * Get several objects from the mempool (NOT multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to get from the mempool to obj_table.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is
+ * retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ int ret;
+ ret = __mempool_get_bulk(mp, obj_table, n, 0);
+ if (ret == 0)
+ __mempool_check_cookies(mp, obj_table, n, 1);
+ return ret;
+}
+
+/**
+ * Get several objects from the mempool.
+ *
+ * This function calls the multi-consumers or the single-consumer
+ * version, depending on the default behaviour that was specified at
+ * mempool creation time (see flags).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to get from the mempool to obj_table.
+ * @return
+ * - 0: Success; objects taken
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ int ret;
+ ret = __mempool_get_bulk(mp, obj_table, n,
+ !(mp->flags & MEMPOOL_F_SC_GET));
+ if (ret == 0)
+ __mempool_check_cookies(mp, obj_table, n, 1);
+ return ret;
+}
+
+/**
+ * Get one object from the mempool (multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_p
+ * A pointer to a void * pointer (object) that will be filled.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
+{
+ return rte_mempool_mc_get_bulk(mp, obj_p, 1);
+}
+
+/**
+ * Get one object from the mempool (NOT multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_p
+ * A pointer to a void * pointer (object) that will be filled.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
+{
+ return rte_mempool_sc_get_bulk(mp, obj_p, 1);
+}
+
+/**
+ * Get one object from the mempool.
+ *
+ * This function calls the multi-consumers or the single-consumer
+ * version, depending on the default behavior that was specified at
+ * mempool creation (see flags).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_p
+ * A pointer to a void * pointer (object) that will be filled.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_get(struct rte_mempool *mp, void **obj_p)
+{
+ return rte_mempool_get_bulk(mp, obj_p, 1);
+}
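+
+/**
+ * Illustrative usage sketch (editor's addition): a get/use/put round trip
+ * on a previously created pool. The function name is hypothetical.
+ *
+ * @code
+ * static int
+ * use_one_object(struct rte_mempool *mp)
+ * {
+ *         void *obj;
+ *
+ *         if (rte_mempool_get(mp, &obj) < 0)
+ *                 return -1; // local cache and common pool are empty
+ *         // ... work on obj ...
+ *         rte_mempool_put(mp, obj);
+ *         return 0;
+ * }
+ * @endcode
+ */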
+
+/**
+ * Return the number of entries in the mempool.
+ *
+ * When cache is enabled, this function has to browse the length of
+ * all lcores, so it should not be used in a data path, but only for
+ * debug purposes.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The number of entries in the mempool.
+ */
+unsigned rte_mempool_count(const struct rte_mempool *mp);
+
+/**
+ * Return the number of free entries in the mempool ring, i.e. how many
+ * entries can be freed back to the mempool.
+ *
+ * NOTE: This corresponds to the number of elements *allocated* from the
+ * memory pool, not the number of elements in the pool itself. To count
+ * the number of elements currently available in the pool, use "rte_mempool_count".
+ *
+ * When cache is enabled, this function has to browse the length of
+ * all lcores, so it should not be used in a data path, but only for
+ * debug purposes.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The number of free entries in the mempool.
+ */
+static inline unsigned
+rte_mempool_free_count(const struct rte_mempool *mp)
+{
+ return mp->size - rte_mempool_count(mp);
+}
+
+/**
+ * Test if the mempool is full.
+ *
+ * When cache is enabled, this function has to browse the length of all
+ * lcores, so it should not be used in a data path, but only for debug
+ * purposes.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * - 1: The mempool is full.
+ * - 0: The mempool is not full.
+ */
+static inline int
+rte_mempool_full(const struct rte_mempool *mp)
+{
+ return !!(rte_mempool_count(mp) == mp->size);
+}
+
+/**
+ * Test if the mempool is empty.
+ *
+ * When cache is enabled, this function has to browse the length of all
+ * lcores, so it should not be used in a data path, but only for debug
+ * purposes.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * - 1: The mempool is empty.
+ * - 0: The mempool is not empty.
+ */
+static inline int
+rte_mempool_empty(const struct rte_mempool *mp)
+{
+ return !!(rte_mempool_count(mp) == 0);
+}
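+
+/**
+ * Illustrative usage sketch (editor's addition): debug-time accounting of a
+ * pool. These helpers browse the per-lcore caches, so they belong in debug
+ * or control-path code, not in the data path.
+ *
+ * @code
+ * printf("pool %s: %u available, %u allocated, full=%d empty=%d\n",
+ *        mp->name, rte_mempool_count(mp), rte_mempool_free_count(mp),
+ *        rte_mempool_full(mp), rte_mempool_empty(mp));
+ * @endcode
+ */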
+
+/**
+ * Return the physical address of elt, which is an element of the pool mp.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param elt
+ * A pointer (virtual address) to the element of the pool.
+ * @return
+ * The physical address of the elt element.
+ */
+static inline phys_addr_t
+rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
+{
+ if (rte_eal_has_hugepages()) {
+ uintptr_t off;
+
+ off = (const char *)elt - (const char *)mp->elt_va_start;
+ return (mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask));
+ } else {
+ /*
+ * If huge pages are disabled, we cannot assume the
+ * memory region to be physically contiguous.
+ * Lookup for each element.
+ */
+ return rte_mem_virt2phy(elt);
+ }
+}
+
+/**
+ * Check the consistency of mempool objects.
+ *
+ * Verify the coherency of fields in the mempool structure. Also check
+ * that the cookies of mempool objects (even the ones that are not
+ * present in pool) have a correct value. If not, a panic will occur.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ */
+void rte_mempool_audit(const struct rte_mempool *mp);
+
+/**
+ * Return a pointer to the private data in a mempool structure.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * A pointer to the private data.
+ */
+static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
+{
+ return (char *)mp + MEMPOOL_HEADER_SIZE(mp, mp->pg_num);
+}
+
+/**
+ * Dump the status of all mempools on the console
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_mempool_list_dump(FILE *f);
+
+/**
+ * Search for a mempool by its name
+ *
+ * @param name
+ * The name of the mempool.
+ * @return
+ * The pointer to the mempool matching the name, or NULL if not found,
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ *
+ */
+struct rte_mempool *rte_mempool_lookup(const char *name);
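+
+/**
+ * Illustrative usage sketch (editor's addition): attaching to an existing
+ * pool by name, e.g. from a secondary process. The pool name reuses the
+ * earlier creation example and is an assumption.
+ *
+ * @code
+ * struct rte_mempool *mp = rte_mempool_lookup("example_pool");
+ * if (mp == NULL)
+ *         rte_panic("example_pool not found (rte_errno=%d)\n", rte_errno);
+ * @endcode
+ */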
+
+/**
+ * Given a desired size of the mempool element and mempool flags,
+ * calculates the header, trailer, body and total sizes of the mempool object.
+ * @param elt_size
+ * The size of each element.
+ * @param flags
+ * The flags used for the mempool creation.
+ * Consult rte_mempool_create() for more information about possible values.
+ * @param sz
+ * Structure filled with the calculated detailed size information.
+ * @return
+ * Total size of the mempool object.
+ */
+uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
+ struct rte_mempool_objsz *sz);
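+
+/**
+ * Illustrative usage sketch (editor's addition): computing the per-object
+ * footprint of a 2048-byte element with default flags; the element size is
+ * an arbitrary example value.
+ *
+ * @code
+ * struct rte_mempool_objsz objsz;
+ * uint32_t total = rte_mempool_calc_obj_size(2048, 0, &objsz);
+ * @endcode
+ */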
+
+/**
+ * Calculate the maximum amount of memory required to store a given number of objects.
+ * Assumes that the memory buffer will be aligned at a page boundary.
+ * Note that if the object size is bigger than the page size, it is assumed
+ * that we have a subset of physically contiguous pages big enough to store
+ * at least one object.
+ * @param elt_num
+ * Number of elements.
+ * @param elt_sz
+ * The size of each element.
+ * @param pg_shift
+ * LOG2 of the physical page size.
+ * @return
+ * Required memory size aligned at page boundary.
+ */
+size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
+ uint32_t pg_shift);
+
+/**
+ * Calculate how much memory would actually be required with the given
+ * memory footprint to store the required number of objects.
+ * @param vaddr
+ * Virtual address of the externally allocated memory buffer.
+ * Will be used to store mempool objects.
+ * @param elt_num
+ * Number of elements.
+ * @param elt_sz
+ * The size of each element.
+ * @param paddr
+ * Array of physical addresses of the pages that comprise the given memory
+ * buffer.
+ * @param pg_num
+ * Number of elements in the paddr array.
+ * @param pg_shift
+ * LOG2 of the physical page size.
+ * @return
+ * Number of bytes needed to store the given number of objects,
+ * aligned to the given page size.
+ * If the provided memory buffer is not big enough:
+ * (-1) * the actual number of elements that can be stored in that buffer.
+ */
+ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
+
+/**
+ * Walk list of all memory pools
+ *
+ * @param func
+ * Iterator function
+ * @param arg
+ * Argument passed to iterator
+ */
+void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),
+ void *arg);
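+
+/**
+ * Illustrative usage sketch (editor's addition): dumping every registered
+ * pool through the walk callback. The callback name is hypothetical.
+ *
+ * @code
+ * static void
+ * dump_one_pool(const struct rte_mempool *mp, void *arg)
+ * {
+ *         rte_mempool_dump((FILE *)arg, mp);
+ * }
+ *
+ * rte_mempool_walk(dump_one_pool, stdout);
+ * @endcode
+ */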
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMPOOL_H_ */
diff --git a/src/dpdk_lib18/librte_meter/Makefile b/src/dpdk_lib18/librte_meter/Makefile
new file mode 100755
index 00000000..b25c0cc4
--- /dev/null
+++ b/src/dpdk_lib18/librte_meter/Makefile
@@ -0,0 +1,53 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_meter.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_METER) := rte_meter.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_METER)-include := rte_meter.h
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_METER) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_meter/rte_meter.c b/src/dpdk_lib18/librte_meter/rte_meter.c
new file mode 100755
index 00000000..5e2dadb8
--- /dev/null
+++ b/src/dpdk_lib18/librte_meter/rte_meter.c
@@ -0,0 +1,120 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <math.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+
+#include "rte_meter.h"
+
+#ifndef RTE_METER_TB_PERIOD_MIN
+#define RTE_METER_TB_PERIOD_MIN 100
+#endif
+
+static void
+rte_meter_get_tb_params(uint64_t hz, uint64_t rate, uint64_t *tb_period, uint64_t *tb_bytes_per_period)
+{
+ double period = ((double) hz) / ((double) rate);
+
+ if (period >= RTE_METER_TB_PERIOD_MIN) {
+ *tb_bytes_per_period = 1;
+ *tb_period = (uint64_t) period;
+ } else {
+ *tb_bytes_per_period = (uint64_t) ceil(RTE_METER_TB_PERIOD_MIN / period);
+ *tb_period = (hz * (*tb_bytes_per_period)) / rate;
+ }
+}
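+
+/*
+ * Worked example (editor's note, values are illustrative): with
+ * hz = 2,000,000,000 cycles/s and rate = 125,000,000 bytes/s (1 Gbit/s),
+ * period = 2e9 / 125e6 = 16 cycles, which is below RTE_METER_TB_PERIOD_MIN,
+ * so tb_bytes_per_period = ceil(100 / 16) = 7 and
+ * tb_period = 2e9 * 7 / 125e6 = 112 cycles: 7 bytes are credited every
+ * 112 cycles, which preserves the configured byte rate.
+ */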
+
+int
+rte_meter_srtcm_config(struct rte_meter_srtcm *m, struct rte_meter_srtcm_params *params)
+{
+ uint64_t hz;
+
+ /* Check input parameters */
+ if ((m == NULL) || (params == NULL)) {
+ return -1;
+ }
+
+ if ((params->cir == 0) || ((params->cbs == 0) && (params->ebs == 0))) {
+ return -2;
+ }
+
+ /* Initialize srTCM run-time structure */
+ hz = rte_get_tsc_hz();
+ m->time = rte_get_tsc_cycles();
+ m->tc = m->cbs = params->cbs;
+ m->te = m->ebs = params->ebs;
+ rte_meter_get_tb_params(hz, params->cir, &m->cir_period, &m->cir_bytes_per_period);
+
+ RTE_LOG(INFO, METER, "Low level srTCM config: \n"
+ "\tCIR period = %" PRIu64 ", CIR bytes per period = %" PRIu64 "\n",
+ m->cir_period, m->cir_bytes_per_period);
+
+ return 0;
+}
+
+int
+rte_meter_trtcm_config(struct rte_meter_trtcm *m, struct rte_meter_trtcm_params *params)
+{
+ uint64_t hz;
+
+ /* Check input parameters */
+ if ((m == NULL) || (params == NULL)) {
+ return -1;
+ }
+
+ if ((params->cir == 0) || (params->pir == 0) || (params->pir < params->cir) ||
+ (params->cbs == 0) || (params->pbs == 0)) {
+ return -2;
+ }
+
+ /* Initialize trTCM run-time structure */
+ hz = rte_get_tsc_hz();
+ m->time_tc = m->time_tp = rte_get_tsc_cycles();
+ m->tc = m->cbs = params->cbs;
+ m->tp = m->pbs = params->pbs;
+ rte_meter_get_tb_params(hz, params->cir, &m->cir_period, &m->cir_bytes_per_period);
+ rte_meter_get_tb_params(hz, params->pir, &m->pir_period, &m->pir_bytes_per_period);
+
+ RTE_LOG(INFO, METER, "Low level trTCM config: \n"
+ "\tCIR period = %" PRIu64 ", CIR bytes per period = %" PRIu64 "\n"
+ "\tPIR period = %" PRIu64 ", PIR bytes per period = %" PRIu64 "\n",
+ m->cir_period, m->cir_bytes_per_period,
+ m->pir_period, m->pir_bytes_per_period);
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_meter/rte_meter.h b/src/dpdk_lib18/librte_meter/rte_meter.h
new file mode 100755
index 00000000..92728a5b
--- /dev/null
+++ b/src/dpdk_lib18/librte_meter/rte_meter.h
@@ -0,0 +1,387 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_METER_H__
+#define __INCLUDE_RTE_METER_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Traffic Metering
+ *
+ * Traffic metering algorithms:
+ * 1. Single Rate Three Color Marker (srTCM): defined by IETF RFC 2697
+ * 2. Two Rate Three Color Marker (trTCM): defined by IETF RFC 2698
+ *
+ ***/
+
+#include <stdint.h>
+
+/*
+ * Application Programmer's Interface (API)
+ *
+ ***/
+
+/** Packet Color Set */
+enum rte_meter_color {
+ e_RTE_METER_GREEN = 0, /**< Green */
+ e_RTE_METER_YELLOW, /**< Yellow */
+ e_RTE_METER_RED, /**< Red */
+ e_RTE_METER_COLORS /**< Number of available colors */
+};
+
+/** srTCM parameters per metered traffic flow. The CIR, CBS and EBS parameters only
+count bytes of IP packets and do not include link specific headers. At least one of
+the CBS or EBS parameters has to be greater than zero. */
+struct rte_meter_srtcm_params {
+ uint64_t cir; /**< Committed Information Rate (CIR). Measured in bytes per second. */
+ uint64_t cbs; /**< Committed Burst Size (CBS). Measured in bytes. */
+ uint64_t ebs; /**< Excess Burst Size (EBS). Measured in bytes. */
+};
+
+/** trTCM parameters per metered traffic flow. The CIR, PIR, CBS and PBS parameters
+only count bytes of IP packets and do not include link specific headers. PIR has to
+be greater than or equal to CIR. Both CBS and PBS have to be greater than zero. */
+struct rte_meter_trtcm_params {
+ uint64_t cir; /**< Committed Information Rate (CIR). Measured in bytes per second. */
+ uint64_t pir; /**< Peak Information Rate (PIR). Measured in bytes per second. */
+ uint64_t cbs; /**< Committed Burst Size (CBS). Measured in bytes. */
+ uint64_t pbs; /**< Peak Burst Size (PBS). Measured in bytes. */
+};
+
+/** Internal data structure storing the srTCM run-time context per metered traffic flow. */
+struct rte_meter_srtcm;
+
+/** Internal data structure storing the trTCM run-time context per metered traffic flow. */
+struct rte_meter_trtcm;
+
+/**
+ * srTCM configuration per metered traffic flow
+ *
+ * @param m
+ * Pointer to pre-allocated srTCM data structure
+ * @param params
+ * User parameters per srTCM metered traffic flow
+ * @return
+ * 0 upon success, error code otherwise
+ */
+int
+rte_meter_srtcm_config(struct rte_meter_srtcm *m,
+ struct rte_meter_srtcm_params *params);
+
+/**
+ * trTCM configuration per metered traffic flow
+ *
+ * @param m
+ * Pointer to pre-allocated trTCM data structure
+ * @param params
+ * User parameters per trTCM metered traffic flow
+ * @return
+ * 0 upon success, error code otherwise
+ */
+int
+rte_meter_trtcm_config(struct rte_meter_trtcm *m,
+ struct rte_meter_trtcm_params *params);
+
+/**
+ * srTCM color blind traffic metering
+ *
+ * @param m
+ * Handle to srTCM instance
+ * @param time
+ * Current CPU time stamp (measured in CPU cycles)
+ * @param pkt_len
+ * Length of the current IP packet (measured in bytes)
+ * @return
+ * Color assigned to the current IP packet
+ */
+static inline enum rte_meter_color
+rte_meter_srtcm_color_blind_check(struct rte_meter_srtcm *m,
+ uint64_t time,
+ uint32_t pkt_len);
+
+/**
+ * srTCM color aware traffic metering
+ *
+ * @param m
+ * Handle to srTCM instance
+ * @param time
+ * Current CPU time stamp (measured in CPU cycles)
+ * @param pkt_len
+ * Length of the current IP packet (measured in bytes)
+ * @param pkt_color
+ * Input color of the current IP packet
+ * @return
+ * Color assigned to the current IP packet
+ */
+static inline enum rte_meter_color
+rte_meter_srtcm_color_aware_check(struct rte_meter_srtcm *m,
+ uint64_t time,
+ uint32_t pkt_len,
+ enum rte_meter_color pkt_color);
+
+/**
+ * trTCM color blind traffic metering
+ *
+ * @param m
+ * Handle to trTCM instance
+ * @param time
+ * Current CPU time stamp (measured in CPU cycles)
+ * @param pkt_len
+ * Length of the current IP packet (measured in bytes)
+ * @return
+ * Color assigned to the current IP packet
+ */
+static inline enum rte_meter_color
+rte_meter_trtcm_color_blind_check(struct rte_meter_trtcm *m,
+ uint64_t time,
+ uint32_t pkt_len);
+
+/**
+ * trTCM color aware traffic metering
+ *
+ * @param m
+ * Handle to trTCM instance
+ * @param time
+ * Current CPU time stamp (measured in CPU cycles)
+ * @param pkt_len
+ * Length of the current IP packet (measured in bytes)
+ * @param pkt_color
+ * Input color of the current IP packet
+ * @return
+ * Color assigned to the current IP packet
+ */
+static inline enum rte_meter_color
+rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m,
+ uint64_t time,
+ uint32_t pkt_len,
+ enum rte_meter_color pkt_color);
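+
+/**
+ * Illustrative usage sketch (editor's addition): configuring an srTCM
+ * instance and metering one packet in color-blind mode. The CIR/CBS/EBS
+ * values are arbitrary example parameters, and pkt_len is assumed to be the
+ * IP length of the packet being metered.
+ *
+ * @code
+ * struct rte_meter_srtcm m;
+ * struct rte_meter_srtcm_params params = {
+ *         .cir = 1250000, .cbs = 2048, .ebs = 2048,
+ * };
+ * enum rte_meter_color color;
+ *
+ * if (rte_meter_srtcm_config(&m, &params) != 0)
+ *         rte_panic("srTCM config failed\n");
+ * color = rte_meter_srtcm_color_blind_check(&m, rte_rdtsc(), pkt_len);
+ * @endcode
+ */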
+
+/*
+ * Inline implementation of run-time methods
+ *
+ ***/
+
+/* Internal data structure storing the srTCM run-time context per metered traffic flow. */
+struct rte_meter_srtcm {
+ uint64_t time; /* Time of latest update of C and E token buckets */
+ uint64_t tc; /* Number of bytes currently available in the committed (C) token bucket */
+ uint64_t te; /* Number of bytes currently available in the excess (E) token bucket */
+ uint64_t cbs; /* Upper limit for C token bucket */
+ uint64_t ebs; /* Upper limit for E token bucket */
+ uint64_t cir_period; /* Number of CPU cycles for one update of C and E token buckets */
+ uint64_t cir_bytes_per_period; /* Number of bytes to add to C and E token buckets on each update */
+};
+
+/* Internal data structure storing the trTCM run-time context per metered traffic flow. */
+struct rte_meter_trtcm {
+ uint64_t time_tc; /* Time of latest update of C token bucket */
+ uint64_t time_tp; /* Time of latest update of P token bucket */
+ uint64_t tc; /* Number of bytes currently available in the committed (C) token bucket */
+ uint64_t tp; /* Number of bytes currently available in the peak (P) token bucket */
+ uint64_t cbs; /* Upper limit for C token bucket */
+ uint64_t pbs; /* Upper limit for P token bucket */
+ uint64_t cir_period; /* Number of CPU cycles for one update of C token bucket */
+ uint64_t cir_bytes_per_period; /* Number of bytes to add to C token bucket on each update */
+ uint64_t pir_period; /* Number of CPU cycles for one update of P token bucket */
+ uint64_t pir_bytes_per_period; /* Number of bytes to add to P token bucket on each update */
+};
+
+static inline enum rte_meter_color
+rte_meter_srtcm_color_blind_check(struct rte_meter_srtcm *m,
+ uint64_t time,
+ uint32_t pkt_len)
+{
+ uint64_t time_diff, n_periods, tc, te;
+
+ /* Bucket update */
+ time_diff = time - m->time;
+ n_periods = time_diff / m->cir_period;
+ m->time += n_periods * m->cir_period;
+
+ tc = m->tc + n_periods * m->cir_bytes_per_period;
+ if (tc > m->cbs)
+ tc = m->cbs;
+
+ te = m->te + n_periods * m->cir_bytes_per_period;
+ if (te > m->ebs)
+ te = m->ebs;
+
+ /* Color logic */
+ if (tc >= pkt_len) {
+ m->tc = tc - pkt_len;
+ m->te = te;
+ return e_RTE_METER_GREEN;
+ }
+
+ if (te >= pkt_len) {
+ m->tc = tc;
+ m->te = te - pkt_len;
+ return e_RTE_METER_YELLOW;
+ }
+
+ m->tc = tc;
+ m->te = te;
+ return e_RTE_METER_RED;
+}
+
+static inline enum rte_meter_color
+rte_meter_srtcm_color_aware_check(struct rte_meter_srtcm *m,
+ uint64_t time,
+ uint32_t pkt_len,
+ enum rte_meter_color pkt_color)
+{
+ uint64_t time_diff, n_periods, tc, te;
+
+ /* Bucket update */
+ time_diff = time - m->time;
+ n_periods = time_diff / m->cir_period;
+ m->time += n_periods * m->cir_period;
+
+ tc = m->tc + n_periods * m->cir_bytes_per_period;
+ if (tc > m->cbs)
+ tc = m->cbs;
+
+ te = m->te + n_periods * m->cir_bytes_per_period;
+ if (te > m->ebs)
+ te = m->ebs;
+
+ /* Color logic */
+ if ((pkt_color == e_RTE_METER_GREEN) && (tc >= pkt_len)) {
+ m->tc = tc - pkt_len;
+ m->te = te;
+ return e_RTE_METER_GREEN;
+ }
+
+ if ((pkt_color != e_RTE_METER_RED) && (te >= pkt_len)) {
+ m->tc = tc;
+ m->te = te - pkt_len;
+ return e_RTE_METER_YELLOW;
+ }
+
+ m->tc = tc;
+ m->te = te;
+ return e_RTE_METER_RED;
+}
+
+static inline enum rte_meter_color
+rte_meter_trtcm_color_blind_check(struct rte_meter_trtcm *m,
+ uint64_t time,
+ uint32_t pkt_len)
+{
+ uint64_t time_diff_tc, time_diff_tp, n_periods_tc, n_periods_tp, tc, tp;
+
+ /* Bucket update */
+ time_diff_tc = time - m->time_tc;
+ time_diff_tp = time - m->time_tp;
+ n_periods_tc = time_diff_tc / m->cir_period;
+ n_periods_tp = time_diff_tp / m->pir_period;
+ m->time_tc += n_periods_tc * m->cir_period;
+ m->time_tp += n_periods_tp * m->pir_period;
+
+ tc = m->tc + n_periods_tc * m->cir_bytes_per_period;
+ if (tc > m->cbs)
+ tc = m->cbs;
+
+ tp = m->tp + n_periods_tp * m->pir_bytes_per_period;
+ if (tp > m->pbs)
+ tp = m->pbs;
+
+ /* Color logic */
+ if (tp < pkt_len) {
+ m->tc = tc;
+ m->tp = tp;
+ return e_RTE_METER_RED;
+ }
+
+ if (tc < pkt_len) {
+ m->tc = tc;
+ m->tp = tp - pkt_len;
+ return e_RTE_METER_YELLOW;
+ }
+
+ m->tc = tc - pkt_len;
+ m->tp = tp - pkt_len;
+ return e_RTE_METER_GREEN;
+}
+
+static inline enum rte_meter_color
+rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m,
+ uint64_t time,
+ uint32_t pkt_len,
+ enum rte_meter_color pkt_color)
+{
+ uint64_t time_diff_tc, time_diff_tp, n_periods_tc, n_periods_tp, tc, tp;
+
+ /* Bucket update */
+ time_diff_tc = time - m->time_tc;
+ time_diff_tp = time - m->time_tp;
+ n_periods_tc = time_diff_tc / m->cir_period;
+ n_periods_tp = time_diff_tp / m->pir_period;
+ m->time_tc += n_periods_tc * m->cir_period;
+ m->time_tp += n_periods_tp * m->pir_period;
+
+ tc = m->tc + n_periods_tc * m->cir_bytes_per_period;
+ if (tc > m->cbs)
+ tc = m->cbs;
+
+ tp = m->tp + n_periods_tp * m->pir_bytes_per_period;
+ if (tp > m->pbs)
+ tp = m->pbs;
+
+ /* Color logic */
+ if ((pkt_color == e_RTE_METER_RED) || (tp < pkt_len)) {
+ m->tc = tc;
+ m->tp = tp;
+ return e_RTE_METER_RED;
+ }
+
+ if ((pkt_color == e_RTE_METER_YELLOW) || (tc < pkt_len)) {
+ m->tc = tc;
+ m->tp = tp - pkt_len;
+ return e_RTE_METER_YELLOW;
+ }
+
+ m->tc = tc - pkt_len;
+ m->tp = tp - pkt_len;
+ return e_RTE_METER_GREEN;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_METER_H__ */
diff --git a/src/dpdk_lib18/librte_net/Makefile b/src/dpdk_lib18/librte_net/Makefile
new file mode 100755
index 00000000..ad2e482d
--- /dev/null
+++ b/src/dpdk_lib18/librte_net/Makefile
@@ -0,0 +1,40 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include := rte_ip.h rte_tcp.h rte_udp.h rte_sctp.h rte_icmp.h rte_arp.h
+
+
+include $(RTE_SDK)/mk/rte.install.mk
diff --git a/src/dpdk_lib18/librte_net/rte_arp.h b/src/dpdk_lib18/librte_net/rte_arp.h
new file mode 100755
index 00000000..c7b0e514
--- /dev/null
+++ b/src/dpdk_lib18/librte_net/rte_arp.h
@@ -0,0 +1,84 @@
+/* BSD LICENSE
+ *
+ * Copyright(c) 2013 6WIND.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ARP_H_
+#define _RTE_ARP_H_
+
+/**
+ * @file
+ *
+ * ARP-related defines
+ */
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * ARP header IPv4 payload.
+ */
+struct arp_ipv4 {
+ uint8_t arp_sha[6]; /* sender hardware address */
+ uint8_t arp_sip[4]; /* sender IP address */
+ uint8_t arp_tha[6]; /* target hardware address */
+ uint8_t arp_tip[4]; /* target IP address */
+} __attribute__((__packed__));
+
+/**
+ * ARP header.
+ */
+struct arp_hdr {
+ uint16_t arp_hrd; /* format of hardware address */
+#define ARP_HRD_ETHER 1 /* ARP Ethernet address format */
+
+ uint16_t arp_pro; /* format of protocol address */
+ uint8_t arp_hln; /* length of hardware address */
+ uint8_t arp_pln; /* length of protocol address */
+ uint16_t arp_op; /* ARP opcode (command) */
+#define ARP_OP_REQUEST 1 /* request to resolve address */
+#define ARP_OP_REPLY 2 /* response to previous request */
+#define ARP_OP_REVREQUEST 3 /* request proto addr given hardware */
+#define ARP_OP_REVREPLY 4 /* response giving protocol address */
+#define ARP_OP_INVREQUEST 8 /* request to identify peer */
+#define ARP_OP_INVREPLY 9 /* response identifying peer */
+
+ union {
+ struct arp_ipv4 arp_ip;
+ } arp_data;
+} __attribute__((__packed__));
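+
+/**
+ * Illustrative usage sketch (editor's addition): filling the fixed part of
+ * an ARP request for IPv4 over Ethernet. Byte-order conversion assumes
+ * rte_cpu_to_be_16() from rte_byteorder.h; 0x0800 is the IPv4 EtherType.
+ *
+ * @code
+ * struct arp_hdr req;
+ *
+ * req.arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
+ * req.arp_pro = rte_cpu_to_be_16(0x0800);
+ * req.arp_hln = 6;
+ * req.arp_pln = 4;
+ * req.arp_op  = rte_cpu_to_be_16(ARP_OP_REQUEST);
+ * // the sender/target MAC and IP addresses go into req.arp_data.arp_ip
+ * @endcode
+ */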
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ARP_H_ */
diff --git a/src/dpdk_lib18/librte_net/rte_icmp.h b/src/dpdk_lib18/librte_net/rte_icmp.h
new file mode 100755
index 00000000..8b287f6d
--- /dev/null
+++ b/src/dpdk_lib18/librte_net/rte_icmp.h
@@ -0,0 +1,101 @@
+/* BSD LICENSE
+ *
+ * Copyright(c) 2013 6WIND.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in.h 8.3 (Berkeley) 1/3/94
+ * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $
+ */
+
+#ifndef _RTE_ICMP_H_
+#define _RTE_ICMP_H_
+
+/**
+ * @file
+ *
+ * ICMP-related defines
+ */
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * ICMP Header
+ */
+struct icmp_hdr {
+ uint8_t icmp_type; /* ICMP packet type. */
+ uint8_t icmp_code; /* ICMP packet code. */
+ uint16_t icmp_cksum; /* ICMP packet checksum. */
+ uint16_t icmp_ident; /* ICMP packet identifier. */
+ uint16_t icmp_seq_nb; /* ICMP packet sequence number. */
+} __attribute__((__packed__));
+
+/* ICMP packet types */
+#define IP_ICMP_ECHO_REPLY 0
+#define IP_ICMP_ECHO_REQUEST 8
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_ICMP_H_ */
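A hedged illustration (not part of this patch): an ICMP echo-request header can be initialized as below, with the checksum zeroed so the caller can compute it over header plus payload afterwards (for instance with rte_raw_cksum() from rte_ip.h).

#include <rte_byteorder.h>
#include "rte_icmp.h"

/* Sketch: echo-request header; checksum left at 0 for the caller to fill. */
static void fill_icmp_echo_request(struct icmp_hdr *icmp,
                                   uint16_t ident, uint16_t seq)
{
        icmp->icmp_type   = IP_ICMP_ECHO_REQUEST;
        icmp->icmp_code   = 0;
        icmp->icmp_cksum  = 0;
        icmp->icmp_ident  = rte_cpu_to_be_16(ident);
        icmp->icmp_seq_nb = rte_cpu_to_be_16(seq);
}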
diff --git a/src/dpdk_lib18/librte_net/rte_ip.h b/src/dpdk_lib18/librte_net/rte_ip.h
new file mode 100755
index 00000000..f0ec543b
--- /dev/null
+++ b/src/dpdk_lib18/librte_net/rte_ip.h
@@ -0,0 +1,402 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in.h 8.3 (Berkeley) 1/3/94
+ * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $
+ */
+
+#ifndef _RTE_IP_H_
+#define _RTE_IP_H_
+
+/**
+ * @file
+ *
+ * IP-related defines
+ */
+
+#include <stdint.h>
+#include <netinet/in.h>
+
+#include <rte_memcpy.h>
+#include <rte_byteorder.h>
+#include <rte_mbuf.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * IPv4 Header
+ */
+struct ipv4_hdr {
+ uint8_t version_ihl; /**< version and header length */
+ uint8_t type_of_service; /**< type of service */
+ uint16_t total_length; /**< length of packet */
+ uint16_t packet_id; /**< packet ID */
+ uint16_t fragment_offset; /**< fragmentation offset */
+ uint8_t time_to_live; /**< time to live */
+ uint8_t next_proto_id; /**< protocol ID */
+ uint16_t hdr_checksum; /**< header checksum */
+ uint32_t src_addr; /**< source address */
+ uint32_t dst_addr; /**< destination address */
+} __attribute__((__packed__));
+
+/** Create IPv4 address */
+#define IPv4(a,b,c,d) ((uint32_t)(((a) & 0xff) << 24) | \
+ (((b) & 0xff) << 16) | \
+ (((c) & 0xff) << 8) | \
+ ((d) & 0xff))
+
+/* Fragment Offset and Flags. */
+#define IPV4_HDR_DF_SHIFT 14
+#define IPV4_HDR_MF_SHIFT 13
+#define IPV4_HDR_FO_SHIFT 3
+
+#define IPV4_HDR_DF_FLAG (1 << IPV4_HDR_DF_SHIFT)
+#define IPV4_HDR_MF_FLAG (1 << IPV4_HDR_MF_SHIFT)
+
+#define IPV4_HDR_OFFSET_MASK ((1 << IPV4_HDR_MF_SHIFT) - 1)
+
+#define IPV4_HDR_OFFSET_UNITS 8
+
+/*
+ * IPv4 address types
+ */
+#define IPV4_ANY ((uint32_t)0x00000000) /**< 0.0.0.0 */
+#define IPV4_LOOPBACK ((uint32_t)0x7f000001) /**< 127.0.0.1 */
+#define IPV4_BROADCAST ((uint32_t)0xe0000000) /**< 224.0.0.0 */
+#define IPV4_ALLHOSTS_GROUP ((uint32_t)0xe0000001) /**< 224.0.0.1 */
+#define IPV4_ALLRTRS_GROUP ((uint32_t)0xe0000002) /**< 224.0.0.2 */
+#define IPV4_MAX_LOCAL_GROUP ((uint32_t)0xe00000ff) /**< 224.0.0.255 */
+
+/*
+ * IPv4 Multicast-related macros
+ */
+#define IPV4_MIN_MCAST IPv4(224, 0, 0, 0) /**< Minimal IPv4-multicast address */
+#define IPV4_MAX_MCAST IPv4(239, 255, 255, 255) /**< Maximum IPv4 multicast address */
+
+#define IS_IPV4_MCAST(x) \
+ ((x) >= IPV4_MIN_MCAST && (x) <= IPV4_MAX_MCAST) /**< check if IPv4 address is multicast */
+
+/**
+ * @internal Calculate a sum of all words in the buffer.
+ * Helper routine for the rte_raw_cksum().
+ *
+ * @param buf
+ * Pointer to the buffer.
+ * @param len
+ * Length of the buffer.
+ * @param sum
+ * Initial value of the sum.
+ * @return
+ *   Initial value of the sum plus the sum of all words in the buffer.
+ */
+static inline uint32_t
+__rte_raw_cksum(const void *buf, size_t len, uint32_t sum)
+{
+ /* workaround gcc strict-aliasing warning */
+ uintptr_t ptr = (uintptr_t)buf;
+ const uint16_t *u16 = (const uint16_t *)ptr;
+
+ while (len >= (sizeof(*u16) * 4)) {
+ sum += u16[0];
+ sum += u16[1];
+ sum += u16[2];
+ sum += u16[3];
+ len -= sizeof(*u16) * 4;
+ u16 += 4;
+ }
+ while (len >= sizeof(*u16)) {
+ sum += *u16;
+ len -= sizeof(*u16);
+ u16 += 1;
+ }
+
+ /* if length is in odd bytes */
+ if (len == 1)
+ sum += *((const uint8_t *)u16);
+
+ return sum;
+}
+
+/**
+ * @internal Reduce a sum to the non-complemented checksum.
+ * Helper routine for the rte_raw_cksum().
+ *
+ * @param sum
+ * Value of the sum.
+ * @return
+ * The non-complemented checksum.
+ */
+static inline uint16_t
+__rte_raw_cksum_reduce(uint32_t sum)
+{
+ sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);
+ sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);
+ return (uint16_t)sum;
+}
+
+/**
+ * Process the non-complemented checksum of a buffer.
+ *
+ * @param buf
+ * Pointer to the buffer.
+ * @param len
+ * Length of the buffer.
+ * @return
+ * The non-complemented checksum.
+ */
+static inline uint16_t
+rte_raw_cksum(const void *buf, size_t len)
+{
+ uint32_t sum;
+
+ sum = __rte_raw_cksum(buf, len, 0);
+ return __rte_raw_cksum_reduce(sum);
+}
+
+/**
+ * Process the IPv4 checksum of an IPv4 header.
+ *
+ * The checksum field must be set to 0 by the caller.
+ *
+ * @param ipv4_hdr
+ * The pointer to the contiguous IPv4 header.
+ * @return
+ * The complemented checksum to set in the IP packet.
+ */
+static inline uint16_t
+rte_ipv4_cksum(const struct ipv4_hdr *ipv4_hdr)
+{
+ uint16_t cksum;
+ cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct ipv4_hdr));
+ return ((cksum == 0xffff) ? cksum : ~cksum);
+}
+
+/**
+ * Process the pseudo-header checksum of an IPv4 header.
+ *
+ * The checksum field must be set to 0 by the caller.
+ *
+ * Depending on the ol_flags, the pseudo-header checksum expected by the
+ * drivers is not the same. For instance, when TSO is enabled, the IP
+ * payload length must not be included in the packet.
+ *
+ * When ol_flags is 0, it computes the standard pseudo-header checksum.
+ *
+ * @param ipv4_hdr
+ * The pointer to the contiguous IPv4 header.
+ * @param ol_flags
+ * The ol_flags of the associated mbuf.
+ * @return
+ * The non-complemented checksum to set in the L4 header.
+ */
+static inline uint16_t
+rte_ipv4_phdr_cksum(const struct ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
+{
+ struct ipv4_psd_header {
+ uint32_t src_addr; /* IP address of source host. */
+ uint32_t dst_addr; /* IP address of destination host. */
+ uint8_t zero; /* zero. */
+ uint8_t proto; /* L4 protocol type. */
+ uint16_t len; /* L4 length. */
+ } psd_hdr;
+
+ psd_hdr.src_addr = ipv4_hdr->src_addr;
+ psd_hdr.dst_addr = ipv4_hdr->dst_addr;
+ psd_hdr.zero = 0;
+ psd_hdr.proto = ipv4_hdr->next_proto_id;
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ psd_hdr.len = 0;
+ } else {
+ psd_hdr.len = rte_cpu_to_be_16(
+ (uint16_t)(rte_be_to_cpu_16(ipv4_hdr->total_length)
+ - sizeof(struct ipv4_hdr)));
+ }
+ return rte_raw_cksum(&psd_hdr, sizeof(psd_hdr));
+}
+
+/**
+ * Process the IPv4 UDP or TCP checksum.
+ *
+ * The IPv4 header should not contain options. The IP and layer 4
+ * checksum must be set to 0 in the packet by the caller.
+ *
+ * @param ipv4_hdr
+ * The pointer to the contiguous IPv4 header.
+ * @param l4_hdr
+ * The pointer to the beginning of the L4 header.
+ * @return
+ * The complemented checksum to set in the IP packet.
+ */
+static inline uint16_t
+rte_ipv4_udptcp_cksum(const struct ipv4_hdr *ipv4_hdr, const void *l4_hdr)
+{
+ uint32_t cksum;
+ uint32_t l4_len;
+
+ l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) -
+ sizeof(struct ipv4_hdr);
+
+ cksum = rte_raw_cksum(l4_hdr, l4_len);
+ cksum += rte_ipv4_phdr_cksum(ipv4_hdr, 0);
+
+ cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
+ cksum = (~cksum) & 0xffff;
+ if (cksum == 0)
+ cksum = 0xffff;
+
+ return cksum;
+}
+
+/**
+ * IPv6 Header
+ */
+struct ipv6_hdr {
+ uint32_t vtc_flow; /**< IP version, traffic class & flow label. */
+ uint16_t payload_len; /**< IP payload size, not including the base IPv6 header. */
+ uint8_t proto; /**< Protocol, next header. */
+ uint8_t hop_limits; /**< Hop limits. */
+ uint8_t src_addr[16]; /**< IP address of source host. */
+ uint8_t dst_addr[16]; /**< IP address of destination host(s). */
+} __attribute__((__packed__));
+
+/**
+ * Process the pseudo-header checksum of an IPv6 header.
+ *
+ * Depending on the ol_flags, the pseudo-header checksum expected by the
+ * drivers is not the same. For instance, when TSO is enabled, the IPv6
+ * payload length must not be included in the packet.
+ *
+ * When ol_flags is 0, it computes the standard pseudo-header checksum.
+ *
+ * @param ipv6_hdr
+ * The pointer to the contiguous IPv6 header.
+ * @param ol_flags
+ * The ol_flags of the associated mbuf.
+ * @return
+ * The non-complemented checksum to set in the L4 header.
+ */
+static inline uint16_t
+rte_ipv6_phdr_cksum(const struct ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
+{
+ uint32_t sum;
+ struct {
+ uint32_t len; /* L4 length. */
+ uint32_t proto; /* L4 protocol - top 3 bytes must be zero */
+ } psd_hdr;
+
+ psd_hdr.proto = (ipv6_hdr->proto << 24);
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ psd_hdr.len = 0;
+ } else {
+ psd_hdr.len = ipv6_hdr->payload_len;
+ }
+
+ sum = __rte_raw_cksum(ipv6_hdr->src_addr,
+ sizeof(ipv6_hdr->src_addr) + sizeof(ipv6_hdr->dst_addr),
+ 0);
+ sum = __rte_raw_cksum(&psd_hdr, sizeof(psd_hdr), sum);
+ return __rte_raw_cksum_reduce(sum);
+}
+
+/**
+ * Process the IPv6 UDP or TCP checksum.
+ *
+ * The IPv6 header should not contain extension headers. The layer 4 checksum
+ * must be set to 0 in the packet by the caller.
+ *
+ * @param ipv6_hdr
+ * The pointer to the contiguous IPv6 header.
+ * @param l4_hdr
+ * The pointer to the beginning of the L4 header.
+ * @return
+ * The complemented checksum to set in the IP packet.
+ */
+static inline uint16_t
+rte_ipv6_udptcp_cksum(const struct ipv6_hdr *ipv6_hdr, const void *l4_hdr)
+{
+ uint32_t cksum;
+ uint32_t l4_len;
+
+ l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);
+
+ cksum = rte_raw_cksum(l4_hdr, l4_len);
+ cksum += rte_ipv6_phdr_cksum(ipv6_hdr, 0);
+
+ cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
+ cksum = (~cksum) & 0xffff;
+ if (cksum == 0)
+ cksum = 0xffff;
+
+ return cksum;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IP_H_ */
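A minimal, hedged sketch (not part of this patch) of how the checksum helpers above are typically combined for a contiguous IPv4 packet; l4_cksum_field is assumed to point at the UDP or TCP checksum field inside l4_hdr.

#include <rte_ip.h>

/* Sketch: zero both checksum fields, then fill them, L4 checksum first. */
static void set_ipv4_and_l4_cksums(struct ipv4_hdr *ip, void *l4_hdr,
                                   uint16_t *l4_cksum_field)
{
        *l4_cksum_field  = 0;                    /* required by rte_ipv4_udptcp_cksum() */
        *l4_cksum_field  = rte_ipv4_udptcp_cksum(ip, l4_hdr);
        ip->hdr_checksum = 0;                    /* required by rte_ipv4_cksum() */
        ip->hdr_checksum = rte_ipv4_cksum(ip);
}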
diff --git a/src/dpdk_lib18/librte_net/rte_sctp.h b/src/dpdk_lib18/librte_net/rte_sctp.h
new file mode 100755
index 00000000..688e126f
--- /dev/null
+++ b/src/dpdk_lib18/librte_net/rte_sctp.h
@@ -0,0 +1,99 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in.h 8.3 (Berkeley) 1/3/94
+ * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $
+ */
+
+/**
+ * @file
+ *
+ * SCTP-related defines
+ */
+
+#ifndef _RTE_SCTP_H_
+#define _RTE_SCTP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/**
+ * SCTP Header
+ */
+struct sctp_hdr {
+ uint16_t src_port; /**< Source port. */
+ uint16_t dst_port; /**< Destination port. */
+ uint32_t tag; /**< Validation tag. */
+ uint32_t cksum; /**< Checksum. */
+} __attribute__((__packed__));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_SCTP_H_ */
diff --git a/src/dpdk_lib18/librte_net/rte_tcp.h b/src/dpdk_lib18/librte_net/rte_tcp.h
new file mode 100755
index 00000000..28b61e6d
--- /dev/null
+++ b/src/dpdk_lib18/librte_net/rte_tcp.h
@@ -0,0 +1,104 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in.h 8.3 (Berkeley) 1/3/94
+ * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $
+ */
+
+#ifndef _RTE_TCP_H_
+#define _RTE_TCP_H_
+
+/**
+ * @file
+ *
+ * TCP-related defines
+ */
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * TCP Header
+ */
+struct tcp_hdr {
+ uint16_t src_port; /**< TCP source port. */
+ uint16_t dst_port; /**< TCP destination port. */
+ uint32_t sent_seq; /**< TX data sequence number. */
+ uint32_t recv_ack; /**< RX data acknowledgement sequence number. */
+ uint8_t data_off; /**< Data offset. */
+ uint8_t tcp_flags; /**< TCP flags */
+ uint16_t rx_win; /**< RX flow control window. */
+ uint16_t cksum; /**< TCP checksum. */
+ uint16_t tcp_urp; /**< TCP urgent pointer, if any. */
+} __attribute__((__packed__));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_TCP_H_ */
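For orientation only (not part of this patch): data_off packs the TCP header length, in 32-bit words, into its upper four bits, so a 20-byte header with no options uses 5 << 4, as in this sketch.

#include <rte_byteorder.h>
#include "rte_tcp.h"

/* Sketch: minimal SYN header, 20 bytes, no options; checksum left to the caller. */
static void fill_tcp_syn(struct tcp_hdr *tcp, uint16_t sport, uint16_t dport,
                         uint32_t seq)
{
        tcp->src_port  = rte_cpu_to_be_16(sport);
        tcp->dst_port  = rte_cpu_to_be_16(dport);
        tcp->sent_seq  = rte_cpu_to_be_32(seq);
        tcp->recv_ack  = 0;
        tcp->data_off  = 5 << 4;                 /* 5 x 32-bit words = 20 bytes */
        tcp->tcp_flags = 0x02;                   /* SYN */
        tcp->rx_win    = rte_cpu_to_be_16(8192);
        tcp->cksum     = 0;                      /* e.g. via rte_ipv4_udptcp_cksum() */
        tcp->tcp_urp   = 0;
}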
diff --git a/src/dpdk_lib18/librte_net/rte_udp.h b/src/dpdk_lib18/librte_net/rte_udp.h
new file mode 100755
index 00000000..bc5be4af
--- /dev/null
+++ b/src/dpdk_lib18/librte_net/rte_udp.h
@@ -0,0 +1,99 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in.h 8.3 (Berkeley) 1/3/94
+ * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $
+ */
+
+#ifndef _RTE_UDP_H_
+#define _RTE_UDP_H_
+
+/**
+ * @file
+ *
+ * UDP-related defines
+ */
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * UDP Header
+ */
+struct udp_hdr {
+ uint16_t src_port; /**< UDP source port. */
+ uint16_t dst_port; /**< UDP destination port. */
+ uint16_t dgram_len; /**< UDP datagram length */
+ uint16_t dgram_cksum; /**< UDP datagram checksum */
+} __attribute__((__packed__));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_UDP_H_ */
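A hedged sketch (not part of this patch): dgram_len covers the 8-byte UDP header plus the payload, and for IPv4 the checksum may be left at 0 or filled with rte_ipv4_udptcp_cksum().

#include <rte_byteorder.h>
#include "rte_udp.h"

/* Sketch: UDP header for a payload of payload_len bytes. */
static void fill_udp_hdr(struct udp_hdr *udp, uint16_t sport, uint16_t dport,
                         uint16_t payload_len)
{
        udp->src_port    = rte_cpu_to_be_16(sport);
        udp->dst_port    = rte_cpu_to_be_16(dport);
        udp->dgram_len   = rte_cpu_to_be_16((uint16_t)(sizeof(*udp) + payload_len));
        udp->dgram_cksum = 0;
}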
diff --git a/src/dpdk_lib18/librte_pipeline/Makefile b/src/dpdk_lib18/librte_pipeline/Makefile
new file mode 100755
index 00000000..cf8fde8a
--- /dev/null
+++ b/src/dpdk_lib18/librte_pipeline/Makefile
@@ -0,0 +1,54 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pipeline.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source files are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := rte_pipeline.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_PIPELINE)-include += rte_pipeline.h
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) := lib/librte_table
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_port
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pipeline/rte_pipeline.c b/src/dpdk_lib18/librte_pipeline/rte_pipeline.c
new file mode 100755
index 00000000..ac7e8873
--- /dev/null
+++ b/src/dpdk_lib18/librte_pipeline/rte_pipeline.c
@@ -0,0 +1,1373 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_branch_prediction.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_pipeline.h"
+
+#define RTE_TABLE_INVALID UINT32_MAX
+
+struct rte_port_in {
+ /* Input parameters */
+ struct rte_port_in_ops ops;
+ rte_pipeline_port_in_action_handler f_action;
+ void *arg_ah;
+ uint32_t burst_size;
+
+ /* The table to which this port is connected */
+ uint32_t table_id;
+
+ /* Handle to low-level port */
+ void *h_port;
+
+ /* List of enabled ports */
+ struct rte_port_in *next;
+};
+
+struct rte_port_out {
+ /* Input parameters */
+ struct rte_port_out_ops ops;
+ rte_pipeline_port_out_action_handler f_action;
+ rte_pipeline_port_out_action_handler_bulk f_action_bulk;
+ void *arg_ah;
+
+ /* Handle to low-level port */
+ void *h_port;
+};
+
+struct rte_table {
+ /* Input parameters */
+ struct rte_table_ops ops;
+ rte_pipeline_table_action_handler_hit f_action_hit;
+ rte_pipeline_table_action_handler_miss f_action_miss;
+ void *arg_ah;
+ struct rte_pipeline_table_entry *default_entry;
+ uint32_t entry_size;
+
+ uint32_t table_next_id;
+ uint32_t table_next_id_valid;
+
+ /* Handle to the low-level table object */
+ void *h_table;
+};
+
+#define RTE_PIPELINE_MAX_NAME_SZ 124
+
+struct rte_pipeline {
+ /* Input parameters */
+ char name[RTE_PIPELINE_MAX_NAME_SZ];
+ int socket_id;
+ uint32_t offset_port_id;
+
+ /* Internal tables */
+ struct rte_port_in ports_in[RTE_PIPELINE_PORT_IN_MAX];
+ struct rte_port_out ports_out[RTE_PIPELINE_PORT_OUT_MAX];
+ struct rte_table tables[RTE_PIPELINE_TABLE_MAX];
+
+ /* Occupancy of internal tables */
+ uint32_t num_ports_in;
+ uint32_t num_ports_out;
+ uint32_t num_tables;
+
+ /* List of enabled ports */
+ uint64_t enabled_port_in_mask;
+ struct rte_port_in *port_in_first;
+
+ /* Pipeline run structures */
+ struct rte_mbuf *pkts[RTE_PORT_IN_BURST_SIZE_MAX];
+ struct rte_pipeline_table_entry *entries[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t action_mask0[RTE_PIPELINE_ACTIONS];
+ uint64_t action_mask1[RTE_PIPELINE_ACTIONS];
+} __rte_cache_aligned;
+
+static inline uint32_t
+rte_mask_get_next(uint64_t mask, uint32_t pos)
+{
+ uint64_t mask_rot = (mask << ((63 - pos) & 0x3F)) |
+ (mask >> ((pos + 1) & 0x3F));
+ return (__builtin_ctzll(mask_rot) - (63 - pos)) & 0x3F;
+}
+
+static inline uint32_t
+rte_mask_get_prev(uint64_t mask, uint32_t pos)
+{
+ uint64_t mask_rot = (mask >> (pos & 0x3F)) |
+ (mask << ((64 - pos) & 0x3F));
+ return ((63 - __builtin_clzll(mask_rot)) + pos) & 0x3F;
+}
+
+static void
+rte_pipeline_table_free(struct rte_table *table);
+
+static void
+rte_pipeline_port_in_free(struct rte_port_in *port);
+
+static void
+rte_pipeline_port_out_free(struct rte_port_out *port);
+
+/*
+ * Pipeline
+ *
+ */
+static int
+rte_pipeline_check_params(struct rte_pipeline_params *params)
+{
+ if (params == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Incorrect value for parameter params\n", __func__);
+ return -EINVAL;
+ }
+
+ /* name */
+ if (params->name == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Incorrect value for parameter name\n", __func__);
+ return -EINVAL;
+ }
+
+ /* socket */
+ if ((params->socket_id < 0) ||
+ (params->socket_id >= RTE_MAX_NUMA_NODES)) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Incorrect value for parameter socket_id\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* offset_port_id */
+ if (params->offset_port_id & 0x3) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Incorrect value for parameter offset_port_id\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct rte_pipeline *
+rte_pipeline_create(struct rte_pipeline_params *params)
+{
+ struct rte_pipeline *p;
+ int status;
+
+ /* Check input parameters */
+ status = rte_pipeline_check_params(params);
+ if (status != 0) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Pipeline params check failed (%d)\n",
+ __func__, status);
+ return NULL;
+ }
+
+ /* Allocate memory for the pipeline on requested socket */
+ p = rte_zmalloc_socket("PIPELINE", sizeof(struct rte_pipeline),
+ RTE_CACHE_LINE_SIZE, params->socket_id);
+
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Pipeline memory allocation failed\n", __func__);
+ return NULL;
+ }
+
+ /* Save input parameters */
+ snprintf(p->name, RTE_PIPELINE_MAX_NAME_SZ, "%s", params->name);
+ p->socket_id = params->socket_id;
+ p->offset_port_id = params->offset_port_id;
+
+ /* Initialize pipeline internal data structure */
+ p->num_ports_in = 0;
+ p->num_ports_out = 0;
+ p->num_tables = 0;
+ p->enabled_port_in_mask = 0;
+ p->port_in_first = NULL;
+
+ return p;
+}
+
+int
+rte_pipeline_free(struct rte_pipeline *p)
+{
+ uint32_t i;
+
+ /* Check input parameters */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: rte_pipeline parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Free input ports */
+ for (i = 0; i < p->num_ports_in; i++) {
+ struct rte_port_in *port = &p->ports_in[i];
+
+ rte_pipeline_port_in_free(port);
+ }
+
+ /* Free tables */
+ for (i = 0; i < p->num_tables; i++) {
+ struct rte_table *table = &p->tables[i];
+
+ rte_pipeline_table_free(table);
+ }
+
+ /* Free output ports */
+ for (i = 0; i < p->num_ports_out; i++) {
+ struct rte_port_out *port = &p->ports_out[i];
+
+ rte_pipeline_port_out_free(port);
+ }
+
+ /* Free pipeline memory */
+ rte_free(p);
+
+ return 0;
+}
+
+/*
+ * Table
+ *
+ */
+static int
+rte_table_check_params(struct rte_pipeline *p,
+ struct rte_pipeline_table_params *params,
+ uint32_t *table_id)
+{
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (params == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (table_id == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: table_id parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* ops */
+ if (params->ops == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params->ops is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_create == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_create function pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_lookup == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_lookup function pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Do we have room for one more table? */
+ if (p->num_tables == RTE_PIPELINE_TABLE_MAX) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Incorrect value for num_tables parameter\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+rte_pipeline_table_create(struct rte_pipeline *p,
+ struct rte_pipeline_table_params *params,
+ uint32_t *table_id)
+{
+ struct rte_table *table;
+ struct rte_pipeline_table_entry *default_entry;
+ void *h_table;
+ uint32_t entry_size, id;
+ int status;
+
+ /* Check input arguments */
+ status = rte_table_check_params(p, params, table_id);
+ if (status != 0)
+ return status;
+
+ id = p->num_tables;
+ table = &p->tables[id];
+
+ /* Allocate space for the default table entry */
+ entry_size = sizeof(struct rte_pipeline_table_entry) +
+ params->action_data_size;
+ default_entry = (struct rte_pipeline_table_entry *) rte_zmalloc_socket(
+ "PIPELINE", entry_size, RTE_CACHE_LINE_SIZE, p->socket_id);
+ if (default_entry == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Failed to allocate default entry\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Create the table */
+ h_table = params->ops->f_create(params->arg_create, p->socket_id,
+ entry_size);
+ if (h_table == NULL) {
+ rte_free(default_entry);
+ RTE_LOG(ERR, PIPELINE, "%s: Table creation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Commit current table to the pipeline */
+ p->num_tables++;
+ *table_id = id;
+
+ /* Save input parameters */
+ memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
+ table->f_action_hit = params->f_action_hit;
+ table->f_action_miss = params->f_action_miss;
+ table->arg_ah = params->arg_ah;
+ table->entry_size = entry_size;
+
+ /* Clear the lookup miss actions (to be set later through API) */
+ table->default_entry = default_entry;
+ table->default_entry->action = RTE_PIPELINE_ACTION_DROP;
+
+ /* Initialize table internal data structure */
+ table->h_table = h_table;
+ table->table_next_id = 0;
+ table->table_next_id_valid = 0;
+
+ return 0;
+}
+
+void
+rte_pipeline_table_free(struct rte_table *table)
+{
+ if (table->ops.f_free != NULL)
+ table->ops.f_free(table->h_table);
+
+ rte_free(table->default_entry);
+}
+
+int
+rte_pipeline_table_default_entry_add(struct rte_pipeline *p,
+ uint32_t table_id,
+ struct rte_pipeline_table_entry *default_entry,
+ struct rte_pipeline_table_entry **default_entry_ptr)
+{
+ struct rte_table *table;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (default_entry == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: default_entry parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ if ((default_entry->action == RTE_PIPELINE_ACTION_TABLE) &&
+ table->table_next_id_valid &&
+ (default_entry->table_id != table->table_next_id)) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Tree-like topologies not allowed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Set the lookup miss actions */
+ if ((default_entry->action == RTE_PIPELINE_ACTION_TABLE) &&
+ (table->table_next_id_valid == 0)) {
+ table->table_next_id = default_entry->table_id;
+ table->table_next_id_valid = 1;
+ }
+
+ memcpy(table->default_entry, default_entry, table->entry_size);
+
+ *default_entry_ptr = table->default_entry;
+ return 0;
+}
+
+int
+rte_pipeline_table_default_entry_delete(struct rte_pipeline *p,
+ uint32_t table_id,
+ struct rte_pipeline_table_entry *entry)
+{
+ struct rte_table *table;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: pipeline parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ /* Save the current contents of the default entry */
+ if (entry)
+ memcpy(entry, table->default_entry, table->entry_size);
+
+ /* Clear the lookup miss actions */
+ memset(table->default_entry, 0, table->entry_size);
+ table->default_entry->action = RTE_PIPELINE_ACTION_DROP;
+
+ return 0;
+}
+
+int
+rte_pipeline_table_entry_add(struct rte_pipeline *p,
+ uint32_t table_id,
+ void *key,
+ struct rte_pipeline_table_entry *entry,
+ int *key_found,
+ struct rte_pipeline_table_entry **entry_ptr)
+{
+ struct rte_table *table;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (key == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: key parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (entry == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: entry parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ if (table->ops.f_add == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: f_add function pointer NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((entry->action == RTE_PIPELINE_ACTION_TABLE) &&
+ table->table_next_id_valid &&
+ (entry->table_id != table->table_next_id)) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Tree-like topologies not allowed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Add entry */
+ if ((entry->action == RTE_PIPELINE_ACTION_TABLE) &&
+ (table->table_next_id_valid == 0)) {
+ table->table_next_id = entry->table_id;
+ table->table_next_id_valid = 1;
+ }
+
+ return (table->ops.f_add)(table->h_table, key, (void *) entry,
+ key_found, (void **) entry_ptr);
+}
+
+int
+rte_pipeline_table_entry_delete(struct rte_pipeline *p,
+ uint32_t table_id,
+ void *key,
+ int *key_found,
+ struct rte_pipeline_table_entry *entry)
+{
+ struct rte_table *table;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (key == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: key parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ if (table->ops.f_delete == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_delete function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ return (table->ops.f_delete)(table->h_table, key, key_found, entry);
+}
+
+/*
+ * Port
+ *
+ */
+static int
+rte_pipeline_port_in_check_params(struct rte_pipeline *p,
+ struct rte_pipeline_port_in_params *params,
+ uint32_t *port_id)
+{
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (params == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params parameter NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (port_id == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: port_id parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* ops */
+ if (params->ops == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params->ops parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_create == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_create function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_rx == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: f_rx function pointer NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* burst_size */
+ if ((params->burst_size == 0) ||
+ (params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX)) {
+ RTE_LOG(ERR, PIPELINE, "%s: invalid value for burst_size\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do we have room for one more port? */
+ if (p->num_ports_in == RTE_PIPELINE_PORT_IN_MAX) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: invalid value for num_ports_in\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+rte_pipeline_port_out_check_params(struct rte_pipeline *p,
+ struct rte_pipeline_port_out_params *params,
+ uint32_t *port_id)
+{
+ rte_pipeline_port_out_action_handler f_ah;
+ rte_pipeline_port_out_action_handler_bulk f_ah_bulk;
+
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (params == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params parameter NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (port_id == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: port_id parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* ops */
+ if (params->ops == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params->ops parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_create == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_create function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_tx == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_tx function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_tx_bulk == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_tx_bulk function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ f_ah = params->f_action;
+ f_ah_bulk = params->f_action_bulk;
+ if (((f_ah != NULL) && (f_ah_bulk == NULL)) ||
+ ((f_ah == NULL) && (f_ah_bulk != NULL))) {
+ RTE_LOG(ERR, PIPELINE, "%s: Action handlers have to be either"
+ "both enabled or both disabled\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Do we have room for one more port? */
+ if (p->num_ports_out == RTE_PIPELINE_PORT_OUT_MAX) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: invalid value for num_ports_out\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+rte_pipeline_port_in_create(struct rte_pipeline *p,
+ struct rte_pipeline_port_in_params *params,
+ uint32_t *port_id)
+{
+ struct rte_port_in *port;
+ void *h_port;
+ uint32_t id;
+ int status;
+
+ /* Check input arguments */
+ status = rte_pipeline_port_in_check_params(p, params, port_id);
+ if (status != 0)
+ return status;
+
+ id = p->num_ports_in;
+ port = &p->ports_in[id];
+
+ /* Create the port */
+ h_port = params->ops->f_create(params->arg_create, p->socket_id);
+ if (h_port == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: Port creation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Commit current port to the pipeline */
+ p->num_ports_in++;
+ *port_id = id;
+
+ /* Save input parameters */
+ memcpy(&port->ops, params->ops, sizeof(struct rte_port_in_ops));
+ port->f_action = params->f_action;
+ port->arg_ah = params->arg_ah;
+ port->burst_size = params->burst_size;
+
+ /* Initialize port internal data structure */
+ port->table_id = RTE_TABLE_INVALID;
+ port->h_port = h_port;
+ port->next = NULL;
+
+ return 0;
+}
+
+void
+rte_pipeline_port_in_free(struct rte_port_in *port)
+{
+ if (port->ops.f_free != NULL)
+ port->ops.f_free(port->h_port);
+}
+
+int
+rte_pipeline_port_out_create(struct rte_pipeline *p,
+ struct rte_pipeline_port_out_params *params,
+ uint32_t *port_id)
+{
+ struct rte_port_out *port;
+ void *h_port;
+ uint32_t id;
+ int status;
+
+ /* Check input arguments */
+ status = rte_pipeline_port_out_check_params(p, params, port_id);
+ if (status != 0)
+ return status;
+
+ id = p->num_ports_out;
+ port = &p->ports_out[id];
+
+ /* Create the port */
+ h_port = params->ops->f_create(params->arg_create, p->socket_id);
+ if (h_port == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: Port creation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Commit current port to the pipeline */
+ p->num_ports_out++;
+ *port_id = id;
+
+ /* Save input parameters */
+ memcpy(&port->ops, params->ops, sizeof(struct rte_port_out_ops));
+ port->f_action = params->f_action;
+ port->f_action_bulk = params->f_action_bulk;
+ port->arg_ah = params->arg_ah;
+
+ /* Initialize port internal data structure */
+ port->h_port = h_port;
+
+ return 0;
+}
+
+void
+rte_pipeline_port_out_free(struct rte_port_out *port)
+{
+ if (port->ops.f_free != NULL)
+ port->ops.f_free(port->h_port);
+}
+
+int
+rte_pipeline_port_in_connect_to_table(struct rte_pipeline *p,
+ uint32_t port_id,
+ uint32_t table_id)
+{
+ struct rte_port_in *port;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (port_id >= p->num_ports_in) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: port IN ID %u is out of range\n",
+ __func__, port_id);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Table ID %u is out of range\n",
+ __func__, table_id);
+ return -EINVAL;
+ }
+
+ port = &p->ports_in[port_id];
+ port->table_id = table_id;
+
+ return 0;
+}
+
+int
+rte_pipeline_port_in_enable(struct rte_pipeline *p, uint32_t port_id)
+{
+ struct rte_port_in *port, *port_prev, *port_next;
+ struct rte_port_in *port_first, *port_last;
+ uint64_t port_mask;
+ uint32_t port_prev_id, port_next_id, port_first_id, port_last_id;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (port_id >= p->num_ports_in) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: port IN ID %u is out of range\n",
+ __func__, port_id);
+ return -EINVAL;
+ }
+
+ /* Return if current input port is already enabled */
+ port_mask = 1LLU << port_id;
+ if (p->enabled_port_in_mask & port_mask)
+ return 0;
+
+ p->enabled_port_in_mask |= port_mask;
+
+ /* Add current input port to the pipeline chain of enabled ports */
+ port_prev_id = rte_mask_get_prev(p->enabled_port_in_mask, port_id);
+ port_next_id = rte_mask_get_next(p->enabled_port_in_mask, port_id);
+
+ port_prev = &p->ports_in[port_prev_id];
+ port_next = &p->ports_in[port_next_id];
+ port = &p->ports_in[port_id];
+
+ port_prev->next = port;
+ port->next = port_next;
+
+ /* Update the first and last input ports in the chain */
+ port_first_id = __builtin_ctzll(p->enabled_port_in_mask);
+ port_last_id = 63 - __builtin_clzll(p->enabled_port_in_mask);
+
+ port_first = &p->ports_in[port_first_id];
+ port_last = &p->ports_in[port_last_id];
+
+ p->port_in_first = port_first;
+ port_last->next = NULL;
+
+ return 0;
+}
+
+int
+rte_pipeline_port_in_disable(struct rte_pipeline *p, uint32_t port_id)
+{
+ struct rte_port_in *port_prev, *port_next, *port_first, *port_last;
+ uint64_t port_mask;
+ uint32_t port_prev_id, port_next_id, port_first_id, port_last_id;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (port_id >= p->num_ports_in) {
+ RTE_LOG(ERR, PIPELINE, "%s: port IN ID %u is out of range\n",
+ __func__, port_id);
+ return -EINVAL;
+ }
+
+ /* Return if current input port is already disabled */
+ port_mask = 1LLU << port_id;
+ if ((p->enabled_port_in_mask & port_mask) == 0)
+ return 0;
+
+ /* Return if no other enabled ports */
+ if (__builtin_popcountll(p->enabled_port_in_mask) == 1) {
+ p->enabled_port_in_mask &= ~port_mask;
+ p->port_in_first = NULL;
+
+ return 0;
+ }
+
+ /* Remove current input port from the pipeline chain of enabled ports */
+ port_prev_id = rte_mask_get_prev(p->enabled_port_in_mask, port_id);
+ port_next_id = rte_mask_get_next(p->enabled_port_in_mask, port_id);
+
+ port_prev = &p->ports_in[port_prev_id];
+ port_next = &p->ports_in[port_next_id];
+
+ port_prev->next = port_next;
+ p->enabled_port_in_mask &= ~port_mask;
+
+ /* Update the first and last input ports in the chain */
+ port_first_id = __builtin_ctzll(p->enabled_port_in_mask);
+ port_last_id = 63 - __builtin_clzll(p->enabled_port_in_mask);
+
+ port_first = &p->ports_in[port_first_id];
+ port_last = &p->ports_in[port_last_id];
+
+ p->port_in_first = port_first;
+ port_last->next = NULL;
+
+ return 0;
+}
+
+/*
+ * Pipeline run-time
+ *
+ */
+int
+rte_pipeline_check(struct rte_pipeline *p)
+{
+ uint32_t port_in_id;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check that pipeline has at least one input port, one table and one
+ output port */
+ if (p->num_ports_in == 0) {
+ RTE_LOG(ERR, PIPELINE, "%s: must have at least 1 input port\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (p->num_tables == 0) {
+ RTE_LOG(ERR, PIPELINE, "%s: must have at least 1 table\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (p->num_ports_out == 0) {
+ RTE_LOG(ERR, PIPELINE, "%s: must have at least 1 output port\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check that all input ports are connected */
+ for (port_in_id = 0; port_in_id < p->num_ports_in; port_in_id++) {
+ struct rte_port_in *port_in = &p->ports_in[port_in_id];
+
+ if (port_in->table_id == RTE_TABLE_INVALID) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Port IN ID %u is not connected\n",
+ __func__, port_in_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
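/*
 * Editorial usage sketch (not part of this file): the intended setup order is
 * create -> ports/tables -> connect -> enable -> check. The parameter structs
 * below (port_in_params, port_out_params, table_params) are placeholders that
 * would be filled with real port/table ops elsewhere, not actual DPDK symbols.
 *
 *	struct rte_pipeline_params pp = {
 *		.name = "demo", .socket_id = 0, .offset_port_id = 0 };
 *	struct rte_pipeline *p = rte_pipeline_create(&pp);
 *	uint32_t in_id, out_id, tbl_id;
 *
 *	rte_pipeline_port_in_create(p, &port_in_params, &in_id);
 *	rte_pipeline_port_out_create(p, &port_out_params, &out_id);
 *	rte_pipeline_table_create(p, &table_params, &tbl_id);
 *	rte_pipeline_port_in_connect_to_table(p, in_id, tbl_id);
 *	rte_pipeline_port_in_enable(p, in_id);
 *	if (rte_pipeline_check(p) != 0)
 *		rte_pipeline_free(p);
 */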
+
+static inline void
+rte_pipeline_compute_masks(struct rte_pipeline *p, uint64_t pkts_mask)
+{
+ p->action_mask1[RTE_PIPELINE_ACTION_DROP] = 0;
+ p->action_mask1[RTE_PIPELINE_ACTION_PORT] = 0;
+ p->action_mask1[RTE_PIPELINE_ACTION_TABLE] = 0;
+
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ uint64_t pkt_mask = 1LLU << i;
+ uint32_t pos = p->entries[i]->action;
+
+ p->action_mask1[pos] |= pkt_mask;
+ }
+ } else {
+ uint32_t i;
+
+ for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++) {
+ uint64_t pkt_mask = 1LLU << i;
+ uint32_t pos;
+
+ if ((pkt_mask & pkts_mask) == 0)
+ continue;
+
+ pos = p->entries[i]->action;
+ p->action_mask1[pos] |= pkt_mask;
+ }
+ }
+}
+
+static inline void
+rte_pipeline_action_handler_port_bulk(struct rte_pipeline *p,
+ uint64_t pkts_mask, uint32_t port_id)
+{
+ struct rte_port_out *port_out = &p->ports_out[port_id];
+
+ /* Output port user actions */
+ if (port_out->f_action_bulk != NULL) {
+ uint64_t mask = pkts_mask;
+
+ port_out->f_action_bulk(p->pkts, &pkts_mask, port_out->arg_ah);
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |= pkts_mask ^ mask;
+ }
+
+ /* Output port TX */
+ if (pkts_mask != 0)
+ port_out->ops.f_tx_bulk(port_out->h_port, p->pkts, pkts_mask);
+}
+
+static inline void
+rte_pipeline_action_handler_port(struct rte_pipeline *p, uint64_t pkts_mask)
+{
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = p->pkts[i];
+ uint32_t port_out_id = p->entries[i]->port_id;
+ struct rte_port_out *port_out =
+ &p->ports_out[port_out_id];
+
+ /* Output port user actions */
+ if (port_out->f_action == NULL) /* Output port TX */
+ port_out->ops.f_tx(port_out->h_port, pkt);
+ else {
+ uint64_t pkt_mask = 1LLU;
+
+ port_out->f_action(pkt, &pkt_mask,
+ port_out->arg_ah);
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
+ (pkt_mask ^ 1LLU) << i;
+
+ /* Output port TX */
+ if (pkt_mask != 0)
+ port_out->ops.f_tx(port_out->h_port,
+ pkt);
+ }
+ }
+ } else {
+ uint32_t i;
+
+ for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++) {
+ uint64_t pkt_mask = 1LLU << i;
+ struct rte_mbuf *pkt;
+ struct rte_port_out *port_out;
+ uint32_t port_out_id;
+
+ if ((pkt_mask & pkts_mask) == 0)
+ continue;
+
+ pkt = p->pkts[i];
+ port_out_id = p->entries[i]->port_id;
+ port_out = &p->ports_out[port_out_id];
+
+ /* Output port user actions */
+ if (port_out->f_action == NULL) /* Output port TX */
+ port_out->ops.f_tx(port_out->h_port, pkt);
+ else {
+ pkt_mask = 1LLU;
+
+ port_out->f_action(pkt, &pkt_mask,
+ port_out->arg_ah);
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
+ (pkt_mask ^ 1LLU) << i;
+
+ /* Output port TX */
+ if (pkt_mask != 0)
+ port_out->ops.f_tx(port_out->h_port,
+ pkt);
+ }
+ }
+ }
+}
+
+static inline void
+rte_pipeline_action_handler_port_meta(struct rte_pipeline *p,
+ uint64_t pkts_mask)
+{
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = p->pkts[i];
+ uint32_t port_out_id =
+ RTE_MBUF_METADATA_UINT32(pkt,
+ p->offset_port_id);
+ struct rte_port_out *port_out = &p->ports_out[
+ port_out_id];
+
+ /* Output port user actions */
+ if (port_out->f_action == NULL) /* Output port TX */
+ port_out->ops.f_tx(port_out->h_port, pkt);
+ else {
+ uint64_t pkt_mask = 1LLU;
+
+ port_out->f_action(pkt, &pkt_mask,
+ port_out->arg_ah);
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
+ (pkt_mask ^ 1LLU) << i;
+
+ /* Output port TX */
+ if (pkt_mask != 0)
+ port_out->ops.f_tx(port_out->h_port,
+ pkt);
+ }
+ }
+ } else {
+ uint32_t i;
+
+ for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++) {
+ uint64_t pkt_mask = 1LLU << i;
+ struct rte_mbuf *pkt;
+ struct rte_port_out *port_out;
+ uint32_t port_out_id;
+
+ if ((pkt_mask & pkts_mask) == 0)
+ continue;
+
+ pkt = p->pkts[i];
+ port_out_id = RTE_MBUF_METADATA_UINT32(pkt,
+ p->offset_port_id);
+ port_out = &p->ports_out[port_out_id];
+
+ /* Output port user actions */
+ if (port_out->f_action == NULL) /* Output port TX */
+ port_out->ops.f_tx(port_out->h_port, pkt);
+ else {
+ pkt_mask = 1LLU;
+
+ port_out->f_action(pkt, &pkt_mask,
+ port_out->arg_ah);
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
+ (pkt_mask ^ 1LLU) << i;
+
+ /* Output port TX */
+ if (pkt_mask != 0)
+ port_out->ops.f_tx(port_out->h_port,
+ pkt);
+ }
+ }
+ }
+}
+
+static inline void
+rte_pipeline_action_handler_drop(struct rte_pipeline *p, uint64_t pkts_mask)
+{
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++)
+ rte_pktmbuf_free(p->pkts[i]);
+ } else {
+ uint32_t i;
+
+ for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++) {
+ uint64_t pkt_mask = 1LLU << i;
+
+ if ((pkt_mask & pkts_mask) == 0)
+ continue;
+
+ rte_pktmbuf_free(p->pkts[i]);
+ }
+ }
+}
+
+int
+rte_pipeline_run(struct rte_pipeline *p)
+{
+ struct rte_port_in *port_in;
+
+ for (port_in = p->port_in_first; port_in != NULL;
+ port_in = port_in->next) {
+ uint64_t pkts_mask;
+ uint32_t n_pkts, table_id;
+
+ /* Input port RX */
+ n_pkts = port_in->ops.f_rx(port_in->h_port, p->pkts,
+ port_in->burst_size);
+ if (n_pkts == 0)
+ continue;
+
+ pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] = 0;
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT] = 0;
+ p->action_mask0[RTE_PIPELINE_ACTION_TABLE] = 0;
+
+ /* Input port user actions */
+ if (port_in->f_action != NULL) {
+ uint64_t mask = pkts_mask;
+
+ port_in->f_action(p->pkts, n_pkts, &pkts_mask,
+ port_in->arg_ah);
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
+ pkts_mask ^ mask;
+ }
+
+ /* Table */
+ for (table_id = port_in->table_id; pkts_mask != 0; ) {
+ struct rte_table *table;
+ uint64_t lookup_hit_mask, lookup_miss_mask;
+
+ /* Lookup */
+ table = &p->tables[table_id];
+ table->ops.f_lookup(table->h_table, p->pkts, pkts_mask,
+ &lookup_hit_mask, (void **) p->entries);
+ lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
+
+ /* Lookup miss */
+ if (lookup_miss_mask != 0) {
+ struct rte_pipeline_table_entry *default_entry =
+ table->default_entry;
+
+ /* Table user actions */
+ if (table->f_action_miss != NULL) {
+ uint64_t mask = lookup_miss_mask;
+
+ table->f_action_miss(p->pkts,
+ &lookup_miss_mask,
+ default_entry, table->arg_ah);
+ p->action_mask0[
+ RTE_PIPELINE_ACTION_DROP] |=
+ lookup_miss_mask ^ mask;
+ }
+
+ /* Table reserved actions */
+ if ((default_entry->action ==
+ RTE_PIPELINE_ACTION_PORT) &&
+ (lookup_miss_mask != 0))
+ rte_pipeline_action_handler_port_bulk(p,
+ lookup_miss_mask,
+ default_entry->port_id);
+ else {
+ uint32_t pos = default_entry->action;
+
+ p->action_mask0[pos] = lookup_miss_mask;
+ }
+ }
+
+ /* Lookup hit */
+ if (lookup_hit_mask != 0) {
+ /* Table user actions */
+ if (table->f_action_hit != NULL) {
+ uint64_t mask = lookup_hit_mask;
+
+ table->f_action_hit(p->pkts,
+ &lookup_hit_mask,
+ p->entries, table->arg_ah);
+ p->action_mask0[
+ RTE_PIPELINE_ACTION_DROP] |=
+ lookup_hit_mask ^ mask;
+ }
+
+ /* Table reserved actions */
+ rte_pipeline_compute_masks(p, lookup_hit_mask);
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
+ p->action_mask1[
+ RTE_PIPELINE_ACTION_DROP];
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT] |=
+ p->action_mask1[
+ RTE_PIPELINE_ACTION_PORT];
+ p->action_mask0[RTE_PIPELINE_ACTION_TABLE] |=
+ p->action_mask1[
+ RTE_PIPELINE_ACTION_TABLE];
+ }
+
+ /* Prepare for next iteration */
+ pkts_mask = p->action_mask0[RTE_PIPELINE_ACTION_TABLE];
+ table_id = table->table_next_id;
+ p->action_mask0[RTE_PIPELINE_ACTION_TABLE] = 0;
+ }
+
+ /* Table reserved action PORT */
+ rte_pipeline_action_handler_port(p,
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT]);
+
+ /* Table reserved action PORT META */
+ rte_pipeline_action_handler_port_meta(p,
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT_META]);
+
+ /* Table reserved action DROP */
+ rte_pipeline_action_handler_drop(p,
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP]);
+ }
+
+ return 0;
+}
+
+int
+rte_pipeline_flush(struct rte_pipeline *p)
+{
+ uint32_t port_id;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (port_id = 0; port_id < p->num_ports_out; port_id++) {
+ struct rte_port_out *port = &p->ports_out[port_id];
+
+ if (port->ops.f_flush != NULL)
+ port->ops.f_flush(port->h_port);
+ }
+
+ return 0;
+}
+
+int
+rte_pipeline_port_out_packet_insert(struct rte_pipeline *p,
+ uint32_t port_id, struct rte_mbuf *pkt)
+{
+ struct rte_port_out *port_out = &p->ports_out[port_id];
+
+ /* Output port user actions */
+ if (port_out->f_action == NULL)
+ port_out->ops.f_tx(port_out->h_port, pkt); /* Output port TX */
+ else {
+ uint64_t pkt_mask = 1LLU;
+
+ port_out->f_action(pkt, &pkt_mask, port_out->arg_ah);
+
+ if (pkt_mask != 0) /* Output port TX */
+ port_out->ops.f_tx(port_out->h_port, pkt);
+ else
+ rte_pktmbuf_free(pkt);
+ }
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_pipeline/rte_pipeline.h b/src/dpdk_lib18/librte_pipeline/rte_pipeline.h
new file mode 100755
index 00000000..fb1014a0
--- /dev/null
+++ b/src/dpdk_lib18/librte_pipeline/rte_pipeline.h
@@ -0,0 +1,664 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_PIPELINE_H__
+#define __INCLUDE_RTE_PIPELINE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Pipeline
+ *
+ * This tool is part of the Intel DPDK Packet Framework tool suite and provides
+ * a standard methodology (logically similar to OpenFlow) for rapid development
+ * of complex packet processing pipelines out of ports, tables and actions.
+ *
+ * <B>Basic operation.</B> A pipeline is constructed by connecting its input
+ * ports to its output ports through a chain of lookup tables. As a result of
+ * the lookup operation into the current table, one of the table entries (or the
+ * default table entry, in case of lookup miss) is identified to provide the
+ * actions to be executed on the current packet and the associated action
+ * meta-data. The behavior of user actions is defined through the configurable
+ * table action handler, while the reserved actions define the next hop for the
+ * current packet (either another table, an output port or packet drop) and are
+ * handled transparently by the framework.
+ *
+ * <B>Initialization and run-time flows.</B> Once all the pipeline elements
+ * (input ports, tables, output ports) have been created, input ports connected
+ * to tables, table action handlers configured, tables populated with the
+ * initial set of entries (actions and action meta-data) and input ports
+ * enabled, the pipeline runs automatically, pushing packets from input ports
+ * to tables and output ports. At each table, the identified user actions are
+ * being executed, resulting in action meta-data (stored in the table entry)
+ * and packet meta-data (stored with the packet descriptor) being updated. The
+ * pipeline tables can have further updates and input ports can be disabled or
+ * enabled later on as required.
+ *
+ * <B>Multi-core scaling.</B> Typically, each CPU core will run its own
+ * pipeline instance. Complex application-level pipelines can be implemented by
+ * interconnecting multiple CPU core-level pipelines in tree-like topologies,
+ * as the same port devices (e.g. SW rings) can serve as output ports for the
+ * pipeline running on CPU core A, as well as input ports for the pipeline
+ * running on CPU core B. This approach enables the application development
+ * using the pipeline (CPU cores connected serially), cluster/run-to-completion
+ * (CPU cores connected in parallel) or mixed (pipeline of CPU core clusters)
+ * programming models.
+ *
+ * <B>Thread safety.</B> It is possible to have multiple pipelines running on
+ * the same CPU core, but it is not allowed (for thread safety reasons) to have
+ * multiple CPU cores running the same pipeline instance.
+ *
+ ***/
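To make the initialization and run-time flow described above concrete, a minimal bring-up sketch using the API declared in this header could look as follows. It is a hedged sketch only: the port and table ops are assumed to come from librte_port / librte_table (rte_port_ring_reader_ops, rte_port_ring_writer_ops and rte_table_stub_ops are used illustratively), ring_reader_params / ring_writer_params are placeholders for their creation arguments, and error handling plus the usual includes are omitted.

/* Hedged sketch: pipeline bring-up and run loop (not part of this header). */
struct rte_pipeline_params pp = {
	.name = "example",
	.socket_id = 0,
	.offset_port_id = 0, /* only used by RTE_PIPELINE_ACTION_PORT_META */
};
struct rte_pipeline *p = rte_pipeline_create(&pp);

uint32_t port_in_id, port_out_id, table_id;

struct rte_pipeline_port_in_params pin = {
	.ops = &rte_port_ring_reader_ops,  /* assumed ring reader port */
	.arg_create = &ring_reader_params, /* assumed, filled elsewhere */
	.burst_size = 32,
};
rte_pipeline_port_in_create(p, &pin, &port_in_id);

struct rte_pipeline_port_out_params pout = {
	.ops = &rte_port_ring_writer_ops,  /* assumed ring writer port */
	.arg_create = &ring_writer_params, /* assumed, filled elsewhere */
};
rte_pipeline_port_out_create(p, &pout, &port_out_id);

struct rte_pipeline_table_params pt = {
	.ops = &rte_table_stub_ops,        /* assumed stub (always-miss) table */
};
rte_pipeline_table_create(p, &pt, &table_id);

/* Send all lookup misses to the output port via the default entry */
struct rte_pipeline_table_entry miss_entry = {
	.action = RTE_PIPELINE_ACTION_PORT,
	.port_id = port_out_id,
};
struct rte_pipeline_table_entry *miss_entry_ptr;
rte_pipeline_table_default_entry_add(p, table_id, &miss_entry, &miss_entry_ptr);

rte_pipeline_port_in_connect_to_table(p, port_in_id, table_id);
rte_pipeline_port_in_enable(p, port_in_id);

if (rte_pipeline_check(p) == 0)
	for ( ; ; ) {
		rte_pipeline_run(p);
		rte_pipeline_flush(p);
	}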
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_port.h>
+#include <rte_table.h>
+
+/*
+ * Pipeline
+ *
+ */
+/** Opaque data type for pipeline */
+struct rte_pipeline;
+
+/** Parameters for pipeline creation */
+struct rte_pipeline_params {
+ /** Pipeline name */
+ const char *name;
+
+ /** CPU socket ID where memory for the pipeline and its elements (ports
+ and tables) should be allocated */
+ int socket_id;
+
+ /** Offset within packet meta-data to port_id to be used by action
+ "Send packet to output port read from packet meta-data". Has to be
+ 4-byte aligned. */
+ uint32_t offset_port_id;
+};
+
+/**
+ * Pipeline create
+ *
+ * @param params
+ * Parameters for pipeline creation
+ * @return
+ * Handle to pipeline instance on success or NULL otherwise
+ */
+struct rte_pipeline *rte_pipeline_create(struct rte_pipeline_params *params);
+
+/**
+ * Pipeline free
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_free(struct rte_pipeline *p);
+
+/**
+ * Pipeline consistency check
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_check(struct rte_pipeline *p);
+
+/**
+ * Pipeline run
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_run(struct rte_pipeline *p);
+
+/**
+ * Pipeline flush
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_flush(struct rte_pipeline *p);
+
+/*
+ * Actions
+ *
+ */
+/** Reserved actions */
+enum rte_pipeline_action {
+ /** Drop the packet */
+ RTE_PIPELINE_ACTION_DROP = 0,
+
+ /** Send packet to output port */
+ RTE_PIPELINE_ACTION_PORT,
+
+ /** Send packet to output port read from packet meta-data */
+ RTE_PIPELINE_ACTION_PORT_META,
+
+ /** Send packet to table */
+ RTE_PIPELINE_ACTION_TABLE,
+
+ /** Number of reserved actions */
+ RTE_PIPELINE_ACTIONS
+};
+
+/*
+ * Table
+ *
+ */
+/** Maximum number of tables allowed for any given pipeline instance. The
+ value of this parameter cannot be changed. */
+#define RTE_PIPELINE_TABLE_MAX 64
+
+/**
+ * Head format for the table entry of any pipeline table. For any given
+ * pipeline table, all table entries should have the same size and format. For
+ * any given pipeline table, the table entry has to start with a head of this
+ * structure, which contains the reserved actions and their associated
+ * meta-data, and then optionally continues with user actions and their
+ * associated meta-data. As all the currently defined reserved actions are
+ * mutually exclusive, only one reserved action can be set per table entry.
+ */
+struct rte_pipeline_table_entry {
+ /** Reserved action */
+ enum rte_pipeline_action action;
+
+ union {
+ /** Output port ID (meta-data for "Send packet to output port"
+ action) */
+ uint32_t port_id;
+ /** Table ID (meta-data for "Send packet to table" action) */
+ uint32_t table_id;
+ };
+ /** Start of table entry area for user defined actions and meta-data */
+ uint8_t action_data[0];
+};
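A user-defined table entry typically embeds this head as its first field, with the user meta-data following in the action_data area; the size of that trailing area is what the action_data_size field of struct rte_pipeline_table_params (further down) has to reserve. A hedged sketch, with my_flow_entry being an illustrative name only:

/* Illustrative user entry: action_data_size would be set to
 * sizeof(struct my_flow_entry) - sizeof(struct rte_pipeline_table_entry). */
struct my_flow_entry {
	struct rte_pipeline_table_entry head; /* reserved action + meta-data */
	uint32_t flow_id;                     /* user action meta-data */
	uint32_t pkt_count;
};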
+
+/**
+ * Pipeline table action handler on lookup hit
+ *
+ * The action handler can decide to drop packets by resetting the associated
+ * packet bit in the pkts_mask parameter. In this case, the action handler is
+ * required not to free the packet buffer, which will be freed eventually by
+ * the pipeline.
+ *
+ * @param pkts
+ * Burst of input packets specified as array of up to 64 pointers to struct
+ * rte_mbuf
+ * @param pkts_mask
+ * 64-bit bitmask specifying which packets in the input burst are valid. When
+ * pkts_mask bit n is set, then element n of pkts array is pointing to a
+ * valid packet and element n of entries array is pointing to a valid table
+ * entry associated with the packet, with the association typically done by
+ * the table lookup operation. Otherwise, element n of pkts array and element
+ * n of entries array will not be accessed.
+ * @param entries
+ * Set of table entries specified as array of up to 64 pointers to struct
+ * rte_pipeline_table_entry
+ * @param arg
+ * Opaque parameter registered by the user at the pipeline table creation
+ * time
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_pipeline_table_action_handler_hit)(
+ struct rte_mbuf **pkts,
+ uint64_t *pkts_mask,
+ struct rte_pipeline_table_entry **entries,
+ void *arg);
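As an illustration of the drop semantics described above, a hit handler that discards packets flagged in their table entry only clears the corresponding bits in *pkts_mask and leaves the mbufs to be freed by the pipeline. The entry layout (the my_flow_entry sketch above, extended with a hypothetical discard flag) is an assumption made purely for this example:

/* Hedged sketch of a lookup-hit action handler (not part of this header). */
static int
my_action_hit(struct rte_mbuf **pkts, uint64_t *pkts_mask,
	struct rte_pipeline_table_entry **entries, void *arg)
{
	uint32_t i;

	(void) pkts;
	(void) arg;

	for (i = 0; i < 64; i++) {
		uint64_t pkt_mask = 1LLU << i;
		struct my_flow_entry *e; /* assumed to carry a "discard" flag */

		if ((*pkts_mask & pkt_mask) == 0)
			continue;

		e = (struct my_flow_entry *) entries[i];
		if (e->discard)
			*pkts_mask &= ~pkt_mask; /* drop; pipeline frees the mbuf */
	}
	return 0;
}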
+
+/**
+ * Pipeline table action handler on lookup miss
+ *
+ * The action handler can decide to drop packets by resetting the associated
+ * packet bit in the pkts_mask parameter. In this case, the action handler is
+ * required not to free the packet buffer, which will be freed eventually by
+ * the pipeline.
+ *
+ * @param pkts
+ * Burst of input packets specified as array of up to 64 pointers to struct
+ * rte_mbuf
+ * @param pkts_mask
+ * 64-bit bitmask specifying which packets in the input burst are valid. When
+ * pkts_mask bit n is set, then element n of pkts array is pointing to a
+ * valid packet. Otherwise, element n of pkts array will not be accessed.
+ * @param entry
+ * Single table entry associated with all the valid packets from the input
+ * burst, specified as pointer to struct rte_pipeline_table_entry.
+ * This entry is the pipeline table default entry that is associated by the
+ * table lookup operation with the input packets that have resulted in lookup
+ * miss.
+ * @param arg
+ * Opaque parameter registered by the user at the pipeline table creation
+ * time
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_pipeline_table_action_handler_miss)(
+ struct rte_mbuf **pkts,
+ uint64_t *pkts_mask,
+ struct rte_pipeline_table_entry *entry,
+ void *arg);
+
+/** Parameters for pipeline table creation. Action handlers have to be either
+ both enabled or both disabled (they can be disabled by setting them to
+ NULL). */
+struct rte_pipeline_table_params {
+ /** Table operations (specific to each table type) */
+ struct rte_table_ops *ops;
+ /** Opaque param to be passed to the table create operation when
+ invoked */
+ void *arg_create;
+ /** Callback function to execute the user actions on input packets in
+ case of lookup hit */
+ rte_pipeline_table_action_handler_hit f_action_hit;
+ /** Callback function to execute the user actions on input packets in
+ case of lookup miss */
+ rte_pipeline_table_action_handler_miss f_action_miss;
+
+ /** Opaque parameter to be passed to lookup hit and/or lookup miss
+ action handlers when invoked */
+ void *arg_ah;
+ /** Memory size to be reserved per table entry for storing the user
+ actions and their meta-data */
+ uint32_t action_data_size;
+};
+
+/**
+ * Pipeline table create
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param params
+ * Parameters for pipeline table creation
+ * @param table_id
+ * Table ID. Valid only within the scope of table IDs of the current
+ * pipeline. Only returned after a successful invocation.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_create(struct rte_pipeline *p,
+ struct rte_pipeline_table_params *params,
+ uint32_t *table_id);
+
+/**
+ * Pipeline table default entry add
+ *
+ * The contents of the table default entry are updated with the provided actions
+ * and meta-data. When the default entry is not configured (by using this
+ * function), the built-in default entry has the action "Drop" and meta-data
+ * set to all-zeros.
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param default_entry
+ * New contents for the table default entry
+ * @param default_entry_ptr
+ * On successful invocation, pointer to the default table entry which can be
+ * used for further read-write accesses to this table entry. This pointer
+ * is valid until the default entry is deleted or re-added.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_default_entry_add(struct rte_pipeline *p,
+ uint32_t table_id,
+ struct rte_pipeline_table_entry *default_entry,
+ struct rte_pipeline_table_entry **default_entry_ptr);
+
+/**
+ * Pipeline table default entry delete
+ *
+ * The new contents of the table default entry are set to the reserved action "Drop
+ * the packet" with meta-data cleared (i.e. set to all-zeros).
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param entry
+ * On successful invocation, when entry points to a valid buffer, the
+ * previous contents of the table default entry (as it was just before the
+ * delete operation) is copied to this buffer
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_default_entry_delete(struct rte_pipeline *p,
+ uint32_t table_id,
+ struct rte_pipeline_table_entry *entry);
+
+/**
+ * Pipeline table entry add
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param key
+ * Table entry key
+ * @param entry
+ * New contents for the table entry identified by key
+ * @param key_found
+ * On successful invocation, set to TRUE (value different than 0) if key was
+ * already present in the table before the add operation and to FALSE (value
+ * 0) if not
+ * @param entry_ptr
+ * On successful invocation, pointer to the table entry associated with key.
+ * This can be used for further read-write accesses to this table entry and
+ * is valid until the key is deleted from the table or re-added (usually for
+ * associating different actions and/or action meta-data to the current key)
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_entry_add(struct rte_pipeline *p,
+ uint32_t table_id,
+ void *key,
+ struct rte_pipeline_table_entry *entry,
+ int *key_found,
+ struct rte_pipeline_table_entry **entry_ptr);
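For orientation, forwarding a flow to a fixed output port amounts to filling the entry head with the PORT reserved action and adding it under a key whose layout is defined entirely by the table type chosen at creation time. In the hedged sketch below, p, table_id and port_out_id are the handles obtained from the earlier create calls and key is only a placeholder:

/* Hedged sketch: add an entry that sends matching packets to port_out_id. */
struct rte_pipeline_table_entry entry = {
	.action = RTE_PIPELINE_ACTION_PORT,
	.port_id = port_out_id, /* from rte_pipeline_port_out_create() */
};
struct rte_pipeline_table_entry *entry_ptr;
int key_found;

if (rte_pipeline_table_entry_add(p, table_id, key /* table-specific format */,
	&entry, &key_found, &entry_ptr) != 0) {
	/* handle error */
}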
+
+/**
+ * Pipeline table entry delete
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param key
+ * Table entry key
+ * @param key_found
+ * On successful invocation, set to TRUE (value different than 0) if key was
+ * found in the table before the delete operation and to FALSE (value 0) if
+ * not
+ * @param entry
+ * On successful invocation, when key is found in the table and entry points
+ * to a valid buffer, the table entry contents (as it was before the delete
+ * was performed) is copied to this buffer
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_entry_delete(struct rte_pipeline *p,
+ uint32_t table_id,
+ void *key,
+ int *key_found,
+ struct rte_pipeline_table_entry *entry);
+
+/*
+ * Port IN
+ *
+ */
+/** Maximum number of input ports allowed for any given pipeline instance. The
+ value of this parameter cannot be changed. */
+#define RTE_PIPELINE_PORT_IN_MAX 64
+
+/**
+ * Pipeline input port action handler
+ *
+ * The action handler can decide to drop packets by resetting the associated
+ * packet bit in the pkts_mask parameter. In this case, the action handler is
+ * required not to free the packet buffer, which will be freed eventually by
+ * the pipeline.
+ *
+ * @param pkts
+ * Burst of input packets specified as array of up to 64 pointers to struct
+ * rte_mbuf
+ * @param n
+ * Number of packets in the input burst. This parameter specifies that
+ * elements 0 to (n-1) of pkts array are valid.
+ * @param pkts_mask
+ * 64-bit bitmask specifying which packets in the input burst are still valid
+ * after the action handler is executed. When pkts_mask bit n is set, then
+ * element n of pkts array is pointing to a valid packet.
+ * @param arg
+ * Opaque parameter registered by the user at the pipeline table creation
+ * time
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_pipeline_port_in_action_handler)(
+ struct rte_mbuf **pkts,
+ uint32_t n,
+ uint64_t *pkts_mask,
+ void *arg);
+
+/** Parameters for pipeline input port creation */
+struct rte_pipeline_port_in_params {
+ /** Input port operations (specific to each input port type) */
+ struct rte_port_in_ops *ops;
+ /** Opaque parameter to be passed to create operation when invoked */
+ void *arg_create;
+
+ /** Callback function to execute the user actions on input packets.
+ Disabled if set to NULL. */
+ rte_pipeline_port_in_action_handler f_action;
+ /** Opaque parameter to be passed to the action handler when invoked */
+ void *arg_ah;
+
+ /** Recommended burst size for the RX operation (in number of pkts) */
+ uint32_t burst_size;
+};
+
+/**
+ * Pipeline input port create
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param params
+ * Parameters for pipeline input port creation
+ * @param port_id
+ * Input port ID. Valid only within the scope of input port IDs of the
+ * current pipeline. Only returned after a successful invocation.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_in_create(struct rte_pipeline *p,
+ struct rte_pipeline_port_in_params *params,
+ uint32_t *port_id);
+
+/**
+ * Pipeline input port connect to table
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param port_id
+ * Port ID (returned by previous invocation of pipeline input port create)
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_in_connect_to_table(struct rte_pipeline *p,
+ uint32_t port_id,
+ uint32_t table_id);
+
+/**
+ * Pipeline input port enable
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param port_id
+ * Port ID (returned by previous invocation of pipeline input port create)
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_in_enable(struct rte_pipeline *p,
+ uint32_t port_id);
+
+/**
+ * Pipeline input port disable
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param port_id
+ * Port ID (returned by previous invocation of pipeline input port create)
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_in_disable(struct rte_pipeline *p,
+ uint32_t port_id);
+
+/*
+ * Port OUT
+ *
+ */
+/** Maximum number of output ports allowed for any given pipeline instance. The
+ value of this parameter cannot be changed. */
+#define RTE_PIPELINE_PORT_OUT_MAX 64
+
+/**
+ * Pipeline output port action handler for single packet
+ *
+ * The action handler can decide to drop packets by resetting the pkt_mask
+ * argument. In this case, the action handler is required not to free the
+ * packet buffer, which will be freed eventually by the pipeline.
+ *
+ * @param pkt
+ * Input packet
+ * @param pkt_mask
+ * Output argument set to 0 when the action handler decides to drop the input
+ * packet and to 1LLU otherwise
+ * @param arg
+ * Opaque parameter registered by the user at the pipeline table creation
+ * time
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_pipeline_port_out_action_handler)(
+ struct rte_mbuf *pkt,
+ uint64_t *pkt_mask,
+ void *arg);
+
+/**
+ * Pipeline output port action handler bulk
+ *
+ * The action handler can decide to drop packets by resetting the associated
+ * packet bit in the pkts_mask parameter. In this case, the action handler is
+ * required not to free the packet buffer, which will be freed eventually by
+ * the pipeline.
+ *
+ * @param pkts
+ * Burst of input packets specified as array of up to 64 pointers to struct
+ * rte_mbuf
+ * @param pkts_mask
+ * 64-bit bitmask specifying which packets in the input burst are valid. When
+ * pkts_mask bit n is set, then element n of pkts array is pointing to a
+ * valid packet. Otherwise, element n of pkts array will not be accessed.
+ * @param arg
+ * Opaque parameter registered by the user at the pipeline table creation
+ * time
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_pipeline_port_out_action_handler_bulk)(
+ struct rte_mbuf **pkts,
+ uint64_t *pkts_mask,
+ void *arg);
+
+/** Parameters for pipeline output port creation. The action handlers have to
+be either both enabled or both disabled (by setting them to NULL). When
+enabled, the pipeline selects between them at different moments, based on the
+number of packets that have to be sent to the same output port. */
+struct rte_pipeline_port_out_params {
+ /** Output port operations (specific to each output port type) */
+ struct rte_port_out_ops *ops;
+ /** Opaque parameter to be passed to create operation when invoked */
+ void *arg_create;
+
+ /** Callback function executing the user actions on a single input
+ packet */
+ rte_pipeline_port_out_action_handler f_action;
+ /** Callback function executing the user actions on a burst of input
+ packets */
+ rte_pipeline_port_out_action_handler_bulk f_action_bulk;
+ /** Opaque parameter to be passed to the action handler when invoked */
+ void *arg_ah;
+};
+
+/**
+ * Pipeline output port create
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param params
+ * Parameters for pipeline output port creation
+ * @param port_id
+ * Output port ID. Valid only within the scope of output port IDs of the
+ * current pipeline. Only returned after a successful invocation.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_out_create(struct rte_pipeline *p,
+ struct rte_pipeline_port_out_params *params,
+ uint32_t *port_id);
+
+/**
+ * Pipeline output port packet insert
+ *
+ * This function is called by the table action handler whenever it generates a
+ * new packet to be sent out through one of the pipeline output ports. This
+ * packet is not part of the burst of input packets read from any of the
+ * pipeline input ports, so it is not an element of the pkts array input
+ * parameter of the table action handler. This packet can be dropped by the
+ * output port action handler.
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param port_id
+ * Output port ID (returned by previous invocation of pipeline output port
+ * create) to send the packet specified by pkt
+ * @param pkt
+ * New packet generated by the table action handler
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_out_packet_insert(struct rte_pipeline *p,
+ uint32_t port_id,
+ struct rte_mbuf *pkt);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_af_packet/Makefile b/src/dpdk_lib18/librte_pmd_af_packet/Makefile
new file mode 100755
index 00000000..6955e5c2
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_af_packet/Makefile
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright(c) 2014 John W. Linville <linville@redhat.com>
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# Copyright(c) 2014 6WIND S.A.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_af_packet.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += rte_eth_af_packet.c
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_eth_af_packet.h
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.c b/src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.c
new file mode 100755
index 00000000..755780a1
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.c
@@ -0,0 +1,846 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 John W. Linville <linville@tuxdriver.com>
+ *
+ * Originally based upon librte_pmd_pcap code:
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <arpa/inet.h>
+#include <net/if.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <poll.h>
+
+#include "rte_eth_af_packet.h"
+
+#define ETH_AF_PACKET_IFACE_ARG "iface"
+#define ETH_AF_PACKET_NUM_Q_ARG "qpairs"
+#define ETH_AF_PACKET_BLOCKSIZE_ARG "blocksz"
+#define ETH_AF_PACKET_FRAMESIZE_ARG "framesz"
+#define ETH_AF_PACKET_FRAMECOUNT_ARG "framecnt"
+
+#define DFLT_BLOCK_SIZE (1 << 12)
+#define DFLT_FRAME_SIZE (1 << 11)
+#define DFLT_FRAME_COUNT (1 << 9)
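These are the devargs parsed later by rte_pmd_af_packet_devinit() and rte_eth_from_packet(), and the three defaults above correspond to a 4 KB block, a 2 KB frame and 512 frames. Assuming the standard DPDK EAL --vdev syntax (not defined in this file), a virtual device backed by this PMD might be requested as:

--vdev=eth_af_packet0,iface=eth0,qpairs=1,blocksz=4096,framesz=2048,framecnt=512

where the interface name and ring sizes are illustrative values only.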
+
+struct pkt_rx_queue {
+ int sockfd;
+
+ struct iovec *rd;
+ uint8_t *map;
+ unsigned int framecount;
+ unsigned int framenum;
+
+ struct rte_mempool *mb_pool;
+
+ volatile unsigned long rx_pkts;
+ volatile unsigned long err_pkts;
+};
+
+struct pkt_tx_queue {
+ int sockfd;
+
+ struct iovec *rd;
+ uint8_t *map;
+ unsigned int framecount;
+ unsigned int framenum;
+
+ volatile unsigned long tx_pkts;
+ volatile unsigned long err_pkts;
+};
+
+struct pmd_internals {
+ unsigned nb_queues;
+
+ int if_index;
+ struct ether_addr eth_addr;
+
+ struct tpacket_req req;
+
+ struct pkt_rx_queue rx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
+ struct pkt_tx_queue tx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
+};
+
+static const char *valid_arguments[] = {
+ ETH_AF_PACKET_IFACE_ARG,
+ ETH_AF_PACKET_NUM_Q_ARG,
+ ETH_AF_PACKET_BLOCKSIZE_ARG,
+ ETH_AF_PACKET_FRAMESIZE_ARG,
+ ETH_AF_PACKET_FRAMECOUNT_ARG,
+ NULL
+};
+
+static const char *drivername = "AF_PACKET PMD";
+
+static struct rte_eth_link pmd_link = {
+ .link_speed = 10000,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = 0
+};
+
+static uint16_t
+eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ unsigned i;
+ struct tpacket2_hdr *ppd;
+ struct rte_mbuf *mbuf;
+ uint8_t *pbuf;
+ struct pkt_rx_queue *pkt_q = queue;
+ uint16_t num_rx = 0;
+ unsigned int framecount, framenum;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ /*
+ * Reads up to the requested number of packets from the AF_PACKET socket
+ * one by one and copies each packet's data into a newly allocated mbuf.
+ */
+ framecount = pkt_q->framecount;
+ framenum = pkt_q->framenum;
+ for (i = 0; i < nb_pkts; i++) {
+ /* point at the next incoming frame */
+ ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
+ if ((ppd->tp_status & TP_STATUS_USER) == 0)
+ break;
+
+ /* allocate the next mbuf */
+ mbuf = rte_pktmbuf_alloc(pkt_q->mb_pool);
+ if (unlikely(mbuf == NULL))
+ break;
+
+ /* packet will fit in the mbuf, go ahead and receive it */
+ rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) = ppd->tp_snaplen;
+ pbuf = (uint8_t *) ppd + ppd->tp_mac;
+ memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));
+
+ /* release incoming frame and advance ring buffer */
+ ppd->tp_status = TP_STATUS_KERNEL;
+ if (++framenum >= framecount)
+ framenum = 0;
+
+ /* account for the receive frame */
+ bufs[i] = mbuf;
+ num_rx++;
+ }
+ pkt_q->framenum = framenum;
+ pkt_q->rx_pkts += num_rx;
+ return num_rx;
+}
+
+/*
+ * Callback to handle sending packets through the AF_PACKET socket.
+ */
+static uint16_t
+eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct tpacket2_hdr *ppd;
+ struct rte_mbuf *mbuf;
+ uint8_t *pbuf;
+ unsigned int framecount, framenum;
+ struct pollfd pfd;
+ struct pkt_tx_queue *pkt_q = queue;
+ uint16_t num_tx = 0;
+ int i;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ memset(&pfd, 0, sizeof(pfd));
+ pfd.fd = pkt_q->sockfd;
+ pfd.events = POLLOUT;
+ pfd.revents = 0;
+
+ framecount = pkt_q->framecount;
+ framenum = pkt_q->framenum;
+ ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
+ for (i = 0; i < nb_pkts; i++) {
+ /* wait (if needed) until the current TX frame slot becomes available */
+ if ((ppd->tp_status != TP_STATUS_AVAILABLE) &&
+ (poll(&pfd, 1, -1) < 0))
+ continue;
+
+ /* copy the tx frame data */
+ mbuf = bufs[num_tx];
+ pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN -
+ sizeof(struct sockaddr_ll);
+ memcpy(pbuf, rte_pktmbuf_mtod(mbuf, void*), rte_pktmbuf_data_len(mbuf));
+ ppd->tp_len = ppd->tp_snaplen = rte_pktmbuf_data_len(mbuf);
+
+ /* hand the frame over to the kernel and advance the ring buffer */
+ ppd->tp_status = TP_STATUS_SEND_REQUEST;
+ if (++framenum >= framecount)
+ framenum = 0;
+ ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
+
+ num_tx++;
+ rte_pktmbuf_free(mbuf);
+ }
+
+ /* kick-off transmits */
+ sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0);
+
+ pkt_q->framenum = framenum;
+ pkt_q->tx_pkts += num_tx;
+ pkt_q->err_pkts += nb_pkts - num_tx;
+ return num_tx;
+}
+
+static int
+eth_dev_start(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 1;
+ return 0;
+}
+
+/*
+ * This function gets called when the current port gets stopped.
+ */
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ int sockfd;
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ for (i = 0; i < internals->nb_queues; i++) {
+ sockfd = internals->rx_queue[i].sockfd;
+ if (sockfd != -1)
+ close(sockfd);
+ sockfd = internals->tx_queue[i].sockfd;
+ if (sockfd != -1)
+ close(sockfd);
+ }
+
+ dev->data->dev_link.link_status = 0;
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ dev_info->driver_name = drivername;
+ dev_info->if_index = internals->if_index;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
+ dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
+ dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->pci_dev = NULL;
+}
+
+static void
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
+{
+ unsigned i, imax;
+ unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ const struct pmd_internals *internal = dev->data->dev_private;
+
+ memset(igb_stats, 0, sizeof(*igb_stats));
+
+ imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
+ internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (i = 0; i < imax; i++) {
+ igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
+ rx_total += igb_stats->q_ipackets[i];
+ }
+
+ imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
+ internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (i = 0; i < imax; i++) {
+ igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
+ igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
+ tx_total += igb_stats->q_opackets[i];
+ tx_err_total += igb_stats->q_errors[i];
+ }
+
+ igb_stats->ipackets = rx_total;
+ igb_stats->opackets = tx_total;
+ igb_stats->oerrors = tx_err_total;
+}
+
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ struct pmd_internals *internal = dev->data->dev_private;
+
+ for (i = 0; i < internal->nb_queues; i++)
+ internal->rx_queue[i].rx_pkts = 0;
+
+ for (i = 0; i < internal->nb_queues; i++) {
+ internal->tx_queue[i].tx_pkts = 0;
+ internal->tx_queue[i].err_pkts = 0;
+ }
+}
+
+static void
+eth_dev_close(struct rte_eth_dev *dev __rte_unused)
+{
+}
+
+static void
+eth_queue_release(void *q __rte_unused)
+{
+}
+
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint16_t buf_size;
+
+ pkt_q->mb_pool = mb_pool;
+
+ /* Now get the space available for data in the mbuf */
+ mbp_priv = rte_mempool_get_priv(pkt_q->mb_pool);
+ buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+
+ if (ETH_FRAME_LEN > buf_size) {
+ RTE_LOG(ERR, PMD,
+ "%s: %d bytes will not fit in mbuf (%d bytes)\n",
+ dev->data->name, ETH_FRAME_LEN, buf_size);
+ return -ENOMEM;
+ }
+
+ dev->data->rx_queues[rx_queue_id] = pkt_q;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
+ return 0;
+}
+
+static struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_close = eth_dev_close,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+};
+
+/*
+ * Opens an AF_PACKET socket
+ */
+static int
+open_packet_iface(const char *key __rte_unused,
+ const char *value __rte_unused,
+ void *extra_args)
+{
+ int *sockfd = extra_args;
+
+ /* Open an AF_PACKET socket... */
+ *sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+ if (*sockfd == -1) {
+ RTE_LOG(ERR, PMD, "Could not open AF_PACKET socket\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+rte_pmd_init_internals(const char *name,
+ const int sockfd,
+ const unsigned nb_queues,
+ unsigned int blocksize,
+ unsigned int blockcnt,
+ unsigned int framesize,
+ unsigned int framecnt,
+ const unsigned numa_node,
+ struct pmd_internals **internals,
+ struct rte_eth_dev **eth_dev,
+ struct rte_kvargs *kvlist)
+{
+ struct rte_eth_dev_data *data = NULL;
+ struct rte_pci_device *pci_dev = NULL;
+ struct rte_kvargs_pair *pair = NULL;
+ struct ifreq ifr;
+ size_t ifnamelen;
+ unsigned k_idx;
+ struct sockaddr_ll sockaddr;
+ struct tpacket_req *req;
+ struct pkt_rx_queue *rx_queue;
+ struct pkt_tx_queue *tx_queue;
+ int rc, qsockfd, tpver, discard;
+ unsigned int i, q, rdsize;
+ int fanout_arg __rte_unused, bypass __rte_unused;
+
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ if (strstr(pair->key, ETH_AF_PACKET_IFACE_ARG) != NULL)
+ break;
+ }
+ if (pair == NULL) {
+ RTE_LOG(ERR, PMD,
+ "%s: no interface specified for AF_PACKET ethdev\n",
+ name);
+ goto error;
+ }
+
+ RTE_LOG(INFO, PMD,
+ "%s: creating AF_PACKET-backed ethdev on numa socket %u\n",
+ name, numa_node);
+
+ /*
+ * now do all data allocation - for eth_dev structure, dummy pci driver
+ * and internal (private) data
+ */
+ data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+ if (data == NULL)
+ goto error;
+
+ pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
+ if (pci_dev == NULL)
+ goto error;
+
+ *internals = rte_zmalloc_socket(name, sizeof(**internals),
+ 0, numa_node);
+ if (*internals == NULL)
+ goto error;
+
+ for (q = 0; q < nb_queues; q++) {
+ (*internals)->rx_queue[q].map = MAP_FAILED;
+ (*internals)->tx_queue[q].map = MAP_FAILED;
+ }
+
+ req = &((*internals)->req);
+
+ req->tp_block_size = blocksize;
+ req->tp_block_nr = blockcnt;
+ req->tp_frame_size = framesize;
+ req->tp_frame_nr = framecnt;
+
+ ifnamelen = strlen(pair->value);
+ if (ifnamelen < sizeof(ifr.ifr_name)) {
+ memcpy(ifr.ifr_name, pair->value, ifnamelen);
+ ifr.ifr_name[ifnamelen] = '\0';
+ } else {
+ RTE_LOG(ERR, PMD,
+ "%s: I/F name too long (%s)\n",
+ name, pair->value);
+ goto error;
+ }
+ if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
+ RTE_LOG(ERR, PMD,
+ "%s: ioctl failed (SIOCGIFINDEX)\n",
+ name);
+ goto error;
+ }
+ (*internals)->if_index = ifr.ifr_ifindex;
+
+ if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
+ RTE_LOG(ERR, PMD,
+ "%s: ioctl failed (SIOCGIFHWADDR)\n",
+ name);
+ goto error;
+ }
+ memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);
+
+ memset(&sockaddr, 0, sizeof(sockaddr));
+ sockaddr.sll_family = AF_PACKET;
+ sockaddr.sll_protocol = htons(ETH_P_ALL);
+ sockaddr.sll_ifindex = (*internals)->if_index;
+
+#if defined(PACKET_FANOUT)
+ fanout_arg = (getpid() ^ (*internals)->if_index) & 0xffff;
+ fanout_arg |= (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16;
+#if defined(PACKET_FANOUT_FLAG_ROLLOVER)
+ fanout_arg |= PACKET_FANOUT_FLAG_ROLLOVER << 16;
+#endif
+#endif
+
+ for (q = 0; q < nb_queues; q++) {
+ /* Open an AF_PACKET socket for this queue... */
+ qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+ if (qsockfd == -1) {
+ RTE_LOG(ERR, PMD,
+ "%s: could not open AF_PACKET socket\n",
+ name);
+ return -1;
+ }
+
+ tpver = TPACKET_V2;
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION,
+ &tpver, sizeof(tpver));
+ if (rc == -1) {
+ RTE_LOG(ERR, PMD,
+ "%s: could not set PACKET_VERSION on AF_PACKET "
+ "socket for %s\n", name, pair->value);
+ goto error;
+ }
+
+ discard = 1;
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS,
+ &discard, sizeof(discard));
+ if (rc == -1) {
+ RTE_LOG(ERR, PMD,
+ "%s: could not set PACKET_LOSS on "
+ "AF_PACKET socket for %s\n", name, pair->value);
+ goto error;
+ }
+
+#if defined(PACKET_QDISC_BYPASS)
+ bypass = 1;
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
+ &bypass, sizeof(bypass));
+ if (rc == -1) {
+ RTE_LOG(ERR, PMD,
+ "%s: could not set PACKET_QDISC_BYPASS "
+ "on AF_PACKET socket for %s\n", name,
+ pair->value);
+ goto error;
+ }
+#endif
+
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
+ if (rc == -1) {
+ RTE_LOG(ERR, PMD,
+ "%s: could not set PACKET_RX_RING on AF_PACKET "
+ "socket for %s\n", name, pair->value);
+ goto error;
+ }
+
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req));
+ if (rc == -1) {
+ RTE_LOG(ERR, PMD,
+ "%s: could not set PACKET_TX_RING on AF_PACKET "
+ "socket for %s\n", name, pair->value);
+ goto error;
+ }
+
+ rx_queue = &((*internals)->rx_queue[q]);
+ rx_queue->framecount = req->tp_frame_nr;
+
+ rx_queue->map = mmap(NULL, 2 * req->tp_block_size * req->tp_block_nr,
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
+ qsockfd, 0);
+ if (rx_queue->map == MAP_FAILED) {
+ RTE_LOG(ERR, PMD,
+ "%s: call to mmap failed on AF_PACKET socket for %s\n",
+ name, pair->value);
+ goto error;
+ }
+
+ /* rdsize is same for both Tx and Rx */
+ rdsize = req->tp_frame_nr * sizeof(*(rx_queue->rd));
+
+ rx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
+ if (rx_queue->rd == NULL)
+ goto error;
+ for (i = 0; i < req->tp_frame_nr; ++i) {
+ rx_queue->rd[i].iov_base = rx_queue->map + (i * framesize);
+ rx_queue->rd[i].iov_len = req->tp_frame_size;
+ }
+ rx_queue->sockfd = qsockfd;
+
+ tx_queue = &((*internals)->tx_queue[q]);
+ tx_queue->framecount = req->tp_frame_nr;
+
+ tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr;
+
+ tx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
+ if (tx_queue->rd == NULL)
+ goto error;
+ for (i = 0; i < req->tp_frame_nr; ++i) {
+ tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize);
+ tx_queue->rd[i].iov_len = req->tp_frame_size;
+ }
+ tx_queue->sockfd = qsockfd;
+
+ rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr));
+ if (rc == -1) {
+ RTE_LOG(ERR, PMD,
+ "%s: could not bind AF_PACKET socket to %s\n",
+ name, pair->value);
+ goto error;
+ }
+
+#if defined(PACKET_FANOUT)
+ rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT,
+ &fanout_arg, sizeof(fanout_arg));
+ if (rc == -1) {
+ RTE_LOG(ERR, PMD,
+ "%s: could not set PACKET_FANOUT on AF_PACKET socket "
+ "for %s\n", name, pair->value);
+ goto error;
+ }
+#endif
+ }
+
+ /* reserve an ethdev entry */
+ *eth_dev = rte_eth_dev_allocate(name);
+ if (*eth_dev == NULL)
+ goto error;
+
+ /*
+ * now put it all together
+ * - store queue data in internals,
+ * - store numa_node info in pci_driver
+ * - point eth_dev_data to internals and pci_driver
+ * - and point eth_dev structure to new eth_dev_data structure
+ */
+
+ (*internals)->nb_queues = nb_queues;
+
+ data->dev_private = *internals;
+ data->port_id = (*eth_dev)->data->port_id;
+ data->nb_rx_queues = (uint16_t)nb_queues;
+ data->nb_tx_queues = (uint16_t)nb_queues;
+ data->dev_link = pmd_link;
+ data->mac_addrs = &(*internals)->eth_addr;
+
+ pci_dev->numa_node = numa_node;
+
+ (*eth_dev)->data = data;
+ (*eth_dev)->dev_ops = &ops;
+ (*eth_dev)->pci_dev = pci_dev;
+
+ return 0;
+
+error:
+ if (data)
+ rte_free(data);
+ if (pci_dev)
+ rte_free(pci_dev);
+ if (*internals) {
+ for (q = 0; q < nb_queues; q++) {
+ munmap((*internals)->rx_queue[q].map,
+ 2 * req->tp_block_size * req->tp_block_nr);
+ if ((*internals)->rx_queue[q].rd)
+ rte_free((*internals)->rx_queue[q].rd);
+ if ((*internals)->tx_queue[q].rd)
+ rte_free((*internals)->tx_queue[q].rd);
+ }
+ rte_free(*internals);
+ }
+ return -1;
+}
+
+static int
+rte_eth_from_packet(const char *name,
+ int const *sockfd,
+ const unsigned numa_node,
+ struct rte_kvargs *kvlist)
+{
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct rte_kvargs_pair *pair = NULL;
+ unsigned k_idx;
+ unsigned int blockcount;
+ unsigned int blocksize = DFLT_BLOCK_SIZE;
+ unsigned int framesize = DFLT_FRAME_SIZE;
+ unsigned int framecount = DFLT_FRAME_COUNT;
+ unsigned int qpairs = 1;
+
+ /* do some parameter checking */
+ if (*sockfd < 0)
+ return -1;
+
+ /*
+ * Walk arguments for configurable settings
+ */
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) {
+ qpairs = atoi(pair->value);
+ if (qpairs < 1 ||
+ qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
+ RTE_LOG(ERR, PMD,
+ "%s: invalid qpairs value\n",
+ name);
+ return -1;
+ }
+ continue;
+ }
+ if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) {
+ blocksize = atoi(pair->value);
+ if (!blocksize) {
+ RTE_LOG(ERR, PMD,
+ "%s: invalid blocksize value\n",
+ name);
+ return -1;
+ }
+ continue;
+ }
+ if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) {
+ framesize = atoi(pair->value);
+ if (!framesize) {
+ RTE_LOG(ERR, PMD,
+ "%s: invalid framesize value\n",
+ name);
+ return -1;
+ }
+ continue;
+ }
+ if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) {
+ framecount = atoi(pair->value);
+ if (!framecount) {
+ RTE_LOG(ERR, PMD,
+ "%s: invalid framecount value\n",
+ name);
+ return -1;
+ }
+ continue;
+ }
+ }
+
+ if (framesize > blocksize) {
+ RTE_LOG(ERR, PMD,
+ "%s: AF_PACKET MMAP frame size exceeds block size!\n",
+ name);
+ return -1;
+ }
+
+ blockcount = framecount / (blocksize / framesize);
+ if (!blockcount) {
+ RTE_LOG(ERR, PMD,
+ "%s: invalid AF_PACKET MMAP parameters\n", name);
+ return -1;
+ }
+
+ RTE_LOG(INFO, PMD, "%s: AF_PACKET MMAP parameters:\n", name);
+ RTE_LOG(INFO, PMD, "%s:\tblock size %d\n", name, blocksize);
+ RTE_LOG(INFO, PMD, "%s:\tblock count %d\n", name, blockcount);
+ RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize);
+ RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount);
+
+ if (rte_pmd_init_internals(name, *sockfd, qpairs,
+ blocksize, blockcount,
+ framesize, framecount,
+ numa_node, &internals, &eth_dev,
+ kvlist) < 0)
+ return -1;
+
+ eth_dev->rx_pkt_burst = eth_af_packet_rx;
+ eth_dev->tx_pkt_burst = eth_af_packet_tx;
+
+ return 0;
+}
+
+int
+rte_pmd_af_packet_devinit(const char *name, const char *params)
+{
+ unsigned numa_node;
+ int ret;
+ struct rte_kvargs *kvlist;
+ int sockfd = -1;
+
+ RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n", name);
+
+ numa_node = rte_socket_id();
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+
+ /*
+ * If the iface argument is passed, we open the underlying network
+ * interface and use it for reading / writing
+ */
+ if (rte_kvargs_count(kvlist, ETH_AF_PACKET_IFACE_ARG) == 1) {
+
+ ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG,
+ &open_packet_iface, &sockfd);
+ if (ret < 0)
+ return -1;
+ }
+
+ ret = rte_eth_from_packet(name, &sockfd, numa_node, kvlist);
+ close(sockfd); /* no longer needed */
+
+ if (ret < 0)
+ return -1;
+
+ return 0;
+}
+
+static struct rte_driver pmd_af_packet_drv = {
+ .name = "eth_af_packet",
+ .type = PMD_VDEV,
+ .init = rte_pmd_af_packet_devinit,
+};
+
+PMD_REGISTER_DRIVER(pmd_af_packet_drv);
diff --git a/src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.h b/src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.h
new file mode 100755
index 00000000..5d1bc7e6
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_af_packet/rte_eth_af_packet.h
@@ -0,0 +1,53 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETH_AF_PACKET_H_
+#define _RTE_ETH_AF_PACKET_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_PMD_AF_PACKET_MAX_RINGS 16
+
+/**
+ * For use by the EAL only. Called as part of EAL init to set up any dummy NICs
+ * configured on command line.
+ */
+int rte_pmd_af_packet_devinit(const char *name, const char *params);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_bond/Makefile b/src/dpdk_lib18/librte_pmd_bond/Makefile
new file mode 100755
index 00000000..cdff1262
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_bond/Makefile
@@ -0,0 +1,67 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_bond.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_args.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_8023ad.c
+
+ifeq ($(CONFIG_RTE_MBUF_REFCNT),n)
+$(info WARNING: Link Bonding Broadcast mode is disabled because it needs MBUF_REFCNT.)
+endif
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_eth_bond.h
+SYMLINK-y-include += rte_eth_bond_8023ad.h
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond.h b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond.h
new file mode 100755
index 00000000..71779831
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond.h
@@ -0,0 +1,359 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETH_BOND_H_
+#define _RTE_ETH_BOND_H_
+
+/**
+ * @file rte_eth_bond.h
+ *
+ * RTE Link Bonding Ethernet Device
+ * Link Bonding for 1GbE and 10GbE ports to allow the aggregation of multiple
+ * (slave) NICs into a single logical interface. The bonded device processes
+ * these interfaces based on the mode of operation specified and supported.
+ * This implementation supports several modes of operation: round robin,
+ * active backup, balance, broadcast, 802.3ad and adaptive transmit load
+ * balancing, providing redundant links, fault tolerance and/or load
+ * balancing of network ports.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_ether.h>
+
+/* Supported modes of operation of link bonding library */
+
+#define BONDING_MODE_ROUND_ROBIN (0)
+/**< Round Robin (Mode 0).
+ * In this mode all transmitted packets will be balanced equally across all
+ * active slaves of the bonded device in a round robin fashion. */
+#define BONDING_MODE_ACTIVE_BACKUP (1)
+/**< Active Backup (Mode 1).
+ * In this mode all packets transmitted will be transmitted on the primary
+ * slave until such point as the primary slave is no longer available and then
+ * transmitted packets will be sent on the next available slaves. The primary
+ * slave can be defined by the user but defaults to the first active slave
+ * available if not specified. */
+#define BONDING_MODE_BALANCE (2)
+/**< Balance (Mode 2).
+ * In this mode all packets transmitted will be balanced across the available
+ * slaves using one of three available transmit policies - l2, l2+3 or l3+4.
+ * See BALANCE_XMIT_POLICY macros definitions for further details on transmit
+ * policies. */
+#ifdef RTE_MBUF_REFCNT
+#define BONDING_MODE_BROADCAST (3)
+/**< Broadcast (Mode 3).
+ * In this mode all transmitted packets will be transmitted on all available
+ * active slaves of the bonded device. */
+#endif
+#define BONDING_MODE_8023AD (4)
+/**< 802.3AD (Mode 4).
+ *
+ * This mode provides auto negotiation/configuration
+ * of peers as well as link status change monitoring using out-of-band
+ * LACP (link aggregation control protocol) messages. For further details of
+ * LACP specification see the IEEE 802.3ad/802.1AX standards. It is also
+ * described here
+ * https://www.kernel.org/doc/Documentation/networking/bonding.txt.
+ *
+ * Important Usage Notes:
+ * - For LACP mode to work the rx/tx burst functions must be invoked
+ * at least once every 100ms, otherwise the out-of-band LACP messages will not
+ * be handled with the expected latency, which may cause the link status to be
+ * incorrectly marked as down, or negotiation with peers to fail.
+ * - For optimal performance during initial handshaking the array of mbufs
+ * provided to rx_burst should be at least twice the slave count in size.
+ *
+ */
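+
+/*
+ * Polling sketch for the note above (illustrative): even when no traffic is
+ * expected the bonded port must be polled so the out-of-band LACP frames
+ * keep flowing.  bond_port, queue 0 and the burst size are assumptions.
+ *
+ *   struct rte_mbuf *bufs[32];
+ *   for (;;) {
+ *       uint16_t n = rte_eth_rx_burst(bond_port, 0, bufs, 32);
+ *       // process and free the n received mbufs, then send pending traffic
+ *   }
+ */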
+#define BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING (5)
+/**< Adaptive TLB (Mode 5)
+ * This mode provides an adaptive transmit load balancing. It dynamically
+ * changes the transmitting slave, according to the computed load. Statistics
+ * are collected in 100ms intervals and scheduled every 10ms */
+
+/* Balance Mode Transmit Policies */
+#define BALANCE_XMIT_POLICY_LAYER2 (0)
+/**< Layer 2 (Ethernet MAC) */
+#define BALANCE_XMIT_POLICY_LAYER23 (1)
+/**< Layer 2+3 (Ethernet MAC + IP Addresses) transmit load balancing */
+#define BALANCE_XMIT_POLICY_LAYER34 (2)
+/**< Layer 3+4 (IP Addresses + UDP Ports) transmit load balancing */
+
+/**
+ * Create a bonded rte_eth_dev device
+ *
+ * @param name Name of new link bonding device.
+ * @param mode Mode to initialize bonding device in.
+ * @param socket_id Socket Id on which to allocate eth_dev resources.
+ *
+ * @return
+ * Port Id of created rte_eth_dev on success, negative value otherwise
+ */
+int
+rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id);
+
+/**
+ * Add a rte_eth_dev device as a slave to the bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param slave_port_id Port ID of slave device.
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id);
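+
+/*
+ * Usage sketch (illustrative, not part of the API): create a bonded device
+ * and attach two slaves.  The name "bond0", socket id 0 and slave port ids
+ * 0 and 1 are assumptions for the example.
+ *
+ *   int bond_port = rte_eth_bond_create("bond0", BONDING_MODE_ROUND_ROBIN, 0);
+ *   if (bond_port < 0)
+ *       return -1;
+ *   if (rte_eth_bond_slave_add(bond_port, 0) != 0 ||
+ *       rte_eth_bond_slave_add(bond_port, 1) != 0)
+ *       return -1;
+ */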
+
+/**
+ * Remove a slave rte_eth_dev device from the bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param slave_port_id Port ID of slave device.
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id);
+
+/**
+ * Set link bonding mode of bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param mode Bonding mode to set
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode);
+
+/**
+ * Get link bonding mode of bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * link bonding mode on success, negative value otherwise
+ */
+int
+rte_eth_bond_mode_get(uint8_t bonded_port_id);
+
+/**
+ * Set slave rte_eth_dev as primary slave of bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param slave_port_id Port ID of slave device.
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id);
+
+/**
+ * Get primary slave of bonded device
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * Port Id of primary slave on success, -1 on failure
+ */
+int
+rte_eth_bond_primary_get(uint8_t bonded_port_id);
+
+/**
+ * Populate an array with the port ids of the slaves of the bonded device
+ *
+ * @param bonded_port_id Port ID of bonded eth_dev to interrogate
+ * @param slaves			Array to be populated with the current slaves
+ * @param len Length of slaves array
+ *
+ * @return
+ * Number of slaves associated with bonded device on success,
+ * negative value otherwise
+ */
+int
+rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len);
+
+/**
+ * Populate an array with list of the active slaves port id's of the bonded
+ * device.
+ *
+ * @param bonded_port_id Port ID of bonded eth_dev to interrogate
+ * @param slaves Array to be populated with the current active slaves
+ * @param len Length of slaves array
+ *
+ * @return
+ * Number of active slaves associated with bonded device on success,
+ * negative value otherwise
+ */
+int
+rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
+ uint8_t len);
+
+/**
+ * Set explicit MAC address to use on bonded device and its slaves.
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param mac_addr			MAC address to use on the bonded device, overriding the
+ *							slaves' MAC addresses
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
+ struct ether_addr *mac_addr);
+
+/**
+ * Reset bonded device to use MAC from primary slave on bonded device and its
+ * slaves.
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_eth_bond_mac_address_reset(uint8_t bonded_port_id);
+
+/**
+ * Set the transmit policy for bonded device to use when it is operating in
+ * balance mode; this parameter is ignored in other modes of
+ * operation.
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param policy Balance mode transmission policy.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy);
+
+/**
+ * Get the transmit policy set on bonded device for balance mode operation
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * Balance transmit policy on success, negative value otherwise.
+ */
+int
+rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id);
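+
+/*
+ * Sketch: when the bonded device runs in BONDING_MODE_BALANCE the transmit
+ * hash can be changed at run time, e.g. (bond_port is assumed to be a valid
+ * bonded device id):
+ *
+ *   rte_eth_bond_xmit_policy_set(bond_port, BALANCE_XMIT_POLICY_LAYER34);
+ */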
+
+/**
+ * Set the link monitoring frequency (in ms) for monitoring the link status of
+ * slave devices
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param internal_ms Monitoring interval in milliseconds
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+
+int
+rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms);
+
+/**
+ * Get the current link monitoring frequency (in ms) for monitoring of the link
+ * status of slave devices
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * Monitoring interval on success, negative value otherwise.
+ */
+int
+rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id);
+
+
+/**
+ * Set the period in milliseconds for delaying the disabling of a bonded link
+ * when the link down status has been detected
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param delay_ms Delay period in milliseconds.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms);
+
+/**
+ * Get the period in milliseconds set for delaying the disabling of a bonded
+ * link when the link down status has been detected
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * Delay period on success, negative value otherwise.
+ */
+int
+rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id);
+
+/**
+ * Set the period in milliseconds for delaying the enabling of a bonded link
+ * when the link up status has been detected
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ * @param delay_ms Delay period in milliseconds.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms);
+
+/**
+ * Get the period in milliseconds set for delaying the enabling of a bonded
+ * link when the link up status has been detected
+ *
+ * @param bonded_port_id Port ID of bonded device.
+ *
+ * @return
+ * Delay period on success, negative value otherwise.
+ */
+int
+rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id);
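+
+/*
+ * Sketch (values are illustrative): poll slave link state every 100 ms and
+ * debounce link transitions by 500 ms in both directions.  bond_port is an
+ * assumed valid bonded device id.
+ *
+ *   rte_eth_bond_link_monitoring_set(bond_port, 100);
+ *   rte_eth_bond_link_down_prop_delay_set(bond_port, 500);
+ *   rte_eth_bond_link_up_prop_delay_set(bond_port, 500);
+ */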
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.c b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.c
new file mode 100755
index 00000000..f1cf81a6
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.c
@@ -0,0 +1,1216 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include <rte_alarm.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+
+#include "rte_eth_bond_private.h"
+
+#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
+#define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
+ bond_dbg_get_time_diff_ms(), slave_id, \
+ __func__, ##__VA_ARGS__)
+
+static uint64_t start_time;
+
+static unsigned
+bond_dbg_get_time_diff_ms(void)
+{
+ uint64_t now;
+
+ now = rte_rdtsc();
+ if (start_time == 0)
+ start_time = now;
+
+ return ((now - start_time) * 1000) / rte_get_tsc_hz();
+}
+
+static void
+bond_print_lacp(struct lacpdu *l)
+{
+ char a_address[18];
+ char p_address[18];
+ char a_state[256] = { 0 };
+ char p_state[256] = { 0 };
+
+ static const char * const state_labels[] = {
+ "ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP"
+ };
+
+ int a_len = 0;
+ int p_len = 0;
+ uint8_t i;
+ uint8_t *addr;
+
+ addr = l->actor.port_params.system.addr_bytes;
+ snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ addr = l->partner.port_params.system.addr_bytes;
+ snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ for (i = 0; i < 8; i++) {
+ if ((l->actor.state >> i) & 1) {
+ a_len += snprintf(&a_state[a_len], RTE_DIM(a_state) - a_len, "%s ",
+ state_labels[i]);
+ }
+
+ if ((l->partner.state >> i) & 1) {
+ p_len += snprintf(&p_state[p_len], RTE_DIM(p_state) - p_len, "%s ",
+ state_labels[i]);
+ }
+ }
+
+ if (a_len && a_state[a_len-1] == ' ')
+ a_state[a_len-1] = '\0';
+
+ if (p_len && p_state[p_len-1] == ' ')
+ p_state[p_len-1] = '\0';
+
+ RTE_LOG(DEBUG, PMD, "LACP: {\n"\
+ " subtype= %02X\n"\
+ " ver_num=%02X\n"\
+ " actor={ tlv=%02X, len=%02X\n"\
+ " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
+ " state={ %s }\n"\
+ " }\n"\
+ " partner={ tlv=%02X, len=%02X\n"\
+ " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
+ " state={ %s }\n"\
+ " }\n"\
+ " collector={info=%02X, length=%02X, max_delay=%04X\n, " \
+ "type_term=%02X, terminator_length = %02X}\n",\
+ l->subtype,\
+ l->version_number,\
+ l->actor.tlv_type_info,\
+ l->actor.info_length,\
+ l->actor.port_params.system_priority,\
+ a_address,\
+ l->actor.port_params.key,\
+ l->actor.port_params.port_priority,\
+ l->actor.port_params.port_number,\
+ a_state,\
+ l->partner.tlv_type_info,\
+ l->partner.info_length,\
+ l->partner.port_params.system_priority,\
+ p_address,\
+ l->partner.port_params.key,\
+ l->partner.port_params.port_priority,\
+ l->partner.port_params.port_number,\
+ p_state,\
+ l->tlv_type_collector_info,\
+ l->collector_info_length,\
+ l->collector_max_delay,\
+ l->tlv_type_terminator,\
+ l->terminator_length);
+
+}
+#define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu)
+#else
+#define BOND_PRINT_LACP(lacpdu) do { } while (0)
+#define MODE4_DEBUG(fmt, ...) do { } while (0)
+#endif
+
+static const struct ether_addr lacp_mac_addr = {
+ .addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
+};
+
+struct port mode_8023ad_ports[RTE_MAX_ETHPORTS];
+
+static void
+timer_cancel(uint64_t *timer)
+{
+ *timer = 0;
+}
+
+static void
+timer_set(uint64_t *timer, uint64_t timeout)
+{
+ *timer = rte_rdtsc() + timeout;
+}
+
+/* Forces given timer to be in expired state. */
+static void
+timer_force_expired(uint64_t *timer)
+{
+ *timer = rte_rdtsc();
+}
+
+static bool
+timer_is_stopped(uint64_t *timer)
+{
+ return *timer == 0;
+}
+
+static bool
+timer_is_expired(uint64_t *timer)
+{
+ return *timer < rte_rdtsc();
+}
+
+/* Timer is in running state if it is neither stopped nor expired */
+static bool
+timer_is_running(uint64_t *timer)
+{
+ return !timer_is_stopped(timer) && !timer_is_expired(timer);
+}
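+
+/*
+ * Sketch: these timers are plain TSC deadlines, so arming a timer means
+ * converting a millisecond period to TSC ticks first, e.g. (100 ms,
+ * illustrative):
+ *
+ *   timer_set(&port->periodic_timer, 100 * rte_get_tsc_hz() / 1000);
+ */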
+
+static void
+set_warning_flags(struct port *port, uint16_t flags)
+{
+ int retval;
+ uint16_t old;
+ uint16_t new_flag = 0;
+
+ do {
+ old = port->warnings_to_show;
+ new_flag = old | flags;
+ retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag);
+ } while (unlikely(retval == 0));
+}
+
+static void
+show_warnings(uint8_t slave_id)
+{
+ struct port *port = &mode_8023ad_ports[slave_id];
+ uint8_t warnings;
+
+ do {
+ warnings = port->warnings_to_show;
+ } while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0);
+
+ if (!warnings)
+ return;
+
+ if (!timer_is_expired(&port->warning_timer))
+ return;
+
+
+ timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS *
+ rte_get_tsc_hz() / 1000);
+
+ if (warnings & WRN_RX_QUEUE_FULL) {
+ RTE_LOG(DEBUG, PMD,
+ "Slave %u: failed to enqueue LACP packet into RX ring.\n"
+ "Receive and transmit functions must be invoked on bonded\n"
+ "interface at least 10 times per second or LACP will not\n"
+ "work correctly\n", slave_id);
+ }
+
+ if (warnings & WRN_TX_QUEUE_FULL) {
+ RTE_LOG(DEBUG, PMD,
+ "Slave %u: failed to enqueue LACP packet into TX ring.\n"
+ "Receive and transmit functions must be invoked on bonded\n"
+ "interface at least 10 times per second or LACP will not\n"
+ "work correctly\n", slave_id);
+ }
+
+ if (warnings & WRN_RX_MARKER_TO_FAST)
+		RTE_LOG(INFO, PMD, "Slave %u: marker too early - ignoring.\n", slave_id);
+
+ if (warnings & WRN_UNKNOWN_SLOW_TYPE) {
+ RTE_LOG(INFO, PMD,
+ "Slave %u: ignoring unknown slow protocol frame type", slave_id);
+ }
+
+ if (warnings & WRN_UNKNOWN_MARKER_TYPE)
+ RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id);
+
+ if (warnings & WRN_NOT_LACP_CAPABLE)
+ MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id);
+}
+
+static void
+record_default(struct port *port)
+{
+	/* Record default parameters for the partner. Partner admin parameters
+	 * are not implemented, so set them to an arbitrary default (last known)
+	 * and mark in the actor state that the partner is defaulted. */
+ port->partner_state = STATE_LACP_ACTIVE;
+ ACTOR_STATE_SET(port, DEFAULTED);
+}
+
+/** Function handles rx state machine.
+ *
+ * This function implements Receive State Machine from point 5.4.12 in
+ * 802.1AX documentation. It should be called periodically.
+ *
+ * @param lacpdu LACPDU received.
+ * @param port Port on which LACPDU was received.
+ */
+static void
+rx_machine(struct bond_dev_private *internals, uint8_t slave_id,
+ struct lacpdu *lacp)
+{
+ struct port *agg, *port = &mode_8023ad_ports[slave_id];
+ uint64_t timeout;
+
+ if (SM_FLAG(port, BEGIN)) {
+ /* Initialize stuff */
+ MODE4_DEBUG("-> INITIALIZE\n");
+ SM_FLAG_CLR(port, MOVED);
+ port->selected = UNSELECTED;
+
+ record_default(port);
+
+ ACTOR_STATE_CLR(port, EXPIRED);
+ timer_cancel(&port->current_while_timer);
+
+ /* DISABLED: On initialization partner is out of sync */
+ PARTNER_STATE_CLR(port, SYNCHRONIZATION);
+
+ /* LACP DISABLED stuff if LACP not enabled on this port */
+ if (!SM_FLAG(port, LACP_ENABLED))
+ PARTNER_STATE_CLR(port, AGGREGATION);
+ else
+ PARTNER_STATE_SET(port, AGGREGATION);
+ }
+
+ if (!SM_FLAG(port, LACP_ENABLED)) {
+ /* Update parameters only if state changed */
+ if (!timer_is_stopped(&port->current_while_timer)) {
+ port->selected = UNSELECTED;
+ record_default(port);
+ PARTNER_STATE_CLR(port, AGGREGATION);
+ ACTOR_STATE_CLR(port, EXPIRED);
+ timer_cancel(&port->current_while_timer);
+ }
+ return;
+ }
+
+ if (lacp) {
+ MODE4_DEBUG("LACP -> CURRENT\n");
+ BOND_PRINT_LACP(lacp);
+		/* Update selected flag. If partner parameters are defaulted, assume
+		 * they match. If not defaulted, compare the LACP actor with the
+		 * port's partner params. */
+ if (!ACTOR_STATE(port, DEFAULTED) &&
+ (ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
+ || memcmp(&port->partner, &lacp->actor.port_params,
+ sizeof(port->partner)) != 0)) {
+ MODE4_DEBUG("selected <- UNSELECTED\n");
+ port->selected = UNSELECTED;
+ }
+
+ /* Record this PDU actor params as partner params */
+ memcpy(&port->partner, &lacp->actor.port_params,
+ sizeof(struct port_params));
+ port->partner_state = lacp->actor.state;
+
+ /* Partner parameters are not defaulted any more */
+ ACTOR_STATE_CLR(port, DEFAULTED);
+
+ /* If LACP partner params match this port actor params */
+ agg = &mode_8023ad_ports[port->aggregator_port_id];
+ bool match = port->actor.system_priority ==
+ lacp->partner.port_params.system_priority &&
+ is_same_ether_addr(&agg->actor.system,
+ &lacp->partner.port_params.system) &&
+ port->actor.port_priority ==
+ lacp->partner.port_params.port_priority &&
+ port->actor.port_number ==
+ lacp->partner.port_params.port_number;
+
+		/* Update NTT if the partner's information is outdated (XORed and
+		 * masked bits are set) */
+ uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT |
+ STATE_SYNCHRONIZATION | STATE_AGGREGATION;
+
+ if (((port->actor_state ^ lacp->partner.state) & state_mask) ||
+ match == false) {
+ SM_FLAG_SET(port, NTT);
+ }
+
+ /* If LACP partner params match this port actor params */
+ if (match == true && ACTOR_STATE(port, AGGREGATION) ==
+ PARTNER_STATE(port, AGGREGATION))
+ PARTNER_STATE_SET(port, SYNCHRONIZATION);
+ else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port,
+ AGGREGATION))
+ PARTNER_STATE_SET(port, SYNCHRONIZATION);
+ else
+ PARTNER_STATE_CLR(port, SYNCHRONIZATION);
+
+ if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT))
+ timeout = internals->mode4.short_timeout;
+ else
+ timeout = internals->mode4.long_timeout;
+
+ timer_set(&port->current_while_timer, timeout);
+ ACTOR_STATE_CLR(port, EXPIRED);
+ return; /* No state change */
+ }
+
+ /* If CURRENT state timer is not running (stopped or expired)
+ * transit to EXPIRED state from DISABLED or CURRENT */
+ if (!timer_is_running(&port->current_while_timer)) {
+ ACTOR_STATE_SET(port, EXPIRED);
+ PARTNER_STATE_CLR(port, SYNCHRONIZATION);
+ PARTNER_STATE_SET(port, LACP_SHORT_TIMEOUT);
+ timer_set(&port->current_while_timer, internals->mode4.short_timeout);
+ }
+}
+
+/**
+ * Function handles periodic tx state machine.
+ *
+ * Function implements Periodic Transmission state machine from point 5.4.13
+ * in 802.1AX documentation. It should be called periodically.
+ *
+ * @param port Port to handle state machine.
+ */
+static void
+periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
+{
+ struct port *port = &mode_8023ad_ports[slave_id];
+ /* Calculate if either site is LACP enabled */
+ uint64_t timeout;
+ uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
+ PARTNER_STATE(port, LACP_ACTIVE);
+
+ uint8_t is_partner_fast, was_partner_fast;
+	/* No periodic machine on BEGIN, LACP DISABLED, or when both sides are passive */
+ if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
+ timer_cancel(&port->periodic_timer);
+ timer_force_expired(&port->tx_machine_timer);
+ SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
+
+ MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
+			SM_FLAG(port, BEGIN) ? "begin " : "",
+ SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
+			active ? "LACP active " : "LACP passive ");
+ return;
+ }
+
+ is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT);
+ was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT);
+
+	/* If the periodic timer is not started, transition from NO PERIODIC to
+	 * FAST/SLOW. Otherwise check if the timer expired or the partner's
+	 * settings changed. */
+ if (!timer_is_stopped(&port->periodic_timer)) {
+ if (timer_is_expired(&port->periodic_timer)) {
+ SM_FLAG_SET(port, NTT);
+ } else if (is_partner_fast != was_partner_fast) {
+			/* The partner's timeout was slow and is now fast -> send LACP.
+			 * In the other case (was fast and is now slow) just switch the
+			 * timeout to slow without forcing a LACP send (because the
+			 * standard says so). */
+ if (!is_partner_fast)
+ SM_FLAG_SET(port, NTT);
+ } else
+ return; /* Nothing changed */
+ }
+
+ /* Handle state transition to FAST/SLOW LACP timeout */
+ if (is_partner_fast) {
+ timeout = internals->mode4.fast_periodic_timeout;
+ SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT);
+ } else {
+ timeout = internals->mode4.slow_periodic_timeout;
+ SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
+ }
+
+ timer_set(&port->periodic_timer, timeout);
+}
+
+/**
+ * Function handles mux state machine.
+ *
+ * Function implements Mux Machine from point 5.4.15 in 802.1AX documentation.
+ * It should be called periodically.
+ *
+ * @param port Port to handle state machine.
+ */
+static void
+mux_machine(struct bond_dev_private *internals, uint8_t slave_id)
+{
+ struct port *port = &mode_8023ad_ports[slave_id];
+
+ /* Save current state for later use */
+ const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
+ STATE_COLLECTING;
+
+ /* Enter DETACHED state on BEGIN condition or from any other state if
+ * port was unselected */
+ if (SM_FLAG(port, BEGIN) ||
+ port->selected == UNSELECTED || (port->selected == STANDBY &&
+ (port->actor_state & state_mask) != 0)) {
+ /* detach mux from aggregator */
+ port->actor_state &= ~state_mask;
+		/* Set NTT to true on the BEGIN condition, or on a transition from any
+		 * other state, which is indicated by wait_while_timer having been started */
+ if (SM_FLAG(port, BEGIN) ||
+ !timer_is_stopped(&port->wait_while_timer)) {
+ SM_FLAG_SET(port, NTT);
+ MODE4_DEBUG("-> DETACHED\n");
+ }
+ timer_cancel(&port->wait_while_timer);
+ }
+
+ if (timer_is_stopped(&port->wait_while_timer)) {
+ if (port->selected == SELECTED || port->selected == STANDBY) {
+ timer_set(&port->wait_while_timer,
+ internals->mode4.aggregate_wait_timeout);
+
+ MODE4_DEBUG("DETACHED -> WAITING\n");
+ }
+ /* Waiting state entered */
+ return;
+ }
+
+ /* Transit next state if port is ready */
+ if (!timer_is_expired(&port->wait_while_timer))
+ return;
+
+ if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
+ !PARTNER_STATE(port, SYNCHRONIZATION)) {
+		/* If in COLLECTING or DISTRIBUTING state and the partner becomes out
+		 * of sync, transition to the ATTACHED state. */
+ ACTOR_STATE_CLR(port, DISTRIBUTING);
+ ACTOR_STATE_CLR(port, COLLECTING);
+		/* Clear actor sync to trigger the transition to ATTACHED in the condition below */
+ ACTOR_STATE_CLR(port, SYNCHRONIZATION);
+ MODE4_DEBUG("Out of sync -> ATTACHED\n");
+ }
+
+ if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
+ /* attach mux to aggregator */
+ RTE_VERIFY((port->actor_state & (STATE_COLLECTING |
+ STATE_DISTRIBUTING)) == 0);
+
+ ACTOR_STATE_SET(port, SYNCHRONIZATION);
+ SM_FLAG_SET(port, NTT);
+ MODE4_DEBUG("ATTACHED Entered\n");
+ } else if (!ACTOR_STATE(port, COLLECTING)) {
+ /* Start collecting if in sync */
+ if (PARTNER_STATE(port, SYNCHRONIZATION)) {
+ MODE4_DEBUG("ATTACHED -> COLLECTING\n");
+ ACTOR_STATE_SET(port, COLLECTING);
+ SM_FLAG_SET(port, NTT);
+ }
+ } else if (ACTOR_STATE(port, COLLECTING)) {
+ /* Check if partner is in COLLECTING state. If so this port can
+ * distribute frames to it */
+ if (!ACTOR_STATE(port, DISTRIBUTING)) {
+ if (PARTNER_STATE(port, COLLECTING)) {
+ /* Enable DISTRIBUTING if partner is collecting */
+ ACTOR_STATE_SET(port, DISTRIBUTING);
+ SM_FLAG_SET(port, NTT);
+ MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
+ RTE_LOG(INFO, PMD,
+ "Bond %u: slave id %u distributing started.\n",
+ internals->port_id, slave_id);
+ }
+ } else {
+ if (!PARTNER_STATE(port, COLLECTING)) {
+ /* Disable DISTRIBUTING (enter COLLECTING state) if partner
+ * is not collecting */
+ ACTOR_STATE_CLR(port, DISTRIBUTING);
+ SM_FLAG_SET(port, NTT);
+ MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
+ RTE_LOG(INFO, PMD,
+ "Bond %u: slave id %u distributing stopped.\n",
+ internals->port_id, slave_id);
+ }
+ }
+ }
+}
+
+/**
+ * Function handles transmit state machine.
+ *
+ * Function implements Transmit Machine from point 5.4.16 in 802.1AX
+ * documentation.
+ *
+ * @param port
+ */
+static void
+tx_machine(struct bond_dev_private *internals, uint8_t slave_id)
+{
+ struct port *agg, *port = &mode_8023ad_ports[slave_id];
+
+ struct rte_mbuf *lacp_pkt = NULL;
+ struct lacpdu_header *hdr;
+ struct lacpdu *lacpdu;
+
+	/* If the periodic timer is not running, the periodic machine is in NO
+	 * PERIODIC and, according to the 802.3ax standard, the tx machine should
+	 * not transmit any frames and should set NTT to false. */
+ if (timer_is_stopped(&port->periodic_timer))
+ SM_FLAG_CLR(port, NTT);
+
+ if (!SM_FLAG(port, NTT))
+ return;
+
+ if (!timer_is_expired(&port->tx_machine_timer))
+ return;
+
+ lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
+ if (lacp_pkt == NULL) {
+ RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n");
+ return;
+ }
+
+ lacp_pkt->data_len = sizeof(*hdr);
+ lacp_pkt->pkt_len = sizeof(*hdr);
+
+ hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
+
+ /* Source and destination MAC */
+ ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
+ rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
+ hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
+
+ lacpdu = &hdr->lacpdu;
+ memset(lacpdu, 0, sizeof(*lacpdu));
+
+ /* Initialize LACP part */
+ lacpdu->subtype = SLOW_SUBTYPE_LACP;
+ lacpdu->version_number = 1;
+
+ /* ACTOR */
+ lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION;
+ lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
+ memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
+ sizeof(port->actor));
+ agg = &mode_8023ad_ports[port->aggregator_port_id];
+ ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system);
+ lacpdu->actor.state = port->actor_state;
+
+ /* PARTNER */
+ lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION;
+ lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params);
+ memcpy(&lacpdu->partner.port_params, &port->partner,
+ sizeof(struct port_params));
+ lacpdu->partner.state = port->partner_state;
+
+ /* Other fields */
+ lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION;
+ lacpdu->collector_info_length = 0x10;
+ lacpdu->collector_max_delay = 0;
+
+ lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
+ lacpdu->terminator_length = 0;
+
+ if (rte_ring_enqueue(port->tx_ring, lacp_pkt) == -ENOBUFS) {
+ /* If TX ring full, drop packet and free message. Retransmission
+ * will happen in next function call. */
+ rte_pktmbuf_free(lacp_pkt);
+ set_warning_flags(port, WRN_TX_QUEUE_FULL);
+ return;
+ }
+
+ MODE4_DEBUG("sending LACP frame\n");
+ BOND_PRINT_LACP(lacpdu);
+
+ timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
+ SM_FLAG_CLR(port, NTT);
+}
+
+/**
+ * Function assigns port to aggregator.
+ *
+ * @param internals Pointer to the bond_dev_private structure.
+ * @param slave_id Slave port to assign.
+ */
+static void
+selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
+{
+ struct port *agg, *port;
+ uint8_t slaves_count, new_agg_id, i;
+ uint8_t *slaves;
+
+ slaves = internals->active_slaves;
+ slaves_count = internals->active_slave_count;
+ port = &mode_8023ad_ports[slave_id];
+
+ /* Search for aggregator suitable for this port */
+ for (i = 0; i < slaves_count; ++i) {
+ agg = &mode_8023ad_ports[slaves[i]];
+		/* Skip ports that are not aggregators */
+ if (agg->aggregator_port_id != slaves[i])
+ continue;
+
+		/* The actor's system ID is not checked since all slave devices have
+		 * the same ID (MAC address). */
+ if ((agg->actor.key == port->actor.key &&
+ agg->partner.system_priority == port->partner.system_priority &&
+ is_same_ether_addr(&agg->partner.system, &port->partner.system) == 1
+ && (agg->partner.key == port->partner.key)) &&
+ is_zero_ether_addr(&port->partner.system) != 1 &&
+ (agg->actor.key &
+ rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {
+
+ break;
+ }
+ }
+
+	/* By default, the port uses itself as the aggregator */
+ if (i == slaves_count)
+ new_agg_id = slave_id;
+ else
+ new_agg_id = slaves[i];
+
+ if (new_agg_id != port->aggregator_port_id) {
+ port->aggregator_port_id = new_agg_id;
+
+ MODE4_DEBUG("-> SELECTED: ID=%3u\n"
+ "\t%s aggregator ID=%3u\n",
+ port->aggregator_port_id,
+ port->aggregator_port_id == slave_id ?
+ "aggregator not found, using default" : "aggregator found",
+ port->aggregator_port_id);
+ }
+
+ port->selected = SELECTED;
+}
+
+/* Function maps DPDK speed to bonding speed stored in key field */
+static uint16_t
+link_speed_key(uint16_t speed) {
+ uint16_t key_speed;
+
+ switch (speed) {
+ case ETH_LINK_SPEED_AUTONEG:
+ key_speed = 0x00;
+ break;
+ case ETH_LINK_SPEED_10:
+ key_speed = BOND_LINK_SPEED_KEY_10M;
+ break;
+ case ETH_LINK_SPEED_100:
+ key_speed = BOND_LINK_SPEED_KEY_100M;
+ break;
+ case ETH_LINK_SPEED_1000:
+ key_speed = BOND_LINK_SPEED_KEY_1000M;
+ break;
+ case ETH_LINK_SPEED_10G:
+ key_speed = BOND_LINK_SPEED_KEY_10G;
+ break;
+ case ETH_LINK_SPEED_20G:
+ key_speed = BOND_LINK_SPEED_KEY_20G;
+ break;
+ case ETH_LINK_SPEED_40G:
+ key_speed = BOND_LINK_SPEED_KEY_40G;
+ break;
+ default:
+ /* Unknown speed*/
+ key_speed = 0xFFFF;
+ }
+
+ return key_speed;
+}
+
+static void
+bond_mode_8023ad_periodic_cb(void *arg)
+{
+ struct rte_eth_dev *bond_dev = arg;
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct port *port;
+ struct rte_eth_link link_info;
+ struct ether_addr slave_addr;
+
+ void *pkt = NULL;
+ uint16_t i, slave_id;
+
+
+ /* Update link status on each port */
+ for (i = 0; i < internals->active_slave_count; i++) {
+ uint16_t key;
+
+ slave_id = internals->active_slaves[i];
+ rte_eth_link_get(slave_id, &link_info);
+ rte_eth_macaddr_get(slave_id, &slave_addr);
+
+ if (link_info.link_status != 0) {
+ key = link_speed_key(link_info.link_speed) << 1;
+ if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
+ key |= BOND_LINK_FULL_DUPLEX_KEY;
+ } else
+ key = 0;
+
+ port = &mode_8023ad_ports[slave_id];
+
+ key = rte_cpu_to_be_16(key);
+ if (key != port->actor.key) {
+ if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)))
+ set_warning_flags(port, WRN_NOT_LACP_CAPABLE);
+
+ port->actor.key = key;
+ SM_FLAG_SET(port, NTT);
+ }
+
+ if (!is_same_ether_addr(&port->actor.system, &slave_addr)) {
+ ether_addr_copy(&slave_addr, &port->actor.system);
+ if (port->aggregator_port_id == slave_id)
+ SM_FLAG_SET(port, NTT);
+ }
+ }
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ slave_id = internals->active_slaves[i];
+ port = &mode_8023ad_ports[slave_id];
+
+ if ((port->actor.key &
+ rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {
+
+ SM_FLAG_SET(port, BEGIN);
+
+			/* LACP is disabled on half duplex or when the link is down */
+ if (SM_FLAG(port, LACP_ENABLED)) {
+ /* If port was enabled set it to BEGIN state */
+ SM_FLAG_CLR(port, LACP_ENABLED);
+ ACTOR_STATE_CLR(port, DISTRIBUTING);
+ ACTOR_STATE_CLR(port, COLLECTING);
+ }
+
+ /* Skip this port processing */
+ continue;
+ }
+
+ SM_FLAG_SET(port, LACP_ENABLED);
+
+		/* Find a LACP packet for this port. Do not check the subtype; that is
+		 * done in the function that queued the packet */
+ if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
+ struct rte_mbuf *lacp_pkt = pkt;
+ struct lacpdu_header *lacp;
+
+ lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
+ RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+
+ /* This is LACP frame so pass it to rx_machine */
+ rx_machine(internals, slave_id, &lacp->lacpdu);
+ rte_pktmbuf_free(lacp_pkt);
+ } else
+ rx_machine(internals, slave_id, NULL);
+
+ periodic_machine(internals, slave_id);
+ mux_machine(internals, slave_id);
+ tx_machine(internals, slave_id);
+ selection_logic(internals, slave_id);
+
+ SM_FLAG_CLR(port, BEGIN);
+ show_warnings(slave_id);
+ }
+
+ rte_eal_alarm_set(internals->mode4.update_timeout_us,
+ bond_mode_8023ad_periodic_cb, arg);
+}
+
+void
+bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
+{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+
+ struct port *port = &mode_8023ad_ports[slave_id];
+ struct port_params initial = {
+ .system = { { 0 } },
+ .system_priority = rte_cpu_to_be_16(0xFFFF),
+ .key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY),
+ .port_priority = rte_cpu_to_be_16(0x00FF),
+ .port_number = 0,
+ };
+
+ char mem_name[RTE_ETH_NAME_MAX_LEN];
+ uint8_t socket_id;
+ unsigned element_size;
+
+	/* Given slave must not be in the active list */
+ RTE_VERIFY(find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_id) == internals->active_slave_count);
+
+ memcpy(&port->actor, &initial, sizeof(struct port_params));
+	/* The standard requires that the port ID be greater than 0.
+	 * Add 1 to get the corresponding port_number */
+ port->actor.port_number = rte_cpu_to_be_16((uint16_t)slave_id + 1);
+
+ memcpy(&port->partner, &initial, sizeof(struct port_params));
+
+ /* default states */
+ port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
+ port->partner_state = STATE_LACP_ACTIVE;
+ port->sm_flags = SM_FLAGS_BEGIN;
+
+	/* use this port as aggregator */
+ port->aggregator_port_id = slave_id;
+ rte_eth_promiscuous_enable(slave_id);
+
+ timer_cancel(&port->warning_timer);
+
+ if (port->mbuf_pool != NULL)
+ return;
+
+ RTE_VERIFY(port->rx_ring == NULL);
+ RTE_VERIFY(port->tx_ring == NULL);
+ socket_id = rte_eth_devices[slave_id].pci_dev->numa_node;
+
+ element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf)
+ + RTE_PKTMBUF_HEADROOM;
+
+	/* How big should the memory pool be? If the driver does not free
+	 * packets quickly enough there will be ENOMEM in tx_machine.
+	 * For now give 511 pkts * max number of queued TX packets per slave
+	 * and hope it will be enough. */
+ snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
+ port->mbuf_pool = rte_mempool_create(mem_name,
+ BOND_MODE_8023AX_SLAVE_TX_PKTS * 512 - 1,
+ element_size,
+ RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ? 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
+ sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
+ NULL, rte_pktmbuf_init, NULL, socket_id, MEMPOOL_F_NO_SPREAD);
+
+	/* Any memory allocation failure during initialization is critical because
+	 * resources can't be freed, so reinitialization is impossible. */
+ if (port->mbuf_pool == NULL) {
+ rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
+ slave_id, mem_name, rte_strerror(rte_errno));
+ }
+
+ snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id);
+ port->rx_ring = rte_ring_create(mem_name,
+ rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);
+
+ if (port->rx_ring == NULL) {
+ rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
+ mem_name, rte_strerror(rte_errno));
+ }
+
+ /* TX ring is at least one pkt longer to make room for marker packet. */
+ snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id);
+ port->tx_ring = rte_ring_create(mem_name,
+ rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0);
+
+ if (port->tx_ring == NULL) {
+ rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id,
+ mem_name, rte_strerror(rte_errno));
+ }
+}
+
+int
+bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
+ uint8_t slave_id)
+{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ void *pkt = NULL;
+ struct port *port;
+ uint8_t i;
+
+	/* Given slave must be in the active list */
+ RTE_VERIFY(find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_id) < internals->active_slave_count);
+
+	/* Exclude the slave from the transmit policy. If this slave is an
+	 * aggregator, make all aggregated slaves unselected to force the
+	 * selection logic to select a suitable aggregator for this port. */
+ for (i = 0; i < internals->active_slave_count; i++) {
+ port = &mode_8023ad_ports[internals->active_slaves[i]];
+ if (port->aggregator_port_id != slave_id)
+ continue;
+
+ port->selected = UNSELECTED;
+
+ /* Use default aggregator */
+ port->aggregator_port_id = internals->active_slaves[i];
+ }
+
+ port = &mode_8023ad_ports[slave_id];
+ port->selected = UNSELECTED;
+ port->actor_state &= ~(STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
+ STATE_COLLECTING);
+
+ while (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
+ rte_pktmbuf_free((struct rte_mbuf *)pkt);
+
+ while (rte_ring_dequeue(port->tx_ring, &pkt) == 0)
+ rte_pktmbuf_free((struct rte_mbuf *)pkt);
+ return 0;
+}
+
+void
+bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
+{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct ether_addr slave_addr;
+ struct port *slave, *agg_slave;
+ uint8_t slave_id, i, j;
+
+ bond_mode_8023ad_stop(bond_dev);
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ slave_id = internals->active_slaves[i];
+ slave = &mode_8023ad_ports[slave_id];
+ rte_eth_macaddr_get(slave_id, &slave_addr);
+
+ if (is_same_ether_addr(&slave_addr, &slave->actor.system))
+ continue;
+
+ ether_addr_copy(&slave_addr, &slave->actor.system);
+		/* Do nothing if this port is not an aggregator. Otherwise set the
+		 * NTT flag on every port that uses this aggregator. */
+ if (slave->aggregator_port_id != slave_id)
+ continue;
+
+ for (j = 0; j < internals->active_slave_count; j++) {
+ agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
+ if (agg_slave->aggregator_port_id == slave_id)
+ SM_FLAG_SET(agg_slave, NTT);
+ }
+ }
+
+ if (bond_dev->data->dev_started)
+ bond_mode_8023ad_start(bond_dev);
+}
+
+void
+bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+ uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
+
+ conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks;
+ conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks;
+ conf->short_timeout_ms = mode4->short_timeout / ms_ticks;
+ conf->long_timeout_ms = mode4->long_timeout / ms_ticks;
+ conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks;
+ conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks;
+ conf->update_timeout_ms = mode4->update_timeout_us / 1000;
+}
+
+void
+bond_mode_8023ad_setup(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_bond_8023ad_conf def_conf;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+ uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
+
+ if (conf == NULL) {
+ conf = &def_conf;
+ conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
+ conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
+ conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
+ conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
+ conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
+ conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
+ conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
+ conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
+ }
+
+ mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks;
+ mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks;
+ mode4->short_timeout = conf->short_timeout_ms * ms_ticks;
+ mode4->long_timeout = conf->long_timeout_ms * ms_ticks;
+ mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks;
+ mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
+ mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
+ mode4->update_timeout_us = conf->update_timeout_ms * 1000;
+}
+
+int
+bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
+{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ uint16_t i;
+
+ for (i = 0; i < internals->active_slave_count; i++)
+ bond_mode_8023ad_activate_slave(bond_dev, i);
+
+ return 0;
+}
+
+int
+bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
+{
+ return rte_eal_alarm_set(BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000,
+ &bond_mode_8023ad_periodic_cb, bond_dev);
+}
+
+void
+bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev)
+{
+ rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev);
+}
+
+void
+bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
+ uint8_t slave_id, struct rte_mbuf *pkt)
+{
+ struct mode8023ad_private *mode4 = &internals->mode4;
+ struct port *port = &mode_8023ad_ports[slave_id];
+ struct marker_header *m_hdr;
+ uint64_t marker_timer, old_marker_timer;
+ int retval;
+ uint8_t wrn, subtype;
+	/* If the packet is a marker, send the response now by reusing the given
+	 * packet and updating only the source MAC; the destination MAC is
+	 * multicast so don't update it. Other frames will be handled later by the
+	 * state machines */
+ subtype = rte_pktmbuf_mtod(pkt,
+ struct slow_protocol_frame *)->slow_protocol.subtype;
+
+ if (subtype == SLOW_SUBTYPE_MARKER) {
+ m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *);
+
+ if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) {
+ wrn = WRN_UNKNOWN_MARKER_TYPE;
+ goto free_out;
+ }
+
+		/* Set up the marker timer. Do it in a loop in case of concurrent access. */
+ do {
+ old_marker_timer = port->rx_marker_timer;
+ if (!timer_is_expired(&old_marker_timer)) {
+ wrn = WRN_RX_MARKER_TO_FAST;
+ goto free_out;
+ }
+
+ timer_set(&marker_timer, mode4->rx_marker_timeout);
+ retval = rte_atomic64_cmpset(&port->rx_marker_timer,
+ old_marker_timer, marker_timer);
+ } while (unlikely(retval == 0));
+
+ m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
+ rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);
+
+ if (unlikely(rte_ring_enqueue(port->tx_ring, pkt) == -ENOBUFS)) {
+ /* reset timer */
+ port->rx_marker_timer = 0;
+ wrn = WRN_TX_QUEUE_FULL;
+ goto free_out;
+ }
+ } else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
+ if (unlikely(rte_ring_enqueue(port->rx_ring, pkt) == -ENOBUFS)) {
+			/* If the RX ring is full, free the lacpdu message and drop the packet */
+ wrn = WRN_RX_QUEUE_FULL;
+ goto free_out;
+ }
+ } else {
+ wrn = WRN_UNKNOWN_SLOW_TYPE;
+ goto free_out;
+ }
+
+ return;
+
+free_out:
+ set_warning_flags(port, wrn);
+ rte_pktmbuf_free(pkt);
+}
+
+int
+rte_eth_bond_8023ad_conf_get(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_dev *bond_dev;
+
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+
+ if (conf == NULL)
+ return -EINVAL;
+
+ bond_dev = &rte_eth_devices[port_id];
+ bond_mode_8023ad_conf_get(bond_dev, conf);
+ return 0;
+}
+
+int
+rte_eth_bond_8023ad_setup(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_dev *bond_dev;
+
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+
+ if (conf != NULL) {
+ /* Basic sanity check */
+ if (conf->slow_periodic_ms == 0 ||
+ conf->fast_periodic_ms >= conf->slow_periodic_ms ||
+ conf->long_timeout_ms == 0 ||
+ conf->short_timeout_ms >= conf->long_timeout_ms ||
+ conf->aggregate_wait_timeout_ms == 0 ||
+ conf->tx_period_ms == 0 ||
+ conf->rx_marker_period_ms == 0 ||
+ conf->update_timeout_ms == 0) {
+ RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n");
+ return -EINVAL;
+ }
+ }
+
+ bond_dev = &rte_eth_devices[port_id];
+ bond_mode_8023ad_setup(bond_dev, conf);
+
+ return 0;
+}
+
+int
+rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
+ struct rte_eth_bond_8023ad_slave_info *info)
+{
+ struct rte_eth_dev *bond_dev;
+ struct bond_dev_private *internals;
+ struct port *port;
+
+ if (info == NULL || valid_bonded_port_id(port_id) != 0 ||
+ rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
+ return -EINVAL;
+
+ bond_dev = &rte_eth_devices[port_id];
+
+ internals = bond_dev->data->dev_private;
+ if (find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_id) ==
+ internals->active_slave_count)
+ return -EINVAL;
+
+ port = &mode_8023ad_ports[slave_id];
+ info->selected = port->selected;
+
+ info->actor_state = port->actor_state;
+ rte_memcpy(&info->actor, &port->actor, sizeof(port->actor));
+
+ info->partner_state = port->partner_state;
+ rte_memcpy(&info->partner, &port->partner, sizeof(port->partner));
+
+ info->agg_port_id = port->aggregator_port_id;
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.h b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.h
new file mode 100755
index 00000000..9adc6aa1
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad.h
@@ -0,0 +1,214 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_ETH_BOND_8023AD_H_
+#define RTE_ETH_BOND_8023AD_H_
+
+#include <rte_ether.h>
+
+/**
+ * Actor/partner states
+ */
+#define STATE_LACP_ACTIVE 0x01
+#define STATE_LACP_SHORT_TIMEOUT 0x02
+#define STATE_AGGREGATION 0x04
+#define STATE_SYNCHRONIZATION 0x08
+#define STATE_COLLECTING 0x10
+#define STATE_DISTRIBUTING 0x20
+/** Partner's parameters are defaulted */
+#define STATE_DEFAULTED 0x40
+#define STATE_EXPIRED 0x80
+
+#define TLV_TYPE_ACTOR_INFORMATION 0x01
+#define TLV_TYPE_PARTNER_INFORMATION 0x02
+#define TLV_TYPE_COLLECTOR_INFORMATION 0x03
+#define TLV_TYPE_TERMINATOR_INFORMATION 0x00
+
+#define SLOW_SUBTYPE_LACP 0x01
+#define SLOW_SUBTYPE_MARKER 0x02
+
+#define MARKER_TLV_TYPE_INFO 0x01
+#define MARKER_TLV_TYPE_RESP 0x02
+
+enum rte_bond_8023ad_selection {
+ UNSELECTED,
+ STANDBY,
+ SELECTED
+};
+
+/** Generic slow protocol structure */
+struct slow_protocol {
+ uint8_t subtype;
+ uint8_t reserved_119[119];
+} __attribute__((__packed__));
+
+/** Generic slow protocol frame type structure */
+struct slow_protocol_frame {
+ struct ether_hdr eth_hdr;
+ struct slow_protocol slow_protocol;
+} __attribute__((__packed__));
+
+struct port_params {
+ uint16_t system_priority;
+ /**< System priority (unused in current implementation) */
+ struct ether_addr system;
+ /**< System ID - Slave MAC address, same as bonding MAC address */
+ uint16_t key;
+ /**< Speed information (implementation dependent) and duplex. */
+ uint16_t port_priority;
+ /**< Priority of this port (unused in current implementation) */
+ uint16_t port_number;
+ /**< Port number. It corresponds to slave port id. */
+} __attribute__((__packed__));
+
+struct lacpdu_actor_partner_params {
+ uint8_t tlv_type_info;
+ uint8_t info_length;
+ struct port_params port_params;
+ uint8_t state;
+ uint8_t reserved_3[3];
+} __attribute__((__packed__));
+
+/** LACPDU structure (5.4.2 in 802.1AX documentation). */
+struct lacpdu {
+ uint8_t subtype;
+ uint8_t version_number;
+
+ struct lacpdu_actor_partner_params actor;
+ struct lacpdu_actor_partner_params partner;
+
+ uint8_t tlv_type_collector_info;
+ uint8_t collector_info_length;
+ uint16_t collector_max_delay;
+ uint8_t reserved_12[12];
+
+ uint8_t tlv_type_terminator;
+ uint8_t terminator_length;
+ uint8_t reserved_50[50];
+} __attribute__((__packed__));
+
+/** LACPDU frame: Contains ethernet header and LACPDU. */
+struct lacpdu_header {
+ struct ether_hdr eth_hdr;
+ struct lacpdu lacpdu;
+} __attribute__((__packed__));
+
+struct marker {
+ uint8_t subtype;
+ uint8_t version_number;
+
+ uint8_t tlv_type_marker;
+ uint8_t info_length;
+ uint16_t requester_port;
+ struct ether_addr requester_system;
+ uint32_t requester_transaction_id;
+ uint8_t reserved_2[2];
+
+ uint8_t tlv_type_terminator;
+ uint8_t terminator_length;
+ uint8_t reserved_90[90];
+} __attribute__((__packed__));
+
+struct marker_header {
+ struct ether_hdr eth_hdr;
+ struct marker marker;
+} __attribute__((__packed__));
+
+struct rte_eth_bond_8023ad_conf {
+ uint32_t fast_periodic_ms;
+ uint32_t slow_periodic_ms;
+ uint32_t short_timeout_ms;
+ uint32_t long_timeout_ms;
+ uint32_t aggregate_wait_timeout_ms;
+ uint32_t tx_period_ms;
+ uint32_t rx_marker_period_ms;
+ uint32_t update_timeout_ms;
+};
+
+struct rte_eth_bond_8023ad_slave_info {
+ enum rte_bond_8023ad_selection selected;
+ uint8_t actor_state;
+ struct port_params actor;
+ uint8_t partner_state;
+ struct port_params partner;
+ uint8_t agg_port_id;
+};
+
+/**
+ * @internal
+ *
+ * Function returns the current configuration of 802.1AX (mode 4) operation.
+ *
+ * @param port_id Bonding device id
+ * @param conf Pointer to configuration structure to be filled.
+ *
+ * @return
+ * 0 - if ok
+ * -EINVAL if conf is NULL
+ */
+int
+rte_eth_bond_8023ad_conf_get(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
+
+/**
+ * @internal
+ *
+ * Function sets a new configuration of 802.1AX (mode 4) operation.
+ *
+ * @param port_id Bonding device id
+ * @param conf Configuration; if NULL the default configuration is set.
+ * @return
+ * 0 - if ok
+ * -EINVAL if configuration is invalid.
+ */
+int
+rte_eth_bond_8023ad_setup(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
+
+/**
+ * @internal
+ *
+ * Function returns current state of given slave device.
+ *
+ * @param port_id Port id of bonded device.
+ * @param slave_id Port id of valid slave.
+ * @param conf Buffer to be filled with the slave info.
+ * @return
+ * 0 - if ok
+ * -EINVAL if conf is NULL or slave id is invalid (not a slave of given
+ * bonded device or is not active).
+ */
+int
+rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
+ struct rte_eth_bond_8023ad_slave_info *conf);
+
+#endif /* RTE_ETH_BOND_8023AD_H_ */
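A minimal usage sketch of the two configuration entry points declared above (illustrative only, not part of the patch; bond_port is a hypothetical, already created and stopped mode-4 bonded port id, and 250 ms is an arbitrary example value):

#include <stdint.h>
#include "rte_eth_bond_8023ad.h"

static void
shorten_lacp_tx_period(uint8_t bond_port)
{
	struct rte_eth_bond_8023ad_conf conf;

	/* Read the current mode-4 timing parameters... */
	if (rte_eth_bond_8023ad_conf_get(bond_port, &conf) == 0) {
		/* ...shorten the periodic LACPDU transmit period and apply it. */
		conf.tx_period_ms = 250;
		rte_eth_bond_8023ad_setup(bond_port, &conf);
	}
}

Passing NULL to rte_eth_bond_8023ad_setup() restores the default configuration, as noted in its doxygen comment above.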
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad_private.h b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad_private.h
new file mode 100755
index 00000000..8adee70b
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_8023ad_private.h
@@ -0,0 +1,308 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_ETH_BOND_8023AD_PRIVATE_H_
+#define RTE_ETH_BOND_8023AD_PRIVATE_H_
+
+#include <stdint.h>
+
+#include <rte_ether.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+
+#include "rte_eth_bond_8023ad.h"
+
+#define BOND_MODE_8023AX_UPDATE_TIMEOUT_MS 100
+/** Maximum number of LACP packets from one slave queued in the RX ring. */
+#define BOND_MODE_8023AX_SLAVE_RX_PKTS 3
+/** Maximum number of LACP packets to one slave queued in the TX ring. */
+#define BOND_MODE_8023AX_SLAVE_TX_PKTS 1
+/**
+ * Timeout definitions (5.4.4 in 802.1AX documentation).
+ */
+#define BOND_8023AD_FAST_PERIODIC_MS 900
+#define BOND_8023AD_SLOW_PERIODIC_MS 29000
+#define BOND_8023AD_SHORT_TIMEOUT_MS 3000
+#define BOND_8023AD_LONG_TIMEOUT_MS 90000
+#define BOND_8023AD_CHURN_DETECTION_TIMEOUT_MS 60000
+#define BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS 2000
+#define BOND_8023AD_TX_MACHINE_PERIOD_MS 500
+#define BOND_8023AD_RX_MARKER_PERIOD_MS 2000
+
+/**
+ * Interval at which warning messages from the state machines are shown. All
+ * messages are held (and gathered together) within this interval to prevent
+ * flooding.
+ * This is not part of the 802.1AX standard.
+ */
+#define BOND_8023AD_WARNINGS_PERIOD_MS 1000
+
+
+
+/**
+ * State machine flags
+ */
+#define SM_FLAGS_BEGIN 0x0001
+#define SM_FLAGS_LACP_ENABLED 0x0002
+#define SM_FLAGS_ACTOR_CHURN 0x0004
+#define SM_FLAGS_PARTNER_CHURN 0x0008
+#define SM_FLAGS_MOVED 0x0100
+#define SM_FLAGS_PARTNER_SHORT_TIMEOUT 0x0200
+#define SM_FLAGS_NTT 0x0400
+
+#define BOND_LINK_FULL_DUPLEX_KEY 0x01
+#define BOND_LINK_SPEED_KEY_10M 0x02
+#define BOND_LINK_SPEED_KEY_100M 0x04
+#define BOND_LINK_SPEED_KEY_1000M 0x08
+#define BOND_LINK_SPEED_KEY_10G 0x10
+#define BOND_LINK_SPEED_KEY_20G 0x11
+#define BOND_LINK_SPEED_KEY_40G 0x12
+
+#define WRN_RX_MARKER_TO_FAST 0x01
+#define WRN_UNKNOWN_SLOW_TYPE 0x02
+#define WRN_UNKNOWN_MARKER_TYPE 0x04
+#define WRN_NOT_LACP_CAPABLE 0x08
+#define WRN_RX_QUEUE_FULL 0x10
+#define WRN_TX_QUEUE_FULL 0x20
+
+#define CHECK_FLAGS(_variable, _f) ((_variable) & (_f))
+#define SET_FLAGS(_variable, _f) ((_variable) |= (_f))
+#define CLEAR_FLAGS(_variable, _f) ((_variable) &= ~(_f))
+
+#define SM_FLAG(_p, _f) (!!CHECK_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f))
+#define SM_FLAG_SET(_p, _f) SET_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f)
+#define SM_FLAG_CLR(_p, _f) CLEAR_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f)
+
+#define ACTOR_STATE(_p, _f) (!!CHECK_FLAGS((_p)->actor_state, STATE_ ## _f))
+#define ACTOR_STATE_SET(_p, _f) SET_FLAGS((_p)->actor_state, STATE_ ## _f)
+#define ACTOR_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->actor_state, STATE_ ## _f)
+
+#define PARTNER_STATE(_p, _f) (!!CHECK_FLAGS((_p)->partner_state, STATE_ ## _f))
+#define PARTNER_STATE_SET(_p, _f) SET_FLAGS((_p)->partner_state, STATE_ ## _f)
+#define PARTNER_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->partner_state, STATE_ ## _f)
+
+/** Variables associated with each port (5.4.7 in 802.1AX documentation). */
+struct port {
+ /**
+ * The operational values of the Actor's state parameters. Bitmask
+ * of port states.
+ */
+ uint8_t actor_state;
+
+ /** The operational Actor's port parameters */
+ struct port_params actor;
+
+ /**
+ * The operational value of the Actor's view of the current values of
+ * the Partner's state parameters. The Actor sets this variable either
+ * to the value received from the Partner in an LACPDU, or to the value
+ * of Partner_Admin_Port_State. Bitmask of port states.
+ */
+ uint8_t partner_state;
+
+ /** The operational Partner's port parameters */
+ struct port_params partner;
+
+ /* Additional port parameters not listed in documentation */
+ /** State machine flags */
+ uint16_t sm_flags;
+ enum rte_bond_8023ad_selection selected;
+
+ uint64_t current_while_timer;
+ uint64_t periodic_timer;
+ uint64_t wait_while_timer;
+ uint64_t tx_machine_timer;
+ uint64_t tx_marker_timer;
+ /* Aggregator parameters */
+ /** Used aggregator port ID */
+ uint16_t aggregator_port_id;
+
+ /** Memory pool used to allocate rings */
+ struct rte_mempool *mbuf_pool;
+
+ /** Ring of LACP packets from RX burst function */
+ struct rte_ring *rx_ring;
+
+ /** Ring of slow protocol packets (LACP and MARKERS) to TX burst function */
+ struct rte_ring *tx_ring;
+
+ /** Timer which is also used as a mutex. If it is 0 (not running), an RX
+ * marker packet may be answered; otherwise the packet shall be dropped. It is
+ * zeroed in the mode 4 callback function after it expires. */
+ volatile uint64_t rx_marker_timer;
+
+ uint64_t warning_timer;
+ volatile uint16_t warnings_to_show;
+};
+
+struct mode8023ad_private {
+ uint64_t fast_periodic_timeout;
+ uint64_t slow_periodic_timeout;
+ uint64_t short_timeout;
+ uint64_t long_timeout;
+ uint64_t aggregate_wait_timeout;
+ uint64_t tx_period_timeout;
+ uint64_t rx_marker_timeout;
+ uint64_t update_timeout_us;
+};
+
+/**
+ * @internal
+ * The pool of *port* structures. The size of the pool
+ * is configured at compile-time in the <rte_eth_bond_8023ad.c> file.
+ */
+extern struct port mode_8023ad_ports[];
+
+/* Forward declaration */
+struct bond_dev_private;
+
+/**
+ * @internal
+ *
+ * Get configuration of bonded interface.
+ *
+ *
+ * @param dev Bonded interface
+ * @param conf returned configuration
+ */
+void
+bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf);
+
+/**
+ * @internal
+ *
+ * Set mode 4 configuration of bonded interface.
+ *
+ * @pre Bonded interface must be stopped.
+ *
+ * @param dev Bonded interface
+ * @param conf new configuration. If NULL set default configuration.
+ */
+void
+bond_mode_8023ad_setup(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf);
+
+/**
+ * @internal
+ *
+ * Enables 802.1AX mode and all active slaves on bonded interface.
+ *
+ * @param dev Bonded interface
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+bond_mode_8023ad_enable(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Disables 802.1AX mode of the bonded interface and slaves.
+ *
+ * @param dev Bonded interface
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int bond_mode_8023ad_disable(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Starts 802.1AX state machine management logic.
+ * @param dev Bonded interface
+ * @return
+ * 0 if the state machines were started, 1 if they were already running,
+ * negative value otherwise.
+ */
+int
+bond_mode_8023ad_start(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Stops 802.1AX state machine management logic.
+ * @param dev Bonded interface
+ * @return
+ * 0 if this call stopped state machines, -ENOENT if alarm was not set.
+ */
+void
+bond_mode_8023ad_stop(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Passes given slow packet to the state machine management logic.
+ * @param internals Bonded device private data.
+ * @param slave_id Slave port id.
+ * @param pkt Slow packet.
+ */
+void
+bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
+ uint8_t slave_id, struct rte_mbuf *pkt);
+
+/**
+ * @internal
+ *
+ * Activates given slave in 802.1AX mode and appends it to the list of slaves
+ * in use.
+ *
+ * @param dev Bonded interface.
+ * @param port_id Slave port id to be activated.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+void
+bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint8_t port_id);
+
+/**
+ * @internal
+ *
+ * Deinitializes and removes given slave from 802.1AX mode.
+ *
+ * @param dev Bonded interface.
+ * @param slave_pos Position of slave in active_slaves array
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint8_t slave_pos);
+
+/**
+ * Updates state when MAC was changed on bonded device or one of its slaves.
+ * @param bond_dev Bonded device
+ */
+void
+bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev);
+
+#endif /* RTE_ETH_BOND_8023AD_PRIVATE_H_ */
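The flag helper macros above are what the 802.1AX state machines use instead of open-coded bit operations. A small illustrative fragment (not part of the patch; slave_id is a hypothetical valid slave id):

#include <stdint.h>
#include "rte_eth_bond_8023ad_private.h"

static void
mark_slave_collecting(uint8_t slave_id)
{
	struct port *port = &mode_8023ad_ports[slave_id];

	ACTOR_STATE_SET(port, LACP_ACTIVE);       /* sets STATE_LACP_ACTIVE in actor_state */
	if (PARTNER_STATE(port, SYNCHRONIZATION)) /* tests STATE_SYNCHRONIZATION bit */
		ACTOR_STATE_SET(port, COLLECTING);
	SM_FLAG_CLR(port, NTT);                   /* clears SM_FLAGS_NTT in sm_flags */
}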
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_api.c b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_api.c
new file mode 100755
index 00000000..c2a99a3b
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_api.c
@@ -0,0 +1,822 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ethdev.h>
+#include <rte_tcp.h>
+
+#include "rte_eth_bond.h"
+#include "rte_eth_bond_private.h"
+#include "rte_eth_bond_8023ad_private.h"
+
+#define DEFAULT_POLLING_INTERVAL_10_MS (10)
+
+int
+valid_bonded_ethdev(struct rte_eth_dev *eth_dev)
+{
+ size_t len;
+
+ /* Check valid pointer */
+ if (eth_dev->driver->pci_drv.name == NULL || driver_name == NULL)
+ return -1;
+
+ /* Check string lengths are equal */
+ len = strlen(driver_name);
+ if (strlen(eth_dev->driver->pci_drv.name) != len)
+ return -1;
+
+ /* Compare strings */
+ return strncmp(eth_dev->driver->pci_drv.name, driver_name, len);
+}
+
+int
+valid_port_id(uint8_t port_id)
+{
+ /* Verify that port id is valid */
+ int ethdev_count = rte_eth_dev_count();
+ if (port_id >= ethdev_count) {
+ RTE_BOND_LOG(ERR, "Port Id %d is greater than rte_eth_dev_count %d",
+ port_id, ethdev_count);
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+valid_bonded_port_id(uint8_t port_id)
+{
+ /* Verify that port id's are valid */
+ if (valid_port_id(port_id))
+ return -1;
+
+ /* Verify that bonded_port_id refers to a bonded port */
+ if (valid_bonded_ethdev(&rte_eth_devices[port_id])) {
+ RTE_BOND_LOG(ERR, "Specified port Id %d is not a bonded eth_dev device",
+ port_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+valid_slave_port_id(uint8_t port_id)
+{
+ /* Verify that port id's are valid */
+ if (valid_port_id(port_id))
+ return -1;
+
+ /* Verify that port_id refers to a non bonded port */
+ if (!valid_bonded_ethdev(&rte_eth_devices[port_id]))
+ return -1;
+
+ return 0;
+}
+
+void
+activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
+{
+ struct bond_dev_private *internals = eth_dev->data->dev_private;
+
+ if (internals->mode == BONDING_MODE_8023AD)
+ bond_mode_8023ad_activate_slave(eth_dev, port_id);
+
+ RTE_VERIFY(internals->active_slave_count <
+ (RTE_DIM(internals->active_slaves) - 1));
+
+ internals->active_slaves[internals->active_slave_count] = port_id;
+ internals->active_slave_count++;
+}
+
+void
+deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
+{
+ uint8_t slave_pos;
+ struct bond_dev_private *internals = eth_dev->data->dev_private;
+ uint8_t active_count = internals->active_slave_count;
+
+ if (internals->mode == BONDING_MODE_8023AD) {
+ bond_mode_8023ad_stop(eth_dev);
+ bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
+ }
+
+ slave_pos = find_slave_by_id(internals->active_slaves, active_count,
+ port_id);
+
+ /* If slave was not at the end of the list
+ * shift active slaves up active array list */
+ if (slave_pos < active_count) {
+ active_count--;
+ memmove(internals->active_slaves + slave_pos,
+ internals->active_slaves + slave_pos + 1,
+ (active_count - slave_pos) *
+ sizeof(internals->active_slaves[0]));
+ }
+
+ RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
+ internals->active_slave_count = active_count;
+
+ if (eth_dev->data->dev_started && internals->mode == BONDING_MODE_8023AD)
+ bond_mode_8023ad_start(eth_dev);
+}
+
+uint8_t
+number_of_sockets(void)
+{
+ int sockets = 0;
+ int i;
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+
+ for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
+ if (sockets < ms[i].socket_id)
+ sockets = ms[i].socket_id;
+ }
+
+ /* Number of sockets = maximum socket_id + 1 */
+ return ++sockets;
+}
+
+const char *driver_name = "Link Bonding PMD";
+
+int
+rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
+{
+ struct rte_pci_device *pci_dev = NULL;
+ struct bond_dev_private *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct eth_driver *eth_drv = NULL;
+ struct rte_pci_driver *pci_drv = NULL;
+ struct rte_pci_id *pci_id_table = NULL;
+ /* now do all data allocation - for eth_dev structure, dummy pci driver
+ * and internal (private) data
+ */
+
+ if (name == NULL) {
+ RTE_BOND_LOG(ERR, "Invalid name specified");
+ goto err;
+ }
+
+ if (socket_id >= number_of_sockets()) {
+ RTE_BOND_LOG(ERR,
+ "Invalid socket id specified to create bonded device on.");
+ goto err;
+ }
+
+ pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
+ if (pci_dev == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to malloc pci dev on socket");
+ goto err;
+ }
+
+ eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
+ if (eth_drv == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to malloc eth_drv on socket");
+ goto err;
+ }
+
+ pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
+ if (pci_drv == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to malloc pci_drv on socket");
+ goto err;
+ }
+ pci_id_table = rte_zmalloc_socket(name, sizeof(*pci_id_table), 0, socket_id);
+ if (pci_id_table == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to malloc pci_id_table on socket");
+ goto err;
+ }
+
+ pci_drv->id_table = pci_id_table;
+
+ pci_drv->id_table->device_id = PCI_ANY_ID;
+ pci_drv->id_table->subsystem_device_id = PCI_ANY_ID;
+ pci_drv->id_table->vendor_id = PCI_ANY_ID;
+ pci_drv->id_table->subsystem_vendor_id = PCI_ANY_ID;
+
+ pci_drv->drv_flags = RTE_PCI_DRV_INTR_LSC;
+
+ internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
+ if (internals == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to malloc internals on socket");
+ goto err;
+ }
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (eth_dev == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
+ goto err;
+ }
+
+ pci_dev->numa_node = socket_id;
+ pci_drv->name = driver_name;
+
+ eth_drv->pci_drv = (struct rte_pci_driver)(*pci_drv);
+ eth_dev->driver = eth_drv;
+
+ eth_dev->data->dev_private = internals;
+ eth_dev->data->nb_rx_queues = (uint16_t)1;
+ eth_dev->data->nb_tx_queues = (uint16_t)1;
+
+ TAILQ_INIT(&(eth_dev->callbacks));
+
+ eth_dev->data->dev_link.link_status = 0;
+
+ eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
+ socket_id);
+
+ eth_dev->data->dev_started = 0;
+ eth_dev->data->promiscuous = 0;
+ eth_dev->data->scattered_rx = 0;
+ eth_dev->data->all_multicast = 0;
+
+ eth_dev->dev_ops = &default_dev_ops;
+ eth_dev->pci_dev = pci_dev;
+
+ rte_spinlock_init(&internals->lock);
+
+ internals->port_id = eth_dev->data->port_id;
+ internals->mode = BONDING_MODE_INVALID;
+ internals->current_primary_port = 0;
+ internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
+ internals->user_defined_mac = 0;
+ internals->link_props_set = 0;
+
+ internals->link_status_polling_enabled = 0;
+
+ internals->link_status_polling_interval_ms = DEFAULT_POLLING_INTERVAL_10_MS;
+ internals->link_down_delay_ms = 0;
+ internals->link_up_delay_ms = 0;
+
+ internals->slave_count = 0;
+ internals->active_slave_count = 0;
+ internals->rx_offload_capa = 0;
+ internals->tx_offload_capa = 0;
+
+ memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
+ memset(internals->slaves, 0, sizeof(internals->slaves));
+
+ /* Set mode 4 default configuration */
+ bond_mode_8023ad_setup(eth_dev, NULL);
+ if (bond_ethdev_mode_set(eth_dev, mode)) {
+ RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d",
+ eth_dev->data->port_id, mode);
+ goto err;
+ }
+
+ return eth_dev->data->port_id;
+
+err:
+ if (pci_dev)
+ rte_free(pci_dev);
+ if (pci_drv)
+ rte_free(pci_drv);
+ if (pci_id_table)
+ rte_free(pci_id_table);
+ if (eth_drv)
+ rte_free(eth_drv);
+ if (internals)
+ rte_free(internals);
+ return -1;
+}
+
+static int
+__eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
+ struct bond_dev_private *internals;
+ struct bond_dev_private *temp_internals;
+ struct rte_eth_link link_props;
+ struct rte_eth_dev_info dev_info;
+
+ int i, j;
+
+ if (valid_slave_port_id(slave_port_id) != 0)
+ return -1;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ /* Verify that new slave device is not already a slave of another
+ * bonded device */
+ for (i = rte_eth_dev_count()-1; i >= 0; i--) {
+ if (valid_bonded_ethdev(&rte_eth_devices[i]) == 0) {
+ temp_internals = rte_eth_devices[i].data->dev_private;
+
+ for (j = 0; j < temp_internals->slave_count; j++) {
+ /* Device already a slave of a bonded device */
+ if (temp_internals->slaves[j].port_id == slave_port_id) {
+ RTE_BOND_LOG(ERR, "Slave port %d is already a slave",
+ slave_port_id);
+ return -1;
+ }
+ }
+ }
+ }
+
+ slave_eth_dev = &rte_eth_devices[slave_port_id];
+
+ /* Add slave details to bonded device */
+ slave_add(internals, slave_eth_dev);
+
+ memset(&dev_info, 0, sizeof(dev_info));
+ rte_eth_dev_info_get(slave_port_id, &dev_info);
+
+ if (internals->slave_count < 1) {
+ /* if MAC is not user defined then use MAC of first slave added to
+ * the bonded device */
+ if (!internals->user_defined_mac)
+ mac_address_set(bonded_eth_dev, slave_eth_dev->data->mac_addrs);
+
+ /* Inherit eth dev link properties from first slave */
+ link_properties_set(bonded_eth_dev,
+ &(slave_eth_dev->data->dev_link));
+
+ /* Make primary slave */
+ internals->primary_port = slave_port_id;
+
+ /* Take the first dev's offload capabilities */
+ internals->rx_offload_capa = dev_info.rx_offload_capa;
+ internals->tx_offload_capa = dev_info.tx_offload_capa;
+
+ } else {
+ /* Check slave link properties are supported if props are set,
+ * all slaves must be the same */
+ if (internals->link_props_set) {
+ if (link_properties_valid(&(bonded_eth_dev->data->dev_link),
+ &(slave_eth_dev->data->dev_link))) {
+ RTE_BOND_LOG(ERR,
+ "Slave port %d link speed/duplex not supported",
+ slave_port_id);
+ return -1;
+ }
+ } else {
+ link_properties_set(bonded_eth_dev,
+ &(slave_eth_dev->data->dev_link));
+ }
+ internals->rx_offload_capa &= dev_info.rx_offload_capa;
+ internals->tx_offload_capa &= dev_info.tx_offload_capa;
+ }
+
+ internals->slave_count++;
+
+ /* Update all slave devices MACs*/
+ mac_address_slaves_update(bonded_eth_dev);
+
+ if (bonded_eth_dev->data->dev_started) {
+ if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
+ RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
+ slave_port_id);
+ return -1;
+ }
+ }
+
+ /* Register link status change callback with bonded device pointer as
+ * argument*/
+ rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
+ bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id);
+
+ /* If bonded device is started then we can add the slave to our active
+ * slave array */
+ if (bonded_eth_dev->data->dev_started) {
+ rte_eth_link_get_nowait(slave_port_id, &link_props);
+
+ if (link_props.link_status == 1)
+ activate_slave(bonded_eth_dev, slave_port_id);
+ }
+ return 0;
+
+}
+
+int
+rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+
+ int retval;
+
+ /* Verify that port id's are valid bonded and slave ports */
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ rte_spinlock_lock(&internals->lock);
+
+ retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);
+
+ rte_spinlock_unlock(&internals->lock);
+
+ return retval;
+}
+
+static int
+__eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+
+ int i, slave_idx;
+
+ if (valid_slave_port_id(slave_port_id) != 0)
+ return -1;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ /* first remove from active slave list */
+ slave_idx = find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_port_id);
+
+ if (slave_idx < internals->active_slave_count)
+ deactivate_slave(bonded_eth_dev, slave_port_id);
+
+ slave_idx = -1;
+ /* now find in slave list */
+ for (i = 0; i < internals->slave_count; i++)
+ if (internals->slaves[i].port_id == slave_port_id) {
+ slave_idx = i;
+ break;
+ }
+
+ if (slave_idx < 0) {
+ RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d",
+ internals->slave_count);
+ return -1;
+ }
+
+ /* Un-register link status change callback with bonded device pointer as
+ * argument*/
+ rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
+ bond_ethdev_lsc_event_callback,
+ &rte_eth_devices[bonded_port_id].data->port_id);
+
+ /* Restore original MAC address of slave device */
+ mac_address_set(&rte_eth_devices[slave_port_id],
+ &(internals->slaves[slave_idx].persisted_mac_addr));
+
+ slave_remove(internals, &rte_eth_devices[slave_port_id]);
+
+ /* first slave in the active list will be the primary by default,
+ * otherwise use first device in list */
+ if (internals->current_primary_port == slave_port_id) {
+ if (internals->active_slave_count > 0)
+ internals->current_primary_port = internals->active_slaves[0];
+ else if (internals->slave_count > 0)
+ internals->current_primary_port = internals->slaves[0].port_id;
+ else
+ internals->primary_port = 0;
+ }
+
+ if (internals->active_slave_count < 1) {
+ /* reset device link properties as no slaves are active */
+ link_properties_reset(&rte_eth_devices[bonded_port_id]);
+
+ /* if no slaves remain attached to the bonded device and MAC is not
+ * user defined then clear MAC of bonded device as it will be reset
+ * when a new slave is added */
+ if (internals->slave_count < 1 && !internals->user_defined_mac)
+ memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0,
+ sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs)));
+ }
+ if (internals->slave_count == 0) {
+ internals->rx_offload_capa = 0;
+ internals->tx_offload_capa = 0;
+ }
+ return 0;
+}
+
+int
+rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+ int retval;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ rte_spinlock_lock(&internals->lock);
+
+ retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);
+
+ rte_spinlock_unlock(&internals->lock);
+
+ return retval;
+}
+
+int
+rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode)
+{
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ return bond_ethdev_mode_set(&rte_eth_devices[bonded_port_id], mode);
+}
+
+int
+rte_eth_bond_mode_get(uint8_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ return internals->mode;
+}
+
+int
+rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ if (valid_slave_port_id(slave_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ internals->user_defined_primary_port = 1;
+ internals->primary_port = slave_port_id;
+
+ bond_ethdev_primary_set(internals, slave_port_id);
+
+ return 0;
+}
+
+int
+rte_eth_bond_primary_get(uint8_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ if (internals->slave_count < 1)
+ return -1;
+
+ return internals->current_primary_port;
+}
+
+int
+rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len)
+{
+ struct bond_dev_private *internals;
+ uint8_t i;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ if (slaves == NULL)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ if (internals->slave_count > len)
+ return -1;
+
+ for (i = 0; i < internals->slave_count; i++)
+ slaves[i] = internals->slaves[i].port_id;
+
+ return internals->slave_count;
+}
+
+int
+rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
+ uint8_t len)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ if (slaves == NULL)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ if (internals->active_slave_count > len)
+ return -1;
+
+ memcpy(slaves, internals->active_slaves, internals->active_slave_count);
+
+ return internals->active_slave_count;
+}
+
+int
+rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ /* Set MAC Address of Bonded Device */
+ if (mac_address_set(bonded_eth_dev, mac_addr))
+ return -1;
+
+ internals->user_defined_mac = 1;
+
+ /* Update all slave devices MACs*/
+ if (internals->slave_count > 0)
+ return mac_address_slaves_update(bonded_eth_dev);
+
+ return 0;
+}
+
+int
+rte_eth_bond_mac_address_reset(uint8_t bonded_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ internals = bonded_eth_dev->data->dev_private;
+
+ internals->user_defined_mac = 0;
+
+ if (internals->slave_count > 0) {
+ /* Set MAC Address of Bonded Device */
+ if (mac_address_set(bonded_eth_dev,
+ &internals->slaves[internals->primary_port].persisted_mac_addr)
+ != 0) {
+ RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
+ return -1;
+ }
+ /* Update all slave devices MAC addresses */
+ return mac_address_slaves_update(bonded_eth_dev);
+ }
+ /* No need to update anything as no slaves present */
+ return 0;
+}
+
+int
+rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ switch (policy) {
+ case BALANCE_XMIT_POLICY_LAYER2:
+ case BALANCE_XMIT_POLICY_LAYER23:
+ case BALANCE_XMIT_POLICY_LAYER34:
+ internals->balance_xmit_policy = policy;
+ break;
+
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+int
+rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ return internals->balance_xmit_policy;
+}
+
+int
+rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+ internals->link_status_polling_interval_ms = internal_ms;
+
+ return 0;
+}
+
+int
+rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ return internals->link_status_polling_interval_ms;
+}
+
+int
+rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
+
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+ internals->link_down_delay_ms = delay_ms;
+
+ return 0;
+}
+
+int
+rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ return internals->link_down_delay_ms;
+}
+
+int
+rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
+
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+ internals->link_up_delay_ms = delay_ms;
+
+ return 0;
+}
+
+int
+rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id)
+{
+ struct bond_dev_private *internals;
+
+ if (valid_bonded_port_id(bonded_port_id) != 0)
+ return -1;
+
+ internals = rte_eth_devices[bonded_port_id].data->dev_private;
+
+ return internals->link_up_delay_ms;
+}
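The control-path API added in this file is typically driven as below (illustrative sketch only, not part of the patch; "eth_bond0" and slave port ids 0 and 1 are hypothetical, already probed devices, and error handling is abbreviated):

#include "rte_eth_bond.h"

static int
create_lacp_bond(void)
{
	/* Create a mode-4 (802.3ad) bonded device on socket 0. */
	int bond_port = rte_eth_bond_create("eth_bond0", BONDING_MODE_8023AD, 0);

	if (bond_port < 0)
		return -1;

	/* Attach two slave ports and make the first one primary. */
	rte_eth_bond_slave_add(bond_port, 0);
	rte_eth_bond_slave_add(bond_port, 1);
	rte_eth_bond_primary_set(bond_port, 0);

	return bond_port;
}

Once the bonded port is started, slaves whose link comes up are moved to the active list through activate_slave() above.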
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_args.c b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_args.c
new file mode 100755
index 00000000..ca4de38f
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_args.c
@@ -0,0 +1,279 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "rte_eth_bond.h"
+#include "rte_eth_bond_private.h"
+
+const char *pmd_bond_init_valid_arguments[] = {
+ PMD_BOND_SLAVE_PORT_KVARG,
+ PMD_BOND_PRIMARY_SLAVE_KVARG,
+ PMD_BOND_MODE_KVARG,
+ PMD_BOND_XMIT_POLICY_KVARG,
+ PMD_BOND_SOCKET_ID_KVARG,
+ PMD_BOND_MAC_ADDR_KVARG,
+
+ NULL
+};
+
+static inline int
+find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr)
+{
+ struct rte_pci_addr *eth_pci_addr;
+ unsigned i;
+
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+
+ if (rte_eth_devices[i].pci_dev == NULL)
+ continue;
+
+ eth_pci_addr = &(rte_eth_devices[i].pci_dev->addr);
+
+ if (pci_addr->bus == eth_pci_addr->bus &&
+ pci_addr->devid == eth_pci_addr->devid &&
+ pci_addr->domain == eth_pci_addr->domain &&
+ pci_addr->function == eth_pci_addr->function)
+ return i;
+ }
+ return -1;
+}
+
+static inline int
+find_port_id_by_dev_name(const char *name)
+{
+ unsigned i;
+
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ if (rte_eth_devices[i].data == NULL)
+ continue;
+
+ if (strcmp(rte_eth_devices[i].data->name, name) == 0)
+ return i;
+ }
+ return -1;
+}
+
+/**
+ * Parses a port identifier string to a port id: first as a PCI address, then
+ * as a device name, and finally as a numeric port id.
+ */
+static inline int
+parse_port_id(const char *port_str)
+{
+ struct rte_pci_addr dev_addr;
+ int port_id;
+
+ /* try parsing as pci address, physical devices */
+ if (eal_parse_pci_DomBDF(port_str, &dev_addr) == 0) {
+ port_id = find_port_id_by_pci_addr(&dev_addr);
+ if (port_id < 0)
+ return -1;
+ } else {
+ /* try parsing as device name, virtual devices */
+ port_id = find_port_id_by_dev_name(port_str);
+ if (port_id < 0) {
+ char *end;
+ errno = 0;
+
+ /* try parsing as port id */
+ port_id = strtol(port_str, &end, 10);
+ if (*end != 0 || errno != 0)
+ return -1;
+ }
+ }
+
+ if (port_id < 0 || port_id >= RTE_MAX_ETHPORTS) {
+ RTE_BOND_LOG(ERR, "Slave port specified (%s) outside expected range",
+ port_str);
+ return -1;
+ }
+ return port_id;
+}
+
+int
+bond_ethdev_parse_slave_port_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct bond_ethdev_slave_ports *slave_ports;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ slave_ports = extra_args;
+
+ if (strcmp(key, PMD_BOND_SLAVE_PORT_KVARG) == 0) {
+ int port_id = parse_port_id(value);
+ if (port_id < 0) {
+ RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified", value);
+ return -1;
+ } else
+ slave_ports->slaves[slave_ports->slave_count++] =
+ (uint8_t)port_id;
+ }
+ return 0;
+}
+
+int
+bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ uint8_t *mode;
+ char *endptr;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ mode = extra_args;
+
+ errno = 0;
+ *mode = strtol(value, &endptr, 10);
+ if (*endptr != 0 || errno != 0)
+ return -1;
+
+ /* validate mode value */
+ switch (*mode) {
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_ACTIVE_BACKUP:
+ case BONDING_MODE_BALANCE:
+#ifdef RTE_MBUF_REFCNT
+ case BONDING_MODE_BROADCAST:
+#endif
+ case BONDING_MODE_8023AD:
+ case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ return 0;
+ default:
+ RTE_BOND_LOG(ERR, "Invalid slave mode value (%s) specified", value);
+ return -1;
+ }
+}
+
+int
+bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int socket_id;
+ char *endptr;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ errno = 0;
+ socket_id = (uint8_t)strtol(value, &endptr, 10);
+ if (*endptr != 0 || errno != 0)
+ return -1;
+
+ /* validate socket id value */
+ if (socket_id >= 0 && socket_id < number_of_sockets()) {
+ *(uint8_t *)extra_args = (uint8_t)socket_id;
+ return 0;
+ }
+ return -1;
+}
+
+int
+bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int primary_slave_port_id;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ primary_slave_port_id = parse_port_id(value);
+ if (primary_slave_port_id < 0)
+ return -1;
+
+ *(uint8_t *)extra_args = (uint8_t)primary_slave_port_id;
+
+ return 0;
+}
+
+int
+bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ uint8_t *xmit_policy;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ xmit_policy = extra_args;
+
+ if (strcmp(PMD_BOND_XMIT_POLICY_LAYER2_KVARG, value) == 0)
+ *xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
+ else if (strcmp(PMD_BOND_XMIT_POLICY_LAYER23_KVARG, value) == 0)
+ *xmit_policy = BALANCE_XMIT_POLICY_LAYER23;
+ else if (strcmp(PMD_BOND_XMIT_POLICY_LAYER34_KVARG, value) == 0)
+ *xmit_policy = BALANCE_XMIT_POLICY_LAYER34;
+ else
+ return -1;
+
+ return 0;
+}
+
+int
+bond_ethdev_parse_bond_mac_addr_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ /* Parse MAC */
+ return cmdline_parse_etheraddr(NULL, value, extra_args,
+ sizeof(struct ether_addr));
+}
+
+int
+bond_ethdev_parse_time_ms_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ uint32_t time_ms;
+ char *endptr;
+
+ if (value == NULL || extra_args == NULL)
+ return -1;
+
+ errno = 0;
+ time_ms = (uint32_t)strtol(value, &endptr, 10);
+ if (*endptr != 0 || errno != 0)
+ return -1;
+
+ *(uint32_t *)extra_args = time_ms;
+
+ return 0;
+}
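These kvarg handlers back the --vdev form of device creation; the slave and primary values are resolved through parse_port_id() above, which accepts three forms. A comment-only sketch (illustrative; the concrete values are hypothetical):

/*
 * Forms accepted for a slave/primary device argument value:
 *
 *   0000:02:00.0   a PCI domain:bus:devid.function address (physical device)
 *   eth_ring0      an ethdev name (virtual device)
 *   1              a plain numeric port id
 *
 * The exact key strings are given by the PMD_BOND_*_KVARG macros defined in
 * rte_eth_bond_private.h (not shown in this hunk).
 */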
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_pmd.c b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_pmd.c
new file mode 100755
index 00000000..bb4a5379
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_pmd.c
@@ -0,0 +1,1881 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdlib.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ethdev.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_alarm.h>
+#include <rte_cycles.h>
+
+#include "rte_eth_bond.h"
+#include "rte_eth_bond_private.h"
+#include "rte_eth_bond_8023ad_private.h"
+
+#define REORDER_PERIOD_MS 10
+/* Table for statistics in mode 5 TLB */
+static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
+
+static uint16_t
+bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+
+ uint16_t num_rx_slave = 0;
+ uint16_t num_rx_total = 0;
+
+ int i;
+
+ /* Cast to structure, containing bonded device's port id and queue id */
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+
+ internals = bd_rx_q->dev_private;
+
+
+ for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
+ /* Offset of pointer to *bufs increases as packets are received
+ * from other slaves */
+ num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
+ bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
+ if (num_rx_slave) {
+ num_rx_total += num_rx_slave;
+ nb_pkts -= num_rx_slave;
+ }
+ }
+
+ return num_rx_total;
+}
+
+static uint16_t
+bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+
+ /* Cast to structure, containing bonded device's port id and queue id */
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+
+ internals = bd_rx_q->dev_private;
+
+ return rte_eth_rx_burst(internals->current_primary_port,
+ bd_rx_q->queue_id, bufs, nb_pkts);
+}
+
+static uint16_t
+bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ /* Cast to structure, containing bonded device's port id and queue id */
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+ struct bond_dev_private *internals = bd_rx_q->dev_private;
+ struct ether_addr bond_mac;
+
+ struct ether_hdr *hdr;
+
+ const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+ uint16_t num_rx_total = 0; /* Total number of received packets */
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint8_t slave_count;
+
+ uint8_t collecting; /* current slave collecting status */
+ const uint8_t promisc = internals->promiscuous_en;
+ uint8_t i, j, k;
+
+ rte_eth_macaddr_get(internals->port_id, &bond_mac);
+ /* Copy slave list to protect against slave up/down changes during rx
+ * bursting */
+ slave_count = internals->active_slave_count;
+ memcpy(slaves, internals->active_slaves,
+ sizeof(internals->active_slaves[0]) * slave_count);
+
+ for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
+ j = num_rx_total;
+ collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);
+
+ /* Read packets from this slave */
+ num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
+ &bufs[num_rx_total], nb_pkts - num_rx_total);
+
+ for (k = j; k < 2 && k < num_rx_total; k++)
+ rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));
+
+ /* Handle slow protocol packets. */
+ while (j < num_rx_total) {
+ if (j + 3 < num_rx_total)
+ rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
+
+ hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
+ /* Remove packet from array if it is a slow packet or the slave is not
+ * in collecting state, or the bonding interface is not in promiscuous
+ * mode and the packet address does not match. */
+ if (unlikely(hdr->ether_type == ether_type_slow_be ||
+ !collecting || (!promisc &&
+ !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
+
+ if (hdr->ether_type == ether_type_slow_be) {
+ bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
+ bufs[j]);
+ } else
+ rte_pktmbuf_free(bufs[j]);
+
+ /* Packet is managed by mode 4 or dropped, shift the array */
+ num_rx_total--;
+ if (j < num_rx_total) {
+ memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
+ (num_rx_total - j));
+ }
+ } else
+ j++;
+ }
+ }
+
+ return num_rx_total;
+}
+
+static uint16_t
+bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+ struct bond_tx_queue *bd_tx_q;
+
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
+ uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
+
+ uint8_t num_of_slaves;
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+
+ uint16_t num_tx_total = 0, num_tx_slave;
+
+ static int slave_idx = 0;
+ int i, cslave_idx = 0, tx_fail_total = 0;
+
+ bd_tx_q = (struct bond_tx_queue *)queue;
+ internals = bd_tx_q->dev_private;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ num_of_slaves = internals->active_slave_count;
+ memcpy(slaves, internals->active_slaves,
+ sizeof(internals->active_slaves[0]) * num_of_slaves);
+
+ if (num_of_slaves < 1)
+ return num_tx_total;
+
+ /* Populate slave mbuf arrays with the packets to be sent on them */
+ for (i = 0; i < nb_pkts; i++) {
+ cslave_idx = (slave_idx + i) % num_of_slaves;
+ slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
+ }
+
+ /* increment current slave index so the next call to tx burst starts on the
+ * next slave */
+ slave_idx = ++cslave_idx;
+
+ /* Send packet burst on each slave device */
+ for (i = 0; i < num_of_slaves; i++) {
+ if (slave_nb_pkts[i] > 0) {
+ num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ slave_bufs[i], slave_nb_pkts[i]);
+
+ /* if tx burst fails move packets to end of bufs */
+ if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
+ int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;
+
+ tx_fail_total += tx_fail_slave;
+
+ memcpy(&bufs[nb_pkts - tx_fail_total],
+ &slave_bufs[i][num_tx_slave],
+ tx_fail_slave * sizeof(bufs[0]));
+ }
+ num_tx_total += num_tx_slave;
+ }
+ }
+
+ return num_tx_total;
+}
+
+static uint16_t
+bond_ethdev_tx_burst_active_backup(void *queue,
+ struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+ struct bond_tx_queue *bd_tx_q;
+
+ bd_tx_q = (struct bond_tx_queue *)queue;
+ internals = bd_tx_q->dev_private;
+
+ if (internals->active_slave_count < 1)
+ return 0;
+
+ return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
+ bufs, nb_pkts);
+}
+
+static inline uint16_t
+ether_hash(struct ether_hdr *eth_hdr)
+{
+ uint16_t *word_src_addr = (uint16_t *)eth_hdr->s_addr.addr_bytes;
+ uint16_t *word_dst_addr = (uint16_t *)eth_hdr->d_addr.addr_bytes;
+
+ return (word_src_addr[0] ^ word_dst_addr[0]) ^
+ (word_src_addr[1] ^ word_dst_addr[1]) ^
+ (word_src_addr[2] ^ word_dst_addr[2]);
+}
+
+static inline uint32_t
+ipv4_hash(struct ipv4_hdr *ipv4_hdr)
+{
+ return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
+}
+
+static inline uint32_t
+ipv6_hash(struct ipv6_hdr *ipv6_hdr)
+{
+ uint32_t *word_src_addr = (uint32_t *)&(ipv6_hdr->src_addr[0]);
+ uint32_t *word_dst_addr = (uint32_t *)&(ipv6_hdr->dst_addr[0]);
+
+ return (word_src_addr[0] ^ word_dst_addr[0]) ^
+ (word_src_addr[1] ^ word_dst_addr[1]) ^
+ (word_src_addr[2] ^ word_dst_addr[2]) ^
+ (word_src_addr[3] ^ word_dst_addr[3]);
+}
+
+static uint32_t
+udp_hash(struct udp_hdr *hdr)
+{
+ return hdr->src_port ^ hdr->dst_port;
+}
+
+static inline uint16_t
+xmit_slave_hash(const struct rte_mbuf *buf, uint8_t slave_count, uint8_t policy)
+{
+ struct ether_hdr *eth_hdr;
+ struct udp_hdr *udp_hdr;
+ size_t eth_offset = 0;
+ uint32_t hash = 0;
+
+ if (slave_count == 1)
+ return 0;
+
+ switch (policy) {
+ case BALANCE_XMIT_POLICY_LAYER2:
+ eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
+
+ hash = ether_hash(eth_hdr);
+ hash ^= hash >> 8;
+ return hash % slave_count;
+
+ case BALANCE_XMIT_POLICY_LAYER23:
+ eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
+
+ if (buf->ol_flags & PKT_RX_VLAN_PKT)
+ eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
+ else
+ eth_offset = sizeof(struct ether_hdr);
+
+ if (buf->ol_flags & PKT_RX_IPV4_HDR) {
+ struct ipv4_hdr *ipv4_hdr;
+ ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(buf,
+ unsigned char *) + eth_offset);
+
+ hash = ether_hash(eth_hdr) ^ ipv4_hash(ipv4_hdr);
+
+ } else {
+ struct ipv6_hdr *ipv6_hdr;
+
+ ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(buf,
+ unsigned char *) + eth_offset);
+
+ hash = ether_hash(eth_hdr) ^ ipv6_hash(ipv6_hdr);
+ }
+ break;
+
+ case BALANCE_XMIT_POLICY_LAYER34:
+ if (buf->ol_flags & PKT_RX_VLAN_PKT)
+ eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
+ else
+ eth_offset = sizeof(struct ether_hdr);
+
+ if (buf->ol_flags & PKT_RX_IPV4_HDR) {
+ struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+ (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);
+
+ if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
+ udp_hdr = (struct udp_hdr *)
+ (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
+ sizeof(struct ipv4_hdr));
+ hash = ipv4_hash(ipv4_hdr) ^ udp_hash(udp_hdr);
+ } else {
+ hash = ipv4_hash(ipv4_hdr);
+ }
+ } else {
+ struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+ (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);
+
+ if (ipv6_hdr->proto == IPPROTO_UDP) {
+ udp_hdr = (struct udp_hdr *)
+ (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
+ sizeof(struct ipv6_hdr));
+ hash = ipv6_hash(ipv6_hdr) ^ udp_hash(udp_hdr);
+ } else {
+ hash = ipv6_hash(ipv6_hdr);
+ }
+ }
+ break;
+ }
+
+ hash ^= hash >> 16;
+ hash ^= hash >> 8;
+
+ return hash % slave_count;
+}
+
+struct bwg_slave {
+ uint64_t bwg_left_int;
+ uint64_t bwg_left_remainder;
+ uint8_t slave;
+};
+
+static int
+bandwidth_cmp(const void *a, const void *b)
+{
+ const struct bwg_slave *bwg_a = a;
+ const struct bwg_slave *bwg_b = b;
+ int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
+ int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
+ (int64_t)bwg_a->bwg_left_remainder;
+ if (diff > 0)
+ return 1;
+ else if (diff < 0)
+ return -1;
+ else if (diff2 > 0)
+ return 1;
+ else if (diff2 < 0)
+ return -1;
+ else
+ return 0;
+}
+
+static void
+bandwidth_left(int port_id, uint64_t load, uint8_t update_idx,
+ struct bwg_slave *bwg_slave)
+{
+ struct rte_eth_link link_status;
+
+ rte_eth_link_get(port_id, &link_status);
+ uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
+ if (link_bwg == 0)
+ return;
+ link_bwg = (link_bwg * (update_idx+1) * REORDER_PERIOD_MS);
+ bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
+ bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
+}
+
+static void
+bond_ethdev_update_tlb_slave_cb(void *arg)
+{
+ struct bond_dev_private *internals = arg;
+ struct rte_eth_stats slave_stats;
+ struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
+ uint8_t slave_count;
+ uint64_t tx_bytes;
+
+ uint8_t update_stats = 0;
+ uint8_t i, slave_id;
+
+ internals->slave_update_idx++;
+
+
+ if (internals->slave_update_idx >= REORDER_PERIOD_MS)
+ update_stats = 1;
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ slave_id = internals->active_slaves[i];
+ rte_eth_stats_get(slave_id, &slave_stats);
+ tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
+ bandwidth_left(slave_id, tx_bytes,
+ internals->slave_update_idx, &bwg_array[i]);
+ bwg_array[i].slave = slave_id;
+
+ if (update_stats)
+ tlb_last_obytets[slave_id] = slave_stats.obytes;
+ }
+
+ if (update_stats == 1)
+ internals->slave_update_idx = 0;
+
+ slave_count = i;
+ qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
+ for (i = 0; i < slave_count; i++)
+ internals->active_slaves[i] = bwg_array[i].slave;
+
+ rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
+ (struct bond_dev_private *)internals);
+}
+
+static uint16_t
+bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+ struct rte_eth_dev *primary_port =
+ &rte_eth_devices[internals->primary_port];
+ uint16_t num_tx_total = 0;
+ uint8_t i, j;
+
+ uint8_t num_of_slaves = internals->active_slave_count;
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+
+ struct ether_hdr *ether_hdr;
+ struct ether_addr primary_slave_addr;
+ struct ether_addr active_slave_addr;
+
+ if (num_of_slaves < 1)
+ return num_tx_total;
+
+ memcpy(slaves, internals->active_slaves,
+ sizeof(internals->active_slaves[0]) * num_of_slaves);
+
+
+ ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
+
+ if (nb_pkts > 3) {
+ for (i = 0; i < 3; i++)
+ rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
+ }
+
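+ /* For each active slave, rewrite the source MAC of packets that still carry
+ * the bonded (primary) MAC so they leave with that slave's persisted MAC,
+ * presumably so the peer does not see one MAC flapping between ports, then
+ * transmit the remaining part of the burst on that slave. */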
+ for (i = 0; i < num_of_slaves; i++) {
+ ether_addr_copy(&internals->slaves[slaves[i]].persisted_mac_addr,
+ &active_slave_addr);
+
+ for (j = num_tx_total; j < nb_pkts; j++) {
+ if (j + 3 < nb_pkts)
+ rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
+
+ ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
+ if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
+ ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
+ }
+
+ num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ bufs + num_tx_total, nb_pkts - num_tx_total);
+
+ if (num_tx_total == nb_pkts)
+ break;
+ }
+
+ return num_tx_total;
+}
+
+static uint16_t
+bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+ struct bond_tx_queue *bd_tx_q;
+
+ uint8_t num_of_slaves;
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+
+ uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;
+
+ int i, op_slave_id;
+
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
+ uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
+
+ bd_tx_q = (struct bond_tx_queue *)queue;
+ internals = bd_tx_q->dev_private;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ num_of_slaves = internals->active_slave_count;
+ memcpy(slaves, internals->active_slaves,
+ sizeof(internals->active_slaves[0]) * num_of_slaves);
+
+ if (num_of_slaves < 1)
+ return num_tx_total;
+
+ /* Populate slave mbuf arrays with the packets to be sent on each slave */
+ for (i = 0; i < nb_pkts; i++) {
+ /* Select output slave using hash based on xmit policy */
+ op_slave_id = xmit_slave_hash(bufs[i], num_of_slaves,
+ internals->balance_xmit_policy);
+
+ /* Populate slave mbuf arrays with mbufs for that slave */
+ slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
+ }
+
+ /* Send packet burst on each slave device */
+ for (i = 0; i < num_of_slaves; i++) {
+ if (slave_nb_pkts[i] > 0) {
+ num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ slave_bufs[i], slave_nb_pkts[i]);
+
+ /* if tx burst fails move packets to end of bufs */
+ if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
+ int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;
+
+ tx_fail_total += slave_tx_fail_count;
+ memcpy(&bufs[nb_pkts - tx_fail_total],
+ &slave_bufs[i][num_tx_slave],
+ slave_tx_fail_count * sizeof(bufs[0]));
+ }
+
+ num_tx_total += num_tx_slave;
+ }
+ }
+
+ return num_tx_total;
+}
+
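+/* Mode-4 (802.3ad) transmit: "slow" frames (LACPDUs/markers) queued by the
+ * state machines on each slave's tx_ring are always placed first in that
+ * slave's burst; data packets are then hashed across only the slaves whose
+ * actor state is DISTRIBUTING. Slow packets that fail to transmit are
+ * dropped, while untransmitted data packets are moved to the end of bufs and
+ * not counted in the return value. */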
+static uint16_t
+bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+ struct bond_tx_queue *bd_tx_q;
+
+ uint8_t num_of_slaves;
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+ /* positions in slaves array, not port IDs */
+ uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
+ uint8_t distributing_count;
+
+ uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
+ uint16_t i, j, op_slave_idx;
+ const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;
+
+ /* Allocate room for the additional packets needed in 802.3AD mode. */
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
+ void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };
+
+ /* Total amount of packets in slave_bufs */
+ uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
+ /* Slow packets placed in each slave */
+ uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
+
+ bd_tx_q = (struct bond_tx_queue *)queue;
+ internals = bd_tx_q->dev_private;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ num_of_slaves = internals->active_slave_count;
+ if (num_of_slaves < 1)
+ return num_tx_total;
+
+ memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);
+
+ distributing_count = 0;
+ for (i = 0; i < num_of_slaves; i++) {
+ struct port *port = &mode_8023ad_ports[slaves[i]];
+
+ slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
+ slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
+ slave_nb_pkts[i] = slave_slow_nb_pkts[i];
+
+ for (j = 0; j < slave_slow_nb_pkts[i]; j++)
+ slave_bufs[i][j] = slow_pkts[j];
+
+ if (ACTOR_STATE(port, DISTRIBUTING))
+ distributing_offsets[distributing_count++] = i;
+ }
+
+ if (likely(distributing_count > 0)) {
+ /* Populate slave mbuf arrays with the packets to be sent on each slave */
+ for (i = 0; i < nb_pkts; i++) {
+ /* Select output slave using hash based on xmit policy */
+ op_slave_idx = xmit_slave_hash(bufs[i], distributing_count,
+ internals->balance_xmit_policy);
+
+ /* Populate slave mbuf arrays with mbufs for that slave. Use only
+ * slaves that are currently distributing. */
+ uint8_t slave_offset = distributing_offsets[op_slave_idx];
+ slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
+ slave_nb_pkts[slave_offset]++;
+ }
+ }
+
+ /* Send packet burst on each slave device */
+ for (i = 0; i < num_of_slaves; i++) {
+ if (slave_nb_pkts[i] == 0)
+ continue;
+
+ num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ slave_bufs[i], slave_nb_pkts[i]);
+
+ /* If tx burst fails drop slow packets */
+ for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
+ rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);
+
+ num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
+ num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;
+
+ /* If tx burst fails move packets to end of bufs */
+ if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
+ uint16_t j = nb_pkts - num_tx_fail_total;
+ for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
+ bufs[j] = slave_bufs[i][num_tx_slave];
+ }
+ }
+
+ return num_tx_total;
+}
+
+#ifdef RTE_MBUF_REFCNT
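+/* Broadcast mode hands the same mbufs to every slave, so each refcount is
+ * bumped by (num_of_slaves - 1) up front. If a slave transmits fewer packets
+ * than the most successful one, the surplus references held for that slave
+ * are freed below, since the caller only ever accounts for one reference per
+ * packet. */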
+static uint16_t
+bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ struct bond_dev_private *internals;
+ struct bond_tx_queue *bd_tx_q;
+
+ uint8_t tx_failed_flag = 0, num_of_slaves;
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+
+ uint16_t max_nb_of_tx_pkts = 0;
+
+ int slave_tx_total[RTE_MAX_ETHPORTS];
+ int i, most_successful_tx_slave = -1;
+
+ bd_tx_q = (struct bond_tx_queue *)queue;
+ internals = bd_tx_q->dev_private;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting */
+ num_of_slaves = internals->active_slave_count;
+ memcpy(slaves, internals->active_slaves,
+ sizeof(internals->active_slaves[0]) * num_of_slaves);
+
+ if (num_of_slaves < 1)
+ return 0;
+
+ /* Increment reference count on mbufs */
+ for (i = 0; i < nb_pkts; i++)
+ rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);
+
+ /* Transmit burst on each active slave */
+ for (i = 0; i < num_of_slaves; i++) {
+ slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+ bufs, nb_pkts);
+
+ if (unlikely(slave_tx_total[i] < nb_pkts))
+ tx_failed_flag = 1;
+
+ /* record the value and slave index for the slave which transmits the
+ * maximum number of packets */
+ if (slave_tx_total[i] > max_nb_of_tx_pkts) {
+ max_nb_of_tx_pkts = slave_tx_total[i];
+ most_successful_tx_slave = i;
+ }
+ }
+
+ /* if slaves fail to transmit packets from burst, the calling application
+ * is not expected to know about multiple references to packets so we must
+ * handle failures of all packets except those of the most successful slave
+ */
+ if (unlikely(tx_failed_flag))
+ for (i = 0; i < num_of_slaves; i++)
+ if (i != most_successful_tx_slave)
+ while (slave_tx_total[i] < nb_pkts)
+ rte_pktmbuf_free(bufs[slave_tx_total[i]++]);
+
+ return max_nb_of_tx_pkts;
+}
+#endif
+
+void
+link_properties_set(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_link *slave_dev_link)
+{
+ struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+
+ if (slave_dev_link->link_status &&
+ bonded_eth_dev->data->dev_started) {
+ bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
+ bonded_dev_link->link_speed = slave_dev_link->link_speed;
+
+ internals->link_props_set = 1;
+ }
+}
+
+void
+link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
+{
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+
+ memset(&(bonded_eth_dev->data->dev_link), 0,
+ sizeof(bonded_eth_dev->data->dev_link));
+
+ internals->link_props_set = 0;
+}
+
+int
+link_properties_valid(struct rte_eth_link *bonded_dev_link,
+ struct rte_eth_link *slave_dev_link)
+{
+ if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
+ bonded_dev_link->link_speed != slave_dev_link->link_speed)
+ return -1;
+
+ return 0;
+}
+
+int
+mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
+{
+ struct ether_addr *mac_addr;
+
+ if (eth_dev == NULL) {
+ RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
+ return -1;
+ }
+
+ if (dst_mac_addr == NULL) {
+ RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
+ return -1;
+ }
+
+ mac_addr = eth_dev->data->mac_addrs;
+
+ ether_addr_copy(mac_addr, dst_mac_addr);
+ return 0;
+}
+
+int
+mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
+{
+ struct ether_addr *mac_addr;
+
+ if (eth_dev == NULL) {
+ RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
+ return -1;
+ }
+
+ if (new_mac_addr == NULL) {
+ RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
+ return -1;
+ }
+
+ mac_addr = eth_dev->data->mac_addrs;
+
+ /* If new MAC is different from the current MAC then update it */
+ if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
+ memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
+
+ return 0;
+}
+
+int
+mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
+{
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+ int i;
+
+ /* Update slave devices MAC addresses */
+ if (internals->slave_count < 1)
+ return -1;
+
+ switch (internals->mode) {
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_BALANCE:
+#ifdef RTE_MBUF_REFCNT
+ case BONDING_MODE_BROADCAST:
+#endif
+ for (i = 0; i < internals->slave_count; i++) {
+ if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
+ bonded_eth_dev->data->mac_addrs)) {
+ RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
+ internals->slaves[i].port_id);
+ return -1;
+ }
+ }
+ break;
+ case BONDING_MODE_8023AD:
+ bond_mode_8023ad_mac_address_update(bonded_eth_dev);
+ break;
+ case BONDING_MODE_ACTIVE_BACKUP:
+ case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ default:
+ for (i = 0; i < internals->slave_count; i++) {
+ if (internals->slaves[i].port_id ==
+ internals->current_primary_port) {
+ if (mac_address_set(&rte_eth_devices[internals->primary_port],
+ bonded_eth_dev->data->mac_addrs)) {
+ RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
+ internals->current_primary_port);
+ return -1;
+ }
+ } else {
+ if (mac_address_set(
+ &rte_eth_devices[internals->slaves[i].port_id],
+ &internals->slaves[i].persisted_mac_addr)) {
+ RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
+ internals->slaves[i].port_id);
+ return -1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+int
+bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
+{
+ struct bond_dev_private *internals;
+
+ internals = eth_dev->data->dev_private;
+
+ switch (mode) {
+ case BONDING_MODE_ROUND_ROBIN:
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
+ break;
+ case BONDING_MODE_ACTIVE_BACKUP:
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
+ break;
+ case BONDING_MODE_BALANCE:
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
+ break;
+#ifdef RTE_MBUF_REFCNT
+ case BONDING_MODE_BROADCAST:
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
+ break;
+#endif
+ case BONDING_MODE_8023AD:
+ if (bond_mode_8023ad_enable(eth_dev) != 0)
+ return -1;
+
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
+ RTE_BOND_LOG(WARNING,
+ "Using mode 4, it is necessary to do TX burst and RX burst "
+ "at least every 100ms.");
+ break;
+ case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
+ break;
+ default:
+ return -1;
+ }
+
+ internals->mode = mode;
+
+ return 0;
+}
+
+int
+slave_configure(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_dev *slave_eth_dev)
+{
+ struct bond_rx_queue *bd_rx_q;
+ struct bond_tx_queue *bd_tx_q;
+
+ int errval, q_id;
+
+ /* Stop slave */
+ rte_eth_dev_stop(slave_eth_dev->data->port_id);
+
+ /* Enable interrupts on slave device if supported */
+ if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
+ slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
+
+ /* Configure device */
+ errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
+ bonded_eth_dev->data->nb_rx_queues,
+ bonded_eth_dev->data->nb_tx_queues,
+ &(slave_eth_dev->data->dev_conf));
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)",
+ slave_eth_dev->data->port_id, errval);
+ return errval;
+ }
+
+ /* Setup Rx Queues */
+ for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
+ bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
+
+ errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
+ bd_rx_q->nb_rx_desc,
+ rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+ &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id, q_id, errval);
+ return errval;
+ }
+ }
+
+ /* Setup Tx Queues */
+ for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
+ bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
+
+ errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
+ bd_tx_q->nb_tx_desc,
+ rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+ &bd_tx_q->tx_conf);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR,
+ "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
+ slave_eth_dev->data->port_id, q_id, errval);
+ return errval;
+ }
+ }
+
+ /* Start device */
+ errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
+ slave_eth_dev->data->port_id, errval);
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+slave_remove(struct bond_dev_private *internals,
+ struct rte_eth_dev *slave_eth_dev)
+{
+ uint8_t i;
+
+ for (i = 0; i < internals->slave_count; i++)
+ if (internals->slaves[i].port_id ==
+ slave_eth_dev->data->port_id)
+ break;
+
+ if (i < (internals->slave_count - 1))
+ memmove(&internals->slaves[i], &internals->slaves[i + 1],
+ sizeof(internals->slaves[0]) *
+ (internals->slave_count - i - 1));
+
+ internals->slave_count--;
+}
+
+static void
+bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
+
+void
+slave_add(struct bond_dev_private *internals,
+ struct rte_eth_dev *slave_eth_dev)
+{
+ struct bond_slave_details *slave_details =
+ &internals->slaves[internals->slave_count];
+
+ slave_details->port_id = slave_eth_dev->data->port_id;
+ slave_details->last_link_status = 0;
+
+ /* If slave device doesn't support interrupts then we need to enable
+ * polling to monitor link status */
+ if (!(slave_eth_dev->pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
+ slave_details->link_status_poll_enabled = 1;
+
+ if (!internals->link_status_polling_enabled) {
+ internals->link_status_polling_enabled = 1;
+
+ rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
+ bond_ethdev_slave_link_status_change_monitor,
+ (void *)&rte_eth_devices[internals->port_id]);
+ }
+ }
+
+ slave_details->link_status_wait_to_complete = 0;
+ /* Persist the slave's current MAC address for later use */
+ memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
+ sizeof(struct ether_addr));
+}
+
+void
+bond_ethdev_primary_set(struct bond_dev_private *internals,
+ uint8_t slave_port_id)
+{
+ int i;
+
+ if (internals->active_slave_count < 1)
+ internals->current_primary_port = slave_port_id;
+ else
+ /* Search bonded device slave ports for new proposed primary port */
+ for (i = 0; i < internals->active_slave_count; i++) {
+ if (internals->active_slaves[i] == slave_port_id)
+ internals->current_primary_port = slave_port_id;
+ }
+}
+
+static void
+bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
+
+static int
+bond_ethdev_start(struct rte_eth_dev *eth_dev)
+{
+ struct bond_dev_private *internals;
+ int i;
+
+ /* slave eth dev will be started by bonded device */
+ if (valid_bonded_ethdev(eth_dev)) {
+ RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
+ eth_dev->data->port_id);
+ return -1;
+ }
+
+ eth_dev->data->dev_link.link_status = 0;
+ eth_dev->data->dev_started = 1;
+
+ internals = eth_dev->data->dev_private;
+
+ if (internals->slave_count == 0) {
+ RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
+ return -1;
+ }
+
+ if (internals->user_defined_mac == 0) {
+ struct ether_addr *new_mac_addr = NULL;
+
+ for (i = 0; i < internals->slave_count; i++)
+ if (internals->slaves[i].port_id == internals->primary_port)
+ new_mac_addr = &internals->slaves[i].persisted_mac_addr;
+
+ if (new_mac_addr == NULL)
+ return -1;
+
+ if (mac_address_set(eth_dev, new_mac_addr) != 0) {
+ RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
+ eth_dev->data->port_id);
+ return -1;
+ }
+ }
+
+ /* Update all slave devices MACs*/
+ if (mac_address_slaves_update(eth_dev) != 0)
+ return -1;
+
+ /* If bonded device is configured in promiscuous mode then re-apply config */
+ if (internals->promiscuous_en)
+ bond_ethdev_promiscuous_enable(eth_dev);
+
+ /* Reconfigure each slave device if starting bonded device */
+ for (i = 0; i < internals->slave_count; i++) {
+ if (slave_configure(eth_dev,
+ &(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
+ RTE_BOND_LOG(ERR,
+ "bonded port (%d) failed to reconfigure slave device (%d)",
+ eth_dev->data->port_id, internals->slaves[i].port_id);
+ return -1;
+ }
+ }
+
+ if (internals->user_defined_primary_port)
+ bond_ethdev_primary_set(internals, internals->primary_port);
+
+ if (internals->mode == BONDING_MODE_8023AD)
+ bond_mode_8023ad_start(eth_dev);
+
+ if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING)
+ bond_ethdev_update_tlb_slave_cb(internals);
+
+ return 0;
+}
+
+static void
+bond_ethdev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct bond_dev_private *internals = eth_dev->data->dev_private;
+ uint8_t i;
+
+ if (internals->mode == BONDING_MODE_8023AD) {
+ struct port *port;
+ void *pkt = NULL;
+
+ bond_mode_8023ad_stop(eth_dev);
+
+ /* Discard all messages to/from mode 4 state machines */
+ for (i = 0; i < internals->slave_count; i++) {
+ port = &mode_8023ad_ports[internals->slaves[i].port_id];
+
+ RTE_VERIFY(port->rx_ring != NULL);
+ while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
+ rte_pktmbuf_free(pkt);
+
+ RTE_VERIFY(port->tx_ring != NULL);
+ while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
+ rte_pktmbuf_free(pkt);
+ }
+ }
+
+ if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING) {
+ rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
+ }
+
+ internals->active_slave_count = 0;
+ internals->link_status_polling_enabled = 0;
+
+ eth_dev->data->dev_link.link_status = 0;
+ eth_dev->data->dev_started = 0;
+}
+
+static void
+bond_ethdev_close(struct rte_eth_dev *dev __rte_unused)
+{
+}
+
+/* forward declaration */
+static int bond_ethdev_configure(struct rte_eth_dev *dev);
+
+static void
+bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+
+ dev_info->driver_name = driver_name;
+ dev_info->max_mac_addrs = 1;
+
+ dev_info->max_rx_pktlen = (uint32_t)2048;
+
+ dev_info->max_rx_queues = (uint16_t)128;
+ dev_info->max_tx_queues = (uint16_t)512;
+
+ dev_info->min_rx_bufsize = 0;
+ dev_info->pci_dev = dev->pci_dev;
+
+ dev_info->rx_offload_capa = internals->rx_offload_capa;
+ dev_info->tx_offload_capa = internals->tx_offload_capa;
+}
+
+static int
+bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
+{
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
+ rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
+ 0, dev->pci_dev->numa_node);
+ if (bd_rx_q == NULL)
+ return -1;
+
+ bd_rx_q->queue_id = rx_queue_id;
+ bd_rx_q->dev_private = dev->data->dev_private;
+
+ bd_rx_q->nb_rx_desc = nb_rx_desc;
+
+ memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
+ bd_rx_q->mb_pool = mb_pool;
+
+ dev->data->rx_queues[rx_queue_id] = bd_rx_q;
+
+ return 0;
+}
+
+static int
+bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
+ rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
+ 0, dev->pci_dev->numa_node);
+
+ if (bd_tx_q == NULL)
+ return -1;
+
+ bd_tx_q->queue_id = tx_queue_id;
+ bd_tx_q->dev_private = dev->data->dev_private;
+
+ bd_tx_q->nb_tx_desc = nb_tx_desc;
+ memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
+
+ dev->data->tx_queues[tx_queue_id] = bd_tx_q;
+
+ return 0;
+}
+
+static void
+bond_ethdev_rx_queue_release(void *queue)
+{
+ if (queue == NULL)
+ return;
+
+ rte_free(queue);
+}
+
+static void
+bond_ethdev_tx_queue_release(void *queue)
+{
+ if (queue == NULL)
+ return;
+
+ rte_free(queue);
+}
+
+static void
+bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
+{
+ struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
+ struct bond_dev_private *internals;
+
+ /* Default value for polling slave found is true as we don't want to
+ * disable the polling thread if we cannot get the lock */
+ int i, polling_slave_found = 1;
+
+ if (cb_arg == NULL)
+ return;
+
+ bonded_ethdev = (struct rte_eth_dev *)cb_arg;
+ internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
+
+ if (!bonded_ethdev->data->dev_started ||
+ !internals->link_status_polling_enabled)
+ return;
+
+ /* If device is currently being configured then don't check the slaves'
+ * link status; wait until the next period */
+ if (rte_spinlock_trylock(&internals->lock)) {
+ if (internals->slave_count > 0)
+ polling_slave_found = 0;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ if (!internals->slaves[i].link_status_poll_enabled)
+ continue;
+
+ slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
+ polling_slave_found = 1;
+
+ /* Update slave link status */
+ (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
+ internals->slaves[i].link_status_wait_to_complete);
+
+ /* if link status has changed since last checked then call lsc
+ * event callback */
+ if (slave_ethdev->data->dev_link.link_status !=
+ internals->slaves[i].last_link_status) {
+ internals->slaves[i].last_link_status =
+ slave_ethdev->data->dev_link.link_status;
+
+ bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
+ RTE_ETH_EVENT_INTR_LSC,
+ &bonded_ethdev->data->port_id);
+ }
+ }
+ rte_spinlock_unlock(&internals->lock);
+ }
+
+ if (polling_slave_found)
+ /* Set alarm to continue monitoring link status of slave ethdevs */
+ rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
+ bond_ethdev_slave_link_status_change_monitor, cb_arg);
+}
+
+static int
+bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
+ int wait_to_complete)
+{
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+
+ if (!bonded_eth_dev->data->dev_started ||
+ internals->active_slave_count == 0) {
+ bonded_eth_dev->data->dev_link.link_status = 0;
+ return 0;
+ } else {
+ struct rte_eth_dev *slave_eth_dev;
+ int i, link_up = 0;
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];
+
+ (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
+ wait_to_complete);
+ if (slave_eth_dev->data->dev_link.link_status == 1) {
+ link_up = 1;
+ break;
+ }
+ }
+
+ bonded_eth_dev->data->dev_link.link_status = link_up;
+ }
+
+ return 0;
+}
+
+static void
+bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_eth_stats slave_stats;
+
+ int i;
+
+ /* clear bonded stats before populating from slaves */
+ memset(stats, 0, sizeof(*stats));
+
+ for (i = 0; i < internals->slave_count; i++) {
+ rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
+
+ stats->ipackets += slave_stats.ipackets;
+ stats->opackets += slave_stats.opackets;
+ stats->ibytes += slave_stats.ibytes;
+ stats->obytes += slave_stats.obytes;
+ stats->ierrors += slave_stats.ierrors;
+ stats->oerrors += slave_stats.oerrors;
+ stats->imcasts += slave_stats.imcasts;
+ stats->rx_nombuf += slave_stats.rx_nombuf;
+ stats->fdirmatch += slave_stats.fdirmatch;
+ stats->fdirmiss += slave_stats.fdirmiss;
+ stats->tx_pause_xon += slave_stats.tx_pause_xon;
+ stats->rx_pause_xon += slave_stats.rx_pause_xon;
+ stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
+ stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
+ }
+}
+
+static void
+bond_ethdev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+
+ for (i = 0; i < internals->slave_count; i++)
+ rte_eth_stats_reset(internals->slaves[i].port_id);
+}
+
+static void
+bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct bond_dev_private *internals = eth_dev->data->dev_private;
+ int i;
+
+ internals->promiscuous_en = 1;
+
+ switch (internals->mode) {
+ /* Promiscuous mode is propagated to all slaves */
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_BALANCE:
+#ifdef RTE_MBUF_REFCNT
+ case BONDING_MODE_BROADCAST:
+#endif
+ for (i = 0; i < internals->slave_count; i++)
+ rte_eth_promiscuous_enable(internals->slaves[i].port_id);
+ break;
+ /* In mode 4, promiscuous mode is managed when a slave is added/removed */
+ case BONDING_MODE_8023AD:
+ break;
+ /* Promiscuous mode is propagated only to primary slave */
+ case BONDING_MODE_ACTIVE_BACKUP:
+ case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ default:
+ rte_eth_promiscuous_enable(internals->current_primary_port);
+ }
+}
+
+static void
+bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+
+ internals->promiscuous_en = 0;
+
+ switch (internals->mode) {
+ /* Promiscuous mode is propagated to all slaves */
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_BALANCE:
+#ifdef RTE_MBUF_REFCNT
+ case BONDING_MODE_BROADCAST:
+#endif
+ for (i = 0; i < internals->slave_count; i++)
+ rte_eth_promiscuous_disable(internals->slaves[i].port_id);
+ break;
+ /* In mode 4, promiscuous mode is managed when a slave is added/removed */
+ case BONDING_MODE_8023AD:
+ break;
+ /* Promiscuous mode is propagated only to primary slave */
+ case BONDING_MODE_ACTIVE_BACKUP:
+ case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ default:
+ rte_eth_promiscuous_disable(internals->current_primary_port);
+ }
+}
+
+static void
+bond_ethdev_delayed_lsc_propagation(void *arg)
+{
+ if (arg == NULL)
+ return;
+
+ _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
+ RTE_ETH_EVENT_INTR_LSC);
+}
+
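+/* Link status change handler for slave ports: it activates or deactivates
+ * the slave in the active list, adopts link properties and a primary port
+ * when the first slave comes up, and propagates the LSC event to the bonded
+ * device, either immediately or via a delayed alarm when up/down propagation
+ * delays are configured. */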
+void
+bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+ void *param)
+{
+ struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
+ struct bond_dev_private *internals;
+ struct rte_eth_link link;
+
+ int i, valid_slave = 0;
+ uint8_t active_pos;
+ uint8_t lsc_flag = 0;
+
+ if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
+ return;
+
+ bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
+ slave_eth_dev = &rte_eth_devices[port_id];
+
+ if (valid_bonded_ethdev(bonded_eth_dev))
+ return;
+
+ internals = bonded_eth_dev->data->dev_private;
+
+ /* If the device isn't started don't handle interrupts */
+ if (!bonded_eth_dev->data->dev_started)
+ return;
+
+ /* verify that port_id is a valid slave of bonded port */
+ for (i = 0; i < internals->slave_count; i++) {
+ if (internals->slaves[i].port_id == port_id) {
+ valid_slave = 1;
+ break;
+ }
+ }
+
+ if (!valid_slave)
+ return;
+
+ /* Search for port in active port list */
+ active_pos = find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, port_id);
+
+ rte_eth_link_get_nowait(port_id, &link);
+ if (link.link_status) {
+ if (active_pos < internals->active_slave_count)
+ return;
+
+ /* if no active slave ports then set this port to be primary port */
+ if (internals->active_slave_count < 1) {
+ /* If first active slave, then change link status */
+ bonded_eth_dev->data->dev_link.link_status = 1;
+ internals->current_primary_port = port_id;
+ lsc_flag = 1;
+
+ mac_address_slaves_update(bonded_eth_dev);
+
+ /* Inherit eth dev link properties from first active slave */
+ link_properties_set(bonded_eth_dev,
+ &(slave_eth_dev->data->dev_link));
+ }
+
+ activate_slave(bonded_eth_dev, port_id);
+
+ /* If user has defined the primary port then default to using it */
+ if (internals->user_defined_primary_port &&
+ internals->primary_port == port_id)
+ bond_ethdev_primary_set(internals, port_id);
+ } else {
+ if (active_pos == internals->active_slave_count)
+ return;
+
+ /* Remove from active slave list */
+ deactivate_slave(bonded_eth_dev, port_id);
+
+ /* No active slaves, change link status to down and reset other
+ * link properties */
+ if (internals->active_slave_count < 1) {
+ lsc_flag = 1;
+ bonded_eth_dev->data->dev_link.link_status = 0;
+
+ link_properties_reset(bonded_eth_dev);
+ }
+
+ /* Update primary id: take the first active slave from the list, or if
+ * none is available fall back to the configured primary port */
+ if (port_id == internals->current_primary_port) {
+ if (internals->active_slave_count > 0)
+ bond_ethdev_primary_set(internals,
+ internals->active_slaves[0]);
+ else
+ internals->current_primary_port = internals->primary_port;
+ }
+ }
+
+ if (lsc_flag) {
+ /* Cancel any possible outstanding interrupts if delays are enabled */
+ if (internals->link_up_delay_ms > 0 ||
+ internals->link_down_delay_ms > 0)
+ rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
+ bonded_eth_dev);
+
+ if (bonded_eth_dev->data->dev_link.link_status) {
+ if (internals->link_up_delay_ms > 0)
+ rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
+ bond_ethdev_delayed_lsc_propagation,
+ (void *)bonded_eth_dev);
+ else
+ _rte_eth_dev_callback_process(bonded_eth_dev,
+ RTE_ETH_EVENT_INTR_LSC);
+
+ } else {
+ if (internals->link_down_delay_ms > 0)
+ rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
+ bond_ethdev_delayed_lsc_propagation,
+ (void *)bonded_eth_dev);
+ else
+ _rte_eth_dev_callback_process(bonded_eth_dev,
+ RTE_ETH_EVENT_INTR_LSC);
+ }
+ }
+}
+
+struct eth_dev_ops default_dev_ops = {
+ .dev_start = bond_ethdev_start,
+ .dev_stop = bond_ethdev_stop,
+ .dev_close = bond_ethdev_close,
+ .dev_configure = bond_ethdev_configure,
+ .dev_infos_get = bond_ethdev_info,
+ .rx_queue_setup = bond_ethdev_rx_queue_setup,
+ .tx_queue_setup = bond_ethdev_tx_queue_setup,
+ .rx_queue_release = bond_ethdev_rx_queue_release,
+ .tx_queue_release = bond_ethdev_tx_queue_release,
+ .link_update = bond_ethdev_link_update,
+ .stats_get = bond_ethdev_stats_get,
+ .stats_reset = bond_ethdev_stats_reset,
+ .promiscuous_enable = bond_ethdev_promiscuous_enable,
+ .promiscuous_disable = bond_ethdev_promiscuous_disable
+};
+
+static int
+bond_init(const char *name, const char *params)
+{
+ struct bond_dev_private *internals;
+ struct rte_kvargs *kvlist;
+ uint8_t bonding_mode, socket_id;
+ int arg_count, port_id;
+
+ RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);
+
+ kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+
+ /* Parse link bonding mode */
+ if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
+ if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
+ &bond_ethdev_parse_slave_mode_kvarg, &bonding_mode) != 0) {
+ RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", name);
+ return -1;
+ }
+ } else {
+ RTE_LOG(ERR, EAL,
+ "Mode must be specified only once for bonded device %s\n",
+ name);
+ return -1;
+ }
+
+ /* Parse socket id to create bonding device on */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
+ if (arg_count == 1) {
+ if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
+ &bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) {
+ RTE_LOG(ERR, EAL,
+ "Invalid socket Id specified for bonded device %s\n",
+ name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_LOG(ERR, EAL,
+ "Socket Id can be specified only once for bonded device %s\n",
+ name);
+ return -1;
+ } else {
+ socket_id = rte_socket_id();
+ }
+
+ /* Create link bonding eth device */
+ port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
+ if (port_id < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to create socket %s in mode %u on socket %u.\n",
+ name, bonding_mode, socket_id);
+ return -1;
+ }
+ internals = rte_eth_devices[port_id].data->dev_private;
+ internals->kvlist = kvlist;
+
+ RTE_LOG(INFO, EAL,
+ "Create bonded device %s on port %d in mode %u on socket %u.\n",
+ name, port_id, bonding_mode, socket_id);
+ return 0;
+}
+
+/* This part resolves the slave port ids after all the other pdevs and vdevs
+ * have been allocated */
+static int
+bond_ethdev_configure(struct rte_eth_dev *dev)
+{
+ char *name = dev->data->name;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_kvargs *kvlist = internals->kvlist;
+ int arg_count, port_id = dev - rte_eth_devices;
+
+ /*
+ * if no kvlist, it means that this bonded device has been created
+ * through the bonding api.
+ */
+ if (!kvlist)
+ return 0;
+
+ /* Parse MAC address for bonded device */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
+ if (arg_count == 1) {
+ struct ether_addr bond_mac;
+
+ if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
+ &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
+ RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
+ name);
+ return -1;
+ }
+
+ /* Set MAC address */
+ if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set mac address on bonded device %s\n",
+ name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_LOG(ERR, EAL,
+ "MAC address can be specified only once for bonded device %s\n",
+ name);
+ return -1;
+ }
+
+ /* Parse/set balance mode transmit policy */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
+ if (arg_count == 1) {
+ uint8_t xmit_policy;
+
+ if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
+ &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
+ 0) {
+ RTE_LOG(INFO, EAL,
+ "Invalid xmit policy specified for bonded device %s\n",
+ name);
+ return -1;
+ }
+
+ /* Set balance mode transmit policy*/
+ if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set balance xmit policy on bonded device %s\n",
+ name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_LOG(ERR, EAL,
+ "Transmit policy can be specified only once for bonded device"
+ " %s\n", name);
+ return -1;
+ }
+
+ /* Parse/add slave ports to bonded device */
+ if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
+ struct bond_ethdev_slave_ports slave_ports;
+ unsigned i;
+
+ memset(&slave_ports, 0, sizeof(slave_ports));
+
+ if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
+ &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to parse slave ports for bonded device %s\n",
+ name);
+ return -1;
+ }
+
+ for (i = 0; i < slave_ports.slave_count; i++) {
+ if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to add port %d as slave to bonded device %s\n",
+ slave_ports.slaves[i], name);
+ }
+ }
+
+ } else {
+ RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
+ return -1;
+ }
+
+ /* Parse/set primary slave port id*/
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
+ if (arg_count == 1) {
+ uint8_t primary_slave_port_id;
+
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_PRIMARY_SLAVE_KVARG,
+ &bond_ethdev_parse_primary_slave_port_id_kvarg,
+ &primary_slave_port_id) < 0) {
+ RTE_LOG(INFO, EAL,
+ "Invalid primary slave port id specified for bonded device"
+ " %s\n", name);
+ return -1;
+ }
+
+ /* Set primary slave port id */
+ if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
+ != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set primary slave port %d on bonded device %s\n",
+ primary_slave_port_id, name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_LOG(INFO, EAL,
+ "Primary slave can be specified only once for bonded device"
+ " %s\n", name);
+ return -1;
+ }
+
+ /* Parse link status monitor polling interval */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
+ if (arg_count == 1) {
+ uint32_t lsc_poll_interval_ms;
+
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_LSC_POLL_PERIOD_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &lsc_poll_interval_ms) < 0) {
+ RTE_LOG(INFO, EAL,
+ "Invalid lsc polling interval value specified for bonded"
+ " device %s\n", name);
+ return -1;
+ }
+
+ if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
+ != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set lsc monitor polling interval (%u ms) on"
+ " bonded device %s\n", lsc_poll_interval_ms, name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_LOG(INFO, EAL,
+ "LSC polling interval can be specified only once for bonded"
+ " device %s\n", name);
+ return -1;
+ }
+
+ /* Parse link up interrupt propagation delay */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
+ if (arg_count == 1) {
+ uint32_t link_up_delay_ms;
+
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &link_up_delay_ms) < 0) {
+ RTE_LOG(INFO, EAL,
+ "Invalid link up propagation delay value specified for"
+ " bonded device %s\n", name);
+ return -1;
+ }
+
+ /* Set link up propagation delay */
+ if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
+ != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set link up propagation delay (%u ms) on bonded"
+ " device %s\n", link_up_delay_ms, name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_LOG(INFO, EAL,
+ "Link up propagation delay can be specified only once for"
+ " bonded device %s\n", name);
+ return -1;
+ }
+
+ /* Parse link down interrupt propagation delay */
+ arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
+ if (arg_count == 1) {
+ uint32_t link_down_delay_ms;
+
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &link_down_delay_ms) < 0) {
+ RTE_LOG(INFO, EAL,
+ "Invalid link down propagation delay value specified for"
+ " bonded device %s\n", name);
+ return -1;
+ }
+
+ /* Set link down propagation delay */
+ if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
+ != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set link down propagation delay (%u ms) on"
+ " bonded device %s\n", link_down_delay_ms, name);
+ return -1;
+ }
+ } else if (arg_count > 1) {
+ RTE_LOG(INFO, EAL,
+ "Link down propagation delay can be specified only once for"
+ " bonded device %s\n", name);
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct rte_driver bond_drv = {
+ .name = "eth_bond",
+ .type = PMD_VDEV,
+ .init = bond_init,
+};
+
+PMD_REGISTER_DRIVER(bond_drv);
diff --git a/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_private.h b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_private.h
new file mode 100755
index 00000000..f913c5b9
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_bond/rte_eth_bond_private.h
@@ -0,0 +1,268 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETH_BOND_PRIVATE_H_
+#define _RTE_ETH_BOND_PRIVATE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_ethdev.h>
+#include <rte_spinlock.h>
+
+#include "rte_eth_bond.h"
+#include "rte_eth_bond_8023ad_private.h"
+
+#define PMD_BOND_SLAVE_PORT_KVARG ("slave")
+#define PMD_BOND_PRIMARY_SLAVE_KVARG ("primary")
+#define PMD_BOND_MODE_KVARG ("mode")
+#define PMD_BOND_XMIT_POLICY_KVARG ("xmit_policy")
+#define PMD_BOND_SOCKET_ID_KVARG ("socket_id")
+#define PMD_BOND_MAC_ADDR_KVARG ("mac")
+#define PMD_BOND_LSC_POLL_PERIOD_KVARG ("lsc_poll_period_ms")
+#define PMD_BOND_LINK_UP_PROP_DELAY_KVARG ("up_delay")
+#define PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG ("down_delay")
+
+#define PMD_BOND_XMIT_POLICY_LAYER2_KVARG ("l2")
+#define PMD_BOND_XMIT_POLICY_LAYER23_KVARG ("l23")
+#define PMD_BOND_XMIT_POLICY_LAYER34_KVARG ("l34")
+
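+/*
+ * Example (hypothetical values): a bonded device can be created from the EAL
+ * command line with these kvargs, roughly as
+ *
+ *   --vdev 'eth_bond0,mode=2,slave=0000:02:00.0,slave=0000:02:00.1,xmit_policy=l34'
+ *
+ * where mode=2 selects balance mode and each slave value is resolved by
+ * bond_ethdev_parse_slave_port_kvarg() declared below.
+ */
+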
+#define RTE_BOND_LOG(lvl, msg, ...) \
+ RTE_LOG(lvl, PMD, "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+#define BONDING_MODE_INVALID 0xFF
+
+extern const char *pmd_bond_init_valid_arguments[];
+
+extern const char *driver_name;
+
+/** Port Queue Mapping Structure */
+struct bond_rx_queue {
+ int queue_id;
+ /**< Queue Id */
+ struct bond_dev_private *dev_private;
+ /**< Reference to eth_dev private structure */
+ uint16_t nb_rx_desc;
+ /**< Number of RX descriptors available for the queue */
+ struct rte_eth_rxconf rx_conf;
+ /**< Copy of RX configuration structure for queue */
+ struct rte_mempool *mb_pool;
+ /**< Reference to mbuf pool to use for RX queue */
+};
+
+struct bond_tx_queue {
+ int queue_id;
+ /**< Queue Id */
+ struct bond_dev_private *dev_private;
+ /**< Reference to dev private structure */
+ uint16_t nb_tx_desc;
+ /**< Number of TX descriptors available for the queue */
+ struct rte_eth_txconf tx_conf;
+ /**< Copy of TX configuration structure for queue */
+};
+
+/** Bonded slave devices structure */
+struct bond_ethdev_slave_ports {
+ uint8_t slaves[RTE_MAX_ETHPORTS]; /**< Slave port id array */
+ uint8_t slave_count; /**< Number of slaves */
+};
+
+struct bond_slave_details {
+ uint8_t port_id;
+ /**< Port Id of slave eth_dev */
+
+ uint8_t link_status_poll_enabled;
+ uint8_t link_status_wait_to_complete;
+ uint8_t last_link_status;
+ struct ether_addr persisted_mac_addr;
+};
+
+/** Link Bonding PMD device private configuration Structure */
+struct bond_dev_private {
+ uint8_t port_id; /**< Port Id of Bonded Port */
+ uint8_t mode; /**< Link Bonding Mode */
+
+ rte_spinlock_t lock;
+
+ uint8_t primary_port; /**< Primary Slave Port */
+ uint8_t current_primary_port; /**< Currently active primary Slave Port */
+ uint8_t user_defined_primary_port;
+ /**< Flag for whether primary port is user defined or not */
+
+ uint8_t balance_xmit_policy;
+ /**< Transmit policy - l2 / l23 / l34 for operation in balance mode */
+ uint8_t user_defined_mac;
+ /**< Flag for whether MAC address is user defined or not */
+ uint8_t promiscuous_en;
+ /**< Enable/disable promiscuous mode on bonding device */
+ uint8_t link_props_set;
+ /**< flag to denote if the link properties are set */
+
+ uint8_t link_status_polling_enabled;
+ uint32_t link_status_polling_interval_ms;
+
+ uint32_t link_down_delay_ms;
+ uint32_t link_up_delay_ms;
+
+ uint16_t nb_rx_queues; /**< Total number of rx queues */
+ uint16_t nb_tx_queues; /**< Total number of tx queues*/
+
+ uint8_t active_slave_count; /**< Number of active slaves */
+ uint8_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */
+
+ uint8_t slave_count; /**< Number of bonded slaves */
+ struct bond_slave_details slaves[RTE_MAX_ETHPORTS];
+ /**< Array of bonded slave details */
+
+ struct mode8023ad_private mode4;
+
+ uint32_t rx_offload_capa; /**< Rx offload capability */
+ uint32_t tx_offload_capa; /**< Tx offload capability */
+
+ struct rte_kvargs *kvlist;
+ uint8_t slave_update_idx;
+};
+
+extern struct eth_dev_ops default_dev_ops;
+
+int
+valid_bonded_ethdev(struct rte_eth_dev *eth_dev);
+
+/* Search given slave array to find position of given id.
+ * Return slave position or slaves_count if not found. */
+static inline uint8_t
+find_slave_by_id(uint8_t *slaves, uint8_t slaves_count, uint8_t slave_id) {
+
+ uint8_t pos;
+ for (pos = 0; pos < slaves_count; pos++) {
+ if (slave_id == slaves[pos])
+ break;
+ }
+
+ return pos;
+}
+
+int
+valid_port_id(uint8_t port_id);
+
+int
+valid_bonded_port_id(uint8_t port_id);
+
+int
+valid_slave_port_id(uint8_t port_id);
+
+void
+deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id);
+
+void
+activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id);
+
+void
+link_properties_set(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_link *slave_dev_link);
+void
+link_properties_reset(struct rte_eth_dev *bonded_eth_dev);
+
+int
+link_properties_valid(struct rte_eth_link *bonded_dev_link,
+ struct rte_eth_link *slave_dev_link);
+
+int
+mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr);
+
+int
+mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr);
+
+int
+mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev);
+
+uint8_t
+number_of_sockets(void);
+
+int
+bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode);
+
+int
+slave_configure(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_dev *slave_eth_dev);
+
+void
+slave_remove(struct bond_dev_private *internals,
+ struct rte_eth_dev *slave_eth_dev);
+
+void
+slave_add(struct bond_dev_private *internals,
+ struct rte_eth_dev *slave_eth_dev);
+
+void
+bond_ethdev_primary_set(struct bond_dev_private *internals,
+ uint8_t slave_port_id);
+
+void
+bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+ void *param);
+
+int
+bond_ethdev_parse_slave_port_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_slave_mode_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_bond_mac_addr_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_time_ms_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/Makefile b/src/dpdk_lib18/librte_pmd_e1000/Makefile
new file mode 100755
index 00000000..14bc4a24
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/Makefile
@@ -0,0 +1,95 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_e1000.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+ifeq ($(CC), icc)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -wd177 -wd181 -wd188 -wd869 -wd2259
+else
+#
+# CFLAGS for gcc
+#
+CFLAGS_BASE_DRIVER = -Wno-uninitialized -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+endif
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(RTE_SDK)/lib/librte_pmd_e1000/e1000/*.c)))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(RTE_SDK)/lib/librte_pmd_e1000/e1000
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_80003es2lan.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82540.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82541.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82542.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82543.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82571.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82575.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_i210.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_ich8lan.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_manage.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_nvm.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_osdep.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_rxtx.c
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_net lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/README b/src/dpdk_lib18/librte_pmd_e1000/e1000/README
new file mode 100755
index 00000000..851e54e1
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/README
@@ -0,0 +1,39 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This directory contains the source code of the FreeBSD em & igb drivers,
+version cid-shared-code.2014.04.21, released by LAD. The sub-directory lad/
+contains the original source package.
+A few changes were made to the original FreeBSD sources to:
+- Adapt them for PMD usage mode:
+ e1000_osdep.c
+ e1000_osdep.h
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.c
new file mode 100755
index 00000000..72692d9e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.c
@@ -0,0 +1,1514 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
+ * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw);
+STATIC void e1000_release_phy_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw);
+STATIC void e1000_release_nvm_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset,
+ u16 *data);
+STATIC s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset,
+ u16 data);
+STATIC s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+STATIC s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
+STATIC void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+STATIC s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
+STATIC s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
+STATIC s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+STATIC s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data);
+STATIC void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
+STATIC void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+STATIC s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
+
+/* A table for the GG82563 cable length where the range is defined
+ * with a lower bound at "index" and the upper bound at
+ * "index + 5".
+ */
+STATIC const u16 e1000_gg82563_cable_length_table[] = {
+ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+#define GG82563_CABLE_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_gg82563_cable_length_table) / \
+ sizeof(e1000_gg82563_cable_length_table[0]))
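+
+/* Illustrative example: a DSP distance index of 2 selects the range
+ * table[2] .. table[2 + 5], i.e. a minimum of 115 and a maximum of 150
+ * (values presumably in meters); the reported cable_length is the
+ * midpoint of that range.
+ */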
+
+/**
+ * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_phy_params_80003es2lan");
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ return E1000_SUCCESS;
+ } else {
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
+ }
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+ phy->type = e1000_phy_gg82563;
+
+ phy->ops.acquire = e1000_acquire_phy_80003es2lan;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.check_reset_block = e1000_check_reset_block_generic;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_80003es2lan;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.release = e1000_release_phy_80003es2lan;
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
+
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan;
+ phy->ops.get_cable_length = e1000_get_cable_length_80003es2lan;
+ phy->ops.read_reg = e1000_read_phy_reg_gg82563_80003es2lan;
+ phy->ops.write_reg = e1000_write_phy_reg_gg82563_80003es2lan;
+
+ phy->ops.cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan;
+
+ /* This can only be done after all function pointers are setup. */
+ ret_val = e1000_get_phy_id(hw);
+
+ /* Verify phy id */
+ if (phy->id != GG82563_E_PHY_ID)
+ return -E1000_ERR_PHY;
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u16 size;
+
+ DEBUGFUNC("e1000_init_nvm_params_80003es2lan");
+
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+ break;
+ }
+
+ nvm->type = e1000_nvm_eeprom_spi;
+
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+
+ /* Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* EEPROM access above 16k is unsupported */
+ if (size > 14)
+ size = 14;
+ nvm->word_size = 1 << size;
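+ /* Worked example (assuming the usual NVM_WORD_SIZE_BASE_SHIFT of 6 in
+ * this shared code): an EECD size field of 1 gives size = 1 + 6 = 7,
+ * so word_size = 1 << 7 = 128 words; the cap at 14 limits word_size
+ * to 16K words.
+ */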
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_80003es2lan;
+ nvm->ops.read = e1000_read_nvm_eerd;
+ nvm->ops.release = e1000_release_nvm_80003es2lan;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_80003es2lan;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_init_mac_params_80003es2lan");
+
+ /* Set media type and media-dependent function pointers */
+ switch (hw->device_id) {
+ case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ mac->ops.check_for_link = e1000_check_for_serdes_link_generic;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_generic;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ mac->ops.check_for_link = e1000_check_for_copper_link_generic;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_80003es2lan;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid = !!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_FWSM_MODE_MASK);
+ /* Adaptive IFS not supported */
+ mac->adaptive_ifs = false;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_80003es2lan;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_80003es2lan;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_generic;
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_80003es2lan;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* blink LED */
+ mac->ops.blink_led = e1000_blink_led_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_generic;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_80003es2lan;
+
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_80003es2lan - Init ESB2 func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_80003es2lan");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_80003es2lan;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_80003es2lan;
+ hw->phy.ops.init_params = e1000_init_phy_params_80003es2lan;
+}
+
+/**
+ * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to acquire access rights to the correct PHY.
+ **/
+STATIC s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ DEBUGFUNC("e1000_acquire_phy_80003es2lan");
+
+ mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_release_phy_80003es2lan - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY.
+ **/
+STATIC void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ DEBUGFUNC("e1000_release_phy_80003es2lan");
+
+ mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the semaphore to access the Kumeran interface.
+ *
+ **/
+STATIC s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ DEBUGFUNC("e1000_acquire_mac_csr_80003es2lan");
+
+ mask = E1000_SWFW_CSR_SM;
+
+ return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register
+ * @hw: pointer to the HW structure
+ *
+ * Release the semaphore used to access the Kumeran interface
+ **/
+STATIC void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ DEBUGFUNC("e1000_release_mac_csr_80003es2lan");
+
+ mask = E1000_SWFW_CSR_SM;
+
+ e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the semaphore to access the EEPROM.
+ **/
+STATIC s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_acquire_nvm_80003es2lan");
+
+ ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_acquire_nvm_generic(hw);
+
+ if (ret_val)
+ e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
+ * @hw: pointer to the HW structure
+ *
+ * Release the semaphore used to access the EEPROM.
+ **/
+STATIC void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_80003es2lan");
+
+ e1000_release_nvm_generic(hw);
+ e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+STATIC s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 i = 0;
+ s32 timeout = 50;
+
+ DEBUGFUNC("e1000_acquire_swfw_sync_80003es2lan");
+
+ while (i < timeout) {
+ if (e1000_get_hw_semaphore_generic(hw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /* Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ return -E1000_ERR_SWFW_SYNC;
+ }
+
+ swfw_sync |= swmask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+STATIC void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ DEBUGFUNC("e1000_release_swfw_sync_80003es2lan");
+
+ while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
+ ; /* Empty */
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to read
+ * @data: pointer to the data returned from the operation
+ *
+ * Read the GG82563 PHY register.
+ **/
+STATIC s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u32 page_select;
+ u16 temp;
+
+ DEBUGFUNC("e1000_read_phy_reg_gg82563_80003es2lan");
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Select Configuration Page */
+ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ page_select = GG82563_PHY_PAGE_SELECT;
+ } else {
+ /* Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ page_select = GG82563_PHY_PAGE_SELECT_ALT;
+ }
+
+ temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+ ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp);
+ if (ret_val) {
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ if (hw->dev_spec._80003es2lan.mdic_wa_enable) {
+ /* The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ usec_delay(200);
+
+ /* ...and verify the command was successful. */
+ ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
+
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ e1000_release_phy_80003es2lan(hw);
+ return -E1000_ERR_PHY;
+ }
+
+ usec_delay(200);
+
+ ret_val = e1000_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ usec_delay(200);
+ } else {
+ ret_val = e1000_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
+
+ e1000_release_phy_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to write to
+ * @data: value to write to the register
+ *
+ * Write to the GG82563 PHY register.
+ **/
+STATIC s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset, u16 data)
+{
+ s32 ret_val;
+ u32 page_select;
+ u16 temp;
+
+ DEBUGFUNC("e1000_write_phy_reg_gg82563_80003es2lan");
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Select Configuration Page */
+ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ page_select = GG82563_PHY_PAGE_SELECT;
+ } else {
+ /* Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ page_select = GG82563_PHY_PAGE_SELECT_ALT;
+ }
+
+ temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+ ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp);
+ if (ret_val) {
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ if (hw->dev_spec._80003es2lan.mdic_wa_enable) {
+ /* The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ usec_delay(200);
+
+ /* ...and verify the command was successful. */
+ ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
+
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ e1000_release_phy_80003es2lan(hw);
+ return -E1000_ERR_PHY;
+ }
+
+ usec_delay(200);
+
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ usec_delay(200);
+ } else {
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
+
+ e1000_release_phy_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_80003es2lan - Write to ESB2 NVM
+ * @hw: pointer to the HW structure
+ * @offset: offset within the NVM to write to
+ * @words: number of words to write
+ * @data: buffer of data to write to the NVM
+ *
+ * Write "words" of data to the ESB2 NVM.
+ **/
+STATIC s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ DEBUGFUNC("e1000_write_nvm_80003es2lan");
+
+ return e1000_write_nvm_spi(hw, offset, words, data);
+}
+
+/**
+ * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
+ * @hw: pointer to the HW structure
+ *
+ * Wait a specific amount of time for manageability processes to complete.
+ * This is a function pointer entry point called by the phy module.
+ **/
+STATIC s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ DEBUGFUNC("e1000_get_cfg_done_80003es2lan");
+
+ if (hw->bus.func == 1)
+ mask = E1000_NVM_CFG_DONE_PORT_1;
+
+ while (timeout) {
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+ break;
+ msec_delay(1);
+ timeout--;
+ }
+ if (!timeout) {
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
+ * @hw: pointer to the HW structure
+ *
+ * Force the speed and duplex settings onto the PHY. This is a
+ * function pointer entry point called by the phy module.
+ **/
+STATIC s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_80003es2lan");
+
+ if (!(hw->phy.ops.read_reg))
+ return E1000_SUCCESS;
+
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("GG82563 PSCR: %X\n", phy_data);
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ /* Reset the phy to commit changes. */
+ phy_data |= MII_CR_RESET;
+
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ usec_delay(1);
+
+ if (hw->phy.autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on GG82563 phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ /* We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = e1000_phy_reset_dsp_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Resetting the phy means we need to verify the TX_CLK corresponds
+ * to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
+ */
+ phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+ if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
+ phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
+ else
+ phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
+
+ /* In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+ phy_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_80003es2lan - Set approximate cable length
+ * @hw: pointer to the HW structure
+ *
+ * Find the approximate cable length as measured by the GG82563 PHY.
+ * This is a function pointer entry point called by the phy module.
+ **/
+STATIC s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, index;
+
+ DEBUGFUNC("e1000_get_cable_length_80003es2lan");
+
+ if (!(hw->phy.ops.read_reg))
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ index = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+ if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5)
+ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_gg82563_cable_length_table[index];
+ phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_link_up_info_80003es2lan - Report speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to speed buffer
+ * @duplex: pointer to duplex buffer
+ *
+ * Retrieve the current speed and duplex configuration.
+ **/
+STATIC s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_link_up_info_80003es2lan");
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
+ duplex);
+ hw->phy.ops.cfg_on_link_up(hw);
+ } else {
+ ret_val = e1000_get_speed_and_duplex_fiber_serdes_generic(hw,
+ speed,
+ duplex);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_80003es2lan - Reset the ESB2 controller
+ * @hw: pointer to the HW structure
+ *
+ * Perform a global reset to the ESB2 controller.
+ **/
+STATIC s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 kum_reg_data;
+
+ DEBUGFUNC("e1000_reset_hw_80003es2lan");
+
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT("Issuing a global reset to MAC\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ e1000_release_phy_80003es2lan(hw);
+
+ /* Disable IBIST slave mode (far-end loopback) */
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data);
+ if (ret_val)
+ return ret_val;
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val)
+ /* We don't want to continue accessing MAC registers. */
+ return ret_val;
+
+ /* Clear any pending interrupt events. */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ return e1000_check_alt_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_init_hw_80003es2lan - Initialize the ESB2 controller
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
+ **/
+STATIC s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 reg_data;
+ s32 ret_val;
+ u16 kum_reg_data;
+ u16 i;
+
+ DEBUGFUNC("e1000_init_hw_80003es2lan");
+
+ e1000_initialize_hw_bits_80003es2lan(hw);
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
+ if (ret_val)
+ DEBUGOUT("Error initializing identification LED\n");
+
+ /* Disabling VLAN filtering */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable IBIST slave mode (far-end loopback) */
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+
+ /* Set the transmit descriptor write-back policy */
+ reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data);
+
+ /* ...for both queues. */
+ reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);
+
+ /* Enable retransmit on late collisions */
+ reg_data = E1000_READ_REG(hw, E1000_TCTL);
+ reg_data |= E1000_TCTL_RTLC;
+ E1000_WRITE_REG(hw, E1000_TCTL, reg_data);
+
+ /* Configure Gigabit Carry Extend Padding */
+ reg_data = E1000_READ_REG(hw, E1000_TCTL_EXT);
+ reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+ reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
+ E1000_WRITE_REG(hw, E1000_TCTL_EXT, reg_data);
+
+ /* Configure Transmit Inter-Packet Gap */
+ reg_data = E1000_READ_REG(hw, E1000_TIPG);
+ reg_data &= ~E1000_TIPG_IPGT_MASK;
+ reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+ E1000_WRITE_REG(hw, E1000_TIPG, reg_data);
+
+ reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
+ reg_data &= ~0x00100000;
+ E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
+
+ /* default to true to enable the MDIC W/A */
+ hw->dev_spec._80003es2lan.mdic_wa_enable = true;
+
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >>
+ E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
+ if (!ret_val) {
+ if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
+ E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+ hw->dev_spec._80003es2lan.mdic_wa_enable = false;
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Initializes required hardware-dependent bits needed for normal operation.
+ **/
+STATIC void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_initialize_hw_bits_80003es2lan");
+
+ /* Transmit Descriptor Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TARC(0));
+ reg &= ~(0xF << 27); /* 30:27 */
+ if (hw->phy.media_type != e1000_media_type_copper)
+ reg &= ~(1 << 20);
+ E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TARC(1));
+ if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+
+ /* Disable IPv6 extension header parsing because some malformed
+ * IPv6 headers can hang the Rx.
+ */
+ reg = E1000_READ_REG(hw, E1000_RFCTL);
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+ E1000_WRITE_REG(hw, E1000_RFCTL, reg);
+
+ return;
+}
+
+/**
+ * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
+ * @hw: pointer to the HW structure
+ *
+ * Setup some GG82563 PHY registers for obtaining link
+ **/
+STATIC s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 reg;
+ u16 data;
+
+ DEBUGFUNC("e1000_copper_link_setup_gg82563_80003es2lan");
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ /* Use 25MHz for both link down and 1000Base-T for Tx clock. */
+ data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
+
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+ switch (phy->mdix) {
+ case 1:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+ break;
+ case 2:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+ break;
+ case 0:
+ default:
+ data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+ if (phy->disable_polarity_correction)
+ data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ /* Bypass Rx and Tx FIFO's */
+ reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL;
+ data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+ E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
+ if (ret_val)
+ return ret_val;
+
+ reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data);
+ if (ret_val)
+ return ret_val;
+ data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL_2, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL_2, data);
+ if (ret_val)
+ return ret_val;
+
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* Do not init these registers when the HW is in IAMT mode, since the
+ * firmware will have already initialized them. We only initialize
+ * them if the HW is not in IAMT mode.
+ */
+ if (!hw->mac.ops.check_mng_mode(hw)) {
+ /* Enable Electrical Idle on the PHY */
+ data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Workaround: Disable padding in Kumeran interface in the MAC
+ * and in the PHY to avoid CRC errors.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_INBAND_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= GG82563_ICR_DIS_PADDING;
+ ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_INBAND_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Essentially a wrapper for setting up all things "copper" related.
+ * This is a function pointer entry point called by the mac module.
+ **/
+STATIC s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 reg_data;
+
+ DEBUGFUNC("e1000_setup_copper_link_80003es2lan");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Set the mac to wait the maximum time between each
+ * iteration and increase the max iterations when
+ * polling the phy; this fixes erroneous timeouts at 10Mbps.
+ */
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
+ 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ reg_data);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_setup_copper_link_generic(hw);
+}
+
+/**
+ * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 or gigabit operation, based on the negotiated speed.
+ **/
+STATIC s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 speed;
+ u16 duplex;
+
+ DEBUGFUNC("e1000_configure_on_link_up");
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, &speed,
+ &duplex);
+ if (ret_val)
+ return ret_val;
+
+ if (speed == SPEED_1000)
+ ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+ else
+ ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
+ **/
+STATIC s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+ s32 ret_val;
+ u32 tipg;
+ u32 i = 0;
+ u16 reg_data, reg_data2;
+
+ DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = E1000_READ_REG(hw, E1000_TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+ E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
+ do {
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ if (duplex == HALF_DUPLEX)
+ reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ else
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
+
+/**
+ * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * gigabit operation.
+ **/
+STATIC s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 reg_data, reg_data2;
+ u32 tipg;
+ u32 i = 0;
+
+ DEBUGFUNC("e1000_configure_kmrn_for_1000");
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = E1000_READ_REG(hw, E1000_TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+ E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
+ do {
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
+
+/**
+ * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquire semaphore, then read the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release the semaphore before exiting.
+ **/
+STATIC s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_kmrn_reg_80003es2lan");
+
+ ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ e1000_release_mac_csr_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquire semaphore, then write the data to PHY register
+ * at the offset using the kumeran interface. Release semaphore
+ * before exiting.
+ **/
+STATIC s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_write_kmrn_reg_80003es2lan");
+
+ ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | data;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ e1000_release_mac_csr_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_mac_addr_80003es2lan - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_mac_addr_80003es2lan");
+
+ /* If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during
+ * a driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(hw->mac.ops.check_mng_mode(hw) ||
+ hw->phy.ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_80003es2lan");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+
+ E1000_READ_REG(hw, E1000_IAC);
+ E1000_READ_REG(hw, E1000_ICRXOC);
+
+ E1000_READ_REG(hw, E1000_ICRXPTC);
+ E1000_READ_REG(hw, E1000_ICRXATC);
+ E1000_READ_REG(hw, E1000_ICTXPTC);
+ E1000_READ_REG(hw, E1000_ICTXATC);
+ E1000_READ_REG(hw, E1000_ICTXQEC);
+ E1000_READ_REG(hw, E1000_ICTXQMTC);
+ E1000_READ_REG(hw, E1000_ICRXDMTC);
+}
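+
+/* Illustrative usage sketch only (not part of the original shared code):
+ * one way a caller could bring up an ESB2 device through the function
+ * pointers installed by e1000_init_function_pointers_80003es2lan() above.
+ * The esb2_bring_up() name is hypothetical; it is compiled out by default.
+ */
+#ifdef E1000_80003ES2LAN_USAGE_SKETCH
+STATIC s32 esb2_bring_up(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ /* Install the 80003es2lan-specific init entry points. */
+ e1000_init_function_pointers_80003es2lan(hw);
+
+ /* Populate MAC, NVM and PHY parameters and ops for this device. */
+ ret_val = hw->mac.ops.init_params(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->nvm.ops.init_params(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.init_params(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Reset, then initialize hw bits, LED, VFTA, MTA, link and counters. */
+ ret_val = hw->mac.ops.reset_hw(hw);
+ if (ret_val)
+ return ret_val;
+ return hw->mac.ops.init_hw(hw);
+}
+#endif /* E1000_80003ES2LAN_USAGE_SKETCH */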
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.h
new file mode 100755
index 00000000..f5fe9677
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_80003es2lan.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_80003ES2LAN_H_
+#define _E1000_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26)
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-100M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY 0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+/* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82540.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82540.c
new file mode 100755
index 00000000..fc1fa946
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82540.c
@@ -0,0 +1,717 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/*
+ * 82540EM Gigabit Ethernet Controller
+ * 82540EP Gigabit Ethernet Controller
+ * 82545EM Gigabit Ethernet Controller (Copper)
+ * 82545EM Gigabit Ethernet Controller (Fiber)
+ * 82545GM Gigabit Ethernet Controller
+ * 82546EB Gigabit Ethernet Controller (Copper)
+ * 82546EB Gigabit Ethernet Controller (Fiber)
+ * 82546GB Gigabit Ethernet Controller
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_init_phy_params_82540(struct e1000_hw *hw);
+STATIC s32 e1000_init_nvm_params_82540(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_82540(struct e1000_hw *hw);
+STATIC s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw);
+STATIC void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_82540(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_82540(struct e1000_hw *hw);
+STATIC s32 e1000_set_phy_mode_82540(struct e1000_hw *hw);
+STATIC s32 e1000_set_vco_speed_82540(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_82540(struct e1000_hw *hw);
+STATIC s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_82540(struct e1000_hw *hw);
+STATIC s32 e1000_read_mac_addr_82540(struct e1000_hw *hw);
+
+/**
+ * e1000_init_phy_params_82540 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82540(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 10000;
+ phy->type = e1000_phy_m88;
+
+ /* Function Pointers */
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.read_reg = e1000_read_phy_reg_m88;
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ phy->ops.write_reg = e1000_write_phy_reg_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82540;
+
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+
+ /* Verify phy id */
+ switch (hw->mac.type) {
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if (phy->id == M88E1011_I_PHY_ID)
+ break;
+ /* Fall Through */
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ break;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82540 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_82540(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ DEBUGFUNC("e1000_init_nvm_params_82540");
+
+ nvm->type = e1000_nvm_eeprom_microwire;
+ nvm->delay_usec = 50;
+ nvm->opcode_bits = 3;
+ switch (nvm->override) {
+ case e1000_nvm_override_microwire_large:
+ nvm->address_bits = 8;
+ nvm->word_size = 256;
+ break;
+ case e1000_nvm_override_microwire_small:
+ nvm->address_bits = 6;
+ nvm->word_size = 64;
+ break;
+ default:
+ nvm->address_bits = eecd & E1000_EECD_SIZE ? 8 : 6;
+ nvm->word_size = eecd & E1000_EECD_SIZE ? 256 : 64;
+ break;
+ }
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_generic;
+ nvm->ops.read = e1000_read_nvm_microwire;
+ nvm->ops.release = e1000_release_nvm_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_microwire;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82540 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82540(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_init_mac_params_82540");
+
+ /* Set media type */
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82545EM_FIBER:
+ case E1000_DEV_ID_82545GM_FIBER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ hw->phy.media_type = e1000_media_type_fiber;
+ break;
+ case E1000_DEV_ID_82545GM_SERDES:
+ case E1000_DEV_ID_82546GB_SERDES:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pci_generic;
+ /* function id */
+ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_82540;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_82540;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_generic;
+ /* physical interface setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_setup_copper_link_82540
+ : e1000_setup_fiber_serdes_link_82540;
+ /* check for link */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ mac->ops.check_for_link = e1000_check_for_copper_link_generic;
+ break;
+ case e1000_media_type_fiber:
+ mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
+ break;
+ case e1000_media_type_internal_serdes:
+ mac->ops.check_for_link = e1000_check_for_serdes_link_generic;
+ break;
+ default:
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ break;
+ }
+ /* link info */
+ mac->ops.get_link_up_info =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_get_speed_and_duplex_copper_generic
+ : e1000_get_speed_and_duplex_fiber_serdes_generic;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82540;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_generic;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82540;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_function_pointers_82540 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82540(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82540");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82540;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82540;
+ hw->phy.ops.init_params = e1000_init_phy_params_82540;
+}
+
+/**
+ * e1000_reset_hw_82540 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82540(struct e1000_hw *hw)
+{
+ u32 ctrl, manc;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_reset_hw_82540");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ /*
+ * Delay to allow any outstanding PCI transactions to complete
+ * before resetting the device.
+ */
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to 82540/82545/82546 MAC\n");
+ switch (hw->mac.type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ E1000_WRITE_REG(hw, E1000_CTRL_DUP, ctrl | E1000_CTRL_RST);
+ break;
+ default:
+ /*
+ * These controllers can't ack the 64-bit write when
+ * issuing the reset, so we use IO-mapping as a
+ * workaround to issue the reset.
+ */
+ E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ break;
+ }
+
+ /* Wait for EEPROM reload */
+ msec_delay(5);
+
+ /* Disable HW ARPs on ASF enabled adapters */
+ manc = E1000_READ_REG(hw, E1000_MANC);
+ manc &= ~E1000_MANC_ARP_EN;
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+
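+ /* Mask off all interrupts again and clear any pending interrupt events */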
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_82540 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+STATIC s32 e1000_init_hw_82540(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 txdctl, ctrl_ext;
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_init_hw_82540");
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ if (ret_val) {
+ DEBUGOUT("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+ }
+
+ /* Disabling VLAN filtering */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ if (mac->type < e1000_82545_rev_3)
+ E1000_WRITE_REG(hw, E1000_VET, 0);
+
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+ /*
+ * Avoid back to back register writes by adding the register
+ * read (flush). This is to protect against some strange
+ * bridge configurations that may issue Memory Write Block
+ * (MWB) to our register space. The *_rev_3 hardware at
+ * least doesn't respond correctly to every other dword in an
+ * MWB to our register space.
+ */
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ if (mac->type < e1000_82545_rev_3)
+ e1000_pcix_mmrbc_workaround_generic(hw);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
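+ /* Use full transmit descriptor write-back rather than a write-back threshold */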
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB;
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82540(hw);
+
+ if ((hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER) ||
+ (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3)) {
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /*
+ * Relaxed ordering must be disabled to avoid a parity
+ * error crash in a PCI slot.
+ */
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_82540 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link; once link is established, calls
+ * to configure collision distance and flow control are made. If link is
+ * not established, we return -E1000_ERR_PHY (-2).
+ **/
+STATIC s32 e1000_setup_copper_link_82540(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_setup_copper_link_82540");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ ret_val = e1000_set_phy_mode_82540(hw);
+ if (ret_val)
+ goto out;
+
+ if (hw->mac.type == e1000_82545_rev_3 ||
+ hw->mac.type == e1000_82546_rev_3) {
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &data);
+ if (ret_val)
+ goto out;
+ data |= 0x00000008;
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ data);
+ if (ret_val)
+ goto out;
+ }
+
+ ret_val = e1000_copper_link_setup_m88(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link_82540 - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Set the output amplitude to the value in the EEPROM and adjust the VCO
+ * speed to improve Bit Error Rate (BER) performance. Configures collision
+ * distance and flow control for fiber and serdes links. Upon successful
+ * setup, poll for link.
+ **/
+STATIC s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_setup_fiber_serdes_link_82540");
+
+ switch (mac->type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+ /*
+ * If we're on serdes media, adjust the output
+ * amplitude to the value set in the EEPROM.
+ */
+ ret_val = e1000_adjust_serdes_amplitude_82540(hw);
+ if (ret_val)
+ goto out;
+ }
+ /* Adjust VCO speed to improve BER performance */
+ ret_val = e1000_set_vco_speed_82540(hw);
+ if (ret_val)
+ goto out;
+ default:
+ break;
+ }
+
+ ret_val = e1000_setup_fiber_serdes_link_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_adjust_serdes_amplitude_82540 - Adjust amplitude based on EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Adjust the SERDES output amplitude based on the EEPROM settings.
+ **/
+STATIC s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 nvm_data;
+
+ DEBUGFUNC("e1000_adjust_serdes_amplitude_82540");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_SERDES_AMPLITUDE, 1, &nvm_data);
+ if (ret_val)
+ goto out;
+
+ if (nvm_data != NVM_RESERVED_WORD) {
+ /* Adjust serdes output amplitude only. */
+ nvm_data &= NVM_SERDES_AMPLITUDE_MASK;
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_EXT_CTRL,
+ nvm_data);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_vco_speed_82540 - Set VCO speed for better performance
+ * @hw: pointer to the HW structure
+ *
+ * Set the VCO speed to improve Bit Error Rate (BER) performance.
+ **/
+STATIC s32 e1000_set_vco_speed_82540(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 default_page = 0;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_set_vco_speed_82540");
+
+ /* Set PHY register 30, page 5, bit 8 to 0 */
+
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Set PHY register 30, page 4, bit 11 to 1 */
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT,
+ default_page);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_phy_mode_82540 - Set PHY to class A mode
+ * @hw: pointer to the HW structure
+ *
+ * Sets the PHY to class A mode and assumes the following operations will
+ * follow to enable the new class mode:
+ * 1. Do a PHY soft reset.
+ * 2. Restart auto-negotiation or force link.
+ **/
+STATIC s32 e1000_set_phy_mode_82540(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 nvm_data;
+
+ DEBUGFUNC("e1000_set_phy_mode_82540");
+
+ if (hw->mac.type != e1000_82545_rev_3)
+ goto out;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PHY_CLASS_WORD, 1, &nvm_data);
+ if (ret_val) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ if ((nvm_data != NVM_RESERVED_WORD) && (nvm_data & NVM_PHY_CLASS_A)) {
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT,
+ 0x000B);
+ if (ret_val) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL,
+ 0x8104);
+ if (ret_val) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_82540 - Remove link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off the link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_82540(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82540 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82540");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+}
+
+/**
+ * e1000_read_mac_addr_82540 - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ * Since devices with two ports use the same EEPROM, we increment the
+ * last bit in the MAC address for the second port.
+ *
+ * This version is used instead of the generic one because of customer
+ * issues with VMware and VirtualBox when the generic version is used.
+ * It seems that in the emulated 82545, RAR[0] does NOT hold a valid
+ * address after a reset; this older method works, and using it breaks
+ * nothing for these legacy adapters.
+ **/
+s32 e1000_read_mac_addr_82540(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 offset, nvm_data, i;
+
+ DEBUGFUNC("e1000_read_mac_addr");
+
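+ /*
+ * Each 16-bit NVM word holds two MAC address bytes, low byte first,
+ * so word (i >> 1) provides bytes i and i + 1 of the address.
+ */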
+ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+ offset = i >> 1;
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+ hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
+ hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
+ }
+
+ /* Flip last bit of mac address if we're on second port */
+ if (hw->bus.func == E1000_FUNC_1)
+ hw->mac.perm_addr[5] ^= 1;
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+out:
+ return ret_val;
+}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.c
new file mode 100755
index 00000000..952aea28
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.c
@@ -0,0 +1,1268 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/*
+ * 82541EI Gigabit Ethernet Controller
+ * 82541ER Gigabit Ethernet Controller
+ * 82541GI Gigabit Ethernet Controller
+ * 82541PI Gigabit Ethernet Controller
+ * 82547EI Gigabit Ethernet Controller
+ * 82547GI Gigabit Ethernet Controller
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_init_phy_params_82541(struct e1000_hw *hw);
+STATIC s32 e1000_init_nvm_params_82541(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_82541(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_82541(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_82541(struct e1000_hw *hw);
+STATIC s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+STATIC s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_82541(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_link_82541(struct e1000_hw *hw);
+STATIC s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw);
+STATIC s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_setup_led_82541(struct e1000_hw *hw);
+STATIC s32 e1000_cleanup_led_82541(struct e1000_hw *hw);
+STATIC void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw);
+STATIC s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
+ bool link_up);
+STATIC s32 e1000_phy_init_script_82541(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_82541(struct e1000_hw *hw);
+
+STATIC const u16 e1000_igp_cable_length_table[] = {
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10,
+ 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, 25, 25, 25, 25, 30, 30, 30, 30,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 50, 50, 50, 50, 50, 50, 50, 60, 60,
+ 60, 60, 60, 60, 60, 60, 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80,
+ 80, 90, 90, 90, 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 110, 110, 110, 110, 110, 110,
+ 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 120, 120,
+ 120, 120, 120, 120, 120, 120, 120, 120};
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_igp_cable_length_table) / \
+ sizeof(e1000_igp_cable_length_table[0]))
+
+/**
+ * e1000_init_phy_params_82541 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82541(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_phy_params_82541");
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 10000;
+ phy->type = e1000_phy_igp;
+
+ /* Function Pointers */
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_82541;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.reset = e1000_phy_hw_reset_82541;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82541;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82541;
+
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+
+ /* Verify phy id */
+ if (phy->id != IGP01E1000_I_PHY_ID) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82541 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_82541(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = E1000_SUCCESS;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u16 size;
+
+ DEBUGFUNC("e1000_init_nvm_params_82541");
+
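+ /*
+ * Honor any user override of the EEPROM type and size; otherwise let
+ * the EECD type bit choose between SPI and Microwire.
+ */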
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->type = e1000_nvm_eeprom_spi;
+ eecd |= E1000_EECD_ADDR_BITS;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->type = e1000_nvm_eeprom_spi;
+ eecd &= ~E1000_EECD_ADDR_BITS;
+ break;
+ case e1000_nvm_override_microwire_large:
+ nvm->type = e1000_nvm_eeprom_microwire;
+ eecd |= E1000_EECD_SIZE;
+ break;
+ case e1000_nvm_override_microwire_small:
+ nvm->type = e1000_nvm_eeprom_microwire;
+ eecd &= ~E1000_EECD_SIZE;
+ break;
+ default:
+ nvm->type = eecd & E1000_EECD_TYPE ? e1000_nvm_eeprom_spi
+ : e1000_nvm_eeprom_microwire;
+ break;
+ }
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 16 : 8;
+ nvm->delay_usec = 1;
+ nvm->opcode_bits = 8;
+ nvm->page_size = (eecd & E1000_EECD_ADDR_BITS) ? 32 : 8;
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_generic;
+ nvm->ops.read = e1000_read_nvm_spi;
+ nvm->ops.release = e1000_release_nvm_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_spi;
+
+ /*
+ * nvm->word_size must be discovered after the pointers
+ * are set so we can verify the size from the nvm image
+ * itself. Temporarily set it to a dummy value so the
+ * read will work.
+ */
+ nvm->word_size = 64;
+ ret_val = nvm->ops.read(hw, NVM_CFG, 1, &size);
+ if (ret_val)
+ goto out;
+ size = (size & NVM_SIZE_MASK) >> NVM_SIZE_SHIFT;
+ /*
+ * if size != 0, it can be added to a constant and become
+ * the left-shift value to set the word_size. Otherwise,
+ * word_size stays at 64.
+ */
+ if (size) {
+ size += NVM_WORD_SIZE_BASE_SHIFT_82541;
+ nvm->word_size = 1 << size;
+ }
+ } else {
+ nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 8 : 6;
+ nvm->delay_usec = 50;
+ nvm->opcode_bits = 3;
+ nvm->word_size = (eecd & E1000_EECD_ADDR_BITS) ? 256 : 64;
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_generic;
+ nvm->ops.read = e1000_read_nvm_microwire;
+ nvm->ops.release = e1000_release_nvm_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_microwire;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mac_params_82541 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82541(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_init_mac_params_82541");
+
+ /* Set media type */
+ hw->phy.media_type = e1000_media_type_copper;
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+
+ /* Function Pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pci_generic;
+ /* function id */
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_82541;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_82541;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_generic;
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface = e1000_setup_copper_link_82541;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_link_82541;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_82541;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_82541;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_82541;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82541;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_82541 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82541(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82541");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82541;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82541;
+ hw->phy.ops.init_params = e1000_init_phy_params_82541;
+}
+
+/**
+ * e1000_reset_hw_82541 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82541(struct e1000_hw *hw)
+{
+ u32 ledctl, ctrl, manc;
+
+ DEBUGFUNC("e1000_reset_hw_82541");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ /*
+ * Delay to allow any outstanding PCI transactions to complete
+ * before resetting the device.
+ */
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Must reset the Phy before resetting the MAC */
+ if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(5);
+ }
+
+ DEBUGOUT("Issuing a global reset to 82541/82547 MAC\n");
+ switch (hw->mac.type) {
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ /*
+ * These controllers can't ack the 64-bit write when
+ * issuing the reset, so we use IO-mapping as a
+ * workaround to issue the reset.
+ */
+ E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ break;
+ default:
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ break;
+ }
+
+ /* Wait for NVM reload */
+ msec_delay(20);
+
+ /* Disable HW ARPs on ASF enabled adapters */
+ manc = E1000_READ_REG(hw, E1000_MANC);
+ manc &= ~E1000_MANC_ARP_EN;
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+
+ if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+ e1000_phy_init_script_82541(hw);
+
+ /* Configure activity LED after Phy reset */
+ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+ ledctl &= IGP_ACTIVITY_LED_MASK;
+ ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+ }
+
+ /* Once again, mask the interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+ /* Clear any pending interrupt events. */
+ E1000_READ_REG(hw, E1000_ICR);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_hw_82541 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+STATIC s32 e1000_init_hw_82541(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ u32 i, txdctl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_hw_82541");
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ if (ret_val) {
+ DEBUGOUT("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+ }
+
+ /* Storing the Speed Power Down value for later use */
+ ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO,
+ &dev_spec->spd_default);
+ if (ret_val)
+ goto out;
+
+ /* Disabling VLAN filtering */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+ /*
+ * Avoid back to back register writes by adding the register
+ * read (flush). This is to protect against some strange
+ * bridge configurations that may issue Memory Write Block
+ * (MWB) to our register space.
+ */
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB;
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82541(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_link_up_info_82541 - Report speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to speed buffer
+ * @duplex: pointer to duplex buffer
+ *
+ * Retrieve the current speed and duplex configuration.
+ **/
+STATIC s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_get_link_up_info_82541");
+
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
+ if (ret_val)
+ goto out;
+
+ if (!phy->speed_downgraded)
+ goto out;
+
+ /*
+ * IGP01 PHY may advertise full duplex operation after speed
+ * downgrade even if it is operating at half duplex.
+ * Here we set the duplex settings to match the duplex in the
+ * link partner's capabilities.
+ */
+ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_EXP, &data);
+ if (ret_val)
+ goto out;
+
+ if (!(data & NWAY_ER_LP_NWAY_CAPS)) {
+ *duplex = HALF_DUPLEX;
+ } else {
+ ret_val = phy->ops.read_reg(hw, PHY_LP_ABILITY, &data);
+ if (ret_val)
+ goto out;
+
+ if (*speed == SPEED_100) {
+ if (!(data & NWAY_LPAR_100TX_FD_CAPS))
+ *duplex = HALF_DUPLEX;
+ } else if (*speed == SPEED_10) {
+ if (!(data & NWAY_LPAR_10T_FD_CAPS))
+ *duplex = HALF_DUPLEX;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_82541 - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+STATIC s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 ledctl;
+
+ DEBUGFUNC("e1000_phy_hw_reset_82541");
+
+ ret_val = e1000_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ e1000_phy_init_script_82541(hw);
+
+ if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+ /* Configure activity LED after PHY reset */
+ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+ ledctl &= IGP_ACTIVITY_LED_MASK;
+ ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_82541 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link; once link is established, calls
+ * to configure collision distance and flow control are made. If link is
+ * not established, we return -E1000_ERR_PHY (-2).
+ **/
+STATIC s32 e1000_setup_copper_link_82541(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ s32 ret_val;
+ u32 ctrl, ledctl;
+
+ DEBUGFUNC("e1000_setup_copper_link_82541");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+
+ /* Earlier revs of the IGP phy require us to force MDI. */
+ if (hw->mac.type == e1000_82541 || hw->mac.type == e1000_82547) {
+ dev_spec->dsp_config = e1000_dsp_config_disabled;
+ phy->mdix = 1;
+ } else {
+ dev_spec->dsp_config = e1000_dsp_config_enabled;
+ }
+
+ ret_val = e1000_copper_link_setup_igp(hw);
+ if (ret_val)
+ goto out;
+
+ if (hw->mac.autoneg) {
+ if (dev_spec->ffe_config == e1000_ffe_config_active)
+ dev_spec->ffe_config = e1000_ffe_config_enabled;
+ }
+
+ /* Configure activity LED after Phy reset */
+ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+ ledctl &= IGP_ACTIVITY_LED_MASK;
+ ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+
+ ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link_82541 - Check/Store link connection
+ * @hw: pointer to the HW structure
+ *
+ * This checks the link condition of the adapter and stores the
+ * results in the hw->mac structure.
+ **/
+STATIC s32 e1000_check_for_link_82541(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("e1000_check_for_link_82541");
+
+ /*
+ * We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ /*
+ * First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link) {
+ ret_val = e1000_config_dsp_after_link_change_82541(hw, false);
+ goto out; /* No link detected */
+ }
+
+ mac->get_link_status = false;
+
+ /*
+ * Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000_check_downshift_generic(hw);
+
+ /*
+ * If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ ret_val = e1000_config_dsp_after_link_change_82541(hw, true);
+
+ /*
+ * Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ mac->ops.config_collision_dist(hw);
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_config_dsp_after_link_change_82541 - Config DSP after link
+ * @hw: pointer to the HW structure
+ * @link_up: boolean flag for link up status
+ *
+ * Returns E1000_ERR_PHY when failing to read/write the PHY; otherwise returns
+ * E1000_SUCCESS.
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ **/
+STATIC s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
+ bool link_up)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ s32 ret_val;
+ u32 idle_errs = 0;
+ u16 phy_data, phy_saved_data, speed, duplex, i;
+ u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+ u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D};
+
+ DEBUGFUNC("e1000_config_dsp_after_link_change_82541");
+
+ if (link_up) {
+ ret_val = hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ goto out;
+ }
+
+ if (speed != SPEED_1000) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ ret_val = phy->ops.get_cable_length(hw);
+ if (ret_val)
+ goto out;
+
+ if ((dev_spec->dsp_config == e1000_dsp_config_enabled) &&
+ phy->min_cable_length >= 50) {
+
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = phy->ops.read_reg(hw,
+ dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+ ret_val = phy->ops.write_reg(hw,
+ dsp_reg_array[i],
+ phy_data);
+ if (ret_val)
+ goto out;
+ }
+ dev_spec->dsp_config = e1000_dsp_config_activated;
+ }
+
+ if ((dev_spec->ffe_config != e1000_ffe_config_enabled) ||
+ (phy->min_cable_length >= 50)) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ /* clear previous idle error counts */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
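+ /*
+ * Accumulate the idle error count roughly once per millisecond; if it
+ * becomes excessive, apply the FFE workaround. Any errors at all
+ * extend the polling window from 20 to 100 iterations.
+ */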
+ for (i = 0; i < ffe_idle_err_timeout; i++) {
+ usec_delay(1000);
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+ if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+ dev_spec->ffe_config = e1000_ffe_config_active;
+
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_CM_CP);
+ if (ret_val)
+ goto out;
+ break;
+ }
+
+ if (idle_errs)
+ ffe_idle_err_timeout =
+ FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+ }
+ } else {
+ if (dev_spec->dsp_config == e1000_dsp_config_activated) {
+ /*
+ * Save off the current value of register 0x2F5B
+ * to be restored at the end of the routines.
+ */
+ ret_val = phy->ops.read_reg(hw, 0x2F5B,
+ &phy_saved_data);
+ if (ret_val)
+ goto out;
+
+ /* Disable the PHY transmitter */
+ ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003);
+ if (ret_val)
+ goto out;
+
+ msec_delay_irq(20);
+
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIG);
+ if (ret_val)
+ goto out;
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = phy->ops.read_reg(hw,
+ dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+ phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+ ret_val = phy->ops.write_reg(hw,
+ dsp_reg_array[i],
+ phy_data);
+ if (ret_val)
+ goto out;
+ }
+
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ goto out;
+
+ msec_delay_irq(20);
+
+ /* Now enable the transmitter */
+ ret_val = phy->ops.write_reg(hw, 0x2F5B,
+ phy_saved_data);
+ if (ret_val)
+ goto out;
+
+ dev_spec->dsp_config = e1000_dsp_config_enabled;
+ }
+
+ if (dev_spec->ffe_config != e1000_ffe_config_active) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ /*
+ * Save off the current value of register 0x2F5B
+ * to be restored at the end of the routines.
+ */
+ ret_val = phy->ops.read_reg(hw, 0x2F5B, &phy_saved_data);
+ if (ret_val)
+ goto out;
+
+ /* Disable the PHY transmitter */
+ ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003);
+ if (ret_val)
+ goto out;
+
+ msec_delay_irq(20);
+
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIG);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_DEFAULT);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ goto out;
+
+ msec_delay_irq(20);
+
+ /* Now enable the transmitter */
+ ret_val = phy->ops.write_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ goto out;
+
+ dev_spec->ffe_config = e1000_ffe_config_enabled;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_igp_82541 - Determine cable length for igp PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+ * received signal, adjusting for the attenuation produced by the
+ * cable. By reading the AGC registers, which represent the
+ * combination of coarse and fine gain values, the value can be put
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ **/
+STATIC s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 i, data;
+ u16 cur_agc_value, agc_value = 0;
+ u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+ u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D};
+
+ DEBUGFUNC("e1000_get_cable_length_igp_82541");
+
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &data);
+ if (ret_val)
+ goto out;
+
+ cur_agc_value = data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+ /* Bounds checking */
+ if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+ (cur_agc_value == 0)) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ agc_value += cur_agc_value;
+
+ if (min_agc_value > cur_agc_value)
+ min_agc_value = cur_agc_value;
+ }
+
+ /* Remove the minimal AGC result for length < 50m */
+ if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * 50) {
+ agc_value -= min_agc_value;
+ /* Average the three remaining channels for the length. */
+ agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+ } else {
+ /* Average the channels for the length. */
+ agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+ }
+
+ phy->min_cable_length = (e1000_igp_cable_length_table[agc_value] >
+ IGP01E1000_AGC_RANGE)
+ ? (e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE)
+ : 0;
+ phy->max_cable_length = e1000_igp_cable_length_table[agc_value] +
+ IGP01E1000_AGC_RANGE;
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82541 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * When active is true, the low power link up (LPLU) state is set for power
+ * management level D3 and SmartSpeed is disabled; otherwise LPLU for D3 is
+ * cleared and SmartSpeed is enabled. LPLU and SmartSpeed are mutually
+ * exclusive. LPLU is used during Dx states where power conservation is most
+ * important. During driver activity, SmartSpeed should be enabled so
+ * performance is maintained.
+ **/
+STATIC s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_82541");
+
+ switch (hw->mac.type) {
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ break;
+ default:
+ ret_val = e1000_set_d3_lplu_state_generic(hw, active);
+ goto out;
+ break;
+ }
+
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_GMII_FIFO, &data);
+ if (ret_val)
+ goto out;
+
+ if (!active) {
+ data &= ~IGP01E1000_GMII_FLEX_SPD;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data);
+ if (ret_val)
+ goto out;
+
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= IGP01E1000_GMII_FLEX_SPD;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data);
+ if (ret_val)
+ goto out;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_led_82541 - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored.
+ **/
+STATIC s32 e1000_setup_led_82541(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_led_82541");
+
+ ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO,
+ &dev_spec->spd_default);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO,
+ (u16)(dev_spec->spd_default &
+ ~IGP01E1000_GMII_SPD));
+ if (ret_val)
+ goto out;
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_cleanup_led_82541 - Set LED config to default operation
+ * @hw: pointer to the HW structure
+ *
+ * Remove the current LED configuration and set the LED configuration
+ * to the default value, saved from the EEPROM.
+ **/
+STATIC s32 e1000_cleanup_led_82541(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_cleanup_led_82541");
+
+ ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO,
+ dev_spec->spd_default);
+ if (ret_val)
+ goto out;
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_init_script_82541 - Initialize GbE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the IGP PHY.
+ **/
+STATIC s32 e1000_phy_init_script_82541(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+ u32 ret_val;
+ u16 phy_saved_data;
+
+ DEBUGFUNC("e1000_phy_init_script_82541");
+
+ if (!dev_spec->phy_init_script) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ /* Delay after phy reset to enable NVM configuration to load */
+ msec_delay(20);
+
+ /*
+ * Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, 0x2F5B, &phy_saved_data);
+
+ /* Disable the PHY transmitter */
+ hw->phy.ops.write_reg(hw, 0x2F5B, 0x0003);
+
+ msec_delay(20);
+
+ hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
+
+ msec_delay(5);
+
+ switch (hw->mac.type) {
+ case e1000_82541:
+ case e1000_82547:
+ hw->phy.ops.write_reg(hw, 0x1F95, 0x0001);
+
+ hw->phy.ops.write_reg(hw, 0x1F71, 0xBD21);
+
+ hw->phy.ops.write_reg(hw, 0x1F79, 0x0018);
+
+ hw->phy.ops.write_reg(hw, 0x1F30, 0x1600);
+
+ hw->phy.ops.write_reg(hw, 0x1F31, 0x0014);
+
+ hw->phy.ops.write_reg(hw, 0x1F32, 0x161C);
+
+ hw->phy.ops.write_reg(hw, 0x1F94, 0x0003);
+
+ hw->phy.ops.write_reg(hw, 0x1F96, 0x003F);
+
+ hw->phy.ops.write_reg(hw, 0x2010, 0x0008);
+ break;
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->phy.ops.write_reg(hw, 0x1F73, 0x0099);
+ break;
+ default:
+ break;
+ }
+
+ hw->phy.ops.write_reg(hw, 0x0000, 0x3300);
+
+ msec_delay(20);
+
+ /* Now enable the transmitter */
+ hw->phy.ops.write_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (hw->mac.type == e1000_82547) {
+ u16 fused, fine, coarse;
+
+ /* Move to analog registers page */
+ hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
+ &fused);
+
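+ /*
+ * If the spare fuse is not enabled, derive adjusted coarse and fine
+ * analog trim values from the fuse status and take software control
+ * of the analog fuse settings.
+ */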
+ if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+ hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS,
+ &fused);
+
+ fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+ coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+ if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+ coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+ } else if (coarse ==
+ IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+ fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+ (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+ (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+ hw->phy.ops.write_reg(hw,
+ IGP01E1000_ANALOG_FUSE_CONTROL,
+ fused);
+ hw->phy.ops.write_reg(hw,
+ IGP01E1000_ANALOG_FUSE_BYPASS,
+ IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_script_state_82541 - Enable/Disable PHY init script
+ * @hw: pointer to the HW structure
+ * @state: boolean value used to enable/disable PHY init script
+ *
+ * Allows the driver to enable/disable the PHY init script, if the PHY is an
+ * IGP PHY.
+ **/
+void e1000_init_script_state_82541(struct e1000_hw *hw, bool state)
+{
+ struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
+
+ DEBUGFUNC("e1000_init_script_state_82541");
+
+ if (hw->phy.type != e1000_phy_igp) {
+ DEBUGOUT("Initialization script not necessary.\n");
+ goto out;
+ }
+
+ dev_spec->phy_init_script = state;
+
+out:
+ return;
+}
+
+/**
+ * e1000_power_down_phy_copper_82541 - Remove link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off the link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_82541(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82541 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82541");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.h
new file mode 100755
index 00000000..0f50f556
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82541.h
@@ -0,0 +1,91 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_82541_H_
+#define _E1000_82541_H_
+
+#define NVM_WORD_SIZE_BASE_SHIFT_82541 (NVM_WORD_SIZE_BASE_SHIFT + 1)
+
+#define IGP01E1000_PHY_CHANNEL_NUM 4
+
+#define IGP01E1000_PHY_AGC_A 0x1172
+#define IGP01E1000_PHY_AGC_B 0x1272
+#define IGP01E1000_PHY_AGC_C 0x1472
+#define IGP01E1000_PHY_AGC_D 0x1872
+
+#define IGP01E1000_PHY_AGC_PARAM_A 0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D 0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+
+#define IGP01E1000_PHY_DSP_RESET 0x1F33
+
+#define IGP01E1000_PHY_DSP_FFE 0x1F35
+#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A
+
+#define IGP01E1000_IEEE_FORCE_GIG 0x0140
+#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
+
+#define IGP01E1000_AGC_LENGTH_SHIFT 7
+#define IGP01E1000_AGC_RANGE 10
+
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
+
+#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1
+#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE
+
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002
+
+#define IGP01E1000_MSE_CHANNEL_D 0x000F
+#define IGP01E1000_MSE_CHANNEL_C 0x00F0
+#define IGP01E1000_MSE_CHANNEL_B 0x0F00
+#define IGP01E1000_MSE_CHANNEL_A 0xF000
+
+
+void e1000_init_script_state_82541(struct e1000_hw *hw, bool state);
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82542.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82542.c
new file mode 100755
index 00000000..afea4697
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82542.c
@@ -0,0 +1,588 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/*
+ * 82542 Gigabit Ethernet Controller
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_init_phy_params_82542(struct e1000_hw *hw);
+STATIC s32 e1000_init_nvm_params_82542(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_82542(struct e1000_hw *hw);
+STATIC s32 e1000_get_bus_info_82542(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_82542(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_82542(struct e1000_hw *hw);
+STATIC s32 e1000_setup_link_82542(struct e1000_hw *hw);
+STATIC s32 e1000_led_on_82542(struct e1000_hw *hw);
+STATIC s32 e1000_led_off_82542(struct e1000_hw *hw);
+STATIC void e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw);
+STATIC s32 e1000_read_mac_addr_82542(struct e1000_hw *hw);
+
+/**
+ * e1000_init_phy_params_82542 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82542(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_init_phy_params_82542");
+
+ phy->type = e1000_phy_none;
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82542 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_82542(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ DEBUGFUNC("e1000_init_nvm_params_82542");
+
+ nvm->address_bits = 6;
+ nvm->delay_usec = 50;
+ nvm->opcode_bits = 3;
+ nvm->type = e1000_nvm_eeprom_microwire;
+ nvm->word_size = 64;
+
+ /* Function Pointers */
+ nvm->ops.read = e1000_read_nvm_microwire;
+ nvm->ops.release = e1000_stop_nvm;
+ nvm->ops.write = e1000_write_nvm_microwire;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82542 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82542(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_init_mac_params_82542");
+
+ /* Set media type */
+ hw->phy.media_type = e1000_media_type_fiber;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_82542;
+ /* function id */
+ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_82542;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_82542;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_82542;
+ /* phy/fiber/serdes setup */
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_generic;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82542;
+ /* set RAR */
+ mac->ops.rar_set = e1000_rar_set_82542;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_82542;
+ mac->ops.led_off = e1000_led_off_82542;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82542;
+ /* link info */
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_fiber_serdes_generic;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_82542 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82542(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82542");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82542;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82542;
+ hw->phy.ops.init_params = e1000_init_phy_params_82542;
+}
+
+/**
+ * e1000_get_bus_info_82542 - Obtain bus information for adapter
+ * @hw: pointer to the HW structure
+ *
+ * This will obtain information about the HW bus to which the
+ * adapter is attached and store it in the hw structure.
+ **/
+STATIC s32 e1000_get_bus_info_82542(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_get_bus_info_82542");
+
+ hw->bus.type = e1000_bus_type_pci;
+ hw->bus.speed = e1000_bus_speed_unknown;
+ hw->bus.width = e1000_bus_width_unknown;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_hw_82542 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82542(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_reset_hw_82542");
+
+ if (hw->revision_id == E1000_REVISION_2) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2\n");
+ e1000_pci_clear_mwi(hw);
+ }
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ /*
+ * Delay to allow any outstanding PCI transactions to complete before
+ * resetting the device
+ */
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to 82542/82543 MAC\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+ hw->nvm.ops.reload(hw);
+ msec_delay(2);
+
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ if (hw->revision_id == E1000_REVISION_2) {
+ if (bus->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_82542 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+STATIC s32 e1000_init_hw_82542(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82542 *dev_spec = &hw->dev_spec._82542;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl;
+ u16 i;
+
+ DEBUGFUNC("e1000_init_hw_82542");
+
+ /* Disabling VLAN filtering */
+ E1000_WRITE_REG(hw, E1000_VET, 0);
+ mac->ops.clear_vfta(hw);
+
+ /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+ if (hw->revision_id == E1000_REVISION_2) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(5);
+ }
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+ if (hw->revision_id == E1000_REVISION_2) {
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+ if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /*
+ * Set the PCI priority bit correctly in the CTRL register. This
+ * determines if the adapter gives priority to receives, or if it
+ * gives equal priority to transmits and receives.
+ */
+ if (dev_spec->dma_fairness) {
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR);
+ }
+
+ /* Setup link and flow control */
+ ret_val = e1000_setup_link_82542(hw);
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82542(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_link_82542 - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+STATIC s32 e1000_setup_link_82542(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_link_82542");
+
+ ret_val = e1000_set_default_fc_generic(hw);
+ if (ret_val)
+ goto out;
+
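+	/*
+	 * Tx pause is never requested on the 82542; with early transmit
+	 * reporting (report_tx_early), Rx pause is masked off as well.
+	 * Since the fc modes are bit-coded (Rx = bit 0, Tx = bit 1), a
+	 * requested e1000_fc_full is reduced to e1000_fc_rx_pause here,
+	 * or to e1000_fc_none when report_tx_early is set.
+	 */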
+ hw->fc.requested_mode &= ~e1000_fc_tx_pause;
+
+ if (mac->report_tx_early)
+ hw->fc.requested_mode &= ~e1000_fc_rx_pause;
+
+ /*
+ * Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
+ hw->fc.current_mode);
+
+ /* Call the necessary subroutine to configure the link. */
+ ret_val = mac->ops.setup_physical_interface(hw);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ DEBUGOUT("Initializing Flow Control address, type and timer regs\n");
+
+ E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+ E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+
+ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+ ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_led_on_82542 - Turn on SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED on.
+ **/
+STATIC s32 e1000_led_on_82542(struct e1000_hw *hw)
+{
+ u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGFUNC("e1000_led_on_82542");
+
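+	/* Drive SW-definable pin 0 high and configure it as an output. */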
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off_82542 - Turn off SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED off.
+ **/
+STATIC s32 e1000_led_off_82542(struct e1000_hw *hw)
+{
+ u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGFUNC("e1000_led_off_82542");
+
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_rar_set_82542 - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ **/
+STATIC void e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ DEBUGFUNC("e1000_rar_set_82542");
+
+ /*
+ * HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
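+	/*
+	 * Illustrative example (made-up address): for 00:1B:21:AA:BB:CC,
+	 * addr[] = {0x00, 0x1B, 0x21, 0xAA, 0xBB, 0xCC}, which yields
+	 * rar_low = 0xAA211B00 and rar_high = 0x0000CCBB.
+	 */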
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
+ E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+}
+
+/**
+ * e1000_translate_register_82542 - Translate the proper register offset
+ * @reg: e1000 register to be read
+ *
+ * Registers in the 82542 are located at different offsets than other adapters
+ * even though they function in the same manner. This function takes in
+ * the name of the register to read and returns the correct offset for
+ * 82542 silicon.
+ **/
+u32 e1000_translate_register_82542(u32 reg)
+{
+ /*
+ * Some of the 82542 registers are located at different
+ * offsets than they are in newer adapters.
+ * Despite the difference in location, the registers
+ * function in the same manner.
+ */
+ switch (reg) {
+ case E1000_RA:
+ reg = 0x00040;
+ break;
+ case E1000_RDTR:
+ reg = 0x00108;
+ break;
+ case E1000_RDBAL(0):
+ reg = 0x00110;
+ break;
+ case E1000_RDBAH(0):
+ reg = 0x00114;
+ break;
+ case E1000_RDLEN(0):
+ reg = 0x00118;
+ break;
+ case E1000_RDH(0):
+ reg = 0x00120;
+ break;
+ case E1000_RDT(0):
+ reg = 0x00128;
+ break;
+ case E1000_RDBAL(1):
+ reg = 0x00138;
+ break;
+ case E1000_RDBAH(1):
+ reg = 0x0013C;
+ break;
+ case E1000_RDLEN(1):
+ reg = 0x00140;
+ break;
+ case E1000_RDH(1):
+ reg = 0x00148;
+ break;
+ case E1000_RDT(1):
+ reg = 0x00150;
+ break;
+ case E1000_FCRTH:
+ reg = 0x00160;
+ break;
+ case E1000_FCRTL:
+ reg = 0x00168;
+ break;
+ case E1000_MTA:
+ reg = 0x00200;
+ break;
+ case E1000_TDBAL(0):
+ reg = 0x00420;
+ break;
+ case E1000_TDBAH(0):
+ reg = 0x00424;
+ break;
+ case E1000_TDLEN(0):
+ reg = 0x00428;
+ break;
+ case E1000_TDH(0):
+ reg = 0x00430;
+ break;
+ case E1000_TDT(0):
+ reg = 0x00438;
+ break;
+ case E1000_TIDV:
+ reg = 0x00440;
+ break;
+ case E1000_VFTA:
+ reg = 0x00600;
+ break;
+ case E1000_TDFH:
+ reg = 0x08010;
+ break;
+ case E1000_TDFT:
+ reg = 0x08018;
+ break;
+ default:
+ break;
+ }
+
+ return reg;
+}
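+
+/*
+ * Illustrative use (not part of the original code): a caller would
+ * translate before touching a register on 82542 silicon, e.g.
+ *	reg_off = e1000_translate_register_82542(E1000_RDT(0));
+ */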
+
+/**
+ * e1000_clear_hw_cntrs_82542 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82542");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+}
+
+/**
+ * e1000_read_mac_addr_82542 - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ **/
+s32 e1000_read_mac_addr_82542(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 offset, nvm_data, i;
+
+ DEBUGFUNC("e1000_read_mac_addr");
+
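+	/*
+	 * Each 16-bit NVM word holds two address bytes, low byte first:
+	 * word 0 -> bytes 0-1, word 1 -> bytes 2-3, word 2 -> bytes 4-5.
+	 */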
+ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+ offset = i >> 1;
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+ hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
+ hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
+ }
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+out:
+ return ret_val;
+}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.c
new file mode 100755
index 00000000..36335ba2
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.c
@@ -0,0 +1,1553 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/*
+ * 82543GC Gigabit Ethernet Controller (Fiber)
+ * 82543GC Gigabit Ethernet Controller (Copper)
+ * 82544EI Gigabit Ethernet Controller (Copper)
+ * 82544EI Gigabit Ethernet Controller (Fiber)
+ * 82544GC Gigabit Ethernet Controller (Copper)
+ * 82544GC Gigabit Ethernet Controller (LOM)
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_init_phy_params_82543(struct e1000_hw *hw);
+STATIC s32 e1000_init_nvm_params_82543(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_82543(struct e1000_hw *hw);
+STATIC s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+STATIC s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset,
+ u16 data);
+STATIC s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw);
+STATIC s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_82543(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_82543(struct e1000_hw *hw);
+STATIC s32 e1000_setup_link_82543(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_82543(struct e1000_hw *hw);
+STATIC s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw);
+STATIC s32 e1000_led_on_82543(struct e1000_hw *hw);
+STATIC s32 e1000_led_off_82543(struct e1000_hw *hw);
+STATIC void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset,
+ u32 value);
+STATIC void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw);
+STATIC s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw);
+STATIC bool e1000_init_phy_disabled_82543(struct e1000_hw *hw);
+STATIC void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl);
+STATIC s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw);
+STATIC void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl);
+STATIC u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw);
+STATIC void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
+ u16 count);
+STATIC bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw);
+STATIC void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state);
+
+/**
+ * e1000_init_phy_params_82543 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82543(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_init_phy_params_82543");
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ goto out;
+ } else {
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper;
+ }
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 10000;
+ phy->type = e1000_phy_m88;
+
+ /* Function Pointers */
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82543;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.read_reg = (hw->mac.type == e1000_82543)
+ ? e1000_read_phy_reg_82543
+ : e1000_read_phy_reg_m88;
+ phy->ops.reset = (hw->mac.type == e1000_82543)
+ ? e1000_phy_hw_reset_82543
+ : e1000_phy_hw_reset_generic;
+ phy->ops.write_reg = (hw->mac.type == e1000_82543)
+ ? e1000_write_phy_reg_82543
+ : e1000_write_phy_reg_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+
+ /*
+ * The external PHY of the 82543 can be in a funky state.
+ * Resetting helps us read the PHY registers for acquiring
+ * the PHY ID.
+ */
+ if (!e1000_init_phy_disabled_82543(hw)) {
+ ret_val = phy->ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Resetting PHY during init failed.\n");
+ goto out;
+ }
+ msec_delay(20);
+ }
+
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+
+ /* Verify phy id */
+ switch (hw->mac.type) {
+ case e1000_82543:
+ if (phy->id != M88E1000_E_PHY_ID) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+ break;
+ case e1000_82544:
+ if (phy->id != M88E1000_I_PHY_ID) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ break;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82543 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_82543(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ DEBUGFUNC("e1000_init_nvm_params_82543");
+
+ nvm->type = e1000_nvm_eeprom_microwire;
+ nvm->word_size = 64;
+ nvm->delay_usec = 50;
+ nvm->address_bits = 6;
+ nvm->opcode_bits = 3;
+
+ /* Function Pointers */
+ nvm->ops.read = e1000_read_nvm_microwire;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_generic;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.write = e1000_write_nvm_microwire;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82543 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82543(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_init_mac_params_82543");
+
+ /* Set media type */
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ hw->phy.media_type = e1000_media_type_fiber;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pci_generic;
+ /* function id */
+ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_82543;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_82543;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_82543;
+ /* physical interface setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_setup_copper_link_82543 : e1000_setup_fiber_link_82543;
+ /* check for link */
+ mac->ops.check_for_link =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_check_for_copper_link_82543
+ : e1000_check_for_fiber_link_82543;
+ /* link info */
+ mac->ops.get_link_up_info =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_get_speed_and_duplex_copper_generic
+ : e1000_get_speed_and_duplex_fiber_serdes_generic;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_82543;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_82543;
+ mac->ops.led_off = e1000_led_off_82543;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82543;
+
+ /* Set tbi compatibility */
+ if ((hw->mac.type != e1000_82543) ||
+ (hw->phy.media_type == e1000_media_type_fiber))
+ e1000_set_tbi_compatibility_82543(hw, false);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_82543 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82543(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82543");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82543;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82543;
+ hw->phy.ops.init_params = e1000_init_phy_params_82543;
+}
+
+/**
+ * e1000_tbi_compatibility_enabled_82543 - Returns TBI compat status
+ * @hw: pointer to the HW structure
+ *
+ * Returns the current status of 10-bit Interface (TBI) compatibility
+ * (enabled/disabled).
+ **/
+STATIC bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+ bool state = false;
+
+ DEBUGFUNC("e1000_tbi_compatibility_enabled_82543");
+
+ if (hw->mac.type != e1000_82543) {
+ DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+ goto out;
+ }
+
+ state = !!(dev_spec->tbi_compatibility & TBI_COMPAT_ENABLED);
+
+out:
+ return state;
+}
+
+/**
+ * e1000_set_tbi_compatibility_82543 - Set TBI compatibility
+ * @hw: pointer to the HW structure
+ * @state: enable/disable TBI compatibility
+ *
+ * Enables or disables 10-bit Interface (TBI) compatibility.
+ **/
+void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, bool state)
+{
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+
+ DEBUGFUNC("e1000_set_tbi_compatibility_82543");
+
+ if (hw->mac.type != e1000_82543) {
+ DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+ goto out;
+ }
+
+ if (state)
+ dev_spec->tbi_compatibility |= TBI_COMPAT_ENABLED;
+ else
+ dev_spec->tbi_compatibility &= ~TBI_COMPAT_ENABLED;
+
+out:
+ return;
+}
+
+/**
+ * e1000_tbi_sbp_enabled_82543 - Returns TBI SBP status
+ * @hw: pointer to the HW structure
+ *
+ * Returns the current status of 10-bit Interface (TBI) store bad packet (SBP)
+ * (enabled/disabled).
+ **/
+bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+ bool state = false;
+
+ DEBUGFUNC("e1000_tbi_sbp_enabled_82543");
+
+ if (hw->mac.type != e1000_82543) {
+ DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+ goto out;
+ }
+
+ state = !!(dev_spec->tbi_compatibility & TBI_SBP_ENABLED);
+
+out:
+ return state;
+}
+
+/**
+ * e1000_set_tbi_sbp_82543 - Set TBI SBP
+ * @hw: pointer to the HW structure
+ * @state: enable/disable TBI store bad packet
+ *
+ * Enables or disables 10-bit Interface (TBI) store bad packet (SBP).
+ **/
+STATIC void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state)
+{
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+
+ DEBUGFUNC("e1000_set_tbi_sbp_82543");
+
+ if (state && e1000_tbi_compatibility_enabled_82543(hw))
+ dev_spec->tbi_compatibility |= TBI_SBP_ENABLED;
+ else
+ dev_spec->tbi_compatibility &= ~TBI_SBP_ENABLED;
+
+ return;
+}
+
+/**
+ * e1000_init_phy_disabled_82543 - Returns init PHY status
+ * @hw: pointer to the HW structure
+ *
+ * Returns the current status of whether PHY initialization is disabled.
+ * True if PHY initialization is disabled else false.
+ **/
+STATIC bool e1000_init_phy_disabled_82543(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+ bool ret_val;
+
+ DEBUGFUNC("e1000_init_phy_disabled_82543");
+
+ if (hw->mac.type != e1000_82543) {
+ ret_val = false;
+ goto out;
+ }
+
+ ret_val = dev_spec->init_phy_disabled;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_tbi_adjust_stats_82543 - Adjust stats when TBI enabled
+ * @hw: pointer to the HW structure
+ * @stats: Struct containing statistic register values
+ * @frame_len: The length of the frame in question
+ * @mac_addr: The Ethernet destination address of the frame in question
+ * @max_frame_size: The maximum frame size
+ *
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ **/
+void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
+ struct e1000_hw_stats *stats, u32 frame_len,
+ u8 *mac_addr, u32 max_frame_size)
+{
+ if (!(e1000_tbi_sbp_enabled_82543(hw)))
+ goto out;
+
+ /* First adjust the frame length. */
+ frame_len--;
+ /*
+ * We need to adjust the statistics counters, since the hardware
+ * counters overcount this packet as a CRC error and undercount
+ * the packet as a good packet
+ */
+ /* This packet should not be counted as a CRC error. */
+ stats->crcerrs--;
+ /* This packet does count as a Good Packet Received. */
+ stats->gprc++;
+
+ /* Adjust the Good Octets received counters */
+ stats->gorc += frame_len;
+
+ /*
+ * Is this a broadcast or multicast? Check broadcast first,
+ * since the test for a multicast frame will test positive on
+ * a broadcast frame.
+ */
+ if ((mac_addr[0] == 0xff) && (mac_addr[1] == 0xff))
+ /* Broadcast packet */
+ stats->bprc++;
+ else if (*mac_addr & 0x01)
+ /* Multicast packet */
+ stats->mprc++;
+
+ /*
+ * In this case, the hardware has over counted the number of
+ * oversize frames.
+ */
+ if ((frame_len == max_frame_size) && (stats->roc > 0))
+ stats->roc--;
+
+ /*
+ * Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
+ */
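+	/*
+	 * For example, a frame the hardware counted as 65 bytes is really
+	 * a 64-byte frame plus the extra TBI byte: it was binned in
+	 * prc127, so it is moved to prc64 below.
+	 */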
+ if (frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if (frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if (frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if (frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if (frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if (frame_len == 1522) {
+ stats->prc1522++;
+ }
+
+out:
+ return;
+}
+
+/**
+ * e1000_read_phy_reg_82543 - Read PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY at offset and stores the information read to data.
+ **/
+STATIC s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ u32 mdic;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_read_phy_reg_82543");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ ret_val = -E1000_ERR_PARAM;
+ goto out;
+ }
+
+ /*
+ * We must first send a preamble through the MDIO pin to signal the
+ * beginning of an MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /*
+ * Now combine the next few fields that are required for a read
+ * operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine five different times. The format
+ * of an MII read instruction consists of a shift out of 14 bits and
+ * is defined as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Offset>
+	 * followed by a shift in of 18 bits. The first two bits shifted in
+	 * are TurnAround bits used to avoid contention on the MDIO pin when a
+	 * READ operation is performed. These two bits are thrown away,
+	 * followed by a shift in of 16 bits which contain the desired data.
+ */
+ mdic = (offset | (hw->phy.addr << 5) |
+ (PHY_OP_READ << 10) | (PHY_SOF << 12));
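+	/*
+	 * Resulting 14-bit command, shifted out MSB first:
+	 * bits [13:12] = SOF (01b), [11:10] = opcode (10b = read),
+	 * [9:5] = PHY address, [4:0] = register offset.
+	 */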
+
+ e1000_shift_out_mdi_bits_82543(hw, mdic, 14);
+
+ /*
+ * Now that we've shifted out the read command to the MII, we need to
+	 * "shift in" the 16-bit value of the requested PHY register (18 bits
+	 * in total, counting the two turnaround bits).
+ */
+ *data = e1000_shift_in_mdi_bits_82543(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_82543 - Write PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be written
+ * @data: pointer to the data to be written at offset
+ *
+ * Writes data to the PHY at offset.
+ **/
+STATIC s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ u32 mdic;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_phy_reg_82543");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ ret_val = -E1000_ERR_PARAM;
+ goto out;
+ }
+
+ /*
+ * We'll need to use the SW defined pins to shift the write command
+ * out to the PHY. We first send a preamble to the PHY to signal the
+ * beginning of the MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /*
+ * Now combine the remaining required fields that will indicate a
+ * write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the command. The
+ * format of a MII write instruction is as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ */
+ mdic = ((PHY_TURNAROUND) | (offset << 2) | (hw->phy.addr << 7) |
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ mdic <<= 16;
+ mdic |= (u32)data;
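+	/*
+	 * Resulting 32-bit command, shifted out MSB first:
+	 * bits [31:30] = SOF (01b), [29:28] = opcode (01b = write),
+	 * [27:23] = PHY address, [22:18] = register offset,
+	 * [17:16] = turnaround (10b), [15:0] = data.
+	 */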
+
+ e1000_shift_out_mdi_bits_82543(hw, mdic, 32);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_raise_mdi_clk_82543 - Raise Management Data Input clock
+ * @hw: pointer to the HW structure
+ * @ctrl: pointer to the control register
+ *
+ * Raise the management data input clock by setting the MDC bit in the control
+ * register.
+ **/
+STATIC void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+ /*
+ * Raise the clock input to the Management Data Clock (by setting the
+ * MDC bit), and then delay a sufficient amount of time.
+ */
+ E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl | E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(10);
+}
+
+/**
+ * e1000_lower_mdi_clk_82543 - Lower Management Data Input clock
+ * @hw: pointer to the HW structure
+ * @ctrl: pointer to the control register
+ *
+ * Lower the management data input clock by clearing the MDC bit in the
+ * control register.
+ **/
+STATIC void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+ /*
+ * Lower the clock input to the Management Data Clock (by clearing the
+ * MDC bit), and then delay a sufficient amount of time.
+ */
+ E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl & ~E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(10);
+}
+
+/**
+ * e1000_shift_out_mdi_bits_82543 - Shift data bits out to the PHY
+ * @hw: pointer to the HW structure
+ * @data: data to send to the PHY
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the PHY. So, the value in the
+ * "data" parameter will be shifted out to the PHY one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+STATIC void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
+ u16 count)
+{
+ u32 ctrl, mask;
+
+ /*
+ * We need to shift "count" number of bits out to the PHY. So, the
+ * value in the "data" parameter will be shifted out to the PHY one
+ * bit at a time. In order to do this, "data" must be broken down
+ * into bits.
+ */
+ mask = 0x01;
+ mask <<= (count - 1);
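+	/*
+	 * E.g. for the 32-bit preamble the mask starts at bit 31, so the
+	 * bits are clocked out MSB first.
+	 */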
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+ ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+ while (mask) {
+ /*
+ * A "1" is shifted out to the PHY by setting the MDIO bit to
+ * "1" and then raising and lowering the Management Data Clock.
+ * A "0" is shifted out to the PHY by setting the MDIO bit to
+ * "0" and then raising and lowering the clock.
+ */
+ if (data & mask)
+ ctrl |= E1000_CTRL_MDIO;
+ else
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(10);
+
+ e1000_raise_mdi_clk_82543(hw, &ctrl);
+ e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+ mask >>= 1;
+ }
+}
+
+/**
+ * e1000_shift_in_mdi_bits_82543 - Shift data bits in from the PHY
+ * @hw: pointer to the HW structure
+ *
+ * In order to read a register from the PHY, we need to shift 18 bits
+ * in from the PHY. Bits are "shifted in" by raising the clock input to
+ * the PHY (setting the MDC bit), and then reading the value of the data out
+ * MDIO bit.
+ **/
+STATIC u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u16 data = 0;
+ u8 i;
+
+ /*
+ * In order to read a register from the PHY, we need to shift in a
+ * total of 18 bits from the PHY. The first two bit (turnaround)
+ * times are used to avoid contention on the MDIO pin when a read
+ * operation is performed. These two bits are ignored by us and
+ * thrown away. Bits are "shifted in" by raising the input to the
+ * Management Data Clock (setting the MDC bit) and then reading the
+ * value of the MDIO bit.
+ */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /*
+ * Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
+ * input.
+ */
+ ctrl &= ~E1000_CTRL_MDIO_DIR;
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ /*
+ * Raise and lower the clock before reading in the data. This accounts
+ * for the turnaround bits. The first clock occurred when we clocked
+ * out the last bit of the Register Address.
+ */
+ e1000_raise_mdi_clk_82543(hw, &ctrl);
+ e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+ for (data = 0, i = 0; i < 16; i++) {
+ data <<= 1;
+ e1000_raise_mdi_clk_82543(hw, &ctrl);
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ /* Check to see if we shifted in a "1". */
+ if (ctrl & E1000_CTRL_MDIO)
+ data |= 1;
+ e1000_lower_mdi_clk_82543(hw, &ctrl);
+ }
+
+ e1000_raise_mdi_clk_82543(hw, &ctrl);
+ e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+ return data;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82543 - Force speed/duplex for PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the function to force speed and duplex for the m88 PHY, and
+ * if the PHY is not auto-negotiating and the speed is forced to 10Mbit,
+ * then calls the polarity reversal workaround function.
+ **/
+STATIC s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_82543");
+
+ ret_val = e1000_phy_force_speed_duplex_m88(hw);
+ if (ret_val)
+ goto out;
+
+ if (!hw->mac.autoneg && (hw->mac.forced_speed_duplex &
+ E1000_ALL_10_SPEED))
+ ret_val = e1000_polarity_reversal_workaround_82543(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_polarity_reversal_workaround_82543 - Workaround polarity reversal
+ * @hw: pointer to the HW structure
+ *
+ * When forcing link to 10 Full or 10 Half, the PHY can reverse the polarity
+ * inadvertently. To workaround the issue, we disable the transmitter on
+ * the PHY until we have established the link partner's link parameters.
+ **/
+STATIC s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 mii_status_reg;
+ u16 i;
+ bool link;
+
+ if (!(hw->phy.ops.write_reg))
+ goto out;
+
+ /* Polarity reversal workaround for forced 10F/10H links. */
+
+ /* Disable the transmitter on the PHY */
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ goto out;
+
+ /*
+ * This loop will early-out if the NO link condition has been met.
+ * In other words, DO NOT use e1000_phy_has_link_generic() here.
+ */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /*
+ * Read the MII Status Register and wait for Link Status bit
+ * to be clear.
+ */
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ goto out;
+
+ if (!(mii_status_reg & ~MII_SR_LINK_STATUS))
+ break;
+ msec_delay_irq(100);
+ }
+
+ /* Recommended delay time after link has been lost */
+ msec_delay_irq(1000);
+
+ /* Now we will re-enable the transmitter on the PHY */
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ goto out;
+ msec_delay_irq(50);
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+ if (ret_val)
+ goto out;
+ msec_delay_irq(50);
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+ if (ret_val)
+ goto out;
+ msec_delay_irq(50);
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Read the MII Status Register and wait for Link Status bit
+ * to be set.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_TIME, 100000, &link);
+ if (ret_val)
+ goto out;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_82543 - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Sets the PHY_RESET_DIR bit in the extended device control register
+ * to put the PHY into a reset and waits for completion. Once the reset
+ * has been accomplished, clear the PHY_RESET_DIR bit to take the PHY out
+ * of reset.
+ **/
+STATIC s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_phy_hw_reset_82543");
+
+ /*
+ * Read the Extended Device Control Register, assert the PHY_RESET_DIR
+ * bit to put the PHY into reset...
+ */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ /* ...then take it out of reset. */
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(150);
+
+ if (!(hw->phy.ops.get_cfg_done))
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.get_cfg_done(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_82543 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82543(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_reset_hw_82543");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ e1000_set_tbi_sbp_82543(hw, false);
+
+ /*
+ * Delay to allow any outstanding PCI transactions to complete before
+ * resetting the device
+ */
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to 82543/82544 MAC\n");
+ if (hw->mac.type == e1000_82543) {
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ } else {
+ /*
+ * The 82544 can't ACK the 64-bit write when issuing the
+ * reset, so use IO-mapping as a workaround.
+ */
+ E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+ }
+
+ /*
+ * After MAC reset, force reload of NVM to restore power-on
+ * settings to device.
+ */
+ hw->nvm.ops.reload(hw);
+ msec_delay(2);
+
+ /* Masking off and clearing any pending interrupts */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_82543 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+STATIC s32 e1000_init_hw_82543(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543;
+ u32 ctrl;
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_init_hw_82543");
+
+ /* Disabling VLAN filtering */
+ E1000_WRITE_REG(hw, E1000_VET, 0);
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ /*
+ * Set the PCI priority bit correctly in the CTRL register. This
+ * determines if the adapter gives priority to receives, or if it
+ * gives equal priority to transmits and receives.
+ */
+ if (hw->mac.type == e1000_82543 && dev_spec->dma_fairness) {
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR);
+ }
+
+ e1000_pcix_mmrbc_workaround_generic(hw);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82543(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_link_82543 - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Read the EEPROM to determine the initial polarity value and write the
+ * extended device control register with the information before calling
+ * the generic setup link function, which does the following:
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+STATIC s32 e1000_setup_link_82543(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_setup_link_82543");
+
+ /*
+ * Take the 4 bits from NVM word 0xF that determine the initial
+ * polarity value for the SW controlled pins, and setup the
+ * Extended Device Control reg with that info.
+ * This is needed because one of the SW controlled pins is used for
+ * signal detection. So this should be done before phy setup.
+ */
+ if (hw->mac.type == e1000_82543) {
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+ ctrl_ext = ((data & NVM_WORD0F_SWPDIO_EXT_MASK) <<
+ NVM_SWDPIO_EXT_SHIFT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ }
+
+ ret_val = e1000_setup_link_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_82543 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link, once link is established calls to configure collision distance
+ * and flow control are called.
+ **/
+STATIC s32 e1000_setup_copper_link_82543(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("e1000_setup_copper_link_82543");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL) | E1000_CTRL_SLU;
+ /*
+ * With 82543, we need to force speed and duplex on the MAC
+ * equal to what the PHY speed and duplex configuration is.
+ * In addition, we need to perform a hardware reset on the
+ * PHY to take it out of reset.
+ */
+ if (hw->mac.type == e1000_82543) {
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val)
+ goto out;
+ } else {
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ }
+
+ /* Set MDI/MDI-X, Polarity Reversal, and downshift settings */
+ ret_val = e1000_copper_link_setup_m88(hw);
+ if (ret_val)
+ goto out;
+
+ if (hw->mac.autoneg) {
+ /*
+ * Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ goto out;
+ } else {
+ /*
+ * PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+ DEBUGOUT("Forcing Speed and Duplex\n");
+ ret_val = e1000_phy_force_speed_duplex_82543(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Forcing Speed and Duplex\n");
+ goto out;
+ }
+ }
+
+ /*
+ * Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+ &link);
+ if (ret_val)
+ goto out;
+
+
+ if (link) {
+ DEBUGOUT("Valid link established!!!\n");
+ /* Config the MAC and PHY after link is up */
+ if (hw->mac.type == e1000_82544) {
+ hw->mac.ops.config_collision_dist(hw);
+ } else {
+ ret_val = e1000_config_mac_to_phy_82543(hw);
+ if (ret_val)
+ goto out;
+ }
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ } else {
+ DEBUGOUT("Unable to establish link!!!\n");
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_fiber_link_82543 - Setup link for fiber
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber links. Upon
+ * successful setup, poll for link.
+ **/
+STATIC s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_fiber_link_82543");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Take the link out of reset */
+ ctrl &= ~E1000_CTRL_LRST;
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ ret_val = e1000_commit_fc_settings_generic(hw);
+ if (ret_val)
+ goto out;
+
+ DEBUGOUT("Auto-negotiation enabled\n");
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+
+ /*
+ * For these adapters, the SW definable pin 1 is cleared when the
+ * optics detect a signal. If we have a signal, then poll for a
+ * "Link-Up" indication.
+ */
+ if (!(E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1))
+ ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+ else
+ DEBUGOUT("No signal detected\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_copper_link_82543 - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks the PHY for link; if link exists, does the following:
+ * - check for downshift
+ * - do polarity workaround (if necessary)
+ * - configure collision distance
+ * - configure flow control after link up
+ * - configure tbi compatibility
+ **/
+STATIC s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 icr, rctl;
+ s32 ret_val;
+ u16 speed, duplex;
+ bool link;
+
+ DEBUGFUNC("e1000_check_for_copper_link_82543");
+
+ if (!mac->get_link_status) {
+ ret_val = E1000_SUCCESS;
+ goto out;
+ }
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ goto out; /* No link detected */
+
+ mac->get_link_status = false;
+
+ e1000_check_downshift_generic(hw);
+
+ /*
+ * If we are forcing speed/duplex, then we can return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ /*
+ * If speed and duplex are forced to 10H or 10F, then we will
+ * implement the polarity reversal workaround. We disable
+		 * interrupts first, and upon returning, restore the device's
+		 * interrupt state to its previous value except for the link
+		 * status change interrupt, which will happen due to the
+ * execution of this workaround.
+ */
+ if (mac->forced_speed_duplex & E1000_ALL_10_SPEED) {
+ E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+ ret_val = e1000_polarity_reversal_workaround_82543(hw);
+ icr = E1000_READ_REG(hw, E1000_ICR);
+ E1000_WRITE_REG(hw, E1000_ICS, (icr & ~E1000_ICS_LSC));
+ E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
+ }
+
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * We have a M88E1000 PHY and Auto-Neg is enabled. If we
+ * have Si on board that is 82544 or newer, Auto
+ * Speed Detection takes care of MAC speed/duplex
+ * configuration. So we only need to configure Collision
+ * Distance in the MAC. Otherwise, we need to force
+ * speed/duplex on the MAC to the current PHY speed/duplex
+ * settings.
+ */
+ if (mac->type == e1000_82544)
+ hw->mac.ops.config_collision_dist(hw);
+ else {
+ ret_val = e1000_config_mac_to_phy_82543(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring MAC to PHY settings\n");
+ goto out;
+ }
+ }
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+ /*
+ * At this point we know that we are on copper and we have
+ * auto-negotiated link. These are conditions for checking the link
+ * partner capability register. We use the link speed to determine if
+ * TBI compatibility needs to be turned on or off. If the link is not
+ * at gigabit speed, then TBI compatibility is not needed. If we are
+ * at gigabit speed, we turn on TBI compatibility.
+ */
+ if (e1000_tbi_compatibility_enabled_82543(hw)) {
+ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+ if (speed != SPEED_1000) {
+ /*
+ * If link speed is not set to gigabit speed,
+ * we do not need to enable TBI compatibility.
+ */
+ if (e1000_tbi_sbp_enabled_82543(hw)) {
+ /*
+				 * If we were previously in this mode,
+ * turn it off.
+ */
+ e1000_set_tbi_sbp_82543(hw, false);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= ~E1000_RCTL_SBP;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ }
+ } else {
+ /*
+			 * If TBI compatibility was previously off,
+ * turn it on. For compatibility with a TBI link
+ * partner, we will store bad packets. Some
+ * frames have an additional byte on the end and
+			 * will look like CRC errors to the hardware.
+ */
+ if (!e1000_tbi_sbp_enabled_82543(hw)) {
+ e1000_set_tbi_sbp_82543(hw, true);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_SBP;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ }
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_fiber_link_82543 - Check for link (Fiber)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+STATIC s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw, ctrl, status;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_check_for_fiber_link_82543");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ /*
+ * If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), the cable is plugged in (we have signal),
+ * and our link partner is not trying to auto-negotiate with us (we
+ * are receiving idles or data), we need to force link up. We also
+ * need to give auto-negotiation time to complete, in case the cable
+ * was just plugged in. The autoneg_failed flag does this.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 0 == have signal */
+ if ((!(ctrl & E1000_CTRL_SWDPIN1)) &&
+ (!(status & E1000_STATUS_LU)) &&
+ (!(rxcw & E1000_RXCW_C))) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ ret_val = 0;
+ goto out;
+ }
+ DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ goto out;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /*
+ * If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_config_mac_to_phy_82543 - Configure MAC to PHY settings
+ * @hw: pointer to the HW structure
+ *
+ * For the 82543 silicon, we need to set the MAC to match the settings
+ * of the PHY, even if the PHY is auto-negotiating.
+ **/
+STATIC s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_config_mac_to_phy_82543");
+
+ if (!(hw->phy.ops.read_reg))
+ goto out;
+
+ /* Set the bits to force speed and duplex */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+ /*
+ * Set up duplex in the Device Control and Transmit Control
+ * registers depending on negotiated values.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
+ ctrl &= ~E1000_CTRL_FD;
+ if (phy_data & M88E1000_PSSR_DPLX)
+ ctrl |= E1000_CTRL_FD;
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ /*
+ * Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+ ctrl |= E1000_CTRL_SPD_100;
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_vfta_82543 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: the 32-bit offset at which to write the value.
+ * @value: the 32-bit value to write at location offset.
+ *
+ * This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ * table.
+ **/
+STATIC void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ u32 temp;
+
+ DEBUGFUNC("e1000_write_vfta_82543");
+
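+	/*
+	 * For odd offsets on the 82544, the preceding even entry is read
+	 * back and rewritten after the write, presumably to keep it from
+	 * being disturbed (hardware quirk handling).
+	 */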
+ if ((hw->mac.type == e1000_82544) && (offset & 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, E1000_VFTA, offset - 1);
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset - 1, temp);
+ E1000_WRITE_FLUSH(hw);
+ } else {
+ e1000_write_vfta_generic(hw, offset, value);
+ }
+}
+
+/**
+ * e1000_led_on_82543 - Turn on SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED on.
+ **/
+STATIC s32 e1000_led_on_82543(struct e1000_hw *hw)
+{
+ u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGFUNC("e1000_led_on_82543");
+
+ if (hw->mac.type == e1000_82544 &&
+ hw->phy.media_type == e1000_media_type_copper) {
+ /* Clear SW-definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Fiber 82544 and all 82543 use this method */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off_82543 - Turn off SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED off.
+ **/
+STATIC s32 e1000_led_off_82543(struct e1000_hw *hw)
+{
+ u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGFUNC("e1000_led_off_82543");
+
+ if (hw->mac.type == e1000_82544 &&
+ hw->phy.media_type == e1000_media_type_copper) {
+ /* Set SW-definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82543 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82543");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.h
new file mode 100755
index 00000000..51056dbc
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82543.h
@@ -0,0 +1,56 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_82543_H_
+#define _E1000_82543_H_
+
+#define PHY_PREAMBLE 0xFFFFFFFF
+#define PHY_PREAMBLE_SIZE 32
+#define PHY_SOF 0x1
+#define PHY_OP_READ 0x2
+#define PHY_OP_WRITE 0x1
+#define PHY_TURNAROUND 0x2
+
+#define TBI_COMPAT_ENABLED 0x1 /* Global "knob" for the workaround */
+/* If TBI_COMPAT_ENABLED, then this is the current state (on/off) */
+#define TBI_SBP_ENABLED 0x2
+
+void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
+ struct e1000_hw_stats *stats,
+ u32 frame_len, u8 *mac_addr,
+ u32 max_frame_size);
+void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw,
+ bool state);
+bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw);
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.c
new file mode 100755
index 00000000..8ae1cb12
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.c
@@ -0,0 +1,2026 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/* 82571EB Gigabit Ethernet Controller
+ * 82571EB Gigabit Ethernet Controller (Copper)
+ * 82571EB Gigabit Ethernet Controller (Fiber)
+ * 82571EB Dual Port Gigabit Mezzanine Adapter
+ * 82571EB Quad Port Gigabit Mezzanine Adapter
+ * 82571PT Gigabit PT Quad Port Server ExpressModule
+ * 82572EI Gigabit Ethernet Controller (Copper)
+ * 82572EI Gigabit Ethernet Controller (Fiber)
+ * 82572EI Gigabit Ethernet Controller
+ * 82573V Gigabit Ethernet Controller (Copper)
+ * 82573E Gigabit Ethernet Controller (Copper)
+ * 82573L Gigabit Ethernet Controller
+ * 82574L Gigabit Network Connection
+ * 82583V Gigabit Network Connection
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_acquire_nvm_82571(struct e1000_hw *hw);
+STATIC void e1000_release_nvm_82571(struct e1000_hw *hw);
+STATIC s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw);
+STATIC s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw);
+STATIC s32 e1000_get_cfg_done_82571(struct e1000_hw *hw);
+STATIC s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_reset_hw_82571(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_82571(struct e1000_hw *hw);
+STATIC void e1000_clear_vfta_82571(struct e1000_hw *hw);
+STATIC bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
+STATIC s32 e1000_led_on_82574(struct e1000_hw *hw);
+STATIC s32 e1000_setup_link_82571(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
+STATIC s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
+STATIC s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data);
+STATIC void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+STATIC s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw);
+STATIC s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
+STATIC s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
+STATIC void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+STATIC void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+STATIC s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+STATIC void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+STATIC s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw,
+ bool active);
+STATIC void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
+STATIC s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 e1000_read_mac_addr_82571(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+
+/**
+ * e1000_init_phy_params_82571 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_phy_params_82571");
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ return E1000_SUCCESS;
+ }
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+
+ phy->ops.check_reset_block = e1000_check_reset_block_generic;
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82571;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82571;
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ phy->type = e1000_phy_igp_2;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_82571;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ phy->ops.acquire = e1000_get_hw_semaphore_82571;
+ phy->ops.release = e1000_put_hw_semaphore_82571;
+ break;
+ case e1000_82573:
+ phy->type = e1000_phy_m88;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.read_reg = e1000_read_phy_reg_m88;
+ phy->ops.write_reg = e1000_write_phy_reg_m88;
+ phy->ops.acquire = e1000_get_hw_semaphore_82571;
+ phy->ops.release = e1000_put_hw_semaphore_82571;
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ E1000_MUTEX_INIT(&hw->dev_spec._82571.swflag_mutex);
+
+ phy->type = e1000_phy_bm;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.read_reg = e1000_read_phy_reg_bm2;
+ phy->ops.write_reg = e1000_write_phy_reg_bm2;
+ phy->ops.acquire = e1000_get_hw_semaphore_82574;
+ phy->ops.release = e1000_put_hw_semaphore_82574;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+
+ /* This can only be done after all function pointers are setup. */
+ ret_val = e1000_get_phy_id_82571(hw);
+ if (ret_val) {
+ DEBUGOUT("Error getting PHY ID\n");
+ return ret_val;
+ }
+
+ /* Verify phy id */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ if (phy->id != IGP01E1000_I_PHY_ID)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ case e1000_82573:
+ if (phy->id != M88E1111_I_PHY_ID)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ if (phy->id != BME1000_E_PHY_ID_R2)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ if (ret_val)
+ DEBUGOUT1("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82571 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u16 size;
+
+ DEBUGFUNC("e1000_init_nvm_params_82571");
+
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
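+ /* No override: derive the SPI page size and address width
+ * from the EECD address-bits setting.
+ */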
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+ break;
+ }
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (((eecd >> 15) & 0x3) == 0x3) {
+ nvm->type = e1000_nvm_flash_hw;
+ nvm->word_size = 2048;
+ /* Autonomous Flash update bit must be cleared due
+ * to Flash update issue.
+ */
+ eecd &= ~E1000_EECD_AUPDEN;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ break;
+ }
+ /* Fall Through */
+ default:
+ nvm->type = e1000_nvm_eeprom_spi;
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+ /* Added to NVM_WORD_SIZE_BASE_SHIFT, "size" becomes the
+ * left-shift value used to set word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* EEPROM access above 16k is unsupported */
+ if (size > 14)
+ size = 14;
+ nvm->word_size = 1 << size;
+ break;
+ }
+
+ /* Function Pointers */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+ nvm->ops.release = e1000_put_hw_semaphore_82574;
+ break;
+ default:
+ nvm->ops.acquire = e1000_acquire_nvm_82571;
+ nvm->ops.release = e1000_release_nvm_82571;
+ break;
+ }
+ nvm->ops.read = e1000_read_nvm_eerd;
+ nvm->ops.update = e1000_update_nvm_checksum_82571;
+ nvm->ops.validate = e1000_validate_nvm_checksum_82571;
+ nvm->ops.valid_led_default = e1000_valid_led_default_82571;
+ nvm->ops.write = e1000_write_nvm_82571;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82571 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 swsm = 0;
+ u32 swsm2 = 0;
+ bool force_clear_smbi = false;
+
+ DEBUGFUNC("e1000_init_mac_params_82571");
+
+ /* Set media type and media-dependent function pointers */
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82571EB_FIBER:
+ case E1000_DEV_ID_82572EI_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ hw->phy.media_type = e1000_media_type_fiber;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_fiber_serdes_generic;
+ break;
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES_DUAL:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ case E1000_DEV_ID_82572EI_SERDES:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ mac->ops.check_for_link = e1000_check_for_serdes_link_82571;
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_fiber_serdes_generic;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_82571;
+ mac->ops.check_for_link = e1000_check_for_copper_link_generic;
+ mac->ops.get_link_up_info =
+ e1000_get_speed_and_duplex_copper_generic;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_82571;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_82571;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_82571;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_82571;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82571;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_generic;
+ /* turn off LED */
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82571;
+
+ /* MAC-specific function pointers */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.blink_led = e1000_blink_led_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid = !!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_FWSM_MODE_MASK);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ mac->ops.check_mng_mode = e1000_check_mng_mode_82574;
+ mac->ops.led_on = e1000_led_on_82574;
+ break;
+ default:
+ mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.blink_led = e1000_blink_led_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ break;
+ }
+
+ /* Ensure that the inter-port SWSM.SMBI lock bit is clear before
+ * first NVM or PHY access. This should be done for single-port
+ * devices, and for one port only on dual-port devices so that
+ * for those devices we can still use the SMBI lock to synchronize
+ * inter-port accesses to the PHY & NVM.
+ */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ swsm2 = E1000_READ_REG(hw, E1000_SWSM2);
+
+ if (!(swsm2 & E1000_SWSM2_LOCK)) {
+ /* Only do this for the first interface on this card */
+ E1000_WRITE_REG(hw, E1000_SWSM2, swsm2 |
+ E1000_SWSM2_LOCK);
+ force_clear_smbi = true;
+ } else {
+ force_clear_smbi = false;
+ }
+ break;
+ default:
+ force_clear_smbi = true;
+ break;
+ }
+
+ if (force_clear_smbi) {
+ /* Make sure SWSM.SMBI is clear */
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (swsm & E1000_SWSM_SMBI) {
+ /* This bit should not be set on a first interface, and
+ * indicates that the bootagent or EFI code has
+ * improperly left this bit enabled
+ */
+ DEBUGOUT("Please update your 82571 Bootagent\n");
+ }
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_SMBI);
+ }
+
+ /* Initialize the device-specific counter of SMBI acquisition timeouts. */
+ hw->dev_spec._82571.smb_counter = 0;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_82571 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82571");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82571;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82571;
+ hw->phy.ops.init_params = e1000_init_phy_params_82571;
+}
+
+/**
+ * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+STATIC s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_id = 0;
+
+ DEBUGFUNC("e1000_get_phy_id_82571");
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /* The 82571 firmware may still be configuring the PHY.
+ * In this case, we cannot access the PHY until the
+ * configuration is done. So we explicitly set the
+ * PHY ID.
+ */
+ phy->id = IGP01E1000_I_PHY_ID;
+ break;
+ case e1000_82573:
+ return e1000_get_phy_id(hw);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id = (u32)(phy_id << 16);
+ usec_delay(20);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id |= (u32)(phy_id);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+STATIC s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 sw_timeout = hw->nvm.word_size + 1;
+ s32 fw_timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_82571");
+
+ /* If we have timed out 3 times trying to acquire
+ * the inter-port SMBI semaphore, there is old code
+ * operating on the other port, and it is not
+ * releasing SMBI. Modify the number of times that
+ * we try for the semaphore to interwork with this
+ * older code.
+ */
+ if (hw->dev_spec._82571.smb_counter > 2)
+ sw_timeout = 1;
+
+ /* Get the SW semaphore */
+ while (i < sw_timeout) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == sw_timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ hw->dev_spec._82571.smb_counter++;
+ }
+ /* Get the FW semaphore. */
+ for (i = 0; i < fw_timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == fw_timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82571(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_put_hw_semaphore_82571 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+STATIC void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("e1000_put_hw_semaphore_generic");
+
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
+
+/**
+ * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore during reset.
+ *
+ **/
+STATIC s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_82573");
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ do {
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+ break;
+
+ msec_delay(2);
+ i++;
+ } while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+ if (i == MDIO_OWNERSHIP_TIMEOUT) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82573(hw);
+ DEBUGOUT("Driver can't access the PHY\n");
+ return -E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used during reset.
+ *
+ **/
+STATIC void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_put_hw_semaphore_82573");
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+}
+
+/**
+ * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+STATIC s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_82574");
+
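+ /* Serialize software access with the mutex, then take the
+ * MDIO-ownership (82573-style) hardware semaphore; drop the
+ * mutex again if the hardware semaphore cannot be acquired.
+ */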
+ E1000_MUTEX_LOCK(&hw->dev_spec._82571.swflag_mutex);
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ if (ret_val)
+ E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex);
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+STATIC void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_put_hw_semaphore_82574");
+
+ e1000_put_hw_semaphore_82573(hw);
+ E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex);
+}
+
+/**
+ * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag.
+ * LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+STATIC s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u32 data = E1000_READ_REG(hw, E1000_POEMB);
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_82574");
+
+ if (active)
+ data |= E1000_PHY_CTRL_D0A_LPLU;
+ else
+ data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+ E1000_WRITE_REG(hw, E1000_POEMB, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * when active is true; otherwise LPLU for D3 is cleared. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+STATIC s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u32 data = E1000_READ_REG(hw, E1000_POEMB);
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_82574");
+
+ if (!active) {
+ data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= E1000_PHY_CTRL_NOND0A_LPLU;
+ }
+
+ E1000_WRITE_REG(hw, E1000_POEMB, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_acquire_nvm_82571 - Request for access to the EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * To gain access to the EEPROM, first we must obtain a hardware semaphore.
+ * Then for non-82573 hardware, set the EEPROM access request bit and wait
+ * for EEPROM access grant bit. If the access grant bit is not set, release
+ * hardware semaphore.
+ **/
+STATIC s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_acquire_nvm_82571");
+
+ ret_val = e1000_get_hw_semaphore_82571(hw);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ break;
+ default:
+ ret_val = e1000_acquire_nvm_generic(hw);
+ break;
+ }
+
+ if (ret_val)
+ e1000_put_hw_semaphore_82571(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_82571 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+STATIC void e1000_release_nvm_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_82571");
+
+ e1000_release_nvm_generic(hw);
+ e1000_put_hw_semaphore_82571(hw);
+}
+
+/**
+ * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * For non-82573 silicon, write data to EEPROM at offset using SPI interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+STATIC s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_write_nvm_82571");
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ ret_val = e1000_write_nvm_spi(hw, offset, words, data);
+ break;
+ default:
+ ret_val = -E1000_ERR_NVM;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_82571 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+STATIC s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ u32 eecd;
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_82571");
+
+ ret_val = e1000_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* If our nvm is an EEPROM, then we're done;
+ * otherwise, commit the checksum to the flash NVM.
+ */
+ if (hw->nvm.type != e1000_nvm_flash_hw)
+ return E1000_SUCCESS;
+
+ /* Check for pending operations. */
+ for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+ msec_delay(1);
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD))
+ break;
+ }
+
+ if (i == E1000_FLASH_UPDATES)
+ return -E1000_ERR_NVM;
+
+ /* Reset the firmware if using STM opcode. */
+ if ((E1000_READ_REG(hw, E1000_FLOP) & 0xFF00) == E1000_STM_OPCODE) {
+ /* The enabling of and the actual reset must be done
+ * in two write cycles.
+ */
+ E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET_ENABLE);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET);
+ }
+
+ /* Commit the write to flash */
+ eecd = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+
+ for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+ msec_delay(1);
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD))
+ break;
+ }
+
+ if (i == E1000_FLASH_UPDATES)
+ return -E1000_ERR_NVM;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+STATIC s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_validate_nvm_checksum_82571");
+
+ if (hw->nvm.type == e1000_nvm_flash_hw)
+ e1000_fix_nvm_checksum_82571(hw);
+
+ return e1000_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * After checking for invalid values, poll the EEPROM to ensure the previous
+ * command has completed before trying to write the next word. After the
+ * write, poll for completion.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+STATIC s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eewr = 0;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_nvm_eewr_82571");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((data[i] << E1000_NVM_RW_REG_DATA) |
+ ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
+ E1000_NVM_RW_REG_START);
+
+ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+ if (ret_val)
+ break;
+
+ E1000_WRITE_REG(hw, E1000_EEWR, eewr);
+
+ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+ if (ret_val)
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cfg_done_82571 - Poll for configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Reads the management control register for the config done bit to be set.
+ **/
+STATIC s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+
+ DEBUGFUNC("e1000_get_cfg_done_82571");
+
+ while (timeout) {
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL) &
+ E1000_NVM_CFG_DONE_PORT_0)
+ break;
+ msec_delay(1);
+ timeout--;
+ }
+ if (!timeout) {
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When activating LPLU
+ * this function also disables smart speed and vice versa. LPLU will not be
+ * activated unless the device autonegotiation advertisement meets standards
+ * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function
+ * pointer entry point only called by PHY setup routines.
+ **/
+STATIC s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_82571");
+
+ if (!(phy->ops.read_reg))
+ return E1000_SUCCESS;
+
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (active) {
+ data |= IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ data &= ~IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_hw_82571 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82571(struct e1000_hw *hw)
+{
+ u32 ctrl, ctrl_ext, eecd, tctl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_reset_hw_82571");
+
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ /* Must acquire the MDIO ownership before MAC reset.
+ * Ownership defaults to firmware after a reset.
+ */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = e1000_get_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to MAC\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+ /* Must release MDIO ownership and mutex after MAC reset. */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ /* Release mutex only if the hw semaphore is acquired */
+ if (!ret_val)
+ e1000_put_hw_semaphore_82573(hw);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ /* Release mutex only if the hw semaphore is acquired */
+ if (!ret_val)
+ e1000_put_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+
+ if (hw->nvm.type == e1000_nvm_flash_hw) {
+ usec_delay(10);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val)
+ /* We don't want to continue accessing MAC registers. */
+ return ret_val;
+
+ /* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+ * Need to wait for Phy configuration completion before accessing
+ * NVM and Phy.
+ */
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /* REQ and GNT bits need to be cleared when using AUTO_RD
+ * to access the EEPROM.
+ */
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ break;
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ msec_delay(25);
+ break;
+ default:
+ break;
+ }
+
+ /* Clear any pending interrupt events. */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ if (hw->mac.type == e1000_82571) {
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000_set_laa_state_82571(hw, true);
+ }
+
+ /* Reinitialize the 82571 serdes link state machine */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes)
+ hw->mac.serdes_link_state = e1000_serdes_link_down;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_hw_82571 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+STATIC s32 e1000_init_hw_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 reg_data;
+ s32 ret_val;
+ u16 i, rar_count = mac->rar_entry_count;
+
+ DEBUGFUNC("e1000_init_hw_82571");
+
+ e1000_initialize_hw_bits_82571(hw);
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
+ if (ret_val)
+ DEBUGOUT("Error initializing identification LED\n");
+
+ /* Disabling VLAN filtering */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address.
+ * If, however, a locally administered address was assigned to the
+ * 82571, we must reserve a RAR for it to work around an issue where
+ * resetting one port will reload the MAC on the other port.
+ */
+ if (e1000_get_laa_state_82571(hw))
+ rar_count--;
+ e1000_init_rx_addrs_generic(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy */
+ reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data);
+
+ /* ...for both queues. */
+ switch (mac->type) {
+ case e1000_82573:
+ e1000_enable_tx_pkt_filtering_generic(hw);
+ /* fall through */
+ case e1000_82574:
+ case e1000_82583:
+ reg_data = E1000_READ_REG(hw, E1000_GCR);
+ reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+ E1000_WRITE_REG(hw, E1000_GCR, reg_data);
+ break;
+ default:
+ reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB |
+ E1000_TXDCTL_COUNT_DESC);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);
+ break;
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82571(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
+ * @hw: pointer to the HW structure
+ *
+ * Initializes required hardware-dependent bits needed for normal operation.
+ **/
+STATIC void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_initialize_hw_bits_82571");
+
+ /* Transmit Descriptor Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TARC(0));
+ reg &= ~(0xF << 27); /* 30:27 */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ reg |= (1 << 26);
+ break;
+ default:
+ break;
+ }
+ E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TARC(1));
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ reg &= ~((1 << 29) | (1 << 30));
+ reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+ if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Device Control */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg &= ~(1 << 29);
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Extended Device Control */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~(1 << 23);
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+ break;
+ default:
+ break;
+ }
+
+ if (hw->mac.type == e1000_82571) {
+ reg = E1000_READ_REG(hw, E1000_PBA_ECC);
+ reg |= E1000_PBA_ECC_CORR_EN;
+ E1000_WRITE_REG(hw, E1000_PBA_ECC, reg);
+ }
+
+ /* Workaround for hardware errata.
+ * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
+ */
+ if ((hw->mac.type == e1000_82571) ||
+ (hw->mac.type == e1000_82572)) {
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+ }
+
+ /* Disable IPv6 extension header parsing because some malformed
+ * IPv6 headers can hang the Rx.
+ */
+ if (hw->mac.type <= e1000_82573) {
+ reg = E1000_READ_REG(hw, E1000_RFCTL);
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+ E1000_WRITE_REG(hw, E1000_RFCTL, reg);
+ }
+
+ /* PCI-Ex Control Registers */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ reg = E1000_READ_REG(hw, E1000_GCR);
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_GCR, reg);
+
+ /* Workaround for hardware errata documented in the errata
+ * docs: fixes an issue where error-prone or unreliable PCIe
+ * completions occur, particularly with ASPM enabled.
+ * Without the fix, the issue can cause Tx timeouts.
+ */
+ reg = E1000_READ_REG(hw, E1000_GCR2);
+ reg |= 1;
+ E1000_WRITE_REG(hw, E1000_GCR2, reg);
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
+/**
+ * e1000_clear_vfta_82571 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+STATIC void e1000_clear_vfta_82571(struct e1000_hw *hw)
+{
+ u32 offset;
+ u32 vfta_value = 0;
+ u32 vfta_offset = 0;
+ u32 vfta_bit_in_reg = 0;
+
+ DEBUGFUNC("e1000_clear_vfta_82571");
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (hw->mng_cookie.vlan_id != 0) {
+ /* The VFTA is a 4096b bit-field, each identifying
+ * a single VLAN ID. The following operations
+ * determine which 32b entry (i.e. offset) into the
+ * array we want to set the VLAN ID (i.e. bit) of
+ * the manageability unit.
+ */
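+ /* For example, with the 32-bit entries described above,
+ * VLAN ID 100 falls in entry 3 (100 >> 5) at bit
+ * position 4 (100 & 0x1F).
+ */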
+ vfta_offset = (hw->mng_cookie.vlan_id >>
+ E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK;
+ vfta_bit_in_reg =
+ 1 << (hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ }
+ break;
+ default:
+ break;
+ }
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ /* If the offset we want to clear is the same offset of the
+ * manageability VLAN ID, then clear all bits except that of
+ * the manageability unit.
+ */
+ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * e1000_check_mng_mode_82574 - Check manageability is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Reads the NVM Initialization Control Word 2 and returns true
+ * (>0) if any manageability is enabled, else false (0).
+ **/
+STATIC bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
+{
+ u16 data;
+
+ DEBUGFUNC("e1000_check_mng_mode_82574");
+
+ hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+ return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
+}
+
+/**
+ * e1000_led_on_82574 - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+STATIC s32 e1000_led_on_82574(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 i;
+
+ DEBUGFUNC("e1000_led_on_82574");
+
+ ctrl = hw->mac.ledctl_mode2;
+ if (!(E1000_STATUS_LU & E1000_READ_REG(hw, E1000_STATUS))) {
+ /* If no link, then turn LED on by setting the invert bit
+ * for each LED that's "on" (0x0E) in ledctl_mode2.
+ */
+ for (i = 0; i < 4; i++)
+ if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+ E1000_LEDCTL_MODE_LED_ON)
+ ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
+ }
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_phy_82574 - check 82574 phy hung state
+ * @hw: pointer to the HW structure
+ *
+ * Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+ u16 status_1kbt = 0;
+ u16 receive_errors = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_check_phy_82574");
+
+ /* Read the PHY Receive Error counter first; if it is at its max (all F's),
+ * then read the Base1000T status register. If both are at max, the PHY is hung.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, E1000_RECEIVE_ERROR_COUNTER,
+ &receive_errors);
+ if (ret_val)
+ return false;
+ if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+ ret_val = hw->phy.ops.read_reg(hw, E1000_BASE1000T_STATUS,
+ &status_1kbt);
+ if (ret_val)
+ return false;
+ if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+ E1000_IDLE_ERROR_COUNT_MASK)
+ return true;
+ }
+
+ return false;
+}
+
+
+/**
+ * e1000_setup_link_82571 - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+STATIC s32 e1000_setup_link_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_setup_link_82571");
+
+ /* 82573 does not have a word in the NVM to determine
+ * the default flow control setting, so we explicitly
+ * set it to full.
+ */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (hw->fc.requested_mode == e1000_fc_default)
+ hw->fc.requested_mode = e1000_fc_full;
+ break;
+ default:
+ break;
+ }
+
+ return e1000_setup_link_generic(hw);
+}
+
+/**
+ * e1000_setup_copper_link_82571 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link; once link is established, collision distance and flow control
+ * are configured.
+ **/
+STATIC s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_copper_link_82571");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ switch (hw->phy.type) {
+ case e1000_phy_m88:
+ case e1000_phy_bm:
+ ret_val = e1000_copper_link_setup_m88(hw);
+ break;
+ case e1000_phy_igp_2:
+ ret_val = e1000_copper_link_setup_igp(hw);
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+
+ if (ret_val)
+ return ret_val;
+
+ return e1000_setup_copper_link_generic(hw);
+}
+
+/**
+ * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes links.
+ * Upon successful setup, poll for link.
+ **/
+STATIC s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_setup_fiber_serdes_link_82571");
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /* If SerDes loopback mode is entered, there is no form
+ * of reset to take the adapter out of that mode. So we
+ * have to explicitly take the adapter out of loopback
+ * mode. This prevents drivers from twiddling their thumbs
+ * if another tool failed to take it out of loopback mode.
+ */
+ E1000_WRITE_REG(hw, E1000_SCTL,
+ E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+ break;
+ default:
+ break;
+ }
+
+ return e1000_setup_fiber_serdes_link_generic(hw);
+}
+
+/**
+ * e1000_check_for_serdes_link_82571 - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Reports the link state as up or down.
+ *
+ * If autonegotiation is supported by the link partner, the link state is
+ * determined by the result of autonegotiation. This is the most likely case.
+ * If autonegotiation is not supported by the link partner, and the link
+ * has a valid signal, force the link up.
+ *
+ * The link state is represented internally here by 4 states:
+ *
+ * 1) down
+ * 2) autoneg_progress
+ * 3) autoneg_complete (the link successfully autonegotiated)
+ * 4) forced_up (the link has been forced up, it did not autonegotiate)
+ *
+ **/
+STATIC s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ u32 txcw;
+ u32 i;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_check_for_serdes_link_82571");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ E1000_READ_REG(hw, E1000_RXCW);
+ /* SYNCH bit and IV bit are sticky */
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
+ /* Receiver is synchronized with no invalid bits. */
+ switch (mac->serdes_link_state) {
+ case e1000_serdes_link_autoneg_complete:
+ if (!(status & E1000_STATUS_LU)) {
+ /* We have lost link, retry autoneg before
+ * reporting link failure
+ */
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ DEBUGOUT("AN_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
+ }
+ break;
+
+ case e1000_serdes_link_forced_up:
+ /* If we are receiving /C/ ordered sets, re-enable
+ * auto-negotiation in the TXCW register and disable
+ * forced link in the Device Control register in an
+ * attempt to auto-negotiate with our link partner.
+ */
+ if (rxcw & E1000_RXCW_C) {
+ /* Enable autoneg, and unforce link up */
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL,
+ (ctrl & ~E1000_CTRL_SLU));
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ DEBUGOUT("FORCED_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
+ }
+ break;
+
+ case e1000_serdes_link_autoneg_progress:
+ if (rxcw & E1000_RXCW_C) {
+ /* We received /C/ ordered sets, meaning the
+ * link partner has autonegotiated, and we can
+ * trust the Link Up (LU) status bit.
+ */
+ if (status & E1000_STATUS_LU) {
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_complete;
+ DEBUGOUT("AN_PROG -> AN_UP\n");
+ mac->serdes_has_link = true;
+ } else {
+ /* Autoneg completed, but failed. */
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ DEBUGOUT("AN_PROG -> DOWN\n");
+ }
+ } else {
+ /* The link partner did not autoneg.
+ * Force link up and full duplex, and change
+ * state to forced.
+ */
+ E1000_WRITE_REG(hw, E1000_TXCW,
+ (mac->txcw & ~E1000_TXCW_ANE));
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after link up. */
+ ret_val =
+ e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error config flow control\n");
+ break;
+ }
+ mac->serdes_link_state =
+ e1000_serdes_link_forced_up;
+ mac->serdes_has_link = true;
+ DEBUGOUT("AN_PROG -> FORCED_UP\n");
+ }
+ break;
+
+ case e1000_serdes_link_down:
+ default:
+ /* The link was down but the receiver has now gained
+ * valid sync, so let's see if we can bring the link
+ * up.
+ */
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl &
+ ~E1000_CTRL_SLU));
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ DEBUGOUT("DOWN -> AN_PROG\n");
+ break;
+ }
+ } else {
+ if (!(rxcw & E1000_RXCW_SYNCH)) {
+ mac->serdes_has_link = false;
+ mac->serdes_link_state = e1000_serdes_link_down;
+ DEBUGOUT("ANYSTATE -> DOWN\n");
+ } else {
+ /* Check several times; if the SYNCH bit and the CONFIG
+ * bit are both consistently 1, simply ignore the IV bit
+ * and restart autoneg.
+ */
+ for (i = 0; i < AN_RETRY_COUNT; i++) {
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+ if ((rxcw & E1000_RXCW_SYNCH) &&
+ (rxcw & E1000_RXCW_C))
+ continue;
+
+ if (rxcw & E1000_RXCW_IV) {
+ mac->serdes_has_link = false;
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ DEBUGOUT("ANYSTATE -> DOWN\n");
+ break;
+ }
+ }
+
+ if (i == AN_RETRY_COUNT) {
+ txcw = E1000_READ_REG(hw, E1000_TXCW);
+ txcw |= E1000_TXCW_ANE;
+ E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ DEBUGOUT("ANYSTATE -> AN_PROG\n");
+ }
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_valid_led_default_82571 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+STATIC s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_82571");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (*data == ID_LED_RESERVED_F746)
+ *data = ID_LED_DEFAULT_82573;
+ break;
+ default:
+ if (*data == ID_LED_RESERVED_0000 ||
+ *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT;
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_laa_state_82571 - Get locally administered address state
+ * @hw: pointer to the HW structure
+ *
+ * Retrieve and return the current locally administered address state.
+ **/
+bool e1000_get_laa_state_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_get_laa_state_82571");
+
+ if (hw->mac.type != e1000_82571)
+ return false;
+
+ return hw->dev_spec._82571.laa_is_present;
+}
+
+/**
+ * e1000_set_laa_state_82571 - Set locally administered address state
+ * @hw: pointer to the HW structure
+ * @state: enable/disable locally administered address
+ *
+ * Enable/Disable the current locally administered address state.
+ **/
+void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state)
+{
+ DEBUGFUNC("e1000_set_laa_state_82571");
+
+ if (hw->mac.type != e1000_82571)
+ return;
+
+ hw->dev_spec._82571.laa_is_present = state;
+
+ /* If workaround is activated... */
+ if (state)
+ /* Hold a copy of the LAA in RAR[14]. This is done so that
+ * between the time RAR[0] gets clobbered and the time it
+ * gets fixed, the actual LAA is in one of the RARs and no
+ * incoming packets directed to this port are dropped.
+ * Eventually the LAA will be in RAR[0] and RAR[14].
+ */
+ hw->mac.ops.rar_set(hw, hw->mac.addr,
+ hw->mac.rar_entry_count - 1);
+ return;
+}
+
+/**
+ * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Verifies that the EEPROM has completed the update. After updating the
+ * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If
+ * the checksum fix is not implemented, we need to set the bit and update
+ * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect,
+ * we need to return bad checksum.
+ **/
+STATIC s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_fix_nvm_checksum_82571");
+
+ if (nvm->type != e1000_nvm_flash_hw)
+ return E1000_SUCCESS;
+
+ /* Check bit 4 of word 10h. If it is 0, firmware is done updating
+ * 10h-12h. Checksum may need to be fixed.
+ */
+ ret_val = nvm->ops.read(hw, 0x10, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(data & 0x10)) {
+ /* Read 0x23 and check bit 15. This bit is a 1
+ * when the checksum has already been fixed. If
+ * the checksum is still wrong and this bit is a
+ * 1, we need to return bad checksum. Otherwise,
+ * we need to set this bit to a 1 and update the
+ * checksum.
+ */
+ ret_val = nvm->ops.read(hw, 0x23, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(data & 0x8000)) {
+ data |= 0x8000;
+ ret_val = nvm->ops.write(hw, 0x23, 1, &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = nvm->ops.update(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+
+/**
+ * e1000_read_mac_addr_82571 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_read_mac_addr_82571");
+
+ if (hw->mac.type == e1000_82571) {
+ s32 ret_val;
+
+ /* If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ struct e1000_mac_info *mac = &hw->mac;
+
+ if (!phy->ops.check_reset_block)
+ return;
+
+ /* If the management interface is not enabled, then power down */
+ if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82571");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+
+ E1000_READ_REG(hw, E1000_IAC);
+ E1000_READ_REG(hw, E1000_ICRXOC);
+
+ E1000_READ_REG(hw, E1000_ICRXPTC);
+ E1000_READ_REG(hw, E1000_ICRXATC);
+ E1000_READ_REG(hw, E1000_ICTXPTC);
+ E1000_READ_REG(hw, E1000_ICTXATC);
+ E1000_READ_REG(hw, E1000_ICTXQEC);
+ E1000_READ_REG(hw, E1000_ICTXQMTC);
+ E1000_READ_REG(hw, E1000_ICRXDMTC);
+}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.h
new file mode 100755
index 00000000..bdf64469
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82571.h
@@ -0,0 +1,65 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_82571_H_
+#define _E1000_82571_H_
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
+
+/* Intr Throttling - RW */
+#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n)))
+
+#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAC_MASK_82574 0x01F00000
+
+#define E1000_IVAR_INT_ALLOC_VALID 0x8
+
+/* Manageability Operation Mode mask */
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
+
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
+bool e1000_check_phy_82574(struct e1000_hw *hw);
+bool e1000_get_laa_state_82571(struct e1000_hw *hw);
+void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.c
new file mode 100755
index 00000000..25fa6727
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.c
@@ -0,0 +1,3639 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/*
+ * 82575EB Gigabit Network Connection
+ * 82575EB Gigabit Backplane Connection
+ * 82575GB Gigabit Network Connection
+ * 82576 Gigabit Network Connection
+ * 82576 Quad Port Gigabit Mezzanine Adapter
+ * 82580 Gigabit Network Connection
+ * I350 Gigabit Network Connection
+ */
+
+#include "e1000_api.h"
+#include "e1000_i210.h"
+
+STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
+STATIC void e1000_release_phy_82575(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
+STATIC void e1000_release_nvm_82575(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_link_82575(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw);
+STATIC s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
+STATIC s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+STATIC s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
+STATIC s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+STATIC s32 e1000_reset_hw_82575(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_82580(struct e1000_hw *hw);
+STATIC s32 e1000_read_phy_reg_82580(struct e1000_hw *hw,
+ u32 offset, u16 *data);
+STATIC s32 e1000_write_phy_reg_82580(struct e1000_hw *hw,
+ u32 offset, u16 data);
+STATIC s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
+STATIC s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
+STATIC s32 e1000_get_media_type_82575(struct e1000_hw *hw);
+STATIC s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw);
+STATIC s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
+STATIC s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
+ u32 offset, u16 data);
+STATIC void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+STATIC s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+STATIC s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
+STATIC void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+STATIC bool e1000_sgmii_active_82575(struct e1000_hw *hw);
+STATIC s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
+STATIC s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
+STATIC void e1000_config_collision_dist_82575(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
+STATIC void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
+STATIC void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
+STATIC s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
+STATIC s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
+STATIC s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
+STATIC s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
+STATIC s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+STATIC s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+STATIC s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
+STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
+STATIC void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
+STATIC void e1000_clear_vfta_i350(struct e1000_hw *hw);
+
+STATIC void e1000_i2c_start(struct e1000_hw *hw);
+STATIC void e1000_i2c_stop(struct e1000_hw *hw);
+STATIC s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data);
+STATIC s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data);
+STATIC s32 e1000_get_i2c_ack(struct e1000_hw *hw);
+STATIC s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data);
+STATIC s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data);
+STATIC void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
+STATIC void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
+STATIC s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data);
+STATIC bool e1000_get_i2c_data(u32 *i2cctl);
+
+STATIC const u16 e1000_82580_rxpbs_table[] = {
+ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
+#define E1000_82580_RXPBS_TABLE_SIZE \
+ (sizeof(e1000_82580_rxpbs_table) / \
+ sizeof(e1000_82580_rxpbs_table[0]))
+
+
+/**
+ * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ * @hw: pointer to the HW structure
+ *
+ * Called to determine if the I2C pins are being used for I2C or as an
+ * external MDIO interface since the two options are mutually exclusive.
+ **/
+STATIC bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+{
+ u32 reg = 0;
+ bool ext_mdio = false;
+
+ DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ reg = E1000_READ_REG(hw, E1000_MDIC);
+ ext_mdio = !!(reg & E1000_MDIC_DEST);
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ reg = E1000_READ_REG(hw, E1000_MDICNFG);
+ ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
+ break;
+ default:
+ break;
+ }
+ return ext_mdio;
+}
+
+/**
+ * e1000_init_phy_params_82575 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext;
+
+ DEBUGFUNC("e1000_init_phy_params_82575");
+
+ phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic;
+ phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic;
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ goto out;
+ }
+
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82575;
+
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+
+ phy->ops.acquire = e1000_acquire_phy_82575;
+ phy->ops.check_reset_block = e1000_check_reset_block_generic;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
+ phy->ops.release = e1000_release_phy_82575;
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ if (e1000_sgmii_active_82575(hw)) {
+ phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ } else {
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ e1000_reset_mdicnfg_82580(hw);
+
+ if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
+ phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
+ phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
+ } else {
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ phy->ops.read_reg = e1000_read_phy_reg_82580;
+ phy->ops.write_reg = e1000_write_phy_reg_82580;
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ phy->ops.read_reg = e1000_read_phy_reg_gs40g;
+ phy->ops.write_reg = e1000_write_phy_reg_gs40g;
+ break;
+ default:
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ }
+ }
+
+ /* Set phy->phy_addr and phy->id. */
+ ret_val = e1000_get_phy_id_82575(hw);
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ phy->type = e1000_phy_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ if (phy->id == I347AT4_E_PHY_ID ||
+ phy->id == M88E1112_E_PHY_ID ||
+ phy->id == M88E1340M_E_PHY_ID)
+ phy->ops.get_cable_length =
+ e1000_get_cable_length_m88_gen2;
+ else if (phy->id == M88E1543_E_PHY_ID ||
+ phy->id == M88E1512_E_PHY_ID)
+ phy->ops.get_cable_length =
+ e1000_get_cable_length_m88_gen2;
+ else
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ /* Check if this PHY is configured for media swap. */
+ if (phy->id == M88E1112_E_PHY_ID) {
+ u16 data;
+
+ ret_val = phy->ops.write_reg(hw,
+ E1000_M88E1112_PAGE_ADDR,
+ 2);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw,
+ E1000_M88E1112_MAC_CTRL_1,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+ E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+ e1000_check_for_link_media_swap;
+ }
+ if (phy->id == M88E1512_E_PHY_ID) {
+ ret_val = e1000_initialize_M88E1512_phy(hw);
+ if (ret_val)
+ goto out;
+ }
+ break;
+ case IGP03E1000_E_PHY_ID:
+ case IGP04E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
+ break;
+ case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
+ phy->type = e1000_phy_82580;
+ phy->ops.check_polarity = e1000_check_polarity_82577;
+ phy->ops.force_speed_duplex =
+ e1000_phy_force_speed_duplex_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_info = e1000_get_phy_info_82577;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
+ break;
+ case I210_I_PHY_ID:
+ phy->type = e1000_phy_i210;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82575 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u16 size;
+
+ DEBUGFUNC("e1000_init_nvm_params_82575");
+
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+ /*
+ * Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* Just in case size is out of range, cap it to the largest
+ * EEPROM size supported
+ */
+ if (size > 15)
+ size = 15;
+
+ nvm->word_size = 1 << size;
+ if (hw->mac.type < e1000_i210) {
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
+ break;
+ }
+ if (nvm->word_size == (1 << 15))
+ nvm->page_size = 128;
+
+ nvm->type = e1000_nvm_eeprom_spi;
+ } else {
+ nvm->type = e1000_nvm_flash_hw;
+ }
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_82575;
+ nvm->ops.release = e1000_release_nvm_82575;
+ if (nvm->word_size < (1 << 15))
+ nvm->ops.read = e1000_read_nvm_eerd;
+ else
+ nvm->ops.read = e1000_read_nvm_spi;
+
+ nvm->ops.write = e1000_write_nvm_spi;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_82575;
+
+ /* override generic family function pointers for specific descendants */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ nvm->ops.validate = e1000_validate_nvm_checksum_82580;
+ nvm->ops.update = e1000_update_nvm_checksum_82580;
+ break;
+ case e1000_i350:
+ case e1000_i354:
+ nvm->ops.validate = e1000_validate_nvm_checksum_i350;
+ nvm->ops.update = e1000_update_nvm_checksum_i350;
+ break;
+ default:
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82575 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
+ DEBUGFUNC("e1000_init_mac_params_82575");
+
+ /* Derives media type */
+ e1000_get_media_type_82575(hw);
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set uta register count */
+ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+ if (mac->type == e1000_82576)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ if (mac->type == e1000_82580)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ if (mac->type == e1000_i350 || mac->type == e1000_i354)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+
+ /* Enable EEE default settings for EEE supported devices */
+ if (mac->type >= e1000_i350)
+ dev_spec->eee_disable = false;
+
+ /* Allow a single clear of the SW semaphore on I210 and newer */
+ if (mac->type >= e1000_i210)
+ dev_spec->clear_semaphore_once = true;
+
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK);
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
+ /* reset */
+ if (mac->type >= e1000_82580)
+ mac->ops.reset_hw = e1000_reset_hw_82580;
+ else
+ mac->ops.reset_hw = e1000_reset_hw_82575;
+ /* hw initialization */
+ if ((mac->type == e1000_i210) || (mac->type == e1000_i211))
+ mac->ops.init_hw = e1000_init_hw_i210;
+ else
+ mac->ops.init_hw = e1000_init_hw_82575;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_generic;
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575;
+ /* physical interface shutdown */
+ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
+ /* physical interface power up */
+ mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_link_82575;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
+ /* configure collision distance */
+ mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) {
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_i350;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_i350;
+ } else {
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
+ }
+ if (hw->mac.type >= e1000_82580)
+ mac->ops.validate_mdi_setting =
+ e1000_validate_mdi_setting_crossover_generic;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* blink LED */
+ mac->ops.blink_led = e1000_blink_led_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_generic;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
+ /* acquire SW_FW sync */
+ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575;
+ mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575;
+ if (mac->type >= e1000_i210) {
+ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210;
+ mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210;
+ }
+
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_82575 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82575");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_82575;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
+ hw->phy.ops.init_params = e1000_init_phy_params_82575;
+ hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
+}
+
+/**
+ * e1000_acquire_phy_82575 - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * Acquire access rights to the correct PHY.
+ **/
+STATIC s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
+{
+ u16 mask = E1000_SWFW_PHY0_SM;
+
+ DEBUGFUNC("e1000_acquire_phy_82575");
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
+
+ return hw->mac.ops.acquire_swfw_sync(hw, mask);
+}
+
+/**
+ * e1000_release_phy_82575 - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY.
+ **/
+STATIC void e1000_release_phy_82575(struct e1000_hw *hw)
+{
+ u16 mask = E1000_SWFW_PHY0_SM;
+
+ DEBUGFUNC("e1000_release_phy_82575");
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+/**
+ * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the serial gigabit media independent
+ * interface and stores the retrieved information in data.
+ **/
+STATIC s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ s32 ret_val = -E1000_ERR_PARAM;
+
+ DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
+
+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+ DEBUGOUT1("PHY Address %u is out of range\n", offset);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the serial gigabit
+ * media independent interface.
+ **/
+STATIC s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 data)
+{
+ s32 ret_val = -E1000_ERR_PARAM;
+
+ DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
+
+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_id_82575 - Retrieve PHY addr and id
+ * @hw: pointer to the HW structure
+ *
+ * Retrieves the PHY address and ID for PHYs which do and do not use the
+ * sgmii interface.
+ **/
+STATIC s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_id;
+ u32 ctrl_ext;
+ u32 mdic;
+
+ DEBUGFUNC("e1000_get_phy_id_82575");
+
+ /* some i354 devices need an extra read for phy id */
+ if (hw->mac.type == e1000_i354)
+ e1000_get_phy_id(hw);
+
+ /*
+ * For SGMII PHYs, we try the list of possible addresses until
+ * we find one that works. For non-SGMII PHYs
+ * (e.g. integrated copper PHYs), an address of 1 should
+ * work. The result of this function should mean phy->phy_addr
+ * and phy->id are set correctly.
+ */
+ if (!e1000_sgmii_active_82575(hw)) {
+ phy->addr = 1;
+ ret_val = e1000_get_phy_id(hw);
+ goto out;
+ }
+
+ if (e1000_sgmii_uses_mdio_82575(hw)) {
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ mdic &= E1000_MDIC_PHY_MASK;
+ phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ mdic = E1000_READ_REG(hw, E1000_MDICNFG);
+ mdic &= E1000_MDICNFG_PHY_MASK;
+ phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ break;
+ }
+ ret_val = e1000_get_phy_id(hw);
+ goto out;
+ }
+
+ /* Power on sgmii phy if it is disabled */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(300);
+
+ /*
+ * The address field in the I2CCMD register is 3 bits and 0 is invalid.
+ * Therefore, we need to test 1-7
+ */
+ for (phy->addr = 1; phy->addr < 8; phy->addr++) {
+ ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+ if (ret_val == E1000_SUCCESS) {
+ DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
+ phy_id, phy->addr);
+ /*
+ * At the time of this writing, the M88 part is
+ * the only supported SGMII PHY product.
+ */
+ if (phy_id == M88_VENDOR)
+ break;
+ } else {
+ DEBUGOUT1("PHY address %u was unreadable\n",
+ phy->addr);
+ }
+ }
+
+ /* A valid PHY type couldn't be found. */
+ if (phy->addr == 8) {
+ phy->addr = 0;
+ ret_val = -E1000_ERR_PHY;
+ } else {
+ ret_val = e1000_get_phy_id(hw);
+ }
+
+ /* restore previous sfp cage power state */
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY using the serial gigabit media independent interface.
+ **/
+STATIC s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ struct e1000_phy_info *phy = &hw->phy;
+
+ DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
+
+ /*
+ * This isn't a true "hard" reset, but is the only reset
+ * available to us at this time.
+ */
+
+ DEBUGOUT("Soft resetting SGMII attached PHY...\n");
+
+ if (!(hw->phy.ops.write_reg))
+ goto out;
+
+ /*
+ * SFP documentation requires the following to configure the SFP module
+ * to work on SGMII. No further documentation is given.
+ */
+ ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+ goto out;
+
+ if (phy->id == M88E1512_E_PHY_ID)
+ ret_val = e1000_initialize_M88E1512_phy(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+STATIC s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_82575");
+
+ if (!(hw->phy.ops.read_reg))
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ goto out;
+
+ if (active) {
+ data |= IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ } else {
+ data &= ~IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+STATIC s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u32 data;
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_82580");
+
+ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+ if (active) {
+ data |= E1000_82580_PM_D0_LPLU;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ data &= ~E1000_82580_PM_SPD;
+ } else {
+ data &= ~E1000_82580_PM_D0_LPLU;
+
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on)
+ data |= E1000_82580_PM_SPD;
+ else if (phy->smart_speed == e1000_smart_speed_off)
+ data &= ~E1000_82580_PM_SPD;
+ }
+
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+ return ret_val;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u32 data;
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_82580");
+
+ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+ if (!active) {
+ data &= ~E1000_82580_PM_D3_LPLU;
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on)
+ data |= E1000_82580_PM_SPD;
+ else if (phy->smart_speed == e1000_smart_speed_off)
+ data &= ~E1000_82580_PM_SPD;
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= E1000_82580_PM_D3_LPLU;
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ data &= ~E1000_82580_PM_SPD;
+ }
+
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+ return ret_val;
+}
+
+/**
+ * e1000_acquire_nvm_82575 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+STATIC s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_acquire_nvm_82575");
+
+ ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Check whether a previous access left an
+ * error that this access could run into
+ */
+ if (hw->mac.type == e1000_i350) {
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
+ E1000_EECD_TIMEOUT)) {
+ /* Clear all access error flags */
+ E1000_WRITE_REG(hw, E1000_EECD, eecd |
+ E1000_EECD_ERROR_CLR);
+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
+ }
+ }
+ if (hw->mac.type == e1000_82580) {
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & E1000_EECD_BLOCKED) {
+ /* Clear access error flag */
+ E1000_WRITE_REG(hw, E1000_EECD, eecd |
+ E1000_EECD_BLOCKED);
+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
+ }
+ }
+
+
+ ret_val = e1000_acquire_nvm_generic(hw);
+ if (ret_val)
+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_82575 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ **/
+STATIC void e1000_release_nvm_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_82575");
+
+ e1000_release_nvm_generic(hw);
+
+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+STATIC s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 ret_val = E1000_SUCCESS;
+ s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+ DEBUGFUNC("e1000_acquire_swfw_sync_82575");
+
+ while (i < timeout) {
+ if (e1000_get_hw_semaphore_generic(hw)) {
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to release
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+STATIC void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ DEBUGFUNC("e1000_release_swfw_sync_82575");
+
+ while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
+ ; /* Empty */
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ * e1000_get_cfg_done_82575 - Read config done bit
+ * @hw: pointer to the HW structure
+ *
+ * Read the management control register for the config done bit for
+ * completion status. NOTE: silicon which is EEPROM-less will fail trying
+ * to read the config done bit, so an error is *ONLY* logged and
+ * E1000_SUCCESS is returned. If we were to return with an error,
+ * EEPROM-less silicon would not be able to be reset or change link.
+ **/
+STATIC s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ s32 ret_val = E1000_SUCCESS;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ DEBUGFUNC("e1000_get_cfg_done_82575");
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_NVM_CFG_DONE_PORT_1;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_NVM_CFG_DONE_PORT_2;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_NVM_CFG_DONE_PORT_3;
+ while (timeout) {
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+ break;
+ msec_delay(1);
+ timeout--;
+ }
+ if (!timeout)
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+
+ /* If EEPROM is not marked present, init the PHY manually */
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
+ (hw->phy.type == e1000_phy_igp_3))
+ e1000_phy_init_script_igp3(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_link_up_info_82575 - Get link speed/duplex info
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * This is a wrapper function; if using the serial gigabit media independent
+ * interface, use PCS to retrieve the link speed and duplex information.
+ * Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+STATIC s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_link_up_info_82575");
+
+ if (hw->phy.media_type != e1000_media_type_copper)
+ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
+ duplex);
+ else
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
+ duplex);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link_82575 - Check for link
+ * @hw: pointer to the HW structure
+ *
+ * If sgmii is enabled, then use the pcs register to determine link, otherwise
+ * use the generic interface for determining link.
+ **/
+STATIC s32 e1000_check_for_link_82575(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 speed, duplex;
+
+ DEBUGFUNC("e1000_check_for_link_82575");
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
+ &duplex);
+ /*
+ * Use this flag to determine if link needs to be checked or
+ * not. If we have link, clear the flag so that we do not
+ * continue to check for link.
+ */
+ hw->mac.get_link_status = !hw->mac.serdes_has_link;
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+ } else {
+ ret_val = e1000_check_for_copper_link_generic(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link_media_swap - Check which M88E1112 interface linked
+ * @hw: pointer to the HW structure
+ *
+ * Poll the M88E1112 interfaces to see which interface achieved link.
+ */
+STATIC s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ u8 port = 0;
+
+ DEBUGFUNC("e1000_check_for_link_media_swap");
+
+ /* Check the copper medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_COPPER;
+
+ /* Check the other medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_OTHER;
+
+ /* Determine if a swap needs to happen. */
+ if (port && (hw->dev_spec._82575.media_port != port)) {
+ hw->dev_spec._82575.media_port = port;
+ hw->dev_spec._82575.media_changed = true;
+ } else {
+ ret_val = e1000_check_for_link_82575(hw);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ * @hw: pointer to the HW structure
+ **/
+STATIC void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_power_up_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return;
+
+ /* Enable PCS to turn on link */
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_PCS_EN;
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+ /* Power up the laser */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+}
+
+/**
+ * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Using the physical coding sub-layer (PCS), retrieve the current speed and
+ * duplex, then store the values in the pointers provided.
+ **/
+STATIC s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 pcs;
+ u32 status;
+
+ DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
+
+ /*
+ * Read the PCS Status register for link state. For non-copper mode,
+ * the status register is not accurate. The PCS status register is
+ * used instead.
+ */
+ pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+ /*
+ * The link up bit determines when link is up on autoneg.
+ */
+ if (pcs & E1000_PCS_LSTS_LINK_OK) {
+ mac->serdes_has_link = true;
+
+ /* Detect and store PCS speed */
+ if (pcs & E1000_PCS_LSTS_SPEED_1000)
+ *speed = SPEED_1000;
+ else if (pcs & E1000_PCS_LSTS_SPEED_100)
+ *speed = SPEED_100;
+ else
+ *speed = SPEED_10;
+
+ /* Detect and store PCS duplex */
+ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ /* Check if it is an I354 2.5Gb backplane connection. */
+ if (mac->type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ *speed = SPEED_2500;
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("2500 Mbs, ");
+ DEBUGOUT("Full Duplex\n");
+ }
+ }
+
+ } else {
+ mac->serdes_has_link = false;
+ *speed = 0;
+ *duplex = 0;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_shutdown_serdes_link_82575 - Remove link during power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of serdes, shut down the SFP and PCS on driver unload
+ * when management pass thru is not enabled.
+ **/
+void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_shutdown_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return;
+
+ if (!e1000_enable_mng_pass_thru(hw)) {
+ /* Disable PCS to turn off link */
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg &= ~E1000_PCS_CFG_PCS_EN;
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+ /* shutdown the laser */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg |= E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+ }
+
+ return;
+}
+
+/**
+ * e1000_reset_hw_82575 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+STATIC s32 e1000_reset_hw_82575(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_reset_hw_82575");
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ /* set the completion timeout for interface */
+ ret_val = e1000_set_pcie_completion_timeout(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Set completion timeout has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to MAC\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ }
+
+ /* If EEPROM is not present, run manual init scripts */
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
+ e1000_reset_init_script_82575(hw);
+
+ /* Clear any pending interrupt events. */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_82575 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+s32 e1000_init_hw_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ u16 i, rar_count = mac->rar_entry_count;
+
+ DEBUGFUNC("e1000_init_hw_82575");
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ if (ret_val) {
+ DEBUGOUT("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+ }
+
+ /* Disabling VLAN filtering */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address */
+ e1000_init_rx_addrs_generic(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Zero out the Unicast HASH table */
+ DEBUGOUT("Zeroing the UTA\n");
+ for (i = 0; i < mac->uta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ /* Set the default MTU size */
+ hw->dev_spec._82575.mtu = 1500;
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82575(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_82575 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link; once link is established, calls to configure collision distance
+ * and flow control are made.
+ **/
+STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u32 phpm_reg;
+
+ DEBUGFUNC("e1000_setup_copper_link_82575");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Clear Go Link Disconnect bit on supported devices */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ break;
+ default:
+ break;
+ }
+
+ ret_val = e1000_setup_serdes_link_82575(hw);
+ if (ret_val)
+ goto out;
+
+ if (e1000_sgmii_active_82575(hw)) {
+ /* allow time for the SFP cage to power up the phy */
+ msec_delay(300);
+
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ goto out;
+ }
+ }
+ switch (hw->phy.type) {
+ case e1000_phy_i210:
+ case e1000_phy_m88:
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case I210_I_PHY_ID:
+ ret_val = e1000_copper_link_setup_m88_gen2(hw);
+ break;
+ default:
+ ret_val = e1000_copper_link_setup_m88(hw);
+ break;
+ }
+ break;
+ case e1000_phy_igp_3:
+ ret_val = e1000_copper_link_setup_igp(hw);
+ break;
+ case e1000_phy_82580:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_setup_copper_link_generic(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_serdes_link_82575 - Setup link for serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configure the physical coding sub-layer (PCS) link. The PCS link is
+ * used on copper connections where the serialized gigabit media independent
+ * interface (sgmii), or serdes fiber is being used. Configures the link
+ * for auto-negotiation or forces speed/duplex.
+ **/
+STATIC s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
+ bool pcs_autoneg;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_setup_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return ret_val;
+
+ /*
+ * On the 82575, SerDes loopback mode persists until it is
+ * explicitly turned off or a power cycle is performed. A read to
+ * the register does not indicate its status. Therefore, we ensure
+ * loopback mode is disabled during initialization.
+ */
+ E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+
+ /* power on the sfp cage if present */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl_reg |= E1000_CTRL_SLU;
+
+ /* set both sw defined pins on 82575/82576 */
+ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
+ ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+
+ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+
+ /* default pcs_autoneg to the same setting as mac autoneg */
+ pcs_autoneg = hw->mac.autoneg;
+
+ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ /* sgmii mode lets the phy handle forcing speed/duplex */
+ pcs_autoneg = true;
+ /* autoneg time out should be disabled for SGMII mode */
+ reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ /* disable PCS autoneg and support parallel detect only */
+ pcs_autoneg = false;
+ /* fall through to default case */
+ default:
+ if (hw->mac.type == e1000_82575 ||
+ hw->mac.type == e1000_82576) {
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
+ pcs_autoneg = false;
+ }
+
+ /*
+	 * non-SGMII modes only support a speed of 1000/Full for the
+	 * link, so it is best to just force the MAC and let the PCS
+	 * link either autoneg or be forced to 1000/Full
+ */
+ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+ /* set speed of 1000/Full if speed/duplex is forced */
+ reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+ break;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+
+ /*
+	 * The new SerDes mode allows forcing the speed or autonegotiating it
+	 * at 1 Gb. Autoneg should be the default set by most drivers; it is
+	 * the mode that will be compatible with older link partners and
+	 * switches. However, both modes are supported by the hardware and by
+	 * some drivers/tools.
+ */
+ reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
+ E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+
+ if (pcs_autoneg) {
+ /* Set PCS register for autoneg */
+ reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
+ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+
+ /* Disable force flow control for autoneg */
+ reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
+
+ /* Configure flow control advertisement for autoneg */
+ anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
+
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
+ case e1000_fc_rx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ anadv_reg |= E1000_TXCW_PAUSE;
+ break;
+ case e1000_fc_tx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ break;
+ default:
+ break;
+ }
+
+ E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg);
+
+ DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
+ } else {
+ /* Set PCS register for forced link */
+ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
+
+ /* Force flow control for forced link */
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
+ DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
+ }
+
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+
+ if (!pcs_autoneg && !e1000_sgmii_active_82575(hw))
+ e1000_force_mac_fc_generic(hw);
+
+ return ret_val;
+}
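+
+/*
+ * Decision sketch (an illustrative restatement of the routine above, not
+ * additional behaviour): pcs_autoneg starts out as hw->mac.autoneg, is
+ * forced on for SGMII, forced off for 1000BASE-KX, and may also be cleared
+ * on 82575/82576 when the NVM compatibility word has the PCS-autoneg-disable
+ * bit set. With autoneg, PCS_ANADV advertises flow control according to
+ * hw->fc.requested_mode; without it, speed/duplex and flow control are
+ * forced to 1000/Full.
+ */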
+
+/**
+ * e1000_get_media_type_82575 - derives current media type.
+ * @hw: pointer to the HW structure
+ *
+ * The media type is chosen based on a few settings.
+ * The following are taken into account:
+ * - link mode set in the current port Init Control Word #3
+ * - current link mode settings in CSR register
+ * - MDIO vs. I2C PHY control interface chosen
+ * - SFP module media type
+ **/
+STATIC s32 e1000_get_media_type_82575(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext = 0;
+ u32 link_mode = 0;
+
+ /* Set internal phy as default */
+ dev_spec->sgmii_active = false;
+ dev_spec->module_plugged = false;
+
+ /* Get CSR setting */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ /* extract link mode setting */
+ link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+
+ switch (link_mode) {
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_GMII:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ /* Get phy control interface type set (MDIO vs. I2C)*/
+ if (e1000_sgmii_uses_mdio_82575(hw)) {
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = true;
+ break;
+ }
+ /* fall through for I2C based SGMII */
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ /* read media type from SFP EEPROM */
+ ret_val = e1000_set_sfp_media_type_82575(hw);
+ if ((ret_val != E1000_SUCCESS) ||
+ (hw->phy.media_type == e1000_media_type_unknown)) {
+ /*
+ * If media type was not identified then return media
+ * type defined by the CTRL_EXT settings.
+ */
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+
+ if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = true;
+ }
+
+ break;
+ }
+
+ /* do not change link mode for 100BaseFX */
+ if (dev_spec->eth_flags.e100_base_fx)
+ break;
+
+ /* change current link mode setting */
+ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
+ else
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ break;
+ }
+
+ return ret_val;
+}
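+
+/*
+ * Illustrative summary of the switch above (a sketch, not an exhaustive
+ * description): the CTRL_EXT link mode field selects the media type before
+ * any SFP-based override:
+ *
+ *   1000BASE_KX            -> internal serdes
+ *   GMII                   -> copper (internal PHY)
+ *   SGMII over MDIO        -> copper, sgmii_active = true
+ *   SGMII over I2C or
+ *   PCIE_SERDES            -> decided by e1000_set_sfp_media_type_82575()
+ */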
+
+/**
+ * e1000_set_sfp_media_type_82575 - derives SFP module media type.
+ * @hw: pointer to the HW structure
+ *
+ * The media type is chosen based on the SFP module compatibility flags
+ * retrieved from the SFP ID EEPROM.
+ **/
+STATIC s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_ERR_CONFIG;
+ u32 ctrl_ext = 0;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags;
+ u8 tranceiver_type = 0;
+ s32 timeout = 3;
+
+ /* Turn I2C interface ON and power on sfp cage */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
+
+ E1000_WRITE_FLUSH(hw);
+
+ /* Read SFP module data */
+ while (timeout) {
+ ret_val = e1000_read_sfp_data_byte(hw,
+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
+ &tranceiver_type);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ msec_delay(100);
+ timeout--;
+ }
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+
+ ret_val = e1000_read_sfp_data_byte(hw,
+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
+ (u8 *)eth_flags);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+
+ /* Check if there is some SFP module plugged and powered */
+ if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
+ (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
+ dev_spec->module_plugged = true;
+ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e100_base_fx) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e1000_base_t) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_copper;
+ } else {
+ hw->phy.media_type = e1000_media_type_unknown;
+ DEBUGOUT("PHY module has not been recognized\n");
+ goto out;
+ }
+ } else {
+ hw->phy.media_type = e1000_media_type_unknown;
+ }
+ ret_val = E1000_SUCCESS;
+out:
+ /* Restore I2C interface setting */
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ return ret_val;
+}
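+
+/*
+ * Illustrative mapping derived from the routine above (identifier and flag
+ * names as used in the code, nothing new added):
+ *
+ *   identifier         eth_flags            resulting media type
+ *   SFP or SFF         1000BASE-LX/SX       internal serdes
+ *   SFP or SFF         100BASE-FX           internal serdes, sgmii_active
+ *   SFP or SFF         1000BASE-T           copper, sgmii_active
+ *   SFP or SFF         none of the above    unknown
+ *   anything else      -                    unknown
+ */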
+
+/**
+ * e1000_valid_led_default_82575 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+STATIC s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_82575");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_internal_serdes:
+ *data = ID_LED_DEFAULT_82575_SERDES;
+ break;
+ case e1000_media_type_copper:
+ default:
+ *data = ID_LED_DEFAULT;
+ break;
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_sgmii_active_82575 - Return sgmii state
+ * @hw: pointer to the HW structure
+ *
+ * 82575 silicon has a serialized gigabit media independent interface (sgmii)
+ * which can be enabled for use in the embedded applications. Simply
+ * return the current state of the sgmii interface.
+ **/
+STATIC bool e1000_sgmii_active_82575(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ return dev_spec->sgmii_active;
+}
+
+/**
+ * e1000_reset_init_script_82575 - Inits HW defaults after reset
+ * @hw: pointer to the HW structure
+ *
+ * Inits recommended HW defaults after a reset when there is no EEPROM
+ * detected. This is only for the 82575.
+ **/
+STATIC s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_reset_init_script_82575");
+
+ if (hw->mac.type == e1000_82575) {
+ DEBUGOUT("Running reset init script for 82575\n");
+ /* SerDes configuration via SERDESCTRL */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
+
+ /* CCM configuration via CCMCTL register */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
+
+ /* PCIe lanes configuration */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
+
+ /* PCIe PLL Configuration */
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr_82575 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_mac_addr_82575");
+
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_config_collision_dist_82575 - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+STATIC void e1000_config_collision_dist_82575(struct e1000_hw *hw)
+{
+ u32 tctl_ext;
+
+ DEBUGFUNC("e1000_config_collision_dist_82575");
+
+ tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
+
+ tctl_ext &= ~E1000_TCTL_EXT_COLD;
+ tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
+
+ E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+
+ if (!(phy->ops.check_reset_block))
+ return;
+
+ /* If the management interface is not enabled, then power down */
+ if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+STATIC void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82575");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+
+ E1000_READ_REG(hw, E1000_IAC);
+ E1000_READ_REG(hw, E1000_ICRXOC);
+
+ E1000_READ_REG(hw, E1000_ICRXPTC);
+ E1000_READ_REG(hw, E1000_ICRXATC);
+ E1000_READ_REG(hw, E1000_ICTXPTC);
+ E1000_READ_REG(hw, E1000_ICTXATC);
+ E1000_READ_REG(hw, E1000_ICTXQEC);
+ E1000_READ_REG(hw, E1000_ICTXQMTC);
+ E1000_READ_REG(hw, E1000_ICRXDMTC);
+
+ E1000_READ_REG(hw, E1000_CBTMPC);
+ E1000_READ_REG(hw, E1000_HTDPMC);
+ E1000_READ_REG(hw, E1000_CBRMPC);
+ E1000_READ_REG(hw, E1000_RPTHC);
+ E1000_READ_REG(hw, E1000_HGPTC);
+ E1000_READ_REG(hw, E1000_HTCBDPC);
+ E1000_READ_REG(hw, E1000_HGORCL);
+ E1000_READ_REG(hw, E1000_HGORCH);
+ E1000_READ_REG(hw, E1000_HGOTCL);
+ E1000_READ_REG(hw, E1000_HGOTCH);
+ E1000_READ_REG(hw, E1000_LENERRS);
+
+ /* This register should not be read in copper configurations */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
+ e1000_sgmii_active_82575(hw))
+ E1000_READ_REG(hw, E1000_SCVPC);
+}
+
+/**
+ * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
+ * @hw: pointer to the HW structure
+ *
+ * After Rx enable, if manageability is enabled then there is likely some
+ * bad data at the start of the FIFO and possibly in the DMA FIFO. This
+ * function clears the FIFOs and flushes any packets that came in while Rx
+ * was being enabled.
+ **/
+void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
+{
+ u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+ int i, ms_wait;
+
+ DEBUGFUNC("e1000_rx_fifo_workaround_82575");
+ if (hw->mac.type != e1000_82575 ||
+ !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+ return;
+
+ /* Disable all Rx queues */
+ for (i = 0; i < 4; i++) {
+ rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i),
+ rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
+ }
+ /* Poll all queues to verify they have shut down */
+ for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+ msec_delay(1);
+ rx_enabled = 0;
+ for (i = 0; i < 4; i++)
+ rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
+ if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
+ break;
+ }
+
+ if (ms_wait == 10)
+ DEBUGOUT("Queue disable timed out after 10ms\n");
+
+ /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+	 * incoming packets are rejected. Set the enable bit and wait 2 ms so
+	 * that any packet that was coming in while RCTL.EN was set is flushed
+ */
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+
+ rlpml = E1000_READ_REG(hw, E1000_RLPML);
+ E1000_WRITE_REG(hw, E1000_RLPML, 0);
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
+ temp_rctl |= E1000_RCTL_LPE;
+
+ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(2);
+
+ /* Enable Rx queues that were previously enabled and restore our
+ * previous state
+ */
+ for (i = 0; i < 4; i++)
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+
+ E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
+ /* Flush receive errors generated by workaround */
+ E1000_READ_REG(hw, E1000_ROC);
+ E1000_READ_REG(hw, E1000_RNBC);
+ E1000_READ_REG(hw, E1000_MPC);
+}
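+
+/*
+ * Workaround at a glance (a condensed sketch of the function above):
+ *   1. disable all four Rx queues and poll up to 10 ms for them to stop
+ *   2. clear RFCTL.LEF, RLPML and RCTL.SBP, set RCTL.LPE so frames are
+ *      dropped, then re-enable Rx for 2 ms to drain the FIFOs
+ *   3. restore RXDCTL, RCTL, RLPML and RFCTL
+ *   4. read ROC, RNBC and MPC to discard the error counts the flush caused
+ * It only runs on 82575 parts with MANC.RCV_TCO_EN set.
+ */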
+
+/**
+ * e1000_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
+ * however the hardware default for these parts is 500us to 1ms, which is less
+ * than the 10ms recommended by the PCI-e spec. To address this we need to
+ * increase the value to the 10ms-200ms range for a capability version 1
+ * config, or to the 16ms-55ms range for version 2.
+ **/
+STATIC s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
+{
+ u32 gcr = E1000_READ_REG(hw, E1000_GCR);
+ s32 ret_val = E1000_SUCCESS;
+ u16 pcie_devctl2;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /*
+	 * if the capabilities version is type 1 we can write the
+ * timeout of 10ms to 200ms through the GCR register
+ */
+ if (!(gcr & E1000_GCR_CAP_VER2)) {
+ gcr |= E1000_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /*
+ * for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+ if (ret_val)
+ goto out;
+
+ pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+ ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
+
+ E1000_WRITE_REG(hw, E1000_GCR, gcr);
+ return ret_val;
+}
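+
+/*
+ * Decision sketch (illustrative restatement of the function above): if the
+ * GCR completion-timeout field is already non-zero nothing is reprogrammed;
+ * a capability version 1 device gets the 10ms-200ms range via GCR, while a
+ * version 2 device gets the 16ms-55ms range via the Device Control 2
+ * config-space register. In every path the completion timeout resend bit is
+ * cleared before GCR is written back.
+ */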
+
+/**
+ * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ * @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ * enables/disables L2 switch anti-spoofing functionality.
+ **/
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+{
+ u32 reg_val, reg_offset;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ reg_offset = E1000_DTXSWC;
+ break;
+ case e1000_i350:
+ case e1000_i354:
+ reg_offset = E1000_TXSWC;
+ break;
+ default:
+ return;
+ }
+
+ reg_val = E1000_READ_REG(hw, reg_offset);
+ if (enable) {
+ reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ /* The PF can spoof - it has to in order to
+ * support emulation mode NICs
+ */
+ reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ } else {
+ reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ }
+ E1000_WRITE_REG(hw, reg_offset, reg_val);
+}
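+
+/*
+ * Bit layout example (illustrative, assuming MAX_NUM_VFS == 8 and the usual
+ * 8-bit MAC/VLAN spoof-check masks): enabling anti-spoofing first sets every
+ * MAC and VLAN spoof-check bit, then the XOR clears the two bits that belong
+ * to the PF pool so the PF itself may still spoof. For pf = 0:
+ *
+ *   reg_val |= MAC_SPOOF_MASK | VLAN_SPOOF_MASK;   sets bits 0-15
+ *   reg_val ^= (1 << 0) | (1 << 8);                clears the PF's two bits
+ */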
+
+/**
+ * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables L2 switch loopback functionality.
+ **/
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 dtxswc;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
+ break;
+ case e1000_i350:
+ case e1000_i354:
+ dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
+ break;
+ default:
+ /* Currently no other hardware supports loopback */
+ break;
+ }
+}
+
+/**
+ * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables replication of packets across multiple pools.
+ **/
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
+
+ if (enable)
+ vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
+ else
+ vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
+
+ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
+}
+
+/**
+ * e1000_read_phy_reg_82580 - Read 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+STATIC s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_phy_reg_82580");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_82580 - Write 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+STATIC s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_write_phy_reg_82580");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ * @hw: pointer to the HW structure
+ *
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * the values found in the EEPROM. This addresses an issue in which these
+ * bits are not restored from EEPROM after reset.
+ **/
+STATIC s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 mdicnfg;
+ u16 nvm_data = 0;
+
+ DEBUGFUNC("e1000_reset_mdicnfg_82580");
+
+ if (hw->mac.type != e1000_82580)
+ goto out;
+ if (!e1000_sgmii_active_82575(hw))
+ goto out;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+ if (nvm_data & NVM_WORD24_EXT_MDIO)
+ mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+ if (nvm_data & NVM_WORD24_COM_MDIO)
+ mdicnfg |= E1000_MDICNFG_COM_MDIO;
+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_82580 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the function or the entire device (all ports, etc.)
+ * to a known state.
+ **/
+STATIC s32 e1000_reset_hw_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ /* BH SW mailbox bit in SW_FW_SYNC */
+ u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+ u32 ctrl;
+ bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+ DEBUGFUNC("e1000_reset_hw_82580");
+
+ hw->dev_spec._82575.global_device_reset = false;
+
+ /* 82580 does not reliably do global_device_reset due to hw errata */
+ if (hw->mac.type == e1000_82580)
+ global_device_reset = false;
+
+ /* Get current control state. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ /* Determine whether or not a global dev reset is requested */
+ if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw,
+ swmbsw_mask))
+ global_device_reset = false;
+
+ if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STAT_DEV_RST_SET))
+ ctrl |= E1000_CTRL_DEV_RST;
+ else
+ ctrl |= E1000_CTRL_RST;
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+	/* Add delay to ensure DEV_RST has time to complete */
+ if (global_device_reset)
+ msec_delay(5);
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /*
+		 * When auto config read does not complete, do not
+		 * return with an error. This can happen in situations
+		 * where there is no eeprom; returning an error here
+		 * would prevent getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ }
+
+ /* clear global device reset status bit */
+ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+ /* Clear any pending interrupt events. */
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ ret_val = e1000_reset_mdicnfg_82580(hw);
+ if (ret_val)
+ DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
+
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ /* Release semaphore */
+ if (global_device_reset)
+ hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
+
+ return ret_val;
+}
+
+/**
+ * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
+ * @data: data received by reading RXPBS register
+ *
+ * The 82580 uses a table based approach for packet buffer allocation sizes.
+ * This function converts the retrieved value into the correct table value:
+ *          0x0  0x1  0x2  0x3  0x4  0x5  0x6  0x7
+ *   0x0     36   72  144    1    2    4    8   16
+ *   0x8     35   70  140  rsv  rsv  rsv  rsv  rsv
+ */
+u16 e1000_rxpbs_adjust_82580(u32 data)
+{
+ u16 ret_val = 0;
+
+ if (data < E1000_82580_RXPBS_TABLE_SIZE)
+ ret_val = e1000_82580_rxpbs_table[data];
+
+ return ret_val;
+}
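+
+/*
+ * Usage sketch (illustrative; e1000_82580_rxpbs_table is the lookup table
+ * referenced above):
+ *
+ *   u16 pba = e1000_rxpbs_adjust_82580(E1000_READ_REG(hw, E1000_RXPBS));
+ *
+ * Any index at or beyond E1000_82580_RXPBS_TABLE_SIZE simply returns 0.
+ */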
+
+/**
+ * e1000_validate_nvm_checksum_with_offset - Validate EEPROM
+ * checksum
+ * @hw: pointer to the HW structure
+ * @offset: offset in words of the checksum protected region
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
+
+ for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+ DEBUGOUT("NVM Checksum Invalid\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
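+
+/*
+ * Worked example (arithmetic only): the checksum word stored at
+ * offset + NVM_CHECKSUM_REG is chosen so that the 16-bit sum of every word
+ * in the protected region, including the checksum word itself, equals
+ * NVM_SUM (0xBABA). If the data words sum to 0x1234, the stored checksum
+ * must be 0xBABA - 0x1234 = 0xA886.
+ */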
+
+/**
+ * e1000_update_nvm_checksum_with_offset - Update EEPROM
+ * checksum
+ * @hw: pointer to the HW structure
+ * @offset: offset in words of the checksum protected region
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
+
+ for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+ &checksum);
+ if (ret_val)
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+out:
+ return ret_val;
+}
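+
+/*
+ * Relationship sketch (illustrative): the update routine computes
+ *   checksum = (u16)NVM_SUM - sum(words[offset .. offset+NVM_CHECKSUM_REG-1])
+ * and writes it at offset + NVM_CHECKSUM_REG, which is exactly the word the
+ * validate routine above re-adds to reach 0xBABA.
+ */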
+
+/**
+ * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+STATIC s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 eeprom_regions_count = 1;
+ u16 j, nvm_data;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_82580");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+		/* if the checksums compatibility bit is set, validate
+		 * checksums for all 4 ports. */
+ eeprom_regions_count = 4;
+ }
+
+ for (j = 0; j < eeprom_regions_count; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_82580 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
+ **/
+STATIC s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 j, nvm_data;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_82580");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n");
+ goto out;
+ }
+
+ if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) {
+ /* set compatibility bit to validate checksums appropriately */
+ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
+ ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+ &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n");
+ goto out;
+ }
+ }
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+STATIC s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 j;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_i350");
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
+ **/
+STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 j;
+ u16 nvm_offset;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_i350");
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * __e1000_access_emi_reg - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @address: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ **/
+STATIC s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("__e1000_access_emi_reg");
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_emi_reg - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ **/
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+ DEBUGFUNC("e1000_read_emi_reg");
+
+ return __e1000_access_emi_reg(hw, addr, data, true);
+}
+
+/**
+ * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the Marvell M88E1512 to work correctly with Avoton.
+ **/
+s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_initialize_M88E1512_phy");
+
+	/* Check if this is the correct PHY. */
+ if (phy->id != M88E1512_E_PHY_ID)
+ goto out;
+
+ /* Switch to PHY page 0xFF. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0xFB. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0x12. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to SGMII-to-Copper */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ msec_delay(1000);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_eee_i350 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE based on setting in dev_spec structure.
+ *
+ **/
+s32 e1000_set_eee_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 ipcnfg, eeer;
+
+ DEBUGFUNC("e1000_set_eee_i350");
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ goto out;
+ ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
+ eeer = E1000_READ_REG(hw, E1000_EEER);
+
+ /* enable or disable per user setting */
+ if (!(hw->dev_spec._82575.eee_disable)) {
+ u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
+
+ ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+ E1000_EEER_LPI_FC);
+
+ /* This bit should not be set in normal operation. */
+ if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+ DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
+ } else {
+ ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+ E1000_EEER_LPI_FC);
+ }
+ E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
+ E1000_WRITE_REG(hw, E1000_EEER, eeer);
+ E1000_READ_REG(hw, E1000_IPCNFG);
+ E1000_READ_REG(hw, E1000_EEER);
+out:
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_eee_i354 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE legacy mode based on setting in dev_spec structure.
+ *
+ **/
+s32 e1000_set_eee_i354(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_set_eee_i354");
+
+ if ((hw->phy.media_type != e1000_media_type_copper) ||
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
+ goto out;
+
+ if (!hw->dev_spec._82575.eee_disable) {
+ /* Switch to PHY page 18. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+ phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ /* Turn on EEE advertisement. */
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= E1000_EEE_ADV_100_SUPPORTED |
+ E1000_EEE_ADV_1000_SUPPORTED;
+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ phy_data);
+ } else {
+ /* Turn off EEE advertisement. */
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
+ E1000_EEE_ADV_1000_SUPPORTED);
+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ phy_data);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_eee_status_i354 - Get EEE status
+ * @hw: pointer to the HW structure
+ * @status: EEE status
+ *
+ * Get EEE status by guessing based on whether Tx or Rx LPI indications have
+ * been received.
+ **/
+s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_get_eee_status_i354");
+
+ /* Check if EEE is supported on this device. */
+ if ((hw->phy.media_type != e1000_media_type_copper) ||
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
+ goto out;
+
+ ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
+ E1000_PCS_STATUS_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
+ E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
+
+out:
+ return ret_val;
+}
+
+/* Due to a hw errata, if the host tries to configure the VFTA register
+ * while performing queries from the BMC or DMA, then the VFTA in some
+ * cases won't be written.
+ */
+
+/**
+ * e1000_clear_vfta_i350 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_i350(struct e1000_hw *hw)
+{
+ u32 offset;
+ int i;
+
+ DEBUGFUNC("e1000_clear_vfta_350");
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ for (i = 0; i < 10; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * e1000_write_vfta_i350 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ int i;
+
+ DEBUGFUNC("e1000_write_vfta_350");
+
+ for (i = 0; i < 10; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+
+ E1000_WRITE_FLUSH(hw);
+}
+
+
+/**
+ * e1000_set_i2c_bb - Enable I2C bit-bang
+ * @hw: pointer to the HW structure
+ *
+ * Enable I2C bit-bang interface
+ *
+ **/
+s32 e1000_set_i2c_bb(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext, i2cparams;
+
+ DEBUGFUNC("e1000_set_i2c_bb");
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ i2cparams |= E1000_I2CBB_EN;
+ i2cparams |= E1000_I2C_DATA_OE_N;
+ i2cparams |= E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams);
+ E1000_WRITE_FLUSH(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: value read
+ *
+ * Performs byte read operation over I2C interface at
+ * a specified device address.
+ **/
+s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u32 max_retry = 10;
+ u32 retry = 1;
+ u16 swfw_mask = 0;
+
+ bool nack = true;
+
+ DEBUGFUNC("e1000_read_i2c_byte_generic");
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ do {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+ != E1000_SUCCESS) {
+ status = E1000_ERR_SWFW_SYNC;
+ goto read_byte_out;
+ }
+
+ e1000_i2c_start(hw);
+
+ /* Device Address and write indication */
+ status = e1000_clock_out_i2c_byte(hw, dev_addr);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, byte_offset);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_start(hw);
+
+ /* Device Address and read indication */
+ status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1));
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_in_i2c_byte(hw, data);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_bit(hw, nack);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_stop(hw);
+ break;
+
+fail:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(100);
+ e1000_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte read error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte read error.\n");
+
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+read_byte_out:
+
+ return status;
+}
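+
+/*
+ * Wire sequence for the byte read above (an illustrative view of a standard
+ * I2C combined transaction, matching the calls in the function):
+ *
+ *   START | dev_addr+W | ACK | byte_offset | ACK |
+ *   START | dev_addr+R | ACK | data byte   | NACK | STOP
+ *
+ * The final NACK (nack = true) tells the device that no further bytes will
+ * be read before the STOP condition.
+ */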
+
+/**
+ * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: value to write
+ *
+ * Performs byte write operation over I2C interface at
+ * a specified device address.
+ **/
+s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ s32 status = E1000_SUCCESS;
+ u32 max_retry = 1;
+ u32 retry = 0;
+ u16 swfw_mask = 0;
+
+ DEBUGFUNC("e1000_write_i2c_byte_generic");
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) {
+ status = E1000_ERR_SWFW_SYNC;
+ goto write_byte_out;
+ }
+
+ do {
+ e1000_i2c_start(hw);
+
+ status = e1000_clock_out_i2c_byte(hw, dev_addr);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, byte_offset);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, data);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_stop(hw);
+ break;
+
+fail:
+ e1000_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte write error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte write error.\n");
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
+
+ return status;
+}
+
+/**
+ * e1000_i2c_start - Sets I2C start condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C start condition (High -> Low on SDA while SCL is High)
+ **/
+STATIC void e1000_i2c_start(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_i2c_start");
+
+ /* Start condition must begin with data and clock high */
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for start condition (4.7us) */
+ usec_delay(E1000_I2C_T_SU_STA);
+
+ e1000_set_i2c_data(hw, &i2cctl, 0);
+
+ /* Hold time for start condition (4us) */
+ usec_delay(E1000_I2C_T_HD_STA);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+
+}
+
+/**
+ * e1000_i2c_stop - Sets I2C stop condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ **/
+STATIC void e1000_i2c_stop(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_i2c_stop");
+
+ /* Stop condition must begin with data low and clock high */
+ e1000_set_i2c_data(hw, &i2cctl, 0);
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for stop condition (4us) */
+ usec_delay(E1000_I2C_T_SU_STO);
+
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+
+ /* bus free time between stop and start (4.7us)*/
+ usec_delay(E1000_I2C_T_BUF);
+}
+
+/**
+ * e1000_clock_in_i2c_byte - Clocks in one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte to clock in
+ *
+ * Clocks in one byte data via I2C data/clock
+ **/
+STATIC s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data)
+{
+ s32 i;
+ bool bit = 0;
+
+ DEBUGFUNC("e1000_clock_in_i2c_byte");
+
+ *data = 0;
+ for (i = 7; i >= 0; i--) {
+ e1000_clock_in_i2c_bit(hw, &bit);
+ *data |= bit << i;
+ }
+
+ return E1000_SUCCESS;
+}
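+
+/*
+ * Bit-assembly example (illustrative): bits arrive MSB first, so clocking in
+ * 1,0,1,0,0,1,0,1 yields *data = 0xA5.
+ */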
+
+/**
+ * e1000_clock_out_i2c_byte - Clocks out one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte clocked out
+ *
+ * Clocks out one byte data via I2C data/clock
+ **/
+STATIC s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data)
+{
+ s32 status = E1000_SUCCESS;
+ s32 i;
+ u32 i2cctl;
+ bool bit = 0;
+
+ DEBUGFUNC("e1000_clock_out_i2c_byte");
+
+ for (i = 7; i >= 0; i--) {
+ bit = (data >> i) & 0x1;
+ status = e1000_clock_out_i2c_bit(hw, bit);
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ /* Release SDA line (set high) */
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ i2cctl |= E1000_I2C_DATA_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+/**
+ * e1000_get_i2c_ack - Polls for I2C ACK
+ * @hw: pointer to hardware structure
+ *
+ * Clocks in one bit via I2C data/clock and checks for an ACK from the device
+ **/
+STATIC s32 e1000_get_i2c_ack(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u32 i = 0;
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ u32 timeout = 10;
+ bool ack = true;
+
+ DEBUGFUNC("e1000_get_i2c_ack");
+
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ /* Wait until SCL returns high */
+ for (i = 0; i < timeout; i++) {
+ usec_delay(1);
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ if (i2cctl & E1000_I2C_CLK_IN)
+ break;
+ }
+ if (!(i2cctl & E1000_I2C_CLK_IN))
+ return E1000_ERR_I2C;
+
+ ack = e1000_get_i2c_data(&i2cctl);
+ if (ack) {
+ DEBUGOUT("I2C ack was not received.\n");
+ status = E1000_ERR_I2C;
+ }
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+
+ return status;
+}
+
+/**
+ * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: read data value
+ *
+ * Clocks in one bit via I2C data/clock
+ **/
+STATIC s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_clock_in_i2c_bit");
+
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ *data = e1000_get_i2c_data(&i2cctl);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: data value to write
+ *
+ * Clocks out one bit via I2C data/clock
+ **/
+STATIC s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data)
+{
+ s32 status;
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_clock_out_i2c_bit");
+
+ status = e1000_set_i2c_data(hw, &i2cctl, data);
+ if (status == E1000_SUCCESS) {
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us.
+ * This also takes care of the data hold time.
+ */
+ usec_delay(E1000_I2C_T_LOW);
+ } else {
+ status = E1000_ERR_I2C;
+ DEBUGOUT1("I2C data was not set to %X\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * e1000_raise_i2c_clk - Raises the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Raises the I2C clock line '0'->'1'
+ **/
+STATIC void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+{
+ DEBUGFUNC("e1000_raise_i2c_clk");
+
+ *i2cctl |= E1000_I2C_CLK_OUT;
+ *i2cctl &= ~E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* SCL rise time (1000ns) */
+ usec_delay(E1000_I2C_T_RISE);
+}
+
+/**
+ * e1000_lower_i2c_clk - Lowers the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Lowers the I2C clock line '1'->'0'
+ **/
+STATIC void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+{
+
+ DEBUGFUNC("e1000_lower_i2c_clk");
+
+ *i2cctl &= ~E1000_I2C_CLK_OUT;
+ *i2cctl &= ~E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* SCL fall time (300ns) */
+ usec_delay(E1000_I2C_T_FALL);
+}
+
+/**
+ * e1000_set_i2c_data - Sets the I2C data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ * @data: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ **/
+STATIC s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data)
+{
+ s32 status = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_set_i2c_data");
+
+ if (data)
+ *i2cctl |= E1000_I2C_DATA_OUT;
+ else
+ *i2cctl &= ~E1000_I2C_DATA_OUT;
+
+ *i2cctl &= ~E1000_I2C_DATA_OE_N;
+ *i2cctl |= E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+ usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA);
+
+ *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ if (data != e1000_get_i2c_data(i2cctl)) {
+ status = E1000_ERR_I2C;
+ DEBUGOUT1("Error - I2C data was not set to %X.\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * e1000_get_i2c_data - Reads the I2C SDA data bit
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ **/
+STATIC bool e1000_get_i2c_data(u32 *i2cctl)
+{
+ bool data;
+
+ DEBUGFUNC("e1000_get_i2c_data");
+
+ if (*i2cctl & E1000_I2C_DATA_IN)
+ data = 1;
+ else
+ data = 0;
+
+ return data;
+}
+
+/**
+ * e1000_i2c_bus_clear - Clears the I2C bus
+ * @hw: pointer to hardware structure
+ *
+ * Clears the I2C bus by sending nine clock pulses.
+ * Used when data line is stuck low.
+ **/
+void e1000_i2c_bus_clear(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ u32 i;
+
+ DEBUGFUNC("e1000_i2c_bus_clear");
+
+ e1000_i2c_start(hw);
+
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+
+ for (i = 0; i < 9; i++) {
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Min high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Min low period of clock is 4.7us*/
+ usec_delay(E1000_I2C_T_LOW);
+ }
+
+ e1000_i2c_start(hw);
+
+ /* Put the i2c bus back to default state */
+ e1000_i2c_stop(hw);
+}
+
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.h
new file mode 100755
index 00000000..09b7bf2e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_82575.h
@@ -0,0 +1,520 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_82575_H_
+#define _E1000_82575_H_
+
+#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
+/*
+ * Receive Address Register Count
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * These entries are also used for MAC-based filtering.
+ */
+/*
+ * For 82576, there is an additional set of RARs that begins at an offset
+ * separate from the first set of RARs.
+ */
+#define E1000_RAR_ENTRIES_82575 16
+#define E1000_RAR_ENTRIES_82576 24
+#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_I350 32
+#define E1000_SW_SYNCH_MB 0x00000100
+#define E1000_STAT_DEV_RST_SET 0x00100000
+#define E1000_CTRL_DEV_RST 0x20000000
+
+#ifdef E1000_BIT_FIELDS
+struct e1000_adv_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ u32 data;
+ struct {
+ u32 datalen:16; /* Data buffer length */
+ u32 rsvd:4;
+ u32 dtyp:4; /* Descriptor type */
+ u32 dcmd:8; /* Descriptor command */
+ } config;
+ } lower;
+ union {
+ u32 data;
+ struct {
+ u32 status:4; /* Descriptor status */
+ u32 idx:4;
+ u32 popts:6; /* Packet Options */
+ u32 paylen:18; /* Payload length */
+ } options;
+ } upper;
+};
+
+#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */
+#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */
+#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */
+#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */
+#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */
+#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */
+#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */
+#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */
+#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADV_DCMD_RS 0x8 /* Report Status */
+#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */
+#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */
+/* Extended Device Control */
+#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */
+
+struct e1000_adv_context_desc {
+ union {
+ u32 ip_config;
+ struct {
+ u32 iplen:9;
+ u32 maclen:7;
+ u32 vlan_tag:16;
+ } fields;
+ } ip_setup;
+ u32 seq_num;
+ union {
+ u64 l4_config;
+ struct {
+ u32 mkrloc:9;
+ u32 tucmd:11;
+ u32 dtyp:4;
+ u32 adv:8;
+ u32 rsvd:4;
+ u32 idx:4;
+ u32 l4len:8;
+ u32 mss:16;
+ } fields;
+ } l4_setup;
+};
+#endif
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define E1000_SRRCTL_TIMESTAMP 0x40000000
+#define E1000_SRRCTL_DROP_EN 0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
+
+#define E1000_TX_HEAD_WB_ENABLE 0x1
+#define E1000_TX_SEQNUM_WB_ENABLE 0x2
+
+#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
+#define E1000_MRQC_ENABLE_VMDQ 0x00000003
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
+#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
+#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002
+
+#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8
+#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \
+ E1000_VMRCTL_MIRROR_PORT_SHIFT)
+#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0)
+#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1)
+#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
+
+#define E1000_EICR_TX_QUEUE ( \
+ E1000_EICR_TX_QUEUE0 | \
+ E1000_EICR_TX_QUEUE1 | \
+ E1000_EICR_TX_QUEUE2 | \
+ E1000_EICR_TX_QUEUE3)
+
+#define E1000_EICR_RX_QUEUE ( \
+ E1000_EICR_RX_QUEUE0 | \
+ E1000_EICR_RX_QUEUE1 | \
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
+
+#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
+#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
+
+#define EIMS_ENABLE_MASK ( \
+ E1000_EIMS_RX_QUEUE | \
+ E1000_EIMS_TX_QUEUE | \
+ E1000_EIMS_TCP_TIMER | \
+ E1000_EIMS_OTHER)
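As a quick editorial illustration (not part of the patch), a PF driver running in MSI-X mode would typically unmask this whole group of extended causes in one write. The sketch below assumes the E1000_WRITE_REG/E1000_WRITE_FLUSH accessors from e1000_osdep.h and the E1000_EIMS register define from e1000_regs.h, neither of which appears in this hunk:

/* Sketch: unmask the per-queue, TCP-timer and "other" extended causes. */
static void example_enable_ext_interrupts(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_EIMS, EIMS_ENABLE_MASK);
	E1000_WRITE_FLUSH(hw);	/* post the write before enabling traffic */
}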
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
+#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
+#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
+#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
+#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
+#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
+#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
+#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
+#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /*RSS type, Pkt type*/
+ /* Split Header, header buffer len */
+ __le16 hdr_info;
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F
+#define E1000_RXDADV_RSSTYPE_SHIFT 12
+#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
+#define E1000_RXDADV_SPLITHEADER_EN 0x00001000
+#define E1000_RXDADV_SPH 0x8000
+#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define E1000_RXDADV_ERR_HBO 0x00800000
+
+/* RSS Hash results */
+#define E1000_RXDADV_RSSTYPE_NONE 0x00000000
+#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor */
+#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0
+#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00
+#define E1000_RXDADV_PKTTYPE_NONE 0x00000000
+#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+
+#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
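A hedged sketch of how these masks are normally applied (editorial addition): the fields live in the lower dword of the writeback half of union e1000_adv_rx_desc defined above; rte_le_to_cpu_32/rte_le_to_cpu_16 from <rte_byteorder.h> stand in for whatever byte-order helpers the build provides.

/* Sketch: decode RSS type, packet type and split-header length from a
 * completed (writeback) advanced RX descriptor. */
static void example_parse_rx_wb(const union e1000_adv_rx_desc *rxd)
{
	u32 lo  = rte_le_to_cpu_32(rxd->wb.lower.lo_dword.data);
	u16 hdr = rte_le_to_cpu_16(rxd->wb.lower.lo_dword.hs_rss.hdr_info);

	u32 rss_type = lo & E1000_RXDADV_RSSTYPE_MASK;
	u32 pkt_type = lo & (E1000_RXDADV_PKTTYPE_ILMASK |
			     E1000_RXDADV_PKTTYPE_TLMASK);
	u16 hdr_len  = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
		       E1000_RXDADV_HDRBUFLEN_SHIFT;

	(void)rss_type; (void)pkt_type; (void)hdr_len;
}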
+
+/* LinkSec results */
+/* Security Processing bit Indication */
+#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
+#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
+#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
+#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
+
+#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */
+#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */
+#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */
+#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st & Last TSO-full iSCSI PDU*/
+#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800
+#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
+#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
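As a rough editorial sketch of how these masks combine, assuming a single-buffer frame with no offloads and the rte_cpu_to_le_* helpers from <rte_byteorder.h>:

/* Sketch: fill one advanced TX data descriptor for a plain frame. */
static void example_fill_tx_desc(union e1000_adv_tx_desc *txd,
				 u64 dma_addr, u16 len)
{
	u32 cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
		       E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_EOP |
		       E1000_ADVTXD_DCMD_RS | len;

	txd->read.buffer_addr   = rte_cpu_to_le_64(dma_addr);
	txd->read.cmd_type_len  = rte_cpu_to_le_32(cmd_type);
	/* PAYLEN is the whole payload; no context descriptor is used here */
	txd->read.olinfo_status =
		rte_cpu_to_le_32((u32)len << E1000_ADVTXD_PAYLEN_SHIFT);
}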
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+/* IPSec Encrypt Enable for ESP */
+#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+/* Req requires Markers and CRC */
+#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000
+#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+/* Adv ctxt IPSec SA IDX mask */
+#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
+/* Adv ctxt IPSec ESP len mask */
+#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
+
+/* Additional Transmit Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */
+/* Tx Queue Arbitration Priority 0=low, 1=high */
+#define E1000_TXDCTL_PRIORITY 0x08000000
+
+/* Additional Receive Descriptor Control definitions */
+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */
+
+/* Direct Cache Access (DCA) definitions */
+#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
+#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
+
+/* Additional interrupt register bit definitions */
+#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
+#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
+#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
+
+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE (1 << 26)
+#define E1000_ETQF_IMM_INT (1 << 29)
+#define E1000_ETQF_1588 (1 << 30)
+#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
+/*
+ * ETQF filter list: one static filter per filter consumer. This is
+ * to avoid filter collisions later. Add new filters
+ * here!!
+ *
+ * Current filters:
+ * EAPOL 802.1x (0x888e): Filter 0
+ */
+#define E1000_ETQF_FILTER_EAPOL 0
+
+#define E1000_FTQF_VF_BP 0x00008000
+#define E1000_FTQF_1588_TIME_STAMP 0x08000000
+#define E1000_FTQF_MASK 0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP 0x10000000
+#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
+#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575 0x0400
+#define MAX_NUM_VFS 7
+
+#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */
+#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+#define E1000_DTXSWC_LLE_SHIFT 16
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+
+/* Easy defines for setting default pool, would normally be left a zero */
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+/* Other useful VMD_CTL register defines */
+#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+
+/* Per VM Offload register setup */
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */
+#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous enable */
+#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */
+#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
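A brief, hedged example of how a PF typically programs these per-pool offload bits (editorial; it assumes the E1000_VMOLR(n) register macro from e1000_regs.h and the E1000_READ_REG/E1000_WRITE_REG osdep accessors):

/* Sketch: accept broadcast and untagged frames and strip VLAN tags for
 * one VF pool. */
static void example_config_vmolr(struct e1000_hw *hw, int pool)
{
	u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(pool));

	vmolr |= E1000_VMOLR_BAM | E1000_VMOLR_AUPE | E1000_VMOLR_STRVLAN;
	E1000_WRITE_REG(hw, E1000_VMOLR(pool), vmolr);
}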
+
+#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */
+#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */
+
+#define E1000_VLVF_ARRAY_SIZE 32
+#define E1000_VLVF_VLANID_MASK 0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT 12
+#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN 0x00100000
+#define E1000_VLVF_VLANID_ENABLE 0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define E1000_RPLOLR_STRVLAN 0x40000000
+#define E1000_RPLOLR_STRCRC 0x80000000
+
+#define E1000_TCTL_EXT_COLD 0x000FFC00
+#define E1000_TCTL_EXT_COLD_SHIFT 10
+
+#define E1000_DTXCTL_8023LL 0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN 0x0020
+#define E1000_DTXCTL_SPOOF_INT 0x0040
+
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
+
+#define ALL_QUEUES 0xFFFF
+
+/* Rx packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
+s32 e1000_init_hw_82575(struct e1000_hw *hw);
+
+enum e1000_promisc_type {
+ e1000_promisc_disabled = 0, /* all promisc modes disabled */
+ e1000_promisc_unicast = 1, /* unicast promiscuous enabled */
+ e1000_promisc_multicast = 2, /* multicast promiscuous enabled */
+ e1000_promisc_enabled = 3, /* both uni and multicast promisc */
+ e1000_num_promisc_types
+};
+
+void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
+void e1000_rlpml_set_vf(struct e1000_hw *, u16);
+s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type);
+u16 e1000_rxpbs_adjust_82580(u32 data);
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
+s32 e1000_set_eee_i350(struct e1000_hw *);
+s32 e1000_set_eee_i354(struct e1000_hw *);
+s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
+s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw);
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define E1000_I2C_T_HD_STA 4
+#define E1000_I2C_T_LOW 5
+#define E1000_I2C_T_HIGH 4
+#define E1000_I2C_T_SU_STA 5
+#define E1000_I2C_T_HD_DATA 5
+#define E1000_I2C_T_SU_DATA 1
+#define E1000_I2C_T_RISE 1
+#define E1000_I2C_T_FALL 1
+#define E1000_I2C_T_SU_STO 4
+#define E1000_I2C_T_BUF 5
+
+s32 e1000_set_i2c_bb(struct e1000_hw *hw);
+s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+void e1000_i2c_bus_clear(struct e1000_hw *hw);
+#endif /* _E1000_82575_H_ */
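For orientation (editorial note, not part of the patch): the bit-banged I2C helpers declared at the end of this header are what the 82575-family code uses to talk to SFP modules. A minimal, hypothetical usage sketch follows; the device address and byte offset are placeholder values, not taken from this patch:

/* Sketch: read the first byte of an SFP module EEPROM over bit-banged I2C. */
static s32 example_read_sfp_byte(struct e1000_hw *hw, u8 *val)
{
	s32 ret = e1000_set_i2c_bb(hw);	/* route the I2C pins for bit-banging */

	if (ret != E1000_SUCCESS)
		return ret;
	/* 0xA0 is the conventional SFP EEPROM address; offset 0 is its first byte */
	return e1000_read_i2c_byte_generic(hw, 0, 0xA0, val);
}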
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.c
new file mode 100755
index 00000000..a0645651
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.c
@@ -0,0 +1,1357 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+/**
+ * e1000_init_mac_params - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the MAC
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mac_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->mac.ops.init_params) {
+ ret_val = hw->mac.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("MAC Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("mac.init_mac_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the NVM
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_nvm_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->nvm.ops.init_params) {
+ ret_val = hw->nvm.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("NVM Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("nvm.init_nvm_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_phy_params - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the PHY
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_phy_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->phy.ops.init_params) {
+ ret_val = hw->phy.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("PHY Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("phy.init_phy_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_params - Initialize mailbox function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the mailbox
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mbx_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->mbx.ops.init_params) {
+ ret_val = hw->mbx.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("Mailbox Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("mbx.init_mbx_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * device ID stored in the hw structure.
+ * MUST BE FIRST FUNCTION CALLED (explicitly or through
+ * e1000_setup_init_funcs()).
+ **/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_set_mac_type");
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82542:
+ mac->type = e1000_82542;
+ break;
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ mac->type = e1000_82543;
+ break;
+ case E1000_DEV_ID_82544EI_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82544GC_COPPER:
+ case E1000_DEV_ID_82544GC_LOM:
+ mac->type = e1000_82544;
+ break;
+ case E1000_DEV_ID_82540EM:
+ case E1000_DEV_ID_82540EM_LOM:
+ case E1000_DEV_ID_82540EP:
+ case E1000_DEV_ID_82540EP_LOM:
+ case E1000_DEV_ID_82540EP_LP:
+ mac->type = e1000_82540;
+ break;
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ mac->type = e1000_82545;
+ break;
+ case E1000_DEV_ID_82545GM_COPPER:
+ case E1000_DEV_ID_82545GM_FIBER:
+ case E1000_DEV_ID_82545GM_SERDES:
+ mac->type = e1000_82545_rev_3;
+ break;
+ case E1000_DEV_ID_82546EB_COPPER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ mac->type = e1000_82546;
+ break;
+ case E1000_DEV_ID_82546GB_COPPER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82546GB_SERDES:
+ case E1000_DEV_ID_82546GB_PCIE:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ mac->type = e1000_82546_rev_3;
+ break;
+ case E1000_DEV_ID_82541EI:
+ case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER_LOM:
+ mac->type = e1000_82541;
+ break;
+ case E1000_DEV_ID_82541ER:
+ case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
+ case E1000_DEV_ID_82541GI_MOBILE:
+ mac->type = e1000_82541_rev_2;
+ break;
+ case E1000_DEV_ID_82547EI:
+ case E1000_DEV_ID_82547EI_MOBILE:
+ mac->type = e1000_82547;
+ break;
+ case E1000_DEV_ID_82547GI:
+ mac->type = e1000_82547_rev_2;
+ break;
+ case E1000_DEV_ID_82571EB_COPPER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES_DUAL:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+ mac->type = e1000_82571;
+ break;
+ case E1000_DEV_ID_82572EI:
+ case E1000_DEV_ID_82572EI_COPPER:
+ case E1000_DEV_ID_82572EI_FIBER:
+ case E1000_DEV_ID_82572EI_SERDES:
+ mac->type = e1000_82572;
+ break;
+ case E1000_DEV_ID_82573E:
+ case E1000_DEV_ID_82573E_IAMT:
+ case E1000_DEV_ID_82573L:
+ mac->type = e1000_82573;
+ break;
+ case E1000_DEV_ID_82574L:
+ case E1000_DEV_ID_82574LA:
+ mac->type = e1000_82574;
+ break;
+ case E1000_DEV_ID_82583V:
+ mac->type = e1000_82583;
+ break;
+ case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+ case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
+ mac->type = e1000_80003es2lan;
+ break;
+ case E1000_DEV_ID_ICH8_IFE:
+ case E1000_DEV_ID_ICH8_IFE_GT:
+ case E1000_DEV_ID_ICH8_IFE_G:
+ case E1000_DEV_ID_ICH8_IGP_M:
+ case E1000_DEV_ID_ICH8_IGP_M_AMT:
+ case E1000_DEV_ID_ICH8_IGP_AMT:
+ case E1000_DEV_ID_ICH8_IGP_C:
+ case E1000_DEV_ID_ICH8_82567V_3:
+ mac->type = e1000_ich8lan;
+ break;
+ case E1000_DEV_ID_ICH9_IFE:
+ case E1000_DEV_ID_ICH9_IFE_GT:
+ case E1000_DEV_ID_ICH9_IFE_G:
+ case E1000_DEV_ID_ICH9_IGP_M:
+ case E1000_DEV_ID_ICH9_IGP_M_AMT:
+ case E1000_DEV_ID_ICH9_IGP_M_V:
+ case E1000_DEV_ID_ICH9_IGP_AMT:
+ case E1000_DEV_ID_ICH9_BM:
+ case E1000_DEV_ID_ICH9_IGP_C:
+ case E1000_DEV_ID_ICH10_R_BM_LM:
+ case E1000_DEV_ID_ICH10_R_BM_LF:
+ case E1000_DEV_ID_ICH10_R_BM_V:
+ mac->type = e1000_ich9lan;
+ break;
+ case E1000_DEV_ID_ICH10_D_BM_LM:
+ case E1000_DEV_ID_ICH10_D_BM_LF:
+ case E1000_DEV_ID_ICH10_D_BM_V:
+ mac->type = e1000_ich10lan;
+ break;
+ case E1000_DEV_ID_PCH_D_HV_DM:
+ case E1000_DEV_ID_PCH_D_HV_DC:
+ case E1000_DEV_ID_PCH_M_HV_LM:
+ case E1000_DEV_ID_PCH_M_HV_LC:
+ mac->type = e1000_pchlan;
+ break;
+ case E1000_DEV_ID_PCH2_LV_LM:
+ case E1000_DEV_ID_PCH2_LV_V:
+ mac->type = e1000_pch2lan;
+ break;
+ case E1000_DEV_ID_PCH_LPT_I217_LM:
+ case E1000_DEV_ID_PCH_LPT_I217_V:
+ case E1000_DEV_ID_PCH_LPTLP_I218_LM:
+ case E1000_DEV_ID_PCH_LPTLP_I218_V:
+ mac->type = e1000_pch_lpt;
+ break;
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ mac->type = e1000_82575;
+ break;
+ case E1000_DEV_ID_82576:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_NS_SERDES:
+ case E1000_DEV_ID_82576_SERDES_QUAD:
+ mac->type = e1000_82576;
+ break;
+ case E1000_DEV_ID_82580_COPPER:
+ case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_SERDES:
+ case E1000_DEV_ID_82580_SGMII:
+ case E1000_DEV_ID_82580_COPPER_DUAL:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ mac->type = e1000_82580;
+ break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ case E1000_DEV_ID_I350_DA4:
+ mac->type = e1000_i350;
+ break;
+ case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+ case E1000_DEV_ID_I210_SERDES_FLASHLESS:
+ case E1000_DEV_ID_I210_COPPER:
+ case E1000_DEV_ID_I210_COPPER_OEM1:
+ case E1000_DEV_ID_I210_COPPER_IT:
+ case E1000_DEV_ID_I210_FIBER:
+ case E1000_DEV_ID_I210_SERDES:
+ case E1000_DEV_ID_I210_SGMII:
+ mac->type = e1000_i210;
+ break;
+ case E1000_DEV_ID_I211_COPPER:
+ mac->type = e1000_i211;
+ break;
+ case E1000_DEV_ID_82576_VF:
+ case E1000_DEV_ID_82576_VF_HV:
+ mac->type = e1000_vfadapt;
+ break;
+ case E1000_DEV_ID_I350_VF:
+ case E1000_DEV_ID_I350_VF_HV:
+ mac->type = e1000_vfadapt_i350;
+ break;
+
+ case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
+ case E1000_DEV_ID_I354_SGMII:
+ case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
+ mac->type = e1000_i354;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ ret_val = -E1000_ERR_MAC_INIT;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_init_funcs - Initializes function pointers
+ * @hw: pointer to the HW structure
+ * @init_device: true will initialize the rest of the function pointers
+ * getting the device ready for use. false will only set
+ * MAC type and the function pointers for the other init
+ * functions. Passing false will not generate any hardware
+ * reads or writes.
+ *
+ * This function must be called by a driver in order to use the rest
+ * of the 'shared' code files. Called by drivers only.
+ **/
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
+{
+ s32 ret_val;
+
+ /* Can't do much good without knowing the MAC type. */
+ ret_val = e1000_set_mac_type(hw);
+ if (ret_val) {
+ DEBUGOUT("ERROR: MAC type could not be set properly.\n");
+ goto out;
+ }
+
+ if (!hw->hw_addr) {
+ DEBUGOUT("ERROR: Registers not mapped\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * Init function pointers to generic implementations. We do this first
+ * allowing a driver module to override it afterward.
+ */
+ e1000_init_mac_ops_generic(hw);
+ e1000_init_phy_ops_generic(hw);
+ e1000_init_nvm_ops_generic(hw);
+ e1000_init_mbx_ops_generic(hw);
+
+ /*
+ * Set up the init function pointers. These are functions within the
+ * adapter family file that set up function pointers for the rest of
+ * the functions in that family.
+ */
+ switch (hw->mac.type) {
+ case e1000_82542:
+ e1000_init_function_pointers_82542(hw);
+ break;
+ case e1000_82543:
+ case e1000_82544:
+ e1000_init_function_pointers_82543(hw);
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ e1000_init_function_pointers_82540(hw);
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ e1000_init_function_pointers_82541(hw);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ e1000_init_function_pointers_82571(hw);
+ break;
+ case e1000_80003es2lan:
+ e1000_init_function_pointers_80003es2lan(hw);
+ break;
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ e1000_init_function_pointers_ich8lan(hw);
+ break;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ e1000_init_function_pointers_82575(hw);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ e1000_init_function_pointers_i210(hw);
+ break;
+ case e1000_vfadapt:
+ e1000_init_function_pointers_vf(hw);
+ break;
+ case e1000_vfadapt_i350:
+ e1000_init_function_pointers_vf(hw);
+ break;
+ default:
+ DEBUGOUT("Hardware not supported\n");
+ ret_val = -E1000_ERR_CONFIG;
+ break;
+ }
+
+ /*
+ * Initialize the rest of the function pointers. These require some
+ * register reads/writes in some cases.
+ */
+ if (!(ret_val) && init_device) {
+ ret_val = e1000_init_mac_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_nvm_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_phy_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_mbx_params(hw);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
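A short editorial sketch of the call order the comments above describe (error handling compressed; the wrapper names are the ones defined in this file):

/* Sketch: typical bring-up sequence using the shared-code API. */
static s32 example_bringup(struct e1000_hw *hw)
{
	s32 ret;

	/* Sets the MAC type, installs all function pointers and, because
	 * init_device is true, runs the per-family init_params callbacks. */
	ret = e1000_setup_init_funcs(hw, true);
	if (ret)
		return ret;

	ret = e1000_reset_hw(hw);
	if (ret)
		return ret;

	ret = e1000_init_hw(hw);
	if (ret)
		return ret;

	/* The permanent MAC address ends up in hw->mac.addr/perm_addr */
	return e1000_read_mac_addr(hw);
}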
+
+/**
+ * e1000_get_bus_info - Obtain bus information for adapter
+ * @hw: pointer to the HW structure
+ *
+ * This will obtain information about the HW bus to which the
+ * adapter is attached and store it in the hw structure. This is a
+ * function pointer entry point called by drivers.
+ **/
+s32 e1000_get_bus_info(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.get_bus_info)
+ return hw->mac.ops.get_bus_info(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * This clears the VLAN filter table on the adapter. This is a function
+ * pointer entry point called by drivers.
+ **/
+void e1000_clear_vfta(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.clear_vfta)
+ hw->mac.ops.clear_vfta(hw);
+}
+
+/**
+ * e1000_write_vfta - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: the 32-bit offset at which to write the value.
+ * @value: the 32-bit value to write at location offset.
+ *
+ * This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ * table. This is a function pointer entry point called by drivers.
+ **/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ if (hw->mac.ops.write_vfta)
+ hw->mac.ops.write_vfta(hw, offset, value);
+}
+
+/**
+ * e1000_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates the Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count)
+{
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
+ mc_addr_count);
+}
+
+/**
+ * e1000_force_mac_fc - Force MAC flow control
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Currently no func pointer exists
+ * and all implementations are handled in the generic version of this
+ * function.
+ **/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+ return e1000_force_mac_fc_generic(hw);
+}
+
+/**
+ * e1000_check_for_link - Check/Store link connection
+ * @hw: pointer to the HW structure
+ *
+ * This checks the link condition of the adapter and stores the
+ * results in the hw->mac structure. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.check_for_link)
+ return hw->mac.ops.check_for_link(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_check_mng_mode - Check management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has manageability enabled.
+ * This is a function pointer entry point called by drivers.
+ **/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.check_mng_mode)
+ return hw->mac.ops.check_mng_mode(hw);
+
+ return false;
+}
+
+/**
+ * e1000_mng_write_dhcp_info - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+ return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
+}
+
+/**
+ * e1000_reset_hw - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.reset_hw)
+ return hw->mac.ops.reset_hw(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_init_hw - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This initializes the hardware, readying it for operation. This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.init_hw)
+ return hw->mac.ops.init_hw(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_setup_link - Configures link and flow control
+ * @hw: pointer to the HW structure
+ *
+ * This configures link and flow control settings for the adapter. This
+ * is a function pointer entry point called by drivers. While modules can
+ * also call this, they probably call their own version of this function.
+ **/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.setup_link)
+ return hw->mac.ops.setup_link(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_get_speed_and_duplex - Returns current speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to a 16-bit value to store the speed
+ * @duplex: pointer to a 16-bit value to store the duplex.
+ *
+ * This returns the speed and duplex of the adapter in the two 'out'
+ * variables passed in. This is a function pointer entry point called
+ * by drivers.
+ **/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+ if (hw->mac.ops.get_link_up_info)
+ return hw->mac.ops.get_link_up_info(hw, speed, duplex);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_setup_led - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.setup_led)
+ return hw->mac.ops.setup_led(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cleanup_led - Restores SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This restores the SW controllable LED to the value saved off by
+ * e1000_setup_led. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.cleanup_led)
+ return hw->mac.ops.cleanup_led(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_blink_led - Blink SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This starts the adapter LED blinking. Request the LED to be setup first
+ * and cleaned up after. This is a function pointer entry point called by
+ * drivers.
+ **/
+s32 e1000_blink_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.blink_led)
+ return hw->mac.ops.blink_led(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_id_led_init - store LED configurations in SW
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the LED config in SW. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.id_led_init)
+ return hw->mac.ops.id_led_init(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on - Turn on SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED on. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.led_on)
+ return hw->mac.ops.led_on(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off - Turn off SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED off. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.led_off)
+ return hw->mac.ops.led_off(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_adaptive - Reset adaptive IFS
+ * @hw: pointer to the HW structure
+ *
+ * Resets the adaptive IFS. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+ e1000_reset_adaptive_generic(hw);
+}
+
+/**
+ * e1000_update_adaptive - Update adaptive IFS
+ * @hw: pointer to the HW structure
+ *
+ * Updates adapter IFS. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+ e1000_update_adaptive_generic(hw);
+}
+
+/**
+ * e1000_disable_pcie_master - Disable PCI-Express master access
+ * @hw: pointer to the HW structure
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests. Currently no func pointer exists and all implementations are
+ * handled in the generic version of this function.
+ **/
+s32 e1000_disable_pcie_master(struct e1000_hw *hw)
+{
+ return e1000_disable_pcie_master_generic(hw);
+}
+
+/**
+ * e1000_config_collision_dist - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.config_collision_dist)
+ hw->mac.ops.config_collision_dist(hw);
+}
+
+/**
+ * e1000_rar_set - Sets a receive address register
+ * @hw: pointer to the HW structure
+ * @addr: address to set the RAR to
+ * @index: the RAR to set
+ *
+ * Sets a Receive Address Register (RAR) to the specified address.
+ **/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ if (hw->mac.ops.rar_set)
+ hw->mac.ops.rar_set(hw, addr, index);
+}
+
+/**
+ * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
+ * @hw: pointer to the HW structure
+ *
+ * Ensures that the MDI/MDIX SW state is valid.
+ **/
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.validate_mdi_setting)
+ return hw->mac.ops.validate_mdi_setting(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_hash_mc_addr - Determines address location in multicast table
+ * @hw: pointer to the HW structure
+ * @mc_addr: Multicast address to hash.
+ *
+ * This hashes an address to determine its location in the multicast
+ * table. Currently no func pointer exists and all implementations
+ * are handled in the generic version of this function.
+ **/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+ return e1000_hash_mc_addr_generic(hw, mc_addr);
+}
+
+/**
+ * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+ return e1000_enable_tx_pkt_filtering_generic(hw);
+}
+
+/**
+ * e1000_mng_host_if_write - Writes to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the given offset on the host
+ * interface. It handles alignment so the writes are done in the most
+ * efficient way, and it accumulates the sum of the written data into *sum.
+ **/
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum)
+{
+ return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum);
+}
+
+/**
+ * e1000_mng_write_cmd_header - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after performing the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ return e1000_mng_write_cmd_header_generic(hw, hdr);
+}
+
+/**
+ * e1000_mng_enable_host_if - Checks host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command is completed. It busy waits
+ * if the previous command has not completed.
+ **/
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+ return e1000_mng_enable_host_if_generic(hw);
+}
+
+/**
+ * e1000_check_reset_block - Verifies PHY can be reset
+ * @hw: pointer to the HW structure
+ *
+ * Checks if the PHY is in a state that can be reset or if manageability
+ * has it tied up. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.check_reset_block)
+ return hw->phy.ops.check_reset_block(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_phy_reg - Reads PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to read
+ * @data: the buffer to store the 16-bit read.
+ *
+ * Reads the PHY register and returns the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ if (hw->phy.ops.read_reg)
+ return hw->phy.ops.read_reg(hw, offset, data);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg - Writes PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes the PHY register at offset with the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ if (hw->phy.ops.write_reg)
+ return hw->phy.ops.write_reg(hw, offset, data);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_release_phy - Generic release PHY
+ * @hw: pointer to the HW structure
+ *
+ * Return if silicon family does not require a semaphore when accessing the
+ * PHY.
+ **/
+void e1000_release_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.release)
+ hw->phy.ops.release(hw);
+}
+
+/**
+ * e1000_acquire_phy - Generic acquire PHY
+ * @hw: pointer to the HW structure
+ *
+ * Return success if silicon family does not require a semaphore when
+ * accessing the PHY.
+ **/
+s32 e1000_acquire_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.acquire)
+ return hw->phy.ops.acquire(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cfg_on_link_up - Configure PHY upon link up
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_cfg_on_link_up(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.cfg_on_link_up)
+ return hw->phy.ops.cfg_on_link_up(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_kmrn_reg - Reads register using Kumeran interface
+ * @hw: pointer to the HW structure
+ * @offset: the register to read
+ * @data: the location to store the 16-bit value read.
+ *
+ * Reads a register out of the Kumeran interface. Currently no func pointer
+ * exists and all implementations are handled in the generic version of
+ * this function.
+ **/
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return e1000_read_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ * e1000_write_kmrn_reg - Writes register using Kumeran interface
+ * @hw: pointer to the HW structure
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes a register to the Kumeran interface. Currently no func pointer
+ * exists and all implementations are handled in the generic version of
+ * this function.
+ **/
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return e1000_write_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ * e1000_get_cable_length - Retrieves cable length estimation
+ * @hw: pointer to the HW structure
+ *
+ * This function estimates the cable length and stores the estimates in
+ * hw->phy.min_length and hw->phy.max_length. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_cable_length)
+ return hw->phy.ops.get_cable_length(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_info - Retrieves PHY information from registers
+ * @hw: pointer to the HW structure
+ *
+ * This function gets some information from various PHY registers and
+ * populates hw->phy values with it. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_info)
+ return hw->phy.ops.get_info(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_hw_reset - Hard PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a hard PHY reset. This is a function pointer entry point called
+ * by drivers.
+ **/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.reset)
+ return hw->phy.ops.reset(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_commit - Soft PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a soft PHY reset on those that apply. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_phy_commit(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.commit)
+ return hw->phy.ops.commit(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d0_lplu_state - Sets low power link up state for D0
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D0
+ * and SmartSpeed is disabled when active is true, else clear lplu for D0
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+ if (hw->phy.ops.set_d0_lplu_state)
+ return hw->phy.ops.set_d0_lplu_state(hw, active);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+ if (hw->phy.ops.set_d3_lplu_state)
+ return hw->phy.ops.set_d3_lplu_state(hw, active);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr - Reads MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MAC address out of the adapter and stores it in the HW structure.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_read_pba_string - Read device part number string
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ * e1000_read_pba_length - Read device part number string length
+ * @hw: pointer to the HW structure
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number length from the EEPROM and
+ * stores the value in pba_num_size.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
+{
+ return e1000_read_pba_length_generic(hw, pba_num_size);
+}
+
+/**
+ * e1000_read_pba_num - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
+{
+ return e1000_read_pba_num_generic(hw, pba_num);
+}
+
+/**
+ * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
+ * @hw: pointer to the HW structure
+ *
+ * Validates the NVM checksum is correct. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.validate)
+ return hw->nvm.ops.validate(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the NVM checksum. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.update)
+ return hw->nvm.ops.update(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_reload_nvm - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+void e1000_reload_nvm(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.reload)
+ hw->nvm.ops.reload(hw);
+}
+
+/**
+ * e1000_read_nvm - Reads NVM (EEPROM)
+ * @hw: pointer to the HW structure
+ * @offset: the word offset to read
+ * @words: number of 16-bit words to read
+ * @data: pointer to the properly sized buffer for the data.
+ *
+ * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ if (hw->nvm.ops.read)
+ return hw->nvm.ops.read(hw, offset, words, data);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_write_nvm - Writes to NVM (EEPROM)
+ * @hw: pointer to the HW structure
+ * @offset: the word offset to write
+ * @words: number of 16-bit words to write
+ * @data: pointer to the properly sized buffer for the data.
+ *
+ * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ if (hw->nvm.ops.write)
+ return hw->nvm.ops.write(hw, offset, words, data);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_8bit_ctrl_reg - Writes 8bit Control register
+ * @hw: pointer to the HW structure
+ * @reg: 32bit register offset
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes the 8-bit value in data to the control register at the given offset.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+ u8 data)
+{
+ return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
+}
+
+/**
+ * e1000_power_up_phy - Restores link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * The phy may be powered down to save power, to turn off link when the
+ * driver is unloaded, or when wake on LAN is not enabled (among others).
+ **/
+void e1000_power_up_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.power_up)
+ hw->phy.ops.power_up(hw);
+
+ e1000_setup_link(hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down PHY
+ * @hw: pointer to the HW structure
+ *
+ * The phy may be powered down to save power, to turn off link when the
+ * driver is unloaded, or when wake on LAN is not enabled (among others).
+ **/
+void e1000_power_down_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.power_down)
+ hw->phy.ops.power_down(hw);
+}
+
+/**
+ * e1000_power_up_fiber_serdes_link - Power up serdes link
+ * @hw: pointer to the HW structure
+ *
+ * Power on the optics and PCS.
+ **/
+void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.power_up_serdes)
+ hw->mac.ops.power_up_serdes(hw);
+}
+
+/**
+ * e1000_shutdown_fiber_serdes_link - Remove link during power down
+ * @hw: pointer to the HW structure
+ *
+ * Shutdown the optics and PCS on driver unload.
+ **/
+void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.shutdown_serdes)
+ hw->mac.ops.shutdown_serdes(hw);
+}
+
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.h
new file mode 100755
index 00000000..02b16da3
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_api.h
@@ -0,0 +1,167 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_API_H_
+#define _E1000_API_H_
+
+#include "e1000_hw.h"
+
+extern void e1000_init_function_pointers_82542(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_82543(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_82540(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_82571(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_82541(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
+extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
+extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_i210(struct e1000_hw *hw);
+
+s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr);
+s32 e1000_set_mac_type(struct e1000_hw *hw);
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
+s32 e1000_init_mac_params(struct e1000_hw *hw);
+s32 e1000_init_nvm_params(struct e1000_hw *hw);
+s32 e1000_init_phy_params(struct e1000_hw *hw);
+s32 e1000_init_mbx_params(struct e1000_hw *hw);
+s32 e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_clear_vfta(struct e1000_hw *hw);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+s32 e1000_force_mac_fc(struct e1000_hw *hw);
+s32 e1000_check_for_link(struct e1000_hw *hw);
+s32 e1000_reset_hw(struct e1000_hw *hw);
+s32 e1000_init_hw(struct e1000_hw *hw);
+s32 e1000_setup_link(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_disable_pcie_master(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count);
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_check_reset_block(struct e1000_hw *hw);
+s32 e1000_blink_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_id_led_init(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+s32 e1000_get_cable_length(struct e1000_hw *hw);
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+ u8 data);
+s32 e1000_get_phy_info(struct e1000_hw *hw);
+void e1000_release_phy(struct e1000_hw *hw);
+s32 e1000_acquire_phy(struct e1000_hw *hw);
+s32 e1000_cfg_on_link_up(struct e1000_hw *hw);
+s32 e1000_phy_hw_reset(struct e1000_hw *hw);
+s32 e1000_phy_commit(struct e1000_hw *hw);
+void e1000_power_up_phy(struct e1000_hw *hw);
+void e1000_power_down_phy(struct e1000_hw *hw);
+s32 e1000_read_mac_addr(struct e1000_hw *hw);
+s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num);
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
+void e1000_reload_nvm(struct e1000_hw *hw);
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum);
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr);
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+u32 e1000_translate_register_82542(u32 reg);
+
+
+
+/*
+ * TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ * adapter = a pointer to struct e1000_hw
+ * status = the 8 bit status field of the Rx descriptor with EOP set
+ * error = the 8 bit error field of the Rx descriptor with EOP set
+ * length = the sum of all the length fields of the Rx descriptors that
+ * make up the current frame
+ * last_byte = the last byte of the frame DMAed by the hardware
+ * max_frame_length = the maximum frame length we want to accept.
+ * min_frame_length = the minimum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ * ...
+ * if (TBI_ACCEPT) {
+ * accept_frame = true;
+ * e1000_tbi_adjust_stats(adapter, MacAddress);
+ * frame_length--;
+ * } else {
+ * accept_frame = false;
+ * }
+ * ...
+ */
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION 0x0F
+
+#define TBI_ACCEPT(a, status, errors, length, last_byte, \
+ min_frame_size, max_frame_size) \
+ (e1000_tbi_sbp_enabled_82543(a) && \
+ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+ ((last_byte) == CARRIER_EXTENSION) && \
+ (((status) & E1000_RXD_STAT_VP) ? \
+ (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
+ ((length) <= (max_frame_size + 1))) : \
+ (((length) > min_frame_size) && \
+ ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
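For orientation, a minimal sketch of how an Rx error path might evaluate this macro once the conditions in the comment above hold; the helper name, the descriptor pointer, and the frame-size bounds are illustrative assumptions, not part of this header:

/* Illustrative only: decide whether an errored EOP frame should still be
 * accepted when store-bad-packets handling is active on 82543 TBI. */
static bool rx_accept_tbi_frame(struct e1000_hw *hw,
                                const struct e1000_rx_desc *desc,
                                u16 frame_length, u8 last_byte,
                                u16 min_frame_size, u16 max_frame_size)
{
        /* True only when the sole error is a CRC error caused by carrier
         * extension and the length falls inside the configured bounds. */
        return TBI_ACCEPT(hw, desc->status, desc->errors, frame_length,
                          last_byte, min_frame_size, max_frame_size);
}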
+
+#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
+#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
+#endif /* _E1000_API_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_defines.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_defines.h
new file mode 100755
index 00000000..278c5072
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_defines.h
@@ -0,0 +1,1498 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC E1000_WUFC_LNKC
+#define E1000_WUS_MAG E1000_WUFC_MAG
+#define E1000_WUS_EX E1000_WUFC_EX
+#define E1000_WUS_MC E1000_WUFC_MC
+#define E1000_WUS_BC E1000_WUFC_BC
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */
+/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
+#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */
+#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+/* Physical Func Reset Done Indication */
+#define E1000_CTRL_EXT_PFRSTD 0x00004000
+#define E1000_CTRL_EXT_SDLPE 0x00040000 /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+/* Offset of the link mode field in Ctrl Ext register */
+#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_IRCA 0x00000001
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
+#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_LSECCK 0x00001000
+#define E1000_CTRL_EXT_PHYPDEN 0x00100000
+#define E1000_I2CCMD_REG_ADDR_SHIFT 16
+#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
+#define E1000_I2CCMD_OPCODE_READ 0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
+#define E1000_I2CCMD_READY 0x20000000
+#define E1000_I2CCMD_ERROR 0x80000000
+#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a))
+#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a))
+#define E1000_MAX_SGMII_PHY_REG_ADDR 255
+#define E1000_I2CCMD_PHY_TIMEOUT 200
+#define E1000_IVAR_VALID 0x80
+#define E1000_GPIE_NSICR 0x00000001
+#define E1000_GPIE_MSIX_MODE 0x00000010
+#define E1000_GPIE_EIAME 0x40000000
+#define E1000_GPIE_PBA 0x80000000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
+#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
+#define E1000_RXDEXT_STATERR_LB 0x00040000
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
+
+#if !defined(EXTERNAL_RELEASE) || defined(E1000E_MQ)
+#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
+#endif /* !EXTERNAL_RELEASE || E1000E_MQ */
+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST 0x00200000
+
+#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
+
+/* Receive Control */
+#define E1000_RCTL_RST 0x00000001 /* Software reset */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ * E1000_PSRCTL_BSIZE0_MASK) |
+ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ * E1000_PSRCTL_BSIZE1_MASK) |
+ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ * E1000_PSRCTL_BSIZE2_MASK) |
+ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ * E1000_PSRCTL_BSIZE3_MASK));
+ * where value0 = [128..16256], default=256
+ * value1 = [1024..64512], default=4096
+ * value2 = [0..64512], default=4096
+ * value3 = [0..64512], default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+
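As a hedged sketch of the programming sequence the usage note above describes, the fragment below builds a PSRCTL value from the documented defaults (value0=256, value1=value2=4096, value3=0); the ROUNDUP helper is defined here only for illustration, and E1000_PSRCTL plus E1000_WRITE_REG are assumed from the driver's register and osdep headers:

/* Sketch: program packet-split receive buffer sizes per the comment above. */
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y)) /* round x up to a multiple of y */

static void set_psrctl_example(struct e1000_hw *hw)
{
        u32 psrctl = 0;

        psrctl |= (((ROUNDUP(256, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
                    E1000_PSRCTL_BSIZE0_MASK) |
                   ((ROUNDUP(4096, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
                    E1000_PSRCTL_BSIZE1_MASK) |
                   ((ROUNDUP(4096, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
                    E1000_PSRCTL_BSIZE2_MASK));
        /* value3 = 0 leaves the BSIZE3 field at its cleared default. */

        E1000_WRITE_REG(hw, E1000_PSRCTL, psrctl);
}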
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM 0x01
+#define E1000_SWFW_PHY0_SM 0x02
+#define E1000_SWFW_PHY1_SM 0x04
+#define E1000_SWFW_CSR_SM 0x08
+#define E1000_SWFW_PHY2_SM 0x20
+#define E1000_SWFW_PHY3_SM 0x40
+#define E1000_SWFW_SW_MNG_SM 0x400
+
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
+#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
+
+#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3
+
+#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_CONNSW_PHYSD 0x400
+#define E1000_CONNSW_PHY_PDN 0x800
+#define E1000_CONNSW_SERDESD 0x200
+#define E1000_CONNSW_AUTOSENSE_CONF 0x2
+#define E1000_CONNSW_AUTOSENSE_EN 0x1
+#define E1000_PCS_CFG_PCS_EN 8
+#define E1000_PCS_LCTL_FLV_LINK_UP 1
+#define E1000_PCS_LCTL_FSV_10 0
+#define E1000_PCS_LCTL_FSV_100 2
+#define E1000_PCS_LCTL_FSV_1000 4
+#define E1000_PCS_LCTL_FDV_FULL 8
+#define E1000_PCS_LCTL_FSD 0x10
+#define E1000_PCS_LCTL_FORCE_LINK 0x20
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+#define E1000_PCS_LCTL_AN_ENABLE 0x10000
+#define E1000_PCS_LCTL_AN_RESTART 0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
+#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
+
+#define E1000_PCS_LSTS_LINK_OK 1
+#define E1000_PCS_LSTS_SPEED_100 2
+#define E1000_PCS_LSTS_SPEED_1000 4
+#define E1000_PCS_LSTS_DUPLEX_FULL 8
+#define E1000_PCS_LSTS_SYNK_OK 0x10
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* LAN Init Completion by NVM */
+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
+#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
+#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
+#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
+#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
+#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus spd 50-66MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus spd 66-100MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus spd 100-133MHz*/
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define PHY_FORCE_TIME 20
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_PHY_LED0_MODE_MASK 0x00000007
+#define E1000_PHY_LED0_IVRT 0x00000008
+#define E1000_PHY_LED0_MASK 0x0000001F
+
+#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_IVRT 0x00000040
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
+
+#define E1000_LEDCTL_MODE_LINK_UP 0x2
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+
+/* Transmit Control */
+#define E1000_TCTL_EN 0x00000002 /* enable Tx */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
+
+/* Transmit Arbitration Count */
+#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
+#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
+#define E1000_RFCTL_ACK_DIS 0x00001000
+#define E1000_RFCTL_EXTEN 0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+#define E1000_RFCTL_LEF 0x00040000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLD_SHIFT 12
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT 10
+#define DEFAULT_82543_TIPG_IPGT_FIBER 9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK 0x000003FF
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT 10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT 20
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+
+#define ETHERNET_FCS_SIZE 4
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
+
+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+
+/* Low Power IDLE Control */
+#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008 /* 8KB */
+#define E1000_PBA_10K 0x000A /* 10KB */
+#define E1000_PBA_12K 0x000C /* 12KB */
+#define E1000_PBA_14K 0x000E /* 14KB */
+#define E1000_PBA_16K 0x0010 /* 16KB */
+#define E1000_PBA_18K 0x0012
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_26K 0x001A
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_35K 0x0023
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030 /* 48KB */
+#define E1000_PBA_64K 0x0040 /* 64KB */
+
+#define E1000_PBA_RXA_MASK 0xFFFF
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
+#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
+
+#define IFS_MAX 80
+#define IFS_MIN 40
+#define IFS_RATIO 4
+#define IFS_STEP 10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* Rx overrun */
+#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
+#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW 0x00008000
+#define E1000_ICR_MNG 0x00040000 /* Manageability event */
+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
+#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
+#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
+/* If this bit is asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED 0x80000000
+#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
+#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
+#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
+#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
+#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
+#define E1000_ICR_FER 0x00400000 /* Fatal Error */
+
+#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event */
+#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */
+
+/* PBA ECC Register */
+#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
+#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
+#define E1000_PBA_ECC_CORR_EN 0x00000001 /* Enable ECC error correction */
+#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
+#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 on ECC error */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */
+#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */
+#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */
+#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
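A brief illustration of how a driver typically arms this default cause set at interrupt-enable time; E1000_IMS, E1000_WRITE_REG, and E1000_WRITE_FLUSH are assumed from the driver's register and osdep headers, and the wrapper function is hypothetical:

/* Illustrative: unmask the default interrupt causes once the rings are ready. */
static void enable_default_irqs(struct e1000_hw *hw)
{
        E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
        E1000_WRITE_FLUSH(hw); /* post the write before interrupts may fire */
}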
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */
+#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
+#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
+#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
+#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
+#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
+#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
+#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */
+#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */
+#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */
+
+#define E1000_IMS_THS E1000_ICR_THS /* ICR.THS: Thermal Sensor Event */
+#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+
+/* Extended Interrupt Cause Set */
+#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
+#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+#define E1000_EITR_INTERVAL 0x00007FFC
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of descriptors still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots. However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+#define E1000_RAH_AV 0x80000000 /* Receive address valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+#define E1000_RAH_QUEUE_MASK_82575 0x000C0000
+#define E1000_RAH_POOL_1 0x00040000
+
+/* Error Codes */
+#define E1000_SUCCESS 0
+#define E1000_ERR_NVM 1
+#define E1000_ERR_PHY 2
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_PARAM 4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET 9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET 12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_MBX 15
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
+#define E1000_ERR_I2C 19
+#define E1000_ERR_INVM_VALUE_NOT_FOUND 20
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT 50
+#define COPPER_LINK_UP_LIMIT 10
+#define PHY_AUTO_NEG_LIMIT 45
+#define PHY_FORCE_LIMIT 20
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT 800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT 100
+/* Number of 2-millisecond intervals we wait to acquire MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT 10
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT 10
+
+/* Flow Control */
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
+#define E1000_RXCW_C 0x20000000 /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+
+#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000
+
+#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
+
+#define E1000_TSICR_TXTS 0x00000002
+#define E1000_TSIM_TXTS 0x00000002
+/* TUPLE Filtering Configuration */
+#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */
+#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */
+#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */
+/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_TCP 0x0
+/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_UDP 0x1
+/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_SCTP 0x2
+#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */
+#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shift */
+#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */
+#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */
+#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
+#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
+#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
+#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
+
+#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK 0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT 21
+
+#define E1000_MEDIA_PORT_COPPER 1
+#define E1000_MEDIA_PORT_OTHER 2
+#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
+#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1 0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
+#define E1000_M88E1112_PAGE_ADDR 0x16
+#define E1000_M88E1112_STATUS 0x01
+
+#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */
+#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */
+#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */
+#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */
+
+/* I350 EEE defines */
+#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */
+#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */
+#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */
+/* EEE status */
+#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability negotiated */
+#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */
+#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */
+#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
+#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
+#define E1000_M88E1543_EEE_CTRL_1 0x0
+#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define E1000_EEE_ADV_DEV_I354 7
+#define E1000_EEE_ADV_ADDR_I354 60
+#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+#define E1000_PCS_STATUS_DEV_I354 3
+#define E1000_PCS_STATUS_ADDR_I354 1
+#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
+#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
+#define E1000_M88E1512_CFG_REG_1 0x0010
+#define E1000_M88E1512_CFG_REG_2 0x0011
+#define E1000_M88E1512_CFG_REG_3 0x0007
+#define E1000_M88E1512_MODE 0x0014
+#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
+#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
+#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
+#define E1000_GCR_TXD_NO_SNOOP 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define E1000_GCR_CAP_VER2 0x00040000
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
+ E1000_GCR_RXDSCW_NO_SNOOP | \
+ E1000_GCR_RXDSCR_NO_SNOOP | \
+ E1000_GCR_TXD_NO_SNOOP | \
+ E1000_GCR_TXDSCW_NO_SNOOP | \
+ E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
+
+/* mPHY address control and data registers */
+#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */
+#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
+#define E1000_MPHY_DATA 0x0E10 /* Data Register */
+
+/* AFE CSR Offset for PCS CLK */
+#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004
+/* Override for near end digital loopback. */
+#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* Link Code Word Page received */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+/* 1=Repeater/switch device port 0=DTE device */
+#define CR_1000T_REPEATER_DTE 0x0400
+/* 1=Configure PHY as Master 0=Configure PHY as Slave */
+#define CR_1000T_MS_VALUE 0x0800
+/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
+#define CR_1000T_MS_ENABLE 0x1000
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */
+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */
+#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+
+#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
+
+/* NVM Control */
+#define E1000_EECD_SK 0x00000001 /* NVM Clock */
+#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* NVM Data In */
+#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
+#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
+#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */
+#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */
+#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */
+#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_NVM_GRANT_ATTEMPTS
+#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT 11
+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
+#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
+#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */
+#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
+#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */
+#define E1000_FLUDONE_ATTEMPTS 20000
+#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
+#define E1000_I210_FIFO_SEL_RX 0x00
+#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i))
+#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
+#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
+#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
+
+#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK 0x7FFF
+/* Firmware code revision field word offset */
+#define E1000_I210_FW_VER_OFFSET 328
+
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START 1 /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES 2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT 0x0003
+#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_VERSION 0x0005
+#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */
+#define NVM_PHY_CLASS_WORD 0x0007
+#define E1000_I210_NVM_FW_MODULE_PTR 0x0010
+#define E1000_I350_NVM_FW_MODULE_PTR 0x0051
+#define NVM_FUTURE_INIT_WORD1 0x0019
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_ETRACK_HIWORD 0x0043
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_IMAGE_ID_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
+#define NVM_ETRACK_VALID 0x8000
+#define NVM_NEW_DEC_MASK 0x0F00
+#define NVM_HEX_CONV 16
+#define NVM_HEX_TENS 10
+
+/* FW version defines */
+/* Offset of "Loader patch ptr" in Firmware Header */
+#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01
+/* Patch generation hour & minutes */
+#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04
+/* Patch generation month & day */
+#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05
+/* Patch generation year */
+#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06
+/* Patch major & minor numbers */
+#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07
+
+#define NVM_MAC_ADDR 0x0000
+#define NVM_SUB_DEV_ID 0x000B
+#define NVM_SUB_VEN_ID 0x000C
+#define NVM_DEV_ID 0x000D
+#define NVM_VEN_ID 0x000E
+#define NVM_INIT_CTRL_2 0x000F
+#define NVM_INIT_CTRL_4 0x0013
+#define NVM_LED_1_CFG 0x001C
+#define NVM_LED_0_2_CFG 0x001F
+
+#define NVM_COMPAT_VALID_CSUM 0x0001
+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
+
+#define NVM_INIT_CONTROL2_REG 0x000F
+#define NVM_INIT_CONTROL3_PORT_B 0x0014
+#define NVM_INIT_3GIO_3 0x001A
+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define NVM_INIT_CONTROL3_PORT_A 0x0024
+#define NVM_CFG 0x0012
+#define NVM_ALT_MAC_ADDR_PTR 0x0037
+#define NVM_CHECKSUM_REG 0x003F
+#define NVM_COMPATIBILITY_REG_3 0x0003
+#define NVM_COMPATIBILITY_BIT_MASK 0x8000
+
+#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
+
+#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0)
+
+/* Mask bits for fields in Word 0x24 of the NVM */
+#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */
+/* Offset of Link Mode bits for 82575/82576 */
+#define NVM_WORD24_LNK_MODE_OFFSET 8
+/* Offset of Link Mode bits for 82580 up */
+#define NVM_WORD24_82580_LNK_MODE_OFFSET 4
+
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK 0x3000
+#define NVM_WORD0F_PAUSE 0x1000
+#define NVM_WORD0F_ASM_DIR 0x2000
+#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK 0x000C
+
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM 0x0800
+
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH 11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM 0xBABA
+
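+/* Editor's illustrative sketch, not part of the original Intel sources: the
+ * checksum rule above means a validation routine can simply sum the first
+ * NVM_CHECKSUM_REG + 1 words and compare the result against NVM_SUM, e.g.:
+ *
+ *     u16 checksum = 0, word, i;
+ *     for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ *             if (hw->nvm.ops.read(hw, i, 1, &word))
+ *                     return -E1000_ERR_NVM;
+ *             checksum += word;
+ *     }
+ *     if (checksum != (u16)NVM_SUM)
+ *             return -E1000_ERR_NVM;
+ */
+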
+/* PBA (printed board assembly) number words */
+#define NVM_PBA_OFFSET_0 8
+#define NVM_PBA_OFFSET_1 9
+#define NVM_PBA_PTR_GUARD 0xFAFA
+#define NVM_RESERVED_WORD 0xFFFF
+#define NVM_PHY_CLASS_A 0x8000
+#define NVM_SERDES_AMPLITUDE_MASK 0x000F
+#define NVM_SIZE_MASK 0x1C00
+#define NVM_SIZE_SHIFT 10
+#define NVM_WORD_SIZE_BASE_SHIFT 6
+#define NVM_SWDPIO_EXT_SHIFT 4
+
+/* NVM Commands - Microwire */
+#define NVM_READ_OPCODE_MICROWIRE 0x6 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_MICROWIRE 0x5 /* NVM write opcode */
+#define NVM_ERASE_OPCODE_MICROWIRE 0x7 /* NVM erase opcode */
+#define NVM_EWEN_OPCODE_MICROWIRE 0x13 /* NVM erase/write enable */
+#define NVM_EWDS_OPCODE_MICROWIRE 0x10 /* NVM erase/write disable */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI 0x01
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIX_COMMAND_REGISTER 0xE6
+#define PCIX_STATUS_REGISTER_LO 0xE8
+#define PCIX_STATUS_REGISTER_HI 0xEA
+#define PCI_HEADER_TYPE_REGISTER 0x0E
+#define PCIE_LINK_STATUS 0x12
+#define PCIE_DEVICE_CONTROL2 0x28
+
+#define PCIX_COMMAND_MMRBC_MASK 0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT 0x2
+#define PCIX_STATUS_HI_MMRBC_MASK 0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5
+#define PCIX_STATUS_HI_MMRBC_4K 0x3
+#define PCIX_STATUS_HI_MMRBC_2K 0x2
+#define PCIX_STATUS_LO_FUNC_MASK 0x7
+#define PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define PCIE_LINK_WIDTH_MASK 0x3F0
+#define PCIE_LINK_WIDTH_SHIFT 4
+#define PCIE_LINK_SPEED_MASK 0x0F
+#define PCIE_LINK_SPEED_2500 0x01
+#define PCIE_LINK_SPEED_5000 0x02
+#define PCIE_DEVICE_CONTROL2_16ms 0x0005
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs.
+ * I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID 0x01410C50
+#define M88E1000_I_PHY_ID 0x01410C30
+#define M88E1011_I_PHY_ID 0x01410C20
+#define IGP01E1000_I_PHY_ID 0x02A80380
+#define M88E1111_I_PHY_ID 0x01410CC0
+#define M88E1543_E_PHY_ID 0x01410EA0
+#define M88E1512_E_PHY_ID 0x01410DD0
+#define M88E1112_E_PHY_ID 0x01410C90
+#define I347AT4_E_PHY_ID 0x01410DC0
+#define M88E1340M_E_PHY_ID 0x01410DF0
+#define GG82563_E_PHY_ID 0x01410CA0
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+#define BME1000_E_PHY_ID 0x01410CB0
+#define BME1000_E_PHY_ID_R2 0x01410CB1
+#define I82577_E_PHY_ID 0x01540050
+#define I82578_E_PHY_ID 0x004DD040
+#define I82579_E_PHY_ID 0x01540090
+#define I217_E_PHY_ID 0x015400A0
+#define I82580_I_PHY_ID 0x015403A0
+#define I350_I_PHY_ID 0x015403B0
+#define I210_I_PHY_ID 0x01410C00
+#define IGP04E1000_E_PHY_ID 0x02A80391
+#define M88_VENDOR 0x0141
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */
+#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
+/* MDI Crossover Mode bits 6:5 Manual MDI configuration */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+/* 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380
+#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
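+
+/* Editor's illustrative sketch, not part of the original Intel sources: the
+ * coarse cable length code listed above is extracted from the PHY Specific
+ * Status Register using the mask and shift defined here, e.g.:
+ *
+ *     u16 phy_data, index;
+ *     if (!hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data))
+ *             index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ *                     M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ *
+ * where index then maps onto the length buckets documented above.
+ */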
+
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+
+/* Intel I347AT4 Registers */
+#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
+#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
+#define I347AT4_PAGE_SELECT 0x16
+
+/* I347AT4 Extended PHY Specific Control Register */
+
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
+#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
+#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
+#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
+#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
+#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
+#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
+#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
+#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
+
+/* I347AT4 PHY Cable Diagnostics Control */
+#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
+
+/* M88E1112 only registers */
+#define M88E1112_VCT_DSP_DISTANCE 0x001A
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+
+#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020
+#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C
+
+/* BME1000 PHY Specific Control Register */
+#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT 5
+#define GG82563_REG(page, reg) \
+ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG 30
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */
+#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */
+#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */
+
+/* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21)
+
+#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+/* Kumeran Mode Control */
+#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16)
+#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */
+
+/* MDI Control */
+#define E1000_MDIC_REG_MASK 0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK 0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_ERROR 0x40000000
+#define E1000_MDIC_DEST 0x80000000
+
+/* SerDes Control */
+#define E1000_GEN_CTL_READY 0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT 8
+#define E1000_GEN_POLL_TIMEOUT 640
+
+/* LinkSec register fields */
+#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000
+#define E1000_LSECTXCAP_SUM_SHIFT 16
+#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000
+#define E1000_LSECRXCAP_SUM_SHIFT 16
+
+#define E1000_LSECTXCTRL_EN_MASK 0x00000003
+#define E1000_LSECTXCTRL_DISABLE 0x0
+#define E1000_LSECTXCTRL_AUTH 0x1
+#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2
+#define E1000_LSECTXCTRL_AISCI 0x00000020
+#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8
+
+#define E1000_LSECRXCTRL_EN_MASK 0x0000000C
+#define E1000_LSECRXCTRL_EN_SHIFT 2
+#define E1000_LSECRXCTRL_DISABLE 0x0
+#define E1000_LSECRXCTRL_CHECK 0x1
+#define E1000_LSECRXCTRL_STRICT 0x2
+#define E1000_LSECRXCTRL_DROP 0x3
+#define E1000_LSECRXCTRL_PLSH 0x00000040
+#define E1000_LSECRXCTRL_RP 0x00000080
+#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA 0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT 14
+#define E1000_RTTBCNRC_RF_INT_MASK \
+ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
+/* DMA Coalescing register fields */
+/* DMA Coalescing Watchdog Timer */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF
+/* DMA Coalescing Rx Threshold */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000
+#define E1000_DMACR_DMACTHR_SHIFT 16
+/* Lx when no PCIe transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000
+#define E1000_DMACR_DMAC_LX_SHIFT 28
+#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
+/* DMA Coalescing BMC-to-OS Watchdog Enable */
+#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
+
+/* DMA Coalescing Transmit Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF
+
+#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
+
+/* Rx Traffic Rate Threshold */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF
+/* Rx packet rate in current window */
+#define E1000_DMCRTRH_LRPRCW 0x80000000
+
+/* DMA Coal Rx Traffic Current Count */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF
+
+/* Flow ctrl Rx Threshold High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0
+#define E1000_FCRTC_RTH_COAL_SHIFT 4
+/* Lx power decision based on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080
+
+#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */
+#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */
+
+/* Proxy Filter Control */
+#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */
+#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */
+#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */
+#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */
+#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */
+#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */
+#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */
+#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */
+/* Proxy Status */
+#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */
+
+/* Firmware Status */
+#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */
+/* VF Control */
+#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */
+
+#define E1000_STATUS_LAN_ID_MASK 0x0000000C /* Mask for Lan ID field */
+/* Lan ID bit field offset in status register */
+#define E1000_STATUS_LAN_ID_OFFSET 2
+#define E1000_VFTA_ENTRIES 128
+#ifndef E1000_UNUSEDARG
+#define E1000_UNUSEDARG
+#endif /* E1000_UNUSEDARG */
+#ifndef ERROR_REPORT
+#define ERROR_REPORT(fmt) do { } while (0)
+#endif /* ERROR_REPORT */
+#endif /* _E1000_DEFINES_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_hw.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_hw.h
new file mode 100755
index 00000000..4dd92a30
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_hw.h
@@ -0,0 +1,1026 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82542 0x1000
+#define E1000_DEV_ID_82543GC_FIBER 0x1001
+#define E1000_DEV_ID_82543GC_COPPER 0x1004
+#define E1000_DEV_ID_82544EI_COPPER 0x1008
+#define E1000_DEV_ID_82544EI_FIBER 0x1009
+#define E1000_DEV_ID_82544GC_COPPER 0x100C
+#define E1000_DEV_ID_82544GC_LOM 0x100D
+#define E1000_DEV_ID_82540EM 0x100E
+#define E1000_DEV_ID_82540EM_LOM 0x1015
+#define E1000_DEV_ID_82540EP_LOM 0x1016
+#define E1000_DEV_ID_82540EP 0x1017
+#define E1000_DEV_ID_82540EP_LP 0x101E
+#define E1000_DEV_ID_82545EM_COPPER 0x100F
+#define E1000_DEV_ID_82545EM_FIBER 0x1011
+#define E1000_DEV_ID_82545GM_COPPER 0x1026
+#define E1000_DEV_ID_82545GM_FIBER 0x1027
+#define E1000_DEV_ID_82545GM_SERDES 0x1028
+#define E1000_DEV_ID_82546EB_COPPER 0x1010
+#define E1000_DEV_ID_82546EB_FIBER 0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82546GB_COPPER 0x1079
+#define E1000_DEV_ID_82546GB_FIBER 0x107A
+#define E1000_DEV_ID_82546GB_SERDES 0x107B
+#define E1000_DEV_ID_82546GB_PCIE 0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_82541EI 0x1013
+#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER_LOM 0x1014
+#define E1000_DEV_ID_82541ER 0x1078
+#define E1000_DEV_ID_82541GI 0x1076
+#define E1000_DEV_ID_82541GI_LF 0x107C
+#define E1000_DEV_ID_82541GI_MOBILE 0x1077
+#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82547EI_MOBILE 0x101A
+#define E1000_DEV_ID_82547GI 0x1075
+#define E1000_DEV_ID_82571EB_COPPER 0x105E
+#define E1000_DEV_ID_82571EB_FIBER 0x105F
+#define E1000_DEV_ID_82571EB_SERDES 0x1060
+#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC
+#define E1000_DEV_ID_82572EI_COPPER 0x107D
+#define E1000_DEV_ID_82572EI_FIBER 0x107E
+#define E1000_DEV_ID_82572EI_SERDES 0x107F
+#define E1000_DEV_ID_82572EI 0x10B9
+#define E1000_DEV_ID_82573E 0x108B
+#define E1000_DEV_ID_82573E_IAMT 0x108C
+#define E1000_DEV_ID_82573L 0x109A
+#define E1000_DEV_ID_82574L 0x10D3
+#define E1000_DEV_ID_82574LA 0x10F6
+#define E1000_DEV_ID_82583V 0x150C
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+#define E1000_DEV_ID_ICH8_82567V_3 0x1501
+#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
+#define E1000_DEV_ID_ICH8_IGP_C 0x104B
+#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M 0x104D
+#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
+#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
+#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
+#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
+#define E1000_DEV_ID_ICH9_BM 0x10E5
+#define E1000_DEV_ID_ICH9_IGP_C 0x294C
+#define E1000_DEV_ID_ICH9_IFE 0x10C0
+#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3
+#define E1000_DEV_ID_ICH9_IFE_G 0x10C2
+#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
+#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
+#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
+#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
+#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
+#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
+#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
+#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
+#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
+#define E1000_DEV_ID_PCH2_LV_LM 0x1502
+#define E1000_DEV_ID_PCH2_LV_V 0x1503
+#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A
+#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
+#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
+#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
+#define E1000_DEV_ID_82576 0x10C9
+#define E1000_DEV_ID_82576_FIBER 0x10E6
+#define E1000_DEV_ID_82576_SERDES 0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
+#define E1000_DEV_ID_82576_NS 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES 0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
+#define E1000_DEV_ID_82576_VF 0x10CA
+#define E1000_DEV_ID_82576_VF_HV 0x152D
+#define E1000_DEV_ID_I350_VF 0x1520
+#define E1000_DEV_ID_I350_VF_HV 0x152F
+#define E1000_DEV_ID_82575EB_COPPER 0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
+#define E1000_DEV_ID_82580_COPPER 0x150E
+#define E1000_DEV_ID_82580_FIBER 0x150F
+#define E1000_DEV_ID_82580_SERDES 0x1510
+#define E1000_DEV_ID_82580_SGMII 0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
+#define E1000_DEV_ID_I350_COPPER 0x1521
+#define E1000_DEV_ID_I350_FIBER 0x1522
+#define E1000_DEV_ID_I350_SERDES 0x1523
+#define E1000_DEV_ID_I350_SGMII 0x1524
+#define E1000_DEV_ID_I350_DA4 0x1546
+#define E1000_DEV_ID_I210_COPPER 0x1533
+#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
+#define E1000_DEV_ID_I210_COPPER_IT 0x1535
+#define E1000_DEV_ID_I210_FIBER 0x1536
+#define E1000_DEV_ID_I210_SERDES 0x1537
+#define E1000_DEV_ID_I210_SGMII 0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
+#define E1000_DEV_ID_I211_COPPER 0x1539
+#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
+#define E1000_DEV_ID_I354_SGMII 0x1F41
+#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
+#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
+
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0 0
+#define E1000_FUNC_1 1
+#define E1000_FUNC_2 2
+#define E1000_FUNC_3 3
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
+
+enum e1000_mac_type {
+ e1000_undefined = 0,
+ e1000_82542,
+ e1000_82543,
+ e1000_82544,
+ e1000_82540,
+ e1000_82545,
+ e1000_82545_rev_3,
+ e1000_82546,
+ e1000_82546_rev_3,
+ e1000_82541,
+ e1000_82541_rev_2,
+ e1000_82547,
+ e1000_82547_rev_2,
+ e1000_82571,
+ e1000_82572,
+ e1000_82573,
+ e1000_82574,
+ e1000_82583,
+ e1000_80003es2lan,
+ e1000_ich8lan,
+ e1000_ich9lan,
+ e1000_ich10lan,
+ e1000_pchlan,
+ e1000_pch2lan,
+ e1000_pch_lpt,
+ e1000_82575,
+ e1000_82576,
+ e1000_82580,
+ e1000_i350,
+ e1000_i354,
+ e1000_i210,
+ e1000_i211,
+ e1000_vfadapt,
+ e1000_vfadapt_i350,
+ e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
+};
+
+enum e1000_media_type {
+ e1000_media_type_unknown = 0,
+ e1000_media_type_copper = 1,
+ e1000_media_type_fiber = 2,
+ e1000_media_type_internal_serdes = 3,
+ e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+ e1000_nvm_unknown = 0,
+ e1000_nvm_none,
+ e1000_nvm_eeprom_spi,
+ e1000_nvm_eeprom_microwire,
+ e1000_nvm_flash_hw,
+ e1000_nvm_invm,
+ e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+ e1000_nvm_override_none = 0,
+ e1000_nvm_override_spi_small,
+ e1000_nvm_override_spi_large,
+ e1000_nvm_override_microwire_small,
+ e1000_nvm_override_microwire_large
+};
+
+enum e1000_phy_type {
+ e1000_phy_unknown = 0,
+ e1000_phy_none,
+ e1000_phy_m88,
+ e1000_phy_igp,
+ e1000_phy_igp_2,
+ e1000_phy_gg82563,
+ e1000_phy_igp_3,
+ e1000_phy_ife,
+ e1000_phy_bm,
+ e1000_phy_82578,
+ e1000_phy_82577,
+ e1000_phy_82579,
+ e1000_phy_i217,
+ e1000_phy_82580,
+ e1000_phy_vf,
+ e1000_phy_i210,
+};
+
+enum e1000_bus_type {
+ e1000_bus_type_unknown = 0,
+ e1000_bus_type_pci,
+ e1000_bus_type_pcix,
+ e1000_bus_type_pci_express,
+ e1000_bus_type_reserved
+};
+
+enum e1000_bus_speed {
+ e1000_bus_speed_unknown = 0,
+ e1000_bus_speed_33,
+ e1000_bus_speed_66,
+ e1000_bus_speed_100,
+ e1000_bus_speed_120,
+ e1000_bus_speed_133,
+ e1000_bus_speed_2500,
+ e1000_bus_speed_5000,
+ e1000_bus_speed_reserved
+};
+
+enum e1000_bus_width {
+ e1000_bus_width_unknown = 0,
+ e1000_bus_width_pcie_x1,
+ e1000_bus_width_pcie_x2,
+ e1000_bus_width_pcie_x4 = 4,
+ e1000_bus_width_pcie_x8 = 8,
+ e1000_bus_width_32,
+ e1000_bus_width_64,
+ e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+ e1000_1000t_rx_status_not_ok = 0,
+ e1000_1000t_rx_status_ok,
+ e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+ e1000_rev_polarity_normal = 0,
+ e1000_rev_polarity_reversed,
+ e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+ e1000_fc_none = 0,
+ e1000_fc_rx_pause,
+ e1000_fc_tx_pause,
+ e1000_fc_full,
+ e1000_fc_default = 0xFF
+};
+
+enum e1000_ffe_config {
+ e1000_ffe_config_enabled = 0,
+ e1000_ffe_config_active,
+ e1000_ffe_config_blocked
+};
+
+enum e1000_dsp_config {
+ e1000_dsp_config_disabled = 0,
+ e1000_dsp_config_enabled,
+ e1000_dsp_config_activated,
+ e1000_dsp_config_undefined = 0xFF
+};
+
+enum e1000_ms_type {
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
+};
+
+enum e1000_serdes_link_state {
+ e1000_serdes_link_down = 0,
+ e1000_serdes_link_autoneg_progress,
+ e1000_serdes_link_autoneg_complete,
+ e1000_serdes_link_forced_up
+};
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+/* Receive Descriptor */
+struct e1000_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+ struct {
+ __le64 buffer_addr;
+ __le64 reserved;
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length;
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ __le64 buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length0; /* length of buffer 0 */
+ __le16 vlan; /* VLAN tag */
+ } middle;
+ struct {
+ __le16 header_status;
+ /* length of buffers 1-3 */
+ __le16 length[PS_PAGE_BUFFERS];
+ } upper;
+ __le64 reserved;
+ } wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+ union {
+ __le32 ip_config;
+ struct {
+ u8 ipcss; /* IP checksum start */
+ u8 ipcso; /* IP checksum offset */
+ __le16 ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ __le32 tcp_config;
+ struct {
+ u8 tucss; /* TCP checksum start */
+ u8 tucso; /* TCP checksum offset */
+ __le16 tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ __le32 cmd_and_length;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 hdr_len; /* Header length */
+ __le16 mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 typ_len_ext;
+ u8 cmd;
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 popts; /* Packet Options */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
+ u64 cbtmpc;
+ u64 htdpmc;
+ u64 cbrdpc;
+ u64 cbrmpc;
+ u64 rpthc;
+ u64 hgptc;
+ u64 htcbdpc;
+ u64 hgorc;
+ u64 hgotc;
+ u64 lenerrs;
+ u64 scvpc;
+ u64 hrmpc;
+ u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
+};
+
+struct e1000_vf_stats {
+ u64 base_gprc;
+ u64 base_gptc;
+ u64 base_gorc;
+ u64 base_gotc;
+ u64 base_mprc;
+ u64 base_gotlbc;
+ u64 base_gptlbc;
+ u64 base_gorlbc;
+ u64 base_gprlbc;
+
+ u32 last_gprc;
+ u32 last_gptc;
+ u32 last_gorc;
+ u32 last_gotc;
+ u32 last_mprc;
+ u32 last_gotlbc;
+ u32 last_gptlbc;
+ u32 last_gorlbc;
+ u32 last_gprlbc;
+
+ u64 gprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 mprc;
+ u64 gotlbc;
+ u64 gptlbc;
+ u64 gorlbc;
+ u64 gprlbc;
+};
+
+struct e1000_phy_stats {
+ u32 idle_errors;
+ u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u8 status;
+ u8 reserved0;
+ u16 vlan_id;
+ u32 reserved1;
+ u16 reserved2;
+ u8 reserved3;
+ u8 checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+ u8 command_id;
+ u8 command_length;
+ u8 command_options;
+ u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH 252
+struct e1000_host_command_info {
+ struct e1000_host_command_header command_header;
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+ u8 command_id;
+ u8 checksum;
+ u16 reserved1;
+ u16 reserved2;
+ u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+ struct e1000_host_mng_command_header command_header;
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_manage.h"
+#include "e1000_mbx.h"
+
+/* Function pointers for the MAC. */
+struct e1000_mac_operations {
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*id_led_init)(struct e1000_hw *);
+ s32 (*blink_led)(struct e1000_hw *);
+ bool (*check_mng_mode)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*cleanup_led)(struct e1000_hw *);
+ void (*clear_hw_cntrs)(struct e1000_hw *);
+ void (*clear_vfta)(struct e1000_hw *);
+ s32 (*get_bus_info)(struct e1000_hw *);
+ void (*set_lan_id)(struct e1000_hw *);
+ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*led_on)(struct e1000_hw *);
+ s32 (*led_off)(struct e1000_hw *);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
+ void (*shutdown_serdes)(struct e1000_hw *);
+ void (*power_up_serdes)(struct e1000_hw *);
+ s32 (*setup_link)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_led)(struct e1000_hw *);
+ void (*write_vfta)(struct e1000_hw *, u32, u32);
+ void (*config_collision_dist)(struct e1000_hw *);
+ void (*rar_set)(struct e1000_hw *, u8*, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*validate_mdi_setting)(struct e1000_hw *);
+ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+ void (*release_swfw_sync)(struct e1000_hw *, u16);
+};
+
+/* When to use various PHY register access functions:
+ *
+ * Func Caller
+ * Function Does Does When to use
+ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * X_reg L,P,A n/a for simple PHY reg accesses
+ * X_reg_locked P,A L for multiple accesses of different regs
+ * on different pages
+ * X_reg_page A L,P for multiple accesses of different regs
+ * on the same page
+ *
+ * Where X=[read|write], L=locking, P=sets page, A=register access
+ *
+ */
+struct e1000_phy_operations {
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*cfg_on_link_up)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*commit)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_info)(struct e1000_hw *);
+ s32 (*set_page)(struct e1000_hw *, u16);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
+ void (*release)(struct e1000_hw *);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
+ void (*power_up)(struct e1000_hw *);
+ void (*power_down)(struct e1000_hw *);
+ s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
+};
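+
+/* Editor's illustrative sketch, not part of the original Intel sources: per
+ * the access-function table above, a caller doing several accesses on the
+ * same PHY page takes the lock and sets the page once, then uses the _page
+ * variants for each register (page, reg_a/reg_b and val_a/val_b below are
+ * placeholder values):
+ *
+ *     if (hw->phy.ops.acquire(hw) == E1000_SUCCESS) {
+ *             if (hw->phy.ops.set_page(hw, page) == E1000_SUCCESS) {
+ *                     hw->phy.ops.read_reg_page(hw, reg_a, &val_a);
+ *                     hw->phy.ops.read_reg_page(hw, reg_b, &val_b);
+ *             }
+ *             hw->phy.ops.release(hw);
+ *     }
+ */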
+
+/* Function pointers for the NVM. */
+struct e1000_nvm_operations {
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ void (*release)(struct e1000_hw *);
+ void (*reload)(struct e1000_hw *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+};
+
+struct e1000_mac_info {
+ struct e1000_mac_operations ops;
+ u8 addr[ETH_ADDR_LEN];
+ u8 perm_addr[ETH_ADDR_LEN];
+
+ enum e1000_mac_type type;
+
+ u32 collision_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ u32 mc_filter_type;
+ u32 tx_packet_delta;
+ u32 txcw;
+
+ u16 current_ifs_val;
+ u16 ifs_max_val;
+ u16 ifs_min_val;
+ u16 ifs_ratio;
+ u16 ifs_step_size;
+ u16 mta_reg_count;
+ u16 uta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+ #define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
+ u16 rar_entry_count;
+
+ u8 forced_speed_duplex;
+
+ bool adaptive_ifs;
+ bool has_fwsm;
+ bool arc_subsystem_valid;
+ bool asf_firmware_present;
+ bool autoneg;
+ bool autoneg_failed;
+ bool get_link_status;
+ bool in_ifs_mode;
+ bool report_tx_early;
+ enum e1000_serdes_link_state serdes_link_state;
+ bool serdes_has_link;
+ bool tx_pkt_filtering;
+};
+
+struct e1000_phy_info {
+ struct e1000_phy_operations ops;
+ enum e1000_phy_type type;
+
+ enum e1000_1000t_rx_status local_rx;
+ enum e1000_1000t_rx_status remote_rx;
+ enum e1000_ms_type ms_type;
+ enum e1000_ms_type original_ms_type;
+ enum e1000_rev_polarity cable_polarity;
+ enum e1000_smart_speed smart_speed;
+
+ u32 addr;
+ u32 id;
+ u32 reset_delay_us; /* in usec */
+ u32 revision;
+
+ enum e1000_media_type media_type;
+
+ u16 autoneg_advertised;
+ u16 autoneg_mask;
+ u16 cable_length;
+ u16 max_cable_length;
+ u16 min_cable_length;
+
+ u8 mdix;
+
+ bool disable_polarity_correction;
+ bool is_mdix;
+ bool polarity_correction;
+ bool speed_downgraded;
+ bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+ struct e1000_nvm_operations ops;
+ enum e1000_nvm_type type;
+ enum e1000_nvm_override override;
+
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+
+ u16 word_size;
+ u16 delay_usec;
+ u16 address_bits;
+ u16 opcode_bits;
+ u16 page_size;
+};
+
+struct e1000_bus_info {
+ enum e1000_bus_type type;
+ enum e1000_bus_speed speed;
+ enum e1000_bus_width width;
+
+ u16 func;
+ u16 pci_cmd_word;
+};
+
+struct e1000_fc_info {
+ u32 high_water; /* Flow control high-water mark */
+ u32 low_water; /* Flow control low-water mark */
+ u16 pause_time; /* Flow control pause timer */
+ u16 refresh_time; /* Flow control refresh timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ enum e1000_fc_mode current_mode; /* FC mode in effect */
+ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct e1000_mbx_operations {
+ s32 (*init_params)(struct e1000_hw *hw);
+ s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct e1000_hw *, u16);
+ s32 (*check_for_ack)(struct e1000_hw *, u16);
+ s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct e1000_mbx_info {
+ struct e1000_mbx_operations ops;
+ struct e1000_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u16 size;
+};
+
+struct e1000_dev_spec_82541 {
+ enum e1000_dsp_config dsp_config;
+ enum e1000_ffe_config ffe_config;
+ u16 spd_default;
+ bool phy_init_script;
+};
+
+struct e1000_dev_spec_82542 {
+ bool dma_fairness;
+};
+
+struct e1000_dev_spec_82543 {
+ u32 tbi_compatibility;
+ bool dma_fairness;
+ bool init_phy_disabled;
+};
+
+struct e1000_dev_spec_82571 {
+ bool laa_is_present;
+ u32 smb_counter;
+ E1000_MUTEX swflag_mutex;
+};
+
+struct e1000_dev_spec_80003es2lan {
+ bool mdic_wa_enable;
+};
+
+struct e1000_shadow_ram {
+ u16 value;
+ bool modified;
+};
+
+#define E1000_SHADOW_RAM_WORDS 2048
+
+#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+/* I218 PHY Ultra Low Power (ULP) states */
+enum e1000_ulp_state {
+ e1000_ulp_state_unknown,
+ e1000_ulp_state_off,
+ e1000_ulp_state_on,
+};
+
+#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+struct e1000_dev_spec_ich8lan {
+ bool kmrn_lock_loss_workaround_enabled;
+ struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS];
+ E1000_MUTEX nvm_mutex;
+ E1000_MUTEX swflag_mutex;
+ bool nvm_k1_enabled;
+ bool eee_disable;
+ u16 eee_lp_ability;
+#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+ enum e1000_ulp_state ulp_state;
+#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+ u16 lat_enc;
+ u16 max_ltr_enc;
+ bool smbus_disable;
+};
+
+struct e1000_dev_spec_82575 {
+ bool sgmii_active;
+ bool global_device_reset;
+ bool eee_disable;
+ bool module_plugged;
+ bool clear_semaphore_once;
+ u32 mtu;
+ struct sfp_e1000_flags eth_flags;
+ u8 media_port;
+ bool media_changed;
+};
+
+struct e1000_dev_spec_vf {
+ u32 vf_number;
+ u32 v2p_mailbox;
+};
+
+struct e1000_hw {
+ void *back;
+
+ u8 *hw_addr;
+ u8 *flash_address;
+ unsigned long io_base;
+
+ struct e1000_mac_info mac;
+ struct e1000_fc_info fc;
+ struct e1000_phy_info phy;
+ struct e1000_nvm_info nvm;
+ struct e1000_bus_info bus;
+ struct e1000_mbx_info mbx;
+ struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+ union {
+ struct e1000_dev_spec_82541 _82541;
+ struct e1000_dev_spec_82542 _82542;
+ struct e1000_dev_spec_82543 _82543;
+ struct e1000_dev_spec_82571 _82571;
+ struct e1000_dev_spec_80003es2lan _80003es2lan;
+ struct e1000_dev_spec_ich8lan ich8lan;
+ struct e1000_dev_spec_82575 _82575;
+ struct e1000_dev_spec_vf vf;
+ } dev_spec;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+};
+
+#include "e1000_82541.h"
+#include "e1000_82543.h"
+#include "e1000_82571.h"
+#include "e1000_80003es2lan.h"
+#include "e1000_ich8lan.h"
+#include "e1000_82575.h"
+#include "e1000_i210.h"
+
+/* These functions must be implemented by drivers */
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.c
new file mode 100755
index 00000000..1f5600d5
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.c
@@ -0,0 +1,1000 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+
+STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
+STATIC void e1000_release_nvm_i210(struct e1000_hw *hw);
+STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
+STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+STATIC s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
+STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+
+/**
+ * e1000_acquire_nvm_i210 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_acquire_nvm_i210");
+
+ ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_i210 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ **/
+STATIC void e1000_release_nvm_i210(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_i210");
+
+ e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 ret_val = E1000_SUCCESS;
+ s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+ DEBUGFUNC("e1000_acquire_swfw_sync_i210");
+
+ while (i < timeout) {
+ if (e1000_get_hw_semaphore_i210(hw)) {
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_swfw_sync_i210 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ DEBUGFUNC("e1000_release_swfw_sync_i210");
+
+ while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+ ; /* Empty */
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_i210");
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ /* In rare circumstances, the SW semaphore may already be held
+ * unintentionally. Clear the semaphore once before giving up.
+ */
+ if (hw->dev_spec._82575.clear_semaphore_once) {
+ hw->dev_spec._82575.clear_semaphore_once = false;
+ e1000_put_hw_semaphore_generic(hw);
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ }
+ }
+
+ /* If we do not have the semaphore here, we have to give up. */
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_generic(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the Shadow Ram to read
+ * @words: number of words to read
+ * @data: word read from the Shadow Ram
+ *
+ * Reads a 16 bit word from the Shadow Ram using the EERD register.
+ * Uses necessary synchronization semaphores.
+ **/
+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("e1000_read_nvm_srrd_i210");
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of forceful takeover procedure. However it is more efficient
+ * to read in bursts than synchronizing access for each word. */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = e1000_read_nvm_eerd(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow RAM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ * Writes data to Shadow RAM at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * data will not be committed to FLASH and also Shadow RAM will most likely
+ * contain an invalid checksum.
+ *
+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ * partially written.
+ **/
+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("e1000_write_nvm_srwr_i210");
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of forceful takeover procedure. However it is more efficient
+ * to write in bursts than synchronizing access for each word. */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = e1000_write_nvm_srwr(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
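+
+/* Editor's illustrative sketch, not part of the original Intel sources: as the
+ * comment above warns, a Shadow RAM write only becomes a valid, committed NVM
+ * image once the checksum is recomputed and the flash update is triggered,
+ * typically via the generic wrapper (offset and word below are caller-supplied
+ * placeholders):
+ *
+ *     if (e1000_write_nvm_srwr_i210(hw, offset, 1, &word) == E1000_SUCCESS)
+ *             ret_val = e1000_update_nvm_checksum(hw);
+ */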
+
+/**
+ * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow Ram to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow Ram
+ *
+ * Writes data to Shadow Ram at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * Shadow Ram will most likely contain an invalid checksum.
+ **/
+STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, k, eewr = 0;
+ u32 attempts = 100000;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_nvm_srwr");
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+ (data[i] << E1000_NVM_RW_REG_DATA) |
+ E1000_NVM_RW_REG_START;
+
+ E1000_WRITE_REG(hw, E1000_SRWR, eewr);
+
+ for (k = 0; k < attempts; k++) {
+ if (E1000_NVM_RW_REG_DONE &
+ E1000_READ_REG(hw, E1000_SRWR)) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Shadow RAM write EEWR timed out\n");
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/** e1000_read_invm_word_i210 - Reads OTP
+ * @hw: pointer to the HW structure
+ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
+ *
+ * Reads 16-bit words from the OTP. Return error when the word is not
+ * stored in OTP.
+ **/
+STATIC s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
+{
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u32 invm_dword;
+ u16 i;
+ u8 record_type, word_address;
+
+ DEBUGFUNC("e1000_read_invm_word_i210");
+
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ /* Get record type */
+ record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+ if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+ break;
+ if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+ i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+ i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+ word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+ if (word_address == address) {
+ *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+ DEBUGOUT2("Read INVM Word 0x%02x = %x",
+ address, *data);
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ }
+ if (status != E1000_SUCCESS)
+ DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
+ return status;
+}
+
+/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211
+ * @hw: pointer to the HW structure
+ * @offset: the word offset (aka eeprom offset) to read
+ * @words: number of words to read (unused; present to match the NVM read interface)
+ * @data: pointer to the data read
+ *
+ * Wrapper function to return data formerly found in the NVM.
+ **/
+STATIC s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
+ u16 E1000_UNUSEDARG words, u16 *data)
+{
+ s32 ret_val = E1000_SUCCESS;
+ UNREFERENCED_1PARAMETER(words);
+
+ DEBUGFUNC("e1000_read_invm_i210");
+
+ /* Only the MAC addr is required to be present in the iNVM */
+ switch (offset) {
+ case NVM_MAC_ADDR:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
+ &data[1]);
+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
+ &data[2]);
+ if (ret_val != E1000_SUCCESS)
+ DEBUGOUT("MAC Addr not found in iNVM\n");
+ break;
+ case NVM_INIT_CTRL_2:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_2_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_INIT_CTRL_4:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_4_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_LED_1_CFG:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_1_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_LED_0_2_CFG:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_0_2_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_ID_LED_SETTINGS:
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = ID_LED_RESERVED_FFFF;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_SUB_DEV_ID:
+ *data = hw->subsystem_device_id;
+ break;
+ case NVM_SUB_VEN_ID:
+ *data = hw->subsystem_vendor_id;
+ break;
+ case NVM_DEV_ID:
+ *data = hw->device_id;
+ break;
+ case NVM_VEN_ID:
+ *data = hw->vendor_id;
+ break;
+ default:
+ DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
+ *data = NVM_RESERVED_WORD;
+ break;
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_read_invm_version - Reads iNVM version and image type
+ * @hw: pointer to the HW structure
+ * @invm_ver: version structure for the version read
+ *
+ * Reads iNVM version and image type.
+ **/
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver)
+{
+ u32 *record = NULL;
+ u32 *next_record = NULL;
+ u32 i = 0;
+ u32 invm_dword = 0;
+ u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+ E1000_INVM_RECORD_SIZE_IN_BYTES);
+ u32 buffer[E1000_INVM_SIZE];
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u16 version = 0;
+
+ DEBUGFUNC("e1000_read_invm_version");
+
+ /* Read iNVM memory */
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ buffer[i] = invm_dword;
+ }
+
+ /* Read version number */
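+ /* Scan the version records from the last iNVM block backwards */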
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have first version location used */
+ if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+ version = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have second version location used */
+ else if ((i == 1) &&
+ ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /*
+ * Check if we have odd version location
+ * used and it is the last one used
+ */
+ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+ ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+ (i != 1))) {
+ version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+ >> 13;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /*
+ * Check if we have even version location
+ * used and it is the last one used
+ */
+ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+ ((*record & 0x3) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+
+ if (status == E1000_SUCCESS) {
+ invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+ >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+ }
+ /* Read Image Type */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have image type in first location used */
+ if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+ invm_ver->invm_img_type = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have image type in the last location used */
+ else if ((((*record & 0x3) == 0) &&
+ ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+ ((((*record & 0x3) != 0) && (i != 1)))) {
+ invm_ver->invm_img_type =
+ (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_i210");
+
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+
+ /*
+ * Replace the read function that grabs the semaphore with
+ * the one that skips it; the semaphore is already taken here.
+ */
+ read_op_ptr = hw->nvm.ops.read;
+ hw->nvm.ops.read = e1000_read_nvm_eerd;
+
+ status = e1000_validate_nvm_checksum_generic(hw);
+
+ /* Revert original read operation. */
+ hw->nvm.ops.read = read_op_ptr;
+
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+
+/**
+ * e1000_update_nvm_checksum_i210 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM. Finally, the EEPROM data is committed to the flash.
+ **/
+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_i210");
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("EEPROM read failed\n");
+ goto out;
+ }
+
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ /*
+ * Do not use hw->nvm.ops.write, hw->nvm.ops.read
+ * because we do not want to take the synchronization
+ * semaphores twice here.
+ */
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw->nvm.ops.release(hw);
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
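+ /* The checksum word makes the first NVM words sum to NVM_SUM (0xBABA) */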
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+ &checksum);
+ if (ret_val != E1000_SUCCESS) {
+ hw->nvm.ops.release(hw);
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+ goto out;
+ }
+
+ hw->nvm.ops.release(hw);
+
+ ret_val = e1000_update_flash_i210(hw);
+ } else {
+ ret_val = E1000_ERR_SWFW_SYNC;
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_flash_presence_i210 - Check if flash device is detected.
+ * @hw: pointer to the HW structure
+ *
+ **/
+bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
+{
+ u32 eec = 0;
+ bool ret_val = false;
+
+ DEBUGFUNC("e1000_get_flash_presence_i210");
+
+ eec = E1000_READ_REG(hw, E1000_EECD);
+
+ if (eec & E1000_EECD_FLASH_DETECTED_I210)
+ ret_val = true;
+
+ return ret_val;
+}
+
+/**
+ * e1000_update_flash_i210 - Commit EEPROM to the flash
+ * @hw: pointer to the HW structure
+ *
+ **/
+s32 e1000_update_flash_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 flup;
+
+ DEBUGFUNC("e1000_update_flash_i210");
+
+ ret_val = e1000_pool_flash_update_done_i210(hw);
+ if (ret_val == -E1000_ERR_NVM) {
+ DEBUGOUT("Flash update time out\n");
+ goto out;
+ }
+
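+ /* Set the flash update bit (FLUPD) to start committing the shadow RAM */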
+ flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
+ E1000_WRITE_REG(hw, E1000_EECD, flup);
+
+ ret_val = e1000_pool_flash_update_done_i210(hw);
+ if (ret_val == E1000_SUCCESS)
+ DEBUGOUT("Flash update complete\n");
+ else
+ DEBUGOUT("Flash update time out\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_pool_flash_update_done_i210 - Poll FLUDONE status.
+ * @hw: pointer to the HW structure
+ *
+ **/
+s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_NVM;
+ u32 i, reg;
+
+ DEBUGFUNC("e1000_pool_flash_update_done_i210");
+
+ for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+ reg = E1000_READ_REG(hw, E1000_EECD);
+ if (reg & E1000_EECD_FLUDONE_I210) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the i210/i211 NVM parameters and function pointers.
+ **/
+STATIC s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ DEBUGFUNC("e1000_init_nvm_params_i210");
+
+ ret_val = e1000_init_nvm_params_82575(hw);
+ nvm->ops.acquire = e1000_acquire_nvm_i210;
+ nvm->ops.release = e1000_release_nvm_i210;
+ nvm->ops.valid_led_default = e1000_valid_led_default_i210;
+ if (e1000_get_flash_presence_i210(hw)) {
+ hw->nvm.type = e1000_nvm_flash_hw;
+ nvm->ops.read = e1000_read_nvm_srrd_i210;
+ nvm->ops.write = e1000_write_nvm_srwr_i210;
+ nvm->ops.validate = e1000_validate_nvm_checksum_i210;
+ nvm->ops.update = e1000_update_nvm_checksum_i210;
+ } else {
+ hw->nvm.type = e1000_nvm_invm;
+ nvm->ops.read = e1000_read_invm_i210;
+ nvm->ops.write = e1000_null_write_nvm;
+ nvm->ops.validate = e1000_null_ops_generic;
+ nvm->ops.update = e1000_null_ops_generic;
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_init_function_pointers_i210 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_i210(struct e1000_hw *hw)
+{
+ e1000_init_function_pointers_82575(hw);
+ hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
+
+ return;
+}
+
+/**
+ * e1000_valid_led_default_i210 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set it to a valid default configuration.
+ **/
+STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_i210");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_internal_serdes:
+ *data = ID_LED_DEFAULT_I210_SERDES;
+ break;
+ case e1000_media_type_copper:
+ default:
+ *data = ID_LED_DEFAULT_I210;
+ break;
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * __e1000_access_xmdio_reg - Read/write XMDIO register
+ * @hw: pointer to the HW structure
+ * @address: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: pointer to value to read/write from/to the XMDIO address
+ * @read: boolean flag to indicate read or write
+ **/
+STATIC s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
+ u8 dev_addr, u16 *data, bool read)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("__e1000_access_xmdio_reg");
+
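+ /* Select the MMD device, then load the target register address */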
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
+ if (ret_val)
+ return ret_val;
+
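+ /* Switch MMDAC to the data function so MMDAAD carries the register data */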
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
+ dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
+ if (ret_val)
+ return ret_val;
+
+ /* Recalibrate the device back to 0 */
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
+ if (ret_val)
+ return ret_val;
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_xmdio_reg - Read XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be read from the EMI address
+ **/
+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
+{
+ DEBUGFUNC("e1000_read_xmdio_reg");
+
+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true);
+}
+
+/**
+ * e1000_write_xmdio_reg - Write XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be written to the XMDIO address
+ **/
+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
+{
+ DEBUGFUNC("e1000_read_xmdio_reg");
+
+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+}
+
+/**
+ * e1000_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an erratum in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 wuc, mdicnfg, ctrl_ext, reg_val;
+ u16 nvm_word, phy_word, pci_word, tmp_nvm;
+ int i;
+
+ /* Get and set needed register values */
+ wuc = E1000_READ_REG(hw, E1000_WUC);
+ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+ reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+ E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);
+
+ /* Get data from NVM, or set default */
+ ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+ &nvm_word);
+ if (ret_val != E1000_SUCCESS)
+ nvm_word = E1000_INVM_DEFAULT_AL;
+ tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
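+ /* Retry until the PHY no longer reports an unconfigured PLL clock */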
+ for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+ /* check current state */
+ hw->phy.ops.read_reg(hw, (E1000_PHY_PLL_FREQ_PAGE |
+ E1000_PHY_PLL_FREQ_REG), &phy_word);
+ if ((phy_word & E1000_PHY_PLL_UNCONF)
+ != E1000_PHY_PLL_UNCONF) {
+ ret_val = E1000_SUCCESS;
+ break;
+ } else {
+ ret_val = -E1000_ERR_PHY;
+ }
+ hw->phy.ops.reset(hw);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+ E1000_WRITE_REG(hw, E1000_EEARBC, reg_val);
+
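+ /* Bounce the device through D3 and back to D0 via the PCI PM control register */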
+ e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ pci_word |= E1000_PCI_PMCSR_D3;
+ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ msec_delay(1);
+ pci_word &= ~E1000_PCI_PMCSR_D3;
+ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+ E1000_WRITE_REG(hw, E1000_EEARBC, reg_val);
+
+ /* restore WUC register */
+ E1000_WRITE_REG(hw, E1000_WUC, wuc);
+ }
+ /* restore MDICNFG setting */
+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_i210 - Init hw for I210/I211
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize hw for i210 hw family.
+ **/
+s32 e1000_init_hw_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_hw_i210");
+ if ((hw->mac.type >= e1000_i210) &&
+ !(e1000_get_flash_presence_i210(hw))) {
+ ret_val = e1000_pll_workaround_i210(hw);
+ if (ret_val != E1000_SUCCESS)
+ return ret_val;
+ }
+ ret_val = e1000_init_hw_82575(hw);
+ return ret_val;
+}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.h
new file mode 100755
index 00000000..f2bd43bb
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_i210.h
@@ -0,0 +1,110 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_I210_H_
+#define _E1000_I210_H_
+
+bool e1000_get_flash_presence_i210(struct e1000_hw *hw);
+s32 e1000_update_flash_i210(struct e1000_hw *hw);
+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 *data);
+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 data);
+s32 e1000_init_hw_i210(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE 0xDB00
+#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
+
+#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
+ (u8)((invm_dword) & 0x7)
+#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
+ (u8)(((invm_dword) & 0x0000FE00) >> 9)
+#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
+ (u16)(((invm_dword) & 0xFFFF0000) >> 16)
+
+enum E1000_INVM_STRUCTURE_TYPE {
+ E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00,
+ E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01,
+ E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02,
+ E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03,
+ E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04,
+ E1000_INVM_INVALIDATED_STRUCTURE = 0x0F,
+};
+
+#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
+#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
+#define E1000_INVM_ULT_BYTES_SIZE 8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
+#define E1000_INVM_VER_FIELD_ONE 0x1FF8
+#define E1000_INVM_VER_FIELD_TWO 0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
+
+#define E1000_INVM_MAJOR_MASK 0x3F0
+#define E1000_INVM_MINOR_MASK 0xF
+#define E1000_INVM_MAJOR_SHIFT 4
+
+#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_OFF2))
+#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
+
+/* NVM offset defaults for I211 devices */
+#define NVM_INIT_CTRL_2_DEFAULT_I211 0x7243
+#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
+#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
+#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+
+/* PLL Defines */
+#define E1000_PCI_PMCSR 0x44
+#define E1000_PCI_PMCSR_D3 0x03
+#define E1000_MAX_PLL_TRIES 5
+#define E1000_PHY_PLL_UNCONF 0xFF
+#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
+#define E1000_PHY_PLL_FREQ_REG 0x000E
+#define E1000_INVM_DEFAULT_AL 0x202F
+#define E1000_INVM_AUTOLOAD 0x0A
+#define E1000_INVM_PLL_WO_VAL 0x0010
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.c
new file mode 100755
index 00000000..3b1627bf
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.c
@@ -0,0 +1,5260 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+/* 82562G 10/100 Network Connection
+ * 82562G-2 10/100 Network Connection
+ * 82562GT 10/100 Network Connection
+ * 82562GT-2 10/100 Network Connection
+ * 82562V 10/100 Network Connection
+ * 82562V-2 10/100 Network Connection
+ * 82566DC-2 Gigabit Network Connection
+ * 82566DC Gigabit Network Connection
+ * 82566DM-2 Gigabit Network Connection
+ * 82566DM Gigabit Network Connection
+ * 82566MC Gigabit Network Connection
+ * 82566MM Gigabit Network Connection
+ * 82567LM Gigabit Network Connection
+ * 82567LF Gigabit Network Connection
+ * 82567V Gigabit Network Connection
+ * 82567LM-2 Gigabit Network Connection
+ * 82567LF-2 Gigabit Network Connection
+ * 82567V-2 Gigabit Network Connection
+ * 82567LF-3 Gigabit Network Connection
+ * 82567LM-3 Gigabit Network Connection
+ * 82567LM-4 Gigabit Network Connection
+ * 82577LM Gigabit Network Connection
+ * 82577LC Gigabit Network Connection
+ * 82578DM Gigabit Network Connection
+ * 82578DC Gigabit Network Connection
+ * 82579LM Gigabit Network Connection
+ * 82579V Gigabit Network Connection
+ * Ethernet Connection I217-LM
+ * Ethernet Connection I217-V
+ * Ethernet Connection I218-V
+ * Ethernet Connection I218-LM
+ */
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
+STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
+STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
+STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
+STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
+#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
+STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
+ u8 *mc_addr_list,
+ u32 mc_addr_count);
+#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
+STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
+STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
+ bool active);
+STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
+ u16 *data);
+STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
+STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
+STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
+STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
+STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw);
+STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
+STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
+ u32 offset, u8 *data);
+STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 *data);
+STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
+ u32 offset, u16 *data);
+STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+ u32 offset, u8 byte);
+STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
+STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
+STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+
+/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+ struct ich8_hsfsts {
+ u16 flcdone:1; /* bit 0 Flash Cycle Done */
+ u16 flcerr:1; /* bit 1 Flash Cycle Error */
+ u16 dael:1; /* bit 2 Direct Access error Log */
+ u16 berasesz:2; /* bit 4:3 Sector Erase Size */
+ u16 flcinprog:1; /* bit 5 flash cycle in Progress */
+ u16 reserved1:2; /* bit 7:6 Reserved */
+ u16 reserved2:6; /* bit 13:8 Reserved */
+ u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
+ u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
+ } hsf_status;
+ u16 regval;
+};
+
+/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+ struct ich8_hsflctl {
+ u16 flcgo:1; /* 0 Flash Cycle Go */
+ u16 flcycle:2; /* 2:1 Flash Cycle */
+ u16 reserved:5; /* 7:3 Reserved */
+ u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
+ u16 flockdn:6; /* 15:10 Reserved */
+ } hsf_ctrl;
+ u16 regval;
+};
+
+/* ICH Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+ struct ich8_flracc {
+ u32 grra:8; /* 0:7 GbE region Read Access */
+ u32 grwa:8; /* 8:15 GbE region Write Access */
+ u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
+ u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
+ } hsf_flregacc;
+ u16 regval;
+};
+
+/**
+ * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
+ * @hw: pointer to the HW structure
+ *
+ * Test access to the PHY registers by reading the PHY ID registers. If
+ * the PHY ID is already known (e.g. resume path), compare it with the known ID;
+ * otherwise assume the read PHY ID is correct if it is valid.
+ *
+ * Assumes the sw/fw/hw semaphore is already acquired.
+ **/
+STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
+{
+ u16 phy_reg = 0;
+ u32 phy_id = 0;
+ s32 ret_val = 0;
+ u16 retry_count;
+ u32 mac_reg = 0;
+
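+ /* Assemble the 32-bit PHY id from the PHY_ID1 and PHY_ID2 registers */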
+ for (retry_count = 0; retry_count < 2; retry_count++) {
+ ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF))
+ continue;
+ phy_id = (u32)(phy_reg << 16);
+
+ ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF)) {
+ phy_id = 0;
+ continue;
+ }
+ phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+ break;
+ }
+
+ if (hw->phy.id) {
+ if (hw->phy.id == phy_id)
+ goto out;
+ } else if (phy_id) {
+ hw->phy.id = phy_id;
+ hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
+ goto out;
+ }
+
+ /* In case the PHY needs to be in mdio slow mode,
+ * set slow mode and try to get the PHY id again.
+ */
+ if (hw->mac.type < e1000_pch_lpt) {
+ hw->phy.ops.release(hw);
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (!ret_val)
+ ret_val = e1000_get_phy_id(hw);
+ hw->phy.ops.acquire(hw);
+ }
+
+ if (ret_val)
+ return false;
+out:
+ if (hw->mac.type == e1000_pch_lpt) {
+ /* Unforce SMBus mode in PHY */
+ hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+ }
+
+ return true;
+}
+
+/**
+ * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
+ * @hw: pointer to the HW structure
+ *
+ * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
+ * used to reset the PHY to a quiescent state when necessary.
+ **/
+STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+
+ DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
+
+ /* Set Phy Config Counter to 50msec */
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
+ mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+ mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
+
+ /* Toggle LANPHYPC Value bit */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL);
+ mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
+ E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(10);
+ mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
+ E1000_WRITE_FLUSH(hw);
+
+ if (hw->mac.type < e1000_pch_lpt) {
+ msec_delay(50);
+ } else {
+ u16 count = 20;
+
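+ /* Wait for the LANPHYPC completion (LPCD) indication, up to 100 msec */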
+ do {
+ msec_delay(5);
+ } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
+ E1000_CTRL_EXT_LPCD) && count--);
+
+ msec_delay(30);
+ }
+}
+
+/**
+ * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
+ * @hw: pointer to the HW structure
+ *
+ * Workarounds/flow necessary for PHY initialization during driver load
+ * and resume paths.
+ **/
+STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
+{
+ u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
+
+ /* Gate automatic PHY configuration by hardware on managed and
+ * non-managed 82579 and newer adapters.
+ */
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+ /* It is not possible to be certain of the current state of ULP
+ * so forcibly disable it.
+ */
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
+
+#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ DEBUGOUT("Failed to initialize PHY flow\n");
+ goto out;
+ }
+
+ /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
+ * inaccessible and resetting the PHY is not blocked, toggle the
+ * LANPHYPC Value bit to force the interconnect to PCIe mode.
+ */
+ switch (hw->mac.type) {
+ case e1000_pch_lpt:
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* Before toggling LANPHYPC, see if PHY is accessible by
+ * forcing MAC to SMBus mode first.
+ */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ /* Wait 50 milliseconds for MAC to finish any retries
+ * that it might be trying to perform from previous
+ * attempts to acknowledge any phy read requests.
+ */
+ msec_delay(50);
+
+ /* fall-through */
+ case e1000_pch2lan:
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* fall-through */
+ case e1000_pchlan:
+ if ((hw->mac.type == e1000_pchlan) &&
+ (fwsm & E1000_ICH_FWSM_FW_VALID))
+ break;
+
+ if (hw->phy.ops.check_reset_block(hw)) {
+ DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+ if (hw->mac.type >= e1000_pch_lpt) {
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* Toggling LANPHYPC brings the PHY out of SMBus mode
+ * so ensure that the MAC is also out of SMBus mode
+ */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ ret_val = -E1000_ERR_PHY;
+ }
+ break;
+ default:
+ break;
+ }
+
+ hw->phy.ops.release(hw);
+ if (!ret_val) {
+
+ /* Check to see if able to reset PHY. Print error if not */
+ if (hw->phy.ops.check_reset_block(hw)) {
+ ERROR_REPORT("Reset blocked by ME\n");
+ goto out;
+ }
+
+ /* Reset the PHY before any access to it. Doing so, ensures
+ * that the PHY is in a known good state before we read/write
+ * PHY registers. The generic reset is sufficient here,
+ * because we haven't determined the PHY type yet.
+ */
+ ret_val = e1000_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ /* On a successful reset, possibly need to wait for the PHY
+ * to quiesce to an accessible state before returning control
+ * to the calling function. If the PHY does not quiesce, then
+ * return E1000E_BLK_PHY_RESET, as this is the condition that
+ * the PHY is in.
+ */
+ ret_val = hw->phy.ops.check_reset_block(hw);
+ if (ret_val)
+ ERROR_REPORT("ME blocked access to PHY after reset\n");
+ }
+
+out:
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+ msec_delay(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_phy_params_pchlan - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific PHY parameters and function pointers.
+ **/
+STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_phy_params_pchlan");
+
+ phy->addr = 1;
+ phy->reset_delay_us = 100;
+
+ phy->ops.acquire = e1000_acquire_swflag_ich8lan;
+ phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
+ phy->ops.set_page = e1000_set_page_igp;
+ phy->ops.read_reg = e1000_read_phy_reg_hv;
+ phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
+ phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
+ phy->ops.release = e1000_release_swflag_ich8lan;
+ phy->ops.reset = e1000_phy_hw_reset_ich8lan;
+ phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
+ phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
+ phy->ops.write_reg = e1000_write_phy_reg_hv;
+ phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
+ phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ phy->id = e1000_phy_unknown;
+
+ ret_val = e1000_init_phy_workarounds_pchlan(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->id == e1000_phy_unknown)
+ switch (hw->mac.type) {
+ default:
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ return ret_val;
+ if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+ break;
+ /* fall-through */
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ /* In case the PHY needs to be in mdio slow mode,
+ * set slow mode and try to get the PHY id again.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+ phy->type = e1000_get_phy_type_from_id(phy->id);
+
+ switch (phy->type) {
+ case e1000_phy_82577:
+ case e1000_phy_82579:
+ case e1000_phy_i217:
+ phy->ops.check_polarity = e1000_check_polarity_82577;
+ phy->ops.force_speed_duplex =
+ e1000_phy_force_speed_duplex_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_info = e1000_get_phy_info_82577;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ break;
+ case e1000_phy_82578:
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific PHY parameters and function pointers.
+ **/
+STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 i = 0;
+
+ DEBUGFUNC("e1000_init_phy_params_ich8lan");
+
+ phy->addr = 1;
+ phy->reset_delay_us = 100;
+
+ phy->ops.acquire = e1000_acquire_swflag_ich8lan;
+ phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.release = e1000_release_swflag_ich8lan;
+ phy->ops.reset = e1000_phy_hw_reset_ich8lan;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
+
+ /* We may need to do this twice - once for IGP and if that fails,
+ * we'll set BM func pointers and try again
+ */
+ ret_val = e1000_determine_phy_address(hw);
+ if (ret_val) {
+ phy->ops.write_reg = e1000_write_phy_reg_bm;
+ phy->ops.read_reg = e1000_read_phy_reg_bm;
+ ret_val = e1000_determine_phy_address(hw);
+ if (ret_val) {
+ DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
+ return ret_val;
+ }
+ }
+
+ phy->id = 0;
+ while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
+ (i++ < 100)) {
+ msec_delay(1);
+ ret_val = e1000_get_phy_id(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Verify phy id */
+ switch (phy->id) {
+ case IGP03E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
+ phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ phy->type = e1000_phy_ife;
+ phy->autoneg_mask = E1000_ALL_NOT_GIG;
+ phy->ops.get_info = e1000_get_phy_info_ife;
+ phy->ops.check_polarity = e1000_check_polarity_ife;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
+ break;
+ case BME1000_E_PHY_ID:
+ phy->type = e1000_phy_bm;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->ops.read_reg = e1000_read_phy_reg_bm;
+ phy->ops.write_reg = e1000_write_phy_reg_bm;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific NVM parameters and function
+ * pointers.
+ **/
+STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 gfpreg, sector_base_addr, sector_end_addr;
+ u16 i;
+
+ DEBUGFUNC("e1000_init_nvm_params_ich8lan");
+
+ /* Can't read flash registers if the register set isn't mapped. */
+ nvm->type = e1000_nvm_flash_sw;
+ if (!hw->flash_address) {
+ DEBUGOUT("ERROR: Flash registers not mapped\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
+
+ /* sector_X_addr is a "sector"-aligned address (4096 bytes)
+ * Add 1 to sector_end_addr since this sector is included in
+ * the overall size.
+ */
+ sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+ sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+
+ /* flash_base_addr is byte-aligned */
+ nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+
+ /* find total size of the NVM, then cut in half since the total
+ * size represents two separate NVM banks.
+ */
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT);
+ nvm->flash_bank_size /= 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+
+ nvm->word_size = E1000_SHADOW_RAM_WORDS;
+
+ /* Clear shadow ram */
+ for (i = 0; i < nvm->word_size; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+ E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
+ E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
+
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
+ nvm->ops.release = e1000_release_nvm_ich8lan;
+ nvm->ops.read = e1000_read_nvm_ich8lan;
+ nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
+ nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
+ nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
+ nvm->ops.write = e1000_write_nvm_ich8lan;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific MAC parameters and function
+ * pointers.
+ **/
+STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
+ u16 pci_cfg;
+#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
+
+ DEBUGFUNC("e1000_init_mac_params_ich8lan");
+
+ /* Set media type function pointer */
+ hw->phy.media_type = e1000_media_type_copper;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 32;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
+ if (mac->type == e1000_ich8lan)
+ mac->rar_entry_count--;
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC subsystem not supported */
+ mac->arc_subsystem_valid = false;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
+ /* function id */
+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_ich8lan;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_ich8lan;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_ich8lan;
+ /* physical interface setup */
+ mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
+
+ /* LED and other operations */
+ switch (mac->type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* blink LED */
+ mac->ops.blink_led = e1000_blink_led_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_ich8lan;
+ mac->ops.led_off = e1000_led_off_ich8lan;
+ break;
+ case e1000_pch2lan:
+ mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
+ mac->ops.rar_set = e1000_rar_set_pch2lan;
+ /* fall-through */
+ case e1000_pch_lpt:
+#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
+ /* multicast address update for pch2 */
+ mac->ops.update_mc_addr_list =
+ e1000_update_mc_addr_list_pch2lan;
+#endif
+ case e1000_pchlan:
+#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
+ /* save PCH revision_id */
+ e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
+ hw->revision_id = (u8)(pci_cfg &= 0x000F);
+#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_pchlan;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_pchlan;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_pchlan;
+ mac->ops.led_off = e1000_led_off_pchlan;
+ break;
+ default:
+ break;
+ }
+
+ if (mac->type == e1000_pch_lpt) {
+ mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
+ mac->ops.rar_set = e1000_rar_set_pch_lpt;
+ mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
+ }
+
+ /* Enable PCS Lock-loss workaround for ICH8 */
+ if (mac->type == e1000_ich8lan)
+ e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * __e1000_access_emi_reg_locked - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ *
+ * This helper function assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("__e1000_access_emi_reg_locked");
+
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
+ data);
+ else
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
+ *data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_emi_reg_locked - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+ DEBUGFUNC("e1000_read_emi_reg_locked");
+
+ return __e1000_access_emi_reg_locked(hw, addr, data, true);
+}
+
+/**
+ * e1000_write_emi_reg_locked - Write Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be written to the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
+{
+ DEBUGFUNC("e1000_read_emi_reg_locked");
+
+ return __e1000_access_emi_reg_locked(hw, addr, &data, false);
+}
+
+/**
+ * e1000_set_eee_pchlan - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE based on setting in dev_spec structure, the duplex of
+ * the link and the EEE capabilities of the link partner. The LPI Control
+ * register bits will remain set only if/when link is up.
+ *
+ * EEE LPI must not be asserted earlier than one second after link is up.
+ * On 82579, EEE LPI should not be enabled until such time, otherwise there
+ * can be link issues with some switches. Other devices can have EEE LPI
+ * enabled immediately upon link up since they have a timer in hardware which
+ * prevents LPI from being asserted too early.
+ **/
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ s32 ret_val;
+ u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
+
+ DEBUGFUNC("e1000_set_eee_pchlan");
+
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ lpa = I82579_EEE_LP_ABILITY;
+ pcs_status = I82579_EEE_PCS_STATUS;
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ break;
+ case e1000_phy_i217:
+ lpa = I217_EEE_LP_ABILITY;
+ pcs_status = I217_EEE_PCS_STATUS;
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
+ if (ret_val)
+ goto release;
+
+ /* Clear bits that enable EEE in various speeds */
+ lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
+
+ /* Enable EEE if not disabled by user */
+ if (!dev_spec->eee_disable) {
+ /* Save off link partner's EEE ability */
+ ret_val = e1000_read_emi_reg_locked(hw, lpa,
+ &dev_spec->eee_lp_ability);
+ if (ret_val)
+ goto release;
+
+ /* Read EEE advertisement */
+ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
+ if (ret_val)
+ goto release;
+
+ /* Enable EEE only for speeds in which the link partner is
+ * EEE capable and for which we advertise EEE.
+ */
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+ hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
+ if (data & NWAY_LPAR_100TX_FD_CAPS)
+ lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+ else
+ /* EEE is not supported in 100Half, so ignore
+ * partner's EEE in 100 ability if full-duplex
+ * is not advertised.
+ */
+ dev_spec->eee_lp_ability &=
+ ~I82579_EEE_100_SUPPORTED;
+ }
+ }
+
+ /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+ if (ret_val)
+ goto release;
+
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
+ * preventing further DMA write requests. Work around the issue by disabling
+ * the de-assertion of the clock request when in 1Gbps mode.
+ * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
+ * speeds in order to avoid Tx hangs.
+ **/
+STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
+{
+ u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
+ u32 status = E1000_READ_REG(hw, E1000_STATUS);
+ s32 ret_val = E1000_SUCCESS;
+ u16 reg;
+
+ if (link && (status & E1000_STATUS_SPEED_1000)) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ &reg);
+ if (ret_val)
+ goto release;
+
+ ret_val =
+ e1000_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ reg &
+ ~E1000_KMRNCTRLSTA_K1_ENABLE);
+ if (ret_val)
+ goto release;
+
+ usec_delay(10);
+
+ E1000_WRITE_REG(hw, E1000_FEXTNVM6,
+ fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
+
+ ret_val =
+ e1000_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ reg);
+release:
+ hw->phy.ops.release(hw);
+ } else {
+ /* clear FEXTNVM6 bit 8 on link down or 10/100 */
+ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
+
+ if (!link || ((status & E1000_STATUS_SPEED_100) &&
+ (status & E1000_STATUS_FD)))
+ goto update_fextnvm6;
+
+ ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear link status transmit timeout */
+ reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
+
+ if (status & E1000_STATUS_SPEED_100) {
+ /* Set inband Tx timeout to 5x10us for 100Half */
+ reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Do not extend the K1 entry latency for 100Half */
+ fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ } else {
+ /* Set inband Tx timeout to 50x10us for 10Full/Half */
+ reg |= 50 <<
+ I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Extend the K1 entry latency for 10 Mbps */
+ fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ }
+
+ ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
+ if (ret_val)
+ return ret_val;
+
+update_fextnvm6:
+ E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
+ }
+
+ return ret_val;
+}
+
+#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+/**
+ * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @to_sx: boolean indicating a system power state transition to Sx
+ *
+ * When link is down, configure ULP mode to significantly reduce the power
+ * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
+ * ME firmware to start the ULP configuration. If not on an ME enabled
+ * system, configure the ULP mode by software.
+ */
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
+{
+ u32 mac_reg;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_reg;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
+ return 0;
+
+ if (!to_sx) {
+ int i = 0;
+
+ /* Poll up to 5 seconds for Cable Disconnected indication */
+ while (!(E1000_READ_REG(hw, E1000_FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
+ /* Bail if link is re-acquired */
+ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
+ return -E1000_ERR_PHY;
+
+ if (i++ == 100)
+ break;
+
+ msec_delay(50);
+ }
+ DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
+ (E1000_READ_REG(hw, E1000_FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
+ i * 50);
+ }
+
+ if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ /* Request ME configure ULP mode in the PHY */
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* During S0 Idle keep the phy in PCI-E mode */
+ if (hw->dev_spec.ich8lan.smbus_disable)
+ goto skip_smbus;
+
+ /* Force SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Force SMBus mode in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+skip_smbus:
+ if (!to_sx) {
+ /* Change the 'Link Status Change' interrupt to trigger
+ * on 'Cable Status Change'
+ */
+ ret_val = e1000_read_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_OP_MODES,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
+ e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
+ phy_reg);
+ }
+
+ /* Set Inband ULP Exit, Reset to SMBus mode and
+ * Disable SMBus Release on PERST# in PHY
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ if (to_sx) {
+ if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
+ phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+
+ phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+ } else {
+ phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+ }
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Set Disable SMBus Release on PERST# in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
+ mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
+
+ /* Commit ULP changes in PHY by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ if (!to_sx) {
+ /* Disable Tx so that the MAC doesn't send any (buffered)
+ * packets to the PHY.
+ */
+ mac_reg = E1000_READ_REG(hw, E1000_TCTL);
+ mac_reg &= ~E1000_TCTL_EN;
+ E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
+ }
+release:
+ hw->phy.ops.release(hw);
+out:
+ if (ret_val)
+ DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
+
+ return ret_val;
+}
+
+/**
+ * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @force: boolean indicating whether or not to force disabling ULP
+ *
+ * Un-configure ULP mode when link is up, the system is transitioned from
+ * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
+ * system, poll for an indication from ME that ULP has been un-configured.
+ * If not on an ME enabled system, un-configure the ULP mode by software.
+ *
+ * During nominal operation, this function is called when link is acquired
+ * to disable ULP mode (force=false); otherwise, for example when unloading
+ * the driver or during Sx->S0 transitions, this is called with force=true
+ * to forcibly disable ULP.
+ *
+ * When the cable is plugged in while the device is in D0, a Cable Status
+ * Change interrupt is generated which causes this function to be called
+ * to partially disable ULP mode and restart autonegotiation. This function
+ * is then called again due to the resulting Link Status Change interrupt
+ * to finish cleaning up after the ULP flow.
+ */
+s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 mac_reg;
+ u16 phy_reg;
+ int i = 0;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
+ return 0;
+
+ if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (force) {
+ /* Request ME un-configure ULP mode in the PHY */
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+ }
+
+ /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
+ while (E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_FWSM_ULP_CFG_DONE) {
+ if (i++ == 10) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ msec_delay(10);
+ }
+ DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+
+ if (force) {
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+ } else {
+ /* Clear H2ME.ULP after ME ULP configuration */
+ mac_reg = E1000_READ_REG(hw, E1000_H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
+
+ /* Restore link speed advertisements and restart
+ * Auto-negotiation
+ */
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ }
+
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* Revert the change that made the 'Link Status Change'
+ * interrupt trigger on 'Cable Status Change'
+ */
+ ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
+ e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
+
+ if (force)
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+
+ /* Unforce SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val) {
+ /* The MAC might be in PCIe mode, so temporarily force to
+ * SMBus mode in order to access the PHY.
+ */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ msec_delay(50);
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ }
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+
+ /* When ULP mode was previously entered, K1 was disabled by the
+ * hardware. Re-Enable K1 in the PHY when exiting ULP.
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= HV_PM_CTRL_K1_ENABLE;
+ e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
+
+ /* Clear ULP enabled configuration */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ /* CSC interrupt received due to ULP Indication */
+ if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
+ phy_reg &= ~(I218_ULP_CONFIG1_IND |
+ I218_ULP_CONFIG1_STICKY_ULP |
+ I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_WOL_HOST |
+ I218_ULP_CONFIG1_INBAND_EXIT |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Commit ULP changes by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Clear Disable SMBus Release on PERST# in MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
+ mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
+
+ if (!force) {
+ hw->phy.ops.release(hw);
+
+ if (hw->mac.autoneg)
+ e1000_phy_setup_autoneg(hw);
+
+ e1000_sw_lcd_config_ich8lan(hw);
+
+ e1000_oem_bits_config_ich8lan(hw, true);
+
+ /* Set ULP state to unknown and return non-zero to
+ * indicate no link (yet) and re-enter on the next LSC
+ * to finish disabling ULP flow.
+ */
+ hw->dev_spec.ich8lan.ulp_state =
+ e1000_ulp_state_unknown;
+
+ return 1;
+ }
+ }
+
+ /* Re-enable Tx */
+ mac_reg = E1000_READ_REG(hw, E1000_TCTL);
+ mac_reg |= E1000_TCTL_EN;
+ E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
+
+release:
+ hw->phy.ops.release(hw);
+ if (force) {
+ hw->phy.ops.reset(hw);
+ msec_delay(50);
+ }
+out:
+ if (ret_val)
+ DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
+
+ return ret_val;
+}
+
+#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+/**
+ * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link = false;
+ u16 phy_reg;
+
+ DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
+
+ /* We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status)
+ return E1000_SUCCESS;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Check the MAC's STATUS register to determine link state
+ * since the PHY could be inaccessible while in ULP mode.
+ */
+ link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
+ if (link)
+ ret_val = e1000_disable_ulp_lpt_lp(hw, false);
+ else
+ ret_val = e1000_enable_ulp_lpt_lp(hw, false);
+
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->mac.type == e1000_pchlan) {
+ ret_val = e1000_k1_gig_workaround_hv(hw, link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* When connected at 10Mbps half-duplex, some parts are excessively
+ * aggressive resulting in many collisions. To avoid this, increase
+ * the IPG and reduce Rx latency in the PHY.
+ */
+ if (((hw->mac.type == e1000_pch2lan) ||
+ (hw->mac.type == e1000_pch_lpt)) && link) {
+ u32 reg;
+ reg = E1000_READ_REG(hw, E1000_STATUS);
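+ /* Neither the FD bit nor any speed bit set in STATUS means the
+ * link resolved to 10Mbps half-duplex (the case handled below).
+ */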
+ if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+ u16 emi_addr;
+
+ reg = E1000_READ_REG(hw, E1000_TIPG);
+ reg &= ~E1000_TIPG_IPGT_MASK;
+ reg |= 0xFF;
+ E1000_WRITE_REG(hw, E1000_TIPG, reg);
+
+ /* Reduce Rx latency in analog PHY */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->mac.type == e1000_pch2lan)
+ emi_addr = I82579_RX_CONFIG;
+ else
+ emi_addr = I217_RX_CONFIG;
+ ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
+
+ hw->phy.ops.release(hw);
+
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* Work-around I218 hang issue */
+ if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+ (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ ret_val = e1000_k1_workaround_lpt_lp(hw, link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Clear link partner's EEE ability */
+ hw->dev_spec.ich8lan.eee_lp_ability = 0;
+
+ if (!link)
+ return E1000_SUCCESS; /* No link detected */
+
+ mac->get_link_status = false;
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ ret_val = e1000_k1_workaround_lv(hw);
+ if (ret_val)
+ return ret_val;
+ /* fall-thru */
+ case e1000_pchlan:
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1000_link_stall_workaround_hv(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Workaround for PCHx parts in half-duplex:
+ * Set the number of preambles removed from the packet
+ * when it is passed from the PHY to the MAC to prevent
+ * the MAC from misinterpreting the packet type.
+ */
+ hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
+ phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
+
+ if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
+ E1000_STATUS_FD)
+ phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+
+ hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000_check_downshift_generic(hw);
+
+ /* Enable/Disable EEE after link up */
+ if (hw->phy.type > e1000_phy_82579) {
+ ret_val = e1000_set_eee_pchlan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg)
+ return -E1000_ERR_CONFIG;
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ mac->ops.config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific function pointers for PHY, MAC, and NVM.
+ **/
+void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_ich8lan");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
+ break;
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the mutex for performing NVM operations.
+ **/
+STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_acquire_nvm_ich8lan");
+
+ E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_release_nvm_ich8lan - Release NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Releases the mutex used while performing NVM operations.
+ **/
+STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_ich8lan");
+
+ E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
+
+ return;
+}
+
+/**
+ * e1000_acquire_swflag_ich8lan - Acquire software control flag
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the software control flag for performing PHY and select
+ * MAC CSR accesses.
+ **/
+STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_acquire_swflag_ich8lan");
+
+ E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
+
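+ /* First wait for any current owner to release the SWFLAG bit, then
+ * set it and poll until hardware confirms this driver owns the flag.
+ */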
+ while (timeout) {
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
+ break;
+
+ msec_delay_irq(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("SW has already locked the resource.\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ timeout = SW_FLAG_TIMEOUT;
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+
+ while (timeout) {
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
+
+ msec_delay_irq(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
+ E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+out:
+ if (ret_val)
+ E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_swflag_ich8lan - Release software control flag
+ * @hw: pointer to the HW structure
+ *
+ * Releases the software control flag for performing PHY and select
+ * MAC CSR accesses.
+ **/
+STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_release_swflag_ich8lan");
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+ } else {
+ DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
+ }
+
+ E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
+
+ return;
+}
+
+/**
+ * e1000_check_mng_mode_ich8lan - Checks management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has any manageability enabled.
+ * This is a function pointer entry point only called by read/write
+ * routines for the PHY and NVM parts.
+ **/
+STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ DEBUGFUNC("e1000_check_mng_mode_ich8lan");
+
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_check_mng_mode_pchlan - Checks management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has iAMT enabled.
+ * This is a function pointer entry point only called by read/write
+ * routines for the PHY and NVM parts.
+ **/
+STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ DEBUGFUNC("e1000_check_mng_mode_pchlan");
+
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_rar_set_pch2lan - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr. For 82579, RAR[0] is the base address register that is to
+ * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
+ * Use SHRA[0-3] in place of those reserved for ME.
+ **/
+STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ DEBUGFUNC("e1000_rar_set_pch2lan");
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] |
+ ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
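+ /* Worked example (illustration only): for MAC 00:11:22:33:44:55 the
+ * shifts above pack rar_low as 0x33221100 and rar_high as 0x00005544,
+ * before the Address Valid (AV) bit is set below.
+ */
+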
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ if (index == 0) {
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
+ return;
+ }
+
+ /* RAR[1-6] are owned by manageability. Skip those and program the
+ * next address into the SHRA register array.
+ */
+ if (index < (u32) (hw->mac.rar_entry_count)) {
+ s32 ret_val;
+
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+ if (ret_val)
+ goto out;
+
+ E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
+ E1000_WRITE_FLUSH(hw);
+
+ e1000_release_swflag_ich8lan(hw);
+
+ /* verify the register updates */
+ if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
+ (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
+ return;
+
+ DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
+ (index - 1), E1000_READ_REG(hw, E1000_FWSM));
+ }
+
+out:
+ DEBUGOUT1("Failed to write receive address at index %d\n", index);
+}
+
+/**
+ * e1000_rar_set_pch_lpt - Set receive address registers
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address register array at index to the address passed
+ * in by addr. For LPT, RAR[0] is the base address register that is to
+ * contain the MAC address. SHRA[0-10] are the shared receive address
+ * registers that are shared between the Host and manageability engine (ME).
+ **/
+STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+ u32 wlock_mac;
+
+ DEBUGFUNC("e1000_rar_set_pch_lpt");
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ if (index == 0) {
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
+ return;
+ }
+
+ /* The manageability engine (ME) can lock certain SHRAR registers that
+ * it is using - those registers are unavailable for use.
+ */
+ if (index < hw->mac.rar_entry_count) {
+ wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_FWSM_WLOCK_MAC_MASK;
+ wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
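+ /* As used below: wlock_mac == 0 leaves all SHRA entries writable,
+ * wlock_mac == 1 locks them all, and any larger value only permits
+ * writes for index <= wlock_mac.
+ */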
+ /* Check if all SHRAR registers are locked */
+ if (wlock_mac == 1)
+ goto out;
+
+ if ((wlock_mac == 0) || (index <= wlock_mac)) {
+ s32 ret_val;
+
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+
+ if (ret_val)
+ goto out;
+
+ E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
+ rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
+ rar_high);
+ E1000_WRITE_FLUSH(hw);
+
+ e1000_release_swflag_ich8lan(hw);
+
+ /* verify the register updates */
+ if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
+ (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
+ return;
+ }
+ }
+
+out:
+ DEBUGOUT1("Failed to write receive address at index %d\n", index);
+}
+
+#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
+/**
+ * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
+ u8 *mc_addr_list,
+ u32 mc_addr_count)
+{
+ u16 phy_reg = 0;
+ int i;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
+
+ e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val)
+ goto release;
+
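+ /* Mirror each 32-bit MTA shadow entry into a pair of 16-bit BM_MTA
+ * PHY wakeup registers: the low word first, then the high word.
+ */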
+ for (i = 0; i < hw->mac.mta_reg_count; i++) {
+ hw->phy.ops.write_reg_page(hw, BM_MTA(i),
+ (u16)(hw->mac.mta_shadow[i] &
+ 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
+ (u16)((hw->mac.mta_shadow[i] >> 16) &
+ 0xFFFF));
+ }
+
+ e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+release:
+ hw->phy.ops.release(hw);
+}
+
+#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
+/**
+ * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Checks if firmware is blocking the reset of the PHY.
+ * This is a function pointer entry point only called by
+ * reset routines.
+ **/
+STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+ bool blocked = false;
+ int i = 0;
+
+ DEBUGFUNC("e1000_check_reset_block_ich8lan");
+
+ do {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
+ blocked = true;
+ msec_delay(10);
+ continue;
+ }
+ blocked = false;
+ } while (blocked && (i++ < 10));
+ return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ * @hw: pointer to the HW structure
+ *
+ * Assumes semaphore already acquired.
+ *
+ **/
+STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ u32 strap = E1000_READ_REG(hw, E1000_STRAP);
+ u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
+ E1000_STRAP_SMT_FREQ_SHIFT;
+ s32 ret_val;
+
+ strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~HV_SMB_ADDR_MASK;
+ phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+ phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+
+ if (hw->phy.type == e1000_phy_i217) {
+ /* Restore SMBus frequency */
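+ /* A strap frequency field of 0 is treated as unsupported; otherwise
+ * (freq - 1) supplies the two frequency bits programmed below.
+ */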
+ if (freq--) {
+ phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
+ phy_data |= (freq & (1 << 0)) <<
+ HV_SMB_ADDR_FREQ_LOW_SHIFT;
+ phy_data |= (freq & (1 << 1)) <<
+ (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
+ } else {
+ DEBUGOUT("Unsupported SMB frequency in PHY\n");
+ }
+ }
+
+ return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+}
+
+/**
+ * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ *
+ * SW should configure the LCD from the NVM extended configuration region
+ * as a workaround for certain parts.
+ **/
+STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+ s32 ret_val = E1000_SUCCESS;
+ u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+ DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
+
+ /* Initialize the PHY from the NVM on ICH platforms. This
+ * is needed due to an issue where the NVM configuration is
+ * not properly autoloaded after power transitions.
+ * Therefore, after each PHY reset, we will load the
+ * configuration data out of the NVM manually.
+ */
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ if (phy->type != e1000_phy_igp_3)
+ return ret_val;
+
+ if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
+ (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ break;
+ }
+ /* Fall-thru */
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+ break;
+ default:
+ return ret_val;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ data = E1000_READ_REG(hw, E1000_FEXTNVM);
+ if (!(data & sw_cfg_mask))
+ goto release;
+
+ /* Make sure HW does not configure LCD from PHY
+ * extended configuration before SW configuration
+ */
+ data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if ((hw->mac.type < e1000_pch2lan) &&
+ (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
+ goto release;
+
+ cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
+ cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+ cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+ if (!cnf_size)
+ goto release;
+
+ cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+ cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+ if (((hw->mac.type == e1000_pchlan) &&
+ !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
+ (hw->mac.type > e1000_pchlan)) {
+ /* HW configures the SMBus address and LEDs when the
+ * OEM and LCD Write Enable bits are set in the NVM.
+ * When both NVM bits are cleared, SW will configure
+ * them instead.
+ */
+ ret_val = e1000_write_smbus_addr(hw);
+ if (ret_val)
+ goto release;
+
+ data = E1000_READ_REG(hw, E1000_LEDCTL);
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
+ (u16)data);
+ if (ret_val)
+ goto release;
+ }
+
+ /* Configure LCD from extended configuration region. */
+
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
+
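+ /* Each extended configuration entry below occupies two consecutive
+ * NVM words: the register data first, then the PHY register address.
+ */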
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
+ &reg_data);
+ if (ret_val)
+ goto release;
+
+ ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
+ 1, &reg_addr);
+ if (ret_val)
+ goto release;
+
+ /* Save off the PHY page for future writes. */
+ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+ phy_page = reg_data;
+ continue;
+ }
+
+ reg_addr &= PHY_REG_MASK;
+ reg_addr |= phy_page;
+
+ ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
+ reg_data);
+ if (ret_val)
+ goto release;
+ }
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_k1_gig_workaround_hv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
+ * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
+ * If link is down, the function will restore the default K1 setting located
+ * in the NVM.
+ **/
+STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 status_reg = 0;
+ bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
+
+ DEBUGFUNC("e1000_k1_gig_workaround_hv");
+
+ if (hw->mac.type != e1000_pchlan)
+ return E1000_SUCCESS;
+
+ /* Wrap the whole flow with the sw flag */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
+ if (link) {
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
+ &status_reg);
+ if (ret_val)
+ goto release;
+
+ status_reg &= (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
+
+ if (status_reg == (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ k1_enable = false;
+ }
+
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
+ &status_reg);
+ if (ret_val)
+ goto release;
+
+ status_reg &= (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_MASK);
+
+ if (status_reg == (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_1000))
+ k1_enable = false;
+ }
+
+ /* Link stall fix for link up */
+ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+ 0x0100);
+ if (ret_val)
+ goto release;
+
+ } else {
+ /* Link stall fix for link down */
+ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+ 0x4100);
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
+
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_configure_k1_ich8lan - Configure K1 power state
+ * @hw: pointer to the HW structure
+ * @k1_enable: K1 state to configure
+ *
+ * Configure the K1 power state based on the provided parameter.
+ * Assumes semaphore already acquired.
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ **/
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
+{
+ s32 ret_val;
+ u32 ctrl_reg = 0;
+ u32 ctrl_ext = 0;
+ u32 reg = 0;
+ u16 kmrn_reg = 0;
+
+ DEBUGFUNC("e1000_configure_k1_ich8lan");
+
+ ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ &kmrn_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (k1_enable)
+ kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
+ else
+ kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
+
+ ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+ kmrn_reg);
+ if (ret_val)
+ return ret_val;
+
+ usec_delay(20);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+
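+ /* Temporarily force the MAC speed selection (clear the 1000/100 speed
+ * bits, set FRCSPD) and set speed-bypass in CTRL_EXT, presumably so the
+ * new K1 setting is latched, then restore both registers after delays.
+ */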
+ reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ reg |= E1000_CTRL_FRCSPD;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(20);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(20);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ * @d0_state: boolean indicating whether the device is entering D0 or D3
+ *
+ * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
+ * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
+ * in NVM determine whether HW should configure LPLU and Gbe Disable.
+ **/
+STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
+{
+ s32 ret_val = 0;
+ u32 mac_reg;
+ u16 oem_reg;
+
+ DEBUGFUNC("e1000_oem_bits_config_ich8lan");
+
+ if (hw->mac.type < e1000_pchlan)
+ return ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->mac.type == e1000_pchlan) {
+ mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+ if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+ goto release;
+ }
+
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
+ if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
+ goto release;
+
+ mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
+
+ ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+ if (ret_val)
+ goto release;
+
+ oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
+
+ if (d0_state) {
+ if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
+ oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+ if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
+ oem_reg |= HV_OEM_BITS_LPLU;
+ } else {
+ if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
+ oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+ if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
+ E1000_PHY_CTRL_NOND0A_LPLU))
+ oem_reg |= HV_OEM_BITS_LPLU;
+ }
+
+ /* Set Restart auto-neg to activate the bits */
+ if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
+ !hw->phy.ops.check_reset_block(hw))
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+
+ ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
+
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+
+/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
+
+ ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= HV_KMRN_MDIO_SLOW;
+
+ ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_hv_phy_workarounds_ich8lan - PHY workarounds after PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * A series of PHY workarounds to be done after every PHY reset.
+ **/
+STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
+
+ if (hw->mac.type != e1000_pchlan)
+ return E1000_SUCCESS;
+
+ /* Set MDIO slow mode before any other MDIO access */
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (((hw->phy.type == e1000_phy_82577) &&
+ ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
+ ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
+ /* Disable generation of early preamble */
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
+ if (ret_val)
+ return ret_val;
+
+ /* Preamble tuning for SSC */
+ ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
+ 0xA204);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->phy.type == e1000_phy_82578) {
+ /* Return registers to default by doing a soft reset then
+ * writing 0x3140 to the control register.
+ */
+ if (hw->phy.revision < 2) {
+ e1000_phy_sw_reset_generic(hw);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
+ 0x3140);
+ }
+ }
+
+ /* Select page 0 */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy.addr = 1;
+ ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure the K1 Si workaround during phy reset assuming there is
+ * link so that it disables K1 if link is at 1Gbps.
+ */
+ ret_val = e1000_k1_gig_workaround_hv(hw, true);
+ if (ret_val)
+ return ret_val;
+
+ /* Workaround for link disconnects on a busy hub in half duplex */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
+ phy_data & 0x00FF);
+ if (ret_val)
+ goto release;
+
+ /* set MSE higher to enable link to stay up when noise is high */
+ ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
+ * @hw: pointer to the HW structure
+ **/
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+ u16 i, phy_reg = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val)
+ goto release;
+
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
+ for (i = 0; i < (hw->mac.rar_entry_count); i++) {
+ mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
+ (u16)((mac_reg >> 16) & 0xFFFF));
+
+ mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
+ (u16)((mac_reg & E1000_RAH_AV)
+ >> 16));
+ }
+
+ e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+release:
+ hw->phy.ops.release(hw);
+}
+
+#ifndef CRC32_OS_SUPPORT
+STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
+{
+ u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
+ u32 i, j, mask, crc;
+
+ DEBUGFUNC("e1000_calc_rx_da_crc");
+
+ crc = 0xffffffff;
+ for (i = 0; i < 6; i++) {
+ crc = crc ^ mac[i];
+ for (j = 8; j > 0; j--) {
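+ /* mask becomes 0xFFFFFFFF when the CRC's LSB is set and 0
+ * otherwise, so the reflected polynomial is XORed in only on
+ * a 1 bit (standard bit-reflected CRC-32).
+ */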
+ mask = (crc & 1) * (-1);
+ crc = (crc >> 1) ^ (poly & mask);
+ }
+ }
+ return ~crc;
+}
+
+#endif /* CRC32_OS_SUPPORT */
+/**
+ * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
+ * with 82579 PHY
+ * @hw: pointer to the HW structure
+ * @enable: flag to enable/disable workaround when enabling/disabling jumbos
+ **/
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_reg, data;
+ u32 mac_reg;
+ u16 i;
+
+ DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
+
+ if (hw->mac.type < e1000_pch2lan)
+ return E1000_SUCCESS;
+
+ /* disable Rx path while enabling/disabling workaround */
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
+ phy_reg | (1 << 14));
+ if (ret_val)
+ return ret_val;
+
+ if (enable) {
+ /* Write Rx addresses (rar_entry_count for RAL/H, and
+ * SHRAL/H) and initial CRC values to the MAC
+ */
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ u8 mac_addr[ETH_ADDR_LEN] = {0};
+ u32 addr_high, addr_low;
+
+ addr_high = E1000_READ_REG(hw, E1000_RAH(i));
+ if (!(addr_high & E1000_RAH_AV))
+ continue;
+ addr_low = E1000_READ_REG(hw, E1000_RAL(i));
+ mac_addr[0] = (addr_low & 0xFF);
+ mac_addr[1] = ((addr_low >> 8) & 0xFF);
+ mac_addr[2] = ((addr_low >> 16) & 0xFF);
+ mac_addr[3] = ((addr_low >> 24) & 0xFF);
+ mac_addr[4] = (addr_high & 0xFF);
+ mac_addr[5] = ((addr_high >> 8) & 0xFF);
+
+#ifndef CRC32_OS_SUPPORT
+ E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
+ e1000_calc_rx_da_crc(mac_addr));
+#else /* CRC32_OS_SUPPORT */
+ E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
+ E1000_CRC32(ETH_ADDR_LEN, mac_addr));
+#endif /* CRC32_OS_SUPPORT */
+ }
+
+ /* Write Rx addresses to the PHY */
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+ /* Enable jumbo frame workaround in the MAC */
+ mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
+ mac_reg &= ~(1 << 14);
+ mac_reg |= (7 << 15);
+ E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
+
+ mac_reg = E1000_READ_REG(hw, E1000_RCTL);
+ mac_reg |= E1000_RCTL_SECRC;
+ E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
+
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data | (1 << 0));
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* Enable jumbo frame workaround in the PHY */
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ data |= (0x37 << 5);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
+ data &= ~(1 << 13);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x1A << 2);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
+ ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
+ (1 << 10));
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Write MAC register values back to h/w defaults */
+ mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
+ mac_reg &= ~(0xF << 14);
+ E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
+
+ mac_reg = E1000_READ_REG(hw, E1000_RCTL);
+ mac_reg &= ~E1000_RCTL_SECRC;
+ E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
+
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data & ~(1 << 0));
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* Write PHY register values back to h/w defaults */
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
+ data |= (1 << 13);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x8 << 2);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
+ ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
+ ~(1 << 10));
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* re-enable Rx path after enabling/disabling workaround */
+ return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
+ ~(1 << 14));
+}
+
+/**
+ * e1000_lv_phy_workarounds_ich8lan - PHY workarounds after PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * A series of PHY workarounds to be done after every PHY reset.
+ **/
+STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
+
+ if (hw->mac.type != e1000_pch2lan)
+ return E1000_SUCCESS;
+
+ /* Set MDIO slow mode before any other MDIO access */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ /* set MSE higher to enable link to stay up when noise is high */
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
+ if (ret_val)
+ goto release;
+ /* drop the link after the MSE threshold has been reached 5 times */
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_k1_workaround_lv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
+ * Disable K1 for 1000 and 100 speeds.
+ **/
+STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 status_reg = 0;
+
+ DEBUGFUNC("e1000_k1_workaround_lv");
+
+ if (hw->mac.type != e1000_pch2lan)
+ return E1000_SUCCESS;
+
+ /* Set K1 beacon duration based on 10Mbps speed */
+ ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+ == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+ if (status_reg &
+ (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
+ u16 pm_phy_reg;
+
+ /* LV 1G/100 Packet drop issue workaround */
+ ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
+ &pm_phy_reg);
+ if (ret_val)
+ return ret_val;
+ pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
+ ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
+ pm_phy_reg);
+ if (ret_val)
+ return ret_val;
+ } else {
+ u32 mac_reg;
+ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ * @hw: pointer to the HW structure
+ * @gate: boolean set to true to gate, false to ungate
+ *
+ * Gate/ungate the automatic PHY configuration via hardware; perform
+ * the configuration via software instead.
+ **/
+STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
+
+ if (hw->mac.type < e1000_pch2lan)
+ return;
+
+ extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+
+ if (gate)
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ else
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+ E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+}
+
+/**
+ * e1000_lan_init_done_ich8lan - Check for PHY config completion
+ * @hw: pointer to the HW structure
+ *
+ * Check for the appropriate indication that the MAC has finished
+ * configuring the PHY after a software reset.
+ **/
+STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
+{
+ u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+
+ DEBUGFUNC("e1000_lan_init_done_ich8lan");
+
+ /* Wait for basic configuration to complete before proceeding */
+ do {
+ data = E1000_READ_REG(hw, E1000_STATUS);
+ data &= E1000_STATUS_LAN_INIT_DONE;
+ usec_delay(100);
+ } while ((!data) && --loop);
+
+ /* If basic configuration is incomplete before the above loop
+ * count reaches 0, loading the configuration from NVM will
+ * leave the PHY in a bad state possibly resulting in no link.
+ */
+ if (loop == 0)
+ DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
+
+ /* Clear the Init Done bit for the next init event */
+ data = E1000_READ_REG(hw, E1000_STATUS);
+ data &= ~E1000_STATUS_LAN_INIT_DONE;
+ E1000_WRITE_REG(hw, E1000_STATUS, data);
+}
+
+/**
+ * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 reg;
+
+ DEBUGFUNC("e1000_post_phy_reset_ich8lan");
+
+ if (hw->phy.ops.check_reset_block(hw))
+ return E1000_SUCCESS;
+
+ /* Allow time for h/w to get to quiescent state after reset */
+ msec_delay(10);
+
+ /* Perform any necessary post-reset workarounds */
+ switch (hw->mac.type) {
+ case e1000_pchlan:
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_pch2lan:
+ ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the host wakeup bit after lcd reset */
+ if (hw->mac.type >= e1000_pchlan) {
+ hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
+ reg &= ~BM_WUC_HOST_WU_BIT;
+ hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
+ }
+
+ /* Configure the LCD with the extended configuration region in NVM */
+ ret_val = e1000_sw_lcd_config_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure the LCD with the OEM bits in NVM */
+ ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+
+ if (hw->mac.type == e1000_pch2lan) {
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if (!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_ICH_FWSM_FW_VALID)) {
+ msec_delay(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
+ /* Set EEE LPI Update Timer to 200usec */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_emi_reg_locked(hw,
+ I82579_LPI_UPDATE_TIMER,
+ 0x1387);
+ hw->phy.ops.release(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY
+ * This is a function pointer entry point called by drivers
+ * or other shared routines.
+ **/
+STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
+
+ /* Gate automatic PHY configuration by hardware on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+ ret_val = e1000_phy_hw_reset_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_post_phy_reset_ich8lan(hw);
+}
+
+/**
+ * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU state according to the active flag. For PCH, if OEM write
+ * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
+ * the phy speed. This function will manually set the LPLU bit and restart
+ * auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ * since it configures the same bit.
+ **/
+STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+ s32 ret_val;
+ u16 oem_reg;
+
+ DEBUGFUNC("e1000_set_lplu_state_pchlan");
+
+ ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (active)
+ oem_reg |= HV_OEM_BITS_LPLU;
+ else
+ oem_reg &= ~HV_OEM_BITS_LPLU;
+
+ if (!hw->phy.ops.check_reset_block(hw))
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+
+ return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
+}
+
+/**
+ * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 phy_ctrl;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
+
+ if (phy->type == e1000_phy_ife)
+ return E1000_SUCCESS;
+
+ phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+
+ if (active) {
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* Call gig speed drop workaround on LPLU before accessing
+ * any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000_gig_downshift_workaround_ich8lan(hw);
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D3 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 phy_ctrl;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
+
+ phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+
+ if (!active) {
+ phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* Call gig speed drop workaround on LPLU before accessing
+ * any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000_gig_downshift_workaround_ich8lan(hw);
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
+ * @hw: pointer to the HW structure
+ * @bank: pointer to the variable that returns the active bank
+ *
+ * Reads signature byte from the NVM using the flash access registers.
+ * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
+ **/
+STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
+{
+ u32 eecd;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
+ u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
+ u8 sig_byte = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
+
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
+ E1000_EECD_SEC1VAL_VALID_MASK) {
+ if (eecd & E1000_EECD_SEC1VAL)
+ *bank = 1;
+ else
+ *bank = 0;
+
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
+ /* fall-thru */
+ default:
+ /* set bank to 0 in case flash read fails */
+ *bank = 0;
+
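+ /* act_offset points at the high byte of the signature word (byte
+ * offset E1000_ICH_NVM_SIG_WORD * 2 + 1); bits 7:6 of that byte are
+ * word bits 15:14 checked against the valid-signature value below.
+ */
+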
+ /* Check bank 0 */
+ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
+ &sig_byte);
+ if (ret_val)
+ return ret_val;
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 0;
+ return E1000_SUCCESS;
+ }
+
+ /* Check bank 1 */
+ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
+ bank1_offset,
+ &sig_byte);
+ if (ret_val)
+ return ret_val;
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 1;
+ return E1000_SUCCESS;
+ }
+
+ DEBUGOUT("ERROR: No valid NVM bank present\n");
+ return -E1000_ERR_NVM;
+ }
+}
+
+/**
+ * e1000_read_nvm_ich8lan - Read word(s) from the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in words) of the word(s) to read.
+ * @words: Size of data to read in words
+ * @data: Pointer to the word(s) to read at offset.
+ *
+ * Reads a word(s) from the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 act_offset;
+ s32 ret_val = E1000_SUCCESS;
+ u32 bank = 0;
+ u16 i, word;
+
+ DEBUGFUNC("e1000_read_nvm_ich8lan");
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ nvm->ops.acquire(hw);
+
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ act_offset = (bank) ? nvm->flash_bank_size : 0;
+ act_offset += offset;
+
+ ret_val = E1000_SUCCESS;
+ for (i = 0; i < words; i++) {
+ if (dev_spec->shadow_ram[offset+i].modified) {
+ data[i] = dev_spec->shadow_ram[offset+i].value;
+ } else {
+ ret_val = e1000_read_flash_word_ich8lan(hw,
+ act_offset + i,
+ &word);
+ if (ret_val)
+ break;
+ data[i] = word;
+ }
+ }
+
+ nvm->ops.release(hw);
+
+out:
+ if (ret_val)
+ DEBUGOUT1("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
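
The read path overlays locally modified shadow-RAM words on top of the active flash bank. A simplified, self-contained sketch of that merge follows; the types and the flash stub are hypothetical and only illustrate the selection logic in the loop above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct shadow_word {
	bool modified;      /* set by the write path, cleared on commit */
	uint16_t value;
};

/* Hypothetical stand-in for e1000_read_flash_word_ich8lan() */
static uint16_t flash_word(uint32_t act_offset)
{
	return (uint16_t)(0xA000 | (act_offset & 0xFFF));
}

static void read_words(const struct shadow_word *shadow, uint32_t bank_offset,
		       uint16_t offset, uint16_t words, uint16_t *out)
{
	for (uint16_t i = 0; i < words; i++) {
		if (shadow[offset + i].modified)
			out[i] = shadow[offset + i].value;       /* pending write wins */
		else
			out[i] = flash_word(bank_offset + offset + i);
	}
}

int main(void)
{
	struct shadow_word shadow[4] = { {false, 0}, {true, 0x1234}, {false, 0}, {false, 0} };
	uint16_t out[4];

	read_words(shadow, 2048 /* illustrative bank-1 offset */, 0, 4, out);
	for (int i = 0; i < 4; i++)
		printf("word %d = 0x%04X\n", i, out[i]);
	return 0;
}
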
+
+/**
+ * e1000_flash_cycle_init_ich8lan - Initialize flash
+ * @hw: pointer to the HW structure
+ *
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ **/
+STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
+{
+ union ich8_hws_flash_status hsfsts;
+ s32 ret_val = -E1000_ERR_NVM;
+
+ DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
+
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+ /* Check if the flash descriptor is valid */
+ if (!hsfsts.hsf_status.fldesvalid) {
+ DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
+ return -E1000_ERR_NVM;
+ }
+
+ /* Clear FCERR and DAEL in hw status by writing 1 */
+ hsfsts.hsf_status.flcerr = 1;
+ hsfsts.hsf_status.dael = 1;
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+
+ /* Either we should have a hardware SPI cycle in progress
+ * bit to check against, in order to start a new cycle or
+ * FDONE bit should be changed in the hardware so that it
+ * is 1 after hardware reset, which can then be used as an
+ * indication whether a cycle is in progress or has been
+ * completed.
+ */
+
+ if (!hsfsts.hsf_status.flcinprog) {
+ /* There is no cycle running at present,
+ * so we can start a cycle.
+ * Begin by setting Flash Cycle Done.
+ */
+ hsfsts.hsf_status.flcdone = 1;
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+ ret_val = E1000_SUCCESS;
+ } else {
+ s32 i;
+
+ /* Otherwise poll for some time so the current
+ * cycle has a chance to end before giving up.
+ */
+ for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFSTS);
+ if (!hsfsts.hsf_status.flcinprog) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(1);
+ }
+ if (ret_val == E1000_SUCCESS) {
+ /* The previous cycle ended within the timeout,
+ * so now set the Flash Cycle Done.
+ */
+ hsfsts.hsf_status.flcdone = 1;
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval);
+ } else {
+ DEBUGOUT("Flash controller busy, cannot get access\n");
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
+ * @hw: pointer to the HW structure
+ * @timeout: maximum time to wait for completion
+ *
+ * This function starts a flash cycle and waits for its completion.
+ **/
+STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
+{
+ union ich8_hws_flash_ctrl hsflctl;
+ union ich8_hws_flash_status hsfsts;
+ u32 i = 0;
+
+ DEBUGFUNC("e1000_flash_cycle_ich8lan");
+
+ /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+ hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcgo = 1;
+
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* wait till FDONE bit is set to 1 */
+ do {
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcdone)
+ break;
+ usec_delay(1);
+ } while (i++ < timeout);
+
+ if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
+ return E1000_SUCCESS;
+
+ return -E1000_ERR_NVM;
+}
+
+/**
+ * e1000_read_flash_word_ich8lan - Read word from flash
+ * @hw: pointer to the HW structure
+ * @offset: offset to data location
+ * @data: pointer to the location for storing the data
+ *
+ * Reads the flash word at offset into data. Offset is converted
+ * to bytes before read.
+ **/
+STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ DEBUGFUNC("e1000_read_flash_word_ich8lan");
+
+ if (!data)
+ return -E1000_ERR_NVM;
+
+ /* Must convert offset into bytes. */
+ offset <<= 1;
+
+ return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
+}
+
+/**
+ * e1000_read_flash_byte_ich8lan - Read byte from flash
+ * @hw: pointer to the HW structure
+ * @offset: The offset of the byte to read.
+ * @data: Pointer to a byte to store the value read.
+ *
+ * Reads a single byte from the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 *data)
+{
+ s32 ret_val;
+ u16 word = 0;
+
+ ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+
+ if (ret_val)
+ return ret_val;
+
+ *data = (u8)word;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_flash_data_ich8lan - Read byte or word from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the byte or word to read.
+ * @size: Size of data to read, 1=byte 2=word
+ * @data: Pointer to the word to store the value read.
+ *
+ * Reads a byte or word from the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ u32 flash_data = 0;
+ s32 ret_val = -E1000_ERR_NVM;
+ u8 count = 0;
+
+ DEBUGFUNC("e1000_read_flash_data_ich8lan");
+
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ usec_delay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val != E1000_SUCCESS)
+ break;
+ hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set; if so, clear it and try the
+ * whole sequence a few more times, else read in
+ * (shift in) the Flash Data0; the data is returned
+ * least significant byte first.
+ */
+ if (ret_val == E1000_SUCCESS) {
+ flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
+ if (size == 1)
+ *data = (u8)(flash_data & 0x000000FF);
+ else if (size == 2)
+ *data = (u16)(flash_data & 0x0000FFFF);
+ break;
+ } else {
+ /* If we've gotten here, then things are probably
+ * completely hosed, but if the error condition is
+ * detected, it won't hurt to give it another try...
+ * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (!hsfsts.hsf_status.flcdone) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_ich8lan - Write word(s) to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to write.
+ * @words: Size of data to write in words
+ * @data: Pointer to the word(s) to write at offset.
+ *
+ * Writes a byte or word to the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u16 i;
+
+ DEBUGFUNC("e1000_write_nvm_ich8lan");
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ nvm->ops.acquire(hw);
+
+ for (i = 0; i < words; i++) {
+ dev_spec->shadow_ram[offset+i].modified = true;
+ dev_spec->shadow_ram[offset+i].value = data[i];
+ }
+
+ nvm->ops.release(hw);
+
+ return E1000_SUCCESS;
+}
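
Note that e1000_write_nvm_ich8lan() only stages words in shadow RAM; nothing reaches flash until the update routine below runs. A hedged caller sketch (the offset and payload are hypothetical, and it assumes the driver's own types and that the NVM ops table is wired up to the ich8lan routines in this file):

static s32 example_stage_and_commit(struct e1000_hw *hw)
{
	u16 word = 0x1234;                               /* hypothetical payload */
	s32 ret = hw->nvm.ops.write(hw, 0x20, 1, &word); /* stage in shadow RAM only */

	if (ret)
		return ret;
	return hw->nvm.ops.update(hw);                   /* commit to the inactive bank */
}
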
+
+/**
+ * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram. The changes in the shadow
+ * ram are then committed to the EEPROM by processing each bank at a time
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
+
+ ret_val = e1000_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ goto out;
+
+ if (nvm->type != e1000_nvm_flash_sw)
+ goto out;
+
+ nvm->ops.acquire(hw);
+
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written
+ */
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ if (bank == 0) {
+ new_bank_offset = nvm->flash_bank_size;
+ old_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+ if (ret_val)
+ goto release;
+ } else {
+ old_bank_offset = nvm->flash_bank_size;
+ new_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+ if (ret_val)
+ goto release;
+ }
+
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ /* Determine whether to write the value stored
+ * in the other NVM bank or a modified value stored
+ * in the shadow RAM
+ */
+ if (dev_spec->shadow_ram[i].modified) {
+ data = dev_spec->shadow_ram[i].value;
+ } else {
+ ret_val = e1000_read_flash_word_ich8lan(hw, i +
+ old_bank_offset,
+ &data);
+ if (ret_val)
+ break;
+ }
+
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress
+ */
+ if (i == E1000_ICH_NVM_SIG_WORD)
+ data |= E1000_ICH_NVM_SIG_MASK;
+
+ /* Convert offset to bytes. */
+ act_offset = (i + new_bank_offset) << 1;
+
+ usec_delay(100);
+ /* Write the bytes to the new bank. */
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+ act_offset,
+ (u8)data);
+ if (ret_val)
+ break;
+
+ usec_delay(100);
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+ act_offset + 1,
+ (u8)(data >> 8));
+ if (ret_val)
+ break;
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed.
+ */
+ if (ret_val) {
+ DEBUGOUT("Flash commit failed.\n");
+ goto release;
+ }
+
+ /* Finally validate the new segment by setting bits 15:14
+ * to 10b in word 0x13. This can be done without an
+ * erase as well, since these bits are 11b to start with
+ * and we only need to change bit 14 to 0b.
+ */
+ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+ ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
+ if (ret_val)
+ goto release;
+
+ data &= 0xBFFF;
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+ act_offset * 2 + 1,
+ (u8)(data >> 8));
+ if (ret_val)
+ goto release;
+
+ /* And invalidate the previously valid segment by setting
+ * the high byte of its signature word (0x13) to 0b. This can
+ * be done without an erase because a flash erase sets all bits
+ * to 1's, and we can write 1's to 0's without an erase.
+ */
+ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+ if (ret_val)
+ goto release;
+
+ /* Great! Everything worked, we can now clear the cached entries. */
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset.
+ */
+ if (!ret_val) {
+ nvm->ops.reload(hw);
+ msec_delay(10);
+ }
+
+out:
+ if (ret_val)
+ DEBUGOUT1("NVM update error: %d\n", ret_val);
+
+ return ret_val;
+}
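
A small worked sketch of the byte-offset arithmetic used during the commit above, assuming (purely for illustration) a 4 KiB bank, i.e. flash_bank_size = 2048 words:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t flash_bank_size = 2048;   /* words per bank, illustrative value */
	uint32_t bank = 0;                 /* currently valid bank */
	uint32_t new_bank_offset = (bank == 0) ? flash_bank_size : 0;
	uint32_t sig_word = 0x13;          /* E1000_ICH_NVM_SIG_WORD */

	/* Byte address of the signature word inside the bank being written */
	uint32_t act_offset = (sig_word + new_bank_offset) << 1;

	printf("signature word at byte 0x%X, its high byte at 0x%X\n",
	       (unsigned)act_offset, (unsigned)(act_offset + 1));
	return 0;
}
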
+
+/**
+ * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
+ * If the bit is 0, the EEPROM has been modified but the checksum was not
+ * calculated, in which case we need to calculate the checksum and set bit 6.
+ **/
+STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+ u16 word;
+ u16 valid_csum_mask;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
+
+ /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
+ * the checksum needs to be fixed. This bit is an indication that
+ * the NVM was prepared by OEM software and did not calculate
+ * the checksum...a likely scenario.
+ */
+ switch (hw->mac.type) {
+ case e1000_pch_lpt:
+ word = NVM_COMPAT;
+ valid_csum_mask = NVM_COMPAT_VALID_CSUM;
+ break;
+ default:
+ word = NVM_FUTURE_INIT_WORD1;
+ valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
+ break;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, word, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(data & valid_csum_mask)) {
+ data |= valid_csum_mask;
+ ret_val = hw->nvm.ops.write(hw, word, 1, &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->nvm.ops.update(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return e1000_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the byte/word to write.
+ * @size: Size of data to write, 1=byte 2=word
+ * @data: The byte(s) to write to the NVM.
+ *
+ * Writes one/two bytes to the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ u32 flash_data = 0;
+ s32 ret_val;
+ u8 count = 0;
+
+ DEBUGFUNC("e1000_write_ich8_data");
+
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ usec_delay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val != E1000_SUCCESS)
+ break;
+ hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+ if (size == 1)
+ flash_data = (u32)data & 0x00FF;
+ else
+ flash_data = (u32)data;
+
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
+
+ /* Check if FCERR is set; if so, clear it
+ * and try the whole sequence a few more times, else we are done.
+ */
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+ if (ret_val == E1000_SUCCESS)
+ break;
+
+ /* If we're here, then things are most likely
+ * completely hosed, but if the error condition
+ * is detected, it won't hurt to give it another
+ * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr)
+ /* Repeat for some time before giving up. */
+ continue;
+ if (!hsfsts.hsf_status.flcdone) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The index of the byte to write.
+ * @data: The byte to write to the NVM.
+ *
+ * Writes a single byte to the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 data)
+{
+ u16 word = (u16)data;
+
+ DEBUGFUNC("e1000_write_flash_byte_ich8lan");
+
+ return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
+}
+
+/**
+ * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset of the byte to write.
+ * @byte: The byte to write to the NVM.
+ *
+ * Writes a single byte to the NVM using the flash access registers.
+ * Goes through a retry algorithm before giving up.
+ **/
+STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+ u32 offset, u8 byte)
+{
+ s32 ret_val;
+ u16 program_retries;
+
+ DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
+
+ ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+ if (!ret_val)
+ return ret_val;
+
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
+ usec_delay(100);
+ ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ }
+ if (program_retries == 100)
+ return -E1000_ERR_NVM;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
+ * @hw: pointer to the HW structure
+ * @bank: 0 for first bank, 1 for second bank, etc.
+ *
+ * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
+ * bank N is 4096 * N + flash_reg_addr.
+ **/
+STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ /* bank size is in 16bit words - adjust to bytes */
+ u32 flash_bank_size = nvm->flash_bank_size * 2;
+ s32 ret_val;
+ s32 count = 0;
+ s32 j, iteration, sector_size;
+
+ DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
+
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+ /* Determine HW Sector size: Read BERASE bits of hw flash status
+ * register
+ * 00: The Hw sector is 256 bytes, hence we need to erase 16
+ * consecutive sectors. The start index for the nth Hw sector
+ * can be calculated as = bank * 4096 + n * 256
+ * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+ * The start index for the nth Hw sector can be calculated
+ * as = bank * 4096
+ * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
+ * (ich9 only, otherwise error condition)
+ * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
+ */
+ switch (hsfsts.hsf_status.berasesz) {
+ case 0:
+ /* Hw sector size 256 */
+ sector_size = ICH_FLASH_SEG_SIZE_256;
+ iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
+ break;
+ case 1:
+ sector_size = ICH_FLASH_SEG_SIZE_4K;
+ iteration = 1;
+ break;
+ case 2:
+ sector_size = ICH_FLASH_SEG_SIZE_8K;
+ iteration = 1;
+ break;
+ case 3:
+ sector_size = ICH_FLASH_SEG_SIZE_64K;
+ iteration = 1;
+ break;
+ default:
+ return -E1000_ERR_NVM;
+ }
+
+ /* Start with the base address, then add the sector offset. */
+ flash_linear_addr = hw->nvm.flash_base_addr;
+ flash_linear_addr += (bank) ? flash_bank_size : 0;
+
+ for (j = 0; j < iteration; j++) {
+ do {
+ u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
+
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Write a value 11 (block Erase) in Flash
+ * Cycle field in hw flash control
+ */
+ hsflctl.regval =
+ E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
+ hsflctl.regval);
+
+ /* Write the last 24 bits of an index within the
+ * block into Flash Linear address field in Flash
+ * Address.
+ */
+ flash_linear_addr += (j * sector_size);
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
+ flash_linear_addr);
+
+ ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
+ if (ret_val == E1000_SUCCESS)
+ break;
+
+ /* Check if FCERR is set. If so,
+ * clear it and try the whole sequence
+ * a few more times, else we are done.
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr)
+ /* repeat for some time before giving up */
+ continue;
+ else if (!hsfsts.hsf_status.flcdone)
+ return ret_val;
+ } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
+ }
+
+ return E1000_SUCCESS;
+}
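
For example, with the 256-byte hardware sector size (berasesz == 0) and a 4 KiB bank, the loop above has to issue 16 consecutive erase cycles, while the larger sector sizes cover the whole bank in a single iteration. A quick arithmetic sketch using illustrative sizes only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t flash_bank_size_bytes = 2048 * 2;   /* 2048 words -> 4096 bytes */
	uint32_t seg_256 = 256, seg_4k = 4096;

	printf("256-byte sectors per bank: %u\n", (unsigned)(flash_bank_size_bytes / seg_256)); /* 16 */
	printf("4K sectors per bank:       %u\n", (unsigned)(flash_bank_size_bytes / seg_4k));  /* 1  */
	return 0;
}
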
+
+/**
+ * e1000_valid_led_default_ich8lan - Set the default LED settings
+ * @hw: pointer to the HW structure
+ * @data: Pointer to the LED settings
+ *
+ * Reads the LED default settings from the NVM to data. If the NVM LED
+ * setting is all 0's or F's, set the LED default to a valid LED default
+ * setting.
+ **/
+STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_ich8lan");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT_ICH8LAN;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_id_led_init_pchlan - store LED configurations
+ * @hw: pointer to the HW structure
+ *
+ * PCH does not control LEDs via the LEDCTL register, rather it uses
+ * the PHY LED configuration register.
+ *
+ * PCH also does not have an "always on" or "always off" mode which
+ * complicates the ID feature. Instead of using the "on" mode to indicate
+ * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
+ * use "link_up" mode. The LEDs will still ID on request if there is no
+ * link based on logic in e1000_led_[on|off]_pchlan().
+ **/
+STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+ u16 data, i, temp, shift;
+
+ DEBUGFUNC("e1000_id_led_init_pchlan");
+
+ /* Get default ID LED modes */
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ return ret_val;
+
+ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
+ shift = (i * 5);
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_on << shift);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_on << shift);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
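
The loop above pulls one 4-bit ID-LED code per LED out of the NVM word (shift by i*4) and rewrites the corresponding 5-bit field of the PHY LED configuration (shift by i*5). A standalone sketch of that extraction, with a hypothetical NVM word value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t data = 0x8421;   /* hypothetical NVM ID-LED word */

	for (int i = 0; i < 4; i++) {
		unsigned code  = (data >> (i << 2)) & 0xF; /* 4-bit ID LED code */
		int      shift = i * 5;                    /* start of the PHY LED field */

		printf("LED%d: code 0x%X, PHY field at bits %d..%d\n",
		       i, code, shift, shift + 4);
	}
	return 0;
}
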
+
+/**
+ * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ * @hw: pointer to the HW structure
+ *
+ * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
+ * register, so the bus width is hard coded.
+ **/
+STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_bus_info_ich8lan");
+
+ ret_val = e1000_get_bus_info_pcie_generic(hw);
+
+ /* ICH devices are "PCI Express"-ish. They have
+ * a configuration space, but do not contain
+ * PCI Express Capability registers, so bus width
+ * must be hardcoded.
+ */
+ if (bus->width == e1000_bus_width_unknown)
+ bus->width = e1000_bus_width_pcie_x1;
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_ich8lan - Reset the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Does a full reset of the hardware which includes a reset of the PHY and
+ * MAC.
+ **/
+STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u16 kum_cfg;
+ u32 ctrl, reg;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_reset_hw_ich8lan");
+
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ /* Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC
+ * with the global reset.
+ */
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ /* Workaround for ICH8 bit corruption issue in FIFO memory */
+ if (hw->mac.type == e1000_ich8lan) {
+ /* Set Tx and Rx buffer allocation to 8k apiece. */
+ E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
+ /* Set Packet Buffer Size to 16k. */
+ E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
+ }
+
+ if (hw->mac.type == e1000_pchlan) {
+ /* Save the NVM K1 bit setting*/
+ ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
+ if (ret_val)
+ return ret_val;
+
+ if (kum_cfg & E1000_NVM_K1_ENABLE)
+ dev_spec->nvm_k1_enabled = true;
+ else
+ dev_spec->nvm_k1_enabled = false;
+ }
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ if (!hw->phy.ops.check_reset_block(hw)) {
+ /* Full-chip reset requires MAC and PHY reset at the same
+ * time to make sure the interface between MAC and the
+ * external PHY is reset.
+ */
+ ctrl |= E1000_CTRL_PHY_RST;
+
+ /* Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+ }
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+ DEBUGOUT("Issuing a global reset to ich8lan\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
+ /* cannot issue a flush here because it hangs the hardware */
+ msec_delay(20);
+
+ /* Set Phy Config Counter to 50msec */
+ if (hw->mac.type == e1000_pch2lan) {
+ reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
+ reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+ reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+ E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
+ }
+
+ if (!ret_val)
+ E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
+
+ if (ctrl & E1000_CTRL_PHY_RST) {
+ ret_val = hw->phy.ops.get_cfg_done(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* For PCH, this write will make sure that any noise
+ * will be detected as a CRC error and be dropped rather than show up
+ * as a bad packet to the DMA engine.
+ */
+ if (hw->mac.type == e1000_pchlan)
+ E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
+
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
+
+ reg = E1000_READ_REG(hw, E1000_KABGTXD);
+ reg |= E1000_KABGTXD_BGSQLBIAS;
+ E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_hw_ich8lan - Initialize the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Prepares the hardware for transmit and receive by doing the following:
+ * - initialize hardware bits
+ * - initialize LED identification
+ * - setup receive address registers
+ * - setup flow control
+ * - setup transmit descriptors
+ * - clear statistics
+ **/
+STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl_ext, txdctl, snoop;
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_init_hw_ich8lan");
+
+ e1000_initialize_hw_bits_ich8lan(hw);
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
+ if (ret_val)
+ DEBUGOUT("Error initializing identification LED\n");
+
+ /* Setup the receive address. */
+ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* The 82578 Rx buffer will stall if wakeup is enabled in host and
+ * the ME. Disable wakeup by clearing the host wakeup bit.
+ * Reset the phy after disabling host wakeup to reset the Rx buffer.
+ */
+ if (hw->phy.type == e1000_phy_82578) {
+ hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
+ i &= ~BM_WUC_HOST_WU_BIT;
+ hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
+ ret_val = e1000_phy_hw_reset_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Setup link and flow control */
+ ret_val = mac->ops.setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy for both queues */
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
+
+ /* ICH8 has opposite polarity of no_snoop bits.
+ * By default, we should use snoop behavior.
+ */
+ if (mac->type == e1000_ich8lan)
+ snoop = PCIE_ICH8_SNOOP_ALL;
+ else
+ snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
+ e1000_set_pcie_no_snoop_generic(hw, snoop);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_ich8lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
+ * @hw: pointer to the HW structure
+ *
+ * Sets/Clears required hardware bits necessary for correctly setting up the
+ * hardware for transmit and receive.
+ **/
+STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
+
+ /* Extended Device Control */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg |= (1 << 22);
+ /* Enable PHY low-power state when MAC is at D3 w/o WoL */
+ if (hw->mac.type >= e1000_pchlan)
+ reg |= E1000_CTRL_EXT_PHYPDEN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* Transmit Descriptor Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+ reg |= (1 << 22);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = E1000_READ_REG(hw, E1000_TARC(0));
+ if (hw->mac.type == e1000_ich8lan)
+ reg |= (1 << 28) | (1 << 29);
+ reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+ E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = E1000_READ_REG(hw, E1000_TARC(1));
+ if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ reg |= (1 << 24) | (1 << 26) | (1 << 30);
+ E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+
+ /* Device Status */
+ if (hw->mac.type == e1000_ich8lan) {
+ reg = E1000_READ_REG(hw, E1000_STATUS);
+ reg &= ~(1 << 31);
+ E1000_WRITE_REG(hw, E1000_STATUS, reg);
+ }
+
+ /* work-around descriptor data corruption issue during nfs v2 udp
+ * traffic, just disable the nfs filtering capability
+ */
+ reg = E1000_READ_REG(hw, E1000_RFCTL);
+ reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
+
+ /* Disable IPv6 extension header parsing because some malformed
+ * IPv6 headers can hang the Rx.
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+ E1000_WRITE_REG(hw, E1000_RFCTL, reg);
+
+ /* Enable ECC on Lynxpoint */
+ if (hw->mac.type == e1000_pch_lpt) {
+ reg = E1000_READ_REG(hw, E1000_PBECCSTS);
+ reg |= E1000_PBECCSTS_ECC_ENABLE;
+ E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
+
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg |= E1000_CTRL_MEHE;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+ }
+
+ return;
+}
+
+/**
+ * e1000_setup_link_ich8lan - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_link_ich8lan");
+
+ if (hw->phy.ops.check_reset_block(hw))
+ return E1000_SUCCESS;
+
+ /* ICH parts do not have a word in the NVM to determine
+ * the default flow control setting, so we explicitly
+ * set it to full.
+ */
+ if (hw->fc.requested_mode == e1000_fc_default)
+ hw->fc.requested_mode = e1000_fc_full;
+
+ /* Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
+ hw->fc.current_mode);
+
+ /* Continue to configure the copper link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+ return ret_val;
+
+ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
+ (hw->phy.type == e1000_phy_i217) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
+
+ ret_val = hw->phy.ops.write_reg(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 27),
+ hw->fc.pause_time);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return e1000_set_fc_watermarks_generic(hw);
+}
+
+/**
+ * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Configures the kumeran interface to the PHY to wait the appropriate time
+ * when polling the PHY, then calls the generic setup_copper_link to finish
+ * configuring the copper link.
+ **/
+STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 reg_data;
+
+ DEBUGFUNC("e1000_setup_copper_link_ich8lan");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Set the mac to wait the maximum time between each iteration
+ * and increase the max iterations when polling the phy;
+ * this fixes erroneous timeouts at 10Mbps.
+ */
+ ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
+ 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->phy.type) {
+ case e1000_phy_igp_3:
+ ret_val = e1000_copper_link_setup_igp(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_bm:
+ case e1000_phy_82578:
+ ret_val = e1000_copper_link_setup_m88(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_82577:
+ case e1000_phy_82579:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_ife:
+ ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ reg_data &= ~IFE_PMC_AUTO_MDIX;
+
+ switch (hw->phy.mdix) {
+ case 1:
+ reg_data &= ~IFE_PMC_FORCE_MDIX;
+ break;
+ case 2:
+ reg_data |= IFE_PMC_FORCE_MDIX;
+ break;
+ case 0:
+ default:
+ reg_data |= IFE_PMC_AUTO_MDIX;
+ break;
+ }
+ ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ break;
+ }
+
+ return e1000_setup_copper_link_generic(hw);
+}
+
+/**
+ * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY specific link setup function and then calls the
+ * generic setup_copper_link to finish configuring the link for
+ * Lynxpoint PCH devices
+ **/
+STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_setup_copper_link_generic(hw);
+}
+
+/**
+ * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to store current link speed
+ * @duplex: pointer to store the current link duplex
+ *
+ * Calls the generic get_speed_and_duplex to retrieve the current link
+ * information and then calls the Kumeran lock loss workaround for links at
+ * gigabit speeds.
+ **/
+STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_link_up_info_ich8lan");
+
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac.type == e1000_ich8lan) &&
+ (hw->phy.type == e1000_phy_igp_3) &&
+ (*speed == SPEED_1000)) {
+ ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
+ * @hw: pointer to the HW structure
+ *
+ * Work-around for 82566 Kumeran PCS lock loss:
+ * On link status change (i.e. PCI reset, speed change), when link is up and
+ * speed is gigabit:
+ * 0) if workaround is optionally disabled do nothing
+ * 1) wait 1ms for Kumeran link to come up
+ * 2) check Kumeran Diagnostic register PCS lock loss bit
+ * 3) if not set the link is locked (all is good), otherwise...
+ * 4) reset the PHY
+ * 5) repeat up to 10 times
+ * Note: this is only called for IGP3 copper when speed is 1gb.
+ **/
+STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 phy_ctrl;
+ s32 ret_val;
+ u16 i, data;
+ bool link;
+
+ DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
+
+ if (!dev_spec->kmrn_lock_loss_workaround_enabled)
+ return E1000_SUCCESS;
+
+ /* Make sure link is up before proceeding. If not just return.
+ * Attempting this while link is negotiating fouled up link
+ * stability
+ */
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (!link)
+ return E1000_SUCCESS;
+
+ for (i = 0; i < 10; i++) {
+ /* read once to clear */
+ ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
+ if (ret_val)
+ return ret_val;
+ /* and again to get new status */
+ ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* check for PCS lock */
+ if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+ return E1000_SUCCESS;
+
+ /* Issue PHY reset */
+ hw->phy.ops.reset(hw);
+ msec_delay_irq(5);
+ }
+ /* Disable GigE link negotiation */
+ phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+ phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ /* Call gig speed drop workaround on Gig disable before accessing
+ * any PHY registers
+ */
+ e1000_gig_downshift_workaround_ich8lan(hw);
+
+ /* unable to acquire PCS lock */
+ return -E1000_ERR_PHY;
+}
+
+/**
+ * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ * @hw: pointer to the HW structure
+ * @state: boolean value used to set the current Kumeran workaround state
+ *
+ * If ICH8, set the current Kumeran workaround state (enabled - true
+ * /disabled - false).
+ **/
+void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+
+ DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
+
+ if (hw->mac.type != e1000_ich8lan) {
+ DEBUGOUT("Workaround applies to ICH8 only.\n");
+ return;
+ }
+
+ dev_spec->kmrn_lock_loss_workaround_enabled = state;
+
+ return;
+}
+
+/**
+ * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ * @hw: pointer to the HW structure
+ *
+ * Workaround for 82566 power-down on D3 entry:
+ * 1) disable gigabit link
+ * 2) write VR power-down enable
+ * 3) read it back
+ * Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+ u32 reg;
+ u16 data;
+ u8 retry = 0;
+
+ DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
+
+ if (hw->phy.type != e1000_phy_igp_3)
+ return;
+
+ /* Try the workaround twice (if needed) */
+ do {
+ /* Disable link */
+ reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
+ reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
+
+ /* Call gig speed drop workaround on Gig disable before
+ * accessing any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000_gig_downshift_workaround_ich8lan(hw);
+
+ /* Write VR power-down enable */
+ hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
+ data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+ hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
+ data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+ /* Read it back and test */
+ hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
+ data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+ if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+ break;
+
+ /* Issue PHY reset and repeat at most one more time */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
+ retry++;
+ } while (retry);
+}
+
+/**
+ * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ * @hw: pointer to the HW structure
+ *
+ * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
+ * LPLU, Gig disable, MDIC PHY reset):
+ * 1) Set Kumeran Near-end loopback
+ * 2) Clear Kumeran Near-end loopback
+ * Should only be called for ICH8[m] devices with any 1G Phy.
+ **/
+void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 reg_data;
+
+ DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
+
+ if ((hw->mac.type != e1000_ich8lan) ||
+ (hw->phy.type == e1000_phy_ife))
+ return;
+
+ ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ &reg_data);
+ if (ret_val)
+ return;
+ reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
+ ret_val = e1000_write_kmrn_reg_generic(hw,
+ E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ reg_data);
+ if (ret_val)
+ return;
+ reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
+ e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ reg_data);
+}
+
+/**
+ * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
+ * @hw: pointer to the HW structure
+ *
+ * During S0 to Sx transition, it is possible the link remains at gig
+ * instead of negotiating to a lower speed. Before going to Sx, set
+ * 'Gig Disable' to force link speed negotiation to a lower speed based on
+ * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
+ * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
+ * needs to be written.
+ * Parts that support (and are linked to a partner which supports) EEE in
+ * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
+ * than 10Mbps w/o EEE.
+ **/
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 phy_ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
+
+ phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+ phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
+
+ if (hw->phy.type == e1000_phy_i217) {
+ u16 phy_reg, device_id = hw->device_id;
+
+ if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+ (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
+
+ E1000_WRITE_REG(hw, E1000_FEXTNVM6,
+ fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ if (!dev_spec->eee_disable) {
+ u16 eee_advert;
+
+ ret_val =
+ e1000_read_emi_reg_locked(hw,
+ I217_EEE_ADVERTISEMENT,
+ &eee_advert);
+ if (ret_val)
+ goto release;
+
+ /* Disable LPLU if both link partners support 100BaseT
+ * EEE and 100Full is advertised on both ends of the
+ * link, and enable Auto Enable LPI since there will
+ * be no driver to enable LPI while in Sx.
+ */
+ if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
+ (dev_spec->eee_lp_ability &
+ I82579_EEE_100_SUPPORTED) &&
+ (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
+ phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
+ E1000_PHY_CTRL_NOND0A_LPLU);
+
+ /* Set Auto Enable LPI after link up */
+ hw->phy.ops.read_reg_locked(hw,
+ I217_LPI_GPIO_CTRL,
+ &phy_reg);
+ phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+ hw->phy.ops.write_reg_locked(hw,
+ I217_LPI_GPIO_CTRL,
+ phy_reg);
+ }
+ }
+
+ /* For i217 Intel Rapid Start Technology support,
+ * when the system is going into Sx and no manageability engine
+ * is present, the driver must configure proxy to reset only on
+ * power good. LPI (Low Power Idle) state must also reset only
+ * on power good, as well as the MTA (Multicast table array).
+ * The SMBus release must also be disabled on LCD reset.
+ */
+ if (!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_ICH_FWSM_FW_VALID)) {
+ /* Enable proxy to reset only on power good. */
+ hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
+ &phy_reg);
+ phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
+ hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
+ phy_reg);
+
+ /* Set bit enable LPI (EEE) to reset only on
+ * power good.
+ */
+ hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
+ phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
+ hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
+
+ /* Disable the SMB release on LCD reset. */
+ hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
+ phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
+ hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
+ }
+
+ /* Enable MTA to reset for Intel Rapid Start Technology
+ * Support
+ */
+ hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
+ phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
+ hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
+
+release:
+ hw->phy.ops.release(hw);
+ }
+out:
+ E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+ if (hw->mac.type == e1000_ich8lan)
+ e1000_gig_downshift_workaround_ich8lan(hw);
+
+ if (hw->mac.type >= e1000_pchlan) {
+ e1000_oem_bits_config_ich8lan(hw, false);
+
+ /* Reset PHY to activate OEM bits on 82577/8 */
+ if (hw->mac.type == e1000_pchlan)
+ e1000_phy_hw_reset_generic(hw);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ e1000_write_smbus_addr(hw);
+ hw->phy.ops.release(hw);
+ }
+
+ return;
+}
+
+/**
+ * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
+ * @hw: pointer to the HW structure
+ *
+ * During Sx to S0 transitions on non-managed devices or managed devices
+ * on which PHY resets are not blocked, if the PHY registers cannot be
+ * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
+ * the PHY.
+ * On i217, setup Intel Rapid Start Technology.
+ **/
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_resume_workarounds_pchlan");
+
+ if (hw->mac.type < e1000_pch2lan)
+ return;
+
+ ret_val = e1000_init_phy_workarounds_pchlan(hw);
+ if (ret_val) {
+ DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
+ return;
+ }
+
+ /* For i217 Intel Rapid Start Technology support when the system
+ * is transitioning from Sx and no manageability engine is present
+ * configure SMBus to restore on reset, disable proxy, and enable
+ * the reset on MTA (Multicast table array).
+ */
+ if (hw->phy.type == e1000_phy_i217) {
+ u16 phy_reg;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ DEBUGOUT("Failed to setup iRST\n");
+ return;
+ }
+
+ /* Clear Auto Enable LPI after link up */
+ hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
+ phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+ hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
+
+ if (!(E1000_READ_REG(hw, E1000_FWSM) &
+ E1000_ICH_FWSM_FW_VALID)) {
+ /* Restore clear on SMB if no manageability engine
+ * is present
+ */
+ ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
+ hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
+
+ /* Disable Proxy */
+ hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
+ }
+ /* Enable reset on MTA */
+ ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
+ hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
+release:
+ if (ret_val)
+ DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
+ hw->phy.ops.release(hw);
+ }
+}
+
+/**
+ * e1000_cleanup_led_ich8lan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_cleanup_led_ich8lan");
+
+ if (hw->phy.type == e1000_phy_ife)
+ return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ 0);
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on_ich8lan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_led_on_ich8lan");
+
+ if (hw->phy.type == e1000_phy_ife)
+ return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off_ich8lan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_led_off_ich8lan");
+
+ if (hw->phy.type == e1000_phy_ife)
+ return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_led_pchlan - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use.
+ **/
+STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_setup_led_pchlan");
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
+ (u16)hw->mac.ledctl_mode1);
+}
+
+/**
+ * e1000_cleanup_led_pchlan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_cleanup_led_pchlan");
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
+ (u16)hw->mac.ledctl_default);
+}
+
+/**
+ * e1000_led_on_pchlan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode2;
+ u32 i, led;
+
+ DEBUGFUNC("e1000_led_on_pchlan");
+
+ /* If no link, then turn LED on by setting the invert bit
+ * for each LED whose mode is "link_up" in ledctl_mode2.
+ */
+ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_led_off_pchlan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode1;
+ u32 i, led;
+
+ DEBUGFUNC("e1000_led_off_pchlan");
+
+ /* If no link, then turn LED off by clearing the invert bit
+ * for each LED whose mode is "link_up" in ledctl_mode1.
+ */
+ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Read appropriate register for the config done bit for completion status
+ * and configure the PHY through s/w for EEPROM-less parts.
+ *
+ * NOTE: some silicon which is EEPROM-less will fail trying to read the
+ * config done bit, so only an error is logged and execution continues. If we
+ * were to return with an error, EEPROM-less silicon would not be able to be
+ * reset or change link.
+ **/
+STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 bank = 0;
+ u32 status;
+
+ DEBUGFUNC("e1000_get_cfg_done_ich8lan");
+
+ e1000_get_cfg_done_generic(hw);
+
+ /* Wait for indication from h/w that it has completed basic config */
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /* When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ ret_val = E1000_SUCCESS;
+ }
+ }
+
+ /* Clear PHY Reset Asserted bit */
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_PHYRA)
+ E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
+ else
+ DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
+
+ /* If EEPROM is not marked present, init the IGP 3 PHY manually */
+ if (hw->mac.type <= e1000_ich9lan) {
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
+ (hw->phy.type == e1000_phy_igp_3)) {
+ e1000_phy_init_script_igp3(hw);
+ }
+ } else {
+ if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
+ /* Maybe we should do a basic PHY config */
+ DEBUGOUT("EEPROM not present\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(hw->mac.ops.check_mng_mode(hw) ||
+ hw->phy.ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears hardware counters specific to the silicon family and calls
+ * clear_hw_cntrs_generic to clear all general purpose counters.
+ **/
+STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+
+ E1000_READ_REG(hw, E1000_IAC);
+ E1000_READ_REG(hw, E1000_ICRXOC);
+
+ /* Clear PHY statistics registers */
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
+ (hw->phy.type == e1000_phy_i217) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ ret_val = hw->phy.ops.set_page(hw,
+ HV_STATS_PAGE << IGP_PAGE_SHIFT);
+ if (ret_val)
+ goto release;
+ hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
+release:
+ hw->phy.ops.release(hw);
+ }
+}
+
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.h
new file mode 100755
index 00000000..8c5e9c32
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_ich8lan.h
@@ -0,0 +1,313 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_ICH8LAN_H_
+#define _E1000_ICH8LAN_H_
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+
+/* Requires up to 10 seconds when MNG might be accessing the part. */
+#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
+
+#define ICH_CYCLE_READ 0
+#define ICH_CYCLE_WRITE 2
+#define ICH_CYCLE_ERASE 3
+
+#define FLASH_GFPREG_BASE_MASK 0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT 12
+
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_8K 8192
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
+#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
+
+#define E1000_ICH_MNG_IAMT_MODE 0x2
+
+#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
+#define E1000_FWSM_WLOCK_MAC_SHIFT 7
+#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */
+#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+
+/* Shared Receive Address Registers */
+#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
+#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
+
+#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#define E1000_H2ME 0x05B50 /* Host to ME */
+#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */
+#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
+
+#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_OFF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC000
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
+#define E1000_ICH_NVM_SIG_VALUE 0x80
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+
+#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+/* FEXT register bit definition */
+#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004
+
+#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+#define E1000_FEXTNVM_SW_CONFIG 1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
+
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
+
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
+#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
+#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
+
+#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+
+#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES 7
+#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+ ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
+
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
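The PHY_REG() macro above composes a PHY address from a page number and a 5-bit register offset. As a quick standalone illustration (not part of the patch; MAX_PHY_REG_ADDRESS is assumed to be the usual 0x1F mask implied by PHY_PAGE_SHIFT and is redefined locally so the snippet stands alone), HV_LED_CONFIG resolves like this:

/* Standalone sketch, not part of the patch. */
#include <stdio.h>

#define PHY_PAGE_SHIFT       5
#define MAX_PHY_REG_ADDRESS  0x1F   /* assumed 5-bit register field */
#define PHY_REG(page, reg)   (((page) << PHY_PAGE_SHIFT) | \
                              ((reg) & MAX_PHY_REG_ADDRESS))

int main(void)
{
	/* HV_LED_CONFIG is PHY_REG(768, 30): register 30 on PHY page 768 */
	printf("0x%04x\n", PHY_REG(768, 30));   /* prints 0x601e */
	return 0;
}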
+
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
+#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
+
+#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
+#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
+#define HV_STATS_PAGE 778
+/* Half-duplex collision counts */
+#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */
+#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */
+#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */
+#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */
+#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+
+/* SMBus Control Phy Register */
+#define CV_SMB_CTRL PHY_REG(769, 23)
+#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
+
+#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+/* I218 Ultra Low Power Configuration 1 Register */
+#define I218_ULP_CONFIG1 PHY_REG(779, 16)
+#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */
+#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */
+#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */
+#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */
+#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */
+#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
+#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
+
+#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
+#define HV_SMB_ADDR_PEC_EN 0x0200
+#define HV_SMB_ADDR_VALID 0x0080
+#define HV_SMB_ADDR_FREQ_MASK 0x1100
+#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
+#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP 0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
+#define E1000_STRAP_SMT_FREQ_SHIFT 12
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
+
+/* PHY Power Management Control */
+#define HV_PM_CTRL PHY_REG(770, 17)
+#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_ENABLE 0x4000
+
+#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
+
+/* Inband Control */
+#define I217_INBAND_CTRL PHY_REG(770, 18)
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8
+
+/* Low Power Idle GPIO Control */
+#define I217_LPI_GPIO_CTRL PHY_REG(772, 18)
+#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI 0x0800
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_100_ENABLE 0x2000
+#define I82579_LPI_CTRL_1000_ENABLE 0x4000
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+
+/* 82579 DFT Control */
+#define I82579_DFT_CTRL PHY_REG(769, 20)
+#define I82579_DFT_CTRL_GATE_PHY_RESET 0x0040 /* Gate PHY Reset on MAC Reset */
+
+/* Extended Management Interface (EMI) Registers */
+#define I82579_EMI_ADDR 0x10
+#define I82579_EMI_DATA 0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
+#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
+#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
+#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
+#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
+#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
+#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
+#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
+#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
+#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
+#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
+#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
+#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
+#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
+#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
+#define I217_RX_CONFIG 0xB20C /* Receive configuration */
+
+#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
+#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
+
+/* Intel Rapid Start Technology Support */
+#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
+#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
+#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
+#define I217_CGFREG PHY_REG(772, 29)
+#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
+#define I217_MEMPWR PHY_REG(772, 26)
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
+
+/* Receive Address Initial CRC Calculation */
+#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
+
+#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
+#define E1000_PCI_REVISION_ID_REG 0x08
+#endif /* defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) */
+void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state);
+void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw);
+#if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx);
+s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
+#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+void e1000_demote_ltr(struct e1000_hw *hw, bool demote, bool link);
+#endif /* _E1000_ICH8LAN_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.c
new file mode 100755
index 00000000..c8ec049b
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.c
@@ -0,0 +1,2247 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
+STATIC void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+STATIC void e1000_config_collision_dist_generic(struct e1000_hw *hw);
+STATIC void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+
+/**
+ * e1000_init_mac_ops_generic - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setups up the function pointers to no-op functions
+ **/
+void e1000_init_mac_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ DEBUGFUNC("e1000_init_mac_ops_generic");
+
+ /* General Setup */
+ mac->ops.init_params = e1000_null_ops_generic;
+ mac->ops.init_hw = e1000_null_ops_generic;
+ mac->ops.reset_hw = e1000_null_ops_generic;
+ mac->ops.setup_physical_interface = e1000_null_ops_generic;
+ mac->ops.get_bus_info = e1000_null_ops_generic;
+ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
+ mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
+ mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
+ mac->ops.clear_hw_cntrs = e1000_null_mac_generic;
+ /* LED */
+ mac->ops.cleanup_led = e1000_null_ops_generic;
+ mac->ops.setup_led = e1000_null_ops_generic;
+ mac->ops.blink_led = e1000_null_ops_generic;
+ mac->ops.led_on = e1000_null_ops_generic;
+ mac->ops.led_off = e1000_null_ops_generic;
+ /* LINK */
+ mac->ops.setup_link = e1000_null_ops_generic;
+ mac->ops.get_link_up_info = e1000_null_link_info;
+ mac->ops.check_for_link = e1000_null_ops_generic;
+ /* Management */
+ mac->ops.check_mng_mode = e1000_null_mng_mode;
+ /* VLAN, MC, etc. */
+ mac->ops.update_mc_addr_list = e1000_null_update_mc;
+ mac->ops.clear_vfta = e1000_null_mac_generic;
+ mac->ops.write_vfta = e1000_null_write_vfta;
+ mac->ops.rar_set = e1000_rar_set_generic;
+ mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
+}
+
+/**
+ * e1000_null_ops_generic - No-op function, returns 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_ops_generic");
+ UNREFERENCED_1PARAMETER(hw);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mac_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_mac_generic");
+ UNREFERENCED_1PARAMETER(hw);
+ return;
+}
+
+/**
+ * e1000_null_link_info - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d)
+{
+ DEBUGFUNC("e1000_null_link_info");
+ UNREFERENCED_3PARAMETER(hw, s, d);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mng_mode - No-op function, return false
+ * @hw: pointer to the HW structure
+ **/
+bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_mng_mode");
+ UNREFERENCED_1PARAMETER(hw);
+ return false;
+}
+
+/**
+ * e1000_null_update_mc - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
+{
+ DEBUGFUNC("e1000_null_update_mc");
+ UNREFERENCED_3PARAMETER(hw, h, a);
+ return;
+}
+
+/**
+ * e1000_null_write_vfta - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b)
+{
+ DEBUGFUNC("e1000_null_write_vfta");
+ UNREFERENCED_3PARAMETER(hw, a, b);
+ return;
+}
+
+/**
+ * e1000_null_rar_set - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
+{
+ DEBUGFUNC("e1000_null_rar_set");
+ UNREFERENCED_3PARAMETER(hw, h, a);
+ return;
+}
+
+/**
+ * e1000_get_bus_info_pci_generic - Get PCI(x) bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCI/PCIx), and PCI(-x) function.
+ **/
+s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 status = E1000_READ_REG(hw, E1000_STATUS);
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_get_bus_info_pci_generic");
+
+ /* PCI or PCI-X? */
+ bus->type = (status & E1000_STATUS_PCIX_MODE)
+ ? e1000_bus_type_pcix
+ : e1000_bus_type_pci;
+
+ /* Bus speed */
+ if (bus->type == e1000_bus_type_pci) {
+ bus->speed = (status & E1000_STATUS_PCI66)
+ ? e1000_bus_speed_66
+ : e1000_bus_speed_33;
+ } else {
+ switch (status & E1000_STATUS_PCIX_SPEED) {
+ case E1000_STATUS_PCIX_SPEED_66:
+ bus->speed = e1000_bus_speed_66;
+ break;
+ case E1000_STATUS_PCIX_SPEED_100:
+ bus->speed = e1000_bus_speed_100;
+ break;
+ case E1000_STATUS_PCIX_SPEED_133:
+ bus->speed = e1000_bus_speed_133;
+ break;
+ default:
+ bus->speed = e1000_bus_speed_reserved;
+ break;
+ }
+ }
+
+ /* Bus width */
+ bus->width = (status & E1000_STATUS_BUS64)
+ ? e1000_bus_width_64
+ : e1000_bus_width_32;
+
+ /* Which PCI(-X) function? */
+ mac->ops.set_lan_id(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_bus_info_pcie_generic - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val;
+ u16 pcie_link_status;
+
+ DEBUGFUNC("e1000_get_bus_info_pcie_generic");
+
+ bus->type = e1000_bus_type_pci_express;
+
+ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS,
+ &pcie_link_status);
+ if (ret_val) {
+ bus->width = e1000_bus_width_unknown;
+ bus->speed = e1000_bus_speed_unknown;
+ } else {
+ switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
+ case PCIE_LINK_SPEED_2500:
+ bus->speed = e1000_bus_speed_2500;
+ break;
+ case PCIE_LINK_SPEED_5000:
+ bus->speed = e1000_bus_speed_5000;
+ break;
+ default:
+ bus->speed = e1000_bus_speed_unknown;
+ break;
+ }
+
+ bus->width = (enum e1000_bus_width)((pcie_link_status &
+ PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT);
+ }
+
+ mac->ops.set_lan_id(hw);
+
+ return E1000_SUCCESS;
+}
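As a self-contained illustration of the decode performed above (not part of the patch), the PCIe Link Status word carries the negotiated speed code in bits 3:0 (1 = 2.5 GT/s, 2 = 5 GT/s) and the negotiated width in bits 9:4 per the PCIe spec; the mask and shift values below are written out rather than taken from the driver headers:

/* Standalone sketch, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t pcie_link_status = 0x0042;                 /* hypothetical sample */
	unsigned speed_code = pcie_link_status & 0x000F;    /* bits 3:0 */
	unsigned width = (pcie_link_status >> 4) & 0x3F;    /* bits 9:4 */

	printf("speed code %u (2 = 5 GT/s), width x%u\n", speed_code, width);
	/* prints: speed code 2 (2 = 5 GT/s), width x4 */
	return 0;
}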
+
+/**
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+STATIC void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ /* The status register reports the correct function number
+ * for the device regardless of function swap state.
+ */
+ reg = E1000_READ_REG(hw, E1000_STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ * e1000_set_lan_id_multi_port_pci - Set LAN id for PCI multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading PCI config space.
+ **/
+void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ u16 pci_header_type;
+ u32 status;
+
+ e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
+ if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ bus->func = (status & E1000_STATUS_FUNC_MASK)
+ >> E1000_STATUS_FUNC_SHIFT;
+ } else {
+ bus->func = 0;
+ }
+}
+
+/**
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+
+ bus->func = 0;
+}
+
+/**
+ * e1000_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+ u32 offset;
+
+ DEBUGFUNC("e1000_clear_vfta_generic");
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * e1000_write_vfta_generic - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ DEBUGFUNC("e1000_write_vfta_generic");
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_init_rx_addrs_generic - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: number of receive address registers
+ *
+ * Sets up the receive address registers by setting the base receive address
+ * register to the device's MAC address and clearing all the other receive
+ * address registers to 0.
+ **/
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
+{
+ u32 i;
+ u8 mac_addr[ETH_ADDR_LEN] = {0};
+
+ DEBUGFUNC("e1000_init_rx_addrs_generic");
+
+ /* Setup the receive address */
+ DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other (rar_entry_count - 1) receive addresses */
+ DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
+ for (i = 1; i < rar_count; i++)
+ hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be setup by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 i;
+ s32 ret_val;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ADDR_LEN];
+
+ DEBUGFUNC("e1000_check_alt_mac_addr_generic");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
+ if (ret_val)
+ return ret_val;
+
+ /* not supported on older hardware or 82573 */
+ if ((hw->mac.type < e1000_82571) || (hw->mac.type == e1000_82573))
+ return E1000_SUCCESS;
+
+ /* Alternate MAC address is handled by the option ROM for 82580
+ * and newer. SW support not required.
+ */
+ if (hw->mac.type >= e1000_82580)
+ return E1000_SUCCESS;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+ (nvm_alt_mac_addr_offset == 0x0000))
+ /* There is no Alternate MAC Address */
+ return E1000_SUCCESS;
+
+ if (hw->bus.func == E1000_FUNC_1)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ if (hw->bus.func == E1000_FUNC_2)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
+
+ if (hw->bus.func == E1000_FUNC_3)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
+ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+ if (alt_mac_addr[0] & 0x01) {
+ DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
+ return E1000_SUCCESS;
+ }
+
+ /* We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+ return E1000_SUCCESS;
+}
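To make the word-to-byte unpacking in the loop above concrete, here is a standalone sketch (not part of the patch) using three hypothetical NVM words; each 16-bit word stores two address bytes with the lower byte first, and the multicast bit of byte 0 decides whether the alternate address is used:

/* Standalone sketch, not part of the patch; nvm_words[] is sample data. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t nvm_words[3] = { 0x1b00, 0x3c21, 0x5e4d };
	uint8_t alt_mac[6];
	int i;

	for (i = 0; i < 6; i += 2) {
		uint16_t nvm_data = nvm_words[i >> 1];
		alt_mac[i] = (uint8_t)(nvm_data & 0xFF);
		alt_mac[i + 1] = (uint8_t)(nvm_data >> 8);
	}

	printf("%02x:%02x:%02x:%02x:%02x:%02x  multicast bit %s\n",
	       alt_mac[0], alt_mac[1], alt_mac[2], alt_mac[3],
	       alt_mac[4], alt_mac[5],
	       (alt_mac[0] & 0x01) ? "set (ignored)" : "clear (used)");
	/* prints: 00:1b:21:3c:4d:5e  multicast bit clear (used) */
	return 0;
}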
+
+/**
+ * e1000_rar_set_generic - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ **/
+STATIC void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ DEBUGFUNC("e1000_rar_set_generic");
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ /* Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
+}
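A standalone sketch (not part of the patch) of the byte packing done above, using a hypothetical MAC address; E1000_RAH_AV is assumed to be the bit-31 address-valid flag and is written out as a literal here:

/* Standalone sketch, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };
	uint32_t rar_low = addr[0] | (addr[1] << 8) | (addr[2] << 16) |
			   ((uint32_t)addr[3] << 24);
	uint32_t rar_high = addr[4] | (addr[5] << 8);

	if (rar_low || rar_high)
		rar_high |= 0x80000000u;	/* assumed E1000_RAH_AV (bit 31) */

	printf("RAL = 0x%08x  RAH = 0x%08x\n", rar_low, rar_high);
	/* prints: RAL = 0x3c211b00  RAH = 0x80005e4d */
	return 0;
}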
+
+/**
+ * e1000_hash_mc_addr_generic - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value.
+ **/
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ DEBUGFUNC("e1000_hash_mc_addr_generic");
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ /* The portion of the address that is used for the hash table
+ * is determined by the mc_filter_type setting.
+ * The algorithm is such that there is a total of 8 bits of shifting.
+ * The bit_shift for a mc_filter_type of 0 represents the number of
+ * left-shifts where the MSB of mc_addr[5] would still fall within
+ * the hash_mask. Case 0 does this exactly. Since there are a total
+ * of 8 bits of shifting, then mc_addr[4] will shift right the
+ * remaining number of bits. Thus 8 - bit_shift. The rest of the
+ * cases are a variation of this algorithm...essentially raising the
+ * number of bits to shift mc_addr[5] left, while still keeping the
+ * 8-bit shifting total.
+ *
+ * For example, given the following Destination MAC Address and an
+ * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+ * we can see that the bit_shift for case 0 is 4. These are the hash
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+ switch (hw->mac.mc_filter_type) {
+ default:
+ case 0:
+ break;
+ case 1:
+ bit_shift += 1;
+ break;
+ case 2:
+ bit_shift += 2;
+ break;
+ case 3:
+ bit_shift += 4;
+ break;
+ }
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16) mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
+
+/**
+ * e1000_update_mc_addr_list_generic - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ DEBUGFUNC("e1000_update_mc_addr_list_generic");
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
+ hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ADDR_LEN);
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+ E1000_WRITE_FLUSH(hw);
+}
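The following standalone sketch (not part of the patch) reproduces the worked example from the comment in e1000_hash_mc_addr_generic() above, 128 MTA registers and destination MAC 01:aa:00:12:34:56 with mc_filter_type 0, and then splits the hash into the register index and bit position exactly as e1000_update_mc_addr_list_generic() does:

/* Standalone sketch, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

static uint32_t hash_mc_addr(const uint8_t *mc_addr, uint32_t mta_reg_count,
			     int mc_filter_type)
{
	uint32_t hash_mask = (mta_reg_count * 32) - 1;
	uint8_t bit_shift = 0;

	/* Number of left-shifts where 0xFF still falls within the mask */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	switch (mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
			    ((uint16_t)mc_addr[5] << bit_shift));
}

int main(void)
{
	uint8_t mc[6] = { 0x01, 0xaa, 0x00, 0x12, 0x34, 0x56 };
	uint32_t hash = hash_mc_addr(mc, 128, 0);

	printf("hash = 0x%03x, MTA reg = %u, bit = %u\n",
	       (unsigned)hash, (unsigned)((hash >> 5) & 127),
	       (unsigned)(hash & 0x1F));
	/* prints: hash = 0x563, MTA reg = 43, bit = 3 */
	return 0;
}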
+
+/**
+ * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
+ * @hw: pointer to the HW structure
+ *
+ * In certain situations, a system BIOS may report that the PCIx maximum
+ * memory read byte count (MMRBC) value is higher than the actual
+ * value. We check the PCIx command register with the current PCIx status
+ * register.
+ **/
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw)
+{
+ u16 cmd_mmrbc;
+ u16 pcix_cmd;
+ u16 pcix_stat_hi_word;
+ u16 stat_mmrbc;
+
+ DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic");
+
+ /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */
+ if (hw->bus.type != e1000_bus_type_pcix)
+ return;
+
+ e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+ e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word);
+ cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >>
+ PCIX_COMMAND_MMRBC_SHIFT;
+ stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
+ PCIX_STATUS_HI_MMRBC_SHIFT;
+ if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+ stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
+ if (cmd_mmrbc > stat_mmrbc) {
+ pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
+ pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
+ e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+ }
+}
+
+/**
+ * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
+
+ E1000_READ_REG(hw, E1000_CRCERRS);
+ E1000_READ_REG(hw, E1000_SYMERRS);
+ E1000_READ_REG(hw, E1000_MPC);
+ E1000_READ_REG(hw, E1000_SCC);
+ E1000_READ_REG(hw, E1000_ECOL);
+ E1000_READ_REG(hw, E1000_MCC);
+ E1000_READ_REG(hw, E1000_LATECOL);
+ E1000_READ_REG(hw, E1000_COLC);
+ E1000_READ_REG(hw, E1000_DC);
+ E1000_READ_REG(hw, E1000_SEC);
+ E1000_READ_REG(hw, E1000_RLEC);
+ E1000_READ_REG(hw, E1000_XONRXC);
+ E1000_READ_REG(hw, E1000_XONTXC);
+ E1000_READ_REG(hw, E1000_XOFFRXC);
+ E1000_READ_REG(hw, E1000_XOFFTXC);
+ E1000_READ_REG(hw, E1000_FCRUC);
+ E1000_READ_REG(hw, E1000_GPRC);
+ E1000_READ_REG(hw, E1000_BPRC);
+ E1000_READ_REG(hw, E1000_MPRC);
+ E1000_READ_REG(hw, E1000_GPTC);
+ E1000_READ_REG(hw, E1000_GORCL);
+ E1000_READ_REG(hw, E1000_GORCH);
+ E1000_READ_REG(hw, E1000_GOTCL);
+ E1000_READ_REG(hw, E1000_GOTCH);
+ E1000_READ_REG(hw, E1000_RNBC);
+ E1000_READ_REG(hw, E1000_RUC);
+ E1000_READ_REG(hw, E1000_RFC);
+ E1000_READ_REG(hw, E1000_ROC);
+ E1000_READ_REG(hw, E1000_RJC);
+ E1000_READ_REG(hw, E1000_TORL);
+ E1000_READ_REG(hw, E1000_TORH);
+ E1000_READ_REG(hw, E1000_TOTL);
+ E1000_READ_REG(hw, E1000_TOTH);
+ E1000_READ_REG(hw, E1000_TPR);
+ E1000_READ_REG(hw, E1000_TPT);
+ E1000_READ_REG(hw, E1000_MPTC);
+ E1000_READ_REG(hw, E1000_BPTC);
+}
+
+/**
+ * e1000_check_for_copper_link_generic - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("e1000_check_for_copper_link");
+
+ /* We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status)
+ return E1000_SUCCESS;
+
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ return E1000_SUCCESS; /* No link detected */
+
+ mac->get_link_status = false;
+
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000_check_downshift_generic(hw);
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg)
+ return -E1000_ERR_CONFIG;
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ mac->ops.config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+ DEBUGOUT("Error configuring flow control\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_fiber_link_generic - Check for link (Fiber)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_check_for_fiber_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ /* If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), the cable is plugged in (we have signal),
+ * and our link partner is not trying to auto-negotiate with us (we
+ * are receiving idles or data), we need to force link up. We also
+ * need to give auto-negotiation time to complete, in case the cable
+ * was just plugged in. The autoneg_failed flag does this.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
+ !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /* If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_check_for_serdes_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ /* If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), and our link partner is not trying to
+ * auto-negotiate with us (we are receiving idles or data),
+ * we need to force link up. We also need to give auto-negotiation
+ * time to complete.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /* If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
+ /* If we force link for non-auto-negotiation switch, check
+ * link status based on MAC synchronization for internal
+ * serdes media type.
+ */
+ /* SYNCH bit and IV bit are sticky. */
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ DEBUGOUT("SERDES: Link up - forced.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - force failed.\n");
+ }
+ }
+
+ if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU) {
+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n");
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - no sync.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - autoneg failed\n");
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_default_fc_generic - Set flow control default values
+ * @hw: pointer to the HW structure
+ *
+ * Read the EEPROM for the default values for flow control and store the
+ * values.
+ **/
+s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 nvm_offset = 0;
+
+ DEBUGFUNC("e1000_set_default_fc_generic");
+
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ if (hw->mac.type == e1000_i350) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
+ ret_val = hw->nvm.ops.read(hw,
+ NVM_INIT_CONTROL2_REG +
+ nvm_offset,
+ 1, &nvm_data);
+ } else {
+ ret_val = hw->nvm.ops.read(hw,
+ NVM_INIT_CONTROL2_REG,
+ 1, &nvm_data);
+ }
+
+
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
+ hw->fc.requested_mode = e1000_fc_none;
+ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+ NVM_WORD0F_ASM_DIR)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else
+ hw->fc.requested_mode = e1000_fc_full;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_link_generic - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+s32 e1000_setup_link_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_link_generic");
+
+ /* In the case of the phy reset being blocked, we already have a link.
+ * We do not need to set it up again.
+ */
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
+ return E1000_SUCCESS;
+
+ /* If requested flow control is set to default, set flow control
+ * based on the EEPROM flow control settings.
+ */
+ if (hw->fc.requested_mode == e1000_fc_default) {
+ ret_val = e1000_set_default_fc_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
+ hw->fc.current_mode);
+
+ /* Call the necessary media_type subroutine to configure the link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+ E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+ E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+ return e1000_set_fc_watermarks_generic(hw);
+}
+
+/**
+ * e1000_commit_fc_settings_generic - Configure flow control
+ * @hw: pointer to the HW structure
+ *
+ * Write the flow control settings to the Transmit Config Word Register (TXCW)
+ * base on the flow control settings in e1000_mac_info.
+ **/
+s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 txcw;
+
+ DEBUGFUNC("e1000_commit_fc_settings_generic");
+
+ /* Check for a software override of the flow control settings, and
+ * setup the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start auto-
+ * negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we
+ * do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /* Flow control completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case e1000_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is disabled
+ * by a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case e1000_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case e1000_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+ mac->txcw = txcw;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_poll_fiber_serdes_link_generic - Poll for link up
+ * @hw: pointer to the HW structure
+ *
+ * Polls for link up by reading the status register, if link fails to come
+ * up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 i, status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
+
+ /* If we have a signal (the cable is plugged in, or assumed true for
+ * serdes media) then poll for a "Link-Up" indication in the Device
+ * Status Register. Time out if a link isn't seen in 500 milliseconds
+ * (auto-negotiation should complete in less than 500 milliseconds even
+ * if the other end is doing it in SW).
+ */
+ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+ msec_delay(10);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU)
+ break;
+ }
+ if (i == FIBER_LINK_UP_LIMIT) {
+ DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ mac->autoneg_failed = true;
+ /* AutoNeg failed to achieve a link, so we'll call
+ * mac->check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
+ */
+ ret_val = mac->ops.check_for_link(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while checking for link\n");
+ return ret_val;
+ }
+ mac->autoneg_failed = false;
+ } else {
+ mac->autoneg_failed = false;
+ DEBUGOUT("Valid Link Found\n");
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes
+ * links. Upon successful setup, poll for link.
+ **/
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Take the link out of reset */
+ ctrl &= ~E1000_CTRL_LRST;
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ ret_val = e1000_commit_fc_settings_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation. If auto-negotiation is successful
+ * then the link-up status bit will be set and the flow control enable
+ * bits (RFCE and TFCE) will be set according to their negotiated value.
+ */
+ DEBUGOUT("Auto-negotiation enabled\n");
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+
+ /* For these adapters, the SW definable pin 1 is set when the optics
+ * detect a signal. If we have a signal, then poll for a "Link-Up"
+ * indication.
+ */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+ (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+ ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+ } else {
+ DEBUGOUT("No signal detected\n");
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_config_collision_dist_generic - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+STATIC void e1000_config_collision_dist_generic(struct e1000_hw *hw)
+{
+ u32 tctl;
+
+ DEBUGFUNC("e1000_config_collision_dist_generic");
+
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers. If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ **/
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
+{
+ u32 fcrtl = 0, fcrth = 0;
+
+ DEBUGFUNC("e1000_set_fc_watermarks_generic");
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (hw->fc.current_mode & e1000_fc_tx_pause) {
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
+ */
+ fcrtl = hw->fc.low_water;
+ if (hw->fc.send_xon)
+ fcrtl |= E1000_FCRTL_XONE;
+
+ fcrth = hw->fc.high_water;
+ }
+ E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
+ E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_force_mac_fc_generic - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings. TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC. Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_force_mac_fc_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disable flow control
+ * according to the "hw->fc.current_mode" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) is enabled.
+ * other: No other values should be possible at this point.
+ */
+ DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case e1000_fc_rx_pause:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case e1000_fc_tx_pause:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case e1000_fc_full:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_fc_after_link_up_generic - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced. If the link needed to be forced, then
+ * flow control needs to be forced also. If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ **/
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
+ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+ u16 speed, duplex;
+
+ DEBUGFUNC("e1000_config_fc_after_link_up_generic");
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (mac->autoneg_failed) {
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes)
+ ret_val = e1000_force_mac_fc_generic(hw);
+ } else {
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ret_val = e1000_force_mac_fc_generic(hw);
+ }
+
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto-Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected Rx-only
+ * pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = e1000_fc_none;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = e1000_force_mac_fc_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
+ mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ DEBUGOUT("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto-Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+ /* Now we need to check if the user selected Rx-only
+ * pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = e1000_force_mac_fc_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
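+
+#if 0 /* Editorial sketch -- not part of the upstream driver. */
+/* The PAUSE/ASM_DIR resolution above, condensed into a pure helper: given
+ * the local and link partner advertisement bits, return the negotiated flow
+ * control mode per the IEEE 802.3 table, assuming the user requested full
+ * flow control. Hypothetical name, for illustration only.
+ */
+static enum e1000_fc_mode e1000_resolve_fc_sketch(bool loc_pause, bool loc_asm,
+ bool lp_pause, bool lp_asm)
+{
+ if (loc_pause && lp_pause)
+ return e1000_fc_full;
+ if (!loc_pause && loc_asm && lp_pause && lp_asm)
+ return e1000_fc_tx_pause;
+ if (loc_pause && loc_asm && !lp_pause && lp_asm)
+ return e1000_fc_rx_pause;
+ return e1000_fc_none;
+}
+#endif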
+
+/**
+ * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Read the status register for the current speed/duplex and store the current
+ * speed and duplex for copper connections.
+ **/
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ u32 status;
+
+ DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ DEBUGOUT("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ DEBUGOUT("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Sets the speed and duplex to gigabit full duplex (the only possible option)
+ * for fiber/serdes links.
+ **/
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 *speed, u16 *duplex)
+{
+ DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
+ UNREFERENCED_1PARAMETER(hw);
+
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_hw_semaphore_generic - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_generic");
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_generic(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_put_hw_semaphore_generic - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("e1000_put_hw_semaphore_generic");
+
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
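+
+#if 0 /* Editorial sketch -- not part of the upstream driver. */
+/* Typical usage of the two-stage SW/FW semaphore above: acquire it around
+ * any PHY/NVM access and always release it afterwards. Hypothetical helper
+ * name, for illustration only.
+ */
+static s32 e1000_read_nvm_word_locked_sketch(struct e1000_hw *hw, u16 offset,
+ u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = e1000_get_hw_semaphore_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->nvm.ops.read(hw, offset, 1, data);
+
+ e1000_put_hw_semaphore_generic(hw);
+
+ return ret_val;
+}
+#endif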
+
+/**
+ * e1000_get_auto_rd_done_generic - Check for auto read completion
+ * @hw: pointer to the HW structure
+ *
+ * Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
+{
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_auto_rd_done_generic");
+
+ while (i < AUTO_READ_DONE_TIMEOUT) {
+ if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
+ break;
+ msec_delay(1);
+ i++;
+ }
+
+ if (i == AUTO_READ_DONE_TIMEOUT) {
+ DEBUGOUT("Auto read by HW from NVM has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_valid_led_default_generic - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_generic");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_id_led_init_generic - Initialize ID LED settings
+ * @hw: pointer to the HW structure
+ *
+ * Reads the default LED configuration from the NVM and uses it to derive
+ * the LEDCTL values (ledctl_mode1 and ledctl_mode2) used when software
+ * drives the LEDs.
+ **/
+s32 e1000_id_led_init_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_mask = 0x000000FF;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ u16 data, i, temp;
+ const u16 led_mask = 0x0F;
+
+ DEBUGFUNC("e1000_id_led_init_generic");
+
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ return ret_val;
+
+ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & led_mask;
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
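+
+#if 0 /* Editorial sketch -- not part of the upstream driver. */
+/* Each of the four LEDs is described by one nibble of the NVM ID LED word
+ * and by one byte of the LEDCTL register; the loop above rewrites the 8-bit
+ * field at offset (i << 3) based on the nibble at offset (i << 2). The same
+ * update, isolated in a hypothetical helper for illustration only:
+ */
+static u32 e1000_ledctl_set_mode_sketch(u32 ledctl, u16 led, u32 mode)
+{
+ ledctl &= ~(0x000000FFUL << (led << 3));
+ ledctl |= mode << (led << 3);
+ return ledctl;
+}
+#endif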
+
+/**
+ * e1000_setup_led_generic - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored.
+ **/
+s32 e1000_setup_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl;
+
+ DEBUGFUNC("e1000_setup_led_generic");
+
+ if (hw->mac.ops.setup_led != e1000_setup_led_generic)
+ return -E1000_ERR_CONFIG;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+ hw->mac.ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+ } else if (hw->phy.media_type == e1000_media_type_copper) {
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cleanup_led_generic - Set LED config to default operation
+ * @hw: pointer to the HW structure
+ *
+ * Remove the current LED configuration and set the LED configuration
+ * to the default value, saved from the EEPROM.
+ **/
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_cleanup_led_generic");
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_blink_led_generic - Blink LED
+ * @hw: pointer to the HW structure
+ *
+ * Blink the LEDs which are set to be on.
+ **/
+s32 e1000_blink_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl_blink = 0;
+ u32 i;
+
+ DEBUGFUNC("e1000_blink_led_generic");
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ /* always blink LED0 for PCI-E fiber */
+ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+ } else {
+ /* Set the blink bit for each LED that's "on" (0x0E)
+ * (or "off" if inverted) in ledctl_mode2. The blink
+ * logic in hardware only works when mode is set to "on"
+ * so it must be changed accordingly when the mode is
+ * "off" and inverted.
+ */
+ ledctl_blink = hw->mac.ledctl_mode2;
+ for (i = 0; i < 32; i += 8) {
+ u32 mode = (hw->mac.ledctl_mode2 >> i) &
+ E1000_LEDCTL_LED0_MODE_MASK;
+ u32 led_default = hw->mac.ledctl_default >> i;
+
+ if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_ON)) ||
+ ((led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_OFF))) {
+ ledctl_blink &=
+ ~(E1000_LEDCTL_LED0_MODE_MASK << i);
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_MODE_LED_ON) << i;
+ }
+ }
+ }
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on_generic - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+s32 e1000_led_on_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_led_on_generic");
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ break;
+ case e1000_media_type_copper:
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+ break;
+ default:
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off_generic - Turn LED off
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED off.
+ **/
+s32 e1000_led_off_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_led_off_generic");
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ break;
+ case e1000_media_type_copper:
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+ break;
+ default:
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
+ * @hw: pointer to the HW structure
+ * @no_snoop: bitmap of snoop events
+ *
+ * Set the PCI-Express no-snoop bits for the events enabled in 'no_snoop'.
+ **/
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
+{
+ u32 gcr;
+
+ DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
+
+ if (hw->bus.type != e1000_bus_type_pci_express)
+ return;
+
+ if (no_snoop) {
+ gcr = E1000_READ_REG(hw, E1000_GCR);
+ gcr &= ~(PCIE_NO_SNOOP_ALL);
+ gcr |= no_snoop;
+ E1000_WRITE_REG(hw, E1000_GCR, gcr);
+ }
+}
+
+/**
+ * e1000_disable_pcie_master_generic - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS if successful, else returns -10
+ * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
+ * the master requests to be disabled.
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ **/
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 timeout = MASTER_DISABLE_TIMEOUT;
+
+ DEBUGFUNC("e1000_disable_pcie_master_generic");
+
+ if (hw->bus.type != e1000_bus_type_pci_express)
+ return E1000_SUCCESS;
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ while (timeout) {
+ if (!(E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STATUS_GIO_MASTER_ENABLE) ||
+ E1000_REMOVED(hw->hw_addr))
+ break;
+ usec_delay(100);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Master requests are pending.\n");
+ return -E1000_ERR_MASTER_REQUESTS_PENDING;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000_reset_adaptive_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_reset_adaptive_generic");
+
+ if (!mac->adaptive_ifs) {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ return;
+ }
+
+ mac->current_ifs_val = 0;
+ mac->ifs_min_val = IFS_MIN;
+ mac->ifs_max_val = IFS_MAX;
+ mac->ifs_step_size = IFS_STEP;
+ mac->ifs_ratio = IFS_RATIO;
+
+ mac->in_ifs_mode = false;
+ E1000_WRITE_REG(hw, E1000_AIT, 0);
+}
+
+/**
+ * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Update the Adaptive Interframe Spacing Throttle value based on the
+ * time between transmitted packets and time between collisions.
+ **/
+void e1000_update_adaptive_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_update_adaptive_generic");
+
+ if (!mac->adaptive_ifs) {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ return;
+ }
+
+ if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+ if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+ mac->in_ifs_mode = true;
+ if (mac->current_ifs_val < mac->ifs_max_val) {
+ if (!mac->current_ifs_val)
+ mac->current_ifs_val = mac->ifs_min_val;
+ else
+ mac->current_ifs_val +=
+ mac->ifs_step_size;
+ E1000_WRITE_REG(hw, E1000_AIT,
+ mac->current_ifs_val);
+ }
+ }
+ } else {
+ if (mac->in_ifs_mode &&
+ (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+ mac->current_ifs_val = 0;
+ mac->in_ifs_mode = false;
+ E1000_WRITE_REG(hw, E1000_AIT, 0);
+ }
+ }
+}
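+
+#if 0 /* Editorial sketch -- not part of the upstream driver. */
+/* The adaptive IFS heuristic above in one predicate: widen the inter-frame
+ * spacing only when collisions dominate transmits and enough packets are
+ * being sent. Hypothetical helper, for illustration only.
+ */
+static bool e1000_ifs_should_throttle_sketch(const struct e1000_mac_info *mac)
+{
+ return ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) &&
+ (mac->tx_packet_delta > MIN_NUM_XMITS);
+}
+#endif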
+
+/**
+ * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Verify that when auto-negotiation is not used, MDI/MDIx is set to
+ * forced MDI mode, the only valid setting in that case.
+ **/
+STATIC s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_setting_generic");
+
+ if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+ DEBUGOUT("Invalid MDI setting detected\n");
+ hw->phy.mdix = 1;
+ return -E1000_ERR_CONFIG;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Validate the MDI/MDIx setting, allowing for auto-crossover during forced
+ * operation.
+ **/
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic");
+ UNREFERENCED_1PARAMETER(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register
+ * @hw: pointer to the HW structure
+ * @reg: 32bit register offset such as E1000_SCTL
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes an address/data control type register. There are several of these
+ * and they all have the format address << 8 | data; bit 31 is polled for
+ * completion.
+ **/
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+ u32 offset, u8 data)
+{
+ u32 i, regvalue = 0;
+
+ DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
+
+ /* Set up the address and data */
+ regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+ E1000_WRITE_REG(hw, reg, regvalue);
+
+ /* Poll the ready bit to see if the write completed */
+ for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+ usec_delay(5);
+ regvalue = E1000_READ_REG(hw, reg);
+ if (regvalue & E1000_GEN_CTL_READY)
+ break;
+ }
+ if (!(regvalue & E1000_GEN_CTL_READY)) {
+ DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
+ return -E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
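+
+#if 0 /* Editorial sketch -- not part of the upstream driver. */
+/* Example use of the address/data format described above: the helper packs
+ * the register offset into the upper address bits, puts the byte of data in
+ * the low bits, and polls E1000_GEN_CTL_READY (bit 31) for completion.
+ * E1000_SCTL and the offset/data values are placeholders for illustration.
+ */
+static s32 e1000_sctl_write_sketch(struct e1000_hw *hw)
+{
+ return e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1F, 0x00);
+}
+#endif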
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.h
new file mode 100755
index 00000000..5a7ce4a4
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mac.h
@@ -0,0 +1,95 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+void e1000_init_mac_ops_generic(struct e1000_hw *hw);
+#ifndef E1000_REMOVED
+#define E1000_REMOVED(a) (0)
+#endif /* E1000_REMOVED */
+void e1000_null_mac_generic(struct e1000_hw *hw);
+s32 e1000_null_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
+bool e1000_null_mng_mode(struct e1000_hw *hw);
+void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
+void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
+void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
+s32 e1000_blink_led_generic(struct e1000_hw *hw);
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw);
+s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw);
+s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw);
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw);
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
+s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw);
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw);
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+s32 e1000_id_led_init_generic(struct e1000_hw *hw);
+s32 e1000_led_on_generic(struct e1000_hw *hw);
+s32 e1000_led_off_generic(struct e1000_hw *hw);
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+s32 e1000_set_default_fc_generic(struct e1000_hw *hw);
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32 e1000_setup_led_generic(struct e1000_hw *hw);
+s32 e1000_setup_link_generic(struct e1000_hw *hw);
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+ u32 offset, u8 data);
+
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
+
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000_reset_adaptive_generic(struct e1000_hw *hw);
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
+void e1000_update_adaptive_generic(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.c
new file mode 100755
index 00000000..30db8920
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.c
@@ -0,0 +1,573 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+/**
+ * e1000_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ *
+ * Calculates the checksum over a buffer of the specified length. The
+ * calculated checksum is returned.
+ **/
+u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ DEBUGFUNC("e1000_calculate_checksum");
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
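+
+#if 0 /* Editorial sketch -- not part of the upstream driver. */
+/* Property of the checksum above: it is the two's complement of the byte
+ * sum, so adding it back to the byte sum of the buffer yields zero modulo
+ * 256. A hypothetical verification helper, for illustration only:
+ */
+static bool e1000_checksum_is_valid_sketch(u8 *buffer, u32 length, u8 checksum)
+{
+ u32 i;
+ u8 sum = checksum;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return sum == 0;
+}
+#endif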
+
+/**
+ * e1000_mng_enable_host_if_generic - Checks host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command is completed. It busy waits
+ * if the previous command has not completed.
+ **/
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
+{
+ u32 hicr;
+ u8 i;
+
+ DEBUGFUNC("e1000_mng_enable_host_if_generic");
+
+ if (!hw->mac.arc_subsystem_valid) {
+ DEBUGOUT("ARC subsystem not valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+ /* check the previous command is completed */
+ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay_irq(1);
+ }
+
+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+ DEBUGOUT("Previous command timeout failed .\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_mng_mode_generic - Generic check management mode
+ * @hw: pointer to the HW structure
+ *
+ * Reads the firmware semaphore register and returns true (>0) if
+ * manageability is enabled, else false (0).
+ **/
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
+{
+ u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+ DEBUGFUNC("e1000_check_mng_mode_generic");
+
+ return (fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
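+
+#if 0 /* Editorial sketch -- not part of the upstream driver. */
+/* Worked example of the check above: with E1000_FWSM_MODE_MASK 0xE and
+ * E1000_FWSM_MODE_SHIFT 1, IAMT mode (E1000_MNG_IAMT_MODE 0x3) corresponds
+ * to (fwsm & 0xE) == 0x6.
+ */
+#endif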
+
+/**
+ * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ **/
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
+{
+ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+ u32 *buffer = (u32 *)&hw->mng_cookie;
+ u32 offset;
+ s32 ret_val, hdr_csum, csum;
+ u8 i, len;
+
+ DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
+
+ hw->mac.tx_pkt_filtering = true;
+
+ /* No manageability, no filtering */
+ if (!hw->mac.ops.check_mng_mode(hw)) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* If we can't read from the host interface for whatever
+ * reason, disable filtering.
+ */
+ ret_val = e1000_mng_enable_host_if_generic(hw);
+ if (ret_val != E1000_SUCCESS) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Read in the header. Length and offset are in dwords. */
+ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+ for (i = 0; i < len; i++)
+ *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
+ offset + i);
+ hdr_csum = hdr->checksum;
+ hdr->checksum = 0;
+ csum = e1000_calculate_checksum((u8 *)hdr,
+ E1000_MNG_DHCP_COOKIE_LENGTH);
+ /* If either the checksums or signature don't match, then
+ * the cookie area isn't considered valid, in which case we
+ * take the safe route of assuming Tx filtering is enabled.
+ */
+ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+ hw->mac.tx_pkt_filtering = true;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Cookie area is valid, make the final check for filtering. */
+ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
+ hw->mac.tx_pkt_filtering = false;
+
+ return hw->mac.tx_pkt_filtering;
+}
+
+/**
+ * e1000_mng_write_cmd_header_generic - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after performing the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+ DEBUGFUNC("e1000_mng_write_cmd_header_generic");
+
+ /* Write the whole command header structure with new checksum. */
+
+ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+ length >>= 2;
+ /* Write the relevant command block into the ram area. */
+ for (i = 0; i < length; i++) {
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+ *((u32 *) hdr + i));
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_mng_host_if_write_generic - Write to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the given offset on the host
+ * interface. It handles alignment so the writes are done efficiently, and it
+ * accumulates the byte sum of the buffer into the *sum parameter.
+ **/
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length, u16 offset, u8 *sum)
+{
+ u8 *tmp;
+ u8 *bufptr = buffer;
+ u32 data = 0;
+ u16 remaining, i, j, prev_bytes;
+
+ DEBUGFUNC("e1000_mng_host_if_write_generic");
+
+ /* sum is only the sum of the data; it is not a checksum */
+
+ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
+ return -E1000_ERR_PARAM;
+
+ tmp = (u8 *)&data;
+ prev_bytes = offset & 0x3;
+ offset >>= 2;
+
+ if (prev_bytes) {
+ data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
+ for (j = prev_bytes; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
+ length -= j - prev_bytes;
+ offset++;
+ }
+
+ remaining = length & 0x3;
+ length -= remaining;
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant command block into the
+ * ram area.
+ */
+ for (i = 0; i < length; i++) {
+ for (j = 0; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
+ data);
+ }
+ if (remaining) {
+ for (j = 0; j < sizeof(u32); j++) {
+ if (j < remaining)
+ *(tmp + j) = *bufptr++;
+ else
+ *(tmp + j) = 0;
+
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
+ data);
+ }
+
+ return E1000_SUCCESS;
+}
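+
+#if 0 /* Editorial sketch -- not part of the upstream driver. */
+/* The alignment handling above, in numbers: an unaligned offset first fills
+ * the tail of the current dword, then whole dwords are written, then any
+ * remainder is zero-padded. E.g. offset 6, length 11 splits into 2 + 8 + 1
+ * bytes across four dword writes. Illustration only.
+ */
+#endif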
+
+/**
+ * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length)
+{
+ struct e1000_host_mng_command_header hdr;
+ s32 ret_val;
+ u32 hicr;
+
+ DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
+
+ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+ hdr.command_length = length;
+ hdr.reserved1 = 0;
+ hdr.reserved2 = 0;
+ hdr.checksum = 0;
+
+ /* Enable the host interface */
+ ret_val = e1000_mng_enable_host_if_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Populate the host interface with the contents of "buffer". */
+ ret_val = e1000_mng_host_if_write_generic(hw, buffer, length,
+ sizeof(hdr), &(hdr.checksum));
+ if (ret_val)
+ return ret_val;
+
+ /* Write the manageability command header */
+ ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr);
+ if (ret_val)
+ return ret_val;
+
+ /* Tell the ARC a new command is pending. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_enable_mng_pass_thru - Check if management passthrough is needed
+ * @hw: pointer to the HW structure
+ *
+ * Verifies whether the hardware needs to leave the interface enabled so
+ * that frames can be directed to and from the management interface.
+ **/
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+ u32 fwsm, factps;
+
+ DEBUGFUNC("e1000_enable_mng_pass_thru");
+
+ if (!hw->mac.asf_firmware_present)
+ return false;
+
+ manc = E1000_READ_REG(hw, E1000_MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ return false;
+
+ if (hw->mac.has_fwsm) {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ factps = E1000_READ_REG(hw, E1000_FACTPS);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)))
+ return true;
+ } else if ((hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82583)) {
+ u16 data;
+
+ factps = E1000_READ_REG(hw, E1000_FACTPS);
+ e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
+ (e1000_mng_mode_pt << 13)))
+ return true;
+ } else if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * e1000_host_interface_command - Writes buffer to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: contains a command to write
+ * @length: the byte length of the buffer, must be multiple of 4 bytes
+ *
+ * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS
+ * else returns E1000_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
+{
+ u32 hicr, i;
+
+ DEBUGFUNC("e1000_host_interface_command");
+
+ if (!(hw->mac.arc_subsystem_valid)) {
+ DEBUGOUT("Hardware doesn't support host interface command.\n");
+ return E1000_SUCCESS;
+ }
+
+ if (!hw->mac.asf_firmware_present) {
+ DEBUGOUT("Firmware is not present.\n");
+ return E1000_SUCCESS;
+ }
+
+ if (length == 0 || length & 0x3 ||
+ length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
+ DEBUGOUT("Buffer length failure.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < length; i++)
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+ *((u32 *)buffer + i));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check command successful completion. */
+ if (i == E1000_HI_COMMAND_TIMEOUT ||
+ (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
+ DEBUGOUT("Command has failed with no status valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ for (i = 0; i < length; i++)
+ *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
+ E1000_HOST_IF,
+ i);
+
+ return E1000_SUCCESS;
+}
+/**
+ * e1000_load_firmware - Writes proxy FW code buffer to host interface
+ * and executes it.
+ * @hw: pointer to the HW structure
+ * @buffer: contains a firmware to write
+ * @length: the byte length of the buffer, must be multiple of 4 bytes
+ *
+ * Upon success returns E1000_SUCCESS; returns E1000_ERR_CONFIG if loading is
+ * not enabled in HW, else returns E1000_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length)
+{
+ u32 hicr, hibba, fwsm, icr, i;
+
+ DEBUGFUNC("e1000_load_firmware");
+
+ if (hw->mac.type < e1000_i210) {
+ DEBUGOUT("Hardware doesn't support loading FW by the driver\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_CONFIG;
+ }
+ if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
+ DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
+ DEBUGOUT("Buffer length failure.\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ /* Clear notification from ROM-FW by reading ICR register */
+ icr = E1000_READ_REG(hw, E1000_ICR_V2);
+
+ /* Reset ROM-FW */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ hicr |= E1000_HICR_FW_RESET_ENABLE;
+ E1000_WRITE_REG(hw, E1000_HICR, hicr);
+ hicr |= E1000_HICR_FW_RESET;
+ E1000_WRITE_REG(hw, E1000_HICR, hicr);
+ E1000_WRITE_FLUSH(hw);
+
+ /* Wait till MAC notifies about its readiness after ROM-FW reset */
+ for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
+ icr = E1000_READ_REG(hw, E1000_ICR_V2);
+ if (icr & E1000_ICR_MNG)
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for timeout */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("FW reset failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Wait till MAC is ready to accept new FW code */
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ if ((fwsm & E1000_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
+ E1000_FWSM_HI_EN_ONLY_MODE))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for timeout */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("FW reset failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant FW code block
+ * into the ram area in DWORDs via 1kB ram addressing window.
+ */
+ for (i = 0; i < length; i++) {
+ if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) {
+ /* Point to correct 1kB ram window */
+ hibba = E1000_HI_FW_BASE_ADDRESS +
+ ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
+ (i / E1000_HI_FW_BLOCK_DWORD_LENGTH));
+
+ E1000_WRITE_REG(hw, E1000_HIBBA, hibba);
+ }
+
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
+ i % E1000_HI_FW_BLOCK_DWORD_LENGTH,
+ *((u32 *)buffer + i));
+ }
+
+ /* Setting this bit tells the ARC that a new FW is ready to execute. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for successful FW start. */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("New FW did not start within timeout period.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return E1000_SUCCESS;
+}
+
+
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.h
new file mode 100755
index 00000000..e6f92c0c
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_manage.h
@@ -0,0 +1,95 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_MANAGE_H_
+#define _E1000_MANAGE_H_
+
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length, u16 offset, u8 *sum);
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr);
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
+ u8 *buffer, u16 length);
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+u8 e1000_calculate_checksum(u8 *buffer, u32 length);
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length);
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+#define E1000_FWSM_FW_VALID 0x00008000
+#define E1000_FWSM_HI_EN_ONLY_MODE 0x4
+
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */
+#define E1000_HI_FW_BASE_ADDRESS 0x10000
+#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */
+#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */
+#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */
+#define E1000_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C 0x02
+#define E1000_HICR_SV 0x04 /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET 0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.c
new file mode 100755
index 00000000..7ec4c564
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.c
@@ -0,0 +1,777 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_mbx.h"
+
+/**
+ * e1000_null_mbx_check_for_flag - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ DEBUGFUNC("e1000_null_mbx_check_flag");
+ UNREFERENCED_2PARAMETER(hw, mbx_id);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mbx_transact - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG *msg,
+ u16 E1000_UNUSEDARG size,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ DEBUGFUNC("e1000_null_mbx_rw_msg");
+ UNREFERENCED_4PARAMETER(hw, msg, size, mbx_id);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_read_mbx");
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_mbx");
+
+ if (size > mbx->size)
+ ret_val = -E1000_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_msg");
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_ack");
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_rst");
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+STATIC s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("e1000_poll_for_msg");
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ * e1000_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+STATIC s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("e1000_poll_for_ack");
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ * e1000_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_read_posted_mbx");
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = e1000_poll_for_msg(hw, mbx_id);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_write_posted_mbx");
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = e1000_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_ops_generic - Initialize mbx function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets the function pointers to no-op functions
+ **/
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ mbx->ops.init_params = e1000_null_ops_generic;
+ mbx->ops.read = e1000_null_mbx_transact;
+ mbx->ops.write = e1000_null_mbx_transact;
+ mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag;
+ mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag;
+ mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+}
+
+/**
+ * e1000_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+STATIC u32 e1000_read_v2p_mailbox(struct e1000_hw *hw)
+{
+ u32 v2p_mailbox = E1000_READ_REG(hw, E1000_V2PMAILBOX(0));
+
+ v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox;
+ hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * e1000_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+STATIC s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = e1000_read_v2p_mailbox(hw);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = E1000_SUCCESS;
+
+ hw->dev_spec.vf.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 e1000_check_for_msg_vf(struct e1000_hw *hw,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("e1000_check_for_msg_vf");
+
+ if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+STATIC s32 e1000_check_for_ack_vf(struct e1000_hw *hw,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("e1000_check_for_ack_vf");
+
+ if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the reset done/indication bit or else ERR_MBX
+ **/
+STATIC s32 e1000_check_for_rst_vf(struct e1000_hw *hw,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("e1000_check_for_rst_vf");
+
+ if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
+ E1000_V2PMAILBOX_RSTI))) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_obtain_mbx_lock_vf");
+
+ /* Take ownership of the buffer */
+ E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
+ ret_val = E1000_SUCCESS;
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+STATIC s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ DEBUGFUNC("e1000_write_mbx_vf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = e1000_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ e1000_check_for_msg_vf(hw, 0);
+ e1000_check_for_ack_vf(hw, 0);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(0), i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * e1000_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+STATIC s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 i;
+
+ DEBUGFUNC("e1000_read_mbx_vf");
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = e1000_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(0), i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = e1000_read_mbx_vf;
+ mbx->ops.write = e1000_write_mbx_vf;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+ mbx->ops.check_for_msg = e1000_check_for_msg_vf;
+ mbx->ops.check_for_ack = e1000_check_for_ack_vf;
+ mbx->ops.check_for_rst = e1000_check_for_rst_vf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return E1000_SUCCESS;
+}
+
+STATIC s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
+{
+ u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = E1000_SUCCESS;
+ E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_msg_pf");
+
+ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
+ **/
+STATIC s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_ack_pf");
+
+ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has asserted a function level reset (VFLR) or else ERR_MBX
+ **/
+STATIC s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ DEBUGFUNC("e1000_check_for_rst_pf");
+
+ if (vflre & (1 << vf_number)) {
+ ret_val = E1000_SUCCESS;
+ E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+ u32 p2v_mailbox;
+
+ DEBUGFUNC("e1000_obtain_mbx_lock_pf");
+
+ /* Take ownership of the buffer */
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
+ if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
+ ret_val = E1000_SUCCESS;
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+STATIC s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_write_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ e1000_check_for_msg_pf(hw, vf_number);
+ e1000_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer*/
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+
+}
+
+/**
+ * e1000_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+STATIC s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("e1000_read_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ case e1000_i354:
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = e1000_read_mbx_pf;
+ mbx->ops.write = e1000_write_mbx_pf;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+ mbx->ops.check_for_msg = e1000_check_for_msg_pf;
+ mbx->ops.check_for_ack = e1000_check_for_ack_pf;
+ mbx->ops.check_for_rst = e1000_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+ default:
+ return E1000_SUCCESS;
+ }
+}
+
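The posted read/write pair above is what a VF-side caller typically uses once e1000_init_mbx_params_vf() has filled in the ops table. Below is a minimal illustrative sketch of that flow, assuming an initialized struct e1000_hw whose mbx.timeout has been made non-zero by the reset path; the helper name vf_mbx_reset_request_example is hypothetical.

#include "e1000_mbx.h"

/* Illustrative sketch: write a request, wait for the PF ack, then wait for
 * and read the PF reply. mbx_id is unused on the VF side, so 0 is passed. */
static s32 vf_mbx_reset_request_example(struct e1000_hw *hw)
{
	u32 msg[1] = { E1000_VF_RESET };
	u32 reply[E1000_VFMAILBOX_SIZE];
	s32 ret_val;

	/* e1000_write_posted_mbx(): ops.write() followed by e1000_poll_for_ack() */
	ret_val = hw->mbx.ops.write_posted(hw, msg, 1, 0);
	if (ret_val)
		return ret_val;

	/* e1000_read_posted_mbx(): e1000_poll_for_msg() followed by ops.read() */
	return hw->mbx.ops.read_posted(hw, reply, E1000_VFMAILBOX_SIZE, 0);
}

Note that write_posted fails fast while mbx.timeout is zero, which is why e1000_init_mbx_params_vf() starts the mailbox timed out until the reset path enables it.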
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.h
new file mode 100755
index 00000000..e9524fc8
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_mbx.h
@@ -0,0 +1,105 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "e1000_api.h"
+
+/* Define mailbox register bits */
+#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+
+/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is E1000_PF_*.
+ * Message ACKs are the value or'd with 0xF0000000
+ */
+/* Msgs below or'd with this are the ACK */
+#define E1000_VT_MSGTYPE_ACK 0x80000000
+/* Msgs below or'd with this are the NACK */
+#define E1000_VT_MSGTYPE_NACK 0x40000000
+/* Indicates that VF is still clear to send requests */
+#define E1000_VT_MSGTYPE_CTS 0x20000000
+#define E1000_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_check_for_msg(struct e1000_hw *, u16);
+s32 e1000_check_for_ack(struct e1000_hw *, u16);
+s32 e1000_check_for_rst(struct e1000_hw *, u16);
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
+s32 e1000_init_mbx_params_vf(struct e1000_hw *);
+s32 e1000_init_mbx_params_pf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
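The macros above encode both a request and its reply: per-message data travels in the MSGINFO field (bits 23:16), and the PF answers by setting the ACK or NACK bit in the word it returns. A hedged sketch of how a caller might build and test such words; the helper names build_set_multicast_msg and pf_nacked are hypothetical.

#include "e1000_mbx.h"

/* Illustrative sketch: the multicast address count rides in the MSGINFO field */
static inline u32 build_set_multicast_msg(u32 count)
{
	return E1000_VF_SET_MULTICAST |
	       ((count << E1000_VT_MSGINFO_SHIFT) &
		E1000_VF_SET_MULTICAST_COUNT_MASK);
}

/* Illustrative sketch: a reply with the NACK bit set means the PF refused */
static inline int pf_nacked(u32 reply)
{
	return !!(reply & E1000_VT_MSGTYPE_NACK);
}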
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.c
new file mode 100755
index 00000000..8be437a8
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.c
@@ -0,0 +1,1377 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+STATIC void e1000_reload_nvm_generic(struct e1000_hw *hw);
+
+/**
+ * e1000_init_nvm_ops_generic - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers to no-op functions
+ **/
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ DEBUGFUNC("e1000_init_nvm_ops_generic");
+
+ /* Initialize function pointers */
+ nvm->ops.init_params = e1000_null_ops_generic;
+ nvm->ops.acquire = e1000_null_ops_generic;
+ nvm->ops.read = e1000_null_read_nvm;
+ nvm->ops.release = e1000_null_nvm_generic;
+ nvm->ops.reload = e1000_reload_nvm_generic;
+ nvm->ops.update = e1000_null_ops_generic;
+ nvm->ops.valid_led_default = e1000_null_led_default;
+ nvm->ops.validate = e1000_null_ops_generic;
+ nvm->ops.write = e1000_null_write_nvm;
+}
+
+/**
+ * e1000_null_read_nvm - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
+ u16 E1000_UNUSEDARG *c)
+{
+ DEBUGFUNC("e1000_null_read_nvm");
+ UNREFERENCED_4PARAMETER(hw, a, b, c);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_nvm_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_nvm_generic");
+ UNREFERENCED_1PARAMETER(hw);
+ return;
+}
+
+/**
+ * e1000_null_led_default - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_null_led_default");
+ UNREFERENCED_2PARAMETER(hw, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_write_nvm - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
+ u16 E1000_UNUSEDARG *c)
+{
+ DEBUGFUNC("e1000_null_write_nvm");
+ UNREFERENCED_4PARAMETER(hw, a, b, c);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+STATIC void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd | E1000_EECD_SK;
+ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+STATIC void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd & ~E1000_EECD_SK;
+ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+STATIC void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u32 mask;
+
+ DEBUGFUNC("e1000_shift_out_eec_bits");
+
+ mask = 0x01 << (count - 1);
+ if (nvm->type == e1000_nvm_eeprom_microwire)
+ eecd &= ~E1000_EECD_DO;
+ else
+ if (nvm->type == e1000_nvm_eeprom_spi)
+ eecd |= E1000_EECD_DO;
+
+ do {
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(nvm->delay_usec);
+
+ e1000_raise_eec_clk(hw, &eecd);
+ e1000_lower_eec_clk(hw, &eecd);
+
+ mask >>= 1;
+ } while (mask);
+
+ eecd &= ~E1000_EECD_DI;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/
+STATIC u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ DEBUGFUNC("e1000_shift_in_eec_bits");
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data <<= 1;
+ e1000_raise_eec_clk(hw, &eecd);
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ eecd &= ~E1000_EECD_DI;
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+
+ return data;
+}
+
+/**
+ * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
+ **/
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+
+ DEBUGFUNC("e1000_poll_eerd_eewr_done");
+
+ for (i = 0; i < attempts; i++) {
+ if (ee_reg == E1000_NVM_POLL_READ)
+ reg = E1000_READ_REG(hw, E1000_EERD);
+ else
+ reg = E1000_READ_REG(hw, E1000_EEWR);
+
+ if (reg & E1000_NVM_RW_REG_DONE)
+ return E1000_SUCCESS;
+
+ usec_delay(5);
+ }
+
+ return -E1000_ERR_NVM;
+}
+
+/**
+ * e1000_acquire_nvm_generic - Generic request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
+{
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+
+ DEBUGFUNC("e1000_acquire_nvm_generic");
+
+ E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ while (timeout) {
+ if (eecd & E1000_EECD_GNT)
+ break;
+ usec_delay(5);
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ timeout--;
+ }
+
+ if (!timeout) {
+ eecd &= ~E1000_EECD_REQ;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ DEBUGOUT("Could not acquire NVM grant\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_standby_nvm - Return EEPROM to standby state
+ * @hw: pointer to the HW structure
+ *
+ * Return the EEPROM to a standby state.
+ **/
+STATIC void e1000_standby_nvm(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ DEBUGFUNC("e1000_standby_nvm");
+
+ if (nvm->type == e1000_nvm_eeprom_microwire) {
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+
+ e1000_raise_eec_clk(hw, &eecd);
+
+ /* Select EEPROM */
+ eecd |= E1000_EECD_CS;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+
+ e1000_lower_eec_clk(hw, &eecd);
+ } else if (nvm->type == e1000_nvm_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
+ }
+}
+
+/**
+ * e1000_stop_nvm - Terminate EEPROM command
+ * @hw: pointer to the HW structure
+ *
+ * Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+void e1000_stop_nvm(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ DEBUGFUNC("e1000_stop_nvm");
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+ /* Pull CS high */
+ eecd |= E1000_EECD_CS;
+ e1000_lower_eec_clk(hw, &eecd);
+ } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
+ /* CS on Microwire is active-high */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ e1000_raise_eec_clk(hw, &eecd);
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+}
+
+/**
+ * e1000_release_nvm_generic - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000_release_nvm_generic(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ DEBUGFUNC("e1000_release_nvm_generic");
+
+ e1000_stop_nvm(hw);
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ eecd &= ~E1000_EECD_REQ;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the EEPROM for reading and writing.
+ **/
+STATIC s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u8 spi_stat_reg;
+
+ DEBUGFUNC("e1000_ready_nvm_eeprom");
+
+ if (nvm->type == e1000_nvm_eeprom_microwire) {
+ /* Clear SK and DI */
+ eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ /* Set CS */
+ eecd |= E1000_EECD_CS;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ } else if (nvm->type == e1000_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(1);
+
+ /* Read "Status Register" repeatedly until the LSB is cleared.
+ * The EEPROM will signal that the command has been completed
+ * by clearing bit 0 of the internal status register. If it's
+ * not cleared within 'timeout', then error out.
+ */
+ while (timeout) {
+ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+ hw->nvm.opcode_bits);
+ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+ if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+ break;
+
+ usec_delay(5);
+ e1000_standby_nvm(hw);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("SPI NVM Status error\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_nvm_spi - Read EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i = 0;
+ s32 ret_val;
+ u16 word_in;
+ u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+ DEBUGFUNC("e1000_read_nvm_spi");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+ e1000_standby_nvm(hw);
+
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ read_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+ /* Read the data. SPI NVMs increment the address with each byte
+ * read and will roll over if reading beyond the end. This allows
+ * us to read the whole NVM from any offset
+ */
+ for (i = 0; i < words; i++) {
+ word_in = e1000_shift_in_eec_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_nvm_microwire - Reads EEPROM using microwire
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i = 0;
+ s32 ret_val;
+ u8 read_opcode = NVM_READ_OPCODE_MICROWIRE;
+
+ DEBUGFUNC("e1000_read_nvm_microwire");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+ for (i = 0; i < words; i++) {
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)(offset + i),
+ nvm->address_bits);
+
+ /* Read the data. For microwire, each word requires the
+ * overhead of setup and tear-down.
+ */
+ data[i] = e1000_shift_in_eec_bits(hw, 16);
+ e1000_standby_nvm(hw);
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eerd = 0;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_read_nvm_eerd");
+
+ /* A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+ E1000_NVM_RW_REG_START;
+
+ E1000_WRITE_REG(hw, E1000_EERD, eerd);
+ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+ if (ret_val)
+ break;
+
+ data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+ E1000_NVM_RW_REG_DATA);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = -E1000_ERR_NVM;
+ u16 widx = 0;
+
+ DEBUGFUNC("e1000_write_nvm_spi");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ while (widx < words) {
+ u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
+
+ e1000_standby_nvm(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode) */
+ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+ nvm->opcode_bits);
+
+ e1000_standby_nvm(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ write_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+ nvm->address_bits);
+
+ /* Loop to allow for up to whole page write of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_eec_bits(hw, word_out, 16);
+ widx++;
+
+ if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+ e1000_standby_nvm(hw);
+ break;
+ }
+ }
+ msec_delay(10);
+ nvm->ops.release(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_microwire - Writes EEPROM using microwire
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using microwire interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val;
+ u32 eecd;
+ u16 words_written = 0;
+ u16 widx = 0;
+
+ DEBUGFUNC("e1000_write_nvm_microwire");
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+ e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE,
+ (u16)(nvm->opcode_bits + 2));
+
+ e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+ e1000_standby_nvm(hw);
+
+ while (words_written < words) {
+ e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE,
+ nvm->opcode_bits);
+
+ e1000_shift_out_eec_bits(hw, (u16)(offset + words_written),
+ nvm->address_bits);
+
+ e1000_shift_out_eec_bits(hw, data[words_written], 16);
+
+ e1000_standby_nvm(hw);
+
+ for (widx = 0; widx < 200; widx++) {
+ eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & E1000_EECD_DO)
+ break;
+ usec_delay(50);
+ }
+
+ if (widx == 200) {
+ DEBUGOUT("NVM Write did not complete\n");
+ ret_val = -E1000_ERR_NVM;
+ goto release;
+ }
+
+ e1000_standby_nvm(hw);
+
+ words_written++;
+ }
+
+ e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE,
+ (u16)(nvm->opcode_bits + 2));
+
+ e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+release:
+ nvm->ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_pba_string_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ DEBUGFUNC("e1000_read_pba_string_generic");
+
+ if ((hw->mac.type >= e1000_i210) &&
+ !e1000_get_flash_presence_i210(hw)) {
+ DEBUGOUT("Flashless no PBA string\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+
+ if (pba_num == NULL) {
+ DEBUGOUT("PBA string buffer was null\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* if nvm_data is not ptr guard the PBA must be in legacy format which
+ * means pba_ptr is actually our second data word for the PBA number
+ * and we can decode it into an ascii string
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ DEBUGOUT("NVM PBA number is not stored as string\n");
+
+ /* make sure callers buffer is big enough to store the PBA */
+ if (pba_num_size < E1000_PBANUM_LENGTH) {
+ DEBUGOUT("PBA string buffer too small\n");
+			return -E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (nvm_data >> 12) & 0xF;
+ pba_num[1] = (nvm_data >> 8) & 0xF;
+ pba_num[2] = (nvm_data >> 4) & 0xF;
+ pba_num[3] = nvm_data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return E1000_SUCCESS;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return -E1000_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(nvm_data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return E1000_SUCCESS;
+}
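+
+/* Worked example (illustrative only): on the legacy path above,
+ * nvm_data = 0x1234 and pba_ptr = 0x5678 fill the buffer with
+ * {1,2,3,4,5,6,'-',0,7,8,'\0'}; the hex-to-char loop then yields the string
+ * "123456-078", which is why pba_num[7] is preset to the numeric value 0.
+ */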
+
+/**
+ * e1000_read_pba_length_generic - Read device part number length
+ * @hw: pointer to the HW structure
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number length from the EEPROM and
+ * stores the value in pba_num_size.
+ **/
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 length;
+
+ DEBUGFUNC("e1000_read_pba_length_generic");
+
+ if (pba_num_size == NULL) {
+ DEBUGOUT("PBA buffer size was null\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* if data is not ptr guard the PBA must be in legacy format */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ *pba_num_size = E1000_PBANUM_LENGTH;
+ return E1000_SUCCESS;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+
+ /* Convert from length in u16 values to u8 chars, add 1 for NULL,
+ * and subtract 2 because length field is included in length.
+ */
+ *pba_num_size = ((u32)length * 2) - 1;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_pba_num_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
+{
+ s32 ret_val;
+ u16 nvm_data;
+
+ DEBUGFUNC("e1000_read_pba_num_generic");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ } else if (nvm_data == NVM_PBA_PTR_GUARD) {
+ DEBUGOUT("NVM Not Supported\n");
+ return -E1000_NOT_IMPLEMENTED;
+ }
+ *pba_num = (u32)(nvm_data << 16);
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ *pba_num |= nvm_data;
+
+ return E1000_SUCCESS;
+}
+
+
+/**
+ * e1000_read_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @max_pba_block_size: PBA block size limit
+ * @pba: pointer to output PBA structure
+ *
+ * Reads PBA from EEPROM image when eeprom_buf is not NULL.
+ * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct e1000_pba *pba)
+{
+ s32 ret_val;
+ u16 pba_block_size;
+
+ if (pba == NULL)
+ return -E1000_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > NVM_PBA_OFFSET_1) {
+ pba->word[0] = eeprom_buf[NVM_PBA_OFFSET_0];
+ pba->word[1] = eeprom_buf[NVM_PBA_OFFSET_1];
+ } else {
+ return -E1000_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == NVM_PBA_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return -E1000_ERR_PARAM;
+
+ ret_val = e1000_get_pba_block_size(hw, eeprom_buf,
+ eeprom_buf_size,
+ &pba_block_size);
+ if (ret_val)
+ return ret_val;
+
+ if (pba_block_size > max_pba_block_size)
+ return -E1000_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_read_nvm(hw, pba->word[1],
+ pba_block_size,
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba_block_size)) {
+ memcpy(pba->pba_block,
+ &eeprom_buf[pba->word[1]],
+ pba_block_size * sizeof(u16));
+ } else {
+ return -E1000_ERR_PARAM;
+ }
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba: pointer to PBA structure
+ *
+ * Writes PBA to EEPROM image when eeprom_buf is not NULL.
+ * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct e1000_pba *pba)
+{
+ s32 ret_val;
+
+ if (pba == NULL)
+ return -E1000_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_write_nvm(hw, NVM_PBA_OFFSET_0, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > NVM_PBA_OFFSET_1) {
+ eeprom_buf[NVM_PBA_OFFSET_0] = pba->word[0];
+ eeprom_buf[NVM_PBA_OFFSET_1] = pba->word[1];
+ } else {
+ return -E1000_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == NVM_PBA_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return -E1000_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_write_nvm(hw, pba->word[1],
+ pba->pba_block[0],
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba->pba_block[0])) {
+ memcpy(&eeprom_buf[pba->word[1]],
+ pba->pba_block,
+ pba->pba_block[0] * sizeof(u16));
+ } else {
+ return -E1000_ERR_PARAM;
+ }
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_pba_block_size
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba_block_size: pointer to output variable
+ *
+ * Returns the size of the PBA block in words. The function operates on the
+ * EEPROM image if the eeprom_buf pointer is not NULL; otherwise it accesses
+ * the physical EEPROM device.
+ *
+ **/
+s32 e1000_get_pba_block_size(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size)
+{
+ s32 ret_val;
+ u16 pba_word[2];
+ u16 length;
+
+ DEBUGFUNC("e1000_get_pba_block_size");
+
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 2, &pba_word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > NVM_PBA_OFFSET_1) {
+ pba_word[0] = eeprom_buf[NVM_PBA_OFFSET_0];
+ pba_word[1] = eeprom_buf[NVM_PBA_OFFSET_1];
+ } else {
+ return -E1000_ERR_PARAM;
+ }
+ }
+
+ if (pba_word[0] == NVM_PBA_PTR_GUARD) {
+ if (eeprom_buf == NULL) {
+ ret_val = e1000_read_nvm(hw, pba_word[1] + 0, 1,
+ &length);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > pba_word[1])
+ length = eeprom_buf[pba_word[1] + 0];
+ else
+ return -E1000_ERR_PARAM;
+ }
+
+ if (length == 0xFFFF || length == 0)
+ return -E1000_ERR_NVM_PBA_SECTION;
+ } else {
+ /* PBA number in legacy format, there is no PBA Block. */
+ length = 0;
+ }
+
+ if (pba_block_size != NULL)
+ *pba_block_size = length;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr_generic - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ * Since devices with two ports use the same EEPROM, we increment the
+ * last bit in the MAC address for the second port.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = E1000_READ_REG(hw, E1000_RAH(0));
+ rar_low = E1000_READ_REG(hw, E1000_RAL(0));
+
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_validate_nvm_checksum_generic");
+
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+ DEBUGOUT("NVM Checksum Invalid\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_update_nvm_checksum_generic - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ DEBUGFUNC("e1000_update_nvm_checksum");
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ if (ret_val)
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_reload_nvm_generic - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+STATIC void e1000_reload_nvm_generic(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+
+ DEBUGFUNC("e1000_reload_nvm_generic");
+
+ usec_delay(10);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_get_fw_version - Get firmware version information
+ * @hw: pointer to the HW structure
+ * @fw_vers: pointer to output version structure
+ *
+ * Unsupported or not-present features return 0 in the version structure.
+ **/
+void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+ u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+ u8 q, hval, rem, result;
+ u16 comb_verh, comb_verl, comb_offset;
+
+ memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+ /* Basic EEPROM version numbers; the bits used vary by part and by the
+ * tool used to create the NVM images. */
+ /* Check which data format we have */
+ switch (hw->mac.type) {
+ case e1000_i211:
+ e1000_read_invm_version(hw, fw_vers);
+ return;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ /* Use this format unless an EETRACK ID exists;
+ * in that case use the alternate format.
+ */
+ if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+ goto etrack_id;
+ }
+ break;
+ case e1000_i210:
+ if (!(e1000_get_flash_presence_i210(hw))) {
+ e1000_read_invm_version(hw, fw_vers);
+ return;
+ }
+ /* fall through */
+ case e1000_i350:
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ /* find combo image version */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) &&
+ (comb_offset != NVM_VER_INVALID)) {
+
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* get Option Rom version if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != NVM_VER_INVALID) &&
+ (comb_verl != NVM_VER_INVALID))) {
+
+ fw_vers->or_valid = true;
+ fw_vers->or_major =
+ comb_verl >> NVM_COMB_VER_SHFT;
+ fw_vers->or_build =
+ (comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT);
+ fw_vers->or_patch =
+ comb_verh & NVM_COMB_VER_MASK;
+ }
+ }
+ break;
+ default:
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+
+ /* check for old style version format in newer images*/
+ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+ eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+ } else {
+ eeprom_verl = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ }
+ /* Convert the minor value to hex before assigning it to the output
+ * struct. The value to be converted will not be higher than 99, per
+ * tool output.
+ */
+ q = eeprom_verl / NVM_HEX_CONV;
+ hval = q * NVM_HEX_TENS;
+ rem = eeprom_verl % NVM_HEX_CONV;
+ result = hval + rem;
+ fw_vers->eep_minor = result;
+
+etrack_id:
+ if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+ | eeprom_verl;
+ }
+ return;
+}
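+
+/* Editorial note: illustrative worked example, not part of the upstream
+ * sources. Assuming NVM_HEX_CONV is 16 and NVM_HEX_TENS is 10 (their values
+ * are defined elsewhere in the shared code), the q/hval/rem arithmetic above
+ * re-reads a BCD-style raw minor value as decimal; e.g. for a raw minor of
+ * 0x23:
+ *
+ *   q    = 0x23 / 16 = 2
+ *   hval = 2 * 10    = 20
+ *   rem  = 0x23 % 16 = 3
+ *   eep_minor = 20 + 3 = 23
+ */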
+
+
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.h
new file mode 100755
index 00000000..dee1f62f
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_nvm.h
@@ -0,0 +1,98 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+struct e1000_pba {
+ u16 word[2];
+ u16 *pba_block;
+};
+
+struct e1000_fw_version {
+ u32 etrack_id;
+ u16 eep_major;
+ u16 eep_minor;
+ u16 eep_build;
+
+ u8 invm_major;
+ u8 invm_minor;
+ u8 invm_img_type;
+
+ bool or_valid;
+ u16 or_major;
+ u16 or_build;
+ u16 or_patch;
+};
+
+
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+void e1000_null_nvm_generic(struct e1000_hw *hw);
+s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data);
+s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);
+
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
+s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct e1000_pba *pba);
+s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct e1000_pba *pba);
+s32 e1000_get_pba_block_size(struct e1000_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size);
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000_stop_nvm(struct e1000_hw *hw);
+void e1000_release_nvm_generic(struct e1000_hw *hw);
+void e1000_get_fw_version(struct e1000_hw *hw,
+ struct e1000_fw_version *fw_vers);
+
+#define E1000_STM_OPCODE 0xDB00
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.c
new file mode 100755
index 00000000..7270edfa
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.c
@@ -0,0 +1,83 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+/*
+ * NOTE: the following routines using the e1000
+ * naming style are provided to the shared
+ * code but are OS specific
+ */
+
+void
+e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ return;
+}
+
+void
+e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ *value = 0;
+ return;
+}
+
+void
+e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+}
+
+void
+e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+}
+
+
+/*
+ * Read the PCI Express capabilities
+ */
+int32_t
+e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ return E1000_NOT_IMPLEMENTED;
+}
+
+/*
+ * Write the PCI Express capabilities
+ */
+int32_t
+e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ return E1000_NOT_IMPLEMENTED;
+}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.h
new file mode 100755
index 00000000..438641e2
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_osdep.h
@@ -0,0 +1,182 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+
+#include "../e1000_logs.h"
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define usec_delay_irq(x) DELAY(x)
+#define msec_delay(x) DELAY(1000*(x))
+#define msec_delay_irq(x) DELAY(1000*(x))
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n");
+#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args)
+
+#define UNREFERENCED_PARAMETER(_p)
+#define UNREFERENCED_1PARAMETER(_p)
+#define UNREFERENCED_2PARAMETER(_p, _q)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+
+#define FALSE 0
+#define TRUE 1
+
+#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
+
+/* Mutex used in the shared code */
+#define E1000_MUTEX uintptr_t
+#define E1000_MUTEX_INIT(mutex) (*(mutex) = 0)
+#define E1000_MUTEX_LOCK(mutex) (*(mutex) = 1)
+#define E1000_MUTEX_UNLOCK(mutex) (*(mutex) = 0)
+
+typedef uint64_t u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+typedef int64_t s64;
+typedef int32_t s32;
+typedef int16_t s16;
+typedef int8_t s8;
+typedef int bool;
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
+
+#define E1000_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+
+#define E1000_PCI_REG_WRITE(reg, value) do { \
+ E1000_PCI_REG((reg)) = (value); \
+} while (0)
+
+#define E1000_PCI_REG_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
+
+#define E1000_PCI_REG_ARRAY_ADDR(hw, reg, index) \
+ E1000_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
+
+static inline uint32_t e1000_read_addr(volatile void* addr)
+{
+ return E1000_PCI_REG(addr);
+}
+
+/* Necessary defines */
+#define E1000_MRQC_ENABLE_MASK 0x00000007
+#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define E1000_ALL_FULL_DUPLEX ( \
+ ADVERTISE_10_FULL | ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
+
+#define M88E1543_E_PHY_ID 0x01410EA0
+#define NAHUM6LP_HW
+#define ULP_SUPPORT
+
+#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
+#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+
+/* Register READ/WRITE macros */
+
+#define E1000_READ_REG(hw, reg) \
+ e1000_read_addr(E1000_PCI_REG_ADDR((hw), (reg)))
+
+#define E1000_WRITE_REG(hw, reg, value) \
+ E1000_PCI_REG_WRITE(E1000_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define E1000_READ_REG_ARRAY(hw, reg, index) \
+ E1000_PCI_REG(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)))
+
+#define E1000_WRITE_REG_ARRAY(hw, reg, index, value) \
+ E1000_PCI_REG_WRITE(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
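+/* Editorial note: illustrative expansion, not part of the upstream sources.
+ * With the definitions above, a register read such as
+ *
+ *   status = E1000_READ_REG(hw, E1000_STATUS);
+ *
+ * expands to
+ *
+ *   status = e1000_read_addr(
+ *     (volatile uint32_t *)((char *)(hw)->hw_addr + E1000_STATUS));
+ *
+ * i.e. a plain MMIO load from the BAR mapped at hw->hw_addr, and
+ * E1000_WRITE_FLUSH() is simply a read of E1000_STATUS used to flush
+ * posted writes out to the device.
+ */
+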
+#define E1000_ACCESS_PANIC(x, hw, reg, value) \
+ rte_panic("%s:%u\t" RTE_STR(x) "(%p, 0x%x, 0x%x)", \
+ __FILE__, __LINE__, (hw), (reg), (unsigned int)(value))
+
+/*
+ * To be able to do IO write, we need to map IO BAR
+ * (bar 2/4 depending on device).
+ * Right now mapping multiple BARs is not supported by DPDK.
+ * Fortunately we need it only for legacy hw support.
+ */
+
+#define E1000_WRITE_REG_IO(hw, reg, value) \
+ E1000_WRITE_REG(hw, reg, value)
+
+/*
+ * Not implemented.
+ */
+
+#define E1000_READ_FLASH_REG(hw, reg) \
+ (E1000_ACCESS_PANIC(E1000_READ_FLASH_REG, hw, reg, 0), 0)
+
+#define E1000_READ_FLASH_REG16(hw, reg) \
+ (E1000_ACCESS_PANIC(E1000_READ_FLASH_REG16, hw, reg, 0), 0)
+
+#define E1000_WRITE_FLASH_REG(hw, reg, value) \
+ E1000_ACCESS_PANIC(E1000_WRITE_FLASH_REG, hw, reg, value)
+
+#define E1000_WRITE_FLASH_REG16(hw, reg, value) \
+ E1000_ACCESS_PANIC(E1000_WRITE_FLASH_REG16, hw, reg, value)
+
+#define STATIC static
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#define false FALSE
+#define true TRUE
+
+#endif /* _E1000_OSDEP_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.c
new file mode 100755
index 00000000..e214f179
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.c
@@ -0,0 +1,4273 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "e1000_api.h"
+
+STATIC s32 e1000_wait_autoneg(struct e1000_hw *hw);
+STATIC s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read, bool page_set);
+STATIC u32 e1000_get_phy_addr_for_hv_page(u32 page);
+STATIC s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read);
+
+/* Cable length tables */
+STATIC const u16 e1000_m88_cable_length_table[] = {
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_m88_cable_length_table) / \
+ sizeof(e1000_m88_cable_length_table[0]))
+
+STATIC const u16 e1000_igp_2_cable_length_table[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
+ 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
+ 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
+ 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
+ 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
+ 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
+ 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
+ 124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_igp_2_cable_length_table) / \
+ sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ * e1000_init_phy_ops_generic - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers to no-op functions
+ **/
+void e1000_init_phy_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ DEBUGFUNC("e1000_init_phy_ops_generic");
+
+ /* Initialize function pointers */
+ phy->ops.init_params = e1000_null_ops_generic;
+ phy->ops.acquire = e1000_null_ops_generic;
+ phy->ops.check_polarity = e1000_null_ops_generic;
+ phy->ops.check_reset_block = e1000_null_ops_generic;
+ phy->ops.commit = e1000_null_ops_generic;
+ phy->ops.force_speed_duplex = e1000_null_ops_generic;
+ phy->ops.get_cfg_done = e1000_null_ops_generic;
+ phy->ops.get_cable_length = e1000_null_ops_generic;
+ phy->ops.get_info = e1000_null_ops_generic;
+ phy->ops.set_page = e1000_null_set_page;
+ phy->ops.read_reg = e1000_null_read_reg;
+ phy->ops.read_reg_locked = e1000_null_read_reg;
+ phy->ops.read_reg_page = e1000_null_read_reg;
+ phy->ops.release = e1000_null_phy_generic;
+ phy->ops.reset = e1000_null_ops_generic;
+ phy->ops.set_d0_lplu_state = e1000_null_lplu_state;
+ phy->ops.set_d3_lplu_state = e1000_null_lplu_state;
+ phy->ops.write_reg = e1000_null_write_reg;
+ phy->ops.write_reg_locked = e1000_null_write_reg;
+ phy->ops.write_reg_page = e1000_null_write_reg;
+ phy->ops.power_up = e1000_null_phy_generic;
+ phy->ops.power_down = e1000_null_phy_generic;
+ phy->ops.read_i2c_byte = e1000_read_i2c_byte_null;
+ phy->ops.write_i2c_byte = e1000_write_i2c_byte_null;
+ phy->ops.cfg_on_link_up = e1000_null_ops_generic;
+}
+
+/**
+ * e1000_null_set_page - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_null_set_page");
+ UNREFERENCED_2PARAMETER(hw, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_read_reg - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_null_read_reg");
+ UNREFERENCED_3PARAMETER(hw, offset, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_phy_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_phy_generic");
+ UNREFERENCED_1PARAMETER(hw);
+ return;
+}
+
+/**
+ * e1000_null_lplu_state - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw,
+ bool E1000_UNUSEDARG active)
+{
+ DEBUGFUNC("e1000_null_lplu_state");
+ UNREFERENCED_2PARAMETER(hw, active);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_write_reg - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_null_write_reg");
+ UNREFERENCED_3PARAMETER(hw, offset, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_i2c_byte_null - No-op function, return 0
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: data value read
+ *
+ **/
+s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG byte_offset,
+ u8 E1000_UNUSEDARG dev_addr,
+ u8 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_read_i2c_byte_null");
+ UNREFERENCED_4PARAMETER(hw, byte_offset, dev_addr, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_i2c_byte_null - No-op function, return 0
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: data value to write
+ *
+ **/
+s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG byte_offset,
+ u8 E1000_UNUSEDARG dev_addr,
+ u8 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_write_i2c_byte_null");
+ UNREFERENCED_4PARAMETER(hw, byte_offset, dev_addr, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_reset_block_generic - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise
+ * return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
+{
+ u32 manc;
+
+ DEBUGFUNC("e1000_check_reset_block");
+
+ manc = E1000_READ_REG(hw, E1000_MANC);
+
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+ E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+s32 e1000_get_phy_id(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_id;
+ u16 retry_count = 0;
+
+ DEBUGFUNC("e1000_get_phy_id");
+
+ if (!phy->ops.read_reg)
+ return E1000_SUCCESS;
+
+ while (retry_count < 2) {
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id = (u32)(phy_id << 16);
+ usec_delay(20);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+ if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
+ return E1000_SUCCESS;
+
+ retry_count++;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_reset_dsp_generic - Reset PHY DSP
+ * @hw: pointer to the HW structure
+ *
+ * Reset the digital signal processor.
+ **/
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_phy_reset_dsp_generic");
+
+ if (!hw->phy.ops.write_reg)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+ if (ret_val)
+ return ret_val;
+
+ return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+}
+
+/**
+ * e1000_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ DEBUGFUNC("e1000_read_phy_reg_mdic");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed.
+ * The timeout is increased because testing showed failures with
+ * the lower timeout.
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usec_delay_irq(50);
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
+ *data = (u16) mdic;
+
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usec_delay_irq(100);
+
+ return E1000_SUCCESS;
+}
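+
+/* Editorial note: illustrative usage sketch, not part of the upstream
+ * sources. The MDIC helpers here follow one pattern: compose a single
+ * 32-bit MDIC word (data in the low 16 bits for writes, plus the register
+ * offset, PHY address and opcode), write it to E1000_MDIC, poll for
+ * E1000_MDIC_READY, then check E1000_MDIC_ERROR and the echoed offset.
+ * A minimal caller might look like:
+ */
+#if 0
+ u16 id1;
+
+ if (e1000_read_phy_reg_mdic(hw, PHY_ID1, &id1) == E1000_SUCCESS)
+  DEBUGOUT1("PHY_ID1 = 0x%04x\n", id1);
+#endif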
+
+/**
+ * e1000_write_phy_reg_mdic - Write MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ DEBUGFUNC("e1000_write_phy_reg_mdic");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to write the desired data.
+ */
+ mdic = (((u32)data) |
+ (offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write completed.
+ * The timeout is increased because testing showed failures with
+ * the lower timeout.
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usec_delay_irq(50);
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
+
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usec_delay_irq(100);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_phy_reg_i2c - Read PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the i2c interface and stores the
+ * retrieved information in data.
+ **/
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+
+ DEBUGFUNC("e1000_read_phy_reg_i2c");
+
+ /* Set up Op-code, Phy Address, and register address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ (E1000_I2CCMD_OPCODE_READ));
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+ /* Need to byte-swap the 16-bit value. */
+ *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+ return E1000_SUCCESS;
+}
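+
+/* Editorial note: illustrative worked example, not part of the upstream
+ * sources. The I2CCMD data lanes arrive byte-swapped with respect to the
+ * 16-bit PHY register value, so the swap above turns a raw low word of
+ * 0x3412 into the logical value 0x1234:
+ *
+ *   (0x3412 >> 8) & 0x00FF = 0x0034
+ *   (0x3412 << 8) & 0xFF00 = 0x1200
+ *   result                 = 0x1234
+ */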
+
+/**
+ * e1000_write_phy_reg_i2c - Write PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the i2c interface.
+ **/
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+ u16 phy_data_swapped;
+
+ DEBUGFUNC("e1000_write_phy_reg_i2c");
+
+ /* Prevent overwriting the SFP I2C EEPROM, which is at address A0. */
+ if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) {
+ DEBUGOUT1("PHY I2C Address %d is out of range.\n",
+ hw->phy.addr);
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Swap the data bytes for the I2C interface */
+ phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+ /* Set up Op-code, Phy Address, and register address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * PHY to write the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_WRITE |
+ phy_data_swapped);
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_sfp_data_byte - Reads SFP module data.
+ * @hw: pointer to the HW structure
+ * @offset: byte location offset to be read
+ * @data: read data buffer pointer
+ *
+ * Reads one byte of SFP module data stored in the EEPROM resident on the
+ * SFP module or in the SFP diagnostic area.
+ * The function should be called with
+ * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access, or
+ * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameter
+ * access.
+ **/
+s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
+{
+ u32 i = 0;
+ u32 i2ccmd = 0;
+ u32 data_local = 0;
+
+ DEBUGFUNC("e1000_read_sfp_data_byte");
+
+ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+ DEBUGOUT("I2CCMD command address exceeds upper limit\n");
+ return -E1000_ERR_PHY;
+ }
+
+ /* Set up Op-code and EEPROM Address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * EEPROM to retrieve the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_READ);
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ data_local = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (data_local & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(data_local & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (data_local & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+ *data = (u8) data_local & 0xFF;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_sfp_data_byte - Writes SFP module data.
+ * @hw: pointer to the HW structure
+ * @offset: byte location offset to write to
+ * @data: data to write
+ *
+ * Writes one byte of SFP module data stored in the EEPROM resident on the
+ * SFP module or in the SFP diagnostic area.
+ * The function should be called with
+ * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access, or
+ * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameter
+ * access.
+ **/
+s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
+{
+ u32 i = 0;
+ u32 i2ccmd = 0;
+ u32 data_local = 0;
+
+ DEBUGFUNC("e1000_write_sfp_data_byte");
+
+ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+ DEBUGOUT("I2CCMD command address exceeds upper limit\n");
+ return -E1000_ERR_PHY;
+ }
+ /* The programming interface is 16 bits wide,
+ * so we need to read the whole word first,
+ * then update the appropriate byte lane and write
+ * the updated word back.
+ */
+ /* Set up Op-code and EEPROM Address in the I2CCMD
+ * register. The MAC will take care of interfacing
+ * with an EEPROM to write the data given.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_READ);
+ /* Set a command to read single word */
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ /* Poll the ready bit to see if the last
+ * launched I2C operation has completed.
+ */
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY) {
+ /* Check if this is READ or WRITE phase */
+ if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
+ E1000_I2CCMD_OPCODE_READ) {
+ /* Write the selected byte
+ * lane and update whole word
+ */
+ data_local = i2ccmd & 0xFF00;
+ data_local |= data;
+ i2ccmd = ((offset <<
+ E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_WRITE | data_local);
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+ } else {
+ break;
+ }
+ }
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+ return E1000_SUCCESS;
+}
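+
+/* Editorial note: illustrative worked example, not part of the upstream
+ * sources. The write path above is a read-modify-write on a 16-bit word:
+ * if the read phase returns 0xCD12 and the byte to write is 0x34, the
+ * routine keeps the upper lane and writes back 0xCD34:
+ *
+ *   data_local = 0xCD12 & 0xFF00 = 0xCD00
+ *   data_local |= 0x34           -> 0xCD34
+ */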
+
+/**
+ * e1000_read_phy_reg_m88 - Read m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires the semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Releases any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_phy_reg_m88");
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_m88 - Write m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_write_phy_reg_m88");
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_page_igp - Set page as on IGP-like PHY(s)
+ * @hw: pointer to the HW structure
+ * @page: page to set (shifted left when necessary)
+ *
+ * Sets PHY page required for PHY register access. Assumes semaphore is
+ * already acquired. Note, this function sets phy.addr to 1 so the caller
+ * must set it appropriately (if necessary) after this function returns.
+ **/
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
+{
+ DEBUGFUNC("e1000_set_page_igp");
+
+ DEBUGOUT1("Setting page 0x%x\n", page);
+
+ hw->phy.addr = 1;
+
+ return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
+}
+
+/**
+ * __e1000_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+STATIC s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("__e1000_read_phy_reg_igp");
+
+ if (!locked) {
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (!ret_val)
+ ret_val = e1000_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores the
+ * retrieved information in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000_read_phy_reg_igp_locked - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+STATIC s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_phy_reg_igp");
+
+ if (!locked) {
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (!ret_val)
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
+ offset,
+ data);
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000_write_phy_reg_igp_locked - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then reads the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release any acquired semaphores before exiting.
+ **/
+STATIC s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+
+ DEBUGFUNC("__e1000_read_kmrn_reg");
+
+ if (!locked) {
+ s32 ret_val = E1000_SUCCESS;
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_kmrn_reg_generic - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset using the
+ * kumeran interface. The information retrieved is stored in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_read_kmrn_reg_locked - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the kumeran interface. The
+ * information retrieved is stored in data.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_kmrn_reg - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then write the data to PHY register
+ * at the offset using the kumeran interface. Release any acquired semaphores
+ * before exiting.
+ **/
+STATIC s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+
+ DEBUGFUNC("e1000_write_kmrn_reg_generic");
+
+ if (!locked) {
+ s32 ret_val = E1000_SUCCESS;
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | data;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_kmrn_reg_generic - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to the PHY register at the offset
+ * using the kumeran interface. Release the acquired semaphore before exiting.
+ **/
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_write_kmrn_reg_locked - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Write the data to PHY register at the offset using the kumeran interface.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * e1000_set_master_slave_mode - Setup PHY for Master/slave mode
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Master/slave mode
+ **/
+STATIC s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Resolve Master/Slave mode */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
+ hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
+ ((phy_data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) : e1000_ms_auto;
+
+ switch (hw->phy.ms_type) {
+ case e1000_ms_force_master:
+ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ phy_data |= CR_1000T_MS_ENABLE;
+ phy_data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ /* fall-through */
+ default:
+ break;
+ }
+
+ return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
+}
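+
+/* Editorial note: illustrative summary, not part of the upstream sources.
+ * The two 1000BASE-T control bits map onto the three ms_type values as
+ * follows, which is the encoding the switch above writes back for the
+ * requested hw->phy.ms_type:
+ *
+ *   CR_1000T_MS_ENABLE  CR_1000T_MS_VALUE  meaning
+ *   0                   x                  e1000_ms_auto (hw resolves)
+ *   1                   1                  e1000_ms_force_master
+ *   1                   0                  e1000_ms_force_slave
+ */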
+
+/**
+ * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_setup_82577");
+
+ if (hw->phy.type == e1000_phy_82580) {
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ return ret_val;
+ }
+ }
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+ /* Enable downshift */
+ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+ ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set MDI/MDIX mode */
+ ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+ /* Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ */
+ switch (hw->phy.mdix) {
+ case 1:
+ break;
+ case 2:
+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+ ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ return e1000_set_master_slave_mode(hw);
+}
+
+/**
+ * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for m88 PHYs. If necessary, the transmit
+ * clock and downshift values are also set.
+ **/
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_setup_m88");
+
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* For BM PHY this bit is downshift enable */
+ if (phy->type != e1000_phy_bm)
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (phy->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (phy->disable_polarity_correction)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ /* Enable downshift on BM (disabled by default) */
+ if (phy->type == e1000_phy_bm) {
+ /* For 82574/82583, first disable then enable downshift */
+ if (phy->id == BME1000_E_PHY_ID_R2) {
+ phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ /* Commit the changes. */
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
+ phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
+ }
+
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((phy->type == e1000_phy_m88) &&
+ (phy->revision < E1000_REVISION_4) &&
+ (phy->id != BME1000_E_PHY_ID_R2)) {
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((phy->revision == E1000_REVISION_2) &&
+ (phy->id == M88E1111_I_PHY_ID)) {
+ /* 82573L PHY - set the downshift counter to 5x. */
+ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ }
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) {
+ /* Set PHY page 0, register 29 to 0x0003 */
+ ret_val = phy->ops.write_reg(hw, 29, 0x0003);
+ if (ret_val)
+ return ret_val;
+
+ /* Set PHY page 0, register 30 to 0x0000 */
+ ret_val = phy->ops.write_reg(hw, 30, 0x0000);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Commit the changes. */
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ if (phy->type == e1000_phy_82578) {
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* 82578 PHY - set the downshift count to 1x. */
+ phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
+ phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHYs.
+ * Also enables and sets the downshift parameters.
+ **/
+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_setup_m88_gen2");
+
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (phy->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ /* M88E1112 does not support this mode */
+ if (phy->id != M88E1112_E_PHY_ID) {
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ }
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (phy->disable_polarity_correction)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ /* Enable downshift and set it to 6X */
+ if (phy->id == M88E1543_E_PHY_ID) {
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
+ ret_val =
+ phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
+ phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
+ phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
+
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Commit the changes. */
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ ret_val = e1000_set_master_slave_mode(hw);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_setup_igp - Setup igp PHY's for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ * igp PHYs.
+ **/
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_copper_link_setup_igp");
+
+
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ return ret_val;
+ }
+
+ /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+ * timeout issues when LFS is enabled.
+ */
+ msec_delay(100);
+
+ /* The NVM settings will configure LPLU in D3 for
+ * non-IGP1 PHYs.
+ */
+ if (phy->type == e1000_phy_igp) {
+ /* disable lplu d3 during driver init */
+ ret_val = hw->phy.ops.set_d3_lplu_state(hw, false);
+ if (ret_val) {
+ DEBUGOUT("Error Disabling LPLU D3\n");
+ return ret_val;
+ }
+ }
+
+ /* disable lplu d0 during driver init */
+ if (hw->phy.ops.set_d0_lplu_state) {
+ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
+ if (ret_val) {
+ DEBUGOUT("Error Disabling LPLU D0\n");
+ return ret_val;
+ }
+ }
+ /* Configure mdi-mdix settings */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (phy->mdix) {
+ case 1:
+ data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if (hw->mac.autoneg) {
+ /* When the autonegotiation advertisement is 1000Mbps only, disable
+ * SmartSpeed and enable automatic Master/Slave resolution (the
+ * hardware default).
+ */
+ if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set auto Master/Slave resolution process */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~CR_1000T_MS_ENABLE;
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_set_master_slave_mode(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MII auto-neg advertisement register and/or the 1000T control
+ * register and if the PHY is already setup for auto-negotiation, then
+ * return successful. Otherwise, setup advertisement and flow control to
+ * the appropriate values for the wanted auto-negotiation.
+ **/
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg = 0;
+
+ DEBUGFUNC("e1000_phy_setup_autoneg");
+
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+ &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+ NWAY_AR_100TX_HD_CAPS |
+ NWAY_AR_10T_FD_CAPS |
+ NWAY_AR_10T_HD_CAPS);
+ mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+ DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+ DEBUGOUT("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+ DEBUGOUT("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+ DEBUGOUT("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+ DEBUGOUT("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+ DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+ DEBUGOUT("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+ * negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /* Flow control (Rx & Tx) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_rx_pause:
+ /* Rx Flow control is enabled, and Tx Flow control is
+ * disabled, by a software over-ride.
+ *
+ * Since there really isn't a way to advertise that we are
+ * capable of Rx Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric Rx PAUSE. Later
+ * (in e1000_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case e1000_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+
+ return ret_val;
+}
+
+/**
+ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Performs initial bounds checking on autoneg advertisement parameter, then
+ * configures it to advertise the full capability. Sets up the PHY to autoneg
+ * and restarts the negotiation process with the link partner. If
+ * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
+ **/
+s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ DEBUGFUNC("e1000_copper_link_autoneg");
+
+ /* Perform some bounds checking on the autoneg advertisement
+ * parameter.
+ */
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (!phy->autoneg_advertised)
+ phy->autoneg_advertised = phy->autoneg_mask;
+
+ DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ DEBUGOUT("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, from a callback routine)?
+ */
+ if (phy->autoneg_wait_to_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
+ hw->mac.get_link_status = true;
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_generic - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link; once link is established, the
+ * collision distance and flow control are configured. If link is
+ * not established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ bool link;
+
+ DEBUGFUNC("e1000_setup_copper_link_generic");
+
+ if (hw->mac.autoneg) {
+ /* Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+ DEBUGOUT("Forcing Speed and Duplex\n");
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+ &link);
+ if (ret_val)
+ return ret_val;
+
+ if (link) {
+ DEBUGOUT("Valid link established!!!\n");
+ hw->mac.ops.config_collision_dist(hw);
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ } else {
+ DEBUGOUT("Unable to establish link!!!\n");
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link and returns
+ * successful if link up is successful, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("IGP PSCR: %X\n", phy_data);
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Resets the PHY to commit the
+ * changes. If time expires while waiting for link up, we reset the DSP.
+ * After reset, TX_CLK and CRS on Tx must be set. Returns success upon
+ * completion, else returns the corresponding error code.
+ **/
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
+
+ /* I210 and I211 devices support Auto-Crossover in forced operation. */
+ if (phy->type != e1000_phy_i210) {
+ /* Clear Auto-Crossover to force MDI manually. M88E1000
+ * requires MDI forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Reset the phy to commit changes. */
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ bool reset_dsp = true;
+
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case I210_I_PHY_ID:
+ reset_dsp = false;
+ break;
+ default:
+ if (hw->phy.type != e1000_phy_m88)
+ reset_dsp = false;
+ break;
+ }
+
+ if (!reset_dsp) {
+ DEBUGOUT("Link taking longer than expected.\n");
+ } else {
+ /* We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = phy->ops.write_reg(hw,
+ M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_phy_reset_dsp_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
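+ /* The TX_CLK and CRS-on-Tx fix-up below applies only to the original
+ * M88E1000-family PHY; newer M88, I347 and I210 PHYs return early.
+ */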
+ if (hw->phy.type != e1000_phy_m88)
+ return E1000_SUCCESS;
+
+ if (hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1340M_E_PHY_ID ||
+ hw->phy.id == M88E1112_E_PHY_ID)
+ return E1000_SUCCESS;
+ if (hw->phy.id == I210_I_PHY_ID)
+ return E1000_SUCCESS;
+ if ((hw->phy.id == M88E1543_E_PHY_ID) ||
+ (hw->phy.id == M88E1512_E_PHY_ID))
+ return E1000_SUCCESS;
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Resetting the phy means we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock from
+ * the reset value of 2.5MHz.
+ */
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ * @hw: pointer to the HW structure
+ *
+ * Forces the speed and duplex settings of the PHY.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable MDI-X support for 10/100 */
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IFE_PMC_AUTO_MDIX;
+ data &= ~IFE_PMC_FORCE_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("IFE PMC: %X\n", data);
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ * Forces speed and duplex on the PHY by doing the following: disable flow
+ * control, force speed/duplex on the MAC, disable auto speed detection,
+ * disable auto-negotiation, configure duplex, configure speed, configure
+ * the collision distance, write configuration to CTRL register. The
+ * caller must write to the PHY_CONTROL register for these settings to
+ * take effect.
+ **/
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
+
+ /* Turn off flow control when forcing speed/duplex */
+ hw->fc.current_mode = e1000_fc_none;
+
+ /* Force speed/duplex on the mac */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~E1000_CTRL_SPD_SEL;
+
+ /* Disable Auto Speed Detection */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Disable autoneg on the phy */
+ *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Forcing Full or Half Duplex? */
+ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+ ctrl &= ~E1000_CTRL_FD;
+ *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ } else {
+ ctrl |= E1000_CTRL_FD;
+ *phy_ctrl |= MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ }
+
+ /* Forcing 10mb or 100mb? */
+ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+ ctrl |= E1000_CTRL_SPD_100;
+ *phy_ctrl |= MII_CR_SPEED_100;
+ *phy_ctrl &= ~MII_CR_SPEED_1000;
+ DEBUGOUT("Forcing 100mb\n");
+ } else {
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ DEBUGOUT("Forcing 10mb\n");
+ }
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+}
+
+/**
+ * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ return ret_val;
+
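+ /* active == false: clear D3 LPLU and restore the configured
+ * SmartSpeed setting.
+ */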
+ if (!active) {
+ data &= ~IGP02E1000_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ return ret_val;
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= IGP02E1000_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_downshift_generic - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000_check_downshift_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ DEBUGFUNC("e1000_check_downshift_generic");
+
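+ /* Each supported PHY family reports downshift in a different
+ * status register.
+ */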
+ switch (phy->type) {
+ case e1000_phy_i210:
+ case e1000_phy_m88:
+ case e1000_phy_gg82563:
+ case e1000_phy_bm:
+ case e1000_phy_82578:
+ offset = M88E1000_PHY_SPEC_STATUS;
+ mask = M88E1000_PSSR_DOWNSHIFT;
+ break;
+ case e1000_phy_igp:
+ case e1000_phy_igp_2:
+ case e1000_phy_igp_3:
+ offset = IGP01E1000_PHY_LINK_HEALTH;
+ mask = IGP01E1000_PLHR_SS_DOWNGRADE;
+ break;
+ default:
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
+ return E1000_SUCCESS;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->speed_downgraded = !!(phy_data & mask);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_m88 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_check_polarity_m88");
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_igp - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY port status register, and the
+ * current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data, offset, mask;
+
+ DEBUGFUNC("e1000_check_polarity_igp");
+
+ /* Polarity is determined based on the speed of
+ * our connection.
+ */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ offset = IGP01E1000_PHY_PCS_INIT_REG;
+ mask = IGP01E1000_PHY_POLARITY_MASK;
+ } else {
+ /* This really only applies to 10Mbps since
+ * there is no polarity for 100Mbps (always 0).
+ */
+ offset = IGP01E1000_PHY_PORT_STATUS;
+ mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Polarity is determined based on whether the polarity reversal feature is enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ DEBUGFUNC("e1000_check_polarity_ife");
+
+ /* Polarity is determined based on the reversal feature being enabled.
+ */
+ if (phy->polarity_correction) {
+ offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+ mask = IFE_PESC_POLARITY_REVERSED;
+ } else {
+ offset = IFE_PHY_SPECIAL_CONTROL;
+ mask = IFE_PSC_FORCE_POLARITY;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((phy_data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ **/
+STATIC s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 i, phy_status;
+
+ DEBUGFUNC("e1000_wait_autoneg");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
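+ /* Read PHY_STATUS twice per iteration; see the sticky-bit note in
+ * e1000_phy_has_link_generic().
+ */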
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ break;
+ msec_delay(100);
+ }
+
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ * has completed.
+ */
+ return ret_val;
+}
+
+/**
+ * e1000_phy_has_link_generic - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 i, phy_status;
+
+ DEBUGFUNC("e1000_phy_has_link_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ for (i = 0; i < iterations; i++) {
+ /* Some PHYs require the PHY_STATUS register to be read
+ * twice due to the link bit being sticky. No harm doing
+ * it across the board.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val) {
+ /* If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ if (usec_interval >= 1000)
+ msec_delay(usec_interval/1000);
+ else
+ usec_delay(usec_interval);
+ }
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_LINK_STATUS)
+ break;
+ if (usec_interval >= 1000)
+ msec_delay(usec_interval/1000);
+ else
+ usec_delay(usec_interval);
+ }
+
+ *success = (i < iterations);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_m88 - Determine cable length for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY specific status register to retrieve the cable length
+ * information. The cable length is determined by averaging the minimum and
+ * maximum values to get the "average" cable length. The m88 PHY has five
+ * possible cable length values, which are:
+ * Register Value Cable Length
+ * 0 < 50 meters
+ * 1 50 - 80 meters
+ * 2 80 - 110 meters
+ * 3 110 - 140 meters
+ * 4 > 140 meters
+ **/
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, index;
+
+ DEBUGFUNC("e1000_get_cable_length_m88");
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+
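+ /* index and index + 1 select the min/max table entries bounding the
+ * reported cable length.
+ */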
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return E1000_SUCCESS;
+}
+
+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, phy_data2, is_cm;
+ u16 index, default_page;
+
+ DEBUGFUNC("e1000_get_cable_length_m88_gen2");
+
+ switch (hw->phy.id) {
+ case I210_I_PHY_ID:
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+ break;
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ /* Remember the original page select and set it to 7 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+ if (ret_val)
+ return ret_val;
+
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+ /* Reset the page select to its original value */
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+ return ret_val;
+ break;
+
+ case M88E1112_E_PHY_ID:
+ /* Remember the original page select and set it to 5 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length +
+ phy->max_cable_length) / 2;
+
+ /* Reset the page select to its original value */
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+ return ret_val;
+
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+ * received signal, adjusting for the attenuation produced by the
+ * cable. By reading the AGC registers, which represent the
+ * combination of coarse and fine gain values, the result can be put
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ **/
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, i, agc_value = 0;
+ u16 cur_agc_index, max_agc_index = 0;
+ u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+ static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
+ };
+
+ DEBUGFUNC("e1000_get_cable_length_igp_2");
+
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Getting bits 15:9, which represent the combination of
+ * coarse and fine gain values. The result is a number
+ * that can be put into the lookup table to obtain the
+ * approximate cable length.
+ */
+ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK);
+
+ /* Array index bound check. */
+ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+ (cur_agc_index == 0))
+ return -E1000_ERR_PHY;
+
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc_index] >
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ min_agc_index = cur_agc_index;
+ if (e1000_igp_2_cable_length_table[max_agc_index] <
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ max_agc_index = cur_agc_index;
+
+ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+ }
+
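+ /* Drop the shortest and longest channel readings, then average the rest. */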
+ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+ e1000_igp_2_cable_length_table[max_agc_index]);
+ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+ /* Calculate cable length with the error range of +/- 10 meters. */
+ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0);
+ phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_info_m88 - Retrieve PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Valid only for copper links. Read the PHY status register (sticky read)
+ * to verify that link is up. Read the PHY special control register to
+ * determine the polarity and 10base-T extended distance. Read the PHY
+ * special status register to determine MDI/MDIx and current speed. If
+ * speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_m88");
+
+ if (phy->media_type != e1000_media_type_copper) {
+ DEBUGOUT("Phy info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->polarity_correction = !!(phy_data &
+ M88E1000_PSCR_POLARITY_REVERSAL);
+
+ ret_val = e1000_check_polarity_m88(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ /* Set values to "undefined" */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_igp - Retrieve igp PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length and the local and remote receiver status.
+ **/
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_igp");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_igp(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ ret_val = phy->ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ * @hw: pointer to the HW structure
+ *
+ * Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_ife");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+ phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
+
+ if (phy->polarity_correction) {
+ ret_val = e1000_check_polarity_ife(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Polarity is forced */
+ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+ }
+
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
+
+ /* The following parameters are undefined for 10/100 operation. */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_sw_reset_generic - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register and
+ * setting the reset bit, then writing the control register back to the PHY.
+ **/
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= MII_CR_RESET;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ usec_delay(1);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_generic - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_phy_hw_reset_generic");
+
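+ /* If a reset block is asserted, skip the PHY reset and report success. */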
+ if (phy->ops.check_reset_block) {
+ ret_val = phy->ops.check_reset_block(hw);
+ if (ret_val)
+ return E1000_SUCCESS;
+ }
+
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(phy->reset_delay_us);
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(150);
+
+ phy->ops.release(hw);
+
+ return phy->ops.get_cfg_done(hw);
+}
+
+/**
+ * e1000_get_cfg_done_generic - Generic configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Generic function to wait 10 milliseconds for configuration to complete
+ * and return success.
+ **/
+s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_get_cfg_done_generic");
+ UNREFERENCED_1PARAMETER(hw);
+
+ msec_delay_irq(10);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
+{
+ DEBUGOUT("Running IGP 3 PHY init script\n");
+
+ /* PHY init IGP 3 */
+ /* Enable rise/fall, 10-mode work in class-A */
+ hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
+ /* Remove all caps from Replica path filter */
+ hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
+ /* Bias trimming for ADC, AFE and Driver (Default) */
+ hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
+ /* Increase Hybrid poly bias */
+ hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
+ /* Add 4% to Tx amplitude in Gig mode */
+ hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
+ /* Disable trimming (TTT) */
+ hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
+ /* Poly DC correction to 94.6% + 2% for all channels */
+ hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
+ /* ABS DC correction to 95.9% */
+ hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
+ /* BG temp curve trim */
+ hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
+ /* Increasing ADC OPAMP stage 1 currents to max */
+ hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
+ /* Force 1000 (required for enabling PHY regs configuration) */
+ hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
+ /* Set upd_freq to 6 */
+ hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
+ /* Disable NPDFE */
+ hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
+ /* Disable adaptive fixed FFE (Default) */
+ hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
+ /* Enable FFE hysteresis */
+ hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
+ /* Fixed FFE for short cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
+ /* Fixed FFE for medium cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
+ /* Fixed FFE for long cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
+ /* Enable Adaptive Clip Threshold */
+ hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
+ /* AHT reset limit to 1 */
+ hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
+ /* Set AHT master delay to 127 msec */
+ hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
+ /* Set scan bits for AHT */
+ hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
+ /* Set AHT Preset bits */
+ hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
+ /* Change integ_factor of channel A to 3 */
+ hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
+ /* Change prop_factor of channels BCD to 8 */
+ hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
+ /* Change cg_icount + enable integbp for channels BCD */
+ hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
+ /* Change cg_icount + enable integbp + change prop_factor_master
+ * to 8 for channel A
+ */
+ hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
+ /* Disable AHT in Slave mode on channel A */
+ hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
+ /* Enable LPLU and disable AN to 1000 in non-D0a states,
+ * Enable SPD+B2B
+ */
+ hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
+ /* Enable restart AN on an1000_dis change */
+ hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
+ /* Enable wh_fifo read clock in 10/100 modes */
+ hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
+ /* Restart AN, Speed selection is 1000 */
+ hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_type_from_id - Get PHY type from id
+ * @phy_id: phy_id read from the phy
+ *
+ * Returns the phy type from the id.
+ **/
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
+{
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ switch (phy_id) {
+ case M88E1000_I_PHY_ID:
+ case M88E1000_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+ phy_type = e1000_phy_igp_2;
+ break;
+ case GG82563_E_PHY_ID:
+ phy_type = e1000_phy_gg82563;
+ break;
+ case IGP03E1000_E_PHY_ID:
+ phy_type = e1000_phy_igp_3;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ phy_type = e1000_phy_ife;
+ break;
+ case BME1000_E_PHY_ID:
+ case BME1000_E_PHY_ID_R2:
+ phy_type = e1000_phy_bm;
+ break;
+ case I82578_E_PHY_ID:
+ phy_type = e1000_phy_82578;
+ break;
+ case I82577_E_PHY_ID:
+ phy_type = e1000_phy_82577;
+ break;
+ case I82579_E_PHY_ID:
+ phy_type = e1000_phy_82579;
+ break;
+ case I217_E_PHY_ID:
+ phy_type = e1000_phy_i217;
+ break;
+ case I82580_I_PHY_ID:
+ phy_type = e1000_phy_82580;
+ break;
+ case I210_I_PHY_ID:
+ phy_type = e1000_phy_i210;
+ break;
+ default:
+ phy_type = e1000_phy_unknown;
+ break;
+ }
+ return phy_type;
+}
+
+/**
+ * e1000_determine_phy_address - Determines PHY address.
+ * @hw: pointer to the HW structure
+ *
+ * This uses a trial and error method to loop through possible PHY
+ * addresses. It tests each by reading the PHY ID registers and
+ * checking for a match.
+ **/
+s32 e1000_determine_phy_address(struct e1000_hw *hw)
+{
+ u32 phy_addr = 0;
+ u32 i;
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ hw->phy.id = phy_type;
+
+ for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+ hw->phy.addr = phy_addr;
+ i = 0;
+
+ do {
+ e1000_get_phy_id(hw);
+ phy_type = e1000_get_phy_type_from_id(hw->phy.id);
+
+ /* If phy_type is valid, break - we found our
+ * PHY address
+ */
+ if (phy_type != e1000_phy_unknown)
+ return E1000_SUCCESS;
+
+ msec_delay(1);
+ i++;
+ } while (i < 10);
+ }
+
+ return -E1000_ERR_PHY_TYPE;
+}
+
+/**
+ * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
+ * @page: page to access
+ * @reg: register being accessed
+ *
+ * Returns the phy address for the page requested.
+ **/
+STATIC u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
+{
+ u32 phy_addr = 2;
+
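+ /* Wakeup pages (>= 768), register 25 on page 0 and the page-select
+ * register (31) are accessed through PHY address 1; all other
+ * registers use address 2.
+ */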
+ if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
+ phy_addr = 1;
+
+ return phy_addr;
+}
+
+/**
+ * e1000_write_phy_reg_bm - Write BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u32 page = offset >> IGP_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_write_phy_reg_bm");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, false);
+ goto release;
+ }
+
+ hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
+ /* Page select is register 31 for phy address 1 and 22 for
+ * phy address 2 and 3. Page select is shifted only for
+ * phy address 1.
+ */
+ if (hw->phy.addr == 1) {
+ page_shift = IGP_PAGE_SHIFT;
+ page_select = IGP01E1000_PHY_PAGE_SELECT;
+ } else {
+ page_shift = 0;
+ page_select = BM_PHY_PAGE_SELECT;
+ }
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw, page_select,
+ (page << page_shift));
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_bm - Read BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u32 page = offset >> IGP_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_read_phy_reg_bm");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, false);
+ goto release;
+ }
+
+ hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
+ /* Page select is register 31 for phy address 1 and 22 for
+ * phy address 2 and 3. Page select is shifted only for
+ * phy address 1.
+ */
+ if (hw->phy.addr == 1) {
+ page_shift = IGP_PAGE_SHIFT;
+ page_select = IGP01E1000_PHY_PAGE_SELECT;
+ } else {
+ page_shift = 0;
+ page_select = BM_PHY_PAGE_SELECT;
+ }
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw, page_select,
+ (page << page_shift));
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_bm2 - Read BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+ DEBUGFUNC("e1000_read_phy_reg_bm2");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, false);
+ goto release;
+ }
+
+ hw->phy.addr = 1;
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+ page);
+
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_bm2 - Write BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+ DEBUGFUNC("e1000_write_phy_reg_bm2");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, false);
+ goto release;
+ }
+
+ hw->phy.addr = 1;
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+ page);
+
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
+ * @hw: pointer to the HW structure
+ * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
+ *
+ * Assumes semaphore already acquired and phy_reg points to a valid memory
+ * address to store contents of the BM_WUC_ENABLE_REG register.
+ **/
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+ s32 ret_val;
+ u16 temp;
+
+ DEBUGFUNC("e1000_enable_phy_wakeup_reg_access_bm");
+
+ if (!phy_reg)
+ return -E1000_ERR_PARAM;
+
+ /* All page select, port ctrl and wakeup registers use phy address 1 */
+ hw->phy.addr = 1;
+
+ /* Select Port Control Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ DEBUGOUT("Could not set Port Control page\n");
+ return ret_val;
+ }
+
+ ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+ if (ret_val) {
+ DEBUGOUT2("Could not read PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+ return ret_val;
+ }
+
+ /* Enable both PHY wakeup mode and Wakeup register page writes.
+ * Prevent a power state change by disabling ME and Host PHY wakeup.
+ */
+ temp = *phy_reg;
+ temp |= BM_WUC_ENABLE_BIT;
+ temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
+
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
+ if (ret_val) {
+ DEBUGOUT2("Could not write PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+ return ret_val;
+ }
+
+ /* Select Host Wakeup Registers page - caller now able to write
+ * registers on the Wakeup registers page
+ */
+ return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
+}
+
+/**
+ * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
+ * @hw: pointer to the HW structure
+ * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
+ *
+ * Restore BM_WUC_ENABLE_REG to its original value.
+ *
+ * Assumes semaphore already acquired and *phy_reg is the contents of the
+ * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
+ * caller.
+ **/
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_disable_phy_wakeup_reg_access_bm");
+
+ if (!phy_reg)
+ return -E1000_ERR_PARAM;
+
+ /* Select Port Control Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ DEBUGOUT("Could not set Port Control page\n");
+ return ret_val;
+ }
+
+ /* Restore 769.17 to its original value */
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
+ if (ret_val)
+ DEBUGOUT2("Could not restore PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+
+ return ret_val;
+}
+
+/**
+ * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to read or write
+ * @read: determines if operation is read or write
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Read the PHY register at offset and store the retrieved information in
+ * data, or write data to PHY register at offset. Note the procedure to
+ * access the PHY wakeup registers is different from reading the other PHY
+ * registers. It works as such:
+ * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
+ * 2) Set page to 800 for host (801 if the caller is the manageability engine)
+ * 3) Write the address using the address opcode (0x11)
+ * 4) Read or write the data using the data opcode (0x12)
+ * 5) Restore 769.17.2 to its original value
+ *
+ * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
+ * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
+ *
+ * Assumes semaphore is already acquired. When page_set==true, assumes
+ * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
+ * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_access_bm()).
+ **/
+STATIC s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read, bool page_set)
+{
+ s32 ret_val;
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 phy_reg = 0;
+
+ DEBUGFUNC("e1000_access_phy_wakeup_reg_bm");
+
+ /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
+ if ((hw->mac.type == e1000_pchlan) &&
+ (!(E1000_READ_REG(hw, E1000_PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+ DEBUGOUT1("Attempting to access page %d while gig enabled.\n",
+ page);
+
+ if (!page_set) {
+ /* Enable access to PHY wakeup registers */
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val) {
+ DEBUGOUT("Could not enable PHY wakeup reg access\n");
+ return ret_val;
+ }
+ }
+
+ DEBUGOUT2("Accessing PHY page %d reg 0x%x\n", page, reg);
+
+ /* Write the Wakeup register page offset value using opcode 0x11 */
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
+ if (ret_val) {
+ DEBUGOUT1("Could not write address opcode to page %d\n", page);
+ return ret_val;
+ }
+
+ if (read) {
+ /* Read the Wakeup register page value using opcode 0x12 */
+ ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ data);
+ } else {
+ /* Write the Wakeup register page value using opcode 0x12 */
+ ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ *data);
+ }
+
+ if (ret_val) {
+ DEBUGOUT2("Could not access PHY reg %d.%d\n", page, reg);
+ return ret_val;
+ }
+
+ if (!page_set)
+ ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+ return ret_val;
+}
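A minimal caller sketch of the flow above (hypothetical: the wakeup register number 1 and the hw pointer are illustrative only; BM_PHY_REG() and BM_WUC_PAGE are defined in e1000_phy.h later in this patch, and the page-800 dispatch is performed by __e1000_read_phy_reg_hv() further down):

    u16 val;
    s32 err;

    /* Encode page 800, register 1 into a single offset; register 1 is chosen
     * purely for illustration. __e1000_read_phy_reg_hv() sees that
     * BM_PHY_REG_PAGE(offset) == BM_WUC_PAGE and routes the access through
     * e1000_access_phy_wakeup_reg_bm() above, which performs the
     * enable / address opcode / data opcode / disable sequence.
     */
    err = e1000_read_phy_reg_hv(hw, BM_PHY_REG(BM_WUC_PAGE, 1), &val);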
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on lan is not enabled, restore the link to its
+ * previous settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+ u16 power_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ if (hw->phy.type == e1000_phy_i210) {
+ hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
+ power_reg &= ~GS40G_CS_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
+ }
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down the PHY to save power when the interface is down, when link is
+ * turned off during a driver unload, and wake on lan is not enabled.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+ u16 power_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ /* i210 Phy requires an additional bit for power up/down */
+ if (hw->phy.type == e1000_phy_i210) {
+ hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
+ power_reg |= GS40G_CS_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
+ }
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+ msec_delay(1);
+}
+
+/**
+ * __e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphore before exiting.
+ **/
+STATIC s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked, bool page_set)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ DEBUGFUNC("__e1000_read_phy_reg_hv");
+
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, page_set);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ data, true);
+ goto out;
+ }
+
+ if (!page_set) {
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_set_page_igp(hw,
+ (page << IGP_PAGE_SHIFT));
+
+ hw->phy.addr = phy_addr;
+
+ if (ret_val)
+ goto out;
+ }
+ }
+
+ DEBUGOUT3("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+ page << IGP_PAGE_SHIFT, reg);
+
+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+ data);
+out:
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores
+ * the retrieved information in data. Release the acquired semaphore
+ * before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ * e1000_read_phy_reg_hv_locked - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_read_phy_reg_page_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired and page already set.
+ **/
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * __e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+STATIC s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked, bool page_set)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ DEBUGFUNC("__e1000_write_phy_reg_hv");
+
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, page_set);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ &data, false);
+ goto out;
+ }
+
+ if (!page_set) {
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ /* Workaround MDIO accesses being disabled after entering IEEE
+ * Power Down (when bit 11 of the PHY Control register is set)
+ */
+ if ((hw->phy.type == e1000_phy_82578) &&
+ (hw->phy.revision >= 1) &&
+ (hw->phy.addr == 2) &&
+ !(MAX_PHY_REG_ADDRESS & reg) &&
+ (data & (1 << 11))) {
+ u16 data2 = 0x7EFF;
+ ret_val = e1000_access_phy_debug_regs_hv(hw,
+ (1 << 6) | 0x3,
+ &data2, false);
+ if (ret_val)
+ goto out;
+ }
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_set_page_igp(hw,
+ (page << IGP_PAGE_SHIFT));
+
+ hw->phy.addr = phy_addr;
+
+ if (ret_val)
+ goto out;
+ }
+ }
+
+ DEBUGOUT3("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+ page << IGP_PAGE_SHIFT, reg);
+
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+ data);
+
+out:
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register at the offset.
+ * Release the acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ * e1000_write_phy_reg_hv_locked - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired.
+ **/
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_write_phy_reg_page_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired and page already set.
+ **/
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
+ * @page: page to be accessed
+ **/
+STATIC u32 e1000_get_phy_addr_for_hv_page(u32 page)
+{
+ u32 phy_addr = 2;
+
+ if (page >= HV_INTC_FC_PAGE_START)
+ phy_addr = 1;
+
+ return phy_addr;
+}
+
+/**
+ * e1000_access_phy_debug_regs_hv - Read/write HV PHY vendor specific high registers
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to be read or written
+ * @read: determines if operation is read or write
+ *
+ * Reads the PHY register at offset and stores the retrieved information in
+ * data, or writes data to the register at offset, depending on read. Assumes
+ * the semaphore is already acquired. Note that the procedure to access these
+ * regs uses the address port and data port to read/write. These accesses are
+ * done with PHY address 2 and without using pages.
+ **/
+STATIC s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+ u32 addr_reg;
+ u32 data_reg;
+
+ DEBUGFUNC("e1000_access_phy_debug_regs_hv");
+
+ /* This takes care of the difference between the desktop and mobile PHY */
+ addr_reg = ((hw->phy.type == e1000_phy_82578) ?
+ I82578_ADDR_REG : I82577_ADDR_REG);
+ data_reg = addr_reg + 1;
+
+ /* All operations in this function use PHY address 2 */
+ hw->phy.addr = 2;
+
+ /* masking with 0x3F to remove the page from offset */
+ ret_val = e1000_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
+ if (ret_val) {
+ DEBUGOUT("Could not write the Address Offset port register\n");
+ return ret_val;
+ }
+
+ /* Read or write the data value next */
+ if (read)
+ ret_val = e1000_read_phy_reg_mdic(hw, data_reg, data);
+ else
+ ret_val = e1000_write_phy_reg_mdic(hw, data_reg, *data);
+
+ if (ret_val)
+ DEBUGOUT("Could not access the Data port register\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_link_stall_workaround_hv - Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * This function works around a Si bug where the link partner can get
+ * a link up indication before the PHY does. If small packets are sent
+ * by the link partner they can be placed in the packet buffer without
+ * being properly accounted for by the PHY and will stall preventing
+ * further packets from being received. The workaround is to clear the
+ * packet buffer after the PHY detects link up.
+ **/
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_link_stall_workaround_hv");
+
+ if (hw->phy.type != e1000_phy_82578)
+ return E1000_SUCCESS;
+
+ /* Do not apply workaround if PHY loopback (bit 14) is set */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+ if (data & PHY_CONTROL_LB)
+ return E1000_SUCCESS;
+
+ /* check if link is up and at 1Gbps */
+ ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
+
+ if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ return E1000_SUCCESS;
+
+ msec_delay(200);
+
+ /* flush the packets in the fifo buffer */
+ ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
+ (HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED));
+ if (ret_val)
+ return ret_val;
+
+ return hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
+ HV_MUX_DATA_CTRL_GEN_TO_MAC);
+}
+
+/**
+ * e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("e1000_check_polarity_82577");
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex.
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_82577");
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length and the local and remote receiver status.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_82577");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_82577(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
+
+ if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+ I82577_PHY_STATUS2_SPEED_1000MBPS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies the result is valid
+ * before storing it in the phy->cable_length field.
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, length;
+
+ DEBUGFUNC("e1000_get_cable_length_82577");
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT);
+
+ if (length == E1000_CABLE_LENGTH_UNDEFINED)
+ return -E1000_ERR_PHY;
+
+ phy->cable_length = length;
+
+ return E1000_SUCCESS;
+}
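A worked evaluation of the mask-and-shift above, using a hypothetical readback value (I82577_DSTATUS_CABLE_LENGTH is 0x03FC and the shift is 2, per the defines in e1000_phy.h below):

    /* phy_data = 0x0190 (hypothetical I82577_PHY_DIAG_STATUS readback)
     * (0x0190 & 0x03FC) >> 2 = 0x0190 >> 2 = 0x64 = 100
     * 100 != E1000_CABLE_LENGTH_UNDEFINED (0xFF), so phy->cable_length
     * is set to 100 and E1000_SUCCESS is returned.
     */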
+
+/**
+ * e1000_write_phy_reg_gs40g - Write GS40G PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u16 page = offset >> GS40G_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_write_phy_reg_gs40g");
+
+ offset = offset & GS40G_OFFSET_MASK;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ if (ret_val)
+ goto release;
+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_gs40g - Read GS40G PHY register
+ * @hw: pointer to the HW structure
+ * @offset: lower half is register offset to read from
+ * upper half is page to use.
+ * @data: data to read at register offset
+ *
+ * Acquires semaphore, if necessary, then reads the data in the PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u16 page = offset >> GS40G_PAGE_SHIFT;
+
+ DEBUGFUNC("e1000_read_phy_reg_gs40g");
+
+ offset = offset & GS40G_OFFSET_MASK;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ if (ret_val)
+ goto release;
+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
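A small usage sketch of the paged offset encoding (hypothetical calls, assuming hw is a valid struct e1000_hw pointer; GS40G_PAGE_SHIFT is 16 and GS40G_OFFSET_MASK is 0xFFFF per e1000_phy.h below, so the page travels in the upper half of offset and the register number in the lower half):

    u16 val;

    /* Page 0, register 0x10: GS40G_COPPER_SPEC >> GS40G_PAGE_SHIFT == 0,
     * so page 0 is selected and register 0x0010 is read.
     */
    e1000_read_phy_reg_gs40g(hw, GS40G_COPPER_SPEC, &val);

    /* Page 2, register 0x15: GS40G_PAGE_2 (0x20000) >> 16 == 2 selects
     * page 2; the low half, GS40G_MAC_REG2 (0x15), is the register.
     */
    e1000_read_phy_reg_gs40g(hw, GS40G_PAGE_2 | GS40G_MAC_REG2, &val);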
+
+/**
+ * e1000_read_phy_reg_mphy - Read mPHY control register
+ * @hw: pointer to the HW structure
+ * @address: address to be read
+ * @data: pointer to the read data
+ *
+ * Reads the mPHY control register in the PHY at address and stores the
+ * information read in data.
+ **/
+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
+{
+ u32 mphy_ctrl = 0;
+ bool locked = false;
+ bool ready;
+
+ DEBUGFUNC("e1000_read_phy_reg_mphy");
+
+ /* Check if the mPHY is ready for read/write operations */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Check if mPHY access is disabled and enable it if so */
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+ locked = true;
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+ }
+
+ /* Set the address that we want to read */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Mask the address because we want to use only the current lane */
+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
+ ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
+ (address & E1000_MPHY_ADDRESS_MASK);
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+ /* Read data from the address */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ *data = E1000_READ_REG(hw, E1000_MPHY_DATA);
+
+ /* Disable access to mPHY if it was originally disabled */
+ if (locked) {
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_mphy - Write mPHY control register
+ * @hw: pointer to the HW structure
+ * @address: address to write to
+ * @data: data to write to register at offset
+ * @line_override: used when we want to use different line than default one
+ *
+ * Writes data to mPHY control register.
+ **/
+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+ bool line_override)
+{
+ u32 mphy_ctrl = 0;
+ bool locked = false;
+ bool ready;
+
+ DEBUGFUNC("e1000_write_phy_reg_mphy");
+
+ /* Check if the mPHY is ready for read/write operations */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Check if mPHY access is disabled and enable it if so */
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+ locked = true;
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+ }
+
+ /* Set the address that we want to write to */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Mask the address because we want to use only the current lane */
+ if (line_override)
+ mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+ else
+ mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
+ (address & E1000_MPHY_ADDRESS_MASK);
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+ /* Write data to the address */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
+
+ /* Disable access to mPHY if it was originally disabled */
+ if (locked) {
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_is_mphy_ready - Check if mPHY control register is not busy
+ * @hw: pointer to the HW structure
+ *
+ * Returns true if the mPHY control register is not busy, false otherwise.
+ **/
+bool e1000_is_mphy_ready(struct e1000_hw *hw)
+{
+ u16 retry_count = 0;
+ u32 mphy_ctrl = 0;
+ bool ready = false;
+
+ while (retry_count < 2) {
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_BUSY) {
+ usec_delay(20);
+ retry_count++;
+ continue;
+ }
+ ready = true;
+ break;
+ }
+
+ if (!ready)
+ DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n");
+
+ return ready;
+}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.h
new file mode 100755
index 00000000..73a9b1fd
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_phy.h
@@ -0,0 +1,327 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_PHY_H_
+#define _E1000_PHY_H_
+
+void e1000_init_phy_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+void e1000_null_phy_generic(struct e1000_hw *hw);
+s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_null_set_page(struct e1000_hw *hw, u16 data);
+s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 e1000_check_downshift_generic(struct e1000_hw *hw);
+s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw);
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
+s32 e1000_copper_link_autoneg(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw);
+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw);
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw);
+s32 e1000_get_cfg_done_generic(struct e1000_hw *hw);
+s32 e1000_get_phy_id(struct e1000_hw *hw);
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw);
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw);
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw);
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw);
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw);
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
+s32 e1000_determine_phy_address(struct e1000_hw *hw);
+s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
+s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data);
+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+ bool line_override);
+bool e1000_is_mphy_ready(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR 8
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
+
+/* GS40G - I210 PHY defines */
+#define GS40G_PAGE_SELECT 0x16
+#define GS40G_PAGE_SHIFT 16
+#define GS40G_OFFSET_MASK 0xFFFF
+#define GS40G_PAGE_2 0x20000
+#define GS40G_MAC_REG2 0x15
+#define GS40G_MAC_LB 0x4140
+#define GS40G_MAC_SPEED_1G 0x0006
+#define GS40G_COPPER_SPEC 0x0010
+#define GS40G_CS_POWER_DOWN 0x0002
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE 769
+#define BM_WUC_PAGE 800
+#define BM_WUC_ADDRESS_OPCODE 0x11
+#define BM_WUC_DATA_OPCODE 0x12
+#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
+#define BM_WUC_ENABLE_REG 17
+#define BM_WUC_ENABLE_BIT (1 << 2)
+#define BM_WUC_HOST_WU_BIT (1 << 4)
+#define BM_WUC_ME_WU_BIT (1 << 5)
+
+#define PHY_UPPER_SHIFT 21
+#define BM_PHY_REG(page, reg) \
+ (((reg) & MAX_PHY_REG_ADDRESS) |\
+ (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+ (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
+#define BM_PHY_REG_PAGE(offset) \
+ ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+ ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+ (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+ ~MAX_PHY_REG_ADDRESS)))
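A worked example of this packing for 769.17 (BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG), assuming MAX_PHY_REG_ADDRESS is 0x1F and PHY_PAGE_SHIFT is 5, as defined in e1000_defines.h elsewhere in this import:

    /* BM_PHY_REG(BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG), i.e. 769.17:
     *   (17  & MAX_PHY_REG_ADDRESS)              = 0x0011
     *   (769 & 0xFFFF) << PHY_PAGE_SHIFT         = 0x6020
     *   (17  & ~MAX_PHY_REG_ADDRESS) << (21 - 5) = 0x0000
     *   packed offset                            = 0x6031
     *
     * BM_PHY_REG_PAGE(0x6031) = (0x6031 >> 5) & 0xFFFF = 769
     * BM_PHY_REG_NUM(0x6031)  =  0x6031 & 0x1F         = 17
     */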
+
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CTRL_REG 23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_LBK_CTRL 19
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* 82580 PHY Power Management */
+#define E1000_82580_PHY_POWER_MGMT 0xE14
+#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
+#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
+#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
+
+#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */
+#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */
+#define E1000_MPHY_BUSY 0x00010000 /* busy bit */
+#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */
+#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1 16
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS 17
+#define BM_CS_STATUS_LINK_UP 0x0400
+#define BM_CS_STATUS_RESOLVED 0x0800
+#define BM_CS_STATUS_SPEED_MASK 0xC000
+#define BM_CS_STATUS_SPEED_1000 0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS 26
+#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
+#define HV_M_STATUS_SPEED_MASK 0x0300
+#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_SPEED_100 0x0100
+#define HV_M_STATUS_LINK_UP 0x0040
+
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+/* Enable flexible speed on link-up */
+#define IGP01E1000_GMII_FLEX_SPD 0x0010
+#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse=15:13, Fine=12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */
+#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
+#define E1000_KMRNCTRLSTA_OP_MODES 0x1F /* Kumeran Modes of Operation */
+#define E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC 0x0002 /* change LSC to CSC */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED 0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
+#define IFE_PSC_FORCE_POLARITY 0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE 0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
+
+/* SFP modules ID memory locations */
+#define E1000_SFF_IDENTIFIER_OFFSET 0x00
+#define E1000_SFF_IDENTIFIER_SFF 0x02
+#define E1000_SFF_IDENTIFIER_SFP 0x03
+
+#define E1000_SFF_ETH_FLAGS_OFFSET 0x06
+/* Flags for SFP modules compatible with ETH up to 1Gb */
+struct sfp_e1000_flags {
+ u8 e1000_base_sx:1;
+ u8 e1000_base_lx:1;
+ u8 e1000_base_cx:1;
+ u8 e1000_base_t:1;
+ u8 e100_base_lx:1;
+ u8 e100_base_fx:1;
+ u8 e10_base_bx10:1;
+ u8 e10_base_px:1;
+};
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600
+#define E1000_SFF_VENDOR_OUI_FTL 0x00906500
+#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_regs.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_regs.h
new file mode 100755
index 00000000..bde2a089
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_regs.h
@@ -0,0 +1,685 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
+#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */
+#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */
+#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */
+#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */
+#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */
+#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */
+#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */
+#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */
+#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */
+#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */
+#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */
+#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#if !defined(EXTERNAL_RELEASE) || (defined(NAHUM6LP_HW) && defined(ULP_SUPPORT))
+#define E1000_FEXT 0x0002C /* Future Extended - RW */
+#endif /* !EXTERNAL_RELEASE || (NAHUM6LP_HW && ULP_SUPPORT) */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
+#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
+#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
+#define E1000_SVCR 0x000F0
+#define E1000_SVT 0x000F4
+#define E1000_LPIC 0x000FC /* Low Power IDLE control */
+#define E1000_RCTL 0x00100 /* Rx Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
+#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */
+#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
+#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define E1000_TCTL 0x00400 /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
+#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */
+#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_LEDMUX 0x08130 /* LED MUX Control */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL 0x01030 /* FLASH control register */
+#define E1000_FLSWDATA 0x01034 /* FLASH data register */
+#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */
+#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */
+#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */
+#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */
+#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
+#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
+#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
+#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */
+#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */
+#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */
+#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
+#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */
+#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */
+#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */
+#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */
+#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */
+#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */
+#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */
+#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */
+#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */
+#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */
+#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */
+#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */
+#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */
+#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
+#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
+#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
+#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
+#define E1000_I210_FLMNGCTL 0x12038
+#define E1000_I210_FLMNGDATA 0x1203C
+#define E1000_I210_FLMNGCNT 0x12040
+
+#define E1000_I210_FLSWCTL 0x12048
+#define E1000_I210_FLSWDATA 0x1204C
+#define E1000_I210_FLSWCNT 0x12050
+
+#define E1000_I210_FLA 0x1201C
+
+#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
+#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
+
+/* QAV Tx mode control register */
+#define E1000_I210_TQAVCTRL 0x3570
+
+/* QAV Tx mode control register bitfields masks */
+/* QAV enable */
+#define E1000_TQAVCTRL_MODE (1 << 0)
+/* Fetching arbitration type */
+#define E1000_TQAVCTRL_FETCH_ARB (1 << 4)
+/* Fetching timer enable */
+#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5)
+/* Launch arbitration type */
+#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8)
+/* Launch timer enable */
+#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9)
+/* SP waits for SR enable */
+#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10)
+/* Fetching timer correction */
+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16
+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \
+ (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET)
+
+/* High credit registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n))
+
+/* Queues fetch arbitration priority control register */
+#define E1000_I210_TQAVARBCTRL 0x3574
+/* Queues priority masks where _n and _p can be 0-3. */
+#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * _n))
+/* QAV Tx mode control registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n))
+
+/* QAV Tx mode control register bitfields masks */
+#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */
+#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */
+#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */
+
+/* Good transmitted packets counter registers */
+#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
+
+/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */
+#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * _n))
+
+#define E1000_MMDAC 13 /* MMD Access Control */
+#define E1000_MMDAAD 14 /* MMD Access Address/Data */
+
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be accessed.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+ (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+ (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
+ (0x0C030 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+ (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
+ (0x0E038 + ((_n) * 0x40)))
+#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
+ (0x0E03C + ((_n) * 0x40)))
+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
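For reference, evaluating a few of the queue macros above shows the two address blocks they select between (0x100 stride for queues 0-3, 0x40 stride in the upper block for queue 4 and above):

    /* E1000_RDBAL(2) = 0x02800 + 2 * 0x100 = 0x02A00   (queues 0-3)  */
    /* E1000_RDBAL(5) = 0x0C000 + 5 * 0x40  = 0x0C140   (queues >= 4) */
    /* E1000_TDT(0)   = 0x03818 + 0 * 0x100 = 0x03818                 */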
+#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */
+#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
+#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */
+#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */
+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+/* Same as TXPBS, renamed for newer Si - RW */
+#define E1000_ITPBS 0x03404
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */
+#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */
+#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */
+#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */
+#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */
+#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */
+#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
+#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
+/* DMA Tx Max Total Allow Size Reqs - RW */
+#define E1000_DTXMXSZRQ 0x03540
+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */
+
+#define E1000_VFGPRC 0x00F10
+#define E1000_VFGORC 0x00F18
+#define E1000_VFMPRC 0x00F3C
+#define E1000_VFGPTC 0x00F14
+#define E1000_VFGOTC 0x00F34
+#define E1000_VFGOTLBC 0x00F50
+#define E1000_VFGPTLBC 0x00F44
+#define E1000_VFGORLBC 0x00F48
+#define E1000_VFGPRLBC 0x00F40
+/* Virtualization statistical counters */
+#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n)))
+#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n)))
+#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n)))
+#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n)))
+#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n)))
+#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n)))
+#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n)))
+#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n)))
+#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n)))
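+/* In the PFVF macros above (_n) is assumed to be the VF index,
+ * e.g. E1000_PFVFGPRC(1) = 0x010110.
+ */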
+
+/* LinkSec */
+#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */
+#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */
+#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */
+#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */
+#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */
+#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */
+#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */
+#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */
+#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */
+#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */
+#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */
+#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */
+#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */
+#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */
+#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */
+#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */
+#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */
+#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */
+#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */
+#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */
+#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */
+#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */
+#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */
+#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */
+#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */
+#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */
+#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */
+#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */
+#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */
+#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */
+/* LinkSec Tx 128-bit Key 0 - WO */
+#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n)))
+/* LinkSec Tx 128-bit Key 1 - WO */
+#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n)))
+#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */
+#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */
+/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit
+ * key - RW.
+ */
+#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
+
+#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */
+#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */
+#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */
+#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */
+/* IPSec Rx IPv4/v6 Address - RW */
+#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n)))
+/* IPSec Rx 128-bit Key - RW */
+#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n)))
+#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */
+#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */
+/* IPSec Tx 128-bit Key - RW */
+#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n)))
+#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */
+#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */
+#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
+#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */
+#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
+#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */
+#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */
+#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
+#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */
+#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */
+#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
+#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
+#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS 0x04138 /* Length Errors Count */
+#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
+#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */
+#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
+#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */
+#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */
+#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */
+#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
+#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */
+/* Flexible Host Filter Table */
+#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100))
+/* Ext Flexible Host Filter Table */
+#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100))
+
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+/* Management Decision Filters */
+#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
+#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
+#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+/* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_SWSM2 0x05B58
+#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
+#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
+#define E1000_UFUSE 0x05B78 /* UFUSE - RO */
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+#define E1000_FWSTS 0x08F0C /* FW Status */
+
+/* RSS registers */
+#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
+#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
+#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */
+#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */
+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
+#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
+/* VT Registers */
+#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */
+#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
+#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
+#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
+#define E1000_VFRE 0x00C8C /* VF Receive Enables */
+#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
+#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
+#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
+#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
+#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
+#define E1000_VMRCTL 0x05D80 /* Virtual Mirror Rule Control */
+#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */
+#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */
+#define E1000_MDFB 0x03558 /* Malicious Driver free block */
+#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */
+#define E1000_TXSWC 0x05ACC /* Tx Switch Control */
+#define E1000_SCCRL 0x05DB0 /* Storm Control Control */
+#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */
+#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */
+/* These act per VF so an array friendly macro is used */
+#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
+#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
+#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
+#define E1000_VFVMBMEM(_n) (0x00800 + (_n))
+#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
+/* VLAN Virtual Machine Filter - RW */
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n)))
+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
+#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */
+#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
+#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
+#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
+#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
+#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
+#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
+#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
+
+/* Filtering Registers */
+#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
+#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
+#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
+#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
+#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */
+#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */
+#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */
+#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */
+#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */
+/* Tx Desc plane TC Rate-scheduler config */
+#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4))
+/* Tx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4))
+/* Tx Desc Plane TC Rate-Scheduler Status */
+#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4))
+/* Tx Desc Plane TC Rate-Scheduler MMW */
+#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4))
+/* Tx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4))
+/* Tx Packet plane TC Rate-scheduler MMW */
+#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler MMW */
+#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4))
+/* Tx Desc plane VM Rate-Scheduler MMW*/
+#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4))
+/* Tx BCN Rate-Scheduler MMW */
+#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4))
+#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */
+#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */
+#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */
+#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */
+#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */
+#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */
+#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */
+#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */
+#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */
+#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */
+#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */
+#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */
+#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */
+#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */
+
+/* DMA Coalescing registers */
+#define E1000_DMACR 0x02508 /* Control Register */
+#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
+
+/* PCIe Parity Status Register */
+#define E1000_PCIEERRSTS 0x05BA8
+
+#define E1000_PROXYS 0x5F64 /* Proxying Status */
+#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */
+/* Thermal sensor configuration and status registers */
+#define E1000_THMJT 0x08100 /* Junction Temperature */
+#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
+#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+
+/* Energy Efficient Ethernet "EEE" registers */
+#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
+#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
+#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/
+#define E1000_EEE_SU 0x0E34 /* EEE Setup */
+#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */
+#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
+
+
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.c b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.c
new file mode 100755
index 00000000..778561e7
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.c
@@ -0,0 +1,586 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "e1000_api.h"
+
+
+STATIC s32 e1000_init_phy_params_vf(struct e1000_hw *hw);
+STATIC s32 e1000_init_nvm_params_vf(struct e1000_hw *hw);
+STATIC void e1000_release_vf(struct e1000_hw *hw);
+STATIC s32 e1000_acquire_vf(struct e1000_hw *hw);
+STATIC s32 e1000_setup_link_vf(struct e1000_hw *hw);
+STATIC s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw);
+STATIC s32 e1000_init_mac_params_vf(struct e1000_hw *hw);
+STATIC s32 e1000_check_for_link_vf(struct e1000_hw *hw);
+STATIC s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+STATIC s32 e1000_init_hw_vf(struct e1000_hw *hw);
+STATIC s32 e1000_reset_hw_vf(struct e1000_hw *hw);
+STATIC void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32);
+STATIC void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
+STATIC s32 e1000_read_mac_addr_vf(struct e1000_hw *);
+
+/**
+ * e1000_init_phy_params_vf - Inits PHY params
+ * @hw: pointer to the HW structure
+ *
+ * Doesn't do much - there's no PHY available to the VF.
+ **/
+STATIC s32 e1000_init_phy_params_vf(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_phy_params_vf");
+ hw->phy.type = e1000_phy_vf;
+ hw->phy.ops.acquire = e1000_acquire_vf;
+ hw->phy.ops.release = e1000_release_vf;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_nvm_params_vf - Inits NVM params
+ * @hw: pointer to the HW structure
+ *
+ * Doesn't do much - there's no NVM available to the VF.
+ **/
+STATIC s32 e1000_init_nvm_params_vf(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_nvm_params_vf");
+ hw->nvm.type = e1000_nvm_none;
+ hw->nvm.ops.acquire = e1000_acquire_vf;
+ hw->nvm.ops.release = e1000_release_vf;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_vf - Inits MAC params
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_init_mac_params_vf");
+
+ /* Set media type */
+ /*
+ * Virtual functions don't care what their media type is as they
+ * have no direct access to the PHY, or the media. That is handled
+ * by the physical function driver.
+ */
+ hw->phy.media_type = e1000_media_type_unknown;
+
+ /* No ASF features for the VF driver */
+ mac->asf_firmware_present = false;
+ /* ARC subsystem not supported */
+ mac->arc_subsystem_valid = false;
+ /* Disable adaptive IFS mode so the generic funcs don't do anything */
+ mac->adaptive_ifs = false;
+ /* VFs have no MTA registers - PF feature only */
+ mac->mta_reg_count = 128;
+ /* VFs have no access to RAR entries */
+ mac->rar_entry_count = 1;
+
+ /* Function pointers */
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_vf;
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pcie_vf;
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_vf;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_vf;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_link_vf;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
+ /* set mac address */
+ mac->ops.rar_set = e1000_rar_set_vf;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
+
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_vf - Inits function pointers
+ * @hw: pointer to the HW structure
+ **/
+void e1000_init_function_pointers_vf(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_vf");
+
+ hw->mac.ops.init_params = e1000_init_mac_params_vf;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_vf;
+ hw->phy.ops.init_params = e1000_init_phy_params_vf;
+ hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
+}
+
+/**
+ * e1000_acquire_vf - Acquire rights to access PHY or NVM.
+ * @hw: pointer to the HW structure
+ *
+ * There is no PHY or NVM so we want all attempts to acquire these to fail.
+ * In addition, the MAC registers to access PHY/NVM don't exist so we don't
+ * even want any SW to attempt to use them.
+ **/
+STATIC s32 e1000_acquire_vf(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return -E1000_ERR_PHY;
+}
+
+/**
+ * e1000_release_vf - Release PHY or NVM
+ * @hw: pointer to the HW structure
+ *
+ * There is no PHY or NVM so we want all attempts to acquire these to fail.
+ * In addition, the MAC registers to access PHY/NVM don't exist so we don't
+ * even want any SW to attempt to use them.
+ **/
+STATIC void e1000_release_vf(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return;
+}
+
+/**
+ * e1000_setup_link_vf - Sets up link.
+ * @hw: pointer to the HW structure
+ *
+ * Virtual functions cannot change link.
+ **/
+STATIC s32 e1000_setup_link_vf(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_setup_link_vf");
+ UNREFERENCED_1PARAMETER(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_bus_info_pcie_vf - Gets the bus info.
+ * @hw: pointer to the HW structure
+ *
+ * Virtual functions are not really on their own bus.
+ **/
+STATIC s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+
+ DEBUGFUNC("e1000_get_bus_info_pcie_vf");
+
+ /* Do not set type PCI-E so that the PCIe disable-master logic does not run */
+ bus->type = e1000_bus_type_reserved;
+ bus->speed = e1000_bus_speed_2500;
+
+ return 0;
+}
+
+/**
+ * e1000_get_link_up_info_vf - Gets link info.
+ * @hw: pointer to the HW structure
+ * @speed: pointer to 16 bit value to store link speed.
+ * @duplex: pointer to 16 bit value to store duplex.
+ *
+ * Since we cannot read the PHY and get accurate link info, we must rely upon
+ * the status register's data which is often stale and inaccurate.
+ **/
+STATIC s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 status;
+
+ DEBUGFUNC("e1000_get_link_up_info_vf");
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ DEBUGOUT("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ DEBUGOUT("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_hw_vf - Resets the HW
+ * @hw: pointer to the HW structure
+ *
+ * VFs provide a function-level reset. This is done using bit 26 of ctrl_reg.
+ * This is all the reset we can perform on a VF.
+ **/
+STATIC s32 e1000_reset_hw_vf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 timeout = E1000_VF_INIT_TIMEOUT;
+ s32 ret_val = -E1000_ERR_MAC_INIT;
+ u32 ctrl, msgbuf[3];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ DEBUGFUNC("e1000_reset_hw_vf");
+
+ DEBUGOUT("Issuing a function level reset to MAC\n");
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+ timeout--;
+ usec_delay(5);
+ }
+
+ if (timeout) {
+ /* mailbox timeout can now become active */
+ mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = E1000_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1, 0);
+
+ msec_delay(10);
+
+ /* set our "perm_addr" based on info provided by PF */
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+ if (!ret_val) {
+ if (msgbuf[0] == (E1000_VF_RESET |
+ E1000_VT_MSGTYPE_ACK))
+ memcpy(hw->mac.perm_addr, addr, 6);
+ else
+ ret_val = -E1000_ERR_MAC_INIT;
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_vf - Inits the HW
+ * @hw: pointer to the HW structure
+ *
+ * Not much to do here except clear the PF Reset indication if there is one.
+ **/
+STATIC s32 e1000_init_hw_vf(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_hw_vf");
+
+ /* attempt to set and restore our mac address */
+ e1000_rar_set_vf(hw, hw->mac.addr, 0);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_rar_set_vf - set device MAC address
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ **/
+STATIC void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr,
+ u32 E1000_UNUSEDARG index)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ UNREFERENCED_1PARAMETER(index);
+ memset(msgbuf, 0, 12);
+ msgbuf[0] = E1000_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+ msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+
+ /* if NACKed, the address was rejected; fall back to "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
+ e1000_read_mac_addr_vf(hw);
+}
+
+/**
+ * e1000_hash_mc_addr_vf - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value.
+ **/
+STATIC u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ DEBUGFUNC("e1000_hash_mc_addr_generic");
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /*
+ * The bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
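+ /*
+ * Worked example (illustrative, not in the original source): with the
+ * VF default mta_reg_count = 128, hash_mask = (128 * 32) - 1 = 0xFFF,
+ * so the loop stops at bit_shift = 4 and the 12-bit hash below is
+ * built from the top 4 bits of mc_addr[4] and all 8 bits of mc_addr[5].
+ */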
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16) mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
+
+STATIC void e1000_write_msg_read_ack(struct e1000_hw *hw,
+ u32 *msg, u16 size)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 retmsg[E1000_VFMAILBOX_SIZE];
+ s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
+
+ if (!retval)
+ mbx->ops.read_posted(hw, retmsg, E1000_VFMAILBOX_SIZE, 0);
+}
+
+/**
+ * e1000_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates the Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 msgbuf[E1000_VFMAILBOX_SIZE];
+ u16 *hash_list = (u16 *)&msgbuf[1];
+ u32 hash_value;
+ u32 i;
+
+ DEBUGFUNC("e1000_update_mc_addr_list_vf");
+
+ /* Each entry in the list uses one 16-bit word. We have 30
+ * 16-bit words available in our HW msg buffer (minus 1 for the
+ * msg type). That's 30 hash values if we pack 'em right. If
+ * there are more than 30 MC addresses to add then punt the
+ * extras for now and add code to handle more than 30 later.
+ * It would be unusual for a server to request that many multicast
+ * addresses except in large enterprise network environments.
+ */
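+ /*
+ * Illustrative mailbox layout (assumption based on the code below):
+ * msgbuf[0] = E1000_VF_SET_MULTICAST | (count << E1000_VT_MSGINFO_SHIFT),
+ * and each 12-bit hash occupies one 16-bit slot starting at msgbuf[1].
+ */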
+
+ DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+
+ msgbuf[0] = E1000_VF_SET_MULTICAST;
+
+ if (mc_addr_count > 30) {
+ msgbuf[0] |= E1000_VF_SET_MULTICAST_OVERFLOW;
+ mc_addr_count = 30;
+ }
+
+ msgbuf[0] |= mc_addr_count << E1000_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < mc_addr_count; i++) {
+ hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
+ DEBUGOUT1("Hash value = 0x%03X\n", hash_value);
+ hash_list[i] = hash_value & 0x0FFF;
+ mc_addr_list += ETH_ADDR_LEN;
+ }
+
+ e1000_write_msg_read_ack(hw, msgbuf, E1000_VFMAILBOX_SIZE);
+}
+
+/**
+ * e1000_vfta_set_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vid: determines the vfta register and bit to set/unset
+ * @set: if true then set bit, else clear bit
+ **/
+void e1000_vfta_set_vf(struct e1000_hw *hw, u16 vid, bool set)
+{
+ u32 msgbuf[2];
+
+ msgbuf[0] = E1000_VF_SET_VLAN;
+ msgbuf[1] = vid;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ if (set)
+ msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
+
+ e1000_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ * e1000_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
+{
+ u32 msgbuf[2];
+
+ msgbuf[0] = E1000_VF_SET_LPE;
+ msgbuf[1] = max_size;
+
+ e1000_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ * e1000_promisc_set_vf - Set flags for Unicast or Multicast promisc
+ * @hw: pointer to the HW structure
+ * @type: promiscuous mode to set (unicast, multicast, both, or disabled)
+ **/
+s32 e1000_promisc_set_vf(struct e1000_hw *hw, enum e1000_promisc_type type)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf = E1000_VF_SET_PROMISC;
+ s32 ret_val;
+
+ switch (type) {
+ case e1000_promisc_multicast:
+ msgbuf |= E1000_VF_SET_PROMISC_MULTICAST;
+ break;
+ case e1000_promisc_enabled:
+ msgbuf |= E1000_VF_SET_PROMISC_MULTICAST;
+ /* fall through - "enabled" requests both multicast and unicast */
+ case e1000_promisc_unicast:
+ msgbuf |= E1000_VF_SET_PROMISC_UNICAST;
+ /* fall through */
+ case e1000_promisc_disabled:
+ break;
+ default:
+ return -E1000_ERR_MAC_INIT;
+ }
+
+ ret_val = mbx->ops.write_posted(hw, &msgbuf, 1, 0);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, &msgbuf, 1, 0);
+
+ if (!ret_val && !(msgbuf & E1000_VT_MSGTYPE_ACK))
+ ret_val = -E1000_ERR_MAC_INIT;
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+STATIC s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_for_link_vf - Check for link for a virtual interface
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the underlying PF is still talking to the VF and
+ * if it is then it reports the link state to the hardware, otherwise
+ * it reports link down and returns an error.
+ **/
+STATIC s32 e1000_check_for_link_vf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+ u32 in_msg = 0;
+
+ DEBUGFUNC("e1000_check_for_link_vf");
+
+ /*
+ * We only want to run this if a reset has been asserted.
+ * In this case that could mean a link change, a device reset,
+ * or a virtual function reset.
+ */
+
+ /* If we were hit with a reset or timeout drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
+ goto out;
+
+ /* if the read failed it could just be a mailbox collision, best wait
+ * until we are called again and don't report an error */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ goto out;
+
+ /* if incoming message isn't clear to send we are waiting on response */
+ if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
+ /* message is not CTS; if it is a NACK we have lost CTS status */
+ if (in_msg & E1000_VT_MSGTYPE_NACK)
+ ret_val = -E1000_ERR_MAC_INIT;
+ goto out;
+ }
+
+ /* at this point we know the PF is talking to us; check whether the
+ * mailbox timeout is still active or whether a timeout failure
+ * occurred. if we failed then we will need to reinit */
+ if (!mbx->timeout) {
+ ret_val = -E1000_ERR_MAC_INIT;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link */
+ mac->get_link_status = false;
+
+out:
+ return ret_val;
+}
+
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.h b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.h
new file mode 100755
index 00000000..6d5bd996
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000/e1000_vf.h
@@ -0,0 +1,295 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _E1000_VF_H_
+#define _E1000_VF_H_
+
+#include "e1000_osdep.h"
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576_VF 0x10CA
+#define E1000_DEV_ID_I350_VF 0x1520
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Num of retries to clear RSTI */
+
+/* Additional Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+ (0x0C00C + ((_n) * 0x40)))
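+/* e.g. (from the macro above) E1000_SRRCTL(0) = 0x0280C and
+ * E1000_SRRCTL(4) = 0x0C10C.
+ */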
+#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define E1000_SRRCTL_DROP_EN 0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
+
+/* Interrupt Defines */
+#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + ((_n) << 2))
+#define E1000_EICS 0x01520 /* Ext. Intr Cause Set -W0 */
+#define E1000_EIMS 0x01524 /* Ext. Intr Mask Set/Read -RW */
+#define E1000_EIMC 0x01528 /* Ext. Intr Mask Clear -WO */
+#define E1000_EIAC 0x0152C /* Ext. Intr Auto Clear -RW */
+#define E1000_EIAM 0x01530 /* Ext. Intr Ack Auto Clear Mask -RW */
+#define E1000_IVAR0 0x01700 /* Intr Vector Alloc (array) -RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes -RW */
+#define E1000_IVAR_VALID 0x80
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+ struct {
+ u64 pkt_addr; /* Packet buffer address */
+ u64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ u32 data;
+ struct {
+ /* RSS type, Packet type */
+ u16 pkt_info;
+ /* Split Header, header buffer len */
+ u16 hdr_info;
+ } hs_rss;
+ } lo_dword;
+ union {
+ u32 rss; /* RSS Hash */
+ struct {
+ u16 ip_id; /* IP id */
+ u16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ u32 status_error; /* ext status/error */
+ u16 length; /* Packet length */
+ u16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+ struct {
+ u64 buffer_addr; /* Address of descriptor's data buf */
+ u32 cmd_type_len;
+ u32 olinfo_status;
+ } read;
+ struct {
+ u64 rsvd; /* Reserved */
+ u32 nxtseq_seed;
+ u32 status;
+ } wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+ u32 vlan_macip_lens;
+ u32 seqnum_seed;
+ u32 type_tucmd_mlhl;
+ u32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+enum e1000_mac_type {
+ e1000_undefined = 0,
+ e1000_vfadapt,
+ e1000_vfadapt_i350,
+ e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
+};
+
+struct e1000_vf_stats {
+ u64 base_gprc;
+ u64 base_gptc;
+ u64 base_gorc;
+ u64 base_gotc;
+ u64 base_mprc;
+ u64 base_gotlbc;
+ u64 base_gptlbc;
+ u64 base_gorlbc;
+ u64 base_gprlbc;
+
+ u32 last_gprc;
+ u32 last_gptc;
+ u32 last_gorc;
+ u32 last_gotc;
+ u32 last_mprc;
+ u32 last_gotlbc;
+ u32 last_gptlbc;
+ u32 last_gorlbc;
+ u32 last_gprlbc;
+
+ u64 gprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 mprc;
+ u64 gotlbc;
+ u64 gptlbc;
+ u64 gorlbc;
+ u64 gprlbc;
+};
+
+#include "e1000_mbx.h"
+
+struct e1000_mac_operations {
+ /* Function pointers for the MAC. */
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ void (*clear_vfta)(struct e1000_hw *);
+ s32 (*get_bus_info)(struct e1000_hw *);
+ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
+ s32 (*setup_link)(struct e1000_hw *);
+ void (*write_vfta)(struct e1000_hw *, u32, u32);
+ void (*rar_set)(struct e1000_hw *, u8*, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+};
+
+struct e1000_mac_info {
+ struct e1000_mac_operations ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+ enum e1000_mac_type type;
+
+ u16 mta_reg_count;
+ u16 rar_entry_count;
+
+ bool get_link_status;
+};
+
+struct e1000_mbx_operations {
+ s32 (*init_params)(struct e1000_hw *hw);
+ s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct e1000_hw *, u16);
+ s32 (*check_for_ack)(struct e1000_hw *, u16);
+ s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct e1000_mbx_info {
+ struct e1000_mbx_operations ops;
+ struct e1000_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u16 size;
+};
+
+struct e1000_dev_spec_vf {
+ u32 vf_number;
+ u32 v2p_mailbox;
+};
+
+struct e1000_hw {
+ void *back;
+
+ u8 *hw_addr;
+ u8 *flash_address;
+ unsigned long io_base;
+
+ struct e1000_mac_info mac;
+ struct e1000_mbx_info mbx;
+
+ union {
+ struct e1000_dev_spec_vf vf;
+ } dev_spec;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+};
+
+enum e1000_promisc_type {
+ e1000_promisc_disabled = 0, /* all promisc modes disabled */
+ e1000_promisc_unicast = 1, /* unicast promiscuous enabled */
+ e1000_promisc_multicast = 2, /* multicast promiscuous enabled */
+ e1000_promisc_enabled = 3, /* both uni and multicast promisc */
+ e1000_num_promisc_types
+};
+
+/* These functions must be implemented by drivers */
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
+void e1000_rlpml_set_vf(struct e1000_hw *, u16);
+s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type);
+#endif /* _E1000_VF_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000_ethdev.h b/src/dpdk_lib18/librte_pmd_e1000/e1000_ethdev.h
new file mode 100755
index 00000000..71eb5fb7
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000_ethdev.h
@@ -0,0 +1,248 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _E1000_ETHDEV_H_
+#define _E1000_ETHDEV_H_
+
+/* need to update link, bit flag */
+#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
+#define E1000_FLAG_MAILBOX (uint32_t)(1 << 1)
+
+/*
+ * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD
+ * driver.
+ */
+#define E1000_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */
+#define E1000_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */
+#define E1000_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE of Reserved */
+#define E1000_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */
+#define E1000_RXD_ERR_CKSUM_BIT 29
+#define E1000_RXD_ERR_CKSUM_MSK 3
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */
+#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */
+#define IGB_VFTA_SIZE 128
+
+#define IGB_MAX_RX_QUEUE_NUM 8
+#define IGB_MAX_RX_QUEUE_NUM_82576 16
+
+#define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
+#define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */
+#define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field */
+#define E1000_RFCTL_SYNQFP 0x00080000 /* SYNQFP in RFCTL register */
+
+#define E1000_ETQF_ETHERTYPE 0x0000FFFF
+#define E1000_ETQF_QUEUE 0x00070000
+#define E1000_ETQF_QUEUE_SHIFT 16
+#define E1000_MAX_ETQF_FILTERS 8
+
+#define E1000_IMIR_DSTPORT 0x0000FFFF
+#define E1000_IMIR_PRIORITY 0xE0000000
+#define E1000_IMIR_EXT_SIZE_BP 0x00001000
+#define E1000_IMIR_EXT_CTRL_UGR 0x00002000
+#define E1000_IMIR_EXT_CTRL_ACK 0x00004000
+#define E1000_IMIR_EXT_CTRL_PSH 0x00008000
+#define E1000_IMIR_EXT_CTRL_RST 0x00010000
+#define E1000_IMIR_EXT_CTRL_SYN 0x00020000
+#define E1000_IMIR_EXT_CTRL_FIN 0x00040000
+#define E1000_IMIR_EXT_CTRL_BP 0x00080000
+#define E1000_MAX_TTQF_FILTERS 8
+#define E1000_2TUPLE_MAX_PRI 7
+
+#define E1000_MAX_FLEXIBLE_FILTERS 8
+#define E1000_MAX_FHFT 4
+#define E1000_MAX_FHFT_EXT 4
+#define E1000_MAX_FLEX_FILTER_PRI 7
+#define E1000_MAX_FLEX_FILTER_LEN 128
+#define E1000_FHFT_QUEUEING_LEN 0x0000007F
+#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
+#define E1000_FHFT_QUEUEING_PRIO 0x00070000
+#define E1000_FHFT_QUEUEING_OFFSET 0xFC
+#define E1000_FHFT_QUEUEING_QUEUE_SHIFT 8
+#define E1000_FHFT_QUEUEING_PRIO_SHIFT 16
+#define E1000_WUFC_FLEX_HQ 0x00004000
+
+#define E1000_SPQF_SRCPORT 0x0000FFFF
+
+#define E1000_MAX_FTQF_FILTERS 8
+#define E1000_FTQF_PROTOCOL_MASK 0x000000FF
+#define E1000_FTQF_5TUPLE_MASK_SHIFT 28
+#define E1000_FTQF_PROTOCOL_COMP_MASK 0x10000000
+#define E1000_FTQF_SOURCE_ADDR_MASK 0x20000000
+#define E1000_FTQF_DEST_ADDR_MASK 0x40000000
+#define E1000_FTQF_SOURCE_PORT_MASK 0x80000000
+#define E1000_FTQF_VF_MASK_EN 0x00008000
+#define E1000_FTQF_QUEUE_MASK 0x03ff0000
+#define E1000_FTQF_QUEUE_SHIFT 16
+#define E1000_FTQF_QUEUE_ENABLE 0x00000100
+
+/* structure for interrupt relative data */
+struct e1000_interrupt {
+ uint32_t flags;
+ uint32_t mask;
+};
+
+/* local vfta copy */
+struct e1000_vfta {
+ uint32_t vfta[IGB_VFTA_SIZE];
+};
+
+/*
+ * VF data used by the PF host only
+ */
+#define E1000_MAX_VF_MC_ENTRIES 30
+struct e1000_vf_info {
+ uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
+ uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES];
+ uint16_t num_vf_mc_hashes;
+ uint16_t default_vf_vlan_id;
+ uint16_t vlans_enabled;
+ uint16_t pf_qos;
+ uint16_t vlan_count;
+ uint16_t tx_rate;
+};
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+struct e1000_adapter {
+ struct e1000_hw hw;
+ struct e1000_hw_stats stats;
+ struct e1000_interrupt intr;
+ struct e1000_vfta shadow_vfta;
+ struct e1000_vf_info *vfdata;
+};
+
+#define E1000_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct e1000_adapter *)adapter)->hw)
+
+#define E1000_DEV_PRIVATE_TO_STATS(adapter) \
+ (&((struct e1000_adapter *)adapter)->stats)
+
+#define E1000_DEV_PRIVATE_TO_INTR(adapter) \
+ (&((struct e1000_adapter *)adapter)->intr)
+
+#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \
+ (&((struct e1000_adapter *)adapter)->shadow_vfta)
+
+#define E1000_DEV_PRIVATE_TO_P_VFDATA(adapter) \
+ (&((struct e1000_adapter *)adapter)->vfdata)
+
+/*
+ * RX/TX IGB function prototypes
+ */
+void eth_igb_tx_queue_release(void *txq);
+void eth_igb_rx_queue_release(void *rxq);
+void igb_dev_clear_queues(struct rte_eth_dev *dev);
+
+int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+
+int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+int eth_igb_rx_init(struct rte_eth_dev *dev);
+
+void eth_igb_tx_init(struct rte_eth_dev *dev);
+
+uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t eth_igb_recv_scattered_pkts(void *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+int eth_igb_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+int eth_igbvf_rx_init(struct rte_eth_dev *dev);
+
+void eth_igbvf_tx_init(struct rte_eth_dev *dev);
+
+/*
+ * misc function prototypes
+ */
+void igb_pf_host_init(struct rte_eth_dev *eth_dev);
+
+void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);
+
+int igb_pf_host_configure(struct rte_eth_dev *eth_dev);
+
+/*
+ * RX/TX EM function prototypes
+ */
+void eth_em_tx_queue_release(void *txq);
+void eth_em_rx_queue_release(void *rxq);
+
+void em_dev_clear_queues(struct rte_eth_dev *dev);
+
+int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+
+int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+int eth_em_rx_init(struct rte_eth_dev *dev);
+
+void eth_em_tx_init(struct rte_eth_dev *dev);
+
+uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _E1000_ETHDEV_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_e1000/e1000_logs.h b/src/dpdk_lib18/librte_pmd_e1000/e1000_logs.h
new file mode 100755
index 00000000..67f2c84c
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/e1000_logs.h
@@ -0,0 +1,77 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _E1000_LOGS_H_
+#define _E1000_LOGS_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) RTE_LOG(level, PMD," " fmt "\n", ##args)
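+/* Typical usage, as seen later in this patch:
+ *   PMD_INIT_LOG(ERR, "MAC Initialization Error");
+ * The macro appends a trailing newline, so callers omit it. */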
+
+
+#ifdef RTE_LIBRTE_E1000_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_E1000_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_E1000_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_E1000_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_E1000_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _E1000_LOGS_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_e1000/em_ethdev.c b/src/dpdk_lib18/librte_pmd_e1000/em_ethdev.c
new file mode 100755
index 00000000..3f2897ee
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/em_ethdev.c
@@ -0,0 +1,1532 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "e1000_logs.h"
+#include "e1000/e1000_api.h"
+#include "e1000_ethdev.h"
+
+#define EM_EIAC 0x000DC
+
+#define PMD_ROUNDUP(x,y) (((x) + (y) - 1)/(y) * (y))
+
+
+static int eth_em_configure(struct rte_eth_dev *dev);
+static int eth_em_start(struct rte_eth_dev *dev);
+static void eth_em_stop(struct rte_eth_dev *dev);
+static void eth_em_close(struct rte_eth_dev *dev);
+static void eth_em_promiscuous_enable(struct rte_eth_dev *dev);
+static void eth_em_promiscuous_disable(struct rte_eth_dev *dev);
+static void eth_em_allmulticast_enable(struct rte_eth_dev *dev);
+static void eth_em_allmulticast_disable(struct rte_eth_dev *dev);
+static int eth_em_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static void eth_em_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *rte_stats);
+static void eth_em_stats_reset(struct rte_eth_dev *dev);
+static void eth_em_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int eth_em_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_em_interrupt_get_status(struct rte_eth_dev *dev);
+static int eth_em_interrupt_action(struct rte_eth_dev *dev);
+static void eth_em_interrupt_handler(struct rte_intr_handle *handle,
+ void *param);
+
+static int em_hw_init(struct e1000_hw *hw);
+static int em_hardware_init(struct e1000_hw *hw);
+static void em_hw_control_acquire(struct e1000_hw *hw);
+static void em_hw_control_release(struct e1000_hw *hw);
+static void em_init_manageability(struct e1000_hw *hw);
+static void em_release_manageability(struct e1000_hw *hw);
+
+static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+static int eth_em_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static void eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev);
+static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev);
+
+/*
+static void eth_em_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+*/
+static int eth_em_led_on(struct rte_eth_dev *dev);
+static int eth_em_led_off(struct rte_eth_dev *dev);
+
+static void em_intr_disable(struct e1000_hw *hw);
+static int em_get_rx_buffer_size(struct e1000_hw *hw);
+static void eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+
+#define EM_FC_PAUSE_TIME 0x0680
+#define EM_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
+#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
+
+static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id pci_id_em_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_EM(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{.device_id = 0},
+};
+
+static struct eth_dev_ops eth_em_ops = {
+ .dev_configure = eth_em_configure,
+ .dev_start = eth_em_start,
+ .dev_stop = eth_em_stop,
+ .dev_close = eth_em_close,
+ .promiscuous_enable = eth_em_promiscuous_enable,
+ .promiscuous_disable = eth_em_promiscuous_disable,
+ .allmulticast_enable = eth_em_allmulticast_enable,
+ .allmulticast_disable = eth_em_allmulticast_disable,
+ .link_update = eth_em_link_update,
+ .stats_get = eth_em_stats_get,
+ .stats_reset = eth_em_stats_reset,
+ .dev_infos_get = eth_em_infos_get,
+ .mtu_set = eth_em_mtu_set,
+ .vlan_filter_set = eth_em_vlan_filter_set,
+ .vlan_offload_set = eth_em_vlan_offload_set,
+ .rx_queue_setup = eth_em_rx_queue_setup,
+ .rx_queue_release = eth_em_rx_queue_release,
+ .rx_queue_count = eth_em_rx_queue_count,
+ .rx_descriptor_done = eth_em_rx_descriptor_done,
+ .tx_queue_setup = eth_em_tx_queue_setup,
+ .tx_queue_release = eth_em_tx_queue_release,
+ .dev_led_on = eth_em_led_on,
+ .dev_led_off = eth_em_led_off,
+ .flow_ctrl_get = eth_em_flow_ctrl_get,
+ .flow_ctrl_set = eth_em_flow_ctrl_set,
+ .mac_addr_add = eth_em_rar_set,
+ .mac_addr_remove = eth_em_rar_clear,
+};
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * Pointer to the structure rte_eth_dev to read from.
+ * @param link
+ * Pointer to the buffer in which the link status is stored.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * Pointer to the structure rte_eth_dev to write to.
+ * @param link
+ * Pointer to the buffer holding the link status to be written.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
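+/*
+ * Note on the two helpers above: struct rte_eth_link is expected to fit in
+ * 64 bits, so rte_atomic64_cmpset(dst, *dst, *src) copies the whole link
+ * record in a single atomic operation. The call returns 0 (and the helper
+ * returns -1) only if another thread modified *dst between the read of
+ * *dst and the compare-and-set itself.
+ */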
+
+static int
+eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+
+ pci_dev = eth_dev->pci_dev;
+ eth_dev->dev_ops = &eth_em_ops;
+ eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
+ eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check whether we need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst =
+ (eth_rx_burst_t)&eth_em_recv_scattered_pkts;
+ return 0;
+ }
+
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->device_id = pci_dev->id.device_id;
+
+ /* For ICH8 support we'll need to map the flash memory BAR */
+
+ if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
+ em_hw_init(hw) != 0) {
+ PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
+ "failed to init HW",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+ return -(ENODEV);
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
+ hw->mac.rar_entry_count, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ return -(ENOMEM);
+ }
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->mac.addr,
+ eth_dev->data->mac_addrs);
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ rte_intr_callback_register(&(pci_dev->intr_handle),
+ eth_em_interrupt_handler, (void *)eth_dev);
+
+ return (0);
+}
+
+static struct eth_driver rte_em_pmd = {
+ {
+ .name = "rte_em_pmd",
+ .id_table = pci_id_em_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ },
+ .eth_dev_init = eth_em_dev_init,
+ .dev_private_size = sizeof(struct e1000_adapter),
+};
+
+static int
+rte_em_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
+{
+ rte_eth_driver_register(&rte_em_pmd);
+ return 0;
+}
+
+static int
+em_hw_init(struct e1000_hw *hw)
+{
+ int diag;
+
+ diag = hw->mac.ops.init_params(hw);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "MAC Initialization Error");
+ return diag;
+ }
+ diag = hw->nvm.ops.init_params(hw);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "NVM Initialization Error");
+ return diag;
+ }
+ diag = hw->phy.ops.init_params(hw);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "PHY Initialization Error");
+ return diag;
+ }
+ (void) e1000_get_bus_info(hw);
+
+ hw->mac.autoneg = 1;
+ hw->phy.autoneg_wait_to_complete = 0;
+ hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+
+ e1000_init_script_state_82541(hw, TRUE);
+ e1000_set_tbi_compatibility_82543(hw, TRUE);
+
+ /* Copper options */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ hw->phy.mdix = 0; /* AUTO_ALL_MODES */
+ hw->phy.disable_polarity_correction = 0;
+ hw->phy.ms_type = e1000_ms_hw_default;
+ }
+
+ /*
+ * Start from a known state; this is important for reading the NVM
+ * and MAC address correctly afterwards.
+ */
+ e1000_reset_hw(hw);
+
+ /* Make sure we have a good EEPROM before we read from it */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ /*
+ * Some PCI-E parts fail the first check due to
+ * the link being in a sleep state; call it again, and
+ * if it fails a second time it's a real issue.
+ */
+ diag = e1000_validate_nvm_checksum(hw);
+ if (diag < 0) {
+ PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
+ goto error;
+ }
+ }
+
+ /* Read the permanent MAC address out of the EEPROM */
+ diag = e1000_read_mac_addr(hw);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
+ goto error;
+ }
+
+ /* Now initialize the hardware */
+ diag = em_hardware_init(hw);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "Hardware initialization failed");
+ goto error;
+ }
+
+ hw->mac.get_link_status = 1;
+
+ /* Indicate SOL/IDER usage */
+ diag = e1000_check_reset_block(hw);
+ if (diag < 0) {
+ PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
+ "SOL/IDER session");
+ }
+ return (0);
+
+error:
+ em_hw_control_release(hw);
+ return (diag);
+}
+
+static int
+eth_em_configure(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+ PMD_INIT_FUNC_TRACE();
+
+ return (0);
+}
+
+static void
+em_set_pba(struct e1000_hw *hw)
+{
+ uint32_t pba;
+
+ /*
+ * Packet Buffer Allocation (PBA)
+ * Writing PBA sets the receive portion of the buffer;
+ * the remainder is used for the transmit buffer.
+ * Devices before the 82547 had a Packet Buffer of 64K.
+ * After the 82547 the buffer was reduced to 40K.
+ */
+ switch (hw->mac.type) {
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ /* 82547: Total Packet Buffer is 40K */
+ pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
+ break;
+ case e1000_82573: /* 82573: Total Packet Buffer is 32K */
+ pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
+ break;
+ case e1000_ich8lan:
+ pba = E1000_PBA_8K;
+ break;
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ pba = E1000_PBA_10K;
+ break;
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ pba = E1000_PBA_26K;
+ break;
+ default:
+ pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
+ }
+
+ E1000_WRITE_REG(hw, E1000_PBA, pba);
+}
+
+static int
+eth_em_start(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret, mask;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_em_stop(dev);
+
+ e1000_power_up_phy(hw);
+
+ /* Set default PBA value */
+ em_set_pba(hw);
+
+ /* Put the address into the Receive Address Array */
+ e1000_rar_set(hw, hw->mac.addr, 0);
+
+ /*
+ * With the 82571 adapter, RAR[0] may be overwritten
+ * when the other port is reset, so we keep a duplicate
+ * in RAR[14] for that eventuality; this ensures
+ * the interface continues to function.
+ */
+ if (hw->mac.type == e1000_82571) {
+ e1000_set_laa_state_82571(hw, TRUE);
+ e1000_rar_set(hw, hw->mac.addr, E1000_RAR_ENTRIES - 1);
+ }
+
+ /* Initialize the hardware */
+ if (em_hardware_init(hw)) {
+ PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
+ return (-EIO);
+ }
+
+ E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+
+ /* Configure for OS presence */
+ em_init_manageability(hw);
+
+ eth_em_tx_init(dev);
+
+ ret = eth_em_rx_init(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ em_dev_clear_queues(dev);
+ return ret;
+ }
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ ETH_VLAN_EXTEND_MASK;
+ eth_em_vlan_offload_set(dev, mask);
+
+ /* Set Interrupt Throttling Rate to maximum allowed value. */
+ E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX);
+
+ /* Setup link speed and duplex */
+ switch (dev->data->dev_conf.link_speed) {
+ case ETH_LINK_SPEED_AUTONEG:
+ if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+ else if (dev->data->dev_conf.link_duplex ==
+ ETH_LINK_HALF_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
+ else if (dev->data->dev_conf.link_duplex ==
+ ETH_LINK_FULL_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_10:
+ if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
+ else if (dev->data->dev_conf.link_duplex ==
+ ETH_LINK_HALF_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
+ else if (dev->data->dev_conf.link_duplex ==
+ ETH_LINK_FULL_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_100:
+ if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
+ else if (dev->data->dev_conf.link_duplex ==
+ ETH_LINK_HALF_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
+ else if (dev->data->dev_conf.link_duplex ==
+ ETH_LINK_FULL_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_1000:
+ if ((dev->data->dev_conf.link_duplex ==
+ ETH_LINK_AUTONEG_DUPLEX) ||
+ (dev->data->dev_conf.link_duplex ==
+ ETH_LINK_FULL_DUPLEX))
+ hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_10000:
+ default:
+ goto error_invalid_config;
+ }
+ e1000_setup_link(hw);
+
+ /* check if lsc interrupt feature is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ ret = eth_em_interrupt_setup(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to setup interrupts");
+ em_dev_clear_queues(dev);
+ return ret;
+ }
+ }
+
+ PMD_INIT_LOG(DEBUG, "<<");
+
+ return (0);
+
+error_invalid_config:
+ PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
+ dev->data->dev_conf.link_speed,
+ dev->data->dev_conf.link_duplex, dev->data->port_id);
+ em_dev_clear_queues(dev);
+ return (-EINVAL);
+}
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC.
+ *
+ **********************************************************************/
+static void
+eth_em_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ em_intr_disable(hw);
+ e1000_reset_hw(hw);
+ if (hw->mac.type >= e1000_82544)
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+ /* Power down the phy. Needed to make the link go down */
+ e1000_power_down_phy(hw);
+
+ em_dev_clear_queues(dev);
+
+ /* clear the recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_em_dev_atomic_write_link_status(dev, &link);
+}
+
+static void
+eth_em_close(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ eth_em_stop(dev);
+ e1000_phy_hw_reset(hw);
+ em_release_manageability(hw);
+ em_hw_control_release(hw);
+}
+
+static int
+em_get_rx_buffer_size(struct e1000_hw *hw)
+{
+ uint32_t rx_buf_size;
+
+ rx_buf_size = ((E1000_READ_REG(hw, E1000_PBA) & UINT16_MAX) << 10);
+ return rx_buf_size;
+}
+
+/*********************************************************************
+ *
+ * Initialize the hardware
+ *
+ **********************************************************************/
+static int
+em_hardware_init(struct e1000_hw *hw)
+{
+ uint32_t rx_buf_size;
+ int diag;
+
+ /* Issue a global reset */
+ e1000_reset_hw(hw);
+
+ /* Let the firmware know the OS is in control */
+ em_hw_control_acquire(hw);
+
+ /*
+ * These parameters control the automatic generation (Tx) and
+ * response (Rx) to Ethernet PAUSE frames.
+ * - High water mark should allow for at least two standard size (1518)
+ * frames to be received after sending an XOFF.
+ * - Low water mark works best when it is very near the high water mark.
+ * This allows the receiver to restart by sending XON when it has
+ * drained a bit. Here we use an arbitrary value of 1500 which will
+ * restart after one full frame is pulled from the buffer. There
+ * could be several smaller frames in the buffer and if so they will
+ * not trigger the XON until their total number reduces the buffer
+ * by 1500.
+ * - The pause time is fairly large at 1000 x 512ns = 512 usec.
+ */
+ rx_buf_size = em_get_rx_buffer_size(hw);
+
+ hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);
+ hw->fc.low_water = hw->fc.high_water - 1500;
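+ /*
+ * Illustrative figures (assuming E1000_PBA_40K, i.e. rx_buf_size of
+ * 40 KB = 40960 bytes): PMD_ROUNDUP(2 * 1518, 1024) = 3072, so
+ * high_water = 40960 - 3072 = 37888 and low_water = 36388.
+ */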
+
+ if (hw->mac.type == e1000_80003es2lan)
+ hw->fc.pause_time = UINT16_MAX;
+ else
+ hw->fc.pause_time = EM_FC_PAUSE_TIME;
+
+ hw->fc.send_xon = 1;
+
+ /* Set Flow control, use the tunable location if sane */
+ if (em_fc_setting <= e1000_fc_full)
+ hw->fc.requested_mode = em_fc_setting;
+ else
+ hw->fc.requested_mode = e1000_fc_none;
+
+ /* Workaround: no TX flow ctrl for PCH */
+ if (hw->mac.type == e1000_pchlan)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+
+ /* Override - settings for PCH2LAN; yes, it's magic :) */
+ if (hw->mac.type == e1000_pch2lan) {
+ hw->fc.high_water = 0x5C20;
+ hw->fc.low_water = 0x5048;
+ hw->fc.pause_time = 0x0650;
+ hw->fc.refresh_time = 0x0400;
+ }
+
+ diag = e1000_init_hw(hw);
+ if (diag < 0)
+ return (diag);
+ e1000_check_for_link(hw);
+ return (0);
+}
+
+/* This function is based on em_update_stats_counters() in e1000/if_em.c */
+static void
+eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_hw_stats *stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ int pause_frames;
+
+ if(hw->phy.media_type == e1000_media_type_copper ||
+ (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ stats->symerrs += E1000_READ_REG(hw,E1000_SYMERRS);
+ stats->sec += E1000_READ_REG(hw, E1000_SEC);
+ }
+
+ stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+ stats->mpc += E1000_READ_REG(hw, E1000_MPC);
+ stats->scc += E1000_READ_REG(hw, E1000_SCC);
+ stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
+
+ stats->mcc += E1000_READ_REG(hw, E1000_MCC);
+ stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
+ stats->colc += E1000_READ_REG(hw, E1000_COLC);
+ stats->dc += E1000_READ_REG(hw, E1000_DC);
+ stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
+ stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+ stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+
+ /*
+ * For watchdog management we need to know if we have been
+ * paused during the last interval, so capture that here.
+ */
+ pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
+ stats->xoffrxc += pause_frames;
+ stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+ stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+ stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
+ stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
+ stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
+ stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
+ stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+ stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+ stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
+ stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
+ stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
+ stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
+
+ /*
+ * For the 64-bit byte counters the low dword must be read first.
+ * Both registers clear on the read of the high dword.
+ */
+
+ stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
+ stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
+ stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
+ stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
+
+ stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
+ stats->ruc += E1000_READ_REG(hw, E1000_RUC);
+ stats->rfc += E1000_READ_REG(hw, E1000_RFC);
+ stats->roc += E1000_READ_REG(hw, E1000_ROC);
+ stats->rjc += E1000_READ_REG(hw, E1000_RJC);
+
+ stats->tor += E1000_READ_REG(hw, E1000_TORH);
+ stats->tot += E1000_READ_REG(hw, E1000_TOTH);
+
+ stats->tpr += E1000_READ_REG(hw, E1000_TPR);
+ stats->tpt += E1000_READ_REG(hw, E1000_TPT);
+ stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+ stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+ stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+ stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+ stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+ stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+ stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
+ stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+ /* Interrupt Counts */
+
+ if (hw->mac.type >= e1000_82571) {
+ stats->iac += E1000_READ_REG(hw, E1000_IAC);
+ stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+ stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+ stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+ stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+ stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+ stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+ stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+ stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+ }
+
+ if (hw->mac.type >= e1000_82543) {
+ stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+ stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+ stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+ stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
+ stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+ stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+ }
+
+ if (rte_stats == NULL)
+ return;
+
+ /* Rx Errors */
+ rte_stats->ibadcrc = stats->crcerrs;
+ rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
+ rte_stats->imissed = stats->mpc;
+ rte_stats->ierrors = rte_stats->ibadcrc +
+ rte_stats->ibadlen +
+ rte_stats->imissed +
+ stats->rxerrc + stats->algnerrc + stats->cexterr;
+
+ /* Tx Errors */
+ rte_stats->oerrors = stats->ecol + stats->latecol;
+
+ rte_stats->ipackets = stats->gprc;
+ rte_stats->opackets = stats->gptc;
+ rte_stats->ibytes = stats->gorc;
+ rte_stats->obytes = stats->gotc;
+
+ /* XON/XOFF pause frames stats registers */
+ rte_stats->tx_pause_xon = stats->xontxc;
+ rte_stats->rx_pause_xon = stats->xonrxc;
+ rte_stats->tx_pause_xoff = stats->xofftxc;
+ rte_stats->rx_pause_xoff = stats->xoffrxc;
+}
+
+static void
+eth_em_stats_reset(struct rte_eth_dev *dev)
+{
+ struct e1000_hw_stats *hw_stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* HW registers are cleared on read */
+ eth_em_stats_get(dev, NULL);
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+}
+
+static uint32_t
+em_get_max_pktlen(const struct e1000_hw *hw)
+{
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ case e1000_pch2lan:
+ case e1000_82574:
+ case e1000_80003es2lan: /* 9K Jumbo Frame size */
+ return (0x2412);
+ case e1000_pchlan:
+ return (0x1000);
+ /* Adapters that do not support jumbo frames */
+ case e1000_82583:
+ case e1000_ich8lan:
+ return (ETHER_MAX_LEN);
+ default:
+ return (MAX_JUMBO_FRAME_SIZE);
+ }
+}
+
+static void
+eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
+ dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
+ dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+
+ /*
+ * Starting with the 631xESB, hw supports 2 TX/RX queues per port.
+ * Unfortunately, all these nics have just one TX context.
+ * So we have a few choices for TX:
+ * - Use just one TX queue.
+ * - Allow cksum offload only for one TX queue.
+ * - Don't allow TX cksum offload at all.
+ * For now, option #1 was chosen.
+ * To use a second RX queue we would have to use the extended RX
+ * descriptor (Multiple Receive Queues are mutually exclusive with UDP
+ * fragmentation and are not supported when a legacy receive
+ * descriptor format is used).
+ * That means separate RX routines - as legacy nics (82540, 82545)
+ * don't support extended RXD.
+ * To avoid this we support just one RX queue for now (no RSS).
+ */
+
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = 1;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link, old;
+ int link_check, count;
+
+ link_check = 0;
+ hw->mac.get_link_status = 1;
+
+ /* possible wait-to-complete in up to 9 seconds */
+ for (count = 0; count < EM_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
+ /* Read the real link status */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ /* Do the work to read phy */
+ e1000_check_for_link(hw);
+ link_check = !hw->mac.get_link_status;
+ break;
+
+ case e1000_media_type_fiber:
+ e1000_check_for_link(hw);
+ link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STATUS_LU);
+ break;
+
+ case e1000_media_type_internal_serdes:
+ e1000_check_for_link(hw);
+ link_check = hw->mac.serdes_has_link;
+ break;
+
+ default:
+ break;
+ }
+ if (link_check || wait_to_complete == 0)
+ break;
+ rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
+ }
+ memset(&link, 0, sizeof(link));
+ rte_em_dev_atomic_read_link_status(dev, &link);
+ old = link;
+
+ /* Now we check if a transition has happened */
+ if (link_check && (link.link_status == 0)) {
+ hw->mac.ops.get_link_up_info(hw, &link.link_speed,
+ &link.link_duplex);
+ link.link_status = 1;
+ } else if (!link_check && (link.link_status == 1)) {
+ link.link_speed = 0;
+ link.link_duplex = 0;
+ link.link_status = 0;
+ }
+ rte_em_dev_atomic_write_link_status(dev, &link);
+
+ /* not changed */
+ if (old.link_status == link.link_status)
+ return -1;
+
+ /* changed */
+ return 0;
+}
+
+/*
+ * em_hw_control_acquire sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means
+ * that the driver is loaded. For AMT version type f/w
+ * this means that the network i/f is open.
+ */
+static void
+em_hw_control_acquire(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext, swsm;
+
+ /* Let firmware know the driver has taken over */
+ if (hw->mac.type == e1000_82573) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
+
+ } else {
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ }
+}
+
+/*
+ * em_hw_control_release resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT versions of the
+ * f/w this means that the network i/f is closed.
+ */
+static void
+em_hw_control_release(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext, swsm;
+
+ /* Let firmware take over control of h/w */
+ if (hw->mac.type == e1000_82573) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+ } else {
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ }
+}
+
+/*
+ * Bit of a misnomer: what this really means is
+ * to enable OS management of the system... aka
+ * to disable special hardware management features.
+ */
+static void
+em_init_manageability(struct e1000_hw *hw)
+{
+ if (e1000_enable_mng_pass_thru(hw)) {
+ uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
+ uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
+
+ /* disable hardware interception of ARP */
+ manc &= ~(E1000_MANC_ARP_EN);
+
+ /* enable receiving management packets to the host */
+ manc |= E1000_MANC_EN_MNG2HOST;
+ manc2h |= 1 << 5; /* Mng Port 623 */
+ manc2h |= 1 << 6; /* Mng Port 664 */
+ E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+ }
+}
+
+/*
+ * Give control back to hardware management
+ * controller if there is one.
+ */
+static void
+em_release_manageability(struct e1000_hw *hw)
+{
+ uint32_t manc;
+
+ if (e1000_enable_mng_pass_thru(hw)) {
+ manc = E1000_READ_REG(hw, E1000_MANC);
+
+ /* re-enable hardware interception of ARP */
+ manc |= E1000_MANC_ARP_EN;
+ manc &= ~E1000_MANC_EN_MNG2HOST;
+
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+ }
+}
+
+static void
+eth_em_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_em_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);
+ if (dev->data->all_multicast == 1)
+ rctl |= E1000_RCTL_MPE;
+ else
+ rctl &= (~E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_em_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_MPE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_em_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ if (dev->data->promiscuous == 1)
+ return; /* must remain in all_multicast mode */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= (~E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static int
+eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
+ vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK);
+ vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
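+ /*
+ * vid_idx selects one of the 32-bit VFTA words (upper bits of the
+ * VLAN ID) and vid_bit the bit inside that word (lower 5 bits).
+ * For example, VLAN 100 maps to word 3, bit 4 (100 >> 5 = 3,
+ * 100 & 0x1f = 4), assuming the usual 5-bit/32-entry split.
+ */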
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
+static void
+em_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* Filter Table Disable */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg &= ~E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+}
+
+static void
+em_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t reg;
+ int i;
+
+ /* Filter Table Enable, CFI not used for packet acceptance */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+
+ /* restore vfta from local copy */
+ for (i = 0; i < IGB_VFTA_SIZE; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
+}
+
+static void
+em_vlan_hw_strip_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* VLAN Mode Disable */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg &= ~E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+}
+
+static void
+em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* VLAN Mode Enable */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg |= E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+}
+
+static void
+eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ if(mask & ETH_VLAN_STRIP_MASK){
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ em_vlan_hw_strip_enable(dev);
+ else
+ em_vlan_hw_strip_disable(dev);
+ }
+
+ if(mask & ETH_VLAN_FILTER_MASK){
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ em_vlan_hw_filter_enable(dev);
+ else
+ em_vlan_hw_filter_disable(dev);
+ }
+}
+
+static void
+em_intr_disable(struct e1000_hw *hw)
+{
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+}
+
+/**
+ * It enables the interrupt mask and then enables the interrupt.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_em_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+ return (0);
+}
+
+/*
+ * It reads ICR to get the interrupt causes, checks them, and sets a bit
+ * flag requesting a link status update.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_em_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t icr;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ /* read-on-clear nic registers here */
+ icr = E1000_READ_REG(hw, E1000_ICR);
+ if (icr & E1000_ICR_LSC) {
+ intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+ }
+
+ return 0;
+}
+
+/*
+ * It executes link_update after knowing an interrupt is present.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_em_interrupt_action(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ uint32_t tctl, rctl;
+ struct rte_eth_link link;
+ int ret;
+
+ if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
+ return -1;
+
+ intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+
+ /* set get_link_status to check register later */
+ hw->mac.get_link_status = 1;
+ ret = eth_em_link_update(dev, 0);
+
+ /* check if link has changed */
+ if (ret < 0)
+ return 0;
+
+ memset(&link, 0, sizeof(link));
+ rte_em_dev_atomic_read_link_status(dev, &link);
+ if (link.link_status) {
+ PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
+ dev->data->port_id, (unsigned)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
+ }
+ PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+ dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
+ dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ if (link.link_status) {
+ /* enable Tx/Rx */
+ tctl |= E1000_TCTL_EN;
+ rctl |= E1000_RCTL_EN;
+ } else {
+ /* disable Tx/Rx */
+ tctl &= ~E1000_TCTL_EN;
+ rctl &= ~E1000_RCTL_EN;
+ }
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * Interrupt handler which shall be registered first.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+eth_em_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ eth_em_interrupt_get_status(dev);
+ eth_em_interrupt_action(dev);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+}
+
+static int
+eth_em_led_on(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+eth_em_led_off(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct e1000_hw *hw;
+ uint32_t ctrl;
+ int tx_pause;
+ int rx_pause;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ fc_conf->pause_time = hw->fc.pause_time;
+ fc_conf->high_water = hw->fc.high_water;
+ fc_conf->low_water = hw->fc.low_water;
+ fc_conf->send_xon = hw->fc.send_xon;
+ fc_conf->autoneg = hw->mac.autoneg;
+
+ /*
+ * Return rx_pause and tx_pause status according to actual setting of
+ * the TFCE and RFCE bits in the CTRL register.
+ */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ if (ctrl & E1000_CTRL_TFCE)
+ tx_pause = 1;
+ else
+ tx_pause = 0;
+
+ if (ctrl & E1000_CTRL_RFCE)
+ rx_pause = 1;
+ else
+ rx_pause = 0;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static int
+eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct e1000_hw *hw;
+ int err;
+ enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
+ e1000_fc_none,
+ e1000_fc_rx_pause,
+ e1000_fc_tx_pause,
+ e1000_fc_full
+ };
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint32_t rctl;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (fc_conf->autoneg != hw->mac.autoneg)
+ return -ENOTSUP;
+ rx_buf_size = em_get_rx_buffer_size(hw);
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+ /* At least reserve one Ethernet frame for watermark */
+ max_high_water = rx_buf_size - ETHER_MAX_LEN;
+ if ((fc_conf->high_water > max_high_water) ||
+ (fc_conf->high_water < fc_conf->low_water)) {
+ PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
+ PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
+ return (-EINVAL);
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
+ hw->fc.pause_time = fc_conf->pause_time;
+ hw->fc.high_water = fc_conf->high_water;
+ hw->fc.low_water = fc_conf->low_water;
+ hw->fc.send_xon = fc_conf->send_xon;
+
+ err = e1000_setup_link_generic(hw);
+ if (err == E1000_SUCCESS) {
+
+ /* check if we want to forward MAC frames - the driver doesn't have a
+ * native capability to do that, so we'll write the registers ourselves */
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* set or clear MFLCN.PMCF bit depending on configuration */
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ rctl |= E1000_RCTL_PMCF;
+ else
+ rctl &= ~E1000_RCTL_PMCF;
+
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+ }
+
+ PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
+ return (-EIO);
+}
+
+static void
+eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, __rte_unused uint32_t pool)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ e1000_rar_set(hw, mac_addr->addr_bytes, index);
+}
+
+static void
+eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
+{
+ uint8_t addr[ETHER_ADDR_LEN];
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ memset(addr, 0, sizeof(addr));
+
+ e1000_rar_set(hw, addr, index);
+}
+
+static int
+eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct rte_eth_dev_info dev_info;
+ struct e1000_hw *hw;
+ uint32_t frame_size;
+ uint32_t rctl;
+
+ eth_em_infos_get(dev, &dev_info);
+ frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
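+ /*
+ * Example: an MTU of 1500 gives 1500 + 14 (Ethernet header) + 4 (CRC)
+ * + 4 (VLAN tag) = 1522 bytes, which exceeds ETHER_MAX_LEN (1518) and
+ * therefore selects jumbo mode below.
+ */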
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* refuse mtu that requires the support of scattered packets when this
+ * feature has not been enabled before. */
+ if (!dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ return -EINVAL;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* switch to jumbo mode if needed */
+ if (frame_size > ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ rctl |= E1000_RCTL_LPE;
+ } else {
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ rctl &= ~E1000_RCTL_LPE;
+ }
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ return 0;
+}
+
+struct rte_driver em_pmd_drv = {
+ .type = PMD_PDEV,
+ .init = rte_em_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(em_pmd_drv);
diff --git a/src/dpdk_lib18/librte_pmd_e1000/em_rxtx.c b/src/dpdk_lib18/librte_pmd_e1000/em_rxtx.c
new file mode 100755
index 00000000..aa0b88c1
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/em_rxtx.c
@@ -0,0 +1,1867 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+
+#include "e1000_logs.h"
+#include "e1000/e1000_api.h"
+#include "e1000_ethdev.h"
+#include "e1000/e1000_osdep.h"
+
+#define E1000_TXD_VLAN_SHIFT 16
+
+#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+ return (m);
+}
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+ (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
+
+#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
+ (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
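+/*
+ * Both macros compute the bus address handed to the NIC: buf_physaddr is
+ * the physical address of the mbuf data buffer, and adding data_off (or
+ * the fixed RTE_PKTMBUF_HEADROOM for freshly allocated mbufs) points at
+ * the first byte of packet data.
+ */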
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct em_rx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct em_tx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+ uint16_t next_id; /**< Index of next descriptor in ring. */
+ uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct em_rx_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
+ volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
+ struct em_rx_entry *sw_ring; /**< address of RX software ring. */
+ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t rx_tail; /**< current value of RDT register. */
+ uint16_t nb_rx_hold; /**< number of held free RX desc. */
+ uint16_t rx_free_thresh; /**< max free RX desc to hold. */
+ uint16_t queue_id; /**< RX queue index. */
+ uint8_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold register. */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+};
+
+/**
+ * Hardware context number
+ */
+enum {
+ EM_CTX_0 = 0, /**< CTX0 */
+ EM_CTX_NUM = 1, /**< CTX NUM */
+};
+
+/** Offload features */
+union em_vlan_macip {
+ uint32_t data;
+ struct {
+ uint16_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint16_t vlan_tci;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ } f;
+};
+
+/*
+ * Compare mask for vlan_macip_len.data,
+ * should be in sync with em_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
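+/*
+ * Illustration of the layout these masks assume: bits 0-8 of
+ * em_vlan_macip.data hold l3_len, bits 9-15 hold l2_len and bits 16-31
+ * hold vlan_tci. what_ctx_update() XORs the cached and the new value and
+ * masks the result, so only the fields selected by cmp_mask can force a
+ * new context descriptor.
+ */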
+
+/**
+ * Structure to check if a new context needs to be built
+ */
+struct em_ctx_info {
+ uint64_t flags; /**< ol_flags related to context build. */
+ uint32_t cmp_mask; /**< compare mask */
+ union em_vlan_macip hdrlen; /**< L2 and L3 header lengths */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct em_tx_queue {
+ volatile struct e1000_data_desc *tx_ring; /**< TX ring address */
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */
+ volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
+ uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ uint16_t tx_tail; /**< Current value of TDT register. */
+ uint16_t tx_free_thresh;/**< minimum TX before freeing. */
+ /**< Number of TX descriptors to use before RS bit is set. */
+ uint16_t tx_rs_thresh;
+ /** Number of TX descriptors used since RS bit was set. */
+ uint16_t nb_tx_used;
+ /** Index to last TX descriptor to have been cleaned. */
+ uint16_t last_desc_cleaned;
+ /** Total number of TX descriptors ready to be allocated. */
+ uint16_t nb_tx_free;
+ uint16_t queue_id; /**< TX queue index. */
+ uint8_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold register. */
+ struct em_ctx_info ctx_cache;
+ /**< Hardware context history.*/
+};
+
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+#define rte_em_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_em_prefetch(p) do {} while(0)
+#endif
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while(0)
+#endif
+
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif /* DEFAULT_TX_FREE_THRESH */
+
+#ifndef DEFAULT_TX_RS_THRESH
+#define DEFAULT_TX_RS_THRESH 32
+#endif /* DEFAULT_TX_RS_THRESH */
+
+
+/*********************************************************************
+ *
+ * TX function
+ *
+ **********************************************************************/
+
+/*
+ * Populates TX context descriptor.
+ */
+static inline void
+em_set_xmit_ctx(struct em_tx_queue* txq,
+ volatile struct e1000_context_desc *ctx_txd,
+ uint64_t flags,
+ union em_vlan_macip hdrlen)
+{
+ uint32_t cmp_mask, cmd_len;
+ uint16_t ipcse, l2len;
+ struct e1000_context_desc ctx;
+
+ cmp_mask = 0;
+ cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C;
+
+ l2len = hdrlen.f.l2_len;
+ ipcse = (uint16_t)(l2len + hdrlen.f.l3_len);
+
+ /* setup IPCS* fields */
+ ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
+ ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
+ offsetof(struct ipv4_hdr, hdr_checksum));
+
+ /*
+ * When doing checksum or TCP segmentation with IPv6 headers,
+ * IPCSE field should be set to 0.
+ */
+ if (flags & PKT_TX_IP_CKSUM) {
+ ctx.lower_setup.ip_fields.ipcse =
+ (uint16_t)rte_cpu_to_le_16(ipcse - 1);
+ cmd_len |= E1000_TXD_CMD_IP;
+ cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+ } else {
+ ctx.lower_setup.ip_fields.ipcse = 0;
+ }
+
+ /* setup TUCS* fields */
+ ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse;
+ ctx.upper_setup.tcp_fields.tucse = 0;
+
+ switch (flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
+ offsetof(struct udp_hdr, dgram_cksum));
+ cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
+ offsetof(struct tcp_hdr, cksum));
+ cmd_len |= E1000_TXD_CMD_TCP;
+ cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+ break;
+ default:
+ ctx.upper_setup.tcp_fields.tucso = 0;
+ }
+
+ ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len);
+ ctx.tcp_seg_setup.data = 0;
+
+ *ctx_txd = ctx;
+
+ txq->ctx_cache.flags = flags;
+ txq->ctx_cache.cmp_mask = cmp_mask;
+ txq->ctx_cache.hdrlen = hdrlen;
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_ctx_update(struct em_tx_queue *txq, uint64_t flags,
+ union em_vlan_macip hdrlen)
+{
+ /* If match with the current context */
+ if (likely (txq->ctx_cache.flags == flags &&
+ ((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
+ txq->ctx_cache.cmp_mask) == 0))
+ return (EM_CTX_0);
+
+ /* Mismatch */
+ return (EM_CTX_NUM);
+}
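+/*
+ * In practice this means that back-to-back packets with the same ol_flags
+ * and identical l2_len/l3_len (e.g. a burst of TCP segments from one flow)
+ * keep reusing EM_CTX_0, and eth_em_xmit_pkts() skips writing a new
+ * context descriptor for them.
+ */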
+
+/* Reset transmit descriptors after they have been used */
+static inline int
+em_xmit_cleanup(struct em_tx_queue *txq)
+{
+ struct em_tx_entry *sw_ring = txq->sw_ring;
+ volatile struct e1000_data_desc *txr = txq->tx_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+
+ /* Determine the last descriptor needing to be cleaned */
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
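+	/*
+	 * Worked example (illustrative values only): with nb_tx_desc = 512,
+	 * last_desc_cleaned = 500 and tx_rs_thresh = 32, the sum 532 wraps
+	 * around to descriptor index 20.
+	 */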
+
+ /* Check to make sure the last descriptor to clean is done */
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD))
+ {
+ PMD_TX_FREE_LOG(DEBUG,
+ "TX descriptor %4u is not done"
+ "(port=%d queue=%d)", desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ /* Failed to clean any descriptors, better luck next time */
+ return -(1);
+ }
+
+ /* Figure out how many descriptors will be cleaned */
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ PMD_TX_FREE_LOG(DEBUG,
+ "Cleaning %4u TX descriptors: %4u to %4u "
+ "(port=%d queue=%d)", nb_tx_to_clean,
+ last_desc_cleaned, desc_to_clean_to, txq->port_id,
+ txq->queue_id);
+
+ /*
+ * The last descriptor to clean is done, so that means all the
+ * descriptors from the last descriptor that was cleaned
+ * up to the last descriptor with the RS bit set
+ * are done. Only reset the threshold descriptor.
+ */
+ txr[desc_to_clean_to].upper.fields.status = 0;
+
+ /* Update the txq to reflect the last descriptor that was cleaned */
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+ /* No Error */
+ return (0);
+}
+
+static inline uint32_t
+tx_desc_cksum_flags_to_upper(uint64_t ol_flags)
+{
+ static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8};
+ static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
+ uint32_t tmp;
+
+ tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
+ tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
+ return (tmp);
+}
+
+uint16_t
+eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct em_tx_queue *txq;
+ struct em_tx_entry *sw_ring;
+ struct em_tx_entry *txe, *txn;
+ volatile struct e1000_data_desc *txr;
+ volatile struct e1000_data_desc *txd;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint64_t buf_dma_addr;
+ uint32_t popts_spec;
+ uint32_t cmd_type_len;
+ uint16_t slen;
+ uint64_t ol_flags;
+ uint16_t tx_id;
+ uint16_t tx_last;
+ uint16_t nb_tx;
+ uint16_t nb_used;
+ uint64_t tx_ol_req;
+ uint32_t ctx;
+ uint32_t new_ctx;
+ union em_vlan_macip hdrlen;
+
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Determine if the descriptor ring needs to be cleaned. */
+ if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
+ em_xmit_cleanup(txq);
+ }
+
+ /* TX loop */
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ new_ctx = 0;
+ tx_pkt = *tx_pkts++;
+
+ RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+ /*
+ * Determine how many (if any) context descriptors
+ * are needed for offload functionality.
+ */
+ ol_flags = tx_pkt->ol_flags;
+
+ /* If hardware offload required */
+ tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK));
+ if (tx_ol_req) {
+ hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
+ hdrlen.f.l2_len = tx_pkt->l2_len;
+ hdrlen.f.l3_len = tx_pkt->l3_len;
+			/* Decide whether to build a new context or reuse the existing one. */
+ ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
+
+			/* Only allocate a context descriptor if required. */
+ new_ctx = (ctx == EM_CTX_NUM);
+ }
+
+ /*
+ * Keep track of how many descriptors are used this loop
+ * This will always be the number of segments + the number of
+ * Context descriptors required to transmit the packet
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
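+		/*
+		 * For instance (illustrative only), a 3-segment packet that
+		 * also needs a fresh offload context consumes 3 + 1 = 4
+		 * descriptors.
+		 */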
+
+ /*
+ * The number of descriptors that must be allocated for a
+ * packet is the number of segments of that packet, plus 1
+ * Context Descriptor for the hardware offload, if any.
+ * Determine the last TX descriptor to allocate in the TX ring
+ * for the packet, starting from the current position (tx_id)
+ * in the ring.
+ */
+ tx_last = (uint16_t) (tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+ " tx_first=%u tx_last=%u",
+ (unsigned) txq->port_id,
+ (unsigned) txq->queue_id,
+ (unsigned) tx_pkt->pkt_len,
+ (unsigned) tx_id,
+ (unsigned) tx_last);
+
+ /*
+ * Make sure there are enough TX descriptors available to
+ * transmit the entire packet.
+		 * nb_used must be less than or equal to txq->tx_rs_thresh.
+ */
+ while (unlikely (nb_used > txq->nb_tx_free)) {
+ PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors "
+ "nb_used=%4u nb_free=%4u "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->port_id, txq->queue_id);
+
+ if (em_xmit_cleanup(txq) != 0) {
+ /* Could not clean any descriptors */
+ if (nb_tx == 0)
+ return (0);
+ goto end_of_tx;
+ }
+ }
+
+ /*
+ * By now there are enough free TX descriptors to transmit
+ * the packet.
+ */
+
+ /*
+ * Set common flags of all TX Data Descriptors.
+ *
+ * The following bits must be set in all Data Descriptors:
+ * - E1000_TXD_DTYP_DATA
+ * - E1000_TXD_DTYP_DEXT
+ *
+ * The following bits must be set in the first Data Descriptor
+ * and are ignored in the other ones:
+ * - E1000_TXD_POPTS_IXSM
+ * - E1000_TXD_POPTS_TXSM
+ *
+ * The following bits must be set in the last Data Descriptor
+ * and are ignored in the other ones:
+ * - E1000_TXD_CMD_VLE
+ * - E1000_TXD_CMD_IFCS
+ *
+ * The following bits must only be set in the last Data
+ * Descriptor:
+ * - E1000_TXD_CMD_EOP
+ *
+ * The following bits can be set in any Data Descriptor, but
+ * are only set in the last Data Descriptor:
+ * - E1000_TXD_CMD_RS
+ */
+ cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+ E1000_TXD_CMD_IFCS;
+ popts_spec = 0;
+
+ /* Set VLAN Tag offload fields. */
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ cmd_type_len |= E1000_TXD_CMD_VLE;
+ popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
+ }
+
+ if (tx_ol_req) {
+ /*
+ * Setup the TX Context Descriptor if required
+ */
+ if (new_ctx) {
+ volatile struct e1000_context_desc *ctx_txd;
+
+ ctx_txd = (volatile struct e1000_context_desc *)
+ &txr[tx_id];
+
+ txn = &sw_ring[txe->next_id];
+ RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ em_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+ hdrlen);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+			/*
+			 * Set up the TX Data Descriptor offload fields.
+			 * This path is taken whether the context descriptor
+			 * was newly built or reused.
+			 */
+ popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags);
+ }
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+
+ if (txe->mbuf != NULL)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /*
+ * Set up Transmit Data Descriptor.
+ */
+ slen = m_seg->data_len;
+ buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+
+ txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
+ txd->upper.data = rte_cpu_to_le_32(popts_spec);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /*
+ * The last packet data descriptor needs End Of Packet (EOP)
+ */
+ cmd_type_len |= E1000_TXD_CMD_EOP;
+ txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+ /* Set RS bit only on threshold packets' last descriptor */
+ if (txq->nb_tx_used >= txq->tx_rs_thresh) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Setting RS bit on TXD id=%4u "
+ "(port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
+
+ cmd_type_len |= E1000_TXD_CMD_RS;
+
+ /* Update txq RS bit counters */
+ txq->nb_tx_used = 0;
+ }
+ txd->lower.data |= rte_cpu_to_le_32(cmd_type_len);
+ }
+end_of_tx:
+ rte_wmb();
+
+ /*
+ * Set the Transmit Descriptor Tail (TDT)
+ */
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) tx_id, (unsigned) nb_tx);
+ E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ txq->tx_tail = tx_id;
+
+ return (nb_tx);
+}
+
+/*********************************************************************
+ *
+ * RX functions
+ *
+ **********************************************************************/
+
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status)
+{
+ uint64_t pkt_flags;
+
+ /* Check if VLAN present */
+ pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0);
+
+ return pkt_flags;
+}
+
+static inline uint64_t
+rx_desc_error_to_pkt_flags(uint32_t rx_error)
+{
+ uint64_t pkt_flags = 0;
+
+ if (rx_error & E1000_RXD_ERR_IPE)
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ if (rx_error & E1000_RXD_ERR_TCPE)
+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ return (pkt_flags);
+}
+
+uint16_t
+eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct e1000_rx_desc *rx_ring;
+ volatile struct e1000_rx_desc *rxdp;
+ struct em_rx_queue *rxq;
+ struct em_rx_entry *sw_ring;
+ struct em_rx_entry *rxe;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ struct e1000_rx_desc rxd;
+ uint64_t dma_addr;
+ uint16_t pkt_len;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint8_t status;
+
+ rxq = rx_queue;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+ while (nb_rx < nb_pkts) {
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ status = rxdp->status;
+ if (! (status & E1000_RXD_STAT_DD))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * End of packet.
+ *
+ * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
+ * likely to be invalid and to be dropped by the various
+ * validation checks performed by the network stack.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+		 * This policy does not drop the packet received in the RX
+		 * descriptor for which the allocation of a new mbuf failed.
+		 * Thus, it allows that packet to be retrieved later, once
+		 * mbufs have been freed in the meantime.
+		 * As a side effect, holding RX descriptors instead of
+		 * systematically giving them back to the NIC may lead to
+		 * RX ring exhaustion situations.
+		 * However, the NIC can gracefully prevent such situations
+		 * from happening by sending specific "back-pressure" flow
+		 * control frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "status=0x%x pkt_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) status,
+ (unsigned) rte_le_to_cpu_16(rxd.length));
+
+ nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u",
+ (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_em_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
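+		/*
+		 * Note (assuming 64-byte cache lines and 16-byte legacy
+		 * descriptors): four e1000_rx_desc entries share one cache
+		 * line, so (rx_id & 0x3) == 0 marks a cache-line boundary.
+		 */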
+ if ((rx_id & 0x3) == 0) {
+ rte_em_prefetch(&rx_ring[rx_id]);
+ rte_em_prefetch(&sw_ring[rx_id]);
+ }
+
+ /* Rearm RXD: attach new mbuf and reset status to zero. */
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rxdp->buffer_addr = dma_addr;
+ rxdp->status = 0;
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
+ rxq->crc_len);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->port = rxq->port_id;
+
+ rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
+ rxm->ol_flags = rxm->ol_flags |
+ rx_desc_error_to_pkt_flags(rxd.errors);
+
+ /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+	 * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return (nb_rx);
+}
+
+uint16_t
+eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct em_rx_queue *rxq;
+ volatile struct e1000_rx_desc *rx_ring;
+ volatile struct e1000_rx_desc *rxdp;
+ struct em_rx_entry *sw_ring;
+ struct em_rx_entry *rxe;
+ struct rte_mbuf *first_seg;
+ struct rte_mbuf *last_seg;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ struct e1000_rx_desc rxd;
+ uint64_t dma; /* Physical address of mbuf data buffer */
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint16_t data_len;
+ uint8_t status;
+
+ rxq = rx_queue;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+
+ /*
+ * Retrieve RX context of current packet, if any.
+ */
+ first_seg = rxq->pkt_first_seg;
+ last_seg = rxq->pkt_last_seg;
+
+ while (nb_rx < nb_pkts) {
+ next_desc:
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ status = rxdp->status;
+ if (! (status & E1000_RXD_STAT_DD))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * Descriptor done.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy does not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+		 * Thus, it allows that packet to be retrieved later, once
+		 * mbufs have been freed in the meantime.
+		 * As a side effect, holding RX descriptors instead of
+		 * systematically giving them back to the NIC may lead to
+		 * RX ring exhaustion situations.
+		 * However, the NIC can gracefully prevent such situations
+		 * from happening by sending specific "back-pressure" flow
+		 * control frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "status=0x%x data_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) status,
+ (unsigned) rte_le_to_cpu_16(rxd.length));
+
+ nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_em_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_em_prefetch(&rx_ring[rx_id]);
+ rte_em_prefetch(&sw_ring[rx_id]);
+ }
+
+ /*
+ * Update RX descriptor with the physical address of the new
+		 * data buffer of the newly allocated mbuf.
+ */
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rxdp->buffer_addr = dma;
+ rxdp->status = 0;
+
+ /*
+ * Set data length & data buffer address of mbuf.
+ */
+ data_len = rte_le_to_cpu_16(rxd.length);
+ rxm->data_len = data_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (first_seg == NULL) {
+ first_seg = rxm;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
+ } else {
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /*
+ * If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (! (status & E1000_RXD_STAT_EOP)) {
+ last_seg = rxm;
+ goto next_desc;
+ }
+
+ /*
+ * This is the last buffer of the received packet.
+ * If the CRC is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer.
+ * If part of the CRC is also contained in the previous
+ * mbuf, subtract the length of that CRC part from the
+ * data length of the previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= ETHER_CRC_LEN;
+ if (data_len <= ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len = (uint16_t)
+ (last_seg->data_len -
+ (ETHER_CRC_LEN - data_len));
+ last_seg->next = NULL;
+ } else
+ rxm->data_len =
+ (uint16_t) (data_len - ETHER_CRC_LEN);
+ }
+
+ /*
+ * Initialize the first mbuf of the returned packet:
+ * - RX port identifier,
+ * - hardware offload data, if any:
+ * - IP checksum flag,
+ * - error flags.
+ */
+ first_seg->port = rxq->port_id;
+
+ first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
+ first_seg->ol_flags = first_seg->ol_flags |
+ rx_desc_error_to_pkt_flags(rxd.errors);
+
+ /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = first_seg;
+
+ /*
+ * Setup receipt context for a new packet.
+ */
+ first_seg = NULL;
+ }
+
+ /*
+ * Record index of the next RX descriptor to probe.
+ */
+ rxq->rx_tail = rx_id;
+
+ /*
+ * Save receive context.
+ */
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+	 * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return (nb_rx);
+}
+
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA only need to be aligned on a 16-byte boundary, but TDLEN/RDLEN
+ * must be a multiple of 128 bytes, so TDBA/RDBA are aligned on a 128-byte
+ * boundary instead. This also optimizes for the cache line size;
+ * the H/W supports cache lines of up to 128 bytes.
+ */
+#define EM_ALIGN 128
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define EM_MIN_RING_DESC 32
+#define EM_MAX_RING_DESC 4096
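+/*
+ * For example (assuming 16-byte legacy descriptors), 128 / 16 = 8, so any
+ * ring size that is a multiple of 8 descriptors within the above bounds
+ * satisfies the alignment condition.
+ */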
+
+#define EM_MAX_BUF_SIZE 16384
+#define EM_RCTL_FLXBUF_STEP 1024
+
+static const struct rte_memzone *
+ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
+ uint16_t queue_id, uint32_t ring_size, int socket_id)
+{
+ const struct rte_memzone *mz;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ dev->driver->pci_drv.name, ring_name, dev->data->port_id,
+ queue_id);
+
+ if ((mz = rte_memzone_lookup(z_name)) != 0)
+ return (mz);
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+ return rte_memzone_reserve_bounded(z_name, ring_size,
+ socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
+#else
+ return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
+#endif
+}
+
+static void
+em_tx_queue_release_mbufs(struct em_tx_queue *txq)
+{
+ unsigned i;
+
+ if (txq->sw_ring != NULL) {
+ for (i = 0; i != txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static void
+em_tx_queue_release(struct em_tx_queue *txq)
+{
+ if (txq != NULL) {
+ em_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ }
+}
+
+void
+eth_em_tx_queue_release(void *txq)
+{
+ em_tx_queue_release(txq);
+}
+
+/* (Re)set dynamic em_tx_queue fields to defaults */
+static void
+em_reset_tx_queue(struct em_tx_queue *txq)
+{
+ uint16_t i, nb_desc, prev;
+ static const struct e1000_data_desc txd_init = {
+ .upper.fields = {.status = E1000_TXD_STAT_DD},
+ };
+
+ nb_desc = txq->nb_tx_desc;
+
+ /* Initialize ring entries */
+
+ prev = (uint16_t) (nb_desc - 1);
+
+ for (i = 0; i < nb_desc; i++) {
+ txq->tx_ring[i] = txd_init;
+ txq->sw_ring[i].mbuf = NULL;
+ txq->sw_ring[i].last_id = i;
+ txq->sw_ring[prev].next_id = i;
+ prev = i;
+ }
+
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->nb_tx_free = (uint16_t)(nb_desc - 1);
+ txq->last_desc_cleaned = (uint16_t)(nb_desc - 1);
+ txq->nb_tx_used = 0;
+ txq->tx_tail = 0;
+
+ memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
+}
+
+int
+eth_em_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const struct rte_memzone *tz;
+ struct em_tx_queue *txq;
+ struct e1000_hw *hw;
+ uint32_t tsize;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Validate number of transmit descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of EM_ALIGN.
+ */
+ if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
+ (nb_desc > EM_MAX_RING_DESC) ||
+ (nb_desc < EM_MIN_RING_DESC)) {
+ return -(EINVAL);
+ }
+
+ tx_free_thresh = tx_conf->tx_free_thresh;
+ if (tx_free_thresh == 0)
+ tx_free_thresh = (uint16_t)RTE_MIN(nb_desc / 4,
+ DEFAULT_TX_FREE_THRESH);
+
+ tx_rs_thresh = tx_conf->tx_rs_thresh;
+ if (tx_rs_thresh == 0)
+ tx_rs_thresh = (uint16_t)RTE_MIN(tx_free_thresh,
+ DEFAULT_TX_RS_THRESH);
+
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
+ "number of TX descriptors minus 3. "
+ "(tx_free_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
+ "tx_free_thresh. (tx_free_thresh=%u "
+ "tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /*
+	 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
+ * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
+ * by the NIC and all descriptors are written back after the NIC
+ * accumulates WTHRESH descriptors.
+ */
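+	/*
+	 * For example, with the defaults above (tx_rs_thresh = 32), the
+	 * tx_thresh.wthresh value supplied for this queue must be 0, which
+	 * is what the check below enforces.
+	 */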
+ if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
+ PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+ "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ em_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
+ if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
+ socket_id)) == NULL)
+ return (-ENOMEM);
+
+ /* Allocate the tx queue data structure. */
+ if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE)) == NULL)
+ return (-ENOMEM);
+
+ /* Allocate software ring */
+ if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
+ sizeof(txq->sw_ring[0]) * nb_desc,
+ RTE_CACHE_LINE_SIZE)) == NULL) {
+ em_tx_queue_release(txq);
+ return (-ENOMEM);
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->tx_rs_thresh = tx_rs_thresh;
+ txq->pthresh = tx_conf->tx_thresh.pthresh;
+ txq->hthresh = tx_conf->tx_thresh.hthresh;
+ txq->wthresh = tx_conf->tx_thresh.wthresh;
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+
+ txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
+#ifndef RTE_LIBRTE_XEN_DOM0
+ txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
+#else
+ txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#endif
+ txq->tx_ring = (struct e1000_data_desc *) tz->addr;
+
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+ em_reset_tx_queue(txq);
+
+ dev->data->tx_queues[queue_idx] = txq;
+ return (0);
+}
+
+static void
+em_rx_queue_release_mbufs(struct em_rx_queue *rxq)
+{
+ unsigned i;
+
+ if (rxq->sw_ring != NULL) {
+ for (i = 0; i != rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static void
+em_rx_queue_release(struct em_rx_queue *rxq)
+{
+ if (rxq != NULL) {
+ em_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ }
+}
+
+void
+eth_em_rx_queue_release(void *rxq)
+{
+ em_rx_queue_release(rxq);
+}
+
+/* Reset dynamic em_rx_queue fields back to defaults */
+static void
+em_reset_rx_queue(struct em_rx_queue *rxq)
+{
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+}
+
+int
+eth_em_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *rz;
+ struct em_rx_queue *rxq;
+ struct e1000_hw *hw;
+ uint32_t rsize;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Validate number of receive descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of EM_ALIGN.
+ */
+ if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
+ (nb_desc > EM_MAX_RING_DESC) ||
+ (nb_desc < EM_MIN_RING_DESC)) {
+ return (-EINVAL);
+ }
+
+ /*
+ * EM devices don't support drop_en functionality
+ */
+ if (rx_conf->rx_drop_en) {
+ PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
+ "device");
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ em_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+	/* Allocate RX ring for max possible number of hardware descriptors. */
+ rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
+ if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
+ socket_id)) == NULL)
+ return (-ENOMEM);
+
+ /* Allocate the RX queue data structure. */
+ if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE)) == NULL)
+ return (-ENOMEM);
+
+ /* Allocate software ring. */
+ if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
+ sizeof (rxq->sw_ring[0]) * nb_desc,
+ RTE_CACHE_LINE_SIZE)) == NULL) {
+ em_rx_queue_release(rxq);
+ return (-ENOMEM);
+ }
+
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->pthresh = rx_conf->rx_thresh.pthresh;
+ rxq->hthresh = rx_conf->rx_thresh.hthresh;
+ rxq->wthresh = rx_conf->rx_thresh.wthresh;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
+ 0 : ETHER_CRC_LEN);
+
+ rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
+ rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
+#ifndef RTE_LIBRTE_XEN_DOM0
+ rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
+#else
+ rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#endif
+ rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
+
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ em_reset_rx_queue(rxq);
+
+ return (0);
+}
+
+uint32_t
+eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define EM_RXQ_SCAN_INTERVAL 4
+ volatile struct e1000_rx_desc *rxdp;
+ struct em_rx_queue *rxq;
+ uint32_t desc = 0;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id);
+ return 0;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+
+ while ((desc < rxq->nb_rx_desc) &&
+ (rxdp->status & E1000_RXD_STAT_DD)) {
+ desc += EM_RXQ_SCAN_INTERVAL;
+ rxdp += EM_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
+int
+eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile struct e1000_rx_desc *rxdp;
+ struct em_rx_queue *rxq = rx_queue;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return 0;
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &rxq->rx_ring[desc];
+ return !!(rxdp->status & E1000_RXD_STAT_DD);
+}
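+/*
+ * Usage sketch (assuming the standard ethdev API): a call such as
+ * rte_eth_rx_descriptor_done(port_id, queue_id, offset) typically ends up
+ * here for em ports and returns 1 when the descriptor located "offset"
+ * entries after rx_tail has its DD bit set, 0 otherwise.
+ */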
+
+void
+em_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct em_tx_queue *txq;
+ struct em_rx_queue *rxq;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq != NULL) {
+ em_tx_queue_release_mbufs(txq);
+ em_reset_tx_queue(txq);
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq != NULL) {
+ em_rx_queue_release_mbufs(rxq);
+ em_reset_rx_queue(rxq);
+ }
+ }
+}
+
+/*
+ * Takes as input/output parameter RX buffer size.
+ * Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register.
+ */
+static uint32_t
+em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz)
+{
+ /*
+ * For BSIZE & BSEX all configurable sizes are:
+ * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
+ * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
+ * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
+ * 2048: rctl |= E1000_RCTL_SZ_2048;
+ * 1024: rctl |= E1000_RCTL_SZ_1024;
+ * 512: rctl |= E1000_RCTL_SZ_512;
+ * 256: rctl |= E1000_RCTL_SZ_256;
+ */
+ static const struct {
+ uint32_t bufsz;
+ uint32_t rctl;
+ } bufsz_to_rctl[] = {
+ {16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)},
+ {8192, (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX)},
+ {4096, (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX)},
+ {2048, E1000_RCTL_SZ_2048},
+ {1024, E1000_RCTL_SZ_1024},
+ {512, E1000_RCTL_SZ_512},
+ {256, E1000_RCTL_SZ_256},
+ };
+
+ int i;
+ uint32_t rctl_bsize;
+
+ rctl_bsize = *bufsz;
+
+ /*
+ * Starting from 82571 it is possible to specify RX buffer size
+ * by RCTL.FLXBUF. When this field is different from zero, the
+ * RX buffer size = RCTL.FLXBUF * 1K
+	 * (e.g. it is possible to specify an RX buffer size of 1, 2, ..., 15 KB).
+	 * This works fine on real HW, but for some reason does not work
+	 * on the VMware-emulated 82574L.
+ * So for now, always use BSIZE/BSEX to setup RX buffer size.
+ * If you don't plan to use it on VMware emulated 82574L and
+ * would like to specify RX buffer size in 1K granularity,
+ * uncomment the following lines:
+ * ***************************************************************
+ * if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 &&
+ * rctl_bsize >= EM_RCTL_FLXBUF_STEP) {
+ * rctl_bsize /= EM_RCTL_FLXBUF_STEP;
+ * *bufsz = rctl_bsize;
+ * return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT &
+ * E1000_RCTL_FLXBUF_MASK);
+ * }
+ * ***************************************************************
+ */
+
+ for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]);
+ i++) {
+ if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
+ *bufsz = bufsz_to_rctl[i].bufsz;
+ return (bufsz_to_rctl[i].rctl);
+ }
+ }
+
+ /* Should never happen. */
+ return (-EINVAL);
+}
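+/*
+ * Worked example: a requested *bufsz of 3000 bytes first satisfies the
+ * 2048-byte table entry, so *bufsz is rounded down to 2048 and
+ * E1000_RCTL_SZ_2048 is returned.
+ */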
+
+static int
+em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
+{
+ struct em_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ unsigned i;
+ static const struct e1000_rx_desc rxd_init = {
+ .buffer_addr = 0,
+ };
+
+ /* Initialize software ring entries */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile struct e1000_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
+ "queue_id=%hu", rxq->queue_id);
+ return (-ENOMEM);
+ }
+
+ dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+
+ /* Clear HW ring memory */
+ rxq->rx_ring[i] = rxd_init;
+
+ rxd = &rxq->rx_ring[i];
+ rxd->buffer_addr = dma_addr;
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+/*********************************************************************
+ *
+ * Enable receive unit.
+ *
+ **********************************************************************/
+int
+eth_em_rx_init(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ struct em_rx_queue *rxq;
+ uint32_t rctl;
+ uint32_t rfctl;
+ uint32_t rxcsum;
+ uint32_t rctl_bsize;
+ uint16_t i;
+ int ret;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Make sure receives are disabled while setting
+ * up the descriptor ring.
+ */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+
+ /* Disable extended descriptor type. */
+ rfctl &= ~E1000_RFCTL_EXTEN;
+ /* Disable accelerated acknowledge */
+ if (hw->mac.type == e1000_82574)
+ rfctl |= E1000_RFCTL_ACK_DIS;
+
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
+ /*
+	 * XXX TEMPORARY WORKAROUND: on some systems with the 82573
+	 * (e.g. Lenovo X60), long latencies are observed. This
+ * change eliminates the problem, but since having positive
+ * values in RDTR is a known source of problems on other
+ * platforms another solution is being sought.
+ */
+ if (hw->mac.type == e1000_82573)
+ E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
+
+ dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts;
+
+ /* Determine RX bufsize. */
+ rctl_bsize = EM_MAX_BUF_SIZE;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint32_t buf_size;
+
+ rxq = dev->data->rx_queues[i];
+ mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
+ buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ rctl_bsize = RTE_MIN(rctl_bsize, buf_size);
+ }
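+	/*
+	 * Illustration (assumed mempool configuration): with a data room of
+	 * RTE_PKTMBUF_HEADROOM + 2048 bytes on every RX queue, buf_size is
+	 * 2048 and rctl_bsize ends up as 2048 as well.
+	 */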
+
+ rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize);
+
+ /* Configure and enable each RX queue. */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ uint64_t bus_addr;
+ uint32_t rxdctl;
+
+ rxq = dev->data->rx_queues[i];
+
+ /* Allocate buffers for descriptor rings and setup queue */
+ ret = em_alloc_rx_queue_mbufs(rxq);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset crc_len in case it was changed after queue setup by a
+ * call to configure
+ */
+ rxq->crc_len =
+ (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
+ 0 : ETHER_CRC_LEN);
+
+ bus_addr = rxq->rx_ring_phys_addr;
+ E1000_WRITE_REG(hw, E1000_RDLEN(i),
+ rxq->nb_rx_desc *
+ sizeof(*rxq->rx_ring));
+ E1000_WRITE_REG(hw, E1000_RDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
+
+ E1000_WRITE_REG(hw, E1000_RDH(i), 0);
+ E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
+
+ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
+ rxdctl &= 0xFE000000;
+ rxdctl |= rxq->pthresh & 0x3F;
+ rxdctl |= (rxq->hthresh & 0x3F) << 8;
+ rxdctl |= (rxq->wthresh & 0x3F) << 16;
+ rxdctl |= E1000_RXDCTL_GRAN;
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
+
+ /*
+		 * EM devices have no hardware limit on packet length, so a
+		 * jumbo frame of any size can be accepted. Scattered RX
+		 * therefore has to be enabled when jumbo frames are enabled
+		 * (or when the buffer size is too small to hold non-jumbo
+		 * packets), so that packets which do not fit into a single
+		 * buffer can still be received.
+ */
+ if (dev->data->dev_conf.rxmode.jumbo_frame ||
+ rctl_bsize < ETHER_MAX_LEN) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst =
+ (eth_rx_burst_t)eth_em_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+ }
+
+ if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+
+ /*
+ * Setup the Checksum Register.
+ * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
+ */
+ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+
+ if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ rxcsum |= E1000_RXCSUM_IPOFL;
+ else
+ rxcsum &= ~E1000_RXCSUM_IPOFL;
+ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+
+ /* No MRQ or RSS support for now */
+
+ /* Set early receive threshold on appropriate hw */
+ if ((hw->mac.type == e1000_ich9lan ||
+ hw->mac.type == e1000_pch2lan ||
+ hw->mac.type == e1000_ich10lan) &&
+ dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
+ E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
+ E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
+ }
+
+ if (hw->mac.type == e1000_pch2lan) {
+ if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
+ else
+ e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
+ }
+
+ /* Setup the Receive Control Register. */
+ if (dev->data->dev_conf.rxmode.hw_strip_crc)
+ rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
+ else
+ rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
+ E1000_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ /* Make sure VLAN Filters are off. */
+ rctl &= ~E1000_RCTL_VFE;
+ /* Don't store bad packets. */
+ rctl &= ~E1000_RCTL_SBP;
+ /* Legacy descriptor type. */
+ rctl &= ~E1000_RCTL_DTYP_MASK;
+
+ /*
+ * Configure support of jumbo frames, if any.
+ */
+ if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ rctl |= E1000_RCTL_LPE;
+ else
+ rctl &= ~E1000_RCTL_LPE;
+
+ /* Enable Receives. */
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ return 0;
+}
+
+/*********************************************************************
+ *
+ * Enable transmit unit.
+ *
+ **********************************************************************/
+void
+eth_em_tx_init(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ struct em_tx_queue *txq;
+ uint32_t tctl;
+ uint32_t txdctl;
+ uint16_t i;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings. */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ uint64_t bus_addr;
+
+ txq = dev->data->tx_queues[i];
+ bus_addr = txq->tx_ring_phys_addr;
+ E1000_WRITE_REG(hw, E1000_TDLEN(i),
+ txq->nb_tx_desc *
+ sizeof(*txq->tx_ring));
+ E1000_WRITE_REG(hw, E1000_TDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers. */
+ E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+ E1000_WRITE_REG(hw, E1000_TDH(i), 0);
+
+ /* Setup Transmit threshold registers. */
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
+ /*
+		 * Bit 22 is reserved; on some models it should always be 0,
+		 * on others always 1.
+ */
+ txdctl &= E1000_TXDCTL_COUNT_DESC;
+ txdctl |= txq->pthresh & 0x3F;
+ txdctl |= (txq->hthresh & 0x3F) << 8;
+ txdctl |= (txq->wthresh & 0x3F) << 16;
+ txdctl |= E1000_TXDCTL_GRAN;
+ E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
+ }
+
+ /* Program the Transmit Control Register. */
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
+
+ /* This write will effectively turn on the transmit unit. */
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+}
+
diff --git a/src/dpdk_lib18/librte_pmd_e1000/igb_ethdev.c b/src/dpdk_lib18/librte_pmd_e1000/igb_ethdev.c
new file mode 100755
index 00000000..0cebf985
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/igb_ethdev.c
@@ -0,0 +1,3164 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "e1000_logs.h"
+#include "e1000/e1000_api.h"
+#include "e1000_ethdev.h"
+
+/*
+ * Default values for port configuration
+ */
+#define IGB_DEFAULT_RX_FREE_THRESH 32
+#define IGB_DEFAULT_RX_PTHRESH 8
+#define IGB_DEFAULT_RX_HTHRESH 8
+#define IGB_DEFAULT_RX_WTHRESH 0
+
+#define IGB_DEFAULT_TX_PTHRESH 32
+#define IGB_DEFAULT_TX_HTHRESH 0
+#define IGB_DEFAULT_TX_WTHRESH 0
+
+/* Bit shift and mask */
+#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
+#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
+#define IGB_8_BIT_WIDTH CHAR_BIT
+#define IGB_8_BIT_MASK UINT8_MAX
+
+static int eth_igb_configure(struct rte_eth_dev *dev);
+static int eth_igb_start(struct rte_eth_dev *dev);
+static void eth_igb_stop(struct rte_eth_dev *dev);
+static void eth_igb_close(struct rte_eth_dev *dev);
+static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
+static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
+static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
+static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
+static int eth_igb_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static void eth_igb_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *rte_stats);
+static void eth_igb_stats_reset(struct rte_eth_dev *dev);
+static void eth_igb_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
+static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
+static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
+ void *param);
+static int igb_hardware_init(struct e1000_hw *hw);
+static void igb_hw_control_acquire(struct e1000_hw *hw);
+static void igb_hw_control_release(struct e1000_hw *hw);
+static void igb_init_manageability(struct e1000_hw *hw);
+static void igb_release_manageability(struct e1000_hw *hw);
+
+static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
+static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+
+static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
+
+static int eth_igb_led_on(struct rte_eth_dev *dev);
+static int eth_igb_led_off(struct rte_eth_dev *dev);
+
+static void igb_intr_disable(struct e1000_hw *hw);
+static int igb_get_rx_buffer_size(struct e1000_hw *hw);
+static void eth_igb_rar_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+
+static void igbvf_intr_disable(struct e1000_hw *hw);
+static int igbvf_dev_configure(struct rte_eth_dev *dev);
+static int igbvf_dev_start(struct rte_eth_dev *dev);
+static void igbvf_dev_stop(struct rte_eth_dev *dev);
+static void igbvf_dev_close(struct rte_eth_dev *dev);
+static int eth_igbvf_link_update(struct e1000_hw *hw);
+static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
+static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
+static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
+static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int eth_igb_add_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t rx_queue);
+static int eth_igb_remove_syn_filter(struct rte_eth_dev *dev);
+static int eth_igb_get_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t *rx_queue);
+static int eth_igb_add_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_ethertype_filter *filter, uint16_t rx_queue);
+static int eth_igb_remove_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index);
+static int eth_igb_get_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_ethertype_filter *filter, uint16_t *rx_queue);
+static int eth_igb_add_2tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_2tuple_filter *filter, uint16_t rx_queue);
+static int eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index);
+static int eth_igb_get_2tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_2tuple_filter *filter, uint16_t *rx_queue);
+static int eth_igb_add_flex_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_flex_filter *filter, uint16_t rx_queue);
+static int eth_igb_remove_flex_filter(struct rte_eth_dev *dev,
+ uint16_t index);
+static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_flex_filter *filter, uint16_t *rx_queue);
+static int eth_igb_add_5tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t rx_queue);
+static int eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index);
+static int eth_igb_get_5tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t *rx_queue);
+
+/*
+ * Macro to update VF statistics from registers that are not "cleared on read".
+ */
+#define UPDATE_VF_STAT(reg, last, cur) \
+{ \
+ u32 latest = E1000_READ_REG(hw, reg); \
+ cur += latest - last; \
+ last = latest; \
+}
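+/*
+ * Example (illustrative register values): if the register read 100 on the
+ * previous poll and reads 130 now, "cur" is incremented by 30 and "last"
+ * becomes 130.
+ */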
+
+
+#define IGB_FC_PAUSE_TIME 0x0680
+#define IGB_LINK_UPDATE_CHECK_TIMEOUT 10 /* 1s (10 * 100ms) */
+#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
+
+
+#define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */
+
+static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id pci_id_igb_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{.device_id = 0},
+};
+
+/*
+ * The set of PCI devices this driver supports (for 82576&I350 VF)
+ */
+static struct rte_pci_id pci_id_igbvf_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{.device_id = 0},
+};
+
+static struct eth_dev_ops eth_igb_ops = {
+ .dev_configure = eth_igb_configure,
+ .dev_start = eth_igb_start,
+ .dev_stop = eth_igb_stop,
+ .dev_close = eth_igb_close,
+ .promiscuous_enable = eth_igb_promiscuous_enable,
+ .promiscuous_disable = eth_igb_promiscuous_disable,
+ .allmulticast_enable = eth_igb_allmulticast_enable,
+ .allmulticast_disable = eth_igb_allmulticast_disable,
+ .link_update = eth_igb_link_update,
+ .stats_get = eth_igb_stats_get,
+ .stats_reset = eth_igb_stats_reset,
+ .dev_infos_get = eth_igb_infos_get,
+ .mtu_set = eth_igb_mtu_set,
+ .vlan_filter_set = eth_igb_vlan_filter_set,
+ .vlan_tpid_set = eth_igb_vlan_tpid_set,
+ .vlan_offload_set = eth_igb_vlan_offload_set,
+ .rx_queue_setup = eth_igb_rx_queue_setup,
+ .rx_queue_release = eth_igb_rx_queue_release,
+ .rx_queue_count = eth_igb_rx_queue_count,
+ .rx_descriptor_done = eth_igb_rx_descriptor_done,
+ .tx_queue_setup = eth_igb_tx_queue_setup,
+ .tx_queue_release = eth_igb_tx_queue_release,
+ .dev_led_on = eth_igb_led_on,
+ .dev_led_off = eth_igb_led_off,
+ .flow_ctrl_get = eth_igb_flow_ctrl_get,
+ .flow_ctrl_set = eth_igb_flow_ctrl_set,
+ .mac_addr_add = eth_igb_rar_set,
+ .mac_addr_remove = eth_igb_rar_clear,
+ .reta_update = eth_igb_rss_reta_update,
+ .reta_query = eth_igb_rss_reta_query,
+ .rss_hash_update = eth_igb_rss_hash_update,
+ .rss_hash_conf_get = eth_igb_rss_hash_conf_get,
+ .add_syn_filter = eth_igb_add_syn_filter,
+ .remove_syn_filter = eth_igb_remove_syn_filter,
+ .get_syn_filter = eth_igb_get_syn_filter,
+ .add_ethertype_filter = eth_igb_add_ethertype_filter,
+ .remove_ethertype_filter = eth_igb_remove_ethertype_filter,
+ .get_ethertype_filter = eth_igb_get_ethertype_filter,
+ .add_2tuple_filter = eth_igb_add_2tuple_filter,
+ .remove_2tuple_filter = eth_igb_remove_2tuple_filter,
+ .get_2tuple_filter = eth_igb_get_2tuple_filter,
+ .add_flex_filter = eth_igb_add_flex_filter,
+ .remove_flex_filter = eth_igb_remove_flex_filter,
+ .get_flex_filter = eth_igb_get_flex_filter,
+ .add_5tuple_filter = eth_igb_add_5tuple_filter,
+ .remove_5tuple_filter = eth_igb_remove_5tuple_filter,
+ .get_5tuple_filter = eth_igb_get_5tuple_filter,
+};
+
+/*
+ * dev_ops for the virtual function; only the bare necessities for basic
+ * VF operation are implemented.
+ */
+static struct eth_dev_ops igbvf_eth_dev_ops = {
+ .dev_configure = igbvf_dev_configure,
+ .dev_start = igbvf_dev_start,
+ .dev_stop = igbvf_dev_stop,
+ .dev_close = igbvf_dev_close,
+ .link_update = eth_igb_link_update,
+ .stats_get = eth_igbvf_stats_get,
+ .stats_reset = eth_igbvf_stats_reset,
+ .vlan_filter_set = igbvf_vlan_filter_set,
+ .dev_infos_get = eth_igbvf_infos_get,
+ .rx_queue_setup = eth_igb_rx_queue_setup,
+ .rx_queue_release = eth_igb_rx_queue_release,
+ .tx_queue_setup = eth_igb_tx_queue_setup,
+ .tx_queue_release = eth_igb_tx_queue_release,
+};
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   Pointer to the structure rte_eth_dev to read from.
+ * @param link
+ *   Pointer to the buffer in which the link status is saved.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   Pointer to the structure rte_eth_dev to write to.
+ * @param link
+ *   Pointer to the buffer holding the link status to be written.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static inline void
+igb_intr_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
+ E1000_WRITE_FLUSH(hw);
+}
+
+static void
+igb_intr_disable(struct e1000_hw *hw)
+{
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+}
+
+static inline int32_t
+igb_pf_reset_hw(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext;
+ int32_t status;
+
+ status = e1000_reset_hw(hw);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+static void
+igb_identify_hardware(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw->vendor_id = dev->pci_dev->id.vendor_id;
+ hw->device_id = dev->pci_dev->id.device_id;
+ hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
+
+ e1000_set_mac_type(hw);
+
+ /* need to check if it is a vf device below */
+}
+
+static int
+igb_reset_swfw_lock(struct e1000_hw *hw)
+{
+ int ret_val;
+
+ /*
+ * Do mac ops initialization manually here, since we will need
+ * some function pointers set by this call.
+ */
+ ret_val = e1000_init_mac_params(hw);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * SMBI lock should not fail in this early stage. If this is the case,
+ * it is due to an improper exit of the application.
+ * So force the release of the faulty lock.
+ */
+ if (e1000_get_hw_semaphore_generic(hw) < 0) {
+ PMD_DRV_LOG(DEBUG, "SMBI lock released");
+ }
+ e1000_put_hw_semaphore_generic(hw);
+
+ if (hw->mac.ops.acquire_swfw_sync != NULL) {
+ uint16_t mask;
+
+ /*
+ * Phy lock should not fail in this early stage. If this is the case,
+ * it is due to an improper exit of the application.
+ * So force the release of the faulty lock.
+ */
+ mask = E1000_SWFW_PHY0_SM << hw->bus.func;
+ if (hw->bus.func > E1000_FUNC_1)
+ mask <<= 2;
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
+ PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
+ hw->bus.func);
+ }
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ /*
+		 * This one is more tricky since it is common to all ports; but
+		 * the swfw_sync retries last long enough (1 s) to be almost
+		 * sure that, if the lock cannot be taken, it is due to an
+		 * improper hold of the semaphore.
+ */
+ mask = E1000_SWFW_EEP_SM;
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
+ PMD_DRV_LOG(DEBUG, "SWFW common locks released");
+ }
+ hw->mac.ops.release_swfw_sync(hw, mask);
+ }
+
+ return E1000_SUCCESS;
+}
+
+static int
+eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev)
+{
+ int error = 0;
+ struct rte_pci_device *pci_dev;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+ uint32_t ctrl_ext;
+
+ pci_dev = eth_dev->pci_dev;
+ eth_dev->dev_ops = &eth_igb_ops;
+ eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
+ eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+ return 0;
+ }
+
+ hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
+
+ igb_identify_hardware(eth_dev);
+ if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ e1000_get_bus_info(hw);
+
+ /* Reset any pending lock */
+ if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ /* Finish initialization */
+ if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ hw->mac.autoneg = 1;
+ hw->phy.autoneg_wait_to_complete = 0;
+ hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+
+ /* Copper options */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ hw->phy.mdix = 0; /* AUTO_ALL_MODES */
+ hw->phy.disable_polarity_correction = 0;
+ hw->phy.ms_type = e1000_ms_hw_default;
+ }
+
+	/*
+	 * Start from a known state; this is important for reading the
+	 * NVM and the MAC address afterwards.
+	 */
+ igb_pf_reset_hw(hw);
+
+ /* Make sure we have a good EEPROM before we read from it */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ /*
+ * Some PCI-E parts fail the first check due to
+ * the link being in sleep state, call it again,
+		 * if it fails a second time, it's a real issue.
+ */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
+ error = -EIO;
+ goto err_late;
+ }
+ }
+
+ /* Read the permanent MAC address out of the EEPROM */
+ if (e1000_read_mac_addr(hw) != 0) {
+ PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
+ error = -EIO;
+ goto err_late;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("e1000",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ error = -ENOMEM;
+ goto err_late;
+ }
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* Now initialize the hardware */
+ if (igb_hardware_init(hw) != 0) {
+ PMD_INIT_LOG(ERR, "Hardware initialization failed");
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ error = -ENODEV;
+ goto err_late;
+ }
+ hw->mac.get_link_status = 1;
+
+ /* Indicate SOL/IDER usage */
+ if (e1000_check_reset_block(hw) < 0) {
+ PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
+ "SOL/IDER session");
+ }
+
+ /* initialize PF if max_vfs not zero */
+ igb_pf_host_init(eth_dev);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ rte_intr_callback_register(&(pci_dev->intr_handle),
+ eth_igb_interrupt_handler, (void *)eth_dev);
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(&(pci_dev->intr_handle));
+
+ /* enable support intr */
+ igb_intr_enable(eth_dev);
+
+ return 0;
+
+err_late:
+ igb_hw_control_release(hw);
+
+ return (error);
+}
+
+/*
+ * Virtual Function device init
+ */
+static int
+eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int diag;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &igbvf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
+ eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+ return 0;
+ }
+
+ pci_dev = eth_dev->pci_dev;
+
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+
+ /* Initialize the shared code (base driver) */
+ diag = e1000_setup_init_funcs(hw, TRUE);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
+ diag);
+ return -EIO;
+ }
+
+ /* init_mailbox_params */
+ hw->mbx.ops.init_params(hw);
+
+ /* Disable the interrupts for VF */
+ igbvf_intr_disable(hw);
+
+ diag = hw->mac.ops.reset_hw(hw);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
+ hw->mac.rar_entry_count, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC "
+ "addresses",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ return -ENOMEM;
+ }
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
+ "mac.type=%s",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id, "igb_mac_82576_vf");
+
+ return 0;
+}
+
+static struct eth_driver rte_igb_pmd = {
+ {
+ .name = "rte_igb_pmd",
+ .id_table = pci_id_igb_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ },
+ .eth_dev_init = eth_igb_dev_init,
+ .dev_private_size = sizeof(struct e1000_adapter),
+};
+
+/*
+ * virtual function driver struct
+ */
+static struct eth_driver rte_igbvf_pmd = {
+ {
+ .name = "rte_igbvf_pmd",
+ .id_table = pci_id_igbvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ },
+ .eth_dev_init = eth_igbvf_dev_init,
+ .dev_private_size = sizeof(struct e1000_adapter),
+};
+
+static int
+rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
+{
+ rte_eth_driver_register(&rte_igb_pmd);
+ return 0;
+}
+
+static void
+igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	/* RCTL: enable VLAN filter since VMDq always uses VLAN filter */
+ uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+/*
+ * VF Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
+ */
+static int
+rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eth_driver_register(&rte_igbvf_pmd);
+ return (0);
+}
+
+static int
+eth_igb_configure(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+ PMD_INIT_FUNC_TRACE();
+
+ return (0);
+}
+
+static int
+eth_igb_start(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret, i, mask;
+ uint32_t ctrl_ext;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Power up the phy. Needed to make the link go Up */
+ e1000_power_up_phy(hw);
+
+ /*
+ * Packet Buffer Allocation (PBA)
+	 * Writing PBA sets the receive portion of the buffer;
+	 * the remainder is used for the transmit buffer.
+ */
+ if (hw->mac.type == e1000_82575) {
+ uint32_t pba;
+
+ pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
+ E1000_WRITE_REG(hw, E1000_PBA, pba);
+ }
+
+ /* Put the address into the Receive Address Array */
+ e1000_rar_set(hw, hw->mac.addr, 0);
+
+ /* Initialize the hardware */
+ if (igb_hardware_init(hw)) {
+ PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
+ return (-EIO);
+ }
+
+ E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ /* configure PF module if SRIOV enabled */
+ igb_pf_host_configure(dev);
+
+ /* Configure for OS presence */
+ igb_init_manageability(hw);
+
+ eth_igb_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ ret = eth_igb_rx_init(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ igb_dev_clear_queues(dev);
+ return ret;
+ }
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ /*
+ * VLAN Offload Settings
+ */
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ ETH_VLAN_EXTEND_MASK;
+ eth_igb_vlan_offload_set(dev, mask);
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+		/* Enable VLAN filter since VMDq always uses VLAN filter */
+ igb_vmdq_vlan_hw_filter_enable(dev);
+ }
+
+	/*
+	 * Configure the Interrupt Moderation register (EITR) with the maximum
+	 * possible value (0xFFFF) to minimize "System Partial Write" accesses
+	 * issued by spurious [DMA] memory updates of RX and TX ring
+	 * descriptors.
+	 *
+	 * With an EITR granularity of 2 microseconds in the 82576, only about
+	 * 7 to 8 spurious memory updates per second should be expected:
+	 * 65535 * 2 us = 131,070 us ~= 0.131 second between updates.
+	 *
+	 * Because interrupts are not used at all, the MSI-X is not activated
+	 * and interrupt moderation is controlled by EITR[0].
+	 *
+	 * Note that having [almost] disabled memory updates of RX and TX ring
+	 * descriptors through the Interrupt Moderation mechanism, memory
+	 * updates of ring descriptors are now moderated by the configurable
+	 * value of the Write-Back Threshold registers.
+	 */
+ if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
+ (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+ uint32_t ivar;
+
+ /* Enable all RX & TX queues in the IVAR registers */
+ ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
+ for (i = 0; i < 8; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);
+
+ /* Configure EITR with the maximum possible value (0xFFFF) */
+ E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
+ }
+
+ /* Setup link speed and duplex */
+ switch (dev->data->dev_conf.link_speed) {
+ case ETH_LINK_SPEED_AUTONEG:
+ if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_10:
+ if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_100:
+ if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+ hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
+ else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+ hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_1000:
+ if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
+ (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
+ hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ else
+ goto error_invalid_config;
+ break;
+ case ETH_LINK_SPEED_10000:
+ default:
+ goto error_invalid_config;
+ }
+ e1000_setup_link(hw);
+
+ /* check if lsc interrupt feature is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ ret = eth_igb_lsc_interrupt_setup(dev);
+
+ /* resume enabled intr since hw reset */
+ igb_intr_enable(dev);
+
+ PMD_INIT_LOG(DEBUG, "<<");
+
+ return (0);
+
+error_invalid_config:
+ PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
+ dev->data->dev_conf.link_speed,
+ dev->data->dev_conf.link_duplex, dev->data->port_id);
+ igb_dev_clear_queues(dev);
+ return (-EINVAL);
+}
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC.
+ *
+ **********************************************************************/
+static void
+eth_igb_stop(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
+
+ igb_intr_disable(hw);
+ igb_pf_reset_hw(hw);
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+ /* Set bit for Go Link disconnect */
+ if (hw->mac.type >= e1000_82580) {
+ uint32_t phpm_reg;
+
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg |= E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ }
+
+ /* Power down the phy. Needed to make the link go Down */
+ e1000_power_down_phy(hw);
+
+ igb_dev_clear_queues(dev);
+
+ /* clear the recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_igb_dev_atomic_write_link_status(dev, &link);
+}
+
+static void
+eth_igb_close(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
+
+ eth_igb_stop(dev);
+ e1000_phy_hw_reset(hw);
+ igb_release_manageability(hw);
+ igb_hw_control_release(hw);
+
+ /* Clear bit for Go Link disconnect */
+ if (hw->mac.type >= e1000_82580) {
+ uint32_t phpm_reg;
+
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ }
+
+ igb_dev_clear_queues(dev);
+
+ memset(&link, 0, sizeof(link));
+ rte_igb_dev_atomic_write_link_status(dev, &link);
+}
+
+static int
+igb_get_rx_buffer_size(struct e1000_hw *hw)
+{
+ uint32_t rx_buf_size;
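+
+	/*
+	 * The RXPBS/PBA fields read below encode the Rx packet buffer size
+	 * in KB units; the "<< 10" shifts convert that value to bytes.
+	 */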
+ if (hw->mac.type == e1000_82576) {
+ rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
+ } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
+ /* PBS needs to be translated according to a lookup table */
+ rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
+ rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
+ rx_buf_size = (rx_buf_size << 10);
+ } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
+ rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
+ } else {
+ rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
+ }
+
+ return rx_buf_size;
+}
+
+/*********************************************************************
+ *
+ * Initialize the hardware
+ *
+ **********************************************************************/
+static int
+igb_hardware_init(struct e1000_hw *hw)
+{
+ uint32_t rx_buf_size;
+ int diag;
+
+ /* Let the firmware know the OS is in control */
+ igb_hw_control_acquire(hw);
+
+ /*
+ * These parameters control the automatic generation (Tx) and
+ * response (Rx) to Ethernet PAUSE frames.
+ * - High water mark should allow for at least two standard size (1518)
+ * frames to be received after sending an XOFF.
+ * - Low water mark works best when it is very near the high water mark.
+ * This allows the receiver to restart by sending XON when it has
+ * drained a bit. Here we use an arbitrary value of 1500 which will
+ * restart after one full frame is pulled from the buffer. There
+ * could be several smaller frames in the buffer and if so they will
+ * not trigger the XON until their total number reduces the buffer
+ * by 1500.
+ * - The pause time is fairly large at 1000 x 512ns = 512 usec.
+ */
+ rx_buf_size = igb_get_rx_buffer_size(hw);
+
+ hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
+ hw->fc.low_water = hw->fc.high_water - 1500;
+ hw->fc.pause_time = IGB_FC_PAUSE_TIME;
+ hw->fc.send_xon = 1;
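+
+	/*
+	 * Worked example (hypothetical numbers, for illustration only):
+	 * with a 64 KB Rx packet buffer, high_water = 65536 - 2 * 1518 =
+	 * 62500 bytes and low_water = 62500 - 1500 = 61000 bytes.
+	 */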
+
+ /* Set Flow control, use the tunable location if sane */
+ if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
+ hw->fc.requested_mode = igb_fc_setting;
+ else
+ hw->fc.requested_mode = e1000_fc_none;
+
+ /* Issue a global reset */
+ igb_pf_reset_hw(hw);
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+ diag = e1000_init_hw(hw);
+ if (diag < 0)
+ return (diag);
+
+ E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
+ e1000_get_phy_info(hw);
+ e1000_check_for_link(hw);
+
+ return (0);
+}
+
+/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
+static void
+eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_hw_stats *stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ int pause_frames;
+
+ if(hw->phy.media_type == e1000_media_type_copper ||
+ (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ stats->symerrs +=
+ E1000_READ_REG(hw,E1000_SYMERRS);
+ stats->sec += E1000_READ_REG(hw, E1000_SEC);
+ }
+
+ stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+ stats->mpc += E1000_READ_REG(hw, E1000_MPC);
+ stats->scc += E1000_READ_REG(hw, E1000_SCC);
+ stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
+
+ stats->mcc += E1000_READ_REG(hw, E1000_MCC);
+ stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
+ stats->colc += E1000_READ_REG(hw, E1000_COLC);
+ stats->dc += E1000_READ_REG(hw, E1000_DC);
+ stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
+ stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+ stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+ /*
+ ** For watchdog management we need to know if we have been
+ ** paused during the last interval, so capture that here.
+ */
+ pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
+ stats->xoffrxc += pause_frames;
+ stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+ stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+ stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
+ stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
+ stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
+ stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
+ stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+ stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+ stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
+ stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
+ stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
+ stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
+
+ /* For the 64-bit byte counters the low dword must be read first. */
+ /* Both registers clear on the read of the high dword */
+
+ stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
+ stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
+ stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
+ stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
+
+ stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
+ stats->ruc += E1000_READ_REG(hw, E1000_RUC);
+ stats->rfc += E1000_READ_REG(hw, E1000_RFC);
+ stats->roc += E1000_READ_REG(hw, E1000_ROC);
+ stats->rjc += E1000_READ_REG(hw, E1000_RJC);
+
+ stats->tor += E1000_READ_REG(hw, E1000_TORH);
+ stats->tot += E1000_READ_REG(hw, E1000_TOTH);
+
+ stats->tpr += E1000_READ_REG(hw, E1000_TPR);
+ stats->tpt += E1000_READ_REG(hw, E1000_TPT);
+ stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+ stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+ stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+ stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+ stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+ stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+ stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
+ stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+ /* Interrupt Counts */
+
+ stats->iac += E1000_READ_REG(hw, E1000_IAC);
+ stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+ stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+ stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+ stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+ stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+ stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+ stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+ stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+
+ /* Host to Card Statistics */
+
+ stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
+ stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
+ stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
+ stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
+ stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
+ stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
+ stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
+ stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
+ stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
+ stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
+ stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
+ stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
+ stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
+ stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
+
+ stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+ stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+ stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+ stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
+ stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+ stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+
+ if (rte_stats == NULL)
+ return;
+
+ /* Rx Errors */
+ rte_stats->ibadcrc = stats->crcerrs;
+ rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
+ rte_stats->imissed = stats->mpc;
+ rte_stats->ierrors = rte_stats->ibadcrc +
+ rte_stats->ibadlen +
+ rte_stats->imissed +
+ stats->rxerrc + stats->algnerrc + stats->cexterr;
+
+ /* Tx Errors */
+ rte_stats->oerrors = stats->ecol + stats->latecol;
+
+ /* XON/XOFF pause frames */
+ rte_stats->tx_pause_xon = stats->xontxc;
+ rte_stats->rx_pause_xon = stats->xonrxc;
+ rte_stats->tx_pause_xoff = stats->xofftxc;
+ rte_stats->rx_pause_xoff = stats->xoffrxc;
+
+ rte_stats->ipackets = stats->gprc;
+ rte_stats->opackets = stats->gptc;
+ rte_stats->ibytes = stats->gorc;
+ rte_stats->obytes = stats->gotc;
+}
+
+static void
+eth_igb_stats_reset(struct rte_eth_dev *dev)
+{
+ struct e1000_hw_stats *hw_stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* HW registers are cleared on read */
+ eth_igb_stats_get(dev, NULL);
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+}
+
+static void
+eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
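+	/*
+	 * Each UPDATE_VF_STAT() invocation below is assumed to read the given
+	 * VF statistics register and accumulate the delta since the previous
+	 * read into the running counter (the macro is defined elsewhere in
+	 * this file).
+	 */
+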
+ /* Good Rx packets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGPRC,
+ hw_stats->last_gprc, hw_stats->gprc);
+
+ /* Good Rx octets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGORC,
+ hw_stats->last_gorc, hw_stats->gorc);
+
+ /* Good Tx packets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGPTC,
+ hw_stats->last_gptc, hw_stats->gptc);
+
+ /* Good Tx octets, include VF loopback */
+ UPDATE_VF_STAT(E1000_VFGOTC,
+ hw_stats->last_gotc, hw_stats->gotc);
+
+	/* Rx Multicast packets */
+ UPDATE_VF_STAT(E1000_VFMPRC,
+ hw_stats->last_mprc, hw_stats->mprc);
+
+ /* Good Rx loopback packets */
+ UPDATE_VF_STAT(E1000_VFGPRLBC,
+ hw_stats->last_gprlbc, hw_stats->gprlbc);
+
+ /* Good Rx loopback octets */
+ UPDATE_VF_STAT(E1000_VFGORLBC,
+ hw_stats->last_gorlbc, hw_stats->gorlbc);
+
+ /* Good Tx loopback packets */
+ UPDATE_VF_STAT(E1000_VFGPTLBC,
+ hw_stats->last_gptlbc, hw_stats->gptlbc);
+
+ /* Good Tx loopback octets */
+ UPDATE_VF_STAT(E1000_VFGOTLBC,
+ hw_stats->last_gotlbc, hw_stats->gotlbc);
+
+ if (rte_stats == NULL)
+ return;
+
+ memset(rte_stats, 0, sizeof(*rte_stats));
+ rte_stats->ipackets = hw_stats->gprc;
+ rte_stats->ibytes = hw_stats->gorc;
+ rte_stats->opackets = hw_stats->gptc;
+ rte_stats->obytes = hw_stats->gotc;
+ rte_stats->imcasts = hw_stats->mprc;
+ rte_stats->ilbpackets = hw_stats->gprlbc;
+ rte_stats->ilbbytes = hw_stats->gorlbc;
+ rte_stats->olbpackets = hw_stats->gptlbc;
+ rte_stats->olbbytes = hw_stats->gotlbc;
+
+}
+
+static void
+eth_igbvf_stats_reset(struct rte_eth_dev *dev)
+{
+ struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* Sync HW register to the last stats */
+ eth_igbvf_stats_get(dev, NULL);
+
+ /* reset HW current stats*/
+ memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
+ offsetof(struct e1000_vf_stats, gprc));
+
+}
+
+static void
+eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
+ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
+ dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ dev_info->max_rx_queues = 4;
+ dev_info->max_tx_queues = 4;
+ dev_info->max_vmdq_pools = 0;
+ break;
+
+ case e1000_82576:
+ dev_info->max_rx_queues = 16;
+ dev_info->max_tx_queues = 16;
+ dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->vmdq_queue_num = 16;
+ break;
+
+ case e1000_82580:
+ dev_info->max_rx_queues = 8;
+ dev_info->max_tx_queues = 8;
+ dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->vmdq_queue_num = 8;
+ break;
+
+ case e1000_i350:
+ dev_info->max_rx_queues = 8;
+ dev_info->max_tx_queues = 8;
+ dev_info->max_vmdq_pools = ETH_8_POOLS;
+ dev_info->vmdq_queue_num = 8;
+ break;
+
+ case e1000_i354:
+ dev_info->max_rx_queues = 8;
+ dev_info->max_tx_queues = 8;
+ break;
+
+ case e1000_i210:
+ dev_info->max_rx_queues = 4;
+ dev_info->max_tx_queues = 4;
+ dev_info->max_vmdq_pools = 0;
+ break;
+
+ case e1000_i211:
+ dev_info->max_rx_queues = 2;
+ dev_info->max_tx_queues = 2;
+ dev_info->max_vmdq_pools = 0;
+ break;
+
+ default:
+ /* Should not happen */
+ break;
+ }
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = IGB_DEFAULT_RX_PTHRESH,
+ .hthresh = IGB_DEFAULT_RX_HTHRESH,
+ .wthresh = IGB_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = IGB_DEFAULT_TX_PTHRESH,
+ .hthresh = IGB_DEFAULT_TX_HTHRESH,
+ .wthresh = IGB_DEFAULT_TX_WTHRESH,
+ },
+ .txq_flags = 0,
+ };
+}
+
+static void
+eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
+ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
+ dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM;
+ switch (hw->mac.type) {
+ case e1000_vfadapt:
+ dev_info->max_rx_queues = 2;
+ dev_info->max_tx_queues = 2;
+ break;
+ case e1000_vfadapt_i350:
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = 1;
+ break;
+ default:
+ /* Should not happen */
+ break;
+ }
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = IGB_DEFAULT_RX_PTHRESH,
+ .hthresh = IGB_DEFAULT_RX_HTHRESH,
+ .wthresh = IGB_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = IGB_DEFAULT_TX_PTHRESH,
+ .hthresh = IGB_DEFAULT_TX_HTHRESH,
+ .wthresh = IGB_DEFAULT_TX_WTHRESH,
+ },
+ .txq_flags = 0,
+ };
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link, old;
+ int link_check, count;
+
+ link_check = 0;
+ hw->mac.get_link_status = 1;
+
+ /* possible wait-to-complete in up to 9 seconds */
+ for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
+ /* Read the real link status */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ /* Do the work to read phy */
+ e1000_check_for_link(hw);
+ link_check = !hw->mac.get_link_status;
+ break;
+
+ case e1000_media_type_fiber:
+ e1000_check_for_link(hw);
+ link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STATUS_LU);
+ break;
+
+ case e1000_media_type_internal_serdes:
+ e1000_check_for_link(hw);
+ link_check = hw->mac.serdes_has_link;
+ break;
+
+ /* VF device is type_unknown */
+ case e1000_media_type_unknown:
+ eth_igbvf_link_update(hw);
+ link_check = !hw->mac.get_link_status;
+ break;
+
+ default:
+ break;
+ }
+ if (link_check || wait_to_complete == 0)
+ break;
+ rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
+ }
+ memset(&link, 0, sizeof(link));
+ rte_igb_dev_atomic_read_link_status(dev, &link);
+ old = link;
+
+ /* Now we check if a transition has happened */
+ if (link_check) {
+ hw->mac.ops.get_link_up_info(hw, &link.link_speed,
+ &link.link_duplex);
+ link.link_status = 1;
+ } else if (!link_check) {
+ link.link_speed = 0;
+ link.link_duplex = 0;
+ link.link_status = 0;
+ }
+ rte_igb_dev_atomic_write_link_status(dev, &link);
+
+ /* not changed */
+ if (old.link_status == link.link_status)
+ return -1;
+
+ /* changed */
+ return 0;
+}
+
+/*
+ * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means
+ * that the driver is loaded.
+ */
+static void
+igb_hw_control_acquire(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ */
+static void
+igb_hw_control_release(struct e1000_hw *hw)
+{
+ uint32_t ctrl_ext;
+
+	/* Let firmware take over control of h/w */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * Bit of a misnomer, what this really means is
+ * to enable OS management of the system... aka
+ * to disable special hardware management features.
+ */
+static void
+igb_init_manageability(struct e1000_hw *hw)
+{
+ if (e1000_enable_mng_pass_thru(hw)) {
+ uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
+ uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
+
+ /* disable hardware interception of ARP */
+ manc &= ~(E1000_MANC_ARP_EN);
+
+ /* enable receiving management packets to the host */
+ manc |= E1000_MANC_EN_MNG2HOST;
+ manc2h |= 1 << 5; /* Mng Port 623 */
+ manc2h |= 1 << 6; /* Mng Port 664 */
+ E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+ }
+}
+
+static void
+igb_release_manageability(struct e1000_hw *hw)
+{
+ if (e1000_enable_mng_pass_thru(hw)) {
+ uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
+
+ manc |= E1000_MANC_ARP_EN;
+ manc &= ~E1000_MANC_EN_MNG2HOST;
+
+ E1000_WRITE_REG(hw, E1000_MANC, manc);
+ }
+}
+
+static void
+eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= (~E1000_RCTL_UPE);
+ if (dev->data->all_multicast == 1)
+ rctl |= E1000_RCTL_MPE;
+ else
+ rctl &= (~E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_MPE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static void
+eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rctl;
+
+ if (dev->data->promiscuous == 1)
+ return; /* must remain in all_multicast mode */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= (~E1000_RCTL_MPE);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+static int
+eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
+ vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK);
+ vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
+static void
+eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t reg = ETHER_TYPE_VLAN;
+
+ reg |= (tpid << 16);
+ E1000_WRITE_REG(hw, E1000_VET, reg);
+}
+
+static void
+igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* Filter Table Disable */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg &= ~E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+}
+
+static void
+igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t reg;
+ int i;
+
+ /* Filter Table Enable, CFI not used for packet acceptance */
+ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg &= ~E1000_RCTL_CFIEN;
+ reg |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+
+ /* restore VFTA table */
+ for (i = 0; i < IGB_VFTA_SIZE; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
+}
+
+static void
+igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* VLAN Mode Disable */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg &= ~E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+}
+
+static void
+igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* VLAN Mode Enable */
+ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg |= E1000_CTRL_VME;
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+}
+
+static void
+igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* CTRL_EXT: Extended VLAN */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* Update maximum packet length */
+ if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ VLAN_TAG_SIZE);
+}
+
+static void
+igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* CTRL_EXT: Extended VLAN */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg |= E1000_CTRL_EXT_EXTEND_VLAN;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* Update maximum packet length */
+ if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * VLAN_TAG_SIZE);
+}
+
+static void
+eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ if(mask & ETH_VLAN_STRIP_MASK){
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ igb_vlan_hw_strip_enable(dev);
+ else
+ igb_vlan_hw_strip_disable(dev);
+ }
+
+ if(mask & ETH_VLAN_FILTER_MASK){
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ igb_vlan_hw_filter_enable(dev);
+ else
+ igb_vlan_hw_filter_disable(dev);
+ }
+
+ if(mask & ETH_VLAN_EXTEND_MASK){
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ igb_vlan_hw_extend_enable(dev);
+ else
+ igb_vlan_hw_extend_disable(dev);
+ }
+}
+
+
+/**
+ * Enables the link status change (LSC) bit in the interrupt mask.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= E1000_ICR_LSC;
+
+ return 0;
+}
+
+/*
+ * Reads ICR to get the interrupt causes, checks them, and sets a bit flag
+ * requesting a link status update.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t icr;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ igb_intr_disable(hw);
+
+ /* read-on-clear nic registers here */
+ icr = E1000_READ_REG(hw, E1000_ICR);
+
+ intr->flags = 0;
+ if (icr & E1000_ICR_LSC) {
+ intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+ }
+
+ if (icr & E1000_ICR_VMMB)
+ intr->flags |= E1000_FLAG_MAILBOX;
+
+ return 0;
+}
+
+/*
+ * It executes link_update after knowing an interrupt is present.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_action(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ uint32_t tctl, rctl;
+ struct rte_eth_link link;
+ int ret;
+
+ if (intr->flags & E1000_FLAG_MAILBOX) {
+ igb_pf_mbx_process(dev);
+ intr->flags &= ~E1000_FLAG_MAILBOX;
+ }
+
+ igb_intr_enable(dev);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+
+ if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
+ intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+
+ /* set get_link_status to check register later */
+ hw->mac.get_link_status = 1;
+ ret = eth_igb_link_update(dev, 0);
+
+ /* check if link has changed */
+ if (ret < 0)
+ return 0;
+
+ memset(&link, 0, sizeof(link));
+ rte_igb_dev_atomic_read_link_status(dev, &link);
+ if (link.link_status) {
+ PMD_INIT_LOG(INFO,
+ " Port %d: Link Up - speed %u Mbps - %s",
+ dev->data->port_id,
+ (unsigned)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_INIT_LOG(INFO, " Port %d: Link Down",
+ dev->data->port_id);
+ }
+ PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+ dev->pci_dev->addr.domain,
+ dev->pci_dev->addr.bus,
+ dev->pci_dev->addr.devid,
+ dev->pci_dev->addr.function);
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ if (link.link_status) {
+ /* enable Tx/Rx */
+ tctl |= E1000_TCTL_EN;
+ rctl |= E1000_RCTL_EN;
+ } else {
+ /* disable Tx/Rx */
+ tctl &= ~E1000_TCTL_EN;
+ rctl &= ~E1000_RCTL_EN;
+ }
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ }
+
+ return 0;
+}
+
+/**
+ * Interrupt handler that is registered at initialization time.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ eth_igb_interrupt_get_status(dev);
+ eth_igb_interrupt_action(dev);
+}
+
+static int
+eth_igb_led_on(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+eth_igb_led_off(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct e1000_hw *hw;
+ uint32_t ctrl;
+ int tx_pause;
+ int rx_pause;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ fc_conf->pause_time = hw->fc.pause_time;
+ fc_conf->high_water = hw->fc.high_water;
+ fc_conf->low_water = hw->fc.low_water;
+ fc_conf->send_xon = hw->fc.send_xon;
+ fc_conf->autoneg = hw->mac.autoneg;
+
+ /*
+ * Return rx_pause and tx_pause status according to actual setting of
+ * the TFCE and RFCE bits in the CTRL register.
+ */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ if (ctrl & E1000_CTRL_TFCE)
+ tx_pause = 1;
+ else
+ tx_pause = 0;
+
+ if (ctrl & E1000_CTRL_RFCE)
+ rx_pause = 1;
+ else
+ rx_pause = 0;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static int
+eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct e1000_hw *hw;
+ int err;
+ enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
+ e1000_fc_none,
+ e1000_fc_rx_pause,
+ e1000_fc_tx_pause,
+ e1000_fc_full
+ };
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint32_t rctl;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (fc_conf->autoneg != hw->mac.autoneg)
+ return -ENOTSUP;
+ rx_buf_size = igb_get_rx_buffer_size(hw);
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+ /* At least reserve one Ethernet frame for watermark */
+ max_high_water = rx_buf_size - ETHER_MAX_LEN;
+ if ((fc_conf->high_water > max_high_water) ||
+ (fc_conf->high_water < fc_conf->low_water)) {
+ PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
+ PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
+ return (-EINVAL);
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
+ hw->fc.pause_time = fc_conf->pause_time;
+ hw->fc.high_water = fc_conf->high_water;
+ hw->fc.low_water = fc_conf->low_water;
+ hw->fc.send_xon = fc_conf->send_xon;
+
+ err = e1000_setup_link_generic(hw);
+ if (err == E1000_SUCCESS) {
+
+ /* check if we want to forward MAC frames - driver doesn't have native
+ * capability to do that, so we'll write the registers ourselves */
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* set or clear MFLCN.PMCF bit depending on configuration */
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ rctl |= E1000_RCTL_PMCF;
+ else
+ rctl &= ~E1000_RCTL_PMCF;
+
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+ }
+
+ PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
+ return (-EIO);
+}
+
+#define E1000_RAH_POOLSEL_SHIFT (18)
+static void
+eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, __rte_unused uint32_t pool)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rah;
+
+ e1000_rar_set(hw, mac_addr->addr_bytes, index);
+ rah = E1000_READ_REG(hw, E1000_RAH(index));
+ rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
+ E1000_WRITE_REG(hw, E1000_RAH(index), rah);
+}
+
+static void
+eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
+{
+ uint8_t addr[ETHER_ADDR_LEN];
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ memset(addr, 0, sizeof(addr));
+
+ e1000_rar_set(hw, addr, index);
+}
+
+/*
+ * Virtual Function operations
+ */
+static void
+igbvf_intr_disable(struct e1000_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+	/* Clear interrupt mask to stop interrupts from being generated */
+ E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
+
+ E1000_WRITE_FLUSH(hw);
+}
+
+static void
+igbvf_stop_adapter(struct rte_eth_dev *dev)
+{
+ u32 reg_val;
+ u16 i;
+ struct rte_eth_dev_info dev_info;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ memset(&dev_info, 0, sizeof(dev_info));
+ eth_igbvf_infos_get(dev, &dev_info);
+
+	/* Clear interrupt mask to stop interrupts from being generated */
+ igbvf_intr_disable(hw);
+
+ /* Clear any pending interrupts, flush previous writes */
+ E1000_READ_REG(hw, E1000_EICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < dev_info.max_tx_queues; i++)
+ E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < dev_info.max_rx_queues; i++) {
+ reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
+ reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
+ while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
+ ;
+ }
+
+ /* flush all queues disables */
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(2);
+}
+
+static int eth_igbvf_link_update(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ struct e1000_mac_info *mac = &hw->mac;
+ int ret_val = E1000_SUCCESS;
+
+ PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
+
+ /*
+	 * We only want to run this if there has been a rst asserted.
+	 * In this case that could mean a link change, device reset,
+	 * or a virtual function reset.
+ */
+
+ /* If we were hit with a reset or timeout drop the link */
+ if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = TRUE;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
+ goto out;
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link */
+ mac->get_link_status = FALSE;
+
+out:
+ return ret_val;
+}
+
+
+static int
+igbvf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf* conf = &dev->data->dev_conf;
+
+ PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+ dev->data->port_id);
+
+ /*
+ * VF has no ability to enable/disable HW CRC
+ * Keep the persistent behavior the same as Host PF
+ */
+#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
+ if (!conf->rxmode.hw_strip_crc) {
+ PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
+ conf->rxmode.hw_strip_crc = 1;
+ }
+#else
+ if (conf->rxmode.hw_strip_crc) {
+ PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
+ conf->rxmode.hw_strip_crc = 0;
+ }
+#endif
+
+ return 0;
+}
+
+static int
+igbvf_dev_start(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->mac.ops.reset_hw(hw);
+
+ /* Set all vfta */
+ igbvf_set_vfta_all(dev,1);
+
+ eth_igbvf_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ ret = eth_igbvf_rx_init(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ igb_dev_clear_queues(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+igbvf_dev_stop(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ igbvf_stop_adapter(dev);
+
+ /*
+ * Clear what we set, but we still keep shadow_vfta to
+ * restore after device starts
+ */
+ igbvf_set_vfta_all(dev,0);
+
+ igb_dev_clear_queues(dev);
+}
+
+static void
+igbvf_dev_close(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ e1000_reset_hw(hw);
+
+ igbvf_dev_stop(dev);
+}
+
+static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ uint32_t msgbuf[2];
+
+	/* After setting the VLAN, VLAN strip will also be enabled in the igb driver */
+ msgbuf[0] = E1000_VF_SET_VLAN;
+ msgbuf[1] = vid;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ if (on)
+ msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
+
+ return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
+}
+
+static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ int i = 0, j = 0, vfta = 0, mask = 1;
+
+ for (i = 0; i < IGB_VFTA_SIZE; i++){
+ vfta = shadow_vfta->vfta[i];
+ if(vfta){
+ mask = 1;
+ for (j = 0; j < 32; j++){
+ if(vfta & mask)
+ igbvf_set_vfta(hw,
+ (uint16_t)((i<<5)+j), on);
+ mask<<=1;
+ }
+ }
+ }
+
+}
+
+static int
+igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vid_idx = 0;
+ uint32_t vid_bit = 0;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+	/* vind is not used in the VF driver, set to 0; check ixgbe_set_vfta_vf */
+ ret = igbvf_set_vfta(hw, vlan_id, !!on);
+ if(ret){
+ PMD_INIT_LOG(ERR, "Unable to set VF vlan");
+ return ret;
+ }
+ vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
+
+	/* Save what we set and restore it after device reset */
+ if (on)
+ shadow_vfta->vfta[vid_idx] |= vid_bit;
+ else
+ shadow_vfta->vfta[vid_idx] &= ~vid_bit;
+
+ return 0;
+}
+
+static int
+eth_igb_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ uint32_t reta, r;
+ uint16_t idx, shift;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
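+	/*
+	 * The 128-entry redirection table is packed four one-byte entries per
+	 * 32-bit RETA register, so the loop below walks the table in strides
+	 * of four and read-modify-writes a register whenever only part of its
+	 * entries are selected by the mask.
+	 */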
+ for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IGB_4_BIT_MASK);
+ if (!mask)
+ continue;
+ if (mask == IGB_4_BIT_MASK)
+ r = 0;
+ else
+ r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
+ for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta |= reta_conf[idx].reta[shift + j] <<
+ (CHAR_BIT * j);
+ else
+ reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
+ }
+ E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
+ }
+
+ return 0;
+}
+
+static int
+eth_igb_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ uint32_t reta;
+ uint16_t idx, shift;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IGB_4_BIT_MASK);
+ if (!mask)
+ continue;
+ reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
+ for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta_conf[idx].reta[shift + j] =
+ ((reta >> (CHAR_BIT * j)) &
+ IGB_8_BIT_MASK);
+ }
+ }
+
+ return 0;
+}
+
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+ (type) != e1000_82576)\
+ return -ENOSYS;\
+} while (0)
+
+/*
+ * add the syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_add_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf, rfctl;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (rx_queue >= IGB_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
+ if (synqf & E1000_SYN_FILTER_ENABLE)
+ return -EINVAL;
+
+ synqf = (uint32_t)(((rx_queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
+ E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
+
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ if (filter->hig_pri)
+ rfctl |= E1000_RFCTL_SYNQFP;
+ else
+ rfctl &= ~E1000_RFCTL_SYNQFP;
+
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+ return 0;
+}
+
+/*
+ * remove the syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_remove_syn_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
+ return 0;
+}
+
+/*
+ * get the syn filter's info
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter to be returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_get_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t *rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf, rfctl;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+ synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
+ if (synqf & E1000_SYN_FILTER_ENABLE) {
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
+ *rx_queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
+ E1000_SYN_FILTER_QUEUE_SHIFT);
+ return 0;
+ }
+ return -ENOENT;
+}
+
+/*
+ * add an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_ethertype_filter *filter, uint16_t rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t etqf;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (index >= E1000_MAX_ETQF_FILTERS || rx_queue >= IGB_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ etqf = E1000_READ_REG(hw, E1000_ETQF(index));
+ if (etqf & E1000_ETQF_FILTER_ENABLE)
+ return -EINVAL; /* filter index is in use. */
+ else
+ etqf = 0;
+
+ etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
+ etqf |= (uint32_t)(filter->ethertype & E1000_ETQF_ETHERTYPE);
+ etqf |= rx_queue << E1000_ETQF_QUEUE_SHIFT;
+
+ if (filter->priority_en) {
+ PMD_INIT_LOG(ERR, "vlan and priority (%d) are not supported"
+ " in E1000.", filter->priority);
+ return -EINVAL;
+ }
+
+ E1000_WRITE_REG(hw, E1000_ETQF(index), etqf);
+ return 0;
+}
+
+/*
+ * remove an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_remove_ethertype_filter(struct rte_eth_dev *dev, uint16_t index)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (index >= E1000_MAX_ETQF_FILTERS)
+ return -EINVAL;
+
+ E1000_WRITE_REG(hw, E1000_ETQF(index), 0);
+ return 0;
+}
+
+/*
+ * get an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter to be retrieved.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_ethertype_filter *filter, uint16_t *rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t etqf;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (index >= E1000_MAX_ETQF_FILTERS)
+ return -EINVAL;
+
+ etqf = E1000_READ_REG(hw, E1000_ETQF(index));
+ if (etqf & E1000_ETQF_FILTER_ENABLE) {
+ filter->ethertype = etqf & E1000_ETQF_ETHERTYPE;
+ filter->priority_en = 0;
+ *rx_queue = (etqf & E1000_ETQF_QUEUE) >> E1000_ETQF_QUEUE_SHIFT;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != e1000_82580 && (type) != e1000_i350)\
+ return -ENOSYS; \
+} while (0)
+
+/*
+ * add a 2tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_add_2tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_2tuple_filter *filter, uint16_t rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ttqf, imir = 0;
+ uint32_t imir_ext = 0;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_TTQF_FILTERS ||
+ rx_queue >= IGB_MAX_RX_QUEUE_NUM ||
+ filter->priority > E1000_2TUPLE_MAX_PRI)
+ return -EINVAL; /* filter index is out of range. */
+ if (filter->tcp_flags > TCP_FLAG_ALL)
+ return -EINVAL; /* flags are invalid. */
+
+ ttqf = E1000_READ_REG(hw, E1000_TTQF(index));
+ if (ttqf & E1000_TTQF_QUEUE_ENABLE)
+ return -EINVAL; /* filter index is in use. */
+
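+ /*
+  * A 2-tuple filter is split across three registers: IMIR carries the
+  * destination port and priority, TTQF carries the protocol, masks and
+  * target queue, and IMIREXT carries the TCP flag match (set below).
+  */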
+ imir = (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT);
+ if (filter->dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+
+ imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ ttqf = 0;
+ ttqf |= E1000_TTQF_QUEUE_ENABLE;
+ ttqf |= (uint32_t)(rx_queue << E1000_TTQF_QUEUE_SHIFT);
+ ttqf |= (uint32_t)(filter->protocol & E1000_TTQF_PROTOCOL_MASK);
+ if (filter->protocol_mask == 1)
+ ttqf |= E1000_TTQF_MASK_ENABLE;
+ else
+ ttqf &= ~E1000_TTQF_MASK_ENABLE;
+
+ imir_ext |= E1000_IMIR_EXT_SIZE_BP;
+ /* tcp flags bits setting. */
+ if (filter->tcp_flags & TCP_FLAG_ALL) {
+ if (filter->tcp_flags & TCP_UGR_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_UGR;
+ if (filter->tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_ACK;
+ if (filter->tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_PSH;
+ if (filter->tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_RST;
+ if (filter->tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_SYN;
+ if (filter->tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_FIN;
+ imir_ext &= ~E1000_IMIR_EXT_CTRL_BP;
+ } else
+ imir_ext |= E1000_IMIR_EXT_CTRL_BP;
+ E1000_WRITE_REG(hw, E1000_IMIR(index), imir);
+ E1000_WRITE_REG(hw, E1000_TTQF(index), ttqf);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext);
+ return 0;
+}
+
+/*
+ * remove a 2tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_TTQF_FILTERS)
+ return -EINVAL; /* filter index is out of range */
+
+ E1000_WRITE_REG(hw, E1000_TTQF(index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIR(index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0);
+ return 0;
+}
+
+/*
+ * get a 2tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter to be returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_get_2tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_2tuple_filter *filter, uint16_t *rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t imir, ttqf, imir_ext;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_TTQF_FILTERS)
+ return -EINVAL; /* filter index is out of range. */
+
+ ttqf = E1000_READ_REG(hw, E1000_TTQF(index));
+ if (ttqf & E1000_TTQF_QUEUE_ENABLE) {
+ imir = E1000_READ_REG(hw, E1000_IMIR(index));
+ filter->protocol = ttqf & E1000_TTQF_PROTOCOL_MASK;
+ filter->protocol_mask = (ttqf & E1000_TTQF_MASK_ENABLE) ? 1 : 0;
+ *rx_queue = (ttqf & E1000_TTQF_RX_QUEUE_MASK) >>
+ E1000_TTQF_QUEUE_SHIFT;
+ filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT);
+ filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0;
+ filter->priority = (imir & E1000_IMIR_PRIORITY) >>
+ E1000_IMIR_PRIORITY_SHIFT;
+
+ imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index));
+ if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) {
+ if (imir_ext & E1000_IMIR_EXT_CTRL_UGR)
+ filter->tcp_flags |= TCP_UGR_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_ACK)
+ filter->tcp_flags |= TCP_ACK_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_PSH)
+ filter->tcp_flags |= TCP_PSH_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_RST)
+ filter->tcp_flags |= TCP_RST_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_SYN)
+ filter->tcp_flags |= TCP_SYN_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_FIN)
+ filter->tcp_flags |= TCP_FIN_FLAG;
+ } else
+ filter->tcp_flags = 0;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+/*
+ * add a flex filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_add_flex_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_flex_filter *filter, uint16_t rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, en_bits = 0;
+ uint32_t queueing = 0;
+ uint32_t reg_off = 0;
+ uint8_t i, j = 0;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_FLEXIBLE_FILTERS)
+ return -EINVAL; /* filter index is out of range. */
+
+ if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
+ filter->len % 8 != 0 ||
+ filter->priority > E1000_MAX_FLEX_FILTER_PRI)
+ return -EINVAL;
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ en_bits = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index);
+ if ((wufc & en_bits) == en_bits)
+ return -EINVAL; /* the filter is in use. */
+
+ E1000_WRITE_REG(hw, E1000_WUFC,
+ wufc | E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index));
+
+ j = 0;
+ if (index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(index);
+ else
+ reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);
+
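+ /*
+  * Per the loop below, a flex host filter table entry is written as 16
+  * rows of four dwords each: two filter data dwords and one mask dword
+  * per row (the fourth dword is left untouched), followed by the
+  * queueing dword at E1000_FHFT_QUEUEING_OFFSET.
+  */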
+ for (i = 0; i < 16; i++) {
+ E1000_WRITE_REG(hw, reg_off + i*4*4, filter->dwords[j]);
+ E1000_WRITE_REG(hw, reg_off + (i*4+1)*4, filter->dwords[++j]);
+ E1000_WRITE_REG(hw, reg_off + (i*4+2)*4,
+ (uint32_t)filter->mask[i]);
+ ++j;
+ }
+ queueing |= filter->len |
+ (rx_queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
+ (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
+ E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, queueing);
+ return 0;
+}
+
+/*
+ * remove a flex filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_remove_flex_filter(struct rte_eth_dev *dev,
+ uint16_t index)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, reg_off = 0;
+ uint8_t i;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_FLEXIBLE_FILTERS)
+ return -EINVAL; /* filter index is out of range. */
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc & (~(E1000_WUFC_FLX0 << index)));
+
+ if (index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(index);
+ else
+ reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);
+
+ for (i = 0; i < 64; i++)
+ E1000_WRITE_REG(hw, reg_off + i*4, 0);
+ return 0;
+}
+
+/*
+ * get a flex filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter to be returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_get_flex_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_flex_filter *filter, uint16_t *rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t wufc, queueing, wufc_en = 0;
+ uint8_t i, j;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (index >= E1000_MAX_FLEXIBLE_FILTERS)
+ return -EINVAL; /* filter index is out of range. */
+
+ wufc = E1000_READ_REG(hw, E1000_WUFC);
+ wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index);
+
+ if ((wufc & wufc_en) == wufc_en) {
+ uint32_t reg_off = 0;
+ j = 0;
+ if (index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(index);
+ else
+ reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT);
+
+ for (i = 0; i < 16; i++, j = i * 2) {
+ filter->dwords[j] =
+ E1000_READ_REG(hw, reg_off + i*4*4);
+ filter->dwords[j+1] =
+ E1000_READ_REG(hw, reg_off + (i*4+1)*4);
+ filter->mask[i] =
+ E1000_READ_REG(hw, reg_off + (i*4+2)*4);
+ }
+ queueing = E1000_READ_REG(hw,
+ reg_off + E1000_FHFT_QUEUEING_OFFSET);
+ filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
+ filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
+ E1000_FHFT_QUEUEING_PRIO_SHIFT;
+ *rx_queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
+ E1000_FHFT_QUEUEING_QUEUE_SHIFT;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ftqf, spqf = 0;
+ uint32_t imir = 0;
+ uint32_t imir_ext = 0;
+
+ if (hw->mac.type != e1000_82576)
+ return -ENOSYS;
+
+ if (index >= E1000_MAX_FTQF_FILTERS ||
+ rx_queue >= IGB_MAX_RX_QUEUE_NUM_82576)
+ return -EINVAL; /* filter index is out of range. */
+
+ ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
+ if (ftqf & E1000_FTQF_QUEUE_ENABLE)
+ return -EINVAL; /* filter index is in use. */
+
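+ /*
+  * A 5-tuple filter spans several registers: FTQF holds the protocol,
+  * field masks and target queue, DAQF/SAQF hold the destination and
+  * source IPs, SPQF the source port, and IMIR/IMIREXT the destination
+  * port, priority and TCP flag match.
+  */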
+ ftqf = 0;
+ ftqf |= filter->protocol & E1000_FTQF_PROTOCOL_MASK;
+ if (filter->src_ip_mask == 1) /* 1b means not compare. */
+ ftqf |= E1000_FTQF_SOURCE_ADDR_MASK;
+ if (filter->dst_ip_mask == 1)
+ ftqf |= E1000_FTQF_DEST_ADDR_MASK;
+ if (filter->src_port_mask == 1)
+ ftqf |= E1000_FTQF_SOURCE_PORT_MASK;
+ if (filter->protocol_mask == 1)
+ ftqf |= E1000_FTQF_PROTOCOL_COMP_MASK;
+ ftqf |= (rx_queue << E1000_FTQF_QUEUE_SHIFT) & E1000_FTQF_QUEUE_MASK;
+ ftqf |= E1000_FTQF_VF_MASK_EN;
+ ftqf |= E1000_FTQF_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_FTQF(index), ftqf);
+ E1000_WRITE_REG(hw, E1000_DAQF(index), filter->dst_ip);
+ E1000_WRITE_REG(hw, E1000_SAQF(index), filter->src_ip);
+
+ spqf |= filter->src_port & E1000_SPQF_SRCPORT;
+ E1000_WRITE_REG(hw, E1000_SPQF(index), spqf);
+
+ imir |= (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT);
+ if (filter->dst_port_mask == 1) /* 1b means not compare. */
+ imir |= E1000_IMIR_PORT_BP;
+ else
+ imir &= ~E1000_IMIR_PORT_BP;
+ imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT;
+
+ imir_ext |= E1000_IMIR_EXT_SIZE_BP;
+ /* tcp flags bits setting. */
+ if (filter->tcp_flags & TCP_FLAG_ALL) {
+ if (filter->tcp_flags & TCP_UGR_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_UGR;
+ if (filter->tcp_flags & TCP_ACK_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_ACK;
+ if (filter->tcp_flags & TCP_PSH_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_PSH;
+ if (filter->tcp_flags & TCP_RST_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_RST;
+ if (filter->tcp_flags & TCP_SYN_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_SYN;
+ if (filter->tcp_flags & TCP_FIN_FLAG)
+ imir_ext |= E1000_IMIR_EXT_CTRL_FIN;
+ } else
+ imir_ext |= E1000_IMIR_EXT_CTRL_BP;
+ E1000_WRITE_REG(hw, E1000_IMIR(index), imir);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext);
+ return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != e1000_82576)
+ return -ENOSYS;
+
+ if (index >= E1000_MAX_FTQF_FILTERS)
+ return -EINVAL; /* filter index is out of range. */
+
+ E1000_WRITE_REG(hw, E1000_FTQF(index), 0);
+ E1000_WRITE_REG(hw, E1000_DAQF(index), 0);
+ E1000_WRITE_REG(hw, E1000_SAQF(index), 0);
+ E1000_WRITE_REG(hw, E1000_SPQF(index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIR(index), 0);
+ E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0);
+ return 0;
+}
+
+/*
+ * get a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter to be returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+eth_igb_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t *rx_queue)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t spqf, ftqf, imir, imir_ext;
+
+ if (hw->mac.type != e1000_82576)
+ return -ENOSYS;
+
+ if (index >= E1000_MAX_FTQF_FILTERS)
+ return -EINVAL; /* filter index is out of range. */
+
+ ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
+ if (ftqf & E1000_FTQF_QUEUE_ENABLE) {
+ filter->src_ip_mask =
+ (ftqf & E1000_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
+ filter->dst_ip_mask =
+ (ftqf & E1000_FTQF_DEST_ADDR_MASK) ? 1 : 0;
+ filter->src_port_mask =
+ (ftqf & E1000_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
+ filter->protocol_mask =
+ (ftqf & E1000_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
+ filter->protocol =
+ (uint8_t)ftqf & E1000_FTQF_PROTOCOL_MASK;
+ *rx_queue = (uint16_t)((ftqf & E1000_FTQF_QUEUE_MASK) >>
+ E1000_FTQF_QUEUE_SHIFT);
+
+ spqf = E1000_READ_REG(hw, E1000_SPQF(index));
+ filter->src_port = spqf & E1000_SPQF_SRCPORT;
+
+ filter->dst_ip = E1000_READ_REG(hw, E1000_DAQF(index));
+ filter->src_ip = E1000_READ_REG(hw, E1000_SAQF(index));
+
+ imir = E1000_READ_REG(hw, E1000_IMIR(index));
+ filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0;
+ filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT);
+ filter->priority = (imir & E1000_IMIR_PRIORITY) >>
+ E1000_IMIR_PRIORITY_SHIFT;
+
+ imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index));
+ if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) {
+ if (imir_ext & E1000_IMIR_EXT_CTRL_UGR)
+ filter->tcp_flags |= TCP_UGR_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_ACK)
+ filter->tcp_flags |= TCP_ACK_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_PSH)
+ filter->tcp_flags |= TCP_PSH_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_RST)
+ filter->tcp_flags |= TCP_RST_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_SYN)
+ filter->tcp_flags |= TCP_SYN_FLAG;
+ if (imir_ext & E1000_IMIR_EXT_CTRL_FIN)
+ filter->tcp_flags |= TCP_FIN_FLAG;
+ } else
+ filter->tcp_flags = 0;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static int
+eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ uint32_t rctl;
+ struct e1000_hw *hw;
+ struct rte_eth_dev_info dev_info;
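+ /* frame size adds the L2 header, CRC and one VLAN tag on top of the MTU */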
+ uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
+ VLAN_TAG_SIZE);
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+#ifdef RTE_LIBRTE_82571_SUPPORT
+ /* XXX: not bigger than max_rx_pktlen */
+ if (hw->mac.type == e1000_82571)
+ return -ENOTSUP;
+#endif
+ eth_igb_infos_get(dev, &dev_info);
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) ||
+ (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* refuse mtu that requires the support of scattered packets when this
+ * feature has not been enabled before. */
+ if (!dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ return -EINVAL;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* switch to jumbo mode if needed */
+ if (frame_size > ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ rctl |= E1000_RCTL_LPE;
+ } else {
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ rctl &= ~E1000_RCTL_LPE;
+ }
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ return 0;
+}
+
+static struct rte_driver pmd_igb_drv = {
+ .type = PMD_PDEV,
+ .init = rte_igb_pmd_init,
+};
+
+static struct rte_driver pmd_igbvf_drv = {
+ .type = PMD_PDEV,
+ .init = rte_igbvf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(pmd_igb_drv);
+PMD_REGISTER_DRIVER(pmd_igbvf_drv);
diff --git a/src/dpdk_lib18/librte_pmd_e1000/igb_pf.c b/src/dpdk_lib18/librte_pmd_e1000/igb_pf.c
new file mode 100755
index 00000000..bc3816a7
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/igb_pf.c
@@ -0,0 +1,483 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+
+#include "e1000/e1000_defines.h"
+#include "e1000/e1000_regs.h"
+#include "e1000/e1000_hw.h"
+#include "e1000_ethdev.h"
+
+static inline uint16_t
+dev_num_vf(struct rte_eth_dev *eth_dev)
+{
+ return eth_dev->pci_dev->max_vfs;
+}
+
+static inline
+int igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
+{
+ unsigned char vf_mac_addr[ETHER_ADDR_LEN];
+ struct e1000_vf_info *vfinfo =
+ *E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ uint16_t vfn;
+
+ for (vfn = 0; vfn < vf_num; vfn++) {
+ eth_random_addr(vf_mac_addr);
+ /* keep the random address as default */
+ memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
+ ETHER_ADDR_LEN);
+ }
+
+ return 0;
+}
+
+static inline int
+igb_mb_intr_setup(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= E1000_ICR_VMMB;
+
+ return 0;
+}
+
+void igb_pf_host_init(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_vf_info **vfinfo =
+ E1000_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ uint16_t vf_num;
+ uint8_t nb_queue;
+
+ RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+ if (0 == (vf_num = dev_num_vf(eth_dev)))
+ return;
+
+ if (hw->mac.type == e1000_i350)
+ nb_queue = 1;
+ else if (hw->mac.type == e1000_82576)
+ /* per datasheet, it should be 2, but 1 seems correct */
+ nb_queue = 1;
+ else
+ return;
+
+ *vfinfo = rte_zmalloc("vf_info", sizeof(struct e1000_vf_info) * vf_num, 0);
+ if (*vfinfo == NULL)
+ rte_panic("Cannot allocate memory for private VF data\n");
+
+ RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
+ RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
+
+ igb_vf_perm_addr_gen(eth_dev, vf_num);
+
+ /* set mb interrupt mask */
+ igb_mb_intr_setup(eth_dev);
+
+ return;
+}
+
+#define E1000_RAH_POOLSEL_SHIFT (18)
+int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
+{
+ uint32_t vtctl;
+ uint16_t vf_num;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ uint32_t vlanctrl;
+ int i;
+ uint32_t rah;
+
+ if (0 == (vf_num = dev_num_vf(eth_dev)))
+ return -1;
+
+ /* enable VMDq and set the default pool for PF */
+ vtctl = E1000_READ_REG(hw, E1000_VT_CTL);
+ vtctl &= ~E1000_VT_CTL_DEFAULT_POOL_MASK;
+ vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
+ << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+ vtctl |= E1000_VT_CTL_VM_REPL_EN;
+ E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);
+
+ /* Enable pools reserved to PF only */
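+ /*
+  * The low vf_num bits select the VF pools, so shifting ~0 left by
+  * vf_num leaves RX/TX enabled only for the pools owned by the PF.
+  */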
+ E1000_WRITE_REG(hw, E1000_VFRE, (~0) << vf_num);
+ E1000_WRITE_REG(hw, E1000_VFTE, (~0) << vf_num);
+
+ /* PFDMA Tx General Switch Control: enable VMDq loopback */
+ if (hw->mac.type == e1000_i350)
+ E1000_WRITE_REG(hw, E1000_TXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN);
+ else
+ E1000_WRITE_REG(hw, E1000_DTXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN);
+
+ /* clear VMDq map to permanent rar 0 */
+ rah = E1000_READ_REG(hw, E1000_RAH(0));
+ rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT);
+ E1000_WRITE_REG(hw, E1000_RAH(0), rah);
+
+ /* clear VMDq map to scan rar 32 */
+ rah = E1000_READ_REG(hw, E1000_RAH(hw->mac.rar_entry_count));
+ rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT);
+ E1000_WRITE_REG(hw, E1000_RAH(hw->mac.rar_entry_count), rah);
+
+ /* set VMDq map to default PF pool */
+ rah = E1000_READ_REG(hw, E1000_RAH(0));
+ rah |= (0x1 << (RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx +
+ E1000_RAH_POOLSEL_SHIFT));
+ E1000_WRITE_REG(hw, E1000_RAH(0), rah);
+
+ /*
+ * enable vlan filtering and allow all vlan tags through
+ */
+ vlanctrl = E1000_READ_REG(hw, E1000_RCTL);
+ vlanctrl |= E1000_RCTL_VFE; /* enable vlan filters */
+ E1000_WRITE_REG(hw, E1000_RCTL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < IGB_VFTA_SIZE; i++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, 0xFFFFFFFF);
+ }
+
+ /* Enable/Disable MAC Anti-Spoofing */
+ e1000_vmdq_set_anti_spoofing_pf(hw, FALSE, vf_num);
+
+ return 0;
+}
+
+static void
+set_rx_mode(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *dev_data =
+ (struct rte_eth_dev_data*)dev->data->dev_private;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fctrl, vmolr = E1000_VMOLR_BAM | E1000_VMOLR_AUPE;
+ uint16_t vfn = dev_num_vf(dev);
+
+ /* Check for Promiscuous and All Multicast modes */
+ fctrl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* set all bits that we expect to always be set */
+ fctrl &= ~E1000_RCTL_SBP; /* disable store-bad-packets */
+ fctrl |= E1000_RCTL_BAM;
+
+ /* clear the bits we are changing the status of */
+ fctrl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+
+ if (dev_data->promiscuous) {
+ fctrl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+ } else {
+ if (dev_data->all_multicast) {
+ fctrl |= E1000_RCTL_MPE;
+ vmolr |= E1000_VMOLR_MPME;
+ } else {
+ vmolr |= E1000_VMOLR_ROMPE;
+ }
+ }
+
+ if ((hw->mac.type == e1000_82576) ||
+ (hw->mac.type == e1000_i350)) {
+ vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) &
+ ~(E1000_VMOLR_MPME | E1000_VMOLR_ROMPE |
+ E1000_VMOLR_ROPE);
+ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
+ }
+
+ E1000_WRITE_REG(hw, E1000_RCTL, fctrl);
+}
+
+static inline void
+igb_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_info *vfinfo =
+ *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ uint32_t vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
+
+ vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE |
+ E1000_VMOLR_BAM | E1000_VMOLR_AUPE);
+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
+
+ E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0);
+
+ /* reset multicast table array for vf */
+ vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* reset rx mode */
+ set_rx_mode(dev);
+}
+
+static inline void
+igb_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* enable transmit and receive for vf */
+ reg = E1000_READ_REG(hw, E1000_VFTE);
+ reg |= (reg | (1 << vf));
+ E1000_WRITE_REG(hw, E1000_VFTE, reg);
+
+ reg = E1000_READ_REG(hw, E1000_VFRE);
+ reg |= (reg | (1 << vf));
+ E1000_WRITE_REG(hw, E1000_VFRE, reg);
+
+ igb_vf_reset_event(dev, vf);
+}
+
+static int
+igb_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_info *vfinfo =
+ *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
+ int rar_entry = hw->mac.rar_entry_count - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+ uint32_t rah;
+
+ igb_vf_reset_msg(dev, vf);
+
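+ /*
+  * Each VF owns one RAR entry allocated from the top of the table
+  * (rar_entry_count - vf - 1); program the VF MAC there and bind the
+  * entry to the VF's pool via the RAH pool-select bits.
+  */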
+ hw->mac.ops.rar_set(hw, vf_mac, rar_entry);
+ rah = E1000_READ_REG(hw, E1000_RAH(rar_entry));
+ rah |= (0x1 << (vf + E1000_RAH_POOLSEL_SHIFT));
+ E1000_WRITE_REG(hw, E1000_RAH(rar_entry), rah);
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
+ rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
+ e1000_write_mbx(hw, msgbuf, 3, vf);
+
+ return 0;
+}
+
+static int
+igb_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_info *vfinfo =
+ *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ int rar_entry = hw->mac.rar_entry_count - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+
+ if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) {
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
+ hw->mac.ops.rar_set(hw, new_mac, rar_entry);
+ return 0;
+ }
+ return -1;
+}
+
+static int
+igb_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
+{
+ int i;
+ uint32_t vector_bit;
+ uint32_t vector_reg;
+ uint32_t mta_reg;
+ int entries = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >>
+ E1000_VT_MSGINFO_SHIFT;
+ uint16_t *hash_list = (uint16_t *)&msgbuf[1];
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_info *vfinfo =
+ *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+
+ /* only so many hash values supported */
+ entries = RTE_MIN(entries, E1000_MAX_VF_MC_ENTRIES);
+
+ /*
+ * salt away the number of multicast addresses assigned
+ * to this VF for later use, to restore when the PF multicast
+ * list changes
+ */
+ vfinfo->num_vf_mc_hashes = (uint16_t)entries;
+
+ /*
+ * VFs are limited to using the MTA hash table for their multicast
+ * addresses
+ */
+ for (i = 0; i < entries; i++) {
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+ }
+
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
+ mta_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, vector_reg);
+ mta_reg |= (1 << vector_bit);
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, vector_reg, mta_reg);
+ }
+
+ return 0;
+}
+
+static int
+igb_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ int add, vid;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_vf_info *vfinfo =
+ *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ uint32_t vid_idx, vid_bit, vfta;
+
+ add = (msgbuf[0] & E1000_VT_MSGINFO_MASK)
+ >> E1000_VT_MSGINFO_SHIFT;
+ vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
+
+ if (add)
+ vfinfo[vf].vlan_count++;
+ else if (vfinfo[vf].vlan_count)
+ vfinfo[vf].vlan_count--;
+
+ vid_idx = (uint32_t)((vid >> E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK);
+ vid_bit = (uint32_t)(1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
+ if (add)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
+{
+ uint16_t mbx_size = E1000_VFMAILBOX_SIZE;
+ uint32_t msgbuf[E1000_VFMAILBOX_SIZE];
+ int32_t retval;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ retval = e1000_read_mbx(hw, msgbuf, mbx_size, vf);
+ if (retval) {
+ PMD_INIT_LOG(ERR, "Error mbx recv msg from VF %d", vf);
+ return retval;
+ }
+
+ /* do nothing with the message already processed */
+ if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
+ return retval;
+
+ /* flush the ack before we write any messages back */
+ E1000_WRITE_FLUSH(hw);
+
+ /* perform VF reset */
+ if (msgbuf[0] == E1000_VF_RESET) {
+ return igb_vf_reset(dev, vf, msgbuf);
+ }
+
+ /* check & process VF to PF mailbox message */
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case E1000_VF_SET_MAC_ADDR:
+ retval = igb_vf_set_mac_addr(dev, vf, msgbuf);
+ break;
+ case E1000_VF_SET_MULTICAST:
+ retval = igb_vf_set_multicast(dev, vf, msgbuf);
+ break;
+ case E1000_VF_SET_VLAN:
+ retval = igb_vf_set_vlan(dev, vf, msgbuf);
+ break;
+ default:
+ PMD_INIT_LOG(DEBUG, "Unhandled Msg %8.8x",
+ (unsigned) msgbuf[0]);
+ retval = E1000_ERR_MBX;
+ break;
+ }
+
+ /* respond to the VF according to the message processing result */
+ if (retval)
+ msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
+
+ e1000_write_mbx(hw, msgbuf, 1, vf);
+
+ return retval;
+}
+
+static inline void
+igb_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
+{
+ uint32_t msg = E1000_VT_MSGTYPE_NACK;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ e1000_write_mbx(hw, &msg, 1, vf);
+}
+
+void igb_pf_mbx_process(struct rte_eth_dev *eth_dev)
+{
+ uint16_t vf;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
+ /* check & process vf function level reset */
+ if (!e1000_check_for_rst(hw, vf))
+ igb_vf_reset_event(eth_dev, vf);
+
+ /* check & process vf mailbox messages */
+ if (!e1000_check_for_msg(hw, vf))
+ igb_rcv_msg_from_vf(eth_dev, vf);
+
+ /* check & process acks from vf */
+ if (!e1000_check_for_ack(hw, vf))
+ igb_rcv_ack_from_vf(eth_dev, vf);
+ }
+}
diff --git a/src/dpdk_lib18/librte_pmd_e1000/igb_rxtx.c b/src/dpdk_lib18/librte_pmd_e1000/igb_rxtx.c
new file mode 100755
index 00000000..5c394a98
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_e1000/igb_rxtx.c
@@ -0,0 +1,2415 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+
+#include "e1000_logs.h"
+#include "e1000/e1000_api.h"
+#include "e1000_ethdev.h"
+
+#define IGB_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_IPV4_TCP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV4_UDP | \
+ ETH_RSS_IPV6_UDP | \
+ ETH_RSS_IPV6_UDP_EX)
+
+/* Bit Mask to indicate what bits required for building TX context */
+#define IGB_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK)
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+ return (m);
+}
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+ (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
+
+#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
+ (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct igb_rx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct igb_tx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+ uint16_t next_id; /**< Index of next descriptor in ring. */
+ uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct igb_rx_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
+ volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
+ struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
+ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t rx_tail; /**< current value of RDT register. */
+ uint16_t nb_rx_hold; /**< number of held free RX desc. */
+ uint16_t rx_free_thresh; /**< max free RX desc to hold. */
+ uint16_t queue_id; /**< RX queue index. */
+ uint16_t reg_idx; /**< RX queue register index. */
+ uint8_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold register. */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
+};
+
+/**
+ * Hardware context number
+ */
+enum igb_advctx_num {
+ IGB_CTX_0 = 0, /**< CTX0 */
+ IGB_CTX_1 = 1, /**< CTX1 */
+ IGB_CTX_NUM = 2, /**< CTX_NUM */
+};
+
+/** Offload features */
+union igb_vlan_macip {
+ uint32_t data;
+ struct {
+ uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined */
+ uint16_t vlan_tci;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ } f;
+};
+
+/*
+ * Compare mask for vlan_macip_len.data,
+ * should be in sync with igb_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
+/**
+ * Structure to check whether a new context needs to be built
+ */
+struct igb_advctx_info {
+ uint64_t flags; /**< ol_flags related to context build. */
+ uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
+ union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct igb_tx_queue {
+ volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
+ volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
+ uint32_t txd_type; /**< Device-specific TXD type */
+ uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ uint16_t tx_tail; /**< Current value of TDT register. */
+ uint16_t tx_head;
+ /**< Index of first used TX descriptor. */
+ uint16_t queue_id; /**< TX queue index. */
+ uint16_t reg_idx; /**< TX queue register index. */
+ uint8_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold register. */
+ uint32_t ctx_curr;
+ /**< Current used hardware descriptor. */
+ uint32_t ctx_start;
+ /**< Start context position for transmit queue. */
+ struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
+ /**< Hardware context history.*/
+};
+
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+#define rte_igb_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_igb_prefetch(p) do {} while(0)
+#endif
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while(0)
+#endif
+
+/*
+ * Macro for VMDq feature for 1 GbE NIC.
+ */
+#define E1000_VMOLR_SIZE (8)
+
+/*********************************************************************
+ *
+ * TX function
+ *
+ **********************************************************************/
+
+/*
+ * Advanced context descriptors are almost the same between igb/ixgbe.
+ * This is kept as a separate function; there may be an optimization opportunity here.
+ * Rework is required to go with the pre-defined values.
+ */
+
+static inline void
+igbe_set_xmit_ctx(struct igb_tx_queue* txq,
+ volatile struct e1000_adv_tx_context_desc *ctx_txd,
+ uint64_t ol_flags, uint32_t vlan_macip_lens)
+{
+ uint32_t type_tucmd_mlhl;
+ uint32_t mss_l4len_idx;
+ uint32_t ctx_idx, ctx_curr;
+ uint32_t cmp_mask;
+
+ ctx_curr = txq->ctx_curr;
+ ctx_idx = ctx_curr + txq->ctx_start;
+
+ cmp_mask = 0;
+ type_tucmd_mlhl = 0;
+
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ cmp_mask |= TX_VLAN_CMP_MASK;
+ }
+
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
+ cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+ }
+
+ /* Specify which HW CTX to upload. */
+ mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+ cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+ cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+ cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+ break;
+ default:
+ type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
+ E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+ break;
+ }
+
+ txq->ctx_cache[ctx_curr].flags = ol_flags;
+ txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
+ txq->ctx_cache[ctx_curr].vlan_macip_lens.data =
+ vlan_macip_lens & cmp_mask;
+
+ ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+ ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
+ ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
+ ctx_txd->seqnum_seed = 0;
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
+ uint32_t vlan_macip_lens)
+{
+ /* If match with the current context */
+ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+ (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
+ (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+ return txq->ctx_curr;
+ }
+
+ /* If match with the second context */
+ txq->ctx_curr ^= 1;
+ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+ (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
+ (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+ return txq->ctx_curr;
+ }
+
+ /* Mismatch: signal the caller that a new context descriptor is needed. */
+ return (IGB_CTX_NUM);
+}
+
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
+{
+ static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
+ static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
+ uint32_t tmp;
+
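+ /* Index the two-entry tables with 0/1 comparison results to stay branchless. */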
+ tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
+ tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
+ return tmp;
+}
+
+static inline uint32_t
+tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
+{
+ static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
+ return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+}
+
+uint16_t
+eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_tx_queue *txq;
+ struct igb_tx_entry *sw_ring;
+ struct igb_tx_entry *txe, *txn;
+ volatile union e1000_adv_tx_desc *txr;
+ volatile union e1000_adv_tx_desc *txd;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ union igb_vlan_macip vlan_macip_lens;
+ union {
+ uint16_t u16;
+ struct {
+ uint16_t l3_len:9;
+ uint16_t l2_len:7;
+ };
+ } l2_l3_len;
+ uint64_t buf_dma_addr;
+ uint32_t olinfo_status;
+ uint32_t cmd_type_len;
+ uint32_t pkt_len;
+ uint16_t slen;
+ uint64_t ol_flags;
+ uint16_t tx_end;
+ uint16_t tx_id;
+ uint16_t tx_last;
+ uint16_t nb_tx;
+ uint64_t tx_ol_req;
+ uint32_t new_ctx = 0;
+ uint32_t ctx = 0;
+
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ tx_pkt = *tx_pkts++;
+ pkt_len = tx_pkt->pkt_len;
+
+ RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+ /*
+ * The number of descriptors that must be allocated for a
+ * packet is the number of segments of that packet, plus 1
+ * Context Descriptor for the VLAN Tag Identifier, if any.
+ * Determine the last TX descriptor to allocate in the TX ring
+ * for the packet, starting from the current position (tx_id)
+ * in the ring.
+ */
+ tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
+
+ ol_flags = tx_pkt->ol_flags;
+ l2_l3_len.l2_len = tx_pkt->l2_len;
+ l2_l3_len.l3_len = tx_pkt->l3_len;
+ vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
+ vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
+ tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
+
+ /* If a Context Descriptor needs to be built. */
+ if (tx_ol_req) {
+ ctx = what_advctx_update(txq, tx_ol_req,
+ vlan_macip_lens.data);
+ /* Only allocate a context descriptor if required. */
+ new_ctx = (ctx == IGB_CTX_NUM);
+ ctx = txq->ctx_curr;
+ tx_last = (uint16_t) (tx_last + new_ctx);
+ }
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+ " tx_first=%u tx_last=%u",
+ (unsigned) txq->port_id,
+ (unsigned) txq->queue_id,
+ (unsigned) pkt_len,
+ (unsigned) tx_id,
+ (unsigned) tx_last);
+
+ /*
+ * Check if there are enough free descriptors in the TX ring
+ * to transmit the next packet.
+ * This operation is based on the two following rules:
+ *
+ * 1- Only check that the last needed TX descriptor can be
+ * allocated (by construction, if that descriptor is free,
+ * all intermediate ones are also free).
+ *
+ * For this purpose, the index of the last TX descriptor
+ * used for a packet (the "last descriptor" of a packet)
+ * is recorded in the TX entries (the last one included)
+ * that are associated with all TX descriptors allocated
+ * for that packet.
+ *
+ * 2- Avoid allocating the last free TX descriptor of the
+ * ring, in order to never set the TDT register with the
+ * same value stored in parallel by the NIC in the TDH
+ * register, which would make the TX engine of the NIC
+ * enter a deadlock situation.
+ *
+ * By extension, avoid allocating a free descriptor that
+ * belongs to the last set of free descriptors allocated
+ * to the same packet previously transmitted.
+ */
+
+ /*
+ * The "last descriptor" of the packet that previously used the
+ * descriptor we now want to allocate last, if any.
+ */
+ tx_end = sw_ring[tx_last].last_id;
+
+ /*
+ * The next descriptor following that "last descriptor" in the
+ * ring.
+ */
+ tx_end = sw_ring[tx_end].next_id;
+
+ /*
+ * The "last descriptor" associated with that next descriptor.
+ */
+ tx_end = sw_ring[tx_end].last_id;
+
+ /*
+ * Check that this descriptor is free.
+ */
+ if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
+ if (nb_tx == 0)
+ return (0);
+ goto end_of_tx;
+ }
+
+ /*
+ * Set common flags of all TX Data Descriptors.
+ *
+ * The following bits must be set in all Data Descriptors:
+ * - E1000_ADVTXD_DTYP_DATA
+ * - E1000_ADVTXD_DCMD_DEXT
+ *
+ * The following bits must be set in the first Data Descriptor
+ * and are ignored in the other ones:
+ * - E1000_ADVTXD_DCMD_IFCS
+ * - E1000_ADVTXD_MAC_1588
+ * - E1000_ADVTXD_DCMD_VLE
+ *
+ * The following bits must only be set in the last Data
+ * Descriptor:
+ * - E1000_TXD_CMD_EOP
+ *
+ * The following bits can be set in any Data Descriptor, but
+ * are only set in the last Data Descriptor:
+ * - E1000_TXD_CMD_RS
+ */
+ cmd_type_len = txq->txd_type |
+ E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
+ olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
+#if defined(RTE_LIBRTE_IEEE1588)
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
+#endif
+ if (tx_ol_req) {
+ /* Setup TX Advanced context descriptor if required */
+ if (new_ctx) {
+ volatile struct e1000_adv_tx_context_desc *
+ ctx_txd;
+
+ ctx_txd = (volatile struct
+ e1000_adv_tx_context_desc *)
+ &txr[tx_id];
+
+ txn = &sw_ring[txe->next_id];
+ RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+ vlan_macip_lens.data);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ /* Setup the TX Advanced Data Descriptor */
+ cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
+ olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
+ olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
+ }
+
+ m_seg = tx_pkt;
+ do {
+ txn = &sw_ring[txe->next_id];
+ txd = &txr[tx_id];
+
+ if (txe->mbuf != NULL)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /*
+ * Set up transmit descriptor.
+ */
+ slen = (uint16_t) m_seg->data_len;
+ buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ txd->read.buffer_addr =
+ rte_cpu_to_le_64(buf_dma_addr);
+ txd->read.cmd_type_len =
+ rte_cpu_to_le_32(cmd_type_len | slen);
+ txd->read.olinfo_status =
+ rte_cpu_to_le_32(olinfo_status);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /*
+ * The last packet data descriptor needs End Of Packet (EOP)
+ * and Report Status (RS).
+ */
+ txd->read.cmd_type_len |=
+ rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
+ }
+ end_of_tx:
+ rte_wmb();
+
+ /*
+ * Set the Transmit Descriptor Tail (TDT).
+ */
+ E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) tx_id, (unsigned) nb_tx);
+ txq->tx_tail = tx_id;
+
+ return (nb_tx);
+}
+
+/*********************************************************************
+ *
+ * RX functions
+ *
+ **********************************************************************/
+static inline uint64_t
+rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+{
+ uint64_t pkt_flags;
+
+ static uint64_t ip_pkt_types_map[16] = {
+ 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
+ PKT_RX_IPV6_HDR, 0, 0, 0,
+ PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+ PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+ };
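+ /*
+  * Bits 4..7 of hlen_type_rss select the packet-type entry in the map
+  * above; the low 4 bits carry the RSS type and decide whether
+  * PKT_RX_RSS_HASH is reported.
+  */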
+
+#if defined(RTE_LIBRTE_IEEE1588)
+ static uint32_t ip_pkt_etqf_map[8] = {
+ 0, 0, 0, PKT_RX_IEEE1588_PTP,
+ 0, 0, 0, 0,
+ };
+
+ pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
+ ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
+ ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+#else
+ pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
+ ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+#endif
+ return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH);
+}
+
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status)
+{
+ uint64_t pkt_flags;
+
+ /* Check if VLAN present */
+ pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
+
+#if defined(RTE_LIBRTE_IEEE1588)
+ if (rx_status & E1000_RXD_STAT_TMST)
+ pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+#endif
+ return pkt_flags;
+}
+
+static inline uint64_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+ /*
+ * Bit 30: IPE, IPv4 checksum error
+ * Bit 29: L4I, L4I integrity error
+ */
+
+ static uint64_t error_to_pkt_flags_map[4] = {
+ 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ };
+ return error_to_pkt_flags_map[(rx_status >>
+ E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
+}
+
+uint16_t
+eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_rx_queue *rxq;
+ volatile union e1000_adv_rx_desc *rx_ring;
+ volatile union e1000_adv_rx_desc *rxdp;
+ struct igb_rx_entry *sw_ring;
+ struct igb_rx_entry *rxe;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ union e1000_adv_rx_desc rxd;
+ uint64_t dma_addr;
+ uint32_t staterr;
+ uint32_t hlen_type_rss;
+ uint16_t pkt_len;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint64_t pkt_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+ while (nb_rx < nb_pkts) {
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rxdp->wb.upper.status_error;
+ if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * End of packet.
+ *
+ * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
+ * likely to be invalid and to be dropped by the various
+ * validation checks performed by the network stack.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy do not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be retrieved later once
+ * mbufs have been freed in the meantime.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * from happening by sending specific "back-pressure" flow control
+ * frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x pkt_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) staterr,
+ (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+ nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_igb_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_igb_prefetch(&rx_ring[rx_id]);
+ rte_igb_prefetch(&sw_ring[rx_id]);
+ }
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.pkt_addr = dma_addr;
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
+ rxq->crc_len);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->port = rxq->port_id;
+
+ rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+ hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+ /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+ pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+ rxm->ol_flags = pkt_flags;
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
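+ /*
+ * Worked example (illustrative only): with nb_rx_desc == 512, if the last
+ * processed descriptor wrapped around so that rx_id == 0, the RDT register
+ * is written with 511, keeping RDT one descriptor behind RDH so the ring
+ * is never seen as full by the hardware.
+ */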
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return (nb_rx);
+}
+
+uint16_t
+eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_rx_queue *rxq;
+ volatile union e1000_adv_rx_desc *rx_ring;
+ volatile union e1000_adv_rx_desc *rxdp;
+ struct igb_rx_entry *sw_ring;
+ struct igb_rx_entry *rxe;
+ struct rte_mbuf *first_seg;
+ struct rte_mbuf *last_seg;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ union e1000_adv_rx_desc rxd;
+ uint64_t dma; /* Physical address of mbuf data buffer */
+ uint32_t staterr;
+ uint32_t hlen_type_rss;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint16_t data_len;
+ uint64_t pkt_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+
+ /*
+ * Retrieve RX context of current packet, if any.
+ */
+ first_seg = rxq->pkt_first_seg;
+ last_seg = rxq->pkt_last_seg;
+
+ while (nb_rx < nb_pkts) {
+ next_desc:
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rxdp->wb.upper.status_error;
+ if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * Descriptor done.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy does not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be retrieved later once
+ * mbufs have been freed in the meantime.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * from happening by sending specific "back-pressure" flow control
+ * frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x data_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) staterr,
+ (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+ nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_igb_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_igb_prefetch(&rx_ring[rx_id]);
+ rte_igb_prefetch(&sw_ring[rx_id]);
+ }
+
+ /*
+ * Update RX descriptor with the physical address of the new
+ * data buffer of the new allocated mbuf.
+ */
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rxdp->read.pkt_addr = dma;
+ rxdp->read.hdr_addr = dma;
+
+ /*
+ * Set data length & data buffer address of mbuf.
+ */
+ data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
+ rxm->data_len = data_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (first_seg == NULL) {
+ first_seg = rxm;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
+ } else {
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /*
+ * If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (! (staterr & E1000_RXD_STAT_EOP)) {
+ last_seg = rxm;
+ goto next_desc;
+ }
+
+ /*
+ * This is the last buffer of the received packet.
+ * If the CRC is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer.
+ * If part of the CRC is also contained in the previous
+ * mbuf, subtract the length of that CRC part from the
+ * data length of the previous mbuf.
+ */
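+ /*
+ * Worked example (illustrative only, assuming ETHER_CRC_LEN == 4): if the
+ * last segment holds data_len == 2 bytes, those bytes are CRC only, so the
+ * segment is freed and the previous segment's data_len is reduced by the
+ * remaining 4 - 2 = 2 CRC bytes.
+ */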
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= ETHER_CRC_LEN;
+ if (data_len <= ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len = (uint16_t)
+ (last_seg->data_len -
+ (ETHER_CRC_LEN - data_len));
+ last_seg->next = NULL;
+ } else
+ rxm->data_len =
+ (uint16_t) (data_len - ETHER_CRC_LEN);
+ }
+
+ /*
+ * Initialize the first mbuf of the returned packet:
+ * - RX port identifier,
+ * - hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ first_seg->port = rxq->port_id;
+ first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+
+ /*
+ * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+ * set in the pkt_flags field.
+ */
+ first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+ pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+ first_seg->ol_flags = pkt_flags;
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = first_seg;
+
+ /*
+ * Setup receipt context for a new packet.
+ */
+ first_seg = NULL;
+ }
+
+ /*
+ * Record index of the next RX descriptor to probe.
+ */
+ rxq->rx_tail = rx_id;
+
+ /*
+ * Save receive context.
+ */
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return (nb_rx);
+}
+
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must
+ * be a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary.
+ * This also optimizes the cache line size effect; the hardware supports
+ * cache line sizes of up to 128 bytes.
+ */
+#define IGB_ALIGN 128
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
+ * descriptors must satisfy the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define IGB_MIN_RING_DESC 32
+#define IGB_MAX_RING_DESC 4096
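+
+/*
+ * Worked example (illustrative only): an advanced descriptor
+ * (union e1000_adv_rx_desc / e1000_adv_tx_desc) is 16 bytes, so the
+ * 128-byte RDLEN/TDLEN constraint means the descriptor count must be a
+ * multiple of 128 / 16 = 8 within [IGB_MIN_RING_DESC, IGB_MAX_RING_DESC].
+ */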
+
+static const struct rte_memzone *
+ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
+ uint16_t queue_id, uint32_t ring_size, int socket_id)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ dev->driver->pci_drv.name, ring_name,
+ dev->data->port_id, queue_id);
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ return mz;
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+ return rte_memzone_reserve_bounded(z_name, ring_size,
+ socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
+#else
+ return rte_memzone_reserve_aligned(z_name, ring_size,
+ socket_id, 0, IGB_ALIGN);
+#endif
+}
+
+static void
+igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+{
+ unsigned i;
+
+ if (txq->sw_ring != NULL) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static void
+igb_tx_queue_release(struct igb_tx_queue *txq)
+{
+ if (txq != NULL) {
+ igb_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ }
+}
+
+void
+eth_igb_tx_queue_release(void *txq)
+{
+ igb_tx_queue_release(txq);
+}
+
+static void
+igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
+{
+ txq->tx_head = 0;
+ txq->tx_tail = 0;
+ txq->ctx_curr = 0;
+ memset((void*)&txq->ctx_cache, 0,
+ IGB_CTX_NUM * sizeof(struct igb_advctx_info));
+}
+
+static void
+igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
+{
+ static const union e1000_adv_tx_desc zeroed_desc = { .read = {
+ .buffer_addr = 0}};
+ struct igb_tx_entry *txe = txq->sw_ring;
+ uint16_t i, prev;
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i] = zeroed_desc;
+ }
+
+ /* Initialize ring entries */
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
+
+ txd->wb.status = E1000_TXD_STAT_DD;
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->txd_type = E1000_ADVTXD_DTYP_DATA;
+ /* 82575 specific, each tx queue will use 2 hw contexts */
+ if (hw->mac.type == e1000_82575)
+ txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
+
+ igb_reset_tx_queue_stat(txq);
+}
+
+int
+eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const struct rte_memzone *tz;
+ struct igb_tx_queue *txq;
+ struct e1000_hw *hw;
+ uint32_t size;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Validate number of transmit descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of IGB_ALIGN.
+ */
+ if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
+ (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+ return -EINVAL;
+ }
+
+ /*
+ * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
+ * driver.
+ */
+ if (tx_conf->tx_free_thresh != 0)
+ PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
+ "used for the 1G driver.");
+ if (tx_conf->tx_rs_thresh != 0)
+ PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
+ "used for the 1G driver.");
+ if (tx_conf->tx_thresh.wthresh == 0)
+ PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
+ "consider setting the TX WTHRESH value to 4, 8, "
+ "or 16.");
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
+ RTE_CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return (-ENOMEM);
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
+ tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ size, socket_id);
+ if (tz == NULL) {
+ igb_tx_queue_release(txq);
+ return (-ENOMEM);
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->pthresh = tx_conf->tx_thresh.pthresh;
+ txq->hthresh = tx_conf->tx_thresh.hthresh;
+ txq->wthresh = tx_conf->tx_thresh.wthresh;
+ if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
+ txq->wthresh = 1;
+ txq->queue_id = queue_idx;
+ txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ txq->port_id = dev->data->port_id;
+
+ txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
+#ifndef RTE_LIBRTE_XEN_DOM0
+ txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
+#else
+ txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#endif
+ txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
+ /* Allocate software ring */
+ txq->sw_ring = rte_zmalloc("txq->sw_ring",
+ sizeof(struct igb_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (txq->sw_ring == NULL) {
+ igb_tx_queue_release(txq);
+ return (-ENOMEM);
+ }
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+ igb_reset_tx_queue(txq, dev);
+ dev->tx_pkt_burst = eth_igb_xmit_pkts;
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return (0);
+}
+
+static void
+igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+{
+ unsigned i;
+
+ if (rxq->sw_ring != NULL) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static void
+igb_rx_queue_release(struct igb_rx_queue *rxq)
+{
+ if (rxq != NULL) {
+ igb_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ }
+}
+
+void
+eth_igb_rx_queue_release(void *rxq)
+{
+ igb_rx_queue_release(rxq);
+}
+
+static void
+igb_reset_rx_queue(struct igb_rx_queue *rxq)
+{
+ static const union e1000_adv_rx_desc zeroed_desc = { .read = {
+ .pkt_addr = 0}};
+ unsigned i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ rxq->rx_ring[i] = zeroed_desc;
+ }
+
+ rxq->rx_tail = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+}
+
+int
+eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *rz;
+ struct igb_rx_queue *rxq;
+ struct e1000_hw *hw;
+ unsigned int size;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Validate number of receive descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of IGB_ALIGN.
+ */
+ if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
+ (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the RX queue data structure. */
+ rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
+ RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL)
+ return (-ENOMEM);
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->pthresh = rx_conf->rx_thresh.pthresh;
+ rxq->hthresh = rx_conf->rx_thresh.hthresh;
+ rxq->wthresh = rx_conf->rx_thresh.wthresh;
+ if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
+ rxq->wthresh = 1;
+ rxq->drop_en = rx_conf->rx_drop_en;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ rxq->port_id = dev->data->port_id;
+ rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
+ ETHER_CRC_LEN);
+
+ /*
+ * Allocate RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
+ rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
+ if (rz == NULL) {
+ igb_rx_queue_release(rxq);
+ return (-ENOMEM);
+ }
+ rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
+ rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
+#ifndef RTE_LIBRTE_XEN_DOM0
+ rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
+#else
+ rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#endif
+ rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
+
+ /* Allocate software ring. */
+ rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
+ sizeof(struct igb_rx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (rxq->sw_ring == NULL) {
+ igb_rx_queue_release(rxq);
+ return (-ENOMEM);
+ }
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ igb_reset_rx_queue(rxq);
+
+ return 0;
+}
+
+uint32_t
+eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define IGB_RXQ_SCAN_INTERVAL 4
+ volatile union e1000_adv_rx_desc *rxdp;
+ struct igb_rx_queue *rxq;
+ uint32_t desc = 0;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
+ return 0;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+
+ while ((desc < rxq->nb_rx_desc) &&
+ (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
+ desc += IGB_RXQ_SCAN_INTERVAL;
+ rxdp += IGB_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
+int
+eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile union e1000_adv_rx_desc *rxdp;
+ struct igb_rx_queue *rxq = rx_queue;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return 0;
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &rxq->rx_ring[desc];
+ return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
+}
+
+void
+igb_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct igb_tx_queue *txq;
+ struct igb_rx_queue *rxq;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq != NULL) {
+ igb_tx_queue_release_mbufs(txq);
+ igb_reset_tx_queue(txq, dev);
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq != NULL) {
+ igb_rx_queue_release_mbufs(rxq);
+ igb_reset_rx_queue(rxq);
+ }
+ }
+}
+
+/**
+ * Receive Side Scaling (RSS).
+ * See section 7.1.1.7 in the following document:
+ * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source and
+ * destination ports of TCP/UDP headers, if any, of received packets are hashed
+ * against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
+ * RSS output index which is used as the RX queue index where to store the
+ * received packets.
+ * The following output is supplied in the RX write-back descriptor:
+ * - 32-bit result of the Microsoft RSS hash function,
+ * - 4-bit RSS type field.
+ */
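+
+/*
+ * Illustrative sketch (not part of the driver): for a packet whose 32-bit
+ * RSS hash is 'hash', the hardware effectively selects
+ *     rx_queue = RETA[hash & 0x7F];
+ * where RETA is the 128-entry redirection table programmed in
+ * igb_rss_configure() below.
+ */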
+
+/*
+ * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
+ * Used as the default key.
+ */
+static uint8_t rss_intel_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static void
+igb_rss_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ uint32_t mrqc;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mrqc = E1000_READ_REG(hw, E1000_MRQC);
+ mrqc &= ~E1000_MRQC_ENABLE_MASK;
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+}
+
+static void
+igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
+{
+ uint8_t *hash_key;
+ uint32_t rss_key;
+ uint32_t mrqc;
+ uint64_t rss_hf;
+ uint16_t i;
+
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL) {
+ /* Fill in RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = hash_key[(i * 4)];
+ rss_key |= hash_key[(i * 4) + 1] << 8;
+ rss_key |= hash_key[(i * 4) + 2] << 16;
+ rss_key |= hash_key[(i * 4) + 3] << 24;
+ E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
+ }
+ }
+
+ /* Set configured hashing protocols in MRQC register */
+ rss_hf = rss_conf->rss_hf;
+ mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
+ if (rss_hf & ETH_RSS_IPV4)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
+ if (rss_hf & ETH_RSS_IPV4_TCP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
+ if (rss_hf & ETH_RSS_IPV6)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
+ if (rss_hf & ETH_RSS_IPV6_EX)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
+ if (rss_hf & ETH_RSS_IPV6_TCP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
+ if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+ if (rss_hf & ETH_RSS_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+ if (rss_hf & ETH_RSS_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+ if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+}
+
+int
+eth_igb_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct e1000_hw *hw;
+ uint32_t mrqc;
+ uint64_t rss_hf;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Before changing anything, first check that the update RSS operation
+ * does not attempt to disable RSS, if RSS was enabled at
+ * initialization time, or does not attempt to enable RSS, if RSS was
+ * disabled at initialization time.
+ */
+ rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
+ mrqc = E1000_READ_REG(hw, E1000_MRQC);
+ if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
+ if (rss_hf != 0) /* Enable RSS */
+ return -(EINVAL);
+ return 0; /* Nothing to do */
+ }
+ /* RSS enabled */
+ if (rss_hf == 0) /* Disable RSS */
+ return -(EINVAL);
+ igb_hw_rss_hash_set(hw, rss_conf);
+ return 0;
+}
+
+int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct e1000_hw *hw;
+ uint8_t *hash_key;
+ uint32_t rss_key;
+ uint32_t mrqc;
+ uint64_t rss_hf;
+ uint16_t i;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL) {
+ /* Return RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
+ hash_key[(i * 4)] = rss_key & 0x000000FF;
+ hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
+ hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
+ hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
+ }
+ }
+
+ /* Get RSS functions configured in MRQC register */
+ mrqc = E1000_READ_REG(hw, E1000_MRQC);
+ if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
+ rss_conf->rss_hf = 0;
+ return 0;
+ }
+ rss_hf = 0;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
+ rss_hf |= ETH_RSS_IPV4;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
+ rss_hf |= ETH_RSS_IPV4_TCP;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
+ rss_hf |= ETH_RSS_IPV6;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
+ rss_hf |= ETH_RSS_IPV6_EX;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
+ rss_hf |= ETH_RSS_IPV6_TCP;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
+ rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
+ rss_hf |= ETH_RSS_IPV4_UDP;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
+ rss_hf |= ETH_RSS_IPV6_UDP;
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
+ rss_hf |= ETH_RSS_IPV6_UDP_EX;
+ rss_conf->rss_hf = rss_hf;
+ return 0;
+}
+
+static void
+igb_rss_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rss_conf rss_conf;
+ struct e1000_hw *hw;
+ uint32_t shift;
+ uint16_t i;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Fill in redirection table. */
+ shift = (hw->mac.type == e1000_82575) ? 6 : 0;
+ for (i = 0; i < 128; i++) {
+ union e1000_reta {
+ uint32_t dword;
+ uint8_t bytes[4];
+ } reta;
+ uint8_t q_idx;
+
+ q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
+ i % dev->data->nb_rx_queues : 0);
+ reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
+ if ((i & 3) == 3)
+ E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
+ }
+
+ /*
+ * Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
+ igb_rss_disable(dev);
+ return;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ igb_hw_rss_hash_set(hw, &rss_conf);
+}
+
+/*
+ * Check whether the MAC type supports VMDq.
+ * Return 1 if it does, otherwise return 0.
+ */
+static int
+igb_is_vmdq_supported(const struct rte_eth_dev *dev)
+{
+ const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ return 1;
+ case e1000_82540:
+ case e1000_82541:
+ case e1000_82542:
+ case e1000_82543:
+ case e1000_82544:
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82547:
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ case e1000_i210:
+ case e1000_i211:
+ default:
+ PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
+ return 0;
+ }
+}
+
+static int
+igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_vmdq_rx_conf *cfg;
+ struct e1000_hw *hw;
+ uint32_t mrqc, vt_ctl, vmolr, rctl;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+
+ /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
+ if (igb_is_vmdq_supported(dev) == 0)
+ return -1;
+
+ igb_rss_disable(dev);
+
+ /* RCTL: enable VLAN filter */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_VFE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /* MRQC: enable vmdq */
+ mrqc = E1000_READ_REG(hw, E1000_MRQC);
+ mrqc |= E1000_MRQC_ENABLE_VMDQ;
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+
+ /* VTCTL: pool selection according to VLAN tag */
+ vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
+ if (cfg->enable_default_pool)
+ vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
+ vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
+ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
+
+ for (i = 0; i < E1000_VMOLR_SIZE; i++) {
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
+ vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
+ E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
+ E1000_VMOLR_MPME);
+
+ if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+ vmolr |= E1000_VMOLR_AUPE;
+ if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+ vmolr |= E1000_VMOLR_ROMPE;
+ if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+ vmolr |= E1000_VMOLR_ROPE;
+ if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+ vmolr |= E1000_VMOLR_BAM;
+ if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+ vmolr |= E1000_VMOLR_MPME;
+
+ E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
+ }
+
+ /*
+ * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
+ * Both the 82576 and the 82580 support it.
+ */
+ if (hw->mac.type != e1000_i350) {
+ for (i = 0; i < E1000_VMOLR_SIZE; i++) {
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
+ vmolr |= E1000_VMOLR_STRVLAN;
+ E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
+ }
+ }
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < IGB_VFTA_SIZE; i++)
+ E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
+
+ /* VFRE: 8 pools enabling for rx, both 82576 and i350 support it */
+ if (hw->mac.type != e1000_82580)
+ E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
+
+ /*
+ * RAH/RAL - allow pools to read specific mac addresses
+ * In this case, all pools should be able to read from mac addr 0
+ */
+ E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
+ E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
+
+ /* VLVF: set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set vlan id in VF register and set the valid bit */
+ E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
+ (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
+ ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
+ E1000_VLVF_POOLSEL_MASK)));
+ }
+
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+
+/*********************************************************************
+ *
+ * Enable receive unit.
+ *
+ **********************************************************************/
+
+static int
+igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+{
+ struct igb_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ unsigned i;
+
+ /* Initialize software ring entries. */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile union e1000_adv_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
+ "queue_id=%hu", rxq->queue_id);
+ return (-ENOMEM);
+ }
+ dma_addr =
+ rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+ rxd = &rxq->rx_ring[i];
+ rxd->read.hdr_addr = dma_addr;
+ rxd->read.pkt_addr = dma_addr;
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+#define E1000_MRQC_DEF_Q_SHIFT (3)
+static int
+igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mrqc;
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
+ /*
+ * SRIOV active scheme
+ * FIXME if support RSS together with VMDq & SRIOV
+ */
+ mrqc = E1000_MRQC_ENABLE_VMDQ;
+ /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
+ mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+ } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme
+ */
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ igb_rss_configure(dev);
+ break;
+ case ETH_MQ_RX_VMDQ_ONLY:
+ /*Configure general VMDQ only RX parameters*/
+ igb_vmdq_rx_hw_configure(dev);
+ break;
+ case ETH_MQ_RX_NONE:
+ /* if mq_mode is none, disable rss mode.*/
+ default:
+ igb_rss_disable(dev);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int
+eth_igb_rx_init(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ struct igb_rx_queue *rxq;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint32_t rctl;
+ uint32_t rxcsum;
+ uint32_t srrctl;
+ uint16_t buf_size;
+ uint16_t rctl_bsize;
+ uint16_t i;
+ int ret;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ srrctl = 0;
+
+ /*
+ * Make sure receives are disabled while setting
+ * up the descriptor ring.
+ */
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+
+ /*
+ * Configure support of jumbo frames, if any.
+ */
+ if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ rctl |= E1000_RCTL_LPE;
+
+ /*
+ * Set maximum packet length by default, and might be updated
+ * together with enabling/disabling dual VLAN.
+ */
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ VLAN_TAG_SIZE);
+ } else
+ rctl &= ~E1000_RCTL_LPE;
+
+ /* Configure and enable each RX queue. */
+ rctl_bsize = 0;
+ dev->rx_pkt_burst = eth_igb_recv_pkts;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ uint64_t bus_addr;
+ uint32_t rxdctl;
+
+ rxq = dev->data->rx_queues[i];
+
+ /* Allocate buffers for descriptor rings and set up queue */
+ ret = igb_alloc_rx_queue_mbufs(rxq);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset crc_len in case it was changed after queue setup by a
+ * call to configure
+ */
+ rxq->crc_len =
+ (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
+ 0 : ETHER_CRC_LEN);
+
+ bus_addr = rxq->rx_ring_phys_addr;
+ E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
+ rxq->nb_rx_desc *
+ sizeof(union e1000_adv_rx_desc));
+ E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
+
+ srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ /*
+ * Configure RX buffer size.
+ */
+ mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
+ buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+ if (buf_size >= 1024) {
+ /*
+ * Configure the BSIZEPACKET field of the SRRCTL
+ * register of the queue.
+ * Value is in 1 KB resolution, from 1 KB to 127 KB.
+ * If this field is equal to 0b, then RCTL.BSIZE
+ * determines the RX packet buffer size.
+ */
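+ /*
+ * Worked example (illustrative only, assuming
+ * E1000_SRRCTL_BSIZEPKT_SHIFT == 10): buf_size == 2048 programs
+ * BSIZEPACKET = 2048 >> 10 = 2, i.e. a 2 KB hardware receive buffer.
+ */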
+ srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
+ E1000_SRRCTL_BSIZEPKT_MASK);
+ buf_size = (uint16_t) ((srrctl &
+ E1000_SRRCTL_BSIZEPKT_MASK) <<
+ E1000_SRRCTL_BSIZEPKT_SHIFT);
+
+ /* Add the dual VLAN tag length to support dual VLAN */
+ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * VLAN_TAG_SIZE) > buf_size){
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG,
+ "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+ } else {
+ /*
+ * Use BSIZE field of the device RCTL register.
+ */
+ if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
+ rctl_bsize = buf_size;
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
+
+ /* Enable this RX queue. */
+ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
+ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+ rxdctl &= 0xFFF00000;
+ rxdctl |= (rxq->pthresh & 0x1F);
+ rxdctl |= ((rxq->hthresh & 0x1F) << 8);
+ rxdctl |= ((rxq->wthresh & 0x1F) << 16);
+ E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
+ }
+
+ if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+
+ /*
+ * Setup BSIZE field of RCTL register, if needed.
+ * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
+ * register, since the code above configures the SRRCTL register of
+ * the RX queue in such a case.
+ * All configurable sizes are:
+ * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
+ * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
+ * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
+ * 2048: rctl |= E1000_RCTL_SZ_2048;
+ * 1024: rctl |= E1000_RCTL_SZ_1024;
+ * 512: rctl |= E1000_RCTL_SZ_512;
+ * 256: rctl |= E1000_RCTL_SZ_256;
+ */
+ if (rctl_bsize > 0) {
+ if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
+ rctl |= E1000_RCTL_SZ_512;
+ else /* 256 <= buf_size < 512 - use 256 */
+ rctl |= E1000_RCTL_SZ_256;
+ }
+
+ /*
+ * Configure RSS if device configured with multiple RX queues.
+ */
+ igb_dev_mq_rx_configure(dev);
+
+ /* Update the rctl since igb_dev_mq_rx_configure may change its value */
+ rctl |= E1000_READ_REG(hw, E1000_RCTL);
+
+ /*
+ * Setup the Checksum Register.
+ * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
+ */
+ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+ rxcsum |= E1000_RXCSUM_PCSD;
+
+ /* Enable both L3/L4 rx checksum offload */
+ if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+ else
+ rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+
+ /* Setup the Receive Control Register. */
+ if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+ rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
+
+ /* set STRCRC bit in all queues */
+ if (hw->mac.type == e1000_i350 ||
+ hw->mac.type == e1000_i210 ||
+ hw->mac.type == e1000_i211 ||
+ hw->mac.type == e1000_i354) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ uint32_t dvmolr = E1000_READ_REG(hw,
+ E1000_DVMOLR(rxq->reg_idx));
+ dvmolr |= E1000_DVMOLR_STRCRC;
+ E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
+ }
+ }
+ } else {
+ rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+
+ /* clear STRCRC bit in all queues */
+ if (hw->mac.type == e1000_i350 ||
+ hw->mac.type == e1000_i210 ||
+ hw->mac.type == e1000_i211 ||
+ hw->mac.type == e1000_i354) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ uint32_t dvmolr = E1000_READ_REG(hw,
+ E1000_DVMOLR(rxq->reg_idx));
+ dvmolr &= ~E1000_DVMOLR_STRCRC;
+ E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
+ }
+ }
+ }
+
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
+ E1000_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ /* Make sure VLAN Filters are off. */
+ if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+ rctl &= ~E1000_RCTL_VFE;
+ /* Don't store bad packets. */
+ rctl &= ~E1000_RCTL_SBP;
+
+ /* Enable Receives. */
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers.
+ * This needs to be done after enable.
+ */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
+ E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ }
+
+ return 0;
+}
+
+/*********************************************************************
+ *
+ * Enable transmit unit.
+ *
+ **********************************************************************/
+void
+eth_igb_tx_init(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ struct igb_tx_queue *txq;
+ uint32_t tctl;
+ uint32_t txdctl;
+ uint16_t i;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings. */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ uint64_t bus_addr;
+ txq = dev->data->tx_queues[i];
+ bus_addr = txq->tx_ring_phys_addr;
+
+ E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
+ txq->nb_tx_desc *
+ sizeof(union e1000_adv_tx_desc));
+ E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers. */
+ E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
+ E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
+
+ /* Setup Transmit threshold registers. */
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
+ txdctl |= txq->pthresh & 0x1F;
+ txdctl |= ((txq->hthresh & 0x1F) << 8);
+ txdctl |= ((txq->wthresh & 0x1F) << 16);
+ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
+ }
+
+ /* Program the Transmit Control Register. */
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
+
+ e1000_config_collision_dist(hw);
+
+ /* This write will effectively turn on the transmit unit. */
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+}
+
+/*********************************************************************
+ *
+ * Enable VF receive unit.
+ *
+ **********************************************************************/
+int
+eth_igbvf_rx_init(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ struct igb_rx_queue *rxq;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint32_t srrctl;
+ uint16_t buf_size;
+ uint16_t rctl_bsize;
+ uint16_t i;
+ int ret;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* setup MTU */
+ e1000_rlpml_set_vf(hw,
+ (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ VLAN_TAG_SIZE));
+
+ /* Configure and enable each RX queue. */
+ rctl_bsize = 0;
+ dev->rx_pkt_burst = eth_igb_recv_pkts;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ uint64_t bus_addr;
+ uint32_t rxdctl;
+
+ rxq = dev->data->rx_queues[i];
+
+ /* Allocate buffers for descriptor rings and set up queue */
+ ret = igb_alloc_rx_queue_mbufs(rxq);
+ if (ret)
+ return ret;
+
+ bus_addr = rxq->rx_ring_phys_addr;
+ E1000_WRITE_REG(hw, E1000_RDLEN(i),
+ rxq->nb_rx_desc *
+ sizeof(union e1000_adv_rx_desc));
+ E1000_WRITE_REG(hw, E1000_RDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
+
+ srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ /*
+ * Configure RX buffer size.
+ */
+ mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
+ buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+ if (buf_size >= 1024) {
+ /*
+ * Configure the BSIZEPACKET field of the SRRCTL
+ * register of the queue.
+ * Value is in 1 KB resolution, from 1 KB to 127 KB.
+ * If this field is equal to 0b, then RCTL.BSIZE
+ * determines the RX packet buffer size.
+ */
+ srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
+ E1000_SRRCTL_BSIZEPKT_MASK);
+ buf_size = (uint16_t) ((srrctl &
+ E1000_SRRCTL_BSIZEPKT_MASK) <<
+ E1000_SRRCTL_BSIZEPKT_SHIFT);
+
+ /* Add the dual VLAN tag length to support dual VLAN */
+ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * VLAN_TAG_SIZE) > buf_size){
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG,
+ "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+ } else {
+ /*
+ * Use BSIZE field of the device RCTL register.
+ */
+ if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
+ rctl_bsize = buf_size;
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
+
+ /* Enable this RX queue. */
+ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
+ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+ rxdctl &= 0xFFF00000;
+ rxdctl |= (rxq->pthresh & 0x1F);
+ rxdctl |= ((rxq->hthresh & 0x1F) << 8);
+ if (hw->mac.type == e1000_vfadapt) {
+ /*
+ * Workaround for the 82576 VF erratum:
+ * force WTHRESH to 1 to avoid write-back
+ * sometimes not being triggered.
+ */
+ rxdctl |= 0x10000;
+ PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
+ }
+ else
+ rxdctl |= ((rxq->wthresh & 0x1F) << 16);
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
+ }
+
+ if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+ dev->data->scattered_rx = 1;
+ }
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers.
+ * This needs to be done after enable.
+ */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ E1000_WRITE_REG(hw, E1000_RDH(i), 0);
+ E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
+ }
+
+ return 0;
+}
+
+/*********************************************************************
+ *
+ * Enable VF transmit unit.
+ *
+ **********************************************************************/
+void
+eth_igbvf_tx_init(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw;
+ struct igb_tx_queue *txq;
+ uint32_t txdctl;
+ uint16_t i;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings. */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ uint64_t bus_addr;
+
+ txq = dev->data->tx_queues[i];
+ bus_addr = txq->tx_ring_phys_addr;
+ E1000_WRITE_REG(hw, E1000_TDLEN(i),
+ txq->nb_tx_desc *
+ sizeof(union e1000_adv_tx_desc));
+ E1000_WRITE_REG(hw, E1000_TDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers. */
+ E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+ E1000_WRITE_REG(hw, E1000_TDH(i), 0);
+
+ /* Setup Transmit threshold registers. */
+ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
+ txdctl |= txq->pthresh & 0x1F;
+ txdctl |= ((txq->hthresh & 0x1F) << 8);
+ if (hw->mac.type == e1000_82576) {
+ /*
+ * Workaround for the 82576 VF erratum:
+ * force WTHRESH to 1 to avoid write-back
+ * sometimes not being triggered.
+ */
+ txdctl |= 0x10000;
+ PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
+ }
+ else
+ txdctl |= ((txq->wthresh & 0x1F) << 16);
+ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+ E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
+ }
+
+}
+
diff --git a/src/dpdk_lib18/librte_pmd_enic/LICENSE b/src/dpdk_lib18/librte_pmd_enic/LICENSE
new file mode 100755
index 00000000..46a27a4e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/LICENSE
@@ -0,0 +1,27 @@
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/dpdk_lib18/librte_pmd_enic/Makefile b/src/dpdk_lib18/librte_pmd_enic/Makefile
new file mode 100755
index 00000000..a2a623f3
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/Makefile
@@ -0,0 +1,67 @@
+#
+# Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+# Copyright 2007 Nuova Systems, Inc. All rights reserved.
+#
+# Copyright (c) 2014, Cisco Systems, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_enic.a
+
+CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_enic/vnic/
+CFLAGS += -I$(RTE_SDK)/lib/librte_pmd_enic/
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -Wno-strict-aliasing
+
+VPATH += $(RTE_SDK)/lib/librte_pmd_enic/src
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_cq.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_wq.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_rq.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += vnic/vnic_rss.c
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_net lib/librte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_hash
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic.h b/src/dpdk_lib18/librte_pmd_enic/enic.h
new file mode 100755
index 00000000..c43417c2
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/enic.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id$"
+
+#ifndef _ENIC_H_
+#define _ENIC_H_
+
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+#include "enic_res.h"
+
+#define DRV_NAME "enic_pmd"
+#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
+#define DRV_VERSION "1.0.0.4"
+#define DRV_COPYRIGHT "Copyright 2008-2014 Cisco Systems, Inc"
+
+#define ENIC_WQ_MAX 8
+#define ENIC_RQ_MAX 8
+#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
+#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+
+#define VLAN_ETH_HLEN 18
+
+#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
+
+#define ENICPMD_BDF_LENGTH 13 /* 0000:00:00.0'\0' */
+#define PKT_TX_TCP_UDP_CKSUM 0x6000
+#define ENIC_CALC_IP_CKSUM 1
+#define ENIC_CALC_TCP_UDP_CKSUM 2
+#define ENIC_MAX_MTU 9000
+#define PAGE_SIZE 4096
+#define PAGE_ROUND_UP(x) \
+ ((((unsigned long)(x)) + PAGE_SIZE-1) & (~(PAGE_SIZE-1)))
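+/*
+ * Worked example (illustrative only): with PAGE_SIZE == 4096,
+ * PAGE_ROUND_UP(5000) == 8192 and PAGE_ROUND_UP(4096) == 4096.
+ */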
+
+#define ENICPMD_VFIO_PATH "/dev/vfio/vfio"
+/*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/
+
+#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
+
+
+#define ENICPMD_FDIR_MAX 64
+
+struct enic_fdir_node {
+ struct rte_fdir_filter filter;
+ u16 fltr_id;
+ u16 rq_index;
+};
+
+struct enic_fdir {
+ struct rte_eth_fdir stats;
+ struct rte_hash *hash;
+ struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
+};
+
+/* Per-instance private data structure */
+struct enic {
+ struct enic *next;
+ struct rte_pci_device *pdev;
+ struct vnic_enet_config config;
+ struct vnic_dev_bar bar0;
+ struct vnic_dev *vdev;
+
+ struct rte_eth_dev *rte_dev;
+ struct enic_fdir fdir;
+ char bdf_name[ENICPMD_BDF_LENGTH];
+ int dev_fd;
+ int iommu_group_fd;
+ int iommu_groupid;
+ int eventfd;
+ uint8_t mac_addr[ETH_ALEN];
+ pthread_t err_intr_thread;
+ int promisc;
+ int allmulti;
+ int ig_vlan_strip_en;
+ int link_status;
+ u8 hw_ip_checksum;
+
+ unsigned int flags;
+ unsigned int priv_flags;
+
+ /* work queue */
+ struct vnic_wq wq[ENIC_WQ_MAX];
+ unsigned int wq_count;
+
+ /* receive queue */
+ struct vnic_rq rq[ENIC_RQ_MAX];
+ unsigned int rq_count;
+
+ /* completion queue */
+ struct vnic_cq cq[ENIC_CQ_MAX];
+ unsigned int cq_count;
+
+ /* interrupt resource */
+ struct vnic_intr intr;
+ unsigned int intr_count;
+};
+
+static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
+{
+ return rq;
+}
+
+static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
+{
+ return enic->rq_count + wq;
+}
+
+static inline unsigned int enic_msix_err_intr(__rte_unused struct enic *enic)
+{
+ return 0;
+}
+
+static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
+{
+ return (struct enic *)eth_dev->data->dev_private;
+}
+
+extern int enic_fdir_add_fltr(struct enic *enic,
+ struct rte_fdir_filter *params, u16 queue, u8 drop);
+extern int enic_fdir_del_fltr(struct enic *enic,
+ struct rte_fdir_filter *params);
+extern void enic_free_wq(void *txq);
+extern int enic_alloc_intr_resources(struct enic *enic);
+extern int enic_setup_finish(struct enic *enic);
+extern int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, uint16_t nb_desc);
+extern void enic_start_wq(struct enic *enic, uint16_t queue_idx);
+extern int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
+extern void enic_start_rq(struct enic *enic, uint16_t queue_idx);
+extern int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
+extern void enic_free_rq(void *rxq);
+extern int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, struct rte_mempool *mp,
+ uint16_t nb_desc);
+extern int enic_set_rss_nic_cfg(struct enic *enic);
+extern int enic_set_vnic_res(struct enic *enic);
+extern void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size);
+extern int enic_enable(struct enic *enic);
+extern int enic_disable(struct enic *enic);
+extern void enic_remove(struct enic *enic);
+extern int enic_get_link_status(struct enic *enic);
+extern void enic_dev_stats_get(struct enic *enic,
+ struct rte_eth_stats *r_stats);
+extern void enic_dev_stats_clear(struct enic *enic);
+extern void enic_add_packet_filter(struct enic *enic);
+extern void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
+extern void enic_del_mac_address(struct enic *enic);
+extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
+extern int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
+ struct rte_mbuf *tx_pkt, unsigned short len,
+ uint8_t sop, uint8_t eop,
+ uint16_t ol_flags, uint16_t vlan_tag);
+extern int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
+ unsigned int budget, unsigned int *work_done);
+extern int enic_probe(struct enic *enic);
+extern int enic_clsf_init(struct enic *enic);
+extern void enic_clsf_destroy(struct enic *enic);
+#endif /* _ENIC_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_clsf.c b/src/dpdk_lib18/librte_pmd_enic/enic_clsf.c
new file mode 100755
index 00000000..577a382c
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/enic_clsf.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id$"
+
+#include <libgen.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_hash.h>
+#include <rte_byteorder.h>
+
+#include "enic_compat.h"
+#include "enic.h"
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_nic.h"
+
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+#include <rte_hash_crc.h>
+#define DEFAULT_HASH_FUNC rte_hash_crc
+#else
+#include <rte_jhash.h>
+#define DEFAULT_HASH_FUNC rte_jhash
+#endif
+
+#define SOCKET_0 0
+#define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX
+#define ENICPMD_CLSF_BUCKET_ENTRIES 4
+
+int enic_fdir_del_fltr(struct enic *enic, struct rte_fdir_filter *params)
+{
+ int32_t pos;
+ struct enic_fdir_node *key;
+ /* See if the key is in the table */
+ pos = rte_hash_del_key(enic->fdir.hash, params);
+ switch (pos) {
+ case -EINVAL:
+ case -ENOENT:
+ enic->fdir.stats.f_remove++;
+ return -EINVAL;
+ default:
+ /* The entry is present in the table */
+ key = enic->fdir.nodes[pos];
+
+ /* Delete the filter */
+ vnic_dev_classifier(enic->vdev, CLSF_DEL,
+ &key->fltr_id, NULL);
+ rte_free(key);
+ enic->fdir.nodes[pos] = NULL;
+ enic->fdir.stats.free++;
+ enic->fdir.stats.remove++;
+ break;
+ }
+ return 0;
+}
+
+int enic_fdir_add_fltr(struct enic *enic, struct rte_fdir_filter *params,
+ u16 queue, u8 drop)
+{
+ struct enic_fdir_node *key;
+ struct filter fltr = {.type = 0};
+ int32_t pos;
+ u8 do_free = 0;
+ u16 old_fltr_id = 0;
+
+ if (!enic->fdir.hash || params->vlan_id || !params->l4type ||
+ (RTE_FDIR_IPTYPE_IPV6 == params->iptype) ||
+ (RTE_FDIR_L4TYPE_SCTP == params->l4type) ||
+ params->flex_bytes || drop) {
+ enic->fdir.stats.f_add++;
+ return -ENOTSUP;
+ }
+
+ /* See if the key is already there in the table */
+ pos = rte_hash_del_key(enic->fdir.hash, params);
+ switch (pos) {
+ case -EINVAL:
+ enic->fdir.stats.f_add++;
+ return -EINVAL;
+ case -ENOENT:
+ /* Add a new classifier entry */
+ if (!enic->fdir.stats.free) {
+ enic->fdir.stats.f_add++;
+ return -ENOSPC;
+ }
+ key = (struct enic_fdir_node *)rte_zmalloc(
+ "enic_fdir_node",
+ sizeof(struct enic_fdir_node), 0);
+ if (!key) {
+ enic->fdir.stats.f_add++;
+ return -ENOMEM;
+ }
+ break;
+ default:
+ /* The entry is already present in the table.
+ * Check if there is a change in queue
+ */
+ key = enic->fdir.nodes[pos];
+ enic->fdir.nodes[pos] = NULL;
+ if (unlikely(key->rq_index == queue)) {
+ /* Nothing to be done */
+ pos = rte_hash_add_key(enic->fdir.hash, params);
+ enic->fdir.nodes[pos] = key;
+ enic->fdir.stats.f_add++;
+ dev_warning(enic,
+ "FDIR rule is already present\n");
+ return 0;
+ }
+
+ if (likely(enic->fdir.stats.free)) {
+ /* Add the filter and then delete the old one.
+ * This is to avoid packets from going into the
+ * default queue during the window between
+ * delete and add
+ */
+ do_free = 1;
+ old_fltr_id = key->fltr_id;
+ } else {
+ /* No free slots in the classifier.
+ * Delete the filter and add the modified one later
+ */
+ vnic_dev_classifier(enic->vdev, CLSF_DEL,
+ &key->fltr_id, NULL);
+ enic->fdir.stats.free++;
+ }
+
+ break;
+ }
+
+ key->filter = *params;
+ key->rq_index = queue;
+
+ fltr.type = FILTER_IPV4_5TUPLE;
+ fltr.u.ipv4.src_addr = rte_be_to_cpu_32(params->ip_src.ipv4_addr);
+ fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(params->ip_dst.ipv4_addr);
+ fltr.u.ipv4.src_port = rte_be_to_cpu_16(params->port_src);
+ fltr.u.ipv4.dst_port = rte_be_to_cpu_16(params->port_dst);
+
+ if (RTE_FDIR_L4TYPE_TCP == params->l4type)
+ fltr.u.ipv4.protocol = PROTO_TCP;
+ else
+ fltr.u.ipv4.protocol = PROTO_UDP;
+
+ fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+
+ if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
+ key->fltr_id = queue;
+ } else {
+ dev_err(enic, "Add classifier entry failed\n");
+ enic->fdir.stats.f_add++;
+ rte_free(key);
+ return -1;
+ }
+
+ if (do_free)
+ vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);
+	else {
+ enic->fdir.stats.free--;
+ enic->fdir.stats.add++;
+ }
+
+ pos = rte_hash_add_key(enic->fdir.hash, (void *)key);
+ enic->fdir.nodes[pos] = key;
+ return 0;
+}
+
+void enic_clsf_destroy(struct enic *enic)
+{
+ u32 index;
+ struct enic_fdir_node *key;
+ /* delete classifier entries */
+ for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
+ key = enic->fdir.nodes[index];
+ if (key) {
+ vnic_dev_classifier(enic->vdev, CLSF_DEL,
+ &key->fltr_id, NULL);
+ rte_free(key);
+ }
+ }
+
+ if (enic->fdir.hash) {
+ rte_hash_free(enic->fdir.hash);
+ enic->fdir.hash = NULL;
+ }
+}
+
+int enic_clsf_init(struct enic *enic)
+{
+ struct rte_hash_parameters hash_params = {
+ .name = "enicpmd_clsf_hash",
+ .entries = ENICPMD_CLSF_HASH_ENTRIES,
+ .bucket_entries = ENICPMD_CLSF_BUCKET_ENTRIES,
+ .key_len = sizeof(struct rte_fdir_filter),
+ .hash_func = DEFAULT_HASH_FUNC,
+ .hash_func_init_val = 0,
+ .socket_id = SOCKET_0,
+ };
+
+ enic->fdir.hash = rte_hash_create(&hash_params);
+ memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
+ enic->fdir.stats.free = ENICPMD_FDIR_MAX;
+ return (NULL == enic->fdir.hash);
+}
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_compat.h b/src/dpdk_lib18/librte_pmd_enic/enic_compat.h
new file mode 100755
index 00000000..b1af838d
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/enic_compat.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id$"
+
+#ifndef _ENIC_COMPAT_H_
+#define _ENIC_COMPAT_H_
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+
+#define ENIC_PAGE_ALIGN 4096ULL
+#define ENIC_ALIGN ENIC_PAGE_ALIGN
+#define NAME_MAX 255
+#define ETH_ALEN 6
+
+#define __iomem
+
+#define rmb() rte_rmb() /* dpdk rte provided rmb */
+#define wmb() rte_wmb() /* dpdk rte provided wmb */
+
+#define le16_to_cpu
+#define le32_to_cpu
+#define le64_to_cpu
+#define cpu_to_le16
+#define cpu_to_le32
+#define cpu_to_le64
+
+#ifndef offsetof
+#define offsetof(t, m) ((size_t) &((t *)0)->m)
+#endif
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warning(0, y, ##args)
+#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__)
+
+#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
+#define udelay usleep
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+#define kzalloc(size, flags) calloc(1, size)
+#define kfree(x) free(x)
+
+#define dev_err(x, args...) printf("rte_enic_pmd : Error - " args)
+#define dev_info(x, args...) printf("rte_enic_pmd: Info - " args)
+#define dev_warning(x, args...) printf("rte_enic_pmd: Warning - " args)
+#define dev_trace(x, args...) printf("rte_enic_pmd: Trace - " args)
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef unsigned long long dma_addr_t;
+
+static inline uint32_t ioread32(volatile void *addr)
+{
+ return *(volatile uint32_t *)addr;
+}
+
+static inline uint16_t ioread16(volatile void *addr)
+{
+ return *(volatile uint16_t *)addr;
+}
+
+static inline uint8_t ioread8(volatile void *addr)
+{
+ return *(volatile uint8_t *)addr;
+}
+
+static inline void iowrite32(uint32_t val, volatile void *addr)
+{
+ *(volatile uint32_t *)addr = val;
+}
+
+static inline void iowrite16(uint16_t val, volatile void *addr)
+{
+ *(volatile uint16_t *)addr = val;
+}
+
+static inline void iowrite8(uint8_t val, volatile void *addr)
+{
+ *(volatile uint8_t *)addr = val;
+}
+
+static inline unsigned int readl(volatile void __iomem *addr)
+{
+ return *(volatile unsigned int *)addr;
+}
+
+static inline void writel(unsigned int val, volatile void __iomem *addr)
+{
+ *(volatile unsigned int *)addr = val;
+}
+
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1 : __max2; })
+
+#endif /* _ENIC_COMPAT_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_ethdev.c b/src/dpdk_lib18/librte_pmd_enic/enic_ethdev.c
new file mode 100755
index 00000000..9cb6666b
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/enic_ethdev.c
@@ -0,0 +1,612 @@
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id$"
+
+#include <stdio.h>
+#include <stdint.h>
+
+#include <rte_dev.h>
+#include <rte_pci.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "vnic_intr.h"
+#include "vnic_cq.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_enet.h"
+#include "enic.h"
+
+#define ENICPMD_FUNC_TRACE() \
+ RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__)
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id pci_id_enic_map[] = {
+#define RTE_PCI_DEV_ID_DECL_ENIC(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#ifndef PCI_VENDOR_ID_CISCO
+#define PCI_VENDOR_ID_CISCO 0x1137
+#endif
+#include "rte_pci_dev_ids.h"
+RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET)
+RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)
+{.vendor_id = 0, /* Sentinel */},
+};
+
+static int enicpmd_fdir_remove_perfect_filter(struct rte_eth_dev *eth_dev,
+ struct rte_fdir_filter *fdir_filter,
+ __rte_unused uint16_t soft_id)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ return enic_fdir_del_fltr(enic, fdir_filter);
+}
+
+static int enicpmd_fdir_add_perfect_filter(struct rte_eth_dev *eth_dev,
+ struct rte_fdir_filter *fdir_filter, __rte_unused uint16_t soft_id,
+ uint8_t queue, uint8_t drop)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ return enic_fdir_add_fltr(enic, fdir_filter, (uint16_t)queue, drop);
+}
+
+static void enicpmd_fdir_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir *fdir)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ *fdir = enic->fdir.stats;
+}
+
+static void enicpmd_dev_tx_queue_release(void *txq)
+{
+ ENICPMD_FUNC_TRACE();
+ enic_free_wq(txq);
+}
+
+static int enicpmd_dev_setup_intr(struct enic *enic)
+{
+ int ret;
+ unsigned int index;
+
+ ENICPMD_FUNC_TRACE();
+
+ /* Are we done with the init of all the queues? */
+ for (index = 0; index < enic->cq_count; index++) {
+ if (!enic->cq[index].ctrl)
+ break;
+ }
+
+ if (enic->cq_count != index)
+ return 0;
+
+ ret = enic_alloc_intr_resources(enic);
+ if (ret) {
+ dev_err(enic, "alloc intr failed\n");
+ return ret;
+ }
+ enic_init_vnic_resources(enic);
+
+ ret = enic_setup_finish(enic);
+ if (ret)
+ dev_err(enic, "setup could not be finished\n");
+
+ return ret;
+}
+
+static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ int ret;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
+
+ ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
+ if (ret) {
+ dev_err(enic, "error in allocating wq\n");
+ return ret;
+ }
+
+ return enicpmd_dev_setup_intr(enic);
+}
+
+static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+
+ enic_start_wq(enic, queue_idx);
+
+ return 0;
+}
+
+static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx)
+{
+ int ret;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+
+ ret = enic_stop_wq(enic, queue_idx);
+ if (ret)
+ dev_err(enic, "error in stopping wq %d\n", queue_idx);
+
+ return ret;
+}
+
+static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+
+ enic_start_rq(enic, queue_idx);
+
+ return 0;
+}
+
+static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx)
+{
+ int ret;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+
+ ret = enic_stop_rq(enic, queue_idx);
+ if (ret)
+ dev_err(enic, "error in stopping rq %d\n", queue_idx);
+
+ return ret;
+}
+
+static void enicpmd_dev_rx_queue_release(void *rxq)
+{
+ ENICPMD_FUNC_TRACE();
+ enic_free_rq(rxq);
+}
+
+static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ int ret;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];
+
+ ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
+ if (ret) {
+ dev_err(enic, "error in allocating rq\n");
+ return ret;
+ }
+
+ return enicpmd_dev_setup_intr(enic);
+}
+
+static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
+ uint16_t vlan_id, int on)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ if (on)
+ enic_add_vlan(enic, vlan_id);
+ else
+ enic_del_vlan(enic, vlan_id);
+ return 0;
+}
+
+static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ enic->ig_vlan_strip_en = 1;
+ else
+ enic->ig_vlan_strip_en = 0;
+ }
+ enic_set_rss_nic_cfg(enic);
+
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ dev_warning(enic,
+ "Configuration of VLAN filter is not supported\n");
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ dev_warning(enic,
+ "Configuration of extended VLAN is not supported\n");
+ }
+}
+
+static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ int ret;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ ret = enic_set_vnic_res(enic);
+ if (ret) {
+ dev_err(enic, "Set vNIC resource num failed, aborting\n");
+ return ret;
+ }
+
+ if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
+ eth_dev->data->dev_conf.rxmode.header_split) {
+ /* Enable header-data-split */
+ enic_set_hdr_split_size(enic,
+ eth_dev->data->dev_conf.rxmode.split_hdr_size);
+ }
+
+ enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
+ return 0;
+}
+
+/* Start the device.
+ * It returns 0 on success.
+ */
+static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ return enic_enable(enic);
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_link link;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic_disable(enic);
+ memset(&link, 0, sizeof(link));
+ rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
+ *(uint64_t *)&eth_dev->data->dev_link,
+ *(uint64_t *)&link);
+}
+
+/*
+ * Stop device.
+ */
+static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic_remove(enic);
+}
+
+static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
+ __rte_unused int wait_to_complete)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+ int link_status = 0;
+
+ ENICPMD_FUNC_TRACE();
+ link_status = enic_get_link_status(enic);
+ ret = (link_status == enic->link_status);
+ enic->link_status = link_status;
+ eth_dev->data->dev_link.link_status = link_status;
+ eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
+ return ret;
+}
+
+static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *stats)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic_dev_stats_get(enic, stats);
+}
+
+static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic_dev_stats_clear(enic);
+}
+
+static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ device_info->max_rx_queues = enic->rq_count;
+ device_info->max_tx_queues = enic->wq_count;
+ device_info->min_rx_bufsize = ENIC_MIN_MTU;
+ device_info->max_rx_pktlen = enic->config.mtu;
+ device_info->max_mac_addrs = 1;
+ device_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ device_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+}
+
+static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic->promisc = 1;
+ enic_add_packet_filter(enic);
+}
+
+static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic->promisc = 0;
+ enic_add_packet_filter(enic);
+}
+
+static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic->allmulti = 1;
+ enic_add_packet_filter(enic);
+}
+
+static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic->allmulti = 0;
+ enic_add_packet_filter(enic);
+}
+
+static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
+ struct ether_addr *mac_addr,
+ __rte_unused uint32_t index, __rte_unused uint32_t pool)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic_set_mac_address(enic, mac_addr->addr_bytes);
+}
+
+static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused uint32_t index)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ enic_del_mac_address(enic);
+}
+
+
+static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ unsigned int index;
+ unsigned int frags;
+ unsigned int pkt_len;
+ unsigned int seg_len;
+ unsigned int inc_len;
+ unsigned int nb_segs;
+ struct rte_mbuf *tx_pkt;
+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+ unsigned short vlan_id;
+ unsigned short ol_flags;
+
+ for (index = 0; index < nb_pkts; index++) {
+ tx_pkt = *tx_pkts++;
+ inc_len = 0;
+ nb_segs = tx_pkt->nb_segs;
+ if (nb_segs > vnic_wq_desc_avail(wq)) {
+ /* wq cleanup and try again */
+ if (!enic_cleanup_wq(enic, wq) ||
+ (nb_segs > vnic_wq_desc_avail(wq)))
+ return index;
+ }
+ pkt_len = tx_pkt->pkt_len;
+ vlan_id = tx_pkt->vlan_tci;
+ ol_flags = tx_pkt->ol_flags;
+ for (frags = 0; inc_len < pkt_len; frags++) {
+ if (!tx_pkt)
+ break;
+ seg_len = tx_pkt->data_len;
+ inc_len += seg_len;
+ if (enic_send_pkt(enic, wq, tx_pkt,
+ (unsigned short)seg_len, !frags,
+ (pkt_len == inc_len), ol_flags, vlan_id)) {
+ break;
+ }
+ tx_pkt = tx_pkt->next;
+ }
+ }
+
+ enic_cleanup_wq(enic, wq);
+ return index;
+}
+
+static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct vnic_rq *rq = (struct vnic_rq *)rx_queue;
+ unsigned int work_done;
+
+ if (enic_poll(rq, rx_pkts, (unsigned int)nb_pkts, &work_done))
+ dev_err(enic, "error in enicpmd poll\n");
+
+ return work_done;
+}
+
+static struct eth_dev_ops enicpmd_eth_dev_ops = {
+ .dev_configure = enicpmd_dev_configure,
+ .dev_start = enicpmd_dev_start,
+ .dev_stop = enicpmd_dev_stop,
+ .dev_set_link_up = NULL,
+ .dev_set_link_down = NULL,
+ .dev_close = enicpmd_dev_close,
+ .promiscuous_enable = enicpmd_dev_promiscuous_enable,
+ .promiscuous_disable = enicpmd_dev_promiscuous_disable,
+ .allmulticast_enable = enicpmd_dev_allmulticast_enable,
+ .allmulticast_disable = enicpmd_dev_allmulticast_disable,
+ .link_update = enicpmd_dev_link_update,
+ .stats_get = enicpmd_dev_stats_get,
+ .stats_reset = enicpmd_dev_stats_reset,
+ .queue_stats_mapping_set = NULL,
+ .dev_infos_get = enicpmd_dev_info_get,
+ .mtu_set = NULL,
+ .vlan_filter_set = enicpmd_vlan_filter_set,
+ .vlan_tpid_set = NULL,
+ .vlan_offload_set = enicpmd_vlan_offload_set,
+ .vlan_strip_queue_set = NULL,
+ .rx_queue_start = enicpmd_dev_rx_queue_start,
+ .rx_queue_stop = enicpmd_dev_rx_queue_stop,
+ .tx_queue_start = enicpmd_dev_tx_queue_start,
+ .tx_queue_stop = enicpmd_dev_tx_queue_stop,
+ .rx_queue_setup = enicpmd_dev_rx_queue_setup,
+ .rx_queue_release = enicpmd_dev_rx_queue_release,
+ .rx_queue_count = NULL,
+ .rx_descriptor_done = NULL,
+ .tx_queue_setup = enicpmd_dev_tx_queue_setup,
+ .tx_queue_release = enicpmd_dev_tx_queue_release,
+ .dev_led_on = NULL,
+ .dev_led_off = NULL,
+ .flow_ctrl_get = NULL,
+ .flow_ctrl_set = NULL,
+ .priority_flow_ctrl_set = NULL,
+ .mac_addr_add = enicpmd_add_mac_addr,
+ .mac_addr_remove = enicpmd_remove_mac_addr,
+ .fdir_add_signature_filter = NULL,
+ .fdir_update_signature_filter = NULL,
+ .fdir_remove_signature_filter = NULL,
+ .fdir_infos_get = enicpmd_fdir_info_get,
+ .fdir_add_perfect_filter = enicpmd_fdir_add_perfect_filter,
+ .fdir_update_perfect_filter = enicpmd_fdir_add_perfect_filter,
+ .fdir_remove_perfect_filter = enicpmd_fdir_remove_perfect_filter,
+ .fdir_set_masks = NULL,
+};
+
+struct enic *enicpmd_list_head = NULL;
+/* Initialize the driver
+ * It returns 0 on success.
+ */
+static int eth_enicpmd_dev_init(
+ __attribute__((unused))struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pdev;
+ struct rte_pci_addr *addr;
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+
+ enic->rte_dev = eth_dev;
+ eth_dev->dev_ops = &enicpmd_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &enicpmd_recv_pkts;
+ eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
+
+ pdev = eth_dev->pci_dev;
+ enic->pdev = pdev;
+ addr = &pdev->addr;
+
+ snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
+ addr->domain, addr->bus, addr->devid, addr->function);
+
+ return enic_probe(enic);
+}
+
+static struct eth_driver rte_enic_pmd = {
+ {
+ .name = "rte_enic_pmd",
+ .id_table = pci_id_enic_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ },
+ .eth_dev_init = eth_enicpmd_dev_init,
+ .dev_private_size = sizeof(struct enic),
+};
+
+/* Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register as the [Poll Mode] Driver of Cisco ENIC device.
+ */
+static int
+rte_enic_pmd_init(const char *name __rte_unused,
+ const char *params __rte_unused)
+{
+ ENICPMD_FUNC_TRACE();
+
+ rte_eth_driver_register(&rte_enic_pmd);
+ return 0;
+}
+
+static struct rte_driver rte_enic_driver = {
+ .type = PMD_PDEV,
+ .init = rte_enic_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_enic_driver);
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_main.c b/src/dpdk_lib18/librte_pmd_enic/enic_main.c
new file mode 100755
index 00000000..8ab8e44b
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/enic_main.c
@@ -0,0 +1,1117 @@
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id$"
+
+#include <stdio.h>
+
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <libgen.h>
+
+#include <rte_pci.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+#include <rte_ethdev.h>
+
+#include "enic_compat.h"
+#include "enic.h"
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_nic.h"
+
+static inline int enic_is_sriov_vf(struct enic *enic)
+{
+ return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
+}
+
+static int is_zero_addr(uint8_t *addr)
+{
+ return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+
+static int is_mcast_addr(uint8_t *addr)
+{
+ return addr[0] & 1;
+}
+
+static int is_eth_addr_valid(uint8_t *addr)
+{
+ return !is_mcast_addr(addr) && !is_zero_addr(addr);
+}
+
+static inline struct rte_mbuf *
+enic_rxmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+ return m;
+}
+
+void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
+{
+ vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
+}
+
+static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf;
+
+ rte_mempool_put(mbuf->pool, mbuf);
+ buf->os_buf = NULL;
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq,
+ __rte_unused struct cq_desc *cq_desc,
+ struct vnic_wq_buf *buf,
+ __rte_unused void *opaque)
+{
+ enic_free_wq_buf(wq, buf);
+}
+
+static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(vdev);
+
+ vnic_wq_service(&enic->wq[q_number], cq_desc,
+ completed_index, enic_wq_free_buf,
+ opaque);
+
+ return 0;
+}
+
+static void enic_log_q_error(struct enic *enic)
+{
+ unsigned int i;
+ u32 error_status;
+
+ for (i = 0; i < enic->wq_count; i++) {
+ error_status = vnic_wq_error_status(&enic->wq[i]);
+ if (error_status)
+ dev_err(enic, "WQ[%d] error_status %d\n", i,
+ error_status);
+ }
+
+ for (i = 0; i < enic->rq_count; i++) {
+ error_status = vnic_rq_error_status(&enic->rq[i]);
+ if (error_status)
+ dev_err(enic, "RQ[%d] error_status %d\n", i,
+ error_status);
+ }
+}
+
+unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
+{
+ unsigned int cq = enic_cq_wq(enic, wq->index);
+
+ /* Return the work done */
+ return vnic_cq_service(&enic->cq[cq],
+ -1 /*wq_work_to_do*/, enic_wq_service, NULL);
+}
+
+
+int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
+ struct rte_mbuf *tx_pkt, unsigned short len,
+ uint8_t sop, uint8_t eop,
+ uint16_t ol_flags, uint16_t vlan_tag)
+{
+ struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+ uint16_t mss = 0;
+ uint8_t cq_entry = eop;
+ uint8_t vlan_tag_insert = 0;
+ uint64_t bus_addr = (dma_addr_t)
+ (tx_pkt->buf_physaddr + RTE_PKTMBUF_HEADROOM);
+
+ if (sop) {
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ vlan_tag_insert = 1;
+
+ if (enic->hw_ip_checksum) {
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ mss |= ENIC_CALC_IP_CKSUM;
+
+ if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
+ mss |= ENIC_CALC_TCP_UDP_CKSUM;
+ }
+ }
+
+ wq_enet_desc_enc(desc,
+ bus_addr,
+ len,
+ mss,
+ 0 /* header_length */,
+ 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,
+ eop,
+ cq_entry,
+ 0 /* fcoe_encap */,
+ vlan_tag_insert,
+ vlan_tag,
+ 0 /* loopback */);
+
+ vnic_wq_post(wq, (void *)tx_pkt, bus_addr, len,
+ sop, eop,
+ 1 /*desc_skip_cnt*/,
+ cq_entry,
+ 0 /*compressed send*/,
+ 0 /*wrid*/);
+
+ return 0;
+}
+
+void enic_dev_stats_clear(struct enic *enic)
+{
+ if (vnic_dev_stats_clear(enic->vdev))
+ dev_err(enic, "Error in clearing stats\n");
+}
+
+void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
+{
+ struct vnic_stats *stats;
+
+ memset(r_stats, 0, sizeof(*r_stats));
+ if (vnic_dev_stats_dump(enic->vdev, &stats)) {
+ dev_err(enic, "Error in getting stats\n");
+ return;
+ }
+
+ r_stats->ipackets = stats->rx.rx_frames_ok;
+ r_stats->opackets = stats->tx.tx_frames_ok;
+
+ r_stats->ibytes = stats->rx.rx_bytes_ok;
+ r_stats->obytes = stats->tx.tx_bytes_ok;
+
+ r_stats->ierrors = stats->rx.rx_errors;
+ r_stats->oerrors = stats->tx.tx_errors;
+
+ r_stats->imcasts = stats->rx.rx_multicast_frames_ok;
+ r_stats->rx_nombuf = stats->rx.rx_no_bufs;
+}
+
+void enic_del_mac_address(struct enic *enic)
+{
+ if (vnic_dev_del_addr(enic->vdev, enic->mac_addr))
+ dev_err(enic, "del mac addr failed\n");
+}
+
+void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
+{
+ int err;
+
+ if (!is_eth_addr_valid(mac_addr)) {
+ dev_err(enic, "invalid mac address\n");
+ return;
+ }
+
+ err = vnic_dev_del_addr(enic->vdev, mac_addr);
+ if (err) {
+ dev_err(enic, "del mac addr failed\n");
+ return;
+ }
+
+ ether_addr_copy((struct ether_addr *)mac_addr,
+ (struct ether_addr *)enic->mac_addr);
+
+ err = vnic_dev_add_addr(enic->vdev, mac_addr);
+ if (err) {
+ dev_err(enic, "add mac addr failed\n");
+ return;
+ }
+}
+
+static void
+enic_free_rq_buf(__rte_unused struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+ if (!buf->os_buf)
+ return;
+
+ rte_pktmbuf_free((struct rte_mbuf *)buf->os_buf);
+ buf->os_buf = NULL;
+}
+
+void enic_init_vnic_resources(struct enic *enic)
+{
+ unsigned int error_interrupt_enable = 1;
+ unsigned int error_interrupt_offset = 0;
+ unsigned int index = 0;
+
+ for (index = 0; index < enic->rq_count; index++) {
+ vnic_rq_init(&enic->rq[index],
+ enic_cq_rq(enic, index),
+ error_interrupt_enable,
+ error_interrupt_offset);
+ }
+
+ for (index = 0; index < enic->wq_count; index++) {
+ vnic_wq_init(&enic->wq[index],
+ enic_cq_wq(enic, index),
+ error_interrupt_enable,
+ error_interrupt_offset);
+ }
+
+ vnic_dev_stats_clear(enic->vdev);
+
+ for (index = 0; index < enic->cq_count; index++) {
+ vnic_cq_init(&enic->cq[index],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 0 /* interrupt_enable */,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ 0 /* interrupt offset */,
+ 0 /* cq_message_addr */);
+ }
+
+ vnic_intr_init(&enic->intr,
+ enic->config.intr_timer_usec,
+ enic->config.intr_timer_type,
+ /*mask_on_assertion*/1);
+}
+
+
+static int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ dma_addr_t dma_addr;
+ struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+ uint8_t type = RQ_ENET_TYPE_ONLY_SOP;
+ u16 split_hdr_size = vnic_get_hdr_split_size(enic->vdev);
+ struct rte_mbuf *mbuf = enic_rxmbuf_alloc(rq->mp);
+ struct rte_mbuf *hdr_mbuf = NULL;
+
+ if (!mbuf) {
+ dev_err(enic, "mbuf alloc in enic_rq_alloc_buf failed\n");
+ return -1;
+ }
+
+ if (unlikely(split_hdr_size)) {
+ if (vnic_rq_desc_avail(rq) < 2) {
+ rte_mempool_put(mbuf->pool, mbuf);
+ return -1;
+ }
+ hdr_mbuf = enic_rxmbuf_alloc(rq->mp);
+ if (!hdr_mbuf) {
+ rte_mempool_put(mbuf->pool, mbuf);
+ dev_err(enic,
+ "hdr_mbuf alloc in enic_rq_alloc_buf failed\n");
+ return -1;
+ }
+
+ hdr_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+
+ hdr_mbuf->nb_segs = 2;
+ hdr_mbuf->port = rq->index;
+ hdr_mbuf->next = mbuf;
+
+ dma_addr = (dma_addr_t)
+ (hdr_mbuf->buf_physaddr + hdr_mbuf->data_off);
+
+ rq_enet_desc_enc(desc, dma_addr, type, split_hdr_size);
+
+ vnic_rq_post(rq, (void *)hdr_mbuf, 0 /*os_buf_index*/, dma_addr,
+ (unsigned int)split_hdr_size, 0 /*wrid*/);
+
+ desc = vnic_rq_next_desc(rq);
+ type = RQ_ENET_TYPE_NOT_SOP;
+ } else {
+ mbuf->nb_segs = 1;
+ mbuf->port = rq->index;
+ }
+
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->next = NULL;
+
+ dma_addr = (dma_addr_t)
+ (mbuf->buf_physaddr + mbuf->data_off);
+
+ rq_enet_desc_enc(desc, dma_addr, type, mbuf->buf_len);
+
+ vnic_rq_post(rq, (void *)mbuf, 0 /*os_buf_index*/, dma_addr,
+ (unsigned int)mbuf->buf_len, 0 /*wrid*/);
+
+ return 0;
+}
+
+static int enic_rq_indicate_buf(struct vnic_rq *rq,
+ struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+ int skipped, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct rte_mbuf **rx_pkt_bucket = (struct rte_mbuf **)opaque;
+ struct rte_mbuf *rx_pkt = NULL;
+ struct rte_mbuf *hdr_rx_pkt = NULL;
+
+ u8 type, color, eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+ u8 packet_error;
+ u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
+ u32 rss_hash;
+
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+ &type, &color, &q_number, &completed_index,
+ &ingress_port, &fcoe, &eop, &sop, &rss_type,
+ &csum_not_calc, &rss_hash, &bytes_written,
+ &packet_error, &vlan_stripped, &vlan_tci, &checksum,
+ &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
+ &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
+ &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+ &fcs_ok);
+
+ rx_pkt = (struct rte_mbuf *)buf->os_buf;
+ buf->os_buf = NULL;
+
+ if (unlikely(packet_error)) {
+ dev_err(enic, "packet error\n");
+ rx_pkt->data_len = 0;
+ return 0;
+ }
+
+ if (unlikely(skipped)) {
+ rx_pkt->data_len = 0;
+ return 0;
+ }
+
+ if (likely(!vnic_get_hdr_split_size(enic->vdev))) {
+ /* No header split configured */
+ *rx_pkt_bucket = rx_pkt;
+ rx_pkt->pkt_len = bytes_written;
+
+ if (ipv4) {
+ rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
+ if (!csum_not_calc) {
+ if (unlikely(!ipv4_csum_ok))
+ rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if ((tcp || udp) && (!tcp_udp_csum_ok))
+ rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ } else if (ipv6)
+ rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
+ } else {
+ /* Header split */
+ if (sop && !eop) {
+ /* This piece is header */
+ *rx_pkt_bucket = rx_pkt;
+ rx_pkt->pkt_len = bytes_written;
+ } else {
+ if (sop && eop) {
+ /* The packet is smaller than split_hdr_size */
+ *rx_pkt_bucket = rx_pkt;
+ rx_pkt->pkt_len = bytes_written;
+ if (ipv4) {
+ rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
+ if (!csum_not_calc) {
+ if (unlikely(!ipv4_csum_ok))
+ rx_pkt->ol_flags |=
+ PKT_RX_IP_CKSUM_BAD;
+
+ if ((tcp || udp) &&
+ (!tcp_udp_csum_ok))
+ rx_pkt->ol_flags |=
+ PKT_RX_L4_CKSUM_BAD;
+ }
+ } else if (ipv6)
+ rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
+ } else {
+ /* Payload */
+ hdr_rx_pkt = *rx_pkt_bucket;
+ hdr_rx_pkt->pkt_len += bytes_written;
+ if (ipv4) {
+ hdr_rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
+ if (!csum_not_calc) {
+ if (unlikely(!ipv4_csum_ok))
+ hdr_rx_pkt->ol_flags |=
+ PKT_RX_IP_CKSUM_BAD;
+
+ if ((tcp || udp) &&
+ (!tcp_udp_csum_ok))
+ hdr_rx_pkt->ol_flags |=
+ PKT_RX_L4_CKSUM_BAD;
+ }
+ } else if (ipv6)
+ hdr_rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
+
+ }
+ }
+ }
+
+ rx_pkt->data_len = bytes_written;
+
+ if (rss_hash) {
+ rx_pkt->ol_flags |= PKT_RX_RSS_HASH;
+ rx_pkt->hash.rss = rss_hash;
+ }
+
+ if (vlan_tci) {
+ rx_pkt->ol_flags |= PKT_RX_VLAN_PKT;
+ rx_pkt->vlan_tci = vlan_tci;
+ }
+
+ return eop;
+}
+
+static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(vdev);
+
+ return vnic_rq_service(&enic->rq[q_number], cq_desc,
+ completed_index, VNIC_RQ_RETURN_DESC,
+ enic_rq_indicate_buf, opaque);
+
+}
+
+int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
+ unsigned int budget, unsigned int *work_done)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ unsigned int cq = enic_cq_rq(enic, rq->index);
+ int err = 0;
+
+ *work_done = vnic_cq_service(&enic->cq[cq],
+ budget, enic_rq_service, (void *)rx_pkts);
+
+ if (*work_done) {
+ vnic_rq_fill(rq, enic_rq_alloc_buf);
+
+ /* Need at least one buffer on ring to get going */
+ if (vnic_rq_desc_used(rq) == 0) {
+ dev_err(enic, "Unable to alloc receive buffers\n");
+ err = -1;
+ }
+ }
+ return err;
+}
+
+static void *
+enic_alloc_consistent(__rte_unused void *priv, size_t size,
+ dma_addr_t *dma_handle, u8 *name)
+{
+ void *vaddr;
+ const struct rte_memzone *rz;
+ *dma_handle = 0;
+
+ rz = rte_memzone_reserve_aligned((const char *)name,
+ size, 0, 0, ENIC_ALIGN);
+ if (!rz) {
+ pr_err("%s : Failed to allocate memory requested for %s",
+ __func__, name);
+ return NULL;
+ }
+
+ vaddr = rz->addr;
+ *dma_handle = (dma_addr_t)rz->phys_addr;
+
+ return vaddr;
+}
+
+static void
+enic_free_consistent(__rte_unused struct rte_pci_device *hwdev,
+ __rte_unused size_t size,
+ __rte_unused void *vaddr,
+ __rte_unused dma_addr_t dma_handle)
+{
+ /* Nothing to be done */
+}
+
+static void
+enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
+ void *arg)
+{
+ struct enic *enic = pmd_priv((struct rte_eth_dev *)arg);
+
+ vnic_intr_return_all_credits(&enic->intr);
+
+ enic_log_q_error(enic);
+}
+
+int enic_enable(struct enic *enic)
+{
+ unsigned int index;
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+ eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
+ eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+
+ if (enic_clsf_init(enic))
+		dev_warning(enic, "Init of hash table for clsf failed. "\
+ "Flow director feature will not work\n");
+
+ /* Fill RQ bufs */
+ for (index = 0; index < enic->rq_count; index++) {
+ vnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf);
+
+ /* Need at least one buffer on ring to get going
+ */
+ if (vnic_rq_desc_used(&enic->rq[index]) == 0) {
+ dev_err(enic, "Unable to alloc receive buffers\n");
+ return -1;
+ }
+ }
+
+ for (index = 0; index < enic->wq_count; index++)
+ vnic_wq_enable(&enic->wq[index]);
+ for (index = 0; index < enic->rq_count; index++)
+ vnic_rq_enable(&enic->rq[index]);
+
+ vnic_dev_enable_wait(enic->vdev);
+
+ /* Register and enable error interrupt */
+ rte_intr_callback_register(&(enic->pdev->intr_handle),
+ enic_intr_handler, (void *)enic->rte_dev);
+
+ rte_intr_enable(&(enic->pdev->intr_handle));
+ vnic_intr_unmask(&enic->intr);
+
+ return 0;
+}
+
+int enic_alloc_intr_resources(struct enic *enic)
+{
+ int err;
+
+ dev_info(enic, "vNIC resources used: "\
+ "wq %d rq %d cq %d intr %d\n",
+ enic->wq_count, enic->rq_count,
+ enic->cq_count, enic->intr_count);
+
+ err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
+ if (err)
+ enic_free_vnic_resources(enic);
+
+ return err;
+}
+
+void enic_free_rq(void *rxq)
+{
+ struct vnic_rq *rq = (struct vnic_rq *)rxq;
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+
+ vnic_rq_free(rq);
+ vnic_cq_free(&enic->cq[rq->index]);
+}
+
+void enic_start_wq(struct enic *enic, uint16_t queue_idx)
+{
+ vnic_wq_enable(&enic->wq[queue_idx]);
+}
+
+int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
+{
+ return vnic_wq_disable(&enic->wq[queue_idx]);
+}
+
+void enic_start_rq(struct enic *enic, uint16_t queue_idx)
+{
+ vnic_rq_enable(&enic->rq[queue_idx]);
+}
+
+int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
+{
+ return vnic_rq_disable(&enic->rq[queue_idx]);
+}
+
+int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, struct rte_mempool *mp,
+ uint16_t nb_desc)
+{
+ int err;
+ struct vnic_rq *rq = &enic->rq[queue_idx];
+
+ rq->socket_id = socket_id;
+ rq->mp = mp;
+
+ if (nb_desc) {
+ if (nb_desc > enic->config.rq_desc_count) {
+ dev_warning(enic,
+				"RQ %d - number of rx desc in cmd line (%d) "\
+				"is greater than that in the UCSM/CIMC adapter "\
+ "policy. Applying the value in the adapter "\
+ "policy (%d).\n",
+ queue_idx, nb_desc, enic->config.rq_desc_count);
+ } else if (nb_desc != enic->config.rq_desc_count) {
+ enic->config.rq_desc_count = nb_desc;
+ dev_info(enic,
+ "RX Queues - effective number of descs:%d\n",
+ nb_desc);
+ }
+ }
+
+ /* Allocate queue resources */
+ err = vnic_rq_alloc(enic->vdev, &enic->rq[queue_idx], queue_idx,
+ enic->config.rq_desc_count,
+ sizeof(struct rq_enet_desc));
+ if (err) {
+ dev_err(enic, "error in allocation of rq\n");
+ return err;
+ }
+
+ err = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
+ socket_id, enic->config.rq_desc_count,
+ sizeof(struct cq_enet_rq_desc));
+ if (err) {
+ vnic_rq_free(rq);
+ dev_err(enic, "error in allocation of cq for rq\n");
+ }
+
+ return err;
+}
+
+void enic_free_wq(void *txq)
+{
+ struct vnic_wq *wq = (struct vnic_wq *)txq;
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ vnic_wq_free(wq);
+ vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
+}
+
+int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, uint16_t nb_desc)
+{
+ int err;
+ struct vnic_wq *wq = &enic->wq[queue_idx];
+ unsigned int cq_index = enic_cq_wq(enic, queue_idx);
+
+ wq->socket_id = socket_id;
+ if (nb_desc) {
+ if (nb_desc > enic->config.wq_desc_count) {
+ dev_warning(enic,
+				"WQ %d - number of tx desc in cmd line (%d) "\
+				"is greater than that in the UCSM/CIMC adapter "\
+ "policy. Applying the value in the adapter "\
+ "policy (%d)\n",
+ queue_idx, nb_desc, enic->config.wq_desc_count);
+ } else if (nb_desc != enic->config.wq_desc_count) {
+ enic->config.wq_desc_count = nb_desc;
+ dev_info(enic,
+ "TX Queues - effective number of descs:%d\n",
+ nb_desc);
+ }
+ }
+
+ /* Allocate queue resources */
+ err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
+ enic->config.wq_desc_count,
+ sizeof(struct wq_enet_desc));
+ if (err) {
+ dev_err(enic, "error in allocation of wq\n");
+ return err;
+ }
+
+ err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
+ socket_id, enic->config.wq_desc_count,
+ sizeof(struct cq_enet_wq_desc));
+ if (err) {
+ vnic_wq_free(wq);
+ dev_err(enic, "error in allocation of cq for wq\n");
+ }
+
+ return err;
+}
+
+int enic_disable(struct enic *enic)
+{
+ unsigned int i;
+ int err;
+
+ vnic_intr_mask(&enic->intr);
+ (void)vnic_intr_masked(&enic->intr); /* flush write */
+
+ vnic_dev_disable(enic->vdev);
+
+ enic_clsf_destroy(enic);
+
+ if (!enic_is_sriov_vf(enic))
+ vnic_dev_del_addr(enic->vdev, enic->mac_addr);
+
+ for (i = 0; i < enic->wq_count; i++) {
+ err = vnic_wq_disable(&enic->wq[i]);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < enic->rq_count; i++) {
+ err = vnic_rq_disable(&enic->rq[i]);
+ if (err)
+ return err;
+ }
+
+ vnic_dev_set_reset_flag(enic->vdev, 1);
+ vnic_dev_notify_unset(enic->vdev);
+
+ for (i = 0; i < enic->wq_count; i++)
+ vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
+ for (i = 0; i < enic->rq_count; i++)
+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ for (i = 0; i < enic->cq_count; i++)
+ vnic_cq_clean(&enic->cq[i]);
+ vnic_intr_clean(&enic->intr);
+
+ return 0;
+}
+
+static int enic_dev_wait(struct vnic_dev *vdev,
+ int (*start)(struct vnic_dev *, int),
+ int (*finished)(struct vnic_dev *, int *),
+ int arg)
+{
+ int done;
+ int err;
+ int i;
+
+ err = start(vdev, arg);
+ if (err)
+ return err;
+
+ /* Wait for func to complete...2 seconds max */
+ for (i = 0; i < 2000; i++) {
+ err = finished(vdev, &done);
+ if (err)
+ return err;
+ if (done)
+ return 0;
+ usleep(1000);
+ }
+ return -ETIMEDOUT;
+}
+
+static int enic_dev_open(struct enic *enic)
+{
+ int err;
+
+ err = enic_dev_wait(enic->vdev, vnic_dev_open,
+ vnic_dev_open_done, 0);
+ if (err)
+ dev_err(enic_get_dev(enic),
+ "vNIC device open failed, err %d\n", err);
+
+ return err;
+}
+
+static int enic_set_rsskey(struct enic *enic)
+{
+ dma_addr_t rss_key_buf_pa;
+ union vnic_rss_key *rss_key_buf_va = NULL;
+ static union vnic_rss_key rss_key = {
+ .key[0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
+ .key[1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
+ .key[2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
+ .key[3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
+ };
+ int err;
+ u8 name[NAME_MAX];
+
+ snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
+ rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
+ &rss_key_buf_pa, name);
+ if (!rss_key_buf_va)
+ return -ENOMEM;
+
+ rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
+
+ err = enic_set_rss_key(enic,
+ rss_key_buf_pa,
+ sizeof(union vnic_rss_key));
+
+ enic_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
+ rss_key_buf_va, rss_key_buf_pa);
+
+ return err;
+}
+
+static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
+{
+ dma_addr_t rss_cpu_buf_pa;
+ union vnic_rss_cpu *rss_cpu_buf_va = NULL;
+ int i;
+ int err;
+ u8 name[NAME_MAX];
+
+ snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
+ rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
+ &rss_cpu_buf_pa, name);
+ if (!rss_cpu_buf_va)
+ return -ENOMEM;
+
+ for (i = 0; i < (1 << rss_hash_bits); i++)
+ (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
+
+ err = enic_set_rss_cpu(enic,
+ rss_cpu_buf_pa,
+ sizeof(union vnic_rss_cpu));
+
+ enic_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
+ rss_cpu_buf_va, rss_cpu_buf_pa);
+
+ return err;
+}
+
+static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
+ u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
+{
+ const u8 tso_ipid_split_en = 0;
+ int err;
+
+ /* Enable VLAN tag stripping */
+
+ err = enic_set_nic_cfg(enic,
+ rss_default_cpu, rss_hash_type,
+ rss_hash_bits, rss_base_cpu,
+ rss_enable, tso_ipid_split_en,
+ enic->ig_vlan_strip_en);
+
+ return err;
+}
+
+int enic_set_rss_nic_cfg(struct enic *enic)
+{
+ const u8 rss_default_cpu = 0;
+ const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
+ NIC_CFG_RSS_HASH_TYPE_IPV6 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ const u8 rss_hash_bits = 7;
+ const u8 rss_base_cpu = 0;
+ u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
+
+ if (rss_enable) {
+ if (!enic_set_rsskey(enic)) {
+ if (enic_set_rsscpu(enic, rss_hash_bits)) {
+ rss_enable = 0;
+ dev_warning(enic, "RSS disabled, "\
+					"Failed to set RSS cpu indirection table.\n");
+ }
+ } else {
+ rss_enable = 0;
+ dev_warning(enic,
+ "RSS disabled, Failed to set RSS key.\n");
+ }
+ }
+
+ return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
+ rss_hash_bits, rss_base_cpu, rss_enable);
+}
+
+int enic_setup_finish(struct enic *enic)
+{
+ int ret;
+
+ ret = enic_set_rss_nic_cfg(enic);
+ if (ret) {
+ dev_err(enic, "Failed to config nic, aborting.\n");
+ return -1;
+ }
+
+ vnic_dev_add_addr(enic->vdev, enic->mac_addr);
+
+ /* Default conf */
+ vnic_dev_packet_filter(enic->vdev,
+ 1 /* directed */,
+ 1 /* multicast */,
+ 1 /* broadcast */,
+ 0 /* promisc */,
+ 1 /* allmulti */);
+
+ enic->promisc = 0;
+ enic->allmulti = 1;
+
+ return 0;
+}
+
+void enic_add_packet_filter(struct enic *enic)
+{
+ /* Args -> directed, multicast, broadcast, promisc, allmulti */
+ vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
+ enic->promisc, enic->allmulti);
+}
+
+int enic_get_link_status(struct enic *enic)
+{
+ return vnic_dev_link_status(enic->vdev);
+}
+
+static void enic_dev_deinit(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+ if (eth_dev->data->mac_addrs)
+ rte_free(eth_dev->data->mac_addrs);
+}
+
+
+int enic_set_vnic_res(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+ if ((enic->rq_count < eth_dev->data->nb_rx_queues) ||
+ (enic->wq_count < eth_dev->data->nb_tx_queues)) {
+ dev_err(dev, "Not enough resources configured, aborting\n");
+ return -1;
+ }
+
+ enic->rq_count = eth_dev->data->nb_rx_queues;
+ enic->wq_count = eth_dev->data->nb_tx_queues;
+ if (enic->cq_count < (enic->rq_count + enic->wq_count)) {
+ dev_err(dev, "Not enough resources configured, aborting\n");
+ return -1;
+ }
+
+ enic->cq_count = enic->rq_count + enic->wq_count;
+ return 0;
+}
+
+static int enic_dev_init(struct enic *enic)
+{
+ int err;
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+ vnic_dev_intr_coal_timer_info_default(enic->vdev);
+
+ /* Get vNIC configuration
+ */
+ err = enic_get_vnic_config(enic);
+ if (err) {
+ dev_err(dev, "Get vNIC configuration failed, aborting\n");
+ return err;
+ }
+
+ eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ dev_err(enic, "mac addr storage alloc failed, aborting.\n");
+ return -1;
+ }
+ ether_addr_copy((struct ether_addr *) enic->mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+
+ /* Get available resource counts
+ */
+ enic_get_res_counts(enic);
+
+ vnic_dev_set_reset_flag(enic->vdev, 0);
+
+ return 0;
+
+}
+
+int enic_probe(struct enic *enic)
+{
+ struct rte_pci_device *pdev = enic->pdev;
+ int err = -1;
+
+ dev_info(enic, " Initializing ENIC PMD version %s\n", DRV_VERSION);
+
+ enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
+ enic->bar0.len = pdev->mem_resource[0].len;
+
+ /* Register vNIC device */
+ enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
+ if (!enic->vdev) {
+ dev_err(enic, "vNIC registration failed, aborting\n");
+ goto err_out;
+ }
+
+ vnic_register_cbacks(enic->vdev,
+ enic_alloc_consistent,
+ enic_free_consistent);
+
+ /* Issue device open to get device in known state */
+ err = enic_dev_open(enic);
+ if (err) {
+ dev_err(enic, "vNIC dev open failed, aborting\n");
+ goto err_out_unregister;
+ }
+
+ /* Set ingress vlan rewrite mode before vnic initialization */
+ err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+ IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+ if (err) {
+ dev_err(enic,
+ "Failed to set ingress vlan rewrite mode, aborting.\n");
+ goto err_out_dev_close;
+ }
+
+ /* Issue device init to initialize the vnic-to-switch link.
+ * We'll start with carrier off and wait for link UP
+ * notification later to turn on carrier. We don't need
+ * to wait here for the vnic-to-switch link initialization
+ * to complete; link UP notification is the indication that
+ * the process is complete.
+ */
+
+ err = vnic_dev_init(enic->vdev, 0);
+ if (err) {
+ dev_err(enic, "vNIC dev init failed, aborting\n");
+ goto err_out_dev_close;
+ }
+
+ err = enic_dev_init(enic);
+ if (err) {
+ dev_err(enic, "Device initialization failed, aborting\n");
+ goto err_out_dev_close;
+ }
+
+ return 0;
+
+err_out_dev_close:
+ vnic_dev_close(enic->vdev);
+err_out_unregister:
+ vnic_dev_unregister(enic->vdev);
+err_out:
+ return err;
+}
+
+void enic_remove(struct enic *enic)
+{
+ enic_dev_deinit(enic);
+ vnic_dev_close(enic->vdev);
+ vnic_dev_unregister(enic->vdev);
+}
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_res.c b/src/dpdk_lib18/librte_pmd_enic/enic_res.c
new file mode 100755
index 00000000..12a337c0
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/enic_res.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: enic_res.c 171146 2014-05-02 07:08:20Z ssujith $"
+
+#include "enic_compat.h"
+#include "rte_ethdev.h"
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+#include "enic_res.h"
+#include "enic.h"
+
+int enic_get_vnic_config(struct enic *enic)
+{
+ struct vnic_enet_config *c = &enic->config;
+ int err;
+
+ err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr);
+ if (err) {
+ dev_err(enic_get_dev(enic),
+ "Error getting MAC addr, %d\n", err);
+ return err;
+ }
+
+#define GET_CONFIG(m) \
+ do { \
+ err = vnic_dev_spec(enic->vdev, \
+ offsetof(struct vnic_enet_config, m), \
+ sizeof(c->m), &c->m); \
+ if (err) { \
+ dev_err(enic_get_dev(enic), \
+ "Error getting %s, %d\n", #m, err); \
+ return err; \
+ } \
+ } while (0)
+
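+ /*
+  * Illustrative expansion (for clarity; this mirrors the macro above
+  * rather than adding new behaviour): GET_CONFIG(mtu) reads the mtu
+  * field of struct vnic_enet_config from the device, roughly:
+  *
+  *   err = vnic_dev_spec(enic->vdev,
+  *           offsetof(struct vnic_enet_config, mtu),
+  *           sizeof(c->mtu), &c->mtu);
+  *   if (err) {
+  *           dev_err(enic_get_dev(enic),
+  *                   "Error getting %s, %d\n", "mtu", err);
+  *           return err;
+  *   }
+  */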
+ GET_CONFIG(flags);
+ GET_CONFIG(wq_desc_count);
+ GET_CONFIG(rq_desc_count);
+ GET_CONFIG(mtu);
+ GET_CONFIG(intr_timer_type);
+ GET_CONFIG(intr_mode);
+ GET_CONFIG(intr_timer_usec);
+ GET_CONFIG(loop_tag);
+ GET_CONFIG(num_arfs);
+
+ c->wq_desc_count =
+ min_t(u32, ENIC_MAX_WQ_DESCS,
+ max_t(u32, ENIC_MIN_WQ_DESCS,
+ c->wq_desc_count));
+ c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
+
+ c->rq_desc_count =
+ min_t(u32, ENIC_MAX_RQ_DESCS,
+ max_t(u32, ENIC_MIN_RQ_DESCS,
+ c->rq_desc_count));
+ c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
+
+ if (c->mtu == 0)
+ c->mtu = 1500;
+ c->mtu = min_t(u16, ENIC_MAX_MTU,
+ max_t(u16, ENIC_MIN_MTU,
+ c->mtu));
+
+ c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+
+ dev_info(enic_get_dev(enic),
+ "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
+ "wq/rq %d/%d mtu %d\n",
+ enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
+ enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
+ c->wq_desc_count, c->rq_desc_count, c->mtu);
+ dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
+ "rss %s intr mode %s type %s timer %d usec "
+ "loopback tag 0x%04x\n",
+ ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
+ ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
+ ENIC_SETTING(enic, RSS) ? "yes" : "no",
+ c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
+ c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
+ c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
+ "unknown",
+ c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" :
+ c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" :
+ "unknown",
+ c->intr_timer_usec,
+ c->loop_tag);
+
+ return 0;
+}
+
+int enic_add_vlan(struct enic *enic, u16 vlanid)
+{
+ u64 a0 = vlanid, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
+ if (err)
+ dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err);
+
+ return err;
+}
+
+int enic_del_vlan(struct enic *enic, u16 vlanid)
+{
+ u64 a0 = vlanid, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
+ if (err)
+ dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err);
+
+ return err;
+}
+
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en)
+{
+ u64 a0, a1;
+ u32 nic_cfg;
+ int wait = 1000;
+
+ vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
+ rss_hash_type, rss_hash_bits, rss_base_cpu,
+ rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
+
+ a0 = nic_cfg;
+ a1 = 0;
+
+ return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+}
+
+int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
+{
+ u64 a0 = (u64)key_pa, a1 = len;
+ int wait = 1000;
+
+ return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait);
+}
+
+int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len)
+{
+ u64 a0 = (u64)cpu_pa, a1 = len;
+ int wait = 1000;
+
+ return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait);
+}
+
+void enic_free_vnic_resources(struct enic *enic)
+{
+ unsigned int i;
+
+ for (i = 0; i < enic->wq_count; i++)
+ vnic_wq_free(&enic->wq[i]);
+ for (i = 0; i < enic->rq_count; i++)
+ vnic_rq_free(&enic->rq[i]);
+ for (i = 0; i < enic->cq_count; i++)
+ vnic_cq_free(&enic->cq[i]);
+ vnic_intr_free(&enic->intr);
+}
+
+void enic_get_res_counts(struct enic *enic)
+{
+ enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+ enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+ enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+ enic->intr_count = vnic_dev_get_res_count(enic->vdev,
+ RES_TYPE_INTR_CTRL);
+
+ dev_info(enic_get_dev(enic),
+ "vNIC resources avail: wq %d rq %d cq %d intr %d\n",
+ enic->wq_count, enic->rq_count,
+ enic->cq_count, enic->intr_count);
+}
diff --git a/src/dpdk_lib18/librte_pmd_enic/enic_res.h b/src/dpdk_lib18/librte_pmd_enic/enic_res.h
new file mode 100755
index 00000000..ea60f6a3
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/enic_res.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: enic_res.h 173137 2014-05-16 03:27:22Z sanpilla $"
+
+#ifndef _ENIC_RES_H_
+#define _ENIC_RES_H_
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+
+#define ENIC_MIN_WQ_DESCS 64
+#define ENIC_MAX_WQ_DESCS 4096
+#define ENIC_MIN_RQ_DESCS 64
+#define ENIC_MAX_RQ_DESCS 4096
+
+#define ENIC_MIN_MTU 68
+#define ENIC_MAX_MTU 9000
+
+#define ENIC_MULTICAST_PERFECT_FILTERS 32
+#define ENIC_UNICAST_PERFECT_FILTERS 32
+
+#define ENIC_NON_TSO_MAX_DESC 16
+
+#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
+
+static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr, unsigned int len,
+ unsigned int mss_or_csum_offset, unsigned int hdr_len,
+ int vlan_tag_insert, unsigned int vlan_tag,
+ int offload_mode, int cq_entry, int sop, int eop, int loopback)
+{
+ struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+ u8 desc_skip_cnt = 1;
+ u8 compressed_send = 0;
+ u64 wrid = 0;
+
+ wq_enet_desc_enc(desc,
+ (u64)dma_addr | VNIC_PADDR_TARGET,
+ (u16)len,
+ (u16)mss_or_csum_offset,
+ (u16)hdr_len, (u8)offload_mode,
+ (u8)eop, (u8)cq_entry,
+ 0, /* fcoe_encap */
+ (u8)vlan_tag_insert,
+ (u16)vlan_tag,
+ (u8)loopback);
+
+ vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
+ (u8)cq_entry, compressed_send, wrid);
+}
+
+static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr, unsigned int len,
+ int eop, int loopback)
+{
+ enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+ 0, 0, 0, 0, 0,
+ eop, 0 /* !SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
+ dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
+ unsigned int vlan_tag, int eop, int loopback)
+{
+ enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+ 0, 0, vlan_tag_insert, vlan_tag,
+ WQ_ENET_OFFLOAD_MODE_CSUM,
+ eop, 1 /* SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr, unsigned int len,
+ int ip_csum, int tcpudp_csum, int vlan_tag_insert,
+ unsigned int vlan_tag, int eop, int loopback)
+{
+ enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+ (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
+ 0, vlan_tag_insert, vlan_tag,
+ WQ_ENET_OFFLOAD_MODE_CSUM,
+ eop, 1 /* SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr, unsigned int len,
+ unsigned int csum_offset, unsigned int hdr_len,
+ int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
+{
+ enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+ csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
+ WQ_ENET_OFFLOAD_MODE_CSUM_L4,
+ eop, 1 /* SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr, unsigned int len,
+ unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
+ unsigned int vlan_tag, int eop, int loopback)
+{
+ enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+ mss, hdr_len, vlan_tag_insert, vlan_tag,
+ WQ_ENET_OFFLOAD_MODE_TSO,
+ eop, 1 /* SOP */, eop, loopback);
+}
+
+static inline void enic_queue_rq_desc(struct vnic_rq *rq,
+ void *os_buf, unsigned int os_buf_index,
+ dma_addr_t dma_addr, unsigned int len)
+{
+ struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+ u64 wrid = 0;
+ u8 type = os_buf_index ?
+ RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
+
+ rq_enet_desc_enc(desc,
+ (u64)dma_addr | VNIC_PADDR_TARGET,
+ type, (u16)len);
+
+ vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
+}
+
+struct enic;
+
+int enic_get_vnic_config(struct enic *);
+int enic_add_vlan(struct enic *enic, u16 vlanid);
+int enic_del_vlan(struct enic *enic, u16 vlanid);
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en);
+int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len);
+int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len);
+void enic_get_res_counts(struct enic *enic);
+void enic_init_vnic_resources(struct enic *enic);
+int enic_alloc_vnic_resources(struct enic *);
+void enic_free_vnic_resources(struct enic *);
+
+#endif /* _ENIC_RES_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/cq_desc.h b/src/dpdk_lib18/librte_pmd_enic/vnic/cq_desc.h
new file mode 100755
index 00000000..c4189679
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/cq_desc.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: cq_desc.h 129574 2013-04-26 22:11:14Z rfaucett $"
+
+#ifndef _CQ_DESC_H_
+#define _CQ_DESC_H_
+
+/*
+ * Completion queue descriptor types
+ */
+enum cq_desc_types {
+ CQ_DESC_TYPE_WQ_ENET = 0,
+ CQ_DESC_TYPE_DESC_COPY = 1,
+ CQ_DESC_TYPE_WQ_EXCH = 2,
+ CQ_DESC_TYPE_RQ_ENET = 3,
+ CQ_DESC_TYPE_RQ_FCP = 4,
+ CQ_DESC_TYPE_IOMMU_MISS = 5,
+ CQ_DESC_TYPE_SGL = 6,
+ CQ_DESC_TYPE_CLASSIFIER = 7,
+ CQ_DESC_TYPE_TEST = 127,
+};
+
+/* Completion queue descriptor: 16B
+ *
+ * All completion queues have this basic layout. The
+ * type_specfic area is unique for each completion
+ * queue type.
+ */
+struct cq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 type_specfic[11];
+ u8 type_color;
+};
+
+#define CQ_DESC_TYPE_BITS 4
+#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
+#define CQ_DESC_COLOR_MASK 1
+#define CQ_DESC_COLOR_SHIFT 7
+#define CQ_DESC_Q_NUM_BITS 10
+#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
+#define CQ_DESC_COMP_NDX_BITS 12
+#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
+
+static inline void cq_color_enc(struct cq_desc *desc, const u8 color)
+{
+ if (color)
+ desc->type_color |= (1 << CQ_DESC_COLOR_SHIFT);
+ else
+ desc->type_color &= ~(1 << CQ_DESC_COLOR_SHIFT);
+}
+
+static inline void cq_desc_enc(struct cq_desc *desc,
+ const u8 type, const u8 color, const u16 q_number,
+ const u16 completed_index)
+{
+ desc->type_color = (type & CQ_DESC_TYPE_MASK) |
+ ((color & CQ_DESC_COLOR_MASK) << CQ_DESC_COLOR_SHIFT);
+ desc->q_number = cpu_to_le16(q_number & CQ_DESC_Q_NUM_MASK);
+ desc->completed_index = cpu_to_le16(completed_index &
+ CQ_DESC_COMP_NDX_MASK);
+}
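+
+/*
+ * Worked example (hypothetical values, for illustration only): encoding
+ * type = CQ_DESC_TYPE_RQ_ENET (3), color = 1, q_number = 5 and
+ * completed_index = 10 yields type_color = 0x83 (type in the low 4 bits,
+ * color in bit 7), q_number = 0x0005 and completed_index = 0x000a, the
+ * latter two stored little-endian.
+ */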
+
+static inline void cq_desc_dec(const struct cq_desc *desc_arg,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+static inline void cq_color_dec(const struct cq_desc *desc_arg, u8 *color)
+{
+ volatile const struct cq_desc *desc = desc_arg;
+
+ *color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+}
+
+#endif /* _CQ_DESC_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/cq_enet_desc.h b/src/dpdk_lib18/librte_pmd_enic/vnic/cq_enet_desc.h
new file mode 100755
index 00000000..669a2b50
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/cq_enet_desc.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: cq_enet_desc.h 160468 2014-02-18 09:50:15Z gvaradar $"
+
+#ifndef _CQ_ENET_DESC_H_
+#define _CQ_ENET_DESC_H_
+
+#include "cq_desc.h"
+
+/* Ethernet completion queue descriptor: 16B */
+struct cq_enet_wq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 reserved[11];
+ u8 type_color;
+};
+
+static inline void cq_enet_wq_desc_enc(struct cq_enet_wq_desc *desc,
+ u8 type, u8 color, u16 q_number, u16 completed_index)
+{
+ cq_desc_enc((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+}
+
+static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+}
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_desc {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 type_color;
+};
+
+#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
+#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
+#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
+ ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
+#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
+
+#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
+
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
+ ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS 12
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK \
+ ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_CFI_MASK (0x1 << 12)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS 3
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_MASK \
+ ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_SHIFT 13
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 8
+#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
+#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
+
+#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
+#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
+
+static inline void cq_enet_rq_desc_enc(struct cq_enet_rq_desc *desc,
+ u8 type, u8 color, u16 q_number, u16 completed_index,
+ u8 ingress_port, u8 fcoe, u8 eop, u8 sop, u8 rss_type, u8 csum_not_calc,
+ u32 rss_hash, u16 bytes_written, u8 packet_error, u8 vlan_stripped,
+ u16 vlan, u16 checksum, u8 fcoe_sof, u8 fcoe_fc_crc_ok,
+ u8 fcoe_enc_error, u8 fcoe_eof, u8 tcp_udp_csum_ok, u8 udp, u8 tcp,
+ u8 ipv4_csum_ok, u8 ipv6, u8 ipv4, u8 ipv4_fragment, u8 fcs_ok)
+{
+ cq_desc_enc((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+
+ desc->completed_index_flags |= cpu_to_le16(
+ (ingress_port ? CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT : 0) |
+ (fcoe ? CQ_ENET_RQ_DESC_FLAGS_FCOE : 0) |
+ (eop ? CQ_ENET_RQ_DESC_FLAGS_EOP : 0) |
+ (sop ? CQ_ENET_RQ_DESC_FLAGS_SOP : 0));
+
+ desc->q_number_rss_type_flags |= cpu_to_le16(
+ ((rss_type & CQ_ENET_RQ_DESC_RSS_TYPE_MASK) <<
+ CQ_DESC_Q_NUM_BITS) |
+ (csum_not_calc ? CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC : 0));
+
+ desc->rss_hash = cpu_to_le32(rss_hash);
+
+ desc->bytes_written_flags = cpu_to_le16(
+ (bytes_written & CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK) |
+ (packet_error ? CQ_ENET_RQ_DESC_FLAGS_TRUNCATED : 0) |
+ (vlan_stripped ? CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED : 0));
+
+ desc->vlan = cpu_to_le16(vlan);
+
+ if (fcoe) {
+ desc->checksum_fcoe = cpu_to_le16(
+ (fcoe_sof & CQ_ENET_RQ_DESC_FCOE_SOF_MASK) |
+ ((fcoe_eof & CQ_ENET_RQ_DESC_FCOE_EOF_MASK) <<
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT));
+ } else {
+ desc->checksum_fcoe = cpu_to_le16(checksum);
+ }
+
+ desc->flags =
+ (tcp_udp_csum_ok ? CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK : 0) |
+ (udp ? CQ_ENET_RQ_DESC_FLAGS_UDP : 0) |
+ (tcp ? CQ_ENET_RQ_DESC_FLAGS_TCP : 0) |
+ (ipv4_csum_ok ? CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK : 0) |
+ (ipv6 ? CQ_ENET_RQ_DESC_FLAGS_IPV6 : 0) |
+ (ipv4 ? CQ_ENET_RQ_DESC_FLAGS_IPV4 : 0) |
+ (ipv4_fragment ? CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT : 0) |
+ (fcs_ok ? CQ_ENET_RQ_DESC_FLAGS_FCS_OK : 0) |
+ (fcoe_fc_crc_ok ? CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK : 0) |
+ (fcoe_enc_error ? CQ_ENET_RQ_DESC_FCOE_ENC_ERROR : 0);
+}
+
+static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
+ u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
+ u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
+ u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
+
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ /*
+ * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
+ */
+ *vlan_tci = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+#endif /* _CQ_ENET_DESC_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/rq_enet_desc.h b/src/dpdk_lib18/librte_pmd_enic/vnic/rq_enet_desc.h
new file mode 100755
index 00000000..f38ff2a1
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/rq_enet_desc.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: rq_enet_desc.h 59839 2010-09-27 20:36:31Z roprabhu $"
+
+#ifndef _RQ_ENET_DESC_H_
+#define _RQ_ENET_DESC_H_
+
+/* Ethernet receive queue descriptor: 16B */
+struct rq_enet_desc {
+ __le64 address;
+ __le16 length_type;
+ u8 reserved[6];
+};
+
+enum rq_enet_type_types {
+ RQ_ENET_TYPE_ONLY_SOP = 0,
+ RQ_ENET_TYPE_NOT_SOP = 1,
+ RQ_ENET_TYPE_RESV2 = 2,
+ RQ_ENET_TYPE_RESV3 = 3,
+};
+
+#define RQ_ENET_ADDR_BITS 64
+#define RQ_ENET_LEN_BITS 14
+#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
+#define RQ_ENET_TYPE_BITS 2
+#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
+
+static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+ u64 address, u8 type, u16 length)
+{
+ desc->address = cpu_to_le64(address);
+ desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
+ ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
+}
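+
+/*
+ * Worked example (hypothetical values): posting a 2048-byte buffer as the
+ * start of a packet, rq_enet_desc_enc(desc, addr, RQ_ENET_TYPE_ONLY_SOP,
+ * 2048) stores length_type = 0x0800 (length in bits 0-13, type 0 in bits
+ * 14-15); with RQ_ENET_TYPE_NOT_SOP the encoded value would be 0x4800.
+ */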
+
+static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
+ u64 *address, u8 *type, u16 *length)
+{
+ *address = le64_to_cpu(desc->address);
+ *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
+ *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
+ RQ_ENET_TYPE_MASK);
+}
+
+#endif /* _RQ_ENET_DESC_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.c b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.c
new file mode 100755
index 00000000..cda97e4e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_cq.c 171146 2014-05-02 07:08:20Z ssujith $"
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count,
+ unsigned int desc_size)
+{
+ int mem_size;
+
+ mem_size = vnic_dev_desc_ring_size(&cq->ring, desc_count, desc_size);
+
+ return mem_size;
+}
+
+void vnic_cq_free(struct vnic_cq *cq)
+{
+ vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
+
+ cq->ctrl = NULL;
+}
+
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+ unsigned int socket_id,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+ char res_name[NAME_MAX];
+ static int instance;
+
+ cq->index = index;
+ cq->vdev = vdev;
+
+ cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
+ if (!cq->ctrl) {
+ pr_err("Failed to hook CQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ snprintf(res_name, sizeof(res_name), "%d-cq-%d", instance++, index);
+ err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size,
+ socket_id, res_name);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int cq_message_enable,
+ unsigned int interrupt_offset, u64 cq_message_addr)
+{
+ u64 paddr;
+
+ paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &cq->ctrl->ring_base);
+ iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
+ iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
+ iowrite32(color_enable, &cq->ctrl->color_enable);
+ iowrite32(cq_head, &cq->ctrl->cq_head);
+ iowrite32(cq_tail, &cq->ctrl->cq_tail);
+ iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
+ iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
+ iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
+ iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
+ iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
+ writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+
+ cq->interrupt_offset = interrupt_offset;
+}
+
+void vnic_cq_clean(struct vnic_cq *cq)
+{
+ cq->to_clean = 0;
+ cq->last_color = 0;
+
+ iowrite32(0, &cq->ctrl->cq_head);
+ iowrite32(0, &cq->ctrl->cq_tail);
+ iowrite32(1, &cq->ctrl->cq_tail_color);
+
+ vnic_dev_clear_desc_ring(&cq->ring);
+}
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.h b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.h
new file mode 100755
index 00000000..0928d720
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_cq.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_cq.h 173398 2014-05-19 09:17:02Z gvaradar $"
+
+#ifndef _VNIC_CQ_H_
+#define _VNIC_CQ_H_
+
+#include <rte_mbuf.h>
+
+#include "cq_desc.h"
+#include "vnic_dev.h"
+
+/* Completion queue control */
+struct vnic_cq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 flow_control_enable; /* 0x10 */
+ u32 pad1;
+ u32 color_enable; /* 0x18 */
+ u32 pad2;
+ u32 cq_head; /* 0x20 */
+ u32 pad3;
+ u32 cq_tail; /* 0x28 */
+ u32 pad4;
+ u32 cq_tail_color; /* 0x30 */
+ u32 pad5;
+ u32 interrupt_enable; /* 0x38 */
+ u32 pad6;
+ u32 cq_entry_enable; /* 0x40 */
+ u32 pad7;
+ u32 cq_message_enable; /* 0x48 */
+ u32 pad8;
+ u32 interrupt_offset; /* 0x50 */
+ u32 pad9;
+ u64 cq_message_addr; /* 0x58 */
+ u32 pad10;
+};
+
+#ifdef ENIC_AIC
+struct vnic_rx_bytes_counter {
+ unsigned int small_pkt_bytes_cnt;
+ unsigned int large_pkt_bytes_cnt;
+};
+#endif
+
+struct vnic_cq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ unsigned int to_clean;
+ unsigned int last_color;
+ unsigned int interrupt_offset;
+#ifdef ENIC_AIC
+ struct vnic_rx_bytes_counter pkt_size_counter;
+ unsigned int cur_rx_coal_timeval;
+ unsigned int tobe_rx_coal_timeval;
+ ktime_t prev_ts;
+#endif
+};
+
+static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
+ unsigned int work_to_do,
+ int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index, void *opaque),
+ void *opaque)
+{
+ struct cq_desc *cq_desc;
+ unsigned int work_done = 0;
+ u16 q_number, completed_index;
+ u8 type, color;
+ struct rte_mbuf **rx_pkts = opaque;
+ unsigned int ret;
+
+ cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ cq_desc_dec(cq_desc, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+ if (opaque)
+ opaque = (void *)&(rx_pkts[work_done]);
+
+ ret = (*q_service)(cq->vdev, cq_desc, type,
+ q_number, completed_index, opaque);
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
+ }
+
+ cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ cq_desc_dec(cq_desc, &type, &color,
+ &q_number, &completed_index);
+
+ if (ret)
+ work_done++;
+ if (work_done >= work_to_do)
+ break;
+ }
+
+ return work_done;
+}
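+
+/*
+ * Usage sketch (hypothetical caller and callback names, shown only to
+ * illustrate the contract): a receive path would drain up to 'budget'
+ * completions with something like
+ *
+ *   work = vnic_cq_service(&enic->cq[cq_idx], budget,
+ *                          my_rq_service, (void *)rx_pkts);
+ *
+ * where my_rq_service() decodes one cq_desc and returns non-zero for each
+ * completion that produced a packet. vnic_cq_service() stops at the first
+ * descriptor whose color bit still equals last_color (i.e. not yet written
+ * by hardware) or once work_to_do completions have been counted.
+ */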
+
+void vnic_cq_free(struct vnic_cq *cq);
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+ unsigned int socket_id,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int message_enable,
+ unsigned int interrupt_offset, u64 message_addr);
+void vnic_cq_clean(struct vnic_cq *cq);
+int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count,
+ unsigned int desc_size);
+
+#endif /* _VNIC_CQ_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.c b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.c
new file mode 100755
index 00000000..6407994d
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.c
@@ -0,0 +1,1054 @@
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id$"
+
+#include <rte_memzone.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+
+#include "vnic_dev.h"
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+#include "vnic_stats.h"
+
+
+enum vnic_proxy_type {
+ PROXY_NONE,
+ PROXY_BY_BDF,
+ PROXY_BY_INDEX,
+};
+
+struct vnic_res {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned int count;
+};
+
+struct vnic_intr_coal_timer_info {
+ u32 mul;
+ u32 div;
+ u32 max_usec;
+};
+
+struct vnic_dev {
+ void *priv;
+ struct rte_pci_device *pdev;
+ struct vnic_res res[RES_TYPE_MAX];
+ enum vnic_dev_intr_mode intr_mode;
+ struct vnic_devcmd __iomem *devcmd;
+ struct vnic_devcmd_notify *notify;
+ struct vnic_devcmd_notify notify_copy;
+ dma_addr_t notify_pa;
+ u32 notify_sz;
+ dma_addr_t linkstatus_pa;
+ struct vnic_stats *stats;
+ dma_addr_t stats_pa;
+ struct vnic_devcmd_fw_info *fw_info;
+ dma_addr_t fw_info_pa;
+ enum vnic_proxy_type proxy;
+ u32 proxy_index;
+ u64 args[VNIC_DEVCMD_NARGS];
+ u16 split_hdr_size;
+ int in_reset;
+ struct vnic_intr_coal_timer_info intr_coal_timer_info;
+ void *(*alloc_consistent)(void *priv, size_t size,
+ dma_addr_t *dma_handle, u8 *name);
+ void (*free_consistent)(struct rte_pci_device *hwdev,
+ size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+};
+
+#define VNIC_MAX_RES_HDR_SIZE \
+ (sizeof(struct vnic_resource_header) + \
+ sizeof(struct vnic_resource) * RES_TYPE_MAX)
+#define VNIC_RES_STRIDE 128
+
+void *vnic_dev_priv(struct vnic_dev *vdev)
+{
+ return vdev->priv;
+}
+
+void vnic_register_cbacks(struct vnic_dev *vdev,
+ void *(*alloc_consistent)(void *priv, size_t size,
+ dma_addr_t *dma_handle, u8 *name),
+ void (*free_consistent)(struct rte_pci_device *hwdev,
+ size_t size, void *vaddr,
+ dma_addr_t dma_handle))
+{
+ vdev->alloc_consistent = alloc_consistent;
+ vdev->free_consistent = free_consistent;
+}
+
+static int vnic_dev_discover_res(struct vnic_dev *vdev,
+ struct vnic_dev_bar *bar, unsigned int num_bars)
+{
+ struct vnic_resource_header __iomem *rh;
+ struct mgmt_barmap_hdr __iomem *mrh;
+ struct vnic_resource __iomem *r;
+ u8 type;
+
+ if (num_bars == 0)
+ return -EINVAL;
+
+ if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
+ pr_err("vNIC BAR0 res hdr length error\n");
+ return -EINVAL;
+ }
+
+ rh = bar->vaddr;
+ mrh = bar->vaddr;
+ if (!rh) {
+ pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+ return -EINVAL;
+ }
+
+ /* Check for mgmt vnic in addition to normal vnic */
+ if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
+ (ioread32(&rh->version) != VNIC_RES_VERSION)) {
+ if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
+ (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
+ pr_err("vNIC BAR0 res magic/version error " \
+ "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+ VNIC_RES_MAGIC, VNIC_RES_VERSION,
+ MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
+ ioread32(&rh->magic), ioread32(&rh->version));
+ return -EINVAL;
+ }
+ }
+
+ if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
+ r = (struct vnic_resource __iomem *)(mrh + 1);
+ else
+ r = (struct vnic_resource __iomem *)(rh + 1);
+
+
+ while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
+ u8 bar_num = ioread8(&r->bar);
+ u32 bar_offset = ioread32(&r->bar_offset);
+ u32 count = ioread32(&r->count);
+ u32 len;
+
+ r++;
+
+ if (bar_num >= num_bars)
+ continue;
+
+ if (!bar[bar_num].len || !bar[bar_num].vaddr)
+ continue;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ /* each count is stride bytes long */
+ len = count * VNIC_RES_STRIDE;
+ if (len + bar_offset > bar[bar_num].len) {
+ pr_err("vNIC BAR0 resource %d " \
+ "out-of-bounds, offset 0x%x + " \
+ "size 0x%x > bar len 0x%lx\n",
+ type, bar_offset,
+ len,
+ bar[bar_num].len);
+ return -EINVAL;
+ }
+ break;
+ case RES_TYPE_INTR_PBA_LEGACY:
+ case RES_TYPE_DEVCMD:
+ len = count;
+ break;
+ default:
+ continue;
+ }
+
+ vdev->res[type].count = count;
+ vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
+ bar_offset;
+ vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
+ }
+
+ return 0;
+}
+
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type)
+{
+ return vdev->res[type].count;
+}
+
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index)
+{
+ if (!vdev->res[type].vaddr)
+ return NULL;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ return (char __iomem *)vdev->res[type].vaddr +
+ index * VNIC_RES_STRIDE;
+ default:
+ return (char __iomem *)vdev->res[type].vaddr;
+ }
+}
+
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ /* The base address of the desc rings must be 512 byte aligned.
+ * Descriptor count is aligned to groups of 32 descriptors. A
+ * count of 0 means the maximum 4096 descriptors. Descriptor
+ * size is aligned to 16 bytes.
+ */
+
+ unsigned int count_align = 32;
+ unsigned int desc_align = 16;
+
+ ring->base_align = 512;
+
+ if (desc_count == 0)
+ desc_count = 4096;
+
+ ring->desc_count = ALIGN(desc_count, count_align);
+
+ ring->desc_size = ALIGN(desc_size, desc_align);
+
+ ring->size = ring->desc_count * ring->desc_size;
+ ring->size_unaligned = ring->size + ring->base_align;
+
+ return ring->size_unaligned;
+}
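+
+/*
+ * Worked example (hypothetical sizes): desc_count = 100, desc_size = 16
+ * gives a desc_count aligned up to 128 and desc_size of 16, so
+ * size = 128 * 16 = 2048 bytes and size_unaligned = 2048 + 512 = 2560
+ * bytes, the extra 512 bytes leaving room to realign the ring base in
+ * vnic_dev_alloc_desc_ring().
+ */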
+
+void vnic_set_hdr_split_size(struct vnic_dev *vdev, u16 size)
+{
+ vdev->split_hdr_size = size;
+}
+
+u16 vnic_get_hdr_split_size(struct vnic_dev *vdev)
+{
+ return vdev->split_hdr_size;
+}
+
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
+{
+ memset(ring->descs, 0, ring->size);
+}
+
+int vnic_dev_alloc_desc_ring(__attribute__((unused)) struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size, unsigned int socket_id,
+ char *z_name)
+{
+ const struct rte_memzone *rz;
+
+ vnic_dev_desc_ring_size(ring, desc_count, desc_size);
+
+ rz = rte_memzone_reserve_aligned(z_name,
+ ring->size_unaligned, socket_id,
+ 0, ENIC_ALIGN);
+ if (!rz) {
+ pr_err("Failed to allocate ring (size=%d), aborting\n",
+ (int)ring->size);
+ return -ENOMEM;
+ }
+
+ ring->descs_unaligned = rz->addr;
+ if (!ring->descs_unaligned) {
+ pr_err("Failed to map allocated ring (size=%d), aborting\n",
+ (int)ring->size);
+ return -ENOMEM;
+ }
+
+ ring->base_addr_unaligned = (dma_addr_t)rz->phys_addr;
+
+ ring->base_addr = ALIGN(ring->base_addr_unaligned,
+ ring->base_align);
+ ring->descs = (u8 *)ring->descs_unaligned +
+ (ring->base_addr - ring->base_addr_unaligned);
+
+ vnic_dev_clear_desc_ring(ring);
+
+ ring->desc_avail = ring->desc_count - 1;
+
+ return 0;
+}
+
+void vnic_dev_free_desc_ring(__attribute__((unused)) struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring)
+{
+ if (ring->descs)
+ ring->descs = NULL;
+}
+
+static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait)
+{
+ struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
+ unsigned int i;
+ int delay;
+ u32 status;
+ int err;
+
+ status = ioread32(&devcmd->status);
+ if (status == 0xFFFFFFFF) {
+ /* PCI-e target device is gone */
+ return -ENODEV;
+ }
+ if (status & STAT_BUSY) {
+
+ pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+ return -EBUSY;
+ }
+
+ if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ writeq(vdev->args[i], &devcmd->args[i]);
+ wmb(); /* complete all writes initiated till now */
+ }
+
+ iowrite32(cmd, &devcmd->cmd);
+
+ if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+ return 0;
+
+ for (delay = 0; delay < wait; delay++) {
+
+ udelay(100);
+
+ status = ioread32(&devcmd->status);
+ if (status == 0xFFFFFFFF) {
+ /* PCI-e target device is gone */
+ return -ENODEV;
+ }
+
+ if (!(status & STAT_BUSY)) {
+ if (status & STAT_ERROR) {
+ err = -(int)readq(&devcmd->args[0]);
+ if (cmd != CMD_CAPABILITY)
+ pr_err("Devcmd %d failed " \
+ "with error code %d\n",
+ _CMD_N(cmd), err);
+ return err;
+ }
+
+ if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+ rmb();/* finish all reads initiated till now */
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ vdev->args[i] = readq(&devcmd->args[i]);
+ }
+
+ return 0;
+ }
+ }
+
+ pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+ return -ETIMEDOUT;
+}
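+
+/*
+ * Summary of the devcmd handshake implemented above (descriptive only):
+ * write the 64-bit args for write-direction commands, post the command
+ * register, then poll the status register in 100us steps for up to 'wait'
+ * iterations. STAT_ERROR returns the negated value of args[0]; otherwise
+ * read-direction commands copy the args back for the caller.
+ */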
+
+static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
+ enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait)
+{
+ u32 status;
+ int err;
+
+ memset(vdev->args, 0, sizeof(vdev->args));
+
+ vdev->args[0] = vdev->proxy_index;
+ vdev->args[1] = cmd;
+ vdev->args[2] = *a0;
+ vdev->args[3] = *a1;
+
+ err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+ if (err)
+ return err;
+
+ status = (u32)vdev->args[0];
+ if (status & STAT_ERROR) {
+ err = (int)vdev->args[1];
+ if (err != ERR_ECMDUNKNOWN ||
+ cmd != CMD_CAPABILITY)
+ pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+ return err;
+ }
+
+ *a0 = vdev->args[1];
+ *a1 = vdev->args[2];
+
+ return 0;
+}
+
+static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+ int err;
+
+ vdev->args[0] = *a0;
+ vdev->args[1] = *a1;
+
+ err = _vnic_dev_cmd(vdev, cmd, wait);
+
+ *a0 = vdev->args[0];
+ *a1 = vdev->args[1];
+
+ return err;
+}
+
+void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
+{
+ vdev->proxy = PROXY_BY_INDEX;
+ vdev->proxy_index = index;
+}
+
+void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf)
+{
+ vdev->proxy = PROXY_BY_BDF;
+ vdev->proxy_index = bdf;
+}
+
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
+{
+ vdev->proxy = PROXY_NONE;
+ vdev->proxy_index = 0;
+}
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait)
+{
+ memset(vdev->args, 0, sizeof(vdev->args));
+
+ switch (vdev->proxy) {
+ case PROXY_BY_INDEX:
+ return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
+ a0, a1, wait);
+ case PROXY_BY_BDF:
+ return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
+ a0, a1, wait);
+ case PROXY_NONE:
+ default:
+ return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
+ }
+}
+
+static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
+{
+ u64 a0 = (u32)cmd, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+
+ return !(err || a0);
+}
+
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+ void *value)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int err;
+
+ a0 = offset;
+ a1 = size;
+
+ err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
+
+ switch (size) {
+ case 1:
+ *(u8 *)value = (u8)a0;
+ break;
+ case 2:
+ *(u16 *)value = (u16)a0;
+ break;
+ case 4:
+ *(u32 *)value = (u32)a0;
+ break;
+ case 8:
+ *(u64 *)value = a0;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return err;
+}
+
+int vnic_dev_stats_clear(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
+}
+
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ static u32 instance;
+ char name[NAME_MAX];
+
+ if (!vdev->stats) {
+ snprintf((char *)name, sizeof(name),
+ "vnic_stats-%d", instance++);
+ vdev->stats = vdev->alloc_consistent(vdev->priv,
+ sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name);
+ if (!vdev->stats)
+ return -ENOMEM;
+ }
+
+ *stats = vdev->stats;
+ a0 = vdev->stats_pa;
+ a1 = sizeof(struct vnic_stats);
+
+ return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
+}
+
+int vnic_dev_close(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+}
+
+/** Deprecated. @see vnic_dev_enable_wait */
+int vnic_dev_enable(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_enable_wait(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
+ return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
+ else
+ return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_disable(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_open(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
+}
+
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *done = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
+}
+
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *done = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
+int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+ int err, i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = ((u8 *)&a0)[i];
+
+ return 0;
+}
+
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+ int broadcast, int promisc, int allmulti)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+ (multicast ? CMD_PFILTER_MULTICAST : 0) |
+ (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+ (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+ (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+ err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+ if (err)
+ pr_err("Can't set packet filter\n");
+
+ return err;
+}
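+
+/*
+ * For reference (derived from enic_setup_finish() in enic_main.c): the
+ * default filter enables directed, multicast, broadcast and all-multicast
+ * reception with promiscuous mode off, i.e.
+ *
+ *   vnic_dev_packet_filter(vdev, 1, 1, 1, 0, 1);
+ */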
+
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = addr[i];
+
+ err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ if (err)
+ pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+ err);
+
+ return err;
+}
+
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = addr[i];
+
+ err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+ if (err)
+ pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+ err);
+
+ return err;
+}
+
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+ u8 ig_vlan_rewrite_mode)
+{
+ u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
+ int wait = 1000;
+
+ if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
+ return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
+ &a0, &a1, wait);
+ else
+ return 0;
+}
+
+int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
+{
+ u64 a0 = intr, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait);
+ if (err)
+ pr_err("Failed to raise INTR[%d], err %d\n", intr, err);
+
+ return err;
+}
+
+void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
+{
+ vdev->in_reset = state;
+}
+
+static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
+{
+ return vdev->in_reset;
+}
+
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int r;
+
+ memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
+ if (!vnic_dev_in_reset(vdev)) {
+ vdev->notify = notify_addr;
+ vdev->notify_pa = notify_pa;
+ }
+
+ a0 = (u64)notify_pa;
+ a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+ if (!vnic_dev_in_reset(vdev))
+ vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
+
+ return r;
+}
+
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+ void *notify_addr = NULL;
+ dma_addr_t notify_pa = 0;
+ char name[NAME_MAX];
+ static u32 instance;
+
+ if (vdev->notify || vdev->notify_pa) {
+ pr_warn("notify block %p still allocated.\n" \
+ "Ignore if restarting port\n", vdev->notify);
+ return -EINVAL;
+ }
+
+ if (!vnic_dev_in_reset(vdev)) {
+ snprintf((char *)name, sizeof(name),
+ "vnic_notify-%d", instance++);
+ notify_addr = vdev->alloc_consistent(vdev->priv,
+ sizeof(struct vnic_devcmd_notify),
+ &notify_pa, (u8 *)name);
+ if (!notify_addr)
+ return -ENOMEM;
+ }
+
+ return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
+}
+
+int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int err;
+
+ a0 = 0; /* paddr = 0 to unset notify buffer */
+ a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+ if (!vnic_dev_in_reset(vdev)) {
+ vdev->notify = NULL;
+ vdev->notify_pa = 0;
+ vdev->notify_sz = 0;
+ }
+
+ return err;
+}
+
+int vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+ if (vdev->notify && !vnic_dev_in_reset(vdev)) {
+ vdev->free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ }
+
+ return vnic_dev_notify_unsetcmd(vdev);
+}
+
+static int vnic_dev_notify_ready(struct vnic_dev *vdev)
+{
+ u32 *words;
+ unsigned int nwords = vdev->notify_sz / 4;
+ unsigned int i;
+ u32 csum;
+
+ if (!vdev->notify || !vdev->notify_sz)
+ return 0;
+
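+	/*
+	 * Take a consistent snapshot of the shared notify area: keep
+	 * re-copying it until the sum of words[1..nwords-1] matches the
+	 * csum word (words[0]) written by firmware.
+	 */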
+ do {
+ csum = 0;
+ rte_memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
+ words = (u32 *)&vdev->notify_copy;
+ for (i = 1; i < nwords; i++)
+ csum += words[i];
+ } while (csum != words[0]);
+
+ return 1;
+}
+
+int vnic_dev_init(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ int r = 0;
+
+ if (vnic_dev_capable(vdev, CMD_INIT))
+ r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
+ else {
+ vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
+ if (a0 & CMD_INITF_DEFAULT_MAC) {
+ /* Emulate these for old CMD_INIT_v1 which
+ * didn't pass a0 so no CMD_INITF_*.
+ */
+ vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
+ vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ }
+ }
+ return r;
+}
+
+int vnic_dev_deinit(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
+}
+
+void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
+{
+ /* Default: hardware intr coal timer is in units of 1.5 usecs */
+ vdev->intr_coal_timer_info.mul = 2;
+ vdev->intr_coal_timer_info.div = 3;
+ vdev->intr_coal_timer_info.max_usec =
+ vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
+}
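+
+/*
+ * Worked example (illustrative, not in the original source): with the
+ * defaults above (mul = 2, div = 3) one hardware tick is 1.5 usec, so
+ * vnic_dev_intr_coal_timer_usec_to_hw(vdev, 30) yields 30 * 2 / 3 = 20
+ * hardware units, and the 0xffff hardware limit maps back to
+ * 65535 * 3 / 2 = 98302 usec as max_usec.
+ */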
+
+int vnic_dev_link_status(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.link_state;
+}
+
+u32 vnic_dev_port_speed(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.port_speed;
+}
+
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode)
+{
+ vdev->intr_mode = intr_mode;
+}
+
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
+ struct vnic_dev *vdev)
+{
+ return vdev->intr_mode;
+}
+
+u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
+{
+ return (usec * vdev->intr_coal_timer_info.mul) /
+ vdev->intr_coal_timer_info.div;
+}
+
+u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
+{
+ return (hw_cycles * vdev->intr_coal_timer_info.div) /
+ vdev->intr_coal_timer_info.mul;
+}
+
+u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
+{
+ return vdev->intr_coal_timer_info.max_usec;
+}
+
+void vnic_dev_unregister(struct vnic_dev *vdev)
+{
+ if (vdev) {
+ if (vdev->notify)
+ vdev->free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ if (vdev->stats)
+ vdev->free_consistent(vdev->pdev,
+ sizeof(struct vnic_stats),
+ vdev->stats, vdev->stats_pa);
+ if (vdev->fw_info)
+ vdev->free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_fw_info),
+ vdev->fw_info, vdev->fw_info_pa);
+ kfree(vdev);
+ }
+}
+
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+ void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
+ unsigned int num_bars)
+{
+ if (!vdev) {
+ vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
+ if (!vdev)
+ return NULL;
+ }
+
+ vdev->priv = priv;
+ vdev->pdev = pdev;
+
+ if (vnic_dev_discover_res(vdev, bar, num_bars))
+ goto err_out;
+
+ vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+ if (!vdev->devcmd)
+ goto err_out;
+
+ return vdev;
+
+err_out:
+ vnic_dev_unregister(vdev);
+ return NULL;
+}
+
+struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev)
+{
+ return vdev->pdev;
+}
+
+int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = mac_addr[i];
+
+ return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
+}
+
+/*
+ * vnic_dev_classifier: Add/Delete classifier entries
+ * @vdev: vdev of the device
+ * @cmd: CLSF_ADD for Add filter
+ * CLSF_DEL for Delete filter
+ * @entry: In case of ADD filter, the caller passes the RQ number in this
+ * variable.
+ * This function stores the filter_id returned by the
+ * firmware in the same variable before returning.
+ *
+ * In case of DEL filter, the caller passes the filter_id obtained
+ * from a previous ADD in this variable; its value on return is
+ * irrelevant.
+ * @data: filter data
+ */
+int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
+ struct filter *data)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ dma_addr_t tlv_pa;
+ int ret = -EINVAL;
+ struct filter_tlv *tlv, *tlv_va;
+ struct filter_action *action;
+ u64 tlv_size;
+ static unsigned int unique_id;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ if (cmd == CLSF_ADD) {
+ tlv_size = sizeof(struct filter) +
+ sizeof(struct filter_action) +
+ 2*sizeof(struct filter_tlv);
+ snprintf((char *)z_name, sizeof(z_name),
+ "vnic_clsf_%d", unique_id++);
+ tlv_va = vdev->alloc_consistent(vdev->priv,
+ tlv_size, &tlv_pa, (u8 *)z_name);
+ if (!tlv_va)
+ return -ENOMEM;
+ tlv = tlv_va;
+ a0 = tlv_pa;
+ a1 = tlv_size;
+ memset(tlv, 0, tlv_size);
+ tlv->type = CLSF_TLV_FILTER;
+ tlv->length = sizeof(struct filter);
+ *(struct filter *)&tlv->val = *data;
+
+ tlv = (struct filter_tlv *)((char *)tlv +
+ sizeof(struct filter_tlv) +
+ sizeof(struct filter));
+
+ tlv->type = CLSF_TLV_ACTION;
+ tlv->length = sizeof(struct filter_action);
+ action = (struct filter_action *)&tlv->val;
+ action->type = FILTER_ACTION_RQ_STEERING;
+ action->u.rq_idx = *entry;
+
+ ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
+ *entry = (u16)a0;
+ vdev->free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
+ } else if (cmd == CLSF_DEL) {
+ a0 = *entry;
+ ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
+ }
+
+ return ret;
+}
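+
+/*
+ * Illustrative call sequence (added as a sketch; names and values are
+ * assumptions, not taken from the original source): steer packets that
+ * match a prepared 'struct filter ft' to RQ 2, then remove the filter
+ * using the id that firmware returned through the same variable:
+ *
+ *	u16 entry = 2;		(RQ index in, filter id out)
+ *	int err = vnic_dev_classifier(vdev, CLSF_ADD, &entry, &ft);
+ *	...
+ *	if (!err)
+ *		vnic_dev_classifier(vdev, CLSF_DEL, &entry, NULL);
+ */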
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.h b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.h
new file mode 100755
index 00000000..d1373a5b
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_dev.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_dev.h 196958 2014-11-04 18:23:37Z xuywang $"
+
+#ifndef _VNIC_DEV_H_
+#define _VNIC_DEV_H_
+
+#include "enic_compat.h"
+#include "rte_pci.h"
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+
+#ifndef VNIC_PADDR_TARGET
+#define VNIC_PADDR_TARGET 0x0000000000000000ULL
+#endif
+
+#ifndef readq
+static inline u64 readq(void __iomem *reg)
+{
+ return ((u64)readl((char *)reg + 0x4UL) << 32) |
+ (u64)readl(reg);
+}
+
+static inline void writeq(u64 val, void __iomem *reg)
+{
+ writel(val & 0xffffffff, reg);
+ writel(val >> 32, (char *)reg + 0x4UL);
+}
+#endif
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+enum vnic_dev_intr_mode {
+ VNIC_DEV_INTR_MODE_UNKNOWN,
+ VNIC_DEV_INTR_MODE_INTX,
+ VNIC_DEV_INTR_MODE_MSI,
+ VNIC_DEV_INTR_MODE_MSIX,
+};
+
+struct vnic_dev_bar {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned long len;
+};
+
+struct vnic_dev_ring {
+ void *descs;
+ size_t size;
+ dma_addr_t base_addr;
+ size_t base_align;
+ void *descs_unaligned;
+ size_t size_unaligned;
+ dma_addr_t base_addr_unaligned;
+ unsigned int desc_size;
+ unsigned int desc_count;
+ unsigned int desc_avail;
+};
+
+struct vnic_dev_iomap_info {
+ dma_addr_t bus_addr;
+ unsigned long len;
+ void __iomem *vaddr;
+};
+
+struct vnic_dev;
+struct vnic_stats;
+
+void *vnic_dev_priv(struct vnic_dev *vdev);
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+void vnic_register_cbacks(struct vnic_dev *vdev,
+ void *(*alloc_consistent)(void *priv, size_t size,
+ dma_addr_t *dma_handle, u8 *name),
+ void (*free_consistent)(struct rte_pci_device *hwdev,
+ size_t size, void *vaddr,
+ dma_addr_t dma_handle));
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index);
+dma_addr_t vnic_dev_get_res_bus_addr(struct vnic_dev *vdev,
+ enum vnic_res_type type, unsigned int index);
+uint8_t vnic_dev_get_res_bar(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+uint32_t vnic_dev_get_res_offset(struct vnic_dev *vdev,
+ enum vnic_res_type type, unsigned int index);
+unsigned long vnic_dev_get_res_type_len(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+void vnic_set_hdr_split_size(struct vnic_dev *vdev, u16 size);
+u16 vnic_get_hdr_split_size(struct vnic_dev *vdev);
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size, unsigned int socket_id,
+ char *z_name);
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring);
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait);
+int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *args, int nargs, int wait);
+void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index);
+void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+ struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+ void *value);
+int vnic_dev_stats_clear(struct vnic_dev *vdev);
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int vnic_dev_hang_notify(struct vnic_dev *vdev);
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+ int broadcast, int promisc, int allmulti);
+int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
+ int multicast, int broadcast, int promisc, int allmulti);
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state);
+int vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr);
+int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
+int vnic_dev_link_status(struct vnic_dev *vdev);
+u32 vnic_dev_port_speed(struct vnic_dev *vdev);
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
+u32 vnic_dev_mtu(struct vnic_dev *vdev);
+u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
+u32 vnic_dev_notify_status(struct vnic_dev *vdev);
+u32 vnic_dev_uif(struct vnic_dev *vdev);
+int vnic_dev_close(struct vnic_dev *vdev);
+int vnic_dev_enable(struct vnic_dev *vdev);
+int vnic_dev_enable_wait(struct vnic_dev *vdev);
+int vnic_dev_disable(struct vnic_dev *vdev);
+int vnic_dev_open(struct vnic_dev *vdev, int arg);
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_init(struct vnic_dev *vdev, int arg);
+int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
+int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_deinit(struct vnic_dev *vdev);
+void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev);
+int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode);
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
+u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec);
+u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles);
+u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev);
+void vnic_dev_unregister(struct vnic_dev *vdev);
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+ u8 ig_vlan_rewrite_mode);
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+ void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
+ unsigned int num_bars);
+struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev);
+int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
+int vnic_dev_get_size(void);
+int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op);
+int vnic_dev_perbi(struct vnic_dev *vdev, u64 arg, u32 op);
+u32 vnic_dev_perbi_rebuild_cnt(struct vnic_dev *vdev);
+int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_enable2(struct vnic_dev *vdev, int active);
+int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
+int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
+int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
+ struct filter *data);
+#ifdef ENIC_VXLAN
+int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
+ u8 overlay, u8 config);
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+ u16 vxlan_udp_port_number);
+#endif
+#endif /* _VNIC_DEV_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_devcmd.h b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_devcmd.h
new file mode 100755
index 00000000..e7ecf31a
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_devcmd.h
@@ -0,0 +1,774 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_devcmd.h 173135 2014-05-16 03:14:07Z sanpilla $"
+
+#ifndef _VNIC_DEVCMD_H_
+#define _VNIC_DEVCMD_H_
+
+#define _CMD_NBITS 14
+#define _CMD_VTYPEBITS 10
+#define _CMD_FLAGSBITS 6
+#define _CMD_DIRBITS 2
+
+#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
+#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
+#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
+#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
+
+#define _CMD_NSHIFT 0
+#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
+#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
+#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
+
+/*
+ * Direction bits (from host perspective).
+ */
+#define _CMD_DIR_NONE 0U
+#define _CMD_DIR_WRITE 1U
+#define _CMD_DIR_READ 2U
+#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
+
+/*
+ * Flag bits.
+ */
+#define _CMD_FLAGS_NONE 0U
+#define _CMD_FLAGS_NOWAIT 1U
+
+/*
+ * vNIC type bits.
+ */
+#define _CMD_VTYPE_NONE 0U
+#define _CMD_VTYPE_ENET 1U
+#define _CMD_VTYPE_FC 2U
+#define _CMD_VTYPE_SCSI 4U
+#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
+
+/*
+ * Used to create cmds.
+ */
+#define _CMDCF(dir, flags, vtype, nr) \
+ (((dir) << _CMD_DIRSHIFT) | \
+ ((flags) << _CMD_FLAGSSHIFT) | \
+ ((vtype) << _CMD_VTYPESHIFT) | \
+ ((nr) << _CMD_NSHIFT))
+#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
+#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
+
+/*
+ * Used to decode cmds.
+ */
+#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
+#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
+#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
+#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+
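+/*
+ * Worked example (illustrative, not from the original source): with the
+ * shifts above, _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2) -- i.e. CMD_DEV_SPEC
+ * below -- packs nr=2 into bits 0-13, vtype=7 into bits 14-23 and dir=3
+ * into bits 30-31, giving 0xc001c002; _CMD_N() of that value recovers 2.
+ */
+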
+enum vnic_devcmd_cmd {
+ CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
+
+ /*
+ * mcpu fw info in mem:
+ * in:
+ * (u64)a0=paddr to struct vnic_devcmd_fw_info
+ * action:
+ * Fills in struct vnic_devcmd_fw_info (128 bytes)
+ * note:
+ * An old definition of CMD_MCPU_FW_INFO
+ */
+ CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+ /*
+ * mcpu fw info in mem:
+ * in:
+ * (u64)a0=paddr to struct vnic_devcmd_fw_info
+ * (u16)a1=size of the structure
+ * out:
+ * (u16)a1=0 for in:a1 = 0,
+ * data size actually written for other values.
+ * action:
+ * Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0,
+ * first in:a1 bytes for 0 < in:a1 <= 132,
+ * 132 bytes for other values of in:a1.
+ * note:
+ * CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1
+ * for source compatibility.
+ */
+ CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1),
+
+ /* dev-specific block member:
+ * in: (u16)a0=offset,(u8)a1=size
+ * out: a0=value */
+ CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+
+ /* stats clear */
+ CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
+
+ /* stats dump in mem: (u64)a0=paddr to stats area,
+ * (u16)a1=sizeof stats area */
+ CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
+
+ /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
+ CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+
+ /* set Rx packet filter for all: (u32)a0=filters (see CMD_PFILTER_*) */
+ CMD_PACKET_FILTER_ALL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
+
+ /* hang detection notification */
+ CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
+
+ /* MAC address in (u48)a0 */
+ CMD_GET_MAC_ADDR = _CMDC(_CMD_DIR_READ,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+
+ /* add addr from (u48)a0 */
+ CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
+
+ /* del addr from (u48)a0 */
+ CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
+
+ /* add VLAN id in (u16)a0 */
+ CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
+
+ /* del VLAN id in (u16)a0 */
+ CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
+
+ /* nic_cfg in (u32)a0 */
+ CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+ /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
+
+ /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
+
+ /* initiate softreset */
+ CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
+
+ /* softreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
+
+ /* set struct vnic_devcmd_notify buffer in mem:
+ * in:
+ * (u64)a0=paddr to notify (set paddr=0 to unset)
+ * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * out:
+ * (u32)a1 = effective size
+ */
+ CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
+
+ /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
+ * (u8)a1=PXENV_UNDI_xxx */
+ CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
+
+ /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
+ CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
+
+ /* open status:
+ * out: a0=0 open complete, a0=1 open in progress */
+ CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
+
+ /* close vnic */
+ CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
+
+ /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+/***** Replaced by CMD_INIT *****/
+ CMD_INIT_v1 = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
+
+ /* variant of CMD_INIT, with provisioning info
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
+
+ /* enable virtual link */
+ CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* enable virtual link, waiting variant. */
+ CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* disable virtual link */
+ CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
+
+ /* stats dump sum of all vnic stats on same uplink in mem:
+ * (u64)a0=paddr
+ * (u16)a1=sizeof stats area */
+ CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
+
+ /* init status:
+ * out: a0=0 init complete, a0=1 init in progress
+ * if a0=0, a1=errno */
+ CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
+
+ /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx */
+ CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
+
+ /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
+ CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
+
+ /* undo initialize of virtual link */
+ CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+ /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+ CMD_INIT = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 35),
+
+ /* check fw capability of a cmd:
+ * in: (u32)a0=cmd
+ * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+ CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+ /* persistent binding info
+ * in: (u64)a0=paddr of arg
+ * (u32)a1=CMD_PERBI_XXX */
+ CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+ /* Interrupt Assert Register functionality
+ * in: (u16)a0=interrupt number to assert
+ */
+ CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+ /* initiate hangreset, like softreset after hang detected */
+ CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+ /* hangreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+ /*
+ * Set hw ingress packet vlan rewrite mode:
+ * in: (u32)a0=new vlan rewrite mode
+ * out: (u32)a0=old vlan rewrite mode */
+ CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+ /*
+ * in: (u16)a0=bdf of target vnic
+ * (u32)a1=cmd to proxy
+ * a2-a15=args to cmd in a1
+ * out: (u32)a0=status of proxied cmd
+ * a1-a15=out args of proxied cmd */
+ CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+ /*
+ * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+ * or SR-IOV virtual vnic
+ */
+ CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+ /*
+ * For HPP toggle:
+ * adapter-info-get
+ * in: (u64)a0=physical address of buffer passed in from caller.
+ *     (u16)a1=size of buffer specified in a0.
+ * out: (u64)a0=physical address of buffer passed in from caller.
+ * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+ * 0 if no VIF-CONFIG-INFO TLV was ever received. */
+ CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+ /*
+ * INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx
+ */
+ CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+ /*
+ * Set default vlan:
+ * in: (u16)a0=new default vlan
+ * (u16)a1=zero for overriding vlan with param a0,
+ * non-zero for resetting vlan to the default
+ * out: (u16)a0=old default vlan
+ */
+ CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
+
+ /* init_prov_info2:
+ * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
+ * the vnic until CMD_ENABLE2 is issued.
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
+
+ /* enable2:
+ * (u32)a0=0 ==> standby
+ * =CMD_ENABLE2_ACTIVE ==> active
+ */
+ CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48),
+
+ /*
+ * cmd_status:
+ * Returns the status of the specified command
+ * Input:
+ * a0 = command for which status is being queried.
+ * Possible values are:
+ * CMD_SOFT_RESET
+ * CMD_HANG_RESET
+ * CMD_OPEN
+ * CMD_INIT
+ * CMD_INIT_PROV_INFO
+ * CMD_DEINIT
+ * CMD_INIT_PROV_INFO2
+ * CMD_ENABLE2
+ * Output:
+ * if status == STAT_ERROR
+ * a0 = ERR_ENOTSUPPORTED - status for command in a0 is
+ * not supported
+ * if status == STAT_NONE
+ * a0 = status of the devcmd specified in a0 as follows.
+ * ERR_SUCCESS - command in a0 completed successfully
+ * ERR_EINPROGRESS - command in a0 is still in progress
+ */
+ CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49),
+
+ /*
+ * Returns interrupt coalescing timer conversion factors.
+ * After calling this devcmd, the ENIC driver can convert an
+ * interrupt coalescing timer in usec into CPU cycles as follows:
+ *
+ * intr_timer_cycles = intr_timer_usec * multiplier / divisor
+ *
+ * An interrupt coalescing timer in usecs can be converted/obtained
+ * from CPU cycles as follows:
+ *
+ * intr_timer_usec = intr_timer_cycles * divisor / multiplier
+ *
+ * in: none
+ * out: (u32)a0 = multiplier
+ * (u32)a1 = divisor
+ * (u32)a2 = maximum timer value in usec
+ */
+ CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
+
+ /*
+ * ISCSI DUMP API:
+ * in: (u64)a0=paddr of the param or param itself
+ * (u32)a1=ISCSI_CMD_xxx
+ */
+ CMD_ISCSI_DUMP_REQ = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 51),
+
+ /*
+ * ISCSI DUMP STATUS API:
+ * in: (u32)a0=cmd tag
+ * in: (u32)a1=ISCSI_CMD_xxx
+ * out: (u32)a0=cmd status
+ */
+ CMD_ISCSI_DUMP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 52),
+
+ /*
+ * Subvnic migration from MQ <--> VF.
+ * Enable the LIF migration from MQ to VF and vice versa. MQ and VF
+ * indexes are statically bound at the time of initialization. Based on
+ * the direction of migration, the resources of either the MQ or the VF
+ * shall be attached to the LIF.
+ * in: (u32)a0=Direction of Migration
+ * 0=> Migrate to VF
+ * 1=> Migrate to MQ
+ * (u32)a1=VF index (MQ index)
+ */
+ CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53),
+
+
+ /*
+ * Register / Deregister the notification block for MQ subvnics
+ * in:
+ * (u64)a0=paddr to notify (set paddr=0 to unset)
+ * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+ * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+ * out:
+ * (u32)a1 = effective size
+ */
+ CMD_SUBVNIC_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 54),
+
+ /*
+ * Set the predefined mac address as default
+ * in:
+ * (u48)a0=mac addr
+ */
+ CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
+
+ /* Update the provisioning info of the given VIF
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+
+ /*
+ * Initialization for the devcmd2 interface.
+ * in: (u64) a0=host result buffer physical address
+ * in: (u16) a1=number of entries in result buffer
+ */
+ CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57),
+
+ /*
+ * Add a filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ */
+ CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
+
+ /*
+ * Delete a filter.
+ * in: (u32) a0=filter identifier
+ */
+ CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59),
+
+ /*
+ * Enable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60),
+
+ /*
+ * Disable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61),
+
+ /*
+ * Stats dump Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u64) a1=host buffer addr for status dump
+ * (u32) a2=length of the buffer
+ */
+ CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62),
+
+ /*
+ * Clear stats for Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ */
+ CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
+
+ /*
+ * Enable/Disable overlay offloads on the given vnic
+ * in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
+ * a0 = OVERLAY_FEATURE_VXLAN : VxLAN
+ * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable
+ * a1 = OVERLAY_OFFLOAD_DISABLE : Disable
+ */
+ CMD_OVERLAY_OFFLOAD_ENABLE_DISABLE =
+ _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
+
+ /*
+ * Configuration of overlay offloads feature on a given vNIC
+ * in: (u8) a0 = DEVCMD_OVERLAY_NVGRE : NVGRE
+ * a0 = DEVCMD_OVERLAY_VXLAN : VxLAN
+ * in: (u8) a1 = VXLAN_PORT_UPDATE : VxLAN
+ * in: (u16) a2 = unsigned short int port information
+ */
+ CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+};
+
+/* CMD_ENABLE2 flags */
+#define CMD_ENABLE2_STANDBY 0x0
+#define CMD_ENABLE2_ACTIVE 0x1
+
+/* flags for CMD_OPEN */
+#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+
+/* flags for CMD_INIT */
+#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
+
+/* flags for CMD_PACKET_FILTER */
+#define CMD_PFILTER_DIRECTED 0x01
+#define CMD_PFILTER_MULTICAST 0x02
+#define CMD_PFILTER_BROADCAST 0x04
+#define CMD_PFILTER_PROMISCUOUS 0x08
+#define CMD_PFILTER_ALL_MULTICAST 0x10
+
+/* Commands for CMD_QP_ENABLE/CMD_QP_DISABLE */
+#define CMD_QP_RQWQ 0x0
+
+/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
+#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0
+#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1
+#define IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN 2
+#define IG_VLAN_REWRITE_MODE_PASS_THRU 3
+
+enum vnic_devcmd_status {
+ STAT_NONE = 0,
+ STAT_BUSY = 1 << 0, /* cmd in progress */
+ STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
+};
+
+enum vnic_devcmd_error {
+ ERR_SUCCESS = 0,
+ ERR_EINVAL = 1,
+ ERR_EFAULT = 2,
+ ERR_EPERM = 3,
+ ERR_EBUSY = 4,
+ ERR_ECMDUNKNOWN = 5,
+ ERR_EBADSTATE = 6,
+ ERR_ENOMEM = 7,
+ ERR_ETIMEDOUT = 8,
+ ERR_ELINKDOWN = 9,
+ ERR_EMAXRES = 10,
+ ERR_ENOTSUPPORTED = 11,
+ ERR_EINPROGRESS = 12,
+ ERR_MAX
+};
+
+/*
+ * note: hw_version and asic_rev refer to the same thing,
+ * but have different formats. hw_version is
+ * a 32-byte string (e.g. "A2") and asic_rev is
+ * a 16-bit integer (e.g. 0xA2).
+ */
+struct vnic_devcmd_fw_info {
+ char fw_version[32];
+ char fw_build[32];
+ char hw_version[32];
+ char hw_serial_number[32];
+ u16 asic_type;
+ u16 asic_rev;
+};
+
+enum fwinfo_asic_type {
+ FWINFO_ASIC_TYPE_UNKNOWN,
+ FWINFO_ASIC_TYPE_PALO,
+ FWINFO_ASIC_TYPE_SERENO,
+};
+
+
+struct vnic_devcmd_notify {
+ u32 csum; /* checksum over following words */
+
+ u32 link_state; /* link up == 1 */
+ u32 port_speed; /* effective port speed (rate limit) */
+ u32 mtu; /* MTU */
+ u32 msglvl; /* requested driver msg lvl */
+ u32 uif; /* uplink interface */
+ u32 status; /* status bits (see VNIC_STF_*) */
+ u32 error; /* error code (see ERR_*) for first ERR */
+ u32 link_down_cnt; /* running count of link down transitions */
+ u32 perbi_rebuild_cnt; /* running count of perbi rebuilds */
+};
+#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
+#define VNIC_STF_STD_PAUSE 0x0002 /* standard link-level pause on */
+#define VNIC_STF_PFC_PAUSE 0x0004 /* priority flow control pause on */
+/* all supported status flags */
+#define VNIC_STF_ALL (VNIC_STF_FATAL_ERR |\
+ VNIC_STF_STD_PAUSE |\
+ VNIC_STF_PFC_PAUSE |\
+ 0)
+
+struct vnic_devcmd_provinfo {
+ u8 oui[3];
+ u8 type;
+ u8 data[0];
+};
+
+/*
+ * These are used in flags field of different filters to denote
+ * valid fields used.
+ */
+#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
+
+#define FILTER_FIELDS_USNIC (FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2) | \
+ FILTER_FIELD_VALID(3) | \
+ FILTER_FIELD_VALID(4))
+
+#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2) | \
+ FILTER_FIELD_VALID(3) | \
+ FILTER_FIELD_VALID(4) | \
+ FILTER_FIELD_VALID(5))
+
+#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2))
+
+#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
+
+struct filter_usnic_id {
+ u32 flags;
+ u16 vlan;
+ u16 ethtype;
+ u8 proto_version;
+ u32 usnic_id;
+} __attribute__((packed));
+
+#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+
+/* Enums for the protocol field. */
+enum protocol_e {
+ PROTO_UDP = 0,
+ PROTO_TCP = 1,
+};
+
+struct filter_ipv4_5tuple {
+ u32 flags;
+ u32 protocol;
+ u32 src_addr;
+ u32 dst_addr;
+ u16 src_port;
+ u16 dst_port;
+} __attribute__((packed));
+
+#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
+
+struct filter_mac_vlan {
+ u32 flags;
+ u16 vlan;
+ u8 mac_addr[6];
+} __attribute__((packed));
+
+/* Specifies the filter_action type. */
+enum {
+ FILTER_ACTION_RQ_STEERING = 0,
+ FILTER_ACTION_MAX
+};
+
+struct filter_action {
+ u32 type;
+ union {
+ u32 rq_idx;
+ } u;
+} __attribute__((packed));
+
+/* Specifies the filter type. */
+enum filter_type {
+ FILTER_USNIC_ID = 0,
+ FILTER_IPV4_5TUPLE = 1,
+ FILTER_MAC_VLAN = 2,
+ FILTER_MAX
+};
+
+struct filter {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ } u;
+} __attribute__((packed));
+
+enum {
+ CLSF_TLV_FILTER = 0,
+ CLSF_TLV_ACTION = 1,
+};
+
+#define FILTER_MAX_BUF_SIZE 100 /* Maximum size of buffer to CMD_ADD_FILTER */
+
+struct filter_tlv {
+ uint32_t type;
+ uint32_t length;
+ uint32_t val[0];
+};
+
+enum {
+ CLSF_ADD = 0,
+ CLSF_DEL = 1,
+};
+
+/*
+ * Writing cmd register causes STAT_BUSY to get set in status register.
+ * When cmd completes, STAT_BUSY will be cleared.
+ *
+ * If cmd completed successfully STAT_ERROR will be clear
+ * and args registers contain cmd-specific results.
+ *
+ * If cmd error, STAT_ERROR will be set and args[0] contains error code.
+ *
+ * status register is read-only. While STAT_BUSY is set,
+ * all other register contents are read-only.
+ */
+
+/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
+#define VNIC_DEVCMD_NARGS 15
+struct vnic_devcmd {
+ u32 status; /* RO */
+ u32 cmd; /* RW */
+ u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
+};
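+
+/*
+ * Caller-side sketch of the handshake above (illustrative only; the
+ * actual implementation is vnic_dev_cmd() in vnic_dev.c):
+ *
+ *	writeq(a0, &devcmd->args[0]);
+ *	iowrite32(cmd, &devcmd->cmd);		(hardware sets STAT_BUSY)
+ *	while (ioread32(&devcmd->status) & STAT_BUSY)
+ *		udelay(100);			(give up after 'wait' tries)
+ *	if (ioread32(&devcmd->status) & STAT_ERROR)
+ *		err = (int)readq(&devcmd->args[0]);	(error code)
+ *	else
+ *		a0 = readq(&devcmd->args[0]);		(cmd-specific result)
+ */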
+
+/*
+ * Version 2 of the interface.
+ *
+ * Some things are carried over, notably the vnic_devcmd_cmd enum.
+ */
+
+/*
+ * Flags for vnic_devcmd2.flags
+ */
+
+#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS
+struct vnic_devcmd2 {
+ u16 pad;
+ u16 flags;
+ u32 cmd; /* same command #defines as original */
+ u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+ u64 results[VNIC_DEVCMD2_NRESULTS];
+ u32 pad;
+ u16 completed_index; /* into copy WQ */
+ u8 error; /* same error codes as original */
+ u8 color; /* 0 or 1 as with completion queues */
+};
+
+#define DEVCMD2_RING_SIZE 32
+#define DEVCMD2_DESC_SIZE 128
+
+#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1)
+
+/* Overlay related definitions */
+
+/*
+ * This enum lists the flag associated with each of the overlay features
+ */
+typedef enum {
+ OVERLAY_FEATURE_NVGRE = 1,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_FEATURE_MAX,
+} overlay_feature_t;
+
+#define OVERLAY_OFFLOAD_ENABLE 0
+#define OVERLAY_OFFLOAD_DISABLE 1
+
+#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0
+#endif /* _VNIC_DEVCMD_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_enet.h b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_enet.h
new file mode 100755
index 00000000..9d3cc07e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_enet.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_enet.h 175806 2014-06-04 19:31:17Z rfaucett $"
+
+#ifndef _VNIC_ENIC_H_
+#define _VNIC_ENIC_H_
+
+/* Device-specific region: enet configuration */
+struct vnic_enet_config {
+ u32 flags;
+ u32 wq_desc_count;
+ u32 rq_desc_count;
+ u16 mtu;
+ u16 intr_timer_deprecated;
+ u8 intr_timer_type;
+ u8 intr_mode;
+ char devname[16];
+ u32 intr_timer_usec;
+ u16 loop_tag;
+ u16 vf_rq_count;
+ u16 num_arfs;
+ u64 mem_paddr;
+};
+
+#define VENETF_TSO 0x1 /* TSO enabled */
+#define VENETF_LRO 0x2 /* LRO enabled */
+#define VENETF_RXCSUM 0x4 /* RX csum enabled */
+#define VENETF_TXCSUM 0x8 /* TX csum enabled */
+#define VENETF_RSS 0x10 /* RSS enabled */
+#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */
+#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */
+#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */
+#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */
+#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
+#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
+#define VENETF_LOOP 0x800 /* Loopback enabled */
+#define VENETF_VMQ 0x4000 /* using VMQ flag for VMware NETQ */
+#define VENETF_VXLAN 0x10000 /* VxLAN offload */
+#define VENETF_NVGRE 0x20000 /* NVGRE offload */
+#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */
+#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */
+
+#define VENET_INTR_MODE_ANY 0 /* Try MSI-X, then MSI, then INTx */
+#define VENET_INTR_MODE_MSI 1 /* Try MSI then INTx */
+#define VENET_INTR_MODE_INTX 2 /* Try INTx only */
+
+#endif /* _VNIC_ENIC_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.c b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.c
new file mode 100755
index 00000000..84368afc
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_intr.c 171146 2014-05-02 07:08:20Z ssujith $"
+
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+
+void vnic_intr_free(struct vnic_intr *intr)
+{
+ intr->ctrl = NULL;
+}
+
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index)
+{
+ intr->index = index;
+ intr->vdev = vdev;
+
+ intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
+ if (!intr->ctrl) {
+ pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion)
+{
+ vnic_intr_coalescing_timer_set(intr, coalescing_timer);
+ iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
+ iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
+ iowrite32(0, &intr->ctrl->int_credits);
+}
+
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ u32 coalescing_timer)
+{
+ iowrite32(vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev,
+ coalescing_timer), &intr->ctrl->coalescing_timer);
+}
+
+void vnic_intr_clean(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->int_credits);
+}
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.h b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.h
new file mode 100755
index 00000000..ecb82bf4
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_intr.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_intr.h 171146 2014-05-02 07:08:20Z ssujith $"
+
+#ifndef _VNIC_INTR_H_
+#define _VNIC_INTR_H_
+
+
+#include "vnic_dev.h"
+
+#define VNIC_INTR_TIMER_TYPE_ABS 0
+#define VNIC_INTR_TIMER_TYPE_QUIET 1
+
+/* Interrupt control */
+struct vnic_intr_ctrl {
+ u32 coalescing_timer; /* 0x00 */
+ u32 pad0;
+ u32 coalescing_value; /* 0x08 */
+ u32 pad1;
+ u32 coalescing_type; /* 0x10 */
+ u32 pad2;
+ u32 mask_on_assertion; /* 0x18 */
+ u32 pad3;
+ u32 mask; /* 0x20 */
+ u32 pad4;
+ u32 int_credits; /* 0x28 */
+ u32 pad5;
+ u32 int_credit_return; /* 0x30 */
+ u32 pad6;
+};
+
+struct vnic_intr {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
+};
+
+static inline void vnic_intr_unmask(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_mask(struct vnic_intr *intr)
+{
+ iowrite32(1, &intr->ctrl->mask);
+}
+
+static inline int vnic_intr_masked(struct vnic_intr *intr)
+{
+ return ioread32(&intr->ctrl->mask);
+}
+
+static inline void vnic_intr_return_credits(struct vnic_intr *intr,
+ unsigned int credits, int unmask, int reset_timer)
+{
+#define VNIC_INTR_UNMASK_SHIFT 16
+#define VNIC_INTR_RESET_TIMER_SHIFT 17
+
+ u32 int_credit_return = (credits & 0xffff) |
+ (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
+ (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
+
+ iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
+}
+
+static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
+{
+ return ioread32(&intr->ctrl->int_credits);
+}
+
+static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
+{
+ unsigned int credits = vnic_intr_credits(intr);
+ int unmask = 1;
+ int reset_timer = 1;
+
+ vnic_intr_return_credits(intr, credits, unmask, reset_timer);
+}
+
+static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
+{
+ /* read PBA without clearing */
+ return ioread32(legacy_pba);
+}
+
+void vnic_intr_free(struct vnic_intr *intr);
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index);
+void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ u32 coalescing_timer);
+void vnic_intr_clean(struct vnic_intr *intr);
+
+#endif /* _VNIC_INTR_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_nic.h b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_nic.h
new file mode 100755
index 00000000..332cfb4f
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_nic.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_nic.h 59839 2010-09-27 20:36:31Z roprabhu $"
+
+#ifndef _VNIC_NIC_H_
+#define _VNIC_NIC_H_
+
+#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
+#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
+#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
+#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
+#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
+#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
+#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
+#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
+#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
+#define NIC_CFG_RSS_ENABLE (1UL << 22)
+#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
+#define NIC_CFG_RSS_ENABLE_SHIFT 22
+#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
+#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
+#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
+#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
+#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
+#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
+
+#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
+
+static inline void vnic_set_nic_cfg(u32 *nic_cfg,
+ u8 rss_default_cpu, u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu,
+ u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en)
+{
+ *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
+ ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
+ << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
+ ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
+ << NIC_CFG_RSS_HASH_BITS_SHIFT) |
+ ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
+ << NIC_CFG_RSS_BASE_CPU_SHIFT) |
+ ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
+ << NIC_CFG_RSS_ENABLE_SHIFT) |
+ ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
+ << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
+ ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
+ << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
+}
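+
+/*
+ * Illustrative call (values are assumptions, not from the original source):
+ * enable RSS hashing on IPv4 and TCP/IPv4 headers, hash bits = 7, base
+ * CPU 0, no TSO IPID split, ingress VLAN stripping on:
+ *
+ *	u32 nic_cfg;
+ *	vnic_set_nic_cfg(&nic_cfg, 0,
+ *			 NIC_CFG_RSS_HASH_TYPE_IPV4 |
+ *			 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4,
+ *			 7, 0, 1, 0, 1);
+ */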
+
+#endif /* _VNIC_NIC_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_resource.h b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_resource.h
new file mode 100755
index 00000000..2512712e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_resource.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_resource.h 196958 2014-11-04 18:23:37Z xuywang $"
+
+#ifndef _VNIC_RESOURCE_H_
+#define _VNIC_RESOURCE_H_
+
+#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
+#define VNIC_RES_VERSION 0x00000000L
+#define MGMTVNIC_MAGIC 0x544d474dL /* 'MGMT' */
+#define MGMTVNIC_VERSION 0x00000000L
+
+/* The MAC address assigned to the CFG vNIC is fixed. */
+#define MGMTVNIC_MAC { 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d }
+
+/* vNIC resource types */
+enum vnic_res_type {
+ RES_TYPE_EOL, /* End-of-list */
+ RES_TYPE_WQ, /* Work queues */
+ RES_TYPE_RQ, /* Receive queues */
+ RES_TYPE_CQ, /* Completion queues */
+ RES_TYPE_MEM, /* Window to dev memory */
+ RES_TYPE_NIC_CFG, /* Enet NIC config registers */
+ RES_TYPE_RSS_KEY, /* Enet RSS secret key */
+ RES_TYPE_RSS_CPU, /* Enet RSS indirection table */
+ RES_TYPE_TX_STATS, /* Netblock Tx statistic regs */
+ RES_TYPE_RX_STATS, /* Netblock Rx statistic regs */
+ RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
+ RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
+ RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
+ RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
+ RES_TYPE_DEBUG, /* Debug-only info */
+ RES_TYPE_DEV, /* Device-specific region */
+ RES_TYPE_DEVCMD, /* Device command region */
+ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
+ RES_TYPE_SUBVNIC, /* subvnic resource type */
+ RES_TYPE_MQ_WQ, /* MQ Work queues */
+ RES_TYPE_MQ_RQ, /* MQ Receive queues */
+ RES_TYPE_MQ_CQ, /* MQ Completion queues */
+ RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */
+ RES_TYPE_DEVCMD2, /* Device control region */
+ RES_TYPE_MAX, /* Count of resource types */
+};
+
+struct vnic_resource_header {
+ u32 magic;
+ u32 version;
+};
+
+struct mgmt_barmap_hdr {
+ u32 magic; /* magic number */
+ u32 version; /* header format version */
+ u16 lif; /* loopback lif for mgmt frames */
+ u16 pci_slot; /* installed pci slot */
+ char serial[16]; /* card serial number */
+};
+
+struct vnic_resource {
+ u8 type;
+ u8 bar;
+ u8 pad[2];
+ u32 bar_offset;
+ u32 count;
+};
+
+#endif /* _VNIC_RESOURCE_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.c b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.c
new file mode 100755
index 00000000..3a4b65ab
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_rq.c 171146 2014-05-02 07:08:20Z ssujith $"
+
+#include "vnic_dev.h"
+#include "vnic_rq.h"
+
+static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
+{
+ struct vnic_rq_buf *buf;
+ unsigned int i, j, count = rq->ring.desc_count;
+ unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
+
+ for (i = 0; i < blks; i++) {
+ rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ if (!rq->bufs[i])
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < blks; i++) {
+ buf = rq->bufs[i];
+ for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
+ buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
+ buf->desc = (u8 *)rq->ring.descs +
+ rq->ring.desc_size * buf->index;
+ if (buf->index + 1 == count) {
+ buf->next = rq->bufs[0];
+ break;
+ } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
+ buf->next = rq->bufs[i + 1];
+ } else {
+ buf->next = buf + 1;
+ buf++;
+ }
+ }
+ }
+
+ rq->to_use = rq->to_clean = rq->bufs[0];
+
+ return 0;
+}
+
+int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
+ unsigned int desc_size)
+{
+ int mem_size = 0;
+
+ mem_size += vnic_dev_desc_ring_size(&rq->ring, desc_count, desc_size);
+
+ mem_size += VNIC_RQ_BUF_BLKS_NEEDED(rq->ring.desc_count) *
+ VNIC_RQ_BUF_BLK_SZ(rq->ring.desc_count);
+
+ return mem_size;
+}
+
+void vnic_rq_free(struct vnic_rq *rq)
+{
+ struct vnic_dev *vdev;
+ unsigned int i;
+
+ vdev = rq->vdev;
+
+ vnic_dev_free_desc_ring(vdev, &rq->ring);
+
+ for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
+ if (rq->bufs[i]) {
+ kfree(rq->bufs[i]);
+ rq->bufs[i] = NULL;
+ }
+ }
+
+ rq->ctrl = NULL;
+}
+
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+ char res_name[NAME_MAX];
+ static int instance;
+
+ rq->index = index;
+ rq->vdev = vdev;
+
+ rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
+ if (!rq->ctrl) {
+ pr_err("Failed to hook RQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ vnic_rq_disable(rq);
+
+ snprintf(res_name, sizeof(res_name), "%d-rq-%d", instance++, index);
+ err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size,
+ rq->socket_id, res_name);
+ if (err)
+ return err;
+
+ err = vnic_rq_alloc_bufs(rq);
+ if (err) {
+ vnic_rq_free(rq);
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+ unsigned int count = rq->ring.desc_count;
+
+ paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &rq->ctrl->ring_base);
+ iowrite32(count, &rq->ctrl->ring_size);
+ iowrite32(cq_index, &rq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
+ iowrite32(0, &rq->ctrl->dropped_packet_count);
+ iowrite32(0, &rq->ctrl->error_status);
+ iowrite32(fetch_index, &rq->ctrl->fetch_index);
+ iowrite32(posted_index, &rq->ctrl->posted_index);
+
+ rq->to_use = rq->to_clean =
+ &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
+ [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
+}
+
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u32 fetch_index = 0;
+ /* Use current fetch_index as the ring starting point */
+ fetch_index = ioread32(&rq->ctrl->fetch_index);
+
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ /* Hardware surprise removal: reset fetch_index */
+ fetch_index = 0;
+ }
+
+ vnic_rq_init_start(rq, cq_index,
+ fetch_index, fetch_index,
+ error_interrupt_enable,
+ error_interrupt_offset);
+}
+
+void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
+{
+ iowrite32(error, &rq->ctrl->error_status);
+}
+
+unsigned int vnic_rq_error_status(struct vnic_rq *rq)
+{
+ return ioread32(&rq->ctrl->error_status);
+}
+
+void vnic_rq_enable(struct vnic_rq *rq)
+{
+ iowrite32(1, &rq->ctrl->enable);
+}
+
+int vnic_rq_disable(struct vnic_rq *rq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &rq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 1000; wait++) {
+ if (!(ioread32(&rq->ctrl->running)))
+ return 0;
+ udelay(10);
+ }
+
+ pr_err("Failed to disable RQ[%d]\n", rq->index);
+
+ return -ETIMEDOUT;
+}
+
+void vnic_rq_clean(struct vnic_rq *rq,
+ void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
+{
+ struct vnic_rq_buf *buf;
+ u32 fetch_index;
+ unsigned int count = rq->ring.desc_count;
+
+ buf = rq->to_clean;
+
+ while (vnic_rq_desc_used(rq) > 0) {
+
+ (*buf_clean)(rq, buf);
+
+ buf = rq->to_clean = buf->next;
+ rq->ring.desc_avail++;
+ }
+
+ /* Use current fetch_index as the ring starting point */
+ fetch_index = ioread32(&rq->ctrl->fetch_index);
+
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ /* Hardware surprise removal: reset fetch_index */
+ fetch_index = 0;
+ }
+ rq->to_use = rq->to_clean =
+ &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
+ [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
+ iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+ vnic_dev_clear_desc_ring(&rq->ring);
+}
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.h b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.h
new file mode 100755
index 00000000..54b66123
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rq.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_rq.h 180262 2014-07-02 07:57:43Z gvaradar $"
+
+#ifndef _VNIC_RQ_H_
+#define _VNIC_RQ_H_
+
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Receive queue control */
+struct vnic_rq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+ u32 pad2;
+ u32 enable; /* 0x20 */
+ u32 pad3;
+ u32 running; /* 0x28 */
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+ u32 pad5;
+ u32 error_interrupt_enable; /* 0x38 */
+ u32 pad6;
+ u32 error_interrupt_offset; /* 0x40 */
+ u32 pad7;
+ u32 error_status; /* 0x48 */
+ u32 pad8;
+ u32 dropped_packet_count; /* 0x50 */
+ u32 pad9;
+ u32 dropped_packet_count_rc; /* 0x58 */
+ u32 pad10;
+};
+
+/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
+#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
+#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
+#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
+ ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
+ VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
+#define VNIC_RQ_BUF_BLK_SZ(entries) \
+ (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
+#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
+ DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_rq_buf {
+ struct vnic_rq_buf *next;
+ dma_addr_t dma_addr;
+ void *os_buf;
+ unsigned int os_buf_index;
+ unsigned int len;
+ unsigned int index;
+ void *desc;
+ uint64_t wr_id;
+};
+
+struct vnic_rq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
+ struct vnic_rq_buf *to_use;
+ struct vnic_rq_buf *to_clean;
+ void *os_buf_head;
+ unsigned int pkts_outstanding;
+
+ unsigned int socket_id;
+ struct rte_mempool *mp;
+};
+
+static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
+{
+ /* how many does SW own? */
+ return rq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
+{
+ /* how many does HW own? */
+ return rq->ring.desc_count - rq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
+{
+ return rq->to_use->desc;
+}
+
+static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
+{
+ return rq->to_use->index;
+}
+
+static inline void vnic_rq_post(struct vnic_rq *rq,
+ void *os_buf, unsigned int os_buf_index,
+ dma_addr_t dma_addr, unsigned int len,
+ uint64_t wrid)
+{
+ struct vnic_rq_buf *buf = rq->to_use;
+
+ buf->os_buf = os_buf;
+ buf->os_buf_index = os_buf_index;
+ buf->dma_addr = dma_addr;
+ buf->len = len;
+ buf->wr_id = wrid;
+
+ buf = buf->next;
+ rq->to_use = buf;
+ rq->ring.desc_avail--;
+
+ /* Move the posted_index every nth descriptor
+ */
+
+#ifndef VNIC_RQ_RETURN_RATE
+#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
+#endif
+
+ if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(buf->index, &rq->ctrl->posted_index);
+ }
+}
+
+static inline void vnic_rq_post_commit(struct vnic_rq *rq,
+ void *os_buf, unsigned int os_buf_index,
+ dma_addr_t dma_addr, unsigned int len)
+{
+ struct vnic_rq_buf *buf = rq->to_use;
+
+ buf->os_buf = os_buf;
+ buf->os_buf_index = os_buf_index;
+ buf->dma_addr = dma_addr;
+ buf->len = len;
+
+ buf = buf->next;
+ rq->to_use = buf;
+ rq->ring.desc_avail--;
+
+ /* Move the posted_index every descriptor
+ */
+
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(buf->index, &rq->ctrl->posted_index);
+}
+
+static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
+{
+ rq->ring.desc_avail += count;
+}
+
+enum desc_return_options {
+ VNIC_RQ_RETURN_DESC,
+ VNIC_RQ_DEFER_RETURN_DESC,
+};
+
+static inline int vnic_rq_service(struct vnic_rq *rq,
+ struct cq_desc *cq_desc, u16 completed_index,
+ int desc_return, int (*buf_service)(struct vnic_rq *rq,
+ struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+ int skipped, void *opaque), void *opaque)
+{
+ struct vnic_rq_buf *buf;
+ int skipped;
+ int eop = 0;
+
+ buf = rq->to_clean;
+ while (1) {
+
+ skipped = (buf->index != completed_index);
+
+ if ((*buf_service)(rq, cq_desc, buf, skipped, opaque))
+ eop++;
+
+ if (desc_return == VNIC_RQ_RETURN_DESC)
+ rq->ring.desc_avail++;
+
+ rq->to_clean = buf->next;
+
+ if (!skipped)
+ break;
+
+ buf = rq->to_clean;
+ }
+ return eop;
+}
+
+static inline int vnic_rq_fill(struct vnic_rq *rq,
+ int (*buf_fill)(struct vnic_rq *rq))
+{
+ int err;
+
+ while (vnic_rq_desc_avail(rq) > 0) {
+
+ err = (*buf_fill)(rq);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static inline int vnic_rq_fill_count(struct vnic_rq *rq,
+ int (*buf_fill)(struct vnic_rq *rq), unsigned int count)
+{
+ int err;
+
+ while ((vnic_rq_desc_avail(rq) > 0) && (count--)) {
+
+ err = (*buf_fill)(rq);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq);
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error);
+unsigned int vnic_rq_error_status(struct vnic_rq *rq);
+void vnic_rq_enable(struct vnic_rq *rq);
+int vnic_rq_disable(struct vnic_rq *rq);
+void vnic_rq_clean(struct vnic_rq *rq,
+ void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
+int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
+ unsigned int desc_size);
+
+#endif /* _VNIC_RQ_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.c b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.c
new file mode 100755
index 00000000..5ff76b14
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id$"
+
+#include "enic_compat.h"
+#include "vnic_rss.h"
+
+void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key)
+{
+ u32 i;
+ u32 *p;
+ u16 *q;
+
+ for (i = 0; i < 4; ++i) {
+ p = (u32 *)(key + (10 * i));
+ iowrite32(*p++, &rss_key->key[i].b[0]);
+ iowrite32(*p++, &rss_key->key[i].b[4]);
+ q = (u16 *)p;
+ iowrite32(*q, &rss_key->key[i].b[8]);
+ }
+}
+
+void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu)
+{
+ u32 i;
+ u32 *p = (u32 *)cpu;
+
+ for (i = 0; i < 32; ++i)
+ iowrite32(*p++, &rss_cpu->cpu[i].b[0]);
+}
+
+void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key)
+{
+ u32 i;
+ u32 *p;
+ u16 *q;
+
+ for (i = 0; i < 4; ++i) {
+ p = (u32 *)(key + (10 * i));
+ *p++ = ioread32(&rss_key->key[i].b[0]);
+ *p++ = ioread32(&rss_key->key[i].b[4]);
+ q = (u16 *)p;
+ *q = (u16)ioread32(&rss_key->key[i].b[8]);
+ }
+}
+
+void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu)
+{
+ u32 i;
+ u32 *p = (u32 *)cpu;
+
+ for (i = 0; i < 32; ++i)
+ *p++ = ioread32(&rss_cpu->cpu[i].b[0]);
+}
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.h b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.h
new file mode 100755
index 00000000..45ed3d2a
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_rss.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ident "$Id: vnic_rss.h 64224 2010-11-09 19:43:13Z vkolluri $"
+
+#ifndef _VNIC_RSS_H_
+#define _VNIC_RSS_H_
+
+/* RSS key array */
+union vnic_rss_key {
+ struct {
+ u8 b[10];
+ u8 b_pad[6];
+ } key[4];
+ u64 raw[8];
+};
+
+/* RSS cpu array */
+union vnic_rss_cpu {
+ struct {
+ u8 b[4];
+ u8 b_pad[4];
+ } cpu[32];
+ u64 raw[32];
+};
+
+void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
+void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
+void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
+void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
+
+#endif /* _VNIC_RSS_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_stats.h b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_stats.h
new file mode 100755
index 00000000..ac5aa722
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_stats.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_stats.h 84040 2011-08-09 23:38:43Z dwang2 $"
+
+#ifndef _VNIC_STATS_H_
+#define _VNIC_STATS_H_
+
+/* Tx statistics */
+struct vnic_tx_stats {
+ u64 tx_frames_ok;
+ u64 tx_unicast_frames_ok;
+ u64 tx_multicast_frames_ok;
+ u64 tx_broadcast_frames_ok;
+ u64 tx_bytes_ok;
+ u64 tx_unicast_bytes_ok;
+ u64 tx_multicast_bytes_ok;
+ u64 tx_broadcast_bytes_ok;
+ u64 tx_drops;
+ u64 tx_errors;
+ u64 tx_tso;
+ u64 rsvd[16];
+};
+
+/* Rx statistics */
+struct vnic_rx_stats {
+ u64 rx_frames_ok;
+ u64 rx_frames_total;
+ u64 rx_unicast_frames_ok;
+ u64 rx_multicast_frames_ok;
+ u64 rx_broadcast_frames_ok;
+ u64 rx_bytes_ok;
+ u64 rx_unicast_bytes_ok;
+ u64 rx_multicast_bytes_ok;
+ u64 rx_broadcast_bytes_ok;
+ u64 rx_drop;
+ u64 rx_no_bufs;
+ u64 rx_errors;
+ u64 rx_rss;
+ u64 rx_crc_errors;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_to_max;
+ u64 rsvd[16];
+};
+
+struct vnic_stats {
+ struct vnic_tx_stats tx;
+ struct vnic_rx_stats rx;
+};
+
+#endif /* _VNIC_STATS_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.c b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.c
new file mode 100755
index 00000000..e52cef0b
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_wq.c 183023 2014-07-22 23:47:25Z xuywang $"
+
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+
+static inline
+int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int index, enum vnic_res_type res_type)
+{
+ wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
+ if (!wq->ctrl)
+ return -EINVAL;
+ return 0;
+}
+
+static inline
+int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ char res_name[NAME_MAX];
+ static int instance;
+
+ snprintf(res_name, sizeof(res_name), "%d-wq-%d", instance++, wq->index);
+ return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size,
+ wq->socket_id, res_name);
+}
+
+static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
+{
+ struct vnic_wq_buf *buf;
+ unsigned int i, j, count = wq->ring.desc_count;
+ unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
+
+ for (i = 0; i < blks; i++) {
+ wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ if (!wq->bufs[i])
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < blks; i++) {
+ buf = wq->bufs[i];
+ for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
+ buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
+ buf->desc = (u8 *)wq->ring.descs +
+ wq->ring.desc_size * buf->index;
+ if (buf->index + 1 == count) {
+ buf->next = wq->bufs[0];
+ break;
+ } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
+ buf->next = wq->bufs[i + 1];
+ } else {
+ buf->next = buf + 1;
+ buf++;
+ }
+ }
+ }
+
+ wq->to_use = wq->to_clean = wq->bufs[0];
+
+ return 0;
+}
+
+void vnic_wq_free(struct vnic_wq *wq)
+{
+ struct vnic_dev *vdev;
+ unsigned int i;
+
+ vdev = wq->vdev;
+
+ vnic_dev_free_desc_ring(vdev, &wq->ring);
+
+ for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
+ if (wq->bufs[i]) {
+ kfree(wq->bufs[i]);
+ wq->bufs[i] = NULL;
+ }
+ }
+
+ wq->ctrl = NULL;
+}
+
+int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
+ unsigned int desc_size)
+{
+ int mem_size = 0;
+
+ mem_size += vnic_dev_desc_ring_size(&wq->ring, desc_count, desc_size);
+
+ mem_size += VNIC_WQ_BUF_BLKS_NEEDED(wq->ring.desc_count) *
+ VNIC_WQ_BUF_BLK_SZ(wq->ring.desc_count);
+
+ return mem_size;
+}
+
+
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = index;
+ wq->vdev = vdev;
+
+ err = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ);
+ if (err) {
+ pr_err("Failed to hook WQ[%d] resource, err %d\n", index, err);
+ return err;
+ }
+
+ vnic_wq_disable(wq);
+
+ err = vnic_wq_alloc_ring(vdev, wq, desc_count, desc_size);
+ if (err)
+ return err;
+
+ err = vnic_wq_alloc_bufs(wq);
+ if (err) {
+ vnic_wq_free(wq);
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+ unsigned int count = wq->ring.desc_count;
+
+ paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &wq->ctrl->ring_base);
+ iowrite32(count, &wq->ctrl->ring_size);
+ iowrite32(fetch_index, &wq->ctrl->fetch_index);
+ iowrite32(posted_index, &wq->ctrl->posted_index);
+ iowrite32(cq_index, &wq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ wq->to_use = wq->to_clean =
+ &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
+ [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
+}
+
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ vnic_wq_init_start(wq, cq_index, 0, 0,
+ error_interrupt_enable,
+ error_interrupt_offset);
+}
+
+void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error)
+{
+ iowrite32(error, &wq->ctrl->error_status);
+}
+
+unsigned int vnic_wq_error_status(struct vnic_wq *wq)
+{
+ return ioread32(&wq->ctrl->error_status);
+}
+
+void vnic_wq_enable(struct vnic_wq *wq)
+{
+ iowrite32(1, &wq->ctrl->enable);
+}
+
+int vnic_wq_disable(struct vnic_wq *wq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &wq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 1000; wait++) {
+ if (!(ioread32(&wq->ctrl->running)))
+ return 0;
+ udelay(10);
+ }
+
+ pr_err("Failed to disable WQ[%d]\n", wq->index);
+
+ return -ETIMEDOUT;
+}
+
+void vnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
+{
+ struct vnic_wq_buf *buf;
+
+ buf = wq->to_clean;
+
+ while (vnic_wq_desc_used(wq) > 0) {
+
+ (*buf_clean)(wq, buf);
+
+ buf = wq->to_clean = buf->next;
+ wq->ring.desc_avail++;
+ }
+
+ wq->to_use = wq->to_clean = wq->bufs[0];
+
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ vnic_dev_clear_desc_ring(&wq->ring);
+}
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.h b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.h
new file mode 100755
index 00000000..f8219ad5
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/vnic_wq.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: vnic_wq.h 183023 2014-07-22 23:47:25Z xuywang $"
+
+#ifndef _VNIC_WQ_H_
+#define _VNIC_WQ_H_
+
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Work queue control */
+struct vnic_wq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+ u32 pad2;
+ u32 enable; /* 0x20 */
+ u32 pad3;
+ u32 running; /* 0x28 */
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+ u32 pad5;
+ u32 dca_value; /* 0x38 */
+ u32 pad6;
+ u32 error_interrupt_enable; /* 0x40 */
+ u32 pad7;
+ u32 error_interrupt_offset; /* 0x48 */
+ u32 pad8;
+ u32 error_status; /* 0x50 */
+ u32 pad9;
+};
+
+struct vnic_wq_buf {
+ struct vnic_wq_buf *next;
+ dma_addr_t dma_addr;
+ void *os_buf;
+ unsigned int len;
+ unsigned int index;
+ int sop;
+ void *desc;
+ uint64_t wr_id; /* Cookie */
+ uint8_t cq_entry; /* Gets completion event from hw */
+ uint8_t desc_skip_cnt; /* Num descs to occupy */
+ uint8_t compressed_send; /* Both hdr and payload in one desc */
+};
+
+/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
+#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
+#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
+ ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
+ VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
+#define VNIC_WQ_BUF_BLK_SZ(entries) \
+ (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
+#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
+ DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_wq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
+ struct vnic_wq_buf *to_use;
+ struct vnic_wq_buf *to_clean;
+ unsigned int pkts_outstanding;
+ unsigned int socket_id;
+};
+
+static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
+{
+ /* how many does SW own? */
+ return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
+{
+ /* how many does HW own? */
+ return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
+{
+ return wq->to_use->desc;
+}
+
+#define PI_LOG2_CACHE_LINE_SIZE 5
+#define PI_INDEX_BITS 12
+#define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1)
+#define PI_PREFETCH_LEN_MASK ((1U << PI_LOG2_CACHE_LINE_SIZE) - 1)
+#define PI_PREFETCH_LEN_OFF 16
+#define PI_PREFETCH_ADDR_BITS 43
+#define PI_PREFETCH_ADDR_MASK ((1ULL << PI_PREFETCH_ADDR_BITS) - 1)
+#define PI_PREFETCH_ADDR_OFF 21
+
+/** How many cache lines are touched by buffer (addr, len). */
+static inline unsigned int num_cache_lines_touched(dma_addr_t addr,
+ unsigned int len)
+{
+ const unsigned long mask = PI_PREFETCH_LEN_MASK;
+ const unsigned long laddr = (unsigned long)addr;
+ unsigned long lines, equiv_len;
+ /* A. If addr is aligned, our solution is just to round up len to the
+ next boundary.
+
+ e.g. addr = 0, len = 48
+ +--------------------+
+ |XXXXXXXXXXXXXXXXXXXX| 32-byte cacheline a
+ +--------------------+
+ |XXXXXXXXXX | cacheline b
+ +--------------------+
+
+ B. If addr is not aligned, however, we may use an extra
+ cacheline. e.g. addr = 12, len = 22
+
+ +--------------------+
+ | XXXXXXXXXXXXX|
+ +--------------------+
+ |XX |
+ +--------------------+
+
+ Our solution is to make the problem equivalent to case A
+ above by adding the empty space in the first cacheline
+ to the length:
+
+ +--------------------+
+ |eeeeeeeXXXXXXXXXXXXX| "e" is empty space, which we add to len
+ +--------------------+
+ |XX |
+ +--------------------+
+
+ */
+ equiv_len = len + (laddr & mask);
+
+ /* Now we can just round up this len to the next 32-byte boundary. */
+ lines = (equiv_len + mask) & (~mask);
+
+ /* Scale bytes -> cachelines. */
+ return lines >> PI_LOG2_CACHE_LINE_SIZE;
+}
+
+static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len,
+ unsigned int index)
+{
+ unsigned int num_cache_lines = num_cache_lines_touched(addr, len);
+ /* Wish we could avoid a branch here. We could have separate
+ * vnic_wq_post() and vnic_wq_post_inline(), the latter
+ * only supporting < 1k (2^5 * 2^5) sends, I suppose. This would
+ * eliminate the if (eop) branch as well.
+ */
+ if (num_cache_lines > PI_PREFETCH_LEN_MASK)
+ num_cache_lines = 0;
+ return (index & PI_INDEX_MASK) |
+ ((num_cache_lines & PI_PREFETCH_LEN_MASK) << PI_PREFETCH_LEN_OFF) |
+ (((addr >> PI_LOG2_CACHE_LINE_SIZE) &
+ PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF);
+}
+
+static inline void vnic_wq_post(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr,
+ unsigned int len, int sop, int eop,
+ uint8_t desc_skip_cnt, uint8_t cq_entry,
+ uint8_t compressed_send, uint64_t wrid)
+{
+ struct vnic_wq_buf *buf = wq->to_use;
+
+ buf->sop = sop;
+ buf->cq_entry = cq_entry;
+ buf->compressed_send = compressed_send;
+ buf->desc_skip_cnt = desc_skip_cnt;
+ buf->os_buf = os_buf;
+ buf->dma_addr = dma_addr;
+ buf->len = len;
+ buf->wr_id = wrid;
+
+ buf = buf->next;
+ if (eop) {
+#ifdef DO_PREFETCH
+ uint64_t wr = vnic_cached_posted_index(dma_addr, len,
+ buf->index);
+#endif
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+#ifdef DO_PREFETCH
+ /* Intel chipsets seem to limit the rate of PIOs that we can
+ * push on the bus. Thus, it is very important to do a single
+ * 64 bit write here. With two 32-bit writes, my maximum
+ * pkt/sec rate was cut almost in half. -AJF
+ */
+ iowrite64((uint64_t)wr, &wq->ctrl->posted_index);
+#else
+ iowrite32(buf->index, &wq->ctrl->posted_index);
+#endif
+ }
+ wq->to_use = buf;
+
+ wq->ring.desc_avail -= desc_skip_cnt;
+}
+
+static inline void vnic_wq_service(struct vnic_wq *wq,
+ struct cq_desc *cq_desc, u16 completed_index,
+ void (*buf_service)(struct vnic_wq *wq,
+ struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
+ void *opaque)
+{
+ struct vnic_wq_buf *buf;
+
+ buf = wq->to_clean;
+ while (1) {
+
+ (*buf_service)(wq, cq_desc, buf, opaque);
+
+ wq->ring.desc_avail++;
+
+ wq->to_clean = buf->next;
+
+ if (buf->index == completed_index)
+ break;
+
+ buf = wq->to_clean;
+ }
+}
+
+void vnic_wq_free(struct vnic_wq *wq);
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error);
+unsigned int vnic_wq_error_status(struct vnic_wq *wq);
+void vnic_wq_enable(struct vnic_wq *wq);
+int vnic_wq_disable(struct vnic_wq *wq);
+void vnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
+ unsigned int desc_size);
+
+#endif /* _VNIC_WQ_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_enic/vnic/wq_enet_desc.h b/src/dpdk_lib18/librte_pmd_enic/vnic/wq_enet_desc.h
new file mode 100755
index 00000000..ff2b7680
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_enic/vnic/wq_enet_desc.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ident "$Id: wq_enet_desc.h 59839 2010-09-27 20:36:31Z roprabhu $"
+
+#ifndef _WQ_ENET_DESC_H_
+#define _WQ_ENET_DESC_H_
+
+/* Ethernet work queue descriptor: 16B */
+struct wq_enet_desc {
+ __le64 address;
+ __le16 length;
+ __le16 mss_loopback;
+ __le16 header_length_flags;
+ __le16 vlan_tag;
+};
+
+#define WQ_ENET_ADDR_BITS 64
+#define WQ_ENET_LEN_BITS 14
+#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
+#define WQ_ENET_MSS_BITS 14
+#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
+#define WQ_ENET_MSS_SHIFT 2
+#define WQ_ENET_LOOPBACK_SHIFT 1
+#define WQ_ENET_HDRLEN_BITS 10
+#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
+#define WQ_ENET_FLAGS_OM_BITS 2
+#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
+#define WQ_ENET_FLAGS_EOP_SHIFT 12
+#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
+#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
+#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
+
+#define WQ_ENET_OFFLOAD_MODE_CSUM 0
+#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
+#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
+#define WQ_ENET_OFFLOAD_MODE_TSO 3
+
+static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
+ u64 address, u16 length, u16 mss, u16 header_length,
+ u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
+ u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
+{
+ desc->address = cpu_to_le64(address);
+ desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
+ desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+ WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
+ desc->header_length_flags = cpu_to_le16(
+ (header_length & WQ_ENET_HDRLEN_MASK) |
+ (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
+ (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
+ (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
+ (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
+ (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
+ desc->vlan_tag = cpu_to_le16(vlan_tag);
+}
+
+static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
+ u64 *address, u16 *length, u16 *mss, u16 *header_length,
+ u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
+ u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
+{
+ *address = le64_to_cpu(desc->address);
+ *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
+ *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
+ WQ_ENET_MSS_MASK;
+ *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
+ WQ_ENET_LOOPBACK_SHIFT) & 1);
+ *header_length = le16_to_cpu(desc->header_length_flags) &
+ WQ_ENET_HDRLEN_MASK;
+ *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
+ *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_EOP_SHIFT) & 1);
+ *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
+ *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
+ *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
+ *vlan_tag = le16_to_cpu(desc->vlan_tag);
+}
+
+#endif /* _WQ_ENET_DESC_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/Makefile b/src/dpdk_lib18/librte_pmd_i40e/Makefile
new file mode 100755
index 00000000..98e4bdf2
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/Makefile
@@ -0,0 +1,101 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_i40e.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings
+#
+ifeq ($(CC), icc)
+CFLAGS_BASE_DRIVER = -wd593
+else ifeq ($(CC), clang)
+CFLAGS_BASE_DRIVER += -Wno-sign-compare
+CFLAGS_BASE_DRIVER += -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing
+CFLAGS_BASE_DRIVER += -Wno-format
+CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers
+CFLAGS_BASE_DRIVER += -Wno-pointer-to-int-cast
+CFLAGS_BASE_DRIVER += -Wno-format-nonliteral
+else
+CFLAGS_BASE_DRIVER = -Wno-sign-compare
+CFLAGS_BASE_DRIVER += -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing
+CFLAGS_BASE_DRIVER += -Wno-format
+CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers
+CFLAGS_BASE_DRIVER += -Wno-pointer-to-int-cast
+CFLAGS_BASE_DRIVER += -Wno-format-nonliteral
+CFLAGS_BASE_DRIVER += -Wno-format-security
+
+ifeq ($(shell test $(GCC_MAJOR_VERSION) -ge 4 -a $(GCC_MINOR_VERSION) -ge 4 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+endif
+
+CFLAGS_i40e_lan_hmc.o += -Wno-error
+endif
+OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(RTE_SDK)/lib/librte_pmd_i40e/i40e/*.c)))
+$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(RTE_SDK)/lib/librte_pmd_i40e/i40e
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_adminq.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_diag.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_hmc.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_lan_hmc.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_nvm.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_dcb.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.c b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.c
new file mode 100755
index 00000000..e098ed69
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.c
@@ -0,0 +1,1084 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+#ifndef VF_DRIVER
+/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
+ desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
+}
+
+#endif /* VF_DRIVER */
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+STATIC void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (hw->mac.type == I40E_MAC_VF) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
+ hw->aq.asq.bal = I40E_VF_ATQBAL1;
+ hw->aq.asq.bah = I40E_VF_ATQBAH1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
+ hw->aq.arq.bal = I40E_VF_ARQBAL1;
+ hw->aq.arq.bah = I40E_VF_ARQBAH1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
+ hw->aq.asq.bal = I40E_PF_ATQBAL;
+ hw->aq.asq.bah = I40E_PF_ATQBAH;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
+ hw->aq.arq.bal = I40E_PF_ARQBAL;
+ hw->aq.arq.bah = I40E_PF_ARQBAH;
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+		/* This is in accordance with the Admin Queue design; there is
+		 * no register for buffer size configuration
+		 */
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+ desc->params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+STATIC enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event) queue
+ **/
+STATIC enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.asq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ /* make sure spinlock is available */
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+ i40e_release_spinlock(&hw->aq.asq_spinlock);
+
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.arq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ /* make sure spinlock is available */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+#ifndef VF_DRIVER
+ u16 eetrack_lo, eetrack_hi;
+ int retry = 0;
+#endif
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* initialize spin locks */
+ i40e_init_spinlock(&hw->aq.asq_spinlock);
+ i40e_init_spinlock(&hw->aq.arq_spinlock);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_destroy_spinlocks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_asq;
+
+#ifndef VF_DRIVER
+ /* There are some cases where the firmware may not be quite ready
+ * for AdminQ operations, so we retry the AdminQ setup a few times
+ * if we see timeouts in this first AQ call.
+ */
+ do {
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver,
+ &hw->aq.fw_min_ver,
+ &hw->aq.api_maj_ver,
+ &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ break;
+ retry++;
+ i40e_msec_delay(100);
+ i40e_resume_aq(hw);
+ } while (retry < 10);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_arq;
+
+ /* get the NVM version info */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+ hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+
+ /* pre-emptive resource lock release */
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->aq.nvm_busy = false;
+
+ ret_code = i40e_aq_set_hmc_resource_profile(hw,
+ I40E_HMC_PROFILE_DEFAULT,
+ 0,
+ NULL);
+ ret_code = I40E_SUCCESS;
+
+#endif /* VF_DRIVER */
+ /* success! */
+ goto init_adminq_exit;
+
+#ifndef VF_DRIVER
+init_adminq_free_arq:
+ i40e_shutdown_arq(hw);
+#endif
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+ i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+ i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+ return ret_code;
+}
+
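+/* Editorial usage sketch (not part of the driver sources): before calling
+ * i40e_init_adminq() a driver is expected to size both queues. The entry
+ * counts and buffer sizes below are illustrative assumptions only; on
+ * failure i40e_init_adminq() unwinds its own allocations, and
+ * i40e_shutdown_adminq() is the matching teardown call on unload.
+ *
+ * hw->aq.num_asq_entries = 128;
+ * hw->aq.num_arq_entries = 128;
+ * hw->aq.asq_buf_size = 4096;
+ * hw->aq.arq_buf_size = 4096;
+ * if (i40e_init_adminq(hw) != I40E_SUCCESS)
+ *	return;
+ *
+ * i40e_shutdown_adminq(hw);
+ */
+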
+/**
+ * i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_queue_shutdown(hw, true);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+ /* destroy the spinlocks */
+ i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+ i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
+ rd32(hw, hw->aq.asq.head));
+
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ i40e_memcpy(&desc_cb, desc,
+ sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA);
+ cb_func(hw, &desc_cb);
+ }
+ i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
+ i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ * i40e_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40e_asq_done(struct i40e_hw *hw)
+{
+	/* the AQ designers suggest polling the head register rather than
+	 * the DD bit for better timing reliability
+	 */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+
+}
+
+/**
+ * i40e_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc
+ **/
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+ u32 val = 0;
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+#ifndef VF_DRIVER
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
+#endif
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ i40e_memcpy(details,
+ cmd_details,
+ sizeof(struct i40e_asq_cmd_details),
+ I40E_NONDMA_TO_NONDMA);
+
+ /* If the cmd_details are defined copy the cookie. The
+ * CPU_TO_LE32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
+ desc->cookie_low =
+ CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
+ }
+ } else {
+ i40e_memset(details, 0,
+ sizeof(struct i40e_asq_cmd_details),
+ I40E_NONDMA_MEM);
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+ desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
+ I40E_NONDMA_TO_DMA);
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ i40e_memcpy(dma_buff->va, buff, buff_size,
+ I40E_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+			/* the AQ designers suggest polling the head register
+			 * rather than the DD bit for better timing reliability
+			 */
+ if (i40e_asq_done(hw))
+ break;
+ /* ugh! delay while spin_lock */
+ i40e_msec_delay(1);
+ total_delay++;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40e_asq_done(hw)) {
+ i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_NONDMA);
+ if (buff != NULL)
+ i40e_memcpy(buff, dma_buff->va, buff_size,
+ I40E_DMA_TO_NONDMA);
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = I40E_SUCCESS;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: desc and buffer writeback:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+#ifndef VF_DRIVER
+ if (!status && i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
+#endif /* VF_DRIVER */
+asq_send_command_error:
+ i40e_release_spinlock(&hw->aq.asq_spinlock);
+asq_send_command_exit:
+ return status;
+}
+
+/**
+ * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
+ I40E_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
+}
+
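+/* Editorial usage sketch (not part of the driver sources): a direct command
+ * is built on the stack, its payload overlaid on params.raw, and handed to
+ * i40e_asq_send_command(). This mirrors how the base code issues the
+ * queue-shutdown opcode; the surrounding status handling is assumed.
+ *
+ * struct i40e_aq_desc desc;
+ * struct i40e_aqc_queue_shutdown *cmd =
+ *	(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ *
+ * i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+ * cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
+ * status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */
+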
+/**
+ * i40e_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* take the lock before we start messing with the ring */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ /* set next_to_use to head */
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Queue is empty.\n");
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
+ i40e_memcpy(e->msg_buf,
+ hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len, I40E_DMA_TO_NONDMA);
+
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+	/* Restore the original datalen and buffer address in the desc;
+	 * FW overwrites datalen to indicate the size of the event message
+	 */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+ desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+
+#ifndef VF_DRIVER
+ if (i40e_is_nvm_update_op(&e->desc)) {
+ hw->aq.nvm_busy = false;
+ if (hw->aq.nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->aq.nvm_release_on_done = false;
+ }
+ }
+
+#endif
+ return ret_code;
+}
+
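+/* Editorial usage sketch (not part of the driver sources): a typical caller
+ * drains the ARQ by looping until 'pending' drops to zero. The 1024-byte
+ * message buffer is an illustrative assumption; a buffer smaller than
+ * arq_buf_size simply caps event.msg_len to event.buf_len.
+ *
+ * struct i40e_arq_event_info event;
+ * u8 msg[1024];
+ * u16 pending = 0;
+ *
+ * event.buf_len = sizeof(msg);
+ * event.msg_buf = msg;
+ * do {
+ *	if (i40e_clean_arq_element(hw, &event, &pending) != I40E_SUCCESS)
+ *		break;
+ *	(event.desc and event.msg_len now describe one received event)
+ * } while (pending);
+ */
+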
+void i40e_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
+#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
+#endif
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.h
new file mode 100755
index 00000000..ea611bd9
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq.h
@@ -0,0 +1,157 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+	u32 asq_cmd_timeout;		/* send queue cmd write back timeout */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
+ bool nvm_release_on_done;
+
+ struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
+ struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/**
+ * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * aq_rc: AdminQ error code to convert
+ **/
+STATIC inline int i40e_aq_rc_to_posix(u16 aq_rc)
+{
+ int aq_to_posix[] = {
+ 0, /* I40E_AQ_RC_OK */
+ -EPERM, /* I40E_AQ_RC_EPERM */
+ -ENOENT, /* I40E_AQ_RC_ENOENT */
+ -ESRCH, /* I40E_AQ_RC_ESRCH */
+ -EINTR, /* I40E_AQ_RC_EINTR */
+ -EIO, /* I40E_AQ_RC_EIO */
+ -ENXIO, /* I40E_AQ_RC_ENXIO */
+ -E2BIG, /* I40E_AQ_RC_E2BIG */
+ -EAGAIN, /* I40E_AQ_RC_EAGAIN */
+ -ENOMEM, /* I40E_AQ_RC_ENOMEM */
+ -EACCES, /* I40E_AQ_RC_EACCES */
+ -EFAULT, /* I40E_AQ_RC_EFAULT */
+ -EBUSY, /* I40E_AQ_RC_EBUSY */
+ -EEXIST, /* I40E_AQ_RC_EEXIST */
+ -EINVAL, /* I40E_AQ_RC_EINVAL */
+ -ENOTTY, /* I40E_AQ_RC_ENOTTY */
+ -ENOSPC, /* I40E_AQ_RC_ENOSPC */
+ -ENOSYS, /* I40E_AQ_RC_ENOSYS */
+ -ERANGE, /* I40E_AQ_RC_ERANGE */
+ -EPIPE, /* I40E_AQ_RC_EFLUSHED */
+ -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */
+ -EROFS, /* I40E_AQ_RC_EMODE */
+ -EFBIG, /* I40E_AQ_RC_EFBIG */
+ };
+
+	/* guard against error codes beyond the translation table */
+	if (aq_rc >= (sizeof(aq_to_posix) / sizeof(aq_to_posix[0])))
+		return -ERANGE;
+
+	return aq_to_posix[aq_rc];
+}
+
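+/* Editorial usage sketch (not part of the driver sources): after a failed
+ * AdminQ command, the firmware return code recorded in asq_last_status can
+ * be mapped to an errno-style value, e.g.
+ *
+ *	int err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ */
+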
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq_cmd.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq_cmd.h
new file mode 100755
index 00000000..5ea9b7db
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_adminq_cmd.h
@@ -0,0 +1,2179 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+
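+/* Editorial illustration (not part of the driver sources): for a correctly
+ * sized command the check compiles away to an unused enum, e.g.
+ *
+ *	I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+ *
+ * expands roughly to
+ *
+ *	enum i40e_static_assert_enum_i40e_aqc_get_version {
+ *		i40e_static_assert_i40e_aqc_get_version =
+ *			16 / ((sizeof(struct i40e_aqc_get_version) == 16) ? 1 : 0)
+ *	};
+ *
+ * so any size mismatch becomes a divide-by-zero at compile time.
+ */
+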
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an seid and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements are returned in the response buffer
+ * the first in the array is the header, remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 byte are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress table */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used as the command descriptor for most VLAN commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
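+
+/* Illustrative usage sketch (editorial note, not part of the original Intel
+ * header): a caller typically fills this descriptor through the generic
+ * i40e_aq_desc and posts it on the admin send queue, e.g. to enable unicast
+ * promiscuous mode on a VSI.  i40e_fill_default_direct_cmd_desc(),
+ * i40e_asq_send_command() and CPU_TO_LE32() appear later in this patch in
+ * i40e_common.c; CPU_TO_LE16() and the opcode name
+ * i40e_aqc_opc_set_vsi_promiscuous_modes are assumed to be defined alongside
+ * them.
+ *
+ *	struct i40e_aq_desc desc;
+ *	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ *		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ *
+ *	i40e_fill_default_direct_cmd_desc(&desc,
+ *				i40e_aqc_opc_set_vsi_promiscuous_modes);
+ *	cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ *	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ *	cmd->seid = CPU_TO_LE16(seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
+ *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */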
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter struct below,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* DCB 0x03xx*/
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
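+
+/* Editorial sketch of how the phy_type bitmap above is read (this follows
+ * the "bitmap using the above enum for offsets" comment; it is an
+ * illustration, not part of the original header): each i40e_aq_phy_type
+ * value is used as a bit offset, so a caller checks support for a given PHY
+ * type roughly as below.  LE32_TO_CPU is assumed to exist next to the
+ * LE16_TO_CPU helper used elsewhere in this patch.
+ *
+ *	u32 phy_types = LE32_TO_CPU(resp->phy_type);
+ *
+ *	if (phy_types & (1 << I40E_PHY_TYPE_10GBASE_SR))
+ *		;	// 10GBASE-SR is reported for this port
+ */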
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x0605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
+
+/* Set event mask command (direct 0x0613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+ __le16 instance_id;
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+ __le16 field_id;
+ __le16 instance_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+ u8 reserved1[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_alloc.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_alloc.h
new file mode 100755
index 00000000..6e81cd5b
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_alloc.h
@@ -0,0 +1,65 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+enum i40e_status_code i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+enum i40e_status_code i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+enum i40e_status_code i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+enum i40e_status_code i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
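+
+/* Illustrative sketch (editorial, not part of the original header): a caller
+ * pairs the allocate/free entry points around the lifetime of a DMA object,
+ * e.g. an ARQ descriptor ring.  ring_size and the 4KB alignment below are
+ * example values only.
+ *
+ *	struct i40e_dma_mem ring_mem;
+ *	enum i40e_status_code status;
+ *
+ *	status = i40e_allocate_dma_mem(hw, &ring_mem, i40e_mem_arq_ring,
+ *				       ring_size, 4096);
+ *	if (status != I40E_SUCCESS)
+ *		return status;
+ *	... use the ring ...
+ *	i40e_free_dma_mem(hw, &ring_mem);
+ */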
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_common.c b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_common.c
new file mode 100755
index 00000000..ffaa777e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_common.c
@@ -0,0 +1,4793 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+#ifdef VF_DRIVER
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
+#else
+STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
+#endif
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ DEBUGFUNC("i40e_set_mac_type\n");
+
+ if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_A:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_10G_BASE_T:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ DEBUGOUT2("i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40e_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u16 len = LE16_TO_CPU(aq_desc->datalen);
+ u8 *aq_buffer = (u8 *)buffer;
+ u32 data[4];
+ u32 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+ aq_desc->retval);
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ aq_desc->cookie_high, aq_desc->cookie_low);
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ aq_desc->params.internal.param0,
+ aq_desc->params.internal.param1);
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ aq_desc->params.external.addr_high,
+ aq_desc->params.external.addr_low);
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ i40e_memset(data, 0, sizeof(data), I40E_NONDMA_MEM);
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+ for (i = 0; i < len; i++) {
+ data[((i % 16) / 4)] |=
+ ((u32)aq_buffer[i]) << (8 * (i % 4));
+ if ((i % 16) == 15) {
+ i40e_debug(hw, mask,
+ "\t0x%04X %08X %08X %08X %08X\n",
+ i - 15, data[0], data[1], data[2],
+ data[3]);
+ i40e_memset(data, 0, sizeof(data),
+ I40E_NONDMA_MEM);
+ }
+ }
+ if ((i % 16) != 0)
+ i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
+ i - (i % 16), data[0], data[1], data[2],
+ data[3]);
+ }
+}
+
+/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the admin send queue is enabled, else false.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+ else
+ return false;
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
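+
+/* The workflow above, written out as C (an editorial sketch; only the
+ * 'known' and 'outer_ip' fields and I40E_RX_PTYPE_OUTER_IP named in the
+ * comment are relied on, the remaining decode fields are left to the caller):
+ *
+ *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
+ *
+ *	if (!decoded.known) {
+ *		// packet type is unknown, nothing further to decode
+ *	} else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
+ *		// IP packet: inspect the tunnel and inner protocol fields
+ *	} else {
+ *		// non-IP: use the L2 ptype enum to classify the frame
+ *	}
+ */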
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ I40E_PTT_UNUSED_ENTRY(154),
+ I40E_PTT_UNUSED_ENTRY(155),
+ I40E_PTT_UNUSED_ENTRY(156),
+ I40E_PTT_UNUSED_ENTRY(157),
+ I40E_PTT_UNUSED_ENTRY(158),
+ I40E_PTT_UNUSED_ENTRY(159),
+
+ I40E_PTT_UNUSED_ENTRY(160),
+ I40E_PTT_UNUSED_ENTRY(161),
+ I40E_PTT_UNUSED_ENTRY(162),
+ I40E_PTT_UNUSED_ENTRY(163),
+ I40E_PTT_UNUSED_ENTRY(164),
+ I40E_PTT_UNUSED_ENTRY(165),
+ I40E_PTT_UNUSED_ENTRY(166),
+ I40E_PTT_UNUSED_ENTRY(167),
+ I40E_PTT_UNUSED_ENTRY(168),
+ I40E_PTT_UNUSED_ENTRY(169),
+
+ I40E_PTT_UNUSED_ENTRY(170),
+ I40E_PTT_UNUSED_ENTRY(171),
+ I40E_PTT_UNUSED_ENTRY(172),
+ I40E_PTT_UNUSED_ENTRY(173),
+ I40E_PTT_UNUSED_ENTRY(174),
+ I40E_PTT_UNUSED_ENTRY(175),
+ I40E_PTT_UNUSED_ENTRY(176),
+ I40E_PTT_UNUSED_ENTRY(177),
+ I40E_PTT_UNUSED_ENTRY(178),
+ I40E_PTT_UNUSED_ENTRY(179),
+
+ I40E_PTT_UNUSED_ENTRY(180),
+ I40E_PTT_UNUSED_ENTRY(181),
+ I40E_PTT_UNUSED_ENTRY(182),
+ I40E_PTT_UNUSED_ENTRY(183),
+ I40E_PTT_UNUSED_ENTRY(184),
+ I40E_PTT_UNUSED_ENTRY(185),
+ I40E_PTT_UNUSED_ENTRY(186),
+ I40E_PTT_UNUSED_ENTRY(187),
+ I40E_PTT_UNUSED_ENTRY(188),
+ I40E_PTT_UNUSED_ENTRY(189),
+
+ I40E_PTT_UNUSED_ENTRY(190),
+ I40E_PTT_UNUSED_ENTRY(191),
+ I40E_PTT_UNUSED_ENTRY(192),
+ I40E_PTT_UNUSED_ENTRY(193),
+ I40E_PTT_UNUSED_ENTRY(194),
+ I40E_PTT_UNUSED_ENTRY(195),
+ I40E_PTT_UNUSED_ENTRY(196),
+ I40E_PTT_UNUSED_ENTRY(197),
+ I40E_PTT_UNUSED_ENTRY(198),
+ I40E_PTT_UNUSED_ENTRY(199),
+
+ I40E_PTT_UNUSED_ENTRY(200),
+ I40E_PTT_UNUSED_ENTRY(201),
+ I40E_PTT_UNUSED_ENTRY(202),
+ I40E_PTT_UNUSED_ENTRY(203),
+ I40E_PTT_UNUSED_ENTRY(204),
+ I40E_PTT_UNUSED_ENTRY(205),
+ I40E_PTT_UNUSED_ENTRY(206),
+ I40E_PTT_UNUSED_ENTRY(207),
+ I40E_PTT_UNUSED_ENTRY(208),
+ I40E_PTT_UNUSED_ENTRY(209),
+
+ I40E_PTT_UNUSED_ENTRY(210),
+ I40E_PTT_UNUSED_ENTRY(211),
+ I40E_PTT_UNUSED_ENTRY(212),
+ I40E_PTT_UNUSED_ENTRY(213),
+ I40E_PTT_UNUSED_ENTRY(214),
+ I40E_PTT_UNUSED_ENTRY(215),
+ I40E_PTT_UNUSED_ENTRY(216),
+ I40E_PTT_UNUSED_ENTRY(217),
+ I40E_PTT_UNUSED_ENTRY(218),
+ I40E_PTT_UNUSED_ENTRY(219),
+
+ I40E_PTT_UNUSED_ENTRY(220),
+ I40E_PTT_UNUSED_ENTRY(221),
+ I40E_PTT_UNUSED_ENTRY(222),
+ I40E_PTT_UNUSED_ENTRY(223),
+ I40E_PTT_UNUSED_ENTRY(224),
+ I40E_PTT_UNUSED_ENTRY(225),
+ I40E_PTT_UNUSED_ENTRY(226),
+ I40E_PTT_UNUSED_ENTRY(227),
+ I40E_PTT_UNUSED_ENTRY(228),
+ I40E_PTT_UNUSED_ENTRY(229),
+
+ I40E_PTT_UNUSED_ENTRY(230),
+ I40E_PTT_UNUSED_ENTRY(231),
+ I40E_PTT_UNUSED_ENTRY(232),
+ I40E_PTT_UNUSED_ENTRY(233),
+ I40E_PTT_UNUSED_ENTRY(234),
+ I40E_PTT_UNUSED_ENTRY(235),
+ I40E_PTT_UNUSED_ENTRY(236),
+ I40E_PTT_UNUSED_ENTRY(237),
+ I40E_PTT_UNUSED_ENTRY(238),
+ I40E_PTT_UNUSED_ENTRY(239),
+
+ I40E_PTT_UNUSED_ENTRY(240),
+ I40E_PTT_UNUSED_ENTRY(241),
+ I40E_PTT_UNUSED_ENTRY(242),
+ I40E_PTT_UNUSED_ENTRY(243),
+ I40E_PTT_UNUSED_ENTRY(244),
+ I40E_PTT_UNUSED_ENTRY(245),
+ I40E_PTT_UNUSED_ENTRY(246),
+ I40E_PTT_UNUSED_ENTRY(247),
+ I40E_PTT_UNUSED_ENTRY(248),
+ I40E_PTT_UNUSED_ENTRY(249),
+
+ I40E_PTT_UNUSED_ENTRY(250),
+ I40E_PTT_UNUSED_ENTRY(251),
+ I40E_PTT_UNUSED_ENTRY(252),
+ I40E_PTT_UNUSED_ENTRY(253),
+ I40E_PTT_UNUSED_ENTRY(254),
+ I40E_PTT_UNUSED_ENTRY(255)
+};
+
+#ifndef VF_DRIVER
+
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u32 reg;
+
+ DEBUGFUNC("i40e_init_shared_code");
+
+ i40e_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ break;
+ default:
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw->phy.get_link_info = true;
+
+ /* Determine port number */
+ reg = rd32(hw, I40E_PFGEN_PORTNUM);
+ reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+ I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+ hw->port = (u8)reg;
+
+ /* Determine the PF number based on the PCI fn */
+ reg = rd32(hw, I40E_GLPCI_CAPSUP);
+ if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
+ hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
+ else
+ hw->pf_id = (u8)hw->bus.func;
+
+ status = i40e_init_nvm(hw);
+ return status;
+}
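+
+/* Illustrative usage sketch (hypothetical caller, not part of the shared
+ * code): before i40e_init_shared_code can be called, the caller is expected
+ * to zero the i40e_hw structure and fill in the identification fields listed
+ * above, e.g.:
+ *
+ *	struct i40e_hw hw;
+ *
+ *	memset(&hw, 0, sizeof(hw));
+ *	hw.hw_addr = mapped_bar0;			// hypothetical BAR0 mapping
+ *	hw.device_id = pci_device_id;			// hypothetical PCI ids
+ *	hw.vendor_id = pci_vendor_id;
+ *	hw.subsystem_device_id = pci_subsys_device_id;
+ *	hw.subsystem_vendor_id = pci_subsys_vendor_id;
+ *	hw.revision_id = pci_revision_id;
+ *	if (i40e_init_shared_code(&hw) != I40E_SUCCESS)
+ *		goto unsupported;	// device not supported or NVM init failed
+ */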
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of what addresses were added to the addr store
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+STATIC enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_read *cmd_data =
+ (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, addrs,
+ sizeof(*addrs), cmd_details);
+ *flags = LE16_TO_CPU(cmd_data->command_flags);
+
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_write *cmd_data =
+ (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_mac_address_write);
+ cmd_data->command_flags = CPU_TO_LE16(flags);
+ cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]);
+ cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) |
+ ((u32)mac_addr[3] << 16) |
+ ((u32)mac_addr[4] << 8) |
+ mac_addr[5]);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's LAN MAC address via an admin queue command
+ **/
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+ if (flags & I40E_AQC_LAN_ADDR_VALID)
+ memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+
+ return status;
+}
+
+/**
+ * i40e_get_port_mac_addr - get Port MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to Port MAC address
+ *
+ * Reads the adapter's Port MAC address
+ **/
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ if (status)
+ return status;
+
+ if (flags & I40E_AQC_PORT_ADDR_VALID)
+ memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ else
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * i40e_pre_tx_queue_cfg - pre tx queue configure
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
+ *
+ * Handles hw requirement to indicate intention to enable
+ * or disable target queue.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+ u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+ u32 reg_block = 0;
+ u32 reg_val;
+
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+ if (enable)
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+ else
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
+
+/**
+ * i40e_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ DEBUGFUNC("i40e_validate_mac_addr");
+
+ /* Broadcast addresses ARE multicast addresses
+ * Make sure it is not a multicast address
+ * Reject the zero address
+ */
+ if (I40E_IS_MULTICAST(mac_addr) ||
+ (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
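+
+/* Illustrative sketch (hypothetical caller, not part of the shared code):
+ * i40e_get_mac_addr and i40e_validate_mac_addr can be combined to fetch and
+ * sanity-check the LAN MAC address, e.g.:
+ *
+ *	u8 mac[6];
+ *
+ *	if (i40e_get_mac_addr(hw, mac) == I40E_SUCCESS &&
+ *	    i40e_validate_mac_addr(mac) == I40E_SUCCESS)
+ *		use_mac(mac);	// hypothetical consumer of the address
+ */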
+
+/**
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+{
+ enum i40e_media_type media;
+
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_1000BASE_SX:
+ case I40E_PHY_TYPE_1000BASE_LX:
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ media = I40E_MEDIA_TYPE_FIBER;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_10GBASE_T:
+ media = I40E_MEDIA_TYPE_BASET;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ media = I40E_MEDIA_TYPE_DA;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ media = I40E_MEDIA_TYPE_BACKPLANE;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ default:
+ media = I40E_MEDIA_TYPE_UNKNOWN;
+ break;
+ }
+
+ return media;
+}
+
+#define I40E_PF_RESET_WAIT_COUNT 100
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * assure the global reset is complete and then reset the PF
+ **/
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
+{
+ u32 cnt = 0;
+ u32 cnt1 = 0;
+ u32 reg = 0;
+ u32 grst_del;
+
+ /* Poll for Global Reset steady state in case of recent GRST.
+ * The grst delay value is in 100ms units, and we'll wait a
+ * couple counts longer to be sure we don't just miss the end.
+ */
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK)
+ >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ for (cnt = 0; cnt < grst_del + 2; cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ break;
+ i40e_msec_delay(100);
+ }
+ if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ DEBUGOUT("Global reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* Now Wait for the FW to be ready */
+ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+ reg = rd32(hw, I40E_GLNVM_ULD);
+ reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+ if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+ DEBUGOUT1("Core and Global modules ready %d\n", cnt1);
+ break;
+ }
+ i40e_msec_delay(10);
+ }
+ if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+ DEBUGOUT("wait for FW Reset complete timedout\n");
+ DEBUGOUT1("I40E_GLNVM_ULD = 0x%x\n", reg);
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* If there was a Global Reset in progress when we got here,
+ * we don't need to do the PF Reset
+ */
+ if (!cnt) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ wr32(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+ break;
+ i40e_msec_delay(1);
+ }
+ if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ DEBUGOUT("PF reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+ }
+
+ i40e_clear_pxe_mode(hw);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_clear_hw - clear out any left over hw state
+ * @hw: pointer to the hw struct
+ *
+ * Clear queues and interrupts, typically called at init time,
+ * but after the capabilities have been found so we know how many
+ * queues and msix vectors have been allocated.
+ **/
+void i40e_clear_hw(struct i40e_hw *hw)
+{
+ u32 num_queues, base_queue;
+ u32 num_pf_int;
+ u32 num_vf_int;
+ u32 num_vfs;
+ u32 i, j;
+ u32 val;
+ u32 eol = 0x7ff;
+
+ /* get number of interrupts, queues, and vfs */
+ val = rd32(hw, I40E_GLPCI_CNF2);
+ num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+ num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+
+ val = rd32(hw, I40E_PFLAN_QALLOC);
+ base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+ I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+ j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+ I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+ if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+ num_queues = (j - base_queue) + 1;
+ else
+ num_queues = 0;
+
+ val = rd32(hw, I40E_PF_VT_PFALLOC);
+ i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+ j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+ if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+ num_vfs = (j - i) + 1;
+ else
+ num_vfs = 0;
+
+ /* stop all the interrupts */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
+
+ /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
+ val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLST0, val);
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_LNKLSTN(i), val);
+ val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ for (i = 0; i < num_vfs; i++)
+ wr32(hw, I40E_VPINT_LNKLST0(i), val);
+ for (i = 0; i < num_vf_int - 2; i++)
+ wr32(hw, I40E_VPINT_LNKLSTN(i), val);
+
+ /* warn the HW of the coming Tx disables */
+ for (i = 0; i < num_queues; i++) {
+ u32 abs_queue_idx = base_queue + i;
+ u32 reg_block = 0;
+
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+ val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
+ }
+ i40e_usec_delay(400);
+
+ /* stop all the queues */
+ for (i = 0; i < num_queues; i++) {
+ wr32(hw, I40E_QINT_TQCTL(i), 0);
+ wr32(hw, I40E_QTX_ENA(i), 0);
+ wr32(hw, I40E_QINT_RQCTL(i), 0);
+ wr32(hw, I40E_QRX_ENA(i), 0);
+ }
+
+ /* short wait for all queue disables to settle */
+ i40e_usec_delay(50);
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_clear_pxe_mode(hw, NULL);
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ */
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+ u32 gpio_val = 0;
+ u32 port;
+
+ if (!hw->func_caps.led[idx])
+ return 0;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+ I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ /* Ignore this LED if PRT_NUM_NA is set (the LED is not port
+ * specific) or if it does not belong to our port
+ */
+ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+ (port != hw->port))
+ return 0;
+
+ return gpio_val;
+}
+
+#define I40E_LED0 22
+#define I40E_LINK_ACTIVITY 0xC
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+ u32 mode = 0;
+ int i;
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ break;
+ }
+
+ return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: true if the LED should blink when on, false if steady
+ *
+ * If this function is used to turn the blink on, it should also be
+ * used to turn the blink off when restoring the original state.
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
+{
+ int i;
+
+ if (mode & 0xfffffff0)
+ DEBUGOUT1("invalid mode passed in %X\n", mode);
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ /* this & is a bit of paranoia, but serves as a range check */
+ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+ if (mode == I40E_LINK_ACTIVITY)
+ blink = false;
+
+ gpio_val |= (blink ? 1 : 0) <<
+ I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+
+ wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ break;
+ }
+}
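+
+/* Illustrative sketch (hypothetical port-identify helper, not part of the
+ * shared code): i40e_led_get/i40e_led_set are intended to be used as a
+ * save, blink, restore sequence, e.g.:
+ *
+ *	u32 orig_mode = i40e_led_get(hw);
+ *
+ *	i40e_led_set(hw, 0xf, true);		// LED on, blinking
+ *	// ... identification period ...
+ *	i40e_led_set(hw, orig_mode, false);	// restore the original state
+ */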
+
+/* Admin command wrappers */
+
+/**
+ * i40e_aq_get_phy_capabilities
+ * @hw: pointer to the hw struct
+ * @abilities: structure for PHY capabilities to be filled
+ * @qualified_modules: report Qualified Modules
+ * @report_init: report init capabilities (active are default)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the various PHY abilities supported on the Port.
+ **/
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+ u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+
+ if (!abilities)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ if (qualified_modules)
+ desc.params.external.param0 |=
+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
+
+ if (report_init)
+ desc.params.external.param0 |=
+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
+
+ status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
+ cmd_details);
+
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
+ status = I40E_ERR_UNKNOWN_PHY;
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_config
+ * @hw: pointer to the hw struct
+ * @config: structure with PHY configuration to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters
+ * supported on the Port. One or more of the Set PHY config parameters may be
+ * ignored in an MFP mode as the PF may not have the privilege to set some
+ * of the PHY Config parameters. This status will be indicated by the
+ * command response.
+ **/
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_set_phy_config *cmd =
+ (struct i40e_aq_set_phy_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (!config)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_config);
+
+ *cmd = *config;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: bitmask reporting which AQ call failed (get/set/update)
+ * @atomic_restart: restart the link so the new settings take effect
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
+{
+ enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+ enum i40e_status_code status;
+ u8 pause_mask = 0x0;
+
+ *aq_failures = 0x0;
+
+ switch (fc_mode) {
+ case I40E_FC_FULL:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_RX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_TX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ break;
+ default:
+ break;
+ }
+
+ /* Get the current phy config */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
+ return status;
+ }
+
+ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+ /* clear the old pause settings */
+ config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+ ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+ /* set the new abilities */
+ config.abilities |= pause_mask;
+ /* If the abilities have changed, then set the new config */
+ if (config.abilities != abilities.abilities) {
+ /* Auto restart link so settings take effect */
+ if (atomic_restart)
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* Copy over all the old settings */
+ config.phy_type = abilities.phy_type;
+ config.link_speed = abilities.link_speed;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+ }
+ /* Update the link info */
+ status = i40e_update_link_info(hw, true);
+ if (status) {
+ /* Wait a little bit (on 40G cards it sometimes takes a really
+ * long time for link to come back from the atomic reset)
+ * and try once more
+ */
+ i40e_msec_delay(1000);
+ status = i40e_update_link_info(hw, true);
+ }
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
+
+ return status;
+}
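+
+/* Illustrative sketch (hypothetical caller, not part of the shared code):
+ * the requested mode is placed in hw->fc.requested_mode before calling
+ * i40e_set_fc, and aq_failures reports which AQ step failed, e.g.:
+ *
+ *	u8 aq_failures = 0;
+ *
+ *	hw->fc.requested_mode = I40E_FC_FULL;
+ *	i40e_set_fc(hw, &aq_failures, true);
+ *	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET)
+ *		log_err("Set PHY config failed");	// hypothetical logger
+ */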
+
+/**
+ * i40e_aq_set_mac_config
+ * @hw: pointer to the hw struct
+ * @max_frame_size: Maximum Frame Size to be supported by the port
+ * @crc_en: Tell HW to append a CRC to outgoing frames
+ * @pacing: Pacing configurations
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Configure MAC settings for frame size, jumbo frame support and the
+ * addition of a CRC by the hardware.
+ **/
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+ u16 max_frame_size,
+ bool crc_en, u16 pacing,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_set_mac_config *cmd =
+ (struct i40e_aq_set_mac_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (max_frame_size == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_mac_config);
+
+ cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
+ cmd->params = ((u8)pacing & 0x0F) << 3;
+ if (crc_en)
+ cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_clear_pxe *cmd =
+ (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_pxe_mode);
+
+ cmd->rx_cnt = 0x2;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @enable_link: if true: enable link, if false: disable link
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_link_restart_an *cmd =
+ (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_link_restart_an);
+
+ cmd->command = I40E_AQ_PHY_RESTART_AN;
+ if (enable_link)
+ cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
+ else
+ cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_link_status *resp =
+ (struct i40e_aqc_get_link_status *)&desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ enum i40e_status_code status;
+ bool tx_pause, rx_pause;
+ u16 command_flags;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+ if (enable_lse)
+ command_flags = I40E_AQ_LSE_ENABLE;
+ else
+ command_flags = I40E_AQ_LSE_DISABLE;
+ resp->command_flags = CPU_TO_LE16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_link_info_exit;
+
+ /* save off old link status information */
+ i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
+ sizeof(struct i40e_link_status), I40E_NONDMA_TO_NONDMA);
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw->phy.media_type = i40e_get_media_type(hw);
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+ hw_link_info->link_info = resp->link_info;
+ hw_link_info->an_info = resp->an_info;
+ hw_link_info->ext_info = resp->ext_info;
+ hw_link_info->loopback = resp->loopback;
+ hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ /* update fc info */
+ tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
+ rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw->fc.current_mode = I40E_FC_FULL;
+ else if (tx_pause)
+ hw->fc.current_mode = I40E_FC_TX_PAUSE;
+ else if (rx_pause)
+ hw->fc.current_mode = I40E_FC_RX_PAUSE;
+ else
+ hw->fc.current_mode = I40E_FC_NONE;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = true;
+ else
+ hw_link_info->crc_enable = false;
+
+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+ hw_link_info->lse_enable = true;
+ else
+ hw_link_info->lse_enable = false;
+
+ /* save link status information */
+ if (link)
+ i40e_memcpy(link, hw_link_info, sizeof(struct i40e_link_status),
+ I40E_NONDMA_TO_NONDMA);
+
+ /* flag cleared so helper functions don't call AQ again */
+ hw->phy.get_link_info = false;
+
+aq_get_link_info_exit:
+ return status;
+}
+
+/**
+ * i40e_update_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ *
+ * Returns the link status of the adapter
+ **/
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw,
+ bool enable_lse)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status;
+
+ status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL);
+ if (status)
+ return status;
+
+ status = i40e_aq_get_phy_capabilities(hw, false, false,
+ &abilities, NULL);
+ if (status)
+ return status;
+
+ if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED)
+ hw->phy.link_info.an_enabled = true;
+ else
+ hw->phy.link_info.an_enabled = false;
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_int_mask
+ * @hw: pointer to the hw struct
+ * @mask: interrupt mask to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set link interrupt mask.
+ **/
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_int_mask *cmd =
+ (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_int_mask);
+
+ cmd->event_mask = CPU_TO_LE16(mask);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *resp =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_local_advt_reg);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_local_advt_reg_exit;
+
+ *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+ *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_local_advt_reg_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *cmd =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_local_advt_reg);
+
+ cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg));
+ cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg));
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_partner_advt
+ * @hw: pointer to the hw struct
+ * @advt_reg: AN partner advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the link partner AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *resp =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_partner_advt);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_partner_advt_exit;
+
+ *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+ *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_partner_advt_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_lb_modes
+ * @hw: pointer to the hw struct
+ * @lb_modes: loopback mode to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets loopback modes.
+ **/
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
+ u16 lb_modes,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_lb_mode *cmd =
+ (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_lb_modes);
+
+ cmd->lb_mode = CPU_TO_LE16(lb_modes);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_debug
+ * @hw: pointer to the hw struct
+ * @cmd_flags: debug command flags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Reset the external PHY.
+ **/
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_debug *cmd =
+ (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_debug);
+
+ cmd->command_flags = cmd_flags;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+ **/
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_vsi);
+
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid);
+ cmd->connection_type = vsi_ctx->connection_type;
+ cmd->vf_id = vsi_ctx->vf_num;
+ cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_add_vsi_exit;
+
+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_add_vsi_exit:
+ return status;
+}
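+
+/* Illustrative sketch (hypothetical caller, not part of the shared code):
+ * the request fields of the VSI context are filled in by the caller and the
+ * SEID/VSI number assigned by firmware are read back on success, e.g.:
+ *
+ *	struct i40e_vsi_context ctx;
+ *
+ *	memset(&ctx, 0, sizeof(ctx));
+ *	ctx.uplink_seid = uplink_seid;		// hypothetical uplink SEID
+ *	ctx.connection_type = conn_type;	// hypothetical values
+ *	ctx.flags = vsi_flags;
+ *	// ctx.info would also describe the desired VSI properties
+ *	if (i40e_aq_add_vsi(hw, &ctx, NULL) == I40E_SUCCESS)
+ *		new_vsi_seid = ctx.seid;	// firmware-assigned SEID
+ */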
+
+/**
+ * i40e_aq_set_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: true to set filter, false to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set_filter)
+ cmd->promiscuous_flags
+ |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ else
+ cmd->promiscuous_flags
+ &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
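+
+/* Illustrative sketch (hypothetical caller, not part of the shared code):
+ * the three wrappers above all use the same set-VSI-promiscuous-modes AQ
+ * command and can be called back to back to make a VSI fully promiscuous:
+ *
+ *	u16 seid = vsi_seid;	// hypothetical VSI SEID
+ *
+ *	i40e_aq_set_vsi_unicast_promiscuous(hw, seid, true, NULL);
+ *	i40e_aq_set_vsi_multicast_promiscuous(hw, seid, true, NULL);
+ *	i40e_aq_set_vsi_broadcast(hw, seid, true, NULL);
+ */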
+
+/**
+ * i40e_aq_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ UNREFERENCED_1PARAMETER(cmd_details);
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_vsi_parameters);
+
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), NULL);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_vsi_params_exit;
+
+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_update_vsi_parameters);
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of input buffer
+ * @start_seid: seid to start for the report, 0 == beginning
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *scfg =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_config);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ scfg->seid = CPU_TO_LE16(*start_seid);
+
+ status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+ *start_seid = LE16_TO_CPU(scfg->seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @api_major_version: admin queue API major version
+ * @api_minor_version: admin queue API minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_version *resp =
+ (struct i40e_aqc_get_version *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS) {
+ if (fw_major_version != NULL)
+ *fw_major_version = LE16_TO_CPU(resp->fw_major);
+ if (fw_minor_version != NULL)
+ *fw_minor_version = LE16_TO_CPU(resp->fw_minor);
+ if (api_major_version != NULL)
+ *api_major_version = LE16_TO_CPU(resp->api_major);
+ if (api_minor_version != NULL)
+ *api_minor_version = LE16_TO_CPU(resp->api_minor);
+
+ /* A workaround to fix the API version in SW */
+ if (api_major_version && api_minor_version &&
+ fw_major_version && fw_minor_version &&
+ ((*api_major_version == 1) && (*api_minor_version == 1)) &&
+ (((*fw_major_version == 4) && (*fw_minor_version >= 2)) ||
+ (*fw_major_version > 4)))
+ *api_minor_version = 2;
+ }
+
+ return status;
+}
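+
+/* Illustrative sketch (hypothetical caller, not part of the shared code):
+ * any of the version output pointers may be NULL if the caller does not
+ * need that value, e.g.:
+ *
+ *	u16 fw_maj, fw_min, api_maj, api_min;
+ *
+ *	if (i40e_aq_get_firmware_version(hw, &fw_maj, &fw_min, &api_maj,
+ *					 &api_min, NULL) == I40E_SUCCESS)
+ *		log_info("FW %u.%u, AQ API %u.%u",	// hypothetical logger
+ *			 fw_maj, fw_min, api_maj, api_min);
+ */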
+
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver's major, minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_driver_version *cmd =
+ (struct i40e_aqc_driver_version *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 len;
+
+ if (dv == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_SI);
+ cmd->driver_major_ver = dv->major_version;
+ cmd->driver_minor_ver = dv->minor_version;
+ cmd->driver_build_ver = dv->build_version;
+ cmd->driver_subbuild_ver = dv->subbuild_version;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if link is up, false if link is down.
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+bool i40e_get_link_status(struct i40e_hw *hw)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ bool link_status = false;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+
+ if (status != I40E_SUCCESS)
+ goto i40e_get_link_status_exit;
+ }
+
+ link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+i40e_get_link_status_exit:
+ return link_status;
+}
+
+/**
+ * i40e_get_link_speed
+ * @hw: pointer to the hw struct
+ *
+ * Returns the link speed of the adapter.
+ **/
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw)
+{
+ enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+
+ if (status != I40E_SUCCESS)
+ goto i40e_link_speed_exit;
+ }
+
+ speed = hw->phy.link_info.link_speed;
+
+i40e_link_speed_exit:
+ return speed;
+}
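+
+/* Illustrative sketch (hypothetical caller, not part of the shared code):
+ * the two helpers above can be chained to report the current link state:
+ *
+ *	if (i40e_get_link_status(hw)) {
+ *		enum i40e_aq_link_speed speed = i40e_get_link_speed(hw);
+ *
+ *		report_link_up(speed);	// hypothetical reporting helpers
+ *	} else {
+ *		report_link_down();
+ *	}
+ */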
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: true for default port VSI, false for control port
+ * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements. If the uplink SEID is 0, this will be a floating VEB.
+ **/
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, bool enable_l2_filtering,
+ u16 *veb_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_veb *cmd =
+ (struct i40e_aqc_add_veb *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp =
+ (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 veb_flags = 0;
+
+ /* SEIDs need to either both be set or both be 0 for floating VEB */
+ if (!!uplink_seid != !!downlink_seid)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+ cmd->uplink_seid = CPU_TO_LE16(uplink_seid);
+ cmd->downlink_seid = CPU_TO_LE16(downlink_seid);
+ cmd->enable_tcs = enabled_tc;
+ if (!uplink_seid)
+ veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+ if (default_port)
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+ else
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+ if (enable_l2_filtering)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+
+ cmd->veb_flags = CPU_TO_LE16(veb_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && veb_seid)
+ *veb_seid = LE16_TO_CPU(resp->veb_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to true if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEBs used by this function
+ * @vebs_free: total VEBs not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * uplink_seid, and returns them to the caller.
+ **/
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+ (struct i40e_aqc_get_veb_parameters_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (veb_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_veb_parameters);
+ cmd_resp->seid = CPU_TO_LE16(veb_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status)
+ goto get_veb_exit;
+
+ if (switch_id)
+ *switch_id = LE16_TO_CPU(cmd_resp->switch_id);
+ if (statistic_index)
+ *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index);
+ if (vebs_used)
+ *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used);
+ if (vebs_free)
+ *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free);
+ if (floating) {
+ u16 flags = LE16_TO_CPU(cmd_resp->veb_flags);
+ if (flags & I40E_AQC_ADD_VEB_FLOATING)
+ *floating = true;
+ else
+ *floating = false;
+ }
+
+get_veb_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
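+
+/* Illustrative sketch (hypothetical caller, not part of the shared code;
+ * the element field and flag names are assumed from i40e_adminq_cmd.h): a
+ * single-entry MAC/VLAN filter list could be added like this:
+ *
+ *	struct i40e_aqc_add_macvlan_element_data elem;
+ *
+ *	memset(&elem, 0, sizeof(elem));
+ *	memcpy(elem.mac_addr, mac, 6);		// hypothetical MAC to filter
+ *	elem.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ *				 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+ *	i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
+ */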
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF id to send the message to
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send a message to the specified VF
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pf_vf_message *cmd =
+ (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd->id = CPU_TO_LE32(vfid);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_debug_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write to a register using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+
+ cmd->address = CPU_TO_LE32(reg_addr);
+ cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32));
+ cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF));
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: pointer to return the HMC profile currently in use
+ * @pe_vf_enabled_count: pointer to return the number of PE enabled VFs
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Query the HMC profile of the device.
+ **/
+enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile *profile,
+ u8 *pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_get_set_hmc_resource_profile *resp =
+ (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_query_hmc_resource_profile);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ *profile = (enum i40e_aq_hmc_profile)(resp->pm_profile &
+ I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK);
+ *pe_vf_enabled_count = resp->pe_vf_enabled &
+ I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK;
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: type of profile the HMC is to be set as
+ * @pe_vf_enabled_count: the number of PE enabled VFs the system has
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the HMC profile of the device.
+ **/
+enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_get_set_hmc_resource_profile *cmd =
+ (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_hmc_resource_profile);
+
+ cmd->pm_profile = (u8)profile;
+ cmd->pe_vf_enabled = pe_vf_enabled_count;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests ownership of a common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd_resp =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_request_resource");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+ cmd_resp->resource_id = CPU_TO_LE16(resource);
+ cmd_resp->access_type = CPU_TO_LE16(access);
+ cmd_resp->resource_number = CPU_TO_LE32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by someone else, the command completes with
+ * busy return value and the timeout field indicates the maximum time
+ * the current owner of the resource has to free it.
+ */
+ if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Releases a previously acquired common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_release_resource");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+ cmd->resource_id = CPU_TO_LE16(resource);
+ cmd->resource_number = CPU_TO_LE32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
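+/*
+ * Usage sketch (illustrative only, kept compiled out): the acquire/release
+ * pattern around a shared resource such as the NVM. I40E_NVM_RESOURCE_ID
+ * and I40E_RESOURCE_READ are assumed to be defined in i40e_type.h.
+ */
+#if 0
+static enum i40e_status_code example_with_nvm_lock(struct i40e_hw *hw)
+{
+	enum i40e_status_code status;
+	u64 timeout = 0;
+
+	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
+					  I40E_RESOURCE_READ, 0, &timeout,
+					  NULL);
+	if (status)
+		return status;	/* on EBUSY, timeout reports the owner's hold time */
+
+	/* ... access the NVM here, staying within 'timeout' ms ... */
+
+	return i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+}
+#endif
+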
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_read_nvm");
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_read_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+ return status;
+}
+
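+/*
+ * Usage sketch (illustrative only, kept compiled out): reading an NVM region
+ * in chunks and flagging the final read with last_command. The 4 KB chunk
+ * size is an arbitrary choice for the sketch; callers must also hold the NVM
+ * resource (see i40e_aq_request_resource above).
+ */
+#if 0
+static enum i40e_status_code example_read_nvm_region(struct i40e_hw *hw,
+		u8 module, u32 offset, u32 total_len, u8 *dest)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+	u16 chunk;
+
+	while (total_len) {
+		chunk = (total_len > 4096) ? 4096 : (u16)total_len;
+		status = i40e_aq_read_nvm(hw, module, offset, chunk, dest,
+					  (total_len == chunk), NULL);
+		if (status)
+			break;
+		offset += chunk;
+		dest += chunk;
+		total_len -= chunk;
+	}
+
+	return status;
+}
+#endif
+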
+/**
+ * i40e_aq_erase_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in the module (expressed in 4 KB from module's beginning)
+ * @length: length of the section to be erased (expressed in 4 KB)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_erase_nvm");
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_erase_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_erase_nvm_exit:
+ return status;
+}
+
+#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01
+#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02
+#define I40E_DEV_FUNC_CAP_NPAR 0x03
+#define I40E_DEV_FUNC_CAP_OS2BMC 0x04
+#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05
+#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12
+#define I40E_DEV_FUNC_CAP_VF 0x13
+#define I40E_DEV_FUNC_CAP_VMDQ 0x14
+#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15
+#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16
+#define I40E_DEV_FUNC_CAP_VSI 0x17
+#define I40E_DEV_FUNC_CAP_DCB 0x18
+#define I40E_DEV_FUNC_CAP_FCOE 0x21
+#define I40E_DEV_FUNC_CAP_RSS 0x40
+#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
+#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
+#define I40E_DEV_FUNC_CAP_MSIX 0x43
+#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
+#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
+#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
+#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1
+#define I40E_DEV_FUNC_CAP_CEM 0xF2
+#define I40E_DEV_FUNC_CAP_IWARP 0x51
+#define I40E_DEV_FUNC_CAP_LED 0x61
+#define I40E_DEV_FUNC_CAP_SDP 0x62
+#define I40E_DEV_FUNC_CAP_MDIO 0x63
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+ u32 cap_count,
+ enum i40e_admin_queue_opc list_type_opc)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 number, logical_id, phys_id;
+ struct i40e_hw_capabilities *p;
+ u32 i = 0;
+ u16 id;
+
+ cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+
+ if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+ else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->func_caps;
+ else
+ return;
+
+ for (i = 0; i < cap_count; i++, cap++) {
+ id = LE16_TO_CPU(cap->id);
+ number = LE32_TO_CPU(cap->number);
+ logical_id = LE32_TO_CPU(cap->logical_id);
+ phys_id = LE32_TO_CPU(cap->phys_id);
+
+ switch (id) {
+ case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+ p->switch_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MGMT_MODE:
+ p->management_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_NPAR:
+ p->npar_enable = number;
+ break;
+ case I40E_DEV_FUNC_CAP_OS2BMC:
+ p->os2bmc = number;
+ break;
+ case I40E_DEV_FUNC_CAP_VALID_FUNC:
+ p->valid_functions = number;
+ break;
+ case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+ if (number == 1)
+ p->sr_iov_1_1 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_VF:
+ p->num_vfs = number;
+ p->vf_base_id = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_VMDQ:
+ if (number == 1)
+ p->vmdq = true;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBG:
+ if (number == 1)
+ p->evb_802_1_qbg = true;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBH:
+ if (number == 1)
+ p->evb_802_1_qbh = true;
+ break;
+ case I40E_DEV_FUNC_CAP_VSI:
+ p->num_vsis = number;
+ break;
+ case I40E_DEV_FUNC_CAP_DCB:
+ if (number == 1) {
+ p->dcb = true;
+ p->enabled_tcmap = logical_id;
+ p->maxtc = phys_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_FCOE:
+ if (number == 1)
+ p->fcoe = true;
+ break;
+ case I40E_DEV_FUNC_CAP_RSS:
+ p->rss = true;
+ p->rss_table_size = number;
+ p->rss_table_entry_width = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_RX_QUEUES:
+ p->num_rx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_TX_QUEUES:
+ p->num_tx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX:
+ p->num_msix_vectors = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX_VF:
+ p->num_msix_vectors_vf = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MFP_MODE_1:
+ if (number == 1)
+ p->mfp_mode_1 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_CEM:
+ if (number == 1)
+ p->mgmt_cem = true;
+ break;
+ case I40E_DEV_FUNC_CAP_IWARP:
+ if (number == 1)
+ p->iwarp = true;
+ break;
+ case I40E_DEV_FUNC_CAP_LED:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->led[phys_id] = true;
+ break;
+ case I40E_DEV_FUNC_CAP_SDP:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->sdp[phys_id] = true;
+ break;
+ case I40E_DEV_FUNC_CAP_MDIO:
+ if (number == 1) {
+ p->mdio_port_num = phys_id;
+ p->mdio_port_mode = logical_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_IEEE_1588:
+ if (number == 1)
+ p->ieee_1588 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+ p->fd = true;
+ p->fd_filters_guaranteed = number;
+ p->fd_filters_best_effort = logical_id;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Software override ensuring FCoE is disabled when NPAR or MFP
+ * mode is enabled, because it is not supported in these modes.
+ */
+ if (p->npar_enable || p->mfp_mode_1)
+ p->fcoe = false;
+
+ /* additional HW specific goodies that might
+ * someday be HW version specific
+ */
+ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_list_capabilites *cmd;
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+ if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+ list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+ status = I40E_ERR_PARAM;
+ goto exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ *data_size = LE16_TO_CPU(desc.datalen);
+
+ if (status)
+ goto exit;
+
+ i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count),
+ list_type_opc);
+
+exit:
+ return status;
+}
+
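+/*
+ * Usage sketch (illustrative only, kept compiled out): discovering function
+ * capabilities. When the firmware answers ENOMEM, data_size reports the
+ * buffer size it actually needs, so a real caller would reallocate and retry.
+ */
+#if 0
+static enum i40e_status_code example_get_func_caps(struct i40e_hw *hw,
+						   void *buff, u16 buff_size)
+{
+	enum i40e_status_code status;
+	u16 needed = 0;
+
+	status = i40e_aq_discover_capabilities(hw, buff, buff_size, &needed,
+			i40e_aqc_opc_list_func_capabilities, NULL);
+	if (status && hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
+		return status;	/* retry with a buffer of 'needed' bytes */
+
+	return status;	/* hw->func_caps is populated on success */
+}
+#endif
+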
+/**
+ * i40e_aq_update_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_update_nvm");
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_update_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_update_nvm_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_get_mib *cmd =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+ cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (local_len != NULL)
+ *local_len = LE16_TO_CPU(resp->local_len);
+ if (remote_len != NULL)
+ *remote_len = LE16_TO_CPU(resp->remote_len);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on the ARQ when the LLDP MIB
+ * associated with the interface changes
+ **/
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_mib *cmd =
+ (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+ if (!enable_update)
+ cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to add
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be added
+ * @mib_len: length of the LLDP MIB returned in response
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add the specified TLV to the LLDP Local MIB for the given bridge type.
+ * It is the responsibility of the caller to make sure that the TLV is not
+ * already present in the LLDPDU.
+ * In return firmware will write the complete LLDP MIB with the newly
+ * added TLV in the response buffer.
+ **/
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+ void *buff, u16 buff_size, u16 tlv_len,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_add_tlv *cmd =
+ (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff || tlv_len == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->len = CPU_TO_LE16(tlv_len);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_update_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to update
+ * @buff_size: size of the buffer holding original and updated TLVs
+ * @old_len: Length of the Original TLV
+ * @new_len: Length of the Updated TLV
+ * @offset: offset of the updated TLV in the buff
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the specified TLV to the LLDP Local MIB for the given bridge type.
+ * Firmware will place the complete LLDP MIB in response buffer with the
+ * updated TLV.
+ **/
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_tlv *cmd =
+ (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff || offset == 0 ||
+ old_len == 0 || new_len == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->old_len = CPU_TO_LE16(old_len);
+ cmd->new_offset = CPU_TO_LE16(offset);
+ cmd->new_len = CPU_TO_LE16(new_len);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: pointer to a user supplied buffer that has the TLV
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be deleted
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Delete the specified TLV from LLDP Local MIB for the given bridge type.
+ * The firmware places the entire LLDP MIB in the response buffer.
+ **/
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 tlv_len, u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_add_tlv *cmd =
+ (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+ cmd->len = CPU_TO_LE16(tlv_len);
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: True if the LLDP Agent needs to be shut down
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop *cmd =
+ (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+ if (shutdown_agent)
+ cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_start *cmd =
+ (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+ cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
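+/*
+ * Usage sketch (illustrative only, kept compiled out): cycling the embedded
+ * LLDP agent. With shutdown_agent = false the agent is only stopped, so a
+ * later i40e_aq_start_lldp() brings it back up.
+ */
+#if 0
+static void example_restart_lldp_agent(struct i40e_hw *hw)
+{
+	i40e_aq_stop_lldp(hw, false, NULL);	/* stop, but do not shut down */
+	i40e_aq_start_lldp(hw, NULL);
+}
+#endif
+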
+/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_udp_tunnel *cmd =
+ (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp =
+ (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+ cmd->udp_port = CPU_TO_LE16(udp_port);
+ cmd->protocol_type = protocol_index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status)
+ *filter_index = resp->index;
+
+ return status;
+}
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_udp_tunnel *cmd =
+ (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+ cmd->index = index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
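+/*
+ * Usage sketch (illustrative only, kept compiled out): registering the
+ * standard VXLAN UDP port (4789) and removing it again by the returned
+ * filter index. I40E_AQC_TUNNEL_TYPE_VXLAN is assumed from i40e_adminq_cmd.h.
+ */
+#if 0
+static enum i40e_status_code example_vxlan_port(struct i40e_hw *hw)
+{
+	enum i40e_status_code status;
+	u8 filter_index = 0;
+
+	status = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
+					&filter_index, NULL);
+	if (status)
+		return status;
+
+	/* ... tunnel traffic can now be offloaded ... */
+
+	return i40e_aq_del_udp_tunnel(hw, filter_index, NULL);
+}
+#endif
+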
+/**
+ * i40e_aq_get_switch_resource_alloc (0x0204)
+ * @hw: pointer to the hw struct
+ * @num_entries: pointer to u8 to store the number of resource entries returned
+ * @buf: pointer to a user supplied buffer. This buffer must be large enough
+ * to store the resource information for all resource types. Each
+ * resource type is an i40e_aqc_switch_resource_alloc_data structure.
+ * @count: number of resource entries the buffer can hold
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Query the resources allocated to a function.
+ **/
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+ u8 *num_entries,
+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+ u16 count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
+ (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 length = count
+ * sizeof(struct i40e_aqc_switch_resource_alloc_element_resp);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_resource_alloc);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+ if (!status)
+ *num_entries = cmd_resp->num_entries;
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * @hw: pointer to the hw struct
+ * @flags: component flags
+ * @mac_seid: uplink seid (MAC SEID)
+ * @vsi_seid: connected vsi seid
+ * @ret_seid: seid of the created PV component
+ *
+ * This instantiates an i40e port virtualizer with specified flags.
+ * Depending on the specified flags the port virtualizer can act as an
+ * 802.1Qbr port virtualizer or an 802.1Qbg S-component.
+ **/
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+ u16 mac_seid, u16 vsi_seid,
+ u16 *ret_seid)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_update_pv *cmd =
+ (struct i40e_aqc_add_update_pv *)&desc.params.raw;
+ struct i40e_aqc_add_update_pv_completion *resp =
+ (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv);
+ cmd->command_flags = CPU_TO_LE16(flags);
+ cmd->uplink_seid = CPU_TO_LE16(mac_seid);
+ cmd->connected_seid = CPU_TO_LE16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (!status && ret_seid)
+ *ret_seid = LE16_TO_CPU(resp->pv_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_tag - Add an S/E-tag
+ * @hw: pointer to the hw struct
+ * @direct_to_queue: should s-tag direct flow to a specific queue
+ * @vsi_seid: VSI SEID to use this tag
+ * @tag: value of the tag
+ * @queue_num: queue number, only valid if direct_to_queue is true
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates an S- or E-tag with a VSI in the switch complex. It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+ u16 vsi_seid, u16 tag, u16 queue_num,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_tag *cmd =
+ (struct i40e_aqc_add_tag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_tag_completion *resp =
+ (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->tag = CPU_TO_LE16(tag);
+ if (direct_to_queue) {
+ cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE);
+ cmd->queue_number = CPU_TO_LE16(queue_num);
+ }
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_tag - Remove an S- or E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID this tag is associated with
+ * @tag: value of the S-tag to delete
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an S- or E-tag from a VSI in the switch complex. It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 tag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_tag *cmd =
+ (struct i40e_aqc_remove_tag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_tag_completion *resp =
+ (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->tag = CPU_TO_LE16(tag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_mcast_etag - Add a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer of this SEID to associate E-tag with
+ * @etag: value of E-tag to add
+ * @num_tags_in_buf: number of unicast E-tags in indirect buffer
+ * @buf: address of indirect buffer
+ * @tags_used: return value, number of E-tags in use by this port
+ * @tags_free: return value, number of unallocated E-tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates a multicast E-tag with a port virtualizer. It will return
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ *
+ * The indirect buffer pointed to by buf is a list of 2-byte E-tags,
+ * num_tags_in_buf long.
+ **/
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+ u16 etag, u8 num_tags_in_buf, void *buf,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_mcast_etag *cmd =
+ (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+ (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 length = sizeof(u16) * num_tags_in_buf;
+
+ if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0))
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_multicast_etag);
+
+ cmd->pv_seid = CPU_TO_LE16(pv_seid);
+ cmd->etag = CPU_TO_LE16(etag);
+ cmd->num_unicast_etags = num_tags_in_buf;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_mcast_etag - Remove a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer SEID this E-tag is associated with
+ * @etag: value of the E-tag to remove
+ * @tags_used: return value, number of tags in use by this port
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an E-tag from the port virtualizer. It will return
+ * the number of tags allocated by the port, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+ u16 etag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_mcast_etag *cmd =
+ (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+ (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (pv_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_multicast_etag);
+
+ cmd->pv_seid = CPU_TO_LE16(pv_seid);
+ cmd->etag = CPU_TO_LE16(etag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_update_tag - Update an S/E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID using this S-tag
+ * @old_tag: old tag value
+ * @new_tag: new tag value
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This updates the value of the tag currently attached to this VSI
+ * in the switch complex. It will return the number of tags allocated
+ * by the PF, and the number of unallocated tags available.
+ **/
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 old_tag, u16 new_tag, u16 *tags_used,
+ u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_update_tag *cmd =
+ (struct i40e_aqc_update_tag *)&desc.params.raw;
+ struct i40e_aqc_update_tag_completion *resp =
+ (struct i40e_aqc_update_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->old_tag = CPU_TO_LE16(old_tag);
+ cmd->new_tag = CPU_TO_LE16(new_tag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs
+ * @hw: pointer to the hw struct
+ * @tcmap: TC map for request/release any ignore PFC condition
+ * @request: request or release ignore PFC condition
+ * @tcmap_ret: return TCs for which PFC is currently ignored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This sends a request to set or release the ignore-PFC condition for the given TCs.
+ * It will return the TCs for which PFC is currently ignored.
+ **/
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap,
+ bool request, u8 *tcmap_ret,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pfc_ignore *cmd_resp =
+ (struct i40e_aqc_pfc_ignore *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc);
+
+ if (request)
+ cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET;
+
+ cmd_resp->tc_bitmap = tcmap;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tcmap_ret != NULL)
+ *tcmap_ret = cmd_resp->tc_bitmap;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * When LLDP is handled in PF this command is used by the PF
+ * to notify EMP that a DCB setting is modified.
+ * When LLDP is handled in EMP this command is used by the PF
+ * to notify EMP whenever one of the following parameters gets
+ * modified:
+ * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
+ * - PCIRTT in PRTDCB_GENC.PCIRTT
+ * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * allocate a set of smonVlanStats counters to a specific VLAN in a specific
+ * switch.
+ **/
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 *stat_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_statistics *cmd_resp =
+ (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if ((seid == 0) || (stat_index == NULL))
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics);
+
+ cmd_resp->seid = CPU_TO_LE16(seid);
+ cmd_resp->vlan = CPU_TO_LE16(vlan_id);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status)
+ *stat_index = LE16_TO_CPU(cmd_resp->stat_index);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_statistics - Remove a statistics block from a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * deallocate a set of smonVlanStats counters from a specific VLAN in a specific
+ * switch.
+ **/
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 stat_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_statistics *cmd =
+ (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_statistics);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan = CPU_TO_LE16(vlan_id);
+ cmd->stat_index = CPU_TO_LE16(stat_index);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_port_parameters - set physical port parameters.
+ * @hw: pointer to the hw struct
+ * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
+ * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
+ * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
+ * @double_vlan: if set, double VLAN is enabled
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+ u16 bad_frame_vsi, bool save_bad_pac,
+ bool pad_short_pac, bool double_vlan,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_set_port_parameters *cmd;
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u16 command_flags = 0;
+
+ cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_port_parameters);
+
+ cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
+ if (save_bad_pac)
+ command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS;
+ if (pad_short_pac)
+ command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS;
+ if (double_vlan)
+ command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+ cmd->command_flags = CPU_TO_LE16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static enum i40e_status_code i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ void *buff, u16 buff_size,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_tx_sched_ind *cmd =
+ (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ enum i40e_status_code status;
+ bool cmd_param_flag = false;
+
+ switch (opcode) {
+ case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+ case i40e_aqc_opc_configure_vsi_tc_bw:
+ case i40e_aqc_opc_enable_switching_comp_ets:
+ case i40e_aqc_opc_modify_switching_comp_ets:
+ case i40e_aqc_opc_disable_switching_comp_ets:
+ case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+ case i40e_aqc_opc_configure_switching_comp_bw_config:
+ cmd_param_flag = true;
+ break;
+ case i40e_aqc_opc_query_vsi_bw_config:
+ case i40e_aqc_opc_query_vsi_ets_sla_config:
+ case i40e_aqc_opc_query_switching_comp_ets_config:
+ case i40e_aqc_opc_query_port_ets_config:
+ case i40e_aqc_opc_query_switching_comp_bw_config:
+ cmd_param_flag = false;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (cmd_param_flag)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->vsi_seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = CPU_TO_LE16(seid);
+ cmd->credit = CPU_TO_LE16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: switching component seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_bw: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_switching_comp_bw_limit *cmd =
+ (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_switching_comp_bw_limit);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->credit = CPU_TO_LE16(credit);
+ cmd->max_bw = max_bw;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_tc_bw,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_ets_sla_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_port_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_bw_config,
+ cmd_details);
+}
+
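+/*
+ * Usage sketch (illustrative only, kept compiled out): querying a VSI's
+ * bandwidth configuration through the wrappers built on i40e_aq_tx_sched_cmd().
+ */
+#if 0
+static enum i40e_status_code example_query_vsi_bw(struct i40e_hw *hw,
+						  u16 vsi_seid)
+{
+	struct i40e_aqc_query_vsi_bw_config_resp bw;
+	enum i40e_status_code status;
+
+	memset(&bw, 0, sizeof(bw));
+	status = i40e_aq_query_vsi_bw_config(hw, vsi_seid, &bw, NULL);
+	/* on success 'bw' holds the BW parameters reported by firmware */
+
+	return status;
+}
+#endif
+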
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Check and validate the filter control settings passed.
+ * The function checks for the valid filter/context sizes being
+ * passed for FCoE and PE.
+ *
+ * Returns I40E_SUCCESS if the values passed are valid and within
+ * range else returns an error.
+ **/
+STATIC enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ u32 fcoe_cntx_size, fcoe_filt_size;
+ u32 pe_cntx_size, pe_filt_size;
+ u32 fcoe_fmax;
+ u32 val;
+
+ /* Validate FCoE settings passed */
+ switch (settings->fcoe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->fcoe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* Validate PE settings passed */
+ switch (settings->pe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ case I40E_HASH_FILTER_SIZE_64K:
+ case I40E_HASH_FILTER_SIZE_128K:
+ case I40E_HASH_FILTER_SIZE_256K:
+ case I40E_HASH_FILTER_SIZE_512K:
+ case I40E_HASH_FILTER_SIZE_1M:
+ pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ pe_filt_size <<= (u32)settings->pe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->pe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ case I40E_DMA_CNTX_SIZE_8K:
+ case I40E_DMA_CNTX_SIZE_16K:
+ case I40E_DMA_CNTX_SIZE_32K:
+ case I40E_DMA_CNTX_SIZE_64K:
+ case I40E_DMA_CNTX_SIZE_128K:
+ case I40E_DMA_CNTX_SIZE_256K:
+ pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ pe_cntx_size <<= (u32)settings->pe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+ val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+ >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. It is expected that these settings are programmed
+ * at the driver initialization time.
+ **/
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ u32 hash_lut_size = 0;
+ u32 val;
+
+ if (!settings)
+ return I40E_ERR_PARAM;
+
+ /* Validate the input settings */
+ ret = i40e_validate_filter_settings(hw, settings);
+ if (ret)
+ return ret;
+
+ /* Read the PF Queue Filter control register */
+ val = rd32(hw, I40E_PFQF_CTL_0);
+
+ /* Program required PE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ /* Program required PE contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+ /* Program required FCoE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= ((u32)settings->fcoe_filt_num <<
+ I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ /* Program required FCoE DDP contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= ((u32)settings->fcoe_cntx_num <<
+ I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+ /* Program Hash LUT size for the PF */
+ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+ hash_lut_size = 1;
+ val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+ /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+ if (settings->enable_fdir)
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ if (settings->enable_ethtype)
+ val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+ if (settings->enable_macvlan)
+ val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+ wr32(hw, I40E_PFQF_CTL_0, val);
+
+ return I40E_SUCCESS;
+}
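
For orientation, here is a minimal caller-side sketch (not part of the original driver) of how a PF initialization path might use the two routines above. It assumes the shared-code headers and an already initialized struct i40e_hw; the helper name example_program_pf_filters and the chosen sizes are placeholders, not recommendations.

#include "i40e_prototype.h"

/* Hypothetical helper: program the PF queue filter control. */
static enum i40e_status_code example_program_pf_filters(struct i40e_hw *hw)
{
	struct i40e_filter_control_settings settings = {0};

	/* placeholder sizes; i40e_validate_filter_settings() range-checks
	 * these and compares the FCoE sizes against I40E_GLHMC_FCOEFMAX
	 * before anything is written
	 */
	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_8K;
	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_2K;
	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_16K;
	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_4K;
	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	settings.enable_fdir = true;
	settings.enable_ethtype = true;
	settings.enable_macvlan = true;

	/* validates the sizes, then programs I40E_PFQF_CTL_0 */
	return i40e_set_filter_control(hw, &settings);
}
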
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that need to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: Add control packet filter if True else remove
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command will Add or Remove control packet filter for a control VSI.
+ * In return it will update the perfect filter usage counts in the
+ * stats member.
+ **/
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd =
+ (struct i40e_aqc_add_remove_control_packet_filter *)
+ &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ (struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ if (is_add) {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_control_packet_filter);
+ cmd->queue = CPU_TO_LE16(queue);
+ } else {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_control_packet_filter);
+ }
+
+ if (mac_addr)
+ i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+
+ cmd->etype = CPU_TO_LE16(ethtype);
+ cmd->flags = CPU_TO_LE16(flags);
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stats) {
+ stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used);
+ stats->etype_used = LE16_TO_CPU(resp->etype_used);
+ stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free);
+ stats->etype_free = LE16_TO_CPU(resp->etype_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ u16 buff_len;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data) *
+ filter_count;
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data) *
+ filter_count;
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written under 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written under 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+ u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write);
+ cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+ cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+ cmd_resp->data0 = CPU_TO_LE32(reg_val0);
+ cmd_resp->data1 = CPU_TO_LE32(reg_val1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of a first register to be modified
+ * @dw_count: number of alternate structure fields to write
+ * @buffer: pointer to the command buffer
+ *
+ * Write 'dw_count' dwords from 'buffer' to alternate structure
+ * starting at 'addr'.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_ind_write *cmd_resp =
+ (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buffer == NULL)
+ return I40E_ERR_PARAM;
+
+ /* Indirect command */
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_write_indirect);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (dw_count > (I40E_AQ_LARGE_BUF/4))
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd_resp->address = CPU_TO_LE32(addr);
+ cmd_resp->length = CPU_TO_LE32(dw_count);
+ cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_WORD((u64)buffer));
+ cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buffer));
+
+ status = i40e_asq_send_command(hw, &desc, buffer,
+ I40E_LO_DWORD(4*dw_count), NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
+ * is not passed then only register at 'reg_addr0' is read.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val0 == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+ cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ if (status == I40E_SUCCESS) {
+ *reg_val0 = LE32_TO_CPU(cmd_resp->data0);
+
+ if (reg_val1 != NULL)
+ *reg_val1 = LE32_TO_CPU(cmd_resp->data1);
+ }
+
+ return status;
+}
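
As a rough illustration of how the two direct alternate-structure accessors pair up, the sketch below writes two dwords and reads them back. EXAMPLE_ALT_ADDR0/1 and example_alt_ram_roundtrip are made-up names, and the offsets are placeholders rather than real alternate-RAM locations.

/* Hypothetical round trip through the alternate structure. */
#define EXAMPLE_ALT_ADDR0	0x10
#define EXAMPLE_ALT_ADDR1	0x11

static enum i40e_status_code example_alt_ram_roundtrip(struct i40e_hw *hw)
{
	enum i40e_status_code status;
	u32 val0 = 0, val1 = 0;

	status = i40e_aq_alternate_write(hw, EXAMPLE_ALT_ADDR0, 0x12345678,
					 EXAMPLE_ALT_ADDR1, 0x9abcdef0);
	if (status)
		return status;

	/* passing a non-NULL reg_val1 requests the second dword as well */
	return i40e_aq_alternate_read(hw, EXAMPLE_ALT_ADDR0, &val0,
				      EXAMPLE_ALT_ADDR1, &val1);
}
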
+
+/**
+ * i40e_aq_alternate_read_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of the alternate structure field
+ * @dw_count: number of alternate structure fields to read
+ * @buffer: pointer to the command buffer
+ *
+ * Read 'dw_count' dwords from alternate structure starting at 'addr' and
+ * place them in 'buffer'. The buffer should be allocated by caller.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_ind_write *cmd_resp =
+ (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buffer == NULL)
+ return I40E_ERR_PARAM;
+
+ /* Indirect command */
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_read_indirect);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (dw_count > (I40E_AQ_LARGE_BUF/4))
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd_resp->address = CPU_TO_LE32(addr);
+ cmd_resp->length = CPU_TO_LE32(dw_count);
+ cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buffer));
+ cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buffer));
+
+ status = i40e_asq_send_command(hw, &desc, buffer,
+ I40E_LO_DWORD(4*dw_count), NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_clear
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_clear_port);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write_done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+ u8 bios_mode, bool *reset_needed)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write_done *cmd =
+ (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reset_needed == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_write_done);
+
+ cmd->cmd_flags = CPU_TO_LE16(bios_mode);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) &
+ I40E_AQ_ALTERNATE_RESET_NEEDED) != 0);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_oem_mode
+ * @hw: pointer to the HW structure.
+ * @oem_mode: the OEM mode to be used
+ *
+ * Sets the device to a specific operating mode. Currently the only supported
+ * mode is no_clp, which causes FW to refrain from using Alternate RAM.
+ *
+ **/
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+ u8 oem_mode)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write_done *cmd =
+ (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_set_mode);
+
+ cmd->cmd_flags = CPU_TO_LE16(oem_mode);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_resume_port_tx
+ * @hw: pointer to the hardware structure
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Resume port's Tx traffic
+ **/
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+ hw->bus.type = i40e_bus_type_pci_express;
+
+ switch (link_status & I40E_PCI_LINK_WIDTH) {
+ case I40E_PCI_LINK_WIDTH_1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case I40E_PCI_LINK_WIDTH_2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case I40E_PCI_LINK_WIDTH_4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case I40E_PCI_LINK_WIDTH_8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & I40E_PCI_LINK_SPEED) {
+ case I40E_PCI_LINK_SPEED_2500:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case I40E_PCI_LINK_SPEED_5000:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case I40E_PCI_LINK_SPEED_8000:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+}
+
+/**
+ * i40e_read_bw_from_alt_ram
+ * @hw: pointer to the hardware structure
+ * @max_bw: pointer for max_bw read
+ * @min_bw: pointer for min_bw read
+ * @min_valid: pointer for bool that is true if min_bw is a valid value
+ * @max_valid: pointer for bool that is true if max_bw is a valid value
+ *
+ * Read bw from the alternate ram for the given pf
+ **/
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
+{
+ enum i40e_status_code status;
+ u32 max_bw_addr, min_bw_addr;
+
+ /* Calculate the address of the min/max bw registers */
+ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MAX_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF*hw->pf_id);
+ min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MIN_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF*hw->pf_id);
+
+ /* Read the bandwidths from alt ram */
+ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
+ min_bw_addr, min_bw);
+
+ if (*min_bw & I40E_ALT_BW_VALID_MASK)
+ *min_valid = true;
+ else
+ *min_valid = false;
+
+ if (*max_bw & I40E_ALT_BW_VALID_MASK)
+ *max_valid = true;
+ else
+ *max_valid = false;
+
+ return status;
+}
+
+/**
+ * i40e_aq_configure_partition_bw
+ * @hw: pointer to the hardware structure
+ * @bw_data: Buffer holding valid pfs and bw limits
+ * @cmd_details: pointer to command details
+ *
+ * Configure partitions guaranteed/max bw
+ **/
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u16 bwd_size = sizeof(struct i40e_aqc_configure_partition_bw_data);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_partition_bw);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ if (bwd_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(bwd_size);
+
+ status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ enum i40e_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_asq_cmd_details details;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF
+ | I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ if (!cmd_details) {
+ i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM);
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg)
+{
+ struct i40e_virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ hw->dev_caps.iwarp = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ i40e_memcpy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr,
+ I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+ i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
+{
+ return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_SUCCESS, NULL, 0, NULL);
+}
+#endif /* VF_DRIVER */
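
A minimal sketch of the reset sequence the comment on i40e_vf_reset() calls for: request the reset, shut the admin queue down, then optionally bring it back up. It assumes the admin-queue helpers i40e_shutdown_adminq() and i40e_init_adminq() from the shared adminq code; the helper name example_vf_reset_sequence is hypothetical and error handling is kept to a minimum.

/* Hypothetical VF reset sequence; the ordering is what matters here. */
static enum i40e_status_code example_vf_reset_sequence(struct i40e_hw *hw)
{
	enum i40e_status_code status;

	status = i40e_vf_reset(hw);	/* no response is expected from the PF */
	if (status)
		return status;

	i40e_shutdown_adminq(hw);	/* must follow immediately */
	return i40e_init_adminq(hw);	/* optional re-initialization */
}
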
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.c b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.c
new file mode 100755
index 00000000..d0670287
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.c
@@ -0,0 +1,479 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+ u32 reg;
+
+ if (!status)
+ return I40E_ERR_PARAM;
+
+ reg = rd32(hw, I40E_PRTDCB_GENS);
+ *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+ I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_ieee_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+ I40E_IEEE_ETS_WILLING_SHIFT);
+ etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+ I40E_IEEE_ETS_CBS_SHIFT);
+ etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+ I40E_IEEE_ETS_MAXTC_SHIFT);
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tsatable[i] = buf[offset++];
+}
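
To make the nibble layout concrete, the hypothetical helper below (not part of the driver) splits one priority-assignment octet the same way the loop above does; an octet of 0x21, for example, yields TC 2 for the even-numbered priority and TC 1 for the odd-numbered one.

/* Hypothetical helper: decode one priority-assignment octet into its
 * two 3-bit traffic class values. */
static void example_decode_prio_octet(u8 octet, u8 *even_prio_tc,
				      u8 *odd_prio_tc)
{
	*even_prio_tc = (u8)((octet & I40E_IEEE_ETS_PRIO_1_MASK) >>
			     I40E_IEEE_ETS_PRIO_1_SHIFT);	/* upper nibble */
	*odd_prio_tc = (u8)((octet & I40E_IEEE_ETS_PRIO_0_MASK) >>
			    I40E_IEEE_ETS_PRIO_0_SHIFT);	/* lower nibble */
}
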
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* Move offset to priority table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+ I40E_IEEE_PFC_WILLING_SHIFT);
+ dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+ I40E_IEEE_PFC_MBC_SHIFT);
+ dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+ I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.pfcenable = buf[1];
+}
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength;
+ u16 offset = 0;
+ u16 length;
+ int i = 0;
+ u8 *buf;
+
+ typelength = I40E_NTOHS(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ buf = tlv->tlvinfo;
+
+ /* The App priority table starts 5 octets after TLV header */
+ length -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < length) {
+ dcbcfg->app[i].priority = (u8)((buf[offset] &
+ I40E_IEEE_APP_PRIO_MASK) >>
+ I40E_IEEE_APP_PRIO_SHIFT);
+ dcbcfg->app[i].selector = (u8)((buf[offset] &
+ I40E_IEEE_APP_SEL_MASK) >>
+ I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update DCB configuration data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ switch (subtype) {
+ case I40E_IEEE_SUBTYPE_ETS_CFG:
+ i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_ETS_REC:
+ i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_PFC_CFG:
+ i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_APP_PRI:
+ i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update DCB configuration data
+ *
+ * Currently only the IEEE 802.1Qaz TLV is supported, all others
+ * will be ignored
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = I40E_NTOHL(tlv->ouisubtype);
+ oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+ I40E_LLDP_TLV_OUI_SHIFT);
+ switch (oui) {
+ case I40E_IEEE_8021QAZ_OUI:
+ i40e_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 type;
+ u16 length;
+ u16 typelength;
+ u16 offset = 0;
+
+ if (!lldpmib || !dcbcfg)
+ return I40E_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += I40E_LLDP_MIB_HLEN;
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelength = I40E_NTOHS(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ offset += sizeof(typelength) + length;
+
+ /* END TLV or beyond LLDPDU size */
+ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+ break;
+
+ switch (type) {
+ case I40E_TLV_TYPE_ORG:
+ i40e_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) +
+ length);
+ }
+
+ return ret;
+}
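
For reference, the type/length split used in the loop above can be shown in isolation. Assuming a typelength word already converted to host order (i.e. after I40E_NTOHS), a value of 0xFE19 decodes to type 127 (I40E_TLV_TYPE_ORG) and length 25; example_decode_typelength is a hypothetical name.

/* Hypothetical helper: split a host-order LLDP TLV type/length word
 * using the masks from i40e_dcb.h. */
static void example_decode_typelength(u16 typelength, u16 *type, u16 *length)
{
	*type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
		      I40E_LLDP_TLV_TYPE_SHIFT);	/* upper 7 bits */
	*length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
			I40E_LLDP_TLV_LEN_SHIFT);	/* lower 9 bits */
}
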
+
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+ (void *)lldpmib, I40E_LLDPDU_SIZE,
+ NULL, NULL, NULL);
+ if (ret)
+ goto free_mem;
+
+ /* Parse LLDP MIB to get dcb configuration */
+ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+out:
+ return ret;
+}
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+
+ if (!hw->func_caps.dcb)
+ return ret;
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ switch (hw->dcbx_status) {
+ case I40E_DCBX_STATUS_DONE:
+ case I40E_DCBX_STATUS_IN_PROGRESS:
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ break;
+ case I40E_DCBX_STATUS_DISABLED:
+ return ret;
+ case I40E_DCBX_STATUS_NOT_STARTED:
+ case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+ default:
+ break;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
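
A rough caller-side sketch of the DCB bring-up path above, assuming DCB is reported in func_caps; it only shows where the parsed local configuration ends up, and example_dcb_bringup is a hypothetical name.

/* Hypothetical helper: fetch the firmware DCBX state and note where the
 * parsed local configuration is stored in the hw struct. */
static enum i40e_status_code example_dcb_bringup(struct i40e_hw *hw)
{
	enum i40e_status_code status;

	status = i40e_init_dcb(hw);
	if (status)
		return status;

	if (hw->dcbx_status == I40E_DCBX_STATUS_DONE) {
		/* hw->local_dcbx_config.etscfg, .pfc and .app[] now hold
		 * the values parsed from the local LLDP MIB */
	}
	return I40E_SUCCESS;
}
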
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.h
new file mode 100755
index 00000000..2261e080
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_dcb.h
@@ -0,0 +1,161 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_OFFLOAD_DISABLED 0
+#define I40E_DCBX_OFFLOAD_ENABLED 1
+
+#define I40E_DCBX_STATUS_NOT_STARTED 0
+#define I40E_DCBX_STATUS_IN_PROGRESS 1
+#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
+#define I40E_DCBX_STATUS_DISABLED 7
+
+#define I40E_TLV_TYPE_END 0
+#define I40E_TLV_TYPE_ORG 127
+
+#define I40E_IEEE_8021QAZ_OUI 0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG 9
+#define I40E_IEEE_SUBTYPE_ETS_REC 10
+#define I40E_IEEE_SUBTYPE_PFC_CFG 11
+#define I40E_IEEE_SUBTYPE_APP_PRI 12
+
+#define I40E_LLDP_ADMINSTATUS_DISABLED 0
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RX 1
+#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_MIB_HLEN 14
+#define I40E_LLDP_TLV_LEN_SHIFT 0
+#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT 9
+#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
+#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT 8
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT 0
+#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT 6
+#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT 7
+#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
+#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
+#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT 0
+#define I40E_IEEE_TSA_CBS 1
+#define I40E_IEEE_TSA_ETS 2
+#define I40E_IEEE_TSA_VENDOR 255
+
+/* Defines for IEEE PFC TLV */
+#define I40E_IEEE_PFC_CAP_SHIFT 0
+#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT 6
+#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT 7
+#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT 0
+#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT 5
+#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+
+#pragma pack(1)
+
+/* IEEE 802.1AB LLDP TLV structure */
+struct i40e_lldp_generic_tlv {
+ __be16 typelength;
+ u8 tlvinfo[1];
+};
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+ __be16 typelength;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+#pragma pack()
+
+/*
+ * TODO: The structures below define LLDP/DCBX variables and
+ * statistics, but we still need to determine how to obtain the
+ * required information from the Firmware in order to use them
+ */
+
+/* IEEE 802.1AB LLDP Agent Statistics */
+struct i40e_lldp_stats {
+ u64 remtablelastchangetime;
+ u64 remtableinserts;
+ u64 remtabledeletes;
+ u64 remtabledrops;
+ u64 remtableageouts;
+ u64 txframestotal;
+ u64 rxframesdiscarded;
+ u64 rxportframeerrors;
+ u64 rxportframestotal;
+ u64 rxporttlvsdiscardedtotal;
+ u64 rxporttlvsunrecognizedtotal;
+ u64 remtoomanyneighbors;
+};
+
+/* IEEE 802.1Qaz DCBX variables */
+struct i40e_dcbx_variables {
+ u32 defmaxtrafficclasses;
+ u32 defprioritytcmapping;
+ u32 deftcbandwidth;
+ u32 deftsaassignment;
+};
+
+enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw);
+#endif /* _I40E_DCB_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.c b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.c
new file mode 100755
index 00000000..167fcf8e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.c
@@ -0,0 +1,178 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_diag.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_diag_set_loopback
+ * @hw: pointer to the hw struct
+ * @mode: loopback mode
+ *
+ * Set chosen loopback mode
+ **/
+enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw,
+ enum i40e_lb_mode mode)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_aq_set_lb_modes(hw, mode, NULL))
+ ret_code = I40E_ERR_DIAG_TEST_FAILED;
+
+ return ret_code;
+}
+
+/**
+ * i40e_diag_reg_pattern_test
+ * @hw: pointer to the hw struct
+ * @reg: reg to be tested
+ * @mask: bits to be touched
+ **/
+static enum i40e_status_code i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ u32 reg, u32 mask)
+{
+ const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ u32 pat, val, orig_val;
+ int i;
+
+ orig_val = rd32(hw, reg);
+ for (i = 0; i < ARRAY_SIZE(patterns); i++) {
+ pat = patterns[i];
+ wr32(hw, reg, (pat & mask));
+ val = rd32(hw, reg);
+ if ((val & mask) != (pat & mask)) {
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+ }
+
+ wr32(hw, reg, orig_val);
+ val = rd32(hw, reg);
+ if (val != orig_val) {
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+
+ return I40E_SUCCESS;
+}
+
+struct i40e_diag_reg_test_info i40e_reg_list[] = {
+ /* offset mask elements stride */
+ {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+ {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+ { 0 }
+};
+
+/**
+ * i40e_diag_reg_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform registers diagnostic test
+ **/
+enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg, mask;
+ u32 i, j;
+
+ for (i = 0; i40e_reg_list[i].offset != 0 &&
+ ret_code == I40E_SUCCESS; i++) {
+
+ /* set actual reg range for dynamically allocated resources */
+ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+ hw->func_caps.num_tx_qp != 0)
+ i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+ if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+ i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+ i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+ hw->func_caps.num_msix_vectors != 0)
+ i40e_reg_list[i].elements =
+ hw->func_caps.num_msix_vectors - 1;
+
+ /* test register access */
+ mask = i40e_reg_list[i].mask;
+ for (j = 0; j < i40e_reg_list[i].elements &&
+ ret_code == I40E_SUCCESS; j++) {
+ reg = i40e_reg_list[i].offset
+ + (j * i40e_reg_list[i].stride);
+ ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
+ }
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_diag_eeprom_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform EEPROM diagnostic test
+ **/
+enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ u16 reg_val;
+
+ /* read NVM control word and if NVM valid, validate EEPROM checksum*/
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
+ if ((ret_code == I40E_SUCCESS) &&
+ ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
+ (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
+ ret_code = i40e_validate_nvm_checksum(hw, NULL);
+ } else {
+ ret_code = I40E_ERR_DIAG_TEST_FAILED;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_diag_fw_alive_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform FW alive diagnostic test
+ **/
+enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return I40E_SUCCESS;
+}
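
The diagnostic entry points above are typically driven from an ethtool-style self-test path; a minimal sketch of such a caller might look like the following, with example_run_diagnostics being a hypothetical name.

/* Hypothetical helper: run the register and EEPROM diagnostics in
 * sequence and report the first failure. */
static enum i40e_status_code example_run_diagnostics(struct i40e_hw *hw)
{
	enum i40e_status_code status;

	status = i40e_diag_reg_test(hw);	/* pattern-test a set of registers */
	if (status)
		return status;

	status = i40e_diag_eeprom_test(hw);	/* validate the NVM checksum */
	if (status)
		return status;

	return i40e_diag_fw_alive_test(hw);	/* currently always succeeds */
}
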
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.h
new file mode 100755
index 00000000..feb4d4b7
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_diag.h
@@ -0,0 +1,61 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_DIAG_H_
+#define _I40E_DIAG_H_
+
+#include "i40e_type.h"
+
+enum i40e_lb_mode {
+ I40E_LB_MODE_NONE = 0x0,
+ I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL,
+};
+
+struct i40e_diag_reg_test_info {
+ u32 offset; /* the base register */
+ u32 mask; /* bits that can be tested */
+ u32 elements; /* number of elements if array */
+ u32 stride; /* bytes between each element */
+};
+
+extern struct i40e_diag_reg_test_info i40e_reg_list[];
+
+enum i40e_status_code i40e_diag_set_loopback(struct i40e_hw *hw,
+ enum i40e_lb_mode mode);
+enum i40e_status_code i40e_diag_fw_alive_test(struct i40e_hw *hw);
+enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw);
+enum i40e_status_code i40e_diag_eeprom_test(struct i40e_hw *hw);
+
+#endif /* _I40E_DIAG_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.c b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.c
new file mode 100755
index 00000000..ae6896a6
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.c
@@ -0,0 +1,373 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#include "i40e_type.h"
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+ enum i40e_memory_type mem_type;
+ bool dma_mem_alloc_done = false;
+ struct i40e_dma_mem mem;
+ u64 alloc_len;
+
+ if (NULL == hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (sd_index >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
+ goto exit;
+ }
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (I40E_SD_TYPE_PAGED == type) {
+ mem_type = i40e_mem_pd;
+ alloc_len = I40E_HMC_PAGED_BP_SIZE;
+ } else {
+ mem_type = i40e_mem_bp_jumbo;
+ alloc_len = direct_mode_sz;
+ }
+
+ /* allocate a 4K pd page or 2M backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ dma_mem_alloc_done = true;
+ if (I40E_SD_TYPE_PAGED == type) {
+ ret_code = i40e_allocate_virt_mem(hw,
+ &sd_entry->u.pd_table.pd_entry_virt_mem,
+ sizeof(struct i40e_hmc_pd_entry) * 512);
+ if (ret_code)
+ goto exit;
+ sd_entry->u.pd_table.pd_entry =
+ (struct i40e_hmc_pd_entry *)
+ sd_entry->u.pd_table.pd_entry_virt_mem.va;
+ i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
+ &mem, sizeof(struct i40e_dma_mem),
+ I40E_NONDMA_TO_NONDMA);
+ } else {
+ i40e_memcpy(&sd_entry->u.bp.addr,
+ &mem, sizeof(struct i40e_dma_mem),
+ I40E_NONDMA_TO_NONDMA);
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+ /* initialize the sd entry */
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+ /* increment the ref count */
+ I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+ }
+ /* Increment backing page reference count */
+ if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+ I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+ if (I40E_SUCCESS != ret_code)
+ if (dma_mem_alloc_done)
+ i40e_free_dma_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds pd_entry in the pd_table
+ * 3. Marks the entry valid in the i40e_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ * aligned on a 4K boundary and zeroed.
+ * 2. It should be 4K in size.
+ **/
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_dma_mem mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
+ goto exit;
+ }
+
+ /* find corresponding sd */
+ sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+ if (I40E_SD_TYPE_PAGED !=
+ hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ goto exit;
+
+ rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+
+ i40e_memcpy(&pd_entry->bp.addr, &mem,
+ sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+ /* Set page address and valid bit */
+ page_desc = mem.pa | 0x1;
+
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+
+ /* Add the backing page physical address in the pd entry */
+ i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
+ I40E_NONDMA_TO_DMA);
+
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ I40E_INC_PD_REFCNT(pd_table);
+ }
+ I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+ return ret_code;
+}
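+
+/* Worked example (hypothetical index): with I40E_HMC_PD_CNT_IN_SD = 512,
+ * pd_index = 1000 maps to sd_idx = 1 and rel_pd_idx = 488. The 4K backing
+ * page's physical address, with bit 0 set as the valid flag, is then written
+ * into the 64-bit slot at pd_page_addr.va + rel_pd_idx * sizeof(u64).
+ */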
+
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) or in the sd
+ * table (for direct address mode) invalid.
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+
+ /* calculate index */
+ sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
+ goto exit;
+ }
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
+ goto exit;
+ }
+ /* get the entry and decrease its ref counter */
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ I40E_DEC_BP_REFCNT(&pd_entry->bp);
+ if (pd_entry->bp.ref_cnt)
+ goto exit;
+
+ /* mark the entry invalid */
+ pd_entry->valid = false;
+ I40E_DEC_PD_REFCNT(pd_table);
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+
+ /* free memory here */
+ ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ if (!pd_table->ref_cnt)
+ i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+ if (sd_entry->u.bp.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (is_pf) {
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+ } else {
+ ret_code = I40E_NOT_SUPPORTED;
+ goto exit;
+ }
+ ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from sd entry.
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (is_pf) {
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+ } else {
+ ret_code = I40E_NOT_SUPPORTED;
+ goto exit;
+ }
+ /* free memory here */
+ ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+exit:
+ return ret_code;
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.h
new file mode 100755
index 00000000..eb629fc6
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_hmc.h
@@ -0,0 +1,243 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equals the PCI function number for a PF; dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
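+
+/* These macros expand to plain post-increments/decrements of the ref_cnt
+ * member, e.g. I40E_INC_BP_REFCNT(&sd_entry->u.bp) becomes
+ * (&sd_entry->u.bp)->ref_cnt++. No locking is done here, so the counts are
+ * only consistent if HMC setup and teardown are serialized by the caller.
+ */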
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: physical address of the SD's backing memory
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(I40E_HI_DWORD(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the segment descriptor index limit (one past
+ * the last index in use)
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
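+
+/* Worked example (hypothetical object): base = 0, size = 128 bytes,
+ * index = 0 and cnt = 16384 give fpm_addr = 0 and fpm_limit = 2M, so
+ * *sd_idx = 0 and *sd_limit = ((2M - 1) / 2M) + 1 = 1: the whole range fits
+ * inside a single 2M segment descriptor.
+ */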
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
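+
+/* Worked example (same hypothetical 2M span as above): fpm_adr = 0 and
+ * fpm_limit = 2M give *pd_index = 0 and *pd_limit = ((2M - 1) / 4K) + 1 =
+ * 512, i.e. 512 page descriptors back one paged segment descriptor.
+ */
+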
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index);
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.c b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.c
new file mode 100755
index 00000000..b08534bf
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.c
@@ -0,0 +1,1417 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory so it's 512-byte aligned.
+ **/
+STATIC u64 i40e_align_l2obj_base(u64 offset)
+{
+ u64 aligned_offset = offset;
+
+ if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+ aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+ (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+ return aligned_offset;
+}
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory for the function required, based
+ * on the number of resources it must provide context for.
+ **/
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+ u64 fpm_size = 0;
+
+ fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ return fpm_size;
+}
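+
+/* Example of the rounding (hypothetical sizes): if the Tx queue contexts
+ * added up to 1000 bytes, i40e_align_l2obj_base() would round the running
+ * total up to 1024 (the next 512-byte boundary) before the Rx, FCoE context
+ * and FCoE filter contributions are added, each followed by the same
+ * alignment step.
+ */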
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ * - HMC Resource Profile has been selected before calling this function.
+ **/
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
+{
+ struct i40e_hmc_obj_info *obj, *full_obj;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 l2fpm_size;
+ u32 size_exp;
+
+ hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+ hw->hmc.hmc_fn_id = hw->pf_id;
+
+ /* allocate memory for hmc_obj */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+ sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+ hw->hmc.hmc_obj_virt_mem.va;
+
+ /* The full object will be used to create the LAN HMC SD */
+ full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+ full_obj->max_cnt = 0;
+ full_obj->cnt = 0;
+ full_obj->base = 0;
+ full_obj->size = 0;
+
+ /* Tx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = txq_num;
+ obj->base = 0;
+ size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (txq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ txq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* Rx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = rxq_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (rxq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ rxq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+ obj->cnt = fcoe_cntx_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_cntx_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_cntx_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE filter information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ obj->cnt = fcoe_filt_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_filt_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_filt_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ hw->hmc.first_sd_index = 0;
+ hw->hmc.sd_table.ref_cnt = 0;
+ l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+ fcoe_filt_num);
+ if (NULL == hw->hmc.sd_table.sd_entry) {
+ hw->hmc.sd_table.sd_cnt = (u32)
+ (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+ I40E_HMC_DIRECT_BP_SIZE;
+
+ /* allocate the sd_entry members in the sd_table */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+ (sizeof(struct i40e_hmc_sd_entry) *
+ hw->hmc.sd_table.sd_cnt));
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.sd_table.sd_entry =
+ (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+ }
+ /* store in the LAN full object for later */
+ full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+ return ret_code;
+}
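+
+/* Typical PF bring-up sequence (illustrative sketch; the queue counts and the
+ * zero FCoE counts are hypothetical):
+ *
+ *   ret = i40e_init_lan_hmc(hw, num_txq, num_rxq, 0, 0);
+ *   if (ret == I40E_SUCCESS)
+ *           ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
+ *
+ * and i40e_shutdown_lan_hmc(hw) undoes the allocations on driver removal.
+ */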
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) invalid
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. caller can deallocate the memory used by pd after this function
+ * returns.
+ **/
+STATIC enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
+ ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in the sd table (for direct address mode) invalid
+ * 2. Writes to registers PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
+ * set to 0) and PMSDDATAHIGH to invalidate the sd page
+ * 3. Decrements the ref count for the sd_entry
+ * assumptions:
+ * 1. caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+STATIC enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
+ ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 pd_idx = 0, pd_lmt = 0;
+ bool pd_error = false;
+ u32 sd_idx, sd_lmt;
+ u64 sd_size;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
+ goto exit;
+ }
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+ /* find pd index */
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ /* This covers the case where the SD should back less than the full 2M
+ * of memory. If no size is supplied (direct_mode_sz == 0), the SD size
+ * defaults to 2M.
+ */
+ if (info->direct_mode_sz == 0)
+ sd_size = I40E_HMC_DIRECT_BP_SIZE;
+ else
+ sd_size = info->direct_mode_sz;
+
+ /* check if all the sds are valid. If not, allocate a page and
+ * initialize it.
+ */
+ for (j = sd_idx; j < sd_lmt; j++) {
+ /* update the sd table entry */
+ ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+ info->entry_type,
+ sd_size);
+ if (I40E_SUCCESS != ret_code)
+ goto exit_sd_error;
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ /* check if all the pds in this sd are valid. If not,
+ * allocate a page and initialize it.
+ */
+
+ /* find pd_idx and pd_lmt in this sd */
+ pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt,
+ ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = i40e_add_pd_table_entry(hw,
+ info->hmc_info,
+ i);
+ if (I40E_SUCCESS != ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ /* remove the backing pages from pd_idx1 to i */
+ while (i && (i > pd_idx1)) {
+ i40e_remove_pd_bp(hw, info->hmc_info,
+ (i - 1));
+ i--;
+ }
+ }
+ }
+ if (!sd_entry->valid) {
+ sd_entry->valid = true;
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ I40E_SET_PF_SD_ENTRY(hw,
+ sd_entry->u.pd_table.pd_page_addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ goto exit;
+ }
+ }
+ }
+ goto exit;
+
+exit_sd_error:
+ /* cleanup for sd entries from j to sd_idx */
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx,
+ ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ i40e_remove_pd_bp(hw, info->hmc_info, i);
+ }
+ i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ * any LAN/FCoE HMC objects can be created.
+ **/
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
+{
+ struct i40e_hmc_lan_create_obj_info info;
+ u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+ struct i40e_hmc_obj_info *obj;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ /* Initialize part of the create object info struct */
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+ /* Build the SD entry for the LAN objects */
+ switch (model) {
+ case I40E_HMC_MODEL_DIRECT_PREFERRED:
+ case I40E_HMC_MODEL_DIRECT_ONLY:
+ info.entry_type = I40E_SD_TYPE_DIRECT;
+ /* Make one big object, a single SD */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ goto try_type_paged;
+ else if (ret_code != I40E_SUCCESS)
+ goto configure_lan_hmc_out;
+ /* else clause falls through the break */
+ break;
+ case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+ info.entry_type = I40E_SD_TYPE_PAGED;
+ /* Make one big object in the PD table */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if (ret_code != I40E_SUCCESS)
+ goto configure_lan_hmc_out;
+ break;
+ default:
+ /* unsupported type */
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
+ ret_code);
+ goto configure_lan_hmc_out;
+ }
+
+ /* Configure and program the FPM registers so objects can be created */
+
+ /* Tx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+ /* Rx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE filters */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_delete_lan_hmc_object - remove LAN HMC objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for PDs and backing storage. After this function returns,
+ * the caller should deallocate the memory previously allocated for
+ * book-keeping information about PDs and backing storage.
+ **/
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_table *pd_table;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 sd_idx, sd_lmt;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+ goto exit;
+ }
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+ if (I40E_SD_TYPE_PAGED !=
+ info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ continue;
+
+ rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+ pd_table =
+ &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ }
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case I40E_SD_TYPE_DIRECT:
+ ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ break;
+ case I40E_SD_TYPE_PAGED:
+ ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ break;
+ default:
+ break;
+ }
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+ struct i40e_hmc_lan_delete_obj_info info;
+ enum i40e_status_code ret_code;
+
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.count = 1;
+
+ /* delete the object */
+ ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+ /* free the SD table entry for LAN */
+ i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+ hw->hmc.sd_table.sd_cnt = 0;
+ hw->hmc.sd_table.sd_entry = NULL;
+
+ /* free memory used for hmc_obj */
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+ hw->hmc.hmc_obj = NULL;
+
+ return ret_code;
+}
+
+#define I40E_HMC_STORE(_struct, _ele) \
+ offsetof(struct _struct, _ele), \
+ FIELD_SIZEOF(struct _struct, _ele)
+
+struct i40e_context_ele {
+ u16 offset;
+ u16 size_of;
+ u16 width;
+ u16 lsb;
+};
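+
+/* Example expansion: the first Tx queue table entry below,
+ *   {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0},
+ * becomes
+ *   {offsetof(struct i40e_hmc_obj_txq, head),
+ *    FIELD_SIZEOF(struct i40e_hmc_obj_txq, head), 13, 0},
+ * filling offset and size_of, while width and lsb describe where the field
+ * sits inside the packed HMC context.
+ */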
+
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+ /* Field Width LSB */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
+/* line 1 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 },
+/* line 7 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) },
+ { 0 }
+};
+
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+ /* Field Width LSB */
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
+ { 0 }
+};
+
+/**
+ * i40e_write_byte - replace HMC context byte
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_byte(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u8 src_byte, dest_byte, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = ((u8)1 << ce_info->width) - 1;
+
+ src_byte = *from;
+ src_byte &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_byte <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+ dest_byte &= ~mask; /* get the bits not changing */
+ dest_byte |= src_byte; /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
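+
+/* Worked example (hypothetical field): width = 3 and lsb = 13 give
+ * shift_width = 5 and mask = 0x07 << 5 = 0xE0, with the destination being
+ * byte 1 of the context (lsb / 8); the three source bits land in bits 5-7
+ * of that byte and the surrounding bits are preserved by the
+ * read-modify-write.
+ */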
+
+/**
+ * i40e_write_word - replace HMC context word
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_word(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u16 src_word, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le16 dest_word;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = ((u16)1 << ce_info->width) - 1;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_word = *(u16 *)from;
+ src_word &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_word <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
+
+ dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
+ dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_dword - replace HMC context dword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_dword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u32 src_dword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le32 dest_dword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 5 bits so the shift will do nothing
+ */
+ if (ce_info->width < 32)
+ mask = ((u32)1 << ce_info->width) - 1;
+ else
+ mask = 0xFFFFFFFF;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_dword = *(u32 *)from;
+ src_dword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_dword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
+
+ dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
+ dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_qword - replace HMC context qword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_qword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u64 src_qword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le64 dest_qword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 6 bits so the shift will do nothing
+ */
+ if (ce_info->width < 64)
+ mask = ((u64)1 << ce_info->width) - 1;
+ else
+ mask = 0xFFFFFFFFFFFFFFFFUL;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_qword = *(u64 *)from;
+ src_qword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_qword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
+
+ dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
+ dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_byte - read HMC context byte into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_byte(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u8 dest_byte, mask;
+ u8 *src, *target;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = ((u8)1 << ce_info->width) - 1;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+ dest_byte &= ~(mask);
+
+ dest_byte >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_word - read HMC context word into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_word(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u16 dest_word, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le16 src_word;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = ((u16)1 << ce_info->width) - 1;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_word &= ~(CPU_TO_LE16(mask));
+
+ /* get the data back into host order before shifting */
+ dest_word = LE16_TO_CPU(src_word);
+
+ dest_word >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_dword - read HMC context dword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_dword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u32 dest_dword, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le32 src_dword;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 5 bits so the shift will do nothing
+ */
+ if (ce_info->width < 32)
+ mask = ((u32)1 << ce_info->width) - 1;
+ else
+ mask = 0xFFFFFFFF;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_dword &= ~(CPU_TO_LE32(mask));
+
+ /* get the data back into host order before shifting */
+ dest_dword = LE32_TO_CPU(src_dword);
+
+ dest_dword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
+ I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_qword - read HMC context qword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_qword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u64 dest_qword, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le64 src_qword;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 6 bits so the shift will do nothing
+ */
+ if (ce_info->width < 64)
+ mask = ((u64)1 << ce_info->width) - 1;
+ else
+ mask = 0xFFFFFFFFFFFFFFFFUL;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_qword &= ~(CPU_TO_LE64(mask));
+
+ /* get the data back into host order before shifting */
+ dest_qword = LE64_TO_CPU(src_qword);
+
+ dest_qword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
+ I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_get_hmc_context - extract HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+ switch (ce_info[f].size_of) {
+ case 1:
+ i40e_read_byte(context_bytes, &ce_info[f], dest);
+ break;
+ case 2:
+ i40e_read_word(context_bytes, &ce_info[f], dest);
+ break;
+ case 4:
+ i40e_read_dword(context_bytes, &ce_info[f], dest);
+ break;
+ case 8:
+ i40e_read_qword(context_bytes, &ce_info[f], dest);
+ break;
+ default:
+ /* nothing to do, just keep going */
+ break;
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw: the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+ /* clean the bit array */
+ i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
+ I40E_DMA_MEM);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be read from
+ * @dest: the struct to be read from
+ **/
+static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+
+ /* we have to deal with each element of the HMC using the
+ * correct size so that we are correct regardless of the
+ * endianness of the machine
+ */
+ switch (ce_info[f].size_of) {
+ case 1:
+ i40e_write_byte(context_bytes, &ce_info[f], dest);
+ break;
+ case 2:
+ i40e_write_word(context_bytes, &ce_info[f], dest);
+ break;
+ case 4:
+ i40e_write_dword(context_bytes, &ce_info[f], dest);
+ break;
+ case 8:
+ i40e_write_qword(context_bytes, &ce_info[f], dest);
+ break;
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hmc_info: pointer to i40e_hmc_info struct
+ * @object_base: pointer used to return the object's virtual address
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function retrieves the object's virtual address from the object
+ * base pointer. This function is used for LAN Queue contexts.
+ **/
+STATIC
+enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
+ u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
+{
+ u32 obj_offset_in_sd, obj_offset_in_pd;
+ struct i40e_hmc_sd_entry *sd_entry;
+ struct i40e_hmc_pd_entry *pd_entry;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 obj_offset_in_fpm;
+ u32 sd_idx, sd_lmt;
+
+ if (NULL == hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (NULL == hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+ goto exit;
+ }
+ if (NULL == object_base) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
+ goto exit;
+ }
+ if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+ DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
+ ret_code);
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ goto exit;
+ }
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &sd_idx, &sd_lmt);
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+ hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &pd_idx, &pd_lmt);
+ rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+ obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_PAGED_BP_SIZE);
+ *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+ } else {
+ obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_DIRECT_BP_SIZE);
+ *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+ }
+exit:
+ return ret_code;
+}
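+
+/* Worked example (hypothetical object): base = 0, size = 128 and
+ * obj_idx = 40 give obj_offset_in_fpm = 5120. For a direct SD the returned
+ * address is u.bp.addr.va + 5120; for a paged SD the offset falls into the
+ * second 4K backing page (5120 / 4096 = 1) at offset 5120 % 4096 = 1024.
+ */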
+
+/**
+ * i40e_get_lan_tx_queue_context - return the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_get_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_get_lan_rx_queue_context - return the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_get_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct containing the context values to set
+ **/
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.h
new file mode 100755
index 00000000..70ef65cb
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_lan_hmc.h
@@ -0,0 +1,200 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u16 cpuid; /* bigger than needed, see above for reason */
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u16 dbuff; /* bigger than needed, see above for reason */
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u16 hbuff; /* bigger than needed, see above for reason */
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u32 rxmax; /* bigger than needed, see above for reason */
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
+};
+
+/* Tx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u8 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num);
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info);
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info);
+
+#endif /* _I40E_LAN_HMC_H_ */
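The header above also declares the LAN HMC lifecycle entry points: a driver is expected to size the HMC, program the chosen backing-page model, and tear it down on shutdown. A minimal sketch of that order follows; the queue counts are placeholders (the real values would normally come from the device capabilities), and the function name is hypothetical.

static enum i40e_status_code example_hmc_bringup(struct i40e_hw *hw)
{
	enum i40e_status_code err;

	/* size the HMC for the LAN objects we intend to use */
	err = i40e_init_lan_hmc(hw, 64 /* txq */, 64 /* rxq */, 0, 0);
	if (err != I40E_SUCCESS)
		return err;

	/* program the backing-page model into the hardware */
	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
	if (err != I40E_SUCCESS)
		(void)i40e_shutdown_lan_hmc(hw);

	return err;
}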
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_nvm.c b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_nvm.c
new file mode 100755
index 00000000..c62f5eb7
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_nvm.c
@@ -0,0 +1,940 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "i40e_prototype.h"
+
+/**
+ * i40e_init_nvm - Initialize the NVM info structure
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the NVM info structure (Shadow RAM size, timeout and programming
+ * mode). Should be called once per NVM initialization, e.g. inside
+ * i40e_init_shared_code(). Note that the term NVM is used here (and in all
+ * methods covered in this file) as an equivalent of the FLASH part mapped
+ * into the SR; the FLASH is always accessed through the Shadow RAM.
+ **/
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
+{
+ struct i40e_nvm_info *nvm = &hw->nvm;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 fla, gens;
+ u8 sr_size;
+
+ DEBUGFUNC("i40e_init_nvm");
+
+ /* The SR size is stored regardless of the nvm programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens = rd32(hw, I40E_GLNVM_GENS);
+ sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+ I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+ /* Switching to words (the SR size is 2^sr_size KB) */
+ nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = rd32(hw, I40E_GLNVM_FLA);
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+ /* Max NVM timeout */
+ nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+ nvm->blank_nvm_mode = false;
+ } else { /* Blank programming mode */
+ nvm->blank_nvm_mode = true;
+ ret_code = I40E_ERR_NVM_BLANK_MODE;
+ DEBUGOUT("NVM init error: unsupported blank mode.\n");
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * This function will request NVM ownership, for reading or writing,
+ * via the proper Admin Command.
+ **/
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 gtime, timeout;
+ u64 time = 0;
+
+ DEBUGFUNC("i40e_acquire_nvm");
+
+ if (hw->nvm.blank_nvm_mode)
+ goto i40e_acquire_nvm_exit;
+
+ ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+ 0, &time, NULL);
+ /* Reading the Global Device Timer */
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+ /* Store the timeout */
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
+
+ if (ret_code != I40E_SUCCESS) {
+ /* Set the polling timeout */
+ if (time > I40E_MAX_NVM_TIMEOUT)
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
+ + gtime;
+ else
+ timeout = hw->nvm.hw_semaphore_timeout;
+ /* Poll until the current NVM owner times out */
+ while (gtime < timeout) {
+ i40e_msec_delay(10);
+ ret_code = i40e_aq_request_resource(hw,
+ I40E_NVM_RESOURCE_ID,
+ access, 0, &time,
+ NULL);
+ if (ret_code == I40E_SUCCESS) {
+ hw->nvm.hw_semaphore_timeout =
+ I40E_MS_TO_GTIME(time) + gtime;
+ break;
+ }
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ }
+ if (ret_code != I40E_SUCCESS) {
+ hw->nvm.hw_semaphore_timeout = 0;
+ hw->nvm.hw_semaphore_wait =
+ I40E_MS_TO_GTIME(time) + gtime;
+ DEBUGOUT1("NVM acquire timed out, wait %llu ms before trying again.\n",
+ time);
+ }
+ }
+
+i40e_acquire_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+ DEBUGFUNC("i40e_release_nvm");
+
+ if (!hw->nvm.blank_nvm_mode)
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+}
+
+/**
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
+ *
+ * Polls the SRCTL Shadow RAM register done bit.
+ **/
+static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+ u32 srctl, wait_cnt;
+
+ DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
+
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
+ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+ srctl = rd32(hw, I40E_GLNVM_SRCTL);
+ if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+ ret_code = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(5);
+ }
+ if (ret_code == I40E_ERR_TIMEOUT)
+ DEBUGOUT("Done bit in GLNVM_SRCTL not set");
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+ u32 sr_reg;
+
+ DEBUGFUNC("i40e_read_nvm_srctl");
+
+ if (offset >= hw->nvm.sr_size) {
+ DEBUGOUT("NVM read error: Offset beyond Shadow RAM limit.\n");
+ ret_code = I40E_ERR_PARAM;
+ goto read_nvm_exit;
+ }
+
+ /* Poll the done bit first */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (ret_code == I40E_SUCCESS) {
+ /* Write the address and start reading */
+ sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+ wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (ret_code == I40E_SUCCESS) {
+ sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+ *data = (u16)((sr_reg &
+ I40E_GLNVM_SRDATA_RDDATA_MASK)
+ >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ }
+ }
+ if (ret_code != I40E_SUCCESS)
+ DEBUGOUT1("NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
+
+read_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the Shadow RAM one word at a time
+ * using the i40e_read_nvm_word() method. Taking and releasing the NVM
+ * ownership, where needed, is left to the caller.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 index, word;
+
+ DEBUGFUNC("i40e_read_nvm_buffer");
+
+ /* Loop thru the selected region */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+ if (ret_code != I40E_SUCCESS)
+ break;
+ }
+
+ /* Update the number of words read from the Shadow RAM */
+ *words = word;
+
+ return ret_code;
+}
+/**
+ * i40e_write_nvm_aq - Writes Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
+ **/
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+
+ DEBUGFUNC("i40e_write_nvm_aq");
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can write only up to 4KB (one sector), in one AQ write */
+ DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single write cannot spread over two sectors */
+ DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
+ else
+ ret_code = i40e_aq_update_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, NULL);
+
+ return ret_code;
+}
+
+/**
+ * i40e_write_nvm_word - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
+ * NVM ownership has to be acquired and released (on ARQ completion event
+ * reception) by the caller. To commit the SR to the NVM, the update
+ * checksum function should be called.
+ **/
+enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+ void *data)
+{
+ DEBUGFUNC("i40e_write_nvm_word");
+
+ /* Value 0x00 below means that we treat SR as a flat mem */
+ return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
+}
+
+/**
+ * i40e_write_nvm_buffer - Writes Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller on ARQ completion event reception. To commit the SR to the
+ * NVM, the update checksum function should be called.
+ **/
+enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data)
+{
+ DEBUGFUNC("i40e_write_nvm_buffer");
+
+ /* Here we will only write one buffer as the size of the modules
+ * mirrored in the Shadow RAM is always less than 4K.
+ */
+ return i40e_write_nvm_aq(hw, module_pointer, offset, words,
+ data, false);
+}
+
+/**
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
+ *
+ * This function calculates a SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of the
+ * VPD area are customer specific and unknown, so this function skips the
+ * maximum possible size of the VPD module (1kB).
+ **/
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 vpd_module = 0;
+ u16 word = 0;
+ u32 i = 0;
+
+ DEBUGFUNC("i40e_calc_nvm_checksum");
+
+ /* read pointer to VPD area */
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* read pointer to PCIe Alt Auto-load module */
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Skip Checksum word */
+ if (i == I40E_SR_SW_CHECKSUM_WORD)
+ i++;
+ /* Skip VPD module (convert byte size to word count) */
+ if (i == (u32)vpd_module) {
+ i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
+ if (i >= hw->nvm.sr_size)
+ break;
+ }
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if (i == (u32)pcie_alt_module) {
+ i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
+ if (i >= hw->nvm.sr_size)
+ break;
+ }
+
+ ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ checksum_local += word;
+ }
+
+ *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_update_nvm_checksum - Updates the NVM checksum
+ * @hw: pointer to hardware structure
+ *
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller on ARQ completion event reception.
+ * This function will commit the SR to the NVM.
+ **/
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 checksum;
+
+ DEBUGFUNC("i40e_update_nvm_checksum");
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+ if (ret_code == I40E_SUCCESS)
+ ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
+ 1, &checksum, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need the checksum, @checksum can be NULL.
+ **/
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 checksum_sr = 0;
+ u16 checksum_local = 0;
+
+ DEBUGFUNC("i40e_validate_nvm_checksum");
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ if (ret_code != I40E_SUCCESS)
+ goto i40e_validate_nvm_checksum_exit;
+
+ /* Do not use i40e_read_nvm_word() because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (checksum_local != checksum_sr)
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum)
+ *checksum = checksum_local;
+
+i40e_validate_nvm_checksum_exit:
+ return ret_code;
+}
+
+STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *err);
+STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *err);
+STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *err);
+STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *err);
+STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *err);
+STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *err);
+STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *err);
+STATIC inline u8 i40e_nvmupd_get_module(u32 val)
+{
+ return (u8)(val & I40E_NVM_MOD_PNT_MASK);
+}
+STATIC inline u8 i40e_nvmupd_get_transaction(u32 val)
+{
+ return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
+}
+
+/**
+ * i40e_nvmupd_command - Process an NVM update command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command
+ * @bytes: pointer to the data buffer
+ * @err: pointer to return error code
+ *
+ * Dispatches command depending on what update state is current
+ **/
+enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *err)
+{
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_nvmupd_command");
+
+ /* assume success */
+ *err = 0;
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT:
+ status = i40e_nvmupd_state_init(hw, cmd, bytes, err);
+ break;
+
+ case I40E_NVMUPD_STATE_READING:
+ status = i40e_nvmupd_state_reading(hw, cmd, bytes, err);
+ break;
+
+ case I40E_NVMUPD_STATE_WRITING:
+ status = i40e_nvmupd_state_writing(hw, cmd, bytes, err);
+ break;
+
+ default:
+ /* invalid state, should never happen */
+ status = I40E_NOT_SUPPORTED;
+ *err = -ESRCH;
+ break;
+ }
+ return status;
+}
+
+/**
+ * i40e_nvmupd_state_init - Handle NVM update state Init
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @err: pointer to return error code
+ *
+ * Process legitimate commands of the Init state and conditionally set next
+ * state. Reject all other commands.
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *err)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ enum i40e_nvmupd_cmd upd_cmd;
+
+ DEBUGFUNC("i40e_nvmupd_state_init");
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, err);
+
+ switch (upd_cmd) {
+ case I40E_NVMUPD_READ_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (status) {
+ *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, err);
+ i40e_release_nvm(hw);
+ }
+ break;
+
+ case I40E_NVMUPD_READ_SNT:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (status) {
+ *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, err);
+ hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_ERA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_erase(hw, cmd, err);
+ if (status)
+ i40e_release_nvm(hw);
+ else
+ hw->aq.nvm_release_on_done = true;
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, err);
+ if (status)
+ i40e_release_nvm(hw);
+ else
+ hw->aq.nvm_release_on_done = true;
+ }
+ break;
+
+ case I40E_NVMUPD_WRITE_SNT:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ } else {
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, err);
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ }
+ break;
+
+ case I40E_NVMUPD_CSUM_SA:
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+ } else {
+ status = i40e_update_nvm_checksum(hw);
+ if (status) {
+ *err = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ -EIO;
+ i40e_release_nvm(hw);
+ } else {
+ hw->aq.nvm_release_on_done = true;
+ }
+ }
+ break;
+
+ default:
+ status = I40E_ERR_NVM;
+ *err = -ESRCH;
+ break;
+ }
+ return status;
+}
+
+/**
+ * i40e_nvmupd_state_reading - Handle NVM update state Reading
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @err: pointer to return error code
+ *
+ * NVM ownership is already held. Process legitimate commands and set any
+ * change in state; reject all other commands.
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *err)
+{
+ enum i40e_status_code status;
+ enum i40e_nvmupd_cmd upd_cmd;
+
+ DEBUGFUNC("i40e_nvmupd_state_reading");
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, err);
+
+ switch (upd_cmd) {
+ case I40E_NVMUPD_READ_SA:
+ case I40E_NVMUPD_READ_CON:
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, err);
+ break;
+
+ case I40E_NVMUPD_READ_LCB:
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, err);
+ i40e_release_nvm(hw);
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ default:
+ status = I40E_NOT_SUPPORTED;
+ *err = -ESRCH;
+ break;
+ }
+ return status;
+}
+
+/**
+ * i40e_nvmupd_state_writing - Handle NVM update state Writing
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @err: pointer to return error code
+ *
+ * NVM ownership is already held. Process legitimate commands and set any
+ * change in state; reject all other commands
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *err)
+{
+ enum i40e_status_code status;
+ enum i40e_nvmupd_cmd upd_cmd;
+
+ DEBUGFUNC("i40e_nvmupd_state_writing");
+
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, err);
+
+ switch (upd_cmd) {
+ case I40E_NVMUPD_WRITE_CON:
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, err);
+ break;
+
+ case I40E_NVMUPD_WRITE_LCB:
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, err);
+ if (!status) {
+ hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+ break;
+
+ case I40E_NVMUPD_CSUM_CON:
+ status = i40e_update_nvm_checksum(hw);
+ if (status)
+ *err = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ -EIO;
+ break;
+
+ case I40E_NVMUPD_CSUM_LCB:
+ status = i40e_update_nvm_checksum(hw);
+ if (status) {
+ *err = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+ -EIO;
+ } else {
+ hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+ break;
+
+ default:
+ status = I40E_NOT_SUPPORTED;
+ *err = -ESRCH;
+ break;
+ }
+ return status;
+}
+
+/**
+ * i40e_nvmupd_validate_command - Validate given command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @err: pointer to return error code
+ *
+ * Return one of the valid command types or I40E_NVMUPD_INVALID
+ **/
+STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *err)
+{
+ enum i40e_nvmupd_cmd upd_cmd;
+ u8 transaction, module;
+
+ DEBUGFUNC("i40e_nvmupd_validate_command\n");
+
+ /* anything that doesn't match a recognized case is an error */
+ upd_cmd = I40E_NVMUPD_INVALID;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+
+ /* limits on data size */
+ if ((cmd->data_size < 1) ||
+ (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
+ DEBUGOUT1("i40e_nvmupd_validate_command data_size %d\n",
+ cmd->data_size);
+ *err = -EFAULT;
+ return I40E_NVMUPD_INVALID;
+ }
+
+ switch (cmd->command) {
+ case I40E_NVM_READ:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_READ_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_READ_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_READ_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_READ_SA;
+ break;
+ }
+ break;
+
+ case I40E_NVM_WRITE:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_WRITE_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_WRITE_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_WRITE_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_WRITE_SA;
+ break;
+ case I40E_NVM_ERA:
+ upd_cmd = I40E_NVMUPD_WRITE_ERA;
+ break;
+ case I40E_NVM_CSUM:
+ upd_cmd = I40E_NVMUPD_CSUM_CON;
+ break;
+ case (I40E_NVM_CSUM|I40E_NVM_SA):
+ upd_cmd = I40E_NVMUPD_CSUM_SA;
+ break;
+ case (I40E_NVM_CSUM|I40E_NVM_LCB):
+ upd_cmd = I40E_NVMUPD_CSUM_LCB;
+ break;
+ }
+ break;
+ }
+
+ if (upd_cmd == I40E_NVMUPD_INVALID) {
+ *err = -EFAULT;
+ DEBUGOUT2(
+ "i40e_nvmupd_validate_command returns %d err: %d\n",
+ upd_cmd, *err);
+ }
+ return upd_cmd;
+}
+
+/**
+ * i40e_nvmupd_nvm_read - Read NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @err: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *err)
+{
+ enum i40e_status_code status;
+ u8 module, transaction;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
+ DEBUGOUT3("i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+
+ status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ bytes, last, NULL);
+ DEBUGOUT1("i40e_nvmupd_nvm_read status %d\n", status);
+ if (status != I40E_SUCCESS)
+ *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_erase - Erase an NVM module
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @err: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *err)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u8 module, transaction;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+ DEBUGOUT3("i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ last, NULL);
+ DEBUGOUT1("i40e_nvmupd_nvm_erase status %d\n", status);
+ if (status != I40E_SUCCESS)
+ *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_write - Write NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @err: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *err)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u8 module, transaction;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+ DEBUGOUT3("i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
+ module, cmd->offset, cmd->data_size);
+ status = i40e_aq_update_nvm(hw, module, cmd->offset,
+ (u16)cmd->data_size, bytes, last, NULL);
+ DEBUGOUT1("i40e_nvmupd_nvm_write status %d\n", status);
+ if (status != I40E_SUCCESS)
+ *err = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+
+ return status;
+}
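Per the function comments above, AQ-based NVM operations are bracketed by an ownership acquire/release. Below is a minimal read sketch under that assumption, not part of the patch; the offset, word count and function name are placeholders.

static enum i40e_status_code example_read_sr(struct i40e_hw *hw, u16 offset,
					     u16 *buf, u16 words)
{
	enum i40e_status_code err;

	err = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (err != I40E_SUCCESS)
		return err;

	/* 'words' is updated to the number of words actually read */
	err = i40e_read_nvm_buffer(hw, offset, &words, buf);
	i40e_release_nvm(hw);

	return err;
}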
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_osdep.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_osdep.h
new file mode 100755
index 00000000..de71b0d8
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_osdep.h
@@ -0,0 +1,197 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "../i40e_logs.h"
+
+#define INLINE inline
+#define STATIC static
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef int bool;
+
+typedef enum i40e_status_code i40e_status;
+#define __iomem
+#define hw_dbg(hw, S, A...) do {} while (0)
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+#define lower_32_bits(n) ((u32)(n))
+#define low_16_bits(x) ((x) & 0xFFFF)
+#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16)
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#ifndef __le16
+#define __le16 uint16_t
+#endif
+#ifndef __le32
+#define __le32 uint32_t
+#endif
+#ifndef __le64
+#define __le64 uint64_t
+#endif
+#ifndef __be16
+#define __be16 uint16_t
+#endif
+#ifndef __be32
+#define __be32 uint32_t
+#endif
+#ifndef __be64
+#define __be64 uint64_t
+#endif
+
+#define FALSE 0
+#define TRUE 1
+#define false 0
+#define true 1
+
+#define min(a,b) RTE_MIN(a,b)
+#define max(a,b) RTE_MAX(a,b)
+
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define ASSERT(x) if(!(x)) rte_panic("I40E: x")
+
+#define DEBUGOUT(S) PMD_DRV_LOG_RAW(DEBUG, S)
+#define DEBUGOUT1(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT6 DEBUGOUT3
+#define DEBUGOUT7 DEBUGOUT6
+
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ PMD_DRV_LOG_RAW(DEBUG, "i40e %02x.%x " s, \
+ (h)->bus.device, (h)->bus.func, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define I40E_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define I40E_PCI_REG_ADDR(a, reg) \
+ ((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
+static inline uint32_t i40e_read_addr(volatile void *addr)
+{
+ return I40E_PCI_REG(addr);
+}
+#define I40E_PCI_REG_WRITE(reg, value) \
+ do {I40E_PCI_REG((reg)) = (value);} while(0)
+
+#define I40E_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_GLGEN_STAT)
+#define I40EVF_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_VFGEN_RSTAT)
+
+#define I40E_READ_REG(hw, reg) i40e_read_addr(I40E_PCI_REG_ADDR((hw), (reg)))
+#define I40E_WRITE_REG(hw, reg, value) \
+ I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define rd32(a, reg) i40e_read_addr(I40E_PCI_REG_ADDR((a), (reg)))
+#define wr32(a, reg, value) \
+ I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((a), (reg)), (value))
+#define flush(a) i40e_read_addr(I40E_PCI_REG_ADDR((a), (I40E_GLGEN_STAT)))
+
+#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ u64 pa;
+ u32 size;
+ u64 id;
+} __attribute__((packed));
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40e_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+} __attribute__((packed));
+
+#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
+
+#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
+#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
+#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
+#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
+#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
+#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
+
+/* SW spinlock */
+struct i40e_spinlock {
+ rte_spinlock_t spinlock;
+};
+
+#define i40e_init_spinlock(_sp) i40e_init_spinlock_d(_sp)
+#define i40e_acquire_spinlock(_sp) i40e_acquire_spinlock_d(_sp)
+#define i40e_release_spinlock(_sp) i40e_release_spinlock_d(_sp)
+#define i40e_destroy_spinlock(_sp) i40e_destroy_spinlock_d(_sp)
+
+#define I40E_NTOHS(a) rte_be_to_cpu_16(a)
+#define I40E_NTOHL(a) rte_be_to_cpu_32(a)
+#define I40E_HTONS(a) rte_cpu_to_be_16(a)
+#define I40E_HTONL(a) rte_cpu_to_be_32(a)
+
+#define i40e_memset(a, b, c, d) memset((a), (b), (c))
+#define i40e_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DELAY(x) rte_delay_us(x)
+#define i40e_usec_delay(x) rte_delay_us(x)
+#define i40e_msec_delay(x) rte_delay_us(1000*(x))
+#define udelay(x) DELAY(x)
+#define msleep(x) DELAY(1000*(x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#endif /* _I40E_OSDEP_H_ */
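For readers unfamiliar with the osdep shims, rd32()/wr32() above expand into volatile 32-bit accesses relative to hw->hw_addr. A small illustration follows (the register offset is only an example, hw_addr is assumed to hold the BAR0 mapping set up by the PMD, and the helper name is hypothetical).

static inline u32 example_read_gens(struct i40e_hw *hw)
{
	/* expands to *(volatile uint32_t *)((char *)hw->hw_addr + I40E_GLNVM_GENS) */
	return rd32(hw, I40E_GLNVM_GENS);
}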
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_prototype.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_prototype.h
new file mode 100755
index 00000000..f3215cf8
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_prototype.h
@@ -0,0 +1,430 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These are
+ * mostly because they are needed even before the init
+ * has happened and will assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw);
+u16 i40e_clean_asq(struct i40e_hw *hw);
+void i40e_free_adminq_asq(struct i40e_hw *hw);
+void i40e_free_adminq_arq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool i40e_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40e_resume_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+
+#ifndef VF_DRIVER
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+
+/* admin send queue commands */
+
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_reset);
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+ u16 max_frame_size, bool crc_en, u16 pacing,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw,
+ bool enable_lse);
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, bool enable_l2_filtering,
+ u16 *pveb_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+ void *buff, u16 buff_size, u16 tlv_len,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 tlv_len, u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+ u8 *num_entries,
+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+ u16 count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+ u16 mac_seid, u16 vsi_seid,
+ u16 *ret_seid);
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+ u16 vsi_seid, u16 tag, u16 queue_num,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 tag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+ u16 etag, u8 num_tags_in_buf, void *buf,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+ u16 etag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 old_tag, u16 new_tag, u16 *tags_used,
+ u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 *stat_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 stat_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+ u16 bad_frame_vsi, bool save_bad_pac,
+ bool pad_short_pac, bool double_vlan,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
+ u8 tcmap, bool request, u8 *tcmap_ret,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile *profile,
+ u8 *pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+ u16 vsi,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count);
+
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+ u16 vsi,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count);
+
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1);
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+ u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1);
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+ u8 bios_mode, bool *reset_needed);
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+ u8 oem_mode);
+
+/* i40e_common */
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw);
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_hw(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+bool i40e_get_link_status(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
+/* prototypes for functions used for NVM access */
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
+ u32 offset, u16 words, void *data,
+ bool last_command);
+enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+ void *data);
+enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
+ u32 offset, u16 words, void *data);
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw);
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum);
+enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+#endif /* VF_DRIVER */
+
+#if defined(I40E_QV) || defined(VF_DRIVER)
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
+
+#endif
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
+STATIC INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40e_ptype_lookup[ptype];
+}
+
+/* prototypes for functions used for SW spinlocks */
+void i40e_init_spinlock(struct i40e_spinlock *sp);
+void i40e_acquire_spinlock(struct i40e_spinlock *sp);
+void i40e_release_spinlock(struct i40e_spinlock *sp);
+void i40e_destroy_spinlock(struct i40e_spinlock *sp);
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ enum i40e_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+#endif /* _I40E_PROTOTYPE_H_ */
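Editor's note: the prototypes above form the base-driver API surface of this header: admin-queue (AQ) wrappers, NVM access, software spinlocks, and the decode_rx_desc_ptype() lookup. The following is a minimal, hypothetical caller sketch, not part of the patch. It assumes a struct i40e_hw already initialized through the driver's normal probe path and uses only signatures declared above, so error handling is reduced to propagating the status code.

/* Hypothetical sketch: exercise a few of the prototypes declared above.
 * Assumes i40e_type.h / i40e_osdep.h are in scope and that "hw" was set up
 * by the usual probe path.
 */
static enum i40e_status_code example_bringup(struct i40e_hw *hw)
{
	enum i40e_status_code status;
	u8 mac[6];

	status = i40e_init_shared_code(hw);	/* identify MAC/PHY type */
	if (status)
		return status;

	status = i40e_get_mac_addr(hw, mac);	/* primary MAC via the AQ */
	if (status)
		return status;

	/* Issue an AQ command; NULL cmd_details means no completion hook. */
	return i40e_aq_start_lldp(hw, NULL);
}

/* Rx packet-type decode: table lookup declared above; the "known" bit is
 * assumed to be the one declared in struct i40e_rx_ptype_decoded in
 * i40e_type.h.
 */
static bool example_ptype_is_known(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	return decoded.known != 0;
}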
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_register.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_register.h
new file mode 100755
index 00000000..f236c395
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_register.h
@@ -0,0 +1,3377 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+
+#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_SHIFT 0
+#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_SHIFT 0
+#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQH_ATQH_SHIFT 0
+#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
+#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
+#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
+#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
+#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
+#define I40E_GL_ATQT_ATQT_SHIFT 0
+#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
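Editor's note: every register in this file follows the same convention: a byte offset for the register, then a _SHIFT/_MASK pair per field, built with the I40E_MASK() helper (defined in a companion header, presumably as the raw mask value shifted into position). A small, hypothetical read/modify sketch using the PF ARQLEN defines above; rd32()/wr32() are assumed to be the register accessors provided by the surrounding driver's osdep layer.

/* Hypothetical field-extraction sketch for the shift/mask convention. */
static u32 example_pf_arq_len(struct i40e_hw *hw)
{
	u32 reg = rd32(hw, I40E_PF_ARQLEN);

	/* Extract the ARQLEN field (bits 9:0 of PF_ARQLEN). */
	return (reg & I40E_PF_ARQLEN_ARQLEN_MASK) >> I40E_PF_ARQLEN_ARQLEN_SHIFT;
}

static void example_pf_arq_enable(struct i40e_hw *hw, u16 num_entries)
{
	u32 reg = rd32(hw, I40E_PF_ARQLEN);

	reg &= ~I40E_PF_ARQLEN_ARQLEN_MASK;
	reg |= ((u32)num_entries << I40E_PF_ARQLEN_ARQLEN_SHIFT) &
	       I40E_PF_ARQLEN_ARQLEN_MASK;
	reg |= I40E_PF_ARQLEN_ARQENABLE_MASK;	/* bit 31: enable the queue */

	wr32(hw, I40E_PF_ARQLEN, reg);
}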
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
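Editor's note: registers declared with a parameter, such as I40E_GLGEN_GPIO_CTL(_i), are register arrays; the macro computes the offset of element _i and the matching _MAX_INDEX define gives the last valid index. A hypothetical sketch of walking such an array, again assuming the rd32() accessor from the driver's osdep layer:

/* Hypothetical sketch: iterate the GPIO control register array via the
 * parameterized offset macro and its _MAX_INDEX bound defined above.
 */
static void example_dump_gpio_ctl(struct i40e_hw *hw)
{
	u32 i;

	for (i = 0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
		u32 pin_func = (val & I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK) >>
			       I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT;

		/* pin_func selects the SDP function assigned to GPIO pin i;
		 * a real caller would log or act on it here.
		 */
		(void)pin_func;
	}
}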
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
+#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
+#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
+#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
+#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
+#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
+#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
+#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
+#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
+#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
+#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
+#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
+#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
+#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX 12
+#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
+#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
+#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
+#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
+#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
+#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
+#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1_L_MAX_INDEX 143
+#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
+#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR2_L_MAX_INDEX 143
+#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
+#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
+#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 25
+#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_status.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_status.h
new file mode 100755
index 00000000..2e693a39
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_status.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_type.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_type.h
new file mode 100755
index 00000000..bb876402
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_type.h
@@ -0,0 +1,1425 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p) (_p);
+#define UNREFERENCED_2PARAMETER(_p, _q) (_p); (_q);
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) (_p); (_q); (_r);
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s);
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) (_p); (_q); (_r); (_s); (_t);
+
+/* Vendor ID */
+#define I40E_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#ifndef I40E_MASK
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) (mask << shift)
+#endif
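+/* Illustrative note (not part of the original header): each register field
+ * below pairs a *_SHIFT with a *_MASK built via I40E_MASK(), so a field is
+ * read as (reg & FIELD_MASK) >> FIELD_SHIFT and written by or-ing in a
+ * shifted value. For example, assuming a raw VFINT_DYN_CTL01 value 'val':
+ *
+ *   u32 itr_interval = (val & I40E_VFINT_DYN_CTL01_INTERVAL_MASK) >>
+ *                      I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT;
+ */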
+
+#define I40E_MAX_PF 16
+#define I40E_MAX_PF_VSI 64
+#define I40E_MAX_PF_QP 128
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* something less than 1 minute */
+#define I40E_HEARTBEAT_TIMEOUT (HZ * 50)
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Check whether address is multicast. */
+#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define I40E_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define I40E_ETH_LENGTH_OF_ADDRESS 6
+/* Data type manipulation macros. */
+#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+
+#define I40E_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define I40E_LO_WORD(x) ((u16)((x) & 0xFFFF))
+
+#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF))
+
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define I40E_REQ_RX_DESCRIPTOR_MULTIPLE 32
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
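+/* Illustrative note (not part of the original header): I40E_DESC_UNUSED()
+ * returns the number of free slots in a descriptor ring, accounting for
+ * wrap-around. With count = 512, next_to_use = 10, next_to_clean = 500 it
+ * yields 0 + 500 - 10 - 1 = 489; with next_to_use = 500, next_to_clean = 10
+ * it yields 512 + 10 - 500 - 1 = 21.
+ */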
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_STATUS 0xB2
+#define I40E_PCI_LINK_WIDTH 0x3F0
+#define I40E_PCI_LINK_WIDTH_1 0x10
+#define I40E_PCI_LINK_WIDTH_2 0x20
+#define I40E_PCI_LINK_WIDTH_4 0x40
+#define I40E_PCI_LINK_WIDTH_8 0x80
+#define I40E_PCI_LINK_SPEED 0xF
+#define I40E_PCI_LINK_SPEED_2500 0x1
+#define I40E_PCI_LINK_SPEED_5000 0x2
+#define I40E_PCI_LINK_SPEED_8000 0x3
+
+/* Memory types */
+enum i40e_memset_type {
+ I40E_NONDMA_MEM = 0,
+ I40E_DMA_MEM
+};
+
+/* Memcpy types */
+enum i40e_memcpy_type {
+ I40E_NONDMA_TO_NONDMA = 0,
+ I40E_NONDMA_TO_DMA,
+ I40E_DMA_TO_DMA,
+ I40E_DMA_TO_NONDMA
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_set_fc_aq_failures {
+ I40E_SET_FC_AQ_FAIL_NONE = 0,
+ I40E_SET_FC_AQ_FAIL_GET = 1,
+ I40E_SET_FC_AQ_FAIL_SET = 2,
+ I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+ I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1,
+ I40E_VSI_VMDQ2,
+ I40E_VSI_CTRL,
+ I40E_VSI_FCOE,
+ I40E_VSI_MIRROR,
+ I40E_VSI_SRIOV,
+ I40E_VSI_FDIR,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 loopback;
+ bool an_enabled;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ u32 autoneg_advertised;
+ u32 phy_id;
+ u32 module_type;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
+#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
+
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool mfp_mode_1;
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+ u64 hw_semaphore_wait; /* - || - */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present)*/
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+};
+
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+ I40E_NVMUPD_INVALID,
+ I40E_NVMUPD_READ_CON,
+ I40E_NVMUPD_READ_SNT,
+ I40E_NVMUPD_READ_LCB,
+ I40E_NVMUPD_READ_SA,
+ I40E_NVMUPD_WRITE_ERA,
+ I40E_NVMUPD_WRITE_CON,
+ I40E_NVMUPD_WRITE_SNT,
+ I40E_NVMUPD_WRITE_LCB,
+ I40E_NVMUPD_WRITE_SA,
+ I40E_NVMUPD_CSUM_CON,
+ I40E_NVMUPD_CSUM_SA,
+ I40E_NVMUPD_CSUM_LCB,
+};
+
+enum i40e_nvmupd_state {
+ I40E_NVMUPD_STATE_INIT,
+ I40E_NVMUPD_STATE_READING,
+ I40E_NVMUPD_STATE_WRITING
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code. Where is the right file?
+ */
+#define I40E_NVM_READ 0xB
+#define I40E_NVM_WRITE 0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+
+#define I40E_NVM_ADAPT_SHIFT 16
+#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
+
+#define I40E_NVMUPD_MAX_DATA 4096
+#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct i40e_nvm_access {
+ u32 command;
+ u32 config;
+ u32 offset; /* in bytes */
+ u32 data_size; /* in bytes */
+ u8 data[1];
+};
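+/* Illustrative sketch (assumption, not part of the original header): the
+ * config word combines the module pointer (low byte) with the transaction
+ * type (bits 8..11), e.g. a single-buffer read could be described as:
+ *
+ *   struct i40e_nvm_access cmd = {
+ *           .command   = I40E_NVM_READ,
+ *           .config    = (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) | 0x0,
+ *           .offset    = 0,
+ *           .data_size = 16,
+ *   };
+ */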
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u32 numapps;
+ struct i40e_ieee_ets_config etscfg;
+ struct i40e_ieee_ets_recommend etsrec;
+ struct i40e_ieee_pfc_config pfc;
+ struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 *hw_addr;
+ void *back;
+
+ /* function pointer structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* state of nvm update process */
+ enum i40e_nvmupd_state nvmupd_state;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config;
+ struct i40e_dcbx_config remote_dcbx_config;
+
+ /* debug mask */
+ u32 debug_mask;
+};
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW0_MIRROR_STATUS_SHIFT 8
+#define I40E_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \
+ I40E_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define I40E_RXD_QW0_FCOEINDX_SHIFT 0
+#define I40E_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \
+ I40E_RXD_QW0_FCOEINDX_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) << \
+ I40E_RXD_QW1_STATUS_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST_SHIFT
+#define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_PACKET_TYPE_UNICAST 0
+#define I40E_RXD_PACKET_TYPE_MULTICAST 1
+#define I40E_RXD_PACKET_TYPE_BROADCAST 2
+#define I40E_RXD_PACKET_TYPE_MIRRORED 3
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RX_PTYPE_BIT_MASK 0x0FFFFFFF
+#define I40E_RX_PTYPE_SHIFT 56
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+
+#define I40E_RXD_QW1_NEXTP_SHIFT 38
+#define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT)
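+/* Illustrative sketch (not part of the original header): the writeback
+ * qword1 of an Rx descriptor is decoded with the shift/mask pairs above,
+ * e.g. for a little-endian host:
+ *
+ *   u64 qw1    = rx_desc->wb.qword1.status_error_len;
+ *   u32 status = (qw1 & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT;
+ *   u32 error  = (qw1 & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT;
+ *   u32 ptype  = (qw1 & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+ *   u32 length = (qw1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ *                I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ */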
+
+#define I40E_RXD_QW2_EXT_STATUS_SHIFT 0
+#define I40E_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \
+ I40E_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+#define I40E_RXD_QW2_L2TAG2_SHIFT 0
+#define I40E_RXD_QW2_L2TAG2_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT)
+
+#define I40E_RXD_QW2_L2TAG3_SHIFT 16
+#define I40E_RXD_QW2_L2TAG3_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT)
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+#define I40E_TWO_BIT_MASK 0x3
+#define I40E_THREE_BIT_MASK 0x7
+#define I40E_FOUR_BIT_MASK 0xF
+#define I40E_EIGHTEEN_BIT_MASK 0x3FFFF
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define I40E_TXD_QW1_IPLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define I40E_TXD_QW1_L4LEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define I40E_TXD_QW1_FCLEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
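+/* Illustrative sketch (not part of the original header): a data descriptor's
+ * cmd_type_offset_bsz quadword packs dtype, command bits, header offsets,
+ * buffer size and L2 tag using the shifts above, roughly:
+ *
+ *   u64 qw1 = (u64)I40E_TX_DESC_DTYPE_DATA |
+ *             ((u64)(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS |
+ *                    I40E_TX_DESC_CMD_ICRC) << I40E_TXD_QW1_CMD_SHIFT) |
+ *             ((u64)offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ *             ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ *             ((u64)l2tag1 << I40E_TXD_QW1_L2TAG1_SHIFT);
+ *
+ * where offset, size and l2tag1 are caller-supplied placeholder values.
+ */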
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
+ I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_nop_desc {
+ __le64 rsvd;
+ __le64 dtype_cmd;
+};
+
+#define I40E_TXD_NOP_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_NOP_QW1_DTYPE_MASK (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_NOP_QW1_CMD_SHIFT 4
+#define I40E_TXD_NOP_QW1_CMD_MASK (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT)
+
+enum i40e_tx_nop_desc_cmd_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_NOP_DESC_EOP_SHIFT = 0,
+ I40E_TX_NOP_DESC_RS_SHIFT = 1,
+ I40E_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */
+};
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+ /* Note: Values 0-30 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ /* Note: Value 32 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-40 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
+ I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
+ /* EEE LPI */
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03
+#define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04
+#define I40E_SR_OPTION_ROM_PTR 0x05
+#define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
+#define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07
+#define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
+#define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09
+#define I40E_SR_RO_PCIE_LCB_PTR 0x0A
+#define I40E_SR_EMP_IMAGE_PTR 0x0B
+#define I40E_SR_PE_IMAGE_PTR 0x0C
+#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
+#define I40E_SR_MNG_CONFIG_PTR 0x0E
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_PBA_BLOCK_PTR 0x16
+#define I40E_SR_BOOT_CONFIG_PTR 0x17
+#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PXE_SETUP_PTR 0x30
+#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
+#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
+#define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38
+#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
+#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
+#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
+#define I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
+#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
+#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
+#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_BUF_ALIGNMENT 4096
+#define I40E_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+
+/* Offsets into Alternate Ram */
+#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate Ram Bandwidth Masks */
+#define I40E_ALT_BW_VALUE_MASK 0xFF
+#define I40E_ALT_BW_RELATIVE_MASK 0x40000000
+#define I40E_ALT_BW_VALID_MASK 0x80000000
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+#endif /* _I40E_TYPE_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_virtchnl.h b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_virtchnl.h
new file mode 100755
index 00000000..5257b5de
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e/i40e_virtchnl.h
@@ -0,0 +1,373 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is
+ * of type i40e_status_code, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
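+
+/* Illustrative bring-up sequence implied by the description above (a sketch
+ * only, not an additional protocol requirement):
+ *   1. I40E_VIRTCHNL_OP_VERSION          - check major/minor compatibility
+ *   2. I40E_VIRTCHNL_OP_RESET_VF         - then poll VFGEN_RSTAT until done
+ *   3. I40E_VIRTCHNL_OP_GET_VF_RESOURCES - learn VSIs, queues and vectors
+ *   4. I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES / I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ *   5. I40E_VIRTCHNL_OP_ENABLE_QUEUES    - then optionally add MAC/VLAN
+ *      filters and process traffic
+ */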
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* VF sends requests to the PF for the
+ * following ops.
+ */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_VIRTCHNL_OP_FCOE,
+/* PF sends status change events to VFs using
+ * the following op.
+ */
+ I40E_VIRTCHNL_OP_EVENT,
+};
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum i40e_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures.*/
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 0
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to PF with no parameters
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 max_fcoe_contexts;
+ u32 max_fcoe_filters;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
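+
+/* Sizing note (illustrative, not part of the on-wire definition): vsi_res[1]
+ * is the usual C89-style variable-length tail, so a GET_VF_RESOURCES reply
+ * carrying N VSIs is expected to occupy roughly
+ *   sizeof(struct i40e_virtchnl_vf_resource) +
+ *       (N - 1) * sizeof(struct i40e_virtchnl_vsi_resource)
+ * bytes of the external buffer, with num_vsis set to N.
+ */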
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
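+
+/* Example (illustrative only): to steer RX queues 0 and 1 and TX queue 0 of
+ * VSI 3 to vector 1, a VF could fill one map entry as
+ *   vecmap[0].vsi_id = 3;  vecmap[0].vector_id = 1;
+ *   vecmap[0].rxq_map = (1 << 0) | (1 << 1);
+ *   vecmap[0].txq_map = (1 << 0);
+ * and send it with num_vectors = 1 in I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP.
+ */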
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
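+
+/* Example (illustrative only): rx_queues = tx_queues = 0x3 requests action on
+ * queues 0 and 1 of the given vsi_id. */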
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
+#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return 0xDEADBEEF, which masks to 3
+ * (I40E_VFR_UNKNOWN).
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.c b/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.c
new file mode 100755
index 00000000..9c0db84c
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.c
@@ -0,0 +1,5456 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_string_fns.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_alarm.h>
+#include <rte_dev.h>
+#include <rte_eth_ctrl.h>
+
+#include "i40e_logs.h"
+#include "i40e/i40e_prototype.h"
+#include "i40e/i40e_adminq_cmd.h"
+#include "i40e/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_pf.h"
+
+/* Maximum number of MAC addresses */
+#define I40E_NUM_MACADDR_MAX 64
+#define I40E_CLEAR_PXE_WAIT_MS 200
+
+/* Maximum number of capability elements */
+#define I40E_MAX_CAP_ELE_NUM 128
+
+/* Wait count and interval */
+#define I40E_CHK_Q_ENA_COUNT 1000
+#define I40E_CHK_Q_ENA_INTERVAL_US 1000
+
+/* Maximum number of VSIs */
+#define I40E_MAX_NUM_VSIS (384UL)
+
+/* Default queue interrupt throttling time in microseconds */
+#define I40E_ITR_INDEX_DEFAULT 0
+#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+
+#define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
+
+/* Mask of PF interrupt causes */
+#define I40E_PFINT_ICR0_ENA_MASK ( \
+ I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
+ I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
+ I40E_PFINT_ICR0_ENA_GRST_MASK | \
+ I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
+ I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
+ I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
+ I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
+ I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
+ I40E_PFINT_ICR0_ENA_VFLR_MASK | \
+ I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
+
+static int eth_i40e_dev_init(\
+ __attribute__((unused)) struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev);
+static int i40e_dev_configure(struct rte_eth_dev *dev);
+static int i40e_dev_start(struct rte_eth_dev *dev);
+static void i40e_dev_stop(struct rte_eth_dev *dev);
+static void i40e_dev_close(struct rte_eth_dev *dev);
+static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
+static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
+static void i40e_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
+static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
+static void i40e_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id,
+ int on);
+static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
+static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
+ uint16_t queue,
+ int on);
+static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
+static int i40e_dev_led_on(struct rte_eth_dev *dev);
+static int i40e_dev_led_off(struct rte_eth_dev *dev);
+static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_pfc_conf *pfc_conf);
+static void i40e_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool);
+static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
+static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
+static int i40e_get_cap(struct i40e_hw *hw);
+static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
+static int i40e_pf_setup(struct i40e_pf *pf);
+static int i40e_dev_rxtx_init(struct i40e_pf *pf);
+static int i40e_vmdq_setup(struct rte_eth_dev *dev);
+static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
+ bool offset_loaded, uint64_t *offset, uint64_t *stat);
+static void i40e_stat_update_48(struct i40e_hw *hw,
+ uint32_t hireg,
+ uint32_t loreg,
+ bool offset_loaded,
+ uint64_t *offset,
+ uint64_t *stat);
+static void i40e_pf_config_irq0(struct i40e_hw *hw);
+static void i40e_dev_interrupt_handler(
+ __rte_unused struct rte_intr_handle *handle, void *param);
+static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
+ uint32_t base, uint32_t num);
+static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
+static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
+ uint32_t base);
+static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
+ uint16_t num);
+static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
+static int i40e_veb_release(struct i40e_veb *veb);
+static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
+ struct i40e_vsi *vsi);
+static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
+static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
+static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num,
+ struct ether_addr *addr);
+static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num,
+ uint16_t vlan);
+static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
+static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
+static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
+static int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+static void i40e_configure_registers(struct i40e_hw *hw);
+
+/* Default hash key buffer for RSS */
+static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
+
+static struct rte_pci_id pci_id_i40e_map[] = {
+#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+{ .vendor_id = 0, /* sentinel */ },
+};
+
+static struct eth_dev_ops i40e_eth_dev_ops = {
+ .dev_configure = i40e_dev_configure,
+ .dev_start = i40e_dev_start,
+ .dev_stop = i40e_dev_stop,
+ .dev_close = i40e_dev_close,
+ .promiscuous_enable = i40e_dev_promiscuous_enable,
+ .promiscuous_disable = i40e_dev_promiscuous_disable,
+ .allmulticast_enable = i40e_dev_allmulticast_enable,
+ .allmulticast_disable = i40e_dev_allmulticast_disable,
+ .dev_set_link_up = i40e_dev_set_link_up,
+ .dev_set_link_down = i40e_dev_set_link_down,
+ .link_update = i40e_dev_link_update,
+ .stats_get = i40e_dev_stats_get,
+ .stats_reset = i40e_dev_stats_reset,
+ .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
+ .dev_infos_get = i40e_dev_info_get,
+ .vlan_filter_set = i40e_vlan_filter_set,
+ .vlan_tpid_set = i40e_vlan_tpid_set,
+ .vlan_offload_set = i40e_vlan_offload_set,
+ .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
+ .vlan_pvid_set = i40e_vlan_pvid_set,
+ .rx_queue_start = i40e_dev_rx_queue_start,
+ .rx_queue_stop = i40e_dev_rx_queue_stop,
+ .tx_queue_start = i40e_dev_tx_queue_start,
+ .tx_queue_stop = i40e_dev_tx_queue_stop,
+ .rx_queue_setup = i40e_dev_rx_queue_setup,
+ .rx_queue_release = i40e_dev_rx_queue_release,
+ .rx_queue_count = i40e_dev_rx_queue_count,
+ .rx_descriptor_done = i40e_dev_rx_descriptor_done,
+ .tx_queue_setup = i40e_dev_tx_queue_setup,
+ .tx_queue_release = i40e_dev_tx_queue_release,
+ .dev_led_on = i40e_dev_led_on,
+ .dev_led_off = i40e_dev_led_off,
+ .flow_ctrl_set = i40e_flow_ctrl_set,
+ .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
+ .mac_addr_add = i40e_macaddr_add,
+ .mac_addr_remove = i40e_macaddr_remove,
+ .reta_update = i40e_dev_rss_reta_update,
+ .reta_query = i40e_dev_rss_reta_query,
+ .rss_hash_update = i40e_dev_rss_hash_update,
+ .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
+ .udp_tunnel_add = i40e_dev_udp_tunnel_add,
+ .udp_tunnel_del = i40e_dev_udp_tunnel_del,
+ .filter_ctrl = i40e_dev_filter_ctrl,
+};
+
+static struct eth_driver rte_i40e_pmd = {
+ {
+ .name = "rte_i40e_pmd",
+ .id_table = pci_id_i40e_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ },
+ .eth_dev_init = eth_i40e_dev_init,
+ .dev_private_size = sizeof(struct i40e_adapter),
+};
+
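+/* Rounds n down to the nearest power of two by keeping only the highest set
+ * bit (e.g. 6 -> 4, 8 -> 8); returns 0 for n == 0. */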
+static inline int
+i40e_align_floor(int n)
+{
+ if (n == 0)
+ return 0;
+ return (1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)));
+}
+
+static inline int
+rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Poll Mode] Driver of PCI i40e devices.
+ */
+static int
+rte_i40e_pmd_init(const char *name __rte_unused,
+ const char *params __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ rte_eth_driver_register(&rte_i40e_pmd);
+
+ return 0;
+}
+
+static struct rte_driver rte_i40e_driver = {
+ .type = PMD_PDEV,
+ .init = rte_i40e_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_i40e_driver);
+
+/*
+ * Initialize registers for flexible payload, which should be set by NVM.
+ * This should be removed from code once it is fixed in NVM.
+ */
+#ifndef I40E_GLQF_ORT
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
+#endif
+#ifndef I40E_GLQF_PIT
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
+#endif
+
+static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
+{
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
+
+ /* GLQF_PIT Registers */
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
+}
+
+#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32))
+#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8))
+
+static inline void i40e_fillter_fields_reg_init(struct i40e_hw *hw)
+{
+ uint32_t reg;
+
+
+ reg = I40E_READ_REG(hw,I40E_GLQF_ORT(12));
+ //printf("GLQF_ORT(12) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(12), 0x00000062);
+
+ reg = I40E_READ_REG(hw,I40E_GLQF_PIT(2));
+ //printf("I40E_GLQF_PIT(2) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(2), 0x000024A0);
+
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(31, 0));
+ //printf("I40E_PRTQF_FD_INSET(31, 0) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(31, 0), 0);
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(31, 1));
+ //printf("I40E_PRTQF_FD_INSET(31, 1) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(31, 1), 0x00040000);
+
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(33, 0));
+ //printf("I40E_PRTQF_FD_INSET(33, 0) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(33, 0), 0);
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(33, 1));
+ //printf("I40E_PRTQF_FD_INSET(33, 1) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(33, 1), 0x00040000);
+
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(41, 0));
+ //printf("I40E_PRTQF_FD_INSET(41, 0) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(41, 0), 0);
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(41, 1));
+ //printf("I40E_PRTQF_FD_INSET(41, 1) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(41, 1), 0x00080000);
+
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(43, 0));
+ //printf("I40E_PRTQF_FD_INSET(43, 0) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(43, 0), 0);
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(43, 1));
+ //printf("I40E_PRTQF_FD_INSET(43, 1) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(43, 1), 0x00080000);
+
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(34, 0));
+ //printf("I40E_PRTQF_FD_INSET(34, 0) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(34, 0), 0);
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(34, 1));
+ //printf("I40E_PRTQF_FD_INSET(34, 1) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(34, 1), 0x00040000);
+
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(44, 0));
+ //printf("I40E_PRTQF_FD_INSET(44, 0) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(44, 0), 0);
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(44, 1));
+ //printf("I40E_PRTQF_FD_INSET(44, 1) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(44, 1), 0x00080000);
+
+ reg = I40E_READ_REG(hw,I40E_GLQF_FD_MSK(0, 34));
+ //printf("I40E_GLQF_FD_MSK(0, 34) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_GLQF_FD_MSK(0, 34), 0x000DFF00);
+ reg = I40E_READ_REG(hw,I40E_GLQF_FD_MSK(0, 44));
+ //printf("I40E_GLQF_FD_MSK(0, 44) = 0x%08x\n", reg);
+ I40E_WRITE_REG(hw, I40E_GLQF_FD_MSK(0,44), 0x000C00FF);
+
+ I40E_WRITE_FLUSH(hw);
+}
+
+
+
+static int
+eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
+ struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ int ret;
+ uint32_t len;
+ uint8_t aq_fail = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev->dev_ops = &i40e_eth_dev_ops;
+ dev->rx_pkt_burst = i40e_recv_pkts;
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (dev->data->scattered_rx)
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ return 0;
+ }
+ pci_dev = dev->pci_dev;
+ pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ pf->adapter->eth_dev = dev;
+ pf->dev_data = dev->data;
+
+ hw->back = I40E_PF_TO_ADAPTER(pf);
+ hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
+ if (!hw->hw_addr) {
+ PMD_INIT_LOG(ERR, "Hardware is not available, "
+ "as address is NULL");
+ return -ENODEV;
+ }
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+
+ /* Make sure all is clean before doing PF reset */
+ i40e_clear_hw(hw);
+
+ /* Reset here to make sure all is clean for each PF */
+ ret = i40e_pf_reset(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
+ return ret;
+ }
+
+ /* Initialize the shared code (base driver) */
+ ret = i40e_init_shared_code(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
+ return ret;
+ }
+
+ /*
+ * To work around the NVM issue, initialize the registers
+ * for flexible payload in software.
+ * This should be removed once the issue is fixed in NVM.
+ */
+ //i40e_flex_payload_reg_init(hw);
+ i40e_fillter_fields_reg_init(hw);
+
+ /* Initialize the parameters for adminq */
+ i40e_init_adminq_parameter(hw);
+ ret = i40e_init_adminq(hw);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
+ return -EIO;
+ }
+ PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack);
+
+ /* Disable LLDP */
+ ret = i40e_aq_stop_lldp(hw, true, NULL);
+ if (ret != I40E_SUCCESS) /* Its failure can be ignored */
+ PMD_INIT_LOG(INFO, "Failed to stop lldp");
+
+ /* Clear PXE mode */
+ i40e_clear_pxe_mode(hw);
+
+ /*
+ * On X710, performance numbers are far below expectations with recent
+ * firmware versions, and the fix may not land in the next firmware
+ * release. A software workaround is therefore needed: it modifies the
+ * initial values of 3 internal-only registers. Note that the workaround
+ * can be removed once this is fixed in firmware.
+ */
+ i40e_configure_registers(hw);
+
+ /* Get hw capabilities */
+ ret = i40e_get_cap(hw);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
+ goto err_get_capabilities;
+ }
+
+ /* Initialize parameters for PF */
+ ret = i40e_pf_parameter_init(dev);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
+ goto err_parameter_init;
+ }
+
+ /* Initialize the queue management */
+ ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to init queue pool");
+ goto err_qp_pool_init;
+ }
+ ret = i40e_res_pool_init(&pf->msix_pool, 1,
+ hw->func_caps.num_msix_vectors - 1);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
+ goto err_msix_pool_init;
+ }
+
+ /* Initialize lan hmc */
+ ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
+ goto err_init_lan_hmc;
+ }
+
+ /* Configure lan hmc */
+ ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
+ goto err_configure_lan_hmc;
+ }
+
+ /* Get and check the mac address */
+ i40e_get_mac_addr(hw, hw->mac.addr);
+ if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "mac address is not valid");
+ ret = -EIO;
+ goto err_get_mac_addr;
+ }
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->mac.addr,
+ (struct ether_addr *) hw->mac.perm_addr);
+
+ /* Disable flow control */
+ hw->fc.requested_mode = I40E_FC_NONE;
+ i40e_set_fc(hw, &aq_fail, TRUE);
+
+ /* PF setup, which includes VSI setup */
+ ret = i40e_pf_setup(pf);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
+ goto err_setup_pf_switch;
+ }
+
+ vsi = pf->main_vsi;
+
+ /* Disable double vlan by default */
+ i40e_vsi_config_double_vlan(vsi, FALSE);
+
+ if (!vsi->max_macaddrs)
+ len = ETHER_ADDR_LEN;
+ else
+ len = ETHER_ADDR_LEN * vsi->max_macaddrs;
+
+ /* Should be after VSI initialized */
+ dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
+ if (!dev->data->mac_addrs) {
+ PMD_INIT_LOG(ERR, "Failed to allocated memory "
+ "for storing mac address");
+ goto err_mac_alloc;
+ }
+ ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
+ &dev->data->mac_addrs[0]);
+
+ /* initialize pf host driver to setup SRIOV resource if applicable */
+ i40e_pf_host_init(dev);
+
+ /* register callback func to eal lib */
+ rte_intr_callback_register(&(pci_dev->intr_handle),
+ i40e_dev_interrupt_handler, (void *)dev);
+
+ /* configure and enable device interrupt */
+ i40e_pf_config_irq0(hw);
+ i40e_pf_enable_irq0(hw);
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(&(pci_dev->intr_handle));
+
+ return 0;
+
+err_mac_alloc:
+ i40e_vsi_release(pf->main_vsi);
+err_setup_pf_switch:
+err_get_mac_addr:
+err_configure_lan_hmc:
+ (void)i40e_shutdown_lan_hmc(hw);
+err_init_lan_hmc:
+ i40e_res_pool_destroy(&pf->msix_pool);
+err_msix_pool_init:
+ i40e_res_pool_destroy(&pf->qp_pool);
+err_qp_pool_init:
+err_parameter_init:
+err_get_capabilities:
+ (void)i40e_shutdown_adminq(hw);
+
+ return ret;
+}
+
+static int
+i40e_dev_configure(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ int ret;
+
+ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
+ ret = i40e_fdir_setup(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to setup flow director.");
+ return -ENOTSUP;
+ }
+ ret = i40e_fdir_configure(dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to configure fdir.");
+ goto err;
+ }
+ } else
+ i40e_fdir_teardown(pf);
+
+ ret = i40e_dev_init_vlan(dev);
+ if (ret < 0)
+ goto err;
+
+ /* VMDQ setup.
+ * VMDQ setup is kept out of i40e_pf_config_mq_rx() because VMDQ and
+ * RSS have different requirements. The general PMD call sequence is
+ * NIC init, configure, rx/tx_queue_setup and dev_start. When VMDQ is
+ * applicable, rx/tx_queue_setup() looks up the VSI that a specific
+ * queue belongs to, so VMDQ has to be set up before
+ * rx/tx_queue_setup(); this function is a good place for vmdq_setup.
+ * RSS setup needs the actual number of configured RX queues, which is
+ * only available after rx_queue_setup(), so dev_start() is a good
+ * place for RSS setup (a call-sequence sketch follows this function).
+ */
+ if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ ret = i40e_vmdq_setup(dev);
+ if (ret)
+ goto err;
+ }
+ return 0;
+err:
+ i40e_fdir_teardown(pf);
+ return ret;
+}
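+
+/* Call-sequence sketch referenced above (application side, illustrative only,
+ * using generic ethdev API calls rather than anything i40e-specific):
+ *   rte_eth_dev_configure(port, nb_rxq, nb_txq, &dev_conf); // mq_mode, fdir
+ *   rte_eth_rx_queue_setup(...); rte_eth_tx_queue_setup(...);
+ *   rte_eth_dev_start(port);                                // RSS applied
+ */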
+
+void
+i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t msix_vect = vsi->msix_intr;
+ uint16_t i;
+
+ for (i = 0; i < vsi->nb_qps; i++) {
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
+ rte_wmb();
+ }
+
+ if (vsi->type != I40E_VSI_SRIOV) {
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
+ I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+ msix_vect - 1), 0);
+ } else {
+ uint32_t reg;
+ reg = (hw->func_caps.num_msix_vectors_vf - 1) *
+ vsi->user_param + (msix_vect - 1);
+
+ I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
+ }
+ I40E_WRITE_FLUSH(hw);
+}
+
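+/* ITR register values are in 2 us units, so e.g. a requested interval of
+ * 32 us is written as 16; out-of-range requests fall back to the 32 us
+ * default (descriptive note for the helper below). */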
+static inline uint16_t
+i40e_calc_itr_interval(int16_t interval)
+{
+ if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
+ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+
+ /* Convert to hardware count, as writing each 1 represents 2 us */
+ return (interval/2);
+}
+
+void
+i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
+{
+ uint32_t val;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t msix_vect = vsi->msix_intr;
+ int i;
+
+ for (i = 0; i < vsi->nb_qps; i++)
+ I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
+
+ /* Bind all RX queues to allocated MSIX interrupt */
+ for (i = 0; i < vsi->nb_qps; i++) {
+ val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ I40E_QINT_RQCTL_ITR_INDX_MASK |
+ ((vsi->base_queue + i + 1) <<
+ I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+
+ if (i == vsi->nb_qps - 1)
+ val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
+ I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
+ }
+
+ /* Write first RX queue to Link list register as the head element */
+ if (vsi->type != I40E_VSI_SRIOV) {
+ uint16_t interval =
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
+ (vsi->base_queue <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+ (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+
+ I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
+ msix_vect - 1), interval);
+
+#ifndef I40E_GLINT_CTL
+#define I40E_GLINT_CTL 0x0003F800
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
+#endif
+ /* Disable auto-mask on enabling of all non-zero interrupts */
+ I40E_WRITE_REG(hw, I40E_GLINT_CTL,
+ I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
+ } else {
+ uint32_t reg;
+
+ /* num_msix_vectors_vf includes irq0, which must be excluded here */
+ reg = (hw->func_caps.num_msix_vectors_vf - 1) *
+ vsi->user_param + (msix_vect - 1);
+
+ I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
+ (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
+ }
+
+ I40E_WRITE_FLUSH(hw);
+}
+
+static void
+i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t interval = i40e_calc_itr_interval(\
+ RTE_LIBRTE_I40E_ITR_INTERVAL);
+
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
+ I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+}
+
+static void
+i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
+}
+
+static inline uint8_t
+i40e_parse_link_speed(uint16_t eth_link_speed)
+{
+ uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
+
+ switch (eth_link_speed) {
+ case ETH_LINK_SPEED_40G:
+ link_speed = I40E_LINK_SPEED_40GB;
+ break;
+ case ETH_LINK_SPEED_20G:
+ link_speed = I40E_LINK_SPEED_20GB;
+ break;
+ case ETH_LINK_SPEED_10G:
+ link_speed = I40E_LINK_SPEED_10GB;
+ break;
+ case ETH_LINK_SPEED_1000:
+ link_speed = I40E_LINK_SPEED_1GB;
+ break;
+ case ETH_LINK_SPEED_100:
+ link_speed = I40E_LINK_SPEED_100MB;
+ break;
+ }
+
+ return link_speed;
+}
+
+static int
+i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp phy_ab;
+ struct i40e_aq_set_phy_config phy_conf;
+ const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
+ I40E_AQ_PHY_FLAG_PAUSE_RX |
+ I40E_AQ_PHY_FLAG_LOW_POWER;
+ const uint8_t advt = I40E_LINK_SPEED_40GB |
+ I40E_LINK_SPEED_10GB |
+ I40E_LINK_SPEED_1GB |
+ I40E_LINK_SPEED_100MB;
+ int ret = -ENOTSUP;
+
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
+ NULL);
+ if (status)
+ return ret;
+
+ memset(&phy_conf, 0, sizeof(phy_conf));
+
+ /* bits 0-2 use the values from get_phy_abilities_resp */
+ abilities &= ~mask;
+ abilities |= phy_ab.abilities & mask;
+
+ /* update abilities and speed */
+ if (abilities & I40E_AQ_PHY_AN_ENABLED)
+ phy_conf.link_speed = advt;
+ else
+ phy_conf.link_speed = force_speed;
+
+ phy_conf.abilities = abilities;
+
+ /* use get_phy_abilities_resp value for the rest */
+ phy_conf.phy_type = phy_ab.phy_type;
+ phy_conf.eee_capability = phy_ab.eee_capability;
+ phy_conf.eeer = phy_ab.eeer_val;
+ phy_conf.low_power_ctrl = phy_ab.d3_lpan;
+
+ PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
+ phy_ab.abilities, phy_ab.link_speed);
+ PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
+ phy_conf.abilities, phy_conf.link_speed);
+
+ status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
+ if (status)
+ return ret;
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_apply_link_speed(struct rte_eth_dev *dev)
+{
+ uint8_t speed;
+ uint8_t abilities = 0;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+
+ speed = i40e_parse_link_speed(conf->link_speed);
+ abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
+ abilities |= I40E_AQ_PHY_AN_ENABLED;
+ else
+ abilities |= I40E_AQ_PHY_LINK_ENABLED;
+
+ return i40e_phy_conf_link(hw, abilities, speed);
+}
+
+static int
+i40e_dev_start(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ int ret, i;
+
+ if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
+ (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
+ PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
+ dev->data->dev_conf.link_duplex,
+ dev->data->port_id);
+ return -EINVAL;
+ }
+
+ /* Initialize VSI */
+ ret = i40e_dev_rxtx_init(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
+ goto err_up;
+ }
+
+ /* Map queues with MSIX interrupt */
+ i40e_vsi_queues_bind_intr(main_vsi);
+ i40e_vsi_enable_queues_intr(main_vsi);
+
+ /* Map VMDQ VSI queues with MSIX interrupt */
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
+ i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
+ }
+
+ /* enable FDIR MSIX interrupt */
+ if (pf->fdir.fdir_vsi) {
+ i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+ }
+
+ /* Enable all queues which have been configured */
+ ret = i40e_dev_switch_queues(pf, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to enable VSI");
+ goto err_up;
+ }
+
+ /* Enable receiving broadcast packets */
+ ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
+
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
+ true, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
+ }
+
+ /* Apply link configure */
+ ret = i40e_apply_link_speed(dev);
+ if (I40E_SUCCESS != ret) {
+ PMD_DRV_LOG(ERR, "Fail to apply link setting");
+ goto err_up;
+ }
+
+ return I40E_SUCCESS;
+
+err_up:
+ i40e_dev_switch_queues(pf, FALSE);
+ i40e_dev_clear_queues(dev);
+
+ return ret;
+}
+
+static void
+i40e_dev_stop(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ int i;
+
+ /* Disable all queues */
+ i40e_dev_switch_queues(pf, FALSE);
+
+ /* un-map queues with interrupt registers */
+ i40e_vsi_disable_queues_intr(main_vsi);
+ i40e_vsi_queues_unbind_intr(main_vsi);
+
+ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+ i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
+ i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
+ }
+
+ if (pf->fdir.fdir_vsi) {
+ i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
+ }
+ /* Clear all queues and release memory */
+ i40e_dev_clear_queues(dev);
+
+ /* Set link down */
+ i40e_dev_set_link_down(dev);
+
+}
+
+static void
+i40e_dev_close(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ PMD_INIT_FUNC_TRACE();
+
+ i40e_dev_stop(dev);
+
+ /* Disable interrupt */
+ i40e_pf_disable_irq0(hw);
+ rte_intr_disable(&(dev->pci_dev->intr_handle));
+
+ /* shutdown and destroy the HMC */
+ i40e_shutdown_lan_hmc(hw);
+
+ /* release all the existing VSIs and VEBs */
+ i40e_fdir_teardown(pf);
+ i40e_vsi_release(pf->main_vsi);
+
+ /* shutdown the adminq */
+ i40e_aq_queue_shutdown(hw, true);
+ i40e_shutdown_adminq(hw);
+
+ i40e_res_pool_destroy(&pf->qp_pool);
+ i40e_res_pool_destroy(&pf->msix_pool);
+
+ /* force a PF reset to clean anything leftover */
+ reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
+ I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ I40E_WRITE_FLUSH(hw);
+}
+
+static void
+i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int status;
+
+ status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ true, NULL);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
+
+ status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ TRUE, NULL);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
+
+}
+
+static void
+i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int status;
+
+ status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ false, NULL);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
+
+ status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ false, NULL);
+ if (status != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
+}
+
+static void
+i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
+}
+
+static void
+i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ if (dev->data->promiscuous == 1)
+ return; /* must remain in all_multicast mode */
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, FALSE, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
+}
+
+/*
+ * Set device link up.
+ */
+static int
+i40e_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ /* re-apply link speed setting */
+ return i40e_apply_link_speed(dev);
+}
+
+/*
+ * Set device link down.
+ */
+static int
+i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
+{
+ uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
+ uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ return i40e_phy_conf_link(hw, abilities, speed);
+}
+
+int
+i40e_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_link_status link_status;
+ struct rte_eth_link link, old;
+ int status;
+
+ memset(&link, 0, sizeof(link));
+ memset(&old, 0, sizeof(old));
+ memset(&link_status, 0, sizeof(link_status));
+ rte_i40e_dev_atomic_read_link_status(dev, &old);
+
+ /* Get link status information from hardware */
+ status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
+ if (status != I40E_SUCCESS) {
+ link.link_speed = ETH_LINK_SPEED_100;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ PMD_DRV_LOG(ERR, "Failed to get link info");
+ goto out;
+ }
+
+ link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
+
+ if (!link.link_status)
+ goto out;
+
+ /* i40e uses full duplex only */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ /* Parse the link status */
+ switch (link_status.link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ link.link_speed = ETH_LINK_SPEED_100;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ link.link_speed = ETH_LINK_SPEED_1000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ link.link_speed = ETH_LINK_SPEED_10G;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ link.link_speed = ETH_LINK_SPEED_20G;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ link.link_speed = ETH_LINK_SPEED_40G;
+ break;
+ default:
+ link.link_speed = ETH_LINK_SPEED_100;
+ break;
+ }
+
+out:
+ rte_i40e_dev_atomic_write_link_status(dev, &link);
+ if (link.link_status == old.link_status)
+ return -1;
+
+ return 0;
+}
+
+/* Get all the statistics of a VSI */
+void
+i40e_update_vsi_stats(struct i40e_vsi *vsi)
+{
+ struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
+ struct i40e_eth_stats *nes = &vsi->eth_stats;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
+
+ i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
+ vsi->offset_loaded, &oes->rx_bytes,
+ &nes->rx_bytes);
+ i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
+ vsi->offset_loaded, &oes->rx_unicast,
+ &nes->rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
+ vsi->offset_loaded, &oes->rx_multicast,
+ &nes->rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
+ vsi->offset_loaded, &oes->rx_broadcast,
+ &nes->rx_broadcast);
+ i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
+ &oes->rx_discards, &nes->rx_discards);
+ /* GLV_REPC not supported */
+ /* GLV_RMPC not supported */
+ i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
+ &oes->rx_unknown_protocol,
+ &nes->rx_unknown_protocol);
+ i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
+ vsi->offset_loaded, &oes->tx_bytes,
+ &nes->tx_bytes);
+ i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
+ vsi->offset_loaded, &oes->tx_unicast,
+ &nes->tx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
+ vsi->offset_loaded, &oes->tx_multicast,
+ &nes->tx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
+ vsi->offset_loaded, &oes->tx_broadcast,
+ &nes->tx_broadcast);
+ /* GLV_TDPC not supported */
+ i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
+ &oes->tx_errors, &nes->tx_errors);
+ vsi->offset_loaded = true;
+
+ PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
+ vsi->vsi_id);
+ PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", nes->rx_bytes);
+ PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", nes->rx_unicast);
+ PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", nes->rx_multicast);
+ PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", nes->rx_broadcast);
+ PMD_DRV_LOG(DEBUG, "rx_discards: %lu", nes->rx_discards);
+ PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
+ nes->rx_unknown_protocol);
+ PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", nes->tx_bytes);
+ PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", nes->tx_unicast);
+ PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", nes->tx_multicast);
+ PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", nes->tx_broadcast);
+ PMD_DRV_LOG(DEBUG, "tx_discards: %lu", nes->tx_discards);
+ PMD_DRV_LOG(DEBUG, "tx_errors: %lu", nes->tx_errors);
+ PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
+ vsi->vsi_id);
+}
+
+/* Get all statistics of a port */
+static void
+i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint32_t i;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
+ struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
+
+ /* Get statistics of struct i40e_eth_stats */
+ i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_bytes,
+ &ns->eth.rx_bytes);
+ i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
+ I40E_GLPRT_UPRCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_unicast,
+ &ns->eth.rx_unicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_multicast,
+ &ns->eth.rx_multicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
+ I40E_GLPRT_BPRCL(hw->port),
+ pf->offset_loaded, &os->eth.rx_broadcast,
+ &ns->eth.rx_broadcast);
+ i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->offset_loaded, &os->eth.rx_discards,
+ &ns->eth.rx_discards);
+ /* GLPRT_REPC not supported */
+ /* GLPRT_RMPC not supported */
+ i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
+ pf->offset_loaded,
+ &os->eth.rx_unknown_protocol,
+ &ns->eth.rx_unknown_protocol);
+ i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_bytes,
+ &ns->eth.tx_bytes);
+ i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
+ I40E_GLPRT_UPTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_unicast,
+ &ns->eth.tx_unicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
+ I40E_GLPRT_MPTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_multicast,
+ &ns->eth.tx_multicast);
+ i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
+ I40E_GLPRT_BPTCL(hw->port),
+ pf->offset_loaded, &os->eth.tx_broadcast,
+ &ns->eth.tx_broadcast);
+ i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
+ pf->offset_loaded, &os->eth.tx_discards,
+ &ns->eth.tx_discards);
+ /* GLPRT_TEPC not supported */
+
+ /* additional port specific stats */
+ i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->offset_loaded, &os->tx_dropped_link_down,
+ &ns->tx_dropped_link_down);
+ i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->offset_loaded, &os->crc_errors,
+ &ns->crc_errors);
+ i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->offset_loaded, &os->illegal_bytes,
+ &ns->illegal_bytes);
+ /* GLPRT_ERRBC not supported */
+ i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->offset_loaded, &os->mac_local_faults,
+ &ns->mac_local_faults);
+ i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->offset_loaded, &os->mac_remote_faults,
+ &ns->mac_remote_faults);
+ i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->offset_loaded, &os->rx_length_errors,
+ &ns->rx_length_errors);
+ i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->offset_loaded, &os->link_xon_rx,
+ &ns->link_xon_rx);
+ i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+ pf->offset_loaded, &os->link_xoff_rx,
+ &ns->link_xoff_rx);
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xon_rx[i],
+ &ns->priority_xon_rx[i]);
+ i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xoff_rx[i],
+ &ns->priority_xoff_rx[i]);
+ }
+ i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->offset_loaded, &os->link_xon_tx,
+ &ns->link_xon_tx);
+ i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->offset_loaded, &os->link_xoff_tx,
+ &ns->link_xoff_tx);
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xon_tx[i],
+ &ns->priority_xon_tx[i]);
+ i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xoff_tx[i],
+ &ns->priority_xoff_tx[i]);
+ i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
+ pf->offset_loaded,
+ &os->priority_xon_2_xoff[i],
+ &ns->priority_xon_2_xoff[i]);
+ }
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->offset_loaded, &os->rx_size_64,
+ &ns->rx_size_64);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->offset_loaded, &os->rx_size_127,
+ &ns->rx_size_127);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->offset_loaded, &os->rx_size_255,
+ &ns->rx_size_255);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->offset_loaded, &os->rx_size_511,
+ &ns->rx_size_511);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->offset_loaded, &os->rx_size_1023,
+ &ns->rx_size_1023);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->offset_loaded, &os->rx_size_1522,
+ &ns->rx_size_1522);
+ i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->offset_loaded, &os->rx_size_big,
+ &ns->rx_size_big);
+ i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->offset_loaded, &os->rx_undersize,
+ &ns->rx_undersize);
+ i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->offset_loaded, &os->rx_fragments,
+ &ns->rx_fragments);
+ i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->offset_loaded, &os->rx_oversize,
+ &ns->rx_oversize);
+ i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->offset_loaded, &os->rx_jabber,
+ &ns->rx_jabber);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->offset_loaded, &os->tx_size_64,
+ &ns->tx_size_64);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->offset_loaded, &os->tx_size_127,
+ &ns->tx_size_127);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->offset_loaded, &os->tx_size_255,
+ &ns->tx_size_255);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->offset_loaded, &os->tx_size_511,
+ &ns->tx_size_511);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->offset_loaded, &os->tx_size_1023,
+ &ns->tx_size_1023);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->offset_loaded, &os->tx_size_1522,
+ &ns->tx_size_1522);
+ i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->offset_loaded, &os->tx_size_big,
+ &ns->tx_size_big);
+ i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
+ pf->offset_loaded,
+ &os->fd_sb_match, &ns->fd_sb_match);
+ /* GLPRT_MSPDC not supported */
+ /* GLPRT_XEC not supported */
+
+ pf->offset_loaded = true;
+
+ if (pf->main_vsi)
+ i40e_update_vsi_stats(pf->main_vsi);
+
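+	/* Aggregate the per-type hardware counters into the generic rte_eth_stats fields */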
+ stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
+ ns->eth.rx_broadcast;
+ stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
+ ns->eth.tx_broadcast;
+ stats->ibytes = ns->eth.rx_bytes;
+ stats->obytes = ns->eth.tx_bytes;
+ stats->oerrors = ns->eth.tx_errors;
+ stats->imcasts = ns->eth.rx_multicast;
+ stats->fdirmatch = ns->fd_sb_match;
+
+ /* Rx Errors */
+ stats->ibadcrc = ns->crc_errors;
+ stats->ibadlen = ns->rx_length_errors + ns->rx_undersize +
+ ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
+ stats->imissed = ns->eth.rx_discards;
+ stats->ierrors = stats->ibadcrc + stats->ibadlen + stats->imissed;
+
+ PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
+ PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", ns->eth.rx_bytes);
+ PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", ns->eth.rx_unicast);
+ PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", ns->eth.rx_multicast);
+ PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", ns->eth.rx_broadcast);
+ PMD_DRV_LOG(DEBUG, "rx_discards: %lu", ns->eth.rx_discards);
+ PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
+ ns->eth.rx_unknown_protocol);
+ PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", ns->eth.tx_bytes);
+ PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", ns->eth.tx_unicast);
+ PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", ns->eth.tx_multicast);
+ PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", ns->eth.tx_broadcast);
+ PMD_DRV_LOG(DEBUG, "tx_discards: %lu", ns->eth.tx_discards);
+ PMD_DRV_LOG(DEBUG, "tx_errors: %lu", ns->eth.tx_errors);
+
+ PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %lu",
+ ns->tx_dropped_link_down);
+ PMD_DRV_LOG(DEBUG, "crc_errors: %lu", ns->crc_errors);
+ PMD_DRV_LOG(DEBUG, "illegal_bytes: %lu",
+ ns->illegal_bytes);
+ PMD_DRV_LOG(DEBUG, "error_bytes: %lu", ns->error_bytes);
+ PMD_DRV_LOG(DEBUG, "mac_local_faults: %lu",
+ ns->mac_local_faults);
+ PMD_DRV_LOG(DEBUG, "mac_remote_faults: %lu",
+ ns->mac_remote_faults);
+ PMD_DRV_LOG(DEBUG, "rx_length_errors: %lu",
+ ns->rx_length_errors);
+ PMD_DRV_LOG(DEBUG, "link_xon_rx: %lu", ns->link_xon_rx);
+ PMD_DRV_LOG(DEBUG, "link_xoff_rx: %lu", ns->link_xoff_rx);
+ for (i = 0; i < 8; i++) {
+ PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %lu",
+ i, ns->priority_xon_rx[i]);
+ PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %lu",
+ i, ns->priority_xoff_rx[i]);
+ }
+ PMD_DRV_LOG(DEBUG, "link_xon_tx: %lu", ns->link_xon_tx);
+ PMD_DRV_LOG(DEBUG, "link_xoff_tx: %lu", ns->link_xoff_tx);
+ for (i = 0; i < 8; i++) {
+ PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %lu",
+ i, ns->priority_xon_tx[i]);
+ PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %lu",
+ i, ns->priority_xoff_tx[i]);
+ PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %lu",
+ i, ns->priority_xon_2_xoff[i]);
+ }
+ PMD_DRV_LOG(DEBUG, "rx_size_64: %lu", ns->rx_size_64);
+ PMD_DRV_LOG(DEBUG, "rx_size_127: %lu", ns->rx_size_127);
+ PMD_DRV_LOG(DEBUG, "rx_size_255: %lu", ns->rx_size_255);
+ PMD_DRV_LOG(DEBUG, "rx_size_511: %lu", ns->rx_size_511);
+ PMD_DRV_LOG(DEBUG, "rx_size_1023: %lu", ns->rx_size_1023);
+ PMD_DRV_LOG(DEBUG, "rx_size_1522: %lu", ns->rx_size_1522);
+ PMD_DRV_LOG(DEBUG, "rx_size_big: %lu", ns->rx_size_big);
+ PMD_DRV_LOG(DEBUG, "rx_undersize: %lu", ns->rx_undersize);
+ PMD_DRV_LOG(DEBUG, "rx_fragments: %lu", ns->rx_fragments);
+ PMD_DRV_LOG(DEBUG, "rx_oversize: %lu", ns->rx_oversize);
+ PMD_DRV_LOG(DEBUG, "rx_jabber: %lu", ns->rx_jabber);
+ PMD_DRV_LOG(DEBUG, "tx_size_64: %lu", ns->tx_size_64);
+ PMD_DRV_LOG(DEBUG, "tx_size_127: %lu", ns->tx_size_127);
+ PMD_DRV_LOG(DEBUG, "tx_size_255: %lu", ns->tx_size_255);
+ PMD_DRV_LOG(DEBUG, "tx_size_511: %lu", ns->tx_size_511);
+ PMD_DRV_LOG(DEBUG, "tx_size_1023: %lu", ns->tx_size_1023);
+ PMD_DRV_LOG(DEBUG, "tx_size_1522: %lu", ns->tx_size_1522);
+ PMD_DRV_LOG(DEBUG, "tx_size_big: %lu", ns->tx_size_big);
+ PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %lu",
+ ns->mac_short_packet_dropped);
+ PMD_DRV_LOG(DEBUG, "checksum_error: %lu",
+ ns->checksum_error);
+ PMD_DRV_LOG(DEBUG, "fdir_match: %lu", ns->fd_sb_match);
+ PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
+}
+
+/* Reset the statistics */
+static void
+i40e_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* It results in reloading the start point of each counter */
+ pf->offset_loaded = false;
+}
+
+static int
+i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ __rte_unused uint8_t stat_idx,
+ __rte_unused uint8_t is_rx)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return -ENOSYS;
+}
+
+static void
+i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+
+ dev_info->max_rx_queues = vsi->nb_qps;
+ dev_info->max_tx_queues = vsi->nb_qps;
+ dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->max_mac_addrs = vsi->max_macaddrs;
+ dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM;
+ dev_info->reta_size = pf->hash_lut_size;
+
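+	/* Default per-queue settings reported to applications for RX/TX queue setup */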
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = I40E_DEFAULT_RX_PTHRESH,
+ .hthresh = I40E_DEFAULT_RX_HTHRESH,
+ .wthresh = I40E_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = I40E_DEFAULT_TX_PTHRESH,
+ .hthresh = I40E_DEFAULT_TX_HTHRESH,
+ .wthresh = I40E_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ };
+
+	if (pf->flags & I40E_FLAG_VMDQ) {
+ dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
+ dev_info->vmdq_queue_base = dev_info->max_rx_queues;
+ dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
+ pf->max_nb_vmdq_vsi;
+ dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
+ dev_info->max_rx_queues += dev_info->vmdq_queue_num;
+ dev_info->max_tx_queues += dev_info->vmdq_queue_num;
+ }
+}
+
+static int
+i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ PMD_INIT_FUNC_TRACE();
+
+ if (on)
+ return i40e_vsi_add_vlan(vsi, vlan_id);
+ else
+ return i40e_vsi_delete_vlan(vsi, vlan_id);
+}
+
+static void
+i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t tpid)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static void
+i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ i40e_vsi_config_vlan_stripping(vsi, TRUE);
+ else
+ i40e_vsi_config_vlan_stripping(vsi, FALSE);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ i40e_vsi_config_double_vlan(vsi, TRUE);
+ else
+ i40e_vsi_config_double_vlan(vsi, FALSE);
+ }
+}
+
+static void
+i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue,
+ __rte_unused int on)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
+ struct i40e_vsi_vlan_pvid_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.on = on;
+ if (info.on)
+ info.config.pvid = pvid;
+ else {
+ info.config.reject.tagged =
+ data->dev_conf.txmode.hw_vlan_reject_tagged;
+ info.config.reject.untagged =
+ data->dev_conf.txmode.hw_vlan_reject_untagged;
+ }
+
+ return i40e_vsi_vlan_pvid_set(vsi, &info);
+}
+
+static int
+i40e_dev_led_on(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mode = i40e_led_get(hw);
+
+ if (mode == 0)
+		i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
+
+ return 0;
+}
+
+static int
+i40e_dev_led_off(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mode = i40e_led_get(hw);
+
+ if (mode != 0)
+ i40e_led_set(hw, 0, false);
+
+ return 0;
+}
+
+static int
+i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_eth_fc_conf *fc_conf)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return -ENOSYS;
+}
+
+static int
+i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_eth_pfc_conf *pfc_conf)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return -ENOSYS;
+}
+
+/* Add a MAC address, and update filters */
+static void
+i40e_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ __rte_unused uint32_t index,
+ uint32_t pool)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_mac_filter_info mac_filter;
+ struct i40e_vsi *vsi;
+ int ret;
+
+ /* If VMDQ not enabled or configured, return */
+	if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
+		PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
+			pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
+ pool);
+ return;
+ }
+
+ if (pool > pf->nb_cfg_vmdq_vsi) {
+ PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
+ pool, pf->nb_cfg_vmdq_vsi);
+ return;
+ }
+
+ (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
+ mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+
+ if (pool == 0)
+ vsi = pf->main_vsi;
+ else
+ vsi = pf->vmdq[pool - 1].vsi;
+
+ ret = i40e_vsi_add_mac(vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
+ return;
+ }
+}
+
+/* Remove a MAC address, and update filters */
+static void
+i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *macaddr;
+ int ret;
+ uint32_t i;
+ uint64_t pool_sel;
+
+ macaddr = &(data->mac_addrs[index]);
+
+ pool_sel = dev->data->mac_pool_sel[index];
+
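+	/* pool_sel is a bitmap of the pools this MAC was added to; bit 0 is the main VSI */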
+ for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
+ if (pool_sel & (1ULL << i)) {
+ if (i == 0)
+ vsi = pf->main_vsi;
+ else {
+ /* No VMDQ pool enabled or configured */
+				if (!(pf->flags & I40E_FLAG_VMDQ) ||
+ (i > pf->nb_cfg_vmdq_vsi)) {
+ PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
+ "/configured");
+ return;
+ }
+ vsi = pf->vmdq[i - 1].vsi;
+ }
+ ret = i40e_vsi_delete_mac(vsi, macaddr);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
+ return;
+ }
+ }
+ }
+}
+
+/* Set perfect match or hash match of MAC and VLAN for a VF */
+static int
+i40e_vf_mac_filter_set(struct i40e_pf *pf,
+ struct rte_eth_mac_filter *filter,
+ bool add)
+{
+ struct i40e_hw *hw;
+ struct i40e_mac_filter_info mac_filter;
+ struct ether_addr old_mac;
+ struct ether_addr *new_mac;
+ struct i40e_pf_vf *vf = NULL;
+ uint16_t vf_id;
+ int ret;
+
+ if (pf == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid PF argument.");
+ return -EINVAL;
+ }
+ hw = I40E_PF_TO_HW(pf);
+
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
+ return -EINVAL;
+ }
+
+ new_mac = &filter->mac_addr;
+
+ if (is_zero_ether_addr(new_mac)) {
+ PMD_DRV_LOG(ERR, "Invalid ethernet address.");
+ return -EINVAL;
+ }
+
+ vf_id = filter->dst_id;
+
+ if (vf_id > pf->vf_num - 1 || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+ vf = &pf->vfs[vf_id];
+
+ if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
+ PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
+ return -EINVAL;
+ }
+
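+	/* Add: program the new MAC on the VF VSI; delete: restore the permanent MAC and remove the filter */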
+ if (add) {
+ (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
+ (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
+ ETHER_ADDR_LEN);
+ (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
+ ETHER_ADDR_LEN);
+
+ mac_filter.filter_type = filter->filter_type;
+ ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
+ return -1;
+ }
+ ether_addr_copy(new_mac, &pf->dev_addr);
+ } else {
+ (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
+ ETHER_ADDR_LEN);
+ ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
+ return -1;
+ }
+
+ /* Clear device address as it has been removed */
+ if (is_same_ether_addr(&(pf->dev_addr), new_mac))
+ memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
+ }
+
+ return 0;
+}
+
+/* MAC filter handle */
+static int
+i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_mac_filter *filter;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ int ret = I40E_NOT_SUPPORTED;
+
+ filter = (struct rte_eth_mac_filter *)(arg);
+
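+	/* Interrupt 0 is temporarily disabled while VF MAC filters are changed */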
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ ret = I40E_SUCCESS;
+ break;
+ case RTE_ETH_FILTER_ADD:
+ i40e_pf_disable_irq0(hw);
+ if (filter->is_vf)
+ ret = i40e_vf_mac_filter_set(pf, filter, 1);
+ i40e_pf_enable_irq0(hw);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ i40e_pf_disable_irq0(hw);
+ if (filter->is_vf)
+ ret = i40e_vf_mac_filter_set(pf, filter, 0);
+ i40e_pf_enable_irq0(hw);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ ret = I40E_ERR_PARAM;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t lut, l;
+ uint16_t i, j, lut_size = pf->hash_lut_size;
+ uint16_t idx, shift;
+ uint8_t mask;
+
+ if (reta_size != lut_size ||
+ reta_size > ETH_RSS_RETA_SIZE_512) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)\n", reta_size, lut_size);
+ return -EINVAL;
+ }
+
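+	/* Each 32-bit HLUT register holds four 8-bit LUT entries, so handle four entries per iteration */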
+ for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ I40E_4_BIT_MASK);
+ if (!mask)
+ continue;
+ if (mask == I40E_4_BIT_MASK)
+ l = 0;
+ else
+ l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
+ for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ lut |= reta_conf[idx].reta[shift + j] <<
+ (CHAR_BIT * j);
+ else
+ lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
+ }
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ return 0;
+}
+
+static int
+i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t lut;
+ uint16_t i, j, lut_size = pf->hash_lut_size;
+ uint16_t idx, shift;
+ uint8_t mask;
+
+ if (reta_size != lut_size ||
+ reta_size > ETH_RSS_RETA_SIZE_512) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)\n", reta_size, lut_size);
+ return -EINVAL;
+ }
+
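+	/* Read the LUT back four 8-bit entries (one 32-bit HLUT register) at a time */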
+ for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ I40E_4_BIT_MASK);
+ if (!mask)
+ continue;
+
+ lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
+ for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta_conf[idx].reta[shift + j] = ((lut >>
+ (CHAR_BIT * j)) & I40E_8_BIT_MASK);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: pointer to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+enum i40e_status_code
+i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ u64 size,
+ u32 alignment)
+{
+ static uint64_t id = 0;
+ const struct rte_memzone *mz = NULL;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ if (!mem)
+ return I40E_ERR_PARAM;
+
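+	/* Reserve a uniquely named, physically contiguous memzone to back the DMA region */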
+ id++;
+ snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
+#ifdef RTE_LIBRTE_XEN_DOM0
+ mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
+ RTE_PGSIZE_2M);
+#else
+ mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
+#endif
+ if (!mz)
+ return I40E_ERR_NO_MEMORY;
+
+ mem->id = id;
+ mem->size = size;
+ mem->va = mz->addr;
+#ifdef RTE_LIBRTE_XEN_DOM0
+ mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+#else
+ mem->pa = mz->phys_addr;
+#endif
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+enum i40e_status_code
+i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_dma_mem *mem)
+{
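+	/* The underlying memzone is not released here; only the references are cleared */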
+ if (!mem || !mem->va)
+ return I40E_ERR_PARAM;
+
+ mem->va = NULL;
+ mem->pa = (u64)0;
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: pointer to mem struct to fill out
+ * @size: size of memory requested
+ **/
+enum i40e_status_code
+i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = rte_zmalloc("i40e", size, 0);
+
+ if (mem->va)
+ return I40E_SUCCESS;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
+ * @hw: pointer to the HW structure
+ * @mem: pointer to mem struct to free
+ **/
+enum i40e_status_code
+i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
+ struct i40e_virt_mem *mem)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ rte_free(mem->va);
+ mem->va = NULL;
+
+ return I40E_SUCCESS;
+}
+
+void
+i40e_init_spinlock_d(struct i40e_spinlock *sp)
+{
+ rte_spinlock_init(&sp->spinlock);
+}
+
+void
+i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
+{
+ rte_spinlock_lock(&sp->spinlock);
+}
+
+void
+i40e_release_spinlock_d(struct i40e_spinlock *sp)
+{
+ rte_spinlock_unlock(&sp->spinlock);
+}
+
+void
+i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
+{
+ return;
+}
+
+/**
+ * Get the hardware capabilities, which will be parsed
+ * and saved into struct i40e_hw.
+ */
+static int
+i40e_get_cap(struct i40e_hw *hw)
+{
+ struct i40e_aqc_list_capabilities_element_resp *buf;
+ uint16_t len, size = 0;
+ int ret;
+
+	/* Calculate a buffer size large enough to hold the response data temporarily */
+ len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
+ I40E_MAX_CAP_ELE_NUM;
+ buf = rte_zmalloc("i40e", len, 0);
+ if (!buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+	/* Get and parse the capabilities, then save them into hw */
+ ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
+ i40e_aqc_opc_list_func_capabilities, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to discover capabilities");
+
+ /* Free the temporary buffer after being used */
+ rte_free(buf);
+
+ return ret;
+}
+
+static int
+i40e_pf_parameter_init(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint16_t sum_queues = 0, sum_vsis, left_queues;
+
+	/* First check whether the firmware supports SR-IOV */
+ if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
+ PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
+ return -EINVAL;
+ }
+
+ pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
+ pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
+ PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
+ /* Allocate queues for pf */
+ if (hw->func_caps.rss) {
+ pf->flags |= I40E_FLAG_RSS;
+ pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
+ (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
+ pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
+ } else
+ pf->lan_nb_qps = 1;
+ sum_queues = pf->lan_nb_qps;
+ /* Default VSI is not counted in */
+ sum_vsis = 0;
+ PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
+
+ if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
+ pf->flags |= I40E_FLAG_SRIOV;
+ pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+ if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
+ PMD_INIT_LOG(ERR, "Config VF number %u, "
+ "max supported %u.",
+ dev->pci_dev->max_vfs,
+ hw->func_caps.num_vfs);
+ return -EINVAL;
+ }
+ if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
+ PMD_INIT_LOG(ERR, "FVL VF queue %u, "
+ "max support %u queues.",
+ pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
+ return -EINVAL;
+ }
+ pf->vf_num = dev->pci_dev->max_vfs;
+ sum_queues += pf->vf_nb_qps * pf->vf_num;
+ sum_vsis += pf->vf_num;
+ PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
+ pf->vf_num, pf->vf_nb_qps);
+ } else
+ pf->vf_num = 0;
+
+ if (hw->func_caps.vmdq) {
+ pf->flags |= I40E_FLAG_VMDQ;
+ pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+ pf->max_nb_vmdq_vsi = 1;
+ /*
+ * If VMDQ available, assume a single VSI can be created. Will adjust
+ * later.
+ */
+ sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
+ sum_vsis += pf->max_nb_vmdq_vsi;
+ } else {
+ pf->vmdq_nb_qps = 0;
+ pf->max_nb_vmdq_vsi = 0;
+ }
+ pf->nb_cfg_vmdq_vsi = 0;
+
+ if (hw->func_caps.fd) {
+ pf->flags |= I40E_FLAG_FDIR;
+ pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
+ /**
+		 * Each flow director consumes one VSI and one queue,
+		 * but the exact usage can't be predicted reliably here.
+ */
+ }
+
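+	/* Make sure the requested VSIs and queue pairs fit within the hardware limits */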
+ if (sum_vsis > pf->max_num_vsi ||
+ sum_queues > hw->func_caps.num_rx_qp) {
+ PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
+ PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
+ pf->max_num_vsi, sum_vsis);
+ PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
+ hw->func_caps.num_rx_qp, sum_queues);
+ return -EINVAL;
+ }
+
+ /* Adjust VMDQ setting to support as many VMs as possible */
+ if (pf->flags & I40E_FLAG_VMDQ) {
+ left_queues = hw->func_caps.num_rx_qp - sum_queues;
+
+ pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
+ pf->max_num_vsi - sum_vsis);
+
+		/* Limit the max VMDQ number to what rte_ether can support */
+ pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
+ ETH_64_POOLS - 1);
+
+ PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
+ pf->max_nb_vmdq_vsi);
+ PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
+ }
+
+	/* Each VSI occupies at least one MSI-X interrupt, plus IRQ0 for the
+	 * misc interrupt cause */
+ if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
+ PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
+ sum_vsis, hw->func_caps.num_msix_vectors);
+ return -EINVAL;
+ }
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_pf_get_switch_config(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_aqc_get_switch_config_resp *switch_config;
+ struct i40e_aqc_switch_config_element_resp *element;
+ uint16_t start_seid = 0, num_reported;
+ int ret;
+
+ switch_config = (struct i40e_aqc_get_switch_config_resp *)\
+ rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
+ if (!switch_config) {
+ PMD_DRV_LOG(ERR, "Failed to allocated memory");
+ return -ENOMEM;
+ }
+
+ /* Get the switch configurations */
+ ret = i40e_aq_get_switch_config(hw, switch_config,
+ I40E_AQ_LARGE_BUF, &start_seid, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get switch configurations");
+ goto fail;
+ }
+ num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
+ if (num_reported != 1) { /* The number should be 1 */
+ PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
+ goto fail;
+ }
+
+ /* Parse the switch configuration elements */
+ element = &(switch_config->element[0]);
+ if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
+ pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
+ pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
+ } else
+ PMD_DRV_LOG(INFO, "Unknown element type");
+
+fail:
+ rte_free(switch_config);
+
+ return ret;
+}
+
+static int
+i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
+ uint32_t num)
+{
+ struct pool_entry *entry;
+
+ if (pool == NULL || num == 0)
+ return -EINVAL;
+
+ entry = rte_zmalloc("i40e", sizeof(*entry), 0);
+ if (entry == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
+ return -ENOMEM;
+ }
+
+	/* Initialize the queue heap */
+ pool->num_free = num;
+ pool->num_alloc = 0;
+ pool->base = base;
+ LIST_INIT(&pool->alloc_list);
+ LIST_INIT(&pool->free_list);
+
+ /* Initialize element */
+ entry->base = 0;
+ entry->len = num;
+
+ LIST_INSERT_HEAD(&pool->free_list, entry, next);
+ return 0;
+}
+
+static void
+i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
+{
+ struct pool_entry *entry;
+
+ if (pool == NULL)
+ return;
+
+ LIST_FOREACH(entry, &pool->alloc_list, next) {
+ LIST_REMOVE(entry, next);
+ rte_free(entry);
+ }
+
+ LIST_FOREACH(entry, &pool->free_list, next) {
+ LIST_REMOVE(entry, next);
+ rte_free(entry);
+ }
+
+ pool->num_free = 0;
+ pool->num_alloc = 0;
+ pool->base = 0;
+ LIST_INIT(&pool->alloc_list);
+ LIST_INIT(&pool->free_list);
+}
+
+static int
+i40e_res_pool_free(struct i40e_res_pool_info *pool,
+ uint32_t base)
+{
+ struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
+ uint32_t pool_offset;
+ int insert;
+
+ if (pool == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid parameter");
+ return -EINVAL;
+ }
+
+ pool_offset = base - pool->base;
+ /* Lookup in alloc list */
+ LIST_FOREACH(entry, &pool->alloc_list, next) {
+ if (entry->base == pool_offset) {
+ valid_entry = entry;
+ LIST_REMOVE(entry, next);
+ break;
+ }
+ }
+
+	/* Not found, return */
+ if (valid_entry == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to find entry");
+ return -EINVAL;
+ }
+
+ /**
+	 * Found it; move it to the free list and try to merge.
+	 * To make merging easier, the free list is always kept sorted by base.
+	 * Find the adjacent prev and next entries.
+ */
+ prev = next = NULL;
+ LIST_FOREACH(entry, &pool->free_list, next) {
+ if (entry->base > valid_entry->base) {
+ next = entry;
+ break;
+ }
+ prev = entry;
+ }
+
+ insert = 0;
+ /* Try to merge with next one*/
+ if (next != NULL) {
+ /* Merge with next one */
+ if (valid_entry->base + valid_entry->len == next->base) {
+ next->base = valid_entry->base;
+ next->len += valid_entry->len;
+ rte_free(valid_entry);
+ valid_entry = next;
+ insert = 1;
+ }
+ }
+
+ if (prev != NULL) {
+ /* Merge with previous one */
+ if (prev->base + prev->len == valid_entry->base) {
+ prev->len += valid_entry->len;
+ /* If it merge with next one, remove next node */
+ if (insert == 1) {
+ LIST_REMOVE(valid_entry, next);
+ rte_free(valid_entry);
+ } else {
+ rte_free(valid_entry);
+ insert = 1;
+ }
+ }
+ }
+
+	/* No adjacent entry to merge with, insert as a new node */
+ if (insert == 0) {
+ if (prev != NULL)
+ LIST_INSERT_AFTER(prev, valid_entry, next);
+ else if (next != NULL)
+ LIST_INSERT_BEFORE(next, valid_entry, next);
+		else /* The list is empty, insert at the head */
+ LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
+ }
+
+ pool->num_free += valid_entry->len;
+ pool->num_alloc -= valid_entry->len;
+
+ return 0;
+}
+
+static int
+i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
+ uint16_t num)
+{
+ struct pool_entry *entry, *valid_entry;
+
+ if (pool == NULL || num == 0) {
+ PMD_DRV_LOG(ERR, "Invalid parameter");
+ return -EINVAL;
+ }
+
+ if (pool->num_free < num) {
+ PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
+ num, pool->num_free);
+ return -ENOMEM;
+ }
+
+ valid_entry = NULL;
+	/* Look up the free list and find the best-fit entry */
+ LIST_FOREACH(entry, &pool->free_list, next) {
+ if (entry->len >= num) {
+ /* Find best one */
+ if (entry->len == num) {
+ valid_entry = entry;
+ break;
+ }
+ if (valid_entry == NULL || valid_entry->len > entry->len)
+ valid_entry = entry;
+ }
+ }
+
+	/* No entry found that satisfies the request, return */
+ if (valid_entry == NULL) {
+ PMD_DRV_LOG(ERR, "No valid entry found");
+ return -ENOMEM;
+ }
+ /**
+	 * The entry has exactly the requested number of queues;
+	 * remove it from the free list.
+ */
+ if (valid_entry->len == num) {
+ LIST_REMOVE(valid_entry, next);
+ } else {
+ /**
+		 * The entry has more queues than requested;
+		 * create a new entry for the alloc list and shrink the
+		 * base and length of the remaining free-list entry.
+ */
+ entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
+ if (entry == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "resource pool");
+ return -ENOMEM;
+ }
+ entry->base = valid_entry->base;
+ entry->len = num;
+ valid_entry->base += num;
+ valid_entry->len -= num;
+ valid_entry = entry;
+ }
+
+ /* Insert it into alloc list, not sorted */
+ LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
+
+ pool->num_free -= valid_entry->len;
+ pool->num_alloc += valid_entry->len;
+
+ return (valid_entry->base + pool->base);
+}
+
+/**
+ * bitmap_is_subset - Check whether src2 is subset of src1
+ **/
+static inline int
+bitmap_is_subset(uint8_t src1, uint8_t src2)
+{
+ return !((src1 ^ src2) & src2);
+}
+
+static int
+validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+
+ /* If DCB is not supported, only default TC is supported */
+ if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
+ PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
+ return -EINVAL;
+ }
+
+ if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
+ PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
+ "HW support 0x%x", hw->func_caps.enabled_tcmap,
+ enabled_tcmap);
+ return -EINVAL;
+ }
+ return I40E_SUCCESS;
+}
+
+int
+i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
+ struct i40e_vsi_vlan_pvid_info *info)
+{
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ uint8_t vlan_flags = 0;
+ int ret;
+
+ if (vsi == NULL || info == NULL) {
+ PMD_DRV_LOG(ERR, "invalid parameters");
+ return I40E_ERR_PARAM;
+ }
+
+ if (info->on) {
+ vsi->info.pvid = info->config.pvid;
+ /**
+ * If insert pvid is enabled, only tagged pkts are
+ * allowed to be sent out.
+ */
+ vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ } else {
+ vsi->info.pvid = 0;
+ if (info->config.reject.tagged == 0)
+ vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+
+ if (info->config.reject.untagged == 0)
+ vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ }
+ vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_MODE_MASK);
+ vsi->info.port_vlan_flags |= vlan_flags;
+ vsi->info.valid_sections =
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+
+ return ret;
+}
+
+static int
+i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int i, ret;
+ struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
+
+ ret = validate_tcmap_parameter(vsi, enabled_tcmap);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ if (!vsi->seid) {
+ PMD_DRV_LOG(ERR, "seid not valid");
+ return -EINVAL;
+ }
+
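+	/* Give every enabled TC an equal one-credit bandwidth share */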
+ memset(&tc_bw_data, 0, sizeof(tc_bw_data));
+ tc_bw_data.tc_valid_bits = enabled_tcmap;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ tc_bw_data.tc_bw_credits[i] =
+ (enabled_tcmap & (1 << i)) ? 1 : 0;
+
+ ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure TC BW");
+ return ret;
+ }
+
+ (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
+ sizeof(vsi->info.qs_handle));
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
+ struct i40e_aqc_vsi_properties_data *info,
+ uint8_t enabled_tcmap)
+{
+ int ret, total_tc = 0, i;
+ uint16_t qpnum_per_tc, bsf, qp_idx;
+
+ ret = validate_tcmap_parameter(vsi, enabled_tcmap);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ if (enabled_tcmap & (1 << i))
+ total_tc++;
+ vsi->enabled_tc = enabled_tcmap;
+
+ /* Number of queues per enabled TC */
+ qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
+ qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
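+	/* qpnum_per_tc is a power of two, so rte_bsf32() yields log2(queues per TC) for the mapping field */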
+ bsf = rte_bsf32(qpnum_per_tc);
+
+ /* Adjust the queue number to actual queues that can be applied */
+ vsi->nb_qps = qpnum_per_tc * total_tc;
+
+ /**
+	 * Configure TC and queue mapping parameters. For each enabled TC,
+	 * allocate qpnum_per_tc queues to its traffic; traffic of a disabled
+	 * TC is served by the default queue.
+ */
+ qp_idx = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+ qp_idx += qpnum_per_tc;
+ } else
+ info->tc_mapping[i] = 0;
+ }
+
+ /* Associate queue number with VSI */
+ if (vsi->type == I40E_VSI_SRIOV) {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+ for (i = 0; i < vsi->nb_qps; i++)
+ info->queue_mapping[i] =
+ rte_cpu_to_le_16(vsi->base_queue + i);
+ } else {
+ info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
+ }
+ info->valid_sections =
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_veb_release(struct i40e_veb *veb)
+{
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+
+ if (veb == NULL || veb->associate_vsi == NULL)
+ return -EINVAL;
+
+ if (!TAILQ_EMPTY(&veb->head)) {
+ PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
+ return -EACCES;
+ }
+
+ vsi = veb->associate_vsi;
+ hw = I40E_VSI_TO_HW(vsi);
+
+ vsi->uplink_seid = veb->uplink_seid;
+ i40e_aq_delete_element(hw, veb->seid, NULL);
+ rte_free(veb);
+ vsi->veb = NULL;
+ return I40E_SUCCESS;
+}
+
+/* Setup a veb */
+static struct i40e_veb *
+i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
+{
+ struct i40e_veb *veb;
+ int ret;
+ struct i40e_hw *hw;
+
+	if (pf == NULL || vsi == NULL) {
+		PMD_DRV_LOG(ERR, "veb setup failed, "
+			"associated VSI shouldn't be NULL");
+ return NULL;
+ }
+ hw = I40E_PF_TO_HW(pf);
+
+ veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
+ if (!veb) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
+ goto fail;
+ }
+
+ veb->associate_vsi = vsi;
+ TAILQ_INIT(&veb->head);
+ veb->uplink_seid = vsi->uplink_seid;
+
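+	/* Create the VEB in firmware through the admin queue, using the default TC map */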
+ ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
+ I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
+
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
+ hw->aq.asq_last_status);
+ goto fail;
+ }
+
+ /* get statistics index */
+ ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
+ &veb->stats_idx, NULL, NULL, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
+ hw->aq.asq_last_status);
+ goto fail;
+ }
+
+ /* Get VEB bandwidth, to be implemented */
+ /* Now associated vsi binding to the VEB, set uplink to this VEB */
+ vsi->uplink_seid = veb->seid;
+
+ return veb;
+fail:
+ rte_free(veb);
+ return NULL;
+}
+
+int
+i40e_vsi_release(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi_list *vsi_list;
+ int ret;
+ struct i40e_mac_filter *f;
+
+ if (!vsi)
+ return I40E_SUCCESS;
+
+ pf = I40E_VSI_TO_PF(vsi);
+ hw = I40E_VSI_TO_HW(vsi);
+
+	/* VSI has children attached, release the children first */
+ if (vsi->veb) {
+ TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
+ if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
+ return -1;
+ TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
+ }
+ i40e_veb_release(vsi->veb);
+ }
+
+	/* Remove all macvlan filters of the VSI */
+	i40e_vsi_remove_all_macvlan_filter(vsi);
+	/* Free the software filter list; TAILQ_FOREACH would touch freed entries, so pop from the head */
+	while ((f = TAILQ_FIRST(&vsi->mac_list)) != NULL) {
+		TAILQ_REMOVE(&vsi->mac_list, f, next);
+		rte_free(f);
+	}
+
+ if (vsi->type != I40E_VSI_MAIN) {
+ /* Remove vsi from parent's sibling list */
+ if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
+ PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
+ return I40E_ERR_PARAM;
+ }
+ TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
+ &vsi->sib_vsi_list, list);
+
+ /* Remove all switch element of the VSI */
+ ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to delete element");
+ }
+ i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
+
+ if (vsi->type != I40E_VSI_SRIOV)
+ i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
+ rte_free(vsi);
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_update_default_filter_setting(struct i40e_vsi *vsi)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_remove_macvlan_element_data def_filter;
+ struct i40e_mac_filter_info filter;
+ int ret;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return I40E_ERR_CONFIG;
+ memset(&def_filter, 0, sizeof(def_filter));
+ (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
+ ETH_ADDR_LEN);
+ def_filter.vlan_tag = 0;
+ def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
+ if (ret != I40E_SUCCESS) {
+ struct i40e_mac_filter *f;
+ struct ether_addr *mac;
+
+ PMD_DRV_LOG(WARNING, "Cannot remove the default "
+ "macvlan filter");
+		/* Need to add the permanent MAC into the MAC list */
+ f = rte_zmalloc("macv_filter", sizeof(*f), 0);
+ if (f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+ mac = &f->mac_info.mac_addr;
+ (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
+ ETH_ADDR_LEN);
+ f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
+ vsi->mac_num++;
+
+ return ret;
+ }
+ (void)rte_memcpy(&filter.mac_addr,
+ (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ return i40e_vsi_add_mac(vsi, &filter);
+}
+
+static int
+i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
+{
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config;
+ struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
+ struct i40e_hw *hw = &vsi->adapter->hw;
+ i40e_status ret;
+ int i;
+
+ memset(&bw_config, 0, sizeof(bw_config));
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
+ hw->aq.asq_last_status);
+ return ret;
+ }
+
+ memset(&ets_sla_config, 0, sizeof(ets_sla_config));
+ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
+ &ets_sla_config, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
+ "configuration %u", hw->aq.asq_last_status);
+ return ret;
+ }
+
+	/* Don't store the info yet, just print it out */
+ PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
+ PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
+ ets_sla_config.share_credits[i]);
+ PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
+ rte_le_to_cpu_16(ets_sla_config.credits[i]));
+ PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
+ rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
+ (i * 4));
+ }
+
+ return 0;
+}
+
+/* Setup a VSI */
+struct i40e_vsi *
+i40e_vsi_setup(struct i40e_pf *pf,
+ enum i40e_vsi_type type,
+ struct i40e_vsi *uplink_vsi,
+ uint16_t user_param)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_mac_filter_info filter;
+ int ret;
+ struct i40e_vsi_context ctxt;
+ struct ether_addr broadcast =
+ {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
+
+ if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
+ PMD_DRV_LOG(ERR, "VSI setup failed, "
+ "VSI link shouldn't be NULL");
+ return NULL;
+ }
+
+ if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
+ PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
+ "uplink VSI should be NULL");
+ return NULL;
+ }
+
+ /* If uplink vsi didn't setup VEB, create one first */
+ if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
+ uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
+
+ if (NULL == uplink_vsi->veb) {
+ PMD_DRV_LOG(ERR, "VEB setup failed");
+ return NULL;
+ }
+ }
+
+ vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
+ return NULL;
+ }
+ TAILQ_INIT(&vsi->mac_list);
+ vsi->type = type;
+ vsi->adapter = I40E_PF_TO_ADAPTER(pf);
+ vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
+ vsi->parent_vsi = uplink_vsi;
+ vsi->user_param = user_param;
+ /* Allocate queues */
+ switch (vsi->type) {
+ case I40E_VSI_MAIN :
+ vsi->nb_qps = pf->lan_nb_qps;
+ break;
+ case I40E_VSI_SRIOV :
+ vsi->nb_qps = pf->vf_nb_qps;
+ break;
+ case I40E_VSI_VMDQ2:
+ vsi->nb_qps = pf->vmdq_nb_qps;
+ break;
+ case I40E_VSI_FDIR:
+ vsi->nb_qps = pf->fdir_nb_qps;
+ break;
+ default:
+ goto fail_mem;
+ }
+ /*
+	 * The filter status descriptor is reported on RX queue 0, while the
+	 * TX queue for fdir filter programming has no such constraint and
+	 * may be any queue.
+	 * To simplify things, the FDIR VSI uses queue pair 0.
+	 * To make sure it gets queue pair 0, queue allocation must be done
+	 * before this function is called.
+ */
+ if (type != I40E_VSI_FDIR) {
+ ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
+ vsi->seid, ret);
+ goto fail_mem;
+ }
+ vsi->base_queue = ret;
+ } else
+ vsi->base_queue = I40E_FDIR_QUEUE_ID;
+
+ /* VF has MSIX interrupt in VF range, don't allocate here */
+ if (type != I40E_VSI_SRIOV) {
+ ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
+ goto fail_queue_alloc;
+ }
+ vsi->msix_intr = ret;
+ } else
+ vsi->msix_intr = 0;
+ /* Add VSI */
+ if (type == I40E_VSI_MAIN) {
+ /* For main VSI, no need to add since it's default one */
+ vsi->uplink_seid = pf->mac_seid;
+ vsi->seid = pf->main_vsi_seid;
+ /* Bind queues with specific MSIX interrupt */
+ /**
+		 * At least two interrupts are needed: one for the misc cause,
+		 * which is enabled from the OS side, and another for binding
+		 * the queues, driven from the device side only.
+ */
+
+ /* Get default VSI parameters from hardware */
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.vf_num = 0;
+ ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get VSI params");
+ goto fail_msix_alloc;
+ }
+ (void)rte_memcpy(&vsi->info, &ctxt.info,
+ sizeof(struct i40e_aqc_vsi_properties_data));
+ vsi->vsi_id = ctxt.vsi_number;
+ vsi->info.valid_sections = 0;
+
+ /* Configure tc, enabled TC0 only */
+ if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
+ I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
+ goto fail_msix_alloc;
+ }
+
+ /* TC, queue mapping */
+ memset(&ctxt, 0, sizeof(ctxt));
+ vsi->info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ (void)rte_memcpy(&ctxt.info, &vsi->info,
+ sizeof(struct i40e_aqc_vsi_properties_data));
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure "
+ "TC queue mapping");
+ goto fail_msix_alloc;
+ }
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.vf_num = 0;
+
+ /* Update VSI parameters */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ goto fail_msix_alloc;
+ }
+
+ (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+ (void)rte_memcpy(&vsi->info.queue_mapping,
+ &ctxt.info.queue_mapping,
+ sizeof(vsi->info.queue_mapping));
+ vsi->info.mapping_flags = ctxt.info.mapping_flags;
+ vsi->info.valid_sections = 0;
+
+ (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
+ ETH_ADDR_LEN);
+
+ /**
+		 * Updating the default filter settings is necessary to prevent
+		 * reception of tagged packets.
+		 * Some old firmware configurations load a default macvlan
+		 * filter which accepts both tagged and untagged packets.
+		 * The update replaces it with a normal filter when needed.
+		 * For NVM 4.2.2 or later, the update is no longer needed:
+		 * firmware with a correct configuration loads the expected
+		 * default macvlan filter, which cannot be removed.
+ */
+ i40e_update_default_filter_setting(vsi);
+ } else if (type == I40E_VSI_SRIOV) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ /**
+		 * For other VSIs, the uplink_seid equals the uplink VSI's
+		 * uplink_seid since they share the same VEB
+ */
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1;
+ ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+
+ /* Configure switch ID */
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ /* Configure port/vlan */
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure "
+ "TC queue mapping");
+ goto fail_msix_alloc;
+ }
+ ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+ /**
+		 * Since the VSI is not created yet, only configure the
+		 * parameters here; the VSI is added below.
+ */
+ } else if (type == I40E_VSI_VMDQ2) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ /*
+		 * For other VSIs, the uplink_seid equals the uplink VSI's
+		 * uplink_seid since they share the same VEB
+ */
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1;
+ ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ /* user_param carries flag to enable loop back */
+ if (user_param) {
+ ctxt.info.switch_id =
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+ ctxt.info.switch_id |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
+
+ /* Configure port/vlan */
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure "
+ "TC queue mapping");
+ goto fail_msix_alloc;
+ }
+ ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+ } else if (type == I40E_VSI_FDIR) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure "
+ "TC queue mapping.");
+ goto fail_msix_alloc;
+ }
+ ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+ } else {
+ PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
+ goto fail_msix_alloc;
+ }
+
+ if (vsi->type != I40E_VSI_MAIN) {
+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
+ hw->aq.asq_last_status);
+ goto fail_msix_alloc;
+ }
+ memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info.valid_sections = 0;
+ vsi->seid = ctxt.seid;
+ vsi->vsi_id = ctxt.vsi_number;
+ vsi->sib_vsi_list.vsi = vsi;
+ TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
+ &vsi->sib_vsi_list, list);
+ }
+
+ /* MAC/VLAN configuration */
+ (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+
+ ret = i40e_vsi_add_mac(vsi, &filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
+ goto fail_msix_alloc;
+ }
+
+ /* Get VSI BW information */
+ i40e_vsi_dump_bw_config(vsi);
+ return vsi;
+fail_msix_alloc:
+ i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
+fail_queue_alloc:
+ i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
+fail_mem:
+ rte_free(vsi);
+ return NULL;
+}
+
+/* Configure vlan stripping on or off */
+int
+i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_vsi_context ctxt;
+ uint8_t vlan_flags;
+ int ret = I40E_SUCCESS;
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
+ if (on) {
+ if ((vsi->info.port_vlan_flags &
+ I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.port_vlan_flags &
+ I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
+ I40E_AQ_VSI_PVLAN_EMOD_MASK)
+ return 0; /* already off */
+ }
+ }
+
+ if (on)
+ vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ else
+ vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+ vsi->info.valid_sections =
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
+ vsi->info.port_vlan_flags |= vlan_flags;
+ ctxt.seid = vsi->seid;
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret)
+ PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
+ on ? "enable" : "disable");
+
+ return ret;
+}
+
+static int
+i40e_dev_init_vlan(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ int ret;
+
+ /* Apply vlan offload setting */
+ i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+
+ /* Apply double-vlan setting, not implemented yet */
+
+ /* Apply pvid setting */
+ ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
+ data->dev_conf.txmode.hw_vlan_insert_pvid);
+ if (ret)
+ PMD_DRV_LOG(INFO, "Failed to update VSI params");
+
+ return ret;
+}
+
+static int
+i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+
+ return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
+}
+
+static int
+i40e_update_flow_control(struct i40e_hw *hw)
+{
+#define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
+ struct i40e_link_status link_status;
+ uint32_t rxfc = 0, txfc = 0, reg;
+ uint8_t an_info;
+ int ret;
+
+ memset(&link_status, 0, sizeof(link_status));
+ ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get link status information");
+ goto write_reg; /* Disable flow control */
+ }
+
+ an_info = hw->phy.link_info.an_info;
+ if (!(an_info & I40E_AQ_AN_COMPLETED)) {
+ PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
+ ret = I40E_ERR_NOT_READY;
+ goto write_reg; /* Disable flow control */
+ }
+ /**
+ * If link auto negotiation is enabled, flow control needs to
+ * be configured according to it
+ */
+ switch (an_info & I40E_LINK_PAUSE_RXTX) {
+ case I40E_LINK_PAUSE_RXTX:
+ rxfc = 1;
+ txfc = 1;
+ hw->fc.current_mode = I40E_FC_FULL;
+ break;
+ case I40E_AQ_LINK_PAUSE_RX:
+ rxfc = 1;
+ hw->fc.current_mode = I40E_FC_RX_PAUSE;
+ break;
+ case I40E_AQ_LINK_PAUSE_TX:
+ txfc = 1;
+ hw->fc.current_mode = I40E_FC_TX_PAUSE;
+ break;
+ default:
+ hw->fc.current_mode = I40E_FC_NONE;
+ break;
+ }
+
+write_reg:
+ I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
+ txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
+ reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
+ reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
+ I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
+
+ return ret;
+}
+
+/* PF setup */
+static int
+i40e_pf_setup(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_filter_control_settings settings;
+ struct i40e_vsi *vsi;
+ int ret;
+
+ /* Clear all stats counters */
+ pf->offset_loaded = FALSE;
+ memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
+ memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
+
+ ret = i40e_pf_get_switch_config(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
+ return ret;
+ }
+ if (pf->flags & I40E_FLAG_FDIR) {
+		/* Allocate queues first so that FDIR gets queue pair 0 */
+ ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
+ if (ret != I40E_FDIR_QUEUE_ID) {
+ PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
+ " ret =%d", ret);
+ pf->flags &= ~I40E_FLAG_FDIR;
+ }
+ }
+ /* main VSI setup */
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Setup of main vsi failed");
+ return I40E_ERR_NOT_READY;
+ }
+ pf->main_vsi = vsi;
+
+ /* Configure filter control */
+ memset(&settings, 0, sizeof(settings));
+ if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
+ settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
+ else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
+ settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+ else {
+ PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
+ hw->func_caps.rss_table_size);
+ return I40E_ERR_PARAM;
+ }
+ PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
+ "size: %u\n", hw->func_caps.rss_table_size);
+ pf->hash_lut_size = hw->func_caps.rss_table_size;
+
+ /* Enable ethtype and macvlan filters */
+ settings.enable_ethtype = TRUE;
+ settings.enable_macvlan = TRUE;
+ ret = i40e_set_filter_control(hw, &settings);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
+ ret);
+
+ /* Update flow control according to the auto negotiation */
+ i40e_update_flow_control(hw);
+
+ return I40E_SUCCESS;
+}
+
+int
+i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
+{
+ uint32_t reg;
+ uint16_t j;
+
+ /**
+ * Set or clear TX Queue Disable flags,
+ * which is required by hardware.
+ */
+ i40e_pre_tx_queue_cfg(hw, q_idx, on);
+ rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
+
+ /* Wait until the request is finished */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
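+ /* The previous request has completed once the QENA request and status bits agree */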
+ if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
+ ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
+ & 0x1))) {
+ break;
+ }
+ }
+ if (on) {
+ if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
+ return I40E_SUCCESS; /* already on, skip next steps */
+
+ I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
+ reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ } else {
+ if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ return I40E_SUCCESS; /* already off, skip next steps */
+ reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
+ /* Write the register */
+ I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
+ /* Check the result */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
+ if (on) {
+ if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
+ (reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
+ !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ }
+ }
+ /* Check if it is timeout */
+ if (j >= I40E_CHK_Q_ENA_COUNT) {
+ PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
+ (on ? "enable" : "disable"), q_idx);
+ return I40E_ERR_TIMEOUT;
+ }
+
+ return I40E_SUCCESS;
+}
+
+/* Switch on or off the tx queues */
+static int
+i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
+{
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
+ struct i40e_tx_queue *txq;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ uint16_t i;
+ int ret;
+
+ for (i = 0; i < dev_data->nb_tx_queues; i++) {
+ txq = dev_data->tx_queues[i];
+ /* Don't operate the queue if it is not configured, or
+ * if it is deferred-start while starting all queues */
+ if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
+ continue;
+ if (on)
+ ret = i40e_dev_tx_queue_start(dev, i);
+ else
+ ret = i40e_dev_tx_queue_stop(dev, i);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+
+ return I40E_SUCCESS;
+}
+
+int
+i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
+{
+ uint32_t reg;
+ uint16_t j;
+
+ /* Wait until the request is finished */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
+ if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
+ ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
+ break;
+ }
+
+ if (on) {
+ if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
+ return I40E_SUCCESS; /* Already on, skip next steps */
+ reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ } else {
+ if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ return I40E_SUCCESS; /* Already off, skip next steps */
+ reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+ }
+
+ /* Write the register */
+ I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
+ /* Check the result */
+ for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
+ rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
+ reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
+ if (on) {
+ if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
+ (reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
+ !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ }
+ }
+
+ /* Check if it is timeout */
+ if (j >= I40E_CHK_Q_ENA_COUNT) {
+ PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
+ (on ? "enable" : "disable"), q_idx);
+ return I40E_ERR_TIMEOUT;
+ }
+
+ return I40E_SUCCESS;
+}
+/* Switch on or off the rx queues */
+static int
+i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
+{
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
+ struct i40e_rx_queue *rxq;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ uint16_t i;
+ int ret;
+
+ for (i = 0; i < dev_data->nb_rx_queues; i++) {
+ rxq = dev_data->rx_queues[i];
+ /* Don't operate the queue if it is not configured, or
+ * if it is deferred-start while starting all queues */
+ if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
+ continue;
+ if (on)
+ ret = i40e_dev_rx_queue_start(dev, i);
+ else
+ ret = i40e_dev_rx_queue_stop(dev, i);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+
+ return I40E_SUCCESS;
+}
+
+/* Switch on or off all the rx/tx queues */
+int
+i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
+{
+ int ret;
+
+ if (on) {
+ /* enable rx queues before enabling tx queues */
+ ret = i40e_dev_switch_rx_queues(pf, on);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to switch rx queues");
+ return ret;
+ }
+ ret = i40e_dev_switch_tx_queues(pf, on);
+ } else {
+ /* Stop tx queues before stopping rx queues */
+ ret = i40e_dev_switch_tx_queues(pf, on);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to switch tx queues");
+ return ret;
+ }
+ ret = i40e_dev_switch_rx_queues(pf, on);
+ }
+
+ return ret;
+}
+
+/* Initialize VSI for TX */
+static int
+i40e_dev_tx_init(struct i40e_pf *pf)
+{
+ struct rte_eth_dev_data *data = pf->dev_data;
+ uint16_t i;
+ uint32_t ret = I40E_SUCCESS;
+ struct i40e_tx_queue *txq;
+
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ txq = data->tx_queues[i];
+ if (!txq || !txq->q_set)
+ continue;
+ ret = i40e_tx_queue_init(txq);
+ if (ret != I40E_SUCCESS)
+ break;
+ }
+
+ return ret;
+}
+
+/* Initialize VSI for RX */
+static int
+i40e_dev_rx_init(struct i40e_pf *pf)
+{
+ struct rte_eth_dev_data *data = pf->dev_data;
+ int ret = I40E_SUCCESS;
+ uint16_t i;
+ struct i40e_rx_queue *rxq;
+
+ i40e_pf_config_mq_rx(pf);
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ rxq = data->rx_queues[i];
+ if (!rxq || !rxq->q_set)
+ continue;
+
+ ret = i40e_rx_queue_init(rxq);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to do RX queue "
+ "initialization");
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+i40e_dev_rxtx_init(struct i40e_pf *pf)
+{
+ int err;
+
+ err = i40e_dev_tx_init(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do TX initialization");
+ return err;
+ }
+ err = i40e_dev_rx_init(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do RX initialization");
+ return err;
+ }
+
+ return err;
+}
+
+static int
+i40e_vmdq_setup(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int i, err, conf_vsis, j, loop;
+ struct i40e_vsi *vsi;
+ struct i40e_vmdq_info *vmdq_info;
+ struct rte_eth_vmdq_rx_conf *vmdq_conf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ /*
+ * Disable interrupt to avoid message from VF. Furthermore, it will
+ * avoid race condition in VSI creation/destroy.
+ */
+ i40e_pf_disable_irq0(hw);
+
+ if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
+ PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
+ return -ENOTSUP;
+ }
+
+ conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
+ if (conf_vsis > pf->max_nb_vmdq_vsi) {
+ PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
+ conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
+ pf->max_nb_vmdq_vsi);
+ return -ENOTSUP;
+ }
+
+ if (pf->vmdq != NULL) {
+ PMD_INIT_LOG(INFO, "VMDQ already configured");
+ return 0;
+ }
+
+ pf->vmdq = rte_zmalloc("vmdq_info_struct",
+ sizeof(*vmdq_info) * conf_vsis, 0);
+
+ if (pf->vmdq == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory");
+ return -ENOMEM;
+ }
+
+ vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
+
+ /* Create VMDQ VSI */
+ for (i = 0; i < conf_vsis; i++) {
+ vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
+ vmdq_conf->enable_loop_back);
+ if (vsi == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
+ err = -1;
+ goto err_vsi_setup;
+ }
+ vmdq_info = &pf->vmdq[i];
+ vmdq_info->pf = pf;
+ vmdq_info->vsi = vsi;
+ }
+ pf->nb_cfg_vmdq_vsi = conf_vsis;
+
+ /* Configure Vlan */
+ loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
+ for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
+ for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
+ if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
+ PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
+ vmdq_conf->pool_map[i].vlan_id, j);
+
+ err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
+ vmdq_conf->pool_map[i].vlan_id);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Failed to add vlan");
+ err = -1;
+ goto err_vsi_setup;
+ }
+ }
+ }
+ }
+
+ i40e_pf_enable_irq0(hw);
+
+ return 0;
+
+err_vsi_setup:
+ for (i = 0; i < conf_vsis; i++)
+ if (pf->vmdq[i].vsi == NULL)
+ break;
+ else
+ i40e_vsi_release(pf->vmdq[i].vsi);
+
+ rte_free(pf->vmdq);
+ pf->vmdq = NULL;
+ i40e_pf_enable_irq0(hw);
+ return err;
+}
+
+static void
+i40e_stat_update_32(struct i40e_hw *hw,
+ uint32_t reg,
+ bool offset_loaded,
+ uint64_t *offset,
+ uint64_t *stat)
+{
+ uint64_t new_data;
+
+ new_data = (uint64_t)I40E_READ_REG(hw, reg);
+ if (!offset_loaded)
+ *offset = new_data;
+
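+ /* Handle a single wrap of the 32-bit hardware counter since the offset was latched */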
+ if (new_data >= *offset)
+ *stat = (uint64_t)(new_data - *offset);
+ else
+ *stat = (uint64_t)((new_data +
+ ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
+}
+
+static void
+i40e_stat_update_48(struct i40e_hw *hw,
+ uint32_t hireg,
+ uint32_t loreg,
+ bool offset_loaded,
+ uint64_t *offset,
+ uint64_t *stat)
+{
+ uint64_t new_data;
+
+ new_data = (uint64_t)I40E_READ_REG(hw, loreg);
+ new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
+ I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
+
+ if (!offset_loaded)
+ *offset = new_data;
+
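+ /* Handle a single wrap of the 48-bit hardware counter since the offset was latched */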
+ if (new_data >= *offset)
+ *stat = new_data - *offset;
+ else
+ *stat = (uint64_t)((new_data +
+ ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
+
+ *stat &= I40E_48_BIT_MASK;
+}
+
+/* Disable IRQ0 */
+void
+i40e_pf_disable_irq0(struct i40e_hw *hw)
+{
+ /* Disable all interrupt types */
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ I40E_WRITE_FLUSH(hw);
+}
+
+/* Enable IRQ0 */
+void
+i40e_pf_enable_irq0(struct i40e_hw *hw)
+{
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
+ I40E_WRITE_FLUSH(hw);
+}
+
+static void
+i40e_pf_config_irq0(struct i40e_hw *hw)
+{
+ /* read pending request and disable first */
+ i40e_pf_disable_irq0(hw);
+ I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
+ I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
+ I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
+
+ /* Link no queues with irq0 */
+ I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
+ I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
+}
+
+static void
+i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int i;
+ uint16_t abs_vf_id;
+ uint32_t index, offset, val;
+
+ if (!pf->vfs)
+ return;
+ /**
+ * Try to find which VF triggered a reset; use the absolute VF id for
+ * access, since the register is a global register.
+ */
+ for (i = 0; i < pf->vf_num; i++) {
+ abs_vf_id = hw->func_caps.vf_base_id + i;
+ index = abs_vf_id / I40E_UINT32_BIT_SIZE;
+ offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
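+ /* Each VFLRSTAT register tracks 32 VFs: 'index' selects the register, 'offset' the bit */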
+ val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
+ /* VFR event occurred */
+ if (val & (0x1 << offset)) {
+ int ret;
+
+ /* Clear the event first */
+ I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
+ (0x1 << offset));
+ PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
+ /**
+ * Only notify that a VF reset event occurred;
+ * don't trigger another SW reset
+ */
+ ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to do VF reset");
+ }
+ }
+}
+
+static void
+i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_arq_event_info info;
+ uint16_t pending, opcode;
+ int ret;
+
+ info.buf_len = I40E_AQ_BUF_SZ;
+ info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
+ if (!info.msg_buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mem");
+ return;
+ }
+
+ pending = 1;
+ while (pending) {
+ ret = i40e_clean_arq_element(hw, &info, &pending);
+
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
+ "aq_err: %u", hw->aq.asq_last_status);
+ break;
+ }
+ opcode = rte_le_to_cpu_16(info.desc.opcode);
+
+ switch (opcode) {
+ case i40e_aqc_opc_send_msg_to_pf:
+ /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
+ i40e_pf_host_handle_vf_msg(dev,
+ rte_le_to_cpu_16(info.desc.retval),
+ rte_le_to_cpu_32(info.desc.cookie_high),
+ rte_le_to_cpu_32(info.desc.cookie_low),
+ info.msg_buf,
+ info.msg_len);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ opcode);
+ break;
+ }
+ }
+ rte_free(info.msg_buf);
+}
+
+/*
+ * The interrupt handler is registered as the alarm callback for handling the
+ * LSC interrupt after a fixed delay, in order to wait for the NIC to reach a
+ * stable state. For i40e it currently waits 1 second for the link-up
+ * interrupt; no delay is needed for the link-down interrupt.
+ */
+static void
+i40e_dev_interrupt_delayed_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t icr0;
+
+ /* read interrupt causes again */
+ icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
+ if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
+ if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
+ if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
+ "state\n");
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
+ if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+ PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
+ i40e_dev_handle_vfr_event(dev);
+ }
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
+ i40e_dev_handle_aq_msg(dev);
+ }
+
+ /* handle the link up interrupt in an alarm callback */
+ i40e_dev_link_update(dev, 0);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+
+ i40e_pf_enable_irq0(hw);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+}
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t icr0;
+
+ /* Disable interrupt */
+ i40e_pf_disable_irq0(hw);
+
+ /* read out interrupt causes */
+ icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
+
+ /* No interrupt event indicated */
+ if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
+ PMD_DRV_LOG(INFO, "No interrupt event");
+ goto done;
+ }
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
+ if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: global reset requested");
+ if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
+ if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
+ PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: HMC error");
+ if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
+ PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+ PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
+ i40e_dev_handle_vfr_event(dev);
+ }
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ PMD_DRV_LOG(INFO, "ICR0: adminq event");
+ i40e_dev_handle_aq_msg(dev);
+ }
+
+ /* Link Status Change interrupt */
+ if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
+#define I40E_US_PER_SECOND 1000000
+ struct rte_eth_link link;
+
+ PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
+ memset(&link, 0, sizeof(link));
+ rte_i40e_dev_atomic_read_link_status(dev, &link);
+ i40e_dev_link_update(dev, 0);
+
+ /*
+ * For a link-up interrupt, it needs to wait 1 second to let the
+ * hardware reach a stable state; otherwise several consecutive
+ * interrupts can be observed.
+ * For a link-down interrupt, there is no need to wait.
+ */
+ if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
+ i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
+ return;
+ else
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC);
+ }
+
+done:
+ /* Enable interrupt */
+ i40e_pf_enable_irq0(hw);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+}
+
+static int
+i40e_add_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total)
+{
+ int ele_num, ele_buff_size;
+ int num, actual_num, i;
+ uint16_t flags;
+ int ret = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_macvlan_element_data *req_list;
+
+ if (filter == NULL || total == 0)
+ return I40E_ERR_PARAM;
+ ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
+ ele_buff_size = hw->aq.asq_buf_size;
+
+ req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
+ if (req_list == NULL) {
+ PMD_DRV_LOG(ERR, "Fail to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
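+ /* Issue the add requests in batches sized to fit the admin send queue buffer */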
+ num = 0;
+ do {
+ actual_num = (num + ele_num > total) ? (total - num) : ele_num;
+ memset(req_list, 0, ele_buff_size);
+
+ for (i = 0; i < actual_num; i++) {
+ (void)rte_memcpy(req_list[i].mac_addr,
+ &filter[num + i].macaddr, ETH_ADDR_LEN);
+ req_list[i].vlan_tag =
+ rte_cpu_to_le_16(filter[num + i].vlan_id);
+
+ switch (filter[num + i].filter_type) {
+ case RTE_MAC_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+ break;
+ case RTE_MAC_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
+ I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
+ ret = I40E_ERR_PARAM;
+ goto DONE;
+ }
+
+ req_list[i].queue_number = 0;
+
+ req_list[i].flags = rte_cpu_to_le_16(flags);
+ }
+
+ ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
+ actual_num, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
+ goto DONE;
+ }
+ num += actual_num;
+ } while (num < total);
+
+DONE:
+ rte_free(req_list);
+ return ret;
+}
+
+static int
+i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total)
+{
+ int ele_num, ele_buff_size;
+ int num, actual_num, i;
+ uint16_t flags;
+ int ret = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_remove_macvlan_element_data *req_list;
+
+ if (filter == NULL || total == 0)
+ return I40E_ERR_PARAM;
+
+ ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
+ ele_buff_size = hw->aq.asq_buf_size;
+
+ req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
+ if (req_list == NULL) {
+ PMD_DRV_LOG(ERR, "Fail to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
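+ /* Issue the remove requests in batches sized to fit the admin send queue buffer */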
+ num = 0;
+ do {
+ actual_num = (num + ele_num > total) ? (total - num) : ele_num;
+ memset(req_list, 0, ele_buff_size);
+
+ for (i = 0; i < actual_num; i++) {
+ (void)rte_memcpy(req_list[i].mac_addr,
+ &filter[num + i].macaddr, ETH_ADDR_LEN);
+ req_list[i].vlan_tag =
+ rte_cpu_to_le_16(filter[num + i].vlan_id);
+
+ switch (filter[num + i].filter_type) {
+ case RTE_MAC_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_PERFECT_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ break;
+ case RTE_MAC_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ break;
+ case RTE_MACVLAN_HASH_MATCH:
+ flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
+ ret = I40E_ERR_PARAM;
+ goto DONE;
+ }
+ req_list[i].flags = rte_cpu_to_le_16(flags);
+ }
+
+ ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
+ actual_num, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
+ goto DONE;
+ }
+ num += actual_num;
+ } while (num < total);
+
+DONE:
+ rte_free(req_list);
+ return ret;
+}
+
+/* Find out specific MAC filter */
+static struct i40e_mac_filter *
+i40e_find_mac_filter(struct i40e_vsi *vsi,
+ struct ether_addr *macaddr)
+{
+ struct i40e_mac_filter *f;
+
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
+ return f;
+ }
+
+ return NULL;
+}
+
+static bool
+i40e_find_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id)
+{
+ uint32_t vid_idx, vid_bit;
+
+ if (vlan_id > ETH_VLAN_ID_MAX)
+ return 0;
+
+ vid_idx = I40E_VFTA_IDX(vlan_id);
+ vid_bit = I40E_VFTA_BIT(vlan_id);
+
+ if (vsi->vfta[vid_idx] & vid_bit)
+ return 1;
+ else
+ return 0;
+}
+
+static void
+i40e_set_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
+{
+ uint32_t vid_idx, vid_bit;
+
+ if (vlan_id > ETH_VLAN_ID_MAX)
+ return;
+
+ vid_idx = I40E_VFTA_IDX(vlan_id);
+ vid_bit = I40E_VFTA_BIT(vlan_id);
+
+ if (on)
+ vsi->vfta[vid_idx] |= vid_bit;
+ else
+ vsi->vfta[vid_idx] &= ~vid_bit;
+}
+
+/**
+ * Find all vlan options for specific mac addr,
+ * return with actual vlan found.
+ */
+static inline int
+i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num, struct ether_addr *addr)
+{
+ int i;
+ uint32_t j, k;
+
+ /**
+ * Avoid calling i40e_find_vlan_filter here to reduce loop time,
+ * even though it makes the code look more complex.
+ */
+ if (num < vsi->vlan_num)
+ return I40E_ERR_PARAM;
+
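+ /* Walk the VFTA bitmap; each set bit maps back to vlan_id = word * 32 + bit */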
+ i = 0;
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (vsi->vfta[j]) {
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (vsi->vfta[j] & (1 << k)) {
+ if (i > num - 1) {
+ PMD_DRV_LOG(ERR, "vlan number "
+ "not match");
+ return I40E_ERR_PARAM;
+ }
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ addr, ETH_ADDR_LEN);
+ mv_f[i].vlan_id =
+ j * I40E_UINT32_BIT_SIZE + k;
+ i++;
+ }
+ }
+ }
+ }
+ return I40E_SUCCESS;
+}
+
+static inline int
+i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num,
+ uint16_t vlan)
+{
+ int i = 0;
+ struct i40e_mac_filter *f;
+
+ if (num < vsi->mac_num)
+ return I40E_ERR_PARAM;
+
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (i > num - 1) {
+ PMD_DRV_LOG(ERR, "buffer number not match");
+ return I40E_ERR_PARAM;
+ }
+ (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ mv_f[i].vlan_id = vlan;
+ mv_f[i].filter_type = f->mac_info.filter_type;
+ i++;
+ }
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
+{
+ int i, num;
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int ret = I40E_SUCCESS;
+
+ if (vsi == NULL || vsi->mac_num == 0)
+ return I40E_ERR_PARAM;
+
+ /* Case that no vlan is set */
+ if (vsi->vlan_num == 0)
+ num = vsi->mac_num;
+ else
+ num = vsi->mac_num * vsi->vlan_num;
+
+ mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ i = 0;
+ if (vsi->vlan_num == 0) {
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr, ETH_ADDR_LEN);
+ mv_f[i].vlan_id = 0;
+ i++;
+ }
+ } else {
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
+ vsi->vlan_num, &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ i += vsi->vlan_num;
+ }
+ }
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
+DONE:
+ rte_free(mv_f);
+
+ return ret;
+}
+
+int
+i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
+{
+ struct i40e_macvlan_filter *mv_f;
+ int mac_num;
+ int ret = I40E_SUCCESS;
+
+ if (!vsi || vlan > ETHER_MAX_VLAN_ID)
+ return I40E_ERR_PARAM;
+
+ /* If it's already set, just return */
+ if (i40e_find_vlan_filter(vsi, vlan))
+ return I40E_SUCCESS;
+
+ mac_num = vsi->mac_num;
+
+ if (mac_num == 0) {
+ PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
+ return I40E_ERR_PARAM;
+ }
+
+ mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
+
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ i40e_set_vlan_filter(vsi, vlan, 1);
+
+ vsi->vlan_num++;
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+ return ret;
+}
+
+int
+i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
+{
+ struct i40e_macvlan_filter *mv_f;
+ int mac_num;
+ int ret = I40E_SUCCESS;
+
+ /**
+ * Vlan 0 is the generic filter for untagged packets
+ * and can't be removed.
+ */
+ if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
+ return I40E_ERR_PARAM;
+
+ /* If it can't be found, just return */
+ if (!i40e_find_vlan_filter(vsi, vlan))
+ return I40E_ERR_PARAM;
+
+ mac_num = vsi->mac_num;
+
+ if (mac_num == 0) {
+ PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
+ return I40E_ERR_PARAM;
+ }
+
+ mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
+
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
+
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ /* This is the last vlan to remove; replace all mac filters with vlan 0 */
+ if (vsi->vlan_num == 1) {
+ ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ }
+
+ i40e_set_vlan_filter(vsi, vlan, 0);
+
+ vsi->vlan_num--;
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+ return ret;
+}
+
+int
+i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num = 0;
+ int ret = I40E_SUCCESS;
+
+ /* If the filter has already been configured, just return */
+ f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
+ if (f != NULL)
+ return I40E_SUCCESS;
+ if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
+ (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
+
+ /**
+ * If vlan_num is 0, this is the first time a mac is added;
+ * set the mask for vlan_id 0.
+ */
+ if (vsi->vlan_num == 0) {
+ i40e_set_vlan_filter(vsi, 0, 1);
+ vsi->vlan_num = 1;
+ }
+ vlan_num = vsi->vlan_num;
+ } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
+ (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = mac_filter->filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
+ ETH_ADDR_LEN);
+ }
+
+ if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &mac_filter->mac_addr);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ }
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ /* Add the mac addr into mac list */
+ f = rte_zmalloc("macv_filter", sizeof(*f), 0);
+ if (f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ ret = I40E_ERR_NO_MEMORY;
+ goto DONE;
+ }
+ (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
+ ETH_ADDR_LEN);
+ f->mac_info.filter_type = mac_filter->filter_type;
+ TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
+ vsi->mac_num++;
+
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+
+ return ret;
+}
+
+int
+i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num;
+ enum rte_mac_filter_type filter_type;
+ int ret = I40E_SUCCESS;
+
+ /* If it can't be found, return an error */
+ f = i40e_find_mac_filter(vsi, addr);
+ if (f == NULL)
+ return I40E_ERR_PARAM;
+
+ vlan_num = vsi->vlan_num;
+ filter_type = f->mac_info.filter_type;
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ if (vlan_num == 0) {
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
+ return I40E_ERR_PARAM;
+ }
+ } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
+ filter_type == RTE_MAC_HASH_MATCH)
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (mv_f == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+ }
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS)
+ goto DONE;
+
+ /* Remove the mac addr from the mac list */
+ TAILQ_REMOVE(&vsi->mac_list, f, next);
+ rte_free(f);
+ vsi->mac_num--;
+
+ ret = I40E_SUCCESS;
+DONE:
+ rte_free(mv_f);
+ return ret;
+}
+
+/* Configure hash enable flags for RSS */
+uint64_t
+i40e_config_hena(uint64_t flags)
+{
+ uint64_t hena = 0;
+
+ if (!flags)
+ return hena;
+
+ if (flags & ETH_RSS_NONF_IPV4_UDP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ if (flags & ETH_RSS_NONF_IPV4_TCP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ if (flags & ETH_RSS_NONF_IPV4_SCTP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ if (flags & ETH_RSS_NONF_IPV4_OTHER)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ if (flags & ETH_RSS_FRAG_IPV4)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
+ if (flags & ETH_RSS_NONF_IPV6_UDP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ if (flags & ETH_RSS_NONF_IPV6_TCP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ if (flags & ETH_RSS_NONF_IPV6_SCTP)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ if (flags & ETH_RSS_NONF_IPV6_OTHER)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ if (flags & ETH_RSS_FRAG_IPV6)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
+ if (flags & ETH_RSS_L2_PAYLOAD)
+ hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
+
+ return hena;
+}
+
+/* Parse the hash enable flags */
+uint64_t
+i40e_parse_hena(uint64_t flags)
+{
+ uint64_t rss_hf = 0;
+
+ if (!flags)
+ return rss_hf;
+
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ rss_hf |= ETH_RSS_NONF_IPV4_UDP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+ rss_hf |= ETH_RSS_NONF_IPV4_TCP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
+ rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
+ rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
+ rss_hf |= ETH_RSS_FRAG_IPV4;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ rss_hf |= ETH_RSS_NONF_IPV6_UDP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+ rss_hf |= ETH_RSS_NONF_IPV6_TCP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
+ rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
+ rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
+ rss_hf |= ETH_RSS_FRAG_IPV6;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+ rss_hf |= ETH_RSS_L2_PAYLOAD;
+
+ return rss_hf;
+}
+
+/* Disable RSS */
+static void
+i40e_pf_disable_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t hena;
+
+ hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
+ hena &= ~I40E_RSS_HENA_ALL;
+ I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+ I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+}
+
+static int
+i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
+{
+ uint32_t *hash_key;
+ uint8_t hash_key_len;
+ uint64_t rss_hf;
+ uint16_t i;
+ uint64_t hena;
+
+ hash_key = (uint32_t *)(rss_conf->rss_key);
+ hash_key_len = rss_conf->rss_key_len;
+ if (hash_key != NULL && hash_key_len >=
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Fill in RSS hash key */
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
+ }
+
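+ /* The 64-bit hash-enable (HENA) value is split across two 32-bit registers */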
+ rss_hf = rss_conf->rss_hf;
+ hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
+ hena &= ~I40E_RSS_HENA_ALL;
+ hena |= i40e_config_hena(rss_hf);
+ I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
+ I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
+ uint64_t hena;
+
+ hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
+ if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
+ if (rss_hf != 0) /* Enable RSS */
+ return -EINVAL;
+ return 0; /* Nothing to do */
+ }
+ /* RSS enabled */
+ if (rss_hf == 0) /* Disable RSS */
+ return -EINVAL;
+
+ return i40e_hw_rss_hash_set(hw, rss_conf);
+}
+
+static int
+i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
+ uint64_t hena;
+ uint16_t i;
+
+ if (hash_key != NULL) {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
+ rss_conf->rss_key_len = i * sizeof(uint32_t);
+ }
+ hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
+ rss_conf->rss_hf = i40e_parse_hena(hena);
+
+ return 0;
+}
+
+static int
+i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
+{
+ switch (filter_type) {
+ case RTE_TUNNEL_FILTER_IMAC_IVLAN:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+ break;
+ case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+ break;
+ case RTE_TUNNEL_FILTER_IMAC_TENID:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
+ break;
+ case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
+ break;
+ case ETH_TUNNEL_FILTER_IMAC:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid tunnel filter type");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add)
+{
+ uint16_t ip_type;
+ uint8_t tun_type = 0;
+ int val, ret = 0;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
+ struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+
+ cld_filter = rte_zmalloc("tunnel_filter",
+ sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
+ 0);
+
+ if (NULL == cld_filter) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -EINVAL;
+ }
+ pfilter = cld_filter;
+
+ (void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
+ sizeof(struct ether_addr));
+ (void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
+ sizeof(struct ether_addr));
+
+ pfilter->inner_vlan = tunnel_filter->inner_vlan;
+ if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+ (void)rte_memcpy(&pfilter->ipaddr.v4.data,
+ &tunnel_filter->ip_addr,
+ sizeof(pfilter->ipaddr.v4.data));
+ } else {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+ (void)rte_memcpy(&pfilter->ipaddr.v6.data,
+ &tunnel_filter->ip_addr,
+ sizeof(pfilter->ipaddr.v6.data));
+ }
+
+ /* check tunneled type */
+ switch (tunnel_filter->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
+ break;
+ default:
+ /* Other tunnel types are not supported. */
+ PMD_DRV_LOG(ERR, "tunnel type is not supported.");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+ &pfilter->flags);
+ if (val < 0) {
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
+ (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
+ pfilter->tenant_id = tunnel_filter->tenant_id;
+ pfilter->queue_number = tunnel_filter->queue_id;
+
+ if (add)
+ ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
+ else
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ cld_filter, 1);
+
+ rte_free(cld_filter);
+ return ret;
+}
+
+static int
+i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
+{
+ uint8_t i;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->vxlan_ports[i] == port)
+ return i;
+ }
+
+ return -1;
+}
+
+static int
+i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+ int idx, ret;
+ uint8_t filter_idx;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx >= 0) {
+ PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
+ return -EINVAL;
+ }
+
+ /* Now check if there is space to add the new port */
+ idx = i40e_get_vxlan_port_idx(pf, 0);
+ if (idx < 0) {
+ PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
+ "not adding port %d", port);
+ return -ENOSPC;
+ }
+
+ ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
+ &filter_idx, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
+ return -1;
+ }
+
+ PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
+ port, filter_idx);
+
+ /* New port: add it and mark its index in the bitmap */
+ pf->vxlan_ports[idx] = port;
+ pf->vxlan_bitmap |= (1 << idx);
+
+ if (!(pf->flags & I40E_FLAG_VXLAN))
+ pf->flags |= I40E_FLAG_VXLAN;
+
+ return 0;
+}
+
+static int
+i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
+{
+ int idx;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ if (!(pf->flags & I40E_FLAG_VXLAN)) {
+ PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
+ return -EINVAL;
+ }
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ if (idx < 0) {
+ PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
+ return -EINVAL;
+ }
+
+ if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
+ return -1;
+ }
+
+ PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
+ port, idx);
+
+ pf->vxlan_ports[idx] = 0;
+ pf->vxlan_bitmap &= ~(1 << idx);
+
+ if (!pf->vxlan_bitmap)
+ pf->flags &= ~I40E_FLAG_VXLAN;
+
+ return 0;
+}
+
+/* Add UDP tunneling port */
+static int
+i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ int ret = 0;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
+ break;
+
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+ ret = -1;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ int ret = 0;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+ ret = -1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Calculate the maximum number of contiguous PF queues that are configured */
+static int
+i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
+{
+ struct rte_eth_dev_data *data = pf->dev_data;
+ int i, num;
+ struct i40e_rx_queue *rxq;
+
+ num = 0;
+ for (i = 0; i < pf->lan_nb_qps; i++) {
+ rxq = data->rx_queues[i];
+ if (rxq && rxq->q_set)
+ num++;
+ else
+ break;
+ }
+
+ return num;
+}
+
+/* Configure RSS */
+static int
+i40e_pf_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_rss_conf rss_conf;
+ uint32_t i, lut = 0;
+ uint16_t j, num;
+
+ /*
+ * If both VMDQ and RSS are enabled, not all PF queues are configured.
+ * It's necessary to calculate the actual number of PF queues that are
+ * configured.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ num = i40e_pf_calc_configured_queues_num(pf);
+ num = i40e_align_floor(num);
+ } else
+ num = i40e_align_floor(pf->dev_data->nb_rx_queues);
+
+ PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
+ return -ENOTSUP;
+ }
+
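+ /* Fill the RSS lookup table, packing four queue indices into each 32-bit HLUT register */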
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (j & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
+ i40e_pf_disable_rss(pf);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Calculate the default hash key */
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ rss_key_default[i] = (uint32_t)rte_rand();
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+
+ return i40e_hw_rss_hash_set(hw, &rss_conf);
+}
+
+static int
+i40e_tunnel_filter_param_check(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ if (pf == NULL || filter == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid parameter");
+ return -EINVAL;
+ }
+
+ if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+
+ if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
+ PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
+ return -EINVAL;
+ }
+
+ if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
+ (is_zero_ether_addr(filter->outer_mac))) {
+ PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
+ return -EINVAL;
+ }
+
+ if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
+ (is_zero_ether_addr(filter->inner_mac))) {
+ PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct rte_eth_tunnel_filter_conf *filter;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = I40E_SUCCESS;
+
+ filter = (struct rte_eth_tunnel_filter_conf *)(arg);
+
+ if (i40e_tunnel_filter_param_check(pf, filter) < 0)
+ return I40E_ERR_PARAM;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ if (!(pf->flags & I40E_FLAG_VXLAN))
+ ret = I40E_NOT_SUPPORTED;
+ case RTE_ETH_FILTER_ADD:
+ ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ ret = I40E_ERR_PARAM;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+i40e_pf_config_mq_rx(struct i40e_pf *pf)
+{
+ int ret = 0;
+ enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
+
+ if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+ PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
+ return -ENOTSUP;
+ }
+
+ /* RSS setup */
+ if (mq_mode & ETH_MQ_RX_RSS_FLAG)
+ ret = i40e_pf_config_rss(pf);
+ else
+ i40e_pf_disable_rss(pf);
+
+ return ret;
+}
+
+/*
+ * Configure an ethertype filter, which can direct packets by filtering
+ * on mac address and ether_type, or on ether_type only
+ */
+static int
+i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret;
+
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+ " control packet filter.", filter->ether_type);
+ return -EINVAL;
+ }
+ if (filter->ether_type == ETHER_TYPE_VLAN)
+ PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
+ " not supported.");
+
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->mac_addr.addr_bytes,
+ filter->ether_type, flags,
+ pf->main_vsi->seid,
+ filter->queue, add, &stats, NULL);
+
+ PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u\n",
+ ret, stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+ if (ret < 0)
+ return -ENOSYS;
+ return 0;
+}
+
+/*
+ * Handle operations for ethertype filter.
+ */
+static int
+i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = 0;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return ret;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = i40e_ethertype_filter_set(pf,
+ (struct rte_eth_ethertype_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = i40e_ethertype_filter_set(pf,
+ (struct rte_eth_ethertype_filter *)arg,
+ FALSE);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ ret = -ENOSYS;
+ break;
+ }
+ return ret;
+}
+
+static int
+i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_MACVLAN:
+ ret = i40e_mac_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+enum i40e_filter_pctype
+i40e_flowtype_to_pctype(enum rte_eth_flow_type flow_type)
+{
+ static const enum i40e_filter_pctype pctype_table[] = {
+ [RTE_ETH_FLOW_TYPE_UDPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ [RTE_ETH_FLOW_TYPE_TCPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ [RTE_ETH_FLOW_TYPE_SCTPV4] = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ [RTE_ETH_FLOW_TYPE_IPV4_OTHER] =
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ [RTE_ETH_FLOW_TYPE_FRAG_IPV4] =
+ I40E_FILTER_PCTYPE_FRAG_IPV4,
+ [RTE_ETH_FLOW_TYPE_UDPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ [RTE_ETH_FLOW_TYPE_TCPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ [RTE_ETH_FLOW_TYPE_SCTPV6] = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ [RTE_ETH_FLOW_TYPE_IPV6_OTHER] =
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ [RTE_ETH_FLOW_TYPE_FRAG_IPV6] =
+ I40E_FILTER_PCTYPE_FRAG_IPV6,
+ };
+
+ return pctype_table[flow_type];
+}
+
+enum rte_eth_flow_type
+i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
+{
+ static const enum rte_eth_flow_type flowtype_table[] = {
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = RTE_ETH_FLOW_TYPE_UDPV4,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = RTE_ETH_FLOW_TYPE_TCPV4,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV4,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+ RTE_ETH_FLOW_TYPE_IPV4_OTHER,
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] =
+ RTE_ETH_FLOW_TYPE_FRAG_IPV4,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = RTE_ETH_FLOW_TYPE_UDPV6,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = RTE_ETH_FLOW_TYPE_TCPV6,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = RTE_ETH_FLOW_TYPE_SCTPV6,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+ RTE_ETH_FLOW_TYPE_IPV6_OTHER,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] =
+ RTE_ETH_FLOW_TYPE_FRAG_IPV6,
+ };
+
+ return flowtype_table[pctype];
+}
+
+static int
+i40e_debug_read_register(struct i40e_hw *hw, uint32_t addr, uint64_t *val)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+ desc.params.internal.param1 = rte_cpu_to_le_32(addr);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (status < 0)
+ return status;
+
+ *val = ((uint64_t)(rte_le_to_cpu_32(desc.params.internal.param2)) <<
+ (CHAR_BIT * sizeof(uint32_t))) +
+ rte_le_to_cpu_32(desc.params.internal.param3);
+
+ return status;
+}
+
+/*
+ * On X710, performance numbers are far below expectation on recent firmware
+ * versions. The fix for this issue may not be integrated into the next
+ * firmware release, so a workaround in the software driver is needed. It
+ * modifies the initial values of 3 internal-only registers. Note that the
+ * workaround can be removed once the issue is fixed in firmware.
+ */
+static void
+i40e_configure_registers(struct i40e_hw *hw)
+{
+#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
+#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
+#define I40E_GL_SWR_PM_UP_THR 0x269FBC
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
+#define I40E_GL_SWR_PM_UP_THR_VALUE 0x03030303
+
+ static const struct {
+ uint32_t addr;
+ uint64_t val;
+ } reg_table[] = {
+ {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
+ {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
+ {I40E_GL_SWR_PM_UP_THR, I40E_GL_SWR_PM_UP_THR_VALUE},
+ };
+ uint64_t reg;
+ uint32_t i;
+ int ret;
+
+ /* Below fix is for X710 only */
+ if (i40e_is_40G_device(hw->device_id))
+ return;
+
+ for (i = 0; i < RTE_DIM(reg_table); i++) {
+ ret = i40e_debug_read_register(hw, reg_table[i].addr, &reg);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
+ reg_table[i].addr);
+ break;
+ }
+ PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
+ reg_table[i].addr, reg);
+ if (reg == reg_table[i].val)
+ continue;
+
+ ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
+ reg_table[i].val, NULL);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
+ "address of 0x%"PRIx32, reg_table[i].val,
+ reg_table[i].addr);
+ break;
+ }
+ PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
+ "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
+ }
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.h b/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.h
new file mode 100755
index 00000000..f913ea96
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev.h
@@ -0,0 +1,567 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_ETHDEV_H_
+#define _I40E_ETHDEV_H_
+
+#include <rte_eth_ctrl.h>
+
+#define I40E_AQ_LEN 32
+#define I40E_AQ_BUF_SZ 4096
+/* Number of queues per TC should be one of 1, 2, 4, 8, 16, 32, 64 */
+#define I40E_MAX_Q_PER_TC 64
+#define I40E_NUM_DESC_DEFAULT 512
+#define I40E_NUM_DESC_ALIGN 32
+#define I40E_BUF_SIZE_MIN 1024
+#define I40E_FRAME_SIZE_MAX 9728
+#define I40E_QUEUE_BASE_ADDR_UNIT 128
+/* number of VSIs and queue default setting */
+#define I40E_MAX_QP_NUM_PER_VF 16
+#define I40E_DEFAULT_QP_NUM_FDIR 1
+#define I40E_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t))
+#define I40E_VFTA_SIZE (4096 / I40E_UINT32_BIT_SIZE)
+/*
+ * vlan_id is a 12-bit number.
+ * The VFTA array is actually a 4096-bit array of 128 32-bit elements.
+ * 2^5 = 32, so the lower 5 bits select the bit within a 32-bit element,
+ * and the upper 7 bits select the VFTA array index.
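+ * For example, vlan_id 100 maps to array index 3 (100 >> 5) and bit 4 (100 & 0x1F).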
+ */
+#define I40E_VFTA_BIT(vlan_id) (1 << ((vlan_id) & 0x1F))
+#define I40E_VFTA_IDX(vlan_id) ((vlan_id) >> 5)
+
+/* Default TC traffic in case DCB is not enabled */
+#define I40E_DEFAULT_TCMAP 0x1
+#define I40E_FDIR_QUEUE_ID 0
+
+/* Always assign pool 0 to main VSI, VMDQ will start from 1 */
+#define I40E_VMDQ_POOL_BASE 1
+
+#define I40E_DEFAULT_RX_FREE_THRESH 32
+#define I40E_DEFAULT_RX_PTHRESH 8
+#define I40E_DEFAULT_RX_HTHRESH 8
+#define I40E_DEFAULT_RX_WTHRESH 0
+
+#define I40E_DEFAULT_TX_FREE_THRESH 32
+#define I40E_DEFAULT_TX_PTHRESH 32
+#define I40E_DEFAULT_TX_HTHRESH 0
+#define I40E_DEFAULT_TX_WTHRESH 0
+#define I40E_DEFAULT_TX_RSBIT_THRESH 32
+
+/* Bit shift and mask */
+#define I40E_4_BIT_WIDTH (CHAR_BIT / 2)
+#define I40E_4_BIT_MASK RTE_LEN2MASK(I40E_4_BIT_WIDTH, uint8_t)
+#define I40E_8_BIT_WIDTH CHAR_BIT
+#define I40E_8_BIT_MASK UINT8_MAX
+#define I40E_16_BIT_WIDTH (CHAR_BIT * 2)
+#define I40E_16_BIT_MASK UINT16_MAX
+#define I40E_32_BIT_WIDTH (CHAR_BIT * 4)
+#define I40E_32_BIT_MASK UINT32_MAX
+#define I40E_48_BIT_WIDTH (CHAR_BIT * 6)
+#define I40E_48_BIT_MASK RTE_LEN2MASK(I40E_48_BIT_WIDTH, uint64_t)
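+/*
+ * For illustration: RTE_LEN2MASK(n, type) builds a mask of the n lowest
+ * bits, so I40E_48_BIT_MASK evaluates to 0x0000FFFFFFFFFFFF.
+ */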
+
+/* index flex payload per layer */
+enum i40e_flxpld_layer_idx {
+ I40E_FLXPLD_L2_IDX = 0,
+ I40E_FLXPLD_L3_IDX = 1,
+ I40E_FLXPLD_L4_IDX = 2,
+ I40E_MAX_FLXPLD_LAYER = 3,
+};
+#define I40E_MAX_FLXPLD_FIED 3 /* max number of flex payload fields */
+#define I40E_FDIR_BITMASK_NUM_WORD 2 /* max number of bitmask words */
+#define I40E_FDIR_MAX_FLEXWORD_NUM 8 /* max number of flexpayload words */
+#define I40E_FDIR_MAX_FLEX_LEN 16 /* len in bytes of flex payload */
+
+/* i40e flags */
+#define I40E_FLAG_RSS (1ULL << 0)
+#define I40E_FLAG_DCB (1ULL << 1)
+#define I40E_FLAG_VMDQ (1ULL << 2)
+#define I40E_FLAG_SRIOV (1ULL << 3)
+#define I40E_FLAG_HEADER_SPLIT_DISABLED (1ULL << 4)
+#define I40E_FLAG_HEADER_SPLIT_ENABLED (1ULL << 5)
+#define I40E_FLAG_FDIR (1ULL << 6)
+#define I40E_FLAG_VXLAN (1ULL << 7)
+#define I40E_FLAG_ALL (I40E_FLAG_RSS | \
+ I40E_FLAG_DCB | \
+ I40E_FLAG_VMDQ | \
+ I40E_FLAG_SRIOV | \
+ I40E_FLAG_HEADER_SPLIT_DISABLED | \
+ I40E_FLAG_HEADER_SPLIT_ENABLED | \
+ I40E_FLAG_FDIR | \
+ I40E_FLAG_VXLAN)
+
+#define I40E_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_NONF_IPV4_UDP | \
+ ETH_RSS_NONF_IPV4_TCP | \
+ ETH_RSS_NONF_IPV4_SCTP | \
+ ETH_RSS_NONF_IPV4_OTHER | \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONF_IPV6_UDP | \
+ ETH_RSS_NONF_IPV6_TCP | \
+ ETH_RSS_NONF_IPV6_SCTP | \
+ ETH_RSS_NONF_IPV6_OTHER | \
+ ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_L2_PAYLOAD)
+
+/* All bits of RSS hash enable */
+#define I40E_RSS_HENA_ALL ( \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ (1ULL << I40E_FILTER_PCTYPE_FCOE_OX) | \
+ (1ULL << I40E_FILTER_PCTYPE_FCOE_RX) | \
+ (1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \
+ (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+struct i40e_adapter;
+
+/**
+ * MAC filter structure
+ */
+struct i40e_mac_filter_info {
+ enum rte_mac_filter_type filter_type;
+ struct ether_addr mac_addr;
+};
+
+TAILQ_HEAD(i40e_mac_filter_list, i40e_mac_filter);
+
+/* MAC filter list structure */
+struct i40e_mac_filter {
+ TAILQ_ENTRY(i40e_mac_filter) next;
+ struct i40e_mac_filter_info mac_info;
+};
+
+TAILQ_HEAD(i40e_vsi_list_head, i40e_vsi_list);
+
+struct i40e_vsi;
+
+/* VSI list structure */
+struct i40e_vsi_list {
+ TAILQ_ENTRY(i40e_vsi_list) list;
+ struct i40e_vsi *vsi;
+};
+
+struct i40e_rx_queue;
+struct i40e_tx_queue;
+
+/* Structure that defines a VEB */
+struct i40e_veb {
+ struct i40e_vsi_list_head head;
+ struct i40e_vsi *associate_vsi; /* Associated VSI that owns the VEB */
+ uint16_t seid; /* The seid of VEB itself */
+ uint16_t uplink_seid; /* The uplink seid of this VEB */
+ uint16_t stats_idx;
+ struct i40e_eth_stats stats;
+};
+
+/* i40e MACVLAN filter structure */
+struct i40e_macvlan_filter {
+ struct ether_addr macaddr;
+ enum rte_mac_filter_type filter_type;
+ uint16_t vlan_id;
+};
+
+/*
+ * Structure that defines a VSI, associated with an adapter.
+ */
+struct i40e_vsi {
+ struct i40e_adapter *adapter; /* Backreference to associated adapter */
+ struct i40e_aqc_vsi_properties_data info; /* VSI properties */
+
+ struct i40e_eth_stats eth_stats_offset;
+ struct i40e_eth_stats eth_stats;
+ /*
+ * When the driver is loaded, only the default main VSI exists. When a
+ * new VSI needs to be added, the HW must know how the VSIs are
+ * organized. Moreover, a VSI is only an element and cannot switch
+ * packets itself, so a VEB component must be added to perform the
+ * switching. Therefore a new VSI must specify its uplink VSI (parent
+ * VSI) before it is created. The uplink VSI checks whether it already
+ * has a VEB to switch packets; if not, it tries to create one. The
+ * uplink VSI then moves the new VSI into its sib_vsi_list to manage
+ * all of its downlink VSIs.
+ * sib_vsi_list: the list of VSIs that share the same uplink VSI.
+ * parent_vsi : the uplink VSI. It is NULL for the main VSI.
+ * veb : the VEB associated with this VSI.
+ */
+ struct i40e_vsi_list sib_vsi_list; /* sibling vsi list */
+ struct i40e_vsi *parent_vsi;
+ struct i40e_veb *veb; /* Associated veb, could be null */
+ bool offset_loaded;
+ enum i40e_vsi_type type; /* VSI types */
+ uint16_t vlan_num; /* Total VLAN number */
+ uint16_t mac_num; /* Total mac number */
+ uint32_t vfta[I40E_VFTA_SIZE]; /* VLAN bitmap */
+ struct i40e_mac_filter_list mac_list; /* macvlan filter list */
+ /* specific VSI-defined parameters, SRIOV stored the vf_id */
+ uint32_t user_param;
+ uint16_t seid; /* The seid of VSI itself */
+ uint16_t uplink_seid; /* The uplink seid of this VSI */
+ uint16_t nb_qps; /* Number of queue pairs VSI can occupy */
+ uint16_t max_macaddrs; /* Maximum number of MAC addresses */
+ uint16_t base_queue; /* The first queue index of this VSI */
+ /*
+ * The offset to visit VSI related register, assigned by HW when
+ * creating VSI
+ */
+ uint16_t vsi_id;
+ uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
+ uint8_t enabled_tc; /* The traffic class enabled */
+};
+
+struct pool_entry {
+ LIST_ENTRY(pool_entry) next;
+ uint16_t base;
+ uint16_t len;
+};
+
+LIST_HEAD(res_list, pool_entry);
+
+struct i40e_res_pool_info {
+ uint32_t base; /* Resource start index */
+ uint32_t num_alloc; /* Allocated resource number */
+ uint32_t num_free; /* Total available resource number */
+ struct res_list alloc_list; /* Allocated resource list */
+ struct res_list free_list; /* Available resource list */
+};
+
+enum I40E_VF_STATE {
+ I40E_VF_INACTIVE = 0,
+ I40E_VF_INRESET,
+ I40E_VF_ININIT,
+ I40E_VF_ACTIVE,
+};
+
+/*
+ * Structure to store private data for PF host.
+ */
+struct i40e_pf_vf {
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ enum I40E_VF_STATE state; /* Activation state of the VF */
+ uint16_t vf_idx; /* VF index in pf->vfs */
+ uint16_t lan_nb_qps; /* Actual queues allocated */
+ uint16_t reset_cnt; /* Total vf reset times */
+};
+
+/*
+ * Structure to store private data for VMDQ instance
+ */
+struct i40e_vmdq_info {
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+};
+
+/*
+ * Structure to store the flex PIT for the flow director.
+ */
+struct i40e_fdir_flex_pit {
+ uint8_t src_offset; /* offset in words from the beginning of payload */
+ uint8_t size; /* size in words */
+ uint8_t dst_offset; /* offset in words of flexible payload */
+};
+
+struct i40e_fdir_flex_mask {
+ uint8_t word_mask; /**< Bit i enables word i of flexible payload */
+ struct {
+ uint8_t offset;
+ uint16_t mask;
+ } bitmask[I40E_FDIR_BITMASK_NUM_WORD];
+};
+
+#define I40E_FILTER_PCTYPE_MAX 64
+/*
+ * A structure used to define fields of a FDIR related info.
+ */
+struct i40e_fdir_info {
+ struct i40e_vsi *fdir_vsi; /* pointer to fdir VSI structure */
+ uint16_t match_counter_index; /* Statistic counter index used for fdir */
+ struct i40e_tx_queue *txq;
+ struct i40e_rx_queue *rxq;
+ void *prg_pkt; /* memory for fdir program packet */
+ uint64_t dma_addr; /* physical address of the packet memory */
+ /*
+ * Rules describing how the byte stream is extracted as flexible
+ * payload; for each payload layer the setting can hold up to
+ * three elements.
+ */
+ struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
+ struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+};
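+/*
+ * Note (illustrative, based on the sizes above): flex_set holds
+ * I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED entries, presumably one
+ * group of up to three flex PIT entries per payload layer (L2/L3/L4).
+ */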
+
+/*
+ * Structure to store private data specific for PF instance.
+ */
+struct i40e_pf {
+ struct i40e_adapter *adapter; /* The adapter this PF is associated with */
+ struct i40e_vsi *main_vsi; /* pointer to main VSI structure */
+ uint16_t mac_seid; /* The seid of the MAC of this PF */
+ uint16_t main_vsi_seid; /* The seid of the main VSI */
+ uint16_t max_num_vsi;
+ struct i40e_res_pool_info qp_pool; /* Queue pair pool */
+ struct i40e_res_pool_info msix_pool; /* MSIX interrupt pool */
+
+ struct i40e_hw_port_stats stats_offset;
+ struct i40e_hw_port_stats stats;
+ bool offset_loaded;
+
+ struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
+ struct ether_addr dev_addr; /* PF device mac address */
+ uint64_t flags; /* PF feature flags */
+ /* All kinds of queue pair setting for different VSIs */
+ struct i40e_pf_vf *vfs;
+ uint16_t vf_num;
+ /* Each of the queue-pair counts below must be a power of 2, since
+ * that is a precondition once the TC configuration is applied */
+ uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+ uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */
+ uint16_t vf_nb_qps; /* The number of queue pairs of VF */
+ uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+ uint16_t hash_lut_size; /* The size of hash lookup table */
+ /* store VXLAN UDP ports */
+ uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+ uint16_t vxlan_bitmap; /* Vxlan bit mask */
+
+ /* VMDQ related info */
+ uint16_t max_nb_vmdq_vsi; /* Max number of VMDQ VSIs supported */
+ uint16_t nb_cfg_vmdq_vsi; /* number of VMDQ VSIs configured */
+ struct i40e_vmdq_info *vmdq;
+
+ struct i40e_fdir_info fdir; /* flow director info */
+};
+
+enum pending_msg {
+ PFMSG_LINK_CHANGE = 0x1,
+ PFMSG_RESET_IMPENDING = 0x2,
+ PFMSG_DRIVER_CLOSE = 0x4,
+};
+
+struct i40e_vsi_vlan_pvid_info {
+ uint16_t on; /* Enable or disable pvid */
+ union {
+ uint16_t pvid; /* Valid when 'on' is set; the pvid to apply */
+ struct {
+ /* Valid in case 'on' is cleared. 'tagged' will reject tagged packets,
+ * while 'untagged' will reject untagged packets.
+ */
+ uint8_t tagged;
+ uint8_t untagged;
+ } reject;
+ } config;
+};
+
+struct i40e_vf_rx_queues {
+ uint64_t rx_dma_addr;
+ uint32_t rx_ring_len;
+ uint32_t buff_size;
+};
+
+struct i40e_vf_tx_queues {
+ uint64_t tx_dma_addr;
+ uint32_t tx_ring_len;
+};
+
+/*
+ * Structure to store private data specific for VF instance.
+ */
+struct i40e_vf {
+ struct i40e_adapter *adapter; /* The adapter this VF is associated with */
+ struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
+ uint16_t num_queue_pairs;
+ uint16_t max_pkt_len; /* Maximum packet length */
+ bool promisc_unicast_enabled;
+ bool promisc_multicast_enabled;
+
+ uint32_t version_major; /* Major version number */
+ uint32_t version_minor; /* Minor version number */
+ uint16_t promisc_flags; /* Promiscuous setting */
+ uint32_t vlan[I40E_VFTA_SIZE]; /* VLAN bit map */
+
+ /* Event from pf */
+ bool dev_closed;
+ bool link_up;
+ bool vf_reset;
+ volatile uint32_t pend_cmd; /* pending command not finished yet */
+ u16 pend_msg; /* flags indicating events from the PF not yet handled */
+
+ /* VSI info */
+ struct i40e_virtchnl_vf_resource *vf_res; /* All VSIs */
+ struct i40e_virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+ struct i40e_vsi vsi;
+};
+
+/*
+ * Structure to store private data for each PF/VF instance.
+ */
+struct i40e_adapter {
+ /* Common for both PF and VF */
+ struct i40e_hw hw;
+ struct rte_eth_dev *eth_dev;
+
+ /* Specific for PF or VF */
+ union {
+ struct i40e_pf pf;
+ struct i40e_vf vf;
+ };
+};
+
+int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
+int i40e_vsi_release(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
+ enum i40e_vsi_type type,
+ struct i40e_vsi *uplink_vsi,
+ uint16_t user_param);
+int i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on);
+int i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan);
+int i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan);
+int i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *filter);
+int i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr);
+void i40e_update_vsi_stats(struct i40e_vsi *vsi);
+void i40e_pf_disable_irq0(struct i40e_hw *hw);
+void i40e_pf_enable_irq0(struct i40e_hw *hw);
+int i40e_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
+void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi);
+void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi);
+int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
+ struct i40e_vsi_vlan_pvid_info *info);
+int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on);
+uint64_t i40e_config_hena(uint64_t flags);
+uint64_t i40e_parse_hena(uint64_t flags);
+enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf);
+enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf);
+int i40e_fdir_setup(struct i40e_pf *pf);
+const struct rte_memzone *i40e_memzone_reserve(const char *name,
+ uint32_t len,
+ int socket_id);
+int i40e_fdir_configure(struct rte_eth_dev *dev);
+void i40e_fdir_teardown(struct i40e_pf *pf);
+enum i40e_filter_pctype i40e_flowtype_to_pctype(
+ enum rte_eth_flow_type flow_type);
+enum rte_eth_flow_type i40e_pctype_to_flowtype(
+ enum i40e_filter_pctype pctype);
+int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+
+/* I40E_DEV_PRIVATE_TO */
+#define I40E_DEV_PRIVATE_TO_PF(adapter) \
+ (&((struct i40e_adapter *)adapter)->pf)
+#define I40E_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct i40e_adapter *)adapter)->hw)
+#define I40E_DEV_PRIVATE_TO_ADAPTER(adapter) \
+ ((struct i40e_adapter *)adapter)
+
+/* I40EVF_DEV_PRIVATE_TO */
+#define I40EVF_DEV_PRIVATE_TO_VF(adapter) \
+ (&((struct i40e_adapter *)adapter)->vf)
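+
+/*
+ * Illustrative usage of the conversion macros (sketch only; these exact
+ * calls appear throughout the VF driver below):
+ *   struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ *   struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ */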
+
+static inline struct i40e_vsi *
+i40e_get_vsi_from_adapter(struct i40e_adapter *adapter)
+{
+ struct i40e_hw *hw;
+
+ if (!adapter)
+ return NULL;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(adapter);
+ if (hw->mac.type == I40E_MAC_VF) {
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(adapter);
+ return &vf->vsi;
+ } else {
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(adapter);
+ return pf->main_vsi;
+ }
+}
+#define I40E_DEV_PRIVATE_TO_MAIN_VSI(adapter) \
+ i40e_get_vsi_from_adapter((struct i40e_adapter *)adapter)
+
+/* I40E_VSI_TO */
+#define I40E_VSI_TO_HW(vsi) \
+ (&(((struct i40e_vsi *)vsi)->adapter->hw))
+#define I40E_VSI_TO_PF(vsi) \
+ (&(((struct i40e_vsi *)vsi)->adapter->pf))
+#define I40E_VSI_TO_DEV_DATA(vsi) \
+ (((struct i40e_vsi *)vsi)->adapter->pf.dev_data)
+#define I40E_VSI_TO_ETH_DEV(vsi) \
+ (((struct i40e_vsi *)vsi)->adapter->eth_dev)
+
+/* I40E_PF_TO */
+#define I40E_PF_TO_HW(pf) \
+ (&(((struct i40e_pf *)pf)->adapter->hw))
+#define I40E_PF_TO_ADAPTER(pf) \
+ ((struct i40e_adapter *)pf->adapter)
+
+/* I40E_VF_TO */
+#define I40E_VF_TO_HW(vf) \
+ (&(((struct i40e_vf *)vf)->adapter->hw))
+
+static inline void
+i40e_init_adminq_parameter(struct i40e_hw *hw)
+{
+ hw->aq.num_arq_entries = I40E_AQ_LEN;
+ hw->aq.num_asq_entries = I40E_AQ_LEN;
+ hw->aq.arq_buf_size = I40E_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = I40E_AQ_BUF_SZ;
+}
+
+#define I40E_VALID_FLOW_TYPE(flow_type) \
+ ((flow_type) == RTE_ETH_FLOW_TYPE_UDPV4 || \
+ (flow_type) == RTE_ETH_FLOW_TYPE_TCPV4 || \
+ (flow_type) == RTE_ETH_FLOW_TYPE_SCTPV4 || \
+ (flow_type) == RTE_ETH_FLOW_TYPE_IPV4_OTHER || \
+ (flow_type) == RTE_ETH_FLOW_TYPE_FRAG_IPV4 || \
+ (flow_type) == RTE_ETH_FLOW_TYPE_UDPV6 || \
+ (flow_type) == RTE_ETH_FLOW_TYPE_TCPV6 || \
+ (flow_type) == RTE_ETH_FLOW_TYPE_SCTPV6 || \
+ (flow_type) == RTE_ETH_FLOW_TYPE_IPV6_OTHER || \
+ (flow_type) == RTE_ETH_FLOW_TYPE_FRAG_IPV6)
+
+#define I40E_VALID_PCTYPE(pctype) \
+ ((pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6)
+
+#endif /* _I40E_ETHDEV_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev_vf.c b/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev_vf.c
new file mode 100755
index 00000000..fe46cf12
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e_ethdev_vf.c
@@ -0,0 +1,1897 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "i40e_logs.h"
+#include "i40e/i40e_prototype.h"
+#include "i40e/i40e_adminq_cmd.h"
+#include "i40e/i40e_type.h"
+
+#include "i40e_rxtx.h"
+#include "i40e_ethdev.h"
+#include "i40e_pf.h"
+#define I40EVF_VSI_DEFAULT_MSIX_INTR 1
+
+/* busy wait delay in msec */
+#define I40EVF_BUSY_WAIT_DELAY 10
+#define I40EVF_BUSY_WAIT_COUNT 50
+#define MAX_RESET_WAIT_CNT 20
+
+struct i40evf_arq_msg_info {
+ enum i40e_virtchnl_ops ops;
+ enum i40e_status_code result;
+ uint16_t buf_len;
+ uint16_t msg_len;
+ uint8_t *msg;
+};
+
+struct vf_cmd_info {
+ enum i40e_virtchnl_ops ops;
+ uint8_t *in_args;
+ uint32_t in_args_size;
+ uint8_t *out_buffer;
+ /* Input & output: pass in the buffer size; the actual returned
+ * result is passed back out in out_buffer.
+ */
+ uint32_t out_size;
+};
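+
+/*
+ * Typical command flow (descriptive sketch of the code below): fill a
+ * struct vf_cmd_info with the virtchnl opcode, the input arguments and the
+ * shared result buffer, then call i40evf_execute_vf_cmd(), which sends the
+ * message to the PF over the admin queue and polls for the matching reply.
+ */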
+
+enum i40evf_aq_result {
+ I40EVF_MSG_ERR = -1, /* Error encountered while accessing the admin queue */
+ I40EVF_MSG_NON, /* Read nothing from admin queue */
+ I40EVF_MSG_SYS, /* Read system msg from admin queue */
+ I40EVF_MSG_CMD, /* Read async command result */
+};
+
+/* A shared buffer to store the command result from the PF driver */
+static uint8_t cmd_result_buffer[I40E_AQ_BUF_SZ];
+
+static int i40evf_dev_configure(struct rte_eth_dev *dev);
+static int i40evf_dev_start(struct rte_eth_dev *dev);
+static void i40evf_dev_stop(struct rte_eth_dev *dev);
+static void i40evf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int i40evf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
+static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
+ int on);
+static void i40evf_dev_close(struct rte_eth_dev *dev);
+static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int i40evf_get_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link);
+static int i40evf_init_vlan(struct rte_eth_dev *dev);
+static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id);
+static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id);
+static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int i40evf_config_rss(struct i40e_vf *vf);
+static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+/* Default hash key buffer for RSS */
+static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
+
+static struct eth_dev_ops i40evf_eth_dev_ops = {
+ .dev_configure = i40evf_dev_configure,
+ .dev_start = i40evf_dev_start,
+ .dev_stop = i40evf_dev_stop,
+ .promiscuous_enable = i40evf_dev_promiscuous_enable,
+ .promiscuous_disable = i40evf_dev_promiscuous_disable,
+ .allmulticast_enable = i40evf_dev_allmulticast_enable,
+ .allmulticast_disable = i40evf_dev_allmulticast_disable,
+ .link_update = i40evf_dev_link_update,
+ .stats_get = i40evf_dev_stats_get,
+ .dev_close = i40evf_dev_close,
+ .dev_infos_get = i40evf_dev_info_get,
+ .vlan_filter_set = i40evf_vlan_filter_set,
+ .vlan_offload_set = i40evf_vlan_offload_set,
+ .vlan_pvid_set = i40evf_vlan_pvid_set,
+ .rx_queue_start = i40evf_dev_rx_queue_start,
+ .rx_queue_stop = i40evf_dev_rx_queue_stop,
+ .tx_queue_start = i40evf_dev_tx_queue_start,
+ .tx_queue_stop = i40evf_dev_tx_queue_stop,
+ .rx_queue_setup = i40e_dev_rx_queue_setup,
+ .rx_queue_release = i40e_dev_rx_queue_release,
+ .tx_queue_setup = i40e_dev_tx_queue_setup,
+ .tx_queue_release = i40e_dev_tx_queue_release,
+ .reta_update = i40evf_dev_rss_reta_update,
+ .reta_query = i40evf_dev_rss_reta_query,
+ .rss_hash_update = i40evf_dev_rss_hash_update,
+ .rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
+};
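+
+/*
+ * Note: these callbacks are invoked indirectly through the generic
+ * rte_ethdev API; for example rte_eth_dev_start() ends up calling the
+ * .dev_start handler (i40evf_dev_start) registered above.
+ */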
+
+static int
+i40evf_set_mac_type(struct i40e_hw *hw)
+{
+ int status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+
+ if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
+ hw->mac.type = I40E_MAC_VF;
+ status = I40E_SUCCESS;
+ break;
+ default:
+ ;
+ }
+ }
+
+ return status;
+}
+
+/*
+ * Parse admin queue message.
+ *
+ * return value:
+ * < 0: an error was encountered
+ * 0: a system message was read
+ * > 0: a command result was read
+ */
+static enum i40evf_aq_result
+i40evf_parse_pfmsg(struct i40e_vf *vf,
+ struct i40e_arq_event_info *event,
+ struct i40evf_arq_msg_info *data)
+{
+ enum i40e_virtchnl_ops opcode = (enum i40e_virtchnl_ops)\
+ rte_le_to_cpu_32(event->desc.cookie_high);
+ enum i40e_status_code retval = (enum i40e_status_code)\
+ rte_le_to_cpu_32(event->desc.cookie_low);
+ enum i40evf_aq_result ret = I40EVF_MSG_CMD;
+
+ /* pf sys event */
+ if (opcode == I40E_VIRTCHNL_OP_EVENT) {
+ struct i40e_virtchnl_pf_event *vpe =
+ (struct i40e_virtchnl_pf_event *)event->msg_buf;
+
+ /* Initialize ret to sys event */
+ ret = I40EVF_MSG_SYS;
+ switch (vpe->event) {
+ case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ vf->link_up =
+ vpe->event_data.link_event.link_status;
+ vf->pend_msg |= PFMSG_LINK_CHANGE;
+ PMD_DRV_LOG(INFO, "Link status update:%s",
+ vf->link_up ? "up" : "down");
+ break;
+ case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ vf->vf_reset = true;
+ vf->pend_msg |= PFMSG_RESET_IMPENDING;
+ PMD_DRV_LOG(INFO, "vf is reseting");
+ break;
+ case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ vf->dev_closed = true;
+ vf->pend_msg |= PFMSG_DRIVER_CLOSE;
+ PMD_DRV_LOG(INFO, "PF driver closed");
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
+ __func__, vpe->event);
+ }
+ } else {
+ /* async reply msg on command issued by vf previously */
+ ret = I40EVF_MSG_CMD;
+ /* Actual data length read from PF */
+ data->msg_len = event->msg_len;
+ }
+ /* fill the ops and result to notify VF */
+ data->result = retval;
+ data->ops = opcode;
+
+ return ret;
+}
+
+/*
+ * Read data in admin queue to get msg from pf driver
+ */
+static enum i40evf_aq_result
+i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_arq_event_info event;
+ int ret;
+ enum i40evf_aq_result result = I40EVF_MSG_NON;
+
+ event.buf_len = data->buf_len;
+ event.msg_buf = data->msg;
+ ret = i40e_clean_arq_element(hw, &event, NULL);
+ /* Can't read any msg from adminQ */
+ if (ret) {
+ if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ result = I40EVF_MSG_NON;
+ else
+ result = I40EVF_MSG_ERR;
+ return result;
+ }
+
+ /* Parse the event */
+ result = i40evf_parse_pfmsg(vf, &event, data);
+
+ return result;
+}
+
+/*
+ * Poll the admin queue until a command result is returned from the PF driver or an error occurs.
+ */
+static int
+i40evf_wait_cmd_done(struct rte_eth_dev *dev,
+ struct i40evf_arq_msg_info *data)
+{
+ int i = 0;
+ enum i40evf_aq_result ret;
+
+#define MAX_TRY_TIMES 10
+#define ASQ_DELAY_MS 50
+ do {
+ /* Delay some time first */
+ rte_delay_ms(ASQ_DELAY_MS);
+ ret = i40evf_read_pfmsg(dev, data);
+ if (ret == I40EVF_MSG_CMD)
+ return 0;
+ else if (ret == I40EVF_MSG_ERR)
+ return -1;
+
+ /* If don't read msg or read sys event, continue */
+ } while(i++ < MAX_TRY_TIMES);
+
+ return -1;
+}
+
+/**
+ * Clear the current command. Only call this after
+ * _atomic_set_cmd() has succeeded.
+ */
+static inline void
+_clear_cmd(struct i40e_vf *vf)
+{
+ rte_wmb();
+ vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
+}
+
+/*
+ * Check whether a command is still pending. If not, set the new command.
+ */
+static inline int
+_atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
+{
+ int ret = rte_atomic32_cmpset(&vf->pend_cmd,
+ I40E_VIRTCHNL_OP_UNKNOWN, ops);
+
+ if (!ret)
+ PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+ return !ret;
+}
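+
+/*
+ * Descriptive note: pend_cmd acts as a single-slot lock. _atomic_set_cmd()
+ * claims it with a compare-and-set against I40E_VIRTCHNL_OP_UNKNOWN, and
+ * _clear_cmd() releases it once the matching reply has been read.
+ */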
+
+static int
+i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err = -1;
+ struct i40evf_arq_msg_info info;
+
+ if (_atomic_set_cmd(vf, args->ops))
+ return -1;
+
+ info.msg = args->out_buffer;
+ info.buf_len = args->out_size;
+ info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
+ info.result = I40E_SUCCESS;
+
+ err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
+ args->in_args, args->in_args_size, NULL);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
+ return err;
+ }
+
+ err = i40evf_wait_cmd_done(dev, &info);
+ /* a message was read and it is the expected one */
+ if (!err && args->ops == info.ops)
+ _clear_cmd(vf);
+ else if (err)
+ PMD_DRV_LOG(ERR, "Failed to read message from AdminQ");
+ else if (args->ops != info.ops)
+ PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
+ args->ops, info.ops);
+
+ return (err | info.result);
+}
+
+/*
+ * Check the API version, waiting synchronously until the version is read from the admin queue or an error occurs.
+ */
+static int
+i40evf_check_api_version(struct rte_eth_dev *dev)
+{
+ struct i40e_virtchnl_version_info version, *pver;
+ int err;
+ struct vf_cmd_info args;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ version.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ version.minor = I40E_VIRTCHNL_VERSION_MINOR;
+
+ args.ops = I40E_VIRTCHNL_OP_VERSION;
+ args.in_args = (uint8_t *)&version;
+ args.in_args_size = sizeof(version);
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
+ return err;
+ }
+
+ pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
+ vf->version_major = pver->major;
+ vf->version_minor = pver->minor;
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
+ else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor == I40E_VIRTCHNL_VERSION_MINOR))
+ PMD_DRV_LOG(INFO, "Peer is Linux PF host");
+ else {
+ PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
+ vf->version_major, vf->version_minor,
+ I40E_VIRTCHNL_VERSION_MAJOR,
+ I40E_VIRTCHNL_VERSION_MINOR);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_get_vf_resource(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+ struct vf_cmd_info args;
+ uint32_t len;
+
+ args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
+ return err;
+ }
+
+ len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+
+ (void)rte_memcpy(vf->vf_res, args.out_buffer,
+ RTE_MIN(args.out_size, len));
+ i40e_vf_parse_hw_config(hw, vf->vf_res);
+
+ return 0;
+}
+
+static int
+i40evf_config_promisc(struct rte_eth_dev *dev,
+ bool enable_unicast,
+ bool enable_multicast)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+ struct vf_cmd_info args;
+ struct i40e_virtchnl_promisc_info promisc;
+
+ promisc.flags = 0;
+ promisc.vsi_id = vf->vsi_res->vsi_id;
+
+ if (enable_unicast)
+ promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
+
+ if (enable_multicast)
+ promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
+
+ args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ args.in_args = (uint8_t *)&promisc;
+ args.in_args_size = sizeof(promisc);
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command "
+ "CONFIG_PROMISCUOUS_MODE");
+ return err;
+}
+
+/* Configure VLAN and double VLAN offload. A flag specifies which part to configure. */
+static int
+i40evf_config_vlan_offload(struct rte_eth_dev *dev,
+ bool enable_vlan_strip)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+ struct vf_cmd_info args;
+ struct i40e_virtchnl_vlan_offload_info offload;
+
+ offload.vsi_id = vf->vsi_res->vsi_id;
+ offload.enable_vlan_strip = enable_vlan_strip;
+
+ args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
+ args.in_args = (uint8_t *)&offload;
+ args.in_args_size = sizeof(offload);
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
+
+ return err;
+}
+
+static int
+i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
+ struct i40e_vsi_vlan_pvid_info *info)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+ struct vf_cmd_info args;
+ struct i40e_virtchnl_pvid_info tpid_info;
+
+ if (dev == NULL || info == NULL) {
+ PMD_DRV_LOG(ERR, "invalid parameters");
+ return I40E_ERR_PARAM;
+ }
+
+ memset(&tpid_info, 0, sizeof(tpid_info));
+ tpid_info.vsi_id = vf->vsi_res->vsi_id;
+ (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
+
+ args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
+ args.in_args = (uint8_t *)&tpid_info;
+ args.in_args_size = sizeof(tpid_info);
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
+
+ return err;
+}
+
+static void
+i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
+ uint16_t vsi_id,
+ uint16_t queue_id,
+ uint16_t nb_txq,
+ struct i40e_tx_queue *txq)
+{
+ txq_info->vsi_id = vsi_id;
+ txq_info->queue_id = queue_id;
+ if (queue_id < nb_txq) {
+ txq_info->ring_len = txq->nb_tx_desc;
+ txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
+ }
+}
+
+static void
+i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
+ uint16_t vsi_id,
+ uint16_t queue_id,
+ uint16_t nb_rxq,
+ uint32_t max_pkt_size,
+ struct i40e_rx_queue *rxq)
+{
+ rxq_info->vsi_id = vsi_id;
+ rxq_info->queue_id = queue_id;
+ rxq_info->max_pkt_size = max_pkt_size;
+ if (queue_id < nb_rxq) {
+ struct rte_pktmbuf_pool_private *mbp_priv;
+
+ rxq_info->ring_len = rxq->nb_rx_desc;
+ rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
+ mbp_priv = rte_mempool_get_priv(rxq->mp);
+ rxq_info->databuffer_size =
+ mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ }
+}
+
+/* Configure the VSI queues to work with a Linux PF host */
+static int
+i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_rx_queue **rxq =
+ (struct i40e_rx_queue **)dev->data->rx_queues;
+ struct i40e_tx_queue **txq =
+ (struct i40e_tx_queue **)dev->data->tx_queues;
+ struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
+ struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ struct vf_cmd_info args;
+ uint16_t i, nb_qp = vf->num_queue_pairs;
+ const uint32_t size =
+ I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
+ uint8_t buff[size];
+ int ret;
+
+ memset(buff, 0, sizeof(buff));
+ vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
+ vc_vqci->vsi_id = vf->vsi_res->vsi_id;
+ vc_vqci->num_queue_pairs = nb_qp;
+
+ for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
+ i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
+ vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+ i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
+ vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
+ vf->max_pkt_len, rxq[i]);
+ }
+ memset(&args, 0, sizeof(args));
+ args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ args.in_args = (uint8_t *)vc_vqci;
+ args.in_args_size = size;
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
+
+ return ret;
+}
+
+/* Configure the VSI queues to work with a DPDK PF host */
+static int
+i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_rx_queue **rxq =
+ (struct i40e_rx_queue **)dev->data->rx_queues;
+ struct i40e_tx_queue **txq =
+ (struct i40e_tx_queue **)dev->data->tx_queues;
+ struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
+ struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
+ struct vf_cmd_info args;
+ uint16_t i, nb_qp = vf->num_queue_pairs;
+ const uint32_t size =
+ I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
+ uint8_t buff[size];
+ int ret;
+
+ memset(buff, 0, sizeof(buff));
+ vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
+ vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
+ vc_vqcei->num_queue_pairs = nb_qp;
+ vc_qpei = vc_vqcei->qpair;
+ for (i = 0; i < nb_qp; i++, vc_qpei++) {
+ i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
+ vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+ i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
+ vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
+ vf->max_pkt_len, rxq[i]);
+ if (i < dev->data->nb_rx_queues)
+ /*
+ * It adds extra info for configuring VSI queues, which
+ * is needed to enable the configurable crc stripping
+ * in VF.
+ */
+ vc_qpei->rxq_ext.crcstrip =
+ dev->data->dev_conf.rxmode.hw_strip_crc;
+ }
+ memset(&args, 0, sizeof(args));
+ args.ops =
+ (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
+ args.in_args = (uint8_t *)vc_vqcei;
+ args.in_args_size = size;
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");
+
+ return ret;
+}
+
+static int
+i40evf_configure_queues(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ /* To support DPDK PF host */
+ return i40evf_configure_vsi_queues_ext(dev);
+ else
+ /* To support Linux PF host */
+ return i40evf_configure_vsi_queues(dev);
+}
+
+static int
+i40evf_config_irq_map(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct vf_cmd_info args;
+ uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
+ sizeof(struct i40e_virtchnl_vector_map)];
+ struct i40e_virtchnl_irq_map_info *map_info;
+ int i, err;
+ map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
+ map_info->num_vectors = 1;
+ map_info->vecmap[0].rxitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
+ map_info->vecmap[0].txitr_idx = RTE_LIBRTE_I40E_ITR_INTERVAL / 2;
+ map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
+ /* Always use the default dynamic MSIX interrupt */
+ map_info->vecmap[0].vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
+ /* Don't map any tx queue */
+ map_info->vecmap[0].txq_map = 0;
+ map_info->vecmap[0].rxq_map = 0;
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ map_info->vecmap[0].rxq_map |= 1 << i;
+
+ args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ args.in_args = (u8 *)cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
+
+ return err;
+}
+
+static int
+i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
+ bool on)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_virtchnl_queue_select queue_select;
+ int err;
+ struct vf_cmd_info args;
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+
+ if (isrx)
+ queue_select.rx_queues |= 1 << qid;
+ else
+ queue_select.tx_queues |= 1 << qid;
+
+ if (on)
+ args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ else
+ args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
+ isrx ? "RX" : "TX", qid, on ? "on" : "off");
+
+ return err;
+}
+
+static int
+i40evf_start_queues(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *dev_data = dev->data;
+ int i;
+ struct i40e_rx_queue *rxq;
+ struct i40e_tx_queue *txq;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev_data->rx_queues[i];
+ if (rxq->rx_deferred_start)
+ continue;
+ if (i40evf_dev_rx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev_data->tx_queues[i];
+ if (txq->tx_deferred_start)
+ continue;
+ if (i40evf_dev_tx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40evf_stop_queues(struct rte_eth_dev *dev)
+{
+ int i;
+
+ /* Stop TX queues first */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ /* Then stop RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct i40e_virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
+ sizeof(struct i40e_virtchnl_ether_addr)];
+ int err;
+ struct vf_cmd_info args;
+
+ if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+ return -1;
+ }
+
+ list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = 1;
+ (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+
+ args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command "
+ "OP_ADD_ETHER_ADDRESS");
+
+ return err;
+}
+
+static int
+i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct i40e_virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
+ sizeof(struct i40e_virtchnl_ether_addr)];
+ int err;
+ struct vf_cmd_info args;
+
+ if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+ return -1;
+ }
+
+ list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = 1;
+ (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+
+ args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command "
+ "OP_DEL_ETHER_ADDRESS");
+
+ return err;
+}
+
+static int
+i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_virtchnl_queue_select q_stats;
+ struct i40e_eth_stats *pstats;
+ int err;
+ struct vf_cmd_info args;
+
+ memset(&q_stats, 0, sizeof(q_stats));
+ q_stats.vsi_id = vf->vsi_res->vsi_id;
+ args.ops = I40E_VIRTCHNL_OP_GET_STATS;
+ args.in_args = (u8 *)&q_stats;
+ args.in_args_size = sizeof(q_stats);
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
+ return err;
+ }
+ pstats = (struct i40e_eth_stats *)args.out_buffer;
+ stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
+ pstats->rx_broadcast;
+ stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
+ pstats->tx_unicast;
+ stats->ierrors = pstats->rx_discards;
+ stats->oerrors = pstats->tx_errors + pstats->tx_discards;
+ stats->ibytes = pstats->rx_bytes;
+ stats->obytes = pstats->tx_bytes;
+
+ return 0;
+}
+
+static int
+i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ sizeof(uint16_t)];
+ int err;
+ struct vf_cmd_info args;
+
+ vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list->vsi_id = vf->vsi_res->vsi_id;
+ vlan_list->num_elements = 1;
+ vlan_list->vlan_id[0] = vlanid;
+
+ args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
+ args.in_args = (u8 *)&cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
+
+ return err;
+}
+
+static int
+i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_virtchnl_vlan_filter_list *vlan_list;
+ uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ sizeof(uint16_t)];
+ int err;
+ struct vf_cmd_info args;
+
+ vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list->vsi_id = vf->vsi_res->vsi_id;
+ vlan_list->num_elements = 1;
+ vlan_list->vlan_id[0] = vlanid;
+
+ args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
+ args.in_args = (u8 *)&cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
+
+ return err;
+}
+
+static int
+i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
+{
+ int err;
+ struct vf_cmd_info args;
+ struct rte_eth_link *new_link;
+
+ args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
+ return err;
+ }
+
+ new_link = (struct rte_eth_link *)args.out_buffer;
+ (void)rte_memcpy(link, new_link, sizeof(*link));
+
+ return 0;
+}
+
+static struct rte_pci_id pci_id_i40evf_map[] = {
+#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+{ .vendor_id = 0, /* sentinel */ },
+};
+
+static inline int
+i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+i40evf_reset_vf(struct i40e_hw *hw)
+{
+ int i, reset;
+
+ if (i40e_vf_reset(hw) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Reset VF NIC failed");
+ return -1;
+ }
+ /**
+ * After the VF issues a reset command to the PF, the PF does not
+ * necessarily reset the VF; it depends on the state the VF is in.
+ * If the VF is not yet initialized, no reset is performed;
+ * otherwise the PF tries to reset it. Even when the VF is reset,
+ * the PF first sets I40E_VFGEN_RSTAT to COMPLETE, waits about 10 ms
+ * and then sets it to ACTIVE. During that window the VF may miss
+ * the moment when COMPLETE is set, so the VF waits a fairly long
+ * time here before polling.
+ */
+ rte_delay_ms(200);
+
+ for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
+ reset = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
+ if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
+ break;
+ else
+ rte_delay_ms(50);
+ }
+
+ if (i >= MAX_RESET_WAIT_CNT) {
+ PMD_INIT_LOG(ERR, "Reset VF NIC failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_init_vf(struct rte_eth_dev *dev)
+{
+ int i, err, bufsz;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ vf->dev_data = dev->data;
+ err = i40evf_set_mac_type(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
+ goto err;
+ }
+
+ i40e_init_adminq_parameter(hw);
+ err = i40e_init_adminq(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
+ goto err;
+ }
+
+
+ /* Reset VF and wait until it's complete */
+ if (i40evf_reset_vf(hw)) {
+ PMD_INIT_LOG(ERR, "reset NIC failed");
+ goto err_aq;
+ }
+
+ /* VF reset, shutdown admin queue and initialize again */
+ if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
+ return -1;
+ }
+
+ i40e_init_adminq_parameter(hw);
+ if (i40e_init_adminq(hw) != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "init_adminq failed");
+ return -1;
+ }
+ if (i40evf_check_api_version(dev) != 0) {
+ PMD_INIT_LOG(ERR, "check_api version failed");
+ goto err_aq;
+ }
+ bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
+ vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
+ if (!vf->vf_res) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
+ goto err_aq;
+ }
+
+ if (i40evf_get_vf_resource(dev) != 0) {
+ PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
+ goto err_alloc;
+ }
+
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < vf->vf_res->num_vsis; i++) {
+ if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ vf->vsi_res = &vf->vf_res->vsi_res[i];
+ }
+
+ if (!vf->vsi_res) {
+ PMD_INIT_LOG(ERR, "no LAN VSI found");
+ goto err_alloc;
+ }
+
+ vf->vsi.vsi_id = vf->vsi_res->vsi_id;
+ vf->vsi.type = vf->vsi_res->vsi_type;
+ vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
+
+ /* Check the MAC address; if it is not valid, generate a random one */
+ if (I40E_SUCCESS != i40e_validate_mac_addr(\
+ vf->vsi_res->default_mac_addr))
+ eth_random_addr(vf->vsi_res->default_mac_addr);
+
+ ether_addr_copy((struct ether_addr *)vf->vsi_res->default_mac_addr,
+ (struct ether_addr *)hw->mac.addr);
+
+ return 0;
+
+err_alloc:
+ rte_free(vf->vf_res);
+err_aq:
+ i40e_shutdown_adminq(hw); /* ignore error */
+err:
+ return -1;
+}
+
+static int
+i40evf_dev_init(__rte_unused struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
+ eth_dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* assign ops func pointer */
+ eth_dev->dev_ops = &i40evf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &i40e_recv_pkts;
+ eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ return 0;
+ }
+
+ hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
+ hw->device_id = eth_dev->pci_dev->id.device_id;
+ hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
+ hw->bus.device = eth_dev->pci_dev->addr.devid;
+ hw->bus.func = eth_dev->pci_dev->addr.function;
+ hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+
+ if (i40evf_init_vf(eth_dev) != 0) {
+ PMD_INIT_LOG(ERR, "Init vf failed");
+ return -1;
+ }
+
+ /* copy mac addr */
+ eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
+ ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+ "store MAC addresses", ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
+ ether_addr_copy((struct ether_addr *)hw->mac.addr,
+ (struct ether_addr *)eth_dev->data->mac_addrs);
+
+ return 0;
+}
+
+/*
+ * virtual function driver struct
+ */
+static struct eth_driver rte_i40evf_pmd = {
+ {
+ .name = "rte_i40evf_pmd",
+ .id_table = pci_id_i40evf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ },
+ .eth_dev_init = i40evf_dev_init,
+ .dev_private_size = sizeof(struct i40e_vf),
+};
+
+/*
+ * VF driver initialization routine.
+ * Invoked once at EAL init time.
+ * Registers itself as the [Virtual Poll Mode] Driver for PCI Fortville devices.
+ */
+static int
+rte_i40evf_pmd_init(const char *name __rte_unused,
+ const char *params __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eth_driver_register(&rte_i40evf_pmd);
+
+ return 0;
+}
+
+static struct rte_driver rte_i40evf_driver = {
+ .type = PMD_PDEV,
+ .init = rte_i40evf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_i40evf_driver);
+
+static int
+i40evf_dev_configure(struct rte_eth_dev *dev)
+{
+ return i40evf_init_vlan(dev);
+}
+
+static int
+i40evf_init_vlan(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ int ret;
+
+ /* Apply vlan offload setting */
+ i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+
+ /* Apply pvid setting */
+ ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
+ data->dev_conf.txmode.hw_vlan_insert_pvid);
+ return ret;
+}
+
+static void
+i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ bool enable_vlan_strip = 0;
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ /* Linux pf host doesn't support vlan offload yet */
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
+ /* Vlan stripping setting */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (dev_conf->rxmode.hw_vlan_strip)
+ enable_vlan_strip = 1;
+ else
+ enable_vlan_strip = 0;
+
+ i40evf_config_vlan_offload(dev, enable_vlan_strip);
+ }
+ }
+}
+
+static int
+i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
+{
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct i40e_vsi_vlan_pvid_info info;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ memset(&info, 0, sizeof(info));
+ info.on = on;
+
+ /* The Linux PF host doesn't support vlan offload yet */
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
+ if (info.on)
+ info.config.pvid = pvid;
+ else {
+ info.config.reject.tagged =
+ dev_conf->txmode.hw_vlan_reject_tagged;
+ info.config.reject.untagged =
+ dev_conf->txmode.hw_vlan_reject_untagged;
+ }
+ return i40evf_config_vlan_pvid(dev, &info);
+ }
+
+ return 0;
+}
+
+static int
+i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err = 0;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ }
+
+ return err;
+}
+
+static int
+i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err;
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ }
+
+ return 0;
+}
+
+static int
+i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ }
+
+ return err;
+}
+
+static int
+i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct i40e_tx_queue *txq;
+ int err;
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
+ tx_queue_id);
+ return err;
+ }
+
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ }
+
+ return 0;
+}
+
+static int
+i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ int ret;
+
+ if (on)
+ ret = i40evf_add_vlan(dev, vlan_id);
+ else
+ ret = i40evf_del_vlan(dev, vlan_id);
+
+ return ret;
+}
+
+static int
+i40evf_rx_init(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint16_t i;
+ struct i40e_rx_queue **rxq =
+ (struct i40e_rx_queue **)dev->data->rx_queues;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40evf_config_rss(vf);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq[i]->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(i);
+ I40E_PCI_REG_WRITE(rxq[i]->qrx_tail, rxq[i]->nb_rx_desc - 1);
+ }
+
+ /* Flush the operation to write registers */
+ I40EVF_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static void
+i40evf_tx_init(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct i40e_tx_queue **txq =
+ (struct i40e_tx_queue **)dev->data->tx_queues;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+}
+
+static inline void
+i40evf_enable_queues_intr(struct i40e_hw *hw)
+{
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
+ I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+}
+
+static inline void
+i40evf_disable_queues_intr(struct i40e_hw *hw)
+{
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
+ 0);
+}
+
+static int
+i40evf_dev_start(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ether_addr mac_addr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (vf->max_pkt_len <= ETHER_MAX_LEN ||
+ vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must "
+ "be larger than %u and smaller than %u,"
+ "as jumbo frame is enabled",
+ (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
+ return I40E_ERR_CONFIG;
+ }
+ } else {
+ if (vf->max_pkt_len < ETHER_MIN_LEN ||
+ vf->max_pkt_len > ETHER_MAX_LEN) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is disabled",
+ (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+
+ if (i40evf_rx_init(dev) != 0) {
+ PMD_DRV_LOG(ERR, "failed to do RX init");
+ return -1;
+ }
+
+ i40evf_tx_init(dev);
+
+ if (i40evf_configure_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "configure queues failed");
+ goto err_queue;
+ }
+ if (i40evf_config_irq_map(dev)) {
+ PMD_DRV_LOG(ERR, "config_irq_map failed");
+ goto err_queue;
+ }
+
+ /* Set mac addr */
+ (void)rte_memcpy(mac_addr.addr_bytes, hw->mac.addr,
+ sizeof(mac_addr.addr_bytes));
+ if (i40evf_add_mac_addr(dev, &mac_addr)) {
+ PMD_DRV_LOG(ERR, "Failed to add mac addr");
+ goto err_queue;
+ }
+
+ if (i40evf_start_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "enable queues failed");
+ goto err_mac;
+ }
+
+ i40evf_enable_queues_intr(hw);
+ return 0;
+
+err_mac:
+ i40evf_del_mac_addr(dev, &mac_addr);
+err_queue:
+ return -1;
+}
+
+static void
+i40evf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ i40evf_disable_queues_intr(hw);
+ i40evf_stop_queues(dev);
+}
+
+static int
+i40evf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct rte_eth_link new_link;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ /*
+ * The DPDK pf host provides an interface to acquire link status,
+ * while the Linux driver does not
+ */
+ if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
+ i40evf_get_link_status(dev, &new_link);
+ else {
+ /* Always assume it's up, for Linux driver PF host */
+ new_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
+ new_link.link_speed = ETH_LINK_SPEED_10000;
+ new_link.link_status = 1;
+ }
+ i40evf_dev_atomic_write_link_status(dev, &new_link);
+
+ return 0;
+}
+
+static void
+i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If enabled, just return */
+ if (vf->promisc_unicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
+ if (ret == 0)
+ vf->promisc_unicast_enabled = TRUE;
+}
+
+static void
+i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If disabled, just return */
+ if (!vf->promisc_unicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
+ if (ret == 0)
+ vf->promisc_unicast_enabled = FALSE;
+}
+
+static void
+i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If enabled, just return */
+ if (vf->promisc_multicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
+ if (ret == 0)
+ vf->promisc_multicast_enabled = TRUE;
+}
+
+static void
+i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int ret;
+
+ /* If already disabled, just return */
+ if (!vf->promisc_multicast_enabled)
+ return;
+
+ ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
+ if (ret == 0)
+ vf->promisc_multicast_enabled = FALSE;
+}
+
+static void
+i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ memset(dev_info, 0, sizeof(*dev_info));
+ dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = I40E_DEFAULT_RX_PTHRESH,
+ .hthresh = I40E_DEFAULT_RX_HTHRESH,
+ .wthresh = I40E_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = I40E_DEFAULT_TX_PTHRESH,
+ .hthresh = I40E_DEFAULT_TX_HTHRESH,
+ .wthresh = I40E_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ };
+}
+
+static void
+i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ memset(stats, 0, sizeof(*stats));
+ if (i40evf_get_statics(dev, stats))
+ PMD_DRV_LOG(ERR, "Get statics failed");
+}
+
+static void
+i40evf_dev_close(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40evf_dev_stop(dev);
+ i40evf_reset_vf(hw);
+ i40e_shutdown_adminq(hw);
+}
+
+static int
+i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t lut, l;
+ uint16_t i, j;
+ uint16_t idx, shift;
+ uint8_t mask;
+
+ if (reta_size != ETH_RSS_RETA_SIZE_64) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number of hardware can "
+ "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ return -EINVAL;
+ }
+
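+ /*
+ * Note added for illustration (derived from the code below, not from a
+ * separate specification): each I40E_VFQF_HLUT register holds four
+ * one-byte LUT entries, so the loop advances I40E_4_BIT_WIDTH entries at
+ * a time and writes HLUT(i >> 2). For example, with reta_size == 64 every
+ * entry lives in reta_conf[0] (idx == 0) and 'shift' selects the 4-bit
+ * group within its 64-bit mask.
+ */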
+ for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ I40E_4_BIT_MASK);
+ if (!mask)
+ continue;
+ if (mask == I40E_4_BIT_MASK)
+ l = 0;
+ else
+ l = I40E_READ_REG(hw, I40E_VFQF_HLUT(i >> 2));
+
+ for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ lut |= reta_conf[idx].reta[shift + j] <<
+ (CHAR_BIT * j);
+ else
+ lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
+ }
+ I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ }
+
+ return 0;
+}
+
+static int
+i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t lut;
+ uint16_t i, j;
+ uint16_t idx, shift;
+ uint8_t mask;
+
+ if (reta_size != ETH_RSS_RETA_SIZE_64) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number of hardware can "
+ "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ I40E_4_BIT_MASK);
+ if (!mask)
+ continue;
+
+ lut = I40E_READ_REG(hw, I40E_VFQF_HLUT(i >> 2));
+ for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta_conf[idx].reta[shift + j] =
+ ((lut >> (CHAR_BIT * j)) &
+ I40E_8_BIT_MASK);
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40evf_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
+{
+ uint32_t *hash_key;
+ uint8_t hash_key_len;
+ uint64_t rss_hf, hena;
+
+ hash_key = (uint32_t *)(rss_conf->rss_key);
+ hash_key_len = rss_conf->rss_key_len;
+ if (hash_key != NULL && hash_key_len >=
+ (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ uint16_t i;
+
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ I40E_WRITE_REG(hw, I40E_VFQF_HKEY(i), hash_key[i]);
+ }
+
+ rss_hf = rss_conf->rss_hf;
+ hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
+ hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
+ hena &= ~I40E_RSS_HENA_ALL;
+ hena |= i40e_config_hena(rss_hf);
+ I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
+ I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40EVF_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static void
+i40evf_disable_rss(struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = I40E_VF_TO_HW(vf);
+ uint64_t hena;
+
+ hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
+ hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
+ hena &= ~I40E_RSS_HENA_ALL;
+ I40E_WRITE_REG(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
+ I40E_WRITE_REG(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
+ I40EVF_WRITE_FLUSH(hw);
+}
+
+static int
+i40evf_config_rss(struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = I40E_VF_TO_HW(vf);
+ struct rte_eth_rss_conf rss_conf;
+ uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+
+ if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ i40evf_disable_rss(vf);
+ PMD_DRV_LOG(DEBUG, "RSS not configured\n");
+ return 0;
+ }
+
+ /* Fill out the look up table */
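+ /*
+ * Illustrative example (not in the original source): nb_q is the total
+ * number of LUT entries ((I40E_VFQF_HLUT_MAX_INDEX + 1) registers of 4
+ * bytes each). With num_queue_pairs == 3, the loop produces the repeating
+ * pattern 0,1,2,0,1,2,... and one 32-bit HLUT register is written for
+ * every four entries packed into 'lut'.
+ */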
+ for (i = 0, j = 0; i < nb_q; i++, j++) {
+ if (j >= vf->num_queue_pairs)
+ j = 0;
+ lut = (lut << 8) | j;
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ }
+
+ rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
+ i40evf_disable_rss(vf);
+ PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
+ return 0;
+ }
+
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < nb_q) {
+ /* Calculate the default hash key */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ rss_key_default[i] = (uint32_t)rte_rand();
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = nb_q;
+ }
+
+ return i40evf_hw_rss_hash_set(hw, &rss_conf);
+}
+
+static int
+i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
+ uint64_t hena;
+
+ hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
+ hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
+ if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
+ if (rss_hf != 0) /* Enable RSS */
+ return -EINVAL;
+ return 0;
+ }
+
+ /* RSS enabled */
+ if (rss_hf == 0) /* Disable RSS */
+ return -EINVAL;
+
+ return i40evf_hw_rss_hash_set(hw, rss_conf);
+}
+
+static int
+i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
+ uint64_t hena;
+ uint16_t i;
+
+ if (hash_key) {
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ hash_key[i] = I40E_READ_REG(hw, I40E_VFQF_HKEY(i));
+ rss_conf->rss_key_len = i * sizeof(uint32_t);
+ }
+ hena = (uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(0));
+ hena |= ((uint64_t)I40E_READ_REG(hw, I40E_VFQF_HENA(1))) << 32;
+ rss_conf->rss_hf = i40e_parse_hena(hena);
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_fdir.c b/src/dpdk_lib18/librte_pmd_i40e/i40e_fdir.c
new file mode 100755
index 00000000..98df9357
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e_fdir.c
@@ -0,0 +1,1365 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+
+#include "i40e_logs.h"
+#include "i40e/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN 16
+#endif
+
+#define I40E_FDIR_PKT_LEN 512
+#define I40E_FDIR_IP_DEFAULT_LEN 420
+#define I40E_FDIR_IP_DEFAULT_TTL 0xFF
+#define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
+#define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
+#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60300000
+#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
+#define I40E_FDIR_IPv6_PAYLOAD_LEN 380
+#define I40E_FDIR_UDP_DEFAULT_LEN 400
+
+/* Wait count and interval for fdir filter programming */
+#define I40E_FDIR_WAIT_COUNT 10
+#define I40E_FDIR_WAIT_INTERVAL_US 1000
+
+/* Wait count and interval for fdir filter flush */
+#define I40E_FDIR_FLUSH_RETRY 50
+#define I40E_FDIR_FLUSH_INTERVAL_MS 5
+
+#define I40E_COUNTER_PF 2
+/* Statistic counter index for one pf */
+#define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
+#define I40E_MAX_FLX_SOURCE_OFF 480
+#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50
+
+#define NONUSE_FLX_PIT_DEST_OFF 63
+#define NONUSE_FLX_PIT_FSIZE 1
+#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
+ (((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
+ (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
+ ((((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR) << \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
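+/*
+ * Illustrative example (values assumed, not from the original source):
+ * MK_FLX_PIT(0, 8, 0) packs SOURCE_OFF = 0, FSIZE = 8 and
+ * DEST_OFF = 0 + I40E_FLX_OFFSET_IN_FIELD_VECTOR (i.e. 50) into a single
+ * I40E_PRTQF_FLX_PIT register value.
+ */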
+
+#define I40E_FDIR_FLOW_TYPES ( \
+ (1 << RTE_ETH_FLOW_TYPE_UDPV4) | \
+ (1 << RTE_ETH_FLOW_TYPE_TCPV4) | \
+ (1 << RTE_ETH_FLOW_TYPE_SCTPV4) | \
+ (1 << RTE_ETH_FLOW_TYPE_IPV4_OTHER) | \
+ (1 << RTE_ETH_FLOW_TYPE_FRAG_IPV4) | \
+ (1 << RTE_ETH_FLOW_TYPE_UDPV6) | \
+ (1 << RTE_ETH_FLOW_TYPE_TCPV6) | \
+ (1 << RTE_ETH_FLOW_TYPE_SCTPV6) | \
+ (1 << RTE_ETH_FLOW_TYPE_IPV6_OTHER) | \
+ (1 << RTE_ETH_FLOW_TYPE_FRAG_IPV6))
+
+#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
+
+static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq);
+static int i40e_check_fdir_flex_conf(
+ const struct rte_eth_fdir_flex_conf *conf);
+static void i40e_set_flx_pld_cfg(struct i40e_pf *pf,
+ const struct rte_eth_flex_payload_cfg *cfg);
+static void i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct rte_eth_fdir_flex_mask *mask_cfg);
+static int i40e_fdir_construct_pkt(struct i40e_pf *pf,
+ const struct rte_eth_fdir_input *fdir_input,
+ unsigned char *raw_pkt);
+static int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+static int i40e_fdir_filter_programming(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static void i40e_fdir_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_fdir_info *fdir);
+static void i40e_fdir_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_fdir_stats *stat);
+
+static int
+i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
+ struct i40e_hmc_obj_rxq rx_ctx;
+ int err = I40E_SUCCESS;
+
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ /* Init the RX queue in hardware */
+ rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = 0;
+ rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.qlen = rxq->nb_rx_desc;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rx_ctx.dsize = 1;
+#endif
+ rx_ctx.dtype = i40e_header_split_none;
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+ rx_ctx.rxmax = ETHER_MAX_LEN;
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = 0;
+ rx_ctx.l2tsel = 1;
+ rx_ctx.showiv = 1;
+ rx_ctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
+ return err;
+ }
+ err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
+ return err;
+ }
+ rxq->qrx_tail = hw->hw_addr +
+ I40E_QRX_TAIL(rxq->vsi->base_queue);
+
+ rte_wmb();
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ return err;
+}
+
+/*
+ * i40e_fdir_setup - reserve and initialize the Flow Director resources
+ * @pf: board private structure
+ */
+int
+i40e_fdir_setup(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ int err = I40E_SUCCESS;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz = NULL;
+ struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+
+ if ((pf->flags & I40E_FLAG_FDIR) == 0) {
+ PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
+ " num_filters_best_effort = %u.",
+ hw->func_caps.fd_filters_guaranteed,
+ hw->func_caps.fd_filters_best_effort);
+
+ vsi = pf->fdir.fdir_vsi;
+ if (vsi) {
+ PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
+ return I40E_SUCCESS;
+ }
+ /* make new FDIR VSI */
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
+ return I40E_ERR_NO_AVAILABLE_VSI;
+ }
+ pf->fdir.fdir_vsi = vsi;
+
+ /* Fdir tx queue setup */
+ err = i40e_fdir_setup_tx_resources(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
+ goto fail_setup_tx;
+ }
+
+ /* Fdir rx queue setup */
+ err = i40e_fdir_setup_rx_resources(pf);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
+ goto fail_setup_rx;
+ }
+
+ err = i40e_tx_queue_init(pf->fdir.txq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
+ goto fail_mem;
+ }
+
+ /* need to switch on before dev start */
+ err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
+ goto fail_mem;
+ }
+
+ /* Init the rx queue in hardware */
+ err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
+ goto fail_mem;
+ }
+
+ /* switch on rx queue */
+ err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
+ goto fail_mem;
+ }
+
+ /* reserve memory for the fdir programming packet */
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d",
+ eth_dev->driver->pci_drv.name,
+ I40E_FDIR_MZ_NAME,
+ eth_dev->data->port_id);
+ mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
+ if (!mz) {
+ PMD_DRV_LOG(ERR, "Cannot init memzone for "
+ "flow director program packet.");
+ err = I40E_ERR_NO_MEMORY;
+ goto fail_mem;
+ }
+ pf->fdir.prg_pkt = mz->addr;
+#ifdef RTE_LIBRTE_XEN_DOM0
+ pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+#else
+ pf->fdir.dma_addr = (uint64_t)mz->phys_addr;
+#endif
+ pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
+ PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
+ vsi->base_queue);
+ return I40E_SUCCESS;
+
+fail_mem:
+ i40e_dev_rx_queue_release(pf->fdir.rxq);
+ pf->fdir.rxq = NULL;
+fail_setup_rx:
+ i40e_dev_tx_queue_release(pf->fdir.txq);
+ pf->fdir.txq = NULL;
+fail_setup_tx:
+ i40e_vsi_release(vsi);
+ pf->fdir.fdir_vsi = NULL;
+ return err;
+}
+
+/*
+ * i40e_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ */
+void
+i40e_fdir_teardown(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+
+ vsi = pf->fdir.fdir_vsi;
+ if (!vsi)
+ return;
+ i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
+ i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+ i40e_dev_rx_queue_release(pf->fdir.rxq);
+ pf->fdir.rxq = NULL;
+ i40e_dev_tx_queue_release(pf->fdir.txq);
+ pf->fdir.txq = NULL;
+ i40e_vsi_release(vsi);
+ pf->fdir.fdir_vsi = NULL;
+}
+
+/* check whether the flow director table is empty */
+static inline int
+i40e_fdir_empty(struct i40e_hw *hw)
+{
+ uint32_t guarant_cnt, best_cnt;
+
+ guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ if (best_cnt + guarant_cnt > 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Initialize the configuration of the byte stream extracted as flexible payload
+ * and the mask settings
+ */
+static inline void
+i40e_init_flx_pld(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint8_t pctype;
+ int i, index;
+
+ /*
+ * Define the byte stream extracted as flexible payload in the
+ * field vector. By default, select 8 words from the beginning
+ * of the payload as flexible payload.
+ */
+ for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
+ index = i * I40E_MAX_FLXPLD_FIED;
+ pf->fdir.flex_set[index].src_offset = 0;
+ pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
+ pf->fdir.flex_set[index].dst_offset = 0;
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
+ I40E_WRITE_REG(hw,
+ I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
+ I40E_WRITE_REG(hw,
+ I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
+ }
+
+ /* initialize the masks */
+ for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ pctype <= I40E_FILTER_PCTYPE_FRAG_IPV6; pctype++) {
+ pf->fdir.flex_mask[pctype].word_mask = 0;
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
+ for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
+ pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
+ pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
+ }
+ }
+}
+
+#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
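+/* For illustration: I40E_WORD(0x12, 0x34) evaluates to 0x1234. */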
+
+#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
+ if ((flex_pit2).src_offset < \
+ (flex_pit1).src_offset + (flex_pit1).size) { \
+ PMD_DRV_LOG(ERR, "src_offset should be not" \
+ " less than than previous offset" \
+ " + previous FSIZE."); \
+ return -EINVAL; \
+ } \
+} while (0)
+
+/*
+ * i40e_srcoff_to_flx_pit - transform the src_offset into flex_pit structure,
+ * and the flex_pit will be sorted by its src_offset value
+ */
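+/*
+ * Illustrative example (hypothetical offsets): given src_offset values
+ * {0, 1, 2, 3, 10, 11, ...}, the helper yields two entries sorted by
+ * src_offset: {src_offset = 0, size = 4, dst_offset = 0} and
+ * {src_offset = 10, size = 2, dst_offset = 4}.
+ */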
+static inline uint16_t
+i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
+ struct i40e_fdir_flex_pit *flex_pit)
+{
+ uint16_t src_tmp, size, num = 0;
+ uint16_t i, k, j = 0;
+
+ while (j < I40E_FDIR_MAX_FLEX_LEN) {
+ size = 1;
+ for (; j < I40E_FDIR_MAX_FLEX_LEN; j++) {
+ if (src_offset[j + 1] == src_offset[j] + 1)
+ size++;
+ else {
+ src_tmp = src_offset[j] + 1 - size;
+ /* the flex_pit entries need to be sorted by src_offset */
+ for (i = 0; i < num; i++) {
+ if (src_tmp < flex_pit[i].src_offset)
+ break;
+ }
+ /* if insert required, move backward */
+ for (k = num; k > i; k--)
+ flex_pit[k] = flex_pit[k - 1];
+ /* insert */
+ flex_pit[i].dst_offset = j + 1 - size;
+ flex_pit[i].src_offset = src_tmp;
+ flex_pit[i].size = size;
+ j++;
+ num++;
+ break;
+ }
+ }
+ }
+ return num;
+}
+
+/* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
+static inline int
+i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
+{
+ struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
+ uint16_t num, i;
+
+ for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
+ if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
+ PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
+ return -EINVAL;
+ }
+ }
+
+ memset(flex_pit, 0, sizeof(flex_pit));
+ num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
+ if (num > I40E_MAX_FLXPLD_FIED) {
+ PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
+ return -EINVAL;
+ }
+ for (i = 0; i < num; i++) {
+ if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
+ flex_pit[i].src_offset & 0x01) {
+ PMD_DRV_LOG(ERR, "flexpayload should be measured"
+ " in word");
+ return -EINVAL;
+ }
+ if (i != num - 1)
+ I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
+ }
+ return 0;
+}
+
+/*
+ * i40e_check_fdir_flex_conf - check whether the flex payload and mask configuration
+ * arguments are valid
+ */
+static int
+i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf)
+{
+ const struct rte_eth_flex_payload_cfg *flex_cfg;
+ const struct rte_eth_fdir_flex_mask *flex_mask;
+ uint16_t mask_tmp;
+ uint8_t nb_bitmask;
+ uint16_t i, j;
+ int ret = 0;
+
+ if (conf == NULL) {
+ PMD_DRV_LOG(INFO, "NULL pointer.");
+ return -EINVAL;
+ }
+ /* check flexible payload setting configuration */
+ if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "invalid number of payload setting.");
+ return -EINVAL;
+ }
+ for (i = 0; i < conf->nb_payloads; i++) {
+ flex_cfg = &conf->flex_set[i];
+ if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "invalid payload type.");
+ return -EINVAL;
+ }
+ ret = i40e_check_fdir_flex_payload(flex_cfg);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
+ return -EINVAL;
+ }
+ }
+
+ /* check flex mask setting configuration */
+ if (conf->nb_flexmasks > RTE_ETH_FLOW_TYPE_FRAG_IPV6) {
+ PMD_DRV_LOG(ERR, "invalid number of flex masks.");
+ return -EINVAL;
+ }
+ for (i = 0; i < conf->nb_flexmasks; i++) {
+ flex_mask = &conf->flex_mask[i];
+ if (!I40E_VALID_FLOW_TYPE(flex_mask->flow_type)) {
+ PMD_DRV_LOG(WARNING, "invalid flow type.");
+ return -EINVAL;
+ }
+ nb_bitmask = 0;
+ for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
+ mask_tmp = I40E_WORD(flex_mask->mask[j],
+ flex_mask->mask[j + 1]);
+ if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
+ nb_bitmask++;
+ if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
+ PMD_DRV_LOG(ERR, " exceed maximal"
+ " number of bitmasks.");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * i40e_set_flx_pld_cfg - configure the rule for how the byte stream is extracted as flexible payload
+ * @pf: board private structure
+ * @cfg: the rule for how the byte stream is extracted as flexible payload
+ */
+static void
+i40e_set_flx_pld_cfg(struct i40e_pf *pf,
+ const struct rte_eth_flex_payload_cfg *cfg)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
+ uint32_t flx_pit;
+ uint16_t num, min_next_off; /* in words */
+ uint8_t field_idx = 0;
+ uint8_t layer_idx = 0;
+ uint16_t i;
+
+ if (cfg->type == RTE_ETH_L2_PAYLOAD)
+ layer_idx = I40E_FLXPLD_L2_IDX;
+ else if (cfg->type == RTE_ETH_L3_PAYLOAD)
+ layer_idx = I40E_FLXPLD_L3_IDX;
+ else if (cfg->type == RTE_ETH_L4_PAYLOAD)
+ layer_idx = I40E_FLXPLD_L4_IDX;
+
+ memset(flex_pit, 0, sizeof(flex_pit));
+ num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);
+
+ for (i = 0; i < num; i++) {
+ field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+ /* record the info in fdir structure */
+ pf->fdir.flex_set[field_idx].src_offset =
+ flex_pit[i].src_offset / sizeof(uint16_t);
+ pf->fdir.flex_set[field_idx].size =
+ flex_pit[i].size / sizeof(uint16_t);
+ pf->fdir.flex_set[field_idx].dst_offset =
+ flex_pit[i].dst_offset / sizeof(uint16_t);
+ flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
+ pf->fdir.flex_set[field_idx].size,
+ pf->fdir.flex_set[field_idx].dst_offset);
+
+ I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+ }
+ min_next_off = pf->fdir.flex_set[field_idx].src_offset +
+ pf->fdir.flex_set[field_idx].size;
+
+ for (; i < I40E_MAX_FLXPLD_FIED; i++) {
+ /* set the unused registers, obeying the register's constraints */
+ flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
+ NONUSE_FLX_PIT_DEST_OFF);
+ I40E_WRITE_REG(hw,
+ I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
+ flx_pit);
+ min_next_off++;
+ }
+}
+
+/*
+ * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
+ * @pf: board private structure
+ * @pctype: packet classify type
+ * @flex_masks: mask for flexible payload
+ */
+static void
+i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct rte_eth_fdir_flex_mask *mask_cfg)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_fdir_flex_mask *flex_mask;
+ uint32_t flxinset, fd_mask;
+ uint16_t mask_tmp;
+ uint8_t i, nb_bitmask = 0;
+
+ flex_mask = &pf->fdir.flex_mask[pctype];
+ memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
+ for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
+ mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
+ if (mask_tmp != 0x0) {
+ flex_mask->word_mask |=
+ I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
+ if (mask_tmp != UINT16_MAX) {
+ /* set bit mask */
+ flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
+ flex_mask->bitmask[nb_bitmask].offset =
+ i / sizeof(uint16_t);
+ nb_bitmask++;
+ }
+ }
+ }
+ /* write mask to hw */
+ flxinset = (flex_mask->word_mask <<
+ I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
+ I40E_PRTQF_FD_FLXINSET_INSET_MASK;
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
+
+ for (i = 0; i < nb_bitmask; i++) {
+ fd_mask = (flex_mask->bitmask[i].mask <<
+ I40E_PRTQF_FD_MSK_MASK_SHIFT) &
+ I40E_PRTQF_FD_MSK_MASK_MASK;
+ fd_mask |= ((flex_mask->bitmask[i].offset +
+ I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
+ I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
+ I40E_PRTQF_FD_MSK_OFFSET_MASK;
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
+ }
+}
+
+/*
+ * Configure flow director related setting
+ */
+int
+i40e_fdir_configure(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_fdir_flex_conf *conf;
+ enum i40e_filter_pctype pctype;
+ uint32_t val;
+ uint8_t i;
+ int ret = 0;
+
+ /*
+ * The configuration needs to be done before
+ * flow director filters are added.
+ * If filters exist, flush them first.
+ */
+ if (i40e_fdir_empty(hw) < 0) {
+ ret = i40e_fdir_flush(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to flush fdir table.");
+ return ret;
+ }
+ }
+
+ /* enable FDIR filter */
+ val = I40E_READ_REG(hw, I40E_PFQF_CTL_0);
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, val);
+
+ i40e_init_flx_pld(pf); /* set flex config to default value */
+
+ conf = &dev->data->dev_conf.fdir_conf.flex_conf;
+ ret = i40e_check_fdir_flex_conf(conf);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, " invalid configuration arguments.");
+ return -EINVAL;
+ }
+ /* configure flex payload */
+ for (i = 0; i < conf->nb_payloads; i++)
+ i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
+ /* configure flex mask*/
+ for (i = 0; i < conf->nb_flexmasks; i++) {
+ pctype = i40e_flowtype_to_pctype(
+ conf->flex_mask[i].flow_type);
+ i40e_set_flex_mask_on_pctype(pf,
+ pctype,
+ &conf->flex_mask[i]);
+ }
+
+ return ret;
+}
+
+static inline void
+i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
+ unsigned char *raw_pkt)
+{
+ struct ether_hdr *ether = (struct ether_hdr *)raw_pkt;
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ static const uint8_t next_proto[] = {
+ [RTE_ETH_FLOW_TYPE_UDPV4] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_TYPE_TCPV4] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_TYPE_SCTPV4] = IPPROTO_SCTP,
+ [RTE_ETH_FLOW_TYPE_IPV4_OTHER] = IPPROTO_IP,
+ [RTE_ETH_FLOW_TYPE_FRAG_IPV4] = IPPROTO_IP,
+ [RTE_ETH_FLOW_TYPE_UDPV6] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_TYPE_TCPV6] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_TYPE_SCTPV6] = IPPROTO_SCTP,
+ [RTE_ETH_FLOW_TYPE_IPV6_OTHER] = IPPROTO_NONE,
+ [RTE_ETH_FLOW_TYPE_FRAG_IPV6] = IPPROTO_NONE,
+ };
+
+ switch (fdir_input->flow_type) {
+ case RTE_ETH_FLOW_TYPE_UDPV4:
+ case RTE_ETH_FLOW_TYPE_TCPV4:
+ case RTE_ETH_FLOW_TYPE_SCTPV4:
+ case RTE_ETH_FLOW_TYPE_IPV4_OTHER:
+ case RTE_ETH_FLOW_TYPE_FRAG_IPV4:
+ ip = (struct ipv4_hdr *)(raw_pkt + sizeof(struct ether_hdr));
+
+ ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+ /* set total length to the default value */
+ ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+ ip->time_to_live = fdir_input->ttl;
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+ ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+ ip->next_proto_id = next_proto[fdir_input->flow_type];
+ break;
+ case RTE_ETH_FLOW_TYPE_UDPV6:
+ case RTE_ETH_FLOW_TYPE_TCPV6:
+ case RTE_ETH_FLOW_TYPE_SCTPV6:
+ case RTE_ETH_FLOW_TYPE_IPV6_OTHER:
+ case RTE_ETH_FLOW_TYPE_FRAG_IPV6:
+ ip6 = (struct ipv6_hdr *)(raw_pkt + sizeof(struct ether_hdr));
+
+ ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW);
+ ip6->payload_len =
+ rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ ip6->hop_limits = fdir_input->ttl;
+
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ rte_memcpy(&(ip6->src_addr),
+ &(fdir_input->flow.ipv6_flow.dst_ip),
+ IPV6_ADDR_LEN);
+ rte_memcpy(&(ip6->dst_addr),
+ &(fdir_input->flow.ipv6_flow.src_ip),
+ IPV6_ADDR_LEN);
+ ip6->proto = next_proto[fdir_input->flow_type];
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown flow type %u.",
+ fdir_input->flow_type);
+ break;
+ }
+}
+
+
+/*
+ * i40e_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_fdir_construct_pkt(struct i40e_pf *pf,
+ const struct rte_eth_fdir_input *fdir_input,
+ unsigned char *raw_pkt)
+{
+ unsigned char *payload, *ptr;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ struct sctp_hdr *sctp;
+ uint8_t size, dst = 0;
+ uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
+
+ /* fill the ethernet and IP head */
+ i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt);
+
+ /* fill the L4 head */
+ switch (fdir_input->flow_type) {
+ case RTE_ETH_FLOW_TYPE_UDPV4:
+ udp = (struct udp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr));
+ payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+ udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+ break;
+
+ case RTE_ETH_FLOW_TYPE_TCPV4:
+ tcp = (struct tcp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr));
+ payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+ tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+ tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+ break;
+
+ case RTE_ETH_FLOW_TYPE_SCTPV4:
+ sctp = (struct sctp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr));
+ payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+ break;
+
+ case RTE_ETH_FLOW_TYPE_IPV4_OTHER:
+ case RTE_ETH_FLOW_TYPE_FRAG_IPV4:
+ payload = raw_pkt + sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr);
+ set_idx = I40E_FLXPLD_L3_IDX;
+ break;
+
+ case RTE_ETH_FLOW_TYPE_UDPV6:
+ udp = (struct udp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
+ sizeof(struct ipv6_hdr));
+ payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+ udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ break;
+
+ case RTE_ETH_FLOW_TYPE_TCPV6:
+ tcp = (struct tcp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
+ sizeof(struct ipv6_hdr));
+ payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ /*
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+ tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+ tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+ break;
+
+ case RTE_ETH_FLOW_TYPE_SCTPV6:
+ sctp = (struct sctp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
+ sizeof(struct ipv6_hdr));
+ payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+ break;
+
+ case RTE_ETH_FLOW_TYPE_IPV6_OTHER:
+ case RTE_ETH_FLOW_TYPE_FRAG_IPV6:
+ payload = raw_pkt + sizeof(struct ether_hdr) +
+ sizeof(struct ipv6_hdr);
+ set_idx = I40E_FLXPLD_L3_IDX;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
+ return -EINVAL;
+ }
+
+ /* fill the flexbytes to payload */
+ for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+ pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+ size = pf->fdir.flex_set[pit_idx].size;
+ if (size == 0)
+ continue;
+ dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+ ptr = payload +
+ pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+ (void)rte_memcpy(ptr,
+ &fdir_input->flow_ext.flexbytes[dst],
+ size * sizeof(uint16_t));
+ }
+
+ return 0;
+}
+
+/* Construct the tx flags */
+static inline uint64_t
+i40e_build_ctob(uint32_t td_cmd,
+ uint32_t td_offset,
+ unsigned int size,
+ uint32_t td_tag)
+{
+ return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+/*
+ * Check the programming status descriptor in the rx queue.
+ * Done after the Flow Director programming descriptor has been
+ * submitted on the tx queue.
+ */
+static inline int
+i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
+{
+ volatile union i40e_rx_desc *rxdp;
+ uint64_t qword1;
+ uint32_t rx_status;
+ uint32_t len, id;
+ uint32_t error;
+ int ret = 0;
+
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
+ id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+
+ if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
+ id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
+ error = (qword1 &
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+ if (error == (0x1 <<
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
+ " (FD_ID %u): programming status"
+ " reported.",
+ rxdp->wb.qword0.hi_dword.fd_id);
+ ret = -1;
+ } else if (error == (0x1 <<
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+ PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
+ " (FD_ID %u): programming status"
+ " reported.",
+ rxdp->wb.qword0.hi_dword.fd_id);
+ ret = -1;
+ } else
+ PMD_DRV_LOG(ERR, "invalid programming status"
+ " reported, error = %u.", error);
+ } else
+ PMD_DRV_LOG(ERR, "unknown programming status"
+ " reported, len = %d, id = %u.", len, id);
+ rxdp->wb.qword1.status_error_len = 0;
+ rxq->rx_tail++;
+ if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
+ rxq->rx_tail = 0;
+ }
+ return ret;
+}
+
+/*
+ * i40e_add_del_fdir_filter - add or remove a flow director filter.
+ * @pf: board private structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+ enum i40e_filter_pctype pctype;
+ int ret = 0;
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
+ " check the mode in fdir_conf.");
+ return -ENOTSUP;
+ }
+
+ if (!I40E_VALID_FLOW_TYPE(filter->input.flow_type)) {
+ PMD_DRV_LOG(ERR, "invalid flow_type input.");
+ return -EINVAL;
+ }
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+
+ memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+ ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
+ return ret;
+ }
+ pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+ ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+ pctype);
+ return ret;
+ }
+ return ret;
+}
+
+/*
+ * i40e_fdir_filter_programming - Program a flow director filter rule.
+ * This is done with a Flow Director programming descriptor followed by a packet
+ * structure that contains the filter fields that need to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
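+/*
+ * Sketch of the flow implemented below (derived from this code, not from a
+ * separate specification): two descriptors are placed on the FDIR tx queue:
+ * a filter programming descriptor at tx_tail describing the rule, followed
+ * by a data descriptor at tx_tail + 1 pointing to the pre-built raw packet
+ * in pf->fdir.prg_pkt. The function then polls for the DD bit and checks
+ * the programming status reported on the FDIR rx queue.
+ */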
+static int
+i40e_fdir_filter_programming(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct rte_eth_fdir_filter *filter,
+ bool add)
+{
+ struct i40e_tx_queue *txq = pf->fdir.txq;
+ struct i40e_rx_queue *rxq = pf->fdir.rxq;
+ const struct rte_eth_fdir_action *fdir_action = &filter->action;
+ volatile struct i40e_tx_desc *txdp;
+ volatile struct i40e_filter_program_desc *fdirdp;
+ uint32_t td_cmd;
+ uint16_t i;
+ uint8_t dest;
+
+ PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+ fdirdp = (volatile struct i40e_filter_program_desc *)
+ (&(txq->tx_ring[txq->tx_tail]));
+
+ fdirdp->qindex_flex_ptype_vsi =
+ rte_cpu_to_le_32((fdir_action->rx_queue <<
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32((fdir_action->flex_off <<
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+ I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32((pctype <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+ I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+ /* Use LAN VSI Id by default */
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32((pf->main_vsi->vsi_id <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+ I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+ fdirdp->dtype_cmd_cntindex =
+ rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+ if (add)
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ else
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ else
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+ I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+ I40E_TXD_FLTR_QW1_DEST_MASK);
+
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32((fdir_action->report_status<<
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+ I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32((pf->fdir.match_counter_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+ fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+ PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+ txdp = &(txq->tx_ring[txq->tx_tail + 1]);
+ txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+ td_cmd = I40E_TX_DESC_CMD_EOP |
+ I40E_TX_DESC_CMD_RS |
+ I40E_TX_DESC_CMD_DUMMY;
+
+ txdp->cmd_type_offset_bsz =
+ i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+ txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+ /* Update the tx tail register */
+ rte_wmb();
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) {
+ rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
+ if (txdp->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ break;
+ }
+ if (i >= I40E_FDIR_WAIT_COUNT) {
+ PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
+ " time out to get DD on tx queue.");
+ return -ETIMEDOUT;
+ }
+ /* delay a total of 10 ms before checking the programming status */
+ rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US);
+ if (i40e_check_fdir_programming_status(rxq) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
+ " programming status reported.");
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+/*
+ * i40e_fdir_flush - clear all filters of Flow Director table
+ * @pf: board private structure
+ */
+static int
+i40e_fdir_flush(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t reg;
+ uint16_t guarant_cnt, best_cnt;
+ uint16_t i;
+
+ I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+ I40E_WRITE_FLUSH(hw);
+
+ for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
+ rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
+ reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
+ if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+ break;
+ }
+ if (i >= I40E_FDIR_FLUSH_RETRY) {
+ PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
+ return -ETIMEDOUT;
+ }
+ guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
+ I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ if (guarant_cnt != 0 || best_cnt != 0) {
+ PMD_DRV_LOG(ERR, "Failed to flush FD table.");
+ return -ENOSYS;
+ } else
+ PMD_DRV_LOG(INFO, "FD table Flush success.");
+ return 0;
+}
+
+static inline void
+i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
+ struct rte_eth_flex_payload_cfg *flex_set,
+ uint16_t *num)
+{
+ struct i40e_fdir_flex_pit *flex_pit;
+ struct rte_eth_flex_payload_cfg *ptr = flex_set;
+ uint16_t src, dst, size, j, k;
+ uint8_t i, layer_idx;
+
+ for (layer_idx = I40E_FLXPLD_L2_IDX;
+ layer_idx <= I40E_FLXPLD_L4_IDX;
+ layer_idx++) {
+ if (layer_idx == I40E_FLXPLD_L2_IDX)
+ ptr->type = RTE_ETH_L2_PAYLOAD;
+ else if (layer_idx == I40E_FLXPLD_L3_IDX)
+ ptr->type = RTE_ETH_L3_PAYLOAD;
+ else if (layer_idx == I40E_FLXPLD_L4_IDX)
+ ptr->type = RTE_ETH_L4_PAYLOAD;
+
+ for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+ flex_pit = &pf->fdir.flex_set[layer_idx *
+ I40E_MAX_FLXPLD_FIED + i];
+ if (flex_pit->size == 0)
+ continue;
+ src = flex_pit->src_offset * sizeof(uint16_t);
+ dst = flex_pit->dst_offset * sizeof(uint16_t);
+ size = flex_pit->size * sizeof(uint16_t);
+ for (j = src, k = dst; j < src + size; j++, k++)
+ ptr->src_offset[k] = j;
+ }
+ (*num)++;
+ ptr++;
+ }
+}
+
+static inline void
+i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
+ struct rte_eth_fdir_flex_mask *flex_mask,
+ uint16_t *num)
+{
+ struct i40e_fdir_flex_mask *mask;
+ struct rte_eth_fdir_flex_mask *ptr = flex_mask;
+ enum rte_eth_flow_type flow_type;
+ uint8_t i, j;
+ uint16_t off_bytes, mask_tmp;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ i <= I40E_FILTER_PCTYPE_FRAG_IPV6;
+ i++) {
+ mask = &pf->fdir.flex_mask[i];
+ if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i))
+ continue;
+ flow_type = i40e_pctype_to_flowtype((enum i40e_filter_pctype)i);
+ for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
+ if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
+ ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
+ ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
+ } else {
+ ptr->mask[j * sizeof(uint16_t)] = 0x0;
+ ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
+ }
+ }
+ for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
+ off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
+ mask_tmp = ~mask->bitmask[j].mask;
+ ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
+ ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
+ }
+ ptr->flow_type = flow_type;
+ ptr++;
+ (*num)++;
+ }
+}
+
+/*
+ * i40e_fdir_info_get - get information of Flow Director
+ * @dev: ethernet device to get info from
+ * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
+ * the flow director information.
+ */
+static void
+i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint16_t num_flex_set = 0;
+ uint16_t num_flex_mask = 0;
+
+ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+ fdir->mode = RTE_FDIR_MODE_PERFECT;
+ else
+ fdir->mode = RTE_FDIR_MODE_NONE;
+
+ fdir->guarant_spc =
+ (uint32_t)hw->func_caps.fd_filters_guaranteed;
+ fdir->best_spc =
+ (uint32_t)hw->func_caps.fd_filters_best_effort;
+ fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
+ fdir->flow_types_mask[0] = I40E_FDIR_FLOW_TYPES;
+ fdir->flex_payload_unit = sizeof(uint16_t);
+ fdir->flex_bitmask_unit = sizeof(uint16_t);
+ fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
+ fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
+ fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
+
+ i40e_fdir_info_get_flex_set(pf,
+ fdir->flex_conf.flex_set,
+ &num_flex_set);
+ i40e_fdir_info_get_flex_mask(pf,
+ fdir->flex_conf.flex_mask,
+ &num_flex_mask);
+
+ fdir->flex_conf.nb_payloads = num_flex_set;
+ fdir->flex_conf.nb_flexmasks = num_flex_mask;
+}
+
+/*
+ * i40e_fdir_stats_get - get statistics of Flow Director
+ * @dev: ethernet device to get info from
+ * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
+ * the flow director statistics.
+ */
+static void
+i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ stat->guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ stat->best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+}
+
+/*
+ * i40e_fdir_ctrl_func - deal with all operations on flow director.
+ * @dev: pointer to the ethernet device
+ * @filter_op: operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+int
+i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = 0;
+
+ if ((pf->flags & I40E_FLAG_FDIR) == 0)
+ return -ENOTSUP;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+ return -EINVAL;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = i40e_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = i40e_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ ret = i40e_fdir_flush(dev);
+ break;
+ case RTE_ETH_FILTER_INFO:
+ i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
+ break;
+ case RTE_ETH_FILTER_STATS:
+ i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
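+/*
+ * Usage sketch (illustrative only, values assumed): an application normally
+ * reaches this entry point through the generic filter API, e.g.:
+ *
+ *     struct rte_eth_fdir_filter f;
+ *     memset(&f, 0, sizeof(f));
+ *     f.input.flow_type = RTE_ETH_FLOW_TYPE_UDPV4;
+ *     f.action.rx_queue = 1;
+ *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+ *                             RTE_ETH_FILTER_ADD, &f);
+ */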
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_logs.h b/src/dpdk_lib18/librte_pmd_i40e/i40e_logs.h
new file mode 100755
index 00000000..63c9c991
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e_logs.h
@@ -0,0 +1,77 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_LOGS_H_
+#define _I40E_LOGS_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) RTE_LOG(level, PMD," " fmt "\n", ##args)
+
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _I40E_LOGS_H_ */
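+
+/*
+ * Illustration of how these macros behave: with RTE_LIBRTE_I40E_DEBUG_DRIVER
+ * enabled in the DPDK build config, a call such as
+ *
+ * PMD_DRV_LOG(ERR, "VF reset timeout");
+ *
+ * expands to an RTE_LOG() call prefixed with the calling function name and
+ * terminated by a newline; without that option PMD_DRV_LOG_RAW collapses to
+ * an empty do/while, so the call sites cost nothing in release builds.
+ */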
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_pf.c b/src/dpdk_lib18/librte_pmd_i40e/i40e_pf.c
new file mode 100755
index 00000000..cbb2dcc1
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e_pf.c
@@ -0,0 +1,1063 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_string_fns.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include "i40e_logs.h"
+#include "i40e/i40e_prototype.h"
+#include "i40e/i40e_adminq_cmd.h"
+#include "i40e/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_pf.h"
+
+#define I40E_CFG_CRCSTRIP_DEFAULT 1
+
+static int
+i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_queue_select *qsel,
+ bool on);
+
+/**
+ * Bind PF queues with VSI and VF.
+ **/
+static int
+i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
+{
+ int i;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t vsi_id = vf->vsi->vsi_id;
+ uint16_t vf_id = vf->vf_idx;
+ uint16_t nb_qps = vf->vsi->nb_qps;
+ uint16_t qbase = vf->vsi->base_queue;
+ uint16_t q1, q2;
+ uint32_t val;
+
+ /*
+ * The VF should use the scatter-range queues, so there is no need to
+ * set QBASE in this register.
+ */
+ I40E_WRITE_REG(hw, I40E_VSILAN_QBASE(vsi_id),
+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+ /* Enable the mapping so that the VPLAN_QTABLE[] registers take effect */
+ I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
+ I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
+
+ /* map PF queues to VF */
+ for (i = 0; i < nb_qps; i++) {
+ val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
+ I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
+ }
+
+ /* map PF queues to VSI */
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
+ if (2 * i > nb_qps - 1)
+ q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
+ else
+ q1 = qbase + 2 * i;
+
+ if (2 * i + 1 > nb_qps - 1)
+ q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
+ else
+ q2 = qbase + 2 * i + 1;
+
+ val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
+ I40E_WRITE_REG(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
+ }
+ I40E_WRITE_FLUSH(hw);
+
+ return I40E_SUCCESS;
+}
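+
+/*
+ * Worked example of the VSILAN_QTABLE packing above, assuming qbase = 16
+ * and nb_qps = 4. Each register holds two absolute queue indices, the even
+ * one in the low bits and the odd one shifted by
+ * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT:
+ *
+ * QTABLE[0] = 16 | (17 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+ * QTABLE[1] = 18 | (19 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+ *
+ * Entries beyond nb_qps are filled with the QINDEX_0 mask value instead of
+ * a real queue index.
+ */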
+
+
+/**
+ * Perform the VF reset operation.
+ */
+int
+i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
+{
+ uint32_t val, i;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t vf_id, abs_vf_id, vf_msix_num;
+ int ret;
+ struct i40e_virtchnl_queue_select qsel;
+
+ if (vf == NULL)
+ return -EINVAL;
+
+ vf_id = vf->vf_idx;
+ abs_vf_id = vf_id + hw->func_caps.vf_base_id;
+
+ /* Notify the VF that a VF reset (VFR) is in progress */
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS);
+
+ /*
+ * If a software VF reset is requested, a VFLR interrupt will be
+ * generated and this function will be called again. To avoid that,
+ * disable the interrupt first.
+ */
+ if (do_hw_reset) {
+ vf->state = I40E_VF_INRESET;
+ val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
+ val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
+ I40E_WRITE_FLUSH(hw);
+ }
+
+#define VFRESET_MAX_WAIT_CNT 100
+ /* Wait until VF reset is done */
+ for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
+ rte_delay_us(10);
+ val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
+ if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
+ break;
+ }
+
+ if (i >= VFRESET_MAX_WAIT_CNT) {
+ PMD_DRV_LOG(ERR, "VF reset timeout");
+ return -ETIMEDOUT;
+ }
+
+ /* If this is not the first reset, do the cleanup first */
+ if (vf->vsi) {
+ /* Disable queues */
+ memset(&qsel, 0, sizeof(qsel));
+ for (i = 0; i < vf->vsi->nb_qps; i++)
+ qsel.rx_queues |= 1 << i;
+ qsel.tx_queues = qsel.rx_queues;
+ ret = i40e_pf_host_switch_queues(vf, &qsel, false);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Disable VF queues failed");
+ return -EFAULT;
+ }
+
+ /* Disable VF interrupt setting */
+ vf_msix_num = hw->func_caps.num_msix_vectors_vf;
+ for (i = 0; i < vf_msix_num; i++) {
+ if (!i)
+ val = I40E_VFINT_DYN_CTL0(vf_id);
+ else
+ val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
+ (vf_id)) + (i - 1));
+ I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ }
+ I40E_WRITE_FLUSH(hw);
+
+ /* remove VSI */
+ ret = i40e_vsi_release(vf->vsi);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Release VSI failed");
+ return -EFAULT;
+ }
+ }
+
+#define I40E_VF_PCI_ADDR 0xAA
+#define I40E_VF_PEND_MASK 0x20
+ /* Check the pending transactions of this VF */
+ /* Use absolute VF id, refer to datasheet for details */
+ I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
+ (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
+ rte_delay_us(1);
+ val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
+ if ((val & I40E_VF_PEND_MASK) == 0)
+ break;
+ }
+
+ if (i >= VFRESET_MAX_WAIT_CNT) {
+ PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
+ return -ETIMEDOUT;
+ }
+
+ /* Reset is done; set the COMPLETED flag and clear the reset bit */
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED);
+ val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
+ val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
+ vf->reset_cnt++;
+ I40E_WRITE_FLUSH(hw);
+
+ /* Allocate resources again */
+ vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
+ vf->pf->main_vsi, vf->vf_idx);
+ if (vf->vsi == NULL) {
+ PMD_DRV_LOG(ERR, "Add vsi failed");
+ return -EFAULT;
+ }
+
+ ret = i40e_pf_vf_queues_mapping(vf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "queue mapping error");
+ i40e_vsi_release(vf->vsi);
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+static int
+i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
+ uint32_t opcode,
+ uint32_t retval,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
+ int ret;
+
+ ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
+ msg, msglen, NULL);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Fail to send message to VF, err %u",
+ hw->aq.asq_last_status);
+ }
+
+ return ret;
+}
+
+static void
+i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf)
+{
+ struct i40e_virtchnl_version_info info;
+
+ info.major = I40E_DPDK_VERSION_MAJOR;
+ info.minor = I40E_DPDK_VERSION_MINOR;
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_SUCCESS, (uint8_t *)&info, sizeof(info));
+}
+
+static int
+i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
+{
+ i40e_pf_host_vf_reset(vf, 1);
+
+ /* No feedback will be sent to VF for VFLR */
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
+{
+ struct i40e_virtchnl_vf_resource *vf_res = NULL;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint32_t len = 0;
+ int ret = I40E_SUCCESS;
+
+ /* only have 1 VSI by default */
+ len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_DEFAULT_VF_VSI_NUM *
+ sizeof(struct i40e_virtchnl_vsi_resource);
+
+ vf_res = rte_zmalloc("i40e_vf_res", len, 0);
+ if (vf_res == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate mem");
+ ret = I40E_ERR_NO_MEMORY;
+ vf_res = NULL;
+ len = 0;
+ goto send_msg;
+ }
+
+ vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
+ vf_res->num_queue_pairs = vf->vsi->nb_qps;
+ vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
+
+ /* Change the setting below if the PF host can support more VSIs per VF */
+ vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ /* Since the VF is assumed to have a single VSI for now, always return 0 */
+ vf_res->vsi_res[0].vsi_id = 0;
+ vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ ret, (uint8_t *)vf_res, len);
+ rte_free(vf_res);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
+ struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_rxq_info *rxq,
+ uint8_t crcstrip)
+{
+ int err = I40E_SUCCESS;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;
+
+ /* Clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+ rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.qlen = rxq->ring_len;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rx_ctx.dsize = 1;
+#endif
+
+ if (rxq->splithdr_enabled) {
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
+ rx_ctx.dtype = i40e_header_split_enabled;
+ } else {
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+ rx_ctx.dtype = i40e_header_split_none;
+ }
+ rx_ctx.rxmax = rxq->max_pkt_size;
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = crcstrip;
+ rx_ctx.l2tsel = 1;
+ rx_ctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
+ if (err != I40E_SUCCESS)
+ return err;
+ err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);
+
+ return err;
+}
+
+static int
+i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
+ struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_txq_info *txq)
+{
+ int err = I40E_SUCCESS;
+ struct i40e_hmc_obj_txq tx_ctx;
+ uint32_t qtx_ctl;
+ uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;
+
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(tx_ctx));
+ tx_ctx.new_context = 1;
+ tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ tx_ctx.qlen = txq->ring_len;
+ tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
+ err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
+ if (err != I40E_SUCCESS)
+ return err;
+
+ err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
+ if (err != I40E_SUCCESS)
+ return err;
+
+ /* Bind the queue with the VF function. Since TX/RX queues appear
+ * in pairs, only QTX_CTL needs to be set.
+ */
+ qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
+ ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK) |
+ (((vf->vf_idx + hw->func_caps.vf_base_id) <<
+ I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+ I40E_QTX_CTL_VFVM_INDX_MASK);
+ I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
+ I40E_WRITE_FLUSH(hw);
+
+ return I40E_SUCCESS;
+}
+
+static int
+i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ struct i40e_vsi *vsi = vf->vsi;
+ struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
+ (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+ struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ int i, ret = I40E_SUCCESS;
+
+ if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
+ vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
+ msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
+ vc_vqci->num_queue_pairs)) {
+ PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong\n");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ vc_qpi = vc_vqci->qpair;
+ for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
+ if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
+ vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ /*
+ * Apply the VF RX queue settings to the HMC.
+ * This handler serves I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, so the default
+ * CRC-strip setting is passed; the _EXT variant uses the extra information
+ * from 'struct i40e_virtchnl_queue_pair_ext_info' instead.
+ */
+ if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
+ I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ /* Apply VF TX queue setting to HMC */
+ if (i40e_pf_host_hmc_config_txq(hw, vf,
+ &vc_qpi[i].txq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ struct i40e_vsi *vsi = vf->vsi;
+ struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei =
+ (struct i40e_virtchnl_vsi_queue_config_ext_info *)msg;
+ struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
+ int i, ret = I40E_SUCCESS;
+
+ if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
+ vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
+ msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
+ vc_vqcei->num_queue_pairs)) {
+ PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong\n");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ vc_qpei = vc_vqcei->qpair;
+ for (i = 0; i < vc_vqcei->num_queue_pairs; i++) {
+ if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 ||
+ vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ /*
+ * Apply the VF RX queue settings to the HMC.
+ * For I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT the extra information in
+ * 'struct i40e_virtchnl_queue_pair_ext_info' is available, so the
+ * CRC-strip setting is taken from the VF request rather than the default.
+ */
+ if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
+ vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ /* Apply VF TX queue setting to HMC */
+ if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) !=
+ I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
+ uint8_t *msg, uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_irq_map_info *irqmap =
+ (struct i40e_virtchnl_irq_map_info *)msg;
+
+ if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
+ PMD_DRV_LOG(ERR, "buffer too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ /* Assume the VF has only 1 vector to bind to all queues */
+ if (irqmap->num_vectors != 1) {
+ PMD_DRV_LOG(ERR, "DPDK host only supports 1 vector");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ if (irqmap->vecmap[0].vector_id == 0) {
+ PMD_DRV_LOG(ERR, "DPDK host don't support use IRQ0");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ /* The MSIX interrupt is stored as an index within the VF's vector range */
+ vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
+
+ /* We don't care how the TX/RX queues are mapped to this vector;
+ * simply link all VF RX queues together. Only the mapping is done
+ * here; the VF can enable/disable the interrupt by itself.
+ */
+ i40e_vsi_queues_bind_intr(vf->vsi);
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_queue_select *qsel,
+ bool on)
+{
+ int ret = I40E_SUCCESS;
+ int i;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t baseq = vf->vsi->base_queue;
+
+ if (qsel->rx_queues + qsel->tx_queues == 0)
+ return I40E_ERR_PARAM;
+
+ /* Always enable RX first and disable it last */
+ /* Enable RX when switching on */
+ if (on) {
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+ if (qsel->rx_queues & (1 << i)) {
+ ret = i40e_switch_rx_queue(hw, baseq + i, on);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+ }
+
+ /* Enable/Disable TX */
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+ if (qsel->tx_queues & (1 << i)) {
+ ret = i40e_switch_tx_queue(hw, baseq + i, on);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+
+ /* Disable RX last when switching off */
+ if (!on) {
+ /* disable RX */
+ for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
+ if (qsel->rx_queues & (1 << i)) {
+ ret = i40e_switch_rx_queue(hw, baseq + i, on);
+ if (ret != I40E_SUCCESS)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_queue_select *q_sel =
+ (struct i40e_virtchnl_queue_select *)msg;
+
+ if (msg == NULL || msglen != sizeof(*q_sel)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ ret = i40e_pf_host_switch_queues(vf, q_sel, true);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_queue_select *q_sel =
+ (struct i40e_virtchnl_queue_select *)msg;
+
+ if (msg == NULL || msglen != sizeof(*q_sel)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ ret = i40e_pf_host_switch_queues(vf, q_sel, false);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+
+static int
+i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_ether_addr_list *addr_list =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct i40e_mac_filter_info filter;
+ int i;
+ struct ether_addr *mac;
+
+ memset(&filter, 0 , sizeof(struct i40e_mac_filter_info));
+
+ if (msg == NULL || msglen <= sizeof(*addr_list)) {
+ PMD_DRV_LOG(ERR, "add_ether_address argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ mac = (struct ether_addr *)(addr_list->list[i].addr);
+ (void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ if (!is_valid_assigned_ether_addr(mac) ||
+ i40e_vsi_add_mac(vf->vsi, &filter)) {
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_ether_addr_list *addr_list =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ int i;
+ struct ether_addr *mac;
+
+ if (msg == NULL || msglen <= sizeof(*addr_list)) {
+ PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ mac = (struct ether_addr *)(addr_list->list[i].addr);
+ if (!is_valid_assigned_ether_addr(mac) ||
+ i40e_vsi_delete_mac(vf->vsi, mac)) {
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto send_msg;
+ }
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
+ uint8_t *msg, uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ int i;
+ uint16_t *vid;
+
+ if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
+ PMD_DRV_LOG(ERR, "add_vlan argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ vid = vlan_filter_list->vlan_id;
+
+ for (i = 0; i < vlan_filter_list->num_elements; i++) {
+ ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
+ if (ret != I40E_SUCCESS)
+ goto send_msg;
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ int i;
+ uint16_t *vid;
+
+ if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
+ PMD_DRV_LOG(ERR, "delete_vlan argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ vid = vlan_filter_list->vlan_id;
+ for (i = 0; i < vlan_filter_list->num_elements; i++) {
+ ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
+ if (ret != I40E_SUCCESS)
+ goto send_msg;
+ }
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_config_promisc_mode(
+ struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_promisc_info *promisc =
+ (struct i40e_virtchnl_promisc_info *)msg;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ bool unicast = FALSE, multicast = FALSE;
+
+ if (msg == NULL || msglen != sizeof(*promisc)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ unicast = TRUE;
+ ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
+ vf->vsi->seid, unicast, NULL);
+ if (ret != I40E_SUCCESS)
+ goto send_msg;
+
+ if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+ multicast = TRUE;
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
+ multicast, NULL);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf)
+{
+ i40e_update_vsi_stats(vf->vsi);
+
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_SUCCESS, (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
+
+ return I40E_SUCCESS;
+}
+
+static void
+i40e_pf_host_process_cmd_get_link_status(struct i40e_pf_vf *vf)
+{
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vf->pf->main_vsi);
+
+ /* Update link status first to acquire latest link change */
+ i40e_dev_link_update(dev, 1);
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_LINK_STAT,
+ I40E_SUCCESS, (uint8_t *)&dev->data->dev_link,
+ sizeof(struct rte_eth_link));
+}
+
+static int
+i40e_pf_host_process_cmd_cfg_vlan_offload(
+ struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_vlan_offload_info *offload =
+ (struct i40e_virtchnl_vlan_offload_info *)msg;
+
+ if (msg == NULL || msglen != sizeof(*offload)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ ret = i40e_vsi_config_vlan_stripping(vf->vsi,
+ !!offload->enable_vlan_strip);
+ if (ret != 0)
+ PMD_DRV_LOG(ERR, "Failed to configure vlan stripping");
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+static int
+i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ int ret = I40E_SUCCESS;
+ struct i40e_virtchnl_pvid_info *tpid_info =
+ (struct i40e_virtchnl_pvid_info *)msg;
+
+ if (msg == NULL || msglen != sizeof(*tpid_info)) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
+ ret, NULL, 0);
+
+ return ret;
+}
+
+void
+i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
+ uint16_t abs_vf_id, uint32_t opcode,
+ __rte_unused uint32_t retval,
+ uint8_t *msg,
+ uint16_t msglen)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf_vf *vf;
+ /* The AdminQ passes the absolute VF id; translate it to the internal VF id */
+ uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
+
+ if (!dev || vf_id > pf->vf_num - 1 || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "invalid argument");
+ return;
+ }
+
+ vf = &pf->vfs[vf_id];
+ if (!vf->vsi) {
+ PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
+ i40e_pf_host_send_msg_to_vf(vf, opcode,
+ I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
+ return;
+ }
+
+ switch (opcode) {
+ case I40E_VIRTCHNL_OP_VERSION :
+ PMD_DRV_LOG(INFO, "OP_VERSION received");
+ i40e_pf_host_process_cmd_version(vf);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF :
+ PMD_DRV_LOG(INFO, "OP_RESET_VF received");
+ i40e_pf_host_process_cmd_reset_vf(vf);
+ break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
+ i40e_pf_host_process_cmd_get_vf_resource(vf);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
+ i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
+ i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
+ msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
+ i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
+ i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
+ i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
+ i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
+ i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
+ i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
+ i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
+ i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ PMD_DRV_LOG(INFO, "OP_GET_STATS received");
+ i40e_pf_host_process_cmd_get_stats(vf);
+ break;
+ case I40E_VIRTCHNL_OP_GET_LINK_STAT:
+ PMD_DRV_LOG(INFO, "OP_GET_LINK_STAT received");
+ i40e_pf_host_process_cmd_get_link_status(vf);
+ break;
+ case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
+ PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
+ i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
+ PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
+ i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen);
+ break;
+ /* Do not add supported commands below this point; the cases
+ * below return an error code to the VF.
+ */
+ case I40E_VIRTCHNL_OP_FCOE:
+ PMD_DRV_LOG(ERR, "OP_FCOE received, not supported");
+ default:
+ PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
+ i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
+ NULL, 0);
+ break;
+ }
+}
+
+int
+i40e_pf_host_init(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ int ret, i;
+ uint32_t val;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /**
+ * Return if SR-IOV is not enabled, the VF number is not configured,
+ * or no queues are assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
+ return I40E_SUCCESS;
+
+ /* Allocate memory to store VF structure */
+ pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
+ if (pf->vfs == NULL)
+ return -ENOMEM;
+
+ /* Disable irq0 for VFR event */
+ i40e_pf_disable_irq0(hw);
+
+ /* Disable VF link status interrupt */
+ val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
+ val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+ I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+ I40E_WRITE_FLUSH(hw);
+
+ for (i = 0; i < pf->vf_num; i++) {
+ pf->vfs[i].pf = pf;
+ pf->vfs[i].state = I40E_VF_INACTIVE;
+ pf->vfs[i].vf_idx = i;
+ ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
+ if (ret != I40E_SUCCESS)
+ goto fail;
+ }
+
+ /* restore irq0 */
+ i40e_pf_enable_irq0(hw);
+
+ return I40E_SUCCESS;
+
+fail:
+ rte_free(pf->vfs);
+ i40e_pf_enable_irq0(hw);
+
+ return ret;
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_pf.h b/src/dpdk_lib18/librte_pmd_i40e/i40e_pf.h
new file mode 100755
index 00000000..8bf83c21
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e_pf.h
@@ -0,0 +1,127 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_PF_H_
+#define _I40E_PF_H_
+
+/* VERSION info exchanged between the VF and the PF host. When the VF works
+ * with the kernel PF driver, it reads I40E_VIRTCHNL_VERSION_MAJOR/MINOR.
+ * When it works with a DPDK PF host, it reads the version below. The VF can
+ * then tell which host it is talking to and use the proper protocol to
+ * communicate.
+ */
+#define I40E_DPDK_SIGNATURE ('D' << 24 | 'P' << 16 | 'D' << 8 | 'K')
+#define I40E_DPDK_VERSION_MAJOR I40E_DPDK_SIGNATURE
+#define I40E_DPDK_VERSION_MINOR 0
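+
+/*
+ * A minimal sketch of how a VF could tell the two host types apart once
+ * I40E_VIRTCHNL_OP_VERSION completes, assuming the reply has been copied
+ * into a local 'struct i40e_virtchnl_version_info ver':
+ *
+ * if (ver.major == I40E_DPDK_VERSION_MAJOR)
+ *         // DPDK PF host: the extended opcodes below are available
+ * else
+ *         // kernel PF host: stick to the standard virtchnl opcodes
+ */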
+
+/* Default number of VSIs that a VF can contain */
+#define I40E_DEFAULT_VF_VSI_NUM 1
+
+#define I40E_DPDK_OFFSET 0x100
+
+enum i40e_pf_vfr_state {
+ I40E_PF_VFR_INPROGRESS = 0,
+ I40E_PF_VFR_COMPLETED = 1,
+};
+
+/* DPDK PF driver specific commands to the VF */
+enum i40e_virtchnl_ops_dpdk {
+ /*
+ * Keep some gap between Linux PF commands and
+ * DPDK PF extended commands.
+ */
+ I40E_VIRTCHNL_OP_GET_LINK_STAT = I40E_VIRTCHNL_OP_VERSION +
+ I40E_DPDK_OFFSET,
+ I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+ I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+};
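+
+/*
+ * Note on numbering: with I40E_DPDK_OFFSET = 0x100, the first DPDK-specific
+ * opcode is I40E_VIRTCHNL_OP_VERSION + 0x100 and the remaining entries
+ * simply increment from there (assuming I40E_VIRTCHNL_OP_VERSION is 1 in
+ * the base virtchnl definition, GET_LINK_STAT would be 0x101,
+ * CFG_VLAN_OFFLOAD 0x102, and so on), keeping them well clear of the
+ * opcode range used by the kernel PF driver.
+ */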
+
+/* A structure to support extended info of a receive queue. */
+struct i40e_virtchnl_rxq_ext_info {
+ uint8_t crcstrip;
+};
+
+/*
+ * A structure to support extended info of queue pairs; compared to the
+ * original 'struct i40e_virtchnl_queue_pair_info', an additional field is added.
+ */
+struct i40e_virtchnl_queue_pair_ext_info {
+ /* vsi_id and queue_id should be identical for both rx and tx queues.*/
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+ struct i40e_virtchnl_rxq_ext_info rxq_ext;
+};
+
+/*
+ * A structure to support extended info of VSI queue pairs. It uses
+ * 'struct i40e_virtchnl_queue_pair_ext_info' in place of the original
+ * 'struct i40e_virtchnl_queue_pair_info'.
+ */
+struct i40e_virtchnl_vsi_queue_config_ext_info {
+ uint16_t vsi_id;
+ uint16_t num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_ext_info qpair[0];
+};
+
+struct i40e_virtchnl_vlan_offload_info {
+ uint16_t vsi_id;
+ uint8_t enable_vlan_strip;
+ uint8_t reserved;
+};
+
+/*
+ * Macro to calculate the memory size for configuring VSI queues
+ * via virtual channel.
+ */
+#define I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(x, n) \
+ (sizeof(*(x)) + sizeof((x)->qpair[0]) * (n))
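+
+/*
+ * For example, validating a CONFIG_VSI_QUEUES(_EXT) message carrying n
+ * queue pairs needs the fixed header plus n trailing qpair[] elements,
+ * which is exactly what the macro computes:
+ *
+ * len = I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, n);
+ * // == sizeof(*vc_vqcei) + n * sizeof(vc_vqcei->qpair[0])
+ *
+ * i40e_pf_host_process_cmd_config_vsi_queues() and its _ext variant use
+ * this as the minimum acceptable msglen.
+ */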
+
+/*
+ * I40E_VIRTCHNL_OP_CFG_VLAN_PVID
+ * The VF sends this message to enable or disable the PVID. For an enable
+ * operation the PVID must be specified. The PF returns a status code
+ * in retval.
+ */
+struct i40e_virtchnl_pvid_info {
+ uint16_t vsi_id;
+ struct i40e_vsi_vlan_pvid_info info;
+};
+
+int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset);
+void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
+ uint16_t abs_vf_id, uint32_t opcode,
+ __rte_unused uint32_t retval,
+ uint8_t *msg, uint16_t msglen);
+int i40e_pf_host_init(struct rte_eth_dev *dev);
+
+#endif /* _I40E_PF_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.c b/src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.c
new file mode 100755
index 00000000..2beae3c8
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.c
@@ -0,0 +1,2650 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_udp.h>
+
+#include "i40e_logs.h"
+#include "i40e/i40e_prototype.h"
+#include "i40e/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#define I40E_MIN_RING_DESC 64
+#define I40E_MAX_RING_DESC 4096
+#define I40E_ALIGN 128
+#define DEFAULT_TX_RS_THRESH 32
+#define DEFAULT_TX_FREE_THRESH 32
+#define I40E_MAX_PKT_TYPE 256
+
+#define I40E_VLAN_TAG_SIZE 4
+#define I40E_TX_MAX_BURST 32
+
+#define I40E_DMA_MEM_ALIGN 4096
+
+#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
+ ETH_TXQ_FLAGS_NOOFFLOADS)
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+
+#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
+ (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+ ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
+
+static const struct rte_memzone *
+i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
+ const char *ring_name,
+ uint16_t queue_id,
+ uint32_t ring_size,
+ int socket_id);
+static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+/* Translate the rx descriptor status to pkt flags */
+static inline uint64_t
+i40e_rxd_status_to_pkt_flags(uint64_t qword)
+{
+ uint64_t flags;
+
+ /* Check if VLAN packet */
+ flags = qword & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
+ PKT_RX_VLAN_PKT : 0;
+
+ /* Check if RSS_HASH */
+ flags |= (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+ I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
+ I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
+
+ /* Check if FDIR Match */
+ flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ?
+ PKT_RX_FDIR : 0);
+
+ return flags;
+}
+
+static inline uint64_t
+i40e_rxd_error_to_pkt_flags(uint64_t qword)
+{
+ uint64_t flags = 0;
+ uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);
+
+#define I40E_RX_ERR_BITS 0x3f
+ if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
+ return flags;
+ /* If RXE bit set, all other status bits are meaningless */
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ flags |= PKT_RX_MAC_ERR;
+ return flags;
+ }
+
+ /* If RECIPE bit set, all other status indications should be ignored */
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RECIPE_SHIFT))) {
+ flags |= PKT_RX_RECIP_ERR;
+ return flags;
+ }
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT)))
+ flags |= PKT_RX_HBUF_OVERFLOW;
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
+ flags |= PKT_RX_IP_CKSUM_BAD;
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
+ flags |= PKT_RX_EIP_CKSUM_BAD;
+ if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_OVERSIZE_SHIFT)))
+ flags |= PKT_RX_OVERSIZE;
+
+ return flags;
+}
+
+/* Translate pkt types to pkt flags */
+static inline uint64_t
+i40e_rxd_ptype_to_pkt_flags(uint64_t qword)
+{
+ uint8_t ptype = (uint8_t)((qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT);
+ static const uint64_t ip_ptype_map[I40E_MAX_PKT_TYPE] = {
+ 0, /* PTYPE 0 */
+ 0, /* PTYPE 1 */
+ 0, /* PTYPE 2 */
+ 0, /* PTYPE 3 */
+ 0, /* PTYPE 4 */
+ 0, /* PTYPE 5 */
+ 0, /* PTYPE 6 */
+ 0, /* PTYPE 7 */
+ 0, /* PTYPE 8 */
+ 0, /* PTYPE 9 */
+ 0, /* PTYPE 10 */
+ 0, /* PTYPE 11 */
+ 0, /* PTYPE 12 */
+ 0, /* PTYPE 13 */
+ 0, /* PTYPE 14 */
+ 0, /* PTYPE 15 */
+ 0, /* PTYPE 16 */
+ 0, /* PTYPE 17 */
+ 0, /* PTYPE 18 */
+ 0, /* PTYPE 19 */
+ 0, /* PTYPE 20 */
+ 0, /* PTYPE 21 */
+ PKT_RX_IPV4_HDR, /* PTYPE 22 */
+ PKT_RX_IPV4_HDR, /* PTYPE 23 */
+ PKT_RX_IPV4_HDR, /* PTYPE 24 */
+ 0, /* PTYPE 25 */
+ PKT_RX_IPV4_HDR, /* PTYPE 26 */
+ PKT_RX_IPV4_HDR, /* PTYPE 27 */
+ PKT_RX_IPV4_HDR, /* PTYPE 28 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 29 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 30 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 31 */
+ 0, /* PTYPE 32 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 33 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 34 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 35 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 36 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 37 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 38 */
+ 0, /* PTYPE 39 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 40 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 41 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 42 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 43 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 44 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 45 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 46 */
+ 0, /* PTYPE 47 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 48 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 49 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 50 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 51 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 52 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 53 */
+ 0, /* PTYPE 54 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 55 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 56 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 57 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 58 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 59 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 60 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 61 */
+ 0, /* PTYPE 62 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 63 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 64 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 65 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 66 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 67 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 68 */
+ 0, /* PTYPE 69 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 70 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 71 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 72 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 73 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 74 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 75 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 76 */
+ 0, /* PTYPE 77 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 78 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 79 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 80 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 81 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 82 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 83 */
+ 0, /* PTYPE 84 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 85 */
+ PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 86 */
+ PKT_RX_IPV4_HDR_EXT, /* PTYPE 87 */
+ PKT_RX_IPV6_HDR, /* PTYPE 88 */
+ PKT_RX_IPV6_HDR, /* PTYPE 89 */
+ PKT_RX_IPV6_HDR, /* PTYPE 90 */
+ 0, /* PTYPE 91 */
+ PKT_RX_IPV6_HDR, /* PTYPE 92 */
+ PKT_RX_IPV6_HDR, /* PTYPE 93 */
+ PKT_RX_IPV6_HDR, /* PTYPE 94 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 95 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 96 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 97 */
+ 0, /* PTYPE 98 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 99 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 100 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 101 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 102 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 103 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 104 */
+ 0, /* PTYPE 105 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 106 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 107 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 108 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 109 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 110 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 111 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 112 */
+ 0, /* PTYPE 113 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 114 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 115 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 116 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 117 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 118 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 119 */
+ 0, /* PTYPE 120 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 121 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 122 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 123 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 124 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 125 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 126 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 127 */
+ 0, /* PTYPE 128 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 129 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 130 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 131 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 132 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 133 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 134 */
+ 0, /* PTYPE 135 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 136 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 137 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 138 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 139 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 140 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 141 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 142 */
+ 0, /* PTYPE 143 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 144 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 145 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 146 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 147 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 148 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 149 */
+ 0, /* PTYPE 150 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 151 */
+ PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 152 */
+ PKT_RX_IPV6_HDR_EXT, /* PTYPE 153 */
+ 0, /* PTYPE 154 */
+ 0, /* PTYPE 155 */
+ 0, /* PTYPE 156 */
+ 0, /* PTYPE 157 */
+ 0, /* PTYPE 158 */
+ 0, /* PTYPE 159 */
+ 0, /* PTYPE 160 */
+ 0, /* PTYPE 161 */
+ 0, /* PTYPE 162 */
+ 0, /* PTYPE 163 */
+ 0, /* PTYPE 164 */
+ 0, /* PTYPE 165 */
+ 0, /* PTYPE 166 */
+ 0, /* PTYPE 167 */
+ 0, /* PTYPE 168 */
+ 0, /* PTYPE 169 */
+ 0, /* PTYPE 170 */
+ 0, /* PTYPE 171 */
+ 0, /* PTYPE 172 */
+ 0, /* PTYPE 173 */
+ 0, /* PTYPE 174 */
+ 0, /* PTYPE 175 */
+ 0, /* PTYPE 176 */
+ 0, /* PTYPE 177 */
+ 0, /* PTYPE 178 */
+ 0, /* PTYPE 179 */
+ 0, /* PTYPE 180 */
+ 0, /* PTYPE 181 */
+ 0, /* PTYPE 182 */
+ 0, /* PTYPE 183 */
+ 0, /* PTYPE 184 */
+ 0, /* PTYPE 185 */
+ 0, /* PTYPE 186 */
+ 0, /* PTYPE 187 */
+ 0, /* PTYPE 188 */
+ 0, /* PTYPE 189 */
+ 0, /* PTYPE 190 */
+ 0, /* PTYPE 191 */
+ 0, /* PTYPE 192 */
+ 0, /* PTYPE 193 */
+ 0, /* PTYPE 194 */
+ 0, /* PTYPE 195 */
+ 0, /* PTYPE 196 */
+ 0, /* PTYPE 197 */
+ 0, /* PTYPE 198 */
+ 0, /* PTYPE 199 */
+ 0, /* PTYPE 200 */
+ 0, /* PTYPE 201 */
+ 0, /* PTYPE 202 */
+ 0, /* PTYPE 203 */
+ 0, /* PTYPE 204 */
+ 0, /* PTYPE 205 */
+ 0, /* PTYPE 206 */
+ 0, /* PTYPE 207 */
+ 0, /* PTYPE 208 */
+ 0, /* PTYPE 209 */
+ 0, /* PTYPE 210 */
+ 0, /* PTYPE 211 */
+ 0, /* PTYPE 212 */
+ 0, /* PTYPE 213 */
+ 0, /* PTYPE 214 */
+ 0, /* PTYPE 215 */
+ 0, /* PTYPE 216 */
+ 0, /* PTYPE 217 */
+ 0, /* PTYPE 218 */
+ 0, /* PTYPE 219 */
+ 0, /* PTYPE 220 */
+ 0, /* PTYPE 221 */
+ 0, /* PTYPE 222 */
+ 0, /* PTYPE 223 */
+ 0, /* PTYPE 224 */
+ 0, /* PTYPE 225 */
+ 0, /* PTYPE 226 */
+ 0, /* PTYPE 227 */
+ 0, /* PTYPE 228 */
+ 0, /* PTYPE 229 */
+ 0, /* PTYPE 230 */
+ 0, /* PTYPE 231 */
+ 0, /* PTYPE 232 */
+ 0, /* PTYPE 233 */
+ 0, /* PTYPE 234 */
+ 0, /* PTYPE 235 */
+ 0, /* PTYPE 236 */
+ 0, /* PTYPE 237 */
+ 0, /* PTYPE 238 */
+ 0, /* PTYPE 239 */
+ 0, /* PTYPE 240 */
+ 0, /* PTYPE 241 */
+ 0, /* PTYPE 242 */
+ 0, /* PTYPE 243 */
+ 0, /* PTYPE 244 */
+ 0, /* PTYPE 245 */
+ 0, /* PTYPE 246 */
+ 0, /* PTYPE 247 */
+ 0, /* PTYPE 248 */
+ 0, /* PTYPE 249 */
+ 0, /* PTYPE 250 */
+ 0, /* PTYPE 251 */
+ 0, /* PTYPE 252 */
+ 0, /* PTYPE 253 */
+ 0, /* PTYPE 254 */
+ 0, /* PTYPE 255 */
+ };
+
+ return ip_ptype_map[ptype];
+}
+
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02
+#define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK 0x03
+#define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX 0x01
+
+static inline uint64_t
+i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
+{
+ uint64_t flags = 0;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ uint16_t flexbh, flexbl;
+
+ flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK;
+ flexbl = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT) &
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK;
+
+
+ if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
+ } else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
+ flags |= PKT_RX_FDIR_FLX;
+ }
+ if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
+ mb->hash.fdir.lo =
+ rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
+ flags |= PKT_RX_FDIR_FLX;
+ }
+#else
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
+#endif
+ return flags;
+}
+static inline void
+i40e_txd_enable_checksum(uint64_t ol_flags,
+ uint32_t *td_cmd,
+ uint32_t *td_offset,
+ uint8_t l2_len,
+ uint16_t l3_len,
+ uint8_t outer_l2_len,
+ uint16_t outer_l3_len,
+ uint32_t *cd_tunneling)
+{
+ if (!l2_len) {
+ PMD_DRV_LOG(DEBUG, "L2 length set to 0");
+ return;
+ }
+
+ if (!l3_len) {
+ PMD_DRV_LOG(DEBUG, "L3 length set to 0");
+ return;
+ }
+
+ /* UDP tunneling packet TX checksum offload */
+ if (unlikely(ol_flags & PKT_TX_UDP_TUNNEL_PKT)) {
+
+ *td_offset |= (outer_l2_len >> 1)
+ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ else if (ol_flags & PKT_TX_OUTER_IPV4)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ else if (ol_flags & PKT_TX_OUTER_IPV6)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ /* Now set the ctx descriptor fields */
+ *cd_tunneling |= (outer_l3_len >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ I40E_TXD_CTX_UDP_TUNNELING |
+ (l2_len >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ } else
+ *td_offset |= (l2_len >> 1)
+ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L3 checksum offloads */
+ if (ol_flags & PKT_TX_IPV4_CSUM) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV4) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV6) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+
+ /* Enable L4 checksum offloads */
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_UDP_CKSUM:
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+
+ return m;
+}
+
+/* Construct the tx flags */
+static inline uint64_t
+i40e_build_ctob(uint32_t td_cmd,
+ uint32_t td_offset,
+ unsigned int size,
+ uint32_t td_tag)
+{
+ return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+static inline int
+i40e_xmit_cleanup(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *sw_ring = txq->sw_ring;
+ volatile struct i40e_tx_desc *txd = txq->tx_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))) {
+ PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
+ "(port=%d queue=%d)", desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ return -1;
+ }
+
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
+
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+ return 0;
+}
+
+static inline int
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+check_rx_burst_bulk_alloc_preconditions(struct i40e_rx_queue *rxq)
+#else
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
+#endif
+{
+ int ret = 0;
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ if (!(rxq->rx_free_thresh >= RTE_PMD_I40E_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "RTE_PMD_I40E_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, RTE_PMD_I40E_RX_MAX_BURST);
+ ret = -EINVAL;
+ } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "rxq->nb_rx_desc=%d",
+ rxq->rx_free_thresh, rxq->nb_rx_desc);
+ ret = -EINVAL;
+ } else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
+ ret = -EINVAL;
+ } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
+ RTE_PMD_I40E_RX_MAX_BURST))) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "I40E_MAX_RING_DESC=%d, "
+ "RTE_PMD_I40E_RX_MAX_BURST=%d",
+ rxq->nb_rx_desc, I40E_MAX_RING_DESC,
+ RTE_PMD_I40E_RX_MAX_BURST);
+ ret = -EINVAL;
+ }
+#else
+ ret = -EINVAL;
+#endif
+
+ return ret;
+}
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+#define I40E_LOOK_AHEAD 8
+#if (I40E_LOOK_AHEAD != 8)
+#error "PMD I40E: I40E_LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ uint64_t qword1;
+ uint32_t rx_status;
+ int32_t s[I40E_LOOK_AHEAD], nb_dd;
+ int32_t i, j, nb_rx = 0;
+ uint64_t pkt_flags;
+
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ /* Make sure there is at least 1 packet to receive */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /**
+ * Scan LOOK_AHEAD descriptors at a time to determine which
+ * descriptors reference packets that are ready to be received.
+ */
+ for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; i+=I40E_LOOK_AHEAD,
+ rxdp += I40E_LOOK_AHEAD, rxep += I40E_LOOK_AHEAD) {
+ /* Read desc statuses backwards to avoid race condition */
+ for (j = I40E_LOOK_AHEAD - 1; j >= 0; j--) {
+ qword1 = rte_le_to_cpu_64(\
+ rxdp[j].wb.qword1.status_error_len);
+ s[j] = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ }
+
+ /* Compute how many status bits were set */
+ for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
+ nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf parameters */
+ for (j = 0; j < nb_dd; j++) {
+ mb = rxep[j].mbuf;
+ qword1 = rte_le_to_cpu_64(\
+ rxdp[j].wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->vlan_tci = rx_status &
+ (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
+ rte_le_to_cpu_16(\
+ rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
+ pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
+
+ mb->packet_type = (uint16_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT);
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ mb->hash.rss = rte_le_to_cpu_32(\
+ rxdp[j].wb.qword0.hi_dword.rss);
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb);
+
+ mb->ol_flags = pkt_flags;
+ }
+
+ for (j = 0; j < I40E_LOOK_AHEAD; j++)
+ rxq->rx_stage[i + j] = rxep[j].mbuf;
+
+ if (nb_dd != I40E_LOOK_AHEAD)
+ break;
+ }
+
+ /* Clear software ring entries */
+ for (i = 0; i < nb_rx; i++)
+ rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+
+ return nb_rx;
+}
+
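+/*
+ * The scan above works on fixed groups of I40E_LOOK_AHEAD descriptors:
+ * it counts the DD (descriptor done) bits of each group and stops after
+ * the first group that is not completely done.  The counting idea, as a
+ * standalone sketch over an array of 0/1 "done" flags (hypothetical
+ * helper; 'total' is assumed to be a multiple of 'group'):
+ *
+ *	static int
+ *	count_ready(const int *done, int total, int group)
+ *	{
+ *		int i, j, ready = 0;
+ *
+ *		for (i = 0; i < total; i += group) {
+ *			int in_group = 0;
+ *
+ *			for (j = 0; j < group; j++)
+ *				in_group += done[i + j];
+ *			ready += in_group;
+ *			if (in_group != group)
+ *				break;
+ *		}
+ *		return ready;
+ *	}
+ *
+ * Like the driver, this counts every done entry of the last, partially
+ * done group, but only fully done groups keep the scan going.
+ */
+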
+static inline uint16_t
+i40e_rx_fill_from_stage(struct i40e_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t i;
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ for (i = 0; i < nb_pkts; i++)
+ rx_pkts[i] = stage[i];
+
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+ return nb_pkts;
+}
+
+static inline int
+i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx, i;
+ uint64_t dma_addr;
+ int diag;
+
+ /* Allocate buffers in bulk */
+ alloc_idx = (uint16_t)(rxq->rx_free_trigger -
+ (rxq->rx_free_thresh - 1));
+ rxep = &(rxq->sw_ring[alloc_idx]);
+ diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0)) {
+ PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk");
+ return -ENOMEM;
+ }
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; i++) {
+ mb = rxep[i].mbuf;
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->next = NULL;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(\
+ RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+ rxdp[i].read.hdr_addr = dma_addr;
+ rxdp[i].read.pkt_addr = dma_addr;
+ }
+
+	/* Update the RX tail register */
+ rte_wmb();
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
+
+ rxq->rx_free_trigger =
+ (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+ return 0;
+}
+
+static inline uint16_t
+rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
+ uint16_t nb_rx = 0;
+
+ if (!nb_pkts)
+ return 0;
+
+ if (rxq->rx_nb_avail)
+ return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ nb_rx = (uint16_t)i40e_rx_scan_hw_ring(rxq);
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ if (i40e_rx_alloc_bufs(rxq) != 0) {
+ uint16_t i, j;
+
+ PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
+ "port_id=%u, queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
+ rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
+
+ return 0;
+ }
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ if (rxq->rx_nb_avail)
+ return i40e_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
+static uint16_t
+i40e_recv_pkts_bulk_alloc(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx = 0, n, count;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= RTE_PMD_I40E_RX_MAX_BURST))
+ return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ while (nb_pkts) {
+ n = RTE_MIN(nb_pkts, RTE_PMD_I40E_RX_MAX_BURST);
+ count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx = (uint16_t)(nb_rx + count);
+ nb_pkts = (uint16_t)(nb_pkts - count);
+ if (count < n)
+ break;
+ }
+
+ return nb_rx;
+}
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+
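+/*
+ * i40e_recv_pkts_bulk_alloc() above slices an oversized request into
+ * chunks of at most RTE_PMD_I40E_RX_MAX_BURST packets and stops after
+ * the first short chunk.  The same pattern in isolation (hypothetical
+ * names, illustration only):
+ *
+ *	uint16_t total = 0;
+ *	while (nb_pkts) {
+ *		uint16_t n = nb_pkts < max_burst ? nb_pkts : max_burst;
+ *		uint16_t got = recv_chunk(q, &pkts[total], n);
+ *
+ *		total = (uint16_t)(total + got);
+ *		nb_pkts = (uint16_t)(nb_pkts - got);
+ *		if (got < n)
+ *			break;
+ *	}
+ *	return total;
+ */
+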
+uint16_t
+i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq;
+ volatile union i40e_rx_desc *rx_ring;
+ volatile union i40e_rx_desc *rxdp;
+ union i40e_rx_desc rxd;
+ struct i40e_rx_entry *sw_ring;
+ struct i40e_rx_entry *rxe;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ uint16_t nb_rx;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint16_t rx_packet_len;
+ uint16_t rx_id, nb_hold;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+ /* Check the DD bit first */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ nmb = rte_rxmbuf_alloc(rxq->mp);
+ if (unlikely(!nmb))
+ break;
+ rxd = *rxdp;
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (unlikely(rx_id == rxq->nb_rx_desc))
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(sw_ring[rx_id].mbuf);
+
+ /**
+ * When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(&sw_ring[rx_id]);
+ }
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.pkt_addr = dma_addr;
+
+ rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = rx_packet_len;
+ rxm->data_len = rx_packet_len;
+ rxm->port = rxq->port_id;
+
+ rxm->vlan_tci = rx_status &
+ (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
+ rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
+ pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
+ rxm->packet_type = (uint16_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT);
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ rxm->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
+
+ rxm->ol_flags = pkt_flags;
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /**
+ * If the number of free RX descriptors is greater than the RX free
+	 * threshold of the queue, advance the receive tail register of the queue.
+ * Update that register with the value of the last processed RX
+ * descriptor minus 1.
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
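+/*
+ * Tail-register policy used above: descriptors handed back to hardware
+ * are only advertised once more than rx_free_thresh of them have been
+ * held back, and the tail is written one entry behind the next descriptor
+ * to be processed so it can never catch up with the head.  Roughly
+ * (hypothetical names, illustration only):
+ *
+ *	held += newly_processed;
+ *	if (held > free_thresh) {
+ *		tail = (next == 0) ? (uint16_t)(ring_size - 1)
+ *				   : (uint16_t)(next - 1);
+ *		write_tail_register(tail);
+ *		held = 0;
+ *	}
+ */
+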
+uint16_t
+i40e_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ volatile union i40e_rx_desc *rx_ring = rxq->rx_ring;
+ volatile union i40e_rx_desc *rxdp;
+ union i40e_rx_desc rxd;
+ struct i40e_rx_entry *sw_ring = rxq->sw_ring;
+ struct i40e_rx_entry *rxe;
+ struct rte_mbuf *first_seg = rxq->pkt_first_seg;
+ struct rte_mbuf *last_seg = rxq->pkt_last_seg;
+ struct rte_mbuf *nmb, *rxm;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ /* Check the DD bit */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ nmb = rte_rxmbuf_alloc(rxq->mp);
+ if (unlikely(!nmb))
+ break;
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(sw_ring[rx_id].mbuf);
+
+ /**
+ * When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(&sw_ring[rx_id]);
+ }
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+
+ /* Set data buffer address and data length of the mbuf */
+ rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.pkt_addr = dma_addr;
+ rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rxm->data_len = rx_packet_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /**
+ * If this is the first buffer of the received packet, set the
+ * pointer to the first mbuf of the packet and initialize its
+ * context. Otherwise, update the total length and the number
+ * of segments of the current scattered packet, and update the
+ * pointer to the last mbuf of the current packet.
+ */
+ if (!first_seg) {
+ first_seg = rxm;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = rx_packet_len;
+ } else {
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
+ rx_packet_len);
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /**
+ * If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT))) {
+ last_seg = rxm;
+ continue;
+ }
+
+ /**
+ * This is the last buffer of the received packet. If the CRC
+ * is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer. If part
+ * of the CRC is also contained in the previous mbuf, subtract
+ * the length of that CRC part from the data length of the
+ * previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= ETHER_CRC_LEN;
+ if (rx_packet_len <= ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len =
+ (uint16_t)(last_seg->data_len -
+ (ETHER_CRC_LEN - rx_packet_len));
+ last_seg->next = NULL;
+ } else
+ rxm->data_len = (uint16_t)(rx_packet_len -
+ ETHER_CRC_LEN);
+ }
+
+ first_seg->port = rxq->port_id;
+ first_seg->vlan_tci = (rx_status &
+ (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+ rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
+ pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
+ pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
+ first_seg->packet_type = (uint16_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT);
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ rxm->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
+
+ first_seg->ol_flags = pkt_flags;
+
+		/* Prefetch the data of the first segment. */
+ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+ first_seg->data_off));
+ rx_pkts[nb_rx++] = first_seg;
+ first_seg = NULL;
+ }
+
+ /* Record index of the next RX descriptor to probe. */
+ rxq->rx_tail = rx_id;
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ /**
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register. Update the RDT with the value of the last processed RX
+ * descriptor minus 1, to guarantee that the RDT register is never
+	 * equal to the RDH register, which creates a "full" ring situation
+ * from the hardware point of view.
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ rx_id = (uint16_t)(rx_id == 0 ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
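+/*
+ * CRC trimming on a scattered packet, as handled above: the CRC
+ * (ETHER_CRC_LEN, 4 bytes) may sit entirely in the last segment or
+ * straddle the last two segments.  Worked example with a 4-byte CRC:
+ *
+ *   - the last segment holds 10 bytes: keep it and trim it to 10 - 4 = 6;
+ *   - the last segment holds 2 bytes: it is CRC only, so free it,
+ *     decrement nb_segs, and trim the previous segment by the remaining
+ *     4 - 2 = 2 CRC bytes.
+ *
+ * In both cases pkt_len of the first segment is reduced by the full CRC
+ * length up front.
+ */
+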
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+i40e_calc_context_desc(uint64_t flags)
+{
+ uint64_t mask = 0ULL;
+
+	if (flags & PKT_TX_UDP_TUNNEL_PKT)
+ mask |= PKT_TX_UDP_TUNNEL_PKT;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ mask |= PKT_TX_IEEE1588_TMST;
+#endif
+ if (flags & mask)
+ return 1;
+
+ return 0;
+}
+
+uint16_t
+i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq;
+ struct i40e_tx_entry *sw_ring;
+ struct i40e_tx_entry *txe, *txn;
+ volatile struct i40e_tx_desc *txd;
+ volatile struct i40e_tx_desc *txr;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint32_t cd_tunneling_params;
+ uint16_t tx_id;
+ uint16_t nb_tx;
+ uint32_t td_cmd;
+ uint32_t td_offset;
+ uint32_t tx_flags;
+ uint32_t td_tag;
+ uint64_t ol_flags;
+ uint8_t l2_len;
+ uint16_t l3_len;
+ uint8_t outer_l2_len;
+ uint16_t outer_l3_len;
+ uint16_t nb_used;
+ uint16_t nb_ctx;
+ uint16_t tx_last;
+ uint16_t slen;
+ uint64_t buf_dma_addr;
+
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Check if the descriptor ring needs to be cleaned. */
+ if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh)
+ i40e_xmit_cleanup(txq);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ td_cmd = 0;
+ td_tag = 0;
+ td_offset = 0;
+ tx_flags = 0;
+
+ tx_pkt = *tx_pkts++;
+ RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+ ol_flags = tx_pkt->ol_flags;
+ l2_len = tx_pkt->l2_len;
+ l3_len = tx_pkt->l3_len;
+ outer_l2_len = tx_pkt->outer_l2_len;
+ outer_l3_len = tx_pkt->outer_l3_len;
+
+ /* Calculate the number of context descriptors needed. */
+ nb_ctx = i40e_calc_context_desc(ol_flags);
+
+ /**
+		 * The number of descriptors that must be allocated for a
+		 * packet equals the number of segments of that packet, plus
+		 * one context descriptor if needed.
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+ tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+
+ if (nb_used > txq->nb_tx_free) {
+ if (i40e_xmit_cleanup(txq) != 0) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ if (unlikely(nb_used > txq->tx_rs_thresh)) {
+ while (nb_used > txq->nb_tx_free) {
+ if (i40e_xmit_cleanup(txq) != 0) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /* Descriptor based VLAN insertion */
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ tx_flags |= tx_pkt->vlan_tci <<
+ I40E_TX_FLAG_L2TAG1_SHIFT;
+ tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
+ I40E_TX_FLAG_L2TAG1_SHIFT;
+ }
+
+ /* Always enable CRC offload insertion */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ /* Enable checksum offloading */
+ cd_tunneling_params = 0;
+ i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
+ l2_len, l3_len, outer_l2_len,
+ outer_l3_len,
+ &cd_tunneling_params);
+
+ if (unlikely(nb_ctx)) {
+ /* Setup TX context descriptor if required */
+ volatile struct i40e_tx_context_desc *ctx_txd =
+ (volatile struct i40e_tx_context_desc *)\
+ &txr[tx_id];
+ uint16_t cd_l2tag2 = 0;
+ uint64_t cd_type_cmd_tso_mss =
+ I40E_TX_DESC_DTYPE_CONTEXT;
+
+ txn = &sw_ring[txe->next_id];
+ RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+#ifdef RTE_LIBRTE_IEEE1588
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT);
+#endif
+ ctx_txd->tunneling_params =
+ rte_cpu_to_le_32(cd_tunneling_params);
+ ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
+ ctx_txd->type_cmd_tso_mss =
+ rte_cpu_to_le_64(cd_type_cmd_tso_mss);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+
+ if (txe->mbuf)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /* Setup TX Descriptor */
+ slen = m_seg->data_len;
+ buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
+ td_offset, slen, td_tag);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /* The last packet data descriptor needs End Of Packet (EOP) */
+ td_cmd |= I40E_TX_DESC_CMD_EOP;
+ txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+ if (txq->nb_tx_used >= txq->tx_rs_thresh) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
+
+ td_cmd |= I40E_TX_DESC_CMD_RS;
+
+ /* Update txq RS bit counters */
+ txq->nb_tx_used = 0;
+ }
+
+ txd->cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)td_cmd) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ }
+
+end_of_tx:
+ rte_wmb();
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) tx_id, (unsigned) nb_tx);
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
+
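+/*
+ * Report Status (RS) batching in i40e_xmit_pkts() above: the RS bit is
+ * only requested once every tx_rs_thresh consumed descriptors, so the
+ * hardware reports completions (and the driver later frees mbufs) in
+ * batches rather than per packet.  The bookkeeping reduced to its core
+ * (hypothetical names, illustration only):
+ *
+ *	used = (uint16_t)(used + descs_for_this_packet);
+ *	if (used >= rs_thresh) {
+ *		last_desc_cmd |= CMD_RS;
+ *		used = 0;
+ *	}
+ *
+ * where CMD_RS stands for the RS command bit set on the packet's last
+ * data descriptor.
+ */
+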
+static inline int __attribute__((always_inline))
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txep;
+ uint16_t i;
+
+ if (!(txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ return 0;
+
+ txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
+
+ for (i = 0; i < txq->tx_rs_thresh; i++)
+ rte_prefetch0((txep + i)->mbuf);
+
+ if (!(txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT)) {
+ for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ rte_mempool_put(txep->mbuf->pool, txep->mbuf);
+ txep->mbuf = NULL;
+ }
+ } else {
+ for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ rte_pktmbuf_free_seg(txep->mbuf);
+ txep->mbuf = NULL;
+ }
+ }
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
+ I40E_TX_DESC_CMD_EOP)
+
+/* Populate 4 descriptors with data from 4 mbufs */
+static inline void
+tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t dma_addr;
+ uint32_t i;
+
+ for (i = 0; i < 4; i++, txdp++, pkts++) {
+ dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
+ txdp->cmd_type_offset_bsz =
+ i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
+ (*pkts)->data_len, 0);
+ }
+}
+
+/* Populate 1 descriptor with data from 1 mbuf */
+static inline void
+tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t dma_addr;
+
+ dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
+ txdp->cmd_type_offset_bsz =
+ i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
+ (*pkts)->data_len, 0);
+}
+
+/* Fill hardware descriptor ring with mbuf data */
+static inline void
+i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq,
+ struct rte_mbuf **pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct i40e_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+ struct i40e_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ const int N_PER_LOOP = 4;
+ const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
+ int mainpart, leftover;
+ int i, j;
+
+ mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
+ leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
+ for (i = 0; i < mainpart; i += N_PER_LOOP) {
+ for (j = 0; j < N_PER_LOOP; ++j) {
+ (txep + i + j)->mbuf = *(pkts + i + j);
+ }
+ tx4(txdp + i, pkts + i);
+ }
+ if (unlikely(leftover > 0)) {
+ for (i = 0; i < leftover; ++i) {
+ (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
+ tx1(txdp + mainpart + i, pkts + mainpart + i);
+ }
+ }
+}
+
+static inline uint16_t
+tx_xmit_pkts(struct i40e_tx_queue *txq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct i40e_tx_desc *txr = txq->tx_ring;
+ uint16_t n = 0;
+
+ /**
+ * Begin scanning the H/W ring for done descriptors when the number
+ * of available descriptors drops below tx_free_thresh. For each done
+ * descriptor, free the associated buffer.
+ */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ /* Use available descriptor only */
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(!nb_pkts))
+ return 0;
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+ if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
+ n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
+ i40e_tx_fill_hw_ring(txq, tx_pkts, n);
+ txr[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_tail = 0;
+ }
+
+ /* Fill hardware descriptor ring with mbuf data */
+ i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
+
+	/* Determine whether the RS bit needs to be set */
+ if (txq->tx_tail > txq->tx_next_rs) {
+ txr[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ if (txq->tx_next_rs >= txq->nb_tx_desc)
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+ }
+
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+
+ /* Update the tx tail register */
+ rte_wmb();
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+static uint16_t
+i40e_xmit_pkts_simple(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+
+ if (likely(nb_pkts <= I40E_TX_MAX_BURST))
+ return tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
+ tx_pkts, nb_pkts);
+
+ while (nb_pkts) {
+ uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
+ I40E_TX_MAX_BURST);
+
+ ret = tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
+ &tx_pkts[nb_tx], num);
+ nb_tx = (uint16_t)(nb_tx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+/*
+ * Find the VSI a queue belongs to. 'queue_idx' is the queue index as the
+ * application sees it, i.e. a sequential numbering. From the driver's
+ * perspective the layout is different: for example, q0 belongs to the FDIR
+ * VSI, q1-q64 to the MAIN VSI, q65-q96 to SRIOV VSIs and q97-q128 to VMDQ
+ * VSIs. An application running on the host can use q1-q64 and q97-q128,
+ * 96 queues in total, and addresses them with queue_idx 0 to 95, while the
+ * real queue indexes differ. This function maps an application queue index
+ * to the VSI that queue belongs to.
+ */
+static struct i40e_vsi*
+i40e_pf_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
+{
+ /* the queue in MAIN VSI range */
+ if (queue_idx < pf->main_vsi->nb_qps)
+ return pf->main_vsi;
+
+ queue_idx -= pf->main_vsi->nb_qps;
+
+ /* queue_idx is greater than VMDQ VSIs range */
+ if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) {
+ PMD_INIT_LOG(ERR, "queue_idx out of range. VMDQ configured?");
+ return NULL;
+ }
+
+ return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi;
+}
+
+static uint16_t
+i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
+{
+ /* the queue in MAIN VSI range */
+ if (queue_idx < pf->main_vsi->nb_qps)
+ return queue_idx;
+
+ /* It's VMDQ queues */
+ queue_idx -= pf->main_vsi->nb_qps;
+
+ if (pf->nb_cfg_vmdq_vsi)
+ return queue_idx % pf->vmdq_nb_qps;
+ else {
+		PMD_INIT_LOG(ERR, "Failed to get queue offset");
+ return (uint16_t)(-1);
+ }
+}
+
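+/*
+ * Worked example of the two mappings above (queue counts are
+ * hypothetical): with 64 queue pairs on the MAIN VSI and 4 per VMDQ VSI,
+ * application queue index 70 is outside the MAIN range, so 70 - 64 = 6,
+ * which lands in VMDQ VSI 6 / 4 = 1 at offset 6 % 4 = 2.  Application
+ * queue index 10 stays on the MAIN VSI at offset 10.
+ */
+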
+int
+i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err = -1;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+		/* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ }
+ }
+
+ return err;
+}
+
+int
+i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct i40e_rx_queue *rxq;
+ int err;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ /*
+		 * rx_queue_id is the queue id the application refers to, while
+ * rxq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ }
+
+ return 0;
+}
+
+int
+i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ int err = -1;
+ struct i40e_tx_queue *txq;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /*
+		 * tx_queue_id is the queue id the application refers to, while
+		 * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ }
+
+ return err;
+}
+
+int
+i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct i40e_tx_queue *txq;
+ int err;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /*
+		 * tx_queue_id is the queue id the application refers to, while
+ * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
+
+ if (err) {
+			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
+ }
+
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ }
+
+ return 0;
+}
+
+int
+i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_rx_queue *rxq;
+ const struct rte_memzone *rz;
+ uint32_t ring_size;
+ uint16_t len;
+ int use_def_burst_func = 1;
+
+ if (hw->mac.type == I40E_MAC_VF) {
+ struct i40e_vf *vf =
+ I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ vsi = &vf->vsi;
+ } else
+ vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+
+ if (vsi == NULL) {
+ PMD_DRV_LOG(ERR, "VSI not available or queue "
+ "index exceeds the maximum");
+ return I40E_ERR_PARAM;
+ }
+ if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
+ PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
+ "invalid", nb_desc);
+ return I40E_ERR_PARAM;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("i40e rx queue",
+ sizeof(struct i40e_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "rx queue data structure");
+ return (-ENOMEM);
+ }
+ rxq->mp = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ if (hw->mac.type == I40E_MAC_VF)
+ rxq->reg_idx = queue_idx;
+ else /* PF device */
+ rxq->reg_idx = vsi->base_queue +
+ i40e_get_queue_offset_by_qindex(pf, queue_idx);
+
+ rxq->port_id = dev->data->port_id;
+ rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
+ 0 : ETHER_CRC_LEN);
+ rxq->drop_en = rx_conf->rx_drop_en;
+ rxq->vsi = vsi;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+
+	/* Allocate the maximum number of RX ring hardware descriptors. */
+ ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+ rz = i40e_ring_dma_zone_reserve(dev,
+ "rx_ring",
+ queue_idx,
+ ring_size,
+ socket_id);
+ if (!rz) {
+ i40e_dev_rx_queue_release(rxq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
+ return (-ENOMEM);
+ }
+
+ /* Zero all the descriptors in the ring. */
+ memset(rz->addr, 0, ring_size);
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+ rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#else
+ rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
+#endif
+
+ rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
+#else
+ len = nb_desc;
+#endif
+
+ /* Allocate the software ring. */
+ rxq->sw_ring =
+ rte_zmalloc_socket("i40e rx sw ring",
+ sizeof(struct i40e_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ i40e_dev_rx_queue_release(rxq);
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
+ return (-ENOMEM);
+ }
+
+ i40e_reset_rx_queue(rxq);
+ rxq->q_set = TRUE;
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
+
+ if (!use_def_burst_func && !dev->data->scattered_rx) {
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "not satisfied, Scattered Rx is requested, "
+ "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+ "not enabled on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ }
+
+ return 0;
+}
+
+void
+i40e_dev_rx_queue_release(void *rxq)
+{
+ struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
+
+ if (!q) {
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
+ return;
+ }
+
+ i40e_rx_queue_release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_free(q);
+}
+
+uint32_t
+i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define I40E_RXQ_SCAN_INTERVAL 4
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_queue *rxq;
+ uint16_t desc = 0;
+
+ if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
+ PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
+ return 0;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+ while ((desc < rxq->nb_rx_desc) &&
+ ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
+ (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ /**
+		 * Check the DD bit of one RX descriptor in every group of 4,
+		 * to avoid checking too frequently and degrading performance
+		 * too much.
+ */
+ desc += I40E_RXQ_SCAN_INTERVAL;
+ rxdp += I40E_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
+int
+i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint16_t desc;
+ int ret;
+
+ if (unlikely(offset >= rxq->nb_rx_desc)) {
+		PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
+ return 0;
+ }
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &(rxq->rx_ring[desc]);
+
+ ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
+ (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
+
+ return ret;
+}
+
+int
+i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tx_queue *txq;
+ const struct rte_memzone *tz;
+ uint32_t ring_size;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+
+ if (hw->mac.type == I40E_MAC_VF) {
+ struct i40e_vf *vf =
+ I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ vsi = &vf->vsi;
+ } else
+ vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+
+ if (vsi == NULL) {
+ PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
+ "exceeds the maximum", queue_idx);
+ return I40E_ERR_PARAM;
+ }
+
+ if (((nb_desc * sizeof(struct i40e_tx_desc)) % I40E_ALIGN) != 0 ||
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
+ PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
+ "invalid", nb_desc);
+ return I40E_ERR_PARAM;
+ }
+
+ /**
+ * The following two parameters control the setting of the RS bit on
+ * transmit descriptors. TX descriptors will have their RS bit set
+ * after txq->tx_rs_thresh descriptors have been used. The TX
+ * descriptor ring will be cleaned after txq->tx_free_thresh
+ * descriptors are used or if the number of descriptors required to
+ * transmit a packet is greater than the number of free TX descriptors.
+ *
+ * The following constraints must be satisfied:
+ * - tx_rs_thresh must be greater than 0.
+ * - tx_rs_thresh must be less than the size of the ring minus 2.
+ * - tx_rs_thresh must be less than or equal to tx_free_thresh.
+ * - tx_rs_thresh must be a divisor of the ring size.
+ * - tx_free_thresh must be greater than 0.
+ * - tx_free_thresh must be less than the size of the ring minus 3.
+ *
+ * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+	 * race condition, hence the maximum threshold constraints. When either
+	 * value is set to zero, the default value is used.
+ */
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ if (tx_rs_thresh >= (nb_desc - 2)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+ "number of TX descriptors minus 2. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if (tx_free_thresh >= (nb_desc - 3)) {
+		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
+			     "number of TX descriptors minus 3. "
+ "(tx_free_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
+ "equal to tx_free_thresh. (tx_free_thresh=%u"
+ " tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if ((nb_desc % tx_rs_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
+ "number of TX descriptors. (tx_rs_thresh=%u"
+ " port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+ if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
+ PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+ "tx_rs_thresh is greater than 1. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("i40e tx queue",
+ sizeof(struct i40e_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure");
+ return (-ENOMEM);
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+ tz = i40e_ring_dma_zone_reserve(dev,
+ "tx_ring",
+ queue_idx,
+ ring_size,
+ socket_id);
+ if (!tz) {
+ i40e_dev_tx_queue_release(txq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
+ return (-ENOMEM);
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_rs_thresh = tx_rs_thresh;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->pthresh = tx_conf->tx_thresh.pthresh;
+ txq->hthresh = tx_conf->tx_thresh.hthresh;
+ txq->wthresh = tx_conf->tx_thresh.wthresh;
+ txq->queue_id = queue_idx;
+ if (hw->mac.type == I40E_MAC_VF)
+ txq->reg_idx = queue_idx;
+ else /* PF device */
+ txq->reg_idx = vsi->base_queue +
+ i40e_get_queue_offset_by_qindex(pf, queue_idx);
+
+ txq->port_id = dev->data->port_id;
+ txq->txq_flags = tx_conf->txq_flags;
+ txq->vsi = vsi;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+ txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#else
+ txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+#endif
+ txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+
+ /* Allocate software ring */
+ txq->sw_ring =
+ rte_zmalloc_socket("i40e tx sw ring",
+ sizeof(struct i40e_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq->sw_ring) {
+ i40e_dev_tx_queue_release(txq);
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
+ return (-ENOMEM);
+ }
+
+ i40e_reset_tx_queue(txq);
+ txq->q_set = TRUE;
+ dev->data->tx_queues[queue_idx] = txq;
+
+	/* Use the simple TX path when no offloads or multi-segment packets are needed */
+ if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
+ (txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
+ PMD_INIT_LOG(INFO, "Using simple tx path");
+ dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+ } else {
+ PMD_INIT_LOG(INFO, "Using full-featured tx path");
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+ }
+
+ return 0;
+}
+
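+/*
+ * The tx_rs_thresh / tx_free_thresh rules enforced above can be restated
+ * as a single predicate over the ring size (hypothetical helper; the
+ * zero-means-default and WTHRESH handling are omitted):
+ *
+ *	static int
+ *	tx_thresh_ok(uint16_t nb_desc, uint16_t rs, uint16_t free)
+ *	{
+ *		return rs > 0 && free > 0 &&
+ *		       rs < (uint16_t)(nb_desc - 2) &&
+ *		       free < (uint16_t)(nb_desc - 3) &&
+ *		       rs <= free &&
+ *		       (nb_desc % rs) == 0;
+ *	}
+ *
+ * For instance tx_thresh_ok(1024, 32, 32) holds, while rs = 48 fails the
+ * divisibility rule for a 1024-entry ring.
+ */
+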
+void
+i40e_dev_tx_queue_release(void *txq)
+{
+ struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
+
+ if (!q) {
+ PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
+ return;
+ }
+
+ i40e_tx_queue_release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_free(q);
+}
+
+static const struct rte_memzone *
+i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
+ const char *ring_name,
+ uint16_t queue_id,
+ uint32_t ring_size,
+ int socket_id)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ dev->driver->pci_drv.name, ring_name,
+ dev->data->port_id, queue_id);
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ return mz;
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+ return rte_memzone_reserve_bounded(z_name, ring_size,
+ socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
+#else
+ return rte_memzone_reserve_aligned(z_name, ring_size,
+ socket_id, 0, I40E_ALIGN);
+#endif
+}
+
+const struct rte_memzone *
+i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
+{
+ const struct rte_memzone *mz = NULL;
+
+ mz = rte_memzone_lookup(name);
+ if (mz)
+ return mz;
+#ifdef RTE_LIBRTE_XEN_DOM0
+ mz = rte_memzone_reserve_bounded(name, len,
+ socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
+#else
+ mz = rte_memzone_reserve_aligned(name, len,
+ socket_id, 0, I40E_ALIGN);
+#endif
+ return mz;
+}
+
+void
+i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
+{
+ uint16_t i;
+
+ if (!rxq || !rxq->sw_ring) {
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
+ }
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ if (rxq->rx_nb_avail == 0)
+ return;
+ for (i = 0; i < rxq->rx_nb_avail; i++) {
+ struct rte_mbuf *mbuf;
+
+ mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
+ rte_pktmbuf_free_seg(mbuf);
+ }
+ rxq->rx_nb_avail = 0;
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+}
+
+void
+i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
+{
+ unsigned i;
+ uint16_t len;
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+ len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_I40E_RX_MAX_BURST);
+ else
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ len = rxq->nb_rx_desc;
+
+ for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+ for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
+ rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+
+ rxq->rx_nb_avail = 0;
+ rxq->rx_next_avail = 0;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+}
+
+void
+i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
+{
+ uint16_t i;
+
+ if (!txq || !txq->sw_ring) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+}
+
+void
+i40e_reset_tx_queue(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txe;
+ uint16_t i, prev, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ txe = txq->sw_ring;
+ size = sizeof(struct i40e_tx_desc) * txq->nb_tx_desc;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->tx_ring)[i] = 0;
+
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile struct i40e_tx_desc *txd = &txq->tx_ring[i];
+
+ txd->cmd_type_offset_bsz =
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ txq->nb_tx_used = 0;
+
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+}
+
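+/*
+ * The reset above links the software ring into a circle through next_id:
+ * for a 4-entry ring the loop produces the chain 0 -> 1 -> 2 -> 3 -> 0
+ * (entry 3 points back to 0 because 'prev' starts at nb_tx_desc - 1),
+ * every last_id starts as the entry's own index, and every descriptor is
+ * pre-marked DESC_DONE so the first cleanup pass sees the whole ring as
+ * free (nb_tx_free starts at nb_tx_desc - 1, one entry being reserved).
+ */
+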
+/* Init the TX queue in hardware */
+int
+i40e_tx_queue_init(struct i40e_tx_queue *txq)
+{
+ enum i40e_status_code err = I40E_SUCCESS;
+ struct i40e_vsi *vsi = txq->vsi;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t pf_q = txq->reg_idx;
+ struct i40e_hmc_obj_txq tx_ctx;
+ uint32_t qtx_ctl;
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(tx_ctx));
+ tx_ctx.new_context = 1;
+ tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ tx_ctx.qlen = txq->nb_tx_desc;
+ tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[0]);
+ if (vsi->type == I40E_VSI_FDIR)
+ tx_ctx.fd_ena = TRUE;
+
+ err = i40e_clear_lan_tx_queue_context(hw, pf_q);
+ if (err != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR, "Failed to clear LAN TX queue context");
+ return err;
+ }
+
+ err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
+ if (err != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR, "Failed to set LAN TX queue context");
+ return err;
+ }
+
+ /* Now associate this queue with this PCI function */
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
+ I40E_WRITE_REG(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
+ I40E_WRITE_FLUSH(hw);
+
+ txq->qtx_tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
+
+ return err;
+}
+
+int
+i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
+{
+ struct i40e_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile union i40e_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp);
+
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+
+ rxd = &rxq->rx_ring[i];
+ rxd->read.pkt_addr = dma_addr;
+ rxd->read.hdr_addr = dma_addr;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rxd->read.rsvd1 = 0;
+ rxd->read.rsvd2 = 0;
+#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
+
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate the buffer length, and check the jumbo frame
+ * and maximum packet length.
+ */
+static int
+i40e_rx_queue_config(struct i40e_rx_queue *rxq)
+{
+ struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
+ struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
+ struct rte_eth_dev_data *data = pf->dev_data;
+ struct rte_pktmbuf_pool_private *mbp_priv =
+ rte_mempool_get_priv(rxq->mp);
+ uint16_t buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+ uint16_t len;
+
+ switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED |
+ I40E_FLAG_HEADER_SPLIT_ENABLED)) {
+ case I40E_FLAG_HEADER_SPLIT_ENABLED: /* Not supported */
+ rxq->rx_hdr_len = RTE_ALIGN(I40E_RXBUF_SZ_1024,
+ (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+ rxq->rx_buf_len = RTE_ALIGN(I40E_RXBUF_SZ_2048,
+ (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ rxq->hs_mode = i40e_header_split_enabled;
+ break;
+ case I40E_FLAG_HEADER_SPLIT_DISABLED:
+ default:
+ rxq->rx_hdr_len = 0;
+ rxq->rx_buf_len = RTE_ALIGN(buf_size,
+ (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ rxq->hs_mode = i40e_header_split_none;
+ break;
+ }
+
+ len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
+ rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
+ if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must "
+				"be larger than %u and smaller than %u, "
+ "as jumbo frame is enabled",
+ (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
+ return I40E_ERR_CONFIG;
+ }
+ } else {
+ if (rxq->max_pkt_len < ETHER_MIN_LEN ||
+ rxq->max_pkt_len > ETHER_MAX_LEN) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is disabled",
+ (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ return 0;
+}
+
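+/*
+ * Sizing sketch for the checks above (buffer chain length is a
+ * hypothetical value here): with 2048-byte data buffers and a chain
+ * length of 5 the hardware can take up to 10240 bytes, so max_pkt_len
+ * becomes min(10240, max_rx_pkt_len).  With jumbo frames enabled that
+ * value must be larger than ETHER_MAX_LEN (1518) and no larger than
+ * I40E_FRAME_SIZE_MAX; without jumbo frames it must fall within
+ * [ETHER_MIN_LEN (64), ETHER_MAX_LEN (1518)].
+ */
+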
+/* Init the RX queue in hardware */
+int
+i40e_rx_queue_init(struct i40e_rx_queue *rxq)
+{
+ int err = I40E_SUCCESS;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
+ struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ uint16_t pf_q = rxq->reg_idx;
+ uint16_t buf_size;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+
+ err = i40e_rx_queue_config(rxq);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Failed to config RX queue");
+ return err;
+ }
+
+ /* Clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ rx_ctx.dbuff = rxq->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = rxq->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.qlen = rxq->nb_rx_desc;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rx_ctx.dsize = 1;
+#endif
+ rx_ctx.dtype = rxq->hs_mode;
+ if (rxq->hs_mode)
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
+ else
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+ rx_ctx.rxmax = rxq->max_pkt_len;
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
+ rx_ctx.l2tsel = 1;
+ rx_ctx.showiv = 1;
+ rx_ctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, pf_q);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to clear LAN RX queue context");
+ return err;
+ }
+ err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to set LAN RX queue context");
+ return err;
+ }
+
+ rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
+
+ mbp_priv = rte_mempool_get_priv(rxq->mp);
+ buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+
+ /* Check if scattered RX needs to be used. */
+ if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ dev_data->scattered_rx = 1;
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ }
+
+	/* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ return 0;
+}
+
+void
+i40e_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
+ i40e_reset_tx_queue(dev->data->tx_queues[i]);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
+ i40e_reset_rx_queue(dev->data->rx_queues[i]);
+ }
+}
+
+#define I40E_FDIR_NUM_TX_DESC I40E_MIN_RING_DESC
+#define I40E_FDIR_NUM_RX_DESC I40E_MIN_RING_DESC
+
+enum i40e_status_code
+i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
+{
+ struct i40e_tx_queue *txq;
+ const struct rte_memzone *tz = NULL;
+ uint32_t ring_size;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+
+ if (!pf) {
+ PMD_DRV_LOG(ERR, "PF is not available");
+ return I40E_ERR_BAD_PTR;
+ }
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("i40e fdir tx queue",
+ sizeof(struct i40e_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+
+ tz = i40e_ring_dma_zone_reserve(dev,
+ "fdir_tx_ring",
+ I40E_FDIR_QUEUE_ID,
+ ring_size,
+ SOCKET_ID_ANY);
+ if (!tz) {
+ i40e_dev_tx_queue_release(txq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
+ txq->queue_id = I40E_FDIR_QUEUE_ID;
+ txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+ txq->vsi = pf->fdir.fdir_vsi;
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+ txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#else
+ txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+#endif
+ txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+ /*
+	 * No need to allocate a software ring or reset the FDIR program
+	 * queue; just mark the queue as configured.
+ */
+ txq->q_set = TRUE;
+ pf->fdir.txq = txq;
+
+ return I40E_SUCCESS;
+}
+
+enum i40e_status_code
+i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
+{
+ struct i40e_rx_queue *rxq;
+ const struct rte_memzone *rz = NULL;
+ uint32_t ring_size;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+
+ if (!pf) {
+ PMD_DRV_LOG(ERR, "PF is not available");
+ return I40E_ERR_BAD_PTR;
+ }
+
+ /* Allocate the RX queue data structure. */
+ rxq = rte_zmalloc_socket("i40e fdir rx queue",
+ sizeof(struct i40e_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "rx queue structure.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ /* Allocate RX hardware ring descriptors. */
+ ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+
+ rz = i40e_ring_dma_zone_reserve(dev,
+ "fdir_rx_ring",
+ I40E_FDIR_QUEUE_ID,
+ ring_size,
+ SOCKET_ID_ANY);
+ if (!rz) {
+ i40e_dev_rx_queue_release(rxq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
+ rxq->queue_id = I40E_FDIR_QUEUE_ID;
+ rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+ rxq->vsi = pf->fdir.fdir_vsi;
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+ rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#else
+ rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
+#endif
+ rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
+
+ /*
+	 * No need to allocate a software ring or reset the FDIR RX
+	 * queue; just mark the queue as configured.
+ */
+ rxq->q_set = TRUE;
+ pf->fdir.rxq = rxq;
+
+ return I40E_SUCCESS;
+}
diff --git a/src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.h b/src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.h
new file mode 100755
index 00000000..af932e3d
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_i40e/i40e_rxtx.h
@@ -0,0 +1,198 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_RXTX_H_
+#define _I40E_RXTX_H_
+
+/**
+ * 32-bit TX flags: the high 16 bits carry L2TAG1 (VLAN),
+ * the low 16 bits carry the others.
+ */
+#define I40E_TX_FLAG_L2TAG1_SHIFT 16
+#define I40E_TX_FLAG_L2TAG1_MASK 0xffff0000
+#define I40E_TX_FLAG_CSUM ((uint32_t)(1 << 0))
+#define I40E_TX_FLAG_INSERT_VLAN ((uint32_t)(1 << 1))
+#define I40E_TX_FLAG_TSYN ((uint32_t)(1 << 2))
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+#define RTE_PMD_I40E_RX_MAX_BURST 32
+#endif
+
+#define I40E_RXBUF_SZ_1024 1024
+#define I40E_RXBUF_SZ_2048 2048
+
+enum i40e_header_split_mode {
+ i40e_header_split_none = 0,
+ i40e_header_split_enabled = 1,
+ i40e_header_split_always = 2,
+ i40e_header_split_reserved
+};
+
+#define I40E_HEADER_SPLIT_NONE ((uint8_t)0)
+#define I40E_HEADER_SPLIT_L2 ((uint8_t)(1 << 0))
+#define I40E_HEADER_SPLIT_IP ((uint8_t)(1 << 1))
+#define I40E_HEADER_SPLIT_UDP_TCP ((uint8_t)(1 << 2))
+#define I40E_HEADER_SPLIT_SCTP ((uint8_t)(1 << 3))
+#define I40E_HEADER_SPLIT_ALL (I40E_HEADER_SPLIT_L2 | \
+ I40E_HEADER_SPLIT_IP | \
+ I40E_HEADER_SPLIT_UDP_TCP | \
+ I40E_HEADER_SPLIT_SCTP)
+
+/* HW desc structure, both 16-byte and 32-byte types are supported */
+#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#define i40e_rx_desc i40e_16byte_rx_desc
+#else
+#define i40e_rx_desc i40e_32byte_rx_desc
+#endif
+
+struct i40e_rx_entry {
+ struct rte_mbuf *mbuf;
+};
+
+/*
+ * Structure associated with each RX queue.
+ */
+struct i40e_rx_queue {
+ struct rte_mempool *mp; /**< mbuf pool to populate RX ring */
+ volatile union i40e_rx_desc *rx_ring;/**< RX ring virtual address */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address */
+ struct i40e_rx_entry *sw_ring; /**< address of RX soft ring */
+ uint16_t nb_rx_desc; /**< number of RX descriptors */
+ uint16_t rx_free_thresh; /**< max free RX desc to hold */
+ uint16_t rx_tail; /**< current value of tail */
+ uint16_t nb_rx_hold; /**< number of held free RX desc */
+ struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */
+ struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ uint16_t rx_nb_avail; /**< number of staged packets ready */
+ uint16_t rx_next_avail; /**< index of next staged packets */
+ uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+ struct rte_mbuf fake_mbuf; /**< dummy mbuf */
+ struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
+#endif
+ uint8_t port_id; /**< device port ID */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */
+ uint16_t queue_id; /**< RX queue index */
+ uint16_t reg_idx; /**< RX queue register index */
+ uint8_t drop_en; /**< if not 0, set register bit */
+ volatile uint8_t *qrx_tail; /**< register address of tail */
+ struct i40e_vsi *vsi; /**< the VSI this queue belongs to */
+ uint16_t rx_buf_len; /* The packet buffer size */
+ uint16_t rx_hdr_len; /* The header buffer size */
+ uint16_t max_pkt_len; /* Maximum packet length */
+ uint8_t hs_mode; /* Header Split mode */
+ bool q_set; /**< indicate if rx queue has been configured */
+ bool rx_deferred_start; /**< don't start this queue in dev start */
+};
+
+struct i40e_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint16_t next_id;
+ uint16_t last_id;
+};
+
+/*
+ * Structure associated with each TX queue.
+ */
+struct i40e_tx_queue {
+ uint16_t nb_tx_desc; /**< number of TX descriptors */
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address */
+ volatile struct i40e_tx_desc *tx_ring; /**< TX ring virtual address */
+ struct i40e_tx_entry *sw_ring; /**< virtual address of SW ring */
+ uint16_t tx_tail; /**< current value of tail register */
+ volatile uint8_t *qtx_tail; /**< register address of tail */
+ uint16_t nb_tx_used; /**< number of TX desc used since RS bit set */
+ /**< index to last TX descriptor to have been cleaned */
+ uint16_t last_desc_cleaned;
+ /**< Total number of TX descriptors ready to be allocated. */
+ uint16_t nb_tx_free;
+ /**< Start freeing TX buffers if there are fewer free descriptors than this. */
+ uint16_t tx_free_thresh; /**< minimum TX before freeing. */
+ /** Number of TX descriptors to use before RS bit is set. */
+ uint16_t tx_rs_thresh;
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold reg. */
+ uint8_t port_id; /**< Device port identifier. */
+ uint16_t queue_id; /**< TX queue index. */
+ uint16_t reg_idx;
+ uint32_t txq_flags;
+ struct i40e_vsi *vsi; /**< the VSI this queue belongs to */
+ uint16_t tx_next_dd;
+ uint16_t tx_next_rs;
+ bool q_set; /**< indicate if tx queue has been configured */
+ bool tx_deferred_start; /**< don't start this queue in dev start */
+};
+
+int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void i40e_dev_rx_queue_release(void *rxq);
+void i40e_dev_tx_queue_release(void *txq);
+uint16_t i40e_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_xmit_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int i40e_tx_queue_init(struct i40e_tx_queue *txq);
+int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
+void i40e_free_tx_resources(struct i40e_tx_queue *txq);
+void i40e_free_rx_resources(struct i40e_rx_queue *rxq);
+void i40e_dev_clear_queues(struct rte_eth_dev *dev);
+void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
+void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
+void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
+int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
+void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
+
+uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+#endif /* _I40E_RXTX_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/Makefile b/src/dpdk_lib18/librte_pmd_ixgbe/Makefile
new file mode 100755
index 00000000..35880479
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/Makefile
@@ -0,0 +1,117 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ixgbe.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+ifeq ($(CC), icc)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+
+else ifeq ($(CC), clang)
+#
+# CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+else
+#
+# CFLAGS for gcc
+#
+ifneq ($(shell test $(GCC_MAJOR_VERSION) -le 4 -a $(GCC_MINOR_VERSION) -le 3 && echo 1), 1)
+CFLAGS += -Wno-deprecated
+endif
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+ifeq ($(shell test $(GCC_MAJOR_VERSION) -ge 4 -a $(GCC_MINOR_VERSION) -ge 6 && echo 1), 1)
+CFLAGS_ixgbe_common.o += -Wno-unused-but-set-variable
+CFLAGS_ixgbe_x550.o += -Wno-unused-but-set-variable -Wno-maybe-uninitialized
+endif
+
+ifeq ($(shell test $(GCC_MAJOR_VERSION) -le 4 -a $(GCC_MINOR_VERSION) -le 6 && echo 1), 1)
+CFLAGS_ixgbe_x550.o += -Wno-uninitialized
+CFLAGS_ixgbe_phy.o += -Wno-uninitialized
+endif
+endif
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(RTE_SDK)/lib/librte_pmd_ixgbe/ixgbe/*.c)))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(RTE_SDK)/lib/librte_pmd_ixgbe/ixgbe
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82598.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x540.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x550.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_mbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c
+SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec.c
+
+ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
+endif
+
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/README b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/README
new file mode 100755
index 00000000..e0e5f0dc
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/README
@@ -0,0 +1,67 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Intel® IXGBE driver
+===================
+
+This directory contains the source code of the FreeBSD ixgbe driver, version
+cid-10g-shared-code.2014.09.04, released by LAD. The lad/ sub-directory
+contains the original source package.
+This driver is valid for the products listed below:
+
+* Intel® 10 Gigabit AF DA Dual Port Server Adapter
+* Intel® 10 Gigabit AT Server Adapter
+* Intel® 10 Gigabit AT2 Server Adapter
+* Intel® 10 Gigabit CX4 Dual Port Server Adapter
+* Intel® 10 Gigabit XF LR Server Adapter
+* Intel® 10 Gigabit XF SR Dual Port Server Adapter
+* Intel® 10 Gigabit XF SR Server Adapter
+* Intel® 82598 10 Gigabit Ethernet Controller
+* Intel® 82599 10 Gigabit Ethernet Controller
+* Intel® Ethernet Controller X540-AT2
+* Intel® Ethernet Server Adapter X520 Series
+* Intel® Ethernet Server Adapter X520-T2
+* Intel® Ethernet Controller X550-BT2
+
+Updating driver
+===============
+
+The following modifications have been made to this code to integrate it with the
+Intel® DPDK:
+
+
+ixgbe_osdep.h
+-------------
+
+The OS dependency layer has been extensively modified to support the drivers in
+the Intel® DPDK environment. It is expected that these files will not need to be
+changed when updating the driver.
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c
new file mode 100755
index 00000000..c8ce893b
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c
@@ -0,0 +1,1436 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_82598.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#ident "$Id: ixgbe_82598.c,v 1.199 2013/05/22 23:26:31 jtkirshe Exp $"
+
+#define IXGBE_82598_MAX_TX_QUEUES 32
+#define IXGBE_82598_MAX_RX_QUEUES 64
+#define IXGBE_82598_RAR_ENTRIES 16
+#define IXGBE_82598_MC_TBL_SIZE 128
+#define IXGBE_82598_VFT_TBL_SIZE 128
+#define IXGBE_82598_RX_PB_SIZE 512
+
+STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete);
+STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
+STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy);
+STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
+/**
+ * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for 82598 should be in the range of 50us to 50ms;
+ * however, the hardware default for these parts is 500us to 1ms, which is
+ * less than the 10ms recommended by the PCIe spec. To address this we need
+ * to increase the value to either 10ms to 250ms for capability version 1
+ * config, or 16ms to 55ms for version 2.
+ **/
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
+{
+ u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
+ u16 pcie_devctl2;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /*
+ * if the capabilities version is type 1 we can write the
+ * timeout of 10ms to 250ms through the GCR register
+ */
+ if (!(gcr & IXGBE_GCR_CAP_VER2)) {
+ gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /*
+ * for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+ pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+ pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
+ IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
+}
+
+/**
+ * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for 82598.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_82598");
+
+ ret_val = ixgbe_init_phy_ops_generic(hw);
+ ret_val = ixgbe_init_ops_generic(hw);
+
+ /* PHY */
+ phy->ops.init = &ixgbe_init_phy_ops_82598;
+
+ /* MAC */
+ mac->ops.start_hw = &ixgbe_start_hw_82598;
+ mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
+ mac->ops.reset_hw = &ixgbe_reset_hw_82598;
+ mac->ops.get_media_type = &ixgbe_get_media_type_82598;
+ mac->ops.get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_82598;
+ mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
+ mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
+ mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
+ mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82598;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
+ mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
+ mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+ mac->ops.set_vlvf = NULL;
+ mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
+
+ /* Flow Control */
+ mac->ops.fc_enable = &ixgbe_fc_enable_82598;
+
+ mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
+ mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ /* SFP+ Module */
+ phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
+ phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;
+
+ /* Link */
+ mac->ops.check_link = &ixgbe_check_mac_link_82598;
+ mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+ mac->ops.flap_tx_laser = NULL;
+ mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
+ mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = NULL;
+
+ mac->ops.get_rtrup2tc = NULL;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val = IXGBE_SUCCESS;
+ u16 list_offset, data_offset;
+
+ DEBUGFUNC("ixgbe_init_phy_ops_82598");
+
+ /* Identify the PHY */
+ phy->ops.identify(hw);
+
+ /* Overwrite the link function pointers if copper PHY */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_generic;
+ }
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
+ phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.get_firmware_version =
+ &ixgbe_get_phy_firmware_version_tnx;
+ break;
+ case ixgbe_phy_nl:
+ phy->ops.reset = &ixgbe_reset_phy_nl;
+
+ /* Call SFP+ identify routine to get the SFP+ module type */
+ ret_val = phy->ops.identify_sfp(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+ else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
+ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Check to see if SFP+ module is supported */
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
+ &list_offset,
+ &data_offset);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function.
+ * Disables relaxed ordering, then sets the PCIe completion timeout.
+ *
+ **/
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+{
+ u32 regval;
+ u32 i;
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_start_hw_82598");
+
+ ret_val = ixgbe_start_hw_generic(hw);
+
+ /* Disable relaxed ordering */
+ for (i = 0; ((i < hw->mac.max_tx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+ }
+
+ for (i = 0; ((i < hw->mac.max_rx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ /* set the completion timeout for interface */
+ if (ret_val == IXGBE_SUCCESS)
+ ixgbe_set_pcie_completion_timeout(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_82598 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 autoc = 0;
+
+ DEBUGFUNC("ixgbe_get_link_capabilities_82598");
+
+ /*
+ * Determine link capabilities based on the stored value of AUTOC,
+ * which represents EEPROM defaults. If AUTOC value has not been
+ * stored, use the current register value.
+ */
+ if (hw->mac.orig_link_settings_stored)
+ autoc = hw->mac.orig_autoc;
+ else
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_1G_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_AN:
+ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ default:
+ status = IXGBE_ERR_LINK_SETUP;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_media_type_82598 - Determines media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ DEBUGFUNC("ixgbe_get_media_type_82598");
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ media_type = ixgbe_media_type_copper;
+ goto out;
+ default:
+ break;
+ }
+
+ /* Media type for I82598 is based on device ID */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ case IXGBE_DEV_ID_82598_BX:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ media_type = ixgbe_media_type_cx4;
+ break;
+ case IXGBE_DEV_ID_82598AT:
+ case IXGBE_DEV_ID_82598AT2:
+ media_type = ixgbe_media_type_copper;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+out:
+ return media_type;
+}
+
+/**
+ * ixgbe_fc_enable_82598 - Enable flow control
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to the current settings.
+ **/
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 fctrl_reg;
+ u32 rmcs_reg;
+ u32 reg;
+ u32 fcrtl, fcrth;
+ u32 link_speed = 0;
+ int i;
+ bool link_up;
+
+ DEBUGFUNC("ixgbe_fc_enable_82598");
+
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ DEBUGOUT("Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * On 82598, having Rx FC on causes resets while doing 1G, so if it
+ * is on, turn it off once we know the link_speed. For more details
+ * see the 82598 Specification Update.
+ */
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ hw->fc.requested_mode = ixgbe_fc_tx_pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ hw->fc.requested_mode = ixgbe_fc_none;
+ break;
+ default:
+ /* no change */
+ break;
+ }
+ }
+
+ /* Negotiate the fc mode to use */
+ ixgbe_fc_autoneg(hw);
+
+ /* Disable any previous flow control settings */
+ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+
+ rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+
+ /*
+ * The possible values of fc.current_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.current_mode) {
+ case ixgbe_fc_none:
+ /*
+ * Flow control is disabled by software override or autoneg.
+ * The code below will actually disable it in the HW.
+ */
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
+ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ fctrl_reg |= IXGBE_FCTRL_DPF;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
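+			/*
+			 * Editor's note: the water marks are kept in KB, so
+			 * the << 10 below converts them to the byte values
+			 * the FCRTL/FCRTH registers expect.
+			 */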
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+ }
+
+ }
+
+ /* Configure pause time (2 TCs per register) */
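+	/* editor's note: * 0x00010001 places the same 16-bit pause time in both halves of each FCTTV register */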
+ reg = hw->fc.pause_time * 0x00010001;
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_start_mac_link_82598 - Configures MAC link settings
+ * @hw: pointer to hardware structure
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ **/
+STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
+{
+ u32 autoc_reg;
+ u32 links_reg;
+ u32 i;
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_start_mac_link_82598");
+
+ /* Restart link */
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_AN ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ links_reg = 0; /* Just in case Autoneg time = 0 */
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msec_delay(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ DEBUGOUT("Autonegotiation did not complete.\n");
+ }
+ }
+ }
+
+ /* Add delay to filter out noise during initial link setup */
+ msec_delay(50);
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_link_ready - Function looks for phy link
+ * @hw: pointer to hardware structure
+ *
+ * Function indicates success when phy link is available. If phy is not ready
+ * within 5 seconds of MAC indicating link, the function returns error.
+ **/
+STATIC s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+ u32 timeout;
+ u16 an_reg;
+
+ if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+ return IXGBE_SUCCESS;
+
+ for (timeout = 0;
+ timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
+
+ if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
+ (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
+ break;
+
+ msec_delay(100);
+ }
+
+ if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+ DEBUGOUT("Link was indicated but link is down\n");
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_mac_link_82598 - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete)
+{
+ u32 links_reg;
+ u32 i;
+ u16 link_reg, adapt_comp_reg;
+
+ DEBUGFUNC("ixgbe_check_mac_link_82598");
+
+ /*
+ * SERDES PHY requires us to read link status from undocumented
+ * register 0xC79F. Bit 0 set indicates link is up/ready; clear
+ * indicates link down. 0xC00C is read to check that the XAUI lanes
+ * are active. Bit 0 clear indicates active; set indicates inactive.
+ */
+ if (hw->phy.type == ixgbe_phy_nl) {
+ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
+ &adapt_comp_reg);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if ((link_reg & 1) &&
+ ((adapt_comp_reg & 1) == 0)) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msec_delay(100);
+ hw->phy.ops.read_reg(hw, 0xC79F,
+ IXGBE_TWINAX_DEV,
+ &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC00C,
+ IXGBE_TWINAX_DEV,
+ &adapt_comp_reg);
+ }
+ } else {
+ if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if (*link_up == false)
+ goto out;
+ }
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msec_delay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if (links_reg & IXGBE_LINKS_SPEED)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
+ (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
+ *link_up = false;
+
+out:
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_setup_mac_link_82598 - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ bool autoneg = false;
+ s32 status = IXGBE_SUCCESS;
+ ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc = curr_autoc;
+ u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+
+ DEBUGFUNC("ixgbe_setup_mac_link_82598");
+
+ /* Check to see if speed passed in is supported. */
+ ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+ speed &= link_capabilities;
+
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+ status = IXGBE_ERR_LINK_SETUP;
+
+ /* Set KX4/KX support according to speed requested */
+ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ autoc |= IXGBE_AUTOC_KX4_SUPP;
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ autoc |= IXGBE_AUTOC_KX_SUPP;
+ if (autoc != curr_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ /*
+ * Setup and restart the link based on the new values in
+ * ixgbe_hw This will write the AUTOC register based on the new
+ * stored values
+ */
+ status = ixgbe_start_mac_link_82598(hw,
+ autoneg_wait_to_complete);
+ }
+
+ return status;
+}
+
+
+/**
+ * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ * Sets the link speed in the AUTOC register in the MAC and restarts link.
+ **/
+STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_setup_copper_link_82598");
+
+ /* Setup the PHY according to input speed */
+ status = hw->phy.ops.setup_link_speed(hw, speed,
+ autoneg_wait_to_complete);
+ /* Set up MAC */
+ ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_82598 - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masking
+ * and clearing all interrupts, performing a PHY reset, and performing a
+ * link (MAC) reset.
+ **/
+STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ s32 phy_status = IXGBE_SUCCESS;
+ u32 ctrl;
+ u32 gheccr;
+ u32 i;
+ u32 autoc;
+ u8 analog_val;
+
+ DEBUGFUNC("ixgbe_reset_hw_82598");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /*
+ * Power up the Atlas Tx lanes if they are currently powered down.
+ * Atlas Tx lanes are powered down for MAC loopback tests, but
+ * they are not automatically restored on reset.
+ */
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+ if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
+ /* Enable Tx Atlas so packets can be transmitted again */
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+ analog_val);
+ }
+
+ /* Reset PHY */
+ if (hw->phy.reset_disable == false) {
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Init PHY and function pointers, perform SFP setup */
+ phy_status = hw->phy.ops.init(hw);
+ if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+ if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto mac_reset_top;
+
+ hw->phy.ops.reset(hw);
+ }
+
+mac_reset_top:
+ /*
+ * Issue global reset to the MAC. This needs to be a SW reset.
+ * If link reset is used, it might reset the MAC when mng is using it
+ */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST))
+ break;
+ }
+ if (ctrl & IXGBE_CTRL_RST) {
+ status = IXGBE_ERR_RESET_FAILED;
+ DEBUGOUT("Reset polling failed to complete.\n");
+ }
+
+ msec_delay(50);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
+ gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+ IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
+
+ /*
+ * Store the original AUTOC value if it has not been
+ * stored off yet. Otherwise restore the stored original
+ * AUTOC value since the reset operation sets back to defaults.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_link_settings_stored = true;
+ } else if (autoc != hw->mac.orig_autoc) {
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table
+ */
+ hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+ if (phy_status != IXGBE_SUCCESS)
+ status = phy_status;
+
+ return status;
+}
+
+/**
+ * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq set index
+ **/
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_set_vmdq_82598");
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
+ **/
+STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ UNREFERENCED_1PARAMETER(vmdq);
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ if (rar_high & IXGBE_RAH_VIND_MASK) {
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_vfta_82598 - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFTA
+ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ u32 regindex;
+ u32 bitindex;
+ u32 bits;
+ u32 vftabyte;
+
+ DEBUGFUNC("ixgbe_set_vfta_82598");
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
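+	/*
+	 * Editor's note, worked example: for vlan == 100 the computations
+	 * below give regindex = 3, vftabyte = 0, a VMDq nibble offset of
+	 * 16 bits and a VFTA bit index of 4.
+	 */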
+ /* Determine 32-bit word position in array */
+ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
+
+ /* Determine the location of the (VMD) queue index */
+ vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+ bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
+
+ /* Set the nibble for VMD queue index */
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
+ bits &= (~(0x0F << bitindex));
+ bits |= (vind << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
+
+ /* Determine the location of the bit for this VLAN id */
+ bitindex = vlan & 0x1F; /* lower five bits */
+
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+ if (vlan_on)
+ /* Turn on this VLAN id */
+ bits |= (1 << bitindex);
+ else
+ /* Turn off this VLAN id */
+ bits &= ~(1 << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_vfta_82598 - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter.
+ **/
+STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+{
+ u32 offset;
+ u32 vlanbyte;
+
+ DEBUGFUNC("ixgbe_clear_vfta_82598");
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+ 0);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to Atlas analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ u32 atlas_ctl;
+
+ DEBUGFUNC("ixgbe_read_analog_reg8_82598");
+
+ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(10);
+ atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+ *val = (u8)atlas_ctl;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: atlas register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ u32 atlas_ctl;
+
+ DEBUGFUNC("ixgbe_write_analog_reg8_82598");
+
+ atlas_ctl = (reg << 8) | val;
+ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(10);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @dev_addr: address to read from
+ * @byte_offset: byte offset to read from dev_addr
+ * @eeprom_data: value read
+ *
+ * Performs an 8-bit read operation from the SFP module's EEPROM over the I2C interface.
+ **/
+STATIC s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+ u8 byte_offset, u8 *eeprom_data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 sfp_addr = 0;
+ u16 sfp_data = 0;
+ u16 sfp_stat = 0;
+ u16 gssr;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_read_i2c_phy_82598");
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ gssr = IXGBE_GSSR_PHY1_SM;
+ else
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ if (hw->phy.type == ixgbe_phy_nl) {
+ /*
+ * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
+ * 0xC30D. These registers are used to talk to the SFP+
+ * module's EEPROM through the SDA/SCL (I2C) interface.
+ */
+ sfp_addr = (dev_addr << 8) + byte_offset;
+ sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
+ hw->phy.ops.write_reg_mdi(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ sfp_addr);
+
+ /* Poll status */
+ for (i = 0; i < 100; i++) {
+ hw->phy.ops.read_reg_mdi(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &sfp_stat);
+ sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
+ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
+ break;
+ msec_delay(10);
+ }
+
+ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
+ DEBUGOUT("EEPROM read did not pass.\n");
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ /* Read data */
+ hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+
+ *eeprom_data = (u8)(sfp_data >> 8);
+ } else {
+ status = IXGBE_ERR_PHY;
+ }
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return status;
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs an 8-bit read operation from the SFP module's EEPROM over the I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @eeprom_data: value read
+ *
+ * Performs an 8-bit read operation on the SFP module's SFF-8472 data over I2C.
+ **/
+STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, sff8472_data);
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u16 ext_ability = 0;
+
+ DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
+
+ hw->phy.ops.identify(hw);
+
+ /* Copper PHY must be checked before AUTOC LMS to determine correct
+ * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_cu_unknown:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_AN:
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ else
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+ break;
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else /* XAUI */
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ break;
+ case IXGBE_AUTOC_LMS_KX4_AN:
+ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->phy.type == ixgbe_phy_nl) {
+ hw->phy.ops.identify_sfp(hw);
+
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_da_cu:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case ixgbe_sfp_type_sr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ break;
+ case ixgbe_sfp_type_lr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ break;
+ }
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ break;
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ break;
+ }
+
+out:
+ return physical_layer;
+}
+
+/**
+ * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
+ * port devices.
+ * @hw: pointer to the HW structure
+ *
+ * Calls the common function and corrects an issue with some single-port
+ * devices that enable LAN1 but not LAN0.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u16 pci_gen = 0;
+ u16 pci_ctrl2 = 0;
+
+ DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
+
+ ixgbe_set_lan_id_multi_port_pcie(hw);
+
+ /* check if LAN0 is disabled */
+ hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
+ if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
+
+ hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
+
+ /* if LAN0 is completely disabled force function to 0 */
+ if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
+
+ bus->func = 0;
+ }
+ }
+}
+
+/**
+ * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
+{
+ u32 regval;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
+
+ /* Enable relaxed ordering */
+ for (i = 0; ((i < hw->mac.max_tx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+ regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+ }
+
+ for (i = 0; ((i < hw->mac.max_rx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+}
+
+/**
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy)
+{
+ u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
+ u8 i = 0;
+ UNREFERENCED_1PARAMETER(headroom);
+
+ if (!num_pb)
+ return;
+
+ /* Setup Rx packet buffer sizes */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* Setup the first four at 80KB */
+ rxpktsize = IXGBE_RXPBSIZE_80KB;
+ for (; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Setup the last four at 48KB...don't re-init i */
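+		/* (editor's note: 4 x 80KB + 4 x 48KB accounts for the full 512KB 82598 Rx packet buffer) */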
+ rxpktsize = IXGBE_RXPBSIZE_48KB;
+ /* Fall Through */
+ case PBA_STRATEGY_EQUAL:
+ default:
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ }
+
+ /* Setup Tx packet buffer sizes */
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
+}
+
+/**
+ * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit
+ **/
+s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
+{
+ DEBUGFUNC("ixgbe_enable_rx_dma_82598");
+
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+
+ return IXGBE_SUCCESS;
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h
new file mode 100755
index 00000000..58ce4d12
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h
@@ -0,0 +1,53 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_82598_H_
+#define _IXGBE_82598_H_
+#ident "$Id: ixgbe_82598.h,v 1.3 2012/03/27 22:16:51 jtkirshe Exp $"
+
+u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval);
+#endif /* _IXGBE_82598_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c
new file mode 100755
index 00000000..bd65a7f1
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c
@@ -0,0 +1,2699 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_82599.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#ident "$Id: ixgbe_82599.c,v 1.334 2013/12/04 22:34:00 jtkirshe Exp $"
+
+#define IXGBE_82599_MAX_TX_QUEUES 128
+#define IXGBE_82599_MAX_RX_QUEUES 128
+#define IXGBE_82599_RAR_ENTRIES 128
+#define IXGBE_82599_MC_TBL_SIZE 128
+#define IXGBE_82599_VFT_TBL_SIZE 128
+#define IXGBE_82599_RX_PB_SIZE 512
+
+STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+ u16 offset, u16 *data);
+STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
+
+ /*
+ * Enable the laser control functions for SFP+ fiber
+ * when MNG is not enabled
+ */
+ if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ !ixgbe_mng_enabled(hw)) {
+ mac->ops.disable_tx_laser =
+ &ixgbe_disable_tx_laser_multispeed_fiber;
+ mac->ops.enable_tx_laser =
+ &ixgbe_enable_tx_laser_multispeed_fiber;
+ mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
+
+ } else {
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+ /* Set up dual speed SFP+ support */
+ mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+ } else {
+ if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
+ (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+ hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+ !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
+ } else {
+ mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+ }
+ }
+}
+
+/**
+ * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 esdp;
+
+ DEBUGFUNC("ixgbe_init_phy_ops_82599");
+
+ if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
+ /* Store flag indicating I2C bus access control unit. */
+ hw->phy.qsfp_shared_i2c_bus = TRUE;
+
+ /* Initialize access to QSFP+ I2C bus */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0_DIR;
+ esdp &= ~IXGBE_ESDP_SDP1_DIR;
+ esdp &= ~IXGBE_ESDP_SDP0;
+ esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
+ esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
+ phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
+ }
+ /* Identify the PHY or SFP module */
+ ret_val = phy->ops.identify(hw);
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto init_phy_ops_out;
+
+ /* Setup function pointers based on detected SFP module and speeds */
+ ixgbe_init_mac_link_ops_82599(hw);
+ if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
+ hw->phy.ops.reset = NULL;
+
+ /* If copper media, overwrite with copper function pointers */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_generic;
+ }
+
+ /* Set necessary function pointers based on PHY type */
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
+ phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.get_firmware_version =
+ &ixgbe_get_phy_firmware_version_tnx;
+ break;
+ default:
+ break;
+ }
+init_phy_ops_out:
+ return ret_val;
+}
+
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u16 list_offset, data_offset, data_value;
+
+ DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
+
+ if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
+ ixgbe_init_mac_link_ops_82599(hw);
+
+ hw->phy.ops.reset = NULL;
+
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+ &data_offset);
+ if (ret_val != IXGBE_SUCCESS)
+ goto setup_sfp_out;
+
+ /* PHY config will finish before releasing the semaphore */
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto setup_sfp_out;
+ }
+
+ if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ goto setup_sfp_err;
+ while (data_value != 0xffff) {
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
+ IXGBE_WRITE_FLUSH(hw);
+ if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ goto setup_sfp_err;
+ }
+
+ /* Release the semaphore */
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /* Delay obtaining the semaphore again to allow FW access;
+ * prot_autoc_write uses the semaphore too.
+ */
+ msec_delay(hw->eeprom.semaphore_delay);
+
+ /* Restart DSP and set SFI mode */
+ ret_val = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
+ false);
+
+ if (ret_val) {
+ DEBUGOUT("sfp module setup not complete\n");
+ ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ goto setup_sfp_out;
+ }
+
+ }
+
+setup_sfp_out:
+ return ret_val;
+
+setup_sfp_err:
+ /* Release the semaphore */
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /* Delay obtaining semaphore again to allow FW access */
+ msec_delay(hw->eeprom.semaphore_delay);
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", data_offset);
+ return IXGBE_ERR_PHY;
+}
+
+/**
+ * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: Return whether we took the SW/FW lock for this read.
+ * @reg_val: Value we read from AUTOC
+ *
+ * For this part (82599) we need to wrap read-modify-writes with a possible
+ * FW/SW lock. It is assumed this lock will be freed with the next
+ * prot_autoc_write_82599().
+ */
+s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+ s32 ret_val;
+
+ *locked = false;
+ /* If LESM is on then we need to hold the SW/FW semaphore. */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ *locked = true;
+ }
+
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous prot_autoc_read_82599.
+ *
+ * This part (82599) may need to hold the SW/FW lock around all writes to
+ * AUTOC. Likewise after a write we need to do a pipeline reset.
+ */
+s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
+ /* We only need to get the lock if:
+ * - We didn't do it already (in the read part of a read-modify-write)
+ * - LESM is enabled.
+ */
+ if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != IXGBE_SUCCESS)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ locked = true;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+out:
+ /* Free the SW/FW semaphore as we either grabbed it here or
+ * already had it when this function was called.
+ */
+ if (locked)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for 82599.
+ * Does not touch the hardware.
+ **/
+
+s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_82599");
+
+ ixgbe_init_phy_ops_generic(hw);
+ ret_val = ixgbe_init_ops_generic(hw);
+
+ /* PHY */
+ phy->ops.identify = &ixgbe_identify_phy_82599;
+ phy->ops.init = &ixgbe_init_phy_ops_82599;
+
+ /* MAC */
+ mac->ops.reset_hw = &ixgbe_reset_hw_82599;
+ mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
+ mac->ops.get_media_type = &ixgbe_get_media_type_82599;
+ mac->ops.get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_82599;
+ mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
+ mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
+ mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
+ mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
+ mac->ops.start_hw = &ixgbe_start_hw_82599;
+ mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
+ mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+ mac->ops.prot_autoc_read = &prot_autoc_read_82599;
+ mac->ops.prot_autoc_write = &prot_autoc_write_82599;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
+ mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
+ mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+ mac->rar_highwater = 1;
+ mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
+ mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
+ mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
+ mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
+ mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+
+ /* Link */
+ mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
+ mac->ops.check_link = &ixgbe_check_mac_link_generic;
+ mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
+ ixgbe_init_mac_link_ops_82599(hw);
+
+ mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
+ mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+ IXGBE_FWSM_MODE_MASK) ? true : false;
+
+ hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+
+ /* EEPROM */
+ eeprom->ops.read = &ixgbe_read_eeprom_82599;
+ eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+
+ mac->ops.get_thermal_sensor_data =
+ &ixgbe_get_thermal_sensor_data_generic;
+ mac->ops.init_thermal_sensor_thresh =
+ &ixgbe_init_thermal_sensor_thresh_generic;
+
+ mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_82599 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 autoc = 0;
+
+ DEBUGFUNC("ixgbe_get_link_capabilities_82599");
+
+
+ /* Check if 1G SFP module. */
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ goto out;
+ }
+
+ /*
+ * Determine link capabilities based on the stored value of AUTOC,
+ * which represents EEPROM defaults. If AUTOC value has not
+ * been stored, use the current register values.
+ */
+ if (hw->mac.orig_link_settings_stored)
+ autoc = hw->mac.orig_autoc;
+ else
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_1G_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_SERIAL:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_KX_KR:
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_SGMII_1G_100M:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
+ *autoneg = false;
+ break;
+
+ default:
+ status = IXGBE_ERR_LINK_SETUP;
+ goto out;
+ break;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* QSFP must not enable full auto-negotiation;
+ * limited autoneg is enabled at 1G
+ */
+ if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
+ *autoneg = false;
+ else
+ *autoneg = true;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_get_media_type_82599 - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ DEBUGFUNC("ixgbe_get_media_type_82599");
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ media_type = ixgbe_media_type_copper;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599EN_SFP:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_82599_CX4:
+ media_type = ixgbe_media_type_cx4;
+ break;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ media_type = ixgbe_media_type_copper;
+ break;
+ case IXGBE_DEV_ID_82599_LS:
+ media_type = ixgbe_media_type_fiber_lco;
+ break;
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ media_type = ixgbe_media_type_fiber_qsfp;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+out:
+ return media_type;
+}
+
+/**
+ * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
+ * @hw: pointer to hardware structure
+ *
+ * Disables link during D3 power down sequence.
+ *
+ **/
+void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
+{
+ u32 autoc2_reg, fwsm;
+ u16 ee_ctrl_2 = 0;
+
+ DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
+ ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
+
+ /* Check to see if MNG FW could be enabled */
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+
+ if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
+ !hw->wol_enabled &&
+ ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
+ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+ }
+}
+
+/**
+ * ixgbe_start_mac_link_82599 - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ **/
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
+{
+ u32 autoc_reg;
+ u32 links_reg;
+ u32 i;
+ s32 status = IXGBE_SUCCESS;
+ bool got_lock = false;
+
+ DEBUGFUNC("ixgbe_start_mac_link_82599");
+
+
+ /* reset_pipeline requires us to hold this lock as it writes to
+ * AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ got_lock = true;
+ }
+
+ /* Restart link */
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ links_reg = 0; /* Just in case Autoneg time = 0 */
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msec_delay(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ DEBUGOUT("Autoneg did not complete.\n");
+ }
+ }
+ }
+
+ /* Add delay to filter out noise during initial link setup */
+ msec_delay(50);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively shutting down the Tx
+ * laser on the PHY, effectively halting physical link.
+ **/
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
+ /* Disable Tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(100);
+}
+
+/**
+ * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively turning on the Tx
+ * laser on the PHY, effectively starting physical link.
+ **/
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Enable Tx laser; allow 100ms to light up */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(100);
+}
+
+/**
+ * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support,
+ * it sets autotry_restart to true to indicate that we need to
+ * initiate a new autotry session with the link partner. To do
+ * so, we set the speed then disable and re-enable the Tx laser, to
+ * alert the link partner that it also needs to restart autotry on its
+ * end. This is consistent with true clause 37 autoneg, which also
+ * involves a loss of signal.
+ **/
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
+ if (hw->mac.autotry_restart) {
+ ixgbe_disable_tx_laser_multispeed_fiber(hw);
+ ixgbe_enable_tx_laser_multispeed_fiber(hw);
+ hw->mac.autotry_restart = false;
+ }
+}
+
+
+/**
+ * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = IXGBE_SUCCESS;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 speedcnt = 0;
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ u32 i = 0;
+ bool autoneg, link_up = false;
+
+ DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
+
+ /* Mask off requested but non-supported speeds */
+ status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ speed &= link_speed;
+
+ /*
+ * Try each speed one by one, highest priority first. We do this in
+ * software because 10gb fiber doesn't support speed autonegotiation.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ speedcnt++;
+ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ DEBUGOUT("Unexpected media type.\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (1G->10G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ speedcnt++;
+ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects link speed */
+ break;
+ default:
+ DEBUGOUT("Unexpected media type.\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (10G->1G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+
+ /*
+ * We didn't get link. Configure back to the highest speed we tried
+ * (if there was more than one). We call ourselves back with just the
+ * single highest speed that the user requested.
+ */
+ if (speedcnt > 1)
+ status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+ highest_link_speed, autoneg_wait_to_complete);
+
+out:
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Implements the Intel SmartSpeed algorithm.
+ **/
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = IXGBE_SUCCESS;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ s32 i, j;
+ bool link_up = false;
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
+
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ /*
+ * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
+ * autoneg advertisement if link is unable to be established at the
+ * highest negotiated rate. This can sometimes happen due to integrity
+ * issues with the physical media connection.
+ */
+
+ /* First, try to get link with full advertisement */
+ hw->phy.smart_speed_active = false;
+ for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
+ status = ixgbe_setup_mac_link_82599(hw, speed,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
+ * Table 9 in the AN MAS.
+ */
+ for (i = 0; i < 5; i++) {
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up,
+ false);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ /*
+ * We didn't get link. If we advertised KR plus one of KX4/KX
+ * (or BX4/BX), then disable KR and try again.
+ */
+ if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
+ ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
+ goto out;
+
+ /* Turn SmartSpeed on to disable KR support */
+ hw->phy.smart_speed_active = true;
+ status = ixgbe_setup_mac_link_82599(hw, speed,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. 600ms will allow for
+ * the AN link_fail_inhibit_timer as well for multiple cycles of
+ * parallel detect, both 10g and 1g. This allows for the maximum
+ * connect attempts as defined in the AN MAS table 73-7.
+ */
+ for (i = 0; i < 6; i++) {
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Turn SmartSpeed back off. */
+ hw->phy.smart_speed_active = false;
+ status = ixgbe_setup_mac_link_82599(hw, speed,
+ autoneg_wait_to_complete);
+
+out:
+ if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+ DEBUGOUT("Smartspeed has downgraded the link speed "
+ "from the maximum advertised\n");
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_82599 - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ bool autoneg = false;
+ s32 status = IXGBE_SUCCESS;
+ u32 pma_pmd_1g, link_mode;
+ u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* current value of the AUTOC register */
+ u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
+ u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+ u32 links_reg;
+ u32 i;
+ ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+
+ DEBUGFUNC("ixgbe_setup_mac_link_82599");
+
+ /* Check to see if speed passed in is supported. */
+ status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+ if (status)
+ goto out;
+
+ speed &= link_capabilities;
+
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
+ status = IXGBE_ERR_LINK_SETUP;
+ goto out;
+ }
+
+ /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
+ if (hw->mac.orig_link_settings_stored)
+ orig_autoc = hw->mac.orig_autoc;
+ else
+ orig_autoc = autoc;
+
+ link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+ pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+
+ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ /* Set KX4/KX/KR support according to speed requested */
+ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+ autoc |= IXGBE_AUTOC_KX4_SUPP;
+ if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
+ (hw->phy.smart_speed_active == false))
+ autoc |= IXGBE_AUTOC_KR_SUPP;
+ }
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ autoc |= IXGBE_AUTOC_KX_SUPP;
+ } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
+ (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+ link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+ /* Switch from 1G SFI to 10G SFI if requested */
+ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
+ (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
+ autoc &= ~IXGBE_AUTOC_LMS_MASK;
+ autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
+ }
+ } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
+ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+ /* Switch from 10G SFI to 1G SFI if requested */
+ if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+ (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
+ autoc &= ~IXGBE_AUTOC_LMS_MASK;
+ if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
+ autoc |= IXGBE_AUTOC_LMS_1G_AN;
+ else
+ autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
+ }
+ }
+
+ if (autoc != current_autoc) {
+ /* Restart link */
+ status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ links_reg = 0; /* Just in case Autoneg time = 0 */
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg =
+ IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msec_delay(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status =
+ IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ DEBUGOUT("Autoneg did not complete.\n");
+ }
+ }
+ }
+
+ /* Add delay to filter out noise during initial link setup */
+ msec_delay(50);
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ * Restarts link on PHY and MAC based on settings passed in.
+ **/
+STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_setup_copper_link_82599");
+
+ /* Setup the PHY according to input speed */
+ status = hw->phy.ops.setup_link_speed(hw, speed,
+ autoneg_wait_to_complete);
+ /* Set up MAC */
+ ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_82599 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
+ * reset.
+ **/
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed link_speed;
+ s32 status;
+ u32 ctrl = 0;
+ u32 i, autoc, autoc2;
+ u32 curr_lms;
+ bool link_up = false;
+
+ DEBUGFUNC("ixgbe_reset_hw_82599");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Identify PHY and related function pointers */
+ status = hw->phy.ops.init(hw);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+
+ /* Setup SFP module if there is one present. */
+ if (hw->phy.sfp_setup_needed) {
+ status = hw->mac.ops.setup_sfp(hw);
+ hw->phy.sfp_setup_needed = false;
+ }
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+
+ /* Reset PHY */
+ if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
+ hw->phy.ops.reset(hw);
+
+ /* remember AUTOC from before we reset */
+ curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
+
+mac_reset_top:
+ /*
+ * Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ ctrl = IXGBE_CTRL_LNK_RST;
+ if (!hw->force_full_reset) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up)
+ ctrl = IXGBE_CTRL_RST;
+ }
+
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear meaning reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ DEBUGOUT("Reset polling failed to complete.\n");
+ }
+
+ msec_delay(50);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to
+ * allow time for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /*
+ * Store the original AUTOC/AUTOC2 values if they have not been
+ * stored off yet. Otherwise restore the stored original
+ * values since the reset operation sets back to defaults.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+
+ /* Enable link if disabled in NVM */
+ if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+ autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_autoc2 = autoc2;
+ hw->mac.orig_link_settings_stored = true;
+ } else {
+
+ /* If MNG FW is running on a multi-speed device that
+ * doesn't autoneg without driver support, we need to
+ * leave LMS in the state it was in before the MAC reset.
+ * Likewise, if we support WoL we don't want to change the
+ * LMS state.
+ */
+ if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
+ hw->wol_enabled)
+ hw->mac.orig_autoc =
+ (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
+ curr_lms;
+
+ if (autoc != hw->mac.orig_autoc) {
+ status = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc,
+ false);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+ }
+
+ if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
+ (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
+ autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
+ autoc2 |= (hw->mac.orig_autoc2 &
+ IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ }
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+ if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ IXGBE_FDIRCMD_CMD_MASK))
+ return IXGBE_SUCCESS;
+ usec_delay(10);
+ }
+
+ return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
+/**
+ * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+{
+ s32 err;
+ int i;
+ u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
+
+ DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
+
+ /*
+ * Before starting reinitialization process,
+ * FDIRCMD.CMD must be zero.
+ */
+ err = ixgbe_fdir_check_cmd_complete(hw);
+ if (err) {
+ DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
+ return err;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
+ IXGBE_WRITE_FLUSH(hw);
+ /*
+ * The 82599 adapter's Flow Director init flow cannot be restarted.
+ * Work around the 82599 silicon errata by performing the following steps
+ * before re-writing the FDIRCTRL control register with the same value.
+ * - write 1 to bit 8 of FDIRCMD register &
+ * - write 0 to bit 8 of FDIRCMD register
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ /*
+ * Clear FDIR Hash register to clear any leftover hashes
+ * waiting to be programmed.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll init-done after we write FDIRCTRL register */
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msec_delay(1);
+ }
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+ DEBUGOUT("Flow Director Signature poll time exceeded!\n");
+ return IXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ /* Clear FDIR statistics registers (read to clear) */
+ IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ int i;
+
+ DEBUGFUNC("ixgbe_fdir_enable_82599");
+
+ /* Prime the keys for hashing */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiply these timings by 4 if under full Rx load
+ *
+ * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msec_delay(1);
+ }
+
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+ DEBUGOUT("Flow Director poll time exceeded!\n");
+}
+
+/**
+ * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ DEBUGFUNC("ixgbe_init_fdir_signature_82599");
+
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ * @cloud_mode: true - cloud mode, false - other mode
+ **/
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
+ bool cloud_mode)
+{
+ DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
+
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Turn perfect match filtering on
+ * Report hash in RSS field of Rx wb descriptor
+ * Initialize the drop queue
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 (0x4 * 16) filters are left
+ */
+ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
+ IXGBE_FDIRCTRL_REPORT_STATUS |
+ (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
+ (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+ if (cloud_mode)
+ fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
+ IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+ return IXGBE_SUCCESS;
+}
+
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+ (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ common_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ sig_hash ^= lo_hash_dword << (16 - n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0)
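+
+/*
+ * Each iteration n of the macro above examines bit n (for the low hash
+ * dword) and bit n + 16 (for the high hash dword) of the common, bucket
+ * and signature keys, and XORs the correspondingly shifted dword into
+ * whichever hash that key bit selects; the sixteen unrolled calls in the
+ * function below therefore cover all 32 key bits.
+ */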
+
+/**
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @input: unique input dword
+ * @common: compressed common input dword
+ *
+ * This function is almost identical to the function above but contains
+ * several optimizations such as unwinding all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
+ **/
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common)
+{
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = IXGBE_NTOHL(input.dword);
+
+ /* generate common hash dword */
+ hi_hash_dword = IXGBE_NTOHL(common.dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the VLAN until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+ sig_hash ^= common_hash << 16;
+ sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+ /* return completed signature hash */
+ return sig_hash ^ bucket_hash;
+}
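+
+/*
+ * In the value returned above, the bucket hash occupies the bits selected
+ * by IXGBE_ATR_HASH_MASK in the lower half of the dword and the signature
+ * hash occupies the same mask shifted into the upper half, matching the
+ * value that ixgbe_fdir_add_signature_filter_82599() later writes to
+ * FDIRHASH.
+ */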
+
+/**
+ * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @common: compressed common input dword
+ * @queue: queue index to direct traffic to
+ **/
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue)
+{
+ u64 fdirhashcmd;
+ u32 fdircmd;
+ s32 err;
+
+ DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
+
+ /*
+ * Get the flow_type in order to program FDIRCMD properly
+ * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6,
+ * fifth is FDIRCMD.TUNNEL_FILTER
+ */
+ switch (input.formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TCPV6:
+ case IXGBE_ATR_FLOW_TYPE_UDPV6:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV6:
+ break;
+ default:
+ DEBUGOUT(" Error on flow type input\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+ /*
+ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+ * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ */
+ fdirhashcmd = (u64)fdircmd << 32;
+ fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
+ IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+
+ err = ixgbe_fdir_check_cmd_complete(hw);
+ if (err) {
+ DEBUGOUT("Flow Director command did not complete!\n");
+ return err;
+ }
+
+ DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
+ return IXGBE_SUCCESS;
+}
+
+#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
+
+/**
+ * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
+ * @atr_input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes. First it applies the input_mask
+ * to the atr_input resulting in a cleaned up atr_input data stream.
+ * Secondly it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *input_mask)
+{
+
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 bucket_hash = 0;
+ u32 hi_dword = 0;
+ u32 i = 0;
+
+ /* Apply masks to input data */
+ for (i = 0; i < 14; i++)
+ input->dword_stream[i] &= input_mask->dword_stream[i];
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
+
+ /* generate common hash dword */
+ for (i = 1; i <= 13; i++)
+ hi_dword ^= input->dword_stream[i];
+ hi_hash_dword = IXGBE_NTOHL(hi_dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the VLAN until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ for (i = 1; i <= 15; i++)
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
+
+ /*
+ * Limit hash to 13 bits since max bucket count is 8K.
+ * Store result at the end of the input stream.
+ */
+ input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
+/**
+ * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc. In order to
+ * generate a correctly swapped value we need to bit swap the mask and that
+ * is what is accomplished by this function.
+ **/
+STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
+{
+ u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
+ mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+ mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
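+
+/*
+ * Illustration of the swap above: a source port mask whose IXGBE_NTOHS
+ * value is 0x8000 (only the most significant bit set) comes out as 0x0001
+ * in the low 16 bits of the returned value, while an all-ones mask of
+ * 0xFFFF is unchanged; the destination port mask gets the same treatment
+ * in the upper 16 bits.
+ */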
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian. As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+ IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
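+
+/*
+ * Worked example (illustrative, not from the upstream sources): on a
+ * little-endian host the two swaps in IXGBE_WRITE_REG_BE32() cancel out,
+ * e.g. IXGBE_NTOHL(0x11223344) yields 0x44332211 and IXGBE_STORE_AS_BE32()
+ * turns that back into 0x11223344, so the register receives the original
+ * big-endian image. On a big-endian host IXGBE_NTOHL() is a no-op and
+ * IXGBE_STORE_AS_BE32() provides the byte swap described in the comment
+ * above.
+ */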
+
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask, bool cloud_mode)
+{
+ /* mask IPv6 since it is currently not supported */
+ u32 fdirm = IXGBE_FDIRM_DIPv6;
+ u32 fdirtcpm;
+ u32 fdirip6m;
+ DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->formatted.bkt_hash)
+ DEBUGOUT(" bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRM and verify partial masks */
+ switch (input_mask->formatted.vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_POOL;
+ case 0x7F:
+ break;
+ default:
+ DEBUGOUT(" Error on vm pool mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_L4P;
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ DEBUGOUT(" Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ case IXGBE_ATR_L4TYPE_MASK:
+ break;
+ default:
+ DEBUGOUT(" Error on flow type mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
+ case 0x0000:
+ /* mask VLAN ID, fall through to mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0x0FFF:
+ /* mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ break;
+ case 0xE000:
+ /* mask VLAN ID only, fall through */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0xEFFF:
+ /* no VLAN fields masked */
+ break;
+ default:
+ DEBUGOUT(" Error on VLAN mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+ case 0x0000:
+ /* Mask Flex Bytes, fall through */
+ fdirm |= IXGBE_FDIRM_FLEX;
+ case 0xFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ if (cloud_mode) {
+ fdirm |= IXGBE_FDIRM_L3P;
+ fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
+ fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
+
+ switch (input_mask->formatted.inner_mac[0] & 0xFF) {
+ case 0x00:
+ /* Mask inner MAC, fall through */
+ fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
+ case 0xFF:
+ break;
+ default:
+ DEBUGOUT(" Error on inner_mac byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
+ case 0x0:
+ /* Mask vxlan id */
+ fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
+ break;
+ case 0x00FFFFFF:
+ fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
+ break;
+ case 0xFFFFFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on TNI/VNI byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.tunnel_type & 0xFFFF) {
+ case 0x0:
+ /* Mask tunnel type, fall through */
+ fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
+ case 0xFFFF:
+ break;
+ default:
+ DEBUGOUT(" Error on tunnel type byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
+ }
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+ /* also use it for SCTP */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+ break;
+ default:
+ break;
+ }
+
+ /* store source and destination IP masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ ~input_mask->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ ~input_mask->formatted.dst_ip[0]);
+
+ /* store IPv6 mask */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xffffffff);
+
+ return IXGBE_SUCCESS;
+}
+
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue, bool cloud_mode)
+{
+ u32 fdirport, fdirvlan, fdirhash, fdircmd;
+ u32 addr_low, addr_high;
+ u32 cloud_type = 0;
+ s32 err;
+
+ DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
+
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+ input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+ input->formatted.src_ip[1]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.src_ip[2]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+ /* record source and destination port (little-endian) */
+ fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+ /* record VLAN (little-endian) and flex_bytes (big-endian) */
+ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+ if (cloud_mode) {
+ if (input->formatted.tunnel_type != 0)
+ cloud_type = 0x80000000;
+
+ addr_low = ((u32)input->formatted.inner_mac[0] |
+ ((u32)input->formatted.inner_mac[1] << 8) |
+ ((u32)input->formatted.inner_mac[2] << 16) |
+ ((u32)input->formatted.inner_mac[3] << 24));
+ addr_high = ((u32)input->formatted.inner_mac[4] |
+ ((u32)input->formatted.inner_mac[5] << 8));
+ cloud_type |= addr_high;
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
+ }
+
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ if (queue == IXGBE_FDIR_DROP_QUEUE)
+ fdircmd |= IXGBE_FDIRCMD_DROP;
+ if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
+ fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+ err = ixgbe_fdir_check_cmd_complete(hw);
+ if (err) {
+ DEBUGOUT("Flow Director command did not complete!\n");
+ return err;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id)
+{
+ u32 fdirhash;
+ u32 fdircmd = 0;
+ s32 err;
+
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /* flush hash to HW */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Query if filter is present */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+ err = ixgbe_fdir_check_cmd_complete(hw);
+ if (err) {
+ DEBUGOUT("Flow Director command did not complete!\n");
+ return err;
+ }
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
+ * @hw: pointer to hardware structure
+ * @input: input bitstream
+ * @input_mask: mask for the input bitstream
+ * @soft_id: software index for the filters
+ * @queue: queue index to direct traffic to
+ *
+ * Note that the caller to this function must lock before calling, since the
+ * hardware writes must be protected from one another.
+ **/
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ union ixgbe_atr_input *input_mask,
+ u16 soft_id, u8 queue, bool cloud_mode)
+{
+ s32 err = IXGBE_ERR_CONFIG;
+
+ DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
+
+ /*
+ * Check flow_type formatting, and bail out before we touch the hardware
+ * if there's a configuration issue
+ */
+ switch (input->formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
+ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
+ if (input->formatted.dst_port || input->formatted.src_port) {
+ DEBUGOUT(" Error on src/dst port\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ break;
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
+ if (input->formatted.dst_port || input->formatted.src_port) {
+ DEBUGOUT(" Error on src/dst port\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
+ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+ break;
+ default:
+ DEBUGOUT(" Error on flow type input\n");
+ return err;
+ }
+
+ /* program input mask into the HW */
+ err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
+ if (err)
+ return err;
+
+ /* apply mask and compute/store hash */
+ ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
+
+ /* program filters to filter memory */
+ return ixgbe_fdir_write_perfect_filter_82599(hw, input,
+ soft_id, queue, cloud_mode);
+}
+
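+/*
+ * Illustrative usage sketch (not part of the upstream sources): adding a
+ * perfect filter that steers one TCP/IPv4 flow to a given queue. Per the
+ * mask convention documented in ixgbe_fdir_set_input_mask_82599(), fields
+ * left at zero in the mask are treated as fully masked. The helper below,
+ * its parameters and the soft_id value are placeholders; the flow keys are
+ * assumed to already be in network (big-endian) byte order. Kept under
+ * #if 0 so it is never compiled.
+ */
+#if 0 /* example only */
+static s32 example_add_tcp_filter(struct ixgbe_hw *hw, u32 src_ip_be,
+ u32 dst_ip_be, u16 src_port_be,
+ u16 dst_port_be, u8 queue)
+{
+ union ixgbe_atr_input input, input_mask;
+
+ memset(&input, 0, sizeof(input));
+ memset(&input_mask, 0, sizeof(input_mask));
+
+ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ input.formatted.src_ip[0] = src_ip_be;
+ input.formatted.dst_ip[0] = dst_ip_be;
+ input.formatted.src_port = src_port_be;
+ input.formatted.dst_port = dst_port_be;
+
+ /* all-ones mask fields: match the full addresses and both ports */
+ input_mask.formatted.src_ip[0] = 0xFFFFFFFF;
+ input_mask.formatted.dst_ip[0] = 0xFFFFFFFF;
+ input_mask.formatted.src_port = 0xFFFF;
+ input_mask.formatted.dst_port = 0xFFFF;
+
+ return ixgbe_fdir_add_perfect_filter_82599(hw, &input, &input_mask,
+ 1 /* soft_id */, queue,
+ false /* cloud_mode */);
+}
+#endif
+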
+/**
+ * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs a read operation on the specified Omer analog register.
+ **/
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ u32 core_ctl;
+
+ DEBUGFUNC("ixgbe_read_analog_reg8_82599");
+
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
+ (reg << 8));
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(10);
+ core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
+ *val = (u8)core_ctl;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to write
+ * @val: value to write
+ *
+ * Performs a write operation on the specified Omer analog register.
+ **/
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ u32 core_ctl;
+
+ DEBUGFUNC("ixgbe_write_analog_reg8_82599");
+
+ core_ctl = (reg << 8) | val;
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(10);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function and the
+ * generation-specific (gen2) start_hw function, then performs any
+ * revision-specific operations.
+ **/
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_start_hw_82599");
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ret_val = ixgbe_start_hw_gen2(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ /* We need to run link autotry after the driver loads */
+ hw->mac.autotry_restart = true;
+
+ if (ret_val == IXGBE_SUCCESS)
+ ret_val = ixgbe_verify_fw_version_82599(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_82599 - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ * If PHY already detected, maintains current PHY type in hw struct,
+ * otherwise executes the PHY detection routine.
+ **/
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_identify_phy_82599");
+
+ /* Detect PHY if not unknown - returns success if already detected. */
+ status = ixgbe_identify_phy_generic(hw);
+ if (status != IXGBE_SUCCESS) {
+ /* 82599 10GBASE-T requires an external PHY */
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+ return status;
+ else
+ status = ixgbe_identify_module_generic(hw);
+ }
+
+ /* Set PHY type none if no PHY detected */
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.type = ixgbe_phy_none;
+ return IXGBE_SUCCESS;
+ }
+
+ /* Return error if SFP module has been detected but is not supported */
+ if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+ u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u16 ext_ability = 0;
+
+ DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
+
+ hw->phy.ops.identify(hw);
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_cu_unknown:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_AN:
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
+ IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+ goto out;
+ } else
+ /* SFI mode so read SFP module */
+ goto sfp_check;
+ break;
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
+ goto out;
+ break;
+ case IXGBE_AUTOC_LMS_10G_SERIAL:
+ if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ goto out;
+ } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
+ goto sfp_check;
+ break;
+ case IXGBE_AUTOC_LMS_KX4_KX_KR:
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ goto out;
+ break;
+ default:
+ goto out;
+ break;
+ }
+
+sfp_check:
+ /* SFP check must be done last since DA modules are sometimes used to
+ * test KR mode - we need to id KR mode correctly before SFP module.
+ * Call identify_sfp because the pluggable module may have changed */
+ physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
+out:
+ return physical_layer;
+}
+
+/**
+ * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit for 82599
+ **/
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+{
+
+ DEBUGFUNC("ixgbe_enable_rx_dma_82599");
+
+ /*
+ * Workaround for 82599 silicon errata when enabling the Rx datapath.
+ * If traffic is incoming before we enable the Rx unit, it could hang
+ * the Rx DMA unit. Therefore, make sure the security engine is
+ * completely disabled prior to enabling the Rx unit.
+ */
+
+ hw->mac.ops.disable_sec_rx_path(hw);
+
+ if (regval & IXGBE_RXCTRL_RXEN)
+ ixgbe_enable_rx(hw);
+ else
+ ixgbe_disable_rx(hw);
+
+ hw->mac.ops.enable_sec_rx_path(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_verify_fw_version_82599 - verify FW version for 82599
+ * @hw: pointer to hardware structure
+ *
+ * Verifies that the installed firmware version is 0.6 or higher
+ * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+ *
+ * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+ * if the FW version is not supported.
+ **/
+STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM_VERSION;
+ u16 fw_offset, fw_ptp_cfg_offset;
+ u16 fw_version;
+
+ DEBUGFUNC("ixgbe_verify_fw_version_82599");
+
+ /* firmware check is only necessary for SFI devices */
+ if (hw->phy.media_type != ixgbe_media_type_fiber) {
+ status = IXGBE_SUCCESS;
+ goto fw_version_out;
+ }
+
+ /* get the offset to the Firmware Module block */
+ if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", IXGBE_FW_PTR);
+ return IXGBE_ERR_EEPROM_VERSION;
+ }
+
+ if ((fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the offset to the Pass Through Patch Configuration block */
+ if (hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+ &fw_ptp_cfg_offset)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed",
+ fw_offset +
+ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
+ return IXGBE_ERR_EEPROM_VERSION;
+ }
+
+ if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the firmware version */
+ if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+ IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed",
+ fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
+ return IXGBE_ERR_EEPROM_VERSION;
+ }
+
+ if (fw_version > 0x5)
+ status = IXGBE_SUCCESS;
+
+fw_version_out:
+ return status;
+}
+
+/**
+ * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if the LESM FW module is present and enabled. Otherwise
+ * returns false. Smart Speed must be disabled if LESM FW module is enabled.
+ **/
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+{
+ bool lesm_enabled = false;
+ u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
+
+ /* get the offset to the Firmware Module block */
+ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+ if ((status != IXGBE_SUCCESS) ||
+ (fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto out;
+
+ /* get the offset to the LESM Parameters block */
+ status = hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_LESM_PARAMETERS_PTR),
+ &fw_lesm_param_offset);
+
+ if ((status != IXGBE_SUCCESS) ||
+ (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
+ goto out;
+
+ /* get the LESM state word */
+ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
+ IXGBE_FW_LESM_STATE_1),
+ &fw_lesm_state);
+
+ if ((status == IXGBE_SUCCESS) &&
+ (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
+ lesm_enabled = true;
+
+out:
+ return lesm_enabled;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Retrieves 16 bit word(s) read from EEPROM
+ **/
+STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD otherwise use bit bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
+ data);
+ else
+ ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
+ words,
+ data);
+
+ return ret_val;
+}
+
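+/*
+ * Worked example (illustrative, not from the upstream sources): with the
+ * 14-bit EERD address space mentioned above, the last addressable word is
+ * 0x3FFF, so a read of 8 words starting at offset 0x3FF8 can still use
+ * EERD (0x3FF8 + 7 == 0x3FFF) while the same read starting at 0x3FF9
+ * falls back to the bit-bang path.
+ */
+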
+/**
+ * ixgbe_read_eeprom_82599 - Read EEPROM word using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM
+ **/
+STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+ u16 offset, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ DEBUGFUNC("ixgbe_read_eeprom_82599");
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD otherwise use bit bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_generic(hw, offset, data);
+ else
+ ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset. This function assumes the SW/FW lock is held.
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+ s32 ret_val;
+ u32 anlp1_reg = 0;
+ u32 i, autoc_reg, autoc2_reg;
+
+ /* Enable link if disabled in NVM */
+ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+ autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
+ autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ msec_delay(4);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+
+ if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+ DEBUGOUT("auto negotiation not completed\n");
+ ret_val = IXGBE_ERR_RESET_FAILED;
+ goto reset_pipeline_out;
+ }
+
+ ret_val = IXGBE_SUCCESS;
+
+reset_pipeline_out:
+ /* Write AUTOC register with original LMS field and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return ret_val;
+}
+
+
+/**
+ * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ DEBUGFUNC("ixgbe_read_i2c_byte_82599");
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ msec_delay(5);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Driver can't access resource,"
+ " acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ DEBUGFUNC("ixgbe_write_i2c_byte_82599");
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ msec_delay(5);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Driver can't access resource,"
+ " acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+
+ if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
+
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h
new file mode 100755
index 00000000..83124196
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h
@@ -0,0 +1,66 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_82599_H_
+#define _IXGBE_82599_H_
+#ident "$Id: ixgbe_82599.h,v 1.12 2013/10/30 10:19:10 jtkirshe Exp $"
+
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
+s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
+s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+#endif /* _IXGBE_82599_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.c
new file mode 100755
index 00000000..18027604
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.c
@@ -0,0 +1,1420 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#ident "$Id: ixgbe_api.c,v 1.207 2013/11/22 01:02:01 jtkirshe Exp $"
+
+/**
+ * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg
+ * @hw: pointer to hardware structure
+ * @map: pointer to u8 arr for returning map
+ *
+ * Read the rtrup2tc HW register and resolve its content into map
+ **/
+void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
+{
+ if (hw->mac.ops.get_rtrup2tc)
+ hw->mac.ops.get_rtrup2tc(hw, map);
+}
+
+/**
+ * ixgbe_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers and assign the MAC type and PHY code.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The ixgbe_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_init_shared_code");
+
+ /*
+ * Set the mac type
+ */
+ ixgbe_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ status = ixgbe_init_ops_82598(hw);
+ break;
+ case ixgbe_mac_82599EB:
+ status = ixgbe_init_ops_82599(hw);
+ break;
+ case ixgbe_mac_X540:
+ status = ixgbe_init_ops_X540(hw);
+ break;
+ case ixgbe_mac_X550:
+ status = ixgbe_init_ops_X550(hw);
+ break;
+ case ixgbe_mac_X550EM_x:
+ status = ixgbe_init_ops_X550EM(hw);
+ break;
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ status = ixgbe_init_ops_vf(hw);
+ break;
+ default:
+ status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+
+ return status;
+}
+
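+/*
+ * Illustrative usage sketch (not part of the upstream sources): the
+ * initialization order described above, with placeholder values. The
+ * caller zeroes the ixgbe_hw structure, fills in the PCI identification
+ * fields, initializes the shared code (which binds the MAC/PHY/EEPROM
+ * function pointers) and only then touches the hardware. Kept under
+ * #if 0 so it is never compiled.
+ */
+#if 0 /* example only */
+static s32 example_bring_up(struct ixgbe_hw *hw, u8 *bar0_regs, u16 device_id)
+{
+ s32 status;
+
+ memset(hw, 0, sizeof(*hw));
+ hw->hw_addr = bar0_regs; /* mapped register space */
+ hw->vendor_id = IXGBE_INTEL_VENDOR_ID;
+ hw->device_id = device_id; /* e.g. IXGBE_DEV_ID_82599_SFP */
+
+ status = ixgbe_init_shared_code(hw);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ return ixgbe_init_hw(hw); /* reset and start the MAC */
+}
+#endif
+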
+/**
+ * ixgbe_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_set_mac_type\n");
+
+ if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) {
+ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+ "Unsupported vendor id: %x", hw->vendor_id);
+ return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ case IXGBE_DEV_ID_82598_BX:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AT:
+ case IXGBE_DEV_ID_82598AT2:
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
+ hw->mac.type = ixgbe_mac_82598EB;
+ break;
+ case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ case IXGBE_DEV_ID_82599EN_SFP:
+ case IXGBE_DEV_ID_82599_CX4:
+ case IXGBE_DEV_ID_82599_LS:
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ hw->mac.type = ixgbe_mac_82599EB;
+ break;
+ case IXGBE_DEV_ID_82599_VF:
+ case IXGBE_DEV_ID_82599_VF_HV:
+ hw->mac.type = ixgbe_mac_82599_vf;
+ break;
+ case IXGBE_DEV_ID_X540_VF:
+ case IXGBE_DEV_ID_X540_VF_HV:
+ hw->mac.type = ixgbe_mac_X540_vf;
+ break;
+ case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
+ hw->mac.type = ixgbe_mac_X540;
+ break;
+ case IXGBE_DEV_ID_X550T:
+ hw->mac.type = ixgbe_mac_X550;
+ break;
+ case IXGBE_DEV_ID_X550EM_X:
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ hw->mac.type = ixgbe_mac_X550EM_x;
+ break;
+ case IXGBE_DEV_ID_X550_VF:
+ case IXGBE_DEV_ID_X550_VF_HV:
+ hw->mac.type = ixgbe_mac_X550_vf;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_VF:
+ case IXGBE_DEV_ID_X550EM_X_VF_HV:
+ hw->mac.type = ixgbe_mac_X550EM_x_vf;
+ break;
+ default:
+ ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+ "Unsupported device id: %x",
+ hw->device_id);
+ break;
+ }
+
+ DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, ret_val);
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_hw - Initialize the hardware
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting and then starting the hardware
+ **/
+s32 ixgbe_init_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_reset_hw - Performs a hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts, performs a PHY reset, and performs a MAC reset
+ **/
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_start_hw - Prepares hardware for Rx/Tx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type,
+ * clears all on chip counters, initializes receive address registers,
+ * multicast table, VLAN filter table, calls routine to setup link and
+ * flow control settings, and leaves transmit and receive units disabled
+ * and uninitialized.
+ **/
+s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_enable_relaxed_ordering - Enables Tx relaxed ordering,
+ * which is disabled by default in ixgbe_start_hw()
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Enables relaxed ordering.
+ **/
+void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_relaxed_ordering)
+ hw->mac.ops.enable_relaxed_ordering(hw);
+}
+
+/**
+ * ixgbe_clear_hw_cntrs - Clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware
+ * Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_media_type - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
+ ixgbe_media_type_unknown);
+}
+
+/**
+ * ixgbe_get_mac_addr - Get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from the first Receive Address Register
+ * (RAR0). A reset of the adapter must have been performed prior to calling
+ * this function in order for the MAC address to have been loaded from the
+ * EEPROM into RAR0.
+ **/
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
+ (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_san_mac_addr - Get SAN MAC address
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ **/
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
+ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_san_mac_addr - Write a SAN MAC address
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Writes a SAN MAC address to the EEPROM.
+ **/
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
+ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_device_caps - Get additional device capabilities
+ * @hw: pointer to hardware structure
+ * @device_caps: the EEPROM word for device capabilities
+ *
+ * Reads the extra device capabilities from the EEPROM
+ **/
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
+ (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function will read the EEPROM from the alternative SAN MAC address
+ * block to check the support for the alternative WWNN/WWPN prefix support.
+ **/
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix,
+ (hw, wwnn_prefix, wwpn_prefix),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM
+ * @hw: pointer to hardware structure
+ * @bs: the fcoe boot status
+ *
+ * This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status,
+ (hw, bs),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_bus_info - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_num_of_tx_queues - Get Tx queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
+{
+ return hw->mac.max_tx_queues;
+}
+
+/**
+ * ixgbe_get_num_of_rx_queues - Get Rx queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of receive queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
+{
+ return hw->mac.max_rx_queues;
+}
+
+/**
+ * ixgbe_stop_adapter - Disable Rx/Tx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ * ixgbe_read_pba_num - Reads part number from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number from the EEPROM
+ *
+ * Reads the part number from the EEPROM.
+ **/
+s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num)
+{
+ return ixgbe_read_pba_num_generic(hw, pba_num);
+}
+
+/**
+ * ixgbe_identify_phy - Get PHY type
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_phy - Perform a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS)
+ status = IXGBE_ERR_PHY;
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version - Get the PHY firmware version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to firmware version
+ **/
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
+ (hw, firmware_version),
+ IXGBE_NOT_IMPLEMENTED);
+ return status;
+}
+
+/**
+ * ixgbe_read_phy_reg - Read PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
+{
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+
+ return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_phy_reg - Write PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data)
+{
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+
+ return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_phy_link - Restart PHY autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restarts PHY autonegotiation and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_check_phy_link - Determine link and speed status
+ * @hw: pointer to hardware structure
+ *
+ * Reads a PHY register to determine if link is up and the current speed for
+ * the PHY.
+ **/
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
+ link_up), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_phy_link_speed - Set auto advertise
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ *
+ * Sets the auto advertised capabilities
+ **/
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
+ autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_check_link - Get link and speed status
+ * @hw: pointer to hardware structure
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
+ link_up, link_up_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_tx_laser - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * Disables the Tx laser on SFI optics when the driver needs to do so.
+ **/
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.disable_tx_laser)
+ hw->mac.ops.disable_tx_laser(hw);
+}
+
+/**
+ * ixgbe_enable_tx_laser - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * Enables the Tx laser on SFI optics when the driver needs to do so.
+ **/
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_tx_laser)
+ hw->mac.ops.enable_tx_laser(hw);
+}
+
+/**
+ * ixgbe_flap_tx_laser - flap Tx laser to start autotry process
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support then
+ * flap the tx laser to alert the link partner to start autotry
+ * process on its end.
+ **/
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.flap_tx_laser)
+ hw->mac.ops.flap_tx_laser(hw);
+}
+
+/**
+ * ixgbe_setup_link - Set link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ *
+ * Configures link settings. Restarts the link.
+ * Performs autonegotiation if needed.
+ **/
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
+ autoneg_wait_to_complete),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_link_capabilities - Returns link capabilities
+ * @hw: pointer to hardware structure
+ *
+ * Determines the link capabilities of the current configuration.
+ **/
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
+ speed, autoneg), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_led_on - Turn on LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ *
+ * Turns on the software controllable LEDs.
+ **/
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_led_off - Turn off LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ *
+ * Turns off the software controllable LEDs.
+ **/
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_blink_led_start - Blink LEDs
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Blink LED based on index.
+ **/
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_blink_led_stop - Stop blinking LEDs
+ * @hw: pointer to hardware structure
+ *
+ * Stop blinking LED based on index.
+ **/
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_eeprom_params - Initialize EEPROM parameters
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ * ixgbe_write_eeprom - Write word to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * Writes 16 bit value to EEPROM. If ixgbe_update_eeprom_checksum is not
+ * called after this function, the EEPROM will most likely contain an
+ * invalid checksum.
+ **/
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word(s) to be written to the EEPROM
+ * @words: number of words
+ *
+ * Writes 16 bit word(s) to EEPROM. If ixgbe_update_eeprom_checksum is not
+ * called after this function, the EEPROM will most likely contain an
+ * invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer,
+ (hw, offset, words, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_eeprom - Read word from EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM
+ **/
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit word(s) from EEPROM
+ * @words: number of words
+ *
+ * Reads 16 bit word(s) from EEPROM
+ **/
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer,
+ (hw, offset, words, data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum
+ **/
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
+ (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_insert_mac_addr - Find a RAR for this mac address
+ * @hw: pointer to hardware structure
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq pool to assign
+ *
+ * Puts an ethernet address into a receive address register, or
+ * finds the RAR that it is already in; adds to the pool list
+ **/
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
+ (hw, addr, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_rar - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set"
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
+ enable_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_rar - Clear Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vmdq - Associate a VMDq index with a receive address
+ * @hw: pointer to hardware structure
+ * @rar: receive address register index to associate with VMDq index
+ * @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+
+}
+
+/**
+ * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address
+ * @hw: pointer to hardware structure
+ * @vmdq: VMDq default pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac,
+ (hw, vmdq), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
+ * @hw: pointer to hardware structure
+ * @rar: receive address register index to disassociate with VMDq index
+ * @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_rx_addrs - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
+ * @hw: pointer to hardware structure
+ **/
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
+{
+ return hw->mac.num_rar_entries;
+}
+
+/**
+ * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
+ * @hw: pointer to hardware structure
+ * @addr_list: the list of new secondary (unicast) addresses
+ * @addr_count: number of addresses
+ * @func: iterator function to walk the address list
+ *
+ * The given list replaces any existing list. Clears the secondary addrs from
+ * receive address registers. Uses unused receive address registers for the
+ * first secondary addresses, and falls back to promiscuous mode as needed.
+ **/
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
+ addr_list, addr_count, func),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+ * @func: iterator function to walk the multicast address list
+ *
+ * The given list replaces any existing list. Clears the MC addrs from receive
+ * address registers and the multicast table. Uses unused receive address
+ * registers for the first multicast addresses, and hashes the rest into the
+ * multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr func,
+ bool clear)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
+ mc_addr_list, mc_addr_count, func, clear),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_enable_mc - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_mc - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vfta - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFTA
+ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
+ vlan_on), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_vlvf - Set VLAN Pool Filter
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVF
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ * should be changed
+ *
+ * Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+ bool *vfta_changed)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
+ vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED);
+}
+
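A hedged usage sketch of the two VLAN wrappers above; the helper name and the VLAN/pool values are hypothetical, and on some MAC types programming the VLVF separately may be unnecessary:

    /* Hypothetical helper: admit a VLAN for a VMDq pool (illustration only). */
    static s32 example_enable_pool_vlan(struct ixgbe_hw *hw, u32 vlan, u32 pool)
    {
            bool vfta_changed = false;
            s32 err = ixgbe_set_vfta(hw, vlan, pool, true);   /* VLAN filter table */

            if (err == IXGBE_SUCCESS)
                    err = ixgbe_set_vlvf(hw, vlan, pool, true, &vfta_changed);
            return err;
    }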
+/**
+ * ixgbe_fc_enable - Enable flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configures the flow control settings based on SW configuration.
+ **/
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
+ * @hw: pointer to hardware structure
+ * @maj: driver major number to be sent to firmware
+ * @min: driver minor number to be sent to firmware
+ * @build: driver build number to be sent to firmware
+ * @ver: driver version number to be sent to firmware
+ **/
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 ver)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
+ build, ver), IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map
+ **/
+s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_config - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing. If enabling dmac, dmac is activated.
+ * When disabling dmac, the dmac enable bit is cleared.
+ **/
+s32 ixgbe_dmac_config(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_update_tcs - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Disables dmac, updates per TC settings, and then enable dmac.
+ **/
+s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_config_tcs - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing threshold per TC and set high priority bit for
+ * FCOE TC. The dmac enable bit must be cleared before configuring.
+ **/
+s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_eee - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on the enable_eee flag.
+ * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
+ * are modified.
+ *
+ **/
+s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_source_address_pruning - Enable/Disable source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool to toggle source address pruning for
+ **/
+void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
+ unsigned int pool)
+{
+ if (hw->mac.ops.set_source_address_pruning)
+ hw->mac.ops.set_source_address_pruning(hw, enable, pool);
+}
+
+/**
+ * ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for Ethertype anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ *
+ **/
+void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+ if (hw->mac.ops.set_ethertype_anti_spoofing)
+ hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf);
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg - Read 32 bit PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: type of device you want to communicate with
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *phy_data)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: type of device you want to communicate with
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 phy_data)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr,
+ device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_mdd - Disable malicious driver detection
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_disable_mdd(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.disable_mdd)
+ hw->mac.ops.disable_mdd(hw);
+}
+
+/**
+ * ixgbe_enable_mdd - Enable malicious driver detection
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_enable_mdd(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_mdd)
+ hw->mac.ops.enable_mdd(hw);
+}
+
+/**
+ * ixgbe_mdd_event - Handle malicious driver detection event
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: vf bitmap of malicious vfs
+ *
+ **/
+void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap)
+{
+ if (hw->mac.ops.mdd_event)
+ hw->mac.ops.mdd_event(hw, vf_bitmap);
+}
+
+/**
+ * ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver
+ * detection event
+ * @hw: pointer to hardware structure
+ * @vf: vf index
+ *
+ **/
+void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf)
+{
+ if (hw->mac.ops.restore_mdd_vf)
+ hw->mac.ops.restore_mdd_vf(hw, vf);
+}
+
+/**
+ * ixgbe_read_analog_reg8 - Reads 8 bit analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation on the specified analog register.
+ **/
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
+ val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_analog_reg8 - Writes 8 bit analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
+ val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the Unicast Table Arrays to zero on device load. This
+ * is part of the Rx init addr execution path.
+ **/
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: I2C bus address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 *data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset,
+ dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_i2c_byte - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: I2C bus address to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface
+ * at a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset,
+ dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
+ u8 byte_offset, u8 eeprom_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom,
+ (hw, byte_offset, eeprom_data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
+{
+ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
+ (hw, byte_offset, eeprom_data),
+ IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_supported_physical_layer - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
+ (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
+}
+
+/**
+ * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics
+ * @hw: pointer to hardware structure
+ * @regval: bitfield to write to the Rx DMA register
+ *
+ * Enables the Rx DMA unit of the device.
+ **/
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
+ (hw, regval), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_sec_rx_path - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path.
+ **/
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path,
+ (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_enable_sec_rx_path - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path,
+ (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
+ (hw, mask), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_release_swfw_semaphore - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through SW_FW_SYNC register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
+{
+ if (hw->mac.ops.release_swfw_sync)
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
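The acquire/release pair above is meant to bracket accesses to resources shared with firmware; an illustrative pattern (IXGBE_GSSR_EEP_SM is the EEPROM semaphore mask from ixgbe_type.h, and the helper name is hypothetical):

    /* Illustrative pattern only: bracket a shared-resource access with the semaphore. */
    static s32 example_with_eeprom_semaphore(struct ixgbe_hw *hw)
    {
            s32 err = ixgbe_acquire_swfw_semaphore(hw, IXGBE_GSSR_EEP_SM);

            if (err != IXGBE_SUCCESS)
                    return err;
            /* ... access the resource guarded by the EEPROM semaphore ... */
            ixgbe_release_swfw_semaphore(hw, IXGBE_GSSR_EEP_SM);
            return IXGBE_SUCCESS;
    }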
+
+void ixgbe_disable_rx(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.disable_rx)
+ hw->mac.ops.disable_rx(hw);
+}
+
+void ixgbe_enable_rx(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.enable_rx)
+ hw->mac.ops.enable_rx(hw);
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.h
new file mode 100755
index 00000000..1c12ff6d
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_api.h
@@ -0,0 +1,203 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_API_H_
+#define _IXGBE_API_H_
+
+#include "ixgbe_type.h"
+#ident "$Id: ixgbe_api.h,v 1.123 2013/11/22 01:02:01 jtkirshe Exp $"
+
+void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
+
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw(struct ixgbe_hw *hw);
+void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data);
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data);
+
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up);
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq);
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr func,
+ bool clear);
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed);
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 ver);
+s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
+ bool cloud_mode);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask, bool cloud_mode);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue, bool cloud_mode);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id);
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask,
+ u16 soft_id,
+ u8 queue,
+ bool cloud_mode);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask);
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 *data);
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 data);
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix);
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
+s32 ixgbe_dmac_config(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw);
+s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee);
+void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
+ unsigned int vf);
+void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable,
+ int vf);
+s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *phy_data);
+s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 phy_data);
+void ixgbe_disable_mdd(struct ixgbe_hw *hw);
+void ixgbe_enable_mdd(struct ixgbe_hw *hw);
+void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap);
+void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_disable_rx(struct ixgbe_hw *hw);
+void ixgbe_enable_rx(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_API_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.c
new file mode 100755
index 00000000..37e5bae5
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.c
@@ -0,0 +1,4869 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82599.h"
+#include "ixgbe_api.h"
+#ident "$Id: ixgbe_common.c,v 1.382 2013/11/22 01:02:01 jtkirshe Exp $"
+
+STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
+STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+ u16 count);
+STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
+STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
+
+STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset);
+STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset);
+
+/**
+ * ixgbe_init_ops_generic - Inits function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ **/
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ DEBUGFUNC("ixgbe_init_ops_generic");
+
+ /* EEPROM */
+ eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
+ /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
+ if (eec & IXGBE_EEC_PRES) {
+ eeprom->ops.read = &ixgbe_read_eerd_generic;
+ eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
+ } else {
+ eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+ eeprom->ops.read_buffer =
+ &ixgbe_read_eeprom_buffer_bit_bang_generic;
+ }
+ eeprom->ops.write = &ixgbe_write_eeprom_generic;
+ eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
+ eeprom->ops.validate_checksum =
+ &ixgbe_validate_eeprom_checksum_generic;
+ eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
+ eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
+
+ /* MAC */
+ mac->ops.init_hw = &ixgbe_init_hw_generic;
+ mac->ops.reset_hw = NULL;
+ mac->ops.start_hw = &ixgbe_start_hw_generic;
+ mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
+ mac->ops.get_media_type = NULL;
+ mac->ops.get_supported_physical_layer = NULL;
+ mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
+ mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
+ mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
+ mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
+ mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
+ mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
+ mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
+ mac->ops.prot_autoc_read = &prot_autoc_read_generic;
+ mac->ops.prot_autoc_write = &prot_autoc_write_generic;
+
+ /* LEDs */
+ mac->ops.led_on = &ixgbe_led_on_generic;
+ mac->ops.led_off = &ixgbe_led_off_generic;
+ mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
+ mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_rar = &ixgbe_set_rar_generic;
+ mac->ops.clear_rar = &ixgbe_clear_rar_generic;
+ mac->ops.insert_mac_addr = NULL;
+ mac->ops.set_vmdq = NULL;
+ mac->ops.clear_vmdq = NULL;
+ mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
+ mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
+ mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
+ mac->ops.enable_mc = &ixgbe_enable_mc_generic;
+ mac->ops.disable_mc = &ixgbe_disable_mc_generic;
+ mac->ops.clear_vfta = NULL;
+ mac->ops.set_vfta = NULL;
+ mac->ops.set_vlvf = NULL;
+ mac->ops.init_uta_tables = NULL;
+ mac->ops.enable_rx = &ixgbe_enable_rx_generic;
+ mac->ops.disable_rx = &ixgbe_disable_rx_generic;
+
+ /* Flow Control */
+ mac->ops.fc_enable = &ixgbe_fc_enable_generic;
+
+ /* Link */
+ mac->ops.get_link_capabilities = NULL;
+ mac->ops.setup_link = NULL;
+ mac->ops.check_link = NULL;
+ mac->ops.dmac_config = NULL;
+ mac->ops.dmac_update_tcs = NULL;
+ mac->ops.dmac_config_tcs = NULL;
+
+ return IXGBE_SUCCESS;
+}
+
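The ops left NULL here are filled in by the device-specific init routines declared in ixgbe_api.h (ixgbe_init_ops_82598/82599/X540/...). A rough, illustrative sketch of that layering; this is not the actual 82599/X540 code, which lives in its own file:

    /* Hypothetical device init: install generic defaults, then override. */
    static s32 example_init_ops_dev(struct ixgbe_hw *hw)
    {
            s32 ret_val = ixgbe_init_ops_generic(hw);

            /* Fill in the pointers left NULL above with device-specific
             * implementations, e.g. reset_hw, get_media_type, setup_link,
             * set_vmdq, clear_vfta, init_uta_tables. */
            return ret_val;
    }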
+/**
+ * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
+ * of flow control
+ * @hw: pointer to hardware structure
+ *
+ * This function returns true if the device supports flow control
+ * autonegotiation, and false if it does not.
+ *
+ **/
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+ bool supported = false;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
+
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_qsfp:
+ case ixgbe_media_type_fiber:
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ /* if link is down, assume supported */
+ if (link_up)
+ supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
+ true : false;
+ else
+ supported = true;
+ break;
+ case ixgbe_media_type_backplane:
+ supported = true;
+ break;
+ case ixgbe_media_type_copper:
+ /* only some copper devices support flow control autoneg */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X550T:
+ supported = true;
+ break;
+ default:
+ supported = false;
+ }
+ default:
+ break;
+ }
+
+ if (!supported)
+ 	ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+ 		      "Device %x does not support flow control autoneg",
+ 		      hw->device_id);
+ return supported;
+}
+
+/**
+ * ixgbe_setup_fc - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+STATIC s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 reg = 0, reg_bp = 0;
+ u16 reg_cu = 0;
+ bool locked = false;
+
+ DEBUGFUNC("ixgbe_setup_fc");
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /*
+ * Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do fc autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_backplane:
+ /* some MAC's need RMW protection on AUTOC */
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ /* only backplane uses autoc so fall through */
+ case ixgbe_media_type_fiber_qsfp:
+ case ixgbe_media_type_fiber:
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
+ break;
+ case ixgbe_media_type_copper:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ reg |= IXGBE_PCS1GANA_ASM_PAUSE;
+ reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
+ reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
+ } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+ reg_cu |= IXGBE_TAF_ASM_PAUSE;
+ reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
+ }
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE;
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
+ break;
+ default:
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+ "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+
+ if (hw->mac.type < ixgbe_mac_X540) {
+ /*
+ * Enable auto-negotiation between the MAC & PHY;
+ * the MAC will advertise clause 37 flow control.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+ /* Disable AN timeout */
+ if (hw->fc.strict_ieee)
+ reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+ DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ }
+
+ /*
+ * AUTOC restart handles negotiation of 1G and 10G on backplane
+ * and copper. There is no need to set the PCS1GCTL register.
+ *
+ */
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
+ if (ret_val)
+ goto out;
+ } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+ (ixgbe_device_supports_autoneg_fc(hw))) {
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
+ }
+
+ DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type, clears
+ * all on chip counters, initializes receive address registers, multicast
+ * table, VLAN filter table, calls routine to set up link and flow control
+ * settings, and leaves transmit and receive units disabled and uninitialized
+ **/
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+{
+ s32 ret_val;
+ u32 ctrl_ext;
+
+ DEBUGFUNC("ixgbe_start_hw_generic");
+
+ /* Set the media type */
+ hw->phy.media_type = hw->mac.ops.get_media_type(hw);
+
+ /* PHY ops initialization must be done in reset_hw() */
+
+ /* Clear the VLAN filter table */
+ hw->mac.ops.clear_vfta(hw);
+
+ /* Clear statistics registers */
+ hw->mac.ops.clear_hw_cntrs(hw);
+
+ /* Set No Snoop Disable */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Setup flow control */
+ ret_val = ixgbe_setup_fc(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_start_hw_gen2 - Init sequence for common device family
+ * @hw: pointer to hw structure
+ *
+ * Performs the init sequence common to the second generation
+ * of 10 GbE devices.
+ * Devices in the second generation:
+ * 82599
+ * X540
+ **/
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 regval;
+
+ /* Clear the rate limiters */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
+ }
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Disable relaxed ordering */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+ }
+
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_hw_generic - Generic hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware, filling the bus info
+ * structure and media type, clears all on chip counters, initializes receive
+ * address registers, multicast table, VLAN filter table, calls routine to set
+ * up link and flow control settings, and leaves transmit and receive units
+ * disabled and uninitialized
+ **/
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_init_hw_generic");
+
+ /* Reset the hardware */
+ status = hw->mac.ops.reset_hw(hw);
+
+ if (status == IXGBE_SUCCESS) {
+ /* Start the HW */
+ status = hw->mac.ops.start_hw(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware
+ * Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+{
+ u16 i = 0;
+
+ DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
+
+ IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+ IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+ IXGBE_READ_REG(hw, IXGBE_ERRBC);
+ IXGBE_READ_REG(hw, IXGBE_MSPDC);
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
+ IXGBE_READ_REG(hw, IXGBE_MLFC);
+ IXGBE_READ_REG(hw, IXGBE_MRFC);
+ IXGBE_READ_REG(hw, IXGBE_RLEC);
+ IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ }
+
+ for (i = 0; i < 8; i++) {
+ IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ }
+ }
+ if (hw->mac.type >= ixgbe_mac_82599EB)
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+ IXGBE_READ_REG(hw, IXGBE_PRC64);
+ IXGBE_READ_REG(hw, IXGBE_PRC127);
+ IXGBE_READ_REG(hw, IXGBE_PRC255);
+ IXGBE_READ_REG(hw, IXGBE_PRC511);
+ IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ IXGBE_READ_REG(hw, IXGBE_PRC1522);
+ IXGBE_READ_REG(hw, IXGBE_GPRC);
+ IXGBE_READ_REG(hw, IXGBE_BPRC);
+ IXGBE_READ_REG(hw, IXGBE_MPRC);
+ IXGBE_READ_REG(hw, IXGBE_GPTC);
+ IXGBE_READ_REG(hw, IXGBE_GORCL);
+ IXGBE_READ_REG(hw, IXGBE_GORCH);
+ IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ IXGBE_READ_REG(hw, IXGBE_RUC);
+ IXGBE_READ_REG(hw, IXGBE_RFC);
+ IXGBE_READ_REG(hw, IXGBE_ROC);
+ IXGBE_READ_REG(hw, IXGBE_RJC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+ IXGBE_READ_REG(hw, IXGBE_TORL);
+ IXGBE_READ_REG(hw, IXGBE_TORH);
+ IXGBE_READ_REG(hw, IXGBE_TPR);
+ IXGBE_READ_REG(hw, IXGBE_TPT);
+ IXGBE_READ_REG(hw, IXGBE_PTC64);
+ IXGBE_READ_REG(hw, IXGBE_PTC127);
+ IXGBE_READ_REG(hw, IXGBE_PTC255);
+ IXGBE_READ_REG(hw, IXGBE_PTC511);
+ IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ IXGBE_READ_REG(hw, IXGBE_MPTC);
+ IXGBE_READ_REG(hw, IXGBE_BPTC);
+ for (i = 0; i < 16; i++) {
+ IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ }
+ }
+
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ DEBUGFUNC("ixgbe_read_pba_string_generic");
+
+ if (pba_num == NULL) {
+ DEBUGOUT("PBA string buffer was null\n");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /*
+ * if data is not ptr guard the PBA must be in legacy format which
+ * means pba_ptr is actually our second data word for the PBA number
+ * and we can decode it into an ascii string
+ */
+ if (data != IXGBE_PBANUM_PTR_GUARD) {
+ DEBUGOUT("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (data >> 12) & 0xF;
+ pba_num[1] = (data >> 8) & 0xF;
+ pba_num[2] = (data >> 4) & 0xF;
+ pba_num[3] = data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return IXGBE_SUCCESS;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return IXGBE_ERR_PBA_SECTION;
+ }
+
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return IXGBE_SUCCESS;
+}
+
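A worked example of the legacy branch above, with illustrative EEPROM word values:

    /*
     * data    = 0xE666, pba_ptr = 0x4203
     * nibbles = E 6 6 6 | 4 2, then a literal '-', a literal 0, and 0 3
     * output  = "E66642-003"
     */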
+/**
+ * ixgbe_read_pba_num_generic - Reads part number from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number from the EEPROM
+ *
+ * Reads the part number from the EEPROM.
+ **/
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
+{
+ s32 ret_val;
+ u16 data;
+
+ DEBUGFUNC("ixgbe_read_pba_num_generic");
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ } else if (data == IXGBE_PBANUM_PTR_GUARD) {
+ DEBUGOUT("NVM Not supported\n");
+ return IXGBE_NOT_IMPLEMENTED;
+ }
+ *pba_num = (u32)(data << 16);
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ *pba_num |= data;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @max_pba_block_size: PBA block size limit
+ * @pba: pointer to output PBA structure
+ *
+ * Reads PBA from EEPROM image when eeprom_buf is not NULL.
+ * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct ixgbe_pba *pba)
+{
+ s32 ret_val;
+ u16 pba_block_size;
+
+ if (pba == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+ pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return IXGBE_ERR_PARAM;
+
+ ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
+ eeprom_buf_size,
+ &pba_block_size);
+ if (ret_val)
+ return ret_val;
+
+ if (pba_block_size > max_pba_block_size)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
+ pba_block_size,
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba_block_size)) {
+ memcpy(pba->pba_block,
+ &eeprom_buf[pba->word[1]],
+ pba_block_size * sizeof(u16));
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba: pointer to PBA structure
+ *
+ * Writes PBA to EEPROM image when eeprom_buf is not NULL.
+ * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct ixgbe_pba *pba)
+{
+ s32 ret_val;
+
+ if (pba == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba->word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
+ eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return IXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
+ pba->pba_block[0],
+ pba->pba_block);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba->pba_block[0])) {
+ memcpy(&eeprom_buf[pba->word[1]],
+ pba->pba_block,
+ pba->pba_block[0] * sizeof(u16));
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_pba_block_size
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba_data_size: pointer to output variable
+ *
+ * Returns the size of the PBA block in words. Function operates on EEPROM
+ * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
+ * EEPROM device.
+ *
+ **/
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size)
+{
+ s32 ret_val;
+ u16 pba_word[2];
+ u16 length;
+
+ DEBUGFUNC("ixgbe_get_pba_block_size");
+
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+ &pba_word[0]);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+ pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+ pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+ } else {
+ return IXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
+ if (eeprom_buf == NULL) {
+ ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
+ &length);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (eeprom_buf_size > pba_word[1])
+ length = eeprom_buf[pba_word[1] + 0];
+ else
+ return IXGBE_ERR_PARAM;
+ }
+
+ if (length == 0xFFFF || length == 0)
+ return IXGBE_ERR_PBA_SECTION;
+ } else {
+ /* PBA number in legacy format, there is no PBA Block. */
+ length = 0;
+ }
+
+ if (pba_block_size != NULL)
+ *pba_block_size = length;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_mac_addr_generic - Generic get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from the first Receive Address Register
+ * (RAR0). A reset of the adapter must be performed prior to calling this
+ * function in order for the MAC address to have been loaded from the EEPROM
+ * into RAR0.
+ **/
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_get_mac_addr_generic");
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
+ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
+
+ for (i = 0; i < 4; i++)
+ mac_addr[i] = (u8)(rar_low >> (i*8));
+
+ for (i = 0; i < 2; i++)
+ mac_addr[i+4] = (u8)(rar_high >> (i*8));
+
+ return IXGBE_SUCCESS;
+}
+
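A worked example of the byte ordering above (register contents illustrative; RAH bit 31 is the address-valid bit and is ignored here):

    /*
     * RAL(0) = 0x44332211, RAH(0) = 0x00006655
     * mac_addr[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 }  ->  11:22:33:44:55:66
     */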
+/**
+ * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status returned by the PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ hw->bus.type = ixgbe_bus_type_pci_express;
+
+ switch (link_status & IXGBE_PCI_LINK_WIDTH) {
+ case IXGBE_PCI_LINK_WIDTH_1:
+ hw->bus.width = ixgbe_bus_width_pcie_x1;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_2:
+ hw->bus.width = ixgbe_bus_width_pcie_x2;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_4:
+ hw->bus.width = ixgbe_bus_width_pcie_x4;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_8:
+ hw->bus.width = ixgbe_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = ixgbe_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & IXGBE_PCI_LINK_SPEED) {
+ case IXGBE_PCI_LINK_SPEED_2500:
+ hw->bus.speed = ixgbe_bus_speed_2500;
+ break;
+ case IXGBE_PCI_LINK_SPEED_5000:
+ hw->bus.speed = ixgbe_bus_speed_5000;
+ break;
+ case IXGBE_PCI_LINK_SPEED_8000:
+ hw->bus.speed = ixgbe_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = ixgbe_bus_speed_unknown;
+ break;
+ }
+
+ mac->ops.set_lan_id(hw);
+}
+
+/**
+ * ixgbe_get_bus_info_generic - Generic set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Gets the PCI bus info (speed, width, type) then calls helper function to
+ * store this data within the ixgbe_hw structure.
+ **/
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+{
+ u16 link_status;
+
+ DEBUGFUNC("ixgbe_get_bus_info_generic");
+
+ /* Get the negotiated link width and speed from PCI config space */
+ link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+
+ ixgbe_set_pci_config_data_generic(hw, link_status);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
+
+ reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
+ bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
+ bus->lan_id = bus->func;
+
+ /* check for a port swap */
+ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ if (reg & IXGBE_FACTPS_LFS)
+ bus->func ^= 0x1;
+}
+
+/**
+ * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+{
+ u32 reg_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_stop_adapter_generic");
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Disable the receive unit */
+ ixgbe_disable_rx(hw);
+
+ /* Clear interrupt mask to stop interrupts from being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ IXGBE_READ_REG(hw, IXGBE_EICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ reg_val |= IXGBE_RXDCTL_SWFLSH;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
+ }
+
+ /* flush all queues disables */
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(2);
+
+ /*
+ * Prevent the PCI-E bus from hanging by disabling PCI-E master
+ * access and verifying there are no pending requests
+ */
+ return ixgbe_disable_pcie_master(hw);
+}
+
+/**
+ * ixgbe_led_on_generic - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ **/
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ DEBUGFUNC("ixgbe_led_on_generic");
+
+ /* To turn on the LED, set mode to ON. */
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_led_off_generic - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ **/
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ DEBUGFUNC("ixgbe_led_off_generic");
+
+ /* To turn off the LED, set mode to OFF. */
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ DEBUGFUNC("ixgbe_init_eeprom_params_generic");
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->type = ixgbe_eeprom_none;
+ /* Set default semaphore delay to 10ms which is a well
+ * tested value */
+ eeprom->semaphore_delay = 10;
+ /* Clear EEPROM page size, it will be initialized as needed */
+ eeprom->word_page_size = 0;
+
+ /*
+ * Check for EEPROM present first.
+ * If not present leave as none
+ */
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (eec & IXGBE_EEC_PRES) {
+ eeprom->type = ixgbe_eeprom_spi;
+
+ /*
+ * SPI EEPROM is assumed here. This code would need to
+ * change if a future EEPROM is not SPI.
+ */
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ }
+
+ if (eec & IXGBE_EEC_ADDR_SIZE)
+ eeprom->address_bits = 16;
+ else
+ eeprom->address_bits = 8;
+ DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
+ "%d\n", eeprom->type, eeprom->word_size,
+ eeprom->address_bits);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
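+/*
+ * Illustrative note, not part of the driver sources: the word_size value
+ * computed above is a power of two derived from the EEC size field.
+ * Assuming IXGBE_EEPROM_WORD_SIZE_SHIFT is 6 (as in ixgbe_type.h), a size
+ * field of 0 yields 1 << 6 = 64 words, 2 yields 1 << 8 = 256 words, and
+ * 4 yields 1 << 10 = 1024 words of EEPROM.
+ */
+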
+/**
+ * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to write
+ * @words: number of word(s)
+ * @data: 16 bit word(s) to write to EEPROM
+ *
+ * Writes 16 bit word(s) to EEPROM through bit-bang method
+ **/
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ /*
+ * The EEPROM page size cannot be queried from the chip. We do lazy
+ * initialization. It is worth doing that only when we write a large buffer.
+ */
+ if ((hw->eeprom.word_page_size == 0) &&
+ (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
+ ixgbe_detect_eeprom_page_size_generic(hw, offset);
+
+ /*
+ * We cannot hold the synchronization semaphores for too long,
+ * to avoid starving other entities. However, it is more efficient
+ * to write in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != IXGBE_SUCCESS)
+ break;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of word(s)
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word;
+ u16 page_size;
+ u16 i;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+
+ DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
+
+ /* Prepare the EEPROM for writing */
+ status = ixgbe_acquire_eeprom(hw);
+
+ if (status == IXGBE_SUCCESS) {
+ if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
+ ixgbe_release_eeprom(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode ) */
+ ixgbe_shift_out_eeprom_bits(hw,
+ IXGBE_EEPROM_WREN_OPCODE_SPI,
+ IXGBE_EEPROM_OPCODE_BITS);
+
+ ixgbe_standby_eeprom(hw);
+
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ page_size = hw->eeprom.word_page_size;
+
+ /* Send the data in burst via SPI*/
+ do {
+ word = data[i];
+ word = (word >> 8) | (word << 8);
+ ixgbe_shift_out_eeprom_bits(hw, word, 16);
+
+ if (page_size == 0)
+ break;
+
+ /* do not wrap around page */
+ if (((offset + i) & (page_size - 1)) ==
+ (page_size - 1))
+ break;
+ } while (++i < words);
+
+ ixgbe_standby_eeprom(hw);
+ msec_delay(10);
+ }
+ /* Done with writing - release the EEPROM */
+ ixgbe_release_eeprom(hw);
+ }
+
+ return status;
+}
+
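+/*
+ * Illustrative note, not part of the driver sources: the page-wrap check
+ * above breaks a burst at the last word of an EEPROM page. For example,
+ * with a detected word_page_size of 16, the burst stops whenever
+ * ((offset + i) & 15) == 15, i.e. at word offsets 15, 31, 47, ..., so a
+ * single SPI write sequence never crosses a page boundary.
+ */
+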
+/**
+ * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_write_eeprom_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @words: number of word(s)
+ * @data: read 16 bit word(s) from EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ /*
+ * We cannot hold the synchronization semaphores for too long,
+ * to avoid starving other entities. However, it is more efficient
+ * to read in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != IXGBE_SUCCESS)
+ break;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @words: number of word(s)
+ * @data: read 16 bit word(s) from EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word_in;
+ u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
+
+ /* Prepare the EEPROM for reading */
+ status = ixgbe_acquire_eeprom(hw);
+
+ if (status == IXGBE_SUCCESS) {
+ if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
+ ixgbe_release_eeprom(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ }
+
+ if (status == IXGBE_SUCCESS) {
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ /* Read the data. */
+ word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+
+ /* End this read operation */
+ ixgbe_release_eeprom(hw);
+ }
+
+ return status;
+}
+
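+/*
+ * Illustrative note, not part of the driver sources: the 16 bits clocked in
+ * from the SPI EEPROM are byte-swapped before being returned to the caller,
+ * since the stored byte order differs from the shift order. For example, a
+ * raw word_in of 0xAB12 is reported as data[i] = 0x12AB.
+ */
+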
+/**
+ * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of word(s)
+ * @data: 16 bit word(s) from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ u32 eerd;
+ s32 status = IXGBE_SUCCESS;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
+ goto out;
+ }
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ IXGBE_EEPROM_RW_REG_START;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
+
+ if (status == IXGBE_SUCCESS) {
+ data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
+ IXGBE_EEPROM_RW_REG_DATA);
+ } else {
+ DEBUGOUT("Eeprom read timed out\n");
+ goto out;
+ }
+ }
+out:
+ return status;
+}
+
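+/*
+ * Illustrative note, not part of the driver sources: assuming the usual
+ * ixgbe_type.h values (IXGBE_EEPROM_RW_ADDR_SHIFT == 2,
+ * IXGBE_EEPROM_RW_REG_START == 1, IXGBE_EEPROM_RW_REG_DATA == 16), reading
+ * word 0x3F writes eerd = (0x3F << 2) | 1 = 0xFD to EERD, and once the done
+ * bit is polled the word is taken from the upper 16 bits of the register.
+ */
+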
+/**
+ * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be used as a scratch pad
+ *
+ * Discover EEPROM page size by writing marching data at given offset.
+ * This function is called only when we are writing a new large buffer
+ * at given offset so the data would be overwritten anyway.
+ **/
+STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset)
+{
+ u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
+ s32 status = IXGBE_SUCCESS;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
+
+ for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
+ data[i] = i;
+
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
+ IXGBE_EEPROM_PAGE_SIZE_MAX, data);
+ hw->eeprom.word_page_size = 0;
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ /*
+ * When writing in burst more than the actual page size
+ * EEPROM address wraps around current page.
+ */
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
+
+ DEBUGOUT1("Detected EEPROM page size = %d words.",
+ hw->eeprom.word_page_size);
+out:
+ return status;
+}
+
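+/*
+ * Illustrative note, not part of the driver sources: worked example of the
+ * detection above. Suppose the device's real page size is 16 words and
+ * IXGBE_EEPROM_PAGE_SIZE_MAX is 128. Writing the marching values 0..127 in
+ * one burst makes the device's address counter wrap every 16 words, so the
+ * word at 'offset' is last overwritten by data[112]. Reading it back gives
+ * 112, and 128 - 112 = 16 recovers the page size.
+ */
+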
+/**
+ * ixgbe_read_eerd_generic - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
+}
+
+/**
+ * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of word(s)
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ u32 eewr;
+ s32 status = IXGBE_SUCCESS;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_write_eewr_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
+ goto out;
+ }
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
+ IXGBE_EEPROM_RW_REG_START;
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom write EEWR timed out\n");
+ goto out;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom write EEWR timed out\n");
+ goto out;
+ }
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
+}
+
+/**
+ * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
+ * @hw: pointer to hardware structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
+ * read or write is done respectively.
+ **/
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
+
+ for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
+ if (ee_reg == IXGBE_NVM_POLL_READ)
+ reg = IXGBE_READ_REG(hw, IXGBE_EERD);
+ else
+ reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
+
+ if (reg & IXGBE_EEPROM_RW_REG_DONE) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ if (i == IXGBE_EERD_EEWR_ATTEMPTS)
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "EEPROM read/write done polling timed out");
+
+ return status;
+}
+
+/**
+ * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ *
+ * Prepares EEPROM for access using bit-bang method. This function should
+ * be called before issuing a command to the EEPROM.
+ **/
+STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 eec;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_acquire_eeprom");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
+ != IXGBE_SUCCESS)
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ if (status == IXGBE_SUCCESS) {
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ /* Request EEPROM Access */
+ eec |= IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+ for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (eec & IXGBE_EEC_GNT)
+ break;
+ usec_delay(5);
+ }
+
+ /* Release if grant not acquired */
+ if (!(eec & IXGBE_EEC_GNT)) {
+ eec &= ~IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ DEBUGOUT("Could not acquire EEPROM grant\n");
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ status = IXGBE_ERR_EEPROM;
+ }
+
+ /* Setup EEPROM for Read/Write */
+ if (status == IXGBE_SUCCESS) {
+ /* Clear CS and SK */
+ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+ }
+ }
+ return status;
+}
+
+/**
+ * ixgbe_get_eeprom_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
+ **/
+STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ DEBUGFUNC("ixgbe_get_eeprom_semaphore");
+
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
+ "not granted.\n");
+ /*
+ * this release is particularly important because our attempts
+ * above to get the semaphore may have succeeded, and if there
+ * was a timeout, we should unconditionally clear the semaphore
+ * bits to free the driver to make progress
+ */
+ ixgbe_release_eeprom_semaphore(hw);
+
+ usec_delay(50);
+ /*
+ * one last try
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI))
+ status = IXGBE_SUCCESS;
+ }
+
+ /* Now get the semaphore between SW/FW through the SWESMBI bit */
+ if (status == IXGBE_SUCCESS) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+ /* Set the SW EEPROM semaphore bit to request access */
+ swsm |= IXGBE_SWSM_SWESMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+ /*
+ * If we set the bit successfully then we got the
+ * semaphore.
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (swsm & IXGBE_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ /*
+ * Release semaphores and return error if SW EEPROM semaphore
+ * was not granted because we don't have access to the EEPROM
+ */
+ if (i >= timeout) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "SWESMBI Software EEPROM semaphore not granted.\n");
+ ixgbe_release_eeprom_semaphore(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ } else {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_eeprom_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("ixgbe_release_eeprom_semaphore");
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+ /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
+ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ready_eeprom - Polls for EEPROM ready
+ * @hw: pointer to hardware structure
+ **/
+STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 i;
+ u8 spi_stat_reg;
+
+ DEBUGFUNC("ixgbe_ready_eeprom");
+
+ /*
+ * Read "Status Register" repeatedly until the LSB is cleared. The
+ * EEPROM will signal that the command has been completed by clearing
+ * bit 0 of the internal status register. If it's not cleared within
+ * 5 milliseconds, then error out.
+ */
+ for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
+ ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
+ IXGBE_EEPROM_OPCODE_BITS);
+ spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
+ if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
+ break;
+
+ usec_delay(5);
+ ixgbe_standby_eeprom(hw);
+ }
+
+ /*
+ * On some parts, SPI write time could vary from 0-20mSec on 3.3V
+ * devices (and only 0-5mSec on 5V devices)
+ */
+ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
+ DEBUGOUT("SPI EEPROM Status error\n");
+ status = IXGBE_ERR_EEPROM;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
+ * @hw: pointer to hardware structure
+ **/
+STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
+{
+ u32 eec;
+
+ DEBUGFUNC("ixgbe_standby_eeprom");
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ /* Toggle CS to flush commands */
+ eec |= IXGBE_EEC_CS;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+ eec &= ~IXGBE_EEC_CS;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+}
+
+/**
+ * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
+ * @hw: pointer to hardware structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ **/
+STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+ u16 count)
+{
+ u32 eec;
+ u32 mask;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ /*
+ * Mask is used to shift "count" bits of "data" out to the EEPROM
+ * one bit at a time. Determine the starting bit based on count
+ */
+ mask = 0x01 << (count - 1);
+
+ for (i = 0; i < count; i++) {
+ /*
+ * A "1" is shifted out to the EEPROM by setting bit "DI" to a
+ * "1", and then raising and then lowering the clock (the SK
+ * bit controls the clock input to the EEPROM). A "0" is
+ * shifted out to the EEPROM by setting "DI" to "0" and then
+ * raising and then lowering the clock.
+ */
+ if (data & mask)
+ eec |= IXGBE_EEC_DI;
+ else
+ eec &= ~IXGBE_EEC_DI;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+
+ usec_delay(1);
+
+ ixgbe_raise_eeprom_clk(hw, &eec);
+ ixgbe_lower_eeprom_clk(hw, &eec);
+
+ /*
+ * Shift mask to signify next bit of data to shift in to the
+ * EEPROM
+ */
+ mask = mask >> 1;
+ }
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eec &= ~IXGBE_EEC_DI;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to hardware structure
+ * @count: number of bits to shift in
+ **/
+STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
+{
+ u32 eec;
+ u32 i;
+ u16 data = 0;
+
+ DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
+
+ /*
+ * In order to read a register from the EEPROM, we need to shift
+ * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
+ * the clock input to the EEPROM (setting the SK bit), and then reading
+ * the value of the "DO" bit. During this "shifting in" process the
+ * "DI" bit should always be clear.
+ */
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
+
+ for (i = 0; i < count; i++) {
+ data = data << 1;
+ ixgbe_raise_eeprom_clk(hw, &eec);
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ eec &= ~(IXGBE_EEC_DI);
+ if (eec & IXGBE_EEC_DO)
+ data |= 1;
+
+ ixgbe_lower_eeprom_clk(hw, &eec);
+ }
+
+ return data;
+}
+
+/**
+ * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
+ * @hw: pointer to hardware structure
+ * @eec: EEC register's current value
+ **/
+STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+ DEBUGFUNC("ixgbe_raise_eeprom_clk");
+
+ /*
+ * Raise the clock input to the EEPROM
+ * (setting the SK bit), then delay
+ */
+ *eec = *eec | IXGBE_EEC_SK;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+}
+
+/**
+ * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
+ * @hw: pointer to hardware structure
+ * @eec: EEC register's current value
+ **/
+STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+ DEBUGFUNC("ixgbe_lower_eeprom_clk");
+
+ /*
+ * Lower the clock input to the EEPROM (clearing the SK bit), then
+ * delay
+ */
+ *eec = *eec & ~IXGBE_EEC_SK;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+}
+
+/**
+ * ixgbe_release_eeprom - Release EEPROM, release semaphores
+ * @hw: pointer to hardware structure
+ **/
+STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
+{
+ u32 eec;
+
+ DEBUGFUNC("ixgbe_release_eeprom");
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ eec |= IXGBE_EEC_CS; /* Pull CS high */
+ eec &= ~IXGBE_EEC_SK; /* Lower SCK */
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+
+ usec_delay(1);
+
+ /* Stop requesting EEPROM access */
+ eec &= ~IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ /* Delay before attempt to obtain semaphore again to allow FW access */
+ msec_delay(hw->eeprom.semaphore_delay);
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+ u16 i;
+ u16 j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+
+ DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
+
+ /* Include words 0x00 - 0x3E; the checksum word itself is excluded */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+ if (hw->eeprom.ops.read(hw, i, &word)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+ checksum += word;
+ }
+
+ /* Include all data from pointers except for the fw pointer */
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ if (hw->eeprom.ops.read(hw, i, &pointer)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+
+ /* If the pointer seems invalid */
+ if (pointer == 0xFFFF || pointer == 0)
+ continue;
+
+ if (hw->eeprom.ops.read(hw, pointer, &length)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+
+ if (length == 0xFFFF || length == 0)
+ continue;
+
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (hw->eeprom.ops.read(hw, j, &word)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+ checksum += word;
+ }
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return (s32)checksum;
+}
+
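+/*
+ * Illustrative note, not part of the driver sources: the EEPROM convention
+ * is that the covered words plus the stored checksum word sum to
+ * IXGBE_EEPROM_SUM (0xBABA in ixgbe_type.h). For example, if the words
+ * summed above add up to 0x1234, this function returns
+ * 0xBABA - 0x1234 = 0xA886, which is the value expected at word offset
+ * IXGBE_EEPROM_CHECKSUM.
+ */
+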
+/**
+ * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum;
+
+ DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_mac_addr - Validate MAC address
+ * @mac_addr: pointer to MAC address.
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+s32 ixgbe_validate_mac_addr(u8 *mac_addr)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_validate_mac_addr");
+
+ /* Make sure it is not a multicast address */
+ if (IXGBE_IS_MULTICAST(mac_addr)) {
+ DEBUGOUT("MAC address is multicast\n");
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ /* Not a broadcast address */
+ } else if (IXGBE_IS_BROADCAST(mac_addr)) {
+ DEBUGOUT("MAC address is broadcast\n");
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ /* Reject the zero address */
+ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+ DEBUGOUT("MAC address is all zeros\n");
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ }
+ return status;
+}
+
+/**
+ * ixgbe_set_rar_generic - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ u32 rar_low, rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_set_rar_generic");
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+ "RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ /* setup VMDq pool selection before this RAR gets enabled */
+ hw->mac.ops.set_vmdq(hw, index, vmdq);
+
+ /*
+ * HW expects these in little endian so we reverse the byte
+ * order from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] |
+ ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) |
+ ((u32)addr[3] << 24));
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+ rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+
+ if (enable_addr != 0)
+ rar_high |= IXGBE_RAH_AV;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_rar_generic - Remove Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ *
+ * Clears an ethernet address from a receive address register.
+ **/
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_clear_rar_generic");
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+ "RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_init_rx_addrs_generic");
+
+ /*
+ * If the current mac address is valid, assume it is a software override
+ * to the permanent address.
+ * Otherwise, use the permanent address from the eeprom.
+ */
+ if (ixgbe_validate_mac_addr(hw->mac.addr) ==
+ IXGBE_ERR_INVALID_MAC_ADDR) {
+ /* Get the MAC address from the RAR0 for later reference */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
+ DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
+ hw->mac.addr[4], hw->mac.addr[5]);
+ } else {
+ /* Setup the receive address. */
+ DEBUGOUT("Overriding MAC Address in RAR[0]\n");
+ DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
+ DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
+ hw->mac.addr[4], hw->mac.addr[5]);
+
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for RAR 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+ }
+ hw->addr_ctrl.overflow_promisc = 0;
+
+ hw->addr_ctrl.rar_used_count = 1;
+
+ /* Zero out the other receive addresses. */
+ DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
+ for (i = 1; i < rar_entries; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+ }
+
+ /* Clear the MTA */
+ hw->addr_ctrl.mta_in_use = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ DEBUGOUT(" Clearing MTA\n");
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+ ixgbe_init_uta_tables(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_add_uc_addr - Adds a secondary unicast address.
+ * @hw: pointer to hardware structure
+ * @addr: new address
+ * @vmdq: VMDq "set" or "pool" index
+ *
+ * Adds the address to an unused receive address register or, if none is
+ * available, puts the controller into promiscuous mode.
+ **/
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ u32 rar_entries = hw->mac.num_rar_entries;
+ u32 rar;
+
+ DEBUGFUNC("ixgbe_add_uc_addr");
+
+ DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ /*
+ * Place this address in the RAR if there is room,
+ * else put the controller into promiscuous mode
+ */
+ if (hw->addr_ctrl.rar_used_count < rar_entries) {
+ rar = hw->addr_ctrl.rar_used_count;
+ hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+ DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
+ hw->addr_ctrl.rar_used_count++;
+ } else {
+ hw->addr_ctrl.overflow_promisc++;
+ }
+
+ DEBUGOUT("ixgbe_add_uc_addr Complete\n");
+}
+
+/**
+ * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
+ * @hw: pointer to hardware structure
+ * @addr_list: the list of new addresses
+ * @addr_count: number of addresses
+ * @next: iterator function to walk the address list
+ *
+ * The given list replaces any existing list. Clears the secondary addrs from
+ * receive address registers. Uses unused receive address registers for the
+ * first secondary addresses, and falls back to promiscuous mode as needed.
+ *
+ * Drivers using secondary unicast addresses must set user_set_promisc when
+ * manually putting the device into promiscuous mode.
+ **/
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr next)
+{
+ u8 *addr;
+ u32 i;
+ u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
+ u32 uc_addr_in_use;
+ u32 fctrl;
+ u32 vmdq;
+
+ DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
+
+ /*
+ * Clear accounting of old secondary address list,
+ * don't count RAR[0]
+ */
+ uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
+ hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
+ hw->addr_ctrl.overflow_promisc = 0;
+
+ /* Zero out the other receive addresses */
+ DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
+ for (i = 0; i < uc_addr_in_use; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
+ }
+
+ /* Add the new addresses */
+ for (i = 0; i < addr_count; i++) {
+ DEBUGOUT(" Adding the secondary addresses:\n");
+ addr = next(hw, &addr_list, &vmdq);
+ ixgbe_add_uc_addr(hw, addr, vmdq);
+ }
+
+ if (hw->addr_ctrl.overflow_promisc) {
+ /* enable promisc if not already in overflow or set by user */
+ if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+ DEBUGOUT(" Entering address overflow promisc mode\n");
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_UPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ }
+ } else {
+ /* only disable if set by overflow, not by user */
+ if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+ DEBUGOUT(" Leaving address overflow promisc mode\n");
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl &= ~IXGBE_FCTRL_UPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ }
+ }
+
+ DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits from a multicast address that determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits from
+ * incoming rx multicast addresses to determine the bit-vector to check in
+ * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ **/
+STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ DEBUGFUNC("ixgbe_mta_vector");
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ DEBUGOUT("MC filter type param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * ixgbe_set_mta - Set bit-vector in multicast table
+ * @hw: pointer to hardware structure
+ * @mc_addr: Multicast address
+ *
+ * Sets the bit-vector in the multicast table.
+ **/
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector;
+ u32 vector_bit;
+ u32 vector_reg;
+
+ DEBUGFUNC("ixgbe_set_mta");
+
+ hw->addr_ctrl.mta_in_use++;
+
+ vector = ixgbe_mta_vector(hw, mc_addr);
+ DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
+
+ /*
+ * The MTA is a register array of 128 32-bit registers. It is treated
+ * like an array of 4096 bits. We want to set bit
+ * BitArray[vector_value]. So we figure out what register the bit is
+ * in, read it, OR in the new bit, then write back the new value. The
+ * register is determined by the upper 7 bits of the vector value and
+ * the bit within that register is determined by the lower 5 bits of
+ * the value.
+ */
+ vector_reg = (vector >> 5) & 0x7F;
+ vector_bit = vector & 0x1F;
+ hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
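+/*
+ * Illustrative note, not part of the driver sources: worked example of the
+ * hashing above. For the multicast address 01:00:5E:00:00:FB with
+ * mc_filter_type 0, mc_addr[4] = 0x00 and mc_addr[5] = 0xFB, so
+ * vector = (0x00 >> 4) | (0xFB << 4) = 0xFB0. The bit then lands in
+ * mta_shadow[(0xFB0 >> 5) & 0x7F] = mta_shadow[0x7D], bit 0xFB0 & 0x1F = 16.
+ */
+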
+/**
+ * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+ * @next: iterator function to walk the multicast address list
+ * @clear: flag, when set clears the table beforehand
+ *
+ * When the clear flag is set, the given list replaces any existing list.
+ * Hashes the given addresses into the multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
+{
+ u32 i;
+ u32 vmdq;
+
+ DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
+
+ /*
+ * Set the new number of MC addresses that we are being requested to
+ * use.
+ */
+ hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+ hw->addr_ctrl.mta_in_use = 0;
+
+ /* Clear mta_shadow */
+ if (clear) {
+ DEBUGOUT(" Clearing MTA\n");
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+ }
+
+ /* Update mta_shadow */
+ for (i = 0; i < mc_addr_count; i++) {
+ DEBUGOUT(" Adding the multicast addresses:\n");
+ ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+ }
+
+ /* Enable mta */
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+ hw->mac.mta_shadow[i]);
+
+ if (hw->addr_ctrl.mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+
+ DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_mc_generic - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+ DEBUGFUNC("ixgbe_enable_mc_generic");
+
+ if (a->mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
+ hw->mac.mc_filter_type);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_disable_mc_generic - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+ DEBUGFUNC("ixgbe_disable_mc_generic");
+
+ if (a->mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fc_enable_generic - Enable flow control
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to the current settings.
+ **/
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 mflcn_reg, fccfg_reg;
+ u32 reg;
+ u32 fcrtl, fcrth;
+ int i;
+
+ DEBUGFUNC("ixgbe_fc_enable_generic");
+
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ DEBUGOUT("Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
+ /* Negotiate the fc mode to use */
+ ixgbe_fc_autoneg(hw);
+
+ /* Disable any previous flow control settings */
+ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+
+ fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
+
+ /*
+ * The possible values of fc.current_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.current_mode) {
+ case ixgbe_fc_none:
+ /*
+ * Flow control is disabled by software override or autoneg.
+ * The code below will actually disable it in the HW.
+ */
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ mflcn_reg |= IXGBE_MFLCN_RFCE;
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ mflcn_reg |= IXGBE_MFLCN_RFCE;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+ break;
+ default:
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+ "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ mflcn_reg |= IXGBE_MFLCN_DPF;
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
+
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+ fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
+ */
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
+ }
+
+ /* Configure pause time (2 TCs per register) */
+ reg = hw->fc.pause_time * 0x00010001;
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_negotiate_fc - Negotiate flow control
+ * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
+ **/
+STATIC s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+ if ((!(adv_reg)) || (!(lp_reg))) {
+ ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
+ "Local or link partner's advertised flow control "
+ "settings are NULL. Local: %x, link partner: %x\n",
+ adv_reg, lp_reg);
+ return IXGBE_ERR_FC_NOT_NEGOTIATED;
+ }
+
+ if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+ /*
+ * Now we need to check if the user selected Rx ONLY
+ * for pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control=RX PAUSE frames only\n");
+ }
+ } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+ } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+ return IXGBE_SUCCESS;
+}
+
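+/*
+ * Illustrative note, not part of the driver sources: the resolution above
+ * follows the usual IEEE 802.3 Annex 28B pause negotiation, roughly:
+ *
+ *   adv_sym adv_asm  lp_sym lp_asm   resolved mode
+ *      1       x        1      x     full if fc_full was requested,
+ *                                    otherwise rx_pause
+ *      0       1        1      1     tx_pause
+ *      1       1        0      1     rx_pause
+ *      otherwise                     none
+ */
+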
+/**
+ * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ * @hw: pointer to hardware structure
+ *
+ * Enables flow control on 1 gig fiber, according to IEEE clause 37.
+ **/
+STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+ u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+ /*
+ * On multispeed fiber at 1g, bail out if
+ * - link is up but AN did not complete, or if
+ * - link is up and AN completed but timed out
+ */
+
+ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Auto-Negotiation did not complete or timed out");
+ goto out;
+ }
+
+ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+ ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+ pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE,
+ IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+ u32 links2, anlp1_reg, autoc_reg, links;
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+ /*
+ * On backplane, bail out if
+ * - backplane autoneg was not completed, or if
+ * - we are 82599 and link partner is not AN enabled
+ */
+ links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Auto-Negotiation did not complete");
+ goto out;
+ }
+
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "Link partner is not AN enabled");
+ goto out;
+ }
+ }
+ /*
+ * Read the 10g AN autoc and LP ability registers and resolve
+ * local flow control settings accordingly
+ */
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+ ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+ anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+ IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+ u16 technology_ability_reg = 0;
+ u16 lp_technology_ability_reg = 0;
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &technology_ability_reg);
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &lp_technology_ability_reg);
+
+ return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+ (u32)lp_technology_ability_reg,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ * ixgbe_fc_autoneg - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Compares our advertised flow control capabilities to those advertised by
+ * our link partner, and determines the proper flow control mode to use.
+ **/
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ DEBUGFUNC("ixgbe_fc_autoneg");
+
+ /*
+ * AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "Flow control autoneg is disabled");
+ goto out;
+ }
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
+ goto out;
+ }
+
+ switch (hw->phy.media_type) {
+ /* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber_qsfp:
+ case ixgbe_media_type_fiber:
+ if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+ ret_val = ixgbe_fc_autoneg_fiber(hw);
+ break;
+
+ /* Autoneg flow control on backplane adapters */
+ case ixgbe_media_type_backplane:
+ ret_val = ixgbe_fc_autoneg_backplane(hw);
+ break;
+
+ /* Autoneg flow control on copper adapters */
+ case ixgbe_media_type_copper:
+ if (ixgbe_device_supports_autoneg_fc(hw))
+ ret_val = ixgbe_fc_autoneg_copper(hw);
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ if (ret_val == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
+ * @hw: pointer to hardware structure
+ *
+ * System-wide timeout range is encoded in PCIe Device Control2 register.
+ *
+ * Add 10% to specified maximum and return the number of times to poll for
+ * completion timeout, in units of 100 microsec. Never return less than
+ * 800 = 80 millisec.
+ */
+STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+{
+ s16 devctl2;
+ u32 pollcnt;
+
+ devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+ devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
+
+ switch (devctl2) {
+ case IXGBE_PCIDEVCTRL2_65_130ms:
+ pollcnt = 1300; /* 130 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_260_520ms:
+ pollcnt = 5200; /* 520 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_1_2s:
+ pollcnt = 20000; /* 2 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_4_8s:
+ pollcnt = 80000; /* 8 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_17_34s:
+ pollcnt = 34000; /* 34 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
+ case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
+ case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
+ case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
+ default:
+ pollcnt = 800; /* 80 millisec minimum */
+ break;
+ }
+
+ /* add 10% to spec maximum */
+ return (pollcnt * 11) / 10;
+}
+
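+/*
+ * Illustrative note, not part of the driver sources: with the 260-520 ms
+ * encoding, for example, pollcnt is 5200 and the returned value is
+ * 5200 * 11 / 10 = 5720 polls of 100 microseconds, i.e. roughly 572 ms,
+ * 10% above the worst-case completion timeout.
+ */
+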
+/**
+ * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * @hw: pointer to hardware structure
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
+ * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
+ * is returned signifying master requests disabled.
+ **/
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 i, poll;
+ u16 value;
+
+ DEBUGFUNC("ixgbe_disable_pcie_master");
+
+ /* Always set this bit to ensure any future transactions are blocked */
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
+
+ /* Exit if master requests are blocked */
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
+ IXGBE_REMOVED(hw->hw_addr))
+ goto out;
+
+ /* Poll for master request bit to clear */
+ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ usec_delay(100);
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto out;
+ }
+
+ /*
+ * Two consecutive resets are required via CTRL.RST per datasheet
+ * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new master requests from
+ * being issued by our device. We then must wait 1usec or more for any
+ * remaining completions from the PCIe bus to trickle in, and then reset
+ * again to clear out any effects they may have had on our device.
+ */
+ DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
+ hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+
+ /*
+ * Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+ poll = ixgbe_pcie_timeout_poll(hw);
+ for (i = 0; i < poll; i++) {
+ usec_delay(100);
+ value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto out;
+ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ goto out;
+ }
+
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PCIe transaction pending bit also did not clear.\n");
+ status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 gssr = 0;
+ u32 swmask = mask;
+ u32 fwmask = mask << 5;
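+ /* In GSSR the software-owned semaphore bits occupy the low bit
+ * positions and the matching firmware-owned bits sit five positions
+ * higher, hence fwmask = mask << 5.
+ */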
+ u32 timeout = 200;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_acquire_swfw_sync");
+
+ for (i = 0; i < timeout; i++) {
+ /*
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_eeprom_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+ if (!(gssr & (fwmask | swmask))) {
+ gssr |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+ ixgbe_release_eeprom_semaphore(hw);
+ return IXGBE_SUCCESS;
+ } else {
+ /* Resource is currently in use by FW or SW */
+ ixgbe_release_eeprom_semaphore(hw);
+ msec_delay(5);
+ }
+ }
+
+ /* If time expired clear the bits holding the lock and retry */
+ if (gssr & (fwmask | swmask))
+ ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
+
+ msec_delay(5);
+ return IXGBE_ERR_SWFW_SYNC;
+}
+
+/**
+ * ixgbe_release_swfw_sync - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 gssr;
+ u32 swmask = mask;
+
+ DEBUGFUNC("ixgbe_release_swfw_sync");
+
+ ixgbe_get_eeprom_semaphore(hw);
+
+ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+ gssr &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+ ixgbe_release_eeprom_semaphore(hw);
+}
+
+/**
+ * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECRX_POLL 40
+
+ int i;
+ int secrxreg;
+
+ DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
+
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+ if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+ break;
+ else
+ /* Use interrupt-safe sleep just in case */
+ usec_delay(1000);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECRX_POLL)
+ DEBUGOUT("Rx unit being enabled before security "
+ "path fully disabled. Continuing with init.\n");
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: set to false; the default case needs no SW/FW lock
+ * @reg_val: value read from AUTOC
+ *
+ * The default case requires no protection, so just do the register read.
+ */
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+ *locked = false;
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous read.
+ *
+ * The default case requires no protection, so just do the register write.
+ */
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+{
+ UNREFERENCED_1PARAMETER(locked);
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+ int secrxreg;
+
+ DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit
+ **/
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
+{
+ DEBUGFUNC("ixgbe_enable_rx_dma_generic");
+
+ if (regval & IXGBE_RXCTRL_RXEN)
+ ixgbe_enable_rx(hw);
+ else
+ ixgbe_disable_rx(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_blink_led_start_generic - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ **/
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+{
+ ixgbe_link_speed speed = 0;
+ bool link_up = 0;
+ u32 autoc_reg = 0;
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = IXGBE_SUCCESS;
+ bool locked = false;
+
+ DEBUGFUNC("ixgbe_blink_led_start_generic");
+
+ /*
+ * Link must be up to auto-blink the LEDs;
+ * Force it if link is down.
+ */
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+
+ if (!link_up) {
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ autoc_reg |= IXGBE_AUTOC_FLU;
+
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(10);
+ }
+
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ **/
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 autoc_reg = 0;
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = IXGBE_SUCCESS;
+ bool locked = false;
+
+ DEBUGFUNC("ixgbe_blink_led_stop_generic");
+
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ autoc_reg &= ~IXGBE_AUTOC_FLU;
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg &= ~IXGBE_LED_BLINK(index);
+ led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_offset: SAN MAC address offset
+ *
+ * Reads the EEPROM location holding the SAN MAC address pointer and
+ * returns the value found there. This is used by both the get and set
+ * mac_addr routines.
+ **/
+STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available.
+ */
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
+ san_mac_offset);
+ if (ret_val) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom at offset %d failed",
+ IXGBE_SAN_MAC_ADDR_PTR);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ * set_lan_id() is called by identify_sfp(), but this cannot be relied
+ * upon for non-SFP connections, so we must call it here.
+ **/
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available. If they're not, no point in calling set_lan_id() here.
+ */
+ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+ goto san_mac_addr_out;
+
+ /* make sure we know which port we need to program */
+ hw->mac.ops.set_lan_id(hw);
+ /* apply the port offset to the address offset */
+ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ for (i = 0; i < 3; i++) {
+ ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
+ &san_mac_data);
+ if (ret_val) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed",
+ san_mac_offset);
+ goto san_mac_addr_out;
+ }
+ san_mac_addr[i * 2] = (u8)(san_mac_data);
+ san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+ san_mac_offset++;
+ }
+ return IXGBE_SUCCESS;
+
+san_mac_addr_out:
+ /*
+ * No addresses available in this EEPROM. It's not an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Write a SAN MAC address to the EEPROM.
+ **/
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ s32 ret_val;
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+
+ DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
+
+ /* Look for SAN mac address pointer. If not defined, return */
+ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+ return IXGBE_ERR_NO_SAN_ADDR_PTR;
+
+ /* Make sure we know which port we need to write */
+ hw->mac.ops.set_lan_id(hw);
+ /* Apply the port offset to the address offset */
+ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+
+ for (i = 0; i < 3; i++) {
+ san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
+ san_mac_data |= (u16)(san_mac_addr[i * 2]);
+ hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
+ san_mac_offset++;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
+ * @hw: pointer to hardware structure
+ *
+ * Read PCIe configuration space, and get the MSI-X vector count from
+ * the capabilities table.
+ **/
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+{
+ u16 msix_count = 1;
+ u16 max_msix_count;
+ u16 pcie_offset;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
+ default:
+ return msix_count;
+ }
+
+ DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
+ msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ msix_count = 0;
+ msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+ /* MSI-X count is zero-based in HW */
+ msix_count++;
+
+ if (msix_count > max_msix_count)
+ msix_count = max_msix_count;
+
+ return msix_count;
+}
+
+/**
+ * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
+ * @hw: pointer to hardware structure
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq pool to assign
+ *
+ * Puts an Ethernet address into a receive address register, or
+ * finds the RAR it is already in, and adds it to the pool list.
+ **/
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
+ u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
+ u32 rar;
+ u32 rar_low, rar_high;
+ u32 addr_low, addr_high;
+
+ DEBUGFUNC("ixgbe_insert_mac_addr_generic");
+
+ /* swap bytes for HW little endian */
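+ /* RAL holds the first four MAC bytes with addr[0] in the least
+ * significant byte; the low 16 bits of RAH hold the last two bytes.
+ */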
+ addr_low = addr[0] | (addr[1] << 8)
+ | (addr[2] << 16)
+ | (addr[3] << 24);
+ addr_high = addr[4] | (addr[5] << 8);
+
+ /*
+ * Either find the mac_id in rar or find the first empty space.
+ * rar_highwater points to just after the highest currently used
+ * rar in order to shorten the search. It grows when we add a new
+ * rar to the top.
+ */
+ for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+
+ if (((IXGBE_RAH_AV & rar_high) == 0)
+ && first_empty_rar == NO_EMPTY_RAR_FOUND) {
+ first_empty_rar = rar;
+ } else if ((rar_high & 0xFFFF) == addr_high) {
+ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
+ if (rar_low == addr_low)
+ break; /* found it already in the rars */
+ }
+ }
+
+ if (rar < hw->mac.rar_highwater) {
+ /* already there so just add to the pool bits */
+ ixgbe_set_vmdq(hw, rar, vmdq);
+ } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
+ /* stick it into first empty RAR slot we found */
+ rar = first_empty_rar;
+ ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+ } else if (rar == hw->mac.rar_highwater) {
+ /* add it to the top of the list and inc the highwater mark */
+ ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+ hw->mac.rar_highwater++;
+ } else if (rar >= hw->mac.num_rar_entries) {
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+ }
+
+ /*
+ * If we found rar[0], make sure the default pool bit (we use pool 0)
+ * remains cleared to be sure default pool packets will get delivered
+ */
+ if (rar == 0)
+ ixgbe_clear_vmdq(hw, rar, 0);
+
+ return rar;
+}
+
+/**
+ * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to disassociate
+ * @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar_lo, mpsar_hi;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_clear_vmdq_generic");
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+ "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto done;
+
+ if (!mpsar_lo && !mpsar_hi)
+ goto done;
+
+ if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+ if (mpsar_lo) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ mpsar_lo = 0;
+ }
+ if (mpsar_hi) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ mpsar_hi = 0;
+ }
+ } else if (vmdq < 32) {
+ mpsar_lo &= ~(1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
+ } else {
+ mpsar_hi &= ~(1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
+ }
+
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ hw->mac.ops.clear_rar(hw, rar);
+done:
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("ixgbe_set_vmdq_generic");
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+ "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
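+ /* Each RAR entry has a 64-bit pool bitmap split across MPSAR_LO
+ * (pools 0-31) and MPSAR_HI (pools 32-63); set the bit for this pool.
+ */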
+ if (vmdq < 32) {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar |= 1 << vmdq;
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+ } else {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ mpsar |= 1 << (vmdq - 32);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ *
+ * This function should only be invoked in IOV mode. In IOV mode the default
+ * pool is the pool following the number of VFs advertised, not pool 0.
+ * The MPSAR table needs to be updated for the SAN_MAC RAR
+ * [hw->mac.san_mac_rar_index].
+ **/
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+{
+ u32 rar = hw->mac.san_mac_rar_index;
+
+ DEBUGFUNC("ixgbe_set_vmdq_san_mac");
+
+ if (vmdq < 32) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+{
+ int i;
+
+ DEBUGFUNC("ixgbe_init_uta_tables_generic");
+ DEBUGOUT(" Clearing UTA\n");
+
+ for (i = 0; i < 128; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ *
+ * return the VLVF index where this VLAN id should be placed
+ *
+ **/
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+{
+ u32 bits = 0;
+ u32 first_empty_slot = 0;
+ s32 regindex;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /*
+ * Search for the vlan id in the VLVF entries. Save off the first empty
+ * slot found along the way
+ */
+ for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+ if (!bits && !(first_empty_slot))
+ first_empty_slot = regindex;
+ else if ((bits & 0x0FFF) == vlan)
+ break;
+ }
+
+ /*
+ * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
+ * in the VLVF. Else use the first empty VLVF register for this
+ * vlan id.
+ */
+ if (regindex >= IXGBE_VLVF_ENTRIES) {
+ if (first_empty_slot)
+ regindex = first_empty_slot;
+ else {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
+ "No space in VLVF.\n");
+ regindex = IXGBE_ERR_NO_SPACE;
+ }
+ }
+
+ return regindex;
+}
+
+/**
+ * ixgbe_set_vfta_generic - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ s32 regindex;
+ u32 bitindex;
+ u32 vfta;
+ u32 targetbit;
+ s32 ret_val = IXGBE_SUCCESS;
+ bool vfta_changed = false;
+
+ DEBUGFUNC("ixgbe_set_vfta_generic");
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /*
+ * this is a 2 part operation - first the VFTA, then the
+ * VLVF and VLVFB if VT Mode is set
+ * We don't write the VFTA until we know the VLVF part succeeded.
+ */
+
+ /* Part 1
+ * The VFTA is a bitstring made up of 128 32-bit registers
+ * that enable the particular VLAN id, much like the MTA:
+ * bits[11-5]: which register
+ * bits[4-0]: which bit in the register
+ */
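+ /* Example: VLAN 100 maps to VFTA register 3, bit 4. */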
+ regindex = (vlan >> 5) & 0x7F;
+ bitindex = vlan & 0x1F;
+ targetbit = (1 << bitindex);
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+
+ if (vlan_on) {
+ if (!(vfta & targetbit)) {
+ vfta |= targetbit;
+ vfta_changed = true;
+ }
+ } else {
+ if ((vfta & targetbit)) {
+ vfta &= ~targetbit;
+ vfta_changed = true;
+ }
+ }
+
+ /* Part 2
+ * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
+ */
+ ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
+ &vfta_changed);
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ if (vfta_changed)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ * @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ * should be changed
+ *
+ * Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed)
+{
+ u32 vt;
+
+ DEBUGFUNC("ixgbe_set_vlvf_generic");
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /* If VT Mode is set
+ * Either vlan_on
+ * make sure the vlan is in VLVF
+ * set the vind bit in the matching VLVFB
+ * Or !vlan_on
+ * clear the pool bit and possibly the vind
+ */
+ vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (vt & IXGBE_VT_CTL_VT_ENABLE) {
+ s32 vlvf_index;
+ u32 bits;
+
+ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
+ if (vlvf_index < 0)
+ return vlvf_index;
+
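+ /* Each VLVF entry owns two VLVFB registers that together form a
+ * 64-bit pool bitmap: VLVFB(2 * vlvf_index) covers pools 0-31 and
+ * VLVFB(2 * vlvf_index + 1) covers pools 32-63.
+ */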
+ if (vlan_on) {
+ /* set the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
+ bits |= (1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2),
+ bits);
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits |= (1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1),
+ bits);
+ }
+ } else {
+ /* clear the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
+ bits &= ~(1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits &= ~(1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
+ }
+ }
+
+ /*
+ * If there are still bits set in the VLVFB registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the VFTA entry bit.
+ * If the caller has requested that we clear the VFTA
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ if ((!vlan_on) && (vfta_changed != NULL)) {
+ /* someone wants to clear the vfta entry
+ * but some pools/VFs are still using it.
+ * Ignore it. */
+ *vfta_changed = false;
+ }
+ } else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+ u32 offset;
+
+ DEBUGFUNC("ixgbe_clear_vfta_generic");
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_mac_link_generic - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: pointer to bool, set true when the link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ u32 links_reg, links_orig;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_check_mac_link_generic");
+
+ /* clear the old state */
+ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ if (links_orig != links_reg) {
+ DEBUGOUT2("LINKS changed from %08X to %08X\n",
+ links_orig, links_reg);
+ }
+
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msec_delay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599) {
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type > ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ }
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_1G_82599)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_100_82599)
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function reads the alternative SAN MAC address block of the EEPROM
+ * to check whether the alternative WWNN/WWPN prefixes are supported and to
+ * read them if so.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
+ if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
+ goto wwn_prefix_err;
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ if (hw->eeprom.ops.read(hw, offset, &caps))
+ goto wwn_prefix_err;
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", offset);
+ }
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
+ goto wwn_prefix_err;
+
+wwn_prefix_out:
+ return IXGBE_SUCCESS;
+
+wwn_prefix_err:
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", offset);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
+ * @hw: pointer to hardware structure
+ * @bs: the fcoe boot status
+ *
+ * This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
+{
+ u16 offset, caps, flags;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
+
+ /* clear output first */
+ *bs = ixgbe_fcoe_bootstatus_unavailable;
+
+ /* check if FCOE IBA block is present */
+ offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
+ status = hw->eeprom.ops.read(hw, offset, &caps);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
+ goto out;
+
+ /* check if iSCSI FCOE block is populated */
+ status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if ((offset == 0) || (offset == 0xFFFF))
+ goto out;
+
+ /* read fcoe flags in iSCSI FCOE block */
+ offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
+ status = hw->eeprom.ops.read(hw, offset, &flags);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
+ *bs = ixgbe_fcoe_bootstatus_enabled;
+ else
+ *bs = ixgbe_fcoe_bootstatus_disabled;
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for anti-spoofing
+ * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ *
+ **/
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+{
+ int j;
+ int pf_target_reg = pf >> 3;
+ int pf_target_shift = pf % 8;
+ u32 pfvfspoof = 0;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (enable)
+ pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
+
+ /*
+ * PFVFSPOOF register array is size 8 with 8 bits assigned to
+ * MAC anti-spoof enables in each register array element.
+ */
+ for (j = 0; j < pf_target_reg; j++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+ /*
+ * The PF should be allowed to spoof so that it can support
+ * emulation mode NICs. Do not set the bits assigned to the PF
+ */
+ pfvfspoof &= (1 << pf_target_shift) - 1;
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+ /*
+ * Remaining pools belong to the PF so they do not need to have
+ * anti-spoofing enabled.
+ */
+ for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+}
+
+/**
+ * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ *
+ **/
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
+ u32 pfvfspoof;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_get_device_caps_generic - Get additional device capabilities
+ * @hw: pointer to hardware structure
+ * @device_caps: the EEPROM word with the extra device capabilities
+ *
+ * This function will read the EEPROM location for the device capabilities,
+ * and return the word through device_caps.
+ **/
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+{
+ DEBUGFUNC("ixgbe_get_device_caps_generic");
+
+ hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
+{
+ u32 regval;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
+
+ /* Enable relaxed ordering */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+ regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+ }
+
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+}
+
+/**
+ * ixgbe_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to the buffer to checksum
+ * @length: length of the buffer in bytes
+ *
+ * Calculates the checksum of the given buffer over the specified length
+ * and returns the calculated checksum.
+ **/
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ DEBUGFUNC("ixgbe_calculate_checksum");
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
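+ /* Return the two's complement so that the byte sum of the buffer
+ * plus this checksum is zero modulo 256.
+ */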
+ return (u8) (0 - sum);
+}
+
+/**
+ * ixgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be a multiple of 4 bytes
+ * @return_data: read data back from the buffer (true) or not (false).
+ * This is needed because FW structures are big endian and the
+ * returned fields can be 8 bit or 16 bit wide depending on the
+ * command; decoding them is not practical without a table of
+ * commands, so reading the data back is left to the caller in
+ * those cases.
+ *
+ * Communicates with the manageability block. On success return IXGBE_SUCCESS
+ * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length, bool return_data)
+{
+ u32 hicr, i, bi, fwsts;
+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ u16 buf_len;
+ u8 dword_len;
+
+ DEBUGFUNC("ixgbe_host_interface_command");
+
+ if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+ /* Set bit 9 of FWSTS clearing FW reset indication */
+ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
+ IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
+
+ /* Check that the host interface is enabled. */
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if ((hicr & IXGBE_HICR_EN) == 0) {
+ DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if ((length % (sizeof(u32))) != 0) {
+ DEBUGOUT("Buffer length failure, not aligned to dword");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ dword_len = length >> 2;
+
+ /* The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < dword_len; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ i, IXGBE_CPU_TO_LE32(buffer[i]));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
+
+ for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if (!(hicr & IXGBE_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check command completion */
+ if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
+ ERROR_REPORT1(IXGBE_ERROR_CAUTION,
+ "Command has failed with no status valid.\n");
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ if (!return_data)
+ return 0;
+
+ /* Calculate length in DWORDs */
+ dword_len = hdr_size >> 2;
+
+ /* first pull in the header so we know the buffer length */
+ for (bi = 0; bi < dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ }
+
+ /* If there is any thing in data position pull it in */
+ buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
+ if (buf_len == 0)
+ return 0;
+
+ if (length < buf_len + hdr_size) {
+ DEBUGOUT("Buffer not large enough for reply message.\n");
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs, add 3 for odd lengths */
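+ /* (buf_len + 3) >> 2 rounds up, e.g. a 5-byte reply needs 2 DWORDs */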
+ dword_len = (buf_len + 3) >> 2;
+
+ /* Pull in the rest of the buffer (bi is where we left off) */
+ for (; bi <= dword_len; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ *
+ * Sends the driver version number to firmware through the manageability
+ * block. Returns IXGBE_SUCCESS on success, IXGBE_ERR_SWFW_SYNC if an
+ * error is encountered while acquiring the semaphore, or
+ * IXGBE_ERR_HOST_INTERFACE_COMMAND if the command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub)
+{
+ struct ixgbe_hic_drv_info fw_cmd;
+ int i;
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
+ != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+ fw_cmd.pad = 0;
+ fw_cmd.pad2 = 0;
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd), true);
+ if (ret_val != IXGBE_SUCCESS)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = IXGBE_SUCCESS;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy)
+{
+ u32 pbsize = hw->mac.rx_pb_size;
+ int i = 0;
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ /* Reserve headroom */
+ pbsize -= headroom;
+
+ if (!num_pb)
+ num_pb = 1;
+
+ /* Divide remaining packet buffer space amongst the number of packet
+ * buffers requested using supplied strategy.
+ */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
+ * buffer with 5/8 of the packet buffer space.
+ */
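+ /* Each of the first num_pb/2 buffers gets 5/(4*num_pb) of the
+ * space, so the first half receives 5/8 of it in total.
+ */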
+ rxpktsize = (pbsize * 5) / (num_pb * 4);
+ pbsize -= rxpktsize * (num_pb / 2);
+ rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
+ for (; i < (num_pb / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Fall through to configure remaining packet buffers */
+ case PBA_STRATEGY_EQUAL:
+ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+ for (; i < num_pb; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ default:
+ break;
+ }
+
+ /* Only support an equally distributed Tx packet buffer strategy. */
+ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+ txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_pb; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+
+ /* Clear unused TCs, if any, to zero buffer size */
+ for (; i < IXGBE_MAX_PB; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+}
+
+/**
+ * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
+ * @hw: pointer to the hardware structure
+ *
+ * The 82599 and x540 MACs can experience issues if TX work is still pending
+ * when a reset occurs. This function prevents such issues by flushing the
+ * PCIe buffers on the system.
+ **/
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
+{
+ u32 gcr_ext, hlreg0;
+
+ /*
+ * If double reset is not requested then all transactions should
+ * already be clear and as such there is no work to do
+ */
+ if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+ return;
+
+ /*
+ * Set loopback enable to prevent any transmits from being sent
+ * should the link come up. This assumes that the RXCTRL.RXEN bit
+ * has already been cleared.
+ */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+
+ /* initiate cleaning flow for buffers in the PCIe transaction layer */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
+ gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
+
+ /* Flush all writes and allow 20usec for all transactions to clear */
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(20);
+
+ /* restore previous register values */
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+}
+
+STATIC const u8 ixgbe_emc_temp_data[4] = {
+ IXGBE_EMC_INTERNAL_DATA,
+ IXGBE_EMC_DIODE1_DATA,
+ IXGBE_EMC_DIODE2_DATA,
+ IXGBE_EMC_DIODE3_DATA
+};
+STATIC const u8 ixgbe_emc_therm_limit[4] = {
+ IXGBE_EMC_INTERNAL_THERM_LIMIT,
+ IXGBE_EMC_DIODE1_THERM_LIMIT,
+ IXGBE_EMC_DIODE2_THERM_LIMIT,
+ IXGBE_EMC_DIODE3_THERM_LIMIT
+};
+
+/**
+ * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Reads the thermal sensor data for each sensor into
+ * hw->mac.thermal_sensor_data and returns a status code.
+ **/
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 i;
+ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");
+
+ /* Only support thermal sensors attached to 82599 physical port 0 */
+ if ((hw->mac.type != ixgbe_mac_82599EB) ||
+ (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
+ status = IXGBE_NOT_IMPLEMENTED;
+ goto out;
+ }
+
+ status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
+ if (status)
+ goto out;
+
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
+ status = IXGBE_NOT_IMPLEMENTED;
+ goto out;
+ }
+
+ status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
+ if (status)
+ goto out;
+
+ if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
+ != IXGBE_ETS_TYPE_EMC) {
+ status = IXGBE_NOT_IMPLEMENTED;
+ goto out;
+ }
+
+ num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+ if (num_sensors > IXGBE_MAX_SENSORS)
+ num_sensors = IXGBE_MAX_SENSORS;
+
+ for (i = 0; i < num_sensors; i++) {
+ status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
+ &ets_sensor);
+ if (status)
+ goto out;
+
+ sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+ IXGBE_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+ IXGBE_ETS_DATA_LOC_SHIFT);
+
+ if (sensor_location != 0) {
+ status = hw->phy.ops.read_i2c_byte(hw,
+ ixgbe_emc_temp_data[sensor_index],
+ IXGBE_I2C_THERMAL_SENSOR_ADDR,
+ &data->sensor[i].temp);
+ if (status)
+ goto out;
+ }
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map and
+ * saves the threshold and location values into mac.thermal_sensor_data.
+ **/
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 offset;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 low_thresh_delta;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 therm_limit;
+ u8 i;
+ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");
+
+ memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
+
+ /* Only support thermal sensors attached to 82599 physical port 0 */
+ if ((hw->mac.type != ixgbe_mac_82599EB) ||
+ (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+ return IXGBE_NOT_IMPLEMENTED;
+
+ offset = IXGBE_ETS_CFG;
+ if (hw->eeprom.ops.read(hw, offset, &ets_offset))
+ goto eeprom_err;
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+ return IXGBE_NOT_IMPLEMENTED;
+
+ offset = ets_offset;
+ if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
+ goto eeprom_err;
+ if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
+ != IXGBE_ETS_TYPE_EMC)
+ return IXGBE_NOT_IMPLEMENTED;
+
+ low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
+ IXGBE_ETS_LTHRES_DELTA_SHIFT);
+ num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+
+ for (i = 0; i < num_sensors; i++) {
+ offset = ets_offset + 1 + i;
+ if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed",
+ offset);
+ continue;
+ }
+ sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+ IXGBE_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+ IXGBE_ETS_DATA_LOC_SHIFT);
+ therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
+
+ hw->phy.ops.write_i2c_byte(hw,
+ ixgbe_emc_therm_limit[sensor_index],
+ IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
+
+ if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
+ data->sensor[i].location = sensor_location;
+ data->sensor[i].caution_thresh = therm_limit;
+ data->sensor[i].max_op_thresh = therm_limit -
+ low_thresh_delta;
+ }
+ }
+ return status;
+
+eeprom_err:
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", offset);
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
+ * @hw: pointer to hardware structure
+ * @map: pointer to u8 arr for returning map
+ *
+ * Read the rtrup2tc HW register and resolve its content into map
+ **/
+void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
+{
+ u32 reg, i;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+ map[i] = IXGBE_RTRUP2TC_UP_MASK &
+ (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
+ return;
+}
+
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
+{
+ u32 pfdtxgswc;
+ u32 rxctrl;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+ }
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+}
+
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
+{
+ u32 pfdtxgswc;
+ u32 rxctrl;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ if (hw->mac.set_lben) {
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = false;
+ }
+ }
+}
+
+/**
+ * ixgbe_mng_enabled - Is the manageability engine enabled?
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if the manageability engine is enabled.
+ **/
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+{
+ u32 fwsm, manc, factps;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
+ return false;
+
+ manc = IXGBE_READ_REG(hw, IXGBE_MANC);
+ if (!(manc & IXGBE_MANC_RCV_TCO_EN))
+ return false;
+
+ if (hw->mac.type <= ixgbe_mac_X540) {
+ factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ if (factps & IXGBE_FACTPS_MNGCG)
+ return false;
+ }
+
+ return true;
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.h
new file mode 100755
index 00000000..bfd41aad
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_common.h
@@ -0,0 +1,182 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_COMMON_H_
+#define _IXGBE_COMMON_H_
+
+#include "ixgbe_type.h"
+#ident "$Id: ixgbe_common.h,v 1.143 2013/11/22 01:02:01 jtkirshe Exp $"
+#define IXGBE_WRITE_REG64(hw, reg, value) \
+ do { \
+ IXGBE_WRITE_REG(hw, reg, (u32) value); \
+ IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
+ } while (0)
+#define IXGBE_REMOVED(a) (0)
+struct ixgbe_pba {
+ u16 word[2];
+ u16 *pba_block;
+};
+
+void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map);
+
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct ixgbe_pba *pba);
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct ixgbe_pba *pba);
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size);
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status);
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data);
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count,
+ ixgbe_mc_addr_itr func, bool clear);
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+
+s32 ixgbe_validate_mac_addr(u8 *mac_addr);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool *vfta_changed);
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
+
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix);
+
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy);
+void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 ver);
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+ u32 length, bool return_data);
+
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
+
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define IXGBE_EMC_INTERNAL_DATA 0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
+#define IXGBE_EMC_DIODE1_DATA 0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
+#define IXGBE_EMC_DIODE2_DATA 0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
+#define IXGBE_EMC_DIODE3_DATA 0x2A
+#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
+
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
+#endif /* IXGBE_COMMON */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c
new file mode 100755
index 00000000..2245f276
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c
@@ -0,0 +1,715 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+#include "ixgbe_dcb_82599.h"
+#ident "$Id: ixgbe_dcb.c,v 1.55 2013/11/22 01:02:01 jtkirshe Exp $"
+
+/**
+ * ixgbe_dcb_calculate_tc_credits - Calculate IEEE traffic class credits
+ * @bw: bandwidth percentage for each traffic class
+ * @refill: refill credits returned for each traffic class
+ * @max: max credits returned for each traffic class
+ * @max_frame_size: maximum frame size in bytes
+ *
+ * Calculates the IEEE traffic class credits from the configured bandwidth
+ * percentages. Credits are the smallest unit programmable into the
+ * underlying hardware. The IEEE 802.1Qaz specification does not use
+ * bandwidth groups, so this is much simpler than the CEE case.
+ */
+s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
+ int max_frame_size)
+{
+ int min_percent = 100;
+ int min_credit, multiplier;
+ int i;
+
+ min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+ IXGBE_DCB_CREDIT_QUANTUM;
+
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if (bw[i] < min_percent && bw[i])
+ min_percent = bw[i];
+ }
+
+ multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the hw credits for each TC */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL);
+
+ if (val < min_credit)
+ val = min_credit;
+ refill[i] = (u16)val;
+
+ max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit;
+ }
+
+ return 0;
+}
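For a concrete sense of the arithmetic above: with a 1518-byte frame, min_credit works out to 12 quantums, and a 50/30/20 split across three traffic classes yields refill credits of 50/30/20 and max credits of 200/120/80, with idle classes clamped to min_credit. The following is a minimal standalone sketch (illustrative only, not part of the driver), assuming the constants defined later in ixgbe_dcb.h (quantum 64, max refill 200, max credit 400):

/* Illustrative only -- walks ixgbe_dcb_calculate_tc_credits() by hand. */
#include <stdio.h>

#define QUANTUM     64   /* IXGBE_DCB_CREDIT_QUANTUM */
#define MAX_REFILL  200  /* IXGBE_DCB_MAX_CREDIT_REFILL */
#define MAX_CREDIT  400  /* IXGBE_DCB_MAX_CREDIT */

int main(void)
{
	int bw[8] = { 50, 30, 20, 0, 0, 0, 0, 0 };
	int max_frame_size = 1518;
	int min_credit = ((max_frame_size / 2) + QUANTUM - 1) / QUANTUM; /* 12 */
	int min_percent = 20;                   /* smallest non-zero bw[i] */
	int multiplier = (min_credit / min_percent) + 1;                 /* 1 */
	int i;

	for (i = 0; i < 8; i++) {
		int refill = bw[i] * multiplier;

		if (refill > MAX_REFILL)
			refill = MAX_REFILL;
		if (refill < min_credit)
			refill = min_credit;

		/* Idle classes fall back to min_credit for the max as well. */
		int max = bw[i] ? (bw[i] * MAX_CREDIT) / 100 : min_credit;

		printf("TC%d: refill=%d max=%d\n", i, refill, max);
	}
	return 0;
}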
+
+/**
+ * ixgbe_dcb_calculate_tc_credits_cee - Calculate CEE traffic class credits
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings.
+ * @max_frame_size: maximum frame size in bytes
+ * @direction: Configuring either Tx or Rx.
+ *
+ * This function calculates the credits allocated to each traffic class.
+ * It should be called only after the rules are checked by
+ * ixgbe_dcb_check_config_cee().
+ */
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config,
+ u32 max_frame_size, u8 direction)
+{
+ struct ixgbe_dcb_tc_path *p;
+ u32 min_multiplier = 0;
+ u16 min_percent = 100;
+ s32 ret_val = IXGBE_SUCCESS;
+ /* Initialization values default for Tx settings */
+ u32 min_credit = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u16 link_percentage = 0;
+ u8 bw_percent = 0;
+ u8 i;
+
+ if (dcb_config == NULL) {
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+ IXGBE_DCB_CREDIT_QUANTUM;
+
+ /* Find smallest link percentage */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+ link_percentage = p->bwg_percent;
+
+ link_percentage = (link_percentage * bw_percent) / 100;
+
+ if (link_percentage && link_percentage < min_percent)
+ min_percent = link_percentage;
+ }
+
+ /*
+ * The ratio between traffic classes will control the bandwidth
+ * percentages seen on the wire. To calculate this ratio we use
+	 * a multiplier. The refill credits must be larger than the max
+	 * frame size, so here we find the smallest multiplier that
+	 * allows the refill credits of every traffic class to exceed
+	 * the max frame size.
+ */
+ min_multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the link percentage for each TC first */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+
+ link_percentage = p->bwg_percent;
+ /* Must be careful of integer division for very small nums */
+ link_percentage = (link_percentage * bw_percent) / 100;
+ if (p->bwg_percent > 0 && link_percentage == 0)
+ link_percentage = 1;
+
+ /* Save link_percentage for reference */
+ p->link_percent = (u8)link_percentage;
+
+ /* Calculate credit refill ratio using multiplier */
+ credit_refill = min(link_percentage * min_multiplier,
+ (u32)IXGBE_DCB_MAX_CREDIT_REFILL);
+ p->data_credits_refill = (u16)credit_refill;
+
+ /* Calculate maximum credit for the TC */
+ credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100;
+
+ /*
+ * Adjustment based on rule checking, if the percentage
+ * of a TC is too small, the maximum credit may not be
+ * enough to send out a jumbo frame in data plane arbitration.
+ */
+ if (credit_max && (credit_max < min_credit))
+ credit_max = min_credit;
+
+ if (direction == IXGBE_DCB_TX_CONFIG) {
+ /*
+ * Adjustment based on rule checking, if the
+ * percentage of a TC is too small, the maximum
+ * credit may not be enough to send out a TSO
+ * packet in descriptor plane arbitration.
+ */
+ if (credit_max && (credit_max <
+ IXGBE_DCB_MIN_TSO_CREDIT)
+ && (hw->mac.type == ixgbe_mac_82598EB))
+ credit_max = IXGBE_DCB_MIN_TSO_CREDIT;
+
+ dcb_config->tc_config[i].desc_credits_max =
+ (u16)credit_max;
+ }
+
+ p->data_credits_max = (u16)credit_max;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info
+ * @cfg: dcb configuration to unpack into hardware consumable fields
+ * @map: user priority to traffic class map
+ * @pfc_up: u8 to store user priority PFC bitmask
+ *
+ * This unpacks the DCB configuration PFC info, which is stored per
+ * traffic class, into an 8-bit user priority bitmask that can be
+ * consumed by hardware routines. The priority-to-TC map must be
+ * updated before calling this routine so that the current mapping is used.
+ */
+void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int up;
+
+ /*
+ * If the TC for this user priority has PFC enabled then set the
+ * matching bit in 'pfc_up' to reflect that PFC is enabled.
+ */
+ for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) {
+ if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled)
+ *pfc_up |= 1 << up;
+ }
+}
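As a small illustration of the unpacking (a sketch, not driver code): with user priorities 0-3 mapped to TC0, priorities 4-7 mapped to TC1, and PFC enabled only on TC1, the loop above produces pfc_up = 0xF0:

/* Hypothetical helper mirroring the loop in ixgbe_dcb_unpack_pfc_cee(). */
static unsigned char example_pfc_bitmask(void)
{
	const unsigned char map[8] = { 0, 0, 0, 0, 1, 1, 1, 1 }; /* UP -> TC */
	const unsigned char pfc_enabled_on_tc[2] = { 0, 1 };
	unsigned char pfc_up = 0;
	int up;

	for (up = 0; up < 8; up++)
		if (pfc_enabled_on_tc[map[up]])
			pfc_up |= 1 << up;

	return pfc_up; /* 0xF0: bits 4-7 set */
}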
+
+void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction,
+ u16 *refill)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+ refill[tc] = tc_config[tc].path[direction].data_credits_refill;
+}
+
+void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+ max[tc] = tc_config[tc].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *bwgid)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+ bwgid[tc] = tc_config[tc].path[direction].bwg_id;
+}
+
+void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *tsa)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+ tsa[tc] = tc_config[tc].path[direction].tsa;
+}
+
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
+{
+ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ u8 prio_mask = 1 << up;
+ u8 tc = cfg->num_tcs.pg_tcs;
+
+ /* If tc is 0 then DCB is likely not enabled or supported */
+ if (!tc)
+ goto out;
+
+ /*
+ * Test from maximum TC to 1 and report the first match we find. If
+ * we find no match we can assume that the TC is 0 since the TC must
+ * be set for all user priorities
+ */
+ for (tc--; tc; tc--) {
+ if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
+ break;
+ }
+out:
+ return tc;
+}
+
+void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *map)
+{
+ u8 up;
+
+ for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++)
+ map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
+}
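Working the reverse direction with the same example: if num_tcs.pg_tcs is 2 and TC1's up_to_tc_bitmap is 0xF0, ixgbe_dcb_get_tc_from_up() finds a match at TC1 for priorities 4-7 and falls through to 0 for priorities 0-3, so ixgbe_dcb_unpack_map_cee() fills map[] with {0, 0, 0, 0, 1, 1, 1, 1}.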
+
+/**
+ * ixgbe_dcb_check_config_cee - Check DCB configuration rules
+ * @dcb_config: Pointer to DCB config structure
+ *
+ * This function checks DCB rules for DCB settings.
+ * The following rules are checked:
+ * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
+ * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
+ *    Group must total 100%.
+ * 3. A Traffic Class should not be set to both Link Strict Priority
+ * and Group Strict Priority.
+ * 4. Link strict Bandwidth Groups can only have link strict traffic classes
+ * with zero bandwidth.
+ */
+s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config)
+{
+ struct ixgbe_dcb_tc_path *p;
+ s32 ret_val = IXGBE_SUCCESS;
+ u8 i, j, bw = 0, bw_id;
+ u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP];
+ bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP];
+
+ memset(bw_sum, 0, sizeof(bw_sum));
+ memset(link_strict, 0, sizeof(link_strict));
+
+ /* First Tx, then Rx */
+ for (i = 0; i < 2; i++) {
+ /* Check each traffic class for rule violation */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ p = &dcb_config->tc_config[j].path[i];
+
+ bw = p->bwg_percent;
+ bw_id = p->bwg_id;
+
+ if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) {
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ if (p->tsa == ixgbe_dcb_tsa_strict) {
+ link_strict[i][bw_id] = true;
+ /* Link strict should have zero bandwidth */
+ if (bw) {
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ } else if (!bw) {
+ /*
+ * Traffic classes without link strict
+ * should have non-zero bandwidth.
+ */
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ bw_sum[i][bw_id] += bw;
+ }
+
+ bw = 0;
+
+ /* Check each bandwidth group for rule violation */
+ for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) {
+ bw += dcb_config->bw_percentage[i][j];
+ /*
+ * Sum of bandwidth percentages of all traffic classes
+ * within a Bandwidth Group must total 100 except for
+ * link strict group (zero bandwidth).
+ */
+ if (link_strict[i][j]) {
+ if (bw_sum[i][j]) {
+ /*
+ * Link strict group should have zero
+ * bandwidth.
+ */
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ } else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT &&
+ bw_sum[i][j] != 0) {
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ }
+
+ if (bw != IXGBE_DCB_BW_PERCENT) {
+ ret_val = IXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ }
+
+err_config:
+ DEBUGOUT2("DCB error code %d while checking %s settings.\n",
+ ret_val, (i == IXGBE_DCB_TX_CONFIG) ? "Tx" : "Rx");
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to report statistics for.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to report statistics for.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Rx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
+ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
+ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
+ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
+
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid,
+ tsa, map);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+ bwgid, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+ bwgid, tsa);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+ bwgid, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+ bwgid, tsa,
+ map);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_cee - Config priority flow control
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Priority Flow Control for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ u8 pfc_en;
+ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+ ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_tc_stats_82598(hw);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ixgbe_dcb_hw_config_cee - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ s32 ret = IXGBE_NOT_IMPLEMENTED;
+ u8 pfc_en;
+ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed,
+ refill, max, bwgid, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ixgbe_dcb_config_82599(hw, dcb_config);
+ ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed,
+ refill, max, bwgid,
+ tsa, map);
+
+ ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
+ break;
+ default:
+ break;
+ }
+
+ if (!ret && dcb_config->pfc_mode_enable) {
+ ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+ ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
+ }
+
+ return ret;
+}
+
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+ int ret = IXGBE_ERR_PARAM;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+ u8 *bwg_id, u8 *tsa, u8 *map)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
+ tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
+ tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+ tsa, map);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
+ tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+ tsa, map);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
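Taken together, the kernel-doc above implies a simple calling order for CEE mode: validate the configuration, compute credits for both directions, then program the hardware (which also covers PFC when pfc_mode_enable is set). A hypothetical caller sketch follows, assuming the driver environment; the helper name and the max_frame_size parameter are illustrative, and error handling is trimmed for brevity:

/* Hypothetical caller sketch, not part of this patch. */
static s32 example_dcb_setup(struct ixgbe_hw *hw, struct ixgbe_dcb_config *cfg,
			     u32 max_frame_size)
{
	s32 err;

	err = ixgbe_dcb_check_config_cee(cfg);
	if (err != IXGBE_SUCCESS)
		return err;

	ixgbe_dcb_calculate_tc_credits_cee(hw, cfg, max_frame_size,
					   IXGBE_DCB_TX_CONFIG);
	ixgbe_dcb_calculate_tc_credits_cee(hw, cfg, max_frame_size,
					   IXGBE_DCB_RX_CONFIG);

	/* Programs the arbiters and, if cfg->pfc_mode_enable, PFC as well. */
	return ixgbe_dcb_hw_config_cee(hw, cfg);
}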
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h
new file mode 100755
index 00000000..633abb2e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h
@@ -0,0 +1,176 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_DCB_H_
+#define _IXGBE_DCB_H_
+
+#ident "$Id: ixgbe_dcb.h,v 1.39 2012/04/17 00:07:40 jtkirshe Exp $"
+
+#include "ixgbe_type.h"
+
+/* DCB defines */
+/* DCB credit calculation defines */
+#define IXGBE_DCB_CREDIT_QUANTUM 64
+#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */
+#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/
+#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL)
+
+/* 513 for 32KB TSO packet */
+#define IXGBE_DCB_MIN_TSO_CREDIT \
+ ((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1)
+
+/* DCB configuration defines */
+#define IXGBE_DCB_MAX_USER_PRIORITY 8
+#define IXGBE_DCB_MAX_BW_GROUP 8
+#define IXGBE_DCB_BW_PERCENT 100
+
+#define IXGBE_DCB_TX_CONFIG 0
+#define IXGBE_DCB_RX_CONFIG 1
+
+/* DCB capability defines */
+#define IXGBE_DCB_PG_SUPPORT 0x00000001
+#define IXGBE_DCB_PFC_SUPPORT 0x00000002
+#define IXGBE_DCB_BCN_SUPPORT 0x00000004
+#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008
+#define IXGBE_DCB_GSP_SUPPORT 0x00000010
+
+struct ixgbe_dcb_support {
+ u32 capabilities; /* DCB capabilities */
+
+ /* Each bit represents a number of TCs configurable in the hw.
+ * If 8 traffic classes can be configured, the value is 0x80. */
+ u8 traffic_classes;
+ u8 pfc_traffic_classes;
+};
+
+enum ixgbe_dcb_tsa {
+ ixgbe_dcb_tsa_ets = 0,
+ ixgbe_dcb_tsa_group_strict_cee,
+ ixgbe_dcb_tsa_strict
+};
+
+/* Traffic class bandwidth allocation per direction */
+struct ixgbe_dcb_tc_path {
+ u8 bwg_id; /* Bandwidth Group (BWG) ID */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 link_percent; /* % of link bandwidth */
+ u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
+ u16 data_credits_refill; /* Credit refill amount in 64B granularity */
+ u16 data_credits_max; /* Max credits for a configured packet buffer
+ * in 64B granularity.*/
+ enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
+};
+
+enum ixgbe_dcb_pfc {
+ ixgbe_dcb_pfc_disabled = 0,
+ ixgbe_dcb_pfc_enabled,
+ ixgbe_dcb_pfc_enabled_txonly,
+ ixgbe_dcb_pfc_enabled_rxonly
+};
+
+/* Traffic class configuration */
+struct ixgbe_dcb_tc_config {
+ struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
+ enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */
+
+ u16 desc_credits_max; /* For Tx Descriptor arbitration */
+ u8 tc; /* Traffic class (TC) */
+};
+
+enum ixgbe_dcb_pba {
+ /* PBA[0-7] each use 64KB FIFO */
+ ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
+ /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
+ ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
+};
+
+struct ixgbe_dcb_num_tcs {
+ u8 pg_tcs;
+ u8 pfc_tcs;
+};
+
+struct ixgbe_dcb_config {
+ struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+ struct ixgbe_dcb_support support;
+ struct ixgbe_dcb_num_tcs num_tcs;
+ u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */
+ bool pfc_mode_enable;
+ bool round_robin_enable;
+
+ enum ixgbe_dcb_pba rx_pba_cfg;
+
+ u32 dcb_cfg_version; /* Not used...OS-specific? */
+ u32 link_speed; /* For bandwidth allocation validation purpose */
+ bool vt_mode;
+};
+
+/* DCB driver APIs */
+
+/* DCB rule checking */
+s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *);
+
+/* DCB credits calculation */
+s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int);
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *, u32, u8);
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *);
+s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+
+/* DCB unpack routines */
+void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *);
+void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *);
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
+
+/* DCB initialization */
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *);
+s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+#endif /* _IXGBE_DCB_H_ */
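As a usage sketch for the structures above (hypothetical, assuming the driver's osdep environment supplies memset), the following fills an ixgbe_dcb_config that satisfies the rules checked by ixgbe_dcb_check_config_cee(): two bandwidth groups split 50/50, four traffic classes of 25% within each group, one user priority per class, and PFC left disabled.

/* Hypothetical example, not part of the patch. */
static void example_fill_dcb_config(struct ixgbe_dcb_config *cfg)
{
	int tc;

	memset(cfg, 0, sizeof(*cfg));
	cfg->num_tcs.pg_tcs = IXGBE_DCB_MAX_TRAFFIC_CLASS;
	cfg->num_tcs.pfc_tcs = IXGBE_DCB_MAX_TRAFFIC_CLASS;

	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
		struct ixgbe_dcb_tc_config *t = &cfg->tc_config[tc];

		t->path[IXGBE_DCB_TX_CONFIG].bwg_id = tc / 4;     /* BWG 0 or 1 */
		t->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 25;    /* 4 x 25 = 100 */
		t->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 1 << tc;
		t->path[IXGBE_DCB_RX_CONFIG] = t->path[IXGBE_DCB_TX_CONFIG];
		t->pfc = ixgbe_dcb_pfc_disabled;
	}

	/* Bandwidth group shares must sum to 100% in each direction. */
	cfg->bw_percentage[IXGBE_DCB_TX_CONFIG][0] = 50;
	cfg->bw_percentage[IXGBE_DCB_TX_CONFIG][1] = 50;
	cfg->bw_percentage[IXGBE_DCB_RX_CONFIG][0] = 50;
	cfg->bw_percentage[IXGBE_DCB_RX_CONFIG][1] = 50;
}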
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c
new file mode 100755
index 00000000..a6161cd5
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c
@@ -0,0 +1,361 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+#ident "$Id: ixgbe_dcb_82598.c,v 1.29 2012/03/30 06:45:33 jtkirshe Exp $"
+
+/**
+ * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to report statistics for.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ int tc;
+
+ DEBUGFUNC("dcb_get_tc_stats");
+
+ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+ return IXGBE_ERR_PARAM;
+
+ /* Statistics pertaining to each traffic class */
+ for (tc = 0; tc < tc_count; tc++) {
+ /* Transmitted Packets */
+ stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
+ /* Transmitted Bytes */
+ stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
+ /* Received Packets */
+ stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
+ /* Received Bytes */
+ stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
+
+#if 0
+ /* Can we get rid of these?? Consequently, getting rid
+ * of the tc_stats structure.
+ */
+ tc_stats_array[up]->in_overflow_discards = 0;
+ tc_stats_array[up]->out_overflow_discards = 0;
+#endif
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to report statistics for.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ int tc;
+
+ DEBUGFUNC("dcb_get_pfc_stats");
+
+ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+ return IXGBE_ERR_PARAM;
+
+ for (tc = 0; tc < tc_count; tc++) {
+ /* Priority XOFF Transmitted */
+ stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
+ /* Priority XOFF Received */
+ stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Rx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *tsa)
+{
+ u32 reg = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u8 i = 0;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
+ IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ /* Enable Arbiter */
+ reg &= ~IXGBE_RMCS_ARBDIS;
+ /* Enable Receive Recycle within the BWG */
+ reg |= IXGBE_RMCS_RRM;
+ /* Enable Deficit Fixed Priority arbitration*/
+ reg |= IXGBE_RMCS_DFP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ credit_refill = refill[i];
+ credit_max = max[i];
+
+ reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_RT2CR_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
+ }
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ reg |= IXGBE_RDRXCTL_RDMTS_1_2;
+ reg |= IXGBE_RDRXCTL_MPBEN;
+ reg |= IXGBE_RDRXCTL_MCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ /* Make sure there is enough descriptors before arbitration */
+ reg &= ~IXGBE_RXCTRL_DMBYPS;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill, u16 *max, u8 *bwg_id,
+ u8 *tsa)
+{
+ u32 reg, max_credits;
+ u8 i;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+
+ /* Enable arbiter */
+ reg &= ~IXGBE_DPMCS_ARBDIS;
+ reg |= IXGBE_DPMCS_TSOEF;
+
+ /* Configure Max TSO packet size 34KB including payload and headers */
+ reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
+
+ IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ max_credits = max[i];
+ reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+
+ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+ reg |= IXGBE_TDTQ2TCCR_GSP;
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_TDTQ2TCCR_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill, u16 *max, u8 *bwg_id,
+ u8 *tsa)
+{
+ u32 reg;
+ u8 i;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
+ /* Enable Data Plane Arbiter */
+ reg &= ~IXGBE_PDPMCS_ARBDIS;
+ /* Enable DFP and Transmit Recycle Mode */
+ reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+
+ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+ reg |= IXGBE_TDPT2TCCR_GSP;
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_TDPT2TCCR_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
+ }
+
+ /* Enable Tx packet buffer division */
+ reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
+ reg |= IXGBE_DTXCTL_ENDBUBD;
+ IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_82598 - Config priority flow control
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Priority Flow Control for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
+{
+ u32 fcrtl, reg;
+ u8 i;
+
+ /* Enable Transmit Priority Flow Control */
+ reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ reg &= ~IXGBE_RMCS_TFCE_802_3X;
+ reg |= IXGBE_RMCS_TFCE_PRIORITY;
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+ /* Enable Receive Priority Flow Control */
+ reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
+
+ if (pfc_en)
+ reg |= IXGBE_FCTRL_RPFCE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
+
+ /* Configure PFC Tx thresholds per TC */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if (!(pfc_en & (1 << i))) {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+ continue;
+ }
+
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
+ }
+
+ /* Configure pause time */
+ reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+ return IXGBE_SUCCESS;
+}
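Reading the threshold math above: the << 10 shift suggests hw->fc.low_water[] and high_water[] are kept in kilobytes and converted to bytes before being written to FCRTL/FCRTH. Under that assumption, low_water = 24 and high_water = 32 on an enabled TC would re-assert XON below 24576 bytes and signal XOFF above 32768 bytes, while TCs whose pfc_en bit is clear get both thresholds zeroed.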
+
+/**
+ * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+{
+ u32 reg = 0;
+ u8 i = 0;
+ u8 j = 0;
+
+ /* Receive Queues stats setting - 8 queues per statistics reg */
+ for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
+ reg |= ((0x1010101) * j);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
+ reg |= ((0x1010101) * j);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
+ }
+ /* Transmit Queues stats setting - 4 queues per statistics reg*/
+ for (i = 0; i < 8; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
+ reg |= ((0x1010101) * i);
+ IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
+ }
+
+ return IXGBE_SUCCESS;
+}
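The 0x1010101 * j pattern follows from the register layout described for the 82599 further below (each byte of RQSMR/TQSM selects the statistics set for one queue), which appears to hold for the 82598 as well: multiplying by j writes the same counter index into every byte, so all queues covered by that register land on counter set j. The Rx loop covers two registers (eight queues) per traffic class and the Tx loop one register (four queues) per class.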
+
+/**
+ * ixgbe_dcb_hw_config_82598 - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed,
+ u16 *refill, u16 *max, u8 *bwg_id,
+ u8 *tsa)
+{
+ UNREFERENCED_1PARAMETER(link_speed);
+
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
+ tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
+ tsa);
+ ixgbe_dcb_config_tc_stats_82598(hw);
+
+
+ return IXGBE_SUCCESS;
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h
new file mode 100755
index 00000000..9307644c
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_DCB_82598_H_
+#define _IXGBE_DCB_82598_H_
+#ident "$Id: ixgbe_dcb_82598.h,v 1.12 2012/03/26 22:28:19 jtkirshe Exp $"
+
+/* DCB register definitions */
+
+#define IXGBE_DPMCS_MTSOS_SHIFT 16
+#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin,
+ * 1 DFP - Deficit Fixed Priority */
+#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */
+#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
+
+#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */
+
+#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */
+
+#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
+ * buffers enable */
+#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
+ * (RSS) enable */
+
+#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12
+#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9
+#define IXGBE_TDTQ2TCCR_GSP 0x40000000
+#define IXGBE_TDTQ2TCCR_LSP 0x80000000
+
+#define IXGBE_TDPT2TCCR_MCL_SHIFT 12
+#define IXGBE_TDPT2TCCR_BWG_SHIFT 9
+#define IXGBE_TDPT2TCCR_GSP 0x40000000
+#define IXGBE_TDPT2TCCR_LSP 0x80000000
+
+#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin,
+ * 1 DFP - Deficit Fixed Priority */
+#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */
+#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */
+
+#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */
+
+#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
+
+/* DCB driver APIs */
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *,
+ struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *,
+ struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
+ u8 *, u8 *);
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
+ u8 *, u8 *);
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *);
+
+/* DCB initialization */
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *);
+#endif /* _IXGBE_DCB_82598_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c
new file mode 100755
index 00000000..e754d1a4
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c
@@ -0,0 +1,594 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82599.h"
+#ident "$Id: ixgbe_dcb_82599.c,v 1.67 2012/03/30 06:45:33 jtkirshe Exp $"
+
+/**
+ * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to report statistics for.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ int tc;
+
+ DEBUGFUNC("dcb_get_tc_stats");
+
+ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+ return IXGBE_ERR_PARAM;
+
+ /* Statistics pertaining to each traffic class */
+ for (tc = 0; tc < tc_count; tc++) {
+ /* Transmitted Packets */
+ stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
+ /* Transmitted Bytes (read low first to prevent missed carry) */
+ stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
+ stats->qbtc[tc] +=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
+ /* Received Packets */
+ stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
+ /* Received Bytes (read low first to prevent missed carry) */
+ stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
+ stats->qbrc[tc] +=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);
+
+ /* Received Dropped Packet */
+ stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to report statistics for.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
+ struct ixgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ int tc;
+
+ DEBUGFUNC("dcb_get_pfc_stats");
+
+ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+ return IXGBE_ERR_PARAM;
+
+ for (tc = 0; tc < tc_count; tc++) {
+ /* Priority XOFF Transmitted */
+ stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
+ /* Priority XOFF Received */
+ stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Rx Packet Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map)
+{
+ u32 reg = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u8 i = 0;
+
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; WSP)
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+ /*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
+	 * corresponding bits set for the UPs that need to be mapped to that
+	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
+ * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+ */
+ reg = 0;
+ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+ reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ credit_refill = refill[i];
+ credit_max = max[i];
+ reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
+
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_RTRPT4C_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
+ }
+
+ /*
+ * Configure Rx packet plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa)
+{
+ u32 reg, max_credits;
+ u8 i;
+
+ /* Clear the per-Tx queue credits; we use per-TC instead */
+ for (i = 0; i < 128; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
+ }
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ max_credits = max[i];
+ reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
+
+ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+ reg |= IXGBE_RTTDT2C_GSP;
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_RTTDT2C_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
+ }
+
+ /*
+ * Configure Tx descriptor plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Packet Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map)
+{
+ u32 reg;
+ u8 i;
+
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; SP; arb delay)
+ */
+ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
+ (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
+ IXGBE_RTTPCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
+
+ /*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
+	 * corresponding bits set for the UPs that need to be mapped to that
+	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
+ * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+ */
+ reg = 0;
+ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+ reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
+
+ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+ reg |= IXGBE_RTTPT2C_GSP;
+
+ if (tsa[i] == ixgbe_dcb_tsa_strict)
+ reg |= IXGBE_RTTPT2C_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
+ }
+
+ /*
+ * Configure Tx packet plane (recycle mode; SP; arb delay) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
+ (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
+ * @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Priority Flow Control (PFC) for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+ u32 i, j, fcrtl, reg;
+ u8 max_tc = 0;
+
+ /* Enable Transmit Priority Flow Control */
+ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);
+
+ /* Enable Receive Priority Flow Control */
+ reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ reg |= IXGBE_MFLCN_DPF;
+
+ /*
+ * X540 supports per TC Rx priority flow control. So
+ * clear all TCs and only enable those that should be
+ * enabled.
+ */
+ reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+
+ if (hw->mac.type == ixgbe_mac_X540)
+ reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
+
+ if (pfc_en)
+ reg |= IXGBE_MFLCN_RPFCE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+
+ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
+ if (map[i] > max_tc)
+ max_tc = map[i];
+ }
+
+
+ /* Configure PFC Tx thresholds per TC */
+ for (i = 0; i <= max_tc; i++) {
+ int enabled = 0;
+
+ for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
+ if ((map[j] == i) && (pfc_en & (1 << j))) {
+ enabled = 1;
+ break;
+ }
+ }
+
+ if (enabled) {
+ reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+ } else {
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
+ }
+
+ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
+ }
+
+ /* Configure pause time (2 TCs per register) */
+ reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ u32 reg = 0;
+ u8 i = 0;
+ u8 tc_count = 8;
+ bool vt_mode = false;
+
+ if (dcb_config != NULL) {
+ tc_count = dcb_config->num_tcs.pg_tcs;
+ vt_mode = dcb_config->vt_mode;
+ }
+
+ if (!((tc_count == 8 && vt_mode == false) || tc_count == 4))
+ return IXGBE_ERR_PARAM;
+
+ if (tc_count == 8 && vt_mode == false) {
+ /*
+ * Receive Queues stats setting
+ * 32 RQSMR registers, each configuring 4 queues.
+ *
+ * Set all 16 queues of each TC to the same stat
+ * with TC 'n' going to stat 'n'.
+ */
+ for (i = 0; i < 32; i++) {
+ reg = 0x01010101 * (i / 4);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+ }
+ /*
+ * Transmit Queues stats setting
+ * 32 TQSM registers, each controlling 4 queues.
+ *
+ * Set all queues of each TC to the same stat
+ * with TC 'n' going to stat 'n'.
+ * Tx queues are allocated non-uniformly to TCs:
+ * 32, 32, 16, 16, 8, 8, 8, 8.
+ */
+ for (i = 0; i < 32; i++) {
+ if (i < 8)
+ reg = 0x00000000;
+ else if (i < 16)
+ reg = 0x01010101;
+ else if (i < 20)
+ reg = 0x02020202;
+ else if (i < 24)
+ reg = 0x03030303;
+ else if (i < 26)
+ reg = 0x04040404;
+ else if (i < 28)
+ reg = 0x05050505;
+ else if (i < 30)
+ reg = 0x06060606;
+ else
+ reg = 0x07070707;
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+ }
+ } else if (tc_count == 4 && vt_mode == false) {
+ /*
+ * Receive Queues stats setting
+ * 32 RQSMR registers, each configuring 4 queues.
+ *
+ * Set all 16 queues of each TC to the same stat
+ * with TC 'n' going to stat 'n'.
+ */
+ for (i = 0; i < 32; i++) {
+ if (i % 8 > 3)
+ /* In 4 TC mode, odd 16-queue ranges are
+ * not used.
+ */
+ continue;
+ reg = 0x01010101 * (i / 8);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+ }
+ /*
+ * Transmit Queues stats setting
+ * 32 TQSM registers, each controlling 4 queues.
+ *
+ * Set all queues of each TC to the same stat
+ * with TC 'n' going to stat 'n'.
+ * Tx queues are allocated non-uniformly to TCs:
+ * 64, 32, 16, 16.
+ */
+ for (i = 0; i < 32; i++) {
+ if (i < 16)
+ reg = 0x00000000;
+ else if (i < 24)
+ reg = 0x01010101;
+ else if (i < 28)
+ reg = 0x02020202;
+ else
+ reg = 0x03030303;
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+ }
+ } else if (tc_count == 4 && vt_mode == true) {
+ /*
+ * Receive Queues stats setting
+ * 32 RQSMR registers, each configuring 4 queues.
+ *
+ * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
+ * pool. Set all 32 queues of each TC across pools to the same
+ * stat with TC 'n' going to stat 'n'.
+ */
+ for (i = 0; i < 32; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
+ /*
+ * Transmit Queues stats setting
+ * 32 TQSM registers, each controlling 4 queues.
+ *
+ * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
+ * pool. Set all 32 queues of each TC across pools to the same
+ * stat with TC 'n' going to stat 'n'.
+ */
+ for (i = 0; i < 32; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_82599 - Configure general DCB parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure general DCB parameters.
+ */
+s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ u32 reg;
+ u32 q;
+
+ /* Disable the Tx desc arbiter so that MTQC can be changed */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ /* Enable DCB for Rx with 8 TCs */
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case 0:
+ case IXGBE_MRQC_RT4TCEN:
+ /* RSS disabled cases */
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RT8TCEN;
+ break;
+ case IXGBE_MRQC_RSSEN:
+ case IXGBE_MRQC_RTRSS4TCEN:
+ /* RSS enabled cases */
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RTRSS8TCEN;
+ break;
+ default:
+ /*
+ * Unsupported value; assume stale data and
+ * overwrite with no RSS.
+ */
+ ASSERT(0);
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RT8TCEN;
+ }
+ }
+ if (dcb_config->num_tcs.pg_tcs == 4) {
+ /* We support both VT-on and VT-off with 4 TCs. */
+ if (dcb_config->vt_mode)
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_VMDQRT4TCEN;
+ else
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RTRSS4TCEN;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+ /* Enable DCB for Tx with 8 TCs */
+ if (dcb_config->num_tcs.pg_tcs == 8)
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ else {
+ /* We support both VT-on and VT-off with 4 TCs. */
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ if (dcb_config->vt_mode)
+ reg |= IXGBE_MTQC_VT_ENA;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+ /* Disable drop for all queues */
+ for (q = 0; q < 128; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+ /* Enable the Tx desc arbiter */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ /* Enable Security TX Buffer IFG for DCB */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
+ * @hw: pointer to hardware structure
+ * @link_speed: unused
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure DCB settings and enable DCB mode.
+ */
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
+ u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map)
+{
+ UNREFERENCED_1PARAMETER(link_speed);
+
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
+ map);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
+ tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+ tsa, map);
+
+ return IXGBE_SUCCESS;
+}
+
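As a caller-side illustration (editorial, not part of the original patch), a minimal sketch of how the PFC routine added above might be driven. The field names come from the code above; the specific water-mark and pause-time values, and the assumption that high_water/low_water are expressed in KB (the << 10 in ixgbe_dcb_config_pfc_82599 converts them to bytes), are purely illustrative.

	/* Illustrative only: enable PFC on priorities 0 and 3, priorities mapped 1:1 to TCs. */
	static void example_enable_pfc(struct ixgbe_hw *hw)
	{
		u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0, 1, 2, 3, 4, 5, 6, 7 };
		u8 pfc_en = (1 << 0) | (1 << 3);	/* bitmask of PFC-enabled priorities */
		int i;

		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
			hw->fc.high_water[i] = 64;	/* assumed KB units (shifted << 10 above) */
			hw->fc.low_water[i] = 40;
		}
		hw->fc.pause_time = 0x680;		/* example pause quanta */

		(void)ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
	}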
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h
new file mode 100755
index 00000000..94a5e9e0
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h
@@ -0,0 +1,154 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_DCB_82599_H_
+#define _IXGBE_DCB_82599_H_
+#ident "$Id: ixgbe_dcb_82599.h,v 1.34 2013/03/20 21:52:47 jtkirshe Exp $"
+
+/* DCB register definitions */
+#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
+ * 1 WSP - Weighted Strict Priority
+ */
+#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin,
+ * 1 WRR - Weighted Round Robin
+ */
+#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */
+#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must
+ * clear!
+ */
+#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
+
+/* Receive UP2TC mapping */
+#define IXGBE_RTRUP2TC_UP_SHIFT 3
+#define IXGBE_RTRUP2TC_UP_MASK 7
+/* Transmit UP2TC mapping */
+#define IXGBE_RTTUP2TC_UP_SHIFT 3
+
+#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */
+#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */
+#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */
+
+#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
+ * buffers enable
+ */
+#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
+ * (RSS) enable
+ */
+
+/* RTRPCS Bit Masks */
+#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RTRPCS_RAC 0x00000004
+#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */
+
+/* RTTDT2C Bit Masks */
+#define IXGBE_RTTDT2C_MCL_SHIFT 12
+#define IXGBE_RTTDT2C_BWG_SHIFT 9
+#define IXGBE_RTTDT2C_GSP 0x40000000
+#define IXGBE_RTTDT2C_LSP 0x80000000
+
+#define IXGBE_RTTPT2C_MCL_SHIFT 12
+#define IXGBE_RTTPT2C_BWG_SHIFT 9
+#define IXGBE_RTTPT2C_GSP 0x40000000
+#define IXGBE_RTTPT2C_LSP 0x80000000
+
+/* RTTPCS Bit Masks */
+#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin,
+ * 1 SP - Strict Priority
+ */
+#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */
+#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */
+#define IXGBE_RTTPCS_ARBD_SHIFT 22
+#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */
+
+#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
+
+/* SECTXMINIFG DCB */
+#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer SEC IFG */
+
+/* BCN register definitions */
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
+#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
+
+#define IXGBE_RTTBCNCR_MNG_CMTGI 0x00000001
+#define IXGBE_RTTBCNCR_MGN_BCNA_MODE 0x00000002
+#define IXGBE_RTTBCNCR_RSV7_11_SHIFT 5
+#define IXGBE_RTTBCNCR_G 0x00000400
+#define IXGBE_RTTBCNCR_I 0x00000800
+#define IXGBE_RTTBCNCR_H 0x00001000
+#define IXGBE_RTTBCNCR_VER_SHIFT 14
+#define IXGBE_RTTBCNCR_CMT_ETH_SHIFT 16
+
+#define IXGBE_RTTBCNACL_SMAC_L_SHIFT 16
+
+#define IXGBE_RTTBCNTG_BCNA_MODE 0x80000000
+
+#define IXGBE_RTTBCNRTT_TS_SHIFT 3
+#define IXGBE_RTTBCNRTT_TXQ_IDX_SHIFT 16
+
+#define IXGBE_RTTBCNRD_BCN_CLEAR_ALL 0x00000002
+#define IXGBE_RTTBCNRD_DRIFT_FAC_SHIFT 2
+#define IXGBE_RTTBCNRD_DRIFT_INT_SHIFT 16
+#define IXGBE_RTTBCNRD_DRIFT_ENA 0x80000000
+
+
+/* DCB driver APIs */
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *,
+ struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *,
+ struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
+ u8 *, u8 *);
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
+ u8 *, u8 *, u8 *);
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *,
+ u8 *, u8 *);
+
+/* DCB initialization */
+s32 ixgbe_dcb_config_82599(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *);
+
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *,
+ u8 *, u8 *);
+#endif /* _IXGBE_DCB_82599_H_ */
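For orientation (an editorial sketch, not from the upstream sources), one way a caller might populate the per-TC arrays consumed by the arbiter entry point declared above. The credit values, the 1:1 priority map, and the meaning of tsa[i] = 0 are assumptions made only for illustration.

	/* Illustrative only: 8 TCs, one bandwidth group per TC, priorities mapped 1:1. */
	static void example_hw_config(struct ixgbe_hw *hw)
	{
		u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
		u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
		u8 bwg_id[IXGBE_DCB_MAX_TRAFFIC_CLASS];
		u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
		u8 map[IXGBE_DCB_MAX_USER_PRIORITY];
		int i;

		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
			refill[i] = 0x200;	/* example credit refill */
			max[i] = 0x1000;	/* example max credit limit */
			bwg_id[i] = i;		/* one bandwidth group per TC */
			tsa[i] = 0;		/* assumed: 0 selects ETS-style arbitration */
		}
		for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
			map[i] = i;		/* priority n -> TC n */

		(void)ixgbe_dcb_hw_config_82599(hw, 0 /* link_speed, unused */,
						refill, max, bwg_id, tsa, map);
	}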
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c
new file mode 100755
index 00000000..c00c2f7c
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,789 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_mbx.h"
+
+/**
+ * ixgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_read_mbx");
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_mbx");
+
+ if (size > mbx->size) {
+ ret_val = IXGBE_ERR_MBX;
+ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+ "Invalid mailbox message size %d", size);
+ } else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_msg");
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_ack");
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_rst");
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("ixgbe_poll_for_msg");
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ if (countdown == 0)
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Polling for VF%d mailbox message timedout", mbx_id);
+
+out:
+ return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("ixgbe_poll_for_ack");
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ if (countdown == 0)
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Polling for VF%d mailbox ack timedout", mbx_id);
+
+out:
+ return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_read_posted_mbx");
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_write_posted_mbx");
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+/**
+ * ixgbe_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the
+ * read-to-clear status bits.
+ **/
+STATIC u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+ u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * ixgbe_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read-to-clear bits within
+ * the V2P mailbox.
+ **/
+STATIC s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw);
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = IXGBE_SUCCESS;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_msg_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_ack_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns true if the PF has set the reset done bit or else false
+ **/
+STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_rst_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = IXGBE_SUCCESS;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ DEBUGFUNC("ixgbe_write_mbx_vf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_vf(hw, 0);
+ ixgbe_check_for_ack_vf(hw, 0);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /*
+ * Complete the remaining mailbox data registers with zero to reset
+ * the data sent in a previous exchange (in either side) with the PF,
+ * including exchanges performed by another Guest OS to which that VF
+ * was previously assigned.
+ */
+ while (i < hw->mbx.size) {
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, 0);
+ i++;
+ }
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_mbx_vf");
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->ops.read = ixgbe_read_mbx_vf;
+ mbx->ops.write = ixgbe_write_mbx_vf;
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+ mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
+ mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
+ mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
+STATIC s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+ u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = IXGBE_SUCCESS;
+ IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ DEBUGFUNC("ixgbe_check_for_msg_pf");
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ DEBUGFUNC("ixgbe_check_for_ack_pf");
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ u32 reg_offset = (vf_number < 32) ? 0 : 1;
+ u32 vf_shift = vf_number % 32;
+ u32 vflre = 0;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_rst_pf");
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+ break;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X540:
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
+ break;
+ default:
+ break;
+ }
+
+ if (vflre & (1 << vf_shift)) {
+ ret_val = IXGBE_SUCCESS;
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ u32 p2v_mailbox;
+
+ DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ ret_val = IXGBE_SUCCESS;
+ else
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to obtain mailbox lock for VF%d", vf_number);
+
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_write_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_number);
+ ixgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+ /*
+ * Complete the remaining mailbox data registers with zero to reset
+ * the data sent in a previous exchange (in either side) with the VF,
+ * including exchanges performed by another Guest OS to which that VF
+ * was previously assigned.
+ */
+ while (i < hw->mbx.size) {
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, 0);
+ i++;
+ }
+
+ /* Interrupt VF to tell it a message has been sent and release buffer*/
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+
+}
+
+/**
+ * ixgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+STATIC s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X540)
+ return;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->ops.read = ixgbe_read_mbx_pf;
+ mbx->ops.write = ixgbe_write_mbx_pf;
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+ mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
+ mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
+ mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
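A hypothetical PF-side sketch of how the wrappers above might be used to service one VF after ixgbe_init_mbx_params_pf() has installed the PF ops. The request decoding is omitted, and the constants referenced (IXGBE_VFMAILBOX_SIZE, IXGBE_VT_MSGTYPE_ACK) come from ixgbe_mbx.h added below.

	/* Illustrative only: poll one VF, read its request and ACK it. */
	static void example_pf_service_vf(struct ixgbe_hw *hw, u16 vf)
	{
		u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

		if (ixgbe_check_for_msg(hw, vf))
			return;				/* nothing pending from this VF */

		if (ixgbe_read_mbx(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, vf))
			return;				/* mailbox read failed */

		/* ... decode msgbuf[0] and act on the request here ... */

		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;	/* tell the VF it was handled */
		ixgbe_write_mbx(hw, msgbuf, 1, vf);
	}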
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h
new file mode 100755
index 00000000..4594572a
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,150 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACKs are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+ ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ /* This value should always be last */
+ ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+/* mailbox API, version 2.0 VF requests */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */
+#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */
+#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */
+#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */
+#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */
+#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */
+
+/* mailbox API, version 2.0 PF requests */
+#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
+#endif /* _IXGBE_MBX_H_ */
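And a matching VF-side sketch (editorial, not from the sources) of a posted exchange using the API above. It assumes the VF reset path has already set hw->mbx.timeout, and treats a reply without the ACK bit as a failure.

	/* Illustrative only: negotiate mailbox API version 1.1 from the VF side. */
	static s32 example_vf_api_negotiate(struct ixgbe_hw *hw)
	{
		u32 msg[3];
		s32 ret;

		msg[0] = IXGBE_VF_API_NEGOTIATE;	/* request id from the header above */
		msg[1] = ixgbe_mbox_api_11;		/* requested API version */
		msg[2] = 0;

		/* write, wait for the PF ack, then poll for and read the reply */
		ret = ixgbe_write_posted_mbx(hw, msg, 3, 0 /* mbx_id unused on the VF side */);
		if (!ret)
			ret = ixgbe_read_posted_mbx(hw, msg, 3, 0);
		if (!ret && !(msg[0] & IXGBE_VT_MSGTYPE_ACK))
			ret = IXGBE_ERR_MBX;		/* PF NACKed the request */
		return ret;
	}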
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h
new file mode 100755
index 00000000..7f95ae48
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h
@@ -0,0 +1,156 @@
+/******************************************************************************
+
+ Copyright (c) 2001-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_OS_H_
+#define _IXGBE_OS_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_byteorder.h>
+
+#include "../ixgbe_logs.h"
+#include "../ixgbe_bypass_defines.h"
+
+#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x")
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000*(x))
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n");
+#define DEBUGOUT(S, args...) PMD_DRV_LOG_RAW(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args)
+
+#define ERROR_REPORT1(e, S, args...) DEBUGOUT(S, ##args)
+#define ERROR_REPORT2(e, S, args...) DEBUGOUT(S, ##args)
+#define ERROR_REPORT3(e, S, args...) DEBUGOUT(S, ##args)
+
+#define FALSE 0
+#define TRUE 1
+
+#define false 0
+#define true 1
+#define min(a,b) RTE_MIN(a,b)
+
+#define EWARN(hw, S, args...) DEBUGOUT1(S, ##args)
+
+/* Bunch of defines for shared code bogosity */
+#define UNREFERENCED_PARAMETER(_p)
+#define UNREFERENCED_1PARAMETER(_p)
+#define UNREFERENCED_2PARAMETER(_p, _q)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+
+/* Shared code error reporting */
+enum {
+ IXGBE_ERROR_SOFTWARE,
+ IXGBE_ERROR_POLLING,
+ IXGBE_ERROR_INVALID_STATE,
+ IXGBE_ERROR_UNSUPPORTED,
+ IXGBE_ERROR_ARGUMENT,
+ IXGBE_ERROR_CAUTION,
+};
+
+#define STATIC static
+#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i)
+#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i)
+#define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i)
+#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
+#define IXGBE_CPU_TO_BE16(_i) rte_cpu_to_be_16(_i)
+#define IXGBE_CPU_TO_BE32(_i) rte_cpu_to_be_32(_i)
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+
+#ifndef __cplusplus
+typedef int bool;
+#endif
+
+#define mb() rte_mb()
+#define wmb() rte_wmb()
+#define rmb() rte_rmb()
+
+#define prefetch(x) rte_prefetch0(x)
+
+#define IXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+
+static inline uint32_t ixgbe_read_addr(volatile void* addr)
+{
+ return IXGBE_PCI_REG(addr);
+}
+
+#define IXGBE_PCI_REG_WRITE(reg, value) do { \
+ IXGBE_PCI_REG((reg)) = (value); \
+} while(0)
+
+#define IXGBE_PCI_REG_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
+
+#define IXGBE_PCI_REG_ARRAY_ADDR(hw, reg, index) \
+ IXGBE_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
+
+/* Not implemented !! */
+#define IXGBE_READ_PCIE_WORD(hw, reg) 0
+#define IXGBE_WRITE_PCIE_WORD(hw, reg, value) do { } while(0)
+
+#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+
+#define IXGBE_READ_REG(hw, reg) \
+ ixgbe_read_addr(IXGBE_PCI_REG_ADDR((hw), (reg)))
+
+#define IXGBE_WRITE_REG(hw, reg, value) \
+ IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define IXGBE_READ_REG_ARRAY(hw, reg, index) \
+ IXGBE_PCI_REG(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)))
+
+#define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
+ IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
+
+#endif /* _IXGBE_OS_H_ */
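As a small editorial sketch of the osdep macros in use: the read-modify-write idiom that appears throughout the shared code in this patch, for example when the DCB code toggles the Tx descriptor arbiter. IXGBE_RTTDCS and IXGBE_RTTDCS_ARBDIS are the register and bit used earlier in this patch; their definitions live in ixgbe_type.h.

	/* Illustrative only: read-modify-write through the MMIO macros above. */
	static void example_disable_tx_arbiter(struct ixgbe_hw *hw)
	{
		u32 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);	/* volatile read at hw->hw_addr + reg */

		reg |= IXGBE_RTTDCS_ARBDIS;			/* set the arbiter-disable bit */
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);		/* volatile write */
		IXGBE_WRITE_FLUSH(hw);				/* read IXGBE_STATUS to flush posted writes */
	}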
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c
new file mode 100755
index 00000000..af8b6d3e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c
@@ -0,0 +1,2425 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#ident "$Id: ixgbe_phy.c,v 1.155 2013/08/14 22:34:03 jtkirshe Exp $"
+
+STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw);
+STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
+STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
+STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
+STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
+STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
+STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
+STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
+
+/**
+ * ixgbe_out_i2c_byte_ack - Send I2C byte with ack
+ * @hw: pointer to the hardware structure
+ * @byte: byte to send
+ *
+ * Returns an error code on error.
+ */
+STATIC s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+{
+ s32 status;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte);
+ if (status)
+ return status;
+ return ixgbe_get_i2c_ack(hw);
+}
+
+/**
+ * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
+ * @hw: pointer to the hardware structure
+ * @byte: pointer to a u8 to receive the byte
+ *
+ * Returns an error code on error.
+ */
+STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+{
+ s32 status;
+
+ status = ixgbe_clock_in_i2c_byte(hw, byte);
+ if (status)
+ return status;
+ /* ACK */
+ return ixgbe_clock_out_i2c_bit(hw, false);
+}
+
+/**
+ * ixgbe_ones_comp_byte_add - Perform one's complement addition
+ * @add1: addend 1
+ * @add2: addend 2
+ *
+ * Returns one's complement 8-bit sum.
+ */
+STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
+{
+ u16 sum = add1 + add2;
+
+ sum = (sum & 0xFF) + (sum >> 8);
+ return sum & 0xFF;
+}
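+
+/*
+ * Worked example (illustrative): add1 = 0xF0, add2 = 0x20 gives the 16-bit
+ * sum 0x110; folding the carry back in yields (0x10 + 0x01) = 0x11, the
+ * one's complement 8-bit sum returned above.
+ */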
+
+/**
+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int max_retry = 10;
+ int retry = 0;
+ u8 csum_byte;
+ u8 high_bits;
+ u8 low_bits;
+ u8 reg_high;
+ u8 csum;
+
+ reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
+ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+ csum = ~csum;
+ do {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+ goto fail;
+ /* Write bits 14:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+ goto fail;
+ /* Write bits 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+ goto fail;
+ /* Write csum */
+ if (ixgbe_out_i2c_byte_ack(hw, csum))
+ goto fail;
+ /* Re-start condition */
+ ixgbe_i2c_start(hw);
+ /* Device Address and read indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
+ goto fail;
+ /* Get upper bits */
+ if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
+ goto fail;
+ /* Get low bits */
+ if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
+ goto fail;
+ /* Get csum */
+ if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
+ goto fail;
+ /* NACK */
+ if (ixgbe_clock_out_i2c_bit(hw, false))
+ goto fail;
+ ixgbe_i2c_stop(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ *val = (high_bits << 8) | low_bits;
+ return 0;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte read combined error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte read combined error.\n");
+ } while (retry < max_retry);
+
+ return IXGBE_ERR_I2C;
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ */
+STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ int max_retry = 1;
+ int retry = 0;
+ u8 reg_high;
+ u8 csum;
+
+ reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */
+ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+ csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
+ csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
+ csum = ~csum;
+ do {
+ ixgbe_i2c_start(hw);
+ /* Device Address and write indication */
+ if (ixgbe_out_i2c_byte_ack(hw, addr))
+ goto fail;
+ /* Write bits 14:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+ goto fail;
+ /* Write bits 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+ goto fail;
+ /* Write data 15:8 */
+ if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
+ goto fail;
+ /* Write data 7:0 */
+ if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
+ goto fail;
+ /* Write csum */
+ if (ixgbe_out_i2c_byte_ack(hw, csum))
+ goto fail;
+ ixgbe_i2c_stop(hw);
+ return 0;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte write combined error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte write combined error.\n");
+ } while (retry < max_retry);
+
+ return IXGBE_ERR_I2C;
+}
+
+/**
+ * ixgbe_init_phy_ops_generic - Inits PHY function ptrs
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the function pointers.
+ **/
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ DEBUGFUNC("ixgbe_init_phy_ops_generic");
+
+ /* PHY */
+ phy->ops.identify = &ixgbe_identify_phy_generic;
+ phy->ops.reset = &ixgbe_reset_phy_generic;
+ phy->ops.read_reg = &ixgbe_read_phy_reg_generic;
+ phy->ops.write_reg = &ixgbe_write_phy_reg_generic;
+ phy->ops.read_reg_mdi = &ixgbe_read_phy_reg_mdi;
+ phy->ops.write_reg_mdi = &ixgbe_write_phy_reg_mdi;
+ phy->ops.setup_link = &ixgbe_setup_phy_link_generic;
+ phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
+ phy->ops.check_link = NULL;
+ phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
+ phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
+ phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
+ phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic;
+ phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
+ phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
+ phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
+ phy->ops.identify_sfp = &ixgbe_identify_module_generic;
+ phy->sfp_type = ixgbe_sfp_type_unknown;
+ phy->ops.read_i2c_combined = &ixgbe_read_i2c_combined_generic;
+ phy->ops.write_i2c_combined = &ixgbe_write_i2c_combined_generic;
+ phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_phy_generic - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 phy_addr;
+ u16 ext_ability = 0;
+
+ DEBUGFUNC("ixgbe_identify_phy_generic");
+
+ if (!hw->phy.phy_semaphore_mask) {
+ hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
+ IXGBE_STATUS_LAN_ID_1;
+ if (hw->phy.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+ }
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+ if (ixgbe_validate_phy_addr(hw, phy_addr)) {
+ hw->phy.addr = phy_addr;
+ ixgbe_get_phy_id(hw);
+ hw->phy.type =
+ ixgbe_get_phy_type_from_id(hw->phy.id);
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &ext_ability);
+ if (ext_ability &
+ (IXGBE_MDIO_PHY_10GBASET_ABILITY |
+ IXGBE_MDIO_PHY_1000BASET_ABILITY))
+ hw->phy.type =
+ ixgbe_phy_cu_unknown;
+ else
+ hw->phy.type =
+ ixgbe_phy_generic;
+ }
+
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ }
+
+ /* Certain media types do not have a phy so an address will not
+ * be found and the code will take this path. Caller has to
+ * decide if it is an error or not.
+ */
+ if (status != IXGBE_SUCCESS) {
+ hw->phy.addr = 0;
+ }
+ } else {
+ status = IXGBE_SUCCESS;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability. For MAC's that don't
+ * have this bit just return faluse since the link can not be blocked
+ * via this method.
+ **/
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
+{
+ u32 mmngc;
+
+ DEBUGFUNC("ixgbe_check_reset_blocked");
+
+ /* If we don't have this bit, it can't be blocking */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return false;
+
+ mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
+ if (mmngc & IXGBE_MMNGC_MNG_VETO) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
+ "MNG_VETO bit detected.\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_validate_phy_addr - Determines whether a PHY address is valid
+ * @hw: pointer to hardware structure
+ * @phy_addr: PHY address to validate
+ *
+ **/
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
+{
+ u16 phy_id = 0;
+ bool valid = false;
+
+ DEBUGFUNC("ixgbe_validate_phy_addr");
+
+ hw->phy.addr = phy_addr;
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
+
+ if (phy_id != 0xFFFF && phy_id != 0x0)
+ valid = true;
+
+ return valid;
+}
+
+/**
+ * ixgbe_get_phy_id - Get the PHY ID and revision
+ * @hw: pointer to hardware structure
+ *
+ **/
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 phy_id_high = 0;
+ u16 phy_id_low = 0;
+
+ DEBUGFUNC("ixgbe_get_phy_id");
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &phy_id_high);
+
+ if (status == IXGBE_SUCCESS) {
+ hw->phy.id = (u32)(phy_id_high << 16);
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &phy_id_low);
+ hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
+ hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_type_from_id - Get the PHY type from the PHY ID
+ * @phy_id: 32 bit PHY ID read from the PHY identification registers
+ *
+ **/
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
+{
+ enum ixgbe_phy_type phy_type;
+
+ DEBUGFUNC("ixgbe_get_phy_type_from_id");
+
+ switch (phy_id) {
+ case TN1010_PHY_ID:
+ phy_type = ixgbe_phy_tn;
+ break;
+ case X550_PHY_ID:
+ case X540_PHY_ID:
+ phy_type = ixgbe_phy_aq;
+ break;
+ case QT2022_PHY_ID:
+ phy_type = ixgbe_phy_qt;
+ break;
+ case ATH_PHY_ID:
+ phy_type = ixgbe_phy_nl;
+ break;
+ default:
+ phy_type = ixgbe_phy_unknown;
+ break;
+ }
+
+ DEBUGOUT1("phy type found is %d\n", phy_type);
+ return phy_type;
+}
+
+/**
+ * ixgbe_reset_phy_generic - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u16 ctrl = 0;
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_reset_phy_generic");
+
+ if (hw->phy.type == ixgbe_phy_unknown)
+ status = ixgbe_identify_phy_generic(hw);
+
+ if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none)
+ goto out;
+
+ /* Don't reset PHY if it's shut down due to overtemp. */
+ if (!hw->phy.reset_if_overtemp &&
+ (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+ goto out;
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
+ /*
+ * Perform soft PHY reset to the PHY_XS.
+ * This will cause a soft reset to the PHY
+ */
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ IXGBE_MDIO_PHY_XS_RESET);
+
+ /*
+ * Poll for reset bit to self-clear indicating reset is complete.
+ * Some PHYs could take up to 3 seconds to complete and need about
+ * 1.7 usec delay after the reset is complete.
+ */
+ for (i = 0; i < 30; i++) {
+ msec_delay(100);
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
+ if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
+ usec_delay(2);
+ break;
+ }
+ }
+
+ if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
+ status = IXGBE_ERR_RESET_FAILED;
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PHY reset polling failed to complete.\n");
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register without
+ * the SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
+{
+ u32 i, data, command;
+
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /*
+ * Address cycle complete, setup and write the read
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /*
+ * Read operation is complete. Get the data
+ * from MSRWD
+ */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)(data);
+
+ return IXGBE_SUCCESS;
+}
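+
+/*
+ * Editorial note (sketch, not upstream code): the MSCA writes above form a
+ * Clause 45 MDIO transaction. The same field layout is reused for the
+ * address, read and write cycles; only the opcode differs:
+ *
+ * command = (reg_addr     << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+ *           (device_type  << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ *           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ *           opcode | IXGBE_MSCA_MDI_COMMAND;
+ *
+ * where opcode is IXGBE_MSCA_ADDR_CYCLE, IXGBE_MSCA_READ or IXGBE_MSCA_WRITE,
+ * and completion is signalled by the IXGBE_MSCA_MDI_COMMAND bit clearing.
+ */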
+
+/**
+ * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ * using the SWFW lock - this function is needed in most cases
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ s32 status;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ DEBUGFUNC("ixgbe_read_phy_reg_generic");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
+ status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
+ * without SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 i, command;
+
+ /* Put the data in the MDI single read and write data register*/
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /*
+ * Address cycle complete, setup and write the write
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ * using SWFW lock- this function is needed in most cases
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ s32 status;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ DEBUGFUNC("ixgbe_write_phy_reg_generic");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_generic - Set and restart auto-neg
+ * @hw: pointer to hardware structure
+ *
+ * Restarts PHY auto-negotiation and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ ixgbe_link_speed speed;
+
+ DEBUGFUNC("ixgbe_setup_phy_link_generic");
+
+ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ /* Set or unset auto-negotiation 1G advertisement */
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL) {
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
+ IXGBE_MII_100BASE_T_ADVERTISE_HALF);
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
+ /* Restart PHY auto-negotiation. */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+ autoneg_reg |= IXGBE_MII_RESTART;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: unused in this implementation
+ **/
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
+
+ /*
+ * Clear autoneg_advertised and set new values based on input link
+ * speed.
+ */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ /* Setup link based on the new speed settings */
+ hw->phy.ops.setup_link(hw);
+
+ return IXGBE_SUCCESS;
+}
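+
+/*
+ * Editorial sketch, not part of the upstream driver: advertising 10G and 1G
+ * only would typically go through the ops table as below; the function name
+ * is hypothetical. The wait flag is ignored by this generic implementation.
+ */
+#if 0 /* illustrative only */
+static s32 example_advertise_10g_1g(struct ixgbe_hw *hw)
+{
+ return hw->phy.ops.setup_link_speed(hw,
+         IXGBE_LINK_SPEED_10GB_FULL |
+         IXGBE_LINK_SPEED_1GB_FULL,
+         false);
+}
+#endif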
+
+/**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the copper link capabilities by reading the PHY speed ability register.
+ **/
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status;
+ u16 speed_ability;
+
+ DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic");
+
+ *speed = 0;
+ *autoneg = true;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &speed_ability);
+
+ if (status == IXGBE_SUCCESS) {
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
+ *speed |= IXGBE_LINK_SPEED_100_FULL;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_check_phy_link_tnx - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: pointer to link status
+ *
+ * Reads the VS1 register to determine if link is up and the current speed for
+ * the PHY.
+ **/
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 phy_link = 0;
+ u16 phy_speed = 0;
+ u16 phy_data = 0;
+
+ DEBUGFUNC("ixgbe_check_phy_link_tnx");
+
+ /* Initialize speed and link to default case */
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /*
+ * Check current speed and link status of the PHY register.
+ * This is a vendor specific register and may have to
+ * be changed for other copper PHYs.
+ */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ usec_delay(10);
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &phy_data);
+ phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+ phy_speed = phy_data &
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+ if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
+ *link_up = true;
+ if (phy_speed ==
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_tnx - Set and restart auto-neg
+ * @hw: pointer to hardware structure
+ *
+ * Restarts PHY auto-negotiation and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ ixgbe_link_speed speed;
+
+ DEBUGFUNC("ixgbe_setup_phy_link_tnx");
+
+ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ /* Set or unset auto-negotiation 1G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL) {
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
+ }
+
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
+ /* Restart PHY auto-negotiation. */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+ autoneg_reg |= IXGBE_MII_RESTART;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx");
+
+ status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ firmware_version);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status;
+
+ DEBUGFUNC("ixgbe_get_phy_firmware_version_generic");
+
+ status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ firmware_version);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_phy_nl - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+{
+ u16 phy_offset, control, eword, edata, block_crc;
+ bool end_data = false;
+ u16 list_offset, data_offset;
+ u16 phy_data = 0;
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_reset_phy_nl");
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+
+ /* reset the PHY and poll for completion */
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ (phy_data | IXGBE_MDIO_PHY_XS_RESET));
+
+ for (i = 0; i < 100; i++) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
+ break;
+ msec_delay(10);
+ }
+
+ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
+ DEBUGOUT("PHY reset did not complete.\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+
+ /* Get init offsets */
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+ &data_offset);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
+ data_offset++;
+ while (!end_data) {
+ /*
+ * Read control word from PHY init contents offset
+ */
+ ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+ if (ret_val)
+ goto err_eeprom;
+ control = (eword & IXGBE_CONTROL_MASK_NL) >>
+ IXGBE_CONTROL_SHIFT_NL;
+ edata = eword & IXGBE_DATA_MASK_NL;
+ switch (control) {
+ case IXGBE_DELAY_NL:
+ data_offset++;
+ DEBUGOUT1("DELAY: %d MS\n", edata);
+ msec_delay(edata);
+ break;
+ case IXGBE_DATA_NL:
+ DEBUGOUT("DATA:\n");
+ data_offset++;
+ ret_val = hw->eeprom.ops.read(hw, data_offset,
+ &phy_offset);
+ if (ret_val)
+ goto err_eeprom;
+ data_offset++;
+ for (i = 0; i < edata; i++) {
+ ret_val = hw->eeprom.ops.read(hw, data_offset,
+ &eword);
+ if (ret_val)
+ goto err_eeprom;
+ hw->phy.ops.write_reg(hw, phy_offset,
+ IXGBE_TWINAX_DEV, eword);
+ DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword,
+ phy_offset);
+ data_offset++;
+ phy_offset++;
+ }
+ break;
+ case IXGBE_CONTROL_NL:
+ data_offset++;
+ DEBUGOUT("CONTROL:\n");
+ if (edata == IXGBE_CONTROL_EOL_NL) {
+ DEBUGOUT("EOL\n");
+ end_data = true;
+ } else if (edata == IXGBE_CONTROL_SOL_NL) {
+ DEBUGOUT("SOL\n");
+ } else {
+ DEBUGOUT("Bad control value\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+ break;
+ default:
+ DEBUGOUT("Bad control type\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+
+err_eeprom:
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", data_offset);
+ return IXGBE_ERR_PHY;
+}
+
+/**
+ * ixgbe_identify_module_generic - Identifies module type
+ * @hw: pointer to hardware structure
+ *
+ * Determines the media type and calls the appropriate identification routine.
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+ DEBUGFUNC("ixgbe_identify_module_generic");
+
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ status = ixgbe_identify_sfp_module_generic(hw);
+ break;
+
+ case ixgbe_media_type_fiber_qsfp:
+ status = ixgbe_identify_qsfp_module_generic(hw);
+ break;
+
+ default:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the SFP module and assigns appropriate PHY type.
+ **/
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u8 cable_tech = 0;
+ u8 cable_spec = 0;
+ u16 enforce_sfp = 0;
+
+ DEBUGFUNC("ixgbe_identify_sfp_module_generic");
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_IDENTIFIER,
+ &identifier);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES,
+ &comp_codes_1g);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES,
+ &comp_codes_10g);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_CABLE_TECHNOLOGY,
+ &cable_tech);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ /* ID Module
+ * =========
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ * 3 SFP_DA_CORE0 - 82599-specific
+ * 4 SFP_DA_CORE1 - 82599-specific
+ * 5 SFP_SR/LR_CORE0 - 82599-specific
+ * 6 SFP_SR/LR_CORE1 - 82599-specific
+ * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
+ * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
+ * 9 SFP_1g_cu_CORE0 - 82599-specific
+ * 10 SFP_1g_cu_CORE1 - 82599-specific
+ * 11 SFP_1g_sx_CORE0 - 82599-specific
+ * 12 SFP_1g_sx_CORE1 - 82599-specific
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ } else {
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_cu_core1;
+ } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
+ hw->phy.ops.read_i2c_eeprom(
+ hw, IXGBE_SFF_CABLE_SPEC_COMP,
+ &cable_spec);
+ if (cable_spec &
+ IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_unknown;
+ }
+ } else if (comp_codes_10g &
+ (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_srlr_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_srlr_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_lx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_lx_core1;
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ }
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the SFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
+ /* Determine PHY vendor */
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = identifier;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ vendor_oui =
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+ switch (vendor_oui) {
+ case IXGBE_SFF_VENDOR_OUI_TYCO:
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_tyco;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_FTL:
+ if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_ftl_active;
+ else
+ hw->phy.type = ixgbe_phy_sfp_ftl;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_AVAGO:
+ hw->phy.type = ixgbe_phy_sfp_avago;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_INTEL:
+ hw->phy.type = ixgbe_phy_sfp_intel;
+ break;
+ default:
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_unknown;
+ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_active_unknown;
+ else
+ hw->phy.type = ixgbe_phy_sfp_unknown;
+ break;
+ }
+ }
+
+ /* Allow any DA cable vendor */
+ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+ IXGBE_SFF_DA_ACTIVE_CABLE)) {
+ status = IXGBE_SUCCESS;
+ goto out;
+ }
+
+ /* Verify supported 1G SFP modules */
+ if (comp_codes_10g == 0 &&
+ !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Anything else 82598-based is supported */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ status = IXGBE_SUCCESS;
+ goto out;
+ }
+
+ ixgbe_get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+ !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ /* Make sure we're a supported PHY type */
+ if (hw->phy.type == ixgbe_phy_sfp_intel) {
+ status = IXGBE_SUCCESS;
+ } else {
+ if (hw->allow_unsupported_sfp == true) {
+ printf("SFP+ is not Intel \n");
+ EWARN(hw, "WARNING: Intel (R) Network "
+ "Connections are quality tested "
+ "using Intel (R) Ethernet Optics."
+ " Using untested modules is not "
+ "supported and may cause unstable"
+ " operation or damage to the "
+ "module or the adapter. Intel "
+ "Corporation is not responsible "
+ "for any harm caused by using "
+ "untested modules.\n", status);
+ status = IXGBE_SUCCESS;
+ } else {
+ printf("SFP+ module not supported\n");
+ DEBUGOUT("SFP+ module not supported\n");
+ hw->phy.type =
+ ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ status = IXGBE_SUCCESS;
+ }
+ }
+
+out:
+ return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+ }
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+/**
+ * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current SFP.
+ **/
+s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
+
+ DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic");
+
+ hw->phy.ops.identify_sfp(hw);
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return physical_layer;
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_qsfp_passive_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_qsfp_active_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+ break;
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+ break;
+ case ixgbe_phy_qsfp_intel:
+ case ixgbe_phy_qsfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ break;
+ }
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the QSFP module and assigns appropriate PHY type
+ **/
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u16 enforce_sfp = 0;
+ u8 connector = 0;
+ u8 cable_length = 0;
+ u8 device_tech = 0;
+ bool active_cable = false;
+
+ DEBUGFUNC("ixgbe_identify_qsfp_module_generic");
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+ &identifier);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ hw->phy.id = identifier;
+
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
+ &comp_codes_10g);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
+ &comp_codes_1g);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
+ hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
+ } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
+ } else {
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
+ active_cable = true;
+
+ if (!active_cable) {
+ /* check for active DA cables that pre-date
+ * SFF-8436 v3.6 */
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CONNECTOR,
+ &connector);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CABLE_LENGTH,
+ &cable_length);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_DEVICE_TECH,
+ &device_tech);
+
+ if ((connector ==
+ IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
+ (cable_length > 0) &&
+ ((device_tech >> 4) ==
+ IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
+ active_cable = true;
+ }
+
+ if (active_cable) {
+ hw->phy.type = ixgbe_phy_qsfp_active_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ /* unsupported module type */
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the QSFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
+ /* Determine PHY vendor for optical modules */
+ if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ if (status != IXGBE_SUCCESS)
+ goto err_read_i2c_eeprom;
+
+ vendor_oui =
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+ if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
+ hw->phy.type = ixgbe_phy_qsfp_intel;
+ else
+ hw->phy.type = ixgbe_phy_qsfp_unknown;
+
+ ixgbe_get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+ /* Make sure we're a supported PHY type */
+ if (hw->phy.type == ixgbe_phy_qsfp_intel) {
+ status = IXGBE_SUCCESS;
+ } else {
+ if (hw->allow_unsupported_sfp == true) {
+ EWARN(hw, "WARNING: Intel (R) Network "
+ "Connections are quality tested "
+ "using Intel (R) Ethernet Optics."
+ " Using untested modules is not "
+ "supported and may cause unstable"
+ " operation or damage to the "
+ "module or the adapter. Intel "
+ "Corporation is not responsible "
+ "for any harm caused by using "
+ "untested modules.\n", status);
+ status = IXGBE_SUCCESS;
+ } else {
+ DEBUGOUT("QSFP module not supported\n");
+ hw->phy.type =
+ ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ status = IXGBE_SUCCESS;
+ }
+ }
+
+out:
+ return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+
+/**
+ * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
+ * @hw: pointer to hardware structure
+ * @list_offset: offset to the SFP ID list
+ * @data_offset: offset to the SFP data block
+ *
+ * Checks the MAC's EEPROM to see if it supports a given SFP+ module type; if
+ * so, it returns the offsets to the PHY init sequence block.
+ **/
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset)
+{
+ u16 sfp_id;
+ u16 sfp_type = hw->phy.sfp_type;
+
+ DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets");
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+
+ if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
+ (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ /*
+ * Limiting active cables and 1G Phys must be initialized as
+ * SR modules
+ */
+ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core0)
+ sfp_type = ixgbe_sfp_type_srlr_core0;
+ else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core1)
+ sfp_type = ixgbe_sfp_type_srlr_core1;
+
+ /* Read offset to PHY init contents */
+ if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed",
+ IXGBE_PHY_INIT_OFFSET_NL);
+ return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ }
+
+ if ((!*list_offset) || (*list_offset == 0xFFFF))
+ return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+
+ /* Shift offset to first ID word */
+ (*list_offset)++;
+
+ /*
+ * Find the matching SFP ID in the EEPROM
+ * and program the init sequence
+ */
+ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+ goto err_phy;
+
+ while (sfp_id != IXGBE_PHY_INIT_END_NL) {
+ if (sfp_id == sfp_type) {
+ (*list_offset)++;
+ if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
+ goto err_phy;
+ if ((!*data_offset) || (*data_offset == 0xFFFF)) {
+ DEBUGOUT("SFP+ module not supported\n");
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
+ break;
+ }
+ } else {
+ (*list_offset) += 2;
+ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+ goto err_phy;
+ }
+ }
+
+ if (sfp_id == IXGBE_PHY_INIT_END_NL) {
+ DEBUGOUT("No matching SFP+ module found\n");
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ return IXGBE_SUCCESS;
+
+err_phy:
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "eeprom read at offset %d failed", *list_offset);
+ return IXGBE_ERR_PHY;
+}
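+
+/*
+ * Editorial note (sketch): the EEPROM area walked above is laid out roughly
+ * as follows, as far as can be inferred from this code:
+ *
+ * word at IXGBE_PHY_INIT_OFFSET_NL -> list_offset
+ * list_offset: header word, then (sfp_id, data_offset) pairs terminated by
+ * IXGBE_PHY_INIT_END_NL
+ * data_offset: block CRC word followed by the control/data words that
+ * ixgbe_reset_phy_nl() replays to the PHY
+ */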
+
+/**
+ * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ DEBUGFUNC("ixgbe_read_i2c_eeprom_generic");
+
+ return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ sff8472_data);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data)
+{
+ DEBUGFUNC("ixgbe_write_i2c_eeprom_generic");
+
+ return hw->phy.ops.write_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ s32 status;
+ u32 max_retry = 10;
+ u32 retry = 0;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ bool nack = 1;
+ *data = 0;
+
+ DEBUGFUNC("ixgbe_read_i2c_byte_generic");
+
+ do {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ ixgbe_i2c_start(hw);
+
+ /* Device Address and write indication */
+ status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ ixgbe_i2c_start(hw);
+
+ /* Device Address and read indication */
+ status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_clock_in_i2c_byte(hw, data);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_bit(hw, nack);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ ixgbe_i2c_stop(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return IXGBE_SUCCESS;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(100);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte read error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte read error.\n");
+
+ } while (retry < max_retry);
+
+ return status;
+}
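+
+/*
+ * Editorial sketch, not part of the upstream driver: a typical consumer of
+ * the bit-banged I2C read above fetches SFP+ module EEPROM bytes through the
+ * ops table. The helper below is hypothetical and for illustration only.
+ */
+#if 0 /* illustrative only */
+static s32 example_read_sfp_identifier(struct ixgbe_hw *hw, u8 *id)
+{
+ /* Byte 0 of the module EEPROM (device address 0xA0) holds the SFF
+  * identifier, e.g. IXGBE_SFF_IDENTIFIER_SFP for an SFP/SFP+ module.
+  */
+ return hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, id);
+}
+#endif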
+
+/**
+ * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 max_retry = 1;
+ u32 retry = 0;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+
+ DEBUGFUNC("ixgbe_write_i2c_byte_generic");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto write_byte_out;
+ }
+
+ do {
+ ixgbe_i2c_start(hw);
+
+ status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, data);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != IXGBE_SUCCESS)
+ goto fail;
+
+ ixgbe_i2c_stop(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return IXGBE_SUCCESS;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte write error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte write error.\n");
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
+ return status;
+}
+
+/**
+ * ixgbe_i2c_start - Sets I2C start condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C start condition (High -> Low on SDA while SCL is High)
+ **/
+STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+
+ DEBUGFUNC("ixgbe_i2c_start");
+
+ /* Start condition must begin with data and clock high */
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for start condition (4.7us) */
+ usec_delay(IXGBE_I2C_T_SU_STA);
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 0);
+
+ /* Hold time for start condition (4us) */
+ usec_delay(IXGBE_I2C_T_HD_STA);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(IXGBE_I2C_T_LOW);
+
+}
+
+/**
+ * ixgbe_i2c_stop - Sets I2C stop condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ **/
+STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+
+ DEBUGFUNC("ixgbe_i2c_stop");
+
+ /* Stop condition must begin with data low and clock high */
+ ixgbe_set_i2c_data(hw, &i2cctl, 0);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for stop condition (4us) */
+ usec_delay(IXGBE_I2C_T_SU_STO);
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+ /* bus free time between stop and start (4.7us)*/
+ usec_delay(IXGBE_I2C_T_BUF);
+}
+
+/**
+ * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: pointer to store the byte read from the I2C bus
+ *
+ * Clocks in one byte data via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
+{
+ s32 i;
+ bool bit = 0;
+
+ DEBUGFUNC("ixgbe_clock_in_i2c_byte");
+
+ for (i = 7; i >= 0; i--) {
+ ixgbe_clock_in_i2c_bit(hw, &bit);
+ *data |= bit << i;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte to clock out
+ *
+ * Clocks out one byte data via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
+{
+ s32 status = IXGBE_SUCCESS;
+ s32 i;
+ u32 i2cctl;
+ bool bit;
+
+ DEBUGFUNC("ixgbe_clock_out_i2c_byte");
+
+ for (i = 7; i >= 0; i--) {
+ bit = (data >> i) & 0x1;
+ status = ixgbe_clock_out_i2c_bit(hw, bit);
+
+ if (status != IXGBE_SUCCESS)
+ break;
+ }
+
+ /* Release SDA line (set high) */
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_i2c_ack - Polls for I2C ACK
+ * @hw: pointer to hardware structure
+ *
+ * Polls the SDA line for an ACK from the I2C device
+ **/
+STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 i = 0;
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 timeout = 10;
+ bool ack = 1;
+
+ DEBUGFUNC("ixgbe_get_i2c_ack");
+
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(IXGBE_I2C_T_HIGH);
+
+ /* Poll for ACK. Note that ACK in I2C spec is
+ * transition from 1 to 0 */
+ for (i = 0; i < timeout; i++) {
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ ack = ixgbe_get_i2c_data(hw, &i2cctl);
+
+ usec_delay(1);
+ if (!ack)
+ break;
+ }
+
+ if (ack) {
+ DEBUGOUT("I2C ack was not received.\n");
+ status = IXGBE_ERR_I2C;
+ }
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(IXGBE_I2C_T_LOW);
+
+ return status;
+}
+
+/**
+ * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: read data value
+ *
+ * Clocks in one bit via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+
+ DEBUGFUNC("ixgbe_clock_in_i2c_bit");
+
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(IXGBE_I2C_T_HIGH);
+
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ *data = ixgbe_get_i2c_data(hw, &i2cctl);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(IXGBE_I2C_T_LOW);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: data value to write
+ *
+ * Clocks out one bit via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+{
+ s32 status;
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+
+ DEBUGFUNC("ixgbe_clock_out_i2c_bit");
+
+ status = ixgbe_set_i2c_data(hw, &i2cctl, data);
+ if (status == IXGBE_SUCCESS) {
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(IXGBE_I2C_T_HIGH);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us.
+ * This also takes care of the data hold time.
+ */
+ usec_delay(IXGBE_I2C_T_LOW);
+ } else {
+ status = IXGBE_ERR_I2C;
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "I2C data was not set to %X\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Raises the I2C clock line '0'->'1'
+ **/
+STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+ u32 i = 0;
+ u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
+ u32 i2cctl_r = 0;
+
+ DEBUGFUNC("ixgbe_raise_i2c_clk");
+
+ for (i = 0; i < timeout; i++) {
+ *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ /* SCL rise time (1000ns) */
+ usec_delay(IXGBE_I2C_T_RISE);
+
+ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
+ break;
+ }
+}
+
+/**
+ * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Lowers the I2C clock line '1'->'0'
+ **/
+STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+
+ DEBUGFUNC("ixgbe_lower_i2c_clk");
+
+ *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw));
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* SCL fall time (300ns) */
+ usec_delay(IXGBE_I2C_T_FALL);
+}
+
+/**
+ * ixgbe_set_i2c_data - Sets the I2C data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ * @data: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ **/
+STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_set_i2c_data");
+
+ if (data)
+ *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ else
+ *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw));
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+ usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
+
+ /* Verify data was set correctly */
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
+ status = IXGBE_ERR_I2C;
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "Error - I2C data was not set to %X.\n",
+ data);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_i2c_data - Reads the I2C SDA data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ **/
+STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+ bool data;
+ UNREFERENCED_1PARAMETER(hw);
+
+ DEBUGFUNC("ixgbe_get_i2c_data");
+
+ if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
+ data = 1;
+ else
+ data = 0;
+
+ return data;
+}
+
+/**
+ * ixgbe_i2c_bus_clear - Clears the I2C bus
+ * @hw: pointer to hardware structure
+ *
+ * Clears the I2C bus by sending nine clock pulses.
+ * Used when data line is stuck low.
+ **/
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i;
+
+ DEBUGFUNC("ixgbe_i2c_bus_clear");
+
+ ixgbe_i2c_start(hw);
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+ for (i = 0; i < 9; i++) {
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Min high period of clock is 4us */
+ usec_delay(IXGBE_I2C_T_HIGH);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Min low period of clock is 4.7us*/
+ usec_delay(IXGBE_I2C_T_LOW);
+ }
+
+ ixgbe_i2c_start(hw);
+
+ /* Put the i2c bus back to default state */
+ ixgbe_i2c_stop(hw);
+}
+
+/**
+ * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
+ * @hw: pointer to hardware structure
+ *
+ * Checks if the LASI temp alarm status was triggered due to overtemp
+ **/
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u16 phy_data = 0;
+
+ DEBUGFUNC("ixgbe_tn_check_overtemp");
+
+ if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
+ goto out;
+
+ /* Check that the LASI temp alarm status was triggered */
+ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
+
+ if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
+ goto out;
+
+ status = IXGBE_ERR_OVERTEMP;
+ ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature");
+out:
+ return status;
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h
new file mode 100755
index 00000000..e262cc41
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h
@@ -0,0 +1,176 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_PHY_H_
+#define _IXGBE_PHY_H_
+
+#include "ixgbe_type.h"
+#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
+#define IXGBE_I2C_EEPROM_BANK_LEN 0xFF
+
+/* EEPROM byte offsets */
+#define IXGBE_SFF_IDENTIFIER 0x0
+#define IXGBE_SFF_IDENTIFIER_SFP 0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
+#define IXGBE_SFF_1GBE_COMP_CODES 0x6
+#define IXGBE_SFF_10GBE_COMP_CODES 0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7
+#define IXGBE_SFF_QSFP_CONNECTOR 0x82
+#define IXGBE_SFF_QSFP_10GBE_COMP 0x83
+#define IXGBE_SFF_QSFP_1GBE_COMP 0x86
+#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92
+#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
+
+/* Bitmasks */
+#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
+#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
+#define IXGBE_I2C_EEPROM_READ_MASK 0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+
+#define IXGBE_CS4227 0x9E /* CS4227 address */
+#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */
+#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
+#define IXGBE_CS4227_EDC_MODE_SR 0x0004
+
+/* Flow control defines */
+#define IXGBE_TAF_SYM_PAUSE 0x400
+#define IXGBE_TAF_ASM_PAUSE 0x800
+
+/* Bit-shift macros */
+#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
+#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
+#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
+#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
+#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define IXGBE_I2C_T_HD_STA 4
+#define IXGBE_I2C_T_LOW 5
+#define IXGBE_I2C_T_HIGH 4
+#define IXGBE_I2C_T_SU_STA 5
+#define IXGBE_I2C_T_HD_DATA 5
+#define IXGBE_I2C_T_SU_DATA 1
+#define IXGBE_I2C_T_RISE 1
+#define IXGBE_I2C_T_FALL 1
+#define IXGBE_I2C_T_SU_STO 4
+#define IXGBE_I2C_T_BUF 5
+
+#define IXGBE_TN_LASI_STATUS_REG 0x9005
+#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+
+/* SFP+ SFF-8472 Compliance */
+#define IXGBE_SFF_SFF_8472_UNSUP 0x00
+
+#ident "$Id: ixgbe_phy.h,v 1.56 2013/09/05 23:59:49 jtkirshe Exp $"
+
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data);
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data);
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
+
+/* PHY specific */
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up);
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset);
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data);
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
+#endif /* _IXGBE_PHY_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_type.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
new file mode 100755
index 00000000..c67d462f
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
@@ -0,0 +1,3765 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_TYPE_H_
+#define _IXGBE_TYPE_H_
+
+/*
+ * The following is a brief description of the error categories used by the
+ * ERROR_REPORT* macros.
+ *
+ * - IXGBE_ERROR_INVALID_STATE
+ * This category is for errors which represent a serious failure state that is
+ * unexpected, and could be potentially harmful to device operation. It should
+ * not be used for errors relating to issues that can be worked around or
+ * ignored.
+ *
+ * - IXGBE_ERROR_POLLING
+ * This category is for errors related to polling/timeout issues and should be
+ * used in any case where a timeout occurred, a lock could not be obtained, or
+ * data was not received within the time limit.
+ *
+ * - IXGBE_ERROR_CAUTION
+ * This category should be used for reporting issues that may be the cause of
+ * other errors, such as temperature warnings. It should indicate an event which
+ * could be serious, but hasn't necessarily caused problems yet.
+ *
+ * - IXGBE_ERROR_SOFTWARE
+ * This category is intended for errors due to software state preventing
+ * something. The category is not intended for errors due to bad arguments, or
+ * due to unsupported features. It should be used when a state occurs which
+ * prevents action but is not a serious issue.
+ *
+ * - IXGBE_ERROR_ARGUMENT
+ * This category is for when a bad or invalid argument is passed. It should be
+ * used whenever a function is called and error checking has detected the
+ * argument is wrong or incorrect.
+ *
+ * - IXGBE_ERROR_UNSUPPORTED
+ * This category is for errors which are due to unsupported circumstances or
+ * configuration issues. It should not be used when the issue is due to an
+ * invalid argument, but for when something has occurred that is unsupported
+ * (Ex: Flow control autonegotiation or an unsupported SFP+ module.)
+ */
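As a hedged illustration of how these categories pair with the ERROR_REPORT1(category, string) macro already used in ixgbe_phy.c earlier in this patch, a schematic poll helper is sketched below; the function name is hypothetical, and IXGBE_READ_REG()/usec_delay() are assumed to come from ixgbe_osdep.h:

/* Schematic only: a register-poll timeout is an IXGBE_ERROR_POLLING event,
 * whereas a bad caller-supplied value would be reported with
 * IXGBE_ERROR_ARGUMENT instead. */
static bool example_wait_for_bit(struct ixgbe_hw *hw, u32 reg, u32 bit)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (IXGBE_READ_REG(hw, reg) & bit)
			return true;
		usec_delay(10);
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING, "Timed out polling status bit");
	return false;
}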
+
+#include "ixgbe_osdep.h"
+
+#ident "$Id: ixgbe_type.h,v 1.630 2013/11/22 22:48:40 jtkirshe Exp $"
+
+/* Vendor ID */
+#define IXGBE_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82598 0x10B6
+#define IXGBE_DEV_ID_82598_BX 0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
+#define IXGBE_DEV_ID_82598AT 0x10C8
+#define IXGBE_DEV_ID_82598AT2 0x150B
+#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
+#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
+#define IXGBE_DEV_ID_82599_KX4 0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
+#define IXGBE_DEV_ID_82599_KR 0x1517
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_82599_CX4 0x10F9
+#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071
+#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
+#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
+#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
+#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
+#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159
+#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D
+#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
+#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
+#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
+#define IXGBE_DEV_ID_82599EN_SFP 0x1557
+#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001
+#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_82599_VF_HV 0x152E
+#define IXGBE_DEV_ID_82599_LS 0x154F
+#define IXGBE_DEV_ID_X540T 0x1528
+#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X540T1 0x1560
+#define IXGBE_DEV_ID_X550EM_X 0x15A7
+#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
+#define IXGBE_DEV_ID_X550T 0x1563
+#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
+#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
+#define IXGBE_DEV_ID_X550_VF_HV 0x1564
+#define IXGBE_DEV_ID_X550_VF 0x1565
+#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+
+/* General Registers */
+#define IXGBE_CTRL 0x00000
+#define IXGBE_STATUS 0x00008
+#define IXGBE_CTRL_EXT 0x00018
+#define IXGBE_ESDP 0x00020
+#define IXGBE_EODSDP 0x00028
+#define IXGBE_I2CCTL_82599 0x00028
+#define IXGBE_I2CCTL_X550 0x15F5C
+#define IXGBE_I2CCTL_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ IXGBE_I2CCTL_X550 : IXGBE_I2CCTL_82599))
+#define IXGBE_PHY_GPIO 0x00028
+#define IXGBE_MAC_GPIO 0x00030
+#define IXGBE_PHYINT_STATUS0 0x00100
+#define IXGBE_PHYINT_STATUS1 0x00104
+#define IXGBE_PHYINT_STATUS2 0x00108
+#define IXGBE_LEDCTL 0x00200
+#define IXGBE_FRTIMER 0x00048
+#define IXGBE_TCPTIMER 0x0004C
+#define IXGBE_CORESPARE 0x00600
+#define IXGBE_EXVET 0x05078
+
+/* NVM Registers */
+#define IXGBE_EEC 0x10010
+#define IXGBE_EERD 0x10014
+#define IXGBE_EEWR 0x10018
+#define IXGBE_FLA 0x1001C
+#define IXGBE_EEMNGCTL 0x10110
+#define IXGBE_EEMNGDATA 0x10114
+#define IXGBE_FLMNGCTL 0x10118
+#define IXGBE_FLMNGDATA 0x1011C
+#define IXGBE_FLMNGCNT 0x10120
+#define IXGBE_FLOP 0x1013C
+#define IXGBE_GRC 0x10200
+#define IXGBE_SRAMREL 0x10210
+#define IXGBE_PHYDBG 0x10218
+
+/* General Receive Control */
+#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
+
+#define IXGBE_VPDDIAG0 0x10204
+#define IXGBE_VPDDIAG1 0x10208
+
+/* I2CCTL Bit Masks */
+#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00004000 : 0x00000001)
+#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00000200 : 0x00000002)
+#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00001000 : 0x00000004)
+#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+ 0x00000400 : 0x00000008)
+#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
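These *_BY_MAC selectors hide the fact that X550-class MACs moved the I2C control register and its bits (see IXGBE_I2CCTL_X550 above). A sketch of how a bit-bang helper might consume them follows; the function name is hypothetical, IXGBE_WRITE_REG()/IXGBE_WRITE_FLUSH() are assumed from ixgbe_osdep.h, and IXGBE_I2C_T_RISE comes from ixgbe_phy.h above:

/* Illustrative only: raise SCL using the MAC-appropriate clock-out bit. */
static void example_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
	*i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
	IXGBE_WRITE_FLUSH(hw);

	/* SCL rise time (standard mode) */
	usec_delay(IXGBE_I2C_T_RISE);
}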
+
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define IXGBE_EMC_INTERNAL_DATA 0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
+#define IXGBE_EMC_DIODE1_DATA 0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
+#define IXGBE_EMC_DIODE2_DATA 0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
+
+#define IXGBE_MAX_SENSORS 3
+
+struct ixgbe_thermal_diode_data {
+ u8 location;
+ u8 temp;
+ u8 caution_thresh;
+ u8 max_op_thresh;
+};
+
+struct ixgbe_thermal_sensor_data {
+ struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
+};
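The two structures above simply expose per-diode readings and thresholds; a minimal sketch of walking them is shown below, where the function name and the assumption that location == 0 marks an unused entry are hypothetical:

/* Illustrative only: flag any populated diode at or past its caution threshold. */
static void example_check_thermal_data(struct ixgbe_thermal_sensor_data *data)
{
	int i;

	for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
		struct ixgbe_thermal_diode_data *diode = &data->sensor[i];

		if (diode->location == 0)
			continue; /* assumed to mean "not populated" */

		if (diode->temp >= diode->caution_thresh) {
			/* Hypothetical reaction: log or throttle. */
		}
	}
}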
+
+/* Interrupt Registers */
+#define IXGBE_EICR 0x00800
+#define IXGBE_EICS 0x00808
+#define IXGBE_EIMS 0x00880
+#define IXGBE_EIMC 0x00888
+#define IXGBE_EIAC 0x00810
+#define IXGBE_EIAM 0x00890
+#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
+/* 82599 EITR is only 12 bits, with the lower 3 always zero */
+/*
+ * 82598 EITR is 16 bits but set the limits based on the max
+ * supported by all ixgbe hardware
+ */
+#define IXGBE_MAX_INT_RATE 488281
+#define IXGBE_MIN_INT_RATE 956
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
+#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
+ (0x012300 + (((_i) - 24) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
+#define IXGBE_EITR_LLI_MOD 0x00008000
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
+#define IXGBE_EITRSEL 0x00894
+#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
+#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
+#define IXGBE_GPIE 0x00898
+
+/* Flow Control Registers */
+#define IXGBE_FCADBUL 0x03210
+#define IXGBE_FCADBUH 0x03214
+#define IXGBE_FCAMACL 0x04328
+#define IXGBE_FCAMACH 0x0432C
+#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_PFCTOP 0x03008
+#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTV 0x032A0
+#define IXGBE_FCCFG 0x03D00
+#define IXGBE_TFCS 0x0CE00
+
+/* Receive DMA Registers */
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+ (0x0D000 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+ (0x0D004 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+ (0x0D008 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+ (0x0D010 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+ (0x0D018 + (((_i) - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+ (0x0D028 + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+ (0x0D02C + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCDBU 0x03028
+#define IXGBE_RDDCC 0x02F20
+#define IXGBE_RXMEMWRAP 0x03190
+#define IXGBE_STARCTRL 0x03024
+/*
+ * Split and Replication Receive Control Registers
+ * 00-15 : 0x02100 + n*4
+ * 16-63 : 0x01014 + n*0x40
+ * 64-127: 0x0D014 + (n-64)*0x40
+ */
+#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (0x0D014 + (((_i) - 64) * 0x40))))
+/*
+ * Rx DCA Control Register:
+ * 00-15 : 0x02200 + n*4
+ * 16-63 : 0x0100C + n*0x40
+ * 64-127: 0x0D00C + (n-64)*0x40
+ */
+#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+ (0x0D00C + (((_i) - 64) * 0x40))))
+#define IXGBE_RDRXCTL 0x02F00
+/* 8 of these 0x03C00 - 0x03C1C */
+#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
+#define IXGBE_RXCTRL 0x03000
+#define IXGBE_DROPEN 0x03D04
+#define IXGBE_RXPBSIZE_SHIFT 10
+#define IXGBE_RXPBSIZE_MASK 0x000FFC00
+
+/* Receive Registers */
+#define IXGBE_RXCSUM 0x05000
+#define IXGBE_RFCTL 0x05008
+#define IXGBE_DRECCCTL 0x02F08
+#define IXGBE_DRECCCTL_DISABLE 0
+#define IXGBE_DRECCCTL2 0x02F8C
+
+/* Multicast Table Array - 128 entries */
+#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
+#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x0A204 + ((_i) * 8)))
+#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
+#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
+/* Packet split receive type */
+#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
+ (0x0EA00 + ((_i) * 4)))
+/* array of 4096 1-bit vlan filters */
+#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
+/*array of 4096 4-bit vlan vmdq indices */
+#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define IXGBE_FCTRL 0x05080
+#define IXGBE_VLNCTRL 0x05088
+#define IXGBE_MCSTCTRL 0x05090
+#define IXGBE_MRQC 0x05818
+#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
+#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
+#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
+#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
+#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */
+#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
+#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */
+#define IXGBE_RQTC 0x0EC70
+#define IXGBE_MTQC 0x08120
+#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_PFFLPL 0x050B0
+#define IXGBE_PFFLPH 0x050B4
+#define IXGBE_VT_CTL 0x051B0
+#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
+/* 64 Mailboxes, 16 DW each */
+#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i)))
+#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
+#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
+#define IXGBE_QDE 0x2F04
+#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */
+#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
+#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
+#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
+#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
+#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_LVMMC_RX 0x2FA8
+#define IXGBE_LVMMC_TX 0x8108
+#define IXGBE_LMVM_RX 0x2FA4
+#define IXGBE_LMVM_TX 0x8124
+#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
+#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
+#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
+#define IXGBE_RXFECCERR0 0x051B8
+#define IXGBE_LLITHRESH 0x0EC90
+#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIRVP 0x05AC0
+#define IXGBE_VMD_CTL 0x0581C
+#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
+#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */
+#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+
+/* Registers for setting up RSS on X550 with SRIOV
+ * _p - pool number (0..63)
+ * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA)
+ */
+#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4))
+#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40))
+#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40))
+
+/* Flow Director registers */
+#define IXGBE_FDIRCTRL 0x0EE00
+#define IXGBE_FDIRHKEY 0x0EE68
+#define IXGBE_FDIRSKEY 0x0EE6C
+#define IXGBE_FDIRDIP4M 0x0EE3C
+#define IXGBE_FDIRSIP4M 0x0EE40
+#define IXGBE_FDIRTCPM 0x0EE44
+#define IXGBE_FDIRUDPM 0x0EE48
+#define IXGBE_FDIRSCTPM 0x0EE78
+#define IXGBE_FDIRIP6M 0x0EE74
+#define IXGBE_FDIRM 0x0EE70
+
+/* Flow Director Stats registers */
+#define IXGBE_FDIRFREE 0x0EE38
+#define IXGBE_FDIRLEN 0x0EE4C
+#define IXGBE_FDIRUSTAT 0x0EE50
+#define IXGBE_FDIRFSTAT 0x0EE54
+#define IXGBE_FDIRMATCH 0x0EE58
+#define IXGBE_FDIRMISS 0x0EE5C
+
+/* Flow Director Programming registers */
+#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
+#define IXGBE_FDIRIPSA 0x0EE18
+#define IXGBE_FDIRIPDA 0x0EE1C
+#define IXGBE_FDIRPORT 0x0EE20
+#define IXGBE_FDIRVLAN 0x0EE24
+#define IXGBE_FDIRHASH 0x0EE28
+#define IXGBE_FDIRCMD 0x0EE2C
+
+/* Transmit DMA registers */
+#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/
+#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
+#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40))
+#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define IXGBE_DTXCTL 0x07E00
+
+#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_PFDTXGSWC 0x08220
+#define IXGBE_DTXMXSZRQ 0x08100
+#define IXGBE_DTXTCPFLGL 0x04A88
+#define IXGBE_DTXTCPFLGH 0x04A8C
+#define IXGBE_LBDRPEN 0x0CA00
+#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
+
+#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
+#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
+#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */
+#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */
+#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
+
+/* Anti-spoofing defines */
+#define IXGBE_SPOOF_MACAS_MASK 0xFF
+#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000
+#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16
+#define IXGBE_PFVFSPOOF_REG_COUNT 8
+/* 16 of these (0-15) */
+#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4))
+/* Tx DCA Control register : 128 of these (0-127) */
+#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
+#define IXGBE_TIPG 0x0CB00
+#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_MNGTXMAP 0x0CD10
+#define IXGBE_TIPG_FIBER_DEFAULT 3
+#define IXGBE_TXPBSIZE_SHIFT 10
+
+/* Wake up registers */
+#define IXGBE_WUC 0x05800
+#define IXGBE_WUFC 0x05808
+#define IXGBE_WUS 0x05810
+#define IXGBE_IPAV 0x05838
+#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
+#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
+
+#define IXGBE_WUPL 0x05900
+#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_PROXYS 0x05F60 /* Proxying Status Register */
+#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */
+#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */
+
+#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
+/* Ext Flexible Host Filter Table */
+#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100))
+#define IXGBE_FHFT_EXT_X550(_n) (0x09600 + ((_n) * 0x100))
+
+/* Four Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Six Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6
+/* Eight Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8 8
+#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
+#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
+#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
+#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
+
+/* Wake Up Filter Control */
+#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */
+#define IXGBE_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */
+/* Mask for Ext. flex filters */
+#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000
+#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */
+#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */
+#define IXGBE_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */
+#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+
+/* Wake Up Status */
+#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
+#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
+#define IXGBE_WUS_EX IXGBE_WUFC_EX
+#define IXGBE_WUS_MC IXGBE_WUFC_MC
+#define IXGBE_WUS_BC IXGBE_WUFC_BC
+#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
+#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
+#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
+#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
+#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
+#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
+#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
+#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
+#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
+#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
+#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
+#define IXGBE_WUS_FW_RST_WK IXGBE_WUFC_FW_RST_WK
+/* Proxy Status */
+#define IXGBE_PROXYS_EX 0x00000004 /* Exact packet received */
+#define IXGBE_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */
+#define IXGBE_PROXYS_NS 0x00000200 /* IPV6 NS received */
+#define IXGBE_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */
+#define IXGBE_PROXYS_ARP 0x00000800 /* ARP request packet received */
+#define IXGBE_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */
+
+/* Proxying Filter Control */
+#define IXGBE_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */
+#define IXGBE_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */
+#define IXGBE_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */
+#define IXGBE_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define IXGBE_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */
+#define IXGBE_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */
+#define IXGBE_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */
+
+#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
+
+/* DCB registers */
+#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8
+#define IXGBE_RMCS 0x03D00
+#define IXGBE_DPMCS 0x07F40
+#define IXGBE_PDPMCS 0x0CD00
+#define IXGBE_RUPPBMR 0x050A0
+#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+
+/* Power Management */
+/* DMA Coalescing configuration */
+struct ixgbe_dmac_config {
+ u16 watchdog_timer; /* usec units */
+ bool fcoe_en;
+ u32 link_speed;
+ u8 fcoe_tc;
+ u8 num_tcs;
+};
+
+/*
+ * DMA Coalescing threshold Rx PB TC[n] value in kilobytes by link speed.
+ * DMACRXT at 10 Gbps: 10,000 bits/usec = 1250 bytes/usec; 70 * 1250 =
+ * 87500 bytes [~85 KB]
+ */
+#define IXGBE_DMACRXT_10G 0x55
+#define IXGBE_DMACRXT_1G 0x09
+#define IXGBE_DMACRXT_100M 0x01
+
+/* DMA Coalescing registers */
+#define IXGBE_DMCMNGTH 0x15F20 /* Management Threshold */
+#define IXGBE_DMACR 0x02400 /* Control register */
+#define IXGBE_DMCTH(_i) (0x03300 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_DMCTLX 0x02404 /* Time to Lx request */
+/* DMA Coalescing register fields */
+#define IXGBE_DMCMNGTH_DMCMNGTH_MASK 0x000FFFF0 /* Mng Threshold mask */
+#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT 4 /* Management Threshold shift */
+#define IXGBE_DMACR_DMACWT_MASK 0x0000FFFF /* Watchdog Timer mask */
+#define IXGBE_DMACR_HIGH_PRI_TC_MASK 0x00FF0000
+#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT 16
+#define IXGBE_DMACR_EN_MNG_IND 0x10000000 /* Enable Mng Indications */
+#define IXGBE_DMACR_LX_COAL_IND 0x40000000 /* Lx Coalescing indicate */
+#define IXGBE_DMACR_DMAC_EN 0x80000000 /* DMA Coalescing Enable */
+#define IXGBE_DMCTH_DMACRXT_MASK 0x000001FF /* Receive Threshold mask */
+#define IXGBE_DMCTLX_TTLX_MASK 0x00000FFF /* Time to Lx request mask */
+
+/* EEE registers */
+#define IXGBE_EEER 0x043A0 /* EEE register */
+#define IXGBE_EEE_STAT 0x04398 /* EEE Status */
+#define IXGBE_EEE_SU 0x04380 /* EEE Set up */
+#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */
+#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */
+
+/* EEE register fields */
+#define IXGBE_EEER_TX_LPI_EN 0x00010000 /* Enable EEE LPI TX path */
+#define IXGBE_EEER_RX_LPI_EN 0x00020000 /* Enable EEE LPI RX path */
+#define IXGBE_EEE_STAT_NEG 0x20000000 /* EEE support neg on link */
+#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */
+#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */
+
+
+
+/* Security Control Registers */
+#define IXGBE_SECTXCTRL 0x08800
+#define IXGBE_SECTXSTAT 0x08804
+#define IXGBE_SECTXBUFFAF 0x08808
+#define IXGBE_SECTXMINIFG 0x08810
+#define IXGBE_SECRXCTRL 0x08D00
+#define IXGBE_SECRXSTAT 0x08D04
+
+/* Security Bit Fields and Masks */
+#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001
+#define IXGBE_SECTXCTRL_TX_DIS 0x00000002
+#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
+
+#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
+#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002
+
+#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
+#define IXGBE_SECRXCTRL_RX_DIS 0x00000002
+
+#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
+#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002
+
+/* LinkSec (MacSec) Registers */
+#define IXGBE_LSECTXCAP 0x08A00
+#define IXGBE_LSECRXCAP 0x08F00
+#define IXGBE_LSECTXCTRL 0x08A04
+#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */
+#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */
+#define IXGBE_LSECTXSA 0x08A10
+#define IXGBE_LSECTXPN0 0x08A14
+#define IXGBE_LSECTXPN1 0x08A18
+#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECRXCTRL 0x08F04
+#define IXGBE_LSECRXSCL 0x08F08
+#define IXGBE_LSECRXSCH 0x08F0C
+#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
+#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */
+#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */
+#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */
+#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */
+#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */
+#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */
+#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */
+#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */
+#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */
+#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */
+#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */
+#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */
+#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */
+#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */
+#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */
+#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
+#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
+#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */
+#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */
+
+/* LinkSec (MacSec) Bit Fields and Masks */
+#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECTXCAP_SUM_SHIFT 16
+#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECRXCAP_SUM_SHIFT 16
+
+#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003
+#define IXGBE_LSECTXCTRL_DISABLE 0x0
+#define IXGBE_LSECTXCTRL_AUTH 0x1
+#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2
+#define IXGBE_LSECTXCTRL_AISCI 0x00000020
+#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8
+
+#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C
+#define IXGBE_LSECRXCTRL_EN_SHIFT 2
+#define IXGBE_LSECRXCTRL_DISABLE 0x0
+#define IXGBE_LSECRXCTRL_CHECK 0x1
+#define IXGBE_LSECRXCTRL_STRICT 0x2
+#define IXGBE_LSECRXCTRL_DROP 0x3
+#define IXGBE_LSECRXCTRL_PLSH 0x00000040
+#define IXGBE_LSECRXCTRL_RP 0x00000080
+#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+
+/* IpSec Registers */
+#define IXGBE_IPSTXIDX 0x08900
+#define IXGBE_IPSTXSALT 0x08904
+#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXIDX 0x08E00
+#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSPI 0x08E14
+#define IXGBE_IPSRXIPIDX 0x08E18
+#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSALT 0x08E2C
+#define IXGBE_IPSRXMOD 0x08E30
+
+#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
+
+/* DCB registers */
+#define IXGBE_RTRPCS 0x02430
+#define IXGBE_RTTDCS 0x04900
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
+#define IXGBE_RTTPCS 0x0CD00
+#define IXGBE_RTRUP2TC 0x03020
+#define IXGBE_RTTUP2TC 0x0C800
+#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDQSEL 0x04904
+#define IXGBE_RTTDT1C 0x04908
+#define IXGBE_RTTDT1S 0x0490C
+#define IXGBE_RTTDTECC 0x04990
+#define IXGBE_RTTDTECC_NO_BCN 0x00000100
+
+#define IXGBE_RTTBCNRC 0x04984
+#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
+#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
+#define IXGBE_RTTBCNRC_RF_INT_MASK \
+ (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+#define IXGBE_RTTBCNRM 0x04980
+
+/* BCN (for DCB) Registers */
+#define IXGBE_RTTBCNRS 0x04988
+#define IXGBE_RTTBCNCR 0x08B00
+#define IXGBE_RTTBCNACH 0x08B04
+#define IXGBE_RTTBCNACL 0x08B08
+#define IXGBE_RTTBCNTG 0x04A90
+#define IXGBE_RTTBCNIDX 0x08B0C
+#define IXGBE_RTTBCNCP 0x08B10
+#define IXGBE_RTFRTIMER 0x08B14
+#define IXGBE_RTTBCNRTT 0x05150
+#define IXGBE_RTTBCNRD 0x0498C
+
+
+/* FCoE DMA Context Registers */
+/* FCoE Direct DMA Context */
+#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10))
+#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */
+#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
+#define IXGBE_FCBUFF_OFFSET_SHIFT 16
+#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
+#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
+/* FCoE SOF/EOF */
+#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
+/* FCoE Filter Context Registers */
+#define IXGBE_FCD_ID 0x05114 /* FCoE D_ID */
+#define IXGBE_FCSMAC 0x0510C /* FCoE Source MAC */
+#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT 16
+/* FCoE Direct Filter Context */
+#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10))
+#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4))
+#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+/* FCoE Receive Control */
+#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
+#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
+/* FCoE Redirection */
+#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */
+#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */
+#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
+#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
+#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */
+#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
+#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */
+/* Higher 7 bits for the queue index */
+#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000
+#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16
+
+/* Stats registers */
+#define IXGBE_CRCERRS 0x04000
+#define IXGBE_ILLERRC 0x04004
+#define IXGBE_ERRBC 0x04008
+#define IXGBE_MSPDC 0x04010
+#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
+#define IXGBE_MLFC 0x04034
+#define IXGBE_MRFC 0x04038
+#define IXGBE_RLEC 0x04040
+#define IXGBE_LXONTXC 0x03F60
+#define IXGBE_LXONRXC 0x0CF60
+#define IXGBE_LXOFFTXC 0x03F68
+#define IXGBE_LXOFFRXC 0x0CF68
+#define IXGBE_LXONRXCNT 0x041A4
+#define IXGBE_LXOFFRXCNT 0x041A8
+#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
+#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
+#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
+#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
+#define IXGBE_PRC64 0x0405C
+#define IXGBE_PRC127 0x04060
+#define IXGBE_PRC255 0x04064
+#define IXGBE_PRC511 0x04068
+#define IXGBE_PRC1023 0x0406C
+#define IXGBE_PRC1522 0x04070
+#define IXGBE_GPRC 0x04074
+#define IXGBE_BPRC 0x04078
+#define IXGBE_MPRC 0x0407C
+#define IXGBE_GPTC 0x04080
+#define IXGBE_GORCL 0x04088
+#define IXGBE_GORCH 0x0408C
+#define IXGBE_GOTCL 0x04090
+#define IXGBE_GOTCH 0x04094
+#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
+#define IXGBE_RUC 0x040A4
+#define IXGBE_RFC 0x040A8
+#define IXGBE_ROC 0x040AC
+#define IXGBE_RJC 0x040B0
+#define IXGBE_MNGPRC 0x040B4
+#define IXGBE_MNGPDC 0x040B8
+#define IXGBE_MNGPTC 0x0CF90
+#define IXGBE_TORL 0x040C0
+#define IXGBE_TORH 0x040C4
+#define IXGBE_TPR 0x040D0
+#define IXGBE_TPT 0x040D4
+#define IXGBE_PTC64 0x040D8
+#define IXGBE_PTC127 0x040DC
+#define IXGBE_PTC255 0x040E0
+#define IXGBE_PTC511 0x040E4
+#define IXGBE_PTC1023 0x040E8
+#define IXGBE_PTC1522 0x040EC
+#define IXGBE_MPTC 0x040F0
+#define IXGBE_BPTC 0x040F4
+#define IXGBE_XEC 0x04120
+#define IXGBE_SSVPC 0x08780
+
+#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
+ (0x08600 + ((_i) * 4)))
+#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
+
+#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */
+#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */
+#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */
+#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */
+#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
+#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
+#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */
+#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */
+#define IXGBE_O2BGPTC 0x041C4
+#define IXGBE_O2BSPC 0x087B0
+#define IXGBE_B2OSPC 0x041C0
+#define IXGBE_B2OGPRC 0x02F90
+#define IXGBE_BUPRC 0x04180
+#define IXGBE_BMPRC 0x04184
+#define IXGBE_BBPRC 0x04188
+#define IXGBE_BUPTC 0x0418C
+#define IXGBE_BMPTC 0x04190
+#define IXGBE_BBPTC 0x04194
+#define IXGBE_BCRCERRS 0x04198
+#define IXGBE_BXONRXC 0x0419C
+#define IXGBE_BXOFFRXC 0x041E0
+#define IXGBE_BXONTXC 0x041E4
+#define IXGBE_BXOFFTXC 0x041E8
+
+/* Management */
+#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MANC 0x05820
+#define IXGBE_MFVAL 0x05824
+#define IXGBE_MANC2H 0x05860
+#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MIPAF 0x058B0
+#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
+#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_LSWFW 0x15014
+#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
+#define IXGBE_BMCIPVAL 0x05060
+#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001
+#define IXGBE_BMCIP_IPADDR_VALID 0x00000002
+
+/* Management Bit Fields and Masks */
+#define IXGBE_MANC_MPROXYE 0x40000000 /* Management Proxy Enable */
+#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */
+#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */
+#define IXGBE_MANC_EN_BMC2OS_SHIFT 28
+
+/* Firmware Semaphore Register */
+#define IXGBE_FWSM_MODE_MASK 0xE
+#define IXGBE_FWSM_TS_ENABLED 0x1
+#define IXGBE_FWSM_FW_MODE_PT 0x4
+
+/* ARC Subsystem registers */
+#define IXGBE_HICR 0x15F00
+#define IXGBE_FWSTS 0x15F0C
+#define IXGBE_HSMC0R 0x15F04
+#define IXGBE_HSMC1R 0x15F08
+#define IXGBE_SWSR 0x15F10
+#define IXGBE_HFDR 0x15FE8
+#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */
+
+#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define IXGBE_HICR_C 0x02
+#define IXGBE_HICR_SV 0x04 /* Status Validity */
+#define IXGBE_HICR_FW_RESET_ENABLE 0x40
+#define IXGBE_HICR_FW_RESET 0x80
+
+/* PCI-E registers */
+#define IXGBE_GCR 0x11000
+#define IXGBE_GTV 0x11004
+#define IXGBE_FUNCTAG 0x11008
+#define IXGBE_GLT 0x1100C
+#define IXGBE_PCIEPIPEADR 0x11004
+#define IXGBE_PCIEPIPEDAT 0x11008
+#define IXGBE_GSCL_1 0x11010
+#define IXGBE_GSCL_2 0x11014
+#define IXGBE_GSCL_3 0x11018
+#define IXGBE_GSCL_4 0x1101C
+#define IXGBE_GSCN_0 0x11020
+#define IXGBE_GSCN_1 0x11024
+#define IXGBE_GSCN_2 0x11028
+#define IXGBE_GSCN_3 0x1102C
+#define IXGBE_FACTPS 0x10150
+#define IXGBE_PCIEANACTL 0x11040
+#define IXGBE_SWSM 0x10140
+#define IXGBE_FWSM 0x10148
+#define IXGBE_GSSR 0x10160
+#define IXGBE_MREVID 0x11064
+#define IXGBE_DCA_ID 0x11070
+#define IXGBE_DCA_CTRL 0x11074
+#define IXGBE_SWFW_SYNC IXGBE_GSSR
+
+/* PCI-E registers 82599-Specific */
+#define IXGBE_GCR_EXT 0x11050
+#define IXGBE_GSCL_5_82599 0x11030
+#define IXGBE_GSCL_6_82599 0x11034
+#define IXGBE_GSCL_7_82599 0x11038
+#define IXGBE_GSCL_8_82599 0x1103C
+#define IXGBE_PHYADR_82599 0x11040
+#define IXGBE_PHYDAT_82599 0x11044
+#define IXGBE_PHYCTL_82599 0x11048
+#define IXGBE_PBACLR_82599 0x11068
+#define IXGBE_CIAA_82599 0x11088
+#define IXGBE_CIAD_82599 0x1108C
+#define IXGBE_CIAA_X550 0x11508
+#define IXGBE_CIAD_X550 0x11510
+#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
+#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+ IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
+#define IXGBE_PICAUSE 0x110B0
+#define IXGBE_PIENA 0x110B8
+#define IXGBE_CDQ_MBR_82599 0x110B4
+#define IXGBE_PCIESPARE 0x110BC
+#define IXGBE_MISC_REG_82599 0x110F0
+#define IXGBE_ECC_CTRL_0_82599 0x11100
+#define IXGBE_ECC_CTRL_1_82599 0x11104
+#define IXGBE_ECC_STATUS_82599 0x110E0
+#define IXGBE_BAR_CTRL_82599 0x110F4
+
+/* PCI Express Control */
+#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
+#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define IXGBE_GCR_CAP_VER2 0x00040000
+
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+ IXGBE_GCR_EXT_VT_MODE_64)
+#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003
+/* Time Sync Registers */
+#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
+#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
+#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */
+#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */
+#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */
+#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */
+#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */
+#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */
+#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */
+#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
+#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
+#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */
+#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
+#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
+#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
+#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */
+#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */
+#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
+#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
+#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */
+#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */
+#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
+#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
+#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
+#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
+#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
+#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
+#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */
+#define IXGBE_TSICR 0x08C60 /* TimeSync Interrupt Cause Register - WO */
+#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */
+
+/* Diagnostic Registers */
+#define IXGBE_RDSTATCTL 0x02C20
+#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN 0x02F08
+#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE 0x02F20
+#define IXGBE_RDMAM 0x02F30
+#define IXGBE_RDMAD 0x02F34
+#define IXGBE_TDHMPN 0x07F08
+#define IXGBE_TDHMPN2 0x082FC
+#define IXGBE_TXDESCIC 0x082CC
+#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
+#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
+#define IXGBE_TDPROBE 0x07F20
+#define IXGBE_TXBUFCTRL 0x0C600
+#define IXGBE_TXBUFDATA0 0x0C610
+#define IXGBE_TXBUFDATA1 0x0C614
+#define IXGBE_TXBUFDATA2 0x0C618
+#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_RXBUFCTRL 0x03600
+#define IXGBE_RXBUFDATA0 0x03610
+#define IXGBE_RXBUFDATA1 0x03614
+#define IXGBE_RXBUFDATA2 0x03618
+#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_RFVAL 0x050A4
+#define IXGBE_MDFTC1 0x042B8
+#define IXGBE_MDFTC2 0x042C0
+#define IXGBE_MDFTFIFO1 0x042C4
+#define IXGBE_MDFTFIFO2 0x042C8
+#define IXGBE_MDFTS 0x042CC
+#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
+#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
+#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
+#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
+#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
+#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
+#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
+#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
+#define IXGBE_PCIEECCCTL 0x1106C
+#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
+#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
+#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
+#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/
+#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
+#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
+#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
+#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/
+#define IXGBE_PCIEECCCTL0 0x11100
+#define IXGBE_PCIEECCCTL1 0x11104
+#define IXGBE_RXDBUECC 0x03F70
+#define IXGBE_TXDBUECC 0x0CF70
+#define IXGBE_RXDBUEST 0x03F74
+#define IXGBE_TXDBUEST 0x0CF74
+#define IXGBE_PBTXECC 0x0C300
+#define IXGBE_PBRXECC 0x03300
+#define IXGBE_GHECCR 0x110B0
+
+/* MAC Registers */
+#define IXGBE_PCS1GCFIG 0x04200
+#define IXGBE_PCS1GLCTL 0x04208
+#define IXGBE_PCS1GLSTA 0x0420C
+#define IXGBE_PCS1GDBG0 0x04210
+#define IXGBE_PCS1GDBG1 0x04214
+#define IXGBE_PCS1GANA 0x04218
+#define IXGBE_PCS1GANLP 0x0421C
+#define IXGBE_PCS1GANNP 0x04220
+#define IXGBE_PCS1GANLPNP 0x04224
+#define IXGBE_HLREG0 0x04240
+#define IXGBE_HLREG1 0x04244
+#define IXGBE_PAP 0x04248
+#define IXGBE_MACA 0x0424C
+#define IXGBE_APAE 0x04250
+#define IXGBE_ARD 0x04254
+#define IXGBE_AIS 0x04258
+#define IXGBE_MSCA 0x0425C
+#define IXGBE_MSRWD 0x04260
+#define IXGBE_MLADD 0x04264
+#define IXGBE_MHADD 0x04268
+#define IXGBE_MAXFRS 0x04268
+#define IXGBE_TREG 0x0426C
+#define IXGBE_PCSS1 0x04288
+#define IXGBE_PCSS2 0x0428C
+#define IXGBE_XPCSS 0x04290
+#define IXGBE_MFLCN 0x04294
+#define IXGBE_SERDESC 0x04298
+#define IXGBE_MACS 0x0429C
+#define IXGBE_AUTOC 0x042A0
+#define IXGBE_LINKS 0x042A4
+#define IXGBE_LINKS2 0x04324
+#define IXGBE_AUTOC2 0x042A8
+#define IXGBE_AUTOC3 0x042AC
+#define IXGBE_ANLP1 0x042B0
+#define IXGBE_ANLP2 0x042B4
+#define IXGBE_MACC 0x04330
+#define IXGBE_ATLASCTL 0x04800
+#define IXGBE_MMNGC 0x042D0
+#define IXGBE_ANLPNP1 0x042D4
+#define IXGBE_ANLPNP2 0x042D8
+#define IXGBE_KRPCSFC 0x042E0
+#define IXGBE_KRPCSS 0x042E4
+#define IXGBE_FECS1 0x042E8
+#define IXGBE_FECS2 0x042EC
+#define IXGBE_SMADARCTL 0x14F10
+#define IXGBE_MPVC 0x04318
+#define IXGBE_SGMIIC 0x04314
+
+/* Statistics Registers */
+#define IXGBE_RXNFGPC 0x041B0
+#define IXGBE_RXNFGBCL 0x041B4
+#define IXGBE_RXNFGBCH 0x041B8
+#define IXGBE_RXDGPC 0x02F50
+#define IXGBE_RXDGBCL 0x02F54
+#define IXGBE_RXDGBCH 0x02F58
+#define IXGBE_RXDDGPC 0x02F5C
+#define IXGBE_RXDDGBCL 0x02F60
+#define IXGBE_RXDDGBCH 0x02F64
+#define IXGBE_RXLPBKGPC 0x02F68
+#define IXGBE_RXLPBKGBCL 0x02F6C
+#define IXGBE_RXLPBKGBCH 0x02F70
+#define IXGBE_RXDLPBKGPC 0x02F74
+#define IXGBE_RXDLPBKGBCL 0x02F78
+#define IXGBE_RXDLPBKGBCH 0x02F7C
+#define IXGBE_TXDGPC 0x087A0
+#define IXGBE_TXDGBCL 0x087A4
+#define IXGBE_TXDGBCH 0x087A8
+
+#define IXGBE_RXDSTATCTRL 0x02F40
+
+/* Copper Pond 2 link timeout */
+#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
+
+/* Omer CORECTL */
+#define IXGBE_CORECTL 0x014F00
+/* BARCTRL */
+#define IXGBE_BARCTRL 0x110F4
+#define IXGBE_BARCTRL_FLSIZE 0x0700
+#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
+#define IXGBE_BARCTRL_CSRSIZE 0x2000
+
+/* RSCCTL Bit Masks */
+#define IXGBE_RSCCTL_RSCEN 0x01
+#define IXGBE_RSCCTL_MAXDESC_1 0x00
+#define IXGBE_RSCCTL_MAXDESC_4 0x04
+#define IXGBE_RSCCTL_MAXDESC_8 0x08
+#define IXGBE_RSCCTL_MAXDESC_16 0x0C
+#define IXGBE_RSCCTL_TS_DIS 0x02
+
+/* RSCDBU Bit Masks */
+#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
+#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
+
+/* RDRXCTL Bit Masks */
+#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */
+#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad Small Packet */
+#define IXGBE_RDRXCTL_MVMEN 0x00000020
+#define IXGBE_RDRXCTL_RSC_PUSH_DIS 0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
+#define IXGBE_RDRXCTL_RSC_PUSH 0x00000080
+#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
+#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/
+#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_MBINTEN 0x10000000
+#define IXGBE_RDRXCTL_MDP_EN 0x20000000
+
+/* RQTC Bit Masks and Shifts */
+#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
+#define IXGBE_RQTC_TC0_MASK (0x7 << 0)
+#define IXGBE_RQTC_TC1_MASK (0x7 << 4)
+#define IXGBE_RQTC_TC2_MASK (0x7 << 8)
+#define IXGBE_RQTC_TC3_MASK (0x7 << 12)
+#define IXGBE_RQTC_TC4_MASK (0x7 << 16)
+#define IXGBE_RQTC_TC5_MASK (0x7 << 20)
+#define IXGBE_RQTC_TC6_MASK (0x7 << 24)
+#define IXGBE_RQTC_TC7_MASK (0x7 << 28)
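+
+/*
+ * Editor's illustrative sketch, not part of the original header: the RQTC
+ * register is carved into eight 3-bit fields, one per traffic class, and
+ * IXGBE_RQTC_SHIFT_TC() gives the bit offset of field '_i'.  A hypothetical
+ * helper (assuming the driver's u32 typedef is in scope) could read the
+ * value for traffic class 'tc' like this:
+ */
+static inline u32 ixgbe_rqtc_get_tc(u32 rqtc, unsigned int tc)
+{
+	return (rqtc >> IXGBE_RQTC_SHIFT_TC(tc)) & 0x7;
+}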
+
+/* PSRTYPE.RQPL Bit masks and shift */
+#define IXGBE_PSRTYPE_RQPL_MASK 0x7
+#define IXGBE_PSRTYPE_RQPL_SHIFT 29
+
+/* CTRL Bit Masks */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
+
+/* FACTPS */
+#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageability Clock Gated */
+#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
+
+/* MHADD Bit Masks */
+#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT 16
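+
+/*
+ * Editor's illustrative sketch, not part of the original header: MHADD keeps
+ * the maximum frame size (MFS) in its upper 16 bits.  A hypothetical helper
+ * (assuming the driver's u32 typedef) that updates only that field:
+ */
+static inline u32 ixgbe_mhadd_set_mfs(u32 mhadd, u32 max_frame)
+{
+	mhadd &= ~IXGBE_MHADD_MFS_MASK;
+	mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
+	return mhadd;
+}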
+
+/* Extended Device Control */
+#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
+#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+
+/* Direct Cache Access (DCA) definitions */
+#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
+
+/* MSCA Bit Masks */
+#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT 0
+#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type (new prot) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot) */
+#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/
+#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */
+#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */
+#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc)*/
+#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */
+#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */
+#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */
+
+/* MSRWD bit masks */
+#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT 16
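+
+/*
+ * Editor's illustrative sketch, not part of the original header: an MDIO
+ * ("new protocol") access is built by packing the register address, device
+ * type and PHY address into MSCA, OR'ing in the desired OP/ST codes and
+ * setting MDI_COMMAND to start the cycle; read data then comes back in the
+ * upper half of MSRWD.  A full transaction normally issues an address cycle
+ * first.  Sketch assuming the driver's u16/u32 typedefs:
+ */
+static inline u32 ixgbe_msca_read_cmd(u32 reg_addr, u32 dev_type, u32 phy_addr)
+{
+	return (reg_addr & IXGBE_MSCA_NP_ADDR_MASK) |
+	       ((dev_type << IXGBE_MSCA_DEV_TYPE_SHIFT) & IXGBE_MSCA_DEV_TYPE_MASK) |
+	       ((phy_addr << IXGBE_MSCA_PHY_ADDR_SHIFT) & IXGBE_MSCA_PHY_ADDR_MASK) |
+	       IXGBE_MSCA_READ | IXGBE_MSCA_NEW_PROTOCOL | IXGBE_MSCA_MDI_COMMAND;
+}
+
+static inline u16 ixgbe_msrwd_read_data(u32 msrwd)
+{
+	return (u16)((msrwd & IXGBE_MSRWD_READ_DATA_MASK) >>
+		     IXGBE_MSRWD_READ_DATA_SHIFT);
+}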
+
+/* Atlas registers */
+#define IXGBE_ATLAS_PDN_LPBK 0x24
+#define IXGBE_ATLAS_PDN_10G 0xB
+#define IXGBE_ATLAS_PDN_1G 0xC
+#define IXGBE_ATLAS_PDN_AN 0xD
+
+/* Atlas bit masks */
+#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000
+#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10
+#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
+
+/* Omer bit masks */
+#define IXGBE_CORECTL_WRITE_CMD 0x00010000
+
+/* Device Type definitions for new protocol MDIO commands */
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
+#define IXGBE_TWINAX_DEV 1
+
+#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
+
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
+
+#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
+#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT 0x8 /* AUTO NEG EEE 10GBaseT Advt */
+#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4 /* AUTO NEG EEE 1000BaseT Advt */
+#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT 0x2 /* AUTO NEG EEE 100BaseT Advt */
+#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
+#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
+#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
+#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
+#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
+#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
+#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
+#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */
+#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */
+#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
+#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
+#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+
+#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+
+#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */
+#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */
+#define IXGBE_PCRC8ECH_MASK 0x1F
+#define IXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */
+#define IXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */
+
+/* MII clause 22/28 definitions */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
+
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
+#define IXGBE_MII_RESTART 0x200
+#define IXGBE_MII_AUTONEG_COMPLETE 0x20
+#define IXGBE_MII_AUTONEG_LINK_UP 0x04
+#define IXGBE_MII_AUTONEG_REG 0x0
+
+#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
+#define IXGBE_MAX_PHY_ADDR 32
+
+/* PHY IDs*/
+#define TN1010_PHY_ID 0x00A19410
+#define TNX_FW_REV 0xB
+#define X540_PHY_ID 0x01540200
+#define X550_PHY_ID 0x01540220
+#define AQ_FW_REV 0x20
+#define QT2022_PHY_ID 0x0043A400
+#define ATH_PHY_ID 0x03429050
+
+/* PHY Types */
+#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+
+/* Special PHY Init Routine */
+#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
+#define IXGBE_PHY_INIT_END_NL 0xFFFF
+#define IXGBE_CONTROL_MASK_NL 0xF000
+#define IXGBE_DATA_MASK_NL 0x0FFF
+#define IXGBE_CONTROL_SHIFT_NL 12
+#define IXGBE_DELAY_NL 0
+#define IXGBE_DATA_NL 1
+#define IXGBE_CONTROL_NL 0x000F
+#define IXGBE_CONTROL_EOL_NL 0x0FFF
+#define IXGBE_CONTROL_SOL_NL 0x0000
+
+/* General purpose Interrupt Enable */
+#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
+#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME 0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
+#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
+#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
+#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
+#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
+
+/* Packet Buffer Initialization */
+#define IXGBE_MAX_PACKET_BUFFERS 8
+
+#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
+#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
+#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */
+#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
+
+#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+#define IXGBE_MAX_PB 8
+
+/* Packet buffer allocation strategies */
+enum {
+ PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL
+ PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED
+};
+
+/* Transmit Flow Control status */
+#define IXGBE_TFCS_TXOFF 0x00000001
+#define IXGBE_TFCS_TXOFF0 0x00000100
+#define IXGBE_TFCS_TXOFF1 0x00000200
+#define IXGBE_TFCS_TXOFF2 0x00000400
+#define IXGBE_TFCS_TXOFF3 0x00000800
+#define IXGBE_TFCS_TXOFF4 0x00001000
+#define IXGBE_TFCS_TXOFF5 0x00002000
+#define IXGBE_TFCS_TXOFF6 0x00004000
+#define IXGBE_TFCS_TXOFF7 0x00008000
+
+/* TCP Timer */
+#define IXGBE_TCPTIMER_KS 0x00000100
+#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200
+#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400
+#define IXGBE_TCPTIMER_LOOP 0x00000800
+#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
+
+/* HLREG0 Bit Masks */
+#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */
+#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */
+#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */
+#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */
+#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */
+#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */
+#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */
+#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */
+#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */
+#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */
+#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */
+#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */
+#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */
+#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */
+#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */
+
+/* VMD_CTL bitmasks */
+#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001
+#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
+
+/* VT_CTL bitmasks */
+#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
+#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
+#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
+#define IXGBE_VT_CTL_POOL_SHIFT 7
+#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
+
+/* VMOLR bitmasks */
+#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
+#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
+#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
+#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */
+#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */
+
+/* VFRE bitmask */
+#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* RDHMPN and TDHMPN bitmasks */
+#define IXGBE_RDHMPN_RDICADDR 0x007FF800
+#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
+#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
+#define IXGBE_TDHMPN_TDICADDR 0x003FF800
+#define IXGBE_TDHMPN_TDICRDREQ 0x00800000
+#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
+
+#define IXGBE_RDMAM_MEM_SEL_SHIFT 13
+#define IXGBE_RDMAM_DWORD_SHIFT 9
+#define IXGBE_RDMAM_DESC_COMP_FIFO 1
+#define IXGBE_RDMAM_DFC_CMD_FIFO 2
+#define IXGBE_RDMAM_RSC_HEADER_ADDR 3
+#define IXGBE_RDMAM_TCN_STATUS_RAM 4
+#define IXGBE_RDMAM_WB_COLL_FIFO 5
+#define IXGBE_RDMAM_QSC_CNT_RAM 6
+#define IXGBE_RDMAM_QSC_FCOE_RAM 7
+#define IXGBE_RDMAM_QSC_QUEUE_CNT 8
+#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA
+#define IXGBE_RDMAM_QSC_RSC_RAM 0xB
+#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135
+#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4
+#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48
+#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4
+#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256
+#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9
+#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8
+#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4
+#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64
+#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4
+#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512
+#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8
+#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32
+#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8
+
+#define IXGBE_TXDESCIC_READY 0x80000000
+
+/* Receive Checksum Control */
+#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* FCRTL Bit Masks */
+#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
+#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
+
+/* PAP bit masks*/
+#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
+
+/* RMCS Bit Masks */
+#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RMCS_RAC 0x00000004
+/* Deficit Fixed Prio ena */
+#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC
+#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */
+#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
+#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
+
+/* FCCFG Bit Masks */
+#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */
+#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */
+#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */
+#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
+#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
+#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
+#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
+#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
+#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
+#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */
+#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
+#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
+#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
+#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_LSC | \
+ IXGBE_EIMS_TCP_TIMER | \
+ IXGBE_EIMS_OTHER)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
+#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
+#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */
+#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */
+#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */
+#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */
+#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */
+#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */
+#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */
+#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */
+
+#define IXGBE_MAX_FTQF_FILTERS 128
+#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003
+#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000
+#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001
+#define IXGBE_FTQF_PROTOCOL_SCTP 2
+#define IXGBE_FTQF_PRIORITY_MASK 0x00000007
+#define IXGBE_FTQF_PRIORITY_SHIFT 2
+#define IXGBE_FTQF_POOL_MASK 0x0000003F
+#define IXGBE_FTQF_POOL_SHIFT 8
+#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F
+#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25
+#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E
+#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D
+#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B
+#define IXGBE_FTQF_DEST_PORT_MASK 0x17
+#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F
+#define IXGBE_FTQF_POOL_MASK_EN 0x40000000
+#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000
+
+/* Interrupt clear mask */
+#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_REG_NUM 25
+#define IXGBE_IVAR_REG_NUM_82599 64
+#define IXGBE_IVAR_TXRX_ENTRY 96
+#define IXGBE_IVAR_RX_ENTRY 64
+#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i))
+#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i))
+#define IXGBE_IVAR_TX_ENTRY 32
+
+#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */
+#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */
+
+#define IXGBE_MSIX_VECTOR(_i) (0 + (_i))
+
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+/* ETYPE Queue Filter/Select Bit Masks */
+#define IXGBE_MAX_ETQF_FILTERS 8
+#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
+#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
+#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
+#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
+#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
+#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_SHIFT 20
+
+#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
+#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
+#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */
+#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */
+
+/*
+ * ETQF filter list: one static filter per filter consumer. This is to
+ * avoid filter collisions later. Add new filters here!!
+ *
+ * Current filters:
+ * EAPOL 802.1x (0x888e): Filter 0
+ * FCoE (0x8906): Filter 2
+ * 1588 (0x88f7): Filter 3
+ * FIP (0x8914): Filter 4
+ * LLDP (0x88CC): Filter 5
+ * LACP (0x8809): Filter 6
+ */
+#define IXGBE_ETQF_FILTER_EAPOL 0
+#define IXGBE_ETQF_FILTER_FCOE 2
+#define IXGBE_ETQF_FILTER_1588 3
+#define IXGBE_ETQF_FILTER_FIP 4
+#define IXGBE_ETQF_FILTER_LLDP 5
+#define IXGBE_ETQF_FILTER_LACP 6
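+
+/*
+ * Editor's illustrative sketch, not part of the original header: once an
+ * EtherType filter index is chosen from the list above, the matching ETQS
+ * entry steers hits to an Rx queue by packing the queue number into the
+ * RX_QUEUE field and setting QUEUE_EN.  Hypothetical helper (driver u32
+ * typedef assumed):
+ */
+static inline u32 ixgbe_etqs_queue(u32 rx_queue)
+{
+	return ((rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) & IXGBE_ETQS_RX_QUEUE) |
+	       IXGBE_ETQS_QUEUE_EN;
+}
+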
+/* VLAN Control Bit Masks */
+#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
+#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
+#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */
+#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
+#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
+
+/* VLAN pool filtering masks */
+#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
+#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
+
+/* STATUS Bit Masks */
+#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
+#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */
+
+#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
+#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
+
+/* ESDP Bit Masks */
+#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
+#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
+#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */
+#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
+#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
+#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP2 IO direction */
+#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */
+#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP6_DIR 0x00004000 /* SDP6 IO direction */
+#define IXGBE_ESDP_SDP7_DIR 0x00008000 /* SDP7 IO direction */
+#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */
+#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
+
+
+/* LEDCTL Bit Masks */
+#define IXGBE_LED_IVRT_BASE 0x00000040
+#define IXGBE_LED_BLINK_BASE 0x00000080
+#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
+#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
+#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
+#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
+#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
+#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+
+/* LED modes */
+#define IXGBE_LED_LINK_UP 0x0
+#define IXGBE_LED_LINK_10G 0x1
+#define IXGBE_LED_MAC 0x2
+#define IXGBE_LED_FILTER 0x3
+#define IXGBE_LED_LINK_ACTIVE 0x4
+#define IXGBE_LED_LINK_1G 0x5
+#define IXGBE_LED_ON 0xE
+#define IXGBE_LED_OFF 0xF
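+
+/*
+ * Editor's illustrative sketch, not part of the original header: each LED
+ * owns one byte of LEDCTL, so the _BASE masks above are moved into place by
+ * IXGBE_LED_OFFSET()/IXGBE_LED_MODE_SHIFT().  Forcing LED 'led' on, for
+ * example, means clearing its mode field and blink bit and writing the
+ * IXGBE_LED_ON mode (driver u32 typedef assumed):
+ */
+static inline u32 ixgbe_ledctl_force_on(u32 ledctl, unsigned int led)
+{
+	ledctl &= ~IXGBE_LED_MODE_MASK(led);
+	ledctl |= (u32)IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(led);
+	ledctl &= ~IXGBE_LED_BLINK(led);
+	return ledctl;
+}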
+
+/* AUTOC Bit Masks */
+#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
+#define IXGBE_AUTOC_KX4_SUPP 0x80000000
+#define IXGBE_AUTOC_KX_SUPP 0x40000000
+#define IXGBE_AUTOC_PAUSE 0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
+#define IXGBE_AUTOC_RF 0x08000000
+#define IXGBE_AUTOC_PD_TMR 0x06000000
+#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
+#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
+#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
+#define IXGBE_AUTOC_FECA 0x00040000
+#define IXGBE_AUTOC_FECR 0x00020000
+#define IXGBE_AUTOC_KR_SUPP 0x00010000
+#define IXGBE_AUTOC_AN_RESTART 0x00001000
+#define IXGBE_AUTOC_FLU 0x00000001
+#define IXGBE_AUTOC_LMS_SHIFT 13
+#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
+#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
+#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
+#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
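+
+/*
+ * Editor's illustrative sketch, not part of the original header: the
+ * IXGBE_AUTOC_LMS_* values above already include the 13-bit shift, so the
+ * link-mode-select field is compared directly after masking (driver u32
+ * typedef assumed):
+ */
+static inline int ixgbe_autoc_is_10g_serial(u32 autoc)
+{
+	return (autoc & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_10G_SERIAL;
+}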
+
+#define IXGBE_MACC_FLU 0x00000001
+#define IXGBE_MACC_FSV_10G 0x00030000
+#define IXGBE_MACC_FS 0x00040000
+#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+
+/* Veto Bit definition */
+#define IXGBE_MMNGC_MNG_VETO 0x00000001
+
+/* LINKS Bit Masks */
+#define IXGBE_LINKS_KX_AN_COMP 0x80000000
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+#define IXGBE_LINKS_MODE 0x18000000
+#define IXGBE_LINKS_RX_MODE 0x06000000
+#define IXGBE_LINKS_TX_MODE 0x01800000
+#define IXGBE_LINKS_XGXS_EN 0x00400000
+#define IXGBE_LINKS_SGMII_EN 0x02000000
+#define IXGBE_LINKS_PCS_1G_EN 0x00200000
+#define IXGBE_LINKS_1G_AN_EN 0x00100000
+#define IXGBE_LINKS_KX_AN_IDLE 0x00080000
+#define IXGBE_LINKS_1G_SYNC 0x00040000
+#define IXGBE_LINKS_10G_ALIGN 0x00020000
+#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
+#define IXGBE_LINKS_TL_FAULT 0x00001000
+#define IXGBE_LINKS_SIGNAL 0x00000F00
+
+#define IXGBE_LINKS_SPEED_NON_STD 0x08000000
+#define IXGBE_LINKS_SPEED_82599 0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
+#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
+#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+
+#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
+
+/* PCS1GLSTA Bit Masks */
+#define IXGBE_PCS1GLSTA_LINK_OK 1
+#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
+#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000
+#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000
+#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000
+#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
+#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000
+
+#define IXGBE_PCS1GANA_SYM_PAUSE 0x80
+#define IXGBE_PCS1GANA_ASM_PAUSE 0x100
+
+/* PCS1GLCTL Bit Masks */
+#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */
+#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1
+#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20
+#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
+#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
+#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
+
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE 0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE 0x0400
+#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
+
+/* SW Semaphore Register bitmasks */
+#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
+
+/* SW_FW_SYNC/GSSR definitions */
+#define IXGBE_GSSR_EEP_SM 0x0001
+#define IXGBE_GSSR_PHY0_SM 0x0002
+#define IXGBE_GSSR_PHY1_SM 0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */
+#define IXGBE_GSSR_I2C_MASK 0x1800
+#define IXGBE_GSSR_NVM_PHY_MASK 0xF
+
+/* FW Status register bitmask */
+#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
+
+/* EEC Register */
+#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
+#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */
+#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */
+#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */
+#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */
+#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define IXGBE_EEC_FWE_SHIFT 4
+#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */
+#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
+#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
+#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
+#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
+/* EEPROM Addressing bits based on type (0-small, 1-large) */
+#define IXGBE_EEC_ADDR_SIZE 0x00000400
+#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
+#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
+
+#define IXGBE_EEC_SIZE_SHIFT 11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
+#define IXGBE_EEPROM_OPCODE_BITS 8
+
+/* FLA Register */
+#define IXGBE_FLA_LOCKED 0x00000040
+
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
+/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define IXGBE_EEPROM_CHECKSUM 0x3F
+#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_PCIE_ANALOG_PTR 0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR 0x09
+#define IXGBE_CORE1_PTR 0x0A
+#define IXGBE_MAC0_PTR 0x0B
+#define IXGBE_MAC1_PTR 0x0C
+#define IXGBE_CSR0_CONFIG_PTR 0x0D
+#define IXGBE_CSR1_CONFIG_PTR 0x0E
+#define IXGBE_PCIE_ANALOG_PTR_X550 0x02
+#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000
+#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24
+#define IXGBE_PCIE_CONFIG_SIZE 0x08
+#define IXGBE_EEPROM_LAST_WORD 0x41
+#define IXGBE_FW_PTR 0x0F
+#define IXGBE_PBANUM0_PTR 0x15
+#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_ALT_MAC_ADDR_PTR 0x37
+#define IXGBE_FREE_SPACE_PTR 0x3E
+
+/* External Thermal Sensor Config */
+#define IXGBE_ETS_CFG 0x26
+#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0
+#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6
+#define IXGBE_ETS_TYPE_MASK 0x0038
+#define IXGBE_ETS_TYPE_SHIFT 3
+#define IXGBE_ETS_TYPE_EMC 0x000
+#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007
+#define IXGBE_ETS_DATA_LOC_MASK 0x3C00
+#define IXGBE_ETS_DATA_LOC_SHIFT 10
+#define IXGBE_ETS_DATA_INDEX_MASK 0x0300
+#define IXGBE_ETS_DATA_INDEX_SHIFT 8
+#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF
+
+#define IXGBE_SAN_MAC_ADDR_PTR 0x28
+#define IXGBE_DEVICE_CAPS 0x2C
+#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
+#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
+#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
+#define IXGBE_MAX_MSIX_VECTORS_82598 0x13
+
+/* MSI-X capability fields masks */
+#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
+
+/* Legacy EEPROM word offsets */
+#define IXGBE_ISCSI_BOOT_CAPS 0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
+
+/* EEPROM Commands - SPI */
+#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
+/* EEPROM reset Write Enable latch */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+
+/* EEPROM Read Register */
+#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */
+#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */
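+
+/*
+ * Editor's illustrative sketch, not part of the original header: an EERD
+ * read is started by writing the word offset shifted into the address bits
+ * together with the START bit, then polling for the done indication; the
+ * 16-bit word comes back above bit IXGBE_EEPROM_RW_REG_DATA.  Sketch
+ * assuming the driver's u16/u32 typedefs:
+ */
+static inline u32 ixgbe_eerd_start(u16 word_offset)
+{
+	return ((u32)word_offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+	       IXGBE_EEPROM_RW_REG_START;
+}
+
+static inline u16 ixgbe_eerd_data(u32 eerd)
+{
+	return (u16)(eerd >> IXGBE_EEPROM_RW_REG_DATA);
+}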
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */
+#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */
+#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */
+#define IXGBE_EEPROM_CCD_BIT 2
+
+#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
+#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */
+#endif
+
+/* Number of 5-microsecond intervals we wait for an EERD read and
+ * an EEWR write to complete */
+#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+
+/* Number of attempts we wait for the flash update to complete */
+#define IXGBE_FLUDONE_ATTEMPTS 20000
+
+#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
+#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
+
+#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
+#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
+#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
+#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
+#define IXGBE_FW_LESM_STATE_1 0x1
+#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
+#define IXGBE_FW_PATCH_VERSION_4 0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */
+
+/* FW header offset */
+#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
+#define IXGBE_X540_FW_MODULE_MASK 0x7FFF
+/* 4KB multiplier */
+#define IXGBE_X540_FW_MODULE_LENGTH 0x1000
+/* version word 2 (month & day) */
+#define IXGBE_X540_FW_PATCH_VERSION_2 0x5
+/* version word 3 (silicon compatibility & year) */
+#define IXGBE_X540_FW_PATCH_VERSION_3 0x6
+/* version word 4 (major & minor numbers) */
+#define IXGBE_X540_FW_PATCH_VERSION_4 0x7
+
+#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */
+#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */
+#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */
+
+/* PCI Bus Info */
+#define IXGBE_PCI_DEVICE_STATUS 0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
+#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
+#define IXGBE_PCI_LINK_WIDTH 0x3F0
+#define IXGBE_PCI_LINK_WIDTH_1 0x10
+#define IXGBE_PCI_LINK_WIDTH_2 0x20
+#define IXGBE_PCI_LINK_WIDTH_4 0x40
+#define IXGBE_PCI_LINK_WIDTH_8 0x80
+#define IXGBE_PCI_LINK_SPEED 0xF
+#define IXGBE_PCI_LINK_SPEED_2500 0x1
+#define IXGBE_PCI_LINK_SPEED_5000 0x2
+#define IXGBE_PCI_LINK_SPEED_8000 0x3
+#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
+#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
+
+#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf
+#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0
+#define IXGBE_PCIDEVCTRL2_50_100us 0x1
+#define IXGBE_PCIDEVCTRL2_1_2ms 0x2
+#define IXGBE_PCIDEVCTRL2_16_32ms 0x5
+#define IXGBE_PCIDEVCTRL2_65_130ms 0x6
+#define IXGBE_PCIDEVCTRL2_260_520ms 0x9
+#define IXGBE_PCIDEVCTRL2_1_2s 0xa
+#define IXGBE_PCIDEVCTRL2_4_8s 0xd
+#define IXGBE_PCIDEVCTRL2_17_34s 0xe
+
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+
+/* Check whether an address is multicast. This is a little-endian-specific check. */
+#define IXGBE_IS_MULTICAST(Address) \
+ (bool)(((u8 *)(Address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define IXGBE_IS_BROADCAST(Address) \
+ ((((u8 *)(Address))[0] == ((u8)0xff)) && \
+ (((u8 *)(Address))[1] == ((u8)0xff)))
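+
+/*
+ * Editor's illustrative sketch, not part of the original header: the two
+ * macros above operate on the leading byte(s) of a 6-byte MAC address, e.g.
+ * (driver u8 typedef assumed):
+ */
+static inline int ixgbe_addr_is_mcast_not_bcast(const u8 *mac_addr)
+{
+	return IXGBE_IS_MULTICAST(mac_addr) && !IXGBE_IS_BROADCAST(mac_addr);
+}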
+
+/* RAH */
+#define IXGBE_RAH_VIND_MASK 0x003C0000
+#define IXGBE_RAH_VIND_SHIFT 18
+#define IXGBE_RAH_AV 0x80000000
+#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+/* Header split receive */
+#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
+#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
+#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_RSC_DIS 0x00000020
+#define IXGBE_RFCTL_NFSW_DIS 0x00000040
+#define IXGBE_RFCTL_NFSR_DIS 0x00000080
+#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
+#define IXGBE_RFCTL_NFS_VER_SHIFT 8
+#define IXGBE_RFCTL_NFS_VER_2 0
+#define IXGBE_RFCTL_NFS_VER_3 1
+#define IXGBE_RFCTL_NFS_VER_4 2
+#define IXGBE_RFCTL_IPV6_DIS 0x00000400
+#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800
+#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000
+#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000
+#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
+/* Enable short packet padding to 64 bytes */
+#define IXGBE_TX_PAD_ENABLE 0x00000400
+#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
+/* This allows for 16K packets + 4K for VLAN */
+#define IXGBE_MAX_FRAME_SZ 0x40040000
+
+#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */
+#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* X540 supported only */
+#define IXGBE_RXDCTL_RLPML_EN 0x00008000
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+
+#define IXGBE_TSAUXC_EN_CLK 0x00000004
+#define IXGBE_TSAUXC_SYNCLK 0x00000008
+#define IXGBE_TSAUXC_SDP0_INT 0x00000040
+#define IXGBE_TSAUXC_EN_TT0 0x00000001
+#define IXGBE_TSAUXC_EN_TT1 0x00000002
+#define IXGBE_TSAUXC_ST0 0x00000010
+#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000
+
+#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0
+#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080
+#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100
+
+#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
+
+#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08
+#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */
+#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */
+#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK 0xFF000000 /* Rx Timestamp UP Mask */
+
+#define IXGBE_TSIM_SYS_WRAP 0x00000001
+#define IXGBE_TSIM_TXTS 0x00000002
+#define IXGBE_TSIM_TADJ 0x00000080
+
+#define IXGBE_TSICR_SYS_WRAP IXGBE_TSIM_SYS_WRAP
+#define IXGBE_TSICR_TXTS IXGBE_TSIM_TXTS
+#define IXGBE_TSICR_TADJ IXGBE_TSIM_TADJ
+
+#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF
+#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00
+#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01
+#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02
+#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03
+#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04
+
+#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00
+#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000
+#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100
+#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200
+#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300
+#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800
+#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900
+#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00
+#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00
+#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00
+#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
+
+#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
+#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
+#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
+#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
+#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
+/* Receive Priority Flow Control Enable */
+#define IXGBE_FCTRL_RPFCE 0x00004000
+#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
+#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */
+#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
+#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
+#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
+#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */
+#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */
+
+/* Multiple Receive Queue Control */
+#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
+#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */
+#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */
+#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */
+#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */
+#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */
+#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */
+#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
+#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000
+#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
+
+/* Queue Drop Enable */
+#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_HIDE_VLAN 0x00000002
+#define IXGBE_QDE_IDX_MASK 0x00007F00
+#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_WRITE 0x00010000
+#define IXGBE_QDE_READ 0x00020000
+
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+/* Multiple Transmit Queue Command Register */
+#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
+#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues, 1 packet buffer */
+#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
+#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
+#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */
+#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */
+#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */
+#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */
+#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
+#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */
+#define IXGBE_RXDADV_STAT_TSIP 0x00008000 /* Time Stamp in packet buffer */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* Rx packet buffer size in 1 KB units */
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* 64byte resolution (>> 6)
+ * + at bit 8 offset (<< 8)
+ * = (<< 2)
+ */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
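+
+/*
+ * Editor's illustrative sketch, not part of the original header: SRRCTL
+ * takes the Rx packet buffer size in 1 KB units (hence the >> 10) and the
+ * header buffer size in 64-byte units placed at bit 8, which the << 2 shift
+ * above produces in one step.  Hypothetical helper (driver u32 typedef
+ * assumed):
+ */
+static inline u32 ixgbe_srrctl_buf_sizes(u32 pkt_buf_bytes, u32 hdr_buf_bytes)
+{
+	u32 srrctl;
+
+	srrctl = (pkt_buf_bytes >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
+		 IXGBE_SRRCTL_BSIZEPKT_MASK;
+	srrctl |= (hdr_buf_bytes << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+		  IXGBE_SRRCTL_BSIZEHDR_MASK;
+	return srrctl;
+}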
+
+#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
+#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
+
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor. */
+#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */
+#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
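+
+/*
+ * Illustrative sketch (not part of the original Intel sources): decoding the
+ * RSS type and packet type that the advanced RX descriptor reports in the
+ * pkt_info field of its lower dword.  The helper names are hypothetical.
+ */
+static inline u32 ixgbe_rxd_rsstype_example(u32 pkt_info)
+{
+	/* e.g. IXGBE_RXDADV_RSSTYPE_IPV4_TCP for a hashed TCP/IPv4 frame */
+	return pkt_info & IXGBE_RXDADV_RSSTYPE_MASK;
+}
+
+static inline u32 ixgbe_rxd_pkttype_example(u32 pkt_info)
+{
+	/* set bits indicate which headers the parser recognized */
+	return pkt_info & IXGBE_RXDADV_PKTTYPE_MASK;
+}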
+
+/* Security Processing bit Indication */
+#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
+
+/* Masks to determine if packets should be dropped due to frame errors */
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE
+
+/* Multicast bit mask */
+#define IXGBE_MCSTCTRL_MFE 0x4
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Vlan-specific macros */
+#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
+#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P)))
+#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P)))
+#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P)))
+#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P)))
+#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P)))
+#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P)))
+#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P)))
+#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P)))
+#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P)))
+#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P)))
+#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P)))
+#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P)))
+#define IXGBE_PVTEITR(P) (((P) < 24) ? (0x00820 + ((P) * 4)) : \
+ (0x012300 + (((P) - 24) * 4)))
+#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P)))
+#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P)))
+#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P)))
+#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P)))
+#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \
+ : (0x0D000 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \
+ : (0x0D004 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \
+ : (0x0D008 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \
+ : (0x0D010 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \
+ : (0x0D018 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \
+ : (0x0D028 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \
+ : (0x0D014 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P)))
+#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P)))
+#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P)))
+#define IXGBE_PVFTTDLEN(P) (0x06008 + (0x40 * (P)))
+#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
+#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? (0x0100C + (0x40 * (P))) \
+ : (0x0D00C + (0x40 * ((P) - 64))))
+#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P)))
+#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x)))
+#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x)))
+#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x)))
+#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x)))
+#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x)))
+#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x)))
+#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
+
+#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index)))
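+
+/*
+ * Illustrative sketch (not part of the original Intel sources): the per-VF
+ * queue macros above expect a flat queue index, so a (VF, queue) pair is
+ * first flattened using the number of queues per pool.  The helper name is
+ * hypothetical.
+ */
+static inline u32 ixgbe_vf_txdt_reg_example(u32 q_per_pool, u32 vf, u32 q)
+{
+	/* e.g. 2 queues per pool, VF 3, queue 1 -> IXGBE_PVFTDT(7) */
+	return IXGBE_PVFTDTn(q_per_pool, vf, q);
+}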
+
+/* Little Endian defines */
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+
+/* Big Endian defines */
+#ifndef __be16
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+#endif
+
+enum ixgbe_fdir_pballoc_type {
+ IXGBE_FDIR_PBALLOC_NONE = 0,
+ IXGBE_FDIR_PBALLOC_64K = 1,
+ IXGBE_FDIR_PBALLOC_128K = 2,
+ IXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+/* Flow Director register values */
+#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
+#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21
+#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */
+#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */
+#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
+#define IXGBE_FDIRCTRL_FILTERMODE_MASK 0x00E00000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
+#define IXGBE_FDIRM_VLANID 0x00000001
+#define IXGBE_FDIRM_VLANP 0x00000002
+#define IXGBE_FDIRM_POOL 0x00000004
+#define IXGBE_FDIRM_L4P 0x00000008
+#define IXGBE_FDIRM_FLEX 0x00000010
+#define IXGBE_FDIRM_DIPv6 0x00000020
+#define IXGBE_FDIRM_L3P 0x00000040
+
+#define IXGBE_FDIRIP6M_INNER_MAC 0x03F0 /* bit 9:4 */
+#define IXGBE_FDIRIP6M_TUNNEL_TYPE 0x0800 /* bit 11 */
+#define IXGBE_FDIRIP6M_TNI_VNI 0xF000 /* bit 15:12 */
+#define IXGBE_FDIRIP6M_TNI_VNI_24 0x1000 /* bit 12 */
+#define IXGBE_FDIRIP6M_ALWAYS_MASK 0x040F /* bit 10, 3:0 */
+
+#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT 0
+#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT 16
+#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
+#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
+#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
+#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
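+
+/*
+ * Illustrative sketch (not part of the original Intel sources): unpacking the
+ * free-filter and collision counts from a FDIRFREE register value with the
+ * masks and shifts above.  The helper names are hypothetical.
+ */
+static inline u32 ixgbe_fdirfree_free_example(u32 fdirfree)
+{
+	return (fdirfree & IXGBE_FDIRFREE_FREE_MASK) >> IXGBE_FDIRFREE_FREE_SHIFT;
+}
+
+static inline u32 ixgbe_fdirfree_coll_example(u32 fdirfree)
+{
+	return (fdirfree & IXGBE_FDIRFREE_COLL_MASK) >> IXGBE_FDIRFREE_COLL_SHIFT;
+}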
+
+#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
+#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004
+#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
+#define IXGBE_FDIRCMD_IPV6 0x00000080
+#define IXGBE_FDIRCMD_CLEARHT 0x00000100
+#define IXGBE_FDIRCMD_DROP 0x00000200
+#define IXGBE_FDIRCMD_INT 0x00000400
+#define IXGBE_FDIRCMD_LAST 0x00000800
+#define IXGBE_FDIRCMD_COLLISION 0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
+#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT 23
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
+#define IXGBE_FDIR_INIT_DONE_POLL 10
+#define IXGBE_FDIRCMD_CMD_POLL 10
+#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000
+#define IXGBE_FDIR_DROP_QUEUE 127
+
+
+/* Manageability Host Interface defines */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_READ_SHADOW_RAM_CMD 0x31
+#define FW_READ_SHADOW_RAM_LEN 0x6
+#define FW_WRITE_SHADOW_RAM_CMD 0x33
+#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */
+#define FW_SHADOW_RAM_DUMP_CMD 0x36
+#define FW_SHADOW_RAM_DUMP_LEN 0
+#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET 3
+#define FW_MAX_READ_BUFFER_SIZE 1024
+#define FW_DISABLE_RXEN_CMD 0xDE
+#define FW_DISABLE_RXEN_LEN 0x1
+/* Host Interface Command Structures */
+
+struct ixgbe_hic_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
+
+struct ixgbe_hic_hdr2 {
+ u8 cmd;
+ u8 buf_len1;
+ u8 buf_len2;
+ u8 checksum;
+};
+
+struct ixgbe_hic_drv_info {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ u8 pad; /* end spacing to ensure length is mult. of dword */
+ u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+
+/* These need to be dword aligned */
+struct ixgbe_hic_read_shadow_ram {
+ struct ixgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct ixgbe_hic_write_shadow_ram {
+ struct ixgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct ixgbe_hic_disable_rxen {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad2;
+ u16 pad3;
+};
+
+
+/* Transmit Descriptor - Legacy */
+struct ixgbe_legacy_tx_desc {
+ u64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 vlan;
+ } fields;
+ } upper;
+};
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor - Legacy */
+struct ixgbe_legacy_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 vlan;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
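+
+/*
+ * Illustrative sketch (not part of the original Intel sources): a receive
+ * path typically polls the writeback half of the advanced descriptor and
+ * only consumes it once the Descriptor Done bit is set.  Byte-order handling
+ * is omitted for brevity; the helper name is hypothetical.
+ */
+static inline int ixgbe_adv_rxd_done_example(const union ixgbe_adv_rx_desc *rxd)
+{
+	return (rxd->wb.upper.status_error & IXGBE_RXDADV_STAT_DD) != 0;
+}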
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
+#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
+#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */
+#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
+#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
+#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st&Last TSO-full iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800
+#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
+#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
+#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */
+#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+#define IXGBE_ADVTXD_OUTER_IPLEN 16 /* Adv ctxt OUTERIPLEN shift */
+#define IXGBE_ADVTXD_TUNNEL_LEN 24 /* Adv ctxt TUNNELLEN shift */
+#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */
+#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */
+#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */
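+
+/*
+ * Illustrative sketch (not part of the original Intel sources): composing the
+ * cmd_type_len word of a single-buffer advanced TX data descriptor.  Offload
+ * and VLAN bits are left out for brevity; the helper name is hypothetical.
+ */
+static inline u32 ixgbe_advtxd_cmd_type_len_example(u32 data_len)
+{
+	return IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
+	       IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_EOP |
+	       (data_len & IXGBE_ADVTXD_DTALEN_MASK);
+}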
+
+/* Autonegotiation advertised speeds */
+typedef u32 ixgbe_autoneg_advertised;
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_100_FULL 0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0040
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+
+/* Physical layer type */
+typedef u32 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000
+
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from 802.1bb Annex O
+ */
+
+/* BitTimes (BT) conversion */
+#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
+#define IXGBE_B2BT(BT) (BT * 8)
+
+/* Calculate Delay to respond to PFC */
+#define IXGBE_PFC_D 672
+
+/* Calculate Cable Delay */
+#define IXGBE_CABLE_DC 5556 /* Delay Copper */
+#define IXGBE_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */
+#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */
+#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */
+
+#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC)
+
+/* Calculate Interface Delay 82598, 82599 */
+#define IXGBE_PHY_D 12800
+#define IXGBE_MAC_D 4096
+#define IXGBE_XAUI_D (2 * 1024)
+
+#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define IXGBE_HD 6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define IXGBE_PCI_DELAY 10000
+
+/* Calculate X540 delay value in bit times */
+#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID_X540) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
+
+/* Calculate 82599, 82598 delay value in bit times */
+#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
+
+/* Calculate low threshold delay values */
+#define IXGBE_LOW_DV_X540(_max_frame_tc) \
+ (2 * IXGBE_B2BT(_max_frame_tc) + \
+ (36 * IXGBE_PCI_DELAY / 25) + 1)
+#define IXGBE_LOW_DV(_max_frame_tc) \
+ (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
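+
+/*
+ * Illustrative sketch (not part of the original Intel sources): the delay
+ * value macros take the maximum frame size on the link and the maximum frame
+ * size of the traffic class, both in bytes, and return a delay in bit times
+ * used to size the flow-control high water mark.  The helper name is
+ * hypothetical.
+ */
+static inline u32 ixgbe_dv_82599_example(u32 max_frame_link, u32 max_frame_tc)
+{
+	/* e.g. ixgbe_dv_82599_example(1518, 1518) for standard frames */
+	return IXGBE_DV(max_frame_link, max_frame_tc);
+}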
+
+/* Software ATR hash keys */
+#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_MASK 0x3
+#define IXGBE_ATR_L4TYPE_UDP 0x1
+#define IXGBE_ATR_L4TYPE_TCP 0x2
+#define IXGBE_ATR_L4TYPE_SCTP 0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10
+enum ixgbe_atr_flow_type {
+ IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+ IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
+};
+
+/* Flow Director ATR input struct. */
+union ixgbe_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
+	 *
+	 * vm_pool - 1 byte
+	 * flow_type - 1 byte
+	 * vlan_id - 2 bytes
+	 * dst_ip - 16 bytes
+	 * src_ip - 16 bytes
+	 * inner_mac - 6 bytes
+	 * tunnel_type - 2 bytes
+	 * tni_vni - 4 bytes
+	 * src_port - 2 bytes
+	 * dst_port - 2 bytes
+	 * flex_bytes - 2 bytes
+	 * bkt_hash - 2 bytes
+	 */
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ u8 inner_mac[6];
+ __be16 tunnel_type;
+ __be32 tni_vni;
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 bkt_hash;
+ } formatted;
+ __be32 dword_stream[14];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
+};
+
+
+/*
+ * Unavailable: The FCoE Boot Option ROM is not present in the flash.
+ * Disabled: Present; boot order is not set for any targets on the port.
+ * Enabled: Present; boot order is set for at least one target on the port.
+ */
+enum ixgbe_fcoe_boot_status {
+ ixgbe_fcoe_bootstatus_disabled = 0,
+ ixgbe_fcoe_bootstatus_enabled = 1,
+ ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
+};
+
+enum ixgbe_eeprom_type {
+ ixgbe_eeprom_uninitialized = 0,
+ ixgbe_eeprom_spi,
+ ixgbe_flash,
+ ixgbe_eeprom_none /* No NVM support */
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82598EB,
+ ixgbe_mac_82599EB,
+ ixgbe_mac_82599_vf,
+ ixgbe_mac_X540,
+ ixgbe_mac_X540_vf,
+ /*
+ * X550EM MAC type decoder:
+ * ixgbe_mac_X550EM_x: "x" = Xeon
+ * ixgbe_mac_X550EM_a: "a" = Atom
+ */
+ ixgbe_mac_X550,
+ ixgbe_mac_X550EM_x,
+ ixgbe_mac_X550_vf,
+ ixgbe_mac_X550EM_x_vf,
+ ixgbe_num_macs
+};
+
+enum ixgbe_phy_type {
+ ixgbe_phy_unknown = 0,
+ ixgbe_phy_none,
+ ixgbe_phy_tn,
+ ixgbe_phy_aq,
+ ixgbe_phy_x550em_kr,
+ ixgbe_phy_x550em_kx4,
+ ixgbe_phy_cu_unknown,
+ ixgbe_phy_qt,
+ ixgbe_phy_xaui,
+ ixgbe_phy_nl,
+ ixgbe_phy_sfp_passive_tyco,
+ ixgbe_phy_sfp_passive_unknown,
+ ixgbe_phy_sfp_active_unknown,
+ ixgbe_phy_sfp_avago,
+ ixgbe_phy_sfp_ftl,
+ ixgbe_phy_sfp_ftl_active,
+ ixgbe_phy_sfp_unknown,
+ ixgbe_phy_sfp_intel,
+ ixgbe_phy_qsfp_passive_unknown,
+ ixgbe_phy_qsfp_active_unknown,
+ ixgbe_phy_qsfp_intel,
+ ixgbe_phy_qsfp_unknown,
+	ixgbe_phy_sfp_unsupported, /* Enforce bit set with unsupported module */
+ ixgbe_phy_generic
+};
+
+/*
+ * SFP+ module type IDs:
+ *
+ * ID Module Type
+ * =============
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ * 3 SFP_DA_CU_CORE0 - 82599-specific
+ * 4 SFP_DA_CU_CORE1 - 82599-specific
+ * 5 SFP_SR/LR_CORE0 - 82599-specific
+ * 6 SFP_SR/LR_CORE1 - 82599-specific
+ */
+enum ixgbe_sfp_type {
+ ixgbe_sfp_type_da_cu = 0,
+ ixgbe_sfp_type_sr = 1,
+ ixgbe_sfp_type_lr = 2,
+ ixgbe_sfp_type_da_cu_core0 = 3,
+ ixgbe_sfp_type_da_cu_core1 = 4,
+ ixgbe_sfp_type_srlr_core0 = 5,
+ ixgbe_sfp_type_srlr_core1 = 6,
+ ixgbe_sfp_type_da_act_lmt_core0 = 7,
+ ixgbe_sfp_type_da_act_lmt_core1 = 8,
+ ixgbe_sfp_type_1g_cu_core0 = 9,
+ ixgbe_sfp_type_1g_cu_core1 = 10,
+ ixgbe_sfp_type_1g_sx_core0 = 11,
+ ixgbe_sfp_type_1g_sx_core1 = 12,
+ ixgbe_sfp_type_1g_lx_core0 = 13,
+ ixgbe_sfp_type_1g_lx_core1 = 14,
+ ixgbe_sfp_type_not_present = 0xFFFE,
+ ixgbe_sfp_type_unknown = 0xFFFF
+};
+
+enum ixgbe_media_type {
+ ixgbe_media_type_unknown = 0,
+ ixgbe_media_type_fiber,
+ ixgbe_media_type_fiber_qsfp,
+ ixgbe_media_type_fiber_lco,
+ ixgbe_media_type_copper,
+ ixgbe_media_type_backplane,
+ ixgbe_media_type_cx4,
+ ixgbe_media_type_virtual
+};
+
+/* Flow Control Settings */
+enum ixgbe_fc_mode {
+ ixgbe_fc_none = 0,
+ ixgbe_fc_rx_pause,
+ ixgbe_fc_tx_pause,
+ ixgbe_fc_full,
+ ixgbe_fc_default
+};
+
+/* Smart Speed Settings */
+#define IXGBE_SMARTSPEED_MAX_RETRIES 3
+enum ixgbe_smart_speed {
+ ixgbe_smart_speed_auto = 0,
+ ixgbe_smart_speed_on,
+ ixgbe_smart_speed_off
+};
+
+/* PCI bus types */
+enum ixgbe_bus_type {
+ ixgbe_bus_type_unknown = 0,
+ ixgbe_bus_type_pci,
+ ixgbe_bus_type_pcix,
+ ixgbe_bus_type_pci_express,
+ ixgbe_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum ixgbe_bus_speed {
+ ixgbe_bus_speed_unknown = 0,
+ ixgbe_bus_speed_33 = 33,
+ ixgbe_bus_speed_66 = 66,
+ ixgbe_bus_speed_100 = 100,
+ ixgbe_bus_speed_120 = 120,
+ ixgbe_bus_speed_133 = 133,
+ ixgbe_bus_speed_2500 = 2500,
+ ixgbe_bus_speed_5000 = 5000,
+ ixgbe_bus_speed_8000 = 8000,
+ ixgbe_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum ixgbe_bus_width {
+ ixgbe_bus_width_unknown = 0,
+ ixgbe_bus_width_pcie_x1 = 1,
+ ixgbe_bus_width_pcie_x2 = 2,
+ ixgbe_bus_width_pcie_x4 = 4,
+ ixgbe_bus_width_pcie_x8 = 8,
+ ixgbe_bus_width_32 = 32,
+ ixgbe_bus_width_64 = 64,
+ ixgbe_bus_width_reserved
+};
+
+struct ixgbe_addr_filter_info {
+ u32 num_mc_addrs;
+ u32 rar_used_count;
+ u32 mta_in_use;
+ u32 overflow_promisc;
+ bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct ixgbe_bus_info {
+ enum ixgbe_bus_speed speed;
+ enum ixgbe_bus_width width;
+ enum ixgbe_bus_type type;
+
+ u16 func;
+ u16 lan_id;
+};
+
+/* Flow control parameters */
+struct ixgbe_fc_info {
+ u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */
+ u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */
+ u16 pause_time; /* Flow Control Pause timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ bool disable_fc_autoneg; /* Do not autonegotiate FC */
+ bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+ enum ixgbe_fc_mode current_mode; /* FC mode in effect */
+ enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+struct ixgbe_hw_stats {
+ u64 crcerrs;
+ u64 illerrc;
+ u64 errbc;
+ u64 mspdc;
+ u64 mpctotal;
+ u64 mpc[8];
+ u64 mlfc;
+ u64 mrfc;
+ u64 rlec;
+ u64 lxontxc;
+ u64 lxonrxc;
+ u64 lxofftxc;
+ u64 lxoffrxc;
+ u64 pxontxc[8];
+ u64 pxonrxc[8];
+ u64 pxofftxc[8];
+ u64 pxoffrxc[8];
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc[8];
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mngprc;
+ u64 mngpdc;
+ u64 mngptc;
+ u64 tor;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 xec;
+ u64 qprc[16];
+ u64 qptc[16];
+ u64 qbrc[16];
+ u64 qbtc[16];
+ u64 qprdc[16];
+ u64 pxon2offc[8];
+ u64 fdirustat_add;
+ u64 fdirustat_remove;
+ u64 fdirfstat_fadd;
+ u64 fdirfstat_fremove;
+ u64 fdirmatch;
+ u64 fdirmiss;
+ u64 fccrc;
+ u64 fclast;
+ u64 fcoerpdc;
+ u64 fcoeprc;
+ u64 fcoeptc;
+ u64 fcoedwrc;
+ u64 fcoedwtc;
+ u64 fcoe_noddp;
+ u64 fcoe_noddp_ext_buff;
+ u64 ldpcec;
+ u64 pcrc8ec;
+ u64 b2ospc;
+ u64 b2ogprc;
+ u64 o2bgptc;
+ u64 o2bspc;
+};
+
+/* forward declaration */
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+
+/* Function pointer table */
+struct ixgbe_eeprom_operations {
+ s32 (*init_params)(struct ixgbe_hw *);
+ s32 (*read)(struct ixgbe_hw *, u16, u16 *);
+ s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ s32 (*write)(struct ixgbe_hw *, u16, u16);
+ s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
+ s32 (*update_checksum)(struct ixgbe_hw *);
+ s32 (*calc_checksum)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+ s32 (*reset_hw)(struct ixgbe_hw *);
+ s32 (*start_hw)(struct ixgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ void (*enable_relaxed_ordering)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
+ s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+ s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *);
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+ void (*set_lan_id)(struct ixgbe_hw *);
+ s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+ s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+ s32 (*setup_sfp)(struct ixgbe_hw *);
+ s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ s32 (*disable_sec_rx_path)(struct ixgbe_hw *);
+ s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
+ s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*release_swfw_sync)(struct ixgbe_hw *, u32);
+ s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
+
+ /* Link */
+ void (*disable_tx_laser)(struct ixgbe_hw *);
+ void (*enable_tx_laser)(struct ixgbe_hw *);
+ void (*flap_tx_laser)(struct ixgbe_hw *);
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ bool *);
+
+ /* Packet Buffer manipulation */
+ void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int);
+
+ /* LED */
+ s32 (*led_on)(struct ixgbe_hw *, u32);
+ s32 (*led_off)(struct ixgbe_hw *, u32);
+ s32 (*blink_led_start)(struct ixgbe_hw *, u32);
+ s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+ s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
+ s32 (*clear_rar)(struct ixgbe_hw *, u32);
+ s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
+ s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
+ s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*init_rx_addrs)(struct ixgbe_hw *);
+ s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+ ixgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+ ixgbe_mc_addr_itr, bool clear);
+ s32 (*enable_mc)(struct ixgbe_hw *);
+ s32 (*disable_mc)(struct ixgbe_hw *);
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+ s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *);
+ s32 (*init_uta_tables)(struct ixgbe_hw *);
+ void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
+
+ /* Flow Control */
+ s32 (*fc_enable)(struct ixgbe_hw *);
+
+ /* Manageability interface */
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+ s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
+ s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+ s32 (*dmac_config)(struct ixgbe_hw *hw);
+ s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
+ s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map);
+ s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee);
+ void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
+ unsigned int);
+ void (*disable_rx)(struct ixgbe_hw *hw);
+ void (*enable_rx)(struct ixgbe_hw *hw);
+ s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+ void (*disable_mdd)(struct ixgbe_hw *hw);
+ void (*enable_mdd)(struct ixgbe_hw *hw);
+ void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
+ void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
+};
+
+struct ixgbe_phy_operations {
+ s32 (*identify)(struct ixgbe_hw *);
+ s32 (*identify_sfp)(struct ixgbe_hw *);
+ s32 (*init)(struct ixgbe_hw *);
+ s32 (*reset)(struct ixgbe_hw *);
+ s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+ s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+ s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
+ s32 (*setup_link)(struct ixgbe_hw *);
+ s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+ s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
+ s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+	s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8, u8 *);
+	s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8, u8 *);
+ s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ void (*i2c_bus_clear)(struct ixgbe_hw *);
+ s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+ s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
+ s32 (*check_overtemp)(struct ixgbe_hw *);
+};
+
+struct ixgbe_eeprom_info {
+ struct ixgbe_eeprom_operations ops;
+ enum ixgbe_eeprom_type type;
+ u32 semaphore_delay;
+ u16 word_size;
+ u16 address_bits;
+ u16 word_page_size;
+};
+
+#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+struct ixgbe_mac_info {
+ struct ixgbe_mac_operations ops;
+ enum ixgbe_mac_type type;
+ u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ /* prefix for World Wide Node Name (WWNN) */
+ u16 wwnn_prefix;
+ /* prefix for World Wide Port Name (WWPN) */
+ u16 wwpn_prefix;
+#define IXGBE_MAX_MTA 128
+ u32 mta_shadow[IXGBE_MAX_MTA];
+ s32 mc_filter_type;
+ u32 mcft_size;
+ u32 vft_size;
+ u32 num_rar_entries;
+ u32 rar_highwater;
+ u32 rx_pb_size;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 orig_autoc;
+ u8 san_mac_rar_index;
+ bool get_link_status;
+ u32 orig_autoc2;
+ u16 max_msix_vectors;
+ bool arc_subsystem_valid;
+ bool orig_link_settings_stored;
+ bool autotry_restart;
+ u8 flags;
+ struct ixgbe_thermal_sensor_data thermal_sensor_data;
+ bool thermal_sensor_enabled;
+ struct ixgbe_dmac_config dmac_config;
+ bool set_lben;
+};
+
+struct ixgbe_phy_info {
+ struct ixgbe_phy_operations ops;
+ enum ixgbe_phy_type type;
+ u32 addr;
+ u32 id;
+ enum ixgbe_sfp_type sfp_type;
+ bool sfp_setup_needed;
+ u32 revision;
+ enum ixgbe_media_type media_type;
+ u32 phy_semaphore_mask;
+ u8 lan_id;
+ bool reset_disable;
+ ixgbe_autoneg_advertised autoneg_advertised;
+ enum ixgbe_smart_speed smart_speed;
+ bool smart_speed_active;
+ bool multispeed_fiber;
+ bool reset_if_overtemp;
+ bool qsfp_shared_i2c_bus;
+};
+
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+ void (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *, u16);
+ s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+ s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+struct ixgbe_hw {
+ u8 *hw_addr;
+ void *back;
+ struct ixgbe_mac_info mac;
+ struct ixgbe_addr_filter_info addr_ctrl;
+ struct ixgbe_fc_info fc;
+ struct ixgbe_phy_info phy;
+ struct ixgbe_eeprom_info eeprom;
+ struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ bool adapter_stopped;
+ int api_version;
+ bool force_full_reset;
+ bool allow_unsupported_sfp;
+ bool wol_enabled;
+};
+
+#define ixgbe_call_func(hw, func, params, error) \
+ (func != NULL) ? func params : error
+
+
+/* Error Codes */
+#define IXGBE_SUCCESS 0
+#define IXGBE_ERR_EEPROM -1
+#define IXGBE_ERR_EEPROM_CHECKSUM -2
+#define IXGBE_ERR_PHY -3
+#define IXGBE_ERR_CONFIG -4
+#define IXGBE_ERR_PARAM -5
+#define IXGBE_ERR_MAC_TYPE -6
+#define IXGBE_ERR_UNKNOWN_PHY -7
+#define IXGBE_ERR_LINK_SETUP -8
+#define IXGBE_ERR_ADAPTER_STOPPED -9
+#define IXGBE_ERR_INVALID_MAC_ADDR -10
+#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
+#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
+#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
+#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
+#define IXGBE_ERR_RESET_FAILED -15
+#define IXGBE_ERR_SWFW_SYNC -16
+#define IXGBE_ERR_PHY_ADDR_INVALID -17
+#define IXGBE_ERR_I2C -18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
+#define IXGBE_ERR_SFP_NOT_PRESENT -20
+#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
+#define IXGBE_ERR_FDIR_REINIT_FAILED -23
+#define IXGBE_ERR_EEPROM_VERSION -24
+#define IXGBE_ERR_NO_SPACE -25
+#define IXGBE_ERR_OVERTEMP -26
+#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED -28
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define IXGBE_ERR_PBA_SECTION -31
+#define IXGBE_ERR_INVALID_ARGUMENT -32
+#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
+#define IXGBE_ERR_OUT_OF_MEM -34
+#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36
+#define IXGBE_ERR_EEPROM_PROTECTED_REGION -37
+#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
+
+#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+
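+/*
+ * Illustrative sketch (not part of the original Intel sources): typical use of
+ * the call-through macro above, returning IXGBE_NOT_IMPLEMENTED when the ops
+ * table entry has not been populated.  The wrapper name is hypothetical.
+ */
+static inline s32 ixgbe_call_reset_hw_example(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}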
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
+#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C))
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00))
+#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00))
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520))
+#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00))
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
+
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
+
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
+
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
+
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
+
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
+
+#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
+#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
+
+#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0
+#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \
+ (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \
+ (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
+#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
+#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
+#define IXGBE_SB_IOSF_TARGET_KX4_PHY 1
+#define IXGBE_SB_IOSF_TARGET_KX4_PCS 2
+
+#endif /* _IXGBE_TYPE_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c
new file mode 100755
index 00000000..e6b6c517
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c
@@ -0,0 +1,725 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_api.h"
+#include "ixgbe_type.h"
+#include "ixgbe_vf.h"
+#ident "$Id: ixgbe_vf.c,v 1.62 2013/06/27 21:30:59 jtkirshe Exp $"
+
+#ifndef IXGBE_VFWRITE_REG
+#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
+#endif
+#ifndef IXGBE_VFREAD_REG
+#define IXGBE_VFREAD_REG IXGBE_READ_REG
+#endif
+
+/**
+ * ixgbe_init_ops_vf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the generic function pointers; adapter-specific code can
+ * override any of them by assigning its own function pointers.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
+{
+ /* MAC */
+ hw->mac.ops.init_hw = ixgbe_init_hw_vf;
+ hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
+ hw->mac.ops.start_hw = ixgbe_start_hw_vf;
+ /* Cannot clear stats on VF */
+ hw->mac.ops.clear_hw_cntrs = NULL;
+ hw->mac.ops.get_media_type = NULL;
+ hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
+ hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
+ hw->mac.ops.get_bus_info = NULL;
+
+ /* Link */
+ hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
+ hw->mac.ops.check_link = ixgbe_check_mac_link_vf;
+ hw->mac.ops.get_link_capabilities = NULL;
+
+ /* RAR, Multicast, VLAN */
+ hw->mac.ops.set_rar = ixgbe_set_rar_vf;
+ hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
+ hw->mac.ops.init_rx_addrs = NULL;
+ hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
+ hw->mac.ops.enable_mc = NULL;
+ hw->mac.ops.disable_mc = NULL;
+ hw->mac.ops.clear_vfta = NULL;
+ hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
+
+ hw->mac.max_tx_queues = 1;
+ hw->mac.max_rx_queues = 1;
+
+ hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
+
+ return IXGBE_SUCCESS;
+}
+
+/* ixgbe_virt_clr_reg - Set register to default (power on) state.
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw)
+{
+ int i;
+ u32 vfsrrctl;
+ u32 vfdca_rxctrl;
+ u32 vfdca_txctrl;
+
+ /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
+ vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+ /* DCA_RXCTRL default value */
+ vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+
+ /* DCA_TXCTRL default value */
+ vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_TXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_TXCTRL_DATA_RRO_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+ for (i = 0; i < 7; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl);
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type, clears
+ * all on chip counters, initializes receive address registers, multicast
+ * table, VLAN filter table, calls routine to set up link and flow control
+ * settings, and leaves transmit and receive units disabled and uninitialized
+ **/
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw)
+{
+ s32 status = hw->mac.ops.start_hw(hw);
+
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, and
+ * masks and clears all interrupts.
+ **/
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+ s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ DEBUGFUNC("ixgbevf_reset_hw_vf");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ /* reset the api version */
+ hw->api_version = ixgbe_mbox_api_10;
+
+ DEBUGOUT("Issuing a function level reset to MAC\n");
+
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+ IXGBE_WRITE_FLUSH(hw);
+
+ msec_delay(50);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+ timeout--;
+ usec_delay(5);
+ }
+
+ if (!timeout)
+ return IXGBE_ERR_RESET_FAILED;
+
+ /* Reset VF registers to initial values */
+ ixgbe_virt_clr_reg(hw);
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = IXGBE_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1, 0);
+
+ msec_delay(10);
+
+ /*
+ * set our "perm_addr" based on info provided by PF
+ * also set up the mc_filter_type which is piggy backed
+ * on the mac address in word 3
+ */
+ ret_val = mbx->ops.read_posted(hw, msgbuf,
+ IXGBE_VF_PERMADDR_MSG_LEN, 0);
+ if (ret_val)
+ return ret_val;
+
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
+ msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+
+ memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
+{
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+	/* Clear interrupt mask to stop interrupts from being generated */
+ IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+ }
+ /* Clear packet split and pool config */
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+ /* flush all queues disables */
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(2);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts 12 bits from a multicast address to determine which bit-vector
+ * to set in the multicast table. The hardware uses 12 bits of incoming rx
+ * multicast addresses to determine the bit-vector to check in the MTA.
+ * Which of the 4 combinations of 12 bits the hardware uses is selected by
+ * the MO field of the MCSTCTRL register, which is set during initialization
+ * to mc_filter_type.
+ **/
+STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ DEBUGOUT("MC filter type param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+	/* vector can only be 12 bits wide or the table boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
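+
+/*
+ * Illustrative worked example (not part of the original Intel sources): with
+ * the default filter type 0, bits [47:36] of the destination address select
+ * the bit-vector.  For an address whose last two bytes are 0x12 and 0x34 the
+ * vector is (0x12 >> 4) | (0x34 << 4) == 0x341.
+ */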
+
+STATIC void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
+ u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 retmsg[IXGBE_VFMAILBOX_SIZE];
+ s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
+
+ if (!retval)
+ mbx->ops.read_posted(hw, retmsg, size, 0);
+}
+
+/**
+ * ixgbe_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ **/
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+ UNREFERENCED_3PARAMETER(vmdq, enable_addr, index);
+
+ memset(msgbuf, 0, 12);
+ msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller supplied function to return next address in list
+ *
+ * Updates the Multicast Table Array.
+ **/
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ u16 *vector_list = (u16 *)&msgbuf[1];
+ u32 vector;
+ u32 cnt, i;
+ u32 vmdq;
+
+ UNREFERENCED_1PARAMETER(clear);
+
+ DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
+
+	/* Each entry in the list uses one 16-bit word. We have 30 such
+	 * words available in the HW msg buffer (minus one dword for the
+	 * msg type), so up to 30 hash values can be packed into a single
+	 * message. If there are more than 30 MC addresses to add, the
+	 * extras are dropped for now; code to handle more than 30 can be
+	 * added later. It would be unusual for a server to request that
+	 * many multicast addresses outside of large enterprise network
+	 * environments.
+	 */
+
+ DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+
+ cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < cnt; i++) {
+ vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+ DEBUGOUT1("Hash value = 0x%03X\n", vector);
+ vector_list[i] = (u16)vector;
+ }
+
+ return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0);
+}
+
+/**
+ * ixgbe_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ **/
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+ s32 ret_val;
+ UNREFERENCED_1PARAMETER(vind);
+
+ msgbuf[0] = IXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+	/* Setting the 8-bit MSG INFO field to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 2, 0);
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 1, 0);
+
+ if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_SUCCESS;
+
+ return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK);
+}
+
+/**
+ * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return IXGBE_VF_MAX_TX_QUEUES;
+}
+
+/**
+ * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of receive queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return IXGBE_VF_MAX_RX_QUEUES;
+}
+
+/**
+ * ixgbe_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: buffer in which to return the permanent MAC address
+ **/
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++)
+ mac_addr[i] = hw->mac.perm_addr[i];
+
+ return IXGBE_SUCCESS;
+}
+
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ /*
+ * If index is one then this is the start of a new list and needs
+ * indication to the PF so it can do its own list management.
+ * If it is zero then that tells the PF to just clear all of
+ * this VF's macvlans and there is no new list.
+ */
+ msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+ if (addr)
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ if (!ret_val)
+ if (msgbuf[0] == (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
+ ret_val = IXGBE_ERR_OUT_OF_MEM;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Sets the link speed in the AUTOC register and restarts the link.
+ **/
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 links_reg;
+ u32 in_msg = 0;
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ usec_delay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ }
+
+ /* if the read failed it could just be a mailbox collision, best wait
+ * until we are called again and don't report an error
+ */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ goto out;
+
+ if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+ /* msg is not CTS; if it is a NACK we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ ret_val = -1;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret_val = -1;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return ret_val;
+}
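+
+/* Summary of the link-check logic above: link is reported up only once
+ * the VFLINKS register shows IXGBE_LINKS_UP and the PF mailbox is still
+ * answering with CTS. A detected reset, an expired mailbox timeout, a
+ * NACK or a failed mailbox read all leave get_link_status set, so the
+ * next call re-checks the link from scratch.
+ */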
+
+/**
+ * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 msgbuf[2];
+
+ msgbuf[0] = IXGBE_VF_SET_LPE;
+ msgbuf[1] = max_size;
+ ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+ int err;
+ u32 msg[3];
+
+ /* Negotiate the mailbox API version */
+ msg[0] = IXGBE_VF_API_NEGOTIATE;
+ msg[1] = api;
+ msg[2] = 0;
+ err = hw->mbx.ops.write_posted(hw, msg, 3, 0);
+
+ if (!err)
+ err = hw->mbx.ops.read_posted(hw, msg, 3, 0);
+
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* Store value and return 0 on success */
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+ hw->api_version = api;
+ return 0;
+ }
+
+ err = IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ return err;
+}
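+
+/* For example, a VF driver that wants the api_11 mailbox features would
+ * typically negotiate before querying its queue limits, roughly:
+ *
+ *	if (!ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11))
+ *		ixgbevf_get_queues(hw, &num_tcs, &default_tc);
+ *
+ * ixgbevf_get_queues() below returns immediately unless api_11 was
+ * negotiated successfully.
+ */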
+
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc)
+{
+ int err;
+ u32 msg[5];
+
+ /* do nothing if API doesn't support ixgbevf_get_queues */
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_11:
+ break;
+ default:
+ return 0;
+ }
+
+ /* Fetch queue configuration from the PF */
+ msg[0] = IXGBE_VF_GET_QUEUES;
+ msg[1] = msg[2] = msg[3] = msg[4] = 0;
+ err = hw->mbx.ops.write_posted(hw, msg, 5, 0);
+
+ if (!err)
+ err = hw->mbx.ops.read_posted(hw, msg, 5, 0);
+
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /*
+ * if we didn't get an ACK there must have been
+ * some sort of mailbox error so we should treat it
+ * as such
+ */
+ if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_MBX;
+
+ /* record and validate values from message */
+ hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+ if (hw->mac.max_tx_queues == 0 ||
+ hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+ hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+ hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+ if (hw->mac.max_rx_queues == 0 ||
+ hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+ hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+ *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+ /* in case of unknown state assume we cannot tag frames */
+ if (*num_tcs > hw->mac.max_rx_queues)
+ *num_tcs = 1;
+
+ *default_tc = msg[IXGBE_VF_DEF_QUEUE];
+ /* default to queue 0 on out-of-bounds queue number */
+ if (*default_tc >= hw->mac.max_tx_queues)
+ *default_tc = 0;
+ }
+
+ return err;
+}
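+
+/* Reply layout assumed above: msg[IXGBE_VF_TX_QUEUES] and
+ * msg[IXGBE_VF_RX_QUEUES] carry the per-VF queue limits,
+ * msg[IXGBE_VF_TRANS_VLAN] the number of traffic classes and
+ * msg[IXGBE_VF_DEF_QUEUE] the default queue; out-of-range values are
+ * replaced with safe defaults.
+ */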
+
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h
new file mode 100755
index 00000000..3c1c1689
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h
@@ -0,0 +1,145 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+#ident "$Id: ixgbe_vf.h,v 1.37 2013/11/07 08:18:53 jtkirshe Exp $"
+
+#define IXGBE_VF_IRQ_CLEAR_MASK 7
+#define IXGBE_VF_MAX_TX_QUEUES 8
+#define IXGBE_VF_MAX_RX_QUEUES 8
+
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
+
+#define IXGBE_VFCTRL 0x00000
+#define IXGBE_VFSTATUS 0x00008
+#define IXGBE_VFLINKS 0x00010
+#define IXGBE_VFFRTIMER 0x00048
+#define IXGBE_VFRXMEMWRAP 0x03190
+#define IXGBE_VTEICR 0x00100
+#define IXGBE_VTEICS 0x00104
+#define IXGBE_VTEIMS 0x00108
+#define IXGBE_VTEIMC 0x0010C
+#define IXGBE_VTEIAC 0x00110
+#define IXGBE_VTEIAM 0x00114
+#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
+#define IXGBE_VTIVAR_MISC 0x00140
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
+/* define IXGBE_VFPBACL still says TBD in EAS */
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
+#define IXGBE_VFPSRTYPE 0x00300
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
+#define IXGBE_VFGPRC 0x0101C
+#define IXGBE_VFGPTC 0x0201C
+#define IXGBE_VFGORC_LSB 0x01020
+#define IXGBE_VFGORC_MSB 0x01024
+#define IXGBE_VFGOTC_LSB 0x02020
+#define IXGBE_VFGOTC_MSB 0x02024
+#define IXGBE_VFMPRC 0x01034
+#define IXGBE_VFMRQC 0x3000
+#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4))
+#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4))
+
+
+struct ixgbevf_hw_stats {
+ u64 base_vfgprc;
+ u64 base_vfgptc;
+ u64 base_vfgorc;
+ u64 base_vfgotc;
+ u64 base_vfmprc;
+
+ u64 last_vfgprc;
+ u64 last_vfgptc;
+ u64 last_vfgorc;
+ u64 last_vfgotc;
+ u64 last_vfmprc;
+
+ u64 vfgprc;
+ u64 vfgptc;
+ u64 vfgorc;
+ u64 vfgotc;
+ u64 vfmprc;
+
+ u64 saved_reset_vfgprc;
+ u64 saved_reset_vfgptc;
+ u64 saved_reset_vfgorc;
+ u64 saved_reset_vfgotc;
+ u64 saved_reset_vfmprc;
+};
+
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool autoneg_wait_to_complete);
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr,
+ bool clear);
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc);
+
+#ifdef IXGBEVF_OSDEP2
+#include "ixgbevf_osdep2.h"
+
+#endif /* IXGBEVF_OSDEP2 */
+#endif /* __IXGBE_VF_H__ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c
new file mode 100755
index 00000000..ab384502
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c
@@ -0,0 +1,1038 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES 128
+#define IXGBE_X540_MC_TBL_SIZE 128
+#define IXGBE_X540_VFT_TBL_SIZE 128
+#define IXGBE_X540_RX_PB_SIZE 384
+
+STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+/**
+ * ixgbe_init_ops_X540 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for X540.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X540");
+
+ ret_val = ixgbe_init_phy_ops_generic(hw);
+ ret_val = ixgbe_init_ops_generic(hw);
+
+
+ /* EEPROM */
+ eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
+ eeprom->ops.read = &ixgbe_read_eerd_X540;
+ eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
+ eeprom->ops.write = &ixgbe_write_eewr_X540;
+ eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
+ eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
+ eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
+ eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
+
+ /* PHY */
+ phy->ops.init = &ixgbe_init_phy_ops_generic;
+ phy->ops.reset = NULL;
+
+ /* MAC */
+ mac->ops.reset_hw = &ixgbe_reset_hw_X540;
+ mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
+ mac->ops.get_media_type = &ixgbe_get_media_type_X540;
+ mac->ops.get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_X540;
+ mac->ops.read_analog_reg8 = NULL;
+ mac->ops.write_analog_reg8 = NULL;
+ mac->ops.start_hw = &ixgbe_start_hw_X540;
+ mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
+ mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+ mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
+ mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
+ mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
+ mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
+
+ /* RAR, Multicast, VLAN */
+ mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
+ mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
+ mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
+ mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+ mac->rar_highwater = 1;
+ mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
+ mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
+ mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
+ mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+
+ /* Link */
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_generic;
+ mac->ops.setup_link = &ixgbe_setup_mac_link_X540;
+ mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
+ mac->ops.check_link = &ixgbe_check_mac_link_generic;
+
+
+ mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
+ mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ /*
+ * FWSM register
+ * ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+ IXGBE_FWSM_MODE_MASK) ? true : false;
+
+ hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+
+ /* LEDs */
+ mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
+ mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
+
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+
+ mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_X540 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_media_type_X540 - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_1PARAMETER(hw);
+ return ixgbe_media_type_copper;
+}
+
+/**
+ * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ **/
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ DEBUGFUNC("ixgbe_setup_mac_link_X540");
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
+}
+
+/**
+ * ixgbe_reset_hw_X540 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ **/
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 ctrl, i;
+
+ DEBUGFUNC("ixgbe_reset_hw_X540");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+mac_reset_top:
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Reset polling failed to complete.\n");
+ }
+ msec_delay(100);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+reset_hw_out:
+ return status;
+}
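+
+/* Note that the reset sequence above may run twice: when
+ * IXGBE_FLAGS_DOUBLE_RESET_REQUIRED is set (recovery from certain error
+ * conditions), the code clears the flag and jumps back to mac_reset_top
+ * after the 100 ms settle delay.
+ */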
+
+/**
+ * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function and the
+ * generation 2 start_hw function, then performs any revision-specific
+ * operations.
+ **/
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_start_hw_X540");
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ret_val = ixgbe_start_hw_gen2(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u16 ext_ability = 0;
+
+ DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ DEBUGFUNC("ixgbe_init_eeprom_params_X540");
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_read_eerd_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_read_eerd_generic(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads a 16 bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_read_eerd_buffer_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_read_eerd_buffer_generic(hw, offset,
+ words, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_eewr_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_write_eewr_generic(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_eewr_buffer_X540");
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_write_eewr_buffer_generic(hw, offset,
+ words, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ *
+ * This function does not use synchronization for EERD and EEWR. It can
+ * be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ u16 i, j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+ u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
+ u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
+
+ /* Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores here. Instead use
+ * ixgbe_read_eerd_generic
+ */
+
+ DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
+
+ /* Include 0x0-0x3F in the checksum */
+ for (i = 0; i <= checksum_last_word; i++) {
+ if (ixgbe_read_eerd_generic(hw, i, &word)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+ if (i != IXGBE_EEPROM_CHECKSUM)
+ checksum += word;
+ }
+
+ /* Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (pointer + length) >= hw->eeprom.word_size)
+ continue;
+
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (ixgbe_read_eerd_generic(hw, j, &word)) {
+ DEBUGOUT("EEPROM read failed\n");
+ return IXGBE_ERR_EEPROM;
+ }
+ checksum += word;
+ }
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return (s32)checksum;
+}
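+
+/* Checksum scheme used above: every word in 0x00-0x3F except the stored
+ * checksum word, plus the sections referenced by pointers 0x03 and
+ * 0x06-0x0E, is summed; the value returned is IXGBE_EEPROM_SUM minus that
+ * sum, so summing all covered words together with the stored checksum
+ * yields IXGBE_EEPROM_SUM.
+ */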
+
+/**
+ * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+ u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ goto out;
+
+ checksum = (u16)(status & 0xffff);
+
+ /* Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+ if (status)
+ goto out;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid EEPROM checksum");
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+ }
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing the EEPROM to shadow RAM using the EEWR register, software
+ * calculates the checksum, updates the EEPROM, and instructs the hardware
+ * to update the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum;
+
+ DEBUGFUNC("ixgbe_update_eeprom_checksum_X540");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ goto out;
+
+ checksum = (u16)(status & 0xffff);
+
+ /* Do not use hw->eeprom.ops.write because we do not want to
+ * take the synchronization semaphores twice here.
+ */
+ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+ if (status)
+ goto out;
+
+ status = ixgbe_update_flash_X540(hw);
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
+ * EEPROM from shadow RAM to the flash device.
+ **/
+s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+ u32 flup;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_update_flash_X540");
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_ERR_EEPROM) {
+ DEBUGOUT("Flash update time out\n");
+ goto out;
+ }
+
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_SUCCESS)
+ DEBUGOUT("Flash update complete\n");
+ else
+ DEBUGOUT("Flash update time out\n");
+
+ if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) {
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ if (flup & IXGBE_EEC_SEC1VAL) {
+ flup |= IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ }
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_SUCCESS)
+ DEBUGOUT("Flash update complete\n");
+ else
+ DEBUGOUT("Flash update time out\n");
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ * @hw: pointer to hardware structure
+ *
+ * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ * flash update is done.
+ **/
+STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ DEBUGFUNC("ixgbe_poll_flash_update_done_X540");
+
+ for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (reg & IXGBE_EEC_FLUDONE) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ msec_delay(5);
+ }
+
+ if (i == IXGBE_FLUDONE_ATTEMPTS)
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Flash update status polling timed out");
+
+ return status;
+}
+
+/**
+ * ixgbe_set_mux - Set mux for port 1 access with CS4227
+ * @hw: pointer to hardware structure
+ * @state: set mux if 1, clear if 0
+ */
+STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
+{
+ u32 esdp;
+
+ if (!hw->phy.lan_id)
+ return;
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (state)
+ esdp |= IXGBE_ESDP_SDP1;
+ else
+ esdp &= ~IXGBE_ESDP_SDP1;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the SW_FW_SYNC register for
+ * the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
+ u32 fwmask = swmask << 5;
+ u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
+ u32 timeout = 200;
+ u32 hwmask = 0;
+ u32 swfw_sync;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");
+
+ if (swmask & IXGBE_GSSR_EEP_SM)
+ hwmask |= IXGBE_GSSR_FLASH_SM;
+
+ /* SW only mask doesn't have FW bit pair */
+ if (mask & IXGBE_GSSR_SW_MNG_SM)
+ swmask |= IXGBE_GSSR_SW_MNG_SM;
+
+ swmask |= swi2c_mask;
+ fwmask |= swi2c_mask << 2;
+ for (i = 0; i < timeout; i++) {
+ /* SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
+ if (swi2c_mask)
+ ixgbe_set_mux(hw, 1);
+ return IXGBE_SUCCESS;
+ }
+ /* Firmware currently using resource (fwmask), hardware
+ * currently using resource (hwmask), or other software
+ * thread currently using resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
+ }
+
+ /* Failed to get SW only semaphore */
+ if (swmask == IXGBE_GSSR_SW_MNG_SM) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Failed to get SW only semaphore");
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
+ /* If the resource is not released by the FW/HW the SW can assume that
+ * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
+ * of the requested resource(s) while ignoring the corresponding FW/HW
+ * bits in the SW_FW_SYNC register.
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (swfw_sync & (fwmask | hwmask)) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
+ if (swi2c_mask)
+ ixgbe_set_mux(hw, 1);
+ return IXGBE_SUCCESS;
+ }
+ /* If the resource is not released by other SW the SW can assume that
+ * the other SW malfunctions. In that case the SW should clear all SW
+ * flags that it does not own and then repeat the whole process once
+ * again.
+ */
+ if (swfw_sync & swmask) {
+ u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
+
+ if (swi2c_mask)
+ rmask |= IXGBE_GSSR_I2C_MASK;
+ ixgbe_release_swfw_sync_X540(hw, rmask);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ ixgbe_release_swfw_sync_semaphore(hw);
+
+ return IXGBE_ERR_SWFW_SYNC;
+}
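+
+/* Callers pair this with ixgbe_release_swfw_sync_X540(). The EEPROM
+ * wrappers earlier in this file follow the typical pattern:
+ *
+ *	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ *	    IXGBE_SUCCESS) {
+ *		... access the EEPROM ...
+ *		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ *	}
+ */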
+
+/**
+ * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the SW_FW_SYNC register
+ * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
+ u32 swfw_sync;
+
+ DEBUGFUNC("ixgbe_release_swfw_sync_X540");
+
+ if (mask & IXGBE_GSSR_I2C_MASK) {
+ swmask |= mask & IXGBE_GSSR_I2C_MASK;
+ ixgbe_set_mux(hw, 0);
+ }
+ ixgbe_get_swfw_sync_semaphore(hw);
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
+}
+
+/**
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so SW/FW can gain control of shared resources
+ **/
+STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ DEBUGFUNC("ixgbe_get_swfw_sync_semaphore");
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = IXGBE_SUCCESS;
+ break;
+ }
+ usec_delay(50);
+ }
+
+ /* Now get the semaphore between SW/FW through the REGSMP bit */
+ if (status == IXGBE_SUCCESS) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swsm & IXGBE_SWFW_REGSMP))
+ break;
+
+ usec_delay(50);
+ }
+
+ /*
+ * Release semaphores and return error if SW NVM semaphore
+ * was not granted because we don't have access to the EEPROM
+ */
+ if (i >= timeout) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "REGSMP Software NVM semaphore not granted.\n");
+ ixgbe_release_swfw_sync_semaphore(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ } else {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("ixgbe_release_swfw_sync_semaphore");
+
+ /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm &= ~IXGBE_SWFW_REGSMP;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_blink_led_start_X540 - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ DEBUGFUNC("ixgbe_blink_led_start_X540");
+
+ /*
+ * Link should be up in order for the blink bit in the LED control
+ * register to work. Force link and speed in the MAC if link is down.
+ * This will be reversed when we stop the blinking.
+ */
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (link_up == false) {
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ }
+ /* Set the LED to LINK_UP + BLINK. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+
+ DEBUGFUNC("ixgbe_blink_led_stop_X540");
+
+ /* Restore the LED to its default value. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+ ledctl_reg &= ~IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+
+ /* Unforce link and speed in the MAC. */
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
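+
+/* ixgbe_blink_led_start_X540() and ixgbe_blink_led_stop_X540() are meant
+ * to be used as a pair: start may force link up at 10G in the MACC
+ * register purely so the blink bit takes effect, and stop undoes that
+ * forcing and restores the LED to its LINK_ACTIVE default.
+ */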
+
+
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h
new file mode 100755
index 00000000..338c0e6e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h
@@ -0,0 +1,67 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
+
+#include "ixgbe_type.h"
+#ident "$Id: ixgbe_x540.h,v 1.11 2013/10/11 08:36:03 jtkirshe Exp $"
+
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool link_up_wait_to_complete);
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+#endif /* _IXGBE_X540_H_ */
+
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c
new file mode 100755
index 00000000..06d66dd5
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c
@@ -0,0 +1,1809 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_x550.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+/**
+ * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for X550.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550");
+
+ ret_val = ixgbe_init_ops_X540(hw);
+ mac->ops.dmac_config = &ixgbe_dmac_config_X550;
+ mac->ops.dmac_config_tcs = &ixgbe_dmac_config_tcs_X550;
+ mac->ops.dmac_update_tcs = &ixgbe_dmac_update_tcs_X550;
+ mac->ops.setup_eee = &ixgbe_setup_eee_X550;
+ mac->ops.set_source_address_pruning =
+ &ixgbe_set_source_address_pruning_X550;
+ mac->ops.set_ethertype_anti_spoofing =
+ &ixgbe_set_ethertype_anti_spoofing_X550;
+
+ mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
+ eeprom->ops.init_params = &ixgbe_init_eeprom_params_X550;
+ eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X550;
+ eeprom->ops.read = &ixgbe_read_ee_hostif_X550;
+ eeprom->ops.read_buffer = &ixgbe_read_ee_hostif_buffer_X550;
+ eeprom->ops.write = &ixgbe_write_ee_hostif_X550;
+ eeprom->ops.write_buffer = &ixgbe_write_ee_hostif_buffer_X550;
+ eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X550;
+ eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X550;
+
+ mac->ops.disable_mdd = &ixgbe_disable_mdd_X550;
+ mac->ops.enable_mdd = &ixgbe_enable_mdd_X550;
+ mac->ops.mdd_event = &ixgbe_mdd_event_X550;
+ mac->ops.restore_mdd_vf = &ixgbe_restore_mdd_vf_X550;
+ mac->ops.disable_rx = &ixgbe_disable_rx_x550;
+ return ret_val;
+}
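+
+/* X550 reuses the X540 function table as its baseline (via
+ * ixgbe_init_ops_X540() above) and then overrides mainly the DMA
+ * coalescing, EEE, MDD and host-interface EEPROM entry points defined in
+ * this file.
+ */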
+
+/**
+ * ixgbe_identify_phy_x550em - Get PHY type based on device id
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+{
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ /* set up for CS4227 usage */
+ hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
+ IXGBE_STATUS_LAN_ID_1;
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ if (hw->phy.lan_id) {
+
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+
+ return ixgbe_identify_module_generic(hw);
+ break;
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ hw->phy.type = ixgbe_phy_x550em_kx4;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X:
+ hw->phy.type = ixgbe_phy_x550em_kr;
+ break;
+ default:
+ break;
+ }
+ return IXGBE_SUCCESS;
+}
+
+STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
+ return IXGBE_NOT_IMPLEMENTED;
+}
+
+/**
+ * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers for MAC type X550EM.
+ * Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550EM");
+
+ /* Similar to X550 so start there. */
+ ret_val = ixgbe_init_ops_X550(hw);
+
+ /* Since this function eventually calls
+ * ixgbe_init_ops_X540 by design, we are setting
+ * the pointers to NULL explicitly here to overwrite
+ * the values being set in the x540 function.
+ */
+ /* Thermal sensor not supported in x550EM */
+ mac->ops.get_thermal_sensor_data = NULL;
+ mac->ops.init_thermal_sensor_thresh = NULL;
+ mac->thermal_sensor_enabled = false;
+
+ /* FCOE not supported in x550EM */
+ mac->ops.get_san_mac_addr = NULL;
+ mac->ops.set_san_mac_addr = NULL;
+ mac->ops.get_wwn_prefix = NULL;
+ mac->ops.get_fcoe_boot_status = NULL;
+
+ /* IPsec not supported in x550EM */
+ mac->ops.disable_sec_rx_path = NULL;
+ mac->ops.enable_sec_rx_path = NULL;
+
+ /* PCIe bus info not supported in X550EM */
+ mac->ops.get_bus_info = NULL;
+
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
+ mac->ops.get_media_type = &ixgbe_get_media_type_X550em;
+ mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_X550em;
+ mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_X550em;
+ mac->ops.reset_hw = &ixgbe_reset_hw_X550em;
+ mac->ops.get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_X550em;
+
+ /* PHY */
+ phy->ops.init = &ixgbe_init_phy_ops_X550em;
+ phy->ops.identify = &ixgbe_identify_phy_x550em;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ phy->ops.setup_link = ixgbe_setup_kr_x550em;
+
+
+ /* EEPROM */
+ eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
+ eeprom->ops.read = &ixgbe_read_ee_hostif_X550;
+ eeprom->ops.read_buffer = &ixgbe_read_ee_hostif_buffer_X550;
+ eeprom->ops.write = &ixgbe_write_ee_hostif_X550;
+ eeprom->ops.write_buffer = &ixgbe_write_ee_hostif_buffer_X550;
+ eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X550;
+ eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X550;
+ eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X550;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_dmac_config_X550
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing. If enabling dmac, dmac is activated.
+ * When disabling dmac, the dmac enable bit is cleared.
+ **/
+s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
+{
+ u32 reg, high_pri_tc;
+
+ DEBUGFUNC("ixgbe_dmac_config_X550");
+
+ /* Disable DMA coalescing before configuring */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg &= ~IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ /* Disable DMA Coalescing if the watchdog timer is 0 */
+ if (!hw->mac.dmac_config.watchdog_timer)
+ goto out;
+
+ ixgbe_dmac_config_tcs_X550(hw);
+
+ /* Configure DMA Coalescing Control Register */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+
+ /* Set the watchdog timer in units of 40.96 usec */
+ reg &= ~IXGBE_DMACR_DMACWT_MASK;
+ reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
+
+ reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
+ /* If fcoe is enabled, set high priority traffic class */
+ if (hw->mac.dmac_config.fcoe_en) {
+ high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
+ reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
+ IXGBE_DMACR_HIGH_PRI_TC_MASK);
+ }
+ reg |= IXGBE_DMACR_EN_MNG_IND;
+
+ /* Enable DMA coalescing after configuration */
+ reg |= IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+out:
+ return IXGBE_SUCCESS;
+}
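+
+/* Example of the watchdog conversion above: with a watchdog_timer of 41
+ * (in usec), (41 * 100) / 4096 == 1, i.e. a single 40.96 usec hardware
+ * tick is programmed into the DMACWT field.
+ */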
+
+/**
+ * ixgbe_dmac_config_tcs_X550
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing threshold per TC. The dmac enable bit must
+ * be cleared before configuring.
+ **/
+s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
+{
+ u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
+
+ DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
+
+ /* Configure DMA coalescing enabled */
+ switch (hw->mac.dmac_config.link_speed) {
+ case IXGBE_LINK_SPEED_100_FULL:
+ pb_headroom = IXGBE_DMACRXT_100M;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ pb_headroom = IXGBE_DMACRXT_1G;
+ break;
+ default:
+ pb_headroom = IXGBE_DMACRXT_10G;
+ break;
+ }
+
+ maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
+ IXGBE_MHADD_MFS_SHIFT) / 1024);
+
+ /* Set the per Rx packet buffer receive threshold */
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
+ reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
+
+ if (tc < hw->mac.dmac_config.num_tcs) {
+ /* Get Rx PB size */
+ rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
+ rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
+ IXGBE_RXPBSIZE_SHIFT;
+
+ /* Calculate receive buffer threshold in kilobytes */
+ if (rx_pb_size > pb_headroom)
+ rx_pb_size = rx_pb_size - pb_headroom;
+ else
+ rx_pb_size = 0;
+
+ /* Minimum of MFS shall be set for DMCTH */
+ reg |= (rx_pb_size > maxframe_size_kb) ?
+ rx_pb_size : maxframe_size_kb;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dmac_update_tcs_X550
+ * @hw: pointer to hardware structure
+ *
+ * Disables dmac, updates per TC settings, and then enables dmac.
+ **/
+s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
+
+ /* Disable DMA coalescing before configuring */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg &= ~IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ ixgbe_dmac_config_tcs_X550(hw);
+
+ /* Enable DMA coalescing after configuration */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg |= IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ DEBUGFUNC("ixgbe_init_eeprom_params_X550");
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
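+ /* The EEC size field encodes the EEPROM word count as a power of two */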
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_setup_eee_X550 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on enable_eee flag.
+ * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
+ * are modified.
+ *
+ **/
+s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
+{
+ u32 eeer;
+ u16 autoneg_eee_reg;
+ u32 link_reg;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_setup_eee_X550");
+
+ eeer = IXGBE_READ_REG(hw, IXGBE_EEER);
+ /* Enable or disable EEE per flag */
+ if (enable_eee) {
+ eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
+
+ if (hw->device_id == IXGBE_DEV_ID_X550T) {
+ /* Advertise EEE capability */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
+
+ autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
+ } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_X) {
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
+
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+ } else {
+ eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
+
+ if (hw->device_id == IXGBE_DEV_ID_X550T) {
+ /* Disable advertised EEE capability */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
+
+ autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
+ } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_X) {
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
+
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool to set source address pruning for
+ **/
+void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
+ unsigned int pool)
+{
+ u64 pfflp;
+
+ /* max rx pool is 63 */
+ if (pool > 63)
+ return;
+
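+ /* PFFLPL/PFFLPH together form a 64-bit bitmap with one
+ * pruning-enable bit per Rx pool */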
+ pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
+ pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
+
+ if (enable)
+ pfflp |= (1ULL << pool);
+ else
+ pfflp &= ~(1ULL << pool);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
+}
+
+/**
+ * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for Ethertype anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ *
+ **/
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
+ bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
+ u32 pfvfspoof;
+
+ DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
+
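+ /* Each PFVFSPOOF register covers 8 VFs; the Ethertype anti-spoofing
+ * bits start at IXGBE_SPOOF_ETHERTYPEAS_SHIFT within the register */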
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_write_iosf_sb_reg_x550 - Write a value to a specified register of the
+ * IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data)
+{
+ u32 i, command, error;
+
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ /* Write IOSF data register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+ if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to write, error %x\n", error);
+ return IXGBE_ERR_PHY;
+ }
+
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "Write timed out\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg_x550 - Read a value from a specified register of the
+ * IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to read
+ * @device_type: 3 bit device type
+ * @data: pointer to storage for the data read from the register
+ **/
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *data)
+{
+ u32 i, command, error;
+
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+ if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to read, error %x\n", error);
+ return IXGBE_ERR_PHY;
+ }
+
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "Read timed out\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_disable_mdd_X550
+ * @hw: pointer to hardware structure
+ *
+ * Disable malicious driver detection
+ **/
+void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("ixgbe_disable_mdd_X550");
+
+ /* Disable MDD for TX DMA and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+
+ /* Disable MDD for RX and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+}
+
+/**
+ * ixgbe_enable_mdd_X550
+ * @hw: pointer to hardware structure
+ *
+ * Enable malicious driver detection
+ **/
+void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("ixgbe_enable_mdd_X550");
+
+ /* Enable MDD for TX DMA and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+
+ /* Enable MDD for RX and interrupt */
+ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+}
+
+/**
+ * ixgbe_restore_mdd_vf_X550
+ * @hw: pointer to hardware structure
+ * @vf: vf index
+ *
+ * Restore VF that was disabled during malicious driver detection event
+ **/
+void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
+{
+ u32 idx, reg, num_qs, start_q, bitmask;
+
+ DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
+
+ /* Map VF to queues */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ num_qs = 8; /* 16 VFs / pools */
+ bitmask = 0x000000FF;
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ num_qs = 4; /* 32 VFs / pools */
+ bitmask = 0x0000000F;
+ break;
+ default: /* 64 VFs / pools */
+ num_qs = 2;
+ bitmask = 0x00000003;
+ break;
+ }
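+ /* Each pool/VF owns num_qs consecutive queues, so its first
+ * queue index is vf * num_qs */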
+ start_q = vf * num_qs;
+
+ /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
+ idx = start_q / 32;
+ reg = 0;
+ reg |= (bitmask << (start_q % 32));
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
+ IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
+}
+
+/**
+ * ixgbe_mdd_event_X550
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: vf bitmap of malicious vfs
+ *
+ * Handle malicious driver detection event.
+ **/
+void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
+{
+ u32 wqbr;
+ u32 i, j, reg, q, shift, vf, idx;
+
+ DEBUGFUNC("ixgbe_mdd_event_X550");
+
+ /* figure out pool size for mapping to vf's */
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ switch (reg & IXGBE_MRQC_MRQE_MASK) {
+ case IXGBE_MRQC_VMDQRT8TCEN:
+ shift = 3; /* 16 VFs / pools */
+ break;
+ case IXGBE_MRQC_VMDQRSS32EN:
+ case IXGBE_MRQC_VMDQRT4TCEN:
+ shift = 2; /* 32 VFs / pools */
+ break;
+ default:
+ shift = 1; /* 64 VFs / pools */
+ break;
+ }
+
+ /* Read WQBR_TX and WQBR_RX and check for malicious queues */
+ for (i = 0; i < 4; i++) {
+ wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
+ wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
+
+ if (!wqbr)
+ continue;
+
+ /* Get malicious queue */
+ for (j = 0; j < 32 && wqbr; j++) {
+
+ if (!(wqbr & (1 << j)))
+ continue;
+
+ /* Get queue from bitmask */
+ q = j + (i * 32);
+
+ /* Map queue to vf */
+ vf = (q >> shift);
+
+ /* Set vf bit in vf_bitmap */
+ idx = vf / 32;
+ vf_bitmap[idx] |= (1 << (vf % 32));
+ wqbr &= ~(1 << j);
+ }
+ }
+}
+
+/**
+ * ixgbe_get_media_type_X550em - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ */
+enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ DEBUGFUNC("ixgbe_get_media_type_X550em");
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X:
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+ return media_type;
+}
+
+/**
+ * ixgbe_setup_sfp_modules_X550em - Setup SFP module
+ * @hw: pointer to hardware structure
+ */
+s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+{
+ bool setup_linear;
+ u16 reg_slice, edc_mode;
+
+ DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
+
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_unknown:
+ return IXGBE_SUCCESS;
+ case ixgbe_sfp_type_not_present:
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ setup_linear = true;
+ break;
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ case ixgbe_sfp_type_da_act_lmt_core0:
+ case ixgbe_sfp_type_da_act_lmt_core1:
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ case ixgbe_sfp_type_1g_lx_core0:
+ case ixgbe_sfp_type_1g_lx_core1:
+ setup_linear = false;
+ break;
+ default:
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ ixgbe_init_mac_link_ops_X550em(hw);
+ hw->phy.ops.reset = NULL;
+
+ /* The CS4227 slice address is the base address + the port-pair reg
+ * offset. I.e. Slice 0 = 0x0000 and slice 1 = 0x1000.
+ */
+ reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->phy.lan_id << 12);
+
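+ /* The low bit of the value written to the CS4227 is assumed to latch
+ * the new EDC mode (assumption based on the encoding used here) */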
+ if (setup_linear)
+ edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+
+ /* Configure CS4227 for connection type. */
+ return hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227,
+ reg_slice, edc_mode);
+}
+
+/**
+ * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
+
+ /* CS4227 does not support autoneg, so disable the laser control
+ * functions for SFP+ fiber
+ */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ }
+}
+
+/**
+ * ixgbe_get_link_capabilities_X550em - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ */
+s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
+
+ /* SFP */
+ if (hw->phy.media_type == ixgbe_media_type_fiber) {
+
+ /* CS4227 SFP must not enable auto-negotiation */
+ *autoneg = false;
+
+ /* Check if 1G SFP module. */
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
+ || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ return IXGBE_SUCCESS;
+ }
+
+ /* Link capabilities are based on SFP */
+ if (hw->phy.multispeed_fiber)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ } else {
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ */
+s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 esdp;
+
+ DEBUGFUNC("ixgbe_init_phy_ops_X550em");
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ phy->lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
+ IXGBE_STATUS_LAN_ID_1;
+ phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ if (phy->lan_id) {
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ }
+
+ /* Identify the PHY or SFP module */
+ ret_val = phy->ops.identify(hw);
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ return ret_val;
+
+ /* Setup function pointers based on detected SFP module and speeds */
+ ixgbe_init_mac_link_ops_X550em(hw);
+ if (phy->sfp_type != ixgbe_sfp_type_unknown)
+ phy->ops.reset = NULL;
+
+ /* Set functions pointers based on phy type */
+ switch (hw->phy.type) {
+ case ixgbe_phy_x550em_kr:
+ phy->ops.setup_link = ixgbe_setup_kr_x550em;
+ break;
+ default:
+ break;
+ }
+ return ret_val;
+}
+
+/**
+ * ixgbe_reset_hw_X550em - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
+ * reset.
+ */
+s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed link_speed;
+ s32 status;
+ u32 ctrl = 0;
+ u32 i;
+ bool link_up = false;
+
+ DEBUGFUNC("ixgbe_reset_hw_X550em");
+
+ /* Call adapter stop to disable Tx/Rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Identify PHY and related function pointers */
+ status = hw->phy.ops.init(hw);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ return status;
+
+ /* Setup SFP module if there is one present. */
+ if (hw->phy.sfp_setup_needed) {
+ status = hw->mac.ops.setup_sfp(hw);
+ hw->phy.sfp_setup_needed = false;
+ }
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ return status;
+
+ /* Reset PHY */
+ if (!hw->phy.reset_disable && hw->phy.ops.reset)
+ hw->phy.ops.reset(hw);
+
+mac_reset_top:
+ /* Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ ctrl = IXGBE_CTRL_LNK_RST;
+ if (!hw->force_full_reset) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up)
+ ctrl = IXGBE_CTRL_RST;
+ }
+
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear meaning reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ DEBUGOUT("Reset polling failed to complete.\n");
+ }
+
+ msec_delay(50);
+
+ /* Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to
+ * allow time for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_kr_x550em - Configure the KR PHY.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KR PHY.
+ **/
+s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 reg_val;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+ /* Advertise 10G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+ /* Advertise 1G support. */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+ /* Restart auto-negotiation. */
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KR PHY to use iXFI mode.
+ **/
+s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 reg_val;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Disable training protocol FSM. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Disable Flex from training TXFFE. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Enable override for coefficients. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KR PHY to use internal loopback mode.
+ **/
+s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 reg_val;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Set near-end loopback clocks. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
+ reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Set loopback enable. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Training bypass. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->phy.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
+ * command, assuming that the semaphore has already been obtained.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
+{
+ s32 status;
+ struct ixgbe_hic_read_shadow_ram buffer;
+
+ DEBUGFUNC("ixgbe_read_ee_hostif_data_X550");
+ buffer.hdr.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.buf_len1 = 0;
+ buffer.hdr.buf_len2 = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
+ /* one word */
+ buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer), false);
+
+ if (status)
+ return status;
+
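+ /* MAXFRS holds the max frame size in bytes; convert to KB to match
+ * the Rx packet buffer units used below */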
+ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ FW_NVM_DATA_OFFSET);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_read_ee_hostif_X550");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads one or more 16 bit words from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ struct ixgbe_hic_read_shadow_ram buffer;
+ u32 current_word = 0;
+ u16 words_to_read;
+ s32 status;
+ u32 i;
+
+ DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
+
+ /* Take semaphore for the entire operation. */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ if (status) {
+ DEBUGOUT("EEPROM read buffer - semaphore failed\n");
+ return status;
+ }
+ while (words) {
+ if (words > FW_MAX_READ_BUFFER_SIZE / 2)
+ words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
+ else
+ words_to_read = words;
+
+ buffer.hdr.cmd = FW_READ_SHADOW_RAM_CMD;
+ buffer.hdr.buf_len1 = 0;
+ buffer.hdr.buf_len2 = FW_READ_SHADOW_RAM_LEN;
+ buffer.hdr.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* convert offset from words to bytes */
+ buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
+ buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer), false);
+
+ if (status) {
+ DEBUGOUT("Host interface command failed\n");
+ goto out;
+ }
+
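+ /* Each 32-bit FLEX_MNG register holds two EEPROM words; unpack
+ * both halves per register read */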
+ for (i = 0; i < words_to_read; i++) {
+ u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
+ 2 * i;
+ u32 value = IXGBE_READ_REG(hw, reg);
+
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ i++;
+ if (i < words_to_read) {
+ value >>= 16;
+ data[current_word] = (u16)(value & 0xffff);
+ current_word++;
+ }
+ }
+ words -= words_to_read;
+ }
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 data)
+{
+ s32 status;
+ struct ixgbe_hic_write_shadow_ram buffer;
+
+ DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
+
+ buffer.hdr.cmd = FW_WRITE_SHADOW_RAM_CMD;
+ buffer.hdr.buf_len1 = 0;
+ buffer.hdr.buf_len2 = FW_WRITE_SHADOW_RAM_LEN;
+ buffer.hdr.checksum = FW_DEFAULT_CHECKSUM;
+
+ /* one word */
+ buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
+ buffer.data = data;
+ buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer), false);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 data)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_ee_hostif_X550");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ IXGBE_SUCCESS) {
+ status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ } else {
+ DEBUGOUT("write ee hostif failed to get semaphore");
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes one or more 16 bit words to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 i = 0;
+
+ DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
+
+ /* Take semaphore for the entire operation. */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("EEPROM write buffer - semaphore failed\n");
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
+ data[i]);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom buffered write failed\n");
+ break;
+ }
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+out:
+
+ return status;
+}
+
+/**
+ * ixgbe_checksum_ptr_x550 - Checksum one pointer region
+ * @hw: pointer to hardware structure
+ * @ptr: pointer offset in eeprom
+ * @size: size of section pointed by ptr, if 0 first word will be used as size
+ * @csum: address of checksum to update
+ *
+ * Returns error status for any failure
+ */
+STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+ u16 size, u16 *csum)
+{
+ u16 buf[256];
+ s32 status;
+ u16 length, bufsz, i, start;
+
+ bufsz = sizeof(buf) / sizeof(buf[0]);
+
+ /* Read a chunk at the pointer location */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
+ if (status) {
+ DEBUGOUT("Failed to read EEPROM image\n");
+ return status;
+ }
+
+ if (size) {
+ start = 0;
+ length = size;
+ } else {
+ start = 1;
+ length = buf[0];
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (ptr + length) >= hw->eeprom.word_size)
+ return IXGBE_SUCCESS;
+ }
+
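+ /* Walk the region in bufsz-word chunks, re-reading the local
+ * buffer each time the index wraps past the current chunk */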
+ for (i = start; length; i++, length--) {
+ if (i == bufsz) {
+ ptr += bufsz;
+ i = 0;
+ if (length < bufsz)
+ bufsz = length;
+
+ /* Read a chunk at the pointer location */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
+ bufsz, buf);
+ if (status) {
+ DEBUGOUT("Failed to read EEPROM image\n");
+ return status;
+ }
+ }
+ *csum += buf[i];
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+ s32 status;
+ u16 checksum = 0;
+ u16 pointer, i, size;
+
+ DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
+
+ hw->eeprom.ops.init_params(hw);
+
+ /* Read pointer area */
+ status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
+ IXGBE_EEPROM_LAST_WORD + 1,
+ eeprom_ptrs);
+ if (status) {
+ DEBUGOUT("Failed to read EEPROM image\n");
+ return status;
+ }
+
+ /*
+ * For X550 hardware include 0x0-0x41 in the checksum, skip the
+ * checksum word itself
+ */
+ for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
+ if (i != IXGBE_EEPROM_CHECKSUM)
+ checksum += eeprom_ptrs[i];
+
+ /*
+ * Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ pointer = eeprom_ptrs[i];
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ switch (i) {
+ case IXGBE_PCIE_GENERAL_PTR:
+ size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
+ break;
+ case IXGBE_PCIE_CONFIG0_PTR:
+ case IXGBE_PCIE_CONFIG1_PTR:
+ size = IXGBE_PCIE_CONFIG_SIZE;
+ break;
+ default:
+ size = 0;
+ break;
+ }
+
+ status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum);
+ if (status)
+ return status;
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return (s32)checksum;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+ u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ status = hw->eeprom.ops.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+ if (status)
+ return status;
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid EEPROM checksum");
+ }
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to shadow RAM using EEWR register, software calculates
+ * checksum and updates the EEPROM and instructs the hardware to update
+ * the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum = 0;
+
+ DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ status = ixgbe_calc_eeprom_checksum_X550(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
+ if (status)
+ return status;
+
+ status = ixgbe_update_flash_X550(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
+ **/
+s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ struct ixgbe_hic_hdr2 buffer;
+
+ DEBUGFUNC("ixgbe_update_flash_X550");
+
+ buffer.cmd = FW_SHADOW_RAM_DUMP_CMD;
+ buffer.buf_len1 = 0;
+ buffer.buf_len2 = FW_SHADOW_RAM_DUMP_LEN;
+ buffer.checksum = FW_DEFAULT_CHECKSUM;
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ sizeof(buffer), false);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+
+ DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
+
+ hw->phy.ops.identify(hw);
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_x550em_kr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ case ixgbe_phy_x550em_kx4:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
+ physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_disable_rx_x550 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disables the Rx unit for x550
+ **/
+void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
+{
+ u32 rxctrl, pfdtxgswc;
+ s32 status;
+ struct ixgbe_hic_disable_rxen fw_cmd;
+
+ DEBUGFUNC("ixgbe_enable_rx_dma_x550");
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
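+ /* If VT loopback is on, turn it off and remember it via set_lben
+ * so a later Rx enable can restore it (assumption about how the
+ * flag is consumed elsewhere in the driver) */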
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
+ fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
+ fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ fw_cmd.port_number = hw->phy.lan_id;
+
+ status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(struct ixgbe_hic_disable_rxen),
+ true);
+
+ /* If we fail - disable RX using register write */
+ if (status) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+ }
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h
new file mode 100755
index 00000000..e8de1343
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h
@@ -0,0 +1,88 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_X550_H_
+#define _IXGBE_X550_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw);
+s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw);
+s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+ u16 *checksum_val);
+s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw);
+s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data);
+s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 data);
+s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data);
+s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 *data);
+s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 *data);
+s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+ u16 data);
+s32 ixgbe_set_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
+s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
+void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
+ unsigned int pool);
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
+ bool enable, int vf);
+s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data);
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 *data);
+void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw);
+void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw);
+void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap);
+void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf);
+enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
+void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_x550(struct ixgbe_hw *hw);
+#endif /* _IXGBE_X550_H_ */
+
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_82599_bypass.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_82599_bypass.c
new file mode 100755
index 00000000..12cc01d5
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_82599_bypass.c
@@ -0,0 +1,314 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ixgbe/ixgbe_type.h"
+#include "ixgbe/ixgbe_82599.h"
+#include "ixgbe/ixgbe_api.h"
+#include "ixgbe/ixgbe_common.h"
+#include "ixgbe/ixgbe_phy.h"
+#include "ixgbe_bypass_defines.h"
+#include "ixgbe_bypass.h"
+
+/**
+ * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * We set the module speed differently for fixed fiber. Unlike other
+ * multi-speed devices, there is no error value to return here, so if
+ * we detect an error we just log it and exit.
+ */
+static void
+ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ /* one bit mask same as setting on */
+ rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid fixed module speed");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS0");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS0");
+ goto out;
+ }
+
+ /* Set RS1 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS1");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS1");
+ goto out;
+ }
+out:
+ return;
+}
+
+/**
+ * ixgbe_setup_mac_link_multispeed_fixed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+static s32
+ixgbe_setup_mac_link_multispeed_fixed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = IXGBE_SUCCESS;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 speedcnt = 0;
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ u32 i = 0;
+ bool link_up = false;
+ bool negotiation;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Mask off requested but non-supported speeds */
+ status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ speed &= link_speed;
+
+ /*
+ * Try each speed one by one, highest priority first. We do this in
+ * software because 10gb fiber doesn't support speed autonegotiation.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ speedcnt++;
+ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+ goto out;
+ /* Set the module link speed */
+ ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_10GB_FULL);
+
+ /* Set the module link speed */
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Allow module to change analog characteristics (1G->10G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ speedcnt++;
+ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_1GB_FULL);
+
+ /* Allow module to change analog characteristics (10G->1G) */
+ msec_delay(40);
+
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Flap the tx laser if it has not already been done */
+ ixgbe_flap_tx_laser(hw);
+
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+
+ /*
+ * We didn't get link. Configure back to the highest speed we tried,
+ * (if there was more than one). We call ourselves back with just the
+ * single highest speed that the user requested.
+ */
+ if (speedcnt > 1)
+ status = ixgbe_setup_mac_link_multispeed_fixed_fiber(hw,
+ highest_link_speed, autoneg_wait_to_complete);
+
+out:
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ return status;
+}
+
+static enum ixgbe_media_type
+ixgbe_bypass_get_media_type(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ media_type = ixgbe_media_type_fiber;
+ } else {
+ media_type = ixgbe_get_media_type_82599(hw);
+ }
+ return (media_type);
+}
+
+/*
+ * Wrapper around shared code (base driver) to support BYPASS nic.
+ */
+s32
+ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw)
+{
+ s32 ret_val;
+
+ if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ hw->mac.type = ixgbe_mac_82599EB;
+ }
+
+ ret_val = ixgbe_init_shared_code(hw);
+ if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
+ ixgbe_init_mac_link_ops_82599(hw);
+ }
+
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
+{
+ int rc;
+
+ if ((rc = ixgbe_init_hw(hw)) == 0 &&
+ hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+
+ hw->mac.ops.setup_link =
+ &ixgbe_setup_mac_link_multispeed_fixed_fiber;
+
+ hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
+
+ hw->mac.ops.disable_tx_laser = NULL;
+ hw->mac.ops.enable_tx_laser = NULL;
+ hw->mac.ops.flap_tx_laser = NULL;
+ }
+
+ return (rc);
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.c
new file mode 100755
index 00000000..832f4156
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.c
@@ -0,0 +1,414 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <rte_atomic.h>
+#include <rte_ethdev.h>
+#include "ixgbe_ethdev.h"
+#include "ixgbe_bypass_api.h"
+
+#define BYPASS_STATUS_OFF_MASK 3
+
+/* Macros to check for invalid function pointers. */
+#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
+ if ((func) == NULL) { \
+ PMD_DRV_LOG(ERR, "%s:%d function not supported", \
+ __func__, __LINE__); \
+ return retval; \
+ } \
+} while(0)
+
+#define FUNC_PTR_OR_RET(func) do { \
+ if ((func) == NULL) { \
+ PMD_DRV_LOG(ERR, "%s:%d function not supported", \
+ __func__, __LINE__); \
+ return; \
+ } \
+} while(0)
+
+
+/**
+ * ixgbe_bypass_set_time - Set bypass FW time epoch.
+ *
+ * @adapter: pointer to the ixgbe_adapter structure
+ *
+ * This function syncs the FW date stamp with that of the
+ * system clock.
+ **/
+static void
+ixgbe_bypass_set_time(struct ixgbe_adapter *adapter)
+{
+ u32 mask, value;
+ u32 sec;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ sec = 0;
+
+ /*
+ * Send the FW our current time and turn on time_valid and
+ * timer_reset bits.
+ */
+ mask = BYPASS_CTL1_TIME_M |
+ BYPASS_CTL1_VALID_M |
+ BYPASS_CTL1_OFFTRST_M;
+ value = (sec & BYPASS_CTL1_TIME_M) |
+ BYPASS_CTL1_VALID |
+ BYPASS_CTL1_OFFTRST;
+
+ FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);
+
+ /* Store FW reset time (in seconds from epoch). */
+ adapter->bps.reset_tm = time(NULL);
+
+ /* reset FW timer. */
+ adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value);
+}
+
+/**
+ * ixgbe_bypass_init - Make some environment changes for bypass
+ *
+ * @dev: pointer to the rte_eth_dev structure, for access to the adapter state
+ *
+ * This function collects all the modifications needed by the bypass
+ * driver.
+ **/
+void
+ixgbe_bypass_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_hw *hw;
+
+ adapter = IXGBE_DEV_TO_ADPATER(dev);
+ hw = &adapter->hw;
+
+ /* Only allow BYPASS ops on the first port */
+ if (hw->device_id != IXGBE_DEV_ID_82599_BYPASS ||
+ hw->bus.func != 0) {
+ PMD_DRV_LOG(ERR, "bypass function is not supported on that device");
+ return;
+ }
+
+ /* set bypass ops. */
+ adapter->bps.ops.bypass_rw = &ixgbe_bypass_rw_generic;
+ adapter->bps.ops.bypass_valid_rd = &ixgbe_bypass_valid_rd_generic;
+ adapter->bps.ops.bypass_set = &ixgbe_bypass_set_generic;
+ adapter->bps.ops.bypass_rd_eep = &ixgbe_bypass_rd_eep_generic;
+
+ /* set the time for logging. */
+ ixgbe_bypass_set_time(adapter);
+
+ /* Don't have the SDP to the laser */
+ hw->mac.ops.disable_tx_laser = NULL;
+ hw->mac.ops.enable_tx_laser = NULL;
+ hw->mac.ops.flap_tx_laser = NULL;
+}
+
+s32
+ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state)
+{
+ struct ixgbe_hw *hw;
+ s32 ret_val;
+ u32 cmd;
+ u32 by_ctl = 0;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+ cmd = BYPASS_PAGE_CTL0;
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
+
+ /* Assume bypass_rw didn't error out; if it did, state will
+ * be ignored anyway.
+ */
+ *state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) & BYPASS_STATUS_OFF_MASK;
+
+ return (ret_val);
+}
+
+
+s32
+ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state)
+{
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+ struct ixgbe_hw *hw;
+ s32 ret_val;
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
+
+ /* Set the new state */
+ ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+ BYPASS_MODE_OFF_M, *new_state);
+ if (ret_val)
+ goto exit;
+
+ /* Set AUTO back on so FW can receive events */
+ ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+ BYPASS_MODE_OFF_M, BYPASS_AUTO);
+
+exit:
+ return ret_val;
+
+}
+
+s32
+ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event,
+ u32 *state)
+{
+ struct ixgbe_hw *hw;
+ s32 ret_val;
+ u32 shift;
+ u32 cmd;
+ u32 by_ctl = 0;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+ cmd = BYPASS_PAGE_CTL0;
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
+
+ /* Assume bypass_rw didn't error out; if it did, event will
+ * be ignored anyway.
+ */
+ switch (event) {
+ case BYPASS_EVENT_WDT_TO:
+ shift = BYPASS_WDTIMEOUT_SHIFT;
+ break;
+ case BYPASS_EVENT_MAIN_ON:
+ shift = BYPASS_MAIN_ON_SHIFT;
+ break;
+ case BYPASS_EVENT_MAIN_OFF:
+ shift = BYPASS_MAIN_OFF_SHIFT;
+ break;
+ case BYPASS_EVENT_AUX_ON:
+ shift = BYPASS_AUX_ON_SHIFT;
+ break;
+ case BYPASS_EVENT_AUX_OFF:
+ shift = BYPASS_AUX_OFF_SHIFT;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ *state = (by_ctl >> shift) & 0x3;
+
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event,
+ u32 state)
+{
+ struct ixgbe_hw *hw;
+ u32 status;
+ u32 off;
+ s32 ret_val;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
+
+ switch (event) {
+ case BYPASS_EVENT_WDT_TO:
+ off = BYPASS_WDTIMEOUT_M;
+ status = state << BYPASS_WDTIMEOUT_SHIFT;
+ break;
+ case BYPASS_EVENT_MAIN_ON:
+ off = BYPASS_MAIN_ON_M;
+ status = state << BYPASS_MAIN_ON_SHIFT;
+ break;
+ case BYPASS_EVENT_MAIN_OFF:
+ off = BYPASS_MAIN_OFF_M;
+ status = state << BYPASS_MAIN_OFF_SHIFT;
+ break;
+ case BYPASS_EVENT_AUX_ON:
+ off = BYPASS_AUX_ON_M;
+ status = state << BYPASS_AUX_ON_SHIFT;
+ break;
+ case BYPASS_EVENT_AUX_OFF:
+ off = BYPASS_AUX_OFF_M;
+ status = state << BYPASS_AUX_OFF_SHIFT;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+ off, status);
+
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
+{
+ struct ixgbe_hw *hw;
+ u32 status;
+ u32 mask;
+ s32 ret_val;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
+
+ /* disable the timer with timeout of zero */
+ if (timeout == RTE_BYPASS_TMT_OFF) {
+ status = 0x0; /* WDG enable off */
+ mask = BYPASS_WDT_ENABLE_M;
+ } else {
+ /* set time out value */
+ mask = BYPASS_WDT_VALUE_M;
+
+ /* enable the timer */
+ status = timeout << BYPASS_WDT_TIME_SHIFT;
+ status |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
+ mask |= BYPASS_WDT_ENABLE_M;
+ }
+
+ ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+ mask, status);
+
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver)
+{
+ struct ixgbe_hw *hw;
+ u32 cmd;
+ u32 status;
+ s32 ret_val;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+ cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
+ cmd |= (BYPASS_EEPROM_VER_ADD << BYPASS_CTL2_OFFSET_SHIFT) &
+ BYPASS_CTL2_OFFSET_M;
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
+ if (ret_val)
+ goto exit;
+
+ /* wait for the write to stick */
+ msleep(100);
+
+ /* Now read the results */
+ cmd &= ~BYPASS_WE;
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
+ if (ret_val)
+ goto exit;
+
+ *ver = status & BYPASS_CTL2_DATA_M; /* only one byte of data */
+
+exit:
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout)
+{
+ struct ixgbe_hw *hw;
+ u32 by_ctl = 0;
+ u32 cmd;
+ u32 wdg;
+ s32 ret_val;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+ cmd = BYPASS_PAGE_CTL0;
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
+
+ wdg = by_ctl & BYPASS_WDT_ENABLE_M;
+ if (!wdg)
+ *wd_timeout = RTE_BYPASS_TMT_OFF;
+ else
+ *wd_timeout = (by_ctl >> BYPASS_WDT_TIME_SHIFT) &
+ BYPASS_WDT_MASK;
+
+ return ret_val;
+}
+
+s32
+ixgbe_bypass_wd_reset(struct rte_eth_dev *dev)
+{
+ u32 cmd;
+ u32 status;
+ u32 sec;
+ u32 count = 0;
+ s32 ret_val;
+ struct ixgbe_hw *hw;
+ struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+ hw = &adapter->hw;
+
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+ FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_valid_rd, -ENOTSUP);
+
+ /* Use the lower level bit-bang functions since we don't need
+ * to read the register first to get its current state, as we
+ * are setting everything in this write.
+ */
+ /* Set up WD pet */
+ cmd = BYPASS_PAGE_CTL1 | BYPASS_WE | BYPASS_CTL1_WDT_PET;
+
+ /* Resync the FW time while writing to CTL1 anyway */
+ adapter->bps.reset_tm = time(NULL);
+ sec = 0;
+
+ cmd |= (sec & BYPASS_CTL1_TIME_M) | BYPASS_CTL1_VALID;
+
+ /* reset FW timer offset since we are resetting the clock */
+ cmd |= BYPASS_CTL1_OFFTRST;
+
+ ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
+
+ /* Read until it matches what we wrote, or we time out */
+ do {
+ if (count++ > 10) {
+ ret_val = IXGBE_BYPASS_FW_WRITE_FAILURE;
+ break;
+ }
+
+ if (adapter->bps.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &status)) {
+ ret_val = IXGBE_ERR_INVALID_ARGUMENT;
+ break;
+ }
+ } while (!adapter->bps.ops.bypass_valid_rd(cmd, status));
+
+ return ret_val;
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.h
new file mode 100755
index 00000000..fcd97743
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass.h
@@ -0,0 +1,68 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_BYPASS_H_
+#define _IXGBE_BYPASS_H_
+
+#ifdef RTE_NIC_BYPASS
+
+struct ixgbe_bypass_mac_ops {
+ s32 (*bypass_rw) (struct ixgbe_hw *hw, u32 cmd, u32 *status);
+ bool (*bypass_valid_rd) (u32 in_reg, u32 out_reg);
+ s32 (*bypass_set) (struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
+ s32 (*bypass_rd_eep) (struct ixgbe_hw *hw, u32 addr, u8 *value);
+};
+
+struct ixgbe_bypass_info {
+ uint64_t reset_tm;
+ struct ixgbe_bypass_mac_ops ops;
+};
+
+struct rte_eth_dev;
+
+void ixgbe_bypass_init(struct rte_eth_dev *dev);
+s32 ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state);
+s32 ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state);
+s32 ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event, u32 *state);
+s32 ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event, u32 state);
+s32 ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout);
+s32 ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver);
+s32 ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout);
+s32 ixgbe_bypass_wd_reset(struct rte_eth_dev *dev);
+
+s32 ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw);
+s32 ixgbe_bypass_init_hw(struct ixgbe_hw *hw);
+
+#endif /* RTE_NIC_BYPASS */
+
+#endif /* _IXGBE_BYPASS_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_api.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_api.h
new file mode 100755
index 00000000..b4a73864
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_api.h
@@ -0,0 +1,299 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_BYPASS_API_H_
+#define _IXGBE_BYPASS_API_H_
+
+#ifdef RTE_NIC_BYPASS
+
+#include "ixgbe_bypass_defines.h"
+/**
+ * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
+ *
+ * @hw: pointer to hardware structure
+ * @cmd: Command we send to the FW
+ * @status: The reply from the FW
+ *
+ * Bit-bangs the cmd to the by_pass FW; status points to what is returned.
+ **/
+#define IXGBE_BYPASS_BB_WAIT 1
+static s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
+{
+ int i;
+ u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
+ u32 esdp;
+
+ if (!status)
+ return IXGBE_ERR_PARAM;
+
+ *status = 0;
+
+ /* SDPs vary by MAC type */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ sck = IXGBE_ESDP_SDP7;
+ sdi = IXGBE_ESDP_SDP0;
+ sdo = IXGBE_ESDP_SDP6;
+ dir_sck = IXGBE_ESDP_SDP7_DIR;
+ dir_sdi = IXGBE_ESDP_SDP0_DIR;
+ dir_sdo = IXGBE_ESDP_SDP6_DIR;
+ break;
+ case ixgbe_mac_X540:
+ sck = IXGBE_ESDP_SDP2;
+ sdi = IXGBE_ESDP_SDP0;
+ sdo = IXGBE_ESDP_SDP1;
+ dir_sck = IXGBE_ESDP_SDP2_DIR;
+ dir_sdi = IXGBE_ESDP_SDP0_DIR;
+ dir_sdo = IXGBE_ESDP_SDP1_DIR;
+ break;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ sck = IXGBE_ESDP_SDP2;
+ sdi = IXGBE_ESDP_SDP0;
+ sdo = IXGBE_ESDP_SDP1;
+ dir_sck = IXGBE_ESDP_SDP2_DIR;
+ dir_sdi = IXGBE_ESDP_SDP0_DIR;
+ dir_sdo = IXGBE_ESDP_SDP1_DIR;
+ break;
+ default:
+ return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ /* Set SDP pins direction */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= dir_sck; /* SCK as output */
+ esdp |= dir_sdi; /* SDI as output */
+ esdp &= ~dir_sdo; /* SDO as input */
+ esdp |= sck;
+ esdp |= sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ // TODO:
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ /* Generate start condition */
+ esdp &= ~sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ esdp &= ~sck;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ /* Clock out the new control word and clock in the status */
+ for (i = 0; i < 32; i++) {
+ if ((cmd >> (31 - i)) & 0x01) {
+ esdp |= sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ } else {
+ esdp &= ~sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ }
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ esdp |= sck;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ esdp &= ~sck;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & sdo)
+ *status = (*status << 1) | 0x01;
+ else
+ *status = (*status << 1) | 0x00;
+ msleep(IXGBE_BYPASS_BB_WAIT);
+ }
+
+ /* stop condition */
+ esdp |= sck;
+ esdp &= ~sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(IXGBE_BYPASS_BB_WAIT);
+
+ esdp |= sdi;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* set the page bits of the status to match the cmd it belongs to */
+ *status = (*status & 0x3fffffff) | (cmd & 0xc0000000);
+
+ return 0;
+}
+
+/**
+ * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
+ *
+ * If we send a write we can't be sure it took until we can read back
+ * that same register. It can be a problem as some of the fields may,
+ * for valid reasons, change between the time we wrote the register and
+ * when we read it back to verify. So this function checks everything it
+ * can check and then assumes it worked.
+ *
+ * @u32 in_reg - The register cmd for the bit-bang read.
+ * @u32 out_reg - The register returned from a bit-bang read.
+ **/
+static bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
+{
+ u32 mask;
+
+ /* Page must match for all control pages */
+ if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
+ return false;
+
+ switch (in_reg & BYPASS_PAGE_M) {
+ case BYPASS_PAGE_CTL0:
+ /* All the following can't change since the last write
+ * - All the event actions
+ * - The timeout value
+ */
+ mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
+ BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
+ BYPASS_WDTIMEOUT_M |
+ BYPASS_WDT_VALUE_M;
+ if ((out_reg & mask) != (in_reg & mask))
+ return false;
+
+ /* 0x0 is never a valid value for bypass status */
+ if (!(out_reg & BYPASS_STATUS_OFF_M))
+ return false;
+ break;
+ case BYPASS_PAGE_CTL1:
+ /* All the following can't change since the last write
+ * - time valid bit
+ * - time we last sent
+ */
+ mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
+ if ((out_reg & mask) != (in_reg & mask))
+ return false;
+ break;
+ case BYPASS_PAGE_CTL2:
+ /* All we can check in this page is the control number,
+ * which is already checked above.
+ */
+ break;
+ }
+
+ /* We are as sure as we can be, so return true */
+ return true;
+}
+
+/**
+ * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.
+ *
+ * @hw: pointer to hardware structure
+ * @ctrl: The control word we are setting.
+ * @event: The event we are setting in the FW. This also happens to
+ * be the mask for the event we are setting (handy)
+ * @action: The action we set the event to in the FW. This is in a
+ * bit field that happens to be what we want to put in
+ * the event spot (also handy)
+ **/
+static s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
+ u32 action)
+{
+ u32 by_ctl = 0;
+ u32 cmd, verify;
+ u32 count = 0;
+
+ /* Get current values */
+ cmd = ctrl; /* just reading only need control number */
+ if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ /* Set to new action */
+ cmd = (by_ctl & ~event) | BYPASS_WE | action;
+ if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ /* Page 0 forces a FW eeprom write, which is slow, so verify */
+ if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
+ verify = BYPASS_PAGE_CTL0;
+ do {
+ if (count++ > 5)
+ return IXGBE_BYPASS_FW_WRITE_FAILURE;
+
+ if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ } while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
+ } else {
+ /* We have to give the FW time for the write to stick */
+ msleep(100);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address.
+ *
+ * @hw: pointer to hardware structure
+ * @addr: The bypass eeprom address to read.
+ * @value: The 8b of data at the address above.
+ **/
+static s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
+{
+ u32 cmd;
+ u32 status;
+
+
+ /* send the request */
+ cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
+ cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
+ if (ixgbe_bypass_rw_generic(hw, cmd, &status))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ /* We have to give the FW time for the write to stick */
+ msleep(100);
+
+ /* now read the results */
+ cmd &= ~BYPASS_WE;
+ if (ixgbe_bypass_rw_generic(hw, cmd, &status))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ *value = status & BYPASS_CTL2_DATA_M;
+
+ return 0;
+}
+
+#endif /* RTE_NIC_BYPASS */
+
+#endif /* _IXGBE_BYPASS_API_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_defines.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_defines.h
new file mode 100755
index 00000000..22570acf
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_bypass_defines.h
@@ -0,0 +1,160 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_BYPASS_DEFINES_H_
+#define _IXGBE_BYPASS_DEFINES_H_
+
+#ifdef RTE_NIC_BYPASS
+
+#define msleep(x) rte_delay_us((x) * 1000)
+#define usleep_range(min, max) rte_delay_us(min)
+
+#define BYPASS_PAGE_CTL0 0x00000000
+#define BYPASS_PAGE_CTL1 0x40000000
+#define BYPASS_PAGE_CTL2 0x80000000
+#define BYPASS_PAGE_M 0xc0000000
+#define BYPASS_WE 0x20000000
+
+#define BYPASS_AUTO 0x0
+#define BYPASS_NOP 0x0
+#define BYPASS_NORM 0x1
+#define BYPASS_BYPASS 0x2
+#define BYPASS_ISOLATE 0x3
+
+#define BYPASS_EVENT_MAIN_ON 0x1
+#define BYPASS_EVENT_AUX_ON 0x2
+#define BYPASS_EVENT_MAIN_OFF 0x3
+#define BYPASS_EVENT_AUX_OFF 0x4
+#define BYPASS_EVENT_WDT_TO 0x5
+#define BYPASS_EVENT_USR 0x6
+
+#define BYPASS_MODE_OFF_M 0x00000003
+#define BYPASS_STATUS_OFF_M 0x0000000c
+#define BYPASS_AUX_ON_M 0x00000030
+#define BYPASS_MAIN_ON_M 0x000000c0
+#define BYPASS_MAIN_OFF_M 0x00000300
+#define BYPASS_AUX_OFF_M 0x00000c00
+#define BYPASS_WDTIMEOUT_M 0x00003000
+#define BYPASS_WDT_ENABLE_M 0x00004000
+#define BYPASS_WDT_VALUE_M 0x00070000
+
+#define BYPASS_MODE_OFF_SHIFT 0
+#define BYPASS_STATUS_OFF_SHIFT 2
+#define BYPASS_AUX_ON_SHIFT 4
+#define BYPASS_MAIN_ON_SHIFT 6
+#define BYPASS_MAIN_OFF_SHIFT 8
+#define BYPASS_AUX_OFF_SHIFT 10
+#define BYPASS_WDTIMEOUT_SHIFT 12
+#define BYPASS_WDT_ENABLE_SHIFT 14
+#define BYPASS_WDT_TIME_SHIFT 16
+
+#define BYPASS_WDT_1 0x0
+#define BYPASS_WDT_1_5 0x1
+#define BYPASS_WDT_2 0x2
+#define BYPASS_WDT_3 0x3
+#define BYPASS_WDT_4 0x4
+#define BYPASS_WDT_8 0x5
+#define BYPASS_WDT_16 0x6
+#define BYPASS_WDT_32 0x7
+#define BYPASS_WDT_OFF 0xffff
+
+#define BYPASS_WDT_MASK 0x7
+
+#define BYPASS_CTL1_TIME_M 0x01ffffff
+#define BYPASS_CTL1_VALID_M 0x02000000
+#define BYPASS_CTL1_OFFTRST_M 0x04000000
+#define BYPASS_CTL1_WDT_PET_M 0x08000000
+
+#define BYPASS_CTL1_VALID 0x02000000
+#define BYPASS_CTL1_OFFTRST 0x04000000
+#define BYPASS_CTL1_WDT_PET 0x08000000
+
+#define BYPASS_CTL2_DATA_M 0x000000ff
+#define BYPASS_CTL2_OFFSET_M 0x0000ff00
+#define BYPASS_CTL2_RW_M 0x00010000
+#define BYPASS_CTL2_HEAD_M 0x0ff00000
+
+#define BYPASS_CTL2_OFFSET_SHIFT 8
+#define BYPASS_CTL2_HEAD_SHIFT 20
+
+#define BYPASS_CTL2_RW 0x00010000
+
+enum ixgbe_state_t {
+ __IXGBE_TESTING,
+ __IXGBE_RESETTING,
+ __IXGBE_DOWN,
+ __IXGBE_SERVICE_SCHED,
+ __IXGBE_IN_SFP_INIT,
+ __IXGBE_IN_BYPASS_LOW,
+ __IXGBE_IN_BYPASS_HIGH,
+ __IXGBE_IN_BYPASS_LOG,
+};
+
+#define BYPASS_MAX_LOGS 43
+#define BYPASS_LOG_SIZE 5
+#define BYPASS_LOG_LINE_SIZE 37
+
+#define BYPASS_EEPROM_VER_ADD 0x02
+
+#define BYPASS_LOG_TIME_M 0x01ffffff
+#define BYPASS_LOG_TIME_VALID_M 0x02000000
+#define BYPASS_LOG_HEAD_M 0x04000000
+#define BYPASS_LOG_CLEAR_M 0x08000000
+#define BYPASS_LOG_EVENT_M 0xf0000000
+#define BYPASS_LOG_ACTION_M 0x03
+
+#define BYPASS_LOG_EVENT_SHIFT 28
+#define BYPASS_LOG_CLEAR_SHIFT 24 /* bit offset */
+#define IXGBE_DEV_TO_ADPATER(dev) \
+ ((struct ixgbe_adapter *)((dev)->data->dev_private))
+
+/* extractions from ixgbe_phy.h */
+#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
+
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_SFF_8472_OSCB 0x6E
+#define IXGBE_SFF_SFF_8472_ESCB 0x76
+
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
+
+/* extractions from ixgbe_type.h */
+#define IXGBE_DEV_ID_82599_BYPASS 0x155D
+
+#define IXGBE_BYPASS_FW_WRITE_FAILURE -35
+
+#endif /* RTE_NIC_BYPASS */
+
+#endif /* _IXGBE_BYPASS_DEFINES_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.c
new file mode 100755
index 00000000..3fc37384
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -0,0 +1,4133 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+
+#include "ixgbe_logs.h"
+#include "ixgbe/ixgbe_api.h"
+#include "ixgbe/ixgbe_vf.h"
+#include "ixgbe/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_bypass.h"
+#include "ixgbe_rxtx.h"
+
+/*
+ * High threshold controlling when to start sending XOFF frames. Must be at
+ * least 8 bytes less than receive packet buffer size. This value is in units
+ * of 1024 bytes.
+ */
+#define IXGBE_FC_HI 0x80
+
+/*
+ * Low threshold controlling when to start sending XON frames. This value is
+ * in units of 1024 bytes.
+ */
+#define IXGBE_FC_LO 0x40
+
+/* Timer value included in XOFF frames. */
+#define IXGBE_FC_PAUSE 0x680
+
+#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
+#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
+#define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */
+
+#define IXGBE_MMW_SIZE_DEFAULT 0x4
+#define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14
+
+/*
+ * Default values for RX/TX configuration
+ */
+#define IXGBE_DEFAULT_RX_FREE_THRESH 32
+#define IXGBE_DEFAULT_RX_PTHRESH 8
+#define IXGBE_DEFAULT_RX_HTHRESH 8
+#define IXGBE_DEFAULT_RX_WTHRESH 0
+
+#define IXGBE_DEFAULT_TX_FREE_THRESH 32
+#define IXGBE_DEFAULT_TX_PTHRESH 32
+#define IXGBE_DEFAULT_TX_HTHRESH 0
+#define IXGBE_DEFAULT_TX_WTHRESH 0
+#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
+
+/* Bit shift and mask */
+#define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2)
+#define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
+#define IXGBE_8_BIT_WIDTH CHAR_BIT
+#define IXGBE_8_BIT_MASK UINT8_MAX
+
+#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
+
+#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
+
+static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev);
+static int ixgbe_dev_configure(struct rte_eth_dev *dev);
+static int ixgbe_dev_start(struct rte_eth_dev *dev);
+static void ixgbe_dev_stop(struct rte_eth_dev *dev);
+static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
+static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
+static void ixgbe_dev_close(struct rte_eth_dev *dev);
+static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
+static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
+static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
+ uint16_t queue, bool on);
+static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
+ int on);
+static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
+static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
+static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
+static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
+
+static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
+static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
+static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_pfc_conf *pfc_conf);
+static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
+static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
+ void *param);
+static void ixgbe_dev_interrupt_delayed_handler(void *param);
+static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
+
+/* For Virtual Function support */
+static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev);
+static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
+static int ixgbevf_dev_start(struct rte_eth_dev *dev);
+static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
+static void ixgbevf_dev_close(struct rte_eth_dev *dev);
+static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
+static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
+static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
+ uint16_t queue, int on);
+static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+
+/* For Eth VMDQ APIs support */
+static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
+ ether_addr* mac_addr,uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
+static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
+ uint16_t rx_mask, uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
+ uint64_t pool_mask,uint8_t vlan_on);
+static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on);
+static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
+ uint8_t rule_id);
+
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t tx_rate);
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+
+static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int ixgbe_add_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t rx_queue);
+static int ixgbe_remove_syn_filter(struct rte_eth_dev *dev);
+static int ixgbe_get_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t *rx_queue);
+static int ixgbe_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_ethertype_filter *filter, uint16_t rx_queue);
+static int ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index);
+static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_ethertype_filter *filter, uint16_t *rx_queue);
+static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t rx_queue);
+static int ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index);
+static int ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t *rx_queue);
+
+static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
+
+/*
+ * Define VF stats macros for registers that are not cleared on read
+ */
+#define UPDATE_VF_STAT(reg, last, cur) \
+{ \
+ u32 latest = IXGBE_READ_REG(hw, reg); \
+ cur += latest - last; \
+ last = latest; \
+}
+
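+/*
+ * Note: the 36-bit variant below adds 2^36 (0x1000000000) before subtracting
+ * and masks the result back to 36 bits, so the accumulated delta stays
+ * correct even when the hardware counter wraps between two reads.
+ */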
+#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \
+{ \
+ u64 new_lsb = IXGBE_READ_REG(hw, lsb); \
+ u64 new_msb = IXGBE_READ_REG(hw, msb); \
+ u64 latest = ((new_msb << 32) | new_lsb); \
+ cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
+ last = latest; \
+}
+
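+/*
+ * The HWSTRIP helpers treat the per-queue VLAN-strip flags as a flat bitmap:
+ * queue q lives in word q / (bits per word) at bit q % (bits per word),
+ * where bits per word is sizeof((h)->bitmap[0]) * NBBY.
+ */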
+#define IXGBE_SET_HWSTRIP(h, q) do{\
+ uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+ (h)->bitmap[idx] |= 1 << bit;\
+ }while(0)
+
+#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
+ uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+ (h)->bitmap[idx] &= ~(1 << bit);\
+ }while(0)
+
+#define IXGBE_GET_HWSTRIP(h, q, r) do{\
+ uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+ (r) = (h)->bitmap[idx] >> bit & 1;\
+ }while(0)
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id pci_id_ixgbe_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{ .vendor_id = 0, /* sentinel */ },
+};
+
+
+/*
+ * The set of PCI devices this driver supports (for 82599 VF)
+ */
+static struct rte_pci_id pci_id_ixgbevf_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+{ .vendor_id = 0, /* sentinel */ },
+
+};
+
+static struct eth_dev_ops ixgbe_eth_dev_ops = {
+ .dev_configure = ixgbe_dev_configure,
+ .dev_start = ixgbe_dev_start,
+ .dev_stop = ixgbe_dev_stop,
+ .dev_set_link_up = ixgbe_dev_set_link_up,
+ .dev_set_link_down = ixgbe_dev_set_link_down,
+ .dev_close = ixgbe_dev_close,
+ .promiscuous_enable = ixgbe_dev_promiscuous_enable,
+ .promiscuous_disable = ixgbe_dev_promiscuous_disable,
+ .allmulticast_enable = ixgbe_dev_allmulticast_enable,
+ .allmulticast_disable = ixgbe_dev_allmulticast_disable,
+ .link_update = ixgbe_dev_link_update,
+ .stats_get = ixgbe_dev_stats_get,
+ .stats_reset = ixgbe_dev_stats_reset,
+ .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
+ .dev_infos_get = ixgbe_dev_info_get,
+ .mtu_set = ixgbe_dev_mtu_set,
+ .vlan_filter_set = ixgbe_vlan_filter_set,
+ .vlan_tpid_set = ixgbe_vlan_tpid_set,
+ .vlan_offload_set = ixgbe_vlan_offload_set,
+ .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
+ .rx_queue_start = ixgbe_dev_rx_queue_start,
+ .rx_queue_stop = ixgbe_dev_rx_queue_stop,
+ .tx_queue_start = ixgbe_dev_tx_queue_start,
+ .tx_queue_stop = ixgbe_dev_tx_queue_stop,
+ .rx_queue_setup = ixgbe_dev_rx_queue_setup,
+ .rx_queue_release = ixgbe_dev_rx_queue_release,
+ .rx_queue_count = ixgbe_dev_rx_queue_count,
+ .rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
+ .tx_queue_setup = ixgbe_dev_tx_queue_setup,
+ .tx_queue_release = ixgbe_dev_tx_queue_release,
+ .dev_led_on = ixgbe_dev_led_on,
+ .dev_led_off = ixgbe_dev_led_off,
+ .flow_ctrl_get = ixgbe_flow_ctrl_get,
+ .flow_ctrl_set = ixgbe_flow_ctrl_set,
+ .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
+ .mac_addr_add = ixgbe_add_rar,
+ .mac_addr_remove = ixgbe_remove_rar,
+ .uc_hash_table_set = ixgbe_uc_hash_table_set,
+ .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
+ .mirror_rule_set = ixgbe_mirror_rule_set,
+ .mirror_rule_reset = ixgbe_mirror_rule_reset,
+ .set_vf_rx_mode = ixgbe_set_pool_rx_mode,
+ .set_vf_rx = ixgbe_set_pool_rx,
+ .set_vf_tx = ixgbe_set_pool_tx,
+ .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter,
+ .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
+ .set_vf_rate_limit = ixgbe_set_vf_rate_limit,
+ .fdir_add_signature_filter = ixgbe_fdir_add_signature_filter,
+ .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
+ .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
+ .fdir_infos_get = ixgbe_fdir_info_get,
+ .fdir_add_perfect_filter = ixgbe_fdir_add_perfect_filter,
+ .fdir_update_perfect_filter = ixgbe_fdir_update_perfect_filter,
+ .fdir_remove_perfect_filter = ixgbe_fdir_remove_perfect_filter,
+ .fdir_set_masks = ixgbe_fdir_set_masks,
+ .reta_update = ixgbe_dev_rss_reta_update,
+ .reta_query = ixgbe_dev_rss_reta_query,
+#ifdef RTE_NIC_BYPASS
+ .bypass_init = ixgbe_bypass_init,
+ .bypass_state_set = ixgbe_bypass_state_store,
+ .bypass_state_show = ixgbe_bypass_state_show,
+ .bypass_event_set = ixgbe_bypass_event_store,
+ .bypass_event_show = ixgbe_bypass_event_show,
+ .bypass_wd_timeout_set = ixgbe_bypass_wd_timeout_store,
+ .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
+ .bypass_ver_show = ixgbe_bypass_ver_show,
+ .bypass_wd_reset = ixgbe_bypass_wd_reset,
+#endif /* RTE_NIC_BYPASS */
+ .rss_hash_update = ixgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
+ .add_syn_filter = ixgbe_add_syn_filter,
+ .remove_syn_filter = ixgbe_remove_syn_filter,
+ .get_syn_filter = ixgbe_get_syn_filter,
+ .add_ethertype_filter = ixgbe_add_ethertype_filter,
+ .remove_ethertype_filter = ixgbe_remove_ethertype_filter,
+ .get_ethertype_filter = ixgbe_get_ethertype_filter,
+ .add_5tuple_filter = ixgbe_add_5tuple_filter,
+ .remove_5tuple_filter = ixgbe_remove_5tuple_filter,
+ .get_5tuple_filter = ixgbe_get_5tuple_filter,
+};
+
+/*
+ * dev_ops for virtual function; only the bare necessities for basic VF
+ * operation have been implemented
+ */
+static struct eth_dev_ops ixgbevf_eth_dev_ops = {
+
+ .dev_configure = ixgbevf_dev_configure,
+ .dev_start = ixgbevf_dev_start,
+ .dev_stop = ixgbevf_dev_stop,
+ .link_update = ixgbe_dev_link_update,
+ .stats_get = ixgbevf_dev_stats_get,
+ .stats_reset = ixgbevf_dev_stats_reset,
+ .dev_close = ixgbevf_dev_close,
+ .dev_infos_get = ixgbevf_dev_info_get,
+ .mtu_set = ixgbevf_dev_set_mtu,
+ .vlan_filter_set = ixgbevf_vlan_filter_set,
+ .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
+ .vlan_offload_set = ixgbevf_vlan_offload_set,
+ .rx_queue_setup = ixgbe_dev_rx_queue_setup,
+ .rx_queue_release = ixgbe_dev_rx_queue_release,
+ .tx_queue_setup = ixgbe_dev_tx_queue_setup,
+ .tx_queue_release = ixgbe_dev_tx_queue_release,
+ .mac_addr_add = ixgbevf_add_mac_addr,
+ .mac_addr_remove = ixgbevf_remove_mac_addr,
+};
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   Pointer to the structure rte_eth_dev to read from.
+ * @param link
+ *   Pointer to the buffer where the link status is stored.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   Pointer to the structure rte_eth_dev to write to.
+ * @param link
+ *   Pointer to the rte_eth_link structure holding the new link status.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h.
+ */
+static inline int
+ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+ switch (hw->phy.type) {
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static inline int32_t
+ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
+{
+ uint32_t ctrl_ext;
+ int32_t status;
+
+ status = ixgbe_reset_hw(hw);
+
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+static inline void
+ixgbe_enable_intr(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
+ */
+static void
+ixgbe_disable_intr(struct ixgbe_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
+ }
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * This function resets queue statistics mapping registers.
+ * From Niantic datasheet, Initialization of Statistics section:
+ * "...if software requires the queue counters, the RQSMR and TQSM registers
+ * must be re-programmed following a device reset.
+ */
+static void
+ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
+{
+ uint32_t i;
+
+ for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
+ }
+}
+
+
+static int
+ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx)
+{
+#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
+#define NB_QMAP_FIELDS_PER_QSM_REG 4
+#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
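+/* Each 32-bit RQSMR/TQSM register packs four 8-bit queue-to-stat-index fields. */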
+
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct ixgbe_stat_mapping_registers *stat_mappings =
+ IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
+ uint32_t qsmr_mask = 0;
+ uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
+ uint32_t q_map;
+ uint8_t n, offset;
+
+ if ((hw->mac.type != ixgbe_mac_82599EB) &&
+ (hw->mac.type != ixgbe_mac_X540) &&
+ (hw->mac.type != ixgbe_mac_X550) &&
+ (hw->mac.type != ixgbe_mac_X550EM_x))
+ return -ENOSYS;
+
+ PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
+ (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+ queue_id, stat_idx);
+
+ n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
+ if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
+ PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
+ return -EIO;
+ }
+ offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
+
+ /* Now clear any previous stat_idx set */
+ clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+ if (!is_rx)
+ stat_mappings->tqsm[n] &= ~clearing_mask;
+ else
+ stat_mappings->rqsmr[n] &= ~clearing_mask;
+
+ q_map = (uint32_t)stat_idx;
+ q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
+ qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+ if (!is_rx)
+ stat_mappings->tqsm[n] |= qsmr_mask;
+ else
+ stat_mappings->rqsmr[n] |= qsmr_mask;
+
+ PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
+ (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+ queue_id, stat_idx);
+ PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+ is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
+
+ /* Now write the mapping in the appropriate register */
+ if (is_rx) {
+ PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
+ stat_mappings->rqsmr[n], n);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
+ }
+ else {
+ PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
+ stat_mappings->tqsm[n], n);
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
+ }
+ return 0;
+}
+
+static void
+ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
+{
+ struct ixgbe_stat_mapping_registers *stat_mappings =
+ IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+
+ /* write whatever was in stat mapping table to the NIC */
+ for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
+ /* rx */
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
+
+ /* tx */
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
+ }
+}
+
+static void
+ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+{
+ uint8_t i;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+
+ dcb_config->num_tcs.pg_tcs = dcb_max_tc;
+ dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
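+ /*
+  * Give each TC 100/dcb_max_tc percent of the bandwidth, plus one extra
+  * percent on odd TCs, so that with the default 8 TCs the per-TC shares
+  * (12, 13, 12, 13, ...) sum to exactly 100.
+  */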
+ for (i = 0; i < dcb_max_tc; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100/dcb_max_tc + (i & 1));
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100/dcb_max_tc + (i & 1));
+ tc->pfc = ixgbe_dcb_pfc_disabled;
+ }
+
+ /* Initialize default user to priority mapping, UPx->TC0 */
+ tc = &dcb_config->tc_config[0];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+ for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
+ dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
+ dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
+ }
+ dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
+ dcb_config->pfc_mode_enable = false;
+ dcb_config->vt_mode = true;
+ dcb_config->round_robin_enable = false;
+ /* support all DCB capabilities in 82599 */
+ dcb_config->support.capabilities = 0xFF;
+
+ /* we only support 4 TCs for X540, X550 */
+ if (hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x) {
+ dcb_config->num_tcs.pg_tcs = 4;
+ dcb_config->num_tcs.pfc_tcs = 4;
+ }
+}
+
+/*
+ * Ensure that all locks are released before first NVM or PHY access
+ */
+static void
+ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
+{
+ uint16_t mask;
+
+ /*
+ * Phy lock should not fail in this early stage. If this is the case,
+ * it is due to an improper exit of the application.
+ * So force the release of the faulty lock. Release of common lock
+ * is done automatically by swfw_sync function.
+ */
+ mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
+ if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+ PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
+ }
+ ixgbe_release_swfw_semaphore(hw, mask);
+
+ /*
+ * These are more tricky since they are common to all ports; but the
+ * swfw_sync retries last long enough (1s) to be almost sure that if
+ * the lock cannot be taken it is due to an improper lock of the
+ * semaphore.
+ */
+ mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
+ if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+ PMD_DRV_LOG(DEBUG, "SWFW common locks released");
+ }
+ ixgbe_release_swfw_semaphore(hw, mask);
+}
+
+/*
+ * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
+ * It returns 0 on success.
+ */
+static int
+eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct ixgbe_vfta * shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+ struct ixgbe_hwstrip *hwstrip =
+ IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
+ struct ixgbe_dcb_config *dcb_config =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
+ uint32_t ctrl_ext;
+ uint16_t csum;
+ int diag, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &ixgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX and TX function.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ struct igb_tx_queue *txq;
+ /* TX queue function in primary, set by last queue initialized;
+ * Tx queue may not have been initialized by the primary process */
+ if (eth_dev->data->tx_queues) {
+ txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
+ set_tx_function(eth_dev, txq);
+ } else {
+ /* Use default TX function if we get here */
+ PMD_INIT_LOG(INFO, "No TX queues configured yet. "
+ "Using default TX function.");
+ }
+
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ return 0;
+ }
+ pci_dev = eth_dev->pci_dev;
+
+ /* Vendor and Device ID need to be set before init of shared code */
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->allow_unsupported_sfp = 1;
+
+ /* Initialize the shared code (base driver) */
+#ifdef RTE_NIC_BYPASS
+ diag = ixgbe_bypass_init_shared_code(hw);
+#else
+ diag = ixgbe_init_shared_code(hw);
+#endif /* RTE_NIC_BYPASS */
+
+ if (diag != IXGBE_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
+ return -EIO;
+ }
+
+ /* pick up the PCI bus settings for reporting later */
+ ixgbe_get_bus_info(hw);
+
+ /* Unlock any pending hardware semaphore */
+ ixgbe_swfw_lock_reset(hw);
+
+ /* Initialize DCB configuration*/
+ memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
+ ixgbe_dcb_init(hw,dcb_config);
+ /* Get Hardware Flow Control setting */
+ hw->fc.requested_mode = ixgbe_fc_full;
+ hw->fc.current_mode = ixgbe_fc_full;
+ hw->fc.pause_time = IXGBE_FC_PAUSE;
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ hw->fc.low_water[i] = IXGBE_FC_LO;
+ hw->fc.high_water[i] = IXGBE_FC_HI;
+ }
+ hw->fc.send_xon = 1;
+
+ /* Make sure we have a good EEPROM before we read from it */
+ diag = ixgbe_validate_eeprom_checksum(hw, &csum);
+ if (diag != IXGBE_SUCCESS) {
+ PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
+ return -EIO;
+ }
+
+#ifdef RTE_NIC_BYPASS
+ diag = ixgbe_bypass_init_hw(hw);
+#else
+ diag = ixgbe_init_hw(hw);
+#endif /* RTE_NIC_BYPASS */
+
+ /*
+ * Devices with copper phys will fail to initialise if ixgbe_init_hw()
+ * is called too soon after the kernel driver unbinding/binding occurs.
+ * The failure occurs in ixgbe_identify_phy_generic() for all devices,
+ * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
+ * also called. See ixgbe_identify_phy_82599(). The reason for the
+ * failure is not known, and only occurs when virtualisation features
+ * are disabled in the bios. A delay of 100ms was found to be enough by
+ * trial-and-error, and is doubled to be safe.
+ */
+ if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
+ rte_delay_ms(200);
+ diag = ixgbe_init_hw(hw);
+ }
+
+ if (diag == IXGBE_ERR_EEPROM_VERSION) {
+ PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
+ "LOM. Please be aware there may be issues associated "
+ "with your hardware.");
+ PMD_INIT_LOG(ERR, "If you are experiencing problems "
+ "please contact your Intel or hardware representative "
+ "who provided you with this hardware.");
+ } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
+ if (diag) {
+ PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
+ return -EIO;
+ }
+
+ /* disable interrupt */
+ ixgbe_disable_intr(hw);
+
+ /* reset mappings for queue statistics hw counters */
+ ixgbe_reset_qstat_mappings(hw);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+ hw->mac.num_rar_entries, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ return -ENOMEM;
+ }
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ /* Allocate memory for storing hash filter MAC addresses */
+ eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+ IXGBE_VMDQ_NUM_UC_MAC, 0);
+ if (eth_dev->data->hash_mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ return -ENOMEM;
+ }
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* initialize the hw strip bitmap */
+ memset(hwstrip, 0, sizeof(*hwstrip));
+
+ /* initialize PF if max_vfs not zero */
+ ixgbe_pf_host_init(eth_dev);
+
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ /* let hardware know driver is loaded */
+ ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
+
+ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+ PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
+ (int) hw->mac.type, (int) hw->phy.type,
+ (int) hw->phy.sfp_type);
+ else
+ PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
+ (int) hw->mac.type, (int) hw->phy.type);
+
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ rte_intr_callback_register(&(pci_dev->intr_handle),
+ ixgbe_dev_interrupt_handler, (void *)eth_dev);
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(&(pci_dev->intr_handle));
+
+ /* enable support intr */
+ ixgbe_enable_intr(eth_dev);
+
+ return 0;
+}
+
+
+/*
+ * Negotiate mailbox API version with the PF.
+ * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
+ * Then we try to negotiate starting with the most recent one.
+ * If all negotiation attempts fail, then we will proceed with
+ * the default one (ixgbe_mbox_api_10).
+ */
+static void
+ixgbevf_negotiate_api(struct ixgbe_hw *hw)
+{
+ int32_t i;
+
+ /* start with highest supported, proceed down */
+ static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+ ixgbe_mbox_api_11,
+ ixgbe_mbox_api_10,
+ };
+
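+ /* the loop body is intentionally empty: it simply steps through
+ * sup_ver[] until the PF accepts a version or the list is exhausted */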
+ for (i = 0;
+ i != RTE_DIM(sup_ver) &&
+ ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
+ i++)
+ ;
+}
+
+static void
+generate_random_mac_addr(struct ether_addr *mac_addr)
+{
+ uint64_t random;
+
+ /* Set Organizationally Unique Identifier (OUI) prefix. */
+ mac_addr->addr_bytes[0] = 0x00;
+ mac_addr->addr_bytes[1] = 0x09;
+ mac_addr->addr_bytes[2] = 0xC0;
+ /* Force indication of locally assigned MAC address. */
+ mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
+ /* Generate the last 3 bytes of the MAC address with a random number. */
+ random = rte_rand();
+ memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
+/*
+ * Virtual Function device init
+ */
+static int
+eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev)
+{
+ int diag;
+ uint32_t tc, tcs;
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct ixgbe_vfta * shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+ struct ixgbe_hwstrip *hwstrip =
+ IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
+ struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ return 0;
+ }
+
+ pci_dev = eth_dev->pci_dev;
+
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* initialize the hw strip bitmap */
+ memset(hwstrip, 0, sizeof(*hwstrip));
+
+ /* Initialize the shared code (base driver) */
+ diag = ixgbe_init_shared_code(hw);
+ if (diag != IXGBE_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
+ return -EIO;
+ }
+
+ /* init_mailbox_params */
+ hw->mbx.ops.init_params(hw);
+
+ /* Disable the interrupts for VF */
+ ixgbevf_intr_disable(hw);
+
+ hw->mac.num_rar_entries = 128; /* The maximum supported by the underlying PF */
+ diag = hw->mac.ops.reset_hw(hw);
+
+ /*
+ * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
+ * the underlying PF driver has not assigned a MAC address to the VF.
+ * In this case, assign a random MAC address.
+ */
+ if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
+ PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+ return (diag);
+ }
+
+ /* negotiate mailbox API version to use with the PF. */
+ ixgbevf_negotiate_api(hw);
+
+ /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
+ ixgbevf_get_queues(hw, &tcs, &tc);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
+ hw->mac.num_rar_entries, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ return -ENOMEM;
+ }
+
+ /* Generate a random MAC address, if none was assigned by PF. */
+ if (is_zero_ether_addr(perm_addr)) {
+ generate_random_mac_addr(perm_addr);
+ diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
+ if (diag) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ return diag;
+ }
+ PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+ PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ perm_addr->addr_bytes[0],
+ perm_addr->addr_bytes[1],
+ perm_addr->addr_bytes[2],
+ perm_addr->addr_bytes[3],
+ perm_addr->addr_bytes[4],
+ perm_addr->addr_bytes[5]);
+ }
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
+
+ /* reset the hardware with the new settings */
+ diag = hw->mac.ops.start_hw(hw);
+ switch (diag) {
+ case 0:
+ break;
+
+ default:
+ PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+ return (-EIO);
+ }
+
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id, "ixgbe_mac_82599_vf");
+
+ return 0;
+}
+
+static struct eth_driver rte_ixgbe_pmd = {
+ {
+ .name = "rte_ixgbe_pmd",
+ .id_table = pci_id_ixgbe_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ },
+ .eth_dev_init = eth_ixgbe_dev_init,
+ .dev_private_size = sizeof(struct ixgbe_adapter),
+};
+
+/*
+ * virtual function driver struct
+ */
+static struct eth_driver rte_ixgbevf_pmd = {
+ {
+ .name = "rte_ixgbevf_pmd",
+ .id_table = pci_id_ixgbevf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ },
+ .eth_dev_init = eth_ixgbevf_dev_init,
+ .dev_private_size = sizeof(struct ixgbe_adapter),
+};
+
+/*
+ * Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
+ */
+static int
+rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eth_driver_register(&rte_ixgbe_pmd);
+ return 0;
+}
+
+/*
+ * VF Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
+ */
+static int
+rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eth_driver_register(&rte_ixgbevf_pmd);
+ return (0);
+}
+
+static int
+ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vfta * shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
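+ /* The VFTA is an array of 128 32-bit registers: bits 11:5 of the
+ * VLAN id select the register (vid_idx) and bits 4:0 select the
+ * bit inside it (vid_bit). */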
+ vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
+static void
+ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ if (on)
+ ixgbe_vlan_hw_strip_enable(dev, queue);
+ else
+ ixgbe_vlan_hw_strip_disable(dev, queue);
+}
+
+static void
+ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Only the high 16 bits are valid */
+ IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+}
+
+void
+ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vlnctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Disable */
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+}
+
+void
+ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vfta * shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vlnctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Enable */
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
+ /* write whatever is in local vfta copy */
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
+}
+
+static void
+ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
+{
+ struct ixgbe_hwstrip *hwstrip =
+ IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
+
+ if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ return;
+
+ if (on)
+ IXGBE_SET_HWSTRIP(hwstrip, queue);
+ else
+ IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
+}
+
+static void
+ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ /* No queue level support */
+ PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
+ return;
+ }
+ else {
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+ }
+ /* record this setting in the per-queue HW strip bitmap */
+ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
+}
+
+static void
+ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ /* No queue level supported */
+ PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
+ return;
+ }
+ else {
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+ }
+ /* record this setting in the per-queue HW strip bitmap */
+ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
+}
+
+void
+ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl &= ~IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ }
+ else {
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+
+ /* record this setting in the per-queue HW strip bitmap */
+ ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
+ }
+ }
+}
+
+void
+ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl |= IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ }
+ else {
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+
+ /* record this setting in the per-queue HW strip bitmap */
+ ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
+ }
+ }
+}
+
+static void
+ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* DMATXCTRL: Generic Double VLAN Disable */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ ctrl &= ~IXGBE_DMATXCTL_GDV;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
+
+ /* CTRL_EXT: Global Double VLAN Disable */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl &= ~IXGBE_EXTENDED_VLAN;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
+
+}
+
+static void
+ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* DMATXCTRL: Generic Double VLAN Enable */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ ctrl |= IXGBE_DMATXCTL_GDV;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
+
+ /* CTRL_EXT: Global Double VLAN Enable */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl |= IXGBE_EXTENDED_VLAN;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
+
+ /*
+ * The VET EXT field of the EXVET register defaults to 0x8100, so no
+ * change is needed. The same applies to the VT field of DMATXCTL.
+ */
+}
+
+static void
+ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ if(mask & ETH_VLAN_STRIP_MASK){
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ ixgbe_vlan_hw_strip_enable_all(dev);
+ else
+ ixgbe_vlan_hw_strip_disable_all(dev);
+ }
+
+ if(mask & ETH_VLAN_FILTER_MASK){
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ ixgbe_vlan_hw_filter_enable(dev);
+ else
+ ixgbe_vlan_hw_filter_disable(dev);
+ }
+
+ if(mask & ETH_VLAN_EXTEND_MASK){
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ ixgbe_vlan_hw_extend_enable(dev);
+ else
+ ixgbe_vlan_hw_extend_disable(dev);
+ }
+}
+
+static void
+ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+ uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+}
+
+static int
+ixgbe_dev_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* set flag to update link status after init */
+ intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+
+ return 0;
+}
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+ixgbe_dev_start(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ int err, link_up = 0, negotiate = 0;
+ uint32_t speed = 0;
+ int mask = 0;
+ int status;
+ uint16_t vf, idx;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* IXGBE devices don't support half duplex */
+ if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
+ (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
+ PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
+ dev->data->dev_conf.link_duplex,
+ dev->data->port_id);
+ return -EINVAL;
+ }
+
+ /* stop adapter */
+ hw->adapter_stopped = FALSE;
+ ixgbe_stop_adapter(hw);
+
+ /* reinitialize adapter
+ * this calls reset and start */
+ status = ixgbe_pf_reset_hw(hw);
+ if (status != 0)
+ return -1;
+ hw->mac.ops.start_hw(hw);
+ hw->mac.get_link_status = true;
+
+ /* configure PF module if SRIOV enabled */
+ ixgbe_pf_host_configure(dev);
+
+ /* initialize transmission unit */
+ ixgbe_dev_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ err = ixgbe_dev_rx_init(dev);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ goto error;
+ }
+
+ ixgbe_dev_rxtx_start(dev);
+
+ if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
+ err = hw->mac.ops.setup_sfp(hw);
+ if (err)
+ goto error;
+ }
+
+ /* Turn on the laser */
+ ixgbe_enable_tx_laser(hw);
+
+ /* Skip link setup if loopback mode is enabled for 82599. */
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ goto skip_link_setup;
+
+ err = ixgbe_check_link(hw, &speed, &link_up, 0);
+ if (err)
+ goto error;
+ err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
+ if (err)
+ goto error;
+
+ switch(dev->data->dev_conf.link_speed) {
+ case ETH_LINK_SPEED_AUTONEG:
+ speed = (hw->mac.type != ixgbe_mac_82598EB) ?
+ IXGBE_LINK_SPEED_82599_AUTONEG :
+ IXGBE_LINK_SPEED_82598_AUTONEG;
+ break;
+ case ETH_LINK_SPEED_100:
+ /*
+ * Invalid for 82598 but error will be detected by
+ * ixgbe_setup_link()
+ */
+ speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case ETH_LINK_SPEED_1000:
+ speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case ETH_LINK_SPEED_10000:
+ speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
+ dev->data->dev_conf.link_speed,
+ dev->data->port_id);
+ goto error;
+ }
+
+ err = ixgbe_setup_link(hw, speed, link_up);
+ if (err)
+ goto error;
+
+skip_link_setup:
+
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ ixgbe_dev_lsc_interrupt_setup(dev);
+
+ /* re-enable interrupts that were disabled by the hw reset */
+ ixgbe_enable_intr(dev);
+
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ ETH_VLAN_EXTEND_MASK;
+ ixgbe_vlan_offload_set(dev, mask);
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ /* Enable vlan filtering for VMDq */
+ ixgbe_vmdq_vlan_hw_filter_enable(dev);
+ }
+
+ /* Configure DCB hw */
+ ixgbe_configure_dcb(dev);
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ err = ixgbe_fdir_configure(dev);
+ if (err)
+ goto error;
+ }
+
+ /* Restore vf rate limit */
+ if (vfinfo != NULL) {
+ for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+ for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+ if (vfinfo[vf].tx_rate[idx] != 0)
+ ixgbe_set_vf_rate_limit(dev, vf,
+ vfinfo[vf].tx_rate[idx],
+ 1 << idx);
+ }
+
+ ixgbe_restore_statistics_mapping(dev);
+
+ return (0);
+
+error:
+ PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
+ ixgbe_dev_clear_queues(dev);
+ return -EIO;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void
+ixgbe_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ int vf;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* disable interrupts */
+ ixgbe_disable_intr(hw);
+
+ /* reset the NIC */
+ ixgbe_pf_reset_hw(hw);
+ hw->adapter_stopped = FALSE;
+
+ /* stop adapter */
+ ixgbe_stop_adapter(hw);
+
+ for (vf = 0; vfinfo != NULL &&
+ vf < dev->pci_dev->max_vfs; vf++)
+ vfinfo[vf].clear_to_send = false;
+
+ /* Turn off the laser */
+ ixgbe_disable_tx_laser(hw);
+
+ ixgbe_dev_clear_queues(dev);
+
+ /* Clear stored conf */
+ dev->data->scattered_rx = 0;
+
+ /* Clear recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+}
+
+/*
+ * Set device link up: enable tx laser.
+ */
+static int
+ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+ if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ /* Not supported in bypass mode */
+ PMD_INIT_LOG(ERR, "Set link up is not supported "
+ "by device id 0x%x", hw->device_id);
+ return -ENOTSUP;
+ }
+#endif
+ /* Turn on the laser */
+ ixgbe_enable_tx_laser(hw);
+ return 0;
+ }
+
+ PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
+ hw->device_id);
+ return -ENOTSUP;
+}
+
+/*
+ * Set device link down: disable tx laser.
+ */
+static int
+ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+ if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ /* Not supported in bypass mode */
+ PMD_INIT_LOG(ERR, "Set link down is not supported "
+ "by device id 0x%x", hw->device_id);
+ return -ENOTSUP;
+ }
+#endif
+ /* Turn off the laser */
+ ixgbe_disable_tx_laser(hw);
+ return 0;
+ }
+
+ PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
+ hw->device_id);
+ return -ENOTSUP;
+}
+
+/*
+ * Reset and stop device.
+ */
+static void
+ixgbe_dev_close(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ ixgbe_pf_reset_hw(hw);
+
+ ixgbe_dev_stop(dev);
+ hw->adapter_stopped = 1;
+
+ ixgbe_disable_pcie_master(hw);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+}
+
+/*
+ * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
+ */
+static void
+ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_stats *hw_stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ uint32_t bprc, lxon, lxoff, total;
+ uint64_t total_missed_rx, total_qbrc, total_qprc;
+ unsigned i;
+
+ total_missed_rx = 0;
+ total_qbrc = 0;
+ total_qprc = 0;
+
+ hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+ hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+ hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
+ hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
+
+ for (i = 0; i < 8; i++) {
+ uint32_t mp;
+ mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+ /* global total per queue */
+ hw_stats->mpc[i] += mp;
+ /* Running comprehensive total for stats display */
+ total_missed_rx += hw_stats->mpc[i];
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ hw_stats->rnbc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ hw_stats->pxontxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ hw_stats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ hw_stats->pxofftxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ hw_stats->pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ hw_stats->pxon2offc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+ }
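+ /* per-queue byte counters are split across low/high registers:
+ * read the low half first, then fold in the high half shifted by 32 */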
+ for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
+ hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ hw_stats->qbrc[i] +=
+ ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
+ hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ hw_stats->qbtc[i] +=
+ ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
+ hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+
+ total_qprc += hw_stats->qprc[i];
+ total_qbrc += hw_stats->qbrc[i];
+ }
+ hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
+ hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
+ hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+
+ /* Note that gprc counts missed packets */
+ hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+ hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+ hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+ hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+ hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
+ hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ } else {
+ hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ /* 82598 only has a counter in the high register */
+ hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ }
+
+ /*
+ * Workaround: mprc hardware is incorrectly counting
+ * broadcasts, so for now we subtract those.
+ */
+ bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
+ hw_stats->bprc += bprc;
+ hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ hw_stats->mprc -= bprc;
+
+ hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+ hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+ hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+ hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+ hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+
+ lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+ hw_stats->lxontxc += lxon;
+ lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ hw_stats->lxofftxc += lxoff;
+ total = lxon + lxoff;
+
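+ /* subtract the XON/XOFF pause frames counted above from the Tx
+ * packet/byte counters so they reflect only data traffic */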
+ hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+ hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+ hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+ hw_stats->gptc -= total;
+ hw_stats->mptc -= total;
+ hw_stats->ptc64 -= total;
+ hw_stats->gotc -= total * ETHER_MIN_LEN;
+
+ hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+ hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+ hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+ hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+ hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+ hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+ hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+ hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+ hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
+ hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+ hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+ hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+ hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+ hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
+ hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+ hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
+ /* Only read FCOE on 82599 */
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+ hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+ hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+ hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+ hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+ }
+
+ if (stats == NULL)
+ return;
+
+ /* Fill out the rte_eth_stats statistics structure */
+ stats->ipackets = total_qprc;
+ stats->ibytes = total_qbrc;
+ stats->opackets = hw_stats->gptc;
+ stats->obytes = hw_stats->gotc;
+ stats->imcasts = hw_stats->mprc;
+
+ for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
+ stats->q_ipackets[i] = hw_stats->qprc[i];
+ stats->q_opackets[i] = hw_stats->qptc[i];
+ stats->q_ibytes[i] = hw_stats->qbrc[i];
+ stats->q_obytes[i] = hw_stats->qbtc[i];
+ stats->q_errors[i] = hw_stats->qprdc[i];
+ }
+
+ /* Rx Errors */
+ stats->ibadcrc = hw_stats->crcerrs;
+ stats->ibadlen = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
+ stats->imissed = total_missed_rx;
+ stats->ierrors = stats->ibadcrc +
+ stats->ibadlen +
+ stats->imissed +
+ hw_stats->illerrc + hw_stats->errbc;
+
+ /* Tx Errors */
+ stats->oerrors = 0;
+
+ /* XON/XOFF pause frames */
+ stats->tx_pause_xon = hw_stats->lxontxc;
+ stats->rx_pause_xon = hw_stats->lxonrxc;
+ stats->tx_pause_xoff = hw_stats->lxofftxc;
+ stats->rx_pause_xoff = hw_stats->lxoffrxc;
+
+ /* Flow Director Stats registers */
+ hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ stats->fdirmatch = hw_stats->fdirmatch;
+ stats->fdirmiss = hw_stats->fdirmiss;
+}
+
+static void
+ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw_stats *stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* HW registers are cleared on read */
+ ixgbe_dev_stats_get(dev, NULL);
+
+ /* Reset software totals */
+ memset(stats, 0, sizeof(*stats));
+}
+
+static void
+ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* Good Rx packet, include VF loopback */
+ UPDATE_VF_STAT(IXGBE_VFGPRC,
+ hw_stats->last_vfgprc, hw_stats->vfgprc);
+
+ /* Good Rx octets, include VF loopback */
+ UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+ hw_stats->last_vfgorc, hw_stats->vfgorc);
+
+ /* Good Tx packet, include VF loopback */
+ UPDATE_VF_STAT(IXGBE_VFGPTC,
+ hw_stats->last_vfgptc, hw_stats->vfgptc);
+
+ /* Good Tx octets, include VF loopback */
+ UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+ hw_stats->last_vfgotc, hw_stats->vfgotc);
+
+ /* Rx Multicast Packets */
+ UPDATE_VF_STAT(IXGBE_VFMPRC,
+ hw_stats->last_vfmprc, hw_stats->vfmprc);
+
+ if (stats == NULL)
+ return;
+
+ memset(stats, 0, sizeof(*stats));
+ stats->ipackets = hw_stats->vfgprc;
+ stats->ibytes = hw_stats->vfgorc;
+ stats->opackets = hw_stats->vfgptc;
+ stats->obytes = hw_stats->vfgotc;
+ stats->imcasts = hw_stats->vfmprc;
+}
+
+static void
+ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ /* Sync HW register to the last stats */
+ ixgbevf_dev_stats_get(dev, NULL);
+
+ /* reset the driver's current stats counters */
+ hw_stats->vfgprc = 0;
+ hw_stats->vfgorc = 0;
+ hw_stats->vfgptc = 0;
+ hw_stats->vfgotc = 0;
+ hw_stats->vfmprc = 0;
+
+}
+
+static void
+ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+ dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
+ dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+ dev_info->max_vfs = dev->pci_dev->max_vfs;
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ dev_info->max_vmdq_pools = ETH_16_POOLS;
+ else
+ dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+ .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+ .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+ .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+ .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ };
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+}
+
+static void
+ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+ dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
+ dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+ dev_info->max_vfs = dev->pci_dev->max_vfs;
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ dev_info->max_vmdq_pools = ETH_16_POOLS;
+ else
+ dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+ .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+ .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+ .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+ .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ };
+}
+
+/* return 0 if the link status changed, -1 if it did not change */
+static int
+ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link, old;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ int link_up;
+ int diag;
+
+ link.link_status = 0;
+ link.link_speed = 0;
+ link.link_duplex = 0;
+ memset(&old, 0, sizeof(old));
+ rte_ixgbe_dev_atomic_read_link_status(dev, &old);
+
+ /* check if it needs to wait to complete, if lsc interrupt is enabled */
+ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
+ diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
+ else
+ diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
+ if (diag != 0) {
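+ /* the link registers could not be read: store a conservative
+ * 100 Mb/s half-duplex, link-down state */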
+ link.link_speed = ETH_LINK_SPEED_100;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+ if (link.link_status == old.link_status)
+ return -1;
+ return 0;
+ }
+
+ if (link_speed == IXGBE_LINK_SPEED_UNKNOWN &&
+ !hw->mac.get_link_status) {
+ memcpy(&link, &old, sizeof(link));
+ return -1;
+ }
+
+ if (link_up == 0) {
+ rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+ if (link.link_status == old.link_status)
+ return -1;
+ return 0;
+ }
+ link.link_status = 1;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ switch (link_speed) {
+ default:
+ case IXGBE_LINK_SPEED_UNKNOWN:
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_speed = ETH_LINK_SPEED_100;
+ break;
+
+ case IXGBE_LINK_SPEED_100_FULL:
+ link.link_speed = ETH_LINK_SPEED_100;
+ break;
+
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ link.link_speed = ETH_LINK_SPEED_1000;
+ break;
+
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ link.link_speed = ETH_LINK_SPEED_10000;
+ break;
+ }
+ rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+
+ if (link.link_status == old.link_status)
+ return -1;
+
+ return 0;
+}
+
+static void
+ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fctrl;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+static void
+ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fctrl;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl &= (~IXGBE_FCTRL_UPE);
+ if (dev->data->all_multicast == 1)
+ fctrl |= IXGBE_FCTRL_MPE;
+ else
+ fctrl &= (~IXGBE_FCTRL_MPE);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+static void
+ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fctrl;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+static void
+ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fctrl;
+
+ if (dev->data->promiscuous == 1)
+ return; /* must remain in all_multicast mode */
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl &= (~IXGBE_FCTRL_MPE);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It is called only once, during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ ixgbe_dev_link_status_print(dev);
+ intr->mask |= IXGBE_EICR_LSC;
+
+ return 0;
+}
+
+/*
+ * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ /* clear all cause mask */
+ ixgbe_disable_intr(hw);
+
+ /* read-on-clear nic registers here */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+ PMD_DRV_LOG(INFO, "eicr %x", eicr);
+
+ intr->flags = 0;
+ if (eicr & IXGBE_EICR_LSC) {
+ /* set flag for async link update */
+ intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ }
+
+ if (eicr & IXGBE_EICR_MAILBOX)
+ intr->flags |= IXGBE_FLAG_MAILBOX;
+
+ return 0;
+}
+
+/**
+ * It gets and then prints the link status.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * void
+ */
+static void
+ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+
+ memset(&link, 0, sizeof(link));
+ rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ if (link.link_status) {
+ PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+ (int)(dev->data->port_id),
+ (unsigned)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_INIT_LOG(INFO, " Port %d: Link Down",
+ (int)(dev->data->port_id));
+ }
+ PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+ dev->pci_dev->addr.domain,
+ dev->pci_dev->addr.bus,
+ dev->pci_dev->addr.devid,
+ dev->pci_dev->addr.function);
+}
+
+/*
+ * It executes link_update after knowing an interrupt occurred.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ int64_t timeout;
+ struct rte_eth_link link;
+ int intr_enable_delay = false;
+
+ PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
+
+ if (intr->flags & IXGBE_FLAG_MAILBOX) {
+ ixgbe_pf_mbx_process(dev);
+ intr->flags &= ~IXGBE_FLAG_MAILBOX;
+ }
+
+ if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+ /* get the link status before the update, to predict the transition below */
+ memset(&link, 0, sizeof(link));
+ rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+
+ ixgbe_dev_link_update(dev, 0);
+
+ /* link was down, so it is likely coming up */
+ if (!link.link_status)
+ /* handle it 1 sec later, waiting for it to stabilize */
+ timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
+ /* link was up, so it is likely going down */
+ else
+ /* handle it 4 sec later, waiting for it to stabilize */
+ timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+ ixgbe_dev_link_status_print(dev);
+
+ intr_enable_delay = true;
+ }
+
+ if (intr_enable_delay) {
+ if (rte_eal_alarm_set(timeout * 1000,
+ ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+ PMD_DRV_LOG(ERR, "Error setting alarm");
+ } else {
+ PMD_DRV_LOG(DEBUG, "enable intr immediately");
+ ixgbe_enable_intr(dev);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+ }
+
+
+ return 0;
+}
+
+/**
+ * Interrupt handler registered as an alarm callback for delayed handling of a
+ * specific interrupt, waiting for the NIC state to stabilize. Since the ixgbe
+ * interrupt state is not stable right after the link goes down,
+ * it needs to wait 4 seconds to get a stable status.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+ixgbe_dev_interrupt_delayed_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t eicr;
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_pf_mbx_process(dev);
+
+ if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+ ixgbe_dev_link_update(dev, 0);
+ intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+ ixgbe_dev_link_status_print(dev);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ }
+
+ PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
+ ixgbe_enable_intr(dev);
+ rte_intr_enable(&(dev->pci_dev->intr_handle));
+}
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ ixgbe_dev_interrupt_get_status(dev);
+ ixgbe_dev_interrupt_action(dev);
+}
+
+static int
+ixgbe_dev_led_on(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+ixgbe_dev_led_off(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mflcn_reg;
+ uint32_t fccfg_reg;
+ int rx_pause;
+ int tx_pause;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ fc_conf->pause_time = hw->fc.pause_time;
+ fc_conf->high_water = hw->fc.high_water[0];
+ fc_conf->low_water = hw->fc.low_water[0];
+ fc_conf->send_xon = hw->fc.send_xon;
+ fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+ /*
+ * Return rx_pause status according to actual setting of
+ * MFLCN register.
+ */
+ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
+ rx_pause = 1;
+ else
+ rx_pause = 0;
+
+ /*
+ * Return tx_pause status according to actual setting of
+ * FCCFG register.
+ */
+ fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
+ tx_pause = 1;
+ else
+ tx_pause = 0;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static int
+ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct ixgbe_hw *hw;
+ int err;
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint32_t mflcn;
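+ /* indexed directly by fc_conf->mode, so the order must match the
+ * rte_eth_fc_mode enum (NONE, RX_PAUSE, TX_PAUSE, FULL) */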
+ enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
+ ixgbe_fc_none,
+ ixgbe_fc_rx_pause,
+ ixgbe_fc_tx_pause,
+ ixgbe_fc_full
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
+ return -ENOTSUP;
+ rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+ /*
+ * Reserve at least one Ethernet frame for the watermark;
+ * high_water/low_water are in kilobytes for ixgbe
+ */
+ max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+ if ((fc_conf->high_water > max_high_water) ||
+ (fc_conf->high_water < fc_conf->low_water)) {
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+ PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
+ return (-EINVAL);
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
+ hw->fc.pause_time = fc_conf->pause_time;
+ hw->fc.high_water[0] = fc_conf->high_water;
+ hw->fc.low_water[0] = fc_conf->low_water;
+ hw->fc.send_xon = fc_conf->send_xon;
+
+ err = ixgbe_fc_enable(hw);
+
+ /* Not negotiated is not an error case */
+ if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
+
+ /* check if we want to forward MAC frames - driver doesn't have native
+ * capability to do that, so we'll write the registers ourselves */
+
+ mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+ /* set or clear MFLCN.PMCF bit depending on configuration */
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ mflcn |= IXGBE_MFLCN_PMCF;
+ else
+ mflcn &= ~IXGBE_MFLCN_PMCF;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+ }
+
+ PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
+ return -EIO;
+}
+
+/**
+ * ixgbe_dcb_pfc_enable_generic - Enable priority flow control
+ * @hw: pointer to hardware structure
+ * @tc_num: traffic class number
+ * Enable flow control according to the current settings.
+ */
+static int
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
+{
+ int ret_val = 0;
+ uint32_t mflcn_reg, fccfg_reg;
+ uint32_t reg;
+ uint32_t fcrtl, fcrth;
+ uint8_t i;
+ uint8_t nb_rx_en;
+
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ /* High/Low water can not be 0 */
+ if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
+ PMD_INIT_LOG(ERR, "Invalid water mark configuration");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
+ PMD_INIT_LOG(ERR, "Invalid water mark configuration");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ /* Negotiate the fc mode to use */
+ ixgbe_fc_autoneg(hw);
+
+ /* Disable any previous flow control settings */
+ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
+
+ fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
+
+ switch (hw->fc.current_mode) {
+ case ixgbe_fc_none:
+ /*
+ * If more than one RX Priority Flow Control is enabled,
+ * then TX pause cannot be disabled
+ */
+ nb_rx_en = 0;
+ for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+ if (reg & IXGBE_FCRTH_FCEN)
+ nb_rx_en++;
+ }
+ if (nb_rx_en > 1)
+ fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ mflcn_reg |= IXGBE_MFLCN_RPFCE;
+ /*
+ * If more than one RX Priority Flow Control is enabled,
+ * then TX pause cannot be disabled
+ */
+ nb_rx_en = 0;
+ for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+ if (reg & IXGBE_FCRTH_FCEN)
+ nb_rx_en++;
+ }
+ if (nb_rx_en > 1)
+ fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ mflcn_reg |= IXGBE_MFLCN_RPFCE;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ mflcn_reg |= IXGBE_MFLCN_DPF;
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[tc_num]) {
+ fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
+ fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the maximum FCRTH value. This allows the Tx
+ * switch to function even under heavy Rx workloads.
+ */
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
+
+ /* Configure pause time (2 TCs per register) */
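+ /* replicate the 16-bit pause time into both halves of each 32-bit
+ * FCTTV register so a single write covers two traffic classes */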
+ reg = hw->fc.pause_time * 0x00010001;
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+ return ret_val;
+}
+
+static int
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
+
+ if(hw->mac.type != ixgbe_mac_82598EB) {
+ ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
+ }
+ return ret_val;
+}
+
+static int
+ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
+{
+ int err;
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint8_t tc_num;
+ uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_dcb_config *dcb_config =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+
+ enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
+ ixgbe_fc_none,
+ ixgbe_fc_rx_pause,
+ ixgbe_fc_tx_pause,
+ ixgbe_fc_full
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
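+ /* map the user priority to its traffic class via the CEE Rx map,
+ * then size the watermarks against that TC's Rx packet buffer */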
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
+ tc_num = map[pfc_conf->priority];
+ rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+ /*
+ * Reserve at least one Ethernet frame for the watermark;
+ * high_water/low_water are in kilobytes for ixgbe
+ */
+ max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+ if ((pfc_conf->fc.high_water > max_high_water) ||
+ (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+ PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
+ return (-EINVAL);
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
+ hw->fc.pause_time = pfc_conf->fc.pause_time;
+ hw->fc.send_xon = pfc_conf->fc.send_xon;
+ hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
+ hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
+
+ err = ixgbe_dcb_pfc_enable(dev,tc_num);
+
+ /* Not negotiated is not an error case */
+ if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
+ return 0;
+
+ PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
+ return -EIO;
+}
+
+static int
+ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ uint32_t reta, r;
+ uint16_t idx, shift;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
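+ /* the 128-entry redirection table is packed four 8-bit entries per
+ * 32-bit RETA register; update only the entries selected in the
+ * caller's mask and preserve the rest via read-modify-write */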
+ for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IXGBE_4_BIT_MASK);
+ if (!mask)
+ continue;
+ if (mask == IXGBE_4_BIT_MASK)
+ r = 0;
+ else
+ r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+ for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta |= reta_conf[idx].reta[shift + j] <<
+ (CHAR_BIT * j);
+ else
+ reta |= r & (IXGBE_8_BIT_MASK <<
+ (CHAR_BIT * j));
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ uint32_t reta;
+ uint16_t idx, shift;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IXGBE_4_BIT_MASK);
+ if (!mask)
+ continue;
+
+ reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+ for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+ if (mask & (0x1 << j))
+ reta_conf[idx].reta[shift + j] =
+ ((reta >> (CHAR_BIT * j)) &
+ IXGBE_8_BIT_MASK);
+ }
+ }
+
+ return 0;
+}
+
+static void
+ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t enable_addr = 1;
+
+ ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
+}
+
+static void
+ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ixgbe_clear_rar(hw, index);
+}
+
+static int
+ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ uint32_t hlreg0;
+ uint32_t maxfrs;
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev_info dev_info;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ ixgbe_dev_info_get(dev, &dev_info);
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* refuse mtu that requires the support of scattered packets when this
+ * feature has not been enabled before. */
+ if (!dev->data->scattered_rx &&
+ (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+
+ /* switch to jumbo mode if needed */
+ if (frame_size > ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+ } else {
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
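+ /* MAXFRS keeps the maximum frame size in its upper 16 bits; preserve
+  * the lower half and update only the size field. */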
+ maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+ maxfrs &= 0x0000FFFF;
+ maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+
+ return 0;
+}
+
+/*
+ * Virtual Function operations
+ */
+static void
+ixgbevf_intr_disable(struct ixgbe_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /* Clear the interrupt mask to stop interrupts from being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+static int
+ixgbevf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf* conf = &dev->data->dev_conf;
+
+ PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+ dev->data->port_id);
+
+ /*
+ * The VF has no ability to enable/disable HW CRC stripping;
+ * keep the behavior consistent with the host PF.
+ */
+#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
+ if (!conf->rxmode.hw_strip_crc) {
+ PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
+ conf->rxmode.hw_strip_crc = 1;
+ }
+#else
+ if (conf->rxmode.hw_strip_crc) {
+ PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
+ conf->rxmode.hw_strip_crc = 0;
+ }
+#endif
+
+ return 0;
+}
+
+static int
+ixgbevf_dev_start(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err, mask = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->mac.ops.reset_hw(hw);
+ hw->mac.get_link_status = true;
+
+ /* negotiate mailbox API version to use with the PF. */
+ ixgbevf_negotiate_api(hw);
+
+ ixgbevf_dev_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ err = ixgbevf_dev_rx_init(dev);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
+ ixgbe_dev_clear_queues(dev);
+ return err;
+ }
+
+ /* Set vfta */
+ ixgbevf_set_vfta_all(dev,1);
+
+ /* Set HW strip */
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ ETH_VLAN_EXTEND_MASK;
+ ixgbevf_vlan_offload_set(dev, mask);
+
+ ixgbevf_dev_rxtx_start(dev);
+
+ return 0;
+}
+
+static void
+ixgbevf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->adapter_stopped = TRUE;
+ ixgbe_stop_adapter(hw);
+
+ /*
+ * Clear what we set, but we still keep shadow_vfta to
+ * restore after device starts
+ */
+ ixgbevf_set_vfta_all(dev,0);
+
+ /* Clear stored conf */
+ dev->data->scattered_rx = 0;
+
+ ixgbe_dev_clear_queues(dev);
+}
+
+static void
+ixgbevf_dev_close(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ ixgbe_reset_hw(hw);
+
+ ixgbevf_dev_stop(dev);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+}
+
+static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vfta * shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ int i = 0, j = 0, vfta = 0, mask = 1;
+
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++){
+ vfta = shadow_vfta->vfta[i];
+ if(vfta){
+ mask = 1;
+ for (j = 0; j < 32; j++){
+ if(vfta & mask)
+ ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
+ mask<<=1;
+ }
+ }
+ }
+
+}
+
+static int
+ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vfta * shadow_vfta =
+ IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+ uint32_t vid_idx = 0;
+ uint32_t vid_bit = 0;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
+ ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
+ if(ret){
+ PMD_INIT_LOG(ERR, "Unable to set VF vlan");
+ return ret;
+ }
+ vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
+
+ /* Save what we set and restore it after device reset */
+ if (on)
+ shadow_vfta->vfta[vid_idx] |= vid_bit;
+ else
+ shadow_vfta->vfta[vid_idx] &= ~vid_bit;
+
+ return 0;
+}
+
+static void
+ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if(queue >= hw->mac.max_rx_queues)
+ return;
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ if(on)
+ ctrl |= IXGBE_RXDCTL_VME;
+ else
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
+ ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
+}
+
+static void
+ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t i;
+ int on = 0;
+
+ /* The VF only supports the HW VLAN strip feature; other offloads are not supported */
+ if(mask & ETH_VLAN_STRIP_MASK){
+ on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
+
+ for(i=0; i < hw->mac.max_rx_queues; i++)
+ ixgbevf_vlan_strip_queue_set(dev,i,on);
+ }
+}
+
+static int
+ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
+{
+ uint32_t reg_val;
+
+ /* we only need to do this if VMDq is enabled */
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
+ PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
+ return (-1);
+ }
+
+ return 0;
+}
+
+static uint32_t
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+{
+ uint32_t vector = 0;
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 4) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 3) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 2) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((uc_addr->addr_bytes[4]) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+ /* vector can only be 12 bits wide or the boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+static int
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
+ uint8_t on)
+{
+ uint32_t vector;
+ uint32_t uta_idx;
+ uint32_t reg_val;
+ uint32_t uta_shift;
+ uint32_t rc;
+ const uint32_t ixgbe_uta_idx_mask = 0x7F;
+ const uint32_t ixgbe_uta_bit_shift = 5;
+ const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
+ const uint32_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_uta_info *uta_info =
+ IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return (-ENOTSUP);
+
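+ /*
+  * The 12-bit UTA vector is split below: the upper 7 bits select one of
+  * the 128 32-bit UTA registers, the lower 5 bits select a bit within it.
+  */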
+ vector = ixgbe_uta_vector(hw,mac_addr);
+ uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
+ uta_shift = vector & ixgbe_uta_bit_mask;
+
+ rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
+ if(rc == on)
+ return 0;
+
+ reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
+ if (on) {
+ uta_info->uta_in_use++;
+ reg_val |= (bit1 << uta_shift);
+ uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
+ } else {
+ uta_info->uta_in_use--;
+ reg_val &= ~(bit1 << uta_shift);
+ uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
+
+ if (uta_info->uta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
+
+ return 0;
+}
+
+static int
+ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
+{
+ int i;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_uta_info *uta_info =
+ IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return (-ENOTSUP);
+
+ if(on) {
+ for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = ~0;
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
+ }
+ } else {
+ for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+ }
+ }
+ return 0;
+
+}
+
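+/*
+ * Translate the ETH_VMDQ_ACCEPT_* RX mode bits into the corresponding
+ * VMOLR register flags, preserving any bits already set in orig_val.
+ */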
+uint32_t
+ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+ uint32_t new_val = orig_val;
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+ new_val |= IXGBE_VMOLR_AUPE;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+ new_val |= IXGBE_VMOLR_ROMPE;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ new_val |= IXGBE_VMOLR_ROPE;
+ if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ new_val |= IXGBE_VMOLR_BAM;
+ if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ new_val |= IXGBE_VMOLR_MPE;
+
+ return new_val;
+}
+
+static int
+ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
+ uint16_t rx_mask, uint8_t on)
+{
+ int val = 0;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
+ " on 82599 hardware and newer");
+ return (-ENOTSUP);
+ }
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
+ val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
+
+ if (on)
+ vmolr |= val;
+ else
+ vmolr &= ~val;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
+
+ return 0;
+}
+
+static int
+ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+ uint32_t reg,addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
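+ /* VFRE(0) holds the receive-enable bits for pools 0-31, VFRE(1) for
+  * pools 32-63. */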
+ addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
+ reg = IXGBE_READ_REG(hw, addr);
+ val = bit1 << pool;
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr,reg);
+
+ return 0;
+}
+
+static int
+ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+ uint32_t reg,addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
+ addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
+ reg = IXGBE_READ_REG(hw, addr);
+ val = bit1 << pool;
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr,reg);
+
+ return 0;
+}
+
+static int
+ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
+ uint64_t pool_mask, uint8_t vlan_on)
+{
+ int ret = 0;
+ uint16_t pool_idx;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+ for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
+ if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
+ ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on)
+{
+ uint32_t mr_ctl,vlvf;
+ uint32_t mp_lsb = 0;
+ uint32_t mv_msb = 0;
+ uint32_t mv_lsb = 0;
+ uint32_t mp_msb = 0;
+ uint8_t i = 0;
+ int reg_index = 0;
+ uint64_t vlan_mask = 0;
+
+ const uint8_t pool_mask_offset = 32;
+ const uint8_t vlan_mask_offset = 32;
+ const uint8_t dst_pool_offset = 8;
+ const uint8_t rule_mr_offset = 4;
+ const uint8_t mirror_rule_mask= 0x0F;
+
+ struct ixgbe_mirror_info *mr_info =
+ (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
+ /* Check if vlan mask is valid */
+ if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
+ if (mirror_conf->vlan.vlan_mask == 0)
+ return (-EINVAL);
+ }
+
+ /* Check if vlan id is valid and find the corresponding VLAN ID index in VLVF */
+ if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+ for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+ /* find the pool VLAN filter index for this vlan id */
+ reg_index = ixgbe_find_vlvf_slot(hw,
+ mirror_conf->vlan.vlan_id[i]);
+ if(reg_index < 0)
+ return (-EINVAL);
+ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
+ if ((vlvf & IXGBE_VLVF_VIEN) &&
+ ((vlvf & IXGBE_VLVF_VLANID_MASK)
+ == mirror_conf->vlan.vlan_id[i]))
+ vlan_mask |= (1ULL << reg_index);
+ else
+ return (-EINVAL);
+ }
+ }
+
+ if (on) {
+ mv_lsb = vlan_mask & 0xFFFFFFFF;
+ mv_msb = vlan_mask >> vlan_mask_offset;
+
+ mr_info->mr_conf[rule_id].vlan.vlan_mask =
+ mirror_conf->vlan.vlan_mask;
+ for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+ if(mirror_conf->vlan.vlan_mask & (1ULL << i))
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
+ mirror_conf->vlan.vlan_id[i];
+ }
+ } else {
+ mv_lsb = 0;
+ mv_msb = 0;
+ mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+ for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+ }
+ }
+
+ /*
+ * If pool mirroring is enabled, write the related pool mask register;
+ * if it is disabled, clear the PFMRVM register.
+ */
+ if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+ if (on) {
+ mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
+ mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+ mr_info->mr_conf[rule_id].pool_mask =
+ mirror_conf->pool_mask;
+
+ } else {
+ mp_lsb = 0;
+ mp_msb = 0;
+ mr_info->mr_conf[rule_id].pool_mask = 0;
+ }
+ }
+
+ /* read mirror control register and recalculate it */
+ mr_ctl = IXGBE_READ_REG(hw,IXGBE_MRCTL(rule_id));
+
+ if (on) {
+ mr_ctl |= mirror_conf->rule_type_mask;
+ mr_ctl &= mirror_rule_mask;
+ mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+ } else
+ mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask);
+
+ mr_info->mr_conf[rule_id].rule_type_mask = (uint8_t)(mr_ctl & mirror_rule_mask);
+ mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
+
+ /* write mirror control register */
+ IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+ /* write pool mirror control register */
+ if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
+ mp_msb);
+ }
+ /* write VLAN mirror control register */
+ if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
+ mv_msb);
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
+{
+ int mr_ctl = 0;
+ uint32_t lsb_val = 0;
+ uint32_t msb_val = 0;
+ const uint8_t rule_mr_offset = 4;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_mirror_info *mr_info =
+ (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
+ memset(&mr_info->mr_conf[rule_id], 0,
+ sizeof(struct rte_eth_vmdq_mirror_conf));
+
+ /* clear PFVMCTL register */
+ IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+ /* clear pool mask register */
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
+
+ /* clear vlan mask register */
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
+
+ return 0;
+}
+
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t tx_rate)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rf_dec, rf_int;
+ uint32_t bcnrc_val;
+ uint16_t link_speed = dev->data->dev_link.link_speed;
+
+ if (queue_idx >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
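+ /* The rate factor is link_speed/tx_rate in fixed point: rf_int is the
+  * integer part and rf_dec is the fractional part scaled by
+  * 2^IXGBE_RTTBCNRC_RF_INT_SHIFT. */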
+ rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+ rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+ rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+ bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+ IXGBE_RTTBCNRC_RF_INT_MASK_M);
+ bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
+
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
+ * set as 0x4.
+ */
+ if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+ IXGBE_MAX_JUMBO_FRAME_SIZE))
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+ IXGBE_MMW_SIZE_JUMBO_FRAME);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+ IXGBE_MMW_SIZE_DEFAULT);
+
+ /* Set RTTBCNRC of queue X */
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ uint32_t queue_stride =
+ IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
+ uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+ uint16_t total_rate = 0;
+
+ if (queue_end >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
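+ /*
+  * Sum the rates already assigned to the other VFs; the requested rate
+  * for this VF is added below so that the aggregate can be checked
+  * against the current link speed.
+  */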
+ if (vfinfo != NULL) {
+ for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
+ if (vf_idx == vf)
+ continue;
+ for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+ idx++)
+ total_rate += vfinfo[vf_idx].tx_rate[idx];
+ }
+ } else
+ return -EINVAL;
+
+ /* Store tx_rate for this vf. */
+ for (idx = 0; idx < nb_q_per_pool; idx++) {
+ if (((uint64_t)0x1 << idx) & q_msk) {
+ if (vfinfo[vf].tx_rate[idx] != tx_rate)
+ vfinfo[vf].tx_rate[idx] = tx_rate;
+ total_rate += tx_rate;
+ }
+ }
+
+ if (total_rate > dev->data->dev_link.link_speed) {
+ /*
+ * Reset the stored TX rate of the VF if it would cause the
+ * link speed to be exceeded.
+ */
+ memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+ return -EINVAL;
+ }
+
+ /* Set RTTBCNRC of each queue/pool for vf X */
+ for (; queue_idx <= queue_end; queue_idx++) {
+ if (0x1 & q_msk)
+ ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+ q_msk = q_msk >> 1;
+ }
+
+ return 0;
+}
+
+static void
+ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ __attribute__((unused)) uint32_t index,
+ __attribute__((unused)) uint32_t pool)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int diag;
+
+ /*
+ * On an 82599 VF, adding the same MAC address again is not an idempotent
+ * operation. Trap this case to avoid exhausting the [very limited]
+ * set of PF resources used to store VF MAC addresses.
+ */
+ if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+ return;
+ diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (diag == 0)
+ return;
+ PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+}
+
+static void
+ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+ struct ether_addr *mac_addr;
+ uint32_t i;
+ int diag;
+
+ /*
+ * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
+ * not support the deletion of a given MAC address.
+ * Instead, it requires deleting all MAC addresses and then adding back
+ * all of them, with the exception of the one to be deleted.
+ */
+ (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
+
+ /*
+ * Add back all MAC addresses, except the deleted one and the
+ * permanent MAC address.
+ */
+ for (i = 0, mac_addr = dev->data->mac_addrs;
+ i < hw->mac.num_rar_entries; i++, mac_addr++) {
+ /* Skip the deleted MAC address */
+ if (i == index)
+ continue;
+ /* Skip NULL MAC addresses */
+ if (is_zero_ether_addr(mac_addr))
+ continue;
+ /* Skip the permanent MAC address */
+ if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+ continue;
+ diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (diag != 0)
+ PMD_DRV_LOG(ERR,
+ "Adding again MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x failed "
+ "diag=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ diag);
+ }
+}
+
+/*
+ * add syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_add_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t rx_queue)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+ if (synqf & IXGBE_SYN_FILTER_ENABLE)
+ return -EINVAL;
+
+ synqf = (uint32_t)(((rx_queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
+ IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+
+ if (filter->hig_pri)
+ synqf |= IXGBE_SYN_FILTER_SYNQFP;
+ else
+ synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ return 0;
+}
+
+/*
+ * remove syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_remove_syn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+ synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ return 0;
+}
+
+/*
+ * get the syn filter's info
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that is returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_get_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t *rx_queue)
+
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+ filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
+ *rx_queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
+ return 0;
+ }
+ return -ENOENT;
+}
+
+/*
+ * add an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index allocated to the filter.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_add_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_ethertype_filter *filter,
+ uint16_t rx_queue)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t etqf, etqs = 0;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_ETQF_FILTERS ||
+ rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
+ if (etqf & IXGBE_ETQF_FILTER_EN)
+ return -EINVAL; /* filter index is in use. */
+
+ etqf = 0;
+ etqf |= IXGBE_ETQF_FILTER_EN;
+ etqf |= (uint32_t)filter->ethertype;
+
+ if (filter->priority_en) {
+ if (filter->priority > IXGBE_ETQF_MAX_PRI)
+ return -EINVAL;
+ etqf |= (uint32_t)((filter->priority << IXGBE_ETQF_SHIFT) & IXGBE_ETQF_UP);
+ etqf |= IXGBE_ETQF_UP_EN;
+ }
+ etqs |= (uint32_t)((rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) & IXGBE_ETQS_RX_QUEUE);
+ etqs |= IXGBE_ETQS_QUEUE_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), etqs);
+ return 0;
+}
+
+/*
+ * remove an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index allocated to the filter.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_ETQF_FILTERS)
+ return -EINVAL;
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), 0);
+
+ return 0;
+}
+
+/*
+ * get an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index allocated to the filter.
+ * filter: pointer to the filter to be retrieved.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_ethertype_filter *filter,
+ uint16_t *rx_queue)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t etqf, etqs;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_ETQF_FILTERS)
+ return -EINVAL;
+
+ etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
+ etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(index));
+ if (etqf & IXGBE_ETQF_FILTER_EN) {
+ filter->ethertype = etqf & IXGBE_ETQF_ETHERTYPE;
+ filter->priority_en = (etqf & IXGBE_ETQF_UP_EN) ? 1 : 0;
+ if (filter->priority_en)
+ filter->priority = (etqf & IXGBE_ETQF_UP) >> 16;
+ *rx_queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> IXGBE_ETQS_RX_QUEUE_SHIFT;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+ if (protocol_value == IPPROTO_TCP)
+ return IXGBE_FILTER_PROTOCOL_TCP;
+ else if (protocol_value == IPPROTO_UDP)
+ return IXGBE_FILTER_PROTOCOL_UDP;
+ else if (protocol_value == IPPROTO_SCTP)
+ return IXGBE_FILTER_PROTOCOL_SCTP;
+ else
+ return IXGBE_FILTER_PROTOCOL_NONE;
+}
+
+static inline uint8_t
+revert_protocol_type(enum ixgbe_5tuple_protocol protocol)
+{
+ if (protocol == IXGBE_FILTER_PROTOCOL_TCP)
+ return IPPROTO_TCP;
+ else if (protocol == IXGBE_FILTER_PROTOCOL_UDP)
+ return IPPROTO_UDP;
+ else if (protocol == IXGBE_FILTER_PROTOCOL_SCTP)
+ return IPPROTO_SCTP;
+ else
+ return 0;
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index allocated to the filter.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t rx_queue)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ftqf, sdpqf = 0;
+ uint32_t l34timir = 0;
+ uint8_t mask = 0xff;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_FTQF_FILTERS ||
+ rx_queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+ filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+ filter->priority < IXGBE_5TUPLE_MIN_PRI)
+ return -EINVAL; /* filter index is out of range. */
+
+ if (filter->tcp_flags) {
+ PMD_INIT_LOG(INFO, "82599EB not tcp flags in 5tuple");
+ return -EINVAL;
+ }
+
+ ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+ if (ftqf & IXGBE_FTQF_QUEUE_ENABLE)
+ return -EINVAL; /* filter index is in use. */
+
+ ftqf = 0;
+ sdpqf = (uint32_t)(filter->dst_port << IXGBE_SDPQF_DSTPORT_SHIFT);
+ sdpqf = sdpqf | (filter->src_port & IXGBE_SDPQF_SRCPORT);
+
+ ftqf |= (uint32_t)(convert_protocol_type(filter->protocol) &
+ IXGBE_FTQF_PROTOCOL_MASK);
+ ftqf |= (uint32_t)((filter->priority & IXGBE_FTQF_PRIORITY_MASK) <<
+ IXGBE_FTQF_PRIORITY_SHIFT);
+ if (filter->src_ip_mask == 0) /* 0 means compare. */
+ mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+ if (filter->dst_ip_mask == 0)
+ mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+ if (filter->src_port_mask == 0)
+ mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+ if (filter->dst_port_mask == 0)
+ mask &= IXGBE_FTQF_DEST_PORT_MASK;
+ if (filter->protocol_mask == 0)
+ mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+ ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+ ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+ ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), filter->dst_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), filter->src_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), sdpqf);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), ftqf);
+
+ l34timir |= IXGBE_L34T_IMIR_RESERVE;
+ l34timir |= (uint32_t)(rx_queue << IXGBE_L34T_IMIR_QUEUE_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), l34timir);
+ return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index allocated to the filter.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_FTQF_FILTERS)
+ return -EINVAL; /* filter index is out of range. */
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
+ return 0;
+}
+
+/*
+ * get a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index allocated to the filter.
+ * filter: pointer to the filter that is returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t *rx_queue)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t sdpqf, ftqf, l34timir;
+ uint8_t mask;
+ enum ixgbe_5tuple_protocol proto;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_FTQF_FILTERS)
+ return -EINVAL; /* filter index is out of range. */
+
+ ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+ if (ftqf & IXGBE_FTQF_QUEUE_ENABLE) {
+ proto = (enum ixgbe_5tuple_protocol)(ftqf & IXGBE_FTQF_PROTOCOL_MASK);
+ filter->protocol = revert_protocol_type(proto);
+ filter->priority = (ftqf >> IXGBE_FTQF_PRIORITY_SHIFT) &
+ IXGBE_FTQF_PRIORITY_MASK;
+ mask = (uint8_t)((ftqf >> IXGBE_FTQF_5TUPLE_MASK_SHIFT) &
+ IXGBE_FTQF_5TUPLE_MASK_MASK);
+ filter->src_ip_mask =
+ (mask & IXGBE_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
+ filter->dst_ip_mask =
+ (mask & IXGBE_FTQF_DEST_ADDR_MASK) ? 1 : 0;
+ filter->src_port_mask =
+ (mask & IXGBE_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
+ filter->dst_port_mask =
+ (mask & IXGBE_FTQF_DEST_PORT_MASK) ? 1 : 0;
+ filter->protocol_mask =
+ (mask & IXGBE_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
+
+ sdpqf = IXGBE_READ_REG(hw, IXGBE_SDPQF(index));
+ filter->dst_port = (sdpqf & IXGBE_SDPQF_DSTPORT) >>
+ IXGBE_SDPQF_DSTPORT_SHIFT;
+ filter->src_port = sdpqf & IXGBE_SDPQF_SRCPORT;
+ filter->dst_ip = IXGBE_READ_REG(hw, IXGBE_DAQF(index));
+ filter->src_ip = IXGBE_READ_REG(hw, IXGBE_SAQF(index));
+
+ l34timir = IXGBE_READ_REG(hw, IXGBE_L34T_IMIR(index));
+ *rx_queue = (l34timir & IXGBE_L34T_IMIR_QUEUE) >>
+ IXGBE_L34T_IMIR_QUEUE_SHIFT;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static int
+ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct ixgbe_hw *hw;
+ uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ return -EINVAL;
+
+ /* refuse mtu that requires the support of scattered packets when this
+ * feature has not been enabled before. */
+ if (!dev->data->scattered_rx &&
+ (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+ return -EINVAL;
+
+ /*
+ * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
+ * request of the version 2.0 of the mailbox API.
+ * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
+ * of the mailbox API.
+ * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
+ * prior to 3.11.33 which contains the following change:
+ * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+ */
+ ixgbevf_rlpml_set_vf(hw, max_frame);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+ return 0;
+}
+
+static struct rte_driver rte_ixgbe_driver = {
+ .type = PMD_PDEV,
+ .init = rte_ixgbe_pmd_init,
+};
+
+static struct rte_driver rte_ixgbevf_driver = {
+ .type = PMD_PDEV,
+ .init = rte_ixgbevf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_ixgbe_driver);
+PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.h
new file mode 100755
index 00000000..ca991701
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -0,0 +1,344 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_ETHDEV_H_
+#define _IXGBE_ETHDEV_H_
+#include "ixgbe/ixgbe_dcb.h"
+#include "ixgbe/ixgbe_dcb_82599.h"
+#include "ixgbe/ixgbe_dcb_82598.h"
+#include "ixgbe_bypass.h"
+
+/* need update link, bit flag */
+#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
+#define IXGBE_FLAG_MAILBOX (uint32_t)(1 << 1)
+
+/*
+ * Defines that were not part of ixgbe_type.h as they are not used by the
+ * FreeBSD driver.
+ */
+#define IXGBE_ADVTXD_MAC_1588 0x00080000 /* IEEE1588 Timestamp packet */
+#define IXGBE_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */
+#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE, resvd */
+#define IXGBE_RXDADV_ERR_CKSUM_BIT 30
+#define IXGBE_RXDADV_ERR_CKSUM_MSK 3
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */
+#define IXGBE_NB_STAT_MAPPING_REGS 32
+#define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
+#define IXGBE_VFTA_SIZE 128
+#define IXGBE_VLAN_TAG_SIZE 4
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+#ifndef NBBY
+#define NBBY 8 /* number of bits in a byte */
+#endif
+#define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
+
+/* Loopback operation modes */
+/* 82599 specific loopback operation types */
+#define IXGBE_LPBK_82599_NONE 0x0 /* Default value. Loopback is disabled. */
+#define IXGBE_LPBK_82599_TX_RX 0x1 /* Tx->Rx loopback operation is enabled. */
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 0x2600 /* Maximum Jumbo frame size. */
+
+#define IXGBE_RTTBCNRC_RF_INT_MASK_BASE 0x000003FF
+#define IXGBE_RTTBCNRC_RF_INT_MASK_M \
+ (IXGBE_RTTBCNRC_RF_INT_MASK_BASE << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+
+#define IXGBE_MAX_QUEUE_NUM_PER_VF 8
+
+#define IXGBE_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
+#define IXGBE_SYN_FILTER_QUEUE 0x000000FE /* syn filter queue field */
+#define IXGBE_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field shift */
+#define IXGBE_SYN_FILTER_SYNQFP 0x80000000 /* syn filter SYNQFP */
+
+#define IXGBE_ETQF_UP 0x00070000 /* ethertype filter priority field */
+#define IXGBE_ETQF_SHIFT 16
+#define IXGBE_ETQF_UP_EN 0x00080000
+#define IXGBE_ETQF_ETHERTYPE 0x0000FFFF /* ethertype filter ethertype field */
+#define IXGBE_ETQF_MAX_PRI 7
+
+#define IXGBE_SDPQF_DSTPORT 0xFFFF0000 /* dst port field */
+#define IXGBE_SDPQF_DSTPORT_SHIFT 16 /* dst port field shift */
+#define IXGBE_SDPQF_SRCPORT 0x0000FFFF /* src port field */
+
+#define IXGBE_L34T_IMIR_SIZE_BP 0x00001000
+#define IXGBE_L34T_IMIR_RESERVE 0x00080000 /* bit 13 to 19 must be set to 1000000b. */
+#define IXGBE_L34T_IMIR_LLI 0x00100000
+#define IXGBE_L34T_IMIR_QUEUE 0x0FE00000
+#define IXGBE_L34T_IMIR_QUEUE_SHIFT 21
+#define IXGBE_5TUPLE_MAX_PRI 7
+#define IXGBE_5TUPLE_MIN_PRI 1
+
+/*
+ * Information about the fdir mode.
+ */
+struct ixgbe_hw_fdir_info {
+ uint16_t collision;
+ uint16_t free;
+ uint16_t maxhash;
+ uint8_t maxlen;
+ uint64_t add;
+ uint64_t remove;
+ uint64_t f_add;
+ uint64_t f_remove;
+};
+
+/* structure for interrupt-related data */
+struct ixgbe_interrupt {
+ uint32_t flags;
+ uint32_t mask;
+};
+
+struct ixgbe_stat_mapping_registers {
+ uint32_t tqsm[IXGBE_NB_STAT_MAPPING_REGS];
+ uint32_t rqsmr[IXGBE_NB_STAT_MAPPING_REGS];
+};
+
+struct ixgbe_vfta {
+ uint32_t vfta[IXGBE_VFTA_SIZE];
+};
+
+struct ixgbe_hwstrip {
+ uint32_t bitmap[IXGBE_HWSTRIP_BITMAP_SIZE];
+};
+
+/*
+ * VF data used by the PF host only
+ */
+#define IXGBE_MAX_VF_MC_ENTRIES 30
+#define IXGBE_MAX_MR_RULE_ENTRIES 4 /* number of mirroring rules supported */
+#define IXGBE_MAX_UTA 128
+
+struct ixgbe_uta_info {
+ uint8_t uc_filter_type;
+ uint16_t uta_in_use;
+ uint32_t uta_shadow[IXGBE_MAX_UTA];
+};
+
+struct ixgbe_mirror_info {
+ struct rte_eth_vmdq_mirror_conf mr_conf[ETH_VMDQ_NUM_MIRROR_RULE];
+ /**< store PF mirror rules configuration*/
+};
+
+struct ixgbe_vf_info {
+ uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
+ uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+ uint16_t num_vf_mc_hashes;
+ uint16_t default_vf_vlan_id;
+ uint16_t vlans_enabled;
+ bool clear_to_send;
+ uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
+ uint16_t vlan_count;
+ uint8_t spoofchk_enabled;
+};
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+struct ixgbe_adapter {
+ struct ixgbe_hw hw;
+ struct ixgbe_hw_stats stats;
+ struct ixgbe_hw_fdir_info fdir;
+ struct ixgbe_interrupt intr;
+ struct ixgbe_stat_mapping_registers stat_mappings;
+ struct ixgbe_vfta shadow_vfta;
+ struct ixgbe_hwstrip hwstrip;
+ struct ixgbe_dcb_config dcb_config;
+ struct ixgbe_mirror_info mr_data;
+ struct ixgbe_vf_info *vfdata;
+ struct ixgbe_uta_info uta_info;
+#ifdef RTE_NIC_BYPASS
+ struct ixgbe_bypass_info bps;
+#endif /* RTE_NIC_BYPASS */
+};
+
+/*
+ * Possible l4type of 5tuple filters.
+ */
+enum ixgbe_5tuple_protocol {
+ IXGBE_FILTER_PROTOCOL_TCP = 0,
+ IXGBE_FILTER_PROTOCOL_UDP,
+ IXGBE_FILTER_PROTOCOL_SCTP,
+ IXGBE_FILTER_PROTOCOL_NONE,
+};
+
+#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
+ (&((struct ixgbe_adapter *)adapter)->hw)
+
+#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->stats)
+
+#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->intr)
+
+#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->fdir)
+
+#define IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->stat_mappings)
+
+#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->shadow_vfta)
+
+#define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->hwstrip)
+
+#define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->dcb_config)
+
+#define IXGBE_DEV_PRIVATE_TO_P_VFDATA(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->vfdata)
+
+#define IXGBE_DEV_PRIVATE_TO_PFDATA(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->mr_data)
+
+#define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->uta_info)
+
+/*
+ * RX/TX function prototypes
+ */
+void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
+void ixgbe_dev_rx_queue_release(void *rxq);
+
+void ixgbe_dev_tx_queue_release(void *txq);
+
+int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+
+int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
+
+void ixgbe_dev_tx_init(struct rte_eth_dev *dev);
+
+void ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);
+
+int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
+
+void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
+
+void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
+
+uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+#endif
+
+uint16_t ixgbe_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+/*
+ * Flow director function prototypes
+ */
+int ixgbe_fdir_configure(struct rte_eth_dev *dev);
+
+int ixgbe_fdir_add_signature_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter, uint8_t queue);
+
+int ixgbe_fdir_update_signature_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter, uint8_t queue);
+
+int ixgbe_fdir_remove_signature_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter);
+
+void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_fdir *fdir);
+
+int ixgbe_fdir_add_perfect_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
+ uint8_t queue, uint8_t drop);
+
+int ixgbe_fdir_update_perfect_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter,uint16_t soft_id,
+ uint8_t queue, uint8_t drop);
+
+int ixgbe_fdir_remove_perfect_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter, uint16_t soft_id);
+
+int ixgbe_fdir_set_masks(struct rte_eth_dev *dev,
+ struct rte_fdir_masks *fdir_masks);
+
+void ixgbe_configure_dcb(struct rte_eth_dev *dev);
+
+/*
+ * misc function prototypes
+ */
+void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+
+void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+
+void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
+
+void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
+
+void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
+
+void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
+
+int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
+
+uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+#endif /* _IXGBE_ETHDEV_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_fdir.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_fdir.c
new file mode 100755
index 00000000..571decc5
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_fdir.c
@@ -0,0 +1,922 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+
+#include "ixgbe_logs.h"
+#include "ixgbe/ixgbe_api.h"
+#include "ixgbe/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+
+/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
+#define FDIRCTRL_PBALLOC_MASK 0x03
+
+/* For calculating memory required for FDIR filters */
+#define PBALLOC_SIZE_SHIFT 15
+
+/* Number of bits used to mask bucket hash for different pballoc sizes */
+#define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF /* 11 bits */
+#define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF /* 12 bits */
+#define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF /* 13 bits */
+#define SIG_BUCKET_64KB_HASH_MASK 0x1FFF /* 13 bits */
+#define SIG_BUCKET_128KB_HASH_MASK 0x3FFF /* 14 bits */
+#define SIG_BUCKET_256KB_HASH_MASK 0x7FFF /* 15 bits */
+
+/**
+ * This function is based on ixgbe_fdir_enable_82599() in ixgbe/ixgbe_82599.c.
+ * It adds extra configuration of fdirctrl that is common for all filter types.
+ *
+ * Initialize Flow Director control registers
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+static void fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Prime the keys for hashing */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiply these timings by 4 if under full Rx load
+ *
+ * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ msec_delay(1);
+ }
+
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+ PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!");
+}
+
+/*
+ * Set appropriate bits in fdirctrl for: variable reporting levels, moving
+ * flexbytes matching field, and drop queue (only for perfect matching mode).
+ */
+static int
+configure_fdir_flags(struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+{
+ *fdirctrl = 0;
+
+ switch (conf->pballoc) {
+ case RTE_FDIR_PBALLOC_64K:
+ /* 8k - 1 signature filters */
+ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+ break;
+ case RTE_FDIR_PBALLOC_128K:
+ /* 16k - 1 signature filters */
+ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+ break;
+ case RTE_FDIR_PBALLOC_256K:
+ /* 32k - 1 signature filters */
+ *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
+ return -EINVAL;
+ };
+
+ /* status flags: write hash & swindex in the rx descriptor */
+ switch (conf->status) {
+ case RTE_FDIR_NO_REPORT_STATUS:
+ /* do nothing, default mode */
+ break;
+ case RTE_FDIR_REPORT_STATUS:
+ /* report status when the packet matches a fdir rule */
+ *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
+ break;
+ case RTE_FDIR_REPORT_STATUS_ALWAYS:
+ /* always report status */
+ *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
+ return -EINVAL;
+ };
+
+ *fdirctrl |= (conf->flexbytes_offset << IXGBE_FDIRCTRL_FLEX_SHIFT);
+
+ if (conf->mode == RTE_FDIR_MODE_PERFECT) {
+ *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
+ *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+ }
+
+ return 0;
+}
+
+int
+ixgbe_fdir_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int err;
+ uint32_t fdirctrl, pbsize;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x)
+ return -ENOSYS;
+
+ err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
+ if (err)
+ return err;
+
+ /*
+ * Before enabling Flow Director, the Rx Packet Buffer size
+ * must be reduced. The new value is the current size minus
+ * flow director memory usage size.
+ */
+ pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
+ (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
+
+ /*
+ * The defaults in the HW for RX PB 1-7 are not zero and so should be
+ * initialized to zero for non-DCB mode; otherwise the actual total RX PB
+ * would be bigger than programmed and the filter space would run into
+ * the PB 0 region.
+ */
+ for (i = 1; i < 8; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+
+ fdir_enable_82599(hw, fdirctrl);
+ return 0;
+}
+
+/*
+ * The below function is taken from the FreeBSD IXGBE drivers release
+ * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
+ * before returning, as the signature hash can use 16 bits.
+ *
+ * The newer driver has optimised functions for calculating bucket and
+ * signature hashes. However they don't support IPv6 type packets for signature
+ * filters so are not used here.
+ *
+ * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
+ * set.
+ *
+ * Compute the hashes for SW ATR
+ * @stream: input bitstream to compute the hash on
+ * @key: 32-bit hash key
+ **/
+static u32
+ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+ u32 key)
+{
+ /*
+ * The algorithm is as follows:
+ * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
+ * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
+ * and A[n] x B[n] is bitwise AND between same length strings
+ *
+ * K[n] is 16 bits, defined as:
+ * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
+ * for n modulo 32 < 15, K[n] =
+ * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
+ *
+ * S[n] is 16 bits, defined as:
+ * for n >= 15, S[n] = S[n:n - 15]
+ * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
+ *
+ * To simplify for programming, the algorithm is implemented
+ * in software this way:
+ *
+ * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+ *
+ * for (i = 0; i < 352; i+=32)
+ * hi_hash_dword[31:0] ^= Stream[(i+31):i];
+ *
+ * lo_hash_dword[15:0] ^= Stream[15:0];
+ * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
+ * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
+ *
+ * hi_hash_dword[31:0] ^= Stream[351:320];
+ *
+ * if(key[0])
+ * hash[15:0] ^= Stream[15:0];
+ *
+ * for (i = 0; i < 16; i++) {
+ * if (key[i])
+ * hash[15:0] ^= lo_hash_dword[(i+15):i];
+ * if (key[i + 16])
+ * hash[15:0] ^= hi_hash_dword[(i+15):i];
+ * }
+ *
+ */
+ __be32 common_hash_dword = 0;
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 hash_result = 0;
+ u8 i;
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
+
+ /* generate common hash dword */
+ for (i = 1; i <= 13; i++)
+ common_hash_dword ^= atr_input->dword_stream[i];
+
+ hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ if (key & 0x0001) hash_result ^= lo_hash_dword;
+ if (key & 0x00010000) hash_result ^= hi_hash_dword;
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed,
+	 * so we do not add the VLAN until after bit 0 has been processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+
+ /* process the remaining 30 bits in the key 2 bits at a time */
+	for (i = 15; i; i--) {
+ if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+ }
+
+ return hash_result;
+}
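Two of the building blocks described in the comment above can be seen in isolation with a small standalone program; the values are arbitrary and only illustrate the 16-bit word swap and the two-bits-per-iteration key folding.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hi = 0x12345678;

	/* "low dword is word swapped version of common": swap 16-bit halves */
	uint32_t lo = (hi >> 16) | (hi << 16);
	printf("hi=0x%08x lo=0x%08x\n", (unsigned)hi, (unsigned)lo); /* lo=0x56781234 */

	/* the key loop XORs a shifted copy of the relevant dword into the
	 * hash for every set key bit, two key bits per iteration */
	uint32_t key = 0x00008005, hash = 0;  /* arbitrary example key */
	for (int i = 15; i; i--) {
		if (key & (0x0001u << i))
			hash ^= lo >> i;
		if (key & (0x00010000u << i))
			hash ^= hi >> i;
	}
	printf("partial hash contribution = 0x%04x\n", (unsigned)(hash & 0xffff));
	return 0;
}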
+
+/*
+ * Calculate the hash value needed for signature-match filters. In the FreeBSD
+ * driver, this is done by the optimised function
+ * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
+ * doesn't support calculating a hash for an IPv6 filter.
+ */
+static uint32_t
+atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc)
+{
+ uint32_t bucket_hash, sig_hash;
+
+ if (pballoc == RTE_FDIR_PBALLOC_256K)
+ bucket_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ SIG_BUCKET_256KB_HASH_MASK;
+ else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ bucket_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ SIG_BUCKET_128KB_HASH_MASK;
+ else
+ bucket_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ SIG_BUCKET_64KB_HASH_MASK;
+
+ sig_hash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
+}
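The resulting FDIRHASH value is simply the signature hash packed above the bucket hash. A minimal sketch of that packing, using a hypothetical shift of 16 in place of IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT and made-up hash values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned sig_sw_index_shift = 16;  /* assumed value             */
	uint32_t bucket_hash = 0x1abc;  /* already masked to the table size  */
	uint32_t sig_hash    = 0x7f3e;

	uint32_t fdirhash = (sig_hash << sig_sw_index_shift) | bucket_hash;
	printf("FDIRHASH = 0x%08x\n", (unsigned)fdirhash);  /* 0x7f3e1abc */
	return 0;
}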
+
+/**
+ * This function is based on ixgbe_atr_add_signature_filter_82599() in
+ * ixgbe/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
+ * setting extra fields in the FDIRCMD register, and removes the code that was
+ * verifying the flow_type field. According to the documentation, a flow type of
+ * 00 (i.e. not TCP, UDP, or SCTP) is not supported; however, in practice it
+ * appears to work correctly.
+ *
+ * Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @queue: queue index to direct traffic to
+ * @fdircmd: any extra flags to set in fdircmd register
+ * @fdirhash: pre-calculated hash value for the filter
+ **/
+static void
+fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input, u8 queue, u32 fdircmd,
+ u32 fdirhash)
+{
+ u64 fdirhashcmd;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* configure FDIRCMD register */
+ fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+ /*
+ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+ * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ */
+ fdirhashcmd = (u64)fdircmd << 32;
+ fdirhashcmd |= fdirhash;
+ IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+
+ PMD_INIT_LOG(DEBUG, "Tx Queue=%x hash=%x", queue, (u32)fdirhashcmd);
+}
+
+/*
+ * Convert DPDK rte_fdir_filter struct to ixgbe_atr_input union that is used
+ * by the IXGBE driver code.
+ */
+static int
+fdir_filter_to_atr_input(struct rte_fdir_filter *fdir_filter,
+ union ixgbe_atr_input *input)
+{
+ if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP ||
+ fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) &&
+ (fdir_filter->port_src || fdir_filter->port_dst)) {
+ PMD_INIT_LOG(ERR, "Invalid fdir_filter");
+ return -EINVAL;
+ }
+
+ memset(input, 0, sizeof(*input));
+
+ input->formatted.vlan_id = fdir_filter->vlan_id;
+ input->formatted.src_port = fdir_filter->port_src;
+ input->formatted.dst_port = fdir_filter->port_dst;
+ input->formatted.flex_bytes = fdir_filter->flex_bytes;
+
+ switch (fdir_filter->l4type) {
+ case RTE_FDIR_L4TYPE_TCP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case RTE_FDIR_L4TYPE_UDP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case RTE_FDIR_L4TYPE_SCTP:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case RTE_FDIR_L4TYPE_NONE:
+ input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+ break;
+ default:
+ PMD_INIT_LOG(ERR, " Error on l4type input");
+ return -EINVAL;
+ }
+
+ if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6) {
+ input->formatted.flow_type |= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+ input->formatted.src_ip[0] = fdir_filter->ip_src.ipv6_addr[0];
+ input->formatted.src_ip[1] = fdir_filter->ip_src.ipv6_addr[1];
+ input->formatted.src_ip[2] = fdir_filter->ip_src.ipv6_addr[2];
+ input->formatted.src_ip[3] = fdir_filter->ip_src.ipv6_addr[3];
+
+ input->formatted.dst_ip[0] = fdir_filter->ip_dst.ipv6_addr[0];
+ input->formatted.dst_ip[1] = fdir_filter->ip_dst.ipv6_addr[1];
+ input->formatted.dst_ip[2] = fdir_filter->ip_dst.ipv6_addr[2];
+ input->formatted.dst_ip[3] = fdir_filter->ip_dst.ipv6_addr[3];
+
+ } else {
+ input->formatted.src_ip[0] = fdir_filter->ip_src.ipv4_addr;
+ input->formatted.dst_ip[0] = fdir_filter->ip_dst.ipv4_addr;
+ }
+
+ return 0;
+}
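From the application side, the converter above is typically fed through the legacy flow-director API. A hedged sketch, assuming the DPDK 1.8 rte_eth_dev_fdir_add_signature_filter() call and the rte_fdir_filter layout consumed above; the addresses, ports, queue number and byte order used here are illustrative assumptions only.

#include <string.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_ethdev.h>

/* Sketch: steer IPv4/UDP 10.0.0.1:53 -> 10.0.0.2:53 to RX queue 3 on a port */
static int add_udp_sig_filter(uint8_t port_id)
{
	struct rte_fdir_filter f;

	memset(&f, 0, sizeof(f));
	f.iptype = RTE_FDIR_IPTYPE_IPV4;
	f.l4type = RTE_FDIR_L4TYPE_UDP;
	f.ip_src.ipv4_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 1)); /* byte order assumed */
	f.ip_dst.ipv4_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 2));
	f.port_src = rte_cpu_to_be_16(53);
	f.port_dst = rte_cpu_to_be_16(53);

	return rte_eth_dev_fdir_add_signature_filter(port_id, &f, 3 /* queue */);
}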
+
+/*
+ * Adds or updates a signature filter.
+ *
+ * dev: ethernet device to add filter to
+ * fdir_filter: filter details
+ * queue: queue index to direct traffic to
+ * update: 0 to add a new filter, otherwise update existing.
+ */
+static int
+fdir_add_update_signature_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter, uint8_t queue, int update)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
+ uint32_t fdirhash;
+ union ixgbe_atr_input input;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x)
+ return -ENOSYS;
+
+ err = fdir_filter_to_atr_input(fdir_filter, &input);
+ if (err)
+ return err;
+
+ fdirhash = atr_compute_sig_hash_82599(&input,
+ dev->data->dev_conf.fdir_conf.pballoc);
+ fdir_add_signature_filter_82599(hw, &input, queue, fdircmd_flags,
+ fdirhash);
+ return 0;
+}
+
+int
+ixgbe_fdir_add_signature_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter, uint8_t queue)
+{
+ PMD_INIT_FUNC_TRACE();
+ return fdir_add_update_signature_filter(dev, fdir_filter, queue, 0);
+}
+
+int
+ixgbe_fdir_update_signature_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter, uint8_t queue)
+{
+ PMD_INIT_FUNC_TRACE();
+ return fdir_add_update_signature_filter(dev, fdir_filter, queue, 1);
+}
+
+/*
+ * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
+ * ixgbe/ixgbe_82599.c. It is modified to take in the hash as a parameter so
+ * that it can be used for removing signature and perfect filters.
+ */
+static s32
+fdir_erase_filter_82599(struct ixgbe_hw *hw,
+ __rte_unused union ixgbe_atr_input *input, uint32_t fdirhash)
+{
+ u32 fdircmd = 0;
+ u32 retry_count;
+ s32 err = 0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /* flush hash to HW */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Query if filter is present */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+ for (retry_count = 10; retry_count; retry_count--) {
+ /* allow 10us for query to process */
+ usec_delay(10);
+ /* verify query completed successfully */
+ fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ break;
+ }
+
+ if (!retry_count) {
+ PMD_INIT_LOG(ERR, "Timeout querying for flow director filter");
+ err = -EIO;
+ }
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+ }
+
+ return err;
+}
+
+int
+ixgbe_fdir_remove_signature_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ union ixgbe_atr_input input;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x)
+ return -ENOSYS;
+
+ err = fdir_filter_to_atr_input(fdir_filter, &input);
+ if (err)
+ return err;
+
+ return fdir_erase_filter_82599(hw, &input,
+ atr_compute_sig_hash_82599(&input,
+ dev->data->dev_conf.fdir_conf.pballoc));
+}
+
+/**
+ * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
+ *
+ * @hi_dword: Bits 31:16 of the mask to be bit reversed.
+ * @lo_dword: Bits 15:0 of the mask to be bit reversed.
+ *
+ * Flow director uses several registers to store 2 x 16 bit masks with the
+ * bits reversed such as FDIRTCPM, FDIRUDPM and FDIRIP6M. The LS bit of the
+ * mask affects the MS bit/byte of the target. This function reverses the
+ * bits in these masks.
+ **/
+static uint32_t
+reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
+{
+ u32 mask = hi_dword << 16;
+ mask |= lo_dword;
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
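The reversal can be checked standalone with the same bit-twiddling; the driver-independent sketch below only illustrates that the least-significant input bit ends up as the most-significant output bit.

#include <stdint.h>
#include <stdio.h>

static uint32_t reverse32(uint16_t hi, uint16_t lo)
{
	uint32_t m = ((uint32_t)hi << 16) | lo;
	m = ((m & 0x55555555) << 1) | ((m & 0xAAAAAAAA) >> 1);
	m = ((m & 0x33333333) << 2) | ((m & 0xCCCCCCCC) >> 2);
	m = ((m & 0x0F0F0F0F) << 4) | ((m & 0xF0F0F0F0) >> 4);
	return ((m & 0x00FF00FF) << 8) | ((m & 0xFF00FF00) >> 8);
}

int main(void)
{
	/* the LS bit of the input becomes the MS bit of the result */
	printf("0x00000001 -> 0x%08x\n", (unsigned)reverse32(0x0000, 0x0001)); /* 0x80000000 */
	printf("0xffff0000 -> 0x%08x\n", (unsigned)reverse32(0xffff, 0x0000)); /* 0x0000ffff */
	return 0;
}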
+
+/*
+ * This macro exists in ixgbe/ixgbe_82599.c; however, in that file it reverses
+ * the bytes and then reverses them again, so the net effect is no change.
+ * Here it is therefore defined as a plain IXGBE_WRITE_REG.
+ */
+#define IXGBE_WRITE_REG_BE32 IXGBE_WRITE_REG
+
+/*
+ * This is based on ixgbe_fdir_set_input_mask_82599() in ixgbe/ixgbe_82599.c,
+ * but makes use of the rte_fdir_masks structure to see which bits to set.
+ */
+static int
+fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ struct rte_fdir_masks *input_mask)
+{
+ /* mask VM pool since it is currently not supported */
+ u32 fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
+ u32 fdirtcpm; /* TCP source and destination port masks. */
+ //u32 fdiripv6m; /* IPv6 source and destination masks. */
+
+ PMD_INIT_FUNC_TRACE();
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ */
+ if (input_mask->only_ip_flow) {
+ /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+ fdirm |= IXGBE_FDIRM_L4P;
+ if (input_mask->dst_port_mask || input_mask->src_port_mask) {
+ PMD_INIT_LOG(ERR, " Error on src/dst port mask");
+ return -EINVAL;
+ }
+ }
+
+ //if (!input_mask->comp_ipv6_dst)
+ /* mask DIPV6 */
+ // fdirm |= IXGBE_FDIRM_DIPv6;
+
+ if (!input_mask->vlan_id)
+ /* mask VLAN ID*/
+ fdirm |= IXGBE_FDIRM_VLANID;
+
+ if (!input_mask->vlan_prio)
+ /* mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+
+ if (!input_mask->flexbytes)
+ /* Mask Flex Bytes */
+ fdirm |= IXGBE_FDIRM_FLEX;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
+ input_mask->src_port_mask);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+ // if (!input_mask->set_ipv6_mask) {
+ /* Store source and destination IPv4 masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ IXGBE_NTOHL(~input_mask->src_ipv4_mask));
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ IXGBE_NTOHL(~input_mask->dst_ipv4_mask));
+ //}
+ //else {
+ /* Store source and destination IPv6 masks (bit reversed) */
+ // fdiripv6m = reverse_fdir_bitmasks(input_mask->dst_ipv6_mask,
+ // input_mask->src_ipv6_mask);
+
+ // IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
+ //}
+ /* store IPv6 mask */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xffffffff);
+
+
+ return IXGBE_SUCCESS;
+}
+
+int
+ixgbe_fdir_set_masks(struct rte_eth_dev *dev, struct rte_fdir_masks *fdir_masks)
+{
+ struct ixgbe_hw *hw;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x)
+ return -ENOSYS;
+
+ err = ixgbe_reinit_fdir_tables_82599(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "reinit of fdir tables failed");
+ return -EIO;
+ }
+
+ return fdir_set_input_mask_82599(hw, fdir_masks);
+}
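A hedged usage sketch for the mask setup, assuming the legacy DPDK 1.8 rte_eth_dev_fdir_set_masks() call and the rte_fdir_masks fields referenced above; the specific mask choices are illustrative only.

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: match on full src/dst IPv4 addresses and the destination port,
 * ignore VLAN and flex bytes (fields left at 0 are masked out above). */
static int set_example_fdir_masks(uint8_t port_id)
{
	struct rte_fdir_masks m;

	memset(&m, 0, sizeof(m));
	m.src_ipv4_mask = 0xffffffff;
	m.dst_ipv4_mask = 0xffffffff;
	m.dst_port_mask = 0xffff;

	return rte_eth_dev_fdir_set_masks(port_id, &m);
}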
+
+static uint32_t
+atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc)
+{
+ if (pballoc == RTE_FDIR_PBALLOC_256K)
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_256KB_HASH_MASK;
+ else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_128KB_HASH_MASK;
+ else
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_64KB_HASH_MASK;
+}
+
+/*
+ * This is based on ixgbe_fdir_write_perfect_filter_82599() in
+ * ixgbe/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
+ * added, and IPv6 support also added. The hash value is also pre-calculated
+ * as the pballoc value is needed to do it.
+ */
+static void
+fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input,
+ uint16_t soft_id, uint8_t queue, uint32_t fdircmd,
+ uint32_t fdirhash)
+{
+ u32 fdirport, fdirvlan;
+
+ /* record the source address (big-endian) */
+ if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), input->formatted.src_ip[1]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.src_ip[2]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[3]);
+ }
+ else {
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+ }
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+ /* record source and destination port (little-endian)*/
+ fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+ /* record vlan (little-endian) and flex_bytes(big-endian) */
+ fdirvlan = input->formatted.flex_bytes;
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+ /* configure FDIRHASH register */
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* configure FDIRCMD register */
+ fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+}
+
+/*
+ * Adds or updates a perfect filter.
+ *
+ * dev: ethernet device to add filter to
+ * fdir_filter: filter details
+ * soft_id: software index for the filters
+ * queue: queue index to direct traffic to
+ * drop: non-zero if packets should be sent to the drop queue
+ * update: 0 to add a new filter, otherwise update existing.
+ */
+static int
+fdir_add_update_perfect_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
+ uint8_t queue, int drop, int update)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
+ uint32_t fdirhash;
+ union ixgbe_atr_input input;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x)
+ return -ENOSYS;
+
+ err = fdir_filter_to_atr_input(fdir_filter, &input);
+ if (err)
+ return err;
+
+ if (drop) {
+ queue = dev->data->dev_conf.fdir_conf.drop_queue;
+ fdircmd_flags |= IXGBE_FDIRCMD_DROP;
+ }
+
+ fdirhash = atr_compute_perfect_hash_82599(&input,
+ dev->data->dev_conf.fdir_conf.pballoc);
+
+ fdir_write_perfect_filter_82599(hw, &input, soft_id, queue,
+ fdircmd_flags, fdirhash);
+ return 0;
+}
+
+int
+ixgbe_fdir_add_perfect_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
+ uint8_t queue, uint8_t drop)
+{
+ PMD_INIT_FUNC_TRACE();
+ return fdir_add_update_perfect_filter(dev, fdir_filter, soft_id, queue,
+ drop, 0);
+}
+
+int
+ixgbe_fdir_update_perfect_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
+ uint8_t queue, uint8_t drop)
+{
+ PMD_INIT_FUNC_TRACE();
+ return fdir_add_update_perfect_filter(dev, fdir_filter, soft_id, queue,
+ drop, 1);
+}
+
+int
+ixgbe_fdir_remove_perfect_filter(struct rte_eth_dev *dev,
+ struct rte_fdir_filter *fdir_filter,
+ uint16_t soft_id)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ union ixgbe_atr_input input;
+ uint32_t fdirhash;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x)
+ return -ENOSYS;
+
+ err = fdir_filter_to_atr_input(fdir_filter, &input);
+ if (err)
+ return err;
+
+ /* configure FDIRHASH register */
+ fdirhash = atr_compute_perfect_hash_82599(&input,
+ dev->data->dev_conf.fdir_conf.pballoc);
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+
+ return fdir_erase_filter_82599(hw, &input, fdirhash);
+}
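A perfect filter is identified by its match fields plus the caller-chosen soft_id, so removal must pass the same filter contents and index. A minimal sketch using the entry points defined in this file; the queue number and soft_id are arbitrary, and ixgbe_ethdev.h is assumed to declare these functions.

#include <stdint.h>
#include "ixgbe_ethdev.h"

static int perfect_filter_lifecycle(struct rte_eth_dev *dev,
		struct rte_fdir_filter *f)
{
	const uint16_t soft_id = 42;  /* arbitrary software index */
	int ret;

	ret = ixgbe_fdir_add_perfect_filter(dev, f, soft_id,
			5 /* queue */, 0 /* do not drop */);
	if (ret)
		return ret;

	/* ... matching traffic is steered to queue 5 while installed ... */

	return ixgbe_fdir_remove_perfect_filter(dev, f, soft_id);
}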
+
+void
+ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir *fdir)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ uint32_t reg;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x)
+ return;
+
+ /* Get the information from registers */
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
+ info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
+ IXGBE_FDIRFREE_COLL_SHIFT);
+ info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
+ IXGBE_FDIRFREE_FREE_SHIFT);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+ info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
+ IXGBE_FDIRLEN_MAXHASH_SHIFT);
+ info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
+ IXGBE_FDIRLEN_MAXLEN_SHIFT);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+ info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
+ IXGBE_FDIRUSTAT_REMOVE_SHIFT;
+ info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
+ IXGBE_FDIRUSTAT_ADD_SHIFT;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
+ info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
+ IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
+ info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
+ IXGBE_FDIRFSTAT_FADD_SHIFT;
+
+ /* Copy the new information in the fdir parameter */
+ fdir->collision = info->collision;
+ fdir->free = info->free;
+ fdir->maxhash = info->maxhash;
+ fdir->maxlen = info->maxlen;
+ fdir->remove = info->remove;
+ fdir->add = info->add;
+ fdir->f_remove = info->f_remove;
+ fdir->f_add = info->f_add;
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_logs.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_logs.h
new file mode 100755
index 00000000..4f224ecf
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_logs.h
@@ -0,0 +1,78 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_LOGS_H_
+#define _IXGBE_LOGS_H_
+
+
+#define PMD_INIT_LOG(level, fmt, args...) RTE_LOG(level, PMD, " " fmt "\n", ##args)
+
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _IXGBE_LOGS_H_ */
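The pattern above makes each debug macro expand to a real statement only when its RTE_LIBRTE_IXGBE_DEBUG_* option is defined, and to an empty do/while otherwise, so call sites need no #ifdef guards. A self-contained illustration of the same idiom (the names here are made up for the example):

#include <stdio.h>

#ifdef EXAMPLE_DEBUG
#define EX_LOG(fmt, ...) printf("%s(): " fmt "\n", __func__, ##__VA_ARGS__)
#else
#define EX_LOG(fmt, ...) do { } while (0)
#endif

int main(void)
{
	EX_LOG("value=%d", 123);  /* compiles away unless built with -DEXAMPLE_DEBUG */
	return 0;
}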
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_pf.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_pf.c
new file mode 100755
index 00000000..51da1fd1
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_pf.c
@@ -0,0 +1,566 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+
+#include "ixgbe/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+
+#define IXGBE_MAX_VFTA (128)
+
+static inline uint16_t
+dev_num_vf(struct rte_eth_dev *eth_dev)
+{
+ return eth_dev->pci_dev->max_vfs;
+}
+
+static inline
+int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
+{
+ unsigned char vf_mac_addr[ETHER_ADDR_LEN];
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ uint16_t vfn;
+
+ for (vfn = 0; vfn < vf_num; vfn++) {
+ eth_random_addr(vf_mac_addr);
+ /* keep the random address as default */
+ memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
+ ETHER_ADDR_LEN);
+ }
+
+ return 0;
+}
+
+static inline int
+ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= IXGBE_EICR_MAILBOX;
+
+ return 0;
+}
+
+void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_vf_info **vfinfo =
+ IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+ struct ixgbe_mirror_info *mirror_info =
+ IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
+ struct ixgbe_uta_info *uta_info =
+ IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ uint16_t vf_num;
+ uint8_t nb_queue;
+
+ PMD_INIT_FUNC_TRACE();
+
+ RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+ if (0 == (vf_num = dev_num_vf(eth_dev)))
+ return;
+
+ *vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
+ if (*vfinfo == NULL)
+ rte_panic("Cannot allocate memory for private VF data\n");
+
+	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
+	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
+ hw->mac.mc_filter_type = 0;
+
+ if (vf_num >= ETH_32_POOLS) {
+ nb_queue = 2;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
+ } else if (vf_num >= ETH_16_POOLS) {
+ nb_queue = 4;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+ } else {
+ nb_queue = 8;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+ }
+
+ RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
+
+ ixgbe_vf_perm_addr_gen(eth_dev, vf_num);
+
+ /* init_mailbox_params */
+ hw->mbx.ops.init_params(hw);
+
+ /* set mb interrupt mask */
+ ixgbe_mb_intr_setup(eth_dev);
+
+ return;
+}
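The VF count decides how the queues are split into pools; a standalone sketch of the same mapping, assuming ETH_64_POOLS, ETH_32_POOLS and ETH_16_POOLS equal 64, 32 and 16.

#include <stdint.h>
#include <stdio.h>

static void sriov_split(uint16_t vf_num, unsigned *pools, unsigned *q_per_pool)
{
	if (vf_num >= 32)      { *pools = 64; *q_per_pool = 2; }
	else if (vf_num >= 16) { *pools = 32; *q_per_pool = 4; }
	else                   { *pools = 16; *q_per_pool = 8; }
}

int main(void)
{
	uint16_t vfs[] = { 4, 16, 63 };
	for (unsigned i = 0; i < 3; i++) {
		unsigned pools, q;
		sriov_split(vfs[i], &pools, &q);
		/* the PF's own queues start right after the VF pools */
		printf("%2u VFs -> %2u pools, %u queues/pool, PF base queue %u\n",
		       (unsigned)vfs[i], pools, q, (unsigned)(vfs[i] * q));
	}
	return 0;
}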
+
+int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
+{
+ uint32_t vtctl, fcrth;
+ uint32_t vfre_slot, vfre_offset;
+ uint16_t vf_num;
+ const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
+ const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ uint32_t gpie, gcr_ext;
+ uint32_t vlanctrl;
+ int i;
+
+ if (0 == (vf_num = dev_num_vf(eth_dev)))
+ return -1;
+
+ /* enable VMDq and set the default pool for PF */
+ vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
+ vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
+ << IXGBE_VT_CTL_POOL_SHIFT;
+ vtctl |= IXGBE_VT_CTL_REPLEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
+
+ vfre_offset = vf_num & VFRE_MASK;
+ vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;
+
+ /* Enable pools reserved to PF only */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0) << vfre_offset);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0) << vfre_offset);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);
+
+ /* PFDMA Tx General Switch Control Enables VMDQ loopback */
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
+	/* clear VMDq map to permanent rar 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+
+ /* clear VMDq map to scan rar 127 */
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);
+
+ /* set VMDq map to default PF pool */
+ hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
+
+ /*
+	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
+ */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
+
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_MSIX_MODE;
+
+ switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
+ case ETH_64_POOLS:
+ gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ break;
+ case ETH_32_POOLS:
+ gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
+ gpie |= IXGBE_GPIE_VTMODE_32;
+ break;
+ case ETH_16_POOLS:
+ gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
+ gpie |= IXGBE_GPIE_VTMODE_16;
+ break;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /*
+ * enable vlan filtering and allow all vlan tags through
+ */
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < IXGBE_MAX_VFTA; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+ }
+
+ /* Enable MAC Anti-Spoofing */
+ hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
+
+ /* set flow control threshold to max to avoid tx switch hang */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
+ }
+
+ return 0;
+}
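The VFRE/VFTE enable bits are spread across two 32-bit registers, and everything from the first PF pool upwards in the containing register is enabled with a (~0) << offset mask. A small standalone sketch of that index arithmetic with an arbitrary VF count:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t vf_num = 40;                    /* arbitrary example      */
	uint32_t offset = vf_num & 31;           /* bit within the slot    */
	uint32_t slot   = (vf_num >> 5) ? 1 : 0; /* which 32-bit register  */

	printf("slot=%u offset=%u mask=0x%08x\n",
	       (unsigned)slot, (unsigned)offset, (unsigned)(~0u << offset));
	/* slot=1 offset=8 mask=0xffffff00: pools 40..63 stay with the PF */
	return 0;
}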
+
+static void
+set_rx_mode(struct rte_eth_dev *dev)
+{
+	struct rte_eth_dev_data *dev_data = dev->data;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ uint16_t vfn = dev_num_vf(dev);
+
+ /* Check for Promiscuous and All Multicast modes */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+
+ /* set all bits that we expect to always be set */
+ fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
+ fctrl |= IXGBE_FCTRL_BAM;
+
+ /* clear the bits we are changing the status of */
+ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+
+ if (dev_data->promiscuous) {
+ fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+ } else {
+ if (dev_data->all_multicast) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ }
+ }
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
+ ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ ixgbe_vlan_hw_strip_enable_all(dev);
+ else
+ ixgbe_vlan_hw_strip_disable_all(dev);
+}
+
+static inline void
+ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+ vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+
+ /* reset multicast table array for vf */
+ vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* reset rx mode */
+ set_rx_mode(dev);
+
+ hw->mac.ops.clear_rar(hw, rar_entry);
+}
+
+static inline void
+ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+ uint32_t reg_offset, vf_shift;
+ const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
+ const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+
+ vf_shift = vf & VFRE_MASK;
+ reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
+
+ /* enable transmit and receive for vf */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+	reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+	reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+
+ /* Enable counting of spoofed packets in the SSVPC register */
+ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
+
+ ixgbe_vf_reset_event(dev, vf);
+}
+
+static int
+ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+
+ ixgbe_vf_reset_msg(dev, vf);
+
+ hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return 0;
+}
+
+static int
+ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+
+ if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) {
+		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, ETHER_ADDR_LEN);
+ return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
+ }
+ return -1;
+}
+
+static int
+ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+ IXGBE_VT_MSGINFO_SHIFT;
+ uint16_t *hash_list = (uint16_t *)&msgbuf[1];
+ uint32_t mta_idx;
+ uint32_t mta_shift;
+ const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
+ const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
+ const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
+ uint32_t reg_val;
+ int i;
+
+ /* only so many hash values supported */
+ nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);
+
+ /* store the mc entries */
+ vfinfo->num_vf_mc_hashes = (uint16_t)nb_entries;
+ for (i = 0; i < nb_entries; i++) {
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+ }
+
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
+ & IXGBE_MTA_INDEX_MASK;
+ mta_shift = vfinfo->vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
+ reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
+ reg_val |= (1 << mta_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ int add, vid;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+
+ add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+
+ if (add)
+ vfinfo[vf].vlan_count++;
+ else if (vfinfo[vf].vlan_count)
+ vfinfo[vf].vlan_count--;
+ return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add);
+}
+
+static int
+ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t new_mtu = msgbuf[1];
+ uint32_t max_frs;
+ int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ /* X540 and X550 support jumbo frames in IOV mode */
+ if (hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x)
+ return -1;
+
+ if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ return -1;
+
+ max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
+ IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
+ if (max_frs < new_mtu) {
+ max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
+ }
+
+ return 0;
+}
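The MTU requested by the VF is converted to a maximum frame length before the range check; a standalone sketch of that arithmetic, with the header/CRC sizes and the jumbo limit written out as assumed example values rather than the driver's ETHER_* constants.

#include <stdio.h>

#define EX_ETHER_HDR_LEN  14   /* untagged Ethernet header */
#define EX_ETHER_CRC_LEN   4
#define EX_MIN_FRAME      64
#define EX_MAX_JUMBO    9728   /* hypothetical jumbo limit for illustration */

int main(void)
{
	unsigned mtu = 9000;
	unsigned max_frame = mtu + EX_ETHER_HDR_LEN + EX_ETHER_CRC_LEN;

	if (max_frame < EX_MIN_FRAME || max_frame > EX_MAX_JUMBO)
		printf("MTU %u rejected (frame %u)\n", mtu, max_frame);
	else
		printf("MTU %u accepted (frame %u)\n", mtu, max_frame);
	return 0;
}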
+
+static int
+ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
+{
+ uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
+ uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
+ int32_t retval;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+ retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+ if (retval) {
+ PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
+ return retval;
+ }
+
+	/* do nothing if the message has already been processed */
+ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+ return retval;
+
+ /* flush the ack before we write any messages back */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* perform VF reset */
+ if (msgbuf[0] == IXGBE_VF_RESET) {
+ int ret = ixgbe_vf_reset(dev, vf, msgbuf);
+ vfinfo[vf].clear_to_send = true;
+ return ret;
+ }
+
+ /* check & process VF to PF mailbox message */
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case IXGBE_VF_SET_MAC_ADDR:
+ retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
+ break;
+ case IXGBE_VF_SET_MULTICAST:
+ retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
+ break;
+ case IXGBE_VF_SET_LPE:
+ retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
+ break;
+ case IXGBE_VF_SET_VLAN:
+ retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
+ retval = IXGBE_ERR_MBX;
+ break;
+ }
+
+	/* respond to the VF according to the result of processing the message */
+ if (retval)
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+
+ return retval;
+}
+
+static inline void
+ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
+{
+ uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+ if (!vfinfo[vf].clear_to_send)
+ ixgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
+{
+ uint16_t vf;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
+ /* check & process vf function level reset */
+ if (!ixgbe_check_for_rst(hw, vf))
+ ixgbe_vf_reset_event(eth_dev, vf);
+
+ /* check & process vf mailbox messages */
+ if (!ixgbe_check_for_msg(hw, vf))
+ ixgbe_rcv_msg_from_vf(eth_dev, vf);
+
+ /* check & process acks from vf */
+ if (!ixgbe_check_for_ack(hw, vf))
+ ixgbe_rcv_ack_from_vf(eth_dev, vf);
+ }
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.c
new file mode 100755
index 00000000..e10d6a21
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -0,0 +1,4228 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+
+#include "ixgbe_logs.h"
+#include "ixgbe/ixgbe_api.h"
+#include "ixgbe/ixgbe_vf.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe/ixgbe_dcb.h"
+#include "ixgbe/ixgbe_common.h"
+#include "ixgbe_rxtx.h"
+
+#define IXGBE_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_IPV4_TCP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV4_UDP | \
+ ETH_RSS_IPV6_UDP | \
+ ETH_RSS_IPV6_UDP_EX)
+
+/* Bit Mask to indicate what bits required for building TX context */
+#define IXGBE_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+ return (m);
+}
+
+
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+/*
+ * Prefetch a cache line into all cache levels.
+ */
+#define rte_ixgbe_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_ixgbe_prefetch(p) do {} while(0)
+#endif
+
+/*********************************************************************
+ *
+ * TX functions
+ *
+ **********************************************************************/
+
+/*
+ * Check for descriptors with their DD bit set and free mbufs.
+ * Return the total number of buffers freed.
+ */
+static inline int __attribute__((always_inline))
+ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+{
+ struct igb_tx_entry *txep;
+ uint32_t status;
+ int i;
+
+ /* check DD bit on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ if (! (status & IXGBE_ADVTXD_STAT_DD))
+ return 0;
+
+ /*
+ * first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
+
+ /* free buffers one at a time */
+ if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
+ for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ txep->mbuf->next = NULL;
+ rte_mempool_put(txep->mbuf->pool, txep->mbuf);
+ txep->mbuf = NULL;
+ }
+ } else {
+ for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ rte_pktmbuf_free_seg(txep->mbuf);
+ txep->mbuf = NULL;
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+/* Populate 4 descriptors with data from 4 mbufs */
+static inline void
+tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+ int i;
+
+ for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
+ buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ pkt_len = (*pkts)->data_len;
+
+ /* write data to descriptor */
+ txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.cmd_type_len =
+ ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ txdp->read.olinfo_status =
+ (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_prefetch0(&(*pkts)->pool);
+ }
+}
+
+/* Populate 1 descriptor with data from 1 mbuf */
+static inline void
+tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+
+ buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+ pkt_len = (*pkts)->data_len;
+
+ /* write data to descriptor */
+ txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.cmd_type_len =
+ ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ txdp->read.olinfo_status =
+ (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_prefetch0(&(*pkts)->pool);
+}
+
+/*
+ * Fill H/W descriptor ring with mbuf data.
+ * Copy mbuf pointers to the S/W ring.
+ */
+static inline void
+ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
+ uint16_t nb_pkts)
+{
+ volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+ struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ const int N_PER_LOOP = 4;
+ const int N_PER_LOOP_MASK = N_PER_LOOP-1;
+ int mainpart, leftover;
+ int i, j;
+
+ /*
+ * Process most of the packets in chunks of N pkts. Any
+ * leftover packets will get processed one at a time.
+ */
+ mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
+ leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
+ for (i = 0; i < mainpart; i += N_PER_LOOP) {
+ /* Copy N mbuf pointers to the S/W ring */
+ for (j = 0; j < N_PER_LOOP; ++j) {
+ (txep + i + j)->mbuf = *(pkts + i + j);
+ }
+ tx4(txdp + i, pkts + i);
+ }
+
+ if (unlikely(leftover > 0)) {
+ for (i = 0; i < leftover; ++i) {
+ (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
+ tx1(txdp + mainpart + i, pkts + mainpart + i);
+ }
+ }
+}
+
+static inline uint16_t
+tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+ volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
+ uint16_t n = 0;
+
+ /*
+ * Begin scanning the H/W ring for done descriptors when the
+ * number of available descriptors drops below tx_free_thresh. For
+ * each done descriptor, free the associated buffer.
+ */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ixgbe_tx_free_bufs(txq);
+
+ /* Only use descriptors that are available */
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ /* Use exactly nb_pkts descriptors */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ /*
+ * At this point, we know there are enough descriptors in the
+ * ring to transmit all the packets. This assumes that each
+ * mbuf contains a single segment, and that no new offloads
+ * are expected, which would require a new context descriptor.
+ */
+
+ /*
+ * See if we're going to wrap-around. If so, handle the top
+ * of the descriptor ring first, then do the bottom. If not,
+ * the processing looks just like the "bottom" part anyway...
+ */
+ if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
+ n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
+ ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
+
+ /*
+ * We know that the last descriptor in the ring will need to
+ * have its RS bit set because tx_rs_thresh has to be
+ * a divisor of the ring size
+ */
+ tx_r[txq->tx_next_rs].read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ }
+
+ /* Fill H/W descriptor ring with mbuf data */
+ ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
+
+ /*
+ * Determine if RS bit should be set
+ * This is what we actually want:
+ * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
+ * but instead of subtracting 1 and doing >=, we can just do
+ * greater than without subtracting.
+ */
+ if (txq->tx_tail > txq->tx_next_rs) {
+ tx_r[txq->tx_next_rs].read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+ txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
+ txq->tx_rs_thresh);
+ if (txq->tx_next_rs >= txq->nb_tx_desc)
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+ }
+
+ /*
+ * Check for wrap-around. This would only happen if we used
+ * up to the last descriptor in the ring, no more, no less.
+ */
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+
+ /* update tail pointer */
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+uint16_t
+ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+
+ /* Try to transmit at least chunks of TX_MAX_BURST pkts */
+ if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
+ return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
+
+ /* transmit more than the max burst, in chunks of TX_MAX_BURST */
+ nb_tx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
+ ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
+ nb_tx = (uint16_t)(nb_tx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < n)
+ break;
+ }
+
+ return nb_tx;
+}
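The burst-splitting loop above pushes a large burst in fixed-size chunks and stops as soon as a chunk is only partially accepted. A driver-independent sketch of that control flow; fake_xmit() is a made-up stand-in for tx_xmit_pkts() that accepts at most 70 packets.

#include <stdint.h>
#include <stdio.h>

#define CHUNK 32

static uint16_t fake_xmit(uint16_t n, uint16_t *room)
{
	uint16_t sent = (n < *room) ? n : *room;
	*room -= sent;
	return sent;
}

int main(void)
{
	uint16_t room = 70, nb_pkts = 100, nb_tx = 0;

	while (nb_pkts) {
		uint16_t n = (nb_pkts < CHUNK) ? nb_pkts : CHUNK;
		uint16_t ret = fake_xmit(n, &room);
		nb_tx = (uint16_t)(nb_tx + ret);
		nb_pkts = (uint16_t)(nb_pkts - ret);
		if (ret < n)  /* ring full: give up on the rest of the burst */
			break;
	}
	printf("transmitted %u of requested 100\n", (unsigned)nb_tx);  /* 70 */
	return 0;
}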
+
+static inline void
+ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
+ volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
+ uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
+{
+ uint32_t type_tucmd_mlhl;
+ uint32_t mss_l4len_idx = 0;
+ uint32_t ctx_idx;
+ uint32_t vlan_macip_lens;
+ union ixgbe_tx_offload tx_offload_mask;
+
+ ctx_idx = txq->ctx_curr;
+ tx_offload_mask.data = 0;
+ type_tucmd_mlhl = 0;
+
+ /* Specify which HW CTX to upload. */
+ mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
+
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ tx_offload_mask.vlan_tci |= ~0;
+ }
+
+ /* check if TCP segmentation required for this packet */
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* implies IP cksum and TCP cksum */
+ type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
+ IXGBE_ADVTXD_TUCMD_L4T_TCP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ tx_offload_mask.l4_len |= ~0;
+ tx_offload_mask.tso_segsz |= ~0;
+ mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ } else { /* no TSO, check if hardware checksum is needed */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ }
+
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ tx_offload_mask.l4_len |= ~0;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+ mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ default:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+ break;
+ }
+ }
+
+ txq->ctx_cache[ctx_idx].flags = ol_flags;
+ txq->ctx_cache[ctx_idx].tx_offload.data =
+ tx_offload_mask.data & tx_offload.data;
+ txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
+
+ ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+ vlan_macip_lens = tx_offload.l3_len;
+ vlan_macip_lens |= (tx_offload.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT);
+ vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
+ ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
+ ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
+ ctx_txd->seqnum_seed = 0;
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
+ union ixgbe_tx_offload tx_offload)
+{
+	/* If it matches the currently used context */
+ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
+ return txq->ctx_curr;
+ }
+
+	/* Otherwise, check whether the other cached context matches */
+ txq->ctx_curr ^= 1;
+ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
+ return txq->ctx_curr;
+ }
+
+	/* Neither cached context matches: a new context descriptor must be built */
+ return (IXGBE_CTX_NUM);
+}
+
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
+{
+ uint32_t tmp = 0;
+ if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
+ tmp |= IXGBE_ADVTXD_POPTS_TXSM;
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ tmp |= IXGBE_ADVTXD_POPTS_IXSM;
+ if (ol_flags & PKT_TX_TCP_SEG)
+ tmp |= IXGBE_ADVTXD_POPTS_TXSM;
+ return tmp;
+}
+
+static inline uint32_t
+tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
+{
+ uint32_t cmdtype = 0;
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
+ return cmdtype;
+}
+
+/* Default RS bit threshold values */
+#ifndef DEFAULT_TX_RS_THRESH
+#define DEFAULT_TX_RS_THRESH 32
+#endif
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+/* Reset transmit descriptors after they have been used */
+static inline int
+ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
+{
+ struct igb_tx_entry *sw_ring = txq->sw_ring;
+ volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+
+ /* Determine the last descriptor needing to be cleaned */
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ /* Check to make sure the last descriptor to clean is done */
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+ {
+ PMD_TX_FREE_LOG(DEBUG,
+ "TX descriptor %4u is not done"
+ "(port=%d queue=%d)",
+ desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ /* Failed to clean any descriptors, better luck next time */
+ return -(1);
+ }
+
+ /* Figure out how many descriptors will be cleaned */
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ PMD_TX_FREE_LOG(DEBUG,
+ "Cleaning %4u TX descriptors: %4u to %4u "
+ "(port=%d queue=%d)",
+ nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+
+ /*
+ * The last descriptor to clean is done, so that means all the
+ * descriptors from the last descriptor that was cleaned
+ * up to the last descriptor with the RS bit set
+ * are done. Only reset the threshold descriptor.
+ */
+ txr[desc_to_clean_to].wb.status = 0;
+
+ /* Update the txq to reflect the last descriptor that was cleaned */
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+ /* No Error */
+ return (0);
+}
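+
+/*
+ * A worked example of the wrap-around arithmetic above (ignoring the
+ * sw_ring last_id indirection), assuming a 512-descriptor ring with
+ * tx_rs_thresh = 32 and last_desc_cleaned = 500:
+ * desc_to_clean_to = 500 + 32 = 532 -> 532 - 512 = 20, and since
+ * last_desc_cleaned (500) > desc_to_clean_to (20),
+ * nb_tx_to_clean = (512 - 500) + 20 = 32 descriptors.
+ */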
+
+uint16_t
+ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_tx_queue *txq;
+ struct igb_tx_entry *sw_ring;
+ struct igb_tx_entry *txe, *txn;
+ volatile union ixgbe_adv_tx_desc *txr;
+ volatile union ixgbe_adv_tx_desc *txd;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint64_t buf_dma_addr;
+ uint32_t olinfo_status;
+ uint32_t cmd_type_len;
+ uint32_t pkt_len;
+ uint16_t slen;
+ uint64_t ol_flags;
+ uint16_t tx_id;
+ uint16_t tx_last;
+ uint16_t nb_tx;
+ uint16_t nb_used;
+ uint64_t tx_ol_req;
+ uint32_t ctx = 0;
+ uint32_t new_ctx;
+ union ixgbe_tx_offload tx_offload = { .data = 0 };
+
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Determine if the descriptor ring needs to be cleaned. */
+ if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
+ ixgbe_xmit_cleanup(txq);
+ }
+
+ rte_prefetch0(&txe->mbuf->pool);
+
+ /* TX loop */
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ new_ctx = 0;
+ tx_pkt = *tx_pkts++;
+ pkt_len = tx_pkt->pkt_len;
+
+ /*
+ * Determine how many (if any) context descriptors
+ * are needed for offload functionality.
+ */
+ ol_flags = tx_pkt->ol_flags;
+
+ /* If hardware offload required */
+ tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
+ if (tx_ol_req) {
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.vlan_tci = tx_pkt->vlan_tci;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+
+ /* Decide whether a new context must be built or an existing one reused. */
+ ctx = what_advctx_update(txq, tx_ol_req,
+ tx_offload);
+ /* Only allocate a context descriptor if required */
+ new_ctx = (ctx == IXGBE_CTX_NUM);
+ ctx = txq->ctx_curr;
+ }
+
+ /*
+ * Keep track of how many descriptors are used this loop.
+ * This will always be the number of segments + the number of
+ * context descriptors required to transmit the packet.
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+
+ /*
+ * The number of descriptors that must be allocated for a
+ * packet is the number of segments of that packet, plus 1
+ * Context Descriptor for the hardware offload, if any.
+ * Determine the last TX descriptor to allocate in the TX ring
+ * for the packet, starting from the current position (tx_id)
+ * in the ring.
+ */
+ tx_last = (uint16_t) (tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+ " tx_first=%u tx_last=%u",
+ (unsigned) txq->port_id,
+ (unsigned) txq->queue_id,
+ (unsigned) pkt_len,
+ (unsigned) tx_id,
+ (unsigned) tx_last);
+
+ /*
+ * Make sure there are enough TX descriptors available to
+ * transmit the entire packet.
+ * nb_used better be less than or equal to txq->tx_rs_thresh
+ */
+ if (nb_used > txq->nb_tx_free) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Not enough free TX descriptors "
+ "nb_used=%4u nb_free=%4u "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->port_id, txq->queue_id);
+
+ if (ixgbe_xmit_cleanup(txq) != 0) {
+ /* Could not clean any descriptors */
+ if (nb_tx == 0)
+ return (0);
+ goto end_of_tx;
+ }
+
+ /* nb_used better be <= txq->tx_rs_thresh */
+ if (unlikely(nb_used > txq->tx_rs_thresh)) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "The number of descriptors needed to "
+ "transmit the packet exceeds the "
+ "RS bit threshold. This will impact "
+ "performance."
+ "nb_used=%4u nb_free=%4u "
+ "tx_rs_thresh=%4u. "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->tx_rs_thresh,
+ txq->port_id, txq->queue_id);
+ /*
+ * Loop here until there are enough TX
+ * descriptors or until the ring cannot be
+ * cleaned.
+ */
+ while (nb_used > txq->nb_tx_free) {
+ if (ixgbe_xmit_cleanup(txq) != 0) {
+ /*
+ * Could not clean any
+ * descriptors
+ */
+ if (nb_tx == 0)
+ return (0);
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /*
+ * By now there are enough free TX descriptors to transmit
+ * the packet.
+ */
+
+ /*
+ * Set common flags of all TX Data Descriptors.
+ *
+ * The following bits must be set in all Data Descriptors:
+ * - IXGBE_ADVTXD_DTYP_DATA
+ * - IXGBE_ADVTXD_DCMD_DEXT
+ *
+ * The following bits must be set in the first Data Descriptor
+ * and are ignored in the other ones:
+ * - IXGBE_ADVTXD_DCMD_IFCS
+ * - IXGBE_ADVTXD_MAC_1588
+ * - IXGBE_ADVTXD_DCMD_VLE
+ *
+ * The following bits must only be set in the last Data
+ * Descriptor:
+ * - IXGBE_TXD_CMD_EOP
+ *
+ * The following bits can be set in any Data Descriptor, but
+ * are only set in the last Data Descriptor:
+ * - IXGBE_TXD_CMD_RS
+ */
+ cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
+#endif
+
+ olinfo_status = 0;
+ if (tx_ol_req) {
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* when TSO is on, the paylen in the descriptor is
+ * not the packet length but the TCP payload length */
+ pkt_len -= (tx_offload.l2_len +
+ tx_offload.l3_len + tx_offload.l4_len);
+ }
+
+ /*
+ * Setup the TX Advanced Context Descriptor if required
+ */
+ if (new_ctx) {
+ volatile struct ixgbe_adv_tx_context_desc *
+ ctx_txd;
+
+ ctx_txd = (volatile struct
+ ixgbe_adv_tx_context_desc *)
+ &txr[tx_id];
+
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+ tx_offload);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ /*
+ * Set up the TX Advanced Data Descriptor.
+ * This path is taken whether a new context descriptor
+ * was built or an existing one was reused.
+ */
+ cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
+ olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
+ olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
+ }
+
+ olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /*
+ * Set up Transmit Data Descriptor.
+ */
+ slen = m_seg->data_len;
+ buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+ txd->read.buffer_addr =
+ rte_cpu_to_le_64(buf_dma_addr);
+ txd->read.cmd_type_len =
+ rte_cpu_to_le_32(cmd_type_len | slen);
+ txd->read.olinfo_status =
+ rte_cpu_to_le_32(olinfo_status);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /*
+ * The last packet data descriptor needs End Of Packet (EOP)
+ */
+ cmd_type_len |= IXGBE_TXD_CMD_EOP;
+ txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+ /* Set RS bit only on threshold packets' last descriptor */
+ if (txq->nb_tx_used >= txq->tx_rs_thresh) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
+
+ cmd_type_len |= IXGBE_TXD_CMD_RS;
+
+ /* Update txq RS bit counters */
+ txq->nb_tx_used = 0;
+ }
+ txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
+ }
+end_of_tx:
+ rte_wmb();
+
+ /*
+ * Set the Transmit Descriptor Tail (TDT)
+ */
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) tx_id, (unsigned) nb_tx);
+ IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ txq->tx_tail = tx_id;
+
+ return (nb_tx);
+}
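+
+/*
+ * ixgbe_xmit_pkts() is installed as dev->tx_pkt_burst (see
+ * set_tx_function() below), so applications reach it through the generic
+ * burst API.  A minimal caller sketch, with the port/queue ids and mbuf
+ * array purely illustrative:
+ *
+ *   struct rte_mbuf *pkts[32];
+ *   uint16_t nb_prep;   // mbufs prepared by the application
+ *   uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
+ *   // nb_sent may be < nb_prep when the ring cannot be cleaned
+ */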
+
+/*********************************************************************
+ *
+ * RX functions
+ *
+ **********************************************************************/
+static inline uint64_t
+rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+{
+ uint64_t pkt_flags;
+
+ static uint64_t ip_pkt_types_map[16] = {
+ 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
+ PKT_RX_IPV6_HDR, 0, 0, 0,
+ PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+ PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+ };
+
+ static uint64_t ip_rss_types_map[16] = {
+ 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, PKT_RX_FDIR,
+ };
+
+#ifdef RTE_LIBRTE_IEEE1588
+ static uint64_t ip_pkt_etqf_map[8] = {
+ 0, 0, 0, PKT_RX_IEEE1588_PTP,
+ 0, 0, 0, 0,
+ };
+
+ pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
+ ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
+ ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+#else
+ pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
+ ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+
+#endif
+ return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF];
+}
+
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status)
+{
+ uint64_t pkt_flags;
+
+ /*
+ * Check only whether a VLAN is present.
+ * Do not check whether the L3/L4 RX checksum was verified by the NIC;
+ * that can be determined from the rte_eth_rxmode.hw_ip_checksum flag.
+ */
+ pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ if (rx_status & IXGBE_RXD_STAT_TMST)
+ pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+#endif
+ return pkt_flags;
+}
+
+static inline uint64_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+ /*
+ * Bit 31: IPE, IPv4 checksum error
+ * Bit 30: L4I, L4 integrity error
+ */
+ static uint64_t error_to_pkt_flags_map[4] = {
+ 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ };
+ return error_to_pkt_flags_map[(rx_status >>
+ IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
+}
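+
+/*
+ * The table above is indexed by the two checksum-error bits (IPE and L4I)
+ * extracted from the status word; for example, a descriptor reporting only
+ * an IPv4 checksum error yields index 2 and therefore PKT_RX_IP_CKSUM_BAD.
+ */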
+
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+/*
+ * LOOK_AHEAD defines how many desc statuses to check beyond the
+ * current descriptor.
+ * It must be a compile-time #define for optimal performance.
+ * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
+ * function only works with LOOK_AHEAD=8.
+ */
+#define LOOK_AHEAD 8
+#if (LOOK_AHEAD != 8)
+#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ uint64_t pkt_flags;
+ int s[LOOK_AHEAD], nb_dd;
+ int i, j, nb_rx = 0;
+
+
+ /* get references to current descriptor and S/W ring entry */
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ /* check to make sure there is at least 1 packet to receive */
+ if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+ return 0;
+
+ /*
+ * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
+ * reference packets that are ready to be received.
+ */
+ for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
+ i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
+ {
+ /* Read desc statuses backwards to avoid race condition */
+ for (j = LOOK_AHEAD-1; j >= 0; --j)
+ s[j] = rxdp[j].wb.upper.status_error;
+
+ /* Compute how many status bits were set */
+ nb_dd = 0;
+ for (j = 0; j < LOOK_AHEAD; ++j)
+ nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf format */
+ for (j = 0; j < nb_dd; ++j) {
+ mb = rxep[j].mbuf;
+ pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->vlan_tci = rxdp[j].wb.upper.vlan;
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
+
+ /* convert descriptor fields to rte mbuf flags */
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
+ rxdp[j].wb.lower.lo_dword.data);
+ /* reuse status field from scan list */
+ pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
+ pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ mb->ol_flags = pkt_flags;
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+ else if (pkt_flags & PKT_RX_FDIR) {
+ mb->hash.fdir.hash =
+ (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
+ & IXGBE_ATR_HASH_MASK);
+ mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+ }
+ }
+
+ /* Move mbuf pointers from the S/W ring to the stage */
+ for (j = 0; j < LOOK_AHEAD; ++j) {
+ rxq->rx_stage[i + j] = rxep[j].mbuf;
+ }
+
+ /* stop scanning if not all descriptors in this window were done */
+ if (nb_dd != LOOK_AHEAD)
+ break;
+ }
+
+ /* clear software ring entries so we can cleanup correctly */
+ for (i = 0; i < nb_rx; ++i) {
+ rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+ }
+
+
+ return nb_rx;
+}
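+
+/*
+ * Note that nb_dd above counts every DD bit set in the LOOK_AHEAD window
+ * and then translates the first nb_dd descriptors; this assumes the
+ * hardware writes descriptors back in order, so the DD bits within a
+ * window have no gaps and a partially-done window ends the scan at the
+ * "nb_dd != LOOK_AHEAD" check.
+ */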
+
+static inline int
+ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx;
+ uint64_t dma_addr;
+ int diag, i;
+
+ /* allocate buffers in bulk directly into the S/W ring */
+ alloc_idx = (uint16_t)(rxq->rx_free_trigger -
+ (rxq->rx_free_thresh - 1));
+ rxep = &rxq->sw_ring[alloc_idx];
+ diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0))
+ return (-ENOMEM);
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; ++i) {
+ /* populate the static rte mbuf fields */
+ mb = rxep[i].mbuf;
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->next = NULL;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->port = rxq->port_id;
+
+ /* populate the descriptors */
+ dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ rxdp[i].read.hdr_addr = dma_addr;
+ rxdp[i].read.pkt_addr = dma_addr;
+ }
+
+ /* update tail pointer */
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);
+
+ /* update state of internal queue structure */
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger +
+ rxq->rx_free_thresh);
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+ /* no errors */
+ return 0;
+}
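+
+/*
+ * A worked example of the trigger bookkeeping above, assuming a
+ * 512-descriptor ring with rx_free_thresh = 32: the first refill starts at
+ * alloc_idx = 31 - 31 = 0, replenishes descriptors 0..31, writes RDT = 31
+ * and advances rx_free_trigger to 63, and so on until the trigger wraps
+ * back to 31 at the end of the ring.
+ */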
+
+static inline uint16_t
+ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+ int i;
+
+ /* how many packets are ready to return? */
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ /* copy mbuf pointers to the application's packet list */
+ for (i = 0; i < nb_pkts; ++i)
+ rx_pkts[i] = stage[i];
+
+ /* update internal queue state */
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+ return nb_pkts;
+}
+
+static inline uint16_t
+rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
+ uint16_t nb_rx = 0;
+
+ /* Any previously recv'd pkts will be returned from the Rx stage */
+ if (rxq->rx_nb_avail)
+ return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ /* Scan the H/W ring for packets to receive */
+ nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
+
+ /* update internal queue state */
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+ /* if required, allocate new buffers to replenish descriptors */
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ if (ixgbe_rx_alloc_bufs(rxq) != 0) {
+ int i, j;
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
+ /*
+ * Need to rewind any previous receives if we cannot
+ * allocate new buffers to replenish the old ones.
+ */
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
+ rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
+
+ return 0;
+ }
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ /* received any packets this loop? */
+ if (rxq->rx_nb_avail)
+ return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
+/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
+uint16_t
+ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
+ return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ /* request is relatively large, chunk it up */
+ nb_rx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
+ ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx = (uint16_t)(nb_rx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < n)
+ break;
+ }
+
+ return nb_rx;
+}
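+
+/*
+ * Callers never see the chunking above directly; a request larger than
+ * RTE_PMD_IXGBE_RX_MAX_BURST is simply split.  A minimal caller sketch
+ * (port/queue ids illustrative only):
+ *
+ *   struct rte_mbuf *pkts[64];
+ *   uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 64);
+ *   // nb <= 64; the request is served in RTE_PMD_IXGBE_RX_MAX_BURST-sized
+ *   // chunks and stops early when a chunk comes back short
+ */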
+#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
+
+uint16_t
+ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_rx_queue *rxq;
+ volatile union ixgbe_adv_rx_desc *rx_ring;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_entry *sw_ring;
+ struct igb_rx_entry *rxe;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ union ixgbe_adv_rx_desc rxd;
+ uint64_t dma_addr;
+ uint32_t staterr;
+ uint32_t hlen_type_rss;
+ uint16_t pkt_len;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint64_t pkt_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+ while (nb_rx < nb_pkts) {
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rxdp->wb.upper.status_error;
+ if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * End of packet.
+ *
+ * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
+ * is likely to be invalid and to be dropped by the various
+ * validation checks performed by the network stack.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy does not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be retrieved later if
+ * mbufs have been freed in the meantime.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * from happening by sending specific "back-pressure" flow
+ * control frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "ext_err_stat=0x%08x pkt_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) staterr,
+ (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+ nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_ixgbe_prefetch(&rx_ring[rx_id]);
+ rte_ixgbe_prefetch(&sw_ring[rx_id]);
+ }
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.pkt_addr = dma_addr;
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
+ rxq->crc_len);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->port = rxq->port_id;
+
+ hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+ /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+ pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+ rxm->ol_flags = pkt_flags;
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+ else if (pkt_flags & PKT_RX_FDIR) {
+ rxm->hash.fdir.hash =
+ (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
+ & IXGBE_ATR_HASH_MASK);
+ rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ }
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return (nb_rx);
+}
+
+uint16_t
+ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_rx_queue *rxq;
+ volatile union ixgbe_adv_rx_desc *rx_ring;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_entry *sw_ring;
+ struct igb_rx_entry *rxe;
+ struct rte_mbuf *first_seg;
+ struct rte_mbuf *last_seg;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ union ixgbe_adv_rx_desc rxd;
+ uint64_t dma; /* Physical address of mbuf data buffer */
+ uint32_t staterr;
+ uint32_t hlen_type_rss;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint16_t data_len;
+ uint64_t pkt_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+
+ /*
+ * Retrieve RX context of current packet, if any.
+ */
+ first_seg = rxq->pkt_first_seg;
+ last_seg = rxq->pkt_last_seg;
+
+ while (nb_rx < nb_pkts) {
+ next_desc:
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rxdp->wb.upper.status_error;
+ if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * Descriptor done.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy does not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be retrieved later if
+ * mbufs have been freed in the meantime.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * from happening by sending specific "back-pressure" flow
+ * control frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x data_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) staterr,
+ (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+ nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_ixgbe_prefetch(&rx_ring[rx_id]);
+ rte_ixgbe_prefetch(&sw_ring[rx_id]);
+ }
+
+ /*
+ * Update RX descriptor with the physical address of the new
+ * data buffer of the newly allocated mbuf.
+ */
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rxdp->read.hdr_addr = dma;
+ rxdp->read.pkt_addr = dma;
+
+ /*
+ * Set data length & data buffer address of mbuf.
+ */
+ data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
+ rxm->data_len = data_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (first_seg == NULL) {
+ first_seg = rxm;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
+ } else {
+ first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
+ + data_len);
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /*
+ * If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (! (staterr & IXGBE_RXDADV_STAT_EOP)) {
+ last_seg = rxm;
+ goto next_desc;
+ }
+
+ /*
+ * This is the last buffer of the received packet.
+ * If the CRC is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer.
+ * If part of the CRC is also contained in the previous
+ * mbuf, subtract the length of that CRC part from the
+ * data length of the previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= ETHER_CRC_LEN;
+ if (data_len <= ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len = (uint16_t)
+ (last_seg->data_len -
+ (ETHER_CRC_LEN - data_len));
+ last_seg->next = NULL;
+ } else
+ rxm->data_len =
+ (uint16_t) (data_len - ETHER_CRC_LEN);
+ }
+
+ /*
+ * Initialize the first mbuf of the returned packet:
+ * - RX port identifier,
+ * - hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ first_seg->port = rxq->port_id;
+
+ /*
+ * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+ * set in the pkt_flags field.
+ */
+ first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+ pkt_flags = (pkt_flags |
+ rx_desc_status_to_pkt_flags(staterr));
+ pkt_flags = (pkt_flags |
+ rx_desc_error_to_pkt_flags(staterr));
+ first_seg->ol_flags = pkt_flags;
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+ else if (pkt_flags & PKT_RX_FDIR) {
+ first_seg->hash.fdir.hash =
+ (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
+ & IXGBE_ATR_HASH_MASK);
+ first_seg->hash.fdir.id =
+ rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ }
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = first_seg;
+
+ /*
+ * Setup receipt context for a new packet.
+ */
+ first_seg = NULL;
+ }
+
+ /*
+ * Record index of the next RX descriptor to probe.
+ */
+ rxq->rx_tail = rx_id;
+
+ /*
+ * Save receive context.
+ */
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return (nb_rx);
+}
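+
+/*
+ * A small example of the CRC trimming logic above, assuming CRC stripping
+ * is disabled (crc_len = 4): if the final segment carries only 2 bytes,
+ * that segment is freed, nb_segs is decremented, and the previous
+ * segment's data_len is reduced by the remaining 2 CRC bytes.
+ */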
+
+/*********************************************************************
+ *
+ * Queue management functions
+ *
+ **********************************************************************/
+
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
+ * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary
+ * instead. This also improves cache-line utilization; the H/W supports a
+ * cache line size of up to 128 bytes.
+ */
+#define IXGBE_ALIGN 128
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
+ */
+#define IXGBE_MIN_RING_DESC 32
+#define IXGBE_MAX_RING_DESC 4096
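+
+/*
+ * Both advanced descriptor formats are 16 bytes, so the "multiple of 128
+ * bytes" rule above effectively requires the ring size to be a multiple of
+ * 8 descriptors; e.g. 512 * 16 = 8192 bytes, which is 128-byte aligned.
+ */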
+
+/*
+ * Create memzone for HW rings. malloc can't be used as the physical address is
+ * needed. If the memzone is already created, then this function returns a ptr
+ * to the old one.
+ */
+static const struct rte_memzone *
+ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
+ uint16_t queue_id, uint32_t ring_size, int socket_id)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ dev->driver->pci_drv.name, ring_name,
+ dev->data->port_id, queue_id);
+
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ return mz;
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+ return rte_memzone_reserve_bounded(z_name, ring_size,
+ socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
+#else
+ return rte_memzone_reserve_aligned(z_name, ring_size,
+ socket_id, 0, IXGBE_ALIGN);
+#endif
+}
+
+static void
+ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+{
+ unsigned i;
+
+ if (txq->sw_ring != NULL) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static void
+ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+{
+ if (txq != NULL &&
+ txq->sw_ring != NULL)
+ rte_free(txq->sw_ring);
+}
+
+static void
+ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+{
+ if (txq != NULL && txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->free_swring(txq);
+ rte_free(txq);
+ }
+}
+
+void
+ixgbe_dev_tx_queue_release(void *txq)
+{
+ ixgbe_tx_queue_release(txq);
+}
+
+/* (Re)set dynamic igb_tx_queue fields to defaults */
+static void
+ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+{
+ static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
+ .buffer_addr = 0}};
+ struct igb_tx_entry *txe = txq->sw_ring;
+ uint16_t prev, i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i] = zeroed_desc;
+ }
+
+ /* Initialize SW ring entries */
+ prev = (uint16_t) (txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+ txd->wb.status = IXGBE_TXD_STAT_DD;
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ txq->nb_tx_used = 0;
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->ctx_curr = 0;
+ memset((void*)&txq->ctx_cache, 0,
+ IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+}
+
+static struct ixgbe_txq_ops def_txq_ops = {
+ .release_mbufs = ixgbe_tx_queue_release_mbufs,
+ .free_swring = ixgbe_tx_free_swring,
+ .reset = ixgbe_reset_tx_queue,
+};
+
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void
+set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq)
+{
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
+ && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
+ PMD_INIT_LOG(INFO, "Using simple tx code path");
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+ (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+ ixgbe_txq_vec_setup(txq) == 0)) {
+ PMD_INIT_LOG(INFO, "Vector tx enabled.");
+ dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
+ } else
+#endif
+ dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+ } else {
+ PMD_INIT_LOG(INFO, "Using full-featured tx code path");
+ PMD_INIT_LOG(INFO,
+ " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
+ (unsigned long)txq->txq_flags,
+ (unsigned long)IXGBE_SIMPLE_FLAGS);
+ PMD_INIT_LOG(INFO,
+ " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
+ (unsigned long)txq->tx_rs_thresh,
+ (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
+ dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ }
+}
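+
+/*
+ * The selection above is driven entirely by the queue configuration.  A
+ * hypothetical setup call that would land on the full-featured path
+ * (identifiers and values illustrative only; txq_flags = 0 does not
+ * contain the IXGBE_SIMPLE_FLAGS bits, so the simple path is rejected):
+ *
+ *   struct rte_eth_txconf txconf = {
+ *           .txq_flags    = 0,        // offloads / multi-seg allowed
+ *           .tx_rs_thresh = 32,
+ *   };
+ *   rte_eth_tx_queue_setup(port_id, queue_id, 512, socket_id, &txconf);
+ */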
+
+int
+ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const struct rte_memzone *tz;
+ struct igb_tx_queue *txq;
+ struct ixgbe_hw *hw;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Validate number of transmit descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of IXGBE_ALIGN.
+ */
+ if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
+ (nb_desc > IXGBE_MAX_RING_DESC) ||
+ (nb_desc < IXGBE_MIN_RING_DESC)) {
+ return -EINVAL;
+ }
+
+ /*
+ * The following two parameters control the setting of the RS bit on
+ * transmit descriptors.
+ * TX descriptors will have their RS bit set after txq->tx_rs_thresh
+ * descriptors have been used.
+ * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+ * descriptors are used or if the number of descriptors required
+ * to transmit a packet is greater than the number of free TX
+ * descriptors.
+ * The following constraints must be satisfied:
+ * tx_rs_thresh must be greater than 0.
+ * tx_rs_thresh must be less than the size of the ring minus 2.
+ * tx_rs_thresh must be less than or equal to tx_free_thresh.
+ * tx_rs_thresh must be a divisor of the ring size.
+ * tx_free_thresh must be greater than 0.
+ * tx_free_thresh must be less than the size of the ring minus 3.
+ * One descriptor in the TX ring is used as a sentinel to avoid a
+ * H/W race condition, hence the maximum threshold constraints.
+ * When set to zero use default values.
+ */
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ if (tx_rs_thresh >= (nb_desc - 2)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
+ "of TX descriptors minus 2. (tx_rs_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+ "tx_free_thresh must be less than the number of "
+ "TX descriptors minus 3. (tx_free_thresh=%u "
+ "port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
+ "tx_free_thresh. (tx_free_thresh=%u "
+ "tx_rs_thresh=%u port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return -(EINVAL);
+ }
+ if ((nb_desc % tx_rs_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
+ "number of TX descriptors. (tx_rs_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /*
+ * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
+ * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
+ * by the NIC and all descriptors are written back after the NIC
+ * accumulates WTHRESH descriptors.
+ */
+ if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
+ PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+ "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ return (-ENOMEM);
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
+ socket_id);
+ if (tz == NULL) {
+ ixgbe_tx_queue_release(txq);
+ return (-ENOMEM);
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_rs_thresh = tx_rs_thresh;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->pthresh = tx_conf->tx_thresh.pthresh;
+ txq->hthresh = tx_conf->tx_thresh.hthresh;
+ txq->wthresh = tx_conf->tx_thresh.wthresh;
+ txq->queue_id = queue_idx;
+ txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ txq->port_id = dev->data->port_id;
+ txq->txq_flags = tx_conf->txq_flags;
+ txq->ops = &def_txq_ops;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+ /*
+ * Set VFTDT instead of TDT when a virtual function is detected
+ */
+ if (hw->mac.type == ixgbe_mac_82599_vf ||
+ hw->mac.type == ixgbe_mac_X540_vf)
+ txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
+ else
+ txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
+#ifndef RTE_LIBRTE_XEN_DOM0
+ txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
+#else
+ txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#endif
+ txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
+
+ /* Allocate software ring */
+ txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
+ sizeof(struct igb_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL) {
+ ixgbe_tx_queue_release(txq);
+ return (-ENOMEM);
+ }
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+ /* set up vector or scalar TX function as appropriate */
+ set_tx_function(dev, txq);
+
+ txq->ops->reset(txq);
+
+ dev->data->tx_queues[queue_idx] = txq;
+
+
+ return (0);
+}
+
+static void
+ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+{
+ unsigned i;
+
+ if (rxq->sw_ring != NULL) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
+ }
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ if (rxq->rx_nb_avail) {
+ for (i = 0; i < rxq->rx_nb_avail; ++i) {
+ struct rte_mbuf *mb;
+ mb = rxq->rx_stage[rxq->rx_next_avail + i];
+ rte_pktmbuf_free_seg(mb);
+ }
+ rxq->rx_nb_avail = 0;
+ }
+#endif
+ }
+}
+
+static void
+ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
+{
+ if (rxq != NULL) {
+ ixgbe_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ }
+}
+
+void
+ixgbe_dev_rx_queue_release(void *rxq)
+{
+ ixgbe_rx_queue_release(rxq);
+}
+
+/*
+ * Check if Rx Burst Bulk Alloc function can be used.
+ * Return
+ * 0: the preconditions are satisfied and the bulk allocation function
+ * can be used.
+ * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
+ * function must be used.
+ */
+static inline int
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
+#else
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
+#endif
+{
+ int ret = 0;
+
+ /*
+ * Make sure the following pre-conditions are satisfied:
+ * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
+ * rxq->rx_free_thresh < rxq->nb_rx_desc
+ * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
+ * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
+ * Scattered packets are not supported. This should be checked
+ * outside of this function.
+ */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
+ ret = -EINVAL;
+ } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "rxq->nb_rx_desc=%d",
+ rxq->rx_free_thresh, rxq->nb_rx_desc);
+ ret = -EINVAL;
+ } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
+ ret = -EINVAL;
+ } else if (!(rxq->nb_rx_desc <
+ (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "IXGBE_MAX_RING_DESC=%d, "
+ "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
+ rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
+ RTE_PMD_IXGBE_RX_MAX_BURST);
+ ret = -EINVAL;
+ }
+#else
+ ret = -EINVAL;
+#endif
+
+ return ret;
+}
+
+/* Reset dynamic igb_rx_queue fields back to defaults */
+static void
+ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+{
+ static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
+ .pkt_addr = 0}};
+ unsigned i;
+ uint16_t len;
+
+ /*
+ * By default, the Rx queue setup function allocates enough memory for
+ * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
+ * extra memory at the end of the descriptor ring to be zero'd out. A
+ * pre-condition for using the Rx burst bulk alloc function is that the
+ * number of descriptors is less than or equal to
+ * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
+ * constraints here to see if we need to zero out memory after the end
+ * of the H/W descriptor ring.
+ */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+ /* zero out extra memory */
+ len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
+ else
+#endif
+ /* do not zero out extra memory */
+ len = rxq->nb_rx_desc;
+
+ /*
+ * Zero out HW ring memory. Zero out extra memory at the end of
+ * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
+ * reads extra memory as zeros.
+ */
+ for (i = 0; i < len; i++) {
+ rxq->rx_ring[i] = zeroed_desc;
+ }
+
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ /*
+ * Initialize the extra software ring entries. Space for these
+ * extra entries is always allocated.
+ */
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+ for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; ++i) {
+ rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+ }
+
+ rxq->rx_nb_avail = 0;
+ rxq->rx_next_avail = 0;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+}
+
+int
+ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *rz;
+ struct igb_rx_queue *rxq;
+ struct ixgbe_hw *hw;
+ int use_def_burst_func = 1;
+ uint16_t len;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Validate number of receive descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of IXGBE_ALIGN.
+ */
+ if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
+ (nb_desc > IXGBE_MAX_RING_DESC) ||
+ (nb_desc < IXGBE_MIN_RING_DESC)) {
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ return (-ENOMEM);
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ rxq->port_id = dev->data->port_id;
+ rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
+ 0 : ETHER_CRC_LEN);
+ rxq->drop_en = rx_conf->rx_drop_en;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+
+ /*
+ * Allocate RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ RX_RING_SZ, socket_id);
+ if (rz == NULL) {
+ ixgbe_rx_queue_release(rxq);
+ return (-ENOMEM);
+ }
+
+ /*
+ * Zero init all the descriptors in the ring.
+ */
+ memset (rz->addr, 0, RX_RING_SZ);
+
+ /*
+ * Set VFRDT instead of RDT for a virtual function
+ */
+ if (hw->mac.type == ixgbe_mac_82599_vf ||
+ hw->mac.type == ixgbe_mac_X540_vf) {
+ rxq->rdt_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
+ rxq->rdh_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
+ }
+ else {
+ rxq->rdt_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
+ rxq->rdh_reg_addr =
+ IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
+ }
+#ifndef RTE_LIBRTE_XEN_DOM0
+ rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
+#else
+ rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#endif
+ rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
+
+ /*
+ * Allocate software ring. Allow for space at the end of the
+ * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
+ * function does not access an invalid memory region.
+ */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
+#else
+ len = nb_desc;
+#endif
+ rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
+ sizeof(struct igb_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_ring == NULL) {
+ ixgbe_rx_queue_release(rxq);
+ return (-ENOMEM);
+ }
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+
+ /*
+ * Certain constraints must be met in order to use the bulk buffer
+ * allocation Rx burst function.
+ */
+ use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
+
+#ifdef RTE_IXGBE_INC_VECTOR
+ ixgbe_rxq_vec_setup(rxq);
+#endif
+ /* Check if pre-conditions are satisfied, and no Scattered Rx */
+ if (!use_def_burst_func && !dev->data->scattered_rx) {
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (!ixgbe_rx_vec_condition_check(dev)) {
+ PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
+ "sure RX burst size no less than 32.");
+ dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+ }
+#endif
+#endif
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
+ "are not satisfied, Scattered Rx is requested, "
+ "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
+ "enabled (port=%d, queue=%d).",
+ rxq->port_id, rxq->queue_id);
+ }
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ ixgbe_reset_rx_queue(rxq);
+
+ return 0;
+}
+
+uint32_t
+ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define IXGBE_RXQ_SCAN_INTERVAL 4
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_queue *rxq;
+ uint32_t desc = 0;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
+ return 0;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+
+ while ((desc < rxq->nb_rx_desc) &&
+ (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+ desc += IXGBE_RXQ_SCAN_INTERVAL;
+ rxdp += IXGBE_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
+int
+ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_queue *rxq = rx_queue;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return 0;
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &rxq->rx_ring[desc];
+ return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+}
+
+void
+ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ unsigned i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct igb_tx_queue *txq = dev->data->tx_queues[i];
+ if (txq != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+ if (rxq != NULL) {
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(rxq);
+ }
+ }
+}
+
+/*********************************************************************
+ *
+ * Device RX/TX init functions
+ *
+ **********************************************************************/
+
+/**
+ * Receive Side Scaling (RSS)
+ * See section 7.1.2.8 in the following document:
+ * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source
+ * and destination ports of TCP/UDP headers, if any, of received packets are
+ * hashed against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
+ * RSS output index which is used as the RX queue index in which to store
+ * the received packets.
+ * The following output is supplied in the RX write-back descriptor:
+ * - 32-bit result of the Microsoft RSS hash function,
+ * - 4-bit RSS type field.
+ */
+
+/*
+ * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
+ * Used as the default key.
+ */
+static uint8_t rss_intel_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static void
+ixgbe_rss_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mrqc;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc &= ~IXGBE_MRQC_RSSEN;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+}
+
+static void
+ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
+{
+ uint8_t *hash_key;
+ uint32_t mrqc;
+ uint32_t rss_key;
+ uint64_t rss_hf;
+ uint16_t i;
+
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL) {
+ /* Fill in RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = hash_key[(i * 4)];
+ rss_key |= hash_key[(i * 4) + 1] << 8;
+ rss_key |= hash_key[(i * 4) + 2] << 16;
+ rss_key |= hash_key[(i * 4) + 3] << 24;
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
+ }
+ }
+
+ /* Set configured hashing protocols in MRQC register */
+ rss_hf = rss_conf->rss_hf;
+ mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
+ if (rss_hf & ETH_RSS_IPV4)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
+ if (rss_hf & ETH_RSS_IPV4_TCP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
+ if (rss_hf & ETH_RSS_IPV6)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
+ if (rss_hf & ETH_RSS_IPV6_EX)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
+ if (rss_hf & ETH_RSS_IPV6_TCP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+ if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
+ if (rss_hf & ETH_RSS_IPV4_UDP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
+ if (rss_hf & ETH_RSS_IPV6_UDP)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
+ if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+}
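+
+/*
+ * A hypothetical rte_eth_rss_conf enabling IPv4/IPv6 TCP hashing while
+ * keeping whatever key is already programmed (illustrative only); it would
+ * be applied through the update helper below:
+ *
+ *   struct rte_eth_rss_conf rss_conf = {
+ *           .rss_key = NULL,   // leave RSSRK untouched
+ *           .rss_hf  = ETH_RSS_IPV4_TCP | ETH_RSS_IPV6_TCP,
+ *   };
+ *   ixgbe_dev_rss_hash_update(dev, &rss_conf);
+ */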
+
+int
+ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mrqc;
+ uint64_t rss_hf;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
+ * "RSS enabling cannot be done dynamically while it must be
+ * preceded by a software reset"
+ * Before changing anything, first check that the update RSS operation
+ * does not attempt to disable RSS, if RSS was enabled at
+ * initialization time, or does not attempt to enable RSS, if RSS was
+ * disabled at initialization time.
+ */
+ rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
+ if (rss_hf != 0) /* Enable RSS */
+ return -(EINVAL);
+ return 0; /* Nothing to do */
+ }
+ /* RSS enabled */
+ if (rss_hf == 0) /* Disable RSS */
+ return -(EINVAL);
+ ixgbe_hw_rss_hash_set(hw, rss_conf);
+ return 0;
+}
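+
+/*
+ * Editor's illustrative sketch (not part of the original patch): how an
+ * application would typically reach the update callback above through the
+ * generic ethdev API. The port number and the hash-function selection are
+ * assumptions chosen only for the example.
+ */
+static inline int
+ixgbe_rss_hash_update_usage_sketch(uint8_t port_id)
+{
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = NULL, /* keep the current (or default) key */
+ .rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP,
+ };
+
+ /* Dispatches to ixgbe_dev_rss_hash_update() for ixgbe ports. */
+ return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
+}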
+
+int
+ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct ixgbe_hw *hw;
+ uint8_t *hash_key;
+ uint32_t mrqc;
+ uint32_t rss_key;
+ uint64_t rss_hf;
+ uint16_t i;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL) {
+ /* Return RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
+ hash_key[(i * 4)] = rss_key & 0x000000FF;
+ hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
+ hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
+ hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
+ }
+ }
+
+ /* Get RSS functions configured in MRQC register */
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
+ rss_conf->rss_hf = 0;
+ return 0;
+ }
+ rss_hf = 0;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
+ rss_hf |= ETH_RSS_IPV4;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
+ rss_hf |= ETH_RSS_IPV4_TCP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
+ rss_hf |= ETH_RSS_IPV6;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
+ rss_hf |= ETH_RSS_IPV6_EX;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
+ rss_hf |= ETH_RSS_IPV6_TCP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
+ rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
+ rss_hf |= ETH_RSS_IPV4_UDP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
+ rss_hf |= ETH_RSS_IPV6_UDP;
+ if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
+ rss_hf |= ETH_RSS_IPV6_UDP_EX;
+ rss_conf->rss_hf = rss_hf;
+ return 0;
+}
+
+static void
+ixgbe_rss_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rss_conf rss_conf;
+ struct ixgbe_hw *hw;
+ uint32_t reta;
+ uint16_t i;
+ uint16_t j;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
+ reta = 0;
+ for (i = 0, j = 0; i < 128; i++, j++) {
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ reta = (reta << 8) | j;
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+ rte_bswap32(reta));
+ }
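+ /*
+ * Editor's note (worked example, not part of the original patch): with
+ * nb_rx_queues = 4 the entries cycle 0,1,2,3,0,1,... and each RETA
+ * register holds four one-byte entries, so after the byte swap RETA(0)
+ * is written as 0x03020100 (entry 0 -> queue 0 in the low byte, entry 3
+ * -> queue 3 in the high byte).
+ */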
+
+ /*
+ * Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
+ ixgbe_rss_disable(dev);
+ return;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ ixgbe_hw_rss_hash_set(hw, &rss_conf);
+}
+
+#define NUM_VFTA_REGISTERS 128
+#define NIC_RX_BUFFER_SIZE 0x200
+
+static void
+ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_vmdq_dcb_conf *cfg;
+ struct ixgbe_hw *hw;
+ enum rte_eth_nb_pools num_pools;
+ uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
+ uint16_t pbsize;
+ uint8_t nb_tcs; /* number of traffic classes */
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ num_pools = cfg->nb_queue_pools;
+ /* Check we have a valid number of pools */
+ if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+ ixgbe_rss_disable(dev);
+ return;
+ }
+ /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
+ nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+
+ /*
+ * RXPBSIZE
+ * split rx buffer up into sections, each for 1 traffic class
+ */
+ pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ for (i = 0 ; i < nb_tcs; i++) {
+ uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
+ /* clear 10 bits. */
+ rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+ }
+ /* zero alloc all unused TCs */
+ for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
+ /* clear 10 bits. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+ }
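+ /*
+ * Editor's note (worked example, not part of the original patch):
+ * assuming NIC_RX_BUFFER_SIZE = 0x200 represents the 512 KB Rx packet
+ * buffer of the 82599, each of 8 traffic classes gets 64 KB (128 KB
+ * with 4 TCs), and the unused entries are cleared to zero above.
+ */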
+
+ /* MRQC: enable vmdq and dcb */
+ mrqc = ((num_pools == ETH_16_POOLS) ? \
+ IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ /* PFVTCTL: turn on virtualisation and set the default pool */
+ vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
+ if (cfg->enable_default_pool) {
+ vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
+ } else {
+ vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
+
+ /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
+ queue_mapping = 0;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ /*
+ * mapping is done with 3 bits per priority,
+ * so shift by i*3 each time
+ */
+ queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
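+ /*
+ * Editor's note (worked example, not part of the original patch): with a
+ * hypothetical dcb_queue = {0,1,2,3,0,1,2,3} the 3-bit fields pack to
+ * queue_mapping = 0x688688, i.e. priority 0 -> TC 0, priority 1 -> TC 1,
+ * and so on, wrapping back to TC 0 at priority 4.
+ */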
+
+ /* RTRPCS: DCB related */
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
+
+ /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+ }
+
+ /* VFRE: pool enabling for receive - 16 or 32 */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
+ num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+ /*
+ * MPSAR - allow pools to read specific mac addresses
+ * In this case, all pools should be able to read from mac addr 0
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
+
+ /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set vlan id in VF register and set the valid bit */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+ (cfg->pool_map[i].vlan_id & 0xFFF)));
+ /*
+ * Put the allowed pools in VFB reg. As we only have 16 or 32
+ * pools, we only need to use the first half of the register
+ * i.e. bits 0-31
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
+ }
+}
+
+/**
+ * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ uint32_t reg;
+ uint32_t q;
+
+ PMD_INIT_FUNC_TRACE();
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ /* Disable the Tx desc arbiter so that MTQC can be changed */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ /* Enable DCB for Tx with 8 TCs */
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ }
+ else {
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ }
+ if (dcb_config->vt_mode)
+ reg |= IXGBE_MTQC_VT_ENA;
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+ /* Disable drop for all queues */
+ for (q = 0; q < 128; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+ /* Enable the Tx desc arbiter */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ /* Enable Security TX Buffer IFG for DCB */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+ }
+ return;
+}
+
+/**
+ * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ /*PF VF Transmit Enable*/
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
+ vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+ /*Configure general DCB TX parameters*/
+ ixgbe_dcb_tx_hw_config(hw,dcb_config);
+ return;
+}
+
+static void
+ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i,j;
+
+ /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
+ if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
+ dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ }
+ else {
+ dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ }
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = vmdq_rx_conf->dcb_queue[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
+ (uint8_t)(1 << j);
+ }
+}
+
+static void
+ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i,j;
+
+ /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
+ if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
+ dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ }
+ else {
+ dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = vmdq_tx_conf->dcb_queue[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
+ (uint8_t)(1 << j);
+ }
+ return;
+}
+
+static void
+ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i,j;
+
+ dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = rx_conf->dcb_queue[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
+ (uint8_t)(1 << j);
+ }
+}
+
+static void
+ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_dcb_tx_conf *tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i,j;
+
+ dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = tx_conf->dcb_queue[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
+ (uint8_t)(1 << j);
+ }
+}
+
+/**
+ * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ uint32_t reg;
+ uint32_t vlanctrl;
+ uint8_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; WSP)
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ if (dcb_config->num_tcs.pg_tcs == 4) {
+ if (dcb_config->vt_mode)
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_VMDQRT4TCEN;
+ else {
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RT4TCEN;
+ }
+ }
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ if (dcb_config->vt_mode)
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_VMDQRT8TCEN;
+ else {
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
+ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+ IXGBE_MRQC_RT8TCEN;
+ }
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+ }
+
+ /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+ }
+
+ /*
+ * Configure Rx packet plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+ return;
+}
+
+static void
+ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
+ uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+ tsa, map);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
+ uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
+ break;
+ default:
+ break;
+ }
+}
+
+#define DCB_RX_CONFIG 1
+#define DCB_TX_CONFIG 1
+#define DCB_TX_PB 1024
+/**
+ * ixgbe_dcb_hw_configure - Enable DCB and configure
+ * general DCB in VT mode and non-VT mode parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static int
+ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ int ret = 0;
+ uint8_t i,pfc_en,nb_tcs;
+ uint16_t pbsize;
+ uint8_t config_dcb_rx = 0;
+ uint8_t config_dcb_tx = 0;
+ uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+ struct ixgbe_dcb_tc_config *tc;
+ uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch(dev->data->dev_conf.rxmode.mq_mode){
+ case ETH_MQ_RX_VMDQ_DCB:
+ dcb_config->vt_mode = true;
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ config_dcb_rx = DCB_RX_CONFIG;
+ /*
+ *get dcb and VT rx configuration parameters
+ *from rte_eth_conf
+ */
+ ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
+ /*Configure general VMDQ and DCB RX parameters*/
+ ixgbe_vmdq_dcb_configure(dev);
+ }
+ break;
+ case ETH_MQ_RX_DCB:
+ dcb_config->vt_mode = false;
+ config_dcb_rx = DCB_RX_CONFIG;
+ /* Get dcb RX configuration parameters from rte_eth_conf */
+ ixgbe_dcb_rx_config(dev,dcb_config);
+ /*Configure general DCB RX parameters*/
+ ixgbe_dcb_rx_hw_config(hw, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
+ break;
+ }
+ switch (dev->data->dev_conf.txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ dcb_config->vt_mode = true;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /* get DCB and VT TX configuration parameters from rte_eth_conf */
+ ixgbe_dcb_vt_tx_config(dev,dcb_config);
+ /*Configure general VMDQ and DCB TX parameters*/
+ ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
+ break;
+
+ case ETH_MQ_TX_DCB:
+ dcb_config->vt_mode = false;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /*get DCB TX configuration parameters from rte_eth_conf*/
+ ixgbe_dcb_tx_config(dev,dcb_config);
+ /*Configure general DCB TX parameters*/
+ ixgbe_dcb_tx_hw_config(hw, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
+ break;
+ }
+
+ nb_tcs = dcb_config->num_tcs.pfc_tcs;
+ /* Unpack map */
+ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
+ if(nb_tcs == ETH_4_TCS) {
+ /* Avoid un-configured priority mapping to TC0 */
+ uint8_t j = 4;
+ uint8_t mask = 0xFF;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+ mask = (uint8_t)(mask & (~ (1 << map[i])));
+ for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
+ if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+ map[j++] = i;
+ mask >>= 1;
+ }
+ /* Re-configure 4 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
+ }
+ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
+ }
+ }
+
+ if(config_dcb_rx) {
+ /* Set RX buffer size */
+ pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
+ for (i = 0 ; i < nb_tcs; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+ }
+ /* zero alloc all unused TCs */
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ }
+ }
+ if(config_dcb_tx) {
+ /* Only support an equally distributed Tx packet buffer strategy. */
+ uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
+ uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < nb_tcs; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+ /* Clear unused TCs, if any, to zero buffer size*/
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+ }
+
+ /*Calculates traffic class credits*/
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+ IXGBE_DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+ IXGBE_DCB_RX_CONFIG);
+
+ if(config_dcb_rx) {
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
+ /* Configure PG(ETS) RX */
+ ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
+ }
+
+ if(config_dcb_tx) {
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max_cee(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+ /* Configure PG(ETS) TX */
+ ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
+ }
+
+ /*Configure queue statistics registers*/
+ ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
+
+ /* Check if the PFC is supported */
+ if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
+ for (i = 0; i < nb_tcs; i++) {
+ /*
+ * With 8 TCs, pbsize is 64 (NIC_RX_BUFFER_SIZE 0x200 / 8), so the
+ * default high_water is 48 (3/4 of pbsize) and the default
+ * low_water is 16 (1/4 of pbsize).
+ */
+ hw->fc.high_water[i] = (pbsize * 3 ) / 4;
+ hw->fc.low_water[i] = pbsize / 4;
+ /* Enable pfc for this TC */
+ tc = &dcb_config->tc_config[i];
+ tc->pfc = ixgbe_dcb_pfc_enabled;
+ }
+ ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+ if(dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+ pfc_en &= 0x0F;
+ ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
+ }
+
+ return ret;
+}
+
+/**
+ * ixgbe_configure_dcb - Configure DCB Hardware
+ * @dev: pointer to rte_eth_dev
+ */
+void ixgbe_configure_dcb(struct rte_eth_dev *dev)
+{
+ struct ixgbe_dcb_config *dcb_cfg =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* check support mq_mode for DCB */
+ if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
+ return;
+
+ if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
+ return;
+
+ /** Configure DCB hardware **/
+ ixgbe_dcb_hw_configure(dev,dcb_cfg);
+
+ return;
+}
+
+/*
+ * VMDq is only supported on 10 GbE NICs.
+ */
+static void
+ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_vmdq_rx_conf *cfg;
+ struct ixgbe_hw *hw;
+ enum rte_eth_nb_pools num_pools;
+ uint32_t mrqc, vt_ctl, vlanctrl;
+ uint32_t vmolr = 0;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+ num_pools = cfg->nb_queue_pools;
+
+ ixgbe_rss_disable(dev);
+
+ /* MRQC: enable vmdq */
+ mrqc = IXGBE_MRQC_VMDQEN;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ /* PFVTCTL: turn on virtualisation and set the default pool */
+ vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
+ if (cfg->enable_default_pool)
+ vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
+ else
+ vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
+
+ for (i = 0; i < (int)num_pools; i++) {
+ vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
+ }
+
+ /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
+
+ /* VFRE: pool enabling for receive - 64 */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
+ if (num_pools == ETH_64_POOLS)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
+
+ /*
+ * MPSAR - allow pools to read specific mac addresses
+ * In this case, all pools should be able to read from mac addr 0
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
+
+ /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set vlan id in VF register and set the valid bit */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+ (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
+ /*
+ * Put the allowed pools in the VLVFB register pair. With up to 64
+ * pools the bitmap can span two 32-bit registers: bits 0-31 go to
+ * VLVFB(2*i) and bits 32-63 to VLVFB(2*i+1), as handled below.
+ */
+ if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
+ (cfg->pool_map[i].pools & UINT32_MAX));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
+ ((cfg->pool_map[i].pools >> 32) \
+ & UINT32_MAX));
+
+ }
+
+ /* PFDTXGSWC: Tx general switch control, enables VMDq loopback */
+ if (cfg->enable_loop_back) {
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
+ * @hw: pointer to hardware structure
+ */
+static void
+ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
+{
+ uint32_t reg;
+ uint32_t q;
+
+ PMD_INIT_FUNC_TRACE();
+ /*PF VF Transmit Enable*/
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
+
+ /* Disable the Tx desc arbiter so that MTQC can be changed */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+ /* Disable drop for all queues */
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+ /* Enable the Tx desc arbiter */
+ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ reg &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ return;
+}
+
+static int
+ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+{
+ struct igb_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ unsigned i;
+
+ /* Initialize software ring entries */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile union ixgbe_adv_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+ (unsigned) rxq->queue_id);
+ return (-ENOMEM);
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+ rxd = &rxq->rx_ring[i];
+ rxd->read.hdr_addr = dma_addr;
+ rxd->read.pkt_addr = dma_addr;
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return 0;
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme
+ * any DCB/RSS w/o VMDq multi-queue setting
+ */
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ ixgbe_rss_configure(dev);
+ break;
+
+ case ETH_MQ_RX_VMDQ_DCB:
+ ixgbe_vmdq_dcb_configure(dev);
+ break;
+
+ case ETH_MQ_RX_VMDQ_ONLY:
+ ixgbe_vmdq_rx_hw_configure(dev);
+ break;
+
+ case ETH_MQ_RX_NONE:
+ /* if mq_mode is none, disable rss mode. */
+ default: ixgbe_rss_disable(dev);
+ }
+ } else {
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ /*
+ * SRIOV active scheme
+ * FIXME if support DCB/RSS together with VMDq & SRIOV
+ */
+ case ETH_64_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQEN);
+ break;
+
+ case ETH_32_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT4TCEN);
+ break;
+
+ case ETH_16_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+ }
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mtqc;
+ uint32_t rttdcs;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return 0;
+
+ /* disable arbiter before setting MTQC */
+ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ rttdcs |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme
+ * any DCB w/o VMDq multi-queue setting
+ */
+ if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+ ixgbe_vmdq_tx_hw_configure(hw);
+ else {
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
+ }
+ } else {
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+
+ /*
+ * SRIOV active scheme
+ * FIXME if support DCB together with VMDq & SRIOV
+ */
+ case ETH_64_POOLS:
+ mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
+ break;
+ case ETH_32_POOLS:
+ mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
+ break;
+ case ETH_16_POOLS:
+ mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
+ IXGBE_MTQC_8TC_8TQ;
+ break;
+ default:
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
+ }
+
+ /* re-enable arbiter */
+ rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+ return 0;
+}
+
+/*
+ * Initializes Receive Unit.
+ */
+int
+ixgbe_dev_rx_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct igb_rx_queue *rxq;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint64_t bus_addr;
+ uint32_t rxctrl;
+ uint32_t fctrl;
+ uint32_t hlreg0;
+ uint32_t maxfrs;
+ uint32_t srrctl;
+ uint32_t rdrxctl;
+ uint32_t rxcsum;
+ uint16_t buf_size;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * Make sure receives are disabled while setting
+ * up the RX context (registers, descriptor rings, etc.).
+ */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+ /* Enable receipt of broadcast frames */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_BAM;
+ fctrl |= IXGBE_FCTRL_DPF;
+ fctrl |= IXGBE_FCTRL_PMCF;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+ /*
+ * Configure CRC stripping, if any.
+ */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ if (dev->data->dev_conf.rxmode.hw_strip_crc)
+ hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
+ else
+ hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
+
+ /*
+ * Configure jumbo frame support, if any.
+ */
+ if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+ maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+ maxfrs &= 0x0000FFFF;
+ maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+ } else
+ hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+
+ /*
+ * If loopback mode is configured for 82599, set LPBK bit.
+ */
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ hlreg0 |= IXGBE_HLREG0_LPBK;
+ else
+ hlreg0 &= ~IXGBE_HLREG0_LPBK;
+
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+ /* Setup RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ /*
+ * Reset crc_len in case it was changed after queue setup by a
+ * call to configure.
+ */
+ rxq->crc_len = (uint8_t)
+ ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
+ ETHER_CRC_LEN);
+
+ /* Setup the Base and Length of the Rx Descriptor Rings */
+ bus_addr = rxq->rx_ring_phys_addr;
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
+ (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
+ rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
+
+ /* Configure the SRRCTL register */
+#ifdef RTE_HEADER_SPLIT_ENABLE
+ /*
+ * Configure Header Split
+ */
+ if (dev->data->dev_conf.rxmode.header_split) {
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ /* Must setup the PSRTYPE register */
+ uint32_t psrtype;
+ psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
+ }
+ srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ } else
+#endif
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
+ /*
+ * Configure the RX buffer size in the BSIZEPACKET field of
+ * the SRRCTL register of the queue.
+ * The value is in 1 KB resolution. Valid values can be from
+ * 1 KB to 16 KB.
+ */
+ mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
+ buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+ srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
+ IXGBE_SRRCTL_BSIZEPKT_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
+
+ buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT);
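+ /*
+ * Editor's note (worked example, not part of the original patch):
+ * BSIZEPACKET has a 1 KB granularity, so a mempool leaving e.g. 2048
+ * usable bytes is programmed as 2 (2 KB) and read back here as 2048,
+ * while 1920 usable bytes would round down to 1 KB, which in turn can
+ * force scattered Rx in the check below.
+ */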
+
+ /* Account for a double (QinQ) VLAN tag when checking the Rx buffer size */
+ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->data->scattered_rx = 1;
+#ifdef RTE_IXGBE_INC_VECTOR
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+#else
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#endif
+ }
+ }
+
+ if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+#ifdef RTE_IXGBE_INC_VECTOR
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+#else
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#endif
+ dev->data->scattered_rx = 1;
+ }
+
+ /*
+ * Device configured with multiple RX queues.
+ */
+ ixgbe_dev_mq_rx_configure(dev);
+
+ /*
+ * Setup the Checksum Register.
+ * Disable Full-Packet Checksum which is mutually exclusive with RSS.
+ * Enable IP/L4 checksum computation by hardware if requested to do so.
+ */
+ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ rxcsum |= IXGBE_RXCSUM_PCSD;
+ if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ rxcsum |= IXGBE_RXCSUM_IPPCSE;
+ else
+ rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ if (dev->data->dev_conf.rxmode.hw_strip_crc)
+ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+ else
+ rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
+ rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+ }
+
+ return 0;
+}
+
+/*
+ * Initializes Transmit Unit.
+ */
+void
+ixgbe_dev_tx_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct igb_tx_queue *txq;
+ uint64_t bus_addr;
+ uint32_t hlreg0;
+ uint32_t txctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Enable TX CRC (checksum offload requirement) and hw padding
+ * (TSO requirement) */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+
+ bus_addr = txq->tx_ring_phys_addr;
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
+ (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
+ txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
+ /* Setup the HW Tx Head and TX Tail descriptor pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+
+ /*
+ * Disable Tx Head Writeback RO bit, since this hoses
+ * bookkeeping if things aren't delivered in order.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw,
+ IXGBE_DCA_TXCTRL(txq->reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
+ txctrl);
+ break;
+
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ default:
+ txctrl = IXGBE_READ_REG(hw,
+ IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
+ txctrl);
+ break;
+ }
+ }
+
+ /* Device configured with multiple TX queues. */
+ ixgbe_dev_mq_tx_configure(dev);
+}
+
+/*
+ * Set up link for 82599 loopback mode Tx->Rx.
+ */
+static inline void
+ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
+ IXGBE_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Could not enable loopback mode");
+ /* ignore error */
+ return;
+ }
+ }
+
+ /* Restart link */
+ IXGBE_WRITE_REG(hw,
+ IXGBE_AUTOC,
+ IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
+ ixgbe_reset_pipeline_82599(hw);
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ msec_delay(50);
+}
+
+
+/*
+ * Start Transmit and Receive Units.
+ */
+void
+ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct igb_tx_queue *txq;
+ struct igb_rx_queue *rxq;
+ uint32_t txdctl;
+ uint32_t dmatxctl;
+ uint32_t rxctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Setup Transmit Threshold Registers */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl |= txq->pthresh & 0x7F;
+ txdctl |= ((txq->hthresh & 0x7F) << 8);
+ txdctl |= ((txq->wthresh & 0x7F) << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+ }
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ dmatxctl |= IXGBE_DMATXCTL_TE;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq->tx_deferred_start)
+ ixgbe_dev_tx_queue_start(dev, i);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq->rx_deferred_start)
+ ixgbe_dev_rx_queue_start(dev, i);
+ }
+
+ /* Enable Receive engine */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ rxctrl |= IXGBE_RXCTRL_DMBYPS;
+ rxctrl |= IXGBE_RXCTRL_RXEN;
+ hw->mac.ops.enable_rx_dma(hw, rxctrl);
+
+ /* If loopback mode is enabled for 82599, set up the link accordingly */
+ if (hw->mac.type == ixgbe_mac_82599EB &&
+ dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+ ixgbe_setup_loopback_link_82599(hw);
+
+}
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int
+ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct igb_rx_queue *rxq;
+ uint32_t rxdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ /* Allocate buffers for descriptor rings */
+ if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+ rx_queue_id);
+ return -1;
+ }
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
+ rx_queue_id);
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ } else
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int
+ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct igb_rx_queue *rxq;
+ uint32_t rxdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (rx_queue_id < dev->data->nb_rx_queues) {
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
+ rx_queue_id);
+
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
+
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(rxq);
+ } else
+ return -1;
+
+ return 0;
+}
+
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int
+ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct igb_tx_queue *txq;
+ uint32_t txdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+ /* Wait until TX Enable ready */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
+ IXGBE_TXDCTL(txq->reg_idx));
+ } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable "
+ "Tx Queue %d", tx_queue_id);
+ }
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+ } else
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int
+ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct ixgbe_hw *hw;
+ struct igb_tx_queue *txq;
+ uint32_t txdctl;
+ uint32_t txtdh, txtdt;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (tx_queue_id < dev->data->nb_tx_queues) {
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /* Wait until TX queue is empty */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
+ txtdh = IXGBE_READ_REG(hw,
+ IXGBE_TDH(txq->reg_idx));
+ txtdt = IXGBE_READ_REG(hw,
+ IXGBE_TDT(txq->reg_idx));
+ } while (--poll_ms && (txtdh != txtdt));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
+ "when stopping.", tx_queue_id);
+ }
+
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+ /* Wait until TX Enable ready */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
+ IXGBE_TXDCTL(txq->reg_idx));
+ } while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable "
+ "Tx Queue %d", tx_queue_id);
+ }
+
+ if (txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ } else
+ return -1;
+
+ return 0;
+}
+
+/*
+ * [VF] Initializes Receive Unit.
+ */
+int
+ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct igb_rx_queue *rxq;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint64_t bus_addr;
+ uint32_t srrctl;
+ uint16_t buf_size;
+ uint16_t i;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
+ * disables VF packet reception if the PF MTU is > 1500. This deals
+ * with an 82599 limitation that forces the PF and all VFs to share
+ * the same MTU.
+ * The PF driver re-enables VF packet reception only when the VF
+ * driver issues an IXGBE_VF_SET_LPE request.
+ * In the meantime, the VF device cannot be used, even if the VF
+ * driver and the Guest VM network stack are ready to accept packets
+ * with a size up to the PF MTU.
+ * As a work-around to this PF behaviour, force the call to
+ * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
+ * VF packet reception works in all cases.
+ */
+ ixgbevf_rlpml_set_vf(hw,
+ (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ /* Setup RX queues */
+ dev->rx_pkt_burst = ixgbe_recv_pkts;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ /* Allocate buffers for descriptor rings */
+ ret = ixgbe_alloc_rx_queue_mbufs(rxq);
+ if (ret)
+ return ret;
+
+ /* Setup the Base and Length of the Rx Descriptor Rings */
+ bus_addr = rxq->rx_ring_phys_addr;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
+ (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
+ rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
+
+
+ /* Configure the SRRCTL register */
+#ifdef RTE_HEADER_SPLIT_ENABLE
+ /*
+ * Configure Header Split
+ */
+ if (dev->data->dev_conf.rxmode.header_split) {
+
+ /* Must setup the PSRTYPE register */
+ uint32_t psrtype;
+ psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE(i), psrtype);
+
+ srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ } else
+#endif
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
+ /*
+ * Configure the RX buffer size in the BSIZEPACKET field of
+ * the SRRCTL register of the queue.
+ * The value is in 1 KB resolution. Valid values can be from
+ * 1 KB to 16 KB.
+ */
+ mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
+ buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+ srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
+ IXGBE_SRRCTL_BSIZEPKT_MASK);
+
+ /*
+ * VF modification to write virtual function SRRCTL register
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
+
+ buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT);
+
+ /* Account for a double (QinQ) VLAN tag when checking the Rx buffer size */
+ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->data->scattered_rx = 1;
+#ifdef RTE_IXGBE_INC_VECTOR
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+#else
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#endif
+ }
+ }
+
+ if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+#ifdef RTE_IXGBE_INC_VECTOR
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+#else
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#endif
+ dev->data->scattered_rx = 1;
+ }
+
+ return 0;
+}
+
+/*
+ * [VF] Initializes Transmit Unit.
+ */
+void
+ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct igb_tx_queue *txq;
+ uint64_t bus_addr;
+ uint32_t txctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ bus_addr = txq->tx_ring_phys_addr;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
+ (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
+ txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
+ /* Setup the HW Tx Head and TX Tail descriptor pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+
+ /*
+ * Disable Tx Head Writeback RO bit, since this hoses
+ * bookkeeping if things aren't delivered in order.
+ */
+ txctrl = IXGBE_READ_REG(hw,
+ IXGBE_VFDCA_TXCTRL(i));
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
+ txctrl);
+ }
+}
+
+/*
+ * [VF] Start Transmit and Receive Units.
+ */
+void
+ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ struct igb_tx_queue *txq;
+ struct igb_rx_queue *rxq;
+ uint32_t txdctl;
+ uint32_t rxdctl;
+ uint16_t i;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Setup Transmit Threshold Registers */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl |= txq->pthresh & 0x7F;
+ txdctl |= ((txq->hthresh & 0x7F) << 8);
+ txdctl |= ((txq->wthresh & 0x7F) << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+
+ poll_ms = 10;
+ /* Wait until TX Enable ready */
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
+ }
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+
+ rxq = dev->data->rx_queues[i];
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = 10;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
+
+ }
+}
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.h b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.h
new file mode 100755
index 00000000..329007cb
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -0,0 +1,270 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_RXTX_H_
+#define _IXGBE_RXTX_H_
+
+
+#define RTE_PMD_IXGBE_TX_MAX_BURST 32
+
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+#define RTE_PMD_IXGBE_RX_MAX_BURST 32
+#define RTE_IXGBE_DESCS_PER_LOOP 4
+#elif defined(RTE_IXGBE_INC_VECTOR)
+#define RTE_IXGBE_DESCS_PER_LOOP 4
+#else
+#define RTE_IXGBE_DESCS_PER_LOOP 1
+#endif
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+ (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
+
+#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
+ (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+
+#ifdef RTE_IXGBE_INC_VECTOR
+#define RTE_IXGBE_VPMD_RX_BURST 32
+#define RTE_IXGBE_VPMD_TX_BURST 32
+#define RTE_IXGBE_RXQ_REARM_THRESH RTE_IXGBE_VPMD_RX_BURST
+#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ 64
+#endif
+
+#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_IXGBE_DESCS_PER_LOOP - 1) * \
+ sizeof(union ixgbe_adv_rx_desc))
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while(0)
+#endif
+
+#define RTE_IXGBE_REGISTER_POLL_WAIT_10_MS 10
+#define RTE_IXGBE_WAIT_100_US 100
+#define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct igb_rx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct igb_tx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+ uint16_t next_id; /**< Index of next descriptor in ring. */
+ uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct igb_tx_entry_v {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct igb_rx_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
+ volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
+ struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
+ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint64_t mbuf_initializer; /**< value to init mbufs */
+ uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t rx_tail; /**< current value of RDT register. */
+ uint16_t nb_rx_hold; /**< number of held free RX desc. */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
+ uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
+ uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+#endif
+#ifdef RTE_IXGBE_INC_VECTOR
+ uint16_t rxrearm_nb; /**< number of descriptors remaining to be re-armed */
+ uint16_t rxrearm_start; /**< the idx we start the re-arming from */
+#endif
+ uint16_t rx_free_thresh; /**< max free RX desc to hold. */
+ uint16_t queue_id; /**< RX queue index. */
+ uint16_t reg_idx; /**< RX queue register index. */
+ uint8_t port_id; /**< Device port identifier. */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
+ uint8_t rx_deferred_start; /**< not in global dev start. */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+ /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
+ struct rte_mbuf fake_mbuf;
+ /** hold packets to return to application */
+ struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
+#endif
+};
+
+/**
+ * IXGBE CTX Constants
+ */
+enum ixgbe_advctx_num {
+ IXGBE_CTX_0 = 0, /**< CTX0 */
+ IXGBE_CTX_1 = 1, /**< CTX1 */
+ IXGBE_CTX_NUM = 2, /**< CTX NUMBER */
+};
+
+/** Offload features */
+union ixgbe_tx_offload {
+ uint64_t data;
+ struct {
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size */
+ uint64_t vlan_tci:16;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ };
+};
+
+/*
+ * Compare mask for vlan_macip_len.data,
+ * should be in sync with ixgbe_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
+/**
+ * Structure used to check whether a new context descriptor needs to be built
+ */
+
+struct ixgbe_advctx_info {
+ uint64_t flags; /**< ol_flags for context build. */
+ /**< tx offload: vlan, tso, l2-l3-l4 lengths. */
+ union ixgbe_tx_offload tx_offload;
+ /** compare mask for tx offload. */
+ union ixgbe_tx_offload tx_offload_mask;
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct igb_tx_queue {
+ /** TX ring virtual address. */
+ volatile union ixgbe_adv_tx_desc *tx_ring;
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
+ volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
+ uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ uint16_t tx_tail; /**< current value of TDT reg. */
+ uint16_t tx_free_thresh;/**< minimum TX before freeing. */
+ /** Number of TX descriptors to use before RS bit is set. */
+ uint16_t tx_rs_thresh;
+ /** Number of TX descriptors used since RS bit was set. */
+ uint16_t nb_tx_used;
+ /** Index to last TX descriptor to have been cleaned. */
+ uint16_t last_desc_cleaned;
+ /** Total number of TX descriptors ready to be allocated. */
+ uint16_t nb_tx_free;
+ uint16_t tx_next_dd; /**< next desc to scan for DD bit */
+ uint16_t tx_next_rs; /**< next desc to set RS bit */
+ uint16_t queue_id; /**< TX queue index. */
+ uint16_t reg_idx; /**< TX queue register index. */
+ uint8_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold reg. */
+ uint32_t txq_flags; /**< Holds flags for this TXq */
+ uint32_t ctx_curr; /**< Hardware context states. */
+ /** Hardware context0 history. */
+ struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
+ struct ixgbe_txq_ops *ops; /**< txq ops */
+ uint8_t tx_deferred_start; /**< queue is not started by the global dev start. */
+};
+
+struct ixgbe_txq_ops {
+ void (*release_mbufs)(struct igb_tx_queue *txq);
+ void (*free_swring)(struct igb_tx_queue *txq);
+ void (*reset)(struct igb_tx_queue *txq);
+};
+
+/*
+ * The "simple" TX queue functions require that the following
+ * flags are set when the TX queue is configured:
+ * - ETH_TXQ_FLAGS_NOMULTSEGS
+ * - ETH_TXQ_FLAGS_NOVLANOFFL
+ * - ETH_TXQ_FLAGS_NOXSUMSCTP
+ * - ETH_TXQ_FLAGS_NOXSUMUDP
+ * - ETH_TXQ_FLAGS_NOXSUMTCP
+ * and that the RS bit threshold (tx_rs_thresh) is at least equal to
+ * RTE_PMD_IXGBE_TX_MAX_BURST.
+ */
+#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
+ ETH_TXQ_FLAGS_NOOFFLOADS)
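For illustration, a TX queue configuration that satisfies these flags and the tx_rs_thresh requirement might look like the sketch below (threshold values are illustrative, not taken from the patch):

/* Illustrative sketch (not from the patch): queue config compatible with
 * the simple/vector TX path; threshold values are examples only. */
struct rte_eth_txconf txconf = {
	.txq_flags      = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
	.tx_rs_thresh   = 32, /* >= RTE_PMD_IXGBE_TX_MAX_BURST, per the note above */
	.tx_free_thresh = 32,
};
/* rte_eth_tx_queue_setup(port_id, queue_id, nb_desc, socket_id, &txconf); */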
+
+/*
+ * Populate descriptors with the following info:
+ * 1.) buffer_addr = phys_addr + headroom
+ * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
+ * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT
+ */
+
+/* Defines for Tx descriptor */
+#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\
+ IXGBE_ADVTXD_DCMD_IFCS |\
+ IXGBE_ADVTXD_DCMD_DEXT |\
+ IXGBE_ADVTXD_DCMD_EOP)
+
+
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq);
+
+#ifdef RTE_IXGBE_INC_VECTOR
+uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int ixgbe_txq_vec_setup(struct igb_tx_queue *txq);
+int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);
+int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx_vec.c b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
new file mode 100755
index 00000000..b54cb191
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
@@ -0,0 +1,802 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ __m128i dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mb_pool,
+ (void *)rxep,
+ RTE_IXGBE_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_IXGBE_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ __m128i vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /*
+ * Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * Though, RX will overwrite ol_flags that are coming next
+ * anyway. So overwrite whole 8 bytes with one load:
+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
+ vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t) ((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
+
+#define OLFLAGS_MASK ((uint16_t)(PKT_RX_VLAN_PKT | PKT_RX_IPV4_HDR |\
+ PKT_RX_IPV4_HDR_EXT | PKT_RX_IPV6_HDR |\
+ PKT_RX_IPV6_HDR_EXT))
+#define OLFLAGS_MASK_V (((uint64_t)OLFLAGS_MASK << 48) | \
+ ((uint64_t)OLFLAGS_MASK << 32) | \
+ ((uint64_t)OLFLAGS_MASK << 16) | \
+ ((uint64_t)OLFLAGS_MASK))
+#define PTYPE_SHIFT (1)
+#define VTAG_SHIFT (3)
+
+static inline void
+desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i ptype0, ptype1, vtag0, vtag1;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
+ ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
+ vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+ vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+
+ ptype1 = _mm_unpacklo_epi32(ptype0, ptype1);
+ vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
+
+ ptype1 = _mm_slli_epi16(ptype1, PTYPE_SHIFT);
+ vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);
+
+ ptype1 = _mm_or_si128(ptype1, vtag1);
+ vol.dword = _mm_cvtsi128_si64(ptype1) & OLFLAGS_MASK_V;
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
+#else
+#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#endif
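On the receiving side these bits end up in the mbuf's ol_flags; a small consumer sketch follows, using only the flag names listed in OLFLAGS_MASK above (the helper name is hypothetical):

/* Illustrative sketch (not from the patch): reading the RX offload flags
 * produced above; only flag names from OLFLAGS_MASK are used. */
static int is_tagged_ipv4(const struct rte_mbuf *m)
{
	if (!(m->ol_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV4_HDR_EXT)))
		return 0;                            /* not reported as IPv4 */
	return (m->ol_flags & PKT_RX_VLAN_PKT) != 0; /* VLAN tag present */
}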
+
+/*
+ * vPMD receive routine; currently accepts only
+ * (nb_pkts == RTE_IXGBE_VPMD_RX_BURST) per call.
+ *
+ * Notice:
+ * - if nb_pkts < RTE_IXGBE_VPMD_RX_BURST, no packets are returned
+ * - if nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only RTE_IXGBE_VPMD_RX_BURST
+ * descriptors are scanned for the DD bit
+ * - ol_flags for RSS and checksum errors are not supported
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct igb_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ __m128i shuf_msk;
+ __m128i crc_adjust = _mm_set_epi16(
+ 0, 0, 0, 0, /* ignore non-length fields */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0 /* ignore pkt_type field */
+ );
+ __m128i dd_check, eop_check;
+
+ if (unlikely(nb_pkts < RTE_IXGBE_VPMD_RX_BURST))
+ return 0;
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act */
+ if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
+ ixgbe_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available */
+ if (!(rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 0xFF, 0xFF, /* skip high 16 bits vlan_macip, zero out */
+ 15, 14, /* octet 14~15, low 16 bits vlan_macip */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 13, 12, /* octet 12~13, low 16 bits pkt_len */
+ 13, 12, /* octet 12~13, 16 bits data_len */
+ 0xFF, 0xFF /* skip pkt_type field */
+ );
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /*
+ * A. load 4 packets' descriptors in each loop iteration
+ * B. copy 4 mbuf pointers from the sw_ring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill packet info from the descriptors into the mbufs
+ */
+ for (pos = 0, nb_pkts_recd = 0; pos < RTE_IXGBE_VPMD_RX_BURST;
+ pos += RTE_IXGBE_DESCS_PER_LOOP,
+ rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
+ __m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+
+ if (split_packet) {
+ rte_prefetch0(&rx_pkts[pos]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ }
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ /* A.1 load the remaining 2 pkts desc */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ /* set ol_flags with packet type and vlan tag */
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order, as the count
+ * of dd bits doesn't care. However, for end-of-packet
+ * tracking we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += RTE_IXGBE_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+/*
+ * vPMD receive routine; currently accepts only
+ * (nb_pkts == RTE_IXGBE_VPMD_RX_BURST) per call.
+ *
+ * Notice:
+ * - if nb_pkts < RTE_IXGBE_VPMD_RX_BURST, no packets are returned
+ * - if nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only RTE_IXGBE_VPMD_RX_BURST
+ * descriptors are scanned for the DD bit
+ * - ol_flags for RSS and checksum errors are not supported
+ */
+uint16_t
+ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
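Because of the burst-size restriction noted above, a caller would normally poll with a fixed burst; a minimal sketch, assuming RTE_IXGBE_VPMD_RX_BURST is 32 in this release:

/* Illustrative sketch (not from the patch): polling loop sized to the
 * vector RX burst requirement (RTE_IXGBE_VPMD_RX_BURST assumed == 32). */
static void poll_queue(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	for (;;) {
		nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
		for (i = 0; i < nb; i++)
			rte_pktmbuf_free(pkts[i]); /* or hand off for processing */
	}
}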
+
+static inline uint16_t
+reassemble_packets(struct igb_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx = 0, buf_idx = 0;
+
+
+ while (buf_idx < nb_bufs) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len)
+ end->data_len -= rxq->crc_len;
+ else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ end = secondlast;
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ buf_idx++;
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+/*
+ * vPMD receive routine that reassembles scattered packets
+ *
+ * Notice:
+ * - ol_flags for RSS and checksum errors are not supported
+ * - currently accepts only (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
+ */
+uint16_t
+ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_IXGBE_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint32_t *split_fl32 = (uint32_t *)split_flags;
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl32[0] == 0 && split_fl32[1] == 0 &&
+ split_fl32[2] == 0 && split_fl32[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned i = 0;
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile union ixgbe_adv_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ __m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
+ flags | pkt->data_len,
+ pkt->buf_physaddr + pkt->data_off);
+ _mm_store_si128((__m128i *)&txdp->read, descriptor);
+}
+
+static inline void
+vtx(volatile union ixgbe_adv_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+static inline int __attribute__((always_inline))
+ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+{
+ struct igb_tx_entry_v *txep;
+ uint32_t status;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ if (!(status & IXGBE_ADVTXD_STAT_DD))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /*
+ * first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &((struct igb_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
+ (n - 1)];
+#ifdef RTE_MBUF_REFCNT
+ m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+#else
+ m = txep[0].mbuf;
+#endif
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+#ifdef RTE_MBUF_REFCNT
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+#else
+ m = txep[i].mbuf;
+#endif
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct igb_tx_entry_v *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+uint16_t
+ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+ volatile union ixgbe_adv_tx_desc *txdp;
+ struct igb_tx_entry_v *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = DCMD_DTYP_FLAGS;
+ uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
+ int i;
+
+ if (unlikely(nb_pkts > RTE_IXGBE_VPMD_TX_BURST))
+ nb_pkts = RTE_IXGBE_VPMD_TX_BURST;
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ixgbe_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &((struct igb_tx_entry_v *)txq->sw_ring)[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reach the end of ring */
+ txdp = &(txq->tx_ring[tx_id]);
+ txep = &(((struct igb_tx_entry_v *)txq->sw_ring)[tx_id]);
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+ txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
+ txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+static void
+ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+{
+ unsigned i;
+ struct igb_tx_entry_v *txe;
+ uint16_t nb_free, max_desc;
+
+ if (txq->sw_ring != NULL) {
+ /* release the used mbufs in sw_ring */
+ nb_free = txq->nb_tx_free;
+ max_desc = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+ nb_free < max_desc && i != txq->tx_tail;
+ i = (i + 1) & max_desc) {
+ txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
+ if (txe->mbuf != NULL)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ }
+ /* reset tx_entry */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txe = (struct igb_tx_entry_v *)&txq->sw_ring[i];
+ txe->mbuf = NULL;
+ }
+ }
+}
+
+static void
+ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+{
+ if (txq == NULL)
+ return;
+
+ if (txq->sw_ring != NULL) {
+ rte_free((struct igb_rx_entry *)txq->sw_ring - 1);
+ txq->sw_ring = NULL;
+ }
+}
+
+static void
+ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+{
+ static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
+ .buffer_addr = 0} };
+ struct igb_tx_entry_v *txe = (struct igb_tx_entry_v *)txq->sw_ring;
+ uint16_t i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++)
+ txq->tx_ring[i] = zeroed_desc;
+
+ /* Initialize SW ring entries */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+ txd->wb.status = IXGBE_TXD_STAT_DD;
+ txe[i].mbuf = NULL;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ txq->nb_tx_used = 0;
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->ctx_curr = 0;
+ memset((void *)&txq->ctx_cache, 0,
+ IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+}
+
+static struct ixgbe_txq_ops vec_txq_ops = {
+ .release_mbufs = ixgbe_tx_queue_release_mbufs,
+ .free_swring = ixgbe_tx_free_swring,
+ .reset = ixgbe_reset_tx_queue,
+};
+
+int
+ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+
+int ixgbe_txq_vec_setup(struct igb_tx_queue *txq)
+{
+ if (txq->sw_ring == NULL)
+ return -1;
+
+ /* leave the first one for overflow */
+ txq->sw_ring = (struct igb_tx_entry *)
+ ((struct igb_tx_entry_v *)txq->sw_ring + 1);
+ txq->ops = &vec_txq_ops;
+
+ return 0;
+}
+
+int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
+ /* without rx ol_flags, no VLAN flag can be reported */
+ if (rxmode->hw_vlan_strip != 0 ||
+ rxmode->hw_vlan_extend != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /*
+ * - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->hw_ip_checksum == 1 ||
+ rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
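The checks above imply the port configuration needed to keep the vector RX path enabled; a sketch of a compatible rte_eth_conf, using the same field names the function tests (values illustrative):

/* Illustrative sketch (not from the patch): port configuration that passes
 * ixgbe_rx_vec_condition_check (no fdir, no checksum offload, no header
 * split, no VLAN stripping). */
static const struct rte_eth_conf vec_rx_port_conf = {
	.rxmode = {
		.hw_ip_checksum = 0,
		.header_split   = 0,
		.hw_vlan_strip  = 0,
		.hw_vlan_extend = 0,
	},
	.fdir_conf = {
		.mode = RTE_FDIR_MODE_NONE,
	},
};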
diff --git a/src/dpdk_lib18/librte_pmd_pcap/Makefile b/src/dpdk_lib18/librte_pmd_pcap/Makefile
new file mode 100755
index 00000000..c5c214dc
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_pcap/Makefile
@@ -0,0 +1,59 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# Copyright(c) 2014 6WIND S.A.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_pcap.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += rte_eth_pcap.c
+
+#
+# Export include files
+#
+SYMLINK-y-include +=
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_pcap/rte_eth_pcap.c b/src/dpdk_lib18/librte_pmd_pcap/rte_eth_pcap.c
new file mode 100755
index 00000000..f12d1e7e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_pcap/rte_eth_pcap.c
@@ -0,0 +1,936 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+
+#include <net/if.h>
+
+#include <pcap.h>
+
+#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
+#define RTE_ETH_PCAP_SNAPLEN 4096
+#define RTE_ETH_PCAP_PROMISC 1
+#define RTE_ETH_PCAP_TIMEOUT -1
+#define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
+#define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
+#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
+#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
+#define ETH_PCAP_IFACE_ARG "iface"
+
+static char errbuf[PCAP_ERRBUF_SIZE];
+static struct timeval start_time;
+static uint64_t start_cycles;
+static uint64_t hz;
+
+struct pcap_rx_queue {
+ pcap_t *pcap;
+ uint8_t in_port;
+ struct rte_mempool *mb_pool;
+ volatile unsigned long rx_pkts;
+ volatile unsigned long err_pkts;
+ const char *name;
+ const char *type;
+};
+
+struct pcap_tx_queue {
+ pcap_dumper_t *dumper;
+ pcap_t *pcap;
+ volatile unsigned long tx_pkts;
+ volatile unsigned long err_pkts;
+ const char *name;
+ const char *type;
+};
+
+struct rx_pcaps {
+ unsigned num_of_rx;
+ pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
+ const char *names[RTE_PMD_RING_MAX_RX_RINGS];
+ const char *types[RTE_PMD_RING_MAX_RX_RINGS];
+};
+
+struct tx_pcaps {
+ unsigned num_of_tx;
+ pcap_dumper_t *dumpers[RTE_PMD_RING_MAX_TX_RINGS];
+ pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
+ const char *names[RTE_PMD_RING_MAX_RX_RINGS];
+ const char *types[RTE_PMD_RING_MAX_RX_RINGS];
+};
+
+struct pmd_internals {
+ struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
+ struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
+ unsigned nb_rx_queues;
+ unsigned nb_tx_queues;
+ int if_index;
+ int single_iface;
+};
+
+const char *valid_arguments[] = {
+ ETH_PCAP_RX_PCAP_ARG,
+ ETH_PCAP_TX_PCAP_ARG,
+ ETH_PCAP_RX_IFACE_ARG,
+ ETH_PCAP_TX_IFACE_ARG,
+ ETH_PCAP_IFACE_ARG,
+ NULL
+};
+
+static int open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper);
+static int open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap);
+static int open_single_iface(const char *iface, pcap_t **pcap);
+
+static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } };
+static const char *drivername = "Pcap PMD";
+static struct rte_eth_link pmd_link = {
+ .link_speed = 10000,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = 0
+};
+
+
+static uint16_t
+eth_pcap_rx(void *queue,
+ struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ unsigned i;
+ struct pcap_pkthdr header;
+ const u_char *packet;
+ struct rte_mbuf *mbuf;
+ struct pcap_rx_queue *pcap_q = queue;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint16_t num_rx = 0;
+ uint16_t buf_size;
+
+ if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))
+ return 0;
+
+ /* Reads the given number of packets from the pcap file one by one
+ * and copies the packet data into a newly allocated mbuf to return.
+ */
+ for (i = 0; i < nb_pkts; i++) {
+ /* Get the next PCAP packet */
+ packet = pcap_next(pcap_q->pcap, &header);
+ if (unlikely(packet == NULL))
+ break;
+ else
+ mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
+ if (unlikely(mbuf == NULL))
+ break;
+
+ /* Now get the space available for data in the mbuf */
+ mbp_priv = rte_mempool_get_priv(pcap_q->mb_pool);
+ buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+
+ if (header.len <= buf_size) {
+ /* pcap packet will fit in the mbuf, go ahead and copy */
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
+ header.len);
+ mbuf->data_len = (uint16_t)header.len;
+ mbuf->pkt_len = mbuf->data_len;
+ mbuf->port = pcap_q->in_port;
+ bufs[num_rx] = mbuf;
+ num_rx++;
+ } else {
+ /* pcap packet will not fit in the mbuf, so drop packet */
+ RTE_LOG(ERR, PMD,
+ "PCAP packet %d bytes will not fit in mbuf (%d bytes)\n",
+ header.len, buf_size);
+ rte_pktmbuf_free(mbuf);
+ }
+ }
+ pcap_q->rx_pkts += num_rx;
+ return num_rx;
+}
+
+static inline void
+calculate_timestamp(struct timeval *ts) {
+ uint64_t cycles;
+ struct timeval cur_time;
+
+ cycles = rte_get_timer_cycles() - start_cycles;
+ cur_time.tv_sec = cycles / hz;
+ cur_time.tv_usec = (cycles % hz) * 1e6 / hz; /* microseconds */
+ timeradd(&start_time, &cur_time, ts);
+}
+
+/*
+ * Callback to handle writing packets to a pcap file.
+ */
+static uint16_t
+eth_pcap_tx_dumper(void *queue,
+ struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ unsigned i;
+ struct rte_mbuf *mbuf;
+ struct pcap_tx_queue *dumper_q = queue;
+ uint16_t num_tx = 0;
+ struct pcap_pkthdr header;
+
+ if (dumper_q->dumper == NULL || nb_pkts == 0)
+ return 0;
+
+ /* writes the nb_pkts packets to the previously opened pcap file dumper */
+ for (i = 0; i < nb_pkts; i++) {
+ mbuf = bufs[i];
+ calculate_timestamp(&header.ts);
+ header.len = mbuf->data_len;
+ header.caplen = header.len;
+ pcap_dump((u_char *)dumper_q->dumper, &header,
+ rte_pktmbuf_mtod(mbuf, void*));
+ rte_pktmbuf_free(mbuf);
+ num_tx++;
+ }
+
+ /*
+ * Since there's no place to hook a callback when the forwarding
+ * process stops and to make sure the pcap file is actually written,
+ * we flush the pcap dumper within each burst.
+ */
+ pcap_dump_flush(dumper_q->dumper);
+ dumper_q->tx_pkts += num_tx;
+ dumper_q->err_pkts += nb_pkts - num_tx;
+ return num_tx;
+}
+
+/*
+ * Callback to handle sending packets through a real NIC.
+ */
+static uint16_t
+eth_pcap_tx(void *queue,
+ struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ unsigned i;
+ int ret;
+ struct rte_mbuf *mbuf;
+ struct pcap_tx_queue *tx_queue = queue;
+ uint16_t num_tx = 0;
+
+ if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))
+ return 0;
+
+ for (i = 0; i < nb_pkts; i++) {
+ mbuf = bufs[i];
+ ret = pcap_sendpacket(tx_queue->pcap,
+ rte_pktmbuf_mtod(mbuf, u_char *),
+ mbuf->data_len);
+ if (unlikely(ret != 0))
+ break;
+ num_tx++;
+ rte_pktmbuf_free(mbuf);
+ }
+
+ tx_queue->tx_pkts += num_tx;
+ tx_queue->err_pkts += nb_pkts - num_tx;
+ return num_tx;
+}
+
+static int
+eth_dev_start(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pcap_tx_queue *tx;
+ struct pcap_rx_queue *rx;
+
+ /* Special iface case. Single pcap is open and shared between tx/rx. */
+ if (internals->single_iface) {
+ tx = &internals->tx_queue[0];
+ rx = &internals->rx_queue[0];
+
+ if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
+ if (open_single_iface(tx->name, &tx->pcap) < 0)
+ return -1;
+ rx->pcap = tx->pcap;
+ }
+ goto status_up;
+ }
+
+ /* If not open already, open tx pcaps/dumpers */
+ for (i = 0; i < internals->nb_tx_queues; i++) {
+ tx = &internals->tx_queue[i];
+
+ if (!tx->dumper && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
+ if (open_single_tx_pcap(tx->name, &tx->dumper) < 0)
+ return -1;
+ }
+
+ else if (!tx->pcap && strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
+ if (open_single_iface(tx->name, &tx->pcap) < 0)
+ return -1;
+ }
+ }
+
+ /* If not open already, open rx pcaps */
+ for (i = 0; i < internals->nb_rx_queues; i++) {
+ rx = &internals->rx_queue[i];
+
+ if (rx->pcap != NULL)
+ continue;
+
+ if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
+ if (open_single_rx_pcap(rx->name, &rx->pcap) < 0)
+ return -1;
+ }
+
+ else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
+ if (open_single_iface(rx->name, &rx->pcap) < 0)
+ return -1;
+ }
+ }
+
+status_up:
+
+ dev->data->dev_link.link_status = 1;
+ return 0;
+}
+
+/*
+ * This function gets called when the current port gets stopped.
+ * It is the only place where we can close all the tx stream dumpers.
+ * If it is not called, the dumpers are simply flushed within each tx burst.
+ */
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pcap_tx_queue *tx;
+ struct pcap_rx_queue *rx;
+
+ /* Special iface case. Single pcap is open and shared between tx/rx. */
+ if (internals->single_iface) {
+ tx = &internals->tx_queue[0];
+ rx = &internals->rx_queue[0];
+ pcap_close(tx->pcap);
+ tx->pcap = NULL;
+ rx->pcap = NULL;
+ goto status_down;
+ }
+
+ for (i = 0; i < internals->nb_tx_queues; i++) {
+ tx = &internals->tx_queue[i];
+
+ if (tx->dumper != NULL) {
+ pcap_dump_close(tx->dumper);
+ tx->dumper = NULL;
+ }
+
+ if (tx->pcap != NULL) {
+ pcap_close(tx->pcap);
+ tx->pcap = NULL;
+ }
+ }
+
+ for (i = 0; i < internals->nb_rx_queues; i++) {
+ rx = &internals->rx_queue[i];
+
+ if (rx->pcap != NULL) {
+ pcap_close(rx->pcap);
+ rx->pcap = NULL;
+ }
+ }
+
+status_down:
+ dev->data->dev_link.link_status = 0;
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ dev_info->driver_name = drivername;
+ dev_info->if_index = internals->if_index;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t) -1;
+ dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->pci_dev = NULL;
+}
+
+static void
+eth_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *igb_stats)
+{
+ unsigned i;
+ unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ const struct pmd_internals *internal = dev->data->dev_private;
+
+ memset(igb_stats, 0, sizeof(*igb_stats));
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_rx_queues;
+ i++) {
+ igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
+ rx_total += igb_stats->q_ipackets[i];
+ }
+
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internal->nb_tx_queues;
+ i++) {
+ igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
+ igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
+ tx_total += igb_stats->q_opackets[i];
+ tx_err_total += igb_stats->q_errors[i];
+ }
+
+ igb_stats->ipackets = rx_total;
+ igb_stats->opackets = tx_total;
+ igb_stats->oerrors = tx_err_total;
+}
+
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ struct pmd_internals *internal = dev->data->dev_private;
+ for (i = 0; i < internal->nb_rx_queues; i++)
+ internal->rx_queue[i].rx_pkts = 0;
+ for (i = 0; i < internal->nb_tx_queues; i++) {
+ internal->tx_queue[i].tx_pkts = 0;
+ internal->tx_queue[i].err_pkts = 0;
+ }
+}
+
+static void
+eth_dev_close(struct rte_eth_dev *dev __rte_unused)
+{
+}
+
+static void
+eth_queue_release(void *q __rte_unused)
+{
+}
+
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
+ pcap_q->mb_pool = mb_pool;
+ dev->data->rx_queues[rx_queue_id] = pcap_q;
+ pcap_q->in_port = dev->data->port_id;
+ return 0;
+}
+
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+
+ struct pmd_internals *internals = dev->data->dev_private;
+ dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
+ return 0;
+}
+
+static struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_close = eth_dev_close,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+};
+
+/*
+ * Function handler that opens the pcap file for reading and stores a
+ * reference to it for later use.
+ */
+static int
+open_rx_pcap(const char *key, const char *value, void *extra_args)
+{
+ unsigned i;
+ const char *pcap_filename = value;
+ struct rx_pcaps *pcaps = extra_args;
+ pcap_t *pcap = NULL;
+
+ for (i = 0; i < pcaps->num_of_rx; i++) {
+ if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
+ return -1;
+
+ pcaps->pcaps[i] = pcap;
+ pcaps->names[i] = pcap_filename;
+ pcaps->types[i] = key;
+ }
+
+ return 0;
+}
+
+static int
+open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
+{
+ if ((*pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Opens a pcap file for writing and stores a reference to it
+ * for later use.
+ */
+static int
+open_tx_pcap(const char *key, const char *value, void *extra_args)
+{
+ unsigned i;
+ const char *pcap_filename = value;
+ struct tx_pcaps *dumpers = extra_args;
+ pcap_dumper_t *dumper;
+
+ for (i = 0; i < dumpers->num_of_tx; i++) {
+ if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
+ return -1;
+
+ dumpers->dumpers[i] = dumper;
+ dumpers->names[i] = pcap_filename;
+ dumpers->types[i] = key;
+ }
+
+ return 0;
+}
+
+static int
+open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
+{
+ pcap_t *tx_pcap;
+ /*
+ * We need a dummy, empty pcap_t to pass to pcap_dump_open(),
+ * so we open a "dead" Ethernet pcap handle with a large enough
+ * snapshot length.
+ */
+
+ if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
+ == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
+ return -1;
+ }
+
+ /* The dumper is created using the previous pcap_t reference */
+ if ((*dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename);
+ return -1;
+ }
+
+ return 0;
+}
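The pcap_open_dead() pattern used here is plain libpcap; a standalone sketch of the same idea, independent of DPDK (file name and helper are hypothetical):

/* Illustrative sketch (not from the patch): writing one frame to a pcap
 * file via a "dead" handle, the same pattern open_single_tx_pcap() uses. */
#include <pcap.h>
#include <string.h>

static int dump_one_frame(const char *path, const u_char *frame, int len)
{
	pcap_t *p = pcap_open_dead(DLT_EN10MB, 65535);
	pcap_dumper_t *d;
	struct pcap_pkthdr hdr;

	if (p == NULL)
		return -1;
	d = pcap_dump_open(p, path);
	if (d == NULL) {
		pcap_close(p);
		return -1;
	}
	memset(&hdr, 0, sizeof(hdr)); /* zero timestamp, for simplicity */
	hdr.caplen = hdr.len = (bpf_u_int32)len;
	pcap_dump((u_char *)d, &hdr, frame);
	pcap_dump_close(d);
	pcap_close(p);
	return 0;
}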
+
+/*
+ * pcap_open_live wrapper function
+ */
+static inline int
+open_iface_live(const char *iface, pcap_t **pcap) {
+ *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
+ RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
+
+ if (*pcap == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Opens an interface for reading and writing
+ */
+static inline int
+open_rx_tx_iface(const char *key, const char *value, void *extra_args)
+{
+ const char *iface = value;
+ struct rx_pcaps *pcaps = extra_args;
+ pcap_t *pcap = NULL;
+
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+
+ pcaps->pcaps[0] = pcap;
+ pcaps->names[0] = iface;
+ pcaps->types[0] = key;
+
+ return 0;
+}
+
+/*
+ * Opens a NIC for reading packets from it
+ */
+static inline int
+open_rx_iface(const char *key, const char *value, void *extra_args)
+{
+ unsigned i;
+ const char *iface = value;
+ struct rx_pcaps *pcaps = extra_args;
+ pcap_t *pcap = NULL;
+
+ for (i = 0; i < pcaps->num_of_rx; i++) {
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+ pcaps->pcaps[i] = pcap;
+ pcaps->names[i] = iface;
+ pcaps->types[i] = key;
+ }
+
+ return 0;
+}
+
+/*
+ * Opens a NIC for writing packets to it
+ */
+static int
+open_tx_iface(const char *key, const char *value, void *extra_args)
+{
+ unsigned i;
+ const char *iface = value;
+ struct tx_pcaps *pcaps = extra_args;
+ pcap_t *pcap;
+
+ for (i = 0; i < pcaps->num_of_tx; i++) {
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+ pcaps->pcaps[i] = pcap;
+ pcaps->names[i] = iface;
+ pcaps->types[i] = key;
+ }
+
+ return 0;
+}
+
+static int
+open_single_iface(const char *iface, pcap_t **pcap)
+{
+ if (open_iface_live(iface, pcap) < 0) {
+ RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
+ const unsigned nb_tx_queues,
+ const unsigned numa_node,
+ struct pmd_internals **internals,
+ struct rte_eth_dev **eth_dev,
+ struct rte_kvargs *kvlist)
+{
+ struct rte_eth_dev_data *data = NULL;
+ struct rte_pci_device *pci_dev = NULL;
+ unsigned k_idx;
+ struct rte_kvargs_pair *pair = NULL;
+
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
+ break;
+ }
+
+ RTE_LOG(INFO, PMD,
+ "Creating pcap-backed ethdev on numa socket %u\n", numa_node);
+
+ /* now do all data allocation - for eth_dev structure, dummy pci driver
+ * and internal (private) data
+ */
+ data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+ if (data == NULL)
+ goto error;
+
+ pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
+ if (pci_dev == NULL)
+ goto error;
+
+ *internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node);
+ if (*internals == NULL)
+ goto error;
+
+ /* reserve an ethdev entry */
+ *eth_dev = rte_eth_dev_allocate(name);
+ if (*eth_dev == NULL)
+ goto error;
+
+ /* now put it all together
+ * - store queue data in internals,
+ * - store numa_node info in pci_driver
+ * - point eth_dev_data to internals and pci_driver
+ * - and point eth_dev structure to new eth_dev_data structure
+ */
+ /* NOTE: we'll replace the data element of the originally allocated
+ * eth_dev so the rings are local per-process */
+
+ (*internals)->nb_rx_queues = nb_rx_queues;
+ (*internals)->nb_tx_queues = nb_tx_queues;
+
+ if (pair == NULL)
+ (*internals)->if_index = 0;
+ else
+ (*internals)->if_index = if_nametoindex(pair->value);
+
+ pci_dev->numa_node = numa_node;
+
+ data->dev_private = *internals;
+ data->port_id = (*eth_dev)->data->port_id;
+ data->nb_rx_queues = (uint16_t)nb_rx_queues;
+ data->nb_tx_queues = (uint16_t)nb_tx_queues;
+ data->dev_link = pmd_link;
+ data->mac_addrs = &eth_addr;
+
+ (*eth_dev)->data = data;
+ (*eth_dev)->dev_ops = &ops;
+ (*eth_dev)->pci_dev = pci_dev;
+
+ return 0;
+
+ error: if (data)
+ rte_free(data);
+ if (pci_dev)
+ rte_free(pci_dev);
+ if (*internals)
+ rte_free(*internals);
+ return -1;
+}
+
+static int
+rte_eth_from_pcaps_n_dumpers(const char *name,
+ struct rx_pcaps *rx_queues,
+ const unsigned nb_rx_queues,
+ struct tx_pcaps *tx_queues,
+ const unsigned nb_tx_queues,
+ const unsigned numa_node,
+ struct rte_kvargs *kvlist)
+{
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ unsigned i;
+
+ /* do some parameter checking */
+ if (rx_queues == NULL && nb_rx_queues > 0)
+ return -1;
+ if (tx_queues == NULL && nb_tx_queues > 0)
+ return -1;
+
+ if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
+ &internals, &eth_dev, kvlist) < 0)
+ return -1;
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ internals->rx_queue[i].pcap = rx_queues->pcaps[i];
+ internals->rx_queue[i].name = rx_queues->names[i];
+ internals->rx_queue[i].type = rx_queues->types[i];
+ }
+ for (i = 0; i < nb_tx_queues; i++) {
+ internals->tx_queue[i].dumper = tx_queues->dumpers[i];
+ internals->tx_queue[i].name = tx_queues->names[i];
+ internals->tx_queue[i].type = tx_queues->types[i];
+ }
+
+ /* using multiple pcaps/interfaces */
+ internals->single_iface = 0;
+
+ eth_dev->rx_pkt_burst = eth_pcap_rx;
+ eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
+
+ return 0;
+}
+
+static int
+rte_eth_from_pcaps(const char *name,
+ struct rx_pcaps *rx_queues,
+ const unsigned nb_rx_queues,
+ struct tx_pcaps *tx_queues,
+ const unsigned nb_tx_queues,
+ const unsigned numa_node,
+ struct rte_kvargs *kvlist,
+ int single_iface)
+{
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ unsigned i;
+
+ /* do some parameter checking */
+ if (rx_queues == NULL && nb_rx_queues > 0)
+ return -1;
+ if (tx_queues == NULL && nb_tx_queues > 0)
+ return -1;
+
+ if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
+ &internals, &eth_dev, kvlist) < 0)
+ return -1;
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ internals->rx_queue[i].pcap = rx_queues->pcaps[i];
+ internals->rx_queue[i].name = rx_queues->names[i];
+ internals->rx_queue[i].type = rx_queues->types[i];
+ }
+ for (i = 0; i < nb_tx_queues; i++) {
+ internals->tx_queue[i].pcap = tx_queues->pcaps[i];
+ internals->tx_queue[i].name = tx_queues->names[i];
+ internals->tx_queue[i].type = tx_queues->types[i];
+ }
+
+ /* store whether we are using a single interface for rx/tx or not */
+ internals->single_iface = single_iface;
+
+ eth_dev->rx_pkt_burst = eth_pcap_rx;
+ eth_dev->tx_pkt_burst = eth_pcap_tx;
+
+ return 0;
+}
+
+
+static int
+rte_pmd_pcap_devinit(const char *name, const char *params)
+{
+ unsigned numa_node, using_dumpers = 0;
+ int ret;
+ struct rte_kvargs *kvlist;
+ struct rx_pcaps pcaps;
+ struct tx_pcaps dumpers;
+
+ RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);
+
+ numa_node = rte_socket_id();
+
+ gettimeofday(&start_time, NULL);
+ start_cycles = rte_get_timer_cycles();
+ hz = rte_get_timer_hz();
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+
+ /*
+ * If iface argument is passed we open the NICs and use them for
+ * reading / writing
+ */
+ if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {
+
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
+ &open_rx_tx_iface, &pcaps);
+ if (ret < 0)
+ return -1;
+ dumpers.pcaps[0] = pcaps.pcaps[0];
+ dumpers.names[0] = pcaps.names[0];
+ dumpers.types[0] = pcaps.types[0];
+ return rte_eth_from_pcaps(name, &pcaps, 1, &dumpers, 1,
+ numa_node, kvlist, 1);
+ }
+
+ /*
+ * We check whether we want to open a RX stream from a real NIC or a
+ * pcap file
+ */
+ if ((pcaps.num_of_rx = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG))) {
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
+ &open_rx_pcap, &pcaps);
+ } else {
+ pcaps.num_of_rx = rte_kvargs_count(kvlist,
+ ETH_PCAP_RX_IFACE_ARG);
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
+ &open_rx_iface, &pcaps);
+ }
+
+ if (ret < 0)
+ return -1;
+
+ /*
+ * We check whether we want to open a TX stream to a real NIC or a
+ * pcap file
+ */
+ if ((dumpers.num_of_tx = rte_kvargs_count(kvlist,
+ ETH_PCAP_TX_PCAP_ARG))) {
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
+ &open_tx_pcap, &dumpers);
+ using_dumpers = 1;
+ } else {
+ dumpers.num_of_tx = rte_kvargs_count(kvlist,
+ ETH_PCAP_TX_IFACE_ARG);
+ ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
+ &open_tx_iface, &dumpers);
+ }
+
+ if (ret < 0)
+ return -1;
+
+ if (using_dumpers)
+ return rte_eth_from_pcaps_n_dumpers(name, &pcaps, pcaps.num_of_rx,
+ &dumpers, dumpers.num_of_tx, numa_node, kvlist);
+
+ return rte_eth_from_pcaps(name, &pcaps, pcaps.num_of_rx, &dumpers,
+ dumpers.num_of_tx, numa_node, kvlist, 0);
+
+}
+
+static struct rte_driver pmd_pcap_drv = {
+ .name = "eth_pcap",
+ .type = PMD_VDEV,
+ .init = rte_pmd_pcap_devinit,
+};
+
+PMD_REGISTER_DRIVER(pmd_pcap_drv);
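For reference, this virtual driver is normally instantiated from the EAL command line; the kvargs handled in rte_pmd_pcap_devinit() map onto --vdev strings such as the following (device names and file names are illustrative):

/* Illustrative sketch (not from the patch): typical EAL --vdev strings
 * mapping onto the kvargs parsed above (file and interface names are
 * examples only):
 *
 *   --vdev 'eth_pcap0,rx_pcap=in.pcap,tx_pcap=out.pcap'  RX from a capture
 *                                                        file, dump TX to one
 *   --vdev 'eth_pcap1,rx_iface=eth0,tx_iface=eth0'       use a real NIC
 *   --vdev 'eth_pcap2,iface=eth1'                        single shared iface
 */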
diff --git a/src/dpdk_lib18/librte_pmd_ring/Makefile b/src/dpdk_lib18/librte_pmd_ring/Makefile
new file mode 100755
index 00000000..b57e4210
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ring/Makefile
@@ -0,0 +1,57 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ring.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += rte_eth_ring.c
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_eth_ring.h
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_eal lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_mbuf lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.c b/src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.c
new file mode 100755
index 00000000..4f1b6ed8
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.c
@@ -0,0 +1,530 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rte_eth_ring.h"
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+
+#define ETH_RING_NUMA_NODE_ACTION_ARG "nodeaction"
+#define ETH_RING_ACTION_CREATE "CREATE"
+#define ETH_RING_ACTION_ATTACH "ATTACH"
+
+static const char *valid_arguments[] = {
+ ETH_RING_NUMA_NODE_ACTION_ARG,
+ NULL
+};
+
+struct ring_queue {
+ struct rte_ring *rng;
+ rte_atomic64_t rx_pkts;
+ rte_atomic64_t tx_pkts;
+ rte_atomic64_t err_pkts;
+};
+
+struct pmd_internals {
+ unsigned nb_rx_queues;
+ unsigned nb_tx_queues;
+
+ struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
+ struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
+};
+
+
+static struct ether_addr eth_addr = { .addr_bytes = {0} };
+static const char *drivername = "Rings PMD";
+static struct rte_eth_link pmd_link = {
+ .link_speed = 10000,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = 0
+};
+
+static uint16_t
+eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ void **ptrs = (void *)&bufs[0];
+ struct ring_queue *r = q;
+ const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
+ ptrs, nb_bufs);
+ if (r->rng->flags & RING_F_SC_DEQ)
+ r->rx_pkts.cnt += nb_rx;
+ else
+ rte_atomic64_add(&(r->rx_pkts), nb_rx);
+ return nb_rx;
+}
+
+static uint16_t
+eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ void **ptrs = (void *)&bufs[0];
+ struct ring_queue *r = q;
+ const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
+ ptrs, nb_bufs);
+ if (r->rng->flags & RING_F_SP_ENQ) {
+ r->tx_pkts.cnt += nb_tx;
+ r->err_pkts.cnt += nb_bufs - nb_tx;
+ } else {
+ rte_atomic64_add(&(r->tx_pkts), nb_tx);
+ rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
+ }
+ return nb_tx;
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
+
+static int
+eth_dev_start(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 1;
+ return 0;
+}
+
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 0;
+}
+
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool __rte_unused)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
+ return 0;
+}
+
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
+ return 0;
+}
+
+
+static void
+eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ dev_info->driver_name = drivername;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)-1;
+ dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->pci_dev = NULL;
+}
+
+static void
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
+{
+ unsigned i;
+ unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ const struct pmd_internals *internal = dev->data->dev_private;
+
+ memset(igb_stats, 0, sizeof(*igb_stats));
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < internal->nb_rx_queues; i++) {
+ igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
+ rx_total += igb_stats->q_ipackets[i];
+ }
+
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
+ i < internal->nb_tx_queues; i++) {
+ igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
+ igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
+ tx_total += igb_stats->q_opackets[i];
+ tx_err_total += igb_stats->q_errors[i];
+ }
+
+ igb_stats->ipackets = rx_total;
+ igb_stats->opackets = tx_total;
+ igb_stats->oerrors = tx_err_total;
+}
+
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned i;
+ struct pmd_internals *internal = dev->data->dev_private;
+ for (i = 0; i < internal->nb_rx_queues; i++)
+ internal->rx_ring_queues[i].rx_pkts.cnt = 0;
+ for (i = 0; i < internal->nb_tx_queues; i++) {
+ internal->tx_ring_queues[i].tx_pkts.cnt = 0;
+ internal->tx_ring_queues[i].err_pkts.cnt = 0;
+ }
+}
+
+static void
+eth_queue_release(void *q __rte_unused) { ; }
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused) { return 0; }
+
+static struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+};
+
+int
+rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
+ const unsigned nb_rx_queues,
+ struct rte_ring *const tx_queues[],
+ const unsigned nb_tx_queues,
+ const unsigned numa_node)
+{
+ struct rte_eth_dev_data *data = NULL;
+ struct rte_pci_device *pci_dev = NULL;
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ unsigned i;
+
+ /* do some parameter checking */
+ if (rx_queues == NULL && nb_rx_queues > 0)
+ goto error;
+ if (tx_queues == NULL && nb_tx_queues > 0)
+ goto error;
+
+ RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
+ numa_node);
+
+ /* now do all data allocation - for eth_dev structure, dummy pci driver
+ * and internal (private) data
+ */
+ data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+ if (data == NULL)
+ goto error;
+
+ pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
+ if (pci_dev == NULL)
+ goto error;
+
+ internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
+ if (internals == NULL)
+ goto error;
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (eth_dev == NULL)
+ goto error;
+
+ /* now put it all together
+ * - store queue data in internals,
+ * - store numa_node info in pci_driver
+ * - point eth_dev_data to internals and pci_driver
+ * - and point eth_dev structure to new eth_dev_data structure
+ */
+ /* NOTE: we'll replace the data element, of originally allocated eth_dev
+ * so the rings are local per-process */
+
+ internals->nb_rx_queues = nb_rx_queues;
+ internals->nb_tx_queues = nb_tx_queues;
+ for (i = 0; i < nb_rx_queues; i++) {
+ internals->rx_ring_queues[i].rng = rx_queues[i];
+ }
+ for (i = 0; i < nb_tx_queues; i++) {
+ internals->tx_ring_queues[i].rng = tx_queues[i];
+ }
+
+ pci_dev->numa_node = numa_node;
+
+ data->dev_private = internals;
+ data->port_id = eth_dev->data->port_id;
+ data->nb_rx_queues = (uint16_t)nb_rx_queues;
+ data->nb_tx_queues = (uint16_t)nb_tx_queues;
+ data->dev_link = pmd_link;
+ data->mac_addrs = &eth_addr;
+
+ eth_dev->data = data;
+ eth_dev->dev_ops = &ops;
+ eth_dev->pci_dev = pci_dev;
+
+ /* finally assign rx and tx ops */
+ eth_dev->rx_pkt_burst = eth_ring_rx;
+ eth_dev->tx_pkt_burst = eth_ring_tx;
+
+ return 0;
+
+error:
+ if (data)
+ rte_free(data);
+ if (pci_dev)
+ rte_free(pci_dev);
+ if (internals)
+ rte_free(internals);
+ return -1;
+}
+
+enum dev_action{
+ DEV_CREATE,
+ DEV_ATTACH
+};
+
+static int
+eth_dev_ring_create(const char *name, const unsigned numa_node,
+ enum dev_action action)
+{
+ /* rx and tx are so named from the point of view of the first port;
+ * they are inverted from the point of view of the second port.
+ */
+ struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
+ unsigned i;
+ char rng_name[RTE_RING_NAMESIZE];
+ unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
+ RTE_PMD_RING_MAX_TX_RINGS);
+
+ for (i = 0; i < num_rings; i++) {
+ snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
+ rxtx[i] = (action == DEV_CREATE) ?
+ rte_ring_create(rng_name, 1024, numa_node,
+ RING_F_SP_ENQ|RING_F_SC_DEQ) :
+ rte_ring_lookup(rng_name);
+ if (rxtx[i] == NULL)
+ return -1;
+ }
+
+ if (rte_eth_from_rings(name, rxtx, num_rings, rxtx, num_rings, numa_node))
+ return -1;
+
+ return 0;
+}
+
+
+static int
+eth_dev_ring_pair_create(const char *name, const unsigned numa_node,
+ enum dev_action action)
+{
+ /* rx and tx are so named from the point of view of the first port;
+ * they are inverted from the point of view of the second port.
+ */
+ struct rte_ring *rx[RTE_PMD_RING_MAX_RX_RINGS];
+ struct rte_ring *tx[RTE_PMD_RING_MAX_TX_RINGS];
+ unsigned i;
+ char rx_rng_name[RTE_RING_NAMESIZE];
+ char tx_rng_name[RTE_RING_NAMESIZE];
+ unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
+ RTE_PMD_RING_MAX_TX_RINGS);
+
+ for (i = 0; i < num_rings; i++) {
+ snprintf(rx_rng_name, sizeof(rx_rng_name), "ETH_RX%u_%s", i, name);
+ rx[i] = (action == DEV_CREATE) ?
+ rte_ring_create(rx_rng_name, 1024, numa_node,
+ RING_F_SP_ENQ|RING_F_SC_DEQ) :
+ rte_ring_lookup(rx_rng_name);
+ if (rx[i] == NULL)
+ return -1;
+ snprintf(tx_rng_name, sizeof(tx_rng_name), "ETH_TX%u_%s", i, name);
+ tx[i] = (action == DEV_CREATE) ?
+ rte_ring_create(tx_rng_name, 1024, numa_node,
+ RING_F_SP_ENQ|RING_F_SC_DEQ):
+ rte_ring_lookup(tx_rng_name);
+ if (tx[i] == NULL)
+ return -1;
+ }
+
+ if (rte_eth_from_rings(rx_rng_name, rx, num_rings, tx, num_rings,
+ numa_node) || rte_eth_from_rings(tx_rng_name, tx, num_rings, rx,
+ num_rings, numa_node))
+ return -1;
+
+ return 0;
+}
+
+int
+rte_eth_ring_pair_create(const char *name, const unsigned numa_node)
+{
+ RTE_LOG(WARNING, PMD, "rte_eth_ring_pair_create is deprecated\n");
+ return eth_dev_ring_pair_create(name, numa_node, DEV_CREATE);
+}
+
+int
+rte_eth_ring_pair_attach(const char *name, const unsigned numa_node)
+{
+ RTE_LOG(WARNING, PMD, "rte_eth_ring_pair_attach is deprecated\n");
+ return eth_dev_ring_pair_create(name, numa_node, DEV_ATTACH);
+}
+
+struct node_action_pair {
+ char name[PATH_MAX];
+ unsigned node;
+ enum dev_action action;
+};
+
+struct node_action_list {
+ unsigned total;
+ unsigned count;
+ struct node_action_pair *list;
+};
+
+static int parse_kvlist(const char *key __rte_unused, const char *value, void *data)
+{
+ struct node_action_list *info = data;
+ int ret;
+ char *name;
+ char *action;
+ char *node;
+ char *end;
+
+ name = strdup(value);
+
+ ret = -EINVAL;
+
+ if (!name) {
+ RTE_LOG(WARNING, PMD, "command line paramter is empty for ring pmd!\n");
+ goto out;
+ }
+
+ node = strchr(name, ':');
+ if (!node) {
+ RTE_LOG(WARNING, PMD, "could not parse node value from %s", name);
+ goto out;
+ }
+
+ *node = '\0';
+ node++;
+
+ action = strchr(node, ':');
+ if (!action) {
+ RTE_LOG(WARNING, PMD, "could not action value from %s", node);
+ goto out;
+ }
+
+ *action = '\0';
+ action++;
+
+ /*
+ * Need to do some sanity checking here
+ */
+
+ if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
+ info->list[info->count].action = DEV_ATTACH;
+ else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
+ info->list[info->count].action = DEV_CREATE;
+ else
+ goto out;
+
+ errno = 0;
+ info->list[info->count].node = strtol(node, &end, 10);
+
+ if ((errno != 0) || (*end != '\0')) {
+ RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node);
+ goto out;
+ }
+
+ snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name);
+
+ info->count++;
+
+ ret = 0;
+out:
+ free(name);
+ return ret;
+}
+
+int
+rte_pmd_ring_devinit(const char *name, const char *params)
+{
+ struct rte_kvargs *kvlist;
+ int ret = 0;
+ struct node_action_list *info = NULL;
+
+ RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);
+
+ if (params == NULL || params[0] == '\0')
+ eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
+ else {
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+
+ if (!kvlist) {
+ RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
+ " rings-backed ethernet device\n");
+ eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE);
+ return 0;
+ } else {
+ ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
+ info = rte_zmalloc("struct node_action_list", sizeof(struct node_action_list) +
+ (sizeof(struct node_action_pair) * ret), 0);
+ if (!info)
+ goto out;
+
+ info->total = ret;
+ info->list = (struct node_action_pair*)(info + 1);
+
+ ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
+ parse_kvlist, info);
+
+ if (ret < 0)
+ goto out_free;
+
+ for (info->count = 0; info->count < info->total; info->count++) {
+ eth_dev_ring_create(name, info->list[info->count].node,
+ info->list[info->count].action);
+ }
+ }
+ }
+
+out_free:
+ rte_free(info);
+out:
+ return ret;
+}
+
+static struct rte_driver pmd_ring_drv = {
+ .name = "eth_ring",
+ .type = PMD_VDEV,
+ .init = rte_pmd_ring_devinit,
+};
+
+PMD_REGISTER_DRIVER(pmd_ring_drv);
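For reference, each "nodeaction" value handled by parse_kvlist() above has the form name:node:action, where action is CREATE or ATTACH. The sketch below drives the init path directly; the device name "eth_ring0" and ring name "r0" are made up for illustration, and in normal use this path would be triggered by the EAL vdev machinery rather than called by hand.

#include "rte_eth_ring.h"

static int
create_example_ring_port(void)
{
	/* one set of rings named r0, created on NUMA node 0 */
	return rte_pmd_ring_devinit("eth_ring0", "nodeaction=r0:0:CREATE");
}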
diff --git a/src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.h b/src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.h
new file mode 100755
index 00000000..e6ae19ed
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_ring/rte_eth_ring.h
@@ -0,0 +1,63 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETH_RING_H_
+#define _RTE_ETH_RING_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_ring.h>
+
+int rte_eth_from_rings(const char *name,
+ struct rte_ring * const rx_queues[],
+ const unsigned nb_rx_queues,
+ struct rte_ring *const tx_queues[],
+ const unsigned nb_tx_queues,
+ const unsigned numa_node);
+
+int rte_eth_ring_pair_create(const char *name, const unsigned numa_node);
+int rte_eth_ring_pair_attach(const char *name, const unsigned numa_node);
+
+/**
+ * For use by test apps only. Called as part of EAL init to set up any dummy NICs
+ * configured on command line.
+ */
+int rte_pmd_ring_devinit(const char *name, const char *params);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
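A minimal usage sketch for the rte_eth_from_rings() API declared above: back a single-queue ethdev with one rte_ring used for both RX and TX, as eth_dev_ring_create() does internally. The ring and port names are hypothetical and error handling is reduced to the essentials.

#include <rte_ring.h>
#include "rte_eth_ring.h"

static int
make_loopback_port(unsigned numa_node)
{
	/* single-producer/single-consumer ring, matching the flags used above */
	struct rte_ring *r = rte_ring_create("ETH_RXTX0_loop", 1024, numa_node,
			RING_F_SP_ENQ | RING_F_SC_DEQ);

	if (r == NULL)
		return -1;

	/* same ring for RX and TX: whatever is transmitted comes back on RX */
	return rte_eth_from_rings("loop0", &r, 1, &r, 1, numa_node);
}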
diff --git a/src/dpdk_lib18/librte_pmd_virtio/Makefile b/src/dpdk_lib18/librte_pmd_virtio/Makefile
new file mode 100755
index 00000000..456095b3
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_virtio/Makefile
@@ -0,0 +1,57 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_virtio_uio.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtqueue.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_ethdev.c
+
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_net lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.c b/src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.c
new file mode 100755
index 00000000..b3b5bb6a
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.c
@@ -0,0 +1,1209 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#ifdef RTE_EXEC_ENV_LINUXAPP
+#include <dirent.h>
+#endif
+
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+
+#include "virtio_ethdev.h"
+#include "virtio_pci.h"
+#include "virtio_logs.h"
+#include "virtqueue.h"
+
+
+static int eth_virtio_dev_init(struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev);
+static int virtio_dev_configure(struct rte_eth_dev *dev);
+static int virtio_dev_start(struct rte_eth_dev *dev);
+static void virtio_dev_stop(struct rte_eth_dev *dev);
+static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static void virtio_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int virtio_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
+
+static void virtio_set_hwaddr(struct virtio_hw *hw);
+static void virtio_get_hwaddr(struct virtio_hw *hw);
+
+static void virtio_dev_rx_queue_release(__rte_unused void *rxq);
+static void virtio_dev_tx_queue_release(__rte_unused void *txq);
+
+static void virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
+static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
+
+static int virtio_dev_queue_stats_mapping_set(
+ __rte_unused struct rte_eth_dev *eth_dev,
+ __rte_unused uint16_t queue_id,
+ __rte_unused uint8_t stat_idx,
+ __rte_unused uint8_t is_rx);
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id pci_id_virtio_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{ .vendor_id = 0, /* sentinel */ },
+};
+
+static int
+virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
+{
+ uint32_t head = vq->vq_desc_head_idx, i;
+ int k, sum = 0;
+ virtio_net_ctrl_ack status = ~0;
+ struct virtio_pmd_ctrl result;
+
+ ctrl->status = status;
+
+ if (!vq->hw->cvq) {
+ PMD_INIT_LOG(ERR,
+ "%s(): Control queue is not supported.",
+ __func__);
+ return -1;
+ }
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
+ "vq->hw->cvq = %p vq = %p",
+ vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
+
+ if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
+ return -1;
+
+ memcpy(vq->virtio_net_hdr_mz->addr, ctrl,
+ sizeof(struct virtio_pmd_ctrl));
+
+ /*
+ * Format is enforced in qemu code:
+ * One TX packet for header;
+ * At least one TX packet per argument;
+ * One RX packet for ACK.
+ */
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
+ vq->vq_ring.desc[head].addr = vq->virtio_net_hdr_mz->phys_addr;
+ vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_free_cnt--;
+ i = vq->vq_ring.desc[head].next;
+
+ for (k = 0; k < pkt_num; k++) {
+ vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
+ vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr
+ + sizeof(struct virtio_net_ctrl_hdr)
+ + sizeof(ctrl->status) + sizeof(uint8_t)*sum;
+ vq->vq_ring.desc[i].len = dlen[k];
+ sum += dlen[k];
+ vq->vq_free_cnt--;
+ i = vq->vq_ring.desc[i].next;
+ }
+
+ vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
+ vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr
+ + sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_ring.desc[i].len = sizeof(ctrl->status);
+ vq->vq_free_cnt--;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
+ usleep(100);
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ rmb();
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+
+ while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ memcpy(&result, vq->virtio_net_hdr_mz->addr,
+ sizeof(struct virtio_pmd_ctrl));
+
+ return result.status;
+}
+
+static int
+virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+ struct virtio_hw *hw
+ = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_pmd_ctrl ctrl;
+ int dlen[1];
+ int ret;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
+ memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
+
+ dlen[0] = sizeof(uint16_t);
+
+ ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
+
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
+ "failed, this is too late now...");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int virtio_dev_queue_setup(struct rte_eth_dev *dev,
+ int queue_type,
+ uint16_t queue_idx,
+ uint8_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ struct virtqueue **pvq)
+{
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+ const struct rte_memzone *mz;
+ uint16_t vq_size;
+ int size;
+ struct virtio_hw *hw =
+ VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtqueue *vq = NULL;
+
+ /* Write the virtqueue index to the Queue Select Field */
+ VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_SEL, vtpci_queue_idx);
+ PMD_INIT_LOG(DEBUG, "selecting queue: %d", vtpci_queue_idx);
+
+ /*
+ * Read the virtqueue size from the Queue Size field.
+ * It is always a power of 2; if it is 0, the virtqueue does not exist.
+ */
+ vq_size = VIRTIO_READ_REG_2(hw, VIRTIO_PCI_QUEUE_NUM);
+ PMD_INIT_LOG(DEBUG, "vq_size: %d nb_desc:%d", vq_size, nb_desc);
+ if (nb_desc == 0)
+ nb_desc = vq_size;
+ if (vq_size == 0) {
+ PMD_INIT_LOG(ERR, "%s: virtqueue does not exist", __func__);
+ return -EINVAL;
+ } else if (!rte_is_power_of_2(vq_size)) {
+ PMD_INIT_LOG(ERR, "%s: virtqueue size is not powerof 2", __func__);
+ return -EINVAL;
+ } else if (nb_desc != vq_size) {
+ PMD_INIT_LOG(ERR, "Warning: nb_desc(%d) is not equal to vq size (%d), fall to vq size",
+ nb_desc, vq_size);
+ nb_desc = vq_size;
+ }
+
+ if (queue_type == VTNET_RQ) {
+ snprintf(vq_name, sizeof(vq_name), "port%d_rvq%d",
+ dev->data->port_id, queue_idx);
+ vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
+ vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
+ } else if (queue_type == VTNET_TQ) {
+ snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d",
+ dev->data->port_id, queue_idx);
+ vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
+ vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
+ } else if (queue_type == VTNET_CQ) {
+ snprintf(vq_name, sizeof(vq_name), "port%d_cvq",
+ dev->data->port_id);
+ vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
+ vq_size * sizeof(struct vq_desc_extra),
+ RTE_CACHE_LINE_SIZE);
+ }
+ if (vq == NULL) {
+ PMD_INIT_LOG(ERR, "%s: Can not allocate virtqueue", __func__);
+ return (-ENOMEM);
+ }
+
+ vq->hw = hw;
+ vq->port_id = dev->data->port_id;
+ vq->queue_id = queue_idx;
+ vq->vq_queue_index = vtpci_queue_idx;
+ vq->vq_alignment = VIRTIO_PCI_VRING_ALIGN;
+ vq->vq_nentries = vq_size;
+ vq->vq_free_cnt = vq_size;
+
+ /*
+ * Reserve a memzone for vring elements
+ */
+ size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);
+
+ mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+ socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
+ if (mz == NULL) {
+ rte_free(vq);
+ return -ENOMEM;
+ }
+
+ /*
+ * The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bit,
+ * and only accepts a 32 bit page frame number.
+ * Check that the allocated physical memory does not exceed 16TB.
+ */
+ if ((mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
+ rte_free(vq);
+ return -ENOMEM;
+ }
+
+ memset(mz->addr, 0, mz->len);
+ vq->mz = mz;
+ vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_virt_mem = mz->addr;
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%"PRIx64, (uint64_t)mz->phys_addr);
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64, (uint64_t)mz->addr);
+ vq->virtio_net_hdr_mz = NULL;
+ vq->virtio_net_hdr_mem = 0;
+
+ if (queue_type == VTNET_TQ) {
+ /*
+ * For each xmit packet, allocate a virtio_net_hdr
+ */
+ snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone",
+ dev->data->port_id, queue_idx);
+ vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
+ vq_size * hw->vtnet_hdr_size,
+ socket_id, 0, RTE_CACHE_LINE_SIZE);
+ if (vq->virtio_net_hdr_mz == NULL) {
+ rte_free(vq);
+ return -ENOMEM;
+ }
+ vq->virtio_net_hdr_mem =
+ vq->virtio_net_hdr_mz->phys_addr;
+ memset(vq->virtio_net_hdr_mz->addr, 0,
+ vq_size * hw->vtnet_hdr_size);
+ } else if (queue_type == VTNET_CQ) {
+ /* Allocate a page for control vq command, data and status */
+ snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone",
+ dev->data->port_id);
+ vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
+ PAGE_SIZE, socket_id, 0, RTE_CACHE_LINE_SIZE);
+ if (vq->virtio_net_hdr_mz == NULL) {
+ rte_free(vq);
+ return -ENOMEM;
+ }
+ vq->virtio_net_hdr_mem =
+ vq->virtio_net_hdr_mz->phys_addr;
+ memset(vq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
+ }
+
+ /*
+ * Set guest physical address of the virtqueue
+ * in VIRTIO_PCI_QUEUE_PFN config register of device
+ */
+ VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_QUEUE_PFN,
+ mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
+ *pvq = vq;
+ return 0;
+}
+
+static int
+virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
+ uint32_t socket_id)
+{
+ struct virtqueue *vq;
+ uint16_t nb_desc = 0;
+ int ret;
+ struct virtio_hw *hw =
+ VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,
+ vtpci_queue_idx, nb_desc, socket_id, &vq);
+
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "control vq initialization failed");
+ return ret;
+ }
+
+ hw->cvq = vq;
+ return 0;
+}
+
+static void
+virtio_dev_close(struct rte_eth_dev *dev)
+{
+ PMD_INIT_LOG(DEBUG, "virtio_dev_close");
+
+ virtio_dev_stop(dev);
+}
+
+static void
+virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw
+ = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_pmd_ctrl ctrl;
+ int dlen[1];
+ int ret;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
+ ctrl.data[0] = 1;
+ dlen[0] = 1;
+
+ ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
+
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to enable promisc");
+}
+
+static void
+virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw
+ = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_pmd_ctrl ctrl;
+ int dlen[1];
+ int ret;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
+ ctrl.data[0] = 0;
+ dlen[0] = 1;
+
+ ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
+
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to disable promisc");
+}
+
+static void
+virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw
+ = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_pmd_ctrl ctrl;
+ int dlen[1];
+ int ret;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
+ ctrl.data[0] = 1;
+ dlen[0] = 1;
+
+ ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
+
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
+}
+
+static void
+virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw
+ = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_pmd_ctrl ctrl;
+ int dlen[1];
+ int ret;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
+ ctrl.data[0] = 0;
+ dlen[0] = 1;
+
+ ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
+
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
+}
+
+/*
+ * dev_ops for virtio, bare necessities for basic operation
+ */
+static struct eth_dev_ops virtio_eth_dev_ops = {
+ .dev_configure = virtio_dev_configure,
+ .dev_start = virtio_dev_start,
+ .dev_stop = virtio_dev_stop,
+ .dev_close = virtio_dev_close,
+ .promiscuous_enable = virtio_dev_promiscuous_enable,
+ .promiscuous_disable = virtio_dev_promiscuous_disable,
+ .allmulticast_enable = virtio_dev_allmulticast_enable,
+ .allmulticast_disable = virtio_dev_allmulticast_disable,
+
+ .dev_infos_get = virtio_dev_info_get,
+ .stats_get = virtio_dev_stats_get,
+ .stats_reset = virtio_dev_stats_reset,
+ .link_update = virtio_dev_link_update,
+ .mac_addr_add = NULL,
+ .mac_addr_remove = NULL,
+ .rx_queue_setup = virtio_dev_rx_queue_setup,
+ /* meaningful only for multiple queues */
+ .rx_queue_release = virtio_dev_rx_queue_release,
+ .tx_queue_setup = virtio_dev_tx_queue_setup,
+ /* meaningful only for multiple queues */
+ .tx_queue_release = virtio_dev_tx_queue_release,
+ /* collect stats per queue */
+ .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
+};
+
+static inline int
+virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * Pointer to the structure rte_eth_dev to write to.
+ * @param link
+ * Pointer to the rte_eth_link structure holding the status to store.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ unsigned i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ const struct virtqueue *txvq = dev->data->tx_queues[i];
+ if (txvq == NULL)
+ continue;
+
+ stats->opackets += txvq->packets;
+ stats->obytes += txvq->bytes;
+ stats->oerrors += txvq->errors;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = txvq->packets;
+ stats->q_obytes[i] = txvq->bytes;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ const struct virtqueue *rxvq = dev->data->rx_queues[i];
+ if (rxvq == NULL)
+ continue;
+
+ stats->ipackets += rxvq->packets;
+ stats->ibytes += rxvq->bytes;
+ stats->ierrors += rxvq->errors;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = rxvq->packets;
+ stats->q_ibytes[i] = rxvq->bytes;
+ }
+ }
+
+ stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+}
+
+static void
+virtio_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct virtqueue *txvq = dev->data->tx_queues[i];
+ if (txvq == NULL)
+ continue;
+
+ txvq->packets = 0;
+ txvq->bytes = 0;
+ txvq->errors = 0;
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct virtqueue *rxvq = dev->data->rx_queues[i];
+ if (rxvq == NULL)
+ continue;
+
+ rxvq->packets = 0;
+ rxvq->bytes = 0;
+ rxvq->errors = 0;
+ }
+
+ dev->data->rx_mbuf_alloc_failed = 0;
+}
+
+static void
+virtio_set_hwaddr(struct virtio_hw *hw)
+{
+ vtpci_write_dev_config(hw,
+ offsetof(struct virtio_net_config, mac),
+ &hw->mac_addr, ETHER_ADDR_LEN);
+}
+
+static void
+virtio_get_hwaddr(struct virtio_hw *hw)
+{
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mac),
+ &hw->mac_addr, ETHER_ADDR_LEN);
+ } else {
+ eth_random_addr(&hw->mac_addr[0]);
+ virtio_set_hwaddr(hw);
+ }
+}
+
+
+static void
+virtio_negotiate_features(struct virtio_hw *hw)
+{
+ uint32_t host_features, mask;
+
+ mask = VIRTIO_NET_F_CTRL_VLAN;
+ mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
+
+ /* TSO and LRO are only available when their corresponding
+ * checksum offload feature is also negotiated.
+ */
+ mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN;
+ mask |= VIRTIO_NET_F_GUEST_TSO4 | VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN;
+ mask |= VTNET_LRO_FEATURES;
+
+ /* not negotiating INDIRECT descriptor table support */
+ mask |= VIRTIO_RING_F_INDIRECT_DESC;
+
+ /* Prepare guest_features: feature that driver wants to support */
+ hw->guest_features = VTNET_FEATURES & ~mask;
+ PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %x",
+ hw->guest_features);
+
+ /* Read device(host) feature bits */
+ host_features = VIRTIO_READ_REG_4(hw, VIRTIO_PCI_HOST_FEATURES);
+ PMD_INIT_LOG(DEBUG, "host_features before negotiate = %x",
+ host_features);
+
+ /*
+ * Negotiate features: a subset of the device feature bits is written
+ * back as the guest feature bits.
+ */
+ hw->guest_features = vtpci_negotiate_features(hw, host_features);
+ PMD_INIT_LOG(DEBUG, "features after negotiate = %x",
+ hw->guest_features);
+}
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+static int
+parse_sysfs_value(const char *filename, unsigned long *val)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char *end = NULL;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ PMD_INIT_LOG(ERR, "%s(): cannot open sysfs value %s",
+ __func__, filename);
+ return -1;
+ }
+
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ PMD_INIT_LOG(ERR, "%s(): cannot read sysfs value %s",
+ __func__, filename);
+ fclose(f);
+ return -1;
+ }
+ *val = strtoul(buf, &end, 0);
+ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+ PMD_INIT_LOG(ERR, "%s(): cannot parse sysfs value %s",
+ __func__, filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+static int get_uio_dev(struct rte_pci_addr *loc, char *buf, unsigned int buflen)
+{
+ unsigned int uio_num;
+ struct dirent *e;
+ DIR *dir;
+ char dirname[PATH_MAX];
+
+ /* depending on kernel version, uio can be located in uio/uioX
+ * or uio:uioX */
+ snprintf(dirname, sizeof(dirname),
+ SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/uio",
+ loc->domain, loc->bus, loc->devid, loc->function);
+ dir = opendir(dirname);
+ if (dir == NULL) {
+ /* retry with the parent directory */
+ snprintf(dirname, sizeof(dirname),
+ SYSFS_PCI_DEVICES "/" PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+ dir = opendir(dirname);
+
+ if (dir == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot opendir %s", dirname);
+ return -1;
+ }
+ }
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ /* format could be uio%d ...*/
+ int shortprefix_len = sizeof("uio") - 1;
+ /* ... or uio:uio%d */
+ int longprefix_len = sizeof("uio:uio") - 1;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", 3) != 0)
+ continue;
+
+ /* first try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+ snprintf(buf, buflen, "%s/uio%u", dirname, uio_num);
+ break;
+ }
+
+ /* then try uio:uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+ snprintf(buf, buflen, "%s/uio:uio%u", dirname,
+ uio_num);
+ break;
+ }
+ }
+ closedir(dir);
+
+ /* No uio resource found */
+ if (e == NULL) {
+ PMD_INIT_LOG(ERR, "Could not find uio resource");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_has_msix(const struct rte_pci_addr *loc)
+{
+ DIR *d;
+ char dirname[PATH_MAX];
+
+ snprintf(dirname, sizeof(dirname),
+ SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/msi_irqs",
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ d = opendir(dirname);
+ if (d)
+ closedir(d);
+
+ return (d != NULL);
+}
+#else
+static int
+virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
+{
+ /* nic_uio does not enable interrupts, return 0 (false). */
+ return 0;
+}
+#endif
+
+/*
+ * This function is based on probe() function in virtio_pci.c
+ * It returns 0 on success.
+ */
+static int
+eth_virtio_dev_init(__rte_unused struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev)
+{
+ struct virtio_net_config *config;
+ struct virtio_net_config local_config;
+ uint32_t offset_conf = sizeof(config->mac);
+ struct rte_pci_device *pci_dev;
+ struct virtio_hw *hw =
+ VIRTIO_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ if (RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr)) {
+ PMD_INIT_LOG(ERR,
+ "MBUF HEADROOM should be enough to hold virtio net hdr\n");
+ return -1;
+ }
+
+ eth_dev->dev_ops = &virtio_eth_dev_ops;
+ eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return 0;
+
+ pci_dev = eth_dev->pci_dev;
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ {
+ char dirname[PATH_MAX];
+ char filename[PATH_MAX];
+ unsigned long start, size;
+
+ if (get_uio_dev(&pci_dev->addr, dirname, sizeof(dirname)) < 0)
+ return -1;
+
+ /* get portio size */
+ snprintf(filename, sizeof(filename),
+ "%s/portio/port0/size", dirname);
+ if (parse_sysfs_value(filename, &size) < 0) {
+ PMD_INIT_LOG(ERR, "%s(): cannot parse size",
+ __func__);
+ return -1;
+ }
+
+ /* get portio start */
+ snprintf(filename, sizeof(filename),
+ "%s/portio/port0/start", dirname);
+ if (parse_sysfs_value(filename, &start) < 0) {
+ PMD_INIT_LOG(ERR, "%s(): cannot parse portio start",
+ __func__);
+ return -1;
+ }
+ pci_dev->mem_resource[0].addr = (void *)(uintptr_t)start;
+ pci_dev->mem_resource[0].len = (uint64_t)size;
+ PMD_INIT_LOG(DEBUG,
+ "PCI Port IO found start=0x%lx with size=0x%lx",
+ start, size);
+ }
+#endif
+ hw->use_msix = virtio_has_msix(&pci_dev->addr);
+ hw->io_base = (uint32_t)(uintptr_t)pci_dev->mem_resource[0].addr;
+
+ /* Reset the device although not necessary at startup */
+ vtpci_reset(hw);
+
+ /* Tell the host we've noticed this device. */
+ vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+ /* Tell the host we know how to drive the device. */
+ vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ virtio_negotiate_features(hw);
+
+ /* Setting up rx_header size for the device */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ } else {
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("virtio", ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
+
+ /* Copy the permanent MAC address to: virtio_hw */
+ virtio_get_hwaddr(hw);
+ ether_addr_copy((struct ether_addr *) hw->mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+ PMD_INIT_LOG(DEBUG,
+ "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
+ hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
+ hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
+
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
+ config = &local_config;
+
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ offset_conf += sizeof(config->status);
+ } else {
+ PMD_INIT_LOG(DEBUG,
+ "VIRTIO_NET_F_STATUS is not supported");
+ config->status = 0;
+ }
+
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
+ offset_conf += sizeof(config->max_virtqueue_pairs);
+ } else {
+ PMD_INIT_LOG(DEBUG,
+ "VIRTIO_NET_F_MQ is not supported");
+ config->max_virtqueue_pairs = 1;
+ }
+
+ vtpci_read_dev_config(hw, 0, (uint8_t *)config, offset_conf);
+
+ hw->max_rx_queues =
+ (VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ?
+ VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs;
+ hw->max_tx_queues =
+ (VIRTIO_MAX_TX_QUEUES < config->max_virtqueue_pairs) ?
+ VIRTIO_MAX_TX_QUEUES : config->max_virtqueue_pairs;
+
+ virtio_dev_cq_queue_setup(eth_dev,
+ config->max_virtqueue_pairs * 2,
+ SOCKET_ID_ANY);
+
+ PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
+ config->max_virtqueue_pairs);
+ PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
+ PMD_INIT_LOG(DEBUG,
+ "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
+ config->mac[0], config->mac[1],
+ config->mac[2], config->mac[3],
+ config->mac[4], config->mac[5]);
+ } else {
+ hw->max_rx_queues = 1;
+ hw->max_tx_queues = 1;
+ }
+
+ eth_dev->data->nb_rx_queues = hw->max_rx_queues;
+ eth_dev->data->nb_tx_queues = hw->max_tx_queues;
+
+ PMD_INIT_LOG(DEBUG, "hw->max_rx_queues=%d hw->max_tx_queues=%d",
+ hw->max_rx_queues, hw->max_tx_queues);
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+ return 0;
+}
+
+static struct eth_driver rte_virtio_pmd = {
+ {
+ .name = "rte_virtio_pmd",
+ .id_table = pci_id_virtio_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ },
+ .eth_dev_init = eth_virtio_dev_init,
+ .dev_private_size = sizeof(struct virtio_adapter),
+};
+
+/*
+ * Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Registers itself as the [Poll Mode] Driver of PCI virtio devices.
+ * Returns 0 on success.
+ */
+static int
+rte_virtio_pmd_init(const char *name __rte_unused,
+ const char *param __rte_unused)
+{
+ if (rte_eal_iopl_init() != 0) {
+ PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
+ return -1;
+ }
+
+ rte_eth_driver_register(&rte_virtio_pmd);
+ return 0;
+}
+
+/*
+ * Only 1 queue is supported; there is no queue-release operation.
+ */
+static void
+virtio_dev_rx_queue_release(__rte_unused void *rxq)
+{
+}
+
+static void
+virtio_dev_tx_queue_release(__rte_unused void *txq)
+{
+}
+
+/*
+ * Configure virtio device
+ * It returns 0 on success.
+ */
+static int
+virtio_dev_configure(struct rte_eth_dev *dev)
+{
+ const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+ PMD_INIT_LOG(DEBUG, "configure");
+
+ if (rxmode->hw_ip_checksum) {
+ PMD_DRV_LOG(ERR, "HW IP checksum not supported");
+ return (-EINVAL);
+ }
+
+ return 0;
+}
+
+
+static int
+virtio_dev_start(struct rte_eth_dev *dev)
+{
+ uint16_t nb_queues, i;
+ uint16_t status;
+ struct virtio_hw *hw =
+ VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Tell the host we've noticed this device. */
+ vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+ /* Tell the host we know how to drive the device. */
+ vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+
+ virtio_dev_cq_start(dev);
+
+ /* Do final configuration before rx/tx engine starts */
+ virtio_dev_rxtx_start(dev);
+
+ /* Check VIRTIO_NET_F_STATUS for link status*/
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, status),
+ &status, sizeof(status));
+ if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
+ PMD_INIT_LOG(ERR, "Port: %d Link is DOWN",
+ dev->data->port_id);
+ return -EIO;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Port: %d Link is UP",
+ dev->data->port_id);
+ }
+ }
+ vtpci_reinit_complete(hw);
+
+ /* Notify the backend.
+ * Otherwise the tap backend might already have stopped its queue due to
+ * fullness, and the vhost backend would have no chance to be woken up.
+ */
+ nb_queues = dev->data->nb_rx_queues;
+ if (nb_queues > 1) {
+ if (virtio_set_multiple_queues(dev, nb_queues) != 0)
+ return -EINVAL;
+ }
+
+ PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
+
+ for (i = 0; i < nb_queues; i++)
+ virtqueue_notify(dev->data->rx_queues[i]);
+
+ PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
+
+ return 0;
+}
+
+static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
+{
+ struct rte_mbuf *buf;
+ int i, mbuf_num = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ PMD_INIT_LOG(DEBUG,
+ "Before freeing rxq[%d] used and unused buf", i);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
+
+ while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
+ dev->data->rx_queues[i])) != NULL) {
+ rte_pktmbuf_free(buf);
+ mbuf_num++;
+ }
+
+ PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
+ PMD_INIT_LOG(DEBUG,
+ "After freeing rxq[%d] used and unused buf", i);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ PMD_INIT_LOG(DEBUG,
+ "Before freeing txq[%d] used and unused bufs",
+ i);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
+
+ mbuf_num = 0;
+ while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
+ dev->data->tx_queues[i])) != NULL) {
+ rte_pktmbuf_free(buf);
+
+ mbuf_num++;
+ }
+
+ PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
+ PMD_INIT_LOG(DEBUG,
+ "After freeing txq[%d] used and unused buf", i);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
+ }
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void
+virtio_dev_stop(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw =
+ VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* reset the NIC */
+ vtpci_reset(hw);
+ virtio_dev_free_mbufs(dev);
+}
+
+static int
+virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
+{
+ struct rte_eth_link link, old;
+ uint16_t status;
+ struct virtio_hw *hw =
+ VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ memset(&link, 0, sizeof(link));
+ virtio_dev_atomic_read_link_status(dev, &link);
+ old = link;
+ link.link_duplex = FULL_DUPLEX;
+ link.link_speed = SPEED_10G;
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ PMD_INIT_LOG(DEBUG, "Get link status from hw");
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, status),
+ &status, sizeof(status));
+ if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
+ link.link_status = 0;
+ PMD_INIT_LOG(DEBUG, "Port %d is down",
+ dev->data->port_id);
+ } else {
+ link.link_status = 1;
+ PMD_INIT_LOG(DEBUG, "Port %d is up",
+ dev->data->port_id);
+ }
+ } else {
+ link.link_status = 1; /* Link up */
+ }
+ virtio_dev_atomic_write_link_status(dev, &link);
+ if (old.link_status == link.link_status)
+ return -1;
+ /*changed*/
+ return 0;
+}
+
+static void
+virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct virtio_hw *hw = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_info->driver_name = dev->driver->pci_drv.name;
+ dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
+ dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
+ dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
+ dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
+}
+
+/*
+ * It enables testpmd to collect per queue stats.
+ */
+static int
+virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
+__rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
+__rte_unused uint8_t is_rx)
+{
+ return 0;
+}
+
+static struct rte_driver rte_virtio_driver = {
+ .type = PMD_PDEV,
+ .init = rte_virtio_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_virtio_driver);
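virtio_negotiate_features() above reduces to plain bit arithmetic: start from VTNET_FEATURES, clear the bits the PMD refuses to negotiate, and keep only what the host also offers (vtpci_negotiate_features() is assumed here to take the intersection with the host bits). A small worked sketch with made-up bit positions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative bit values only, not the real VIRTIO_NET_F_* numbers */
	const uint32_t F_MAC = 1u << 0, F_STATUS = 1u << 1, F_CTRL_VLAN = 1u << 2;

	uint32_t wanted = F_MAC | F_STATUS | F_CTRL_VLAN; /* like VTNET_FEATURES */
	uint32_t mask = F_CTRL_VLAN;                      /* bits we refuse */
	uint32_t guest = wanted & ~mask;                  /* the driver's offer */
	uint32_t host = F_MAC | F_CTRL_VLAN;              /* the device's offer */
	uint32_t negotiated = guest & host;               /* assumed intersection */

	printf("negotiated = 0x%x (only F_MAC survives)\n", negotiated);
	return 0;
}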
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.h b/src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.h
new file mode 100755
index 00000000..1da3c625
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_virtio/virtio_ethdev.h
@@ -0,0 +1,132 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VIRTIO_ETHDEV_H_
+#define _VIRTIO_ETHDEV_H_
+
+#include <stdint.h>
+
+#include "virtio_pci.h"
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_10G 10000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define VIRTIO_MAX_RX_QUEUES 128
+#define VIRTIO_MAX_TX_QUEUES 128
+#define VIRTIO_MAX_MAC_ADDRS 1
+#define VIRTIO_MIN_RX_BUFSIZE 64
+#define VIRTIO_MAX_RX_PKTLEN 9728
+
+/* Features desired/implemented by this driver. */
+#define VTNET_FEATURES \
+ (VIRTIO_NET_F_MAC | \
+ VIRTIO_NET_F_STATUS | \
+ VIRTIO_NET_F_MQ | \
+ VIRTIO_NET_F_CTRL_VQ | \
+ VIRTIO_NET_F_CTRL_RX | \
+ VIRTIO_NET_F_CTRL_VLAN | \
+ VIRTIO_NET_F_CSUM | \
+ VIRTIO_NET_F_HOST_TSO4 | \
+ VIRTIO_NET_F_HOST_TSO6 | \
+ VIRTIO_NET_F_HOST_ECN | \
+ VIRTIO_NET_F_GUEST_CSUM | \
+ VIRTIO_NET_F_GUEST_TSO4 | \
+ VIRTIO_NET_F_GUEST_TSO6 | \
+ VIRTIO_NET_F_GUEST_ECN | \
+ VIRTIO_NET_F_MRG_RXBUF | \
+ VIRTIO_RING_F_INDIRECT_DESC)
+
+/*
+ * CQ function prototype
+ */
+void virtio_dev_cq_start(struct rte_eth_dev *dev);
+
+/*
+ * RX/TX function prototypes
+ */
+void virtio_dev_rxtx_start(struct rte_eth_dev *dev);
+
+int virtio_dev_queue_setup(struct rte_eth_dev *dev,
+ int queue_type,
+ uint16_t queue_idx,
+ uint8_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ struct virtqueue **pvq);
+
+int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+int virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+struct virtio_adapter {
+ struct virtio_hw hw;
+};
+
+#define VIRTIO_DEV_PRIVATE_TO_HW(adapter)\
+ (&((struct virtio_adapter *)adapter)->hw)
+
+/*
+ * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
+ * frames larger than 1514 bytes. We do not yet support software LRO
+ * via tcp_lro_rx().
+ */
+#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
+ VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
+
+
+#endif /* _VIRTIO_ETHDEV_H_ */
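
As a usage note on the macro just defined: VIRTIO_DEV_PRIVATE_TO_HW is how the driver recovers its virtio_hw state from the generic ethdev private data. A minimal sketch, assuming dev->data->dev_private was allocated as a struct virtio_adapter (example_dump_hw is a purely hypothetical helper, not part of the driver):

#include <stdio.h>
#include <rte_ethdev.h>
#include "virtio_ethdev.h"

/* Hypothetical callback: fetch the per-port virtio_hw and print a few fields. */
static void
example_dump_hw(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw =
                VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        printf("io_base=0x%x guest_features=0x%x max_rx_queues=%u\n",
               (unsigned)hw->io_base, (unsigned)hw->guest_features,
               (unsigned)hw->max_rx_queues);
}
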
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_logs.h b/src/dpdk_lib18/librte_pmd_virtio/virtio_logs.h
new file mode 100755
index 00000000..d6c33f7b
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_virtio/virtio_logs.h
@@ -0,0 +1,70 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VIRTIO_LOGS_H_
+#define _VIRTIO_LOGS_H_
+
+#include <rte_log.h>
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_INIT
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
+#define PMD_INIT_FUNC_TRACE() do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() rx: " fmt , __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() tx: " fmt , __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt , __func__, ## args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#endif /* _VIRTIO_LOGS_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_pci.c b/src/dpdk_lib18/librte_pmd_virtio/virtio_pci.c
new file mode 100755
index 00000000..ca9c7482
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_virtio/virtio_pci.c
@@ -0,0 +1,129 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdint.h>
+
+#include "virtio_pci.h"
+#include "virtio_logs.h"
+
+void
+vtpci_read_dev_config(struct virtio_hw *hw, uint64_t offset,
+ void *dst, int length)
+{
+ uint64_t off;
+ uint8_t *d;
+ int size;
+
+ off = VIRTIO_PCI_CONFIG(hw) + offset;
+ for (d = dst; length > 0; d += size, off += size, length -= size) {
+ if (length >= 4) {
+ size = 4;
+ *(uint32_t *)d = VIRTIO_READ_REG_4(hw, off);
+ } else if (length >= 2) {
+ size = 2;
+ *(uint16_t *)d = VIRTIO_READ_REG_2(hw, off);
+ } else {
+ size = 1;
+ *d = VIRTIO_READ_REG_1(hw, off);
+ }
+ }
+}
+
+void
+vtpci_write_dev_config(struct virtio_hw *hw, uint64_t offset,
+ void *src, int length)
+{
+ uint64_t off;
+ uint8_t *s;
+ int size;
+
+ off = VIRTIO_PCI_CONFIG(hw) + offset;
+ for (s = src; length > 0; s += size, off += size, length -= size) {
+ if (length >= 4) {
+ size = 4;
+ VIRTIO_WRITE_REG_4(hw, off, *(uint32_t *)s);
+ } else if (length >= 2) {
+ size = 2;
+ VIRTIO_WRITE_REG_2(hw, off, *(uint16_t *)s);
+ } else {
+ size = 1;
+ VIRTIO_WRITE_REG_1(hw, off, *s);
+ }
+ }
+}
+
+uint32_t
+vtpci_negotiate_features(struct virtio_hw *hw, uint32_t host_features)
+{
+ uint32_t features;
+ /*
+ * Limit negotiated features to what the driver, virtqueue, and
+ * host all support.
+ */
+ features = host_features & hw->guest_features;
+
+ VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_GUEST_FEATURES, features);
+ return features;
+}
+
+
+void
+vtpci_reset(struct virtio_hw *hw)
+{
+ /*
+ * Setting the status to RESET sets the host device to
+ * the original, uninitialized state.
+ */
+ vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ vtpci_get_status(hw);
+}
+
+void
+vtpci_reinit_complete(struct virtio_hw *hw)
+{
+ vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+}
+
+uint8_t
+vtpci_get_status(struct virtio_hw *hw)
+{
+ return VIRTIO_READ_REG_1(hw, VIRTIO_PCI_STATUS);
+}
+
+void
+vtpci_set_status(struct virtio_hw *hw, uint8_t status)
+{
+ if (status != VIRTIO_CONFIG_STATUS_RESET)
+ status = (uint8_t)(status | vtpci_get_status(hw));
+
+ VIRTIO_WRITE_REG_1(hw, VIRTIO_PCI_STATUS, status);
+}
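
A short usage sketch of the config accessors above: copying the MAC address out of the net-device-specific config area, assuming VIRTIO_NET_F_MAC was negotiated. Note that vtpci_read_dev_config() adds VIRTIO_PCI_CONFIG(hw) itself, so only the offset within struct virtio_net_config is passed (example_read_mac is a hypothetical helper):

#include <stddef.h>
#include "virtio_pci.h"

/* Copy the 6-byte MAC from the device-specific config space, if present. */
static void
example_read_mac(struct virtio_hw *hw, uint8_t mac[ETHER_ADDR_LEN])
{
        if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
                vtpci_read_dev_config(hw,
                        offsetof(struct virtio_net_config, mac),
                        mac, ETHER_ADDR_LEN);
}
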
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_pci.h b/src/dpdk_lib18/librte_pmd_virtio/virtio_pci.h
new file mode 100755
index 00000000..373f9dcb
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_virtio/virtio_pci.h
@@ -0,0 +1,266 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VIRTIO_PCI_H_
+#define _VIRTIO_PCI_H_
+
+#include <stdint.h>
+
+#ifdef __FreeBSD__
+#include <sys/types.h>
+#include <machine/cpufunc.h>
+#else
+#include <sys/io.h>
+#endif
+
+#include <rte_ethdev.h>
+
+struct virtqueue;
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_PCI_VENDORID 0x1AF4
+#define VIRTIO_PCI_DEVICEID_MIN 0x1000
+#define VIRTIO_PCI_DEVICEID_MAX 0x103F
+
+/* VirtIO ABI version, this must match exactly. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/*
+ * VirtIO Header, located in BAR 0.
+ */
+#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/
+#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */
+#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */
+#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */
+#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */
+#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */
+#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading
+ * also clears the register (8, RO) */
+/* Only if MSIX is enabled: */
+#define VIRTIO_MSI_CONFIG_VECTOR 20 /* configuration change vector (16, RW) */
+#define VIRTIO_MSI_QUEUE_VECTOR 22 /* vector for selected VQ notifications
+ (16, RW) */
+
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define VIRTIO_PCI_ISR_INTR 0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue. */
+#define VIRTIO_MSI_NO_VECTOR 0xFFFF
+
+/* VirtIO device IDs. */
+#define VIRTIO_ID_NETWORK 0x01
+#define VIRTIO_ID_BLOCK 0x02
+#define VIRTIO_ID_CONSOLE 0x03
+#define VIRTIO_ID_ENTROPY 0x04
+#define VIRTIO_ID_BALLOON 0x05
+#define VIRTIO_ID_IOMEMORY 0x06
+#define VIRTIO_ID_9P 0x09
+
+/* Status byte for guest to report progress. */
+#define VIRTIO_CONFIG_STATUS_RESET 0x00
+#define VIRTIO_CONFIG_STATUS_ACK 0x01
+#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
+#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
+#define VIRTIO_CONFIG_STATUS_FAILED 0x80
+
+/*
+ * Generate interrupt when the virtqueue ring is
+ * completely used, even if we've suppressed them.
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
+
+/*
+ * The guest should never negotiate this feature; it
+ * is used to detect faulty drivers.
+ */
+#define VIRTIO_F_BAD_FEATURE (1 << 30)
+
+/*
+ * Some VirtIO feature bits (currently bits 28 through 31) are
+ * reserved for the transport being used (eg. virtio_ring), the
+ * rest are per-device feature bits.
+ */
+#define VIRTIO_TRANSPORT_F_START 28
+#define VIRTIO_TRANSPORT_F_END 32
+
+/*
+ * Each virtqueue indirect descriptor list must be physically contiguous.
+ * To allow us to malloc(9) each list individually, limit the number
+ * supported to what will fit in one page. With 4KB pages, this is a limit
+ * of 256 descriptors. If there is ever a need for more, we can switch to
+ * contigmalloc(9) for the larger allocations, similar to what
+ * bus_dmamem_alloc(9) does.
+ *
+ * Note the sizeof(struct vring_desc) is 16 bytes.
+ */
+#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
+
+/* The feature bitmap for virtio net */
+#define VIRTIO_NET_F_CSUM 0x00001 /* Host handles pkts w/ partial csum */
+#define VIRTIO_NET_F_GUEST_CSUM 0x00002 /* Guest handles pkts w/ partial csum*/
+#define VIRTIO_NET_F_MAC 0x00020 /* Host has given MAC address. */
+#define VIRTIO_NET_F_GSO 0x00040 /* Host handles pkts w/ any GSO type */
+#define VIRTIO_NET_F_GUEST_TSO4 0x00080 /* Guest can handle TSOv4 in. */
+#define VIRTIO_NET_F_GUEST_TSO6 0x00100 /* Guest can handle TSOv6 in. */
+#define VIRTIO_NET_F_GUEST_ECN 0x00200 /* Guest can handle TSO[6] w/ ECN in.*/
+#define VIRTIO_NET_F_GUEST_UFO 0x00400 /* Guest can handle UFO in. */
+#define VIRTIO_NET_F_HOST_TSO4 0x00800 /* Host can handle TSOv4 in. */
+#define VIRTIO_NET_F_HOST_TSO6 0x01000 /* Host can handle TSOv6 in. */
+#define VIRTIO_NET_F_HOST_ECN 0x02000 /* Host can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_HOST_UFO 0x04000 /* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF 0x08000 /* Host can merge receive buffers. */
+#define VIRTIO_NET_F_STATUS 0x10000 /* virtio_net_config.status available*/
+#define VIRTIO_NET_F_CTRL_VQ 0x20000 /* Control channel available */
+#define VIRTIO_NET_F_CTRL_RX 0x40000 /* Control channel RX mode support */
+#define VIRTIO_NET_F_CTRL_VLAN 0x80000 /* Control channel VLAN filtering */
+#define VIRTIO_NET_F_CTRL_RX_EXTRA 0x100000 /* Extra RX mode control support */
+#define VIRTIO_RING_F_INDIRECT_DESC 0x10000000 /* Support for indirect buffer descriptors. */
+/* The guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ * The host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX 0x20000000
+
+#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
+
+/*
+ * Maximum number of virtqueues per device.
+ */
+#define VIRTIO_MAX_VIRTQUEUES 8
+
+struct virtio_hw {
+ struct virtqueue *cvq;
+ uint32_t io_base;
+ uint32_t guest_features;
+ uint32_t max_tx_queues;
+ uint32_t max_rx_queues;
+ uint16_t vtnet_hdr_size;
+ uint8_t use_msix;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+};
+
+/*
+ * This structure is only used to read the net-device-specific
+ * config space; it simply mirrors that layout.
+ */
+struct virtio_net_config {
+ /* The config defining mac address (if VIRTIO_NET_F_MAC) */
+ uint8_t mac[ETHER_ADDR_LEN];
+ /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
+ uint16_t status;
+ uint16_t max_virtqueue_pairs;
+} __attribute__((packed));
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space.
+ */
+#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)
+
+/*
+ * How many bits to shift physical queue address written to QUEUE_PFN.
+ * 12 is historical, and due to x86 page size.
+ */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
+
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+#ifdef __FreeBSD__
+
+static inline void
+outb_p(unsigned char data, unsigned int port)
+{
+
+ outb(port, (u_char)data);
+}
+
+static inline void
+outw_p(unsigned short data, unsigned int port)
+{
+ outw(port, (u_short)data);
+}
+
+static inline void
+outl_p(unsigned int data, unsigned int port)
+{
+ outl(port, (u_int)data);
+}
+#endif
+
+#define VIRTIO_PCI_REG_ADDR(hw, reg) \
+ (unsigned short)((hw)->io_base + (reg))
+
+#define VIRTIO_READ_REG_1(hw, reg) \
+ inb((VIRTIO_PCI_REG_ADDR((hw), (reg))))
+#define VIRTIO_WRITE_REG_1(hw, reg, value) \
+ outb_p((unsigned char)(value), (VIRTIO_PCI_REG_ADDR((hw), (reg))))
+
+#define VIRTIO_READ_REG_2(hw, reg) \
+ inw((VIRTIO_PCI_REG_ADDR((hw), (reg))))
+#define VIRTIO_WRITE_REG_2(hw, reg, value) \
+ outw_p((unsigned short)(value), (VIRTIO_PCI_REG_ADDR((hw), (reg))))
+
+#define VIRTIO_READ_REG_4(hw, reg) \
+ inl((VIRTIO_PCI_REG_ADDR((hw), (reg))))
+#define VIRTIO_WRITE_REG_4(hw, reg, value) \
+ outl_p((unsigned int)(value), (VIRTIO_PCI_REG_ADDR((hw), (reg))))
+
+static inline int
+vtpci_with_feature(struct virtio_hw *hw, uint32_t feature)
+{
+ return (hw->guest_features & feature) != 0;
+}
+
+/*
+ * Function declaration from virtio_pci.c
+ */
+void vtpci_reset(struct virtio_hw *);
+
+void vtpci_reinit_complete(struct virtio_hw *);
+
+uint8_t vtpci_get_status(struct virtio_hw *);
+
+void vtpci_set_status(struct virtio_hw *, uint8_t);
+
+uint32_t vtpci_negotiate_features(struct virtio_hw *, uint32_t);
+
+void vtpci_write_dev_config(struct virtio_hw *, uint64_t, void *, int);
+
+void vtpci_read_dev_config(struct virtio_hw *, uint64_t, void *, int);
+
+#endif /* _VIRTIO_PCI_H_ */
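
Taken together, the status and feature helpers declared above implement the usual virtio handshake. A minimal sketch of that sequence follows; it is illustrative only (example_init_handshake is hypothetical, error handling is omitted, and the real driver also folds VTNET_FEATURES into hw->guest_features and sets up its virtqueues before reporting DRIVER_OK):

#include "virtio_pci.h"

/* Reset, acknowledge, declare the driver, negotiate features, then DRIVER_OK. */
static void
example_init_handshake(struct virtio_hw *hw)
{
        uint32_t host_features;

        vtpci_reset(hw);
        vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
        vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);

        host_features = VIRTIO_READ_REG_4(hw, VIRTIO_PCI_HOST_FEATURES);
        hw->guest_features = vtpci_negotiate_features(hw, host_features);

        /* ... virtqueue setup would happen here ... */

        vtpci_reinit_complete(hw);      /* sets VIRTIO_CONFIG_STATUS_DRIVER_OK */
}
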
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_ring.h b/src/dpdk_lib18/librte_pmd_virtio/virtio_ring.h
new file mode 100755
index 00000000..a16c4991
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_virtio/virtio_ring.h
@@ -0,0 +1,163 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VIRTIO_RING_H_
+#define _VIRTIO_RING_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me
+ * when you add a buffer. It's unreliable, so it's simply an
+ * optimization. Guest will still kick if it's out of buffers. */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't
+ * interrupt me when you consume a buffer. It's unreliable, so it's
+ * simply an optimization. */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* VirtIO ring descriptors: 16 bytes.
+ * These can chain together via "next". */
+struct vring_desc {
+ uint64_t addr; /* Address (guest-physical). */
+ uint32_t len; /* Length. */
+ uint16_t flags; /* The flags as indicated above. */
+ uint16_t next; /* We chain unused descriptors via this. */
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[0];
+};
+
+/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was written to. */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ uint16_t idx;
+ struct vring_used_elem ring[0];
+};
+
+struct vring {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+};
+
+/* The standard layout for the ring is a contiguous chunk of memory which
+ * looks like this. We assume num is a power of 2.
+ *
+ * struct vring {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * __u16 avail_flags;
+ * __u16 avail_idx;
+ * __u16 available[num];
+ * __u16 used_event_idx;
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * __u16 used_flags;
+ * __u16 used_idx;
+ * struct vring_used_elem used[num];
+ * __u16 avail_event_idx;
+ * };
+ *
+ * NOTE: for VirtIO PCI, align is 4096.
+ */
+
+/*
+ * We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility.
+ */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+static inline int
+vring_size(unsigned int num, unsigned long align)
+{
+ int size;
+
+ size = num * sizeof(struct vring_desc);
+ size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct vring_used) +
+ (num * sizeof(struct vring_used_elem));
+ return size;
+}
+
+static inline void
+vring_init(struct vring *vr, unsigned int num, uint8_t *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = (struct vring_desc *) p;
+ vr->avail = (struct vring_avail *) (p +
+ num * sizeof(struct vring_desc));
+ vr->used = (void *)
+ RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
+}
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ * Assuming a given event_idx value from the other side, if we have
+ * just incremented index from old to new_idx, should we trigger an
+ * event?
+ */
+static inline int
+vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
+
+#endif /* _VIRTIO_RING_H_ */
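
As a worked example of the layout described above, vring_size(256, 4096) evaluates to: 256 * 16 = 4096 bytes of descriptors, plus 4 + 256 * 2 = 516 bytes for the avail ring (4612 so far), rounded up to 8192 by the 4096-byte alignment, plus 4 + 256 * 8 = 2052 bytes for the used ring, giving 10244 bytes in total.
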
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtio_rxtx.c b/src/dpdk_lib18/librte_pmd_virtio/virtio_rxtx.c
new file mode 100755
index 00000000..c013f976
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_virtio/virtio_rxtx.c
@@ -0,0 +1,747 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+
+#include "virtio_logs.h"
+#include "virtio_ethdev.h"
+#include "virtqueue.h"
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
+#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
+#else
+#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
+#endif
+
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+ struct vring_desc *dp, *dp_tail;
+ struct vq_desc_extra *dxp;
+ uint16_t desc_idx_last = desc_idx;
+
+ dp = &vq->vq_ring.desc[desc_idx];
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+ if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+ while (dp->flags & VRING_DESC_F_NEXT) {
+ desc_idx_last = dp->next;
+ dp = &vq->vq_ring.desc[dp->next];
+ }
+ }
+ dxp->ndescs = 0;
+
+ /*
+ * We must append the existing free chain, if any, to the end of
+ * the newly freed chain. If the virtqueue was completely used, then
+ * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
+ */
+ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+ vq->vq_desc_head_idx = desc_idx;
+ } else {
+ dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+ dp_tail->next = desc_idx;
+ }
+
+ vq->vq_desc_tail_idx = desc_idx_last;
+ dp->next = VQ_RING_DESC_CHAIN_END;
+}
+
+static uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
+ uint32_t *len, uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_mbuf *cookie;
+ uint16_t used_idx, desc_idx;
+ uint16_t i;
+
+ /* Caller does the check */
+ for (i = 0; i < num ; i++) {
+ used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t) uep->id;
+ len[i] = uep->len;
+ cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
+
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ rte_prefetch0(cookie);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+ rx_pkts[i] = cookie;
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+ vq->vq_descx[desc_idx].cookie = NULL;
+ }
+
+ return i;
+}
+
+static void
+virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
+{
+ struct vring_used_elem *uep;
+ uint16_t used_idx, desc_idx;
+
+ used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t) uep->id;
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+}
+
+
+static inline int
+virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
+{
+ struct vq_desc_extra *dxp;
+ struct virtio_hw *hw = vq->hw;
+ struct vring_desc *start_dp;
+ uint16_t needed = 1;
+ uint16_t head_idx, idx;
+
+ if (unlikely(vq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(vq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+
+ head_idx = vq->vq_desc_head_idx;
+ if (unlikely(head_idx >= vq->vq_nentries))
+ return -EFAULT;
+
+ idx = head_idx;
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookie;
+ dxp->ndescs = needed;
+
+ start_dp = vq->vq_ring.desc;
+ start_dp[idx].addr =
+ (uint64_t)(cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM
+ - hw->vtnet_hdr_size);
+ start_dp[idx].len =
+ cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+ start_dp[idx].flags = VRING_DESC_F_WRITE;
+ idx = start_dp[idx].next;
+ vq->vq_desc_head_idx = idx;
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = idx;
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+ vq_update_avail_ring(vq, head_idx);
+
+ return 0;
+}
+
+static int
+virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
+{
+ struct vq_desc_extra *dxp;
+ struct vring_desc *start_dp;
+ uint16_t seg_num = cookie->nb_segs;
+ uint16_t needed = 1 + seg_num;
+ uint16_t head_idx, idx;
+ uint16_t head_size = txvq->hw->vtnet_hdr_size;
+
+ if (unlikely(txvq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(txvq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+ head_idx = txvq->vq_desc_head_idx;
+ if (unlikely(head_idx >= txvq->vq_nentries))
+ return -EFAULT;
+
+ idx = head_idx;
+ dxp = &txvq->vq_descx[idx];
+ if (dxp->cookie != NULL)
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = (void *)cookie;
+ dxp->ndescs = needed;
+
+ start_dp = txvq->vq_ring.desc;
+ start_dp[idx].addr =
+ txvq->virtio_net_hdr_mem + idx * head_size;
+ start_dp[idx].len = (uint32_t)head_size;
+ start_dp[idx].flags = VRING_DESC_F_NEXT;
+
+ for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
+ idx = start_dp[idx].next;
+ start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
+ start_dp[idx].len = cookie->data_len;
+ start_dp[idx].flags = VRING_DESC_F_NEXT;
+ cookie = cookie->next;
+ }
+
+ start_dp[idx].flags &= ~VRING_DESC_F_NEXT;
+ idx = start_dp[idx].next;
+ txvq->vq_desc_head_idx = idx;
+ if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ txvq->vq_desc_tail_idx = idx;
+ txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+ vq_update_avail_ring(txvq, head_idx);
+
+ return 0;
+}
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+
+ return m;
+}
+
+static void
+virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
+{
+ struct rte_mbuf *m;
+ int i, nbufs, error, size = vq->vq_nentries;
+ struct vring *vr = &vq->vq_ring;
+ uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /*
+ * Reinitialise since virtio port might have been stopped and restarted
+ */
+ memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
+ vring_init(vr, size, ring_mem, vq->vq_alignment);
+ vq->vq_used_cons_idx = 0;
+ vq->vq_desc_head_idx = 0;
+ vq->vq_avail_idx = 0;
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+ memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
+
+ /* Chain all the descriptors in the ring with an END */
+ for (i = 0; i < size - 1; i++)
+ vr->desc[i].next = (uint16_t)(i + 1);
+ vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+
+ /*
+ * Disable the device (host) from interrupting the guest
+ */
+ virtqueue_disable_intr(vq);
+
+ /* Only rx virtqueue needs mbufs to be allocated at initialization */
+ if (queue_type == VTNET_RQ) {
+ if (vq->mpool == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot allocate initial mbufs for rx virtqueue");
+
+ /* Allocate blank mbufs for each rx descriptor */
+ nbufs = 0;
+ error = ENOSPC;
+ while (!virtqueue_full(vq)) {
+ m = rte_rxmbuf_alloc(vq->mpool);
+ if (m == NULL)
+ break;
+
+ /******************************************
+ * Enqueue allocated buffers *
+ *******************************************/
+ error = virtqueue_enqueue_recv_refill(vq, m);
+
+ if (error) {
+ rte_pktmbuf_free(m);
+ break;
+ }
+ nbufs++;
+ }
+
+ vq_update_avail_idx(vq);
+
+ PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
+
+ VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL,
+ vq->vq_queue_index);
+ VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
+ vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
+ } else if (queue_type == VTNET_TQ) {
+ VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL,
+ vq->vq_queue_index);
+ VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
+ vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
+ } else {
+ VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL,
+ vq->vq_queue_index);
+ VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
+ vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
+ }
+}
+
+void
+virtio_dev_cq_start(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw
+ = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->cvq) {
+ virtio_dev_vring_start(hw->cvq, VTNET_CQ);
+ VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
+ }
+}
+
+void
+virtio_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+ /*
+ * Start receive and transmit vrings
+ * - Setup vring structure for all queues
+ * - Initialize descriptor for the rx vring
+ * - Allocate blank mbufs for each rx descriptor
+ *
+ */
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Start rx vring. */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ virtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
+ }
+
+ /* Start tx vring. */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ virtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
+ }
+}
+
+int
+virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+ struct virtqueue *vq;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
+ nb_desc, socket_id, &vq);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "tvq initialization failed");
+ return ret;
+ }
+
+ /* Use the mempool provided by the caller for rx mbuf allocation */
+ vq->mpool = mp;
+
+ dev->data->rx_queues[queue_idx] = vq;
+ return 0;
+}
+
+/*
+ * struct rte_eth_dev *dev: Used to update dev
+ * uint16_t nb_desc: Defaults to values read from config space
+ * unsigned int socket_id: Used to allocate memzone
+ * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
+ * uint16_t queue_idx: Just used as an index in dev txq list
+ */
+int
+virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+ struct virtqueue *vq;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS)
+ != ETH_TXQ_FLAGS_NOOFFLOADS) {
+ PMD_INIT_LOG(ERR, "TX checksum offload not supported\n");
+ return -EINVAL;
+ }
+
+ ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
+ nb_desc, socket_id, &vq);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "rvq initialization failed");
+ return ret;
+ }
+
+ dev->data->tx_queues[queue_idx] = vq;
+ return 0;
+}
+
+static void
+virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
+{
+ int error;
+ /*
+ * Requeue the discarded mbuf. This should always be
+ * successful since it was just dequeued.
+ */
+ error = virtqueue_enqueue_recv_refill(vq, m);
+ if (unlikely(error)) {
+ RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+ rte_pktmbuf_free(m);
+ }
+}
+
+#define VIRTIO_MBUF_BURST_SZ 64
+#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
+uint16_t
+virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct virtqueue *rxvq = rx_queue;
+ struct rte_mbuf *rxm, *new_mbuf;
+ uint16_t nb_used, num, nb_rx = 0;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ int error;
+ uint32_t i, nb_enqueued = 0;
+ const uint32_t hdr_size = sizeof(struct virtio_net_hdr);
+
+ nb_used = VIRTQUEUE_NUSED(rxvq);
+
+ rmb();
+
+ num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
+ num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
+ if (likely(num > DESC_PER_CACHELINE))
+ num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+
+ if (num == 0)
+ return 0;
+
+ num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
+ PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
+ for (i = 0; i < num ; i++) {
+ rxm = rcv_pkts[i];
+
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf(rxvq, rxm);
+ rxvq->errors++;
+ continue;
+ }
+
+ rxm->port = rxvq->port_id;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
+
+ rx_pkts[nb_rx++] = rxm;
+ rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len;
+ }
+
+ rxvq->packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+ error = ENOSPC;
+ while (likely(!virtqueue_full(rxvq))) {
+ new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
+ if (unlikely(new_mbuf == NULL)) {
+ struct rte_eth_dev *dev
+ = &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(new_mbuf);
+ break;
+ }
+ nb_enqueued++;
+ }
+
+ if (likely(nb_enqueued)) {
+ if (unlikely(virtqueue_kick_prepare(rxvq))) {
+ virtqueue_notify(rxvq);
+ PMD_RX_LOG(DEBUG, "Notified\n");
+ }
+ }
+
+ vq_update_avail_idx(rxvq);
+
+ return nb_rx;
+}
+
+uint16_t
+virtio_recv_mergeable_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *rxvq = rx_queue;
+ struct rte_mbuf *rxm, *new_mbuf;
+ uint16_t nb_used, num, nb_rx = 0;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *prev;
+ int error;
+ uint32_t i = 0, nb_enqueued = 0;
+ uint32_t seg_num = 0;
+ uint16_t extra_idx = 0;
+ uint32_t seg_res = 0;
+ const uint32_t hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+
+ nb_used = VIRTQUEUE_NUSED(rxvq);
+
+ rmb();
+
+ if (nb_used == 0)
+ return 0;
+
+ PMD_RX_LOG(DEBUG, "used:%d\n", nb_used);
+
+ while (i < nb_used) {
+ struct virtio_net_hdr_mrg_rxbuf *header;
+
+ if (nb_rx == nb_pkts)
+ break;
+
+ num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1);
+ if (num != 1)
+ continue;
+
+ i++;
+
+ PMD_RX_LOG(DEBUG, "dequeue:%d\n", num);
+ PMD_RX_LOG(DEBUG, "packet len:%d\n", len[0]);
+
+ rxm = rcv_pkts[0];
+
+ if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop\n");
+ nb_enqueued++;
+ virtio_discard_rxbuf(rxvq, rxm);
+ rxvq->errors++;
+ continue;
+ }
+
+ header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +
+ RTE_PKTMBUF_HEADROOM - hdr_size);
+ seg_num = header->num_buffers;
+
+ if (seg_num == 0)
+ seg_num = 1;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->nb_segs = seg_num;
+ rxm->next = NULL;
+ rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
+ rxm->data_len = (uint16_t)(len[0] - hdr_size);
+
+ rxm->port = rxvq->port_id;
+ rx_pkts[nb_rx] = rxm;
+ prev = rxm;
+
+ seg_res = seg_num - 1;
+
+ while (seg_res != 0) {
+ /*
+ * Get extra segments for current uncompleted packet.
+ */
+ uint32_t rcv_cnt =
+ RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
+ if (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) {
+ uint32_t rx_num =
+ virtqueue_dequeue_burst_rx(rxvq,
+ rcv_pkts, len, rcv_cnt);
+ i += rx_num;
+ rcv_cnt = rx_num;
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.\n");
+ nb_enqueued++;
+ virtio_discard_rxbuf(rxvq, rxm);
+ rxvq->errors++;
+ break;
+ }
+
+ extra_idx = 0;
+
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->next = NULL;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
+
+ if (prev)
+ prev->next = rxm;
+
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += rxm->pkt_len;
+ extra_idx++;
+ };
+ seg_res -= rcv_cnt;
+ }
+
+ VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
+ rx_pkts[nb_rx]->data_len);
+
+ rxvq->bytes += rx_pkts[nb_rx]->pkt_len;
+ nb_rx++;
+ }
+
+ rxvq->packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+ error = ENOSPC;
+ while (likely(!virtqueue_full(rxvq))) {
+ new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
+ if (unlikely(new_mbuf == NULL)) {
+ struct rte_eth_dev *dev
+ = &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(new_mbuf);
+ break;
+ }
+ nb_enqueued++;
+ }
+
+ if (likely(nb_enqueued)) {
+ if (unlikely(virtqueue_kick_prepare(rxvq))) {
+ virtqueue_notify(rxvq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ vq_update_avail_idx(rxvq);
+
+ return nb_rx;
+}
+
+uint16_t
+virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct virtqueue *txvq = tx_queue;
+ struct rte_mbuf *txm;
+ uint16_t nb_used, nb_tx, num;
+ int error;
+
+ nb_tx = 0;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+
+ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+ nb_used = VIRTQUEUE_NUSED(txvq);
+
+ rmb();
+
+ num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
+
+ while (nb_tx < nb_pkts) {
+ /* Need one more descriptor for virtio header. */
+ int need = tx_pkts[nb_tx]->nb_segs - txvq->vq_free_cnt + 1;
+ int deq_cnt = RTE_MIN(need, (int)num);
+
+ num -= (deq_cnt > 0) ? deq_cnt : 0;
+ while (deq_cnt > 0) {
+ virtqueue_dequeue_pkt_tx(txvq);
+ deq_cnt--;
+ }
+
+ need = (int)tx_pkts[nb_tx]->nb_segs - txvq->vq_free_cnt + 1;
+ /*
+ * A zero or negative value indicates there are enough free
+ * descriptors to transmit this packet.
+ */
+ if (likely(need <= 0)) {
+ txm = tx_pkts[nb_tx];
+ /* Enqueue Packet buffers */
+ error = virtqueue_enqueue_xmit(txvq, txm);
+ if (unlikely(error)) {
+ if (error == ENOSPC)
+ PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0");
+ else if (error == EMSGSIZE)
+ PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1");
+ else
+ PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d", error);
+ break;
+ }
+ nb_tx++;
+ txvq->bytes += txm->pkt_len;
+ } else {
+ PMD_TX_LOG(ERR, "No free tx descriptors to transmit");
+ break;
+ }
+ }
+ vq_update_avail_idx(txvq);
+
+ txvq->packets += nb_tx;
+
+ if (unlikely(virtqueue_kick_prepare(txvq))) {
+ virtqueue_notify(txvq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
+
+ return nb_tx;
+}
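
The burst functions above are not called directly by applications; they are installed as the port's rx_pkt_burst/tx_pkt_burst hooks and reached through the generic DPDK burst API. A minimal forwarding-loop sketch under that assumption (example_fwd_loop is hypothetical; the single port and queue 0 are arbitrary choices):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define EXAMPLE_BURST 32

/* Poll queue 0 of one port and send everything back out of the same port. */
static void
example_fwd_loop(uint8_t port)
{
        struct rte_mbuf *pkts[EXAMPLE_BURST];
        uint16_t nb, sent;

        for (;;) {
                nb = rte_eth_rx_burst(port, 0, pkts, EXAMPLE_BURST);
                if (nb == 0)
                        continue;
                sent = rte_eth_tx_burst(port, 0, pkts, nb);
                while (sent < nb)       /* free what could not be queued */
                        rte_pktmbuf_free(pkts[sent++]);
        }
}
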
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtqueue.c b/src/dpdk_lib18/librte_pmd_virtio/virtqueue.c
new file mode 100755
index 00000000..8a3005fb
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_virtio/virtqueue.c
@@ -0,0 +1,70 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+#include "virtqueue.h"
+#include "virtio_logs.h"
+#include "virtio_pci.h"
+
+void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+ /*
+ * Set VRING_AVAIL_F_NO_INTERRUPT to hint host
+ * not to interrupt when it consumes packets
+ * Note: this is only considered a hint to the host
+ */
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+/*
+ * Two types of mbuf to be cleaned:
+ * 1) mbuf that has been consumed by backend but not used by virtio.
+ * 2) mbuf that hasn't been consumed by the backend.
+ */
+struct rte_mbuf *
+virtqueue_detatch_unused(struct virtqueue *vq)
+{
+ struct rte_mbuf *cookie;
+ int idx;
+
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
+ vq->vq_descx[idx].cookie = NULL;
+ return cookie;
+ }
+ }
+ return NULL;
+}
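
A sketch of how a stop/close path would typically use virtqueue_detatch_unused() to drain mbufs still referenced by a queue (example_drain_queue is a hypothetical helper):

#include <rte_mbuf.h>
#include "virtqueue.h"

/* Detach and free every mbuf still held by the queue's descriptors. */
static void
example_drain_queue(struct virtqueue *vq)
{
        struct rte_mbuf *m;

        while ((m = virtqueue_detatch_unused(vq)) != NULL)
                rte_pktmbuf_free(m);
}
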
diff --git a/src/dpdk_lib18/librte_pmd_virtio/virtqueue.h b/src/dpdk_lib18/librte_pmd_virtio/virtqueue.h
new file mode 100755
index 00000000..fdee0547
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_virtio/virtqueue.h
@@ -0,0 +1,283 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VIRTQUEUE_H_
+#define _VIRTQUEUE_H_
+
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+#include "virtio_logs.h"
+
+#define mb() rte_mb()
+#define wmb() rte_wmb()
+#define rmb() rte_rmb()
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while(0)
+#endif
+
+#define VIRTQUEUE_MAX_NAME_SZ 32
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+ (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
+
+#define VTNET_SQ_RQ_QUEUE_IDX 0
+#define VTNET_SQ_TQ_QUEUE_IDX 1
+#define VTNET_SQ_CQ_QUEUE_IDX 2
+
+enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
+/**
+ * The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+
+/**
+ * Control the RX mode, ie. promiscuous, allmulti, etc...
+ * All commands require an "out" sg entry containing a 1 byte
+ * state value, zero = disable, non-zero = enable. Commands
+ * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
+ * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
+ */
+#define VIRTIO_NET_CTRL_RX 0
+#define VIRTIO_NET_CTRL_RX_PROMISC 0
+#define VIRTIO_NET_CTRL_RX_ALLMULTI 1
+#define VIRTIO_NET_CTRL_RX_ALLUNI 2
+#define VIRTIO_NET_CTRL_RX_NOMULTI 3
+#define VIRTIO_NET_CTRL_RX_NOUNI 4
+#define VIRTIO_NET_CTRL_RX_NOBCAST 5
+
+/**
+ * Control VLAN filtering
+ *
+ * The VLAN filter table is controlled via a simple ADD/DEL interface.
+ * VLAN IDs not added may be filtered by the hypervisor. Del is the
+ * opposite of add. Both commands expect an out entry containing a 2
+ * byte VLAN ID. VLAN filtering is available with the
+ * VIRTIO_NET_F_CTRL_VLAN feature bit.
+ */
+#define VIRTIO_NET_CTRL_VLAN 2
+#define VIRTIO_NET_CTRL_VLAN_ADD 0
+#define VIRTIO_NET_CTRL_VLAN_DEL 1
+
+struct virtio_net_ctrl_hdr {
+ uint8_t class;
+ uint8_t cmd;
+} __attribute__((packed));
+
+typedef uint8_t virtio_net_ctrl_ack;
+
+#define VIRTIO_NET_OK 0
+#define VIRTIO_NET_ERR 1
+
+#define VIRTIO_MAX_CTRL_DATA 128
+
+struct virtio_pmd_ctrl {
+ struct virtio_net_ctrl_hdr hdr;
+ virtio_net_ctrl_ack status;
+ uint8_t data[VIRTIO_MAX_CTRL_DATA];
+};
+
+struct virtqueue {
+ struct virtio_hw *hw; /**< virtio_hw structure pointer. */
+ const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
+ const struct rte_memzone *virtio_net_hdr_mz; /**< memzone to populate hdr. */
+ struct rte_mempool *mpool; /**< mempool for mbuf allocation */
+ uint16_t queue_id; /**< DPDK queue index. */
+ uint8_t port_id; /**< Device port identifier. */
+
+ void *vq_ring_virt_mem; /**< linear address of vring*/
+ int vq_alignment;
+ int vq_ring_size;
+ phys_addr_t vq_ring_mem; /**< physical address of vring */
+
+ struct vring vq_ring; /**< vring keeping desc, used and avail */
+ uint16_t vq_free_cnt; /**< num of desc available */
+ uint16_t vq_nentries; /**< vring desc numbers */
+ uint16_t vq_queue_index; /**< PCI queue index */
+ /**
+ * Head of the free chain in the descriptor table. If
+ * there are no free descriptors, this will be set to
+ * VQ_RING_DESC_CHAIN_END.
+ */
+ uint16_t vq_desc_head_idx;
+ uint16_t vq_desc_tail_idx;
+ /**
+ * Last consumed descriptor in the used table,
+ * trails vq_ring.used->idx.
+ */
+ uint16_t vq_used_cons_idx;
+ uint16_t vq_avail_idx;
+ phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+
+ /* Statistics */
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+
+ struct vq_desc_extra {
+ void *cookie;
+ uint16_t ndescs;
+ } vq_descx[0];
+};
+
+/* If multiqueue is provided by the host, then we support it. */
+#ifndef VIRTIO_NET_F_MQ
+/* Device supports Receive Flow Steering */
+#define VIRTIO_NET_F_MQ 0x400000
+#define VIRTIO_NET_CTRL_MQ 4
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
+#endif
+
+/**
+ * This is the first element of the scatter-gather list. If you don't
+ * specify GSO or CSUM features, you can simply ignore the header.
+ */
+struct virtio_net_hdr {
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /**< Use csum_start,csum_offset*/
+ uint8_t flags;
+#define VIRTIO_NET_HDR_GSO_NONE 0 /**< Not a GSO frame */
+#define VIRTIO_NET_HDR_GSO_TCPV4 1 /**< GSO frame, IPv4 TCP (TSO) */
+#define VIRTIO_NET_HDR_GSO_UDP 3 /**< GSO frame, IPv4 UDP (UFO) */
+#define VIRTIO_NET_HDR_GSO_TCPV6 4 /**< GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_ECN 0x80 /**< TCP has ECN set */
+ uint8_t gso_type;
+ uint16_t hdr_len; /**< Ethernet + IP + tcp/udp hdrs */
+ uint16_t gso_size; /**< Bytes to append to hdr_len per frame */
+ uint16_t csum_start; /**< Position to start checksumming from */
+ uint16_t csum_offset; /**< Offset after that to place checksum */
+};
+
+/**
+ * This is the version of the header to use when the MRG_RXBUF
+ * feature has been negotiated.
+ */
+struct virtio_net_hdr_mrg_rxbuf {
+ struct virtio_net_hdr hdr;
+ uint16_t num_buffers; /**< Number of merged rx buffers */
+};
+
+/**
+ * Tell the backend not to interrupt us.
+ */
+void virtqueue_disable_intr(struct virtqueue *vq);
+/**
+ * Dump virtqueue internal structures, for debugging purposes only.
+ */
+void virtqueue_dump(struct virtqueue *vq);
+/**
+ * Get all mbufs to be freed.
+ */
+struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
+
+static inline int
+virtqueue_full(const struct virtqueue *vq)
+{
+ return vq->vq_free_cnt == 0;
+}
+
+#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+
+static inline void
+vq_update_avail_idx(struct virtqueue *vq)
+{
+ rte_compiler_barrier();
+ vq->vq_ring.avail->idx = vq->vq_avail_idx;
+}
+
+static inline void
+vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
+{
+ uint16_t avail_idx;
+ /*
+ * Place the head of the descriptor chain into the next slot and make
+ * it usable to the host. The chain is made available now rather than
+ * deferring to virtqueue_notify() in the hopes that if the host is
+ * currently running on another CPU, we can keep it processing the new
+ * descriptor.
+ */
+ avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
+ vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+ vq->vq_avail_idx++;
+}
+
+static inline int
+virtqueue_kick_prepare(struct virtqueue *vq)
+{
+ return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+}
+
+static inline void
+virtqueue_notify(struct virtqueue *vq)
+{
+ /*
+ * Ensure updated avail->idx is visible to host. mb() necessary?
+ * For virtio on IA, the notification is done through an I/O port
+ * operation, which is itself a serializing instruction.
+ */
+ VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_NOTIFY, vq->vq_queue_index);
+}
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
+#define VIRTQUEUE_DUMP(vq) do { \
+ uint16_t used_idx, nused; \
+ used_idx = (vq)->vq_ring.used->idx; \
+ nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ PMD_INIT_LOG(DEBUG, \
+ "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
+ " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
+ " avail.flags=0x%x; used.flags=0x%x", \
+ (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
+ (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
+ (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
+ (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+} while (0)
+#else
+#define VIRTQUEUE_DUMP(vq) do { } while (0)
+#endif
+
+#endif /* _VIRTQUEUE_H_ */
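
One detail worth noting about the macros above: both vq_ring.used->idx and vq_used_cons_idx are free-running 16-bit counters, so VIRTQUEUE_NUSED() stays correct across wrap-around. For example, with used->idx equal to 3 and vq_used_cons_idx equal to 65534, the subtraction truncates to (uint16_t)(3 - 65534) = 5 pending used entries.
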
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/Makefile b/src/dpdk_lib18/librte_pmd_vmxnet3/Makefile
new file mode 100755
index 00000000..6872c747
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/Makefile
@@ -0,0 +1,80 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_vmxnet3_uio.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+ifeq ($(CC), icc)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+
+else ifeq ($(CC), clang)
+#
+# CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+else
+#
+# CFLAGS for gcc
+#
+ifneq ($(shell test $(GCC_MAJOR_VERSION) -le 4 -a $(GCC_MINOR_VERSION) -le 3 && echo 1), 1)
+CFLAGS += -Wno-deprecated
+endif
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+endif
+
+VPATH += $(RTE_SDK)/lib/librte_pmd_vmxnet3/vmxnet3
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_ethdev.c
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_net lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/README b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/README
new file mode 100755
index 00000000..f13fec72
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/README
@@ -0,0 +1,50 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Intel VMXNET3 driver
+====================
+
+This directory contains the source code of the FreeBSD VMXNET3 driver released by VMware.
+Of these files, upt1_defs.h and vmxnet3_defs.h are imported without any change.
+The other 4 files (includeCheck.h, vmware_pack_begin.h, vmware_pack_end.h and vmxnet3_osdep.h)
+are created to satisfy the needs of the above 2 files.
+
+Updating driver
+===============
+
+The following modifications have been made to this code to integrate it with the
+Intel DPDK:
+
+
+-------------
+
+
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/includeCheck.h b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/includeCheck.h
new file mode 100755
index 00000000..16308d28
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/includeCheck.h
@@ -0,0 +1,40 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _INCLUDECHECK_H
+#define _INCLUDECHECK_H
+
+#include "vmxnet3_osdep.h"
+
+#endif /* _INCLUDECHECK_H */
+
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/upt1_defs.h b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/upt1_defs.h
new file mode 100755
index 00000000..d9144e32
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/upt1_defs.h
@@ -0,0 +1,117 @@
+/*********************************************************
+ * Copyright (C) 2007 VMware, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *********************************************************/
+
+/* upt1_defs.h
+ *
+ * Definitions for UPTv1
+ *
+ * Some of the defs are duplicated in vmkapi_net_upt.h, because
+ * vmkapi_net_upt.h cannot distribute with OSS yet and vmkapi headers can
+ * only include vmkapi headers. Make sure they are kept in sync!
+ */
+
+#ifndef _UPT1_DEFS_H
+#define _UPT1_DEFS_H
+
+#define UPT1_MAX_TX_QUEUES 64
+#define UPT1_MAX_RX_QUEUES 64
+
+#define UPT1_MAX_INTRS (UPT1_MAX_TX_QUEUES + UPT1_MAX_RX_QUEUES)
+
+typedef
+#include "vmware_pack_begin.h"
+struct UPT1_TxStats {
+ uint64 TSOPktsTxOK; /* TSO pkts post-segmentation */
+ uint64 TSOBytesTxOK;
+ uint64 ucastPktsTxOK;
+ uint64 ucastBytesTxOK;
+ uint64 mcastPktsTxOK;
+ uint64 mcastBytesTxOK;
+ uint64 bcastPktsTxOK;
+ uint64 bcastBytesTxOK;
+ uint64 pktsTxError;
+ uint64 pktsTxDiscard;
+}
+#include "vmware_pack_end.h"
+UPT1_TxStats;
+
+typedef
+#include "vmware_pack_begin.h"
+struct UPT1_RxStats {
+ uint64 LROPktsRxOK; /* LRO pkts */
+ uint64 LROBytesRxOK; /* bytes from LRO pkts */
+ /* the following counters are for pkts from the wire, i.e., pre-LRO */
+ uint64 ucastPktsRxOK;
+ uint64 ucastBytesRxOK;
+ uint64 mcastPktsRxOK;
+ uint64 mcastBytesRxOK;
+ uint64 bcastPktsRxOK;
+ uint64 bcastBytesRxOK;
+ uint64 pktsRxOutOfBuf;
+ uint64 pktsRxError;
+}
+#include "vmware_pack_end.h"
+UPT1_RxStats;
+
+/* interrupt moderation level */
+#define UPT1_IML_NONE 0 /* no interrupt moderation */
+#define UPT1_IML_HIGHEST 7 /* least intr generated */
+#define UPT1_IML_ADAPTIVE 8 /* adaptive intr moderation */
+
+/* values for UPT1_RSSConf.hashFunc */
+#define UPT1_RSS_HASH_TYPE_NONE 0x0
+#define UPT1_RSS_HASH_TYPE_IPV4 0x01
+#define UPT1_RSS_HASH_TYPE_TCP_IPV4 0x02
+#define UPT1_RSS_HASH_TYPE_IPV6 0x04
+#define UPT1_RSS_HASH_TYPE_TCP_IPV6 0x08
+
+#define UPT1_RSS_HASH_FUNC_NONE 0x0
+#define UPT1_RSS_HASH_FUNC_TOEPLITZ 0x01
+
+#define UPT1_RSS_MAX_KEY_SIZE 40
+#define UPT1_RSS_MAX_IND_TABLE_SIZE 128
+
+typedef
+#include "vmware_pack_begin.h"
+struct UPT1_RSSConf {
+ uint16 hashType;
+ uint16 hashFunc;
+ uint16 hashKeySize;
+ uint16 indTableSize;
+ uint8 hashKey[UPT1_RSS_MAX_KEY_SIZE];
+ uint8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
+}
+#include "vmware_pack_end.h"
+UPT1_RSSConf;
+
+/* features */
+#define UPT1_F_RXCSUM 0x0001 /* rx csum verification */
+#define UPT1_F_RSS 0x0002
+#define UPT1_F_RXVLAN 0x0004 /* VLAN tag stripping */
+#define UPT1_F_LRO 0x0008
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_begin.h b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_begin.h
new file mode 100755
index 00000000..860ec4c3
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_begin.h
@@ -0,0 +1,32 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_end.h b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_end.h
new file mode 100755
index 00000000..860ec4c3
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmware_pack_end.h
@@ -0,0 +1,32 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_defs.h b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_defs.h
new file mode 100755
index 00000000..2b56574f
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_defs.h
@@ -0,0 +1,751 @@
+/*********************************************************
+ * Copyright (C) 2007 VMware, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *********************************************************/
+
+/*
+ * vmxnet3_defs.h --
+ *
+ * Definitions shared by device emulation and guest drivers for
+ * VMXNET3 NIC
+ */
+
+#ifndef _VMXNET3_DEFS_H_
+#define _VMXNET3_DEFS_H_
+
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMKERNEL
+#define INCLUDE_ALLOW_DISTRIBUTE
+#define INCLUDE_ALLOW_VMKDRIVERS
+#define INCLUDE_ALLOW_VMCORE
+#define INCLUDE_ALLOW_MODULE
+#include "includeCheck.h"
+
+#include "upt1_defs.h"
+
+/* all registers are 32 bit wide */
+/* BAR 1 */
+#define VMXNET3_REG_VRRS 0x0 /* Vmxnet3 Revision Report Selection */
+#define VMXNET3_REG_UVRS 0x8 /* UPT Version Report Selection */
+#define VMXNET3_REG_DSAL 0x10 /* Driver Shared Address Low */
+#define VMXNET3_REG_DSAH 0x18 /* Driver Shared Address High */
+#define VMXNET3_REG_CMD 0x20 /* Command */
+#define VMXNET3_REG_MACL 0x28 /* MAC Address Low */
+#define VMXNET3_REG_MACH 0x30 /* MAC Address High */
+#define VMXNET3_REG_ICR 0x38 /* Interrupt Cause Register */
+#define VMXNET3_REG_ECR 0x40 /* Event Cause Register */
+
+#define VMXNET3_REG_WSAL 0xF00 /* Wireless Shared Address Lo */
+#define VMXNET3_REG_WSAH 0xF08 /* Wireless Shared Address Hi */
+#define VMXNET3_REG_WCMD 0xF18 /* Wireless Command */
+
+/* BAR 0 */
+#define VMXNET3_REG_IMR 0x0 /* Interrupt Mask Register */
+#define VMXNET3_REG_TXPROD 0x600 /* Tx Producer Index */
+#define VMXNET3_REG_RXPROD 0x800 /* Rx Producer Index for ring 1 */
+#define VMXNET3_REG_RXPROD2 0xA00 /* Rx Producer Index for ring 2 */
+
+#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
+#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
+
+/*
+ * The two Vmxnet3 MMIO Register PCI BARs (BAR 0 at offset 10h and BAR 1 at
+ * offset 14h) as well as the MSI-X BAR are combined into one PhysMem region:
+ * <-VMXNET3_PT_REG_SIZE-><-VMXNET3_VD_REG_SIZE-><-VMXNET3_MSIX_BAR_SIZE-->
+ * -------------------------------------------------------------------------
+ * |Pass Thru Registers | Virtual Dev Registers | MSI-X Vector/PBA Table |
+ * -------------------------------------------------------------------------
+ * VMXNET3_MSIX_BAR_SIZE is defined in "vmxnet3Int.h"
+ */
+#define VMXNET3_PHYSMEM_PAGES 4
+
+#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */
+#define VMXNET3_REG_ALIGN_MASK 0x7
+
+/* I/O Mapped access to registers */
+#define VMXNET3_IO_TYPE_PT 0
+#define VMXNET3_IO_TYPE_VD 1
+#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF))
+#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
+#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
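+
+/*
+ * Illustrative sketch only (not part of the imported VMware header), kept
+ * under #if 0: how the I/O-mapped encoding above packs a register-space type
+ * and a register offset into a single value, here the BAR 1 command register.
+ */
+#if 0
+static inline void vmxnet3_io_addr_sketch(void)
+{
+ uint32 addr = VMXNET3_IO_ADDR(VMXNET3_IO_TYPE_VD, VMXNET3_REG_CMD);
+ /* VMXNET3_IO_TYPE(addr) yields VMXNET3_IO_TYPE_VD and
+  * VMXNET3_IO_REG(addr) yields VMXNET3_REG_CMD (0x20). */
+ (void)addr;
+}
+#endif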
+
+#ifndef __le16
+#define __le16 uint16
+#endif
+#ifndef __le32
+#define __le32 uint32
+#endif
+#ifndef __le64
+#define __le64 uint64
+#endif
+
+typedef enum {
+ VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
+ VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
+ VMXNET3_CMD_QUIESCE_DEV,
+ VMXNET3_CMD_RESET_DEV,
+ VMXNET3_CMD_UPDATE_RX_MODE,
+ VMXNET3_CMD_UPDATE_MAC_FILTERS,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS,
+ VMXNET3_CMD_UPDATE_RSSIDT,
+ VMXNET3_CMD_UPDATE_IML,
+ VMXNET3_CMD_UPDATE_PMCFG,
+ VMXNET3_CMD_UPDATE_FEATURE,
+ VMXNET3_CMD_STOP_EMULATION,
+ VMXNET3_CMD_LOAD_PLUGIN,
+ VMXNET3_CMD_ACTIVATE_VF,
+
+ VMXNET3_CMD_FIRST_GET = 0xF00D0000,
+ VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
+ VMXNET3_CMD_GET_STATS,
+ VMXNET3_CMD_GET_LINK,
+ VMXNET3_CMD_GET_PERM_MAC_LO,
+ VMXNET3_CMD_GET_PERM_MAC_HI,
+ VMXNET3_CMD_GET_DID_LO,
+ VMXNET3_CMD_GET_DID_HI,
+ VMXNET3_CMD_GET_DEV_EXTRA_INFO,
+ VMXNET3_CMD_GET_CONF_INTR,
+ VMXNET3_CMD_GET_ADAPTIVE_RING_INFO
+} Vmxnet3_Cmd;
+
+/* Adaptive Ring Info Flags */
+#define VMXNET3_DISABLE_ADAPTIVE_RING 1
+
+/*
+ * Little Endian layout of bitfields -
+ * Byte 0 : 7.....len.....0
+ * Byte 1 : rsvd gen 13.len.8
+ * Byte 2 : 5.msscof.0 ext1 dtype
+ * Byte 3 : 13...msscof...6
+ *
+ * Big Endian layout of bitfields -
+ * Byte 0: 13...msscof...6
+ * Byte 1 : 5.msscof.0 ext1 dtype
+ * Byte 2 : rsvd gen 13.len.8
+ * Byte 3 : 7.....len.....0
+ *
+ * Thus, le32_to_cpu on the dword will allow the big endian driver to read
+ * the bit fields correctly, and cpu_to_le32 will convert bit fields
+ * written by the big endian driver to the format required by the device.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxDesc {
+ __le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 msscof:14; /* MSS, checksum offset, flags */
+ uint32 ext1:1;
+ uint32 dtype:1; /* descriptor type */
+ uint32 rsvd:1;
+ uint32 gen:1; /* generation bit */
+ uint32 len:14;
+#else
+ uint32 len:14;
+ uint32 gen:1; /* generation bit */
+ uint32 rsvd:1;
+ uint32 dtype:1; /* descriptor type */
+ uint32 ext1:1;
+ uint32 msscof:14; /* MSS, checksum offset, flags */
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 tci:16; /* Tag to Insert */
+ uint32 ti:1; /* VLAN Tag Insertion */
+ uint32 ext2:1;
+ uint32 cq:1; /* completion request */
+ uint32 eop:1; /* End Of Packet */
+ uint32 om:2; /* offload mode */
+ uint32 hlen:10; /* header len */
+#else
+ uint32 hlen:10; /* header len */
+ uint32 om:2; /* offload mode */
+ uint32 eop:1; /* End Of Packet */
+ uint32 cq:1; /* completion request */
+ uint32 ext2:1;
+ uint32 ti:1; /* VLAN Tag Insertion */
+ uint32 tci:16; /* Tag to Insert */
+#endif /* __BIG_ENDIAN_BITFIELD */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxDesc;
+
+/* TxDesc.OM values */
+#define VMXNET3_OM_NONE 0
+#define VMXNET3_OM_CSUM 2
+#define VMXNET3_OM_TSO 3
+
+/* fields in TxDesc we access w/o using bit fields */
+#define VMXNET3_TXD_EOP_SHIFT 12
+#define VMXNET3_TXD_CQ_SHIFT 13
+#define VMXNET3_TXD_GEN_SHIFT 14
+#define VMXNET3_TXD_EOP_DWORD_SHIFT 3
+#define VMXNET3_TXD_GEN_DWORD_SHIFT 2
+
+#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT)
+#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
+#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT)
+
+#define VMXNET3_TXD_GEN_SIZE 1
+#define VMXNET3_TXD_EOP_SIZE 1
+
+#define VMXNET3_HDR_COPY_SIZE 128
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxDataDesc {
+ uint8 data[VMXNET3_HDR_COPY_SIZE];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxDataDesc;
+
+#define VMXNET3_TCD_GEN_SHIFT 31
+#define VMXNET3_TCD_GEN_SIZE 1
+#define VMXNET3_TCD_TXIDX_SHIFT 0
+#define VMXNET3_TCD_TXIDX_SIZE 12
+#define VMXNET3_TCD_GEN_DWORD_SHIFT 3
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxCompDesc {
+ uint32 txdIdx:12; /* Index of the EOP TxDesc */
+ uint32 ext1:20;
+
+ __le32 ext2;
+ __le32 ext3;
+
+ uint32 rsvd:24;
+ uint32 type:7; /* completion type */
+ uint32 gen:1; /* generation bit */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxCompDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxDesc {
+ __le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gen:1; /* Generation bit */
+ uint32 rsvd:15;
+ uint32 dtype:1; /* Descriptor type */
+ uint32 btype:1; /* Buffer Type */
+ uint32 len:14;
+#else
+ uint32 len:14;
+ uint32 btype:1; /* Buffer Type */
+ uint32 dtype:1; /* Descriptor type */
+ uint32 rsvd:15;
+ uint32 gen:1; /* Generation bit */
+#endif
+ __le32 ext1;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxDesc;
+
+/* values of RXD.BTYPE */
+#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */
+#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */
+
+/* fields in RxDesc we access w/o using bit fields */
+#define VMXNET3_RXD_BTYPE_SHIFT 14
+#define VMXNET3_RXD_GEN_SHIFT 31
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxCompDesc {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 ext2:1;
+ uint32 cnc:1; /* Checksum Not Calculated */
+ uint32 rssType:4; /* RSS hash type used */
+ uint32 rqID:10; /* rx queue/ring ID */
+ uint32 sop:1; /* Start of Packet */
+ uint32 eop:1; /* End of Packet */
+ uint32 ext1:2;
+ uint32 rxdIdx:12; /* Index of the RxDesc */
+#else
+ uint32 rxdIdx:12; /* Index of the RxDesc */
+ uint32 ext1:2;
+ uint32 eop:1; /* End of Packet */
+ uint32 sop:1; /* Start of Packet */
+ uint32 rqID:10; /* rx queue/ring ID */
+ uint32 rssType:4; /* RSS hash type used */
+ uint32 cnc:1; /* Checksum Not Calculated */
+ uint32 ext2:1;
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+ __le32 rssHash; /* RSS hash value */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 tci:16; /* Tag stripped */
+ uint32 ts:1; /* Tag is stripped */
+ uint32 err:1; /* Error */
+ uint32 len:14; /* data length */
+#else
+ uint32 len:14; /* data length */
+ uint32 err:1; /* Error */
+ uint32 ts:1; /* Tag is stripped */
+ uint32 tci:16; /* Tag stripped */
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gen:1; /* generation bit */
+ uint32 type:7; /* completion type */
+ uint32 fcs:1; /* Frame CRC correct */
+ uint32 frg:1; /* IP Fragment */
+ uint32 v4:1; /* IPv4 */
+ uint32 v6:1; /* IPv6 */
+ uint32 ipc:1; /* IP Checksum Correct */
+ uint32 tcp:1; /* TCP packet */
+ uint32 udp:1; /* UDP packet */
+ uint32 tuc:1; /* TCP/UDP Checksum Correct */
+ uint32 csum:16;
+#else
+ uint32 csum:16;
+ uint32 tuc:1; /* TCP/UDP Checksum Correct */
+ uint32 udp:1; /* UDP packet */
+ uint32 tcp:1; /* TCP packet */
+ uint32 ipc:1; /* IP Checksum Correct */
+ uint32 v6:1; /* IPv6 */
+ uint32 v4:1; /* IPv4 */
+ uint32 frg:1; /* IP Fragment */
+ uint32 fcs:1; /* Frame CRC correct */
+ uint32 type:7; /* completion type */
+ uint32 gen:1; /* generation bit */
+#endif /* __BIG_ENDIAN_BITFIELD */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxCompDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxCompDescExt {
+ __le32 dword1;
+ uint8 segCnt; /* Number of aggregated packets */
+ uint8 dupAckCnt; /* Number of duplicate Acks */
+ __le16 tsDelta; /* TCP timestamp difference */
+ __le32 dword2[2];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxCompDescExt;
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
+#define VMXNET3_RCD_TUC_SHIFT 16
+#define VMXNET3_RCD_IPC_SHIFT 19
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */
+#define VMXNET3_RCD_TYPE_SHIFT 56
+#define VMXNET3_RCD_GEN_SHIFT 63
+
+/* csum OK for TCP/UDP pkts over IP */
+#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | 1 << VMXNET3_RCD_IPC_SHIFT)
+
+/* value of RxCompDesc.rssType */
+#define VMXNET3_RCD_RSS_TYPE_NONE 0
+#define VMXNET3_RCD_RSS_TYPE_IPV4 1
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV4 2
+#define VMXNET3_RCD_RSS_TYPE_IPV6 3
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV6 4
+
+/* a union for accessing all cmd/completion descriptors */
+typedef union Vmxnet3_GenericDesc {
+ __le64 qword[2];
+ __le32 dword[4];
+ __le16 word[8];
+ Vmxnet3_TxDesc txd;
+ Vmxnet3_RxDesc rxd;
+ Vmxnet3_TxCompDesc tcd;
+ Vmxnet3_RxCompDesc rcd;
+ Vmxnet3_RxCompDescExt rcdExt;
+} Vmxnet3_GenericDesc;
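+
+/*
+ * Illustrative sketch only (not part of the imported VMware header), kept
+ * under #if 0: using the dword view of the generic descriptor together with
+ * the VMXNET3_TXD_GEN_DWORD_SHIFT/VMXNET3_TXD_GEN constants defined above to
+ * test the generation bit without going through the endian-dependent bit
+ * fields (on a big-endian host the dword would first need a le32-to-cpu
+ * swap, as described in the bit-field layout comment above).
+ */
+#if 0
+static inline int
+vmxnet3_txd_gen_is_set_sketch(const Vmxnet3_GenericDesc *gd)
+{
+ /* dword[VMXNET3_TXD_GEN_DWORD_SHIFT] is the dword holding len/gen/dtype/msscof */
+ return (gd->dword[VMXNET3_TXD_GEN_DWORD_SHIFT] & VMXNET3_TXD_GEN) != 0;
+}
+#endif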
+
+#define VMXNET3_INIT_GEN 1
+
+/* Max size of a single tx buffer */
+#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)
+
+/* # of tx desc needed for a tx buffer size */
+#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / VMXNET3_MAX_TX_BUF_SIZE)
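+
+/*
+ * Illustrative arithmetic only (not part of the imported VMware header):
+ * a single tx descriptor covers at most VMXNET3_MAX_TX_BUF_SIZE (16 KB), so
+ * VMXNET3_TXD_NEEDED(9000) == 1 and VMXNET3_TXD_NEEDED(32768) == 2.
+ */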
+
+/* max # of tx descs for a non-tso pkt */
+#define VMXNET3_MAX_TXD_PER_PKT 16
+
+/* Max size of a single rx buffer */
+#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
+/* Minimum size of a type 0 buffer */
+#define VMXNET3_MIN_T0_BUF_SIZE 128
+#define VMXNET3_MAX_CSUM_OFFSET 1024
+
+/* Ring base address alignment */
+#define VMXNET3_RING_BA_ALIGN 512
+#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1)
+
+/* Ring size must be a multiple of 32 */
+#define VMXNET3_RING_SIZE_ALIGN 32
+#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
+
+/* Max ring size */
+#define VMXNET3_TX_RING_MAX_SIZE 4096
+#define VMXNET3_TC_RING_MAX_SIZE 4096
+#define VMXNET3_RX_RING_MAX_SIZE 4096
+#define VMXNET3_RC_RING_MAX_SIZE 8192
+
+/* a list of reasons for queue stop */
+
+#define VMXNET3_ERR_NOEOP 0x80000000 /* cannot find the EOP desc of a pkt */
+#define VMXNET3_ERR_TXD_REUSE 0x80000001 /* reuse a TxDesc before tx completion */
+#define VMXNET3_ERR_BIG_PKT 0x80000002 /* too many TxDesc for a pkt */
+#define VMXNET3_ERR_DESC_NOT_SPT 0x80000003 /* descriptor type not supported */
+#define VMXNET3_ERR_SMALL_BUF 0x80000004 /* type 0 buffer too small */
+#define VMXNET3_ERR_STRESS 0x80000005 /* stress option firing in vmkernel */
+#define VMXNET3_ERR_SWITCH 0x80000006 /* mode switch failure */
+#define VMXNET3_ERR_TXD_INVALID 0x80000007 /* invalid TxDesc */
+
+/* completion descriptor types */
+#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */
+#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */
+#define VMXNET3_CDTYPE_RXCOMP_LRO 4 /* Rx Completion Descriptor for LRO */
+
+#define VMXNET3_GOS_BITS_UNK 0 /* unknown */
+#define VMXNET3_GOS_BITS_32 1
+#define VMXNET3_GOS_BITS_64 2
+
+#define VMXNET3_GOS_TYPE_UNK 0 /* unknown */
+#define VMXNET3_GOS_TYPE_LINUX 1
+#define VMXNET3_GOS_TYPE_WIN 2
+#define VMXNET3_GOS_TYPE_SOLARIS 3
+#define VMXNET3_GOS_TYPE_FREEBSD 4
+#define VMXNET3_GOS_TYPE_PXE 5
+
+/* All structures in DriverShared are padded to multiples of 8 bytes */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_GOSInfo {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gosMisc: 10; /* other info about gos */
+ uint32 gosVer: 16; /* gos version */
+ uint32 gosType: 4; /* which guest */
+ uint32 gosBits: 2; /* 32-bit or 64-bit? */
+#else
+ uint32 gosBits: 2; /* 32-bit or 64-bit? */
+ uint32 gosType: 4; /* which guest */
+ uint32 gosVer: 16; /* gos version */
+ uint32 gosMisc: 10; /* other info about gos */
+#endif /* __BIG_ENDIAN_BITFIELD */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_GOSInfo;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_DriverInfo {
+ __le32 version; /* driver version */
+ Vmxnet3_GOSInfo gos;
+ __le32 vmxnet3RevSpt; /* vmxnet3 revision supported */
+ __le32 uptVerSpt; /* upt version supported */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_DriverInfo;
+
+#define VMXNET3_REV1_MAGIC 0xbabefee1
+
+/*
+ * QueueDescPA must be 128 bytes aligned. It points to an array of
+ * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc.
+ * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by
+ * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively.
+ */
+#define VMXNET3_QUEUE_DESC_ALIGN 128
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_MiscConf {
+ Vmxnet3_DriverInfo driverInfo;
+ __le64 uptFeatures;
+ __le64 ddPA; /* driver data PA */
+ __le64 queueDescPA; /* queue descriptor table PA */
+ __le32 ddLen; /* driver data len */
+ __le32 queueDescLen; /* queue descriptor table len, in bytes */
+ __le32 mtu;
+ __le16 maxNumRxSG;
+ uint8 numTxQueues;
+ uint8 numRxQueues;
+ __le32 reserved[4];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_MiscConf;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxQueueConf {
+ __le64 txRingBasePA;
+ __le64 dataRingBasePA;
+ __le64 compRingBasePA;
+ __le64 ddPA; /* driver data */
+ __le64 reserved;
+ __le32 txRingSize; /* # of tx desc */
+ __le32 dataRingSize; /* # of data desc */
+ __le32 compRingSize; /* # of comp desc */
+ __le32 ddLen; /* size of driver data */
+ uint8 intrIdx;
+ uint8 _pad[7];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxQueueConf;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxQueueConf {
+ __le64 rxRingBasePA[2];
+ __le64 compRingBasePA;
+ __le64 ddPA; /* driver data */
+ __le64 reserved;
+ __le32 rxRingSize[2]; /* # of rx desc */
+ __le32 compRingSize; /* # of rx comp desc */
+ __le32 ddLen; /* size of driver data */
+ uint8 intrIdx;
+ uint8 _pad[7];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxQueueConf;
+
+enum vmxnet3_intr_mask_mode {
+ VMXNET3_IMM_AUTO = 0,
+ VMXNET3_IMM_ACTIVE = 1,
+ VMXNET3_IMM_LAZY = 2
+};
+
+enum vmxnet3_intr_type {
+ VMXNET3_IT_AUTO = 0,
+ VMXNET3_IT_INTX = 1,
+ VMXNET3_IT_MSI = 2,
+ VMXNET3_IT_MSIX = 3
+};
+
+#define VMXNET3_MAX_TX_QUEUES 8
+#define VMXNET3_MAX_RX_QUEUES 16
+/* 1 additional interrupt for events */
+#define VMXNET3_MAX_INTRS 25
+
+/* value of intrCtrl */
+#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_IntrConf {
+ Bool autoMask;
+ uint8 numIntrs; /* # of interrupts */
+ uint8 eventIntrIdx;
+ uint8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for each intr */
+ __le32 intrCtrl;
+ __le32 reserved[2];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_IntrConf;
+
+/* one bit per VLAN ID, the size is in the units of uint32 */
+#define VMXNET3_VFT_SIZE (4096 / (sizeof(uint32) * 8))
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_QueueStatus {
+ Bool stopped;
+ uint8 _pad[3];
+ __le32 error;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_QueueStatus;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxQueueCtrl {
+ __le32 txNumDeferred;
+ __le32 txThreshold;
+ __le64 reserved;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxQueueCtrl;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxQueueCtrl {
+ Bool updateRxProd;
+ uint8 _pad[7];
+ __le64 reserved;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxQueueCtrl;
+
+#define VMXNET3_RXM_UCAST 0x01 /* unicast only */
+#define VMXNET3_RXM_MCAST 0x02 /* multicast passing the filters */
+#define VMXNET3_RXM_BCAST 0x04 /* broadcast only */
+#define VMXNET3_RXM_ALL_MULTI 0x08 /* all multicast */
+#define VMXNET3_RXM_PROMISC 0x10 /* promiscuous */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxFilterConf {
+ __le32 rxMode; /* VMXNET3_RXM_xxx */
+ __le16 mfTableLen; /* size of the multicast filter table */
+ __le16 _pad1;
+ __le64 mfTablePA; /* PA of the multicast filters table */
+ __le32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxFilterConf;
+
+#define VMXNET3_PM_MAX_FILTERS 6
+#define VMXNET3_PM_MAX_PATTERN_SIZE 128
+#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
+
+#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching filters */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_PM_PktFilter {
+ uint8 maskSize;
+ uint8 patternSize;
+ uint8 mask[VMXNET3_PM_MAX_MASK_SIZE];
+ uint8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE];
+ uint8 pad[6];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_PM_PktFilter;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_PMConf {
+ __le16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
+ uint8 numFilters;
+ uint8 pad[5];
+ Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_PMConf;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_VariableLenConfDesc {
+ __le32 confVer;
+ __le32 confLen;
+ __le64 confPA;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_VariableLenConfDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_DSDevRead {
+ /* read-only region for device, read by dev in response to a SET cmd */
+ Vmxnet3_MiscConf misc;
+ Vmxnet3_IntrConf intrConf;
+ Vmxnet3_RxFilterConf rxFilterConf;
+ Vmxnet3_VariableLenConfDesc rssConfDesc;
+ Vmxnet3_VariableLenConfDesc pmConfDesc;
+ Vmxnet3_VariableLenConfDesc pluginConfDesc;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_DSDevRead;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxQueueDesc {
+ Vmxnet3_TxQueueCtrl ctrl;
+ Vmxnet3_TxQueueConf conf;
+ /* Driver read after a GET command */
+ Vmxnet3_QueueStatus status;
+ UPT1_TxStats stats;
+ uint8 _pad[88]; /* 128 aligned */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxQueueDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxQueueDesc {
+ Vmxnet3_RxQueueCtrl ctrl;
+ Vmxnet3_RxQueueConf conf;
+ /* Driver read after a GET command */
+ Vmxnet3_QueueStatus status;
+ UPT1_RxStats stats;
+ uint8 _pad[88]; /* 128 aligned */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxQueueDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_DriverShared {
+ __le32 magic;
+ __le32 pad; /* make devRead start at 64-bit boundaries */
+ Vmxnet3_DSDevRead devRead;
+ __le32 ecr;
+ __le32 reserved[5];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_DriverShared;
+
+#define VMXNET3_ECR_RQERR (1 << 0)
+#define VMXNET3_ECR_TQERR (1 << 1)
+#define VMXNET3_ECR_LINK (1 << 2)
+#define VMXNET3_ECR_DIC (1 << 3)
+#define VMXNET3_ECR_DEBUG (1 << 4)
+
+/* flip the gen bit of a ring */
+#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)
+
+/* only use this if moving the idx won't affect the gen bit */
+#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
+do {\
+ (idx)++;\
+ if (UNLIKELY((idx) == (ring_size))) {\
+ (idx) = 0;\
+ }\
+} while (0)
+
+#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \
+ vfTable[vid >> 5] |= (1 << (vid & 31))
+#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \
+ vfTable[vid >> 5] &= ~(1 << (vid & 31))
+
+#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \
+ ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
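+
+/*
+ * Illustrative sketch only (not part of the imported VMware header), kept
+ * under #if 0: the VLAN filter table is a 4096-bit bitmap (VMXNET3_VFT_SIZE
+ * uint32 words), so VLAN ID 100 lands in word 100 >> 5 == 3, bit 100 & 31 == 4.
+ */
+#if 0
+static inline void vmxnet3_vftable_sketch(uint32 *vfTable)
+{
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, 100);
+ /* vfTable[3] now has bit 4 set */
+ if (VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, 100))
+  VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, 100);
+}
+#endif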
+
+#define VMXNET3_MAX_MTU 9000
+#define VMXNET3_MIN_MTU 60
+
+#define VMXNET3_LINK_UP (10000 << 16 | 1) // 10 Gbps, up
+#define VMXNET3_LINK_DOWN 0
+
+#define VMXWIFI_DRIVER_SHARED_LEN 8192
+
+#define VMXNET3_DID_PASSTHRU 0xFFFF
+
+#endif /* _VMXNET3_DEFS_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_osdep.h b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_osdep.h
new file mode 100755
index 00000000..b6e3469c
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3/vmxnet3_osdep.h
@@ -0,0 +1,48 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VMXNET3_OSDEP_H
+#define _VMXNET3_OSDEP_H
+
+typedef uint64_t uint64;
+typedef uint32_t uint32;
+typedef uint16_t uint16;
+typedef uint8_t uint8;
+typedef int bool;
+typedef char Bool;
+
+#ifndef UNLIKELY
+#define UNLIKELY(x) __builtin_expect((x),0)
+#endif /* unlikely */
+
+#endif /* _VMXNET3_OSDEP_H */
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.c b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.c
new file mode 100755
index 00000000..ef0af16a
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.c
@@ -0,0 +1,781 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_string_fns.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "vmxnet3/vmxnet3_defs.h"
+
+#include "vmxnet3_ring.h"
+#include "vmxnet3_logs.h"
+#include "vmxnet3_ethdev.h"
+
+#define PROCESS_SYS_EVENTS 0
+
+static int eth_vmxnet3_dev_init(struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev);
+static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
+static int vmxnet3_dev_start(struct rte_eth_dev *dev);
+static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
+static void vmxnet3_dev_close(struct rte_eth_dev *dev);
+static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
+static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+#if PROCESS_SYS_EVENTS == 1
+static void vmxnet3_process_events(struct vmxnet3_hw *);
+#endif
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id pci_id_vmxnet3_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{ .vendor_id = 0, /* sentinel */ },
+};
+
+static struct eth_dev_ops vmxnet3_eth_dev_ops = {
+ .dev_configure = vmxnet3_dev_configure,
+ .dev_start = vmxnet3_dev_start,
+ .dev_stop = vmxnet3_dev_stop,
+ .dev_close = vmxnet3_dev_close,
+ .promiscuous_enable = vmxnet3_dev_promiscuous_enable,
+ .promiscuous_disable = vmxnet3_dev_promiscuous_disable,
+ .allmulticast_enable = vmxnet3_dev_allmulticast_enable,
+ .allmulticast_disable = vmxnet3_dev_allmulticast_disable,
+ .link_update = vmxnet3_dev_link_update,
+ .stats_get = vmxnet3_dev_stats_get,
+ .dev_infos_get = vmxnet3_dev_info_get,
+ .rx_queue_setup = vmxnet3_dev_rx_queue_setup,
+ .rx_queue_release = vmxnet3_dev_rx_queue_release,
+ .tx_queue_setup = vmxnet3_dev_tx_queue_setup,
+ .tx_queue_release = vmxnet3_dev_tx_queue_release,
+};
+
+static const struct rte_memzone *
+gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
+ const char *post_string, int socket_id, uint16_t align)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(z_name, sizeof(z_name), "%s_%d_%s",
+ dev->driver->pci_drv.name, dev->data->port_id, post_string);
+
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ return mz;
+
+ return rte_memzone_reserve_aligned(z_name, size,
+ socket_id, 0, align);
+}
+
+/**
+ * Atomically writes the link status information into the global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * - Pointer to the structure rte_eth_dev to write to.
+ * - Pointer to the link status to be written.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+rte_vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * This function is based on vmxnet3_disable_intr()
+ */
+static void
+vmxnet3_disable_intr(struct vmxnet3_hw *hw)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
+ for (i = 0; i < VMXNET3_MAX_INTRS; i++)
+ VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
+}
+
+/*
+ * It returns 0 on success.
+ */
+static int
+eth_vmxnet3_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct vmxnet3_hw *hw = eth_dev->data->dev_private;
+ uint32_t mac_hi, mac_lo, ver;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
+ eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
+ pci_dev = eth_dev->pci_dev;
+
+ /*
+ * for secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Vendor and Device ID need to be set before init of shared code */
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;
+
+ hw->num_rx_queues = 1;
+ hw->num_tx_queues = 1;
+ hw->cur_mtu = ETHER_MTU;
+ hw->bufs_per_pkt = 1;
+
+ /* Check h/w version compatibility with driver. */
+ ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
+ PMD_INIT_LOG(DEBUG, "Harware version : %d", ver);
+ if (ver & 0x1)
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
+ else {
+ PMD_INIT_LOG(ERR, "Uncompatiable h/w version, should be 0x1");
+ return -EIO;
+ }
+
+ /* Check UPT version compatibility with driver. */
+ ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
+ PMD_INIT_LOG(DEBUG, "UPT harware version : %d", ver);
+ if (ver & 0x1)
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
+ else {
+ PMD_INIT_LOG(ERR, "Incompatiable UPT version.");
+ return -EIO;
+ }
+
+ /* Getting MAC Address */
+ mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
+ mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
+ memcpy(hw->perm_addr, &mac_lo, 4);
+ memcpy(hw->perm_addr+4, &mac_hi, 2);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
+ VMXNET3_MAX_MAC_ADDRS, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
+ return -ENOMEM;
+ }
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->perm_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
+ hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
+ hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
+
+ /* Put device in Quiesce Mode */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
+
+ return 0;
+}
+
+static struct eth_driver rte_vmxnet3_pmd = {
+ {
+ .name = "rte_vmxnet3_pmd",
+ .id_table = pci_id_vmxnet3_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ },
+ .eth_dev_init = eth_vmxnet3_dev_init,
+ .dev_private_size = sizeof(struct vmxnet3_hw),
+};
+
+/*
+ * Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Poll Mode] Driver of Virtual PCI VMXNET3 devices.
+ */
+static int
+rte_vmxnet3_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eth_driver_register(&rte_vmxnet3_pmd);
+ return 0;
+}
+
+static int
+vmxnet3_dev_configure(struct rte_eth_dev *dev)
+{
+ const struct rte_memzone *mz;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ size_t size;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->nb_rx_queues > UINT8_MAX ||
+ dev->data->nb_tx_queues > UINT8_MAX)
+ return -EINVAL;
+
+ size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
+ dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
+
+ if (size > UINT16_MAX)
+ return -EINVAL;
+
+ hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
+ hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
+
+ /*
+ * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
+ * on current socket
+ */
+ mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
+ "shared", rte_socket_id(), 8);
+
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ hw->shared = mz->addr;
+ hw->sharedPA = mz->phys_addr;
+
+ /*
+ * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
+ * on current socket
+ */
+ mz = gpa_zone_reserve(dev, size, "queuedesc",
+ rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
+ hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
+
+ hw->queueDescPA = mz->phys_addr;
+ hw->queue_desc_len = (uint16_t)size;
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+
+ /* Allocate memory structure for UPT1_RSSConf and configure */
+ mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
+ rte_socket_id(), RTE_CACHE_LINE_SIZE);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR,
+ "ERROR: Creating rss_conf structure zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ hw->rss_conf = mz->addr;
+ hw->rss_confPA = mz->phys_addr;
+ }
+
+ return 0;
+}
+
+static int
+vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf port_conf = dev->data->dev_conf;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ Vmxnet3_DriverShared *shared = hw->shared;
+ Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ uint32_t *mac_ptr;
+ uint32_t val, i;
+ int ret;
+
+ shared->magic = VMXNET3_REV1_MAGIC;
+ devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
+
+ /* Setting up Guest OS information */
+ devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
+ VMXNET3_GOS_BITS_32 :
+ VMXNET3_GOS_BITS_64;
+ devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
+ devRead->misc.driverInfo.vmxnet3RevSpt = 1;
+ devRead->misc.driverInfo.uptVerSpt = 1;
+
+ devRead->misc.queueDescPA = hw->queueDescPA;
+ devRead->misc.queueDescLen = hw->queue_desc_len;
+ devRead->misc.mtu = hw->cur_mtu;
+ devRead->misc.numTxQueues = hw->num_tx_queues;
+ devRead->misc.numRxQueues = hw->num_rx_queues;
+
+ /*
+ * Set the number of interrupts to 1.
+ * The PMD disables all interrupts, but at least one is required to
+ * activate the device and to handle link events, so it is disabled
+ * again after device activation if needed.
+ */
+ devRead->intrConf.numIntrs = 1;
+ devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
+
+ for (i = 0; i < hw->num_tx_queues; i++) {
+ Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
+ vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];
+
+ tqd->ctrl.txNumDeferred = 0;
+ tqd->ctrl.txThreshold = 1;
+ tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
+ tqd->conf.compRingBasePA = txq->comp_ring.basePA;
+ tqd->conf.dataRingBasePA = txq->data_ring.basePA;
+
+ tqd->conf.txRingSize = txq->cmd_ring.size;
+ tqd->conf.compRingSize = txq->comp_ring.size;
+ tqd->conf.dataRingSize = txq->data_ring.size;
+ tqd->conf.intrIdx = txq->comp_ring.intr_idx;
+ tqd->status.stopped = TRUE;
+ tqd->status.error = 0;
+ memset(&tqd->stats, 0, sizeof(tqd->stats));
+ }
+
+ for (i = 0; i < hw->num_rx_queues; i++) {
+ Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
+ vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+
+ rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
+ rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
+ rqd->conf.compRingBasePA = rxq->comp_ring.basePA;
+
+ rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size;
+ rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
+ rqd->conf.compRingSize = rxq->comp_ring.size;
+ rqd->conf.intrIdx = rxq->comp_ring.intr_idx;
+ rqd->status.stopped = TRUE;
+ rqd->status.error = 0;
+ memset(&rqd->stats, 0, sizeof(rqd->stats));
+ }
+
+ /* RxMode starts at 0, i.e. with no VMXNET3_RXM_xxx bits set */
+ devRead->rxFilterConf.rxMode = 0;
+
+ /* Setting up feature flags */
+ if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
+
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ devRead->misc.uptFeatures |= VMXNET3_F_RXVLAN;
+
+ if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ ret = vmxnet3_rss_configure(dev);
+ if (ret != VMXNET3_SUCCESS)
+ return ret;
+
+ devRead->misc.uptFeatures |= VMXNET3_F_RSS;
+ devRead->rssConfDesc.confVer = 1;
+ devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
+ devRead->rssConfDesc.confPA = hw->rss_confPA;
+ }
+
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter) {
+ ret = vmxnet3_vlan_configure(dev);
+ if (ret != VMXNET3_SUCCESS)
+ return ret;
+ }
+
+ PMD_INIT_LOG(DEBUG,
+ "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
+ hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
+ hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
+
+ /* Write MAC Address back to device */
+ mac_ptr = (uint32_t *)hw->perm_addr;
+ val = *mac_ptr;
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
+
+ val = (hw->perm_addr[5] << 8) | hw->perm_addr[4];
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
+
+ return VMXNET3_SUCCESS;
+}
+
+/*
+ * Configure device link speed and set up the link.
+ * Must be called after eth_vmxnet3_dev_init(); otherwise it might fail.
+ * It returns 0 on success.
+ */
+static int
+vmxnet3_dev_start(struct rte_eth_dev *dev)
+{
+ int status, ret;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = vmxnet3_setup_driver_shared(dev);
+ if (ret != VMXNET3_SUCCESS)
+ return ret;
+
+ /* Exchange shared data with device */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
+ VMXNET3_GET_ADDR_LO(hw->sharedPA));
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
+ VMXNET3_GET_ADDR_HI(hw->sharedPA));
+
+ /* Activate device by register write */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
+ status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+ if (status != 0) {
+ PMD_INIT_LOG(ERR, "Device activation in %s(): UNSUCCESSFUL", __func__);
+ return -1;
+ }
+
+ /* Disable interrupts */
+ vmxnet3_disable_intr(hw);
+
+ /*
+ * Load RX queues with blank mbufs and update next2fill index for device
+ * Update RxMode of the device
+ */
+ ret = vmxnet3_dev_rxtx_init(dev);
+ if (ret != VMXNET3_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Device receive init in %s: UNSUCCESSFUL", __func__);
+ return ret;
+ }
+
+ /* Setting proper Rx Mode and issue Rx Mode Update command */
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
+
+ /*
+ * Don't need to handle events for now
+ */
+#if PROCESS_SYS_EVENTS == 1
+ events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
+ PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
+ vmxnet3_process_events(hw);
+#endif
+ return status;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void
+vmxnet3_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->adapter_stopped == TRUE) {
+ PMD_INIT_LOG(DEBUG, "Device already closed.");
+ return;
+ }
+
+ /* disable interrupts */
+ vmxnet3_disable_intr(hw);
+
+ /* quiesce the device first */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
+
+ /* reset the device */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+ PMD_INIT_LOG(DEBUG, "Device reset.");
+ hw->adapter_stopped = FALSE;
+
+ vmxnet3_dev_clear_queues(dev);
+
+ /* Clear recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_vmxnet3_dev_atomic_write_link_status(dev, &link);
+}
+
+/*
+ * Reset and stop device.
+ */
+static void
+vmxnet3_dev_close(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vmxnet3_dev_stop(dev);
+ hw->adapter_stopped = TRUE;
+}
+
+static void
+vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ unsigned int i;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+
+ RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
+ for (i = 0; i < hw->num_tx_queues; i++) {
+ struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;
+
+ stats->q_opackets[i] = txStats->ucastPktsTxOK +
+ txStats->mcastPktsTxOK +
+ txStats->bcastPktsTxOK;
+ stats->q_obytes[i] = txStats->ucastBytesTxOK +
+ txStats->mcastBytesTxOK +
+ txStats->bcastBytesTxOK;
+
+ stats->opackets += stats->q_opackets[i];
+ stats->obytes += stats->q_obytes[i];
+ stats->oerrors += txStats->pktsTxError +
+ txStats->pktsTxDiscard;
+ }
+
+ RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
+ for (i = 0; i < hw->num_rx_queues; i++) {
+ struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;
+
+ stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
+ rxStats->mcastPktsRxOK +
+ rxStats->bcastPktsRxOK;
+
+ stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
+ rxStats->mcastBytesRxOK +
+ rxStats->bcastBytesRxOK;
+
+ stats->ipackets += stats->q_ipackets[i];
+ stats->ibytes += stats->q_ibytes[i];
+
+ stats->q_errors[i] = rxStats->pktsRxError;
+ stats->ierrors += rxStats->pktsRxError;
+ stats->imcasts += rxStats->mcastPktsRxOK;
+ stats->rx_nombuf += rxStats->pktsRxOutOfBuf;
+ }
+}
+
+static void
+vmxnet3_dev_info_get(__attribute__((unused)) struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
+ dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
+ dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
+ dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
+ dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
+
+ dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct rte_eth_link link;
+ uint32_t ret;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+ if (!ret) {
+ PMD_INIT_LOG(ERR, "Link Status Negative : %s()", __func__);
+ return -1;
+ }
+
+ if (ret & 0x1) {
+ link.link_status = 1;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_LINK_SPEED_10000;
+
+ rte_vmxnet3_dev_atomic_write_link_status(dev, &link);
+
+ return 0;
+ }
+
+ return -1;
+}
+
+/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
+static void
+vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
+{
+ struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
+
+ if (set)
+ rxConf->rxMode = rxConf->rxMode | feature;
+ else
+ rxConf->rxMode = rxConf->rxMode & (~feature);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
+}
+
+/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
+static void
+vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
+}
+
+/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
+static void
+vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
+}
+
+/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
+static void
+vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
+}
+
+/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
+static void
+vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
+}
+
+#if PROCESS_SYS_EVENTS == 1
+static void
+vmxnet3_process_events(struct vmxnet3_hw *hw)
+{
+ uint32_t events = hw->shared->ecr;
+
+ if (!events) {
+ PMD_INIT_LOG(ERR, "No events to process in %s()", __func__);
+ return;
+ }
+
+ /*
+ * ECR bits when written with 1b are cleared. Hence write
+ * events back to ECR so that the bits which were set will be reset.
+ */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
+
+ /* Check if link state has changed */
+ if (events & VMXNET3_ECR_LINK)
+ PMD_INIT_LOG(ERR,
+ "Process events in %s(): VMXNET3_ECR_LINK event", __func__);
+
+ /* Check if there is an error on xmit/recv queues */
+ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS);
+
+ if (hw->tqd_start->status.stopped)
+ PMD_INIT_LOG(ERR, "tq error 0x%x",
+ hw->tqd_start->status.error);
+
+ if (hw->rqd_start->status.stopped)
+ PMD_INIT_LOG(ERR, "rq error 0x%x",
+ hw->rqd_start->status.error);
+
+ /* Have to reset the device */
+ }
+
+ if (events & VMXNET3_ECR_DIC)
+ PMD_INIT_LOG(ERR, "Device implementation change event.");
+
+ if (events & VMXNET3_ECR_DEBUG)
+ PMD_INIT_LOG(ERR, "Debug event generated by device.");
+
+}
+#endif
+
+static struct rte_driver rte_vmxnet3_driver = {
+ .type = PMD_PDEV,
+ .init = rte_vmxnet3_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_vmxnet3_driver);
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.h b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.h
new file mode 100755
index 00000000..0941cfc6
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ethdev.h
@@ -0,0 +1,177 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VMXNET3_ETHDEV_H_
+#define _VMXNET3_ETHDEV_H_
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+#define VMXNET3_ASSERT(x) do { \
+ if (!(x)) rte_panic("VMXNET3: x"); \
+} while(0)
+#endif
+
+#define VMXNET3_MAX_MAC_ADDRS 1
+
+/* UPT feature to negotiate */
+#define VMXNET3_F_RXCSUM 0x0001
+#define VMXNET3_F_RSS 0x0002
+#define VMXNET3_F_RXVLAN 0x0004
+#define VMXNET3_F_LRO 0x0008
+
+/* Hash Types supported by device */
+#define VMXNET3_RSS_HASH_TYPE_NONE 0x0
+#define VMXNET3_RSS_HASH_TYPE_IPV4 0x01
+#define VMXNET3_RSS_HASH_TYPE_TCP_IPV4 0x02
+#define VMXNET3_RSS_HASH_TYPE_IPV6 0x04
+#define VMXNET3_RSS_HASH_TYPE_TCP_IPV6 0x08
+
+#define VMXNET3_RSS_HASH_FUNC_NONE 0x0
+#define VMXNET3_RSS_HASH_FUNC_TOEPLITZ 0x01
+
+#define VMXNET3_RSS_MAX_KEY_SIZE 40
+#define VMXNET3_RSS_MAX_IND_TABLE_SIZE 128
+
+/* RSS configuration structure - shared with device through GPA */
+typedef
+struct VMXNET3_RSSConf {
+ uint16_t hashType;
+ uint16_t hashFunc;
+ uint16_t hashKeySize;
+ uint16_t indTableSize;
+ uint8_t hashKey[VMXNET3_RSS_MAX_KEY_SIZE];
+ /*
+ * indTable is only element that can be changed without
+ * device quiesce-reset-update-activation cycle
+ */
+ uint8_t indTable[VMXNET3_RSS_MAX_IND_TABLE_SIZE];
+} VMXNET3_RSSConf;
+
+typedef
+struct vmxnet3_mf_table {
+ void *mfTableBase; /* Multicast addresses list */
+ uint64_t mfTablePA; /* Physical address of the list */
+ uint16_t num_addrs; /* number of multicast addrs */
+} vmxnet3_mf_table_t;
+
+struct vmxnet3_hw {
+
+ uint8_t *hw_addr0; /* BAR0: PT-Passthrough Regs */
+ uint8_t *hw_addr1; /* BAR1: VD-Virtual Device Regs */
+ /* BAR2: MSI-X Regs */
+ /* BAR3: Port IO */
+ void *back;
+
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t subsystem_device_id;
+ uint16_t subsystem_vendor_id;
+ bool adapter_stopped;
+
+ uint8_t perm_addr[ETHER_ADDR_LEN];
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t bufs_per_pkt;
+ uint16_t cur_mtu;
+
+ Vmxnet3_TxQueueDesc *tqd_start; /* start address of all tx queue desc */
+ Vmxnet3_RxQueueDesc *rqd_start; /* start address of all rx queue desc */
+
+ Vmxnet3_DriverShared *shared;
+ uint64_t sharedPA;
+
+ uint64_t queueDescPA;
+ uint16_t queue_desc_len;
+
+ VMXNET3_RSSConf *rss_conf;
+ uint64_t rss_confPA;
+ vmxnet3_mf_table_t *mf_table;
+};
+
+#define VMXNET3_GET_ADDR_LO(reg) ((uint32_t)(reg))
+#define VMXNET3_GET_ADDR_HI(reg) ((uint32_t)(((uint64_t)(reg)) >> 32))
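+
+/*
+ * Example (illustrative value): for hw->sharedPA = 0x0000001234567890ULL,
+ * VMXNET3_GET_ADDR_LO() yields 0x34567890 (written to VMXNET3_REG_DSAL) and
+ * VMXNET3_GET_ADDR_HI() yields 0x00000012 (written to VMXNET3_REG_DSAH).
+ */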
+
+/* Config space read/writes */
+
+#define VMXNET3_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+
+static inline uint32_t vmxnet3_read_addr(volatile void *addr)
+{
+ return VMXNET3_PCI_REG(addr);
+}
+
+#define VMXNET3_PCI_REG_WRITE(reg, value) do { \
+ VMXNET3_PCI_REG((reg)) = (value); \
+} while(0)
+
+#define VMXNET3_PCI_BAR0_REG_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->hw_addr0 + (reg)))
+#define VMXNET3_READ_BAR0_REG(hw, reg) \
+ vmxnet3_read_addr(VMXNET3_PCI_BAR0_REG_ADDR((hw), (reg)))
+#define VMXNET3_WRITE_BAR0_REG(hw, reg, value) \
+ VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR0_REG_ADDR((hw), (reg)), (value))
+
+#define VMXNET3_PCI_BAR1_REG_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->hw_addr1 + (reg)))
+#define VMXNET3_READ_BAR1_REG(hw, reg) \
+ vmxnet3_read_addr(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)))
+#define VMXNET3_WRITE_BAR1_REG(hw, reg, value) \
+ VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)), (value))
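+
+/*
+ * Example: VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS)
+ * expands to a single volatile 32-bit store at hw->hw_addr1 + VMXNET3_REG_CMD;
+ * the matching read macro performs the corresponding volatile load.
+ */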
+
+/*
+ * RX/TX function prototypes
+ */
+
+void vmxnet3_dev_clear_queues(struct rte_eth_dev *dev);
+
+void vmxnet3_dev_rx_queue_release(void *rxq);
+void vmxnet3_dev_tx_queue_release(void *txq);
+
+int vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+int vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+int vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev);
+
+int vmxnet3_rss_configure(struct rte_eth_dev *dev);
+int vmxnet3_vlan_configure(struct rte_eth_dev *dev);
+
+uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _VMXNET3_ETHDEV_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_logs.h b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_logs.h
new file mode 100755
index 00000000..82639a08
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_logs.h
@@ -0,0 +1,74 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VMXNET3_LOGS_H_
+#define _VMXNET3_LOGS_H_
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_INIT
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
+#define PMD_INIT_FUNC_TRACE() do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#endif /* _VMXNET3_LOGS_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ring.h b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ring.h
new file mode 100755
index 00000000..c5abdb69
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_ring.h
@@ -0,0 +1,183 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VMXNET3_RING_H_
+#define _VMXNET3_RING_H_
+
+#define VMXNET3_RX_CMDRING_SIZE 2
+
+#define VMXNET3_DRIVER_VERSION_NUM 0x01012000
+
+/* Default ring size */
+#define VMXNET3_DEF_TX_RING_SIZE 512
+#define VMXNET3_DEF_RX_RING_SIZE 128
+
+#define VMXNET3_SUCCESS 0
+#define VMXNET3_FAIL -1
+
+#define TRUE 1
+#define FALSE 0
+
+
+typedef struct vmxnet3_buf_info {
+ uint16_t len;
+ struct rte_mbuf *m;
+ uint64_t bufPA;
+} vmxnet3_buf_info_t;
+
+typedef struct vmxnet3_cmd_ring {
+ vmxnet3_buf_info_t *buf_info;
+ uint32_t size;
+ uint32_t next2fill;
+ uint32_t next2comp;
+ uint8_t gen;
+ uint8_t rid;
+ Vmxnet3_GenericDesc *base;
+ uint64_t basePA;
+} vmxnet3_cmd_ring_t;
+
+static inline void
+vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
+{
+ ring->next2fill++;
+ if (unlikely(ring->next2fill == ring->size)) {
+ ring->next2fill = 0;
+ ring->gen = (uint8_t)(ring->gen ^ 1);
+ }
+}
+
+static inline void
+vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
+{
+ VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
+}
+
+static inline uint32_t
+vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
+{
+ return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
+ ring->next2comp - ring->next2fill - 1;
+}
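+
+/*
+ * Worked example (illustrative numbers): with size = 512, next2fill = 10 and
+ * next2comp = 5, next2comp <= next2fill, so the expression above evaluates to
+ * 512 + 5 - 10 - 1 = 506 free descriptors. One slot is always kept unused so
+ * that a full ring can be told apart from an empty one.
+ */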
+
+static inline bool
+vmxnet3_cmd_ring_desc_empty(struct vmxnet3_cmd_ring *ring)
+{
+ return (ring->next2comp == ring->next2fill);
+}
+
+typedef struct vmxnet3_comp_ring {
+ uint32_t size;
+ uint32_t next2proc;
+ uint8_t gen;
+ uint8_t intr_idx;
+ Vmxnet3_GenericDesc *base;
+ uint64_t basePA;
+} vmxnet3_comp_ring_t;
+
+struct vmxnet3_data_ring {
+ struct Vmxnet3_TxDataDesc *base;
+ uint32_t size;
+ uint64_t basePA;
+};
+
+static inline void
+vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
+{
+ ring->next2proc++;
+ if (unlikely(ring->next2proc == ring->size)) {
+ ring->next2proc = 0;
+ ring->gen = (uint8_t)(ring->gen ^ 1);
+ }
+}
+
+struct vmxnet3_txq_stats {
+ uint64_t drop_total; /* # of pkts dropped by the driver, the
+ * counters below track droppings due to
+ * different reasons
+ */
+ uint64_t drop_oversized;
+ uint64_t drop_hdr_inspect_err;
+ uint64_t drop_tso;
+ uint64_t deferred;
+ uint64_t tx_ring_full;
+ uint64_t linearized; /* # of pkts linearized */
+};
+
+typedef struct vmxnet3_tx_ctx {
+ int ip_type;
+ bool is_vlan;
+ bool is_cso;
+
+ uint16_t evl_tag; /* only valid when is_vlan == TRUE */
+ uint32_t eth_hdr_size; /* only valid for pkts requesting tso or csum
+ * offloading */
+ uint32_t ip_hdr_size;
+ uint32_t l4_hdr_size;
+} vmxnet3_tx_ctx_t;
+
+typedef struct vmxnet3_tx_queue {
+ struct vmxnet3_hw *hw;
+ struct vmxnet3_cmd_ring cmd_ring;
+ struct vmxnet3_comp_ring comp_ring;
+ struct vmxnet3_data_ring data_ring;
+ uint32_t qid;
+ struct Vmxnet3_TxQueueDesc *shared;
+ struct vmxnet3_txq_stats stats;
+ bool stopped;
+ uint16_t queue_id; /**< Device TX queue index. */
+ uint8_t port_id; /**< Device port identifier. */
+} vmxnet3_tx_queue_t;
+
+
+struct vmxnet3_rxq_stats {
+ uint64_t drop_total;
+ uint64_t drop_err;
+ uint64_t drop_fcs;
+ uint64_t rx_buf_alloc_failure;
+};
+
+typedef struct vmxnet3_rx_queue {
+ struct rte_mempool *mp;
+ struct vmxnet3_hw *hw;
+ struct vmxnet3_cmd_ring cmd_ring[VMXNET3_RX_CMDRING_SIZE];
+ struct vmxnet3_comp_ring comp_ring;
+ uint32_t qid1;
+ uint32_t qid2;
+ Vmxnet3_RxQueueDesc *shared;
+ struct vmxnet3_rxq_stats stats;
+ bool stopped;
+ uint16_t queue_id; /**< Device RX queue index. */
+ uint8_t port_id; /**< Device port identifier. */
+} vmxnet3_rx_queue_t;
+
+#endif /* _VMXNET3_RING_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_rxtx.c b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_rxtx.c
new file mode 100755
index 00000000..de0c11a0
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_vmxnet3/vmxnet3_rxtx.c
@@ -0,0 +1,1096 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+
+#include "vmxnet3/vmxnet3_defs.h"
+#include "vmxnet3_ring.h"
+
+#include "vmxnet3_logs.h"
+#include "vmxnet3_ethdev.h"
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+ (uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
+
+#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
+ (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+
+static uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
+
+static inline int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t* , uint8_t);
+static inline void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
+static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
+static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
+#endif
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+ return m;
+}
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
+static void
+vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
+{
+ uint32_t avail = 0;
+
+ if (rxq == NULL)
+ return;
+
+ PMD_RX_LOG(DEBUG,
+ "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.",
+ rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
+ PMD_RX_LOG(DEBUG,
+ "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
+ (unsigned long)rxq->cmd_ring[0].basePA,
+ (unsigned long)rxq->cmd_ring[1].basePA,
+ (unsigned long)rxq->comp_ring.basePA);
+
+ avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
+ PMD_RX_LOG(DEBUG,
+ "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
+ (uint32_t)rxq->cmd_ring[0].size, avail,
+ rxq->comp_ring.next2proc,
+ rxq->cmd_ring[0].size - avail);
+
+ avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
+ PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
+ (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
+ rxq->cmd_ring[1].size - avail);
+
+}
+
+static void
+vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
+{
+ uint32_t avail = 0;
+
+ if (txq == NULL)
+ return;
+
+ PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p data ring base : 0x%p.",
+ txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
+ PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
+ (unsigned long)txq->cmd_ring.basePA,
+ (unsigned long)txq->comp_ring.basePA,
+ (unsigned long)txq->data_ring.basePA);
+
+ avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
+ PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
+ (uint32_t)txq->cmd_ring.size, avail,
+ txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
+}
+#endif
+
+static inline void
+vmxnet3_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
+{
+ while (ring->next2comp != ring->next2fill) {
+ /* No need to worry about tx desc ownership, device is quiesced by now. */
+ vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;
+
+ if (buf_info->m) {
+ rte_pktmbuf_free(buf_info->m);
+ buf_info->m = NULL;
+ buf_info->bufPA = 0;
+ buf_info->len = 0;
+ }
+ vmxnet3_cmd_ring_adv_next2comp(ring);
+ }
+}
+
+static void
+vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
+{
+ vmxnet3_cmd_ring_release_mbufs(ring);
+ rte_free(ring->buf_info);
+ ring->buf_info = NULL;
+}
+
+
+void
+vmxnet3_dev_tx_queue_release(void *txq)
+{
+ vmxnet3_tx_queue_t *tq = txq;
+
+ if (tq != NULL) {
+ /* Release the cmd_ring */
+ vmxnet3_cmd_ring_release(&tq->cmd_ring);
+ }
+}
+
+void
+vmxnet3_dev_rx_queue_release(void *rxq)
+{
+ int i;
+ vmxnet3_rx_queue_t *rq = rxq;
+
+ if (rq != NULL) {
+ /* Release both the cmd_rings */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+ vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
+ }
+}
+
+static void
+vmxnet3_dev_tx_queue_reset(void *txq)
+{
+ vmxnet3_tx_queue_t *tq = txq;
+ struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
+ struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
+ struct vmxnet3_data_ring *data_ring = &tq->data_ring;
+ int size;
+
+ if (tq != NULL) {
+ /* Release the cmd_ring mbufs */
+ vmxnet3_cmd_ring_release_mbufs(&tq->cmd_ring);
+ }
+
+ /* Tx vmxnet rings structure initialization*/
+ ring->next2fill = 0;
+ ring->next2comp = 0;
+ ring->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
+ size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
+ size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+
+ memset(ring->base, 0, size);
+}
+
+static void
+vmxnet3_dev_rx_queue_reset(void *rxq)
+{
+ int i;
+ vmxnet3_rx_queue_t *rq = rxq;
+ struct vmxnet3_cmd_ring *ring0, *ring1;
+ struct vmxnet3_comp_ring *comp_ring;
+ int size;
+
+ if (rq != NULL) {
+ /* Release both the cmd_rings mbufs */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+ vmxnet3_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
+ }
+
+ ring0 = &rq->cmd_ring[0];
+ ring1 = &rq->cmd_ring[1];
+ comp_ring = &rq->comp_ring;
+
+ /* Rx vmxnet rings structure initialization */
+ ring0->next2fill = 0;
+ ring1->next2fill = 0;
+ ring0->next2comp = 0;
+ ring1->next2comp = 0;
+ ring0->gen = VMXNET3_INIT_GEN;
+ ring1->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
+ size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+
+ memset(ring0->base, 0, size);
+}
+
+void
+vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ unsigned i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (txq != NULL) {
+ txq->stopped = TRUE;
+ vmxnet3_dev_tx_queue_reset(txq);
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq != NULL) {
+ rxq->stopped = TRUE;
+ vmxnet3_dev_rx_queue_reset(rxq);
+ }
+ }
+}
+
+static inline void
+vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
+{
+ int completed = 0;
+ struct rte_mbuf *mbuf;
+ vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
+ struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
+ (comp_ring->base + comp_ring->next2proc);
+
+ while (tcd->gen == comp_ring->gen) {
+
+ /* Release cmd_ring descriptor and free mbuf */
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+ VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
+#endif
+ mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
+ if (unlikely(mbuf == NULL))
+ rte_panic("EOP desc does not point to a valid mbuf");
+ else
+ rte_pktmbuf_free(mbuf);
+
+
+ txq->cmd_ring.buf_info[tcd->txdIdx].m = NULL;
+ /* Mark the txd for which tcd was generated as completed */
+ vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+
+ vmxnet3_comp_ring_adv_next2proc(comp_ring);
+ tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
+ comp_ring->next2proc);
+ completed++;
+ }
+
+ PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
+}
+
+/*
+ * TRex patch: allow converting a multi-segment mbuf into a single-segment
+ * mbuf, since this vmxnet3 TX path does not support scatter packets.
+ */
+typedef struct rte_mbuf * (*rte_mbuf_convert_to_one_seg_t)(struct rte_mbuf *m);
+
+rte_mbuf_convert_to_one_seg_t vmxnet3_xmit_convert_callback;
+
+int vmxnet3_xmit_set_callback(rte_mbuf_convert_to_one_seg_t cb)
+{
+ vmxnet3_xmit_convert_callback = cb;
+ return 0;
+}
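+
+/*
+ * Usage sketch (hypothetical helper, not part of this patch): the application
+ * registers a callback that linearizes a chained mbuf before transmission,
+ * e.g.
+ *
+ *   static struct rte_mbuf *my_linearize(struct rte_mbuf *m)
+ *   {
+ *       // copy every segment into one freshly allocated mbuf, free the
+ *       // original chain, and return the new mbuf (or NULL on failure)
+ *   }
+ *
+ *   vmxnet3_xmit_set_callback(my_linearize);
+ *
+ * vmxnet3_xmit_pkts() then invokes the callback whenever txm->nb_segs != 1
+ * instead of dropping the packet.
+ */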
+
+
+uint16_t
+vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+ Vmxnet3_TxDesc *txd = NULL;
+ vmxnet3_buf_info_t *tbi = NULL;
+ struct vmxnet3_hw *hw;
+ struct rte_mbuf *txm;
+ vmxnet3_tx_queue_t *txq = tx_queue;
+
+ hw = txq->hw;
+
+ if (unlikely(txq->stopped)) {
+ PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
+ return 0;
+ }
+
+ /* Free up the comp_descriptors aggressively */
+ vmxnet3_tq_tx_complete(txq);
+
+ nb_tx = 0;
+ while (nb_tx < nb_pkts) {
+
+ if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
+ int copy_size = 0;
+
+ txm = tx_pkts[nb_tx];
+ /* Don't support scatter packets yet, free them if met */
+ if (txm->nb_segs != 1) {
+ if (vmxnet3_xmit_convert_callback) {
+ txm = vmxnet3_xmit_convert_callback(txm);
+ } else {
+ txq->stats.drop_total++;
+ nb_tx++;
+ rte_pktmbuf_free(txm);
+ continue;
+ }
+ }
+
+ if (!txm) {
+ txq->stats.drop_total++;
+ nb_tx++;
+ continue;
+ }
+
+ /* cur_mtu does not include the Ethernet header, so add it before comparing */
+ if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
+ PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
+ rte_pktmbuf_free(txm);
+ txq->stats.drop_total++;
+ nb_tx++;
+ continue;
+ }
+
+ txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);
+ if (rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+ struct Vmxnet3_TxDataDesc *tdd;
+
+ tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
+ copy_size = rte_pktmbuf_pkt_len(txm);
+ rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
+ }
+
+ /* Fill the tx descriptor */
+ tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
+ tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
+ if (copy_size)
+ txd->addr = rte_cpu_to_le_64(txq->data_ring.basePA +
+ txq->cmd_ring.next2fill *
+ sizeof(struct Vmxnet3_TxDataDesc));
+ else
+ txd->addr = tbi->bufPA;
+ txd->len = txm->data_len;
+
+ /* Mark the last descriptor as End of Packet. */
+ txd->cq = 1;
+ txd->eop = 1;
+
+ /* Add VLAN tag if requested */
+ if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+ txd->ti = 1;
+ txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
+ }
+
+ /* Record current mbuf for freeing it later in tx complete */
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+ VMXNET3_ASSERT(txm);
+#endif
+ tbi->m = txm;
+
+ /* Set the offloading mode to default */
+ txd->hlen = 0;
+ txd->om = VMXNET3_OM_NONE;
+ txd->msscof = 0;
+
+ /* finally flip the GEN bit of the SOP desc */
+ txd->gen = txq->cmd_ring.gen;
+ txq->shared->ctrl.txNumDeferred++;
+
+ /* move to the next2fill descriptor */
+ vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
+ nb_tx++;
+
+ } else {
+ PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)");
+ txq->stats.drop_total += (nb_pkts - nb_tx);
+ break;
+ }
+ }
+
+ PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);
+
+ if (txq->shared->ctrl.txNumDeferred >= txq->shared->ctrl.txThreshold) {
+
+ txq->shared->ctrl.txNumDeferred = 0;
+ /* Notify vSwitch that packets are available. */
+ VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
+ txq->cmd_ring.next2fill);
+ }
+
+ return nb_tx;
+}
+
+/*
+ * Allocates mbufs and clusters. Post rx descriptors with buffer details
+ * so that device can receive packets in those buffers.
+ * Ring layout:
+ * Of the two rings, the 1st ring contains buffers of type 0 and type 1.
+ * bufs_per_pkt is set such that for non-LRO cases all the buffers required
+ * by a frame fit in the 1st ring (1st buf of type 0, the rest of type 1).
+ * The 2nd ring contains buffers of type 1 only and is mostly used for LRO.
+ *
+ */
+static inline int
+vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
+{
+ int err = 0;
+ uint32_t i = 0, val = 0;
+ struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
+
+ if (ring_id == 0) {
+ /* Usually: One HEAD type buf per packet
+ * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
+ * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
+ */
+
+ /* We use single packet buffer so all heads here */
+ val = VMXNET3_RXD_BTYPE_HEAD;
+ } else {
+ /* All BODY type buffers for 2nd ring */
+ val = VMXNET3_RXD_BTYPE_BODY;
+ }
+
+ while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
+ struct Vmxnet3_RxDesc *rxd;
+ struct rte_mbuf *mbuf;
+ vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
+
+ rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
+
+ /* Allocate blank mbuf for the current Rx Descriptor */
+ mbuf = rte_rxmbuf_alloc(rxq->mp);
+ if (unlikely(mbuf == NULL)) {
+ PMD_RX_LOG(ERR, "Error allocating mbuf in %s", __func__);
+ rxq->stats.rx_buf_alloc_failure++;
+ err = ENOMEM;
+ break;
+ }
+
+ /*
+ * Load mbuf pointer into buf_info[ring_size]
+ * buf_info structure is equivalent to cookie for virtio-virtqueue
+ */
+ buf_info->m = mbuf;
+ buf_info->len = (uint16_t)(mbuf->buf_len -
+ RTE_PKTMBUF_HEADROOM);
+ buf_info->bufPA = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf);
+
+ /* Load Rx Descriptor with the buffer's GPA */
+ rxd->addr = buf_info->bufPA;
+
+ /* After this point rxd->addr MUST not be NULL */
+ rxd->btype = val;
+ rxd->len = buf_info->len;
+ /* Flip gen bit at the end to change ownership */
+ rxd->gen = ring->gen;
+
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+ i++;
+ }
+
+ /* Return error only if no buffers are posted at present */
+ if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
+ return -err;
+ else
+ return i;
+}
+
+/*
+ * Process the Rx Completion Ring of given vmxnet3_rx_queue
+ * for nb_pkts burst and return the number of packets received
+ */
+uint16_t
+vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ uint16_t nb_rx;
+ uint32_t nb_rxd, idx;
+ uint8_t ring_idx;
+ vmxnet3_rx_queue_t *rxq;
+ Vmxnet3_RxCompDesc *rcd;
+ vmxnet3_buf_info_t *rbi;
+ Vmxnet3_RxDesc *rxd;
+ struct rte_mbuf *rxm = NULL;
+ struct vmxnet3_hw *hw;
+
+ nb_rx = 0;
+ ring_idx = 0;
+ nb_rxd = 0;
+ idx = 0;
+
+ rxq = rx_queue;
+ hw = rxq->hw;
+
+ rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
+
+ if (unlikely(rxq->stopped)) {
+ PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
+ return 0;
+ }
+
+ while (rcd->gen == rxq->comp_ring.gen) {
+ if (nb_rx >= nb_pkts)
+ break;
+
+ idx = rcd->rxdIdx;
+ ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
+ rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
+ rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
+
+ if (unlikely(rcd->sop != 1 || rcd->eop != 1)) {
+ rte_pktmbuf_free_seg(rbi->m);
+ PMD_RX_LOG(DEBUG, "Packet spread across multiple buffers\n)");
+ goto rcd_done;
+ }
+
+ PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+ VMXNET3_ASSERT(rcd->len <= rxd->len);
+ VMXNET3_ASSERT(rbi->m);
+#endif
+ if (unlikely(rcd->len == 0)) {
+ PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]\n)",
+ ring_idx, idx);
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+ VMXNET3_ASSERT(rcd->sop && rcd->eop);
+#endif
+ rte_pktmbuf_free_seg(rbi->m);
+ goto rcd_done;
+ }
+
+ /* Assuming a packet is coming in a single packet buffer */
+ if (unlikely(rxd->btype != VMXNET3_RXD_BTYPE_HEAD)) {
+ PMD_RX_LOG(DEBUG,
+ "Alert : Misbehaving device, incorrect "
+ " buffer type used. iPacket dropped.");
+ rte_pktmbuf_free_seg(rbi->m);
+ goto rcd_done;
+ }
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+ VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
+#endif
+ /* Get the packet buffer pointer from buf_info */
+ rxm = rbi->m;
+
+ /* Clear descriptor associated buf_info to be reused */
+ rbi->m = NULL;
+ rbi->bufPA = 0;
+
+ /* Update the index that we received a packet */
+ rxq->cmd_ring[ring_idx].next2comp = idx;
+
+ /* For RCD with EOP set, check if there is frame error */
+ if (unlikely(rcd->err)) {
+ rxq->stats.drop_total++;
+ rxq->stats.drop_err++;
+
+ if (!rcd->fcs) {
+ rxq->stats.drop_fcs++;
+ PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
+ }
+ PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
+ (int)(rcd - (struct Vmxnet3_RxCompDesc *)
+ rxq->comp_ring.base), rcd->rxdIdx);
+ rte_pktmbuf_free_seg(rxm);
+ goto rcd_done;
+ }
+
+ /* Check for hardware stripped VLAN tag */
+ if (rcd->ts) {
+ PMD_RX_LOG(DEBUG, "Received packet with vlan ID: %d.",
+ rcd->tci);
+ rxm->ol_flags = PKT_RX_VLAN_PKT;
+ /* Copy vlan tag in packet buffer */
+ rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+ } else {
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+ }
+
+ /* Initialize newly received packet buffer */
+ rxm->port = rxq->port_id;
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = (uint16_t)rcd->len;
+ rxm->data_len = (uint16_t)rcd->len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /* Check packet type, checksum errors, etc. Only support IPv4 for now. */
+ if (rcd->v4) {
+ struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
+ struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
+
+ if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
+ rxm->ol_flags |= PKT_RX_IPV4_HDR_EXT;
+ else
+ rxm->ol_flags |= PKT_RX_IPV4_HDR;
+
+ if (!rcd->cnc) {
+ if (!rcd->ipc)
+ rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if ((rcd->tcp || rcd->udp) && !rcd->tuc)
+ rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+
+ rx_pkts[nb_rx++] = rxm;
+rcd_done:
+ rxq->cmd_ring[ring_idx].next2comp = idx;
+ VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);
+
+ /* It's time to allocate some new buf and renew descriptors */
+ vmxnet3_post_rx_bufs(rxq, ring_idx);
+ if (unlikely(rxq->shared->ctrl.updateRxProd)) {
+ VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
+ rxq->cmd_ring[ring_idx].next2fill);
+ }
+
+ /* Advance to the next descriptor in comp_ring */
+ vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);
+
+ rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
+ nb_rxd++;
+ if (nb_rxd > rxq->cmd_ring[0].size) {
+ PMD_RX_LOG(ERR,
+ "Used up quota of receiving packets,"
+ " relinquish control.");
+ break;
+ }
+ }
+
+ return nb_rx;
+}
+
+/*
+ * Create memzone for device rings. malloc can't be used as the physical address is
+ * needed. If the memzone is already created, then this function returns a ptr
+ * to the old one.
+ */
+static const struct rte_memzone *
+ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
+ uint16_t queue_id, uint32_t ring_size, int socket_id)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ dev->driver->pci_drv.name, ring_name,
+ dev->data->port_id, queue_id);
+
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ return mz;
+
+ return rte_memzone_reserve_aligned(z_name, ring_size,
+ socket_id, 0, VMXNET3_RING_BA_ALIGN);
+}
+
+int
+vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ const struct rte_memzone *mz;
+ struct vmxnet3_tx_queue *txq;
+ struct vmxnet3_cmd_ring *ring;
+ struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_data_ring *data_ring;
+ int size;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) !=
+ ETH_TXQ_FLAGS_NOMULTSEGS) {
+ PMD_INIT_LOG(ERR, "TX Multi segment not support yet");
+ return -EINVAL;
+ }
+
+ if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS) !=
+ ETH_TXQ_FLAGS_NOOFFLOADS) {
+ PMD_INIT_LOG(ERR, "TX not support offload function yet");
+ return -EINVAL;
+ }
+
+ txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE);
+ if (txq == NULL) {
+ PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->shared = &hw->tqd_start[queue_idx];
+ txq->hw = hw;
+ txq->qid = queue_idx;
+ txq->stopped = TRUE;
+
+ ring = &txq->cmd_ring;
+ comp_ring = &txq->comp_ring;
+ data_ring = &txq->data_ring;
+
+ /* Tx vmxnet ring length should be between 512-4096 */
+ if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
+ VMXNET3_DEF_TX_RING_SIZE);
+ return -EINVAL;
+ } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
+ VMXNET3_TX_RING_MAX_SIZE);
+ return -EINVAL;
+ } else {
+ ring->size = nb_desc;
+ ring->size &= ~VMXNET3_RING_SIZE_MASK;
+ }
+ comp_ring->size = data_ring->size = ring->size;
+
+ /* Tx vmxnet rings structure initialization*/
+ ring->next2fill = 0;
+ ring->next2comp = 0;
+ ring->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
+ size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
+ size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+
+ mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ /* cmd_ring initialization */
+ ring->base = mz->addr;
+ ring->basePA = mz->phys_addr;
+
+ /* comp_ring initialization */
+ comp_ring->base = ring->base + ring->size;
+ comp_ring->basePA = ring->basePA +
+ (sizeof(struct Vmxnet3_TxDesc) * ring->size);
+
+ /* data_ring initialization */
+ data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size);
+ data_ring->basePA = comp_ring->basePA +
+ (sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size);
+
+ /* cmd_ring0 buf_info allocation */
+ ring->buf_info = rte_zmalloc("tx_ring_buf_info",
+ ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
+ if (ring->buf_info == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");
+ return -ENOMEM;
+ }
+
+ /* Update the data portion with txq */
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+}
+
+int
+vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *mz;
+ struct vmxnet3_rx_queue *rxq;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
+ struct vmxnet3_comp_ring *comp_ring;
+ int size;
+ uint8_t i;
+ char mem_name[32];
+ uint16_t buf_size;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+
+ PMD_INIT_FUNC_TRACE();
+
+ mbp_priv = (struct rte_pktmbuf_pool_private *)
+ rte_mempool_get_priv(mp);
+ buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM);
+
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) {
+ PMD_INIT_LOG(ERR, "buf_size = %u, max_pkt_len = %u, "
+ "VMXNET3 don't support scatter packets yet",
+ buf_size, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ return -EINVAL;
+ }
+
+ rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL) {
+ PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
+ return -ENOMEM;
+ }
+
+ rxq->mp = mp;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->shared = &hw->rqd_start[queue_idx];
+ rxq->hw = hw;
+ rxq->qid1 = queue_idx;
+ rxq->qid2 = queue_idx + hw->num_rx_queues;
+ rxq->stopped = TRUE;
+
+ ring0 = &rxq->cmd_ring[0];
+ ring1 = &rxq->cmd_ring[1];
+ comp_ring = &rxq->comp_ring;
+
+ /* Rx vmxnet ring length should be between VMXNET3_DEF_RX_RING_SIZE and VMXNET3_RX_RING_MAX_SIZE */
+ if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: %u",
+ VMXNET3_DEF_RX_RING_SIZE);
+ return -EINVAL;
+ } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: %u",
+ VMXNET3_RX_RING_MAX_SIZE);
+ return -EINVAL;
+ } else {
+ ring0->size = nb_desc;
+ ring0->size &= ~VMXNET3_RING_SIZE_MASK;
+ ring1->size = ring0->size;
+ }
+
+ comp_ring->size = ring0->size + ring1->size;
+
+ /* Rx vmxnet rings structure initialization */
+ ring0->next2fill = 0;
+ ring1->next2fill = 0;
+ ring0->next2comp = 0;
+ ring1->next2comp = 0;
+ ring0->gen = VMXNET3_INIT_GEN;
+ ring1->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
+ size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+
+ mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ /* cmd_ring0 initialization */
+ ring0->base = mz->addr;
+ ring0->basePA = mz->phys_addr;
+
+ /* cmd_ring1 initialization */
+ ring1->base = ring0->base + ring0->size;
+ ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;
+
+ /* comp_ring initialization */
+ comp_ring->base = ring1->base + ring1->size;
+ comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
+ ring1->size;
+
+ /* cmd_ring0-cmd_ring1 buf_info allocation */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
+
+ ring = &rxq->cmd_ring[i];
+ ring->rid = i;
+ snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
+
+ ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
+ if (ring->buf_info == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
+ return -ENOMEM;
+ }
+ }
+
+ /* Update the data portion with rxq */
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ return 0;
+}
+
+/*
+ * Initializes Receive Unit
+ * Load mbufs in rx queue in advance
+ */
+int
+vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ int i, ret;
+ uint8_t j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < hw->num_rx_queues; i++) {
+ vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+
+ for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
+ /* Passing 0 as alloc_num will allocate full ring */
+ ret = vmxnet3_post_rx_bufs(rxq, j);
+ if (ret <= 0) {
+ PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d", i, j);
+ return -ret;
+ }
+ /* Updating device with the index:next2fill to fill the mbufs for coming packets */
+ if (unlikely(rxq->shared->ctrl.updateRxProd)) {
+ VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
+ rxq->cmd_ring[j].next2fill);
+ }
+ }
+ rxq->stopped = FALSE;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
+ txq->stopped = FALSE;
+ }
+
+ return 0;
+}
+
+static uint8_t rss_intel_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+/*
+ * Configure RSS feature
+ */
+int
+vmxnet3_rss_configure(struct rte_eth_dev *dev)
+{
+#define VMXNET3_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_IPV4_TCP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_IPV6_TCP)
+
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct VMXNET3_RSSConf *dev_rss_conf;
+ struct rte_eth_rss_conf *port_rss_conf;
+ uint64_t rss_hf;
+ uint8_t i, j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_rss_conf = hw->rss_conf;
+ port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+ /* loading hashFunc */
+ dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
+ /* loading hashKeySize */
+ dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
+ /* loading indTableSize : Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
+ dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
+
+ if (port_rss_conf->rss_key == NULL) {
+ /* Default hash key */
+ port_rss_conf->rss_key = rss_intel_key;
+ }
+
+ /* loading hashKey */
+ memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, dev_rss_conf->hashKeySize);
+
+ /* loading indTable */
+ for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ dev_rss_conf->indTable[i] = j;
+ }
+
+ /* loading hashType */
+ dev_rss_conf->hashType = 0;
+ rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
+ if (rss_hf & ETH_RSS_IPV4)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
+ if (rss_hf & ETH_RSS_IPV4_TCP)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
+ if (rss_hf & ETH_RSS_IPV6)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
+ if (rss_hf & ETH_RSS_IPV6_TCP)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
+
+ return VMXNET3_SUCCESS;
+}
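+
+/*
+ * Worked example (illustrative configuration): with num_rx_queues = 2 the code
+ * above sets indTableSize = 8 and fills indTable with 0,1,0,1,0,1,0,1, so hash
+ * results are spread round-robin across the two RX queues. With the default
+ * key and rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP, hashType ends up as
+ * VMXNET3_RSS_HASH_TYPE_IPV4 | VMXNET3_RSS_HASH_TYPE_TCP_IPV4.
+ */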
+
+/*
+ * Configure VLAN Filter feature
+ */
+int
+vmxnet3_vlan_configure(struct rte_eth_dev *dev)
+{
+ uint8_t i;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Verify if this tag is already set */
+ for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
+ /* Filter all vlan tags out by default */
+ vf_table[i] = 0;
+ /* To-Do: Provide another routine in dev_ops for user config */
+
+ PMD_INIT_LOG(DEBUG, "Registering VLAN portid: %"PRIu8" tag %u",
+ dev->data->port_id, vf_table[i]);
+ }
+
+ return VMXNET3_SUCCESS;
+}
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/Makefile b/src/dpdk_lib18/librte_pmd_xenvirt/Makefile
new file mode 100755
index 00000000..01bfcaa9
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_xenvirt/Makefile
@@ -0,0 +1,58 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_xenvirt.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += rte_eth_xenvirt.c rte_mempool_gntalloc.c rte_xen_lib.c
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_eth_xenvirt.h
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_net lib/librte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_cmdline
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.c b/src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.c
new file mode 100755
index 00000000..04e30c94
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.c
@@ -0,0 +1,716 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <sys/user.h>
+#include <linux/binfmts.h>
+#include <xen/xen-compat.h>
+#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
+#include <xs.h>
+#else
+#include <xenstore.h>
+#endif
+#include <linux/virtio_ring.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_dev.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "rte_xen_lib.h"
+#include "virtqueue.h"
+#include "rte_eth_xenvirt.h"
+
+#define VQ_DESC_NUM 256
+#define VIRTIO_MBUF_BURST_SZ 64
+
+/* virtio_idx is increased after a new device is created. */
+static int virtio_idx = 0;
+
+static const char *drivername = "xen dummy virtio PMD";
+
+static struct rte_eth_link pmd_link = {
+ .link_speed = 10000,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = 0
+};
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+
+ return m;
+}
+
+
+static uint16_t
+eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct virtqueue *rxvq = q;
+ struct rte_mbuf *rxm, *new_mbuf;
+ uint16_t nb_used, num;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ uint32_t i;
+ struct pmd_internals *pi = rxvq->internals;
+
+ nb_used = VIRTQUEUE_NUSED(rxvq);
+
+ rte_compiler_barrier(); /* rmb */
+ num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
+ num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
+ if (unlikely(num == 0)) return 0;
+
+ num = virtqueue_dequeue_burst(rxvq, rx_pkts, len, num);
+ PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);
+ for (i = 0; i < num ; i ++) {
+ rxm = rx_pkts[i];
+ PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
+ rxm->next = NULL;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
+ rxm->nb_segs = 1;
+ rxm->port = pi->port_id;
+ rxm->pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
+ }
+ /* allocate new mbuf for the used descriptor */
+ while (likely(!virtqueue_full(rxvq))) {
+ new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
+ if (unlikely(new_mbuf == NULL)) {
+ break;
+ }
+ if (unlikely(virtqueue_enqueue_recv_refill(rxvq, new_mbuf))) {
+ rte_pktmbuf_free_seg(new_mbuf);
+ break;
+ }
+ }
+ pi->eth_stats.ipackets += num;
+ return num;
+}
+
+static uint16_t
+eth_xenvirt_tx(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct virtqueue *txvq = tx_queue;
+ struct rte_mbuf *txm;
+ uint16_t nb_used, nb_tx, num, i;
+ int error;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *snd_pkts[VIRTIO_MBUF_BURST_SZ];
+ struct pmd_internals *pi = txvq->internals;
+
+ nb_tx = 0;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+ nb_used = VIRTQUEUE_NUSED(txvq);
+
+ rte_compiler_barrier(); /* rmb */
+
+ num = (uint16_t)(likely(nb_used <= VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
+ num = virtqueue_dequeue_burst(txvq, snd_pkts, len, num);
+
+ for (i = 0; i < num ; i ++) {
+		/* mergeable buffers not supported, one segment only */
+ rte_pktmbuf_free_seg(snd_pkts[i]);
+ }
+
+ while (nb_tx < nb_pkts) {
+ if (likely(!virtqueue_full(txvq))) {
+ /* TODO drop tx_pkts if it contains multiple segments */
+ txm = tx_pkts[nb_tx];
+ error = virtqueue_enqueue_xmit(txvq, txm);
+ if (unlikely(error)) {
+ if (error == ENOSPC)
+ PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0\n");
+ else if (error == EMSGSIZE)
+ PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1\n");
+ else
+ PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d\n", error);
+ break;
+ }
+ nb_tx++;
+ } else {
+ PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
+ /* virtqueue_notify not needed in our para-virt solution */
+ break;
+ }
+ }
+ pi->eth_stats.opackets += nb_tx;
+ return nb_tx;
+}
+
+static int
+eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ RTE_LOG(ERR, PMD, "%s\n", __func__);
+ return 0;
+}
+
+/*
+ * Create a shared page between guest and host.
+ * The host monitors this page; if it is cleared on unmap, the host
+ * then does the necessary cleanup.
+ */
+static void
+gntalloc_vring_flag(int vtidx)
+{
+ char key_str[PATH_MAX];
+ char val_str[PATH_MAX];
+ uint32_t gref_tmp;
+ void *ptr;
+
+ if (grefwatch_from_alloc(&gref_tmp, &ptr)) {
+ RTE_LOG(ERR, PMD, "grefwatch_from_alloc error\n");
+ exit(0);
+ }
+
+ *(uint8_t *)ptr = MAP_FLAG;
+ snprintf(val_str, sizeof(val_str), "%u", gref_tmp);
+ snprintf(key_str, sizeof(key_str),
+ DPDK_XENSTORE_PATH"%d"VRING_FLAG_STR, vtidx);
+ xenstore_write(key_str, val_str);
+}
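+
+/*
+ * gntalloc_vring_flag(): e.g. for virtio index 0, the gref of the shared
+ * flag page is written (as a decimal string) to the xenstore key
+ * <domain path>/control/dpdk/0_vring_flag, and the first byte of the page
+ * is set to MAP_FLAG (0xA5) so the host can detect when it is cleared.
+ */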
+
+/*
+ * Notify the host that this virtio device is started.
+ * The host can then start polling this device.
+ */
+static void
+dev_start_notify(int vtidx)
+{
+ char key_str[PATH_MAX];
+ char val_str[PATH_MAX];
+
+ RTE_LOG(INFO, PMD, "%s: virtio %d is started\n", __func__, vtidx);
+ gntalloc_vring_flag(vtidx);
+
+ snprintf(key_str, sizeof(key_str), "%s%s%d",
+ DPDK_XENSTORE_PATH, EVENT_TYPE_START_STR,
+ vtidx);
+ snprintf(val_str, sizeof(val_str), "1");
+ xenstore_write(key_str, val_str);
+}
+
+/*
+ * Notify the host that this virtio device is stopped.
+ * The host can then stop polling this device.
+ */
+static void
+dev_stop_notify(int vtidx)
+{
+ RTE_SET_USED(vtidx);
+}
+
+
+static int
+update_mac_address(struct ether_addr *mac_addrs, int vtidx)
+{
+ char key_str[PATH_MAX];
+ char val_str[PATH_MAX];
+ int rv;
+
+ if (mac_addrs == NULL) {
+ RTE_LOG(ERR, PMD, "%s: NULL pointer mac specified\n", __func__);
+ return -1;
+ }
+ rv = snprintf(key_str, sizeof(key_str),
+ DPDK_XENSTORE_PATH"%d_ether_addr", vtidx);
+ if (rv == -1)
+ return rv;
+ rv = snprintf(val_str, sizeof(val_str), "%02x:%02x:%02x:%02x:%02x:%02x",
+ mac_addrs->addr_bytes[0],
+ mac_addrs->addr_bytes[1],
+ mac_addrs->addr_bytes[2],
+ mac_addrs->addr_bytes[3],
+ mac_addrs->addr_bytes[4],
+ mac_addrs->addr_bytes[5]);
+ if (rv == -1)
+ return rv;
+ if (xenstore_write(key_str, val_str))
+ return rv;
+ return 0;
+}
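+
+/*
+ * update_mac_address(): e.g. for virtio index 0 and an illustrative MAC
+ * 00:11:22:33:44:55, this writes the xenstore key
+ * <domain path>/control/dpdk/0_ether_addr with the value "00:11:22:33:44:55".
+ */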
+
+
+static int
+eth_dev_start(struct rte_eth_dev *dev)
+{
+ struct virtqueue *rxvq = dev->data->rx_queues[0];
+ struct virtqueue *txvq = dev->data->tx_queues[0];
+ struct rte_mbuf *m;
+ struct pmd_internals *pi = (struct pmd_internals *)dev->data->dev_private;
+ int rv;
+
+ dev->data->dev_link.link_status = 1;
+ while (!virtqueue_full(rxvq)) {
+ m = rte_rxmbuf_alloc(rxvq->mpool);
+ if (m == NULL)
+ break;
+ /* Enqueue allocated buffers. */
+ if (virtqueue_enqueue_recv_refill(rxvq, m)) {
+ rte_pktmbuf_free_seg(m);
+ break;
+ }
+ }
+
+ rxvq->internals = pi;
+ txvq->internals = pi;
+
+ rv = update_mac_address(dev->data->mac_addrs, pi->virtio_idx);
+ if (rv)
+ return -1;
+ dev_start_notify(pi->virtio_idx);
+
+ return 0;
+}
+
+static void
+eth_dev_stop(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pi = (struct pmd_internals *)dev->data->dev_private;
+
+ dev->data->dev_link.link_status = 0;
+ dev_stop_notify(pi->virtio_idx);
+}
+
+/*
+ * Notify the host that this virtio device is closed.
+ * The host can then do the necessary cleanup for this device.
+ */
+static void
+eth_dev_close(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+}
+
+static void
+eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ RTE_SET_USED(internals);
+ dev_info->driver_name = drivername;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)2048;
+ dev_info->max_rx_queues = (uint16_t)1;
+ dev_info->max_tx_queues = (uint16_t)1;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->pci_dev = NULL;
+}
+
+static void
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ if(stats)
+ rte_memcpy(stats, &internals->eth_stats, sizeof(*stats));
+}
+
+static void
+eth_stats_reset(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ /* Reset software totals */
+ memset(&internals->eth_stats, 0, sizeof(internals->eth_stats));
+}
+
+static void
+eth_queue_release(void *q __rte_unused)
+{
+}
+
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+/*
+ * Create a shared vring between guest and host.
+ * Memory is allocated through the grant alloc driver, so it is not physically contiguous.
+ */
+static void *
+gntalloc_vring_create(int queue_type, uint32_t size, int vtidx)
+{
+ char key_str[PATH_MAX] = {0};
+ char val_str[PATH_MAX] = {0};
+ void *va = NULL;
+ int pg_size;
+ uint32_t pg_num;
+ uint32_t *gref_arr = NULL;
+ phys_addr_t *pa_arr = NULL;
+ uint64_t start_index;
+ int rv;
+
+ pg_size = getpagesize();
+ size = RTE_ALIGN_CEIL(size, pg_size);
+ pg_num = size / pg_size;
+
+ gref_arr = calloc(pg_num, sizeof(gref_arr[0]));
+ pa_arr = calloc(pg_num, sizeof(pa_arr[0]));
+
+ if (gref_arr == NULL || pa_arr == NULL) {
+ RTE_LOG(ERR, PMD, "%s: calloc failed\n", __func__);
+ goto out;
+ }
+
+ va = gntalloc(size, gref_arr, &start_index);
+ if (va == NULL) {
+ RTE_LOG(ERR, PMD, "%s: gntalloc failed\n", __func__);
+ goto out;
+ }
+
+ if (get_phys_map(va, pa_arr, pg_num, pg_size))
+ goto out;
+
+ /* write in xenstore gref and pfn for each page of vring */
+ if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) {
+ gntfree(va, size, start_index);
+ va = NULL;
+ goto out;
+ }
+
+ if (queue_type == VTNET_RQ)
+ rv = snprintf(key_str, sizeof(key_str), DPDK_XENSTORE_PATH"%d"RXVRING_XENSTORE_STR, vtidx);
+ else
+ rv = snprintf(key_str, sizeof(key_str), DPDK_XENSTORE_PATH"%d"TXVRING_XENSTORE_STR, vtidx);
+ if (rv == -1 || xenstore_write(key_str, val_str) == -1) {
+ gntfree(va, size, start_index);
+ va = NULL;
+ }
+out:
+ if (pa_arr)
+ free(pa_arr);
+ if (gref_arr)
+ free(gref_arr);
+
+ return va;
+}
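+
+/*
+ * gntalloc_vring_create(): e.g. for virtio index 0 the RX vring ends up under
+ * <domain path>/control/dpdk/0_rx_vring_gref (TX under 0_tx_vring_gref), with
+ * a comma-separated gref list such as "1537,1524,1533" (values illustrative)
+ * referencing the shared pages that hold the (gref, pfn) table built by
+ * grant_node_create().
+ */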
+
+
+
+static struct virtqueue *
+virtio_queue_setup(struct rte_eth_dev *dev, int queue_type)
+{
+ struct virtqueue *vq = NULL;
+ uint16_t vq_size = VQ_DESC_NUM;
+ int i = 0;
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+ size_t size;
+ struct vring *vr;
+
+ /* Allocate memory for virtqueue. */
+ if (queue_type == VTNET_RQ) {
+ snprintf(vq_name, sizeof(vq_name), "port%d_rvq",
+ dev->data->port_id);
+ vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
+ vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
+ if (vq == NULL) {
+			RTE_LOG(ERR, PMD, "%s: unable to allocate virtqueue\n", __func__);
+ return NULL;
+ }
+ memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
+ } else if(queue_type == VTNET_TQ) {
+ snprintf(vq_name, sizeof(vq_name), "port%d_tvq",
+ dev->data->port_id);
+ vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
+ vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
+ if (vq == NULL) {
+			RTE_LOG(ERR, PMD, "%s: unable to allocate virtqueue\n", __func__);
+ return NULL;
+ }
+ memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
+ }
+
+ vq->vq_alignment = VIRTIO_PCI_VRING_ALIGN;
+ vq->vq_nentries = vq_size;
+ vq->vq_free_cnt = vq_size;
+	/* Calculate vring size according to the virtio spec */
+ size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ /* Allocate memory for virtio vring through gntalloc driver*/
+ vq->vq_ring_virt_mem = gntalloc_vring_create(queue_type, vq->vq_ring_size,
+ ((struct pmd_internals *)dev->data->dev_private)->virtio_idx);
+ memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
+ vr = &vq->vq_ring;
+ vring_init(vr, vq_size, vq->vq_ring_virt_mem, vq->vq_alignment);
+ /*
+	 * Locally maintained last consumed index; this index trails
+ * vq_ring.used->idx.
+ */
+ vq->vq_used_cons_idx = 0;
+ vq->vq_desc_head_idx = 0;
+ vq->vq_free_cnt = vq->vq_nentries;
+ memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
+
+ /* Chain all the descriptors in the ring with an END */
+ for (i = 0; i < vq_size - 1; i++)
+ vr->desc[i].next = (uint16_t)(i + 1);
+ vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+
+ return vq;
+}
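+
+/*
+ * Note on the ring memory set up above: vring_size()/vring_init() use the
+ * standard split-ring layout: descriptor table (16 bytes per entry), then the
+ * available ring, padding up to VIRTIO_PCI_VRING_ALIGN, then the used ring,
+ * all inside the single gntalloc'd, page-aligned region shared with the host.
+ */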
+
+static int
+eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct virtqueue *vq;
+ vq = dev->data->rx_queues[rx_queue_id] = virtio_queue_setup(dev, VTNET_RQ);
+ vq->mpool = mb_pool;
+ return 0;
+}
+
+static int
+eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ dev->data->tx_queues[tx_queue_id] = virtio_queue_setup(dev, VTNET_TQ);
+ return 0;
+}
+
+
+
+static struct eth_dev_ops ops = {
+ .dev_start = eth_dev_start,
+ .dev_stop = eth_dev_stop,
+ .dev_close = eth_dev_close,
+ .dev_configure = eth_dev_configure,
+ .dev_infos_get = eth_dev_info,
+ .rx_queue_setup = eth_rx_queue_setup,
+ .tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_release = eth_queue_release,
+ .tx_queue_release = eth_queue_release,
+ .link_update = eth_link_update,
+ .stats_get = eth_stats_get,
+ .stats_reset = eth_stats_reset,
+};
+
+
+static int
+rte_eth_xenvirt_parse_args(struct xenvirt_dict *dict,
+ const char *name, const char *params)
+{
+ int i;
+ char *pairs[RTE_ETH_XENVIRT_MAX_ARGS];
+ int num_of_pairs;
+ char *pair[2];
+ char *args;
+ int ret = -1;
+
+ if (params == NULL)
+ return 0;
+
+ args = rte_zmalloc(NULL, strlen(params) + 1, RTE_CACHE_LINE_SIZE);
+ if (args == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't parse %s device \n", name);
+ return -1;
+ }
+ rte_memcpy(args, params, strlen(params));
+
+ num_of_pairs = rte_strsplit(args, strnlen(args, MAX_ARG_STRLEN),
+ pairs,
+ RTE_ETH_XENVIRT_MAX_ARGS ,
+ RTE_ETH_XENVIRT_PAIRS_DELIM);
+
+ for (i = 0; i < num_of_pairs; i++) {
+ pair[0] = NULL;
+ pair[1] = NULL;
+ rte_strsplit(pairs[i], strnlen(pairs[i], MAX_ARG_STRLEN),
+ pair, 2,
+ RTE_ETH_XENVIRT_KEY_VALUE_DELIM);
+
+ if (pair[0] == NULL || pair[1] == NULL || pair[0][0] == 0
+ || pair[1][0] == 0) {
+ RTE_LOG(ERR, PMD,
+ "Couldn't parse %s device,"
+ "wrong key or value \n", name);
+ goto err;
+ }
+
+ if (!strncmp(pair[0], RTE_ETH_XENVIRT_MAC_PARAM,
+ sizeof(RTE_ETH_XENVIRT_MAC_PARAM))) {
+ if (cmdline_parse_etheraddr(NULL,
+ pair[1],
+ &dict->addr,
+ sizeof(dict->addr)) < 0) {
+ RTE_LOG(ERR, PMD,
+ "Invalid %s device ether address\n",
+ name);
+ goto err;
+ }
+
+ dict->addr_valid = 1;
+ }
+ }
+
+ ret = 0;
+err:
+ rte_free(args);
+ return ret;
+}
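+
+/*
+ * Typical instantiation (illustrative device name and MAC): the guest is
+ * started with an EAL argument such as
+ *     --vdev="eth_xenvirt0,mac=00:11:22:33:44:55"
+ * and everything after the device name is handed to this parser, which splits
+ * pairs on ';' and key/value on '=' (only the "mac" key is recognized).
+ */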
+
+enum dev_action {
+ DEV_CREATE,
+ DEV_ATTACH
+};
+
+
+static int
+eth_dev_xenvirt_create(const char *name, const char *params,
+ const unsigned numa_node,
+ enum dev_action action)
+{
+ struct rte_eth_dev_data *data = NULL;
+ struct rte_pci_device *pci_dev = NULL;
+ struct pmd_internals *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct xenvirt_dict dict;
+ bzero(&dict, sizeof(struct xenvirt_dict));
+
+ RTE_LOG(INFO, PMD, "Creating virtio rings backed ethdev on numa socket %u\n",
+ numa_node);
+ RTE_SET_USED(action);
+
+ if (rte_eth_xenvirt_parse_args(&dict, name, params) < 0) {
+ RTE_LOG(ERR, PMD, "%s: Failed to parse ethdev parameters\n", __func__);
+ return -1;
+ }
+
+ /* now do all data allocation - for eth_dev structure, dummy pci driver
+ * and internal (private) data
+ */
+ data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+ if (data == NULL)
+ goto err;
+
+ pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
+ if (pci_dev == NULL)
+ goto err;
+
+ internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
+ if (internals == NULL)
+ goto err;
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (eth_dev == NULL)
+ goto err;
+
+ pci_dev->numa_node = numa_node;
+
+ data->dev_private = internals;
+ data->port_id = eth_dev->data->port_id;
+ data->nb_rx_queues = (uint16_t)1;
+ data->nb_tx_queues = (uint16_t)1;
+ data->dev_link = pmd_link;
+ data->mac_addrs = rte_zmalloc("xen_virtio", ETHER_ADDR_LEN, 0);
+
+ if(dict.addr_valid)
+ memcpy(&data->mac_addrs->addr_bytes, &dict.addr, sizeof(struct ether_addr));
+ else
+ eth_random_addr(&data->mac_addrs->addr_bytes[0]);
+
+ eth_dev->data = data;
+ eth_dev->dev_ops = &ops;
+ eth_dev->pci_dev = pci_dev;
+
+ eth_dev->rx_pkt_burst = eth_xenvirt_rx;
+ eth_dev->tx_pkt_burst = eth_xenvirt_tx;
+
+ internals->virtio_idx = virtio_idx++;
+ internals->port_id = eth_dev->data->port_id;
+
+ return 0;
+
+err:
+ if (data)
+ rte_free(data);
+ if (pci_dev)
+ rte_free(pci_dev);
+ if (internals)
+ rte_free(internals);
+ return -1;
+}
+
+
+/*TODO: Support multiple process model */
+static int
+rte_pmd_xenvirt_devinit(const char *name, const char *params)
+{
+ if (virtio_idx == 0) {
+ if (xenstore_init() != 0) {
+ RTE_LOG(ERR, PMD, "%s: xenstore init failed\n", __func__);
+ return -1;
+ }
+ if (gntalloc_open() != 0) {
+ RTE_LOG(ERR, PMD, "%s: grant init failed\n", __func__);
+ return -1;
+ }
+ }
+ eth_dev_xenvirt_create(name, params, rte_socket_id(), DEV_CREATE);
+ return 0;
+}
+
+static struct rte_driver pmd_xenvirt_drv = {
+ .name = "eth_xenvirt",
+ .type = PMD_VDEV,
+ .init = rte_pmd_xenvirt_devinit,
+};
+
+PMD_REGISTER_DRIVER(pmd_xenvirt_drv);
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.h b/src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.h
new file mode 100755
index 00000000..fc15a636
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_xenvirt/rte_eth_xenvirt.h
@@ -0,0 +1,62 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETH_XENVIRT_H_
+#define _RTE_ETH_XENVIRT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mempool.h>
+#include <rte_ring.h>
+
+/**
+ * Creates a mempool for the xen virtio PMD.
+ * This function uses memzone_reserve to allocate memory for metadata,
+ * and uses the grant alloc driver to allocate memory for the data area.
+ * The input parameters are exactly the same as for rte_mempool_create.
+ */
+struct rte_mempool *
+rte_mempool_gntalloc_create(const char *name, unsigned elt_num, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags);
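+
+/*
+ * Example (illustrative values; assumes the standard pktmbuf constructors
+ * from rte_mbuf.h):
+ *
+ *   struct rte_mempool *mp = rte_mempool_gntalloc_create("xen_mbuf_pool",
+ *           4096, 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
+ *           32, sizeof(struct rte_pktmbuf_pool_private),
+ *           rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
+ *           rte_socket_id(), 0);
+ */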
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/rte_mempool_gntalloc.c b/src/dpdk_lib18/librte_pmd_xenvirt/rte_mempool_gntalloc.c
new file mode 100755
index 00000000..3a650e8d
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_xenvirt/rte_mempool_gntalloc.c
@@ -0,0 +1,298 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <string.h>
+#include <xen/sys/gntalloc.h>
+
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_memory.h>
+#include <rte_errno.h>
+
+#include "rte_xen_lib.h"
+#include "rte_eth_xenvirt.h"
+
+struct _gntarr {
+ uint32_t gref;
+ phys_addr_t pa;
+ uint64_t index;
+ void *va;
+};
+
+struct _mempool_gntalloc_info {
+ struct rte_mempool *mp;
+ uint32_t pg_num;
+ uint32_t *gref_arr;
+ phys_addr_t *pa_arr;
+ void *va;
+ uint32_t mempool_idx;
+ uint64_t start_index;
+};
+
+
+static rte_atomic32_t global_xenvirt_mempool_idx = RTE_ATOMIC32_INIT(-1);
+
+static int
+compare(const void *p1, const void *p2)
+{
+ return ((const struct _gntarr *)p1)->pa - ((const struct _gntarr *)p2)->pa;
+}
+
+
+static struct _mempool_gntalloc_info
+_create_mempool(const char *name, unsigned elt_num, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags)
+{
+ struct _mempool_gntalloc_info mgi;
+ struct rte_mempool *mp = NULL;
+ struct rte_mempool_objsz objsz;
+ uint32_t pg_num, rpg_num, pg_shift, pg_sz;
+ char *va, *orig_va, *uv; /* uv: from which, the pages could be freed */
+ ssize_t sz, usz; /* usz: unused size */
+ /*
+	 * For each page allocated through the xen_gntalloc driver:
+	 * gref_arr: stores grant references,
+	 * pa_arr:   stores physical addresses,
+	 * gnt_arr:  stores all metadata.
+ */
+ uint32_t *gref_arr = NULL;
+ phys_addr_t *pa_arr = NULL;
+ struct _gntarr *gnt_arr = NULL;
+	/* start index of the grant references, used for dealloc */
+ uint64_t start_index;
+ uint32_t i, j;
+ int rv = 0;
+ struct ioctl_gntalloc_dealloc_gref arg;
+
+ mgi.mp = NULL;
+ va = orig_va = uv = NULL;
+ pg_num = rpg_num = 0;
+ sz = 0;
+
+ pg_sz = getpagesize();
+ if (rte_is_power_of_2(pg_sz) == 0) {
+ goto out;
+ }
+ pg_shift = rte_bsf32(pg_sz);
+
+ rte_mempool_calc_obj_size(elt_size, flags, &objsz);
+ sz = rte_mempool_xmem_size(elt_num, objsz.total_size, pg_shift);
+ pg_num = sz >> pg_shift;
+
+ pa_arr = calloc(pg_num, sizeof(pa_arr[0]));
+ gref_arr = calloc(pg_num, sizeof(gref_arr[0]));
+ gnt_arr = calloc(pg_num, sizeof(gnt_arr[0]));
+ if ((gnt_arr == NULL) || (gref_arr == NULL) || (pa_arr == NULL))
+ goto out;
+
+	/* grant indexes are contiguous, in ascending order */
+ orig_va = gntalloc(sz, gref_arr, &start_index);
+ if (orig_va == NULL)
+ goto out;
+
+ get_phys_map(orig_va, pa_arr, pg_num, pg_sz);
+ for (i = 0; i < pg_num; i++) {
+ gnt_arr[i].index = start_index + i * pg_sz;
+ gnt_arr[i].gref = gref_arr[i];
+ gnt_arr[i].pa = pa_arr[i];
+ gnt_arr[i].va = RTE_PTR_ADD(orig_va, i * pg_sz);
+ }
+ qsort(gnt_arr, pg_num, sizeof(struct _gntarr), compare);
+
+ va = get_xen_virtual(sz, pg_sz);
+ if (va == NULL) {
+ goto out;
+ }
+
+ /*
+	 * map one page at a time, as the indexes aren't contiguous anymore.
+	 * This creates pg_num VMAs; doesn't Linux have a limit on this?
+ */
+ for (i = 0; i < pg_num; i++) {
+ /* update gref_arr and pa_arr after sort */
+ gref_arr[i] = gnt_arr[i].gref;
+ pa_arr[i] = gnt_arr[i].pa;
+ gnt_arr[i].va = mmap(va + i * pg_sz, pg_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, gntalloc_fd, gnt_arr[i].index);
+ if ((gnt_arr[i].va == MAP_FAILED) || (gnt_arr[i].va != (va + i * pg_sz))) {
+ RTE_LOG(ERR, PMD, "failed to map %d pages\n", i);
+ goto mmap_failed;
+ }
+ }
+
+ /*
+	 * Check that the allocated size is big enough to hold elt_num
+	 * objects and calculate how many bytes are actually required.
+ */
+ usz = rte_mempool_xmem_usage(va, elt_num, objsz.total_size, pa_arr, pg_num, pg_shift);
+ if (usz < 0) {
+ mp = NULL;
+ i = pg_num;
+ goto mmap_failed;
+ } else {
+ /* unmap unused pages if any */
+ uv = RTE_PTR_ADD(va, usz);
+ if ((usz = va + sz - uv) > 0) {
+
+ RTE_LOG(ERR, PMD,
+ "%s(%s): unmap unused %zu of %zu "
+ "mmaped bytes @%p orig:%p\n",
+ __func__, name, usz, sz, uv, va);
+ munmap(uv, usz);
+ i = (sz - usz) / pg_sz;
+ for (; i < pg_num; i++) {
+ arg.count = 1;
+ arg.index = gnt_arr[i].index;
+ rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
+ if (rv) {
+ /* shouldn't fail here */
+ RTE_LOG(ERR, PMD, "va=%p pa=%p index=%p %s\n",
+ gnt_arr[i].va,
+ (void *)gnt_arr[i].pa,
+ (void *)arg.index, strerror(errno));
+ rte_panic("gntdealloc failed when freeing pages\n");
+ }
+ }
+
+ rpg_num = (sz - usz) >> pg_shift;
+ } else
+ rpg_num = pg_num;
+
+ mp = rte_mempool_xmem_create(name, elt_num, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags, va, pa_arr, rpg_num, pg_shift);
+
+ RTE_VERIFY(elt_num == mp->size);
+ }
+ mgi.mp = mp;
+ mgi.pg_num = rpg_num;
+ mgi.gref_arr = gref_arr;
+ mgi.pa_arr = pa_arr;
+ if (mp)
+ mgi.mempool_idx = rte_atomic32_add_return(&global_xenvirt_mempool_idx, 1);
+ mgi.start_index = start_index;
+ mgi.va = va;
+
+ if (mp == NULL) {
+ i = pg_num;
+ goto mmap_failed;
+ }
+
+/*
+ * Unmap only, without deallocating the grant references.
+ * Unused pages have already been unmapped;
+ * unmapping them twice will fail, but it is safe.
+ */
+mmap_failed:
+ for (j = 0; j < i; j++) {
+		if (gnt_arr[j].va)
+			munmap(gnt_arr[j].va, pg_sz);
+ }
+out:
+ if (gnt_arr)
+ free(gnt_arr);
+ if (orig_va)
+ munmap(orig_va, sz);
+ if (mp == NULL) {
+ if (gref_arr)
+ free(gref_arr);
+ if (pa_arr)
+ free(pa_arr);
+
+		/* some grefs have already been deallocated from the list in the driver,
+		 * so dealloc one by one; it is safe to deallocate twice
+ */
+ if (orig_va) {
+ for (i = 0; i < pg_num; i++) {
+				arg.count = 1;
+				arg.index = start_index + i * pg_sz;
+				rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
+ }
+ }
+ }
+ return mgi;
+}
+
+struct rte_mempool *
+rte_mempool_gntalloc_create(const char *name, unsigned elt_num, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags)
+{
+ int rv;
+ uint32_t i;
+ struct _mempool_gntalloc_info mgi;
+ struct ioctl_gntalloc_dealloc_gref arg;
+ int pg_sz = getpagesize();
+
+ mgi = _create_mempool(name, elt_num, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags);
+ if (mgi.mp) {
+ rv = grant_gntalloc_mbuf_pool(mgi.mp,
+ mgi.pg_num,
+ mgi.gref_arr,
+ mgi.pa_arr,
+ mgi.mempool_idx);
+ free(mgi.gref_arr);
+ free(mgi.pa_arr);
+ if (rv == 0)
+ return mgi.mp;
+ /*
+		 * In _create_mempool, unused pages have already been unmapped and deallocated;
+		 * unmap and dealloc the remaining ones here.
+ */
+ munmap(mgi.va, pg_sz * mgi.pg_num);
+ for (i = 0; i < mgi.pg_num; i++) {
+			arg.count = 1;
+			arg.index = mgi.start_index + i * pg_sz;
+			rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
+ }
+ return NULL;
+ }
+ return NULL;
+}
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.c b/src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.c
new file mode 100755
index 00000000..b3932f0e
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.c
@@ -0,0 +1,428 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <xen/xen-compat.h>
+#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
+#include <xs.h>
+#else
+#include <xenstore.h>
+#endif
+#include <xen/sys/gntalloc.h>
+
+#include <rte_common.h>
+#include <rte_string_fns.h>
+
+#include "rte_xen_lib.h"
+
+/*
+ * The grant node format in xenstore for vring/mpool is:
+ * 0_rx_vring_gref = "gref1#, gref2#, gref3#"
+ * 0_mempool_gref = "gref1#, gref2#, gref3#"
+ * each gref# is a grant reference for a shared page.
+ * In each shared page, we store the grant_node_item items.
+ */
+struct grant_node_item {
+ uint32_t gref;
+ uint32_t pfn;
+} __attribute__((packed));
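+
+/*
+ * Each shared metadata page is an array of grant_node_item entries; e.g. with
+ * a 4 KB page and this 8-byte packed item, one page describes 512 data pages.
+ * The gref keys above end up under <domain path>/control/dpdk/ once
+ * xenstore_write() prepends the local domain path.
+ */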
+
+/* fd for xen_gntalloc driver, used to allocate grant pages*/
+int gntalloc_fd = -1;
+
+/* xenstore path for local domain, now it is '/local/domain/domid/' */
+static char *dompath = NULL;
+/* handle to xenstore read/write operations */
+static struct xs_handle *xs = NULL;
+
+/*
+ * Reserve a virtual address space.
+ * On success, returns the pointer. On failure, returns NULL.
+ */
+void *
+get_xen_virtual(size_t size, size_t page_sz)
+{
+ void *addr;
+ uintptr_t aligned_addr;
+
+ addr = mmap(NULL, size + page_sz, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+		RTE_LOG(ERR, PMD, "failed to get a virtual area\n");
+ return NULL;
+ }
+
+ aligned_addr = RTE_ALIGN_CEIL((uintptr_t)addr, page_sz);
+ addr = (void *)(aligned_addr);
+
+ return addr;
+}
+
+/*
+ * Get the physical address for virtual memory starting at va.
+ */
+int
+get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz)
+{
+ int32_t fd, rc = 0;
+ uint32_t i, nb;
+ off_t ofs;
+
+ ofs = (uintptr_t)va / pg_sz * sizeof(*pa);
+ nb = pg_num * sizeof(*pa);
+
+ if ((fd = open(PAGEMAP_FNAME, O_RDONLY)) < 0 ||
+ (rc = pread(fd, pa, nb, ofs)) < 0 ||
+ (rc -= nb) != 0) {
+ RTE_LOG(ERR, PMD, "%s: failed read of %u bytes from \'%s\' "
+ "at offset %zu, error code: %d\n",
+ __func__, nb, PAGEMAP_FNAME, ofs, errno);
+ rc = ENOENT;
+ }
+
+ close(fd);
+ for (i = 0; i != pg_num; i++)
+ pa[i] = (pa[i] & PAGEMAP_PFN_MASK) * pg_sz;
+
+ return rc;
+}
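+
+/*
+ * get_phys_map(): /proc/self/pagemap holds one 8-byte entry per virtual page,
+ * hence the (va / pg_sz) * sizeof(*pa) file offset. Bits 0-54 of each entry
+ * are the page frame number (PAGEMAP_PFN_MASK); multiplying by the page size
+ * turns it into a physical address.
+ */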
+
+int
+gntalloc_open(void)
+{
+ gntalloc_fd = open(XEN_GNTALLOC_FNAME, O_RDWR);
+ return (gntalloc_fd != -1) ? 0 : -1;
+}
+
+void
+gntalloc_close(void)
+{
+ if (gntalloc_fd != -1)
+ close(gntalloc_fd);
+ gntalloc_fd = -1;
+}
+
+void *
+gntalloc(size_t size, uint32_t *gref, uint64_t *start_index)
+{
+ int page_size = getpagesize();
+ uint32_t i, pg_num;
+ void *va;
+ int rv;
+ struct ioctl_gntalloc_alloc_gref *arg;
+ struct ioctl_gntalloc_dealloc_gref arg_d;
+
+ if (size % page_size) {
+		RTE_LOG(ERR, PMD, "%s: %zu isn't a multiple of the page size\n",
+ __func__, size);
+ return NULL;
+ }
+
+ pg_num = size / page_size;
+ arg = malloc(sizeof(*arg) + (pg_num - 1) * sizeof(uint32_t));
+ if (arg == NULL)
+ return NULL;
+ arg->domid = DOM0_DOMID;
+ arg->flags = GNTALLOC_FLAG_WRITABLE;
+ arg->count = pg_num;
+
+ rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, arg);
+ if (rv) {
+ RTE_LOG(ERR, PMD, "%s: ioctl error\n", __func__);
+ free(arg);
+ return NULL;
+ }
+
+ va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, gntalloc_fd, arg->index);
+ if (va == MAP_FAILED) {
+ RTE_LOG(ERR, PMD, "%s: mmap failed\n", __func__);
+ arg_d.count = pg_num;
+ arg_d.index = arg->index;
+		ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
+ free(arg);
+ return NULL;
+ }
+
+ if (gref) {
+ for (i = 0; i < pg_num; i++) {
+ gref[i] = arg->gref_ids[i];
+ }
+ }
+ if (start_index)
+ *start_index = arg->index;
+
+ free(arg);
+
+ return va;
+}
+
+int
+grefwatch_from_alloc(uint32_t *gref, void **pptr)
+{
+ int rv;
+ void *ptr;
+ int pg_size = getpagesize();
+ struct ioctl_gntalloc_alloc_gref arg = {
+ .domid = DOM0_DOMID,
+ .flags = GNTALLOC_FLAG_WRITABLE,
+ .count = 1
+ };
+ struct ioctl_gntalloc_dealloc_gref arg_d;
+ struct ioctl_gntalloc_unmap_notify notify = {
+ .action = UNMAP_NOTIFY_CLEAR_BYTE
+ };
+
+ rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, &arg);
+ if (rv) {
+ RTE_LOG(ERR, PMD, "%s: ioctl error\n", __func__);
+ return -1;
+ }
+
+ ptr = (void *)mmap(NULL, pg_size, PROT_READ|PROT_WRITE, MAP_SHARED, gntalloc_fd, arg.index);
+ arg_d.index = arg.index;
+ arg_d.count = 1;
+ if (ptr == MAP_FAILED) {
+ RTE_LOG(ERR, PMD, "%s: mmap failed\n", __func__);
+ ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
+ return -1;
+ }
+ if (pptr)
+ *pptr = ptr;
+ if (gref)
+ *gref = arg.gref_ids[0];
+
+ notify.index = arg.index;
+ rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_SET_UNMAP_NOTIFY, &notify);
+ if (rv) {
+ RTE_LOG(ERR, PMD, "%s: unmap notify failed\n", __func__);
+ munmap(ptr, pg_size);
+ ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+gntfree(void *va, size_t sz, uint64_t start_index)
+{
+ struct ioctl_gntalloc_dealloc_gref arg_d;
+
+ if (va && sz) {
+ munmap(va, sz);
+ arg_d.count = sz / getpagesize();
+ arg_d.index = start_index;
+ ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
+ }
+}
+
+static int
+xenstore_cleanup(void)
+{
+ char store_path[PATH_MAX] = {0};
+
+ if (snprintf(store_path, sizeof(store_path),
+ "%s%s", dompath, DPDK_XENSTORE_NODE) == -1)
+ return -1;
+
+ if (xs_rm(xs, XBT_NULL, store_path) == false) {
+ RTE_LOG(ERR, PMD, "%s: failed cleanup node\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+xenstore_init(void)
+{
+ unsigned int len, domid;
+ char *buf;
+ static int cleanup = 0;
+ char *end;
+
+ xs = xs_domain_open();
+ if (xs == NULL) {
+ RTE_LOG(ERR, PMD,"%s: xs_domain_open failed\n", __func__);
+ return -1;
+ }
+ buf = xs_read(xs, XBT_NULL, "domid", &len);
+ if (buf == NULL) {
+ RTE_LOG(ERR, PMD, "%s: failed read domid\n", __func__);
+ return -1;
+ }
+ errno = 0;
+ domid = strtoul(buf, &end, 0);
+ if (errno != 0 || end == NULL || end == buf || domid == 0)
+ return -1;
+
+ RTE_LOG(INFO, PMD, "retrieved dom ID = %d\n", domid);
+
+ dompath = xs_get_domain_path(xs, domid);
+ if (dompath == NULL)
+ return -1;
+
+ xs_transaction_start(xs); /* When to stop transaction */
+
+ if (cleanup == 0) {
+ if (xenstore_cleanup())
+ return -1;
+ cleanup = 1;
+ }
+
+ return 0;
+}
+
+int
+xenstore_write(const char *key_str, const char *val_str)
+{
+ char grant_path[PATH_MAX];
+ int rv, len;
+
+ if (xs == NULL) {
+ RTE_LOG(ERR, PMD, "%s: xenstore init failed\n", __func__);
+ return -1;
+ }
+ rv = snprintf(grant_path, sizeof(grant_path), "%s%s", dompath, key_str);
+ if (rv == -1) {
+ RTE_LOG(ERR, PMD, "%s: snprintf %s %s failed\n",
+ __func__, dompath, key_str);
+ return -1;
+ }
+ len = strnlen(val_str, PATH_MAX);
+
+ if (xs_write(xs, XBT_NULL, grant_path, val_str, len) == false) {
+ RTE_LOG(ERR, PMD, "%s: xs_write failed\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+grant_node_create(uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, char *val_str, size_t str_size)
+{
+ uint64_t start_index;
+ int pg_size;
+ uint32_t pg_shift;
+ void *ptr = NULL;
+ uint32_t count, entries_per_pg;
+	uint32_t i, j = 0, k = 0;
+ uint32_t *gref_tmp;
+ int first = 1;
+ char tmp_str[PATH_MAX] = {0};
+ int rv = -1;
+
+ pg_size = getpagesize();
+ if (rte_is_power_of_2(pg_size) == 0) {
+ return -1;
+ }
+ pg_shift = rte_bsf32(pg_size);
+ if (pg_size % sizeof(struct grant_node_item)) {
+		RTE_LOG(ERR, PMD, "pg_size isn't a multiple of the grant node item size\n");
+ return -1;
+ }
+
+ entries_per_pg = pg_size / sizeof(struct grant_node_item);
+ count = (pg_num + entries_per_pg - 1 ) / entries_per_pg;
+ gref_tmp = malloc(count * sizeof(uint32_t));
+ if (gref_tmp == NULL)
+ return -1;
+ ptr = gntalloc(pg_size * count, gref_tmp, &start_index);
+ if (ptr == NULL) {
+ RTE_LOG(ERR, PMD, "%s: gntalloc error of %d pages\n", __func__, count);
+ free(gref_tmp);
+ return -1;
+ }
+
+ while (j < pg_num) {
+ if (first) {
+ rv = snprintf(val_str, str_size, "%u", gref_tmp[k]);
+ first = 0;
+ } else {
+ snprintf(tmp_str, PATH_MAX, "%s", val_str);
+ rv = snprintf(val_str, str_size, "%s,%u", tmp_str, gref_tmp[k]);
+ }
+ k++;
+ if (rv == -1)
+ break;
+
+ for (i = 0; i < entries_per_pg && j < pg_num ; i++) {
+ ((struct grant_node_item *)ptr)->gref = gref_arr[j];
+ ((struct grant_node_item *)ptr)->pfn = pa_arr[j] >> pg_shift;
+ ptr = RTE_PTR_ADD(ptr, sizeof(struct grant_node_item));
+ j++;
+ }
+ }
+ if (rv == -1) {
+ gntfree(ptr, pg_size * count, start_index);
+ } else
+ rv = 0;
+ free(gref_tmp);
+ return rv;
+}
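+
+/*
+ * grant_node_create(): on success val_str holds the comma-separated grefs of
+ * the metadata pages just allocated, e.g. "1537,1524,1533" (illustrative),
+ * while each of those pages is filled with (gref, pfn) pairs describing the
+ * pg_num data pages passed in via gref_arr/pa_arr.
+ */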
+
+
+int
+grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, int mempool_idx)
+{
+ char key_str[PATH_MAX] = {0};
+ char val_str[PATH_MAX] = {0};
+
+ if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) {
+ return -1;
+ }
+
+ if (snprintf(key_str, sizeof(key_str),
+ DPDK_XENSTORE_PATH"%d"MEMPOOL_XENSTORE_STR, mempool_idx) == -1)
+ return -1;
+ if (xenstore_write(key_str, val_str) == -1)
+ return -1;
+
+ if (snprintf(key_str, sizeof(key_str),
+ DPDK_XENSTORE_PATH"%d"MEMPOOL_VA_XENSTORE_STR, mempool_idx) == -1)
+ return -1;
+ if (snprintf(val_str, sizeof(val_str), "%"PRIxPTR, (uintptr_t)mpool->elt_va_start) == -1)
+ return -1;
+ if (xenstore_write(key_str, val_str) == -1)
+ return -1;
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.h b/src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.h
new file mode 100755
index 00000000..0ba7148a
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_xenvirt/rte_xen_lib.h
@@ -0,0 +1,113 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_XEN_DUMMY_PMD_H
+#define _RTE_XEN_DUMMY_PMD_H
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_ether.h>
+
+#define PAGEMAP_FNAME "/proc/self/pagemap"
+#define XEN_GNTALLOC_FNAME "/dev/xen/gntalloc"
+#define DPDK_XENSTORE_PATH "/control/dpdk/"
+#define DPDK_XENSTORE_NODE "/control/dpdk"
+/*format 0_mempool_gref = "1537,1524,1533" */
+#define MEMPOOL_XENSTORE_STR "_mempool_gref"
+/*format 0_mempool_va = 0x80340000 */
+#define MEMPOOL_VA_XENSTORE_STR "_mempool_va"
+/*format 0_rx_vring_gref = "1537,1524,1533" */
+#define RXVRING_XENSTORE_STR "_rx_vring_gref"
+/*format 0_tx_vring_gref = "1537,1524,1533" */
+#define TXVRING_XENSTORE_STR "_tx_vring_gref"
+#define VRING_FLAG_STR "_vring_flag"
+/*format: event_type_start_0 = 1*/
+#define EVENT_TYPE_START_STR "event_type_start_"
+
+#define DOM0_DOMID 0
+/*
+ * The pfn (page frame number) occupies bits 0-54 (see pagemap.txt in the
+ * Linux Documentation).
+ */
+#define PAGEMAP_PFN_BITS 54
+#define PAGEMAP_PFN_MASK RTE_LEN2MASK(PAGEMAP_PFN_BITS, phys_addr_t)
+
+#define MAP_FLAG 0xA5
+
+#define RTE_ETH_XENVIRT_PAIRS_DELIM ';'
+#define RTE_ETH_XENVIRT_KEY_VALUE_DELIM '='
+#define RTE_ETH_XENVIRT_MAX_ARGS 1
+#define RTE_ETH_XENVIRT_MAC_PARAM "mac"
+struct xenvirt_dict {
+ uint8_t addr_valid;
+ struct ether_addr addr;
+};
+
+extern int gntalloc_fd;
+
+int
+gntalloc_open(void);
+
+void
+gntalloc_close(void);
+
+void *
+gntalloc(size_t sz, uint32_t *gref, uint64_t *start_index);
+
+void
+gntfree(void *va, size_t sz, uint64_t start_index);
+
+int
+xenstore_init(void);
+
+int
+xenstore_write(const char *key_str, const char *val_str);
+
+int
+get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz);
+
+void *
+get_xen_virtual(size_t size, size_t page_sz);
+
+int
+grefwatch_from_alloc(uint32_t *gref, void **pptr);
+
+
+int grant_node_create(uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, char *val_str, size_t str_size);
+
+int
+grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, int mempool_idx);
+
+#endif
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/virtio_logs.h b/src/dpdk_lib18/librte_pmd_xenvirt/virtio_logs.h
new file mode 100755
index 00000000..d6c33f7b
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_xenvirt/virtio_logs.h
@@ -0,0 +1,70 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VIRTIO_LOGS_H_
+#define _VIRTIO_LOGS_H_
+
+#include <rte_log.h>
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_INIT
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
+#define PMD_INIT_FUNC_TRACE() do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() rx: " fmt , __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() tx: " fmt , __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt , __func__, ## args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#endif /* _VIRTIO_LOGS_H_ */
diff --git a/src/dpdk_lib18/librte_pmd_xenvirt/virtqueue.h b/src/dpdk_lib18/librte_pmd_xenvirt/virtqueue.h
new file mode 100755
index 00000000..34a24fc5
--- /dev/null
+++ b/src/dpdk_lib18/librte_pmd_xenvirt/virtqueue.h
@@ -0,0 +1,279 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VIRTQUEUE_H_
+#define _VIRTQUEUE_H_
+
+#include <stdint.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_net.h>
+
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+#include "virtio_logs.h"
+
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+/*
+ * Address translation is between gva<->hva,
+ * rather than gpa<->hva as in the virtio spec.
+ */
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+ rte_pktmbuf_mtod(mb, uint64_t)
+
+enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
+
+/**
+ * The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+
+#define VIRTQUEUE_MAX_NAME_SZ 32
+
+struct pmd_internals {
+ struct rte_eth_stats eth_stats;
+ int port_id;
+ int virtio_idx;
+};
+
+
+struct virtqueue {
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+ struct rte_mempool *mpool; /**< mempool for mbuf allocation */
+ uint16_t queue_id; /**< DPDK queue index. */
+ uint16_t vq_queue_index; /**< PCI queue index */
+ uint8_t port_id; /**< Device port identifier. */
+
+ void *vq_ring_virt_mem; /**< virtual address of vring*/
+ int vq_alignment;
+ int vq_ring_size;
+
+ struct vring vq_ring; /**< vring keeping desc, used and avail */
+ struct pmd_internals *internals; /**< virtio device internal info. */
+ uint16_t vq_nentries; /**< vring desc numbers */
+ uint16_t vq_desc_head_idx;
+ uint16_t vq_free_cnt; /**< num of desc available */
+ uint16_t vq_used_cons_idx; /**< Last consumed desc in used table, trails vq_ring.used->idx*/
+
+ struct vq_desc_extra {
+ void *cookie;
+ uint16_t ndescs;
+ } vq_descx[0] __rte_cache_aligned;
+};
+
+
+#ifdef RTE_LIBRTE_XENVIRT_DEBUG_DUMP
+#define VIRTQUEUE_DUMP(vq) do { \
+ uint16_t used_idx, nused; \
+ used_idx = (vq)->vq_ring.used->idx; \
+ nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ PMD_INIT_LOG(DEBUG, \
+ "VQ: %s - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
+ " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
+ " avail.flags=0x%x; used.flags=0x%x\n", \
+ (vq)->vq_name, (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
+ (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
+ (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
+ (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+} while (0)
+#else
+#define VIRTQUEUE_DUMP(vq) do { } while (0)
+#endif
+
+
+/**
+ * Dump virtqueue internal structures, for debug purpose only.
+ */
+void virtqueue_dump(struct virtqueue *vq);
+
+/**
+ * Get all mbufs to be freed.
+ */
+struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
+
+static inline int __attribute__((always_inline))
+virtqueue_full(const struct virtqueue *vq)
+{
+ return (vq->vq_free_cnt == 0);
+}
+
+#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
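+
+/*
+ * VIRTQUEUE_NUSED: the subtraction is done in uint16_t, so the count stays
+ * correct when used->idx wraps around, as long as fewer than 65536 entries
+ * are outstanding (the ring holds at most 2^15 descriptors).
+ */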
+
+static inline void __attribute__((always_inline))
+vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
+{
+ uint16_t avail_idx;
+ /*
+ * Place the head of the descriptor chain into the next slot and make
+ * it usable to the host. The chain is made available now rather than
+ * deferring to virtqueue_notify() in the hopes that if the host is
+ * currently running on another CPU, we can keep it processing the new
+ * descriptor.
+ */
+ avail_idx = (uint16_t)(vq->vq_ring.avail->idx & (vq->vq_nentries - 1));
+ vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+	rte_compiler_barrier(); /* wmb; for the IA memory model a compiler barrier is enough */
+ vq->vq_ring.avail->idx++;
+}
+
+static inline void __attribute__((always_inline))
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+ struct vring_desc *dp;
+ struct vq_desc_extra *dxp;
+
+ dp = &vq->vq_ring.desc[desc_idx];
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+ while (dp->flags & VRING_DESC_F_NEXT) {
+ dp = &vq->vq_ring.desc[dp->next];
+ }
+ dxp->ndescs = 0;
+
+ /*
+ * We must append the existing free chain, if any, to the end of
+	 * the newly freed chain. If the virtqueue was completely used, then
+ * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
+ */
+ dp->next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = desc_idx;
+}
+
+static inline int __attribute__((always_inline))
+virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie)
+{
+ const uint16_t needed = 1;
+ const uint16_t head_idx = rxvq->vq_desc_head_idx;
+ struct vring_desc *start_dp = rxvq->vq_ring.desc;
+ struct vq_desc_extra *dxp;
+
+ if (unlikely(rxvq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(rxvq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+ if (unlikely(head_idx >= rxvq->vq_nentries))
+ return -EFAULT;
+
+ dxp = &rxvq->vq_descx[head_idx];
+ dxp->cookie = (void *)cookie;
+ dxp->ndescs = needed;
+
+ start_dp[head_idx].addr =
+ (uint64_t) ((uint64_t)cookie->buf_addr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
+ start_dp[head_idx].len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
+ start_dp[head_idx].flags = VRING_DESC_F_WRITE;
+ rxvq->vq_desc_head_idx = start_dp[head_idx].next;
+ rxvq->vq_free_cnt = (uint16_t)(rxvq->vq_free_cnt - needed);
+ vq_ring_update_avail(rxvq, head_idx);
+
+ return 0;
+}
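+
+/*
+ * Note: the RX descriptor above starts sizeof(struct virtio_net_hdr) bytes
+ * before the end of the mbuf headroom, so the host writes the virtio net
+ * header immediately followed by the packet data into a single buffer; the
+ * rx path later strips the header by adjusting data_len/pkt_len.
+ */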
+
+static inline int __attribute__((always_inline))
+virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
+{
+
+ const uint16_t needed = 2;
+ struct vring_desc *start_dp = txvq->vq_ring.desc;
+ uint16_t head_idx = txvq->vq_desc_head_idx;
+ uint16_t idx = head_idx;
+ struct vq_desc_extra *dxp;
+
+ if (unlikely(txvq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(txvq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+ if (unlikely(head_idx >= txvq->vq_nentries))
+ return -EFAULT;
+
+ dxp = &txvq->vq_descx[idx];
+ dxp->cookie = (void *)cookie;
+ dxp->ndescs = needed;
+
+ start_dp = txvq->vq_ring.desc;
+ start_dp[idx].addr = 0;
+/*
+ * TODO: save one desc here?
+ */
+ start_dp[idx].len = sizeof(struct virtio_net_hdr);
+ start_dp[idx].flags = VRING_DESC_F_NEXT;
+ start_dp[idx].addr = (uintptr_t)NULL;
+ idx = start_dp[idx].next;
+ start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
+ start_dp[idx].len = cookie->data_len;
+ start_dp[idx].flags = 0;
+ idx = start_dp[idx].next;
+ txvq->vq_desc_head_idx = idx;
+ txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+ vq_ring_update_avail(txvq, head_idx);
+
+ return 0;
+}
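+
+/*
+ * Note: TX uses a two-descriptor chain. The first descriptor only reserves
+ * room for a virtio_net_hdr (its addr is left NULL, so the header is never
+ * actually filled in; the para-virt host side is assumed not to read it),
+ * and the second points at the mbuf data. Both are reclaimed together when
+ * the host marks them used, since dxp->ndescs == 2.
+ */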
+
+static inline uint16_t __attribute__((always_inline))
+virtqueue_dequeue_burst(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_mbuf *cookie;
+ uint16_t used_idx, desc_idx;
+ uint16_t i;
+ /* Caller does the check */
+ for (i = 0; i < num ; i ++) {
+ used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t) uep->id;
+ cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
+ vq->vq_used_cons_idx);
+ RTE_LOG(ERR, PMD, "%s: inconsistent (%u, %u)\n", __func__, used_idx , desc_idx);
+ break;
+ }
+ len[i] = uep->len;
+ rx_pkts[i] = cookie;
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+ vq->vq_descx[desc_idx].cookie = NULL;
+ }
+ return i;
+}
+
+#endif /* _VIRTQUEUE_H_ */
diff --git a/src/dpdk_lib18/librte_port/Makefile b/src/dpdk_lib18/librte_port/Makefile
new file mode 100755
index 00000000..82b51929
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/Makefile
@@ -0,0 +1,77 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_port.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_ring.c
+ifeq ($(CONFIG_RTE_LIBRTE_IP_FRAG),y)
+ifeq ($(CONFIG_RTE_MBUF_REFCNT),y)
+SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_frag.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_ras.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_sched.c
+SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_source_sink.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_ethdev.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_ring.h
+ifeq ($(CONFIG_RTE_LIBRTE_IP_FRAG),y)
+ifeq ($(CONFIG_RTE_MBUF_REFCNT),y)
+SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_frag.h
+endif
+SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_ras.h
+endif
+SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_sched.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_source_sink.h
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) := lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_ip_frag
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_port/rte_port.h b/src/dpdk_lib18/librte_port/rte_port.h
new file mode 100755
index 00000000..d84e5a12
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port.h
@@ -0,0 +1,213 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_PORT_H__
+#define __INCLUDE_RTE_PORT_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Port
+ *
+ * This tool is part of the Intel DPDK Packet Framework tool suite and provides
+ * a standard interface to implement different types of packet ports.
+ *
+ ***/
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+
+/**@{
+ * Macros to allow accessing metadata stored in the mbuf headroom
+ * just beyond the end of the mbuf data structure returned by a port
+ */
+#define RTE_MBUF_METADATA_UINT8(mbuf, offset) \
+ (((uint8_t *)&(mbuf)[1])[offset])
+#define RTE_MBUF_METADATA_UINT16(mbuf, offset) \
+ (((uint16_t *)&(mbuf)[1])[offset/sizeof(uint16_t)])
+#define RTE_MBUF_METADATA_UINT32(mbuf, offset) \
+ (((uint32_t *)&(mbuf)[1])[offset/sizeof(uint32_t)])
+#define RTE_MBUF_METADATA_UINT64(mbuf, offset) \
+ (((uint64_t *)&(mbuf)[1])[offset/sizeof(uint64_t)])
+
+#define RTE_MBUF_METADATA_UINT8_PTR(mbuf, offset) \
+ (&RTE_MBUF_METADATA_UINT8(mbuf, offset))
+#define RTE_MBUF_METADATA_UINT16_PTR(mbuf, offset) \
+ (&RTE_MBUF_METADATA_UINT16(mbuf, offset))
+#define RTE_MBUF_METADATA_UINT32_PTR(mbuf, offset) \
+ (&RTE_MBUF_METADATA_UINT32(mbuf, offset))
+#define RTE_MBUF_METADATA_UINT64_PTR(mbuf, offset) \
+ (&RTE_MBUF_METADATA_UINT64(mbuf, offset))
+/**@}*/
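As a hedged illustration of the accessors above (not part of the patch): an application can use the headroom area immediately after struct rte_mbuf to carry per-packet state between pipeline stages. The offset 0 and the flow_id value below are assumptions made only for this example.

static inline void
set_flow_id(struct rte_mbuf *m, uint32_t flow_id)
{
	/* Writes into the headroom right after the rte_mbuf structure. */
	RTE_MBUF_METADATA_UINT32(m, 0) = flow_id;
}

static inline uint32_t
get_flow_id(struct rte_mbuf *m)
{
	return RTE_MBUF_METADATA_UINT32(m, 0);
}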
+
+/*
+ * Port IN
+ *
+ */
+/** Maximum number of packets read from any input port in a single burst.
+Cannot be changed. */
+#define RTE_PORT_IN_BURST_SIZE_MAX 64
+
+/**
+ * Input port create
+ *
+ * @param params
+ * Parameters for input port creation
+ * @param socket_id
+ * CPU socket ID (e.g. for memory allocation purpose)
+ * @return
+ * Handle to input port instance
+ */
+typedef void* (*rte_port_in_op_create)(void *params, int socket_id);
+
+/**
+ * Input port free
+ *
+ * @param port
+ * Handle to input port instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_port_in_op_free)(void *port);
+
+/**
+ * Input port packet burst RX
+ *
+ * @param port
+ * Handle to input port instance
+ * @param pkts
+ * Array for storing the burst of packets received
+ * @param n_pkts
+ * Maximum number of packets to receive
+ * @return
+ * Number of packets actually received
+ */
+typedef int (*rte_port_in_op_rx)(
+ void *port,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts);
+
+/** Input port interface defining the input port operation */
+struct rte_port_in_ops {
+ rte_port_in_op_create f_create; /**< Create */
+ rte_port_in_op_free f_free; /**< Free */
+ rte_port_in_op_rx f_rx; /**< Packet RX (packet burst) */
+};
+
+/*
+ * Port OUT
+ *
+ */
+/**
+ * Output port create
+ *
+ * @param params
+ * Parameters for output port creation
+ * @param socket_id
+ * CPU socket ID (e.g. for memory allocation purpose)
+ * @return
+ * Handle to output port instance
+ */
+typedef void* (*rte_port_out_op_create)(void *params, int socket_id);
+
+/**
+ * Output port free
+ *
+ * @param port
+ * Handle to output port instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_port_out_op_free)(void *port);
+
+/**
+ * Output port single packet TX
+ *
+ * @param port
+ * Handle to output port instance
+ * @param pkt
+ * Input packet
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_port_out_op_tx)(
+ void *port,
+ struct rte_mbuf *pkt);
+
+/**
+ * Output port packet burst TX
+ *
+ * @param port
+ * Handle to output port instance
+ * @param pkts
+ * Burst of input packets specified as array of up to 64 pointers to struct
+ * rte_mbuf
+ * @param pkts_mask
+ * 64-bit bitmask specifying which packets in the input burst are valid. When
+ * pkts_mask bit n is set, then element n of pkts array is pointing to a
+ * valid packet. Otherwise, element n of pkts array will not be accessed.
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_port_out_op_tx_bulk)(
+ void *port,
+ struct rte_mbuf **pkt,
+ uint64_t pkts_mask);
+
+/**
+ * Output port flush
+ *
+ * @param port
+ * Handle to output port instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_port_out_op_flush)(void *port);
+
+/** Output port interface defining the output port operation */
+struct rte_port_out_ops {
+ rte_port_out_op_create f_create; /**< Create */
+ rte_port_out_op_free f_free; /**< Free */
+ rte_port_out_op_tx f_tx; /**< Packet TX (single packet) */
+ rte_port_out_op_tx_bulk f_tx_bulk; /**< Packet TX (packet burst) */
+ rte_port_out_op_flush f_flush; /**< Flush */
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
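To show how the two ops tables are meant to be filled in, here is a minimal sketch of a custom input port written against the interface above. It is not part of the patch: the null_port name is invented, plain calloc/free stand in for rte_zmalloc_socket/rte_free, and the port never returns any packets.

#include <stdlib.h>
#include <rte_mbuf.h>
#include "rte_port.h"

struct null_port { int unused; };

static void *
null_port_create(void *params, int socket_id)
{
	(void)params;
	(void)socket_id;
	return calloc(1, sizeof(struct null_port));
}

static int
null_port_free(void *port)
{
	free(port);
	return 0;
}

static int
null_port_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	(void)port;
	(void)pkts;
	(void)n_pkts;
	return 0; /* no packets available */
}

struct rte_port_in_ops null_port_in_ops = {
	.f_create = null_port_create,
	.f_free   = null_port_free,
	.f_rx     = null_port_rx,
};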
diff --git a/src/dpdk_lib18/librte_port/rte_port_ethdev.c b/src/dpdk_lib18/librte_port/rte_port_ethdev.c
new file mode 100755
index 00000000..d0149133
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_ethdev.c
@@ -0,0 +1,305 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "rte_port_ethdev.h"
+
+/*
+ * Port ETHDEV Reader
+ */
+struct rte_port_ethdev_reader {
+ uint16_t queue_id;
+ uint8_t port_id;
+};
+
+static void *
+rte_port_ethdev_reader_create(void *params, int socket_id)
+{
+ struct rte_port_ethdev_reader_params *conf =
+ (struct rte_port_ethdev_reader_params *) params;
+ struct rte_port_ethdev_reader *port;
+
+ /* Check input parameters */
+ if (conf == NULL) {
+ RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->port_id = conf->port_id;
+ port->queue_id = conf->queue_id;
+
+ return port;
+}
+
+static int
+rte_port_ethdev_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
+{
+ struct rte_port_ethdev_reader *p =
+ (struct rte_port_ethdev_reader *) port;
+
+ return rte_eth_rx_burst(p->port_id, p->queue_id, pkts, n_pkts);
+}
+
+static int
+rte_port_ethdev_reader_free(void *port)
+{
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_free(port);
+
+ return 0;
+}
+
+/*
+ * Port ETHDEV Writer
+ */
+#define RTE_PORT_ETHDEV_WRITER_APPROACH 1
+
+struct rte_port_ethdev_writer {
+ struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
+ uint32_t tx_burst_sz;
+ uint16_t tx_buf_count;
+ uint64_t bsz_mask;
+ uint16_t queue_id;
+ uint8_t port_id;
+};
+
+static void *
+rte_port_ethdev_writer_create(void *params, int socket_id)
+{
+ struct rte_port_ethdev_writer_params *conf =
+ (struct rte_port_ethdev_writer_params *) params;
+ struct rte_port_ethdev_writer *port;
+
+ /* Check input parameters */
+ if ((conf == NULL) ||
+ (conf->tx_burst_sz == 0) ||
+ (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
+ (!rte_is_power_of_2(conf->tx_burst_sz))) {
+ RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->port_id = conf->port_id;
+ port->queue_id = conf->queue_id;
+ port->tx_burst_sz = conf->tx_burst_sz;
+ port->tx_buf_count = 0;
+ port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
+
+ return port;
+}
+
+static inline void
+send_burst(struct rte_port_ethdev_writer *p)
+{
+ uint32_t nb_tx;
+
+ nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id,
+ p->tx_buf, p->tx_buf_count);
+
+ for ( ; nb_tx < p->tx_buf_count; nb_tx++)
+ rte_pktmbuf_free(p->tx_buf[nb_tx]);
+
+ p->tx_buf_count = 0;
+}
+
+static int
+rte_port_ethdev_writer_tx(void *port, struct rte_mbuf *pkt)
+{
+ struct rte_port_ethdev_writer *p =
+ (struct rte_port_ethdev_writer *) port;
+
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+
+ return 0;
+}
+
+#if RTE_PORT_ETHDEV_WRITER_APPROACH == 0
+
+static int
+rte_port_ethdev_writer_tx_bulk(void *port,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
+ struct rte_port_ethdev_writer *p =
+ (struct rte_port_ethdev_writer *) port;
+
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+ }
+ } else {
+ for ( ; pkts_mask; ) {
+ uint32_t pkt_index = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pkt_index;
+ struct rte_mbuf *pkt = pkts[pkt_index];
+
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+ pkts_mask &= ~pkt_mask;
+ }
+ }
+
+ return 0;
+}
+
+#elif RTE_PORT_ETHDEV_WRITER_APPROACH == 1
+
+static int
+rte_port_ethdev_writer_tx_bulk(void *port,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
+ struct rte_port_ethdev_writer *p =
+ (struct rte_port_ethdev_writer *) port;
+ uint64_t bsz_mask = p->bsz_mask;
+ uint32_t tx_buf_count = p->tx_buf_count;
+ uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
+ ((pkts_mask & bsz_mask) ^ bsz_mask);
+
+ if (expr == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t n_pkts_ok;
+
+ if (tx_buf_count)
+ send_burst(p);
+
+ n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
+ n_pkts);
+
+ for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
+ struct rte_mbuf *pkt = pkts[n_pkts_ok];
+
+ rte_pktmbuf_free(pkt);
+ }
+ } else {
+ for ( ; pkts_mask; ) {
+ uint32_t pkt_index = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pkt_index;
+ struct rte_mbuf *pkt = pkts[pkt_index];
+
+ p->tx_buf[tx_buf_count++] = pkt;
+ pkts_mask &= ~pkt_mask;
+ }
+
+ p->tx_buf_count = tx_buf_count;
+ if (tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+ }
+
+ return 0;
+}
+
+#else
+
+#error Invalid value for RTE_PORT_ETHDEV_WRITER_APPROACH
+
+#endif
+
+static int
+rte_port_ethdev_writer_flush(void *port)
+{
+ struct rte_port_ethdev_writer *p =
+ (struct rte_port_ethdev_writer *) port;
+
+ if (p->tx_buf_count > 0)
+ send_burst(p);
+
+ return 0;
+}
+
+static int
+rte_port_ethdev_writer_free(void *port)
+{
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_port_ethdev_writer_flush(port);
+ rte_free(port);
+
+ return 0;
+}
+
+/*
+ * Summary of port operations
+ */
+struct rte_port_in_ops rte_port_ethdev_reader_ops = {
+ .f_create = rte_port_ethdev_reader_create,
+ .f_free = rte_port_ethdev_reader_free,
+ .f_rx = rte_port_ethdev_reader_rx,
+};
+
+struct rte_port_out_ops rte_port_ethdev_writer_ops = {
+ .f_create = rte_port_ethdev_writer_create,
+ .f_free = rte_port_ethdev_writer_free,
+ .f_tx = rte_port_ethdev_writer_tx,
+ .f_tx_bulk = rte_port_ethdev_writer_tx_bulk,
+ .f_flush = rte_port_ethdev_writer_flush,
+};
diff --git a/src/dpdk_lib18/librte_port/rte_port_ethdev.h b/src/dpdk_lib18/librte_port/rte_port_ethdev.h
new file mode 100755
index 00000000..af67a12b
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_ethdev.h
@@ -0,0 +1,86 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_PORT_ETHDEV_H__
+#define __INCLUDE_RTE_PORT_ETHDEV_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Port Ethernet Device
+ *
+ * ethdev_reader: input port built on top of pre-initialized NIC RX queue
+ * ethdev_writer: output port built on top of pre-initialized NIC TX queue
+ *
+ ***/
+
+#include <stdint.h>
+
+#include "rte_port.h"
+
+/** ethdev_reader port parameters */
+struct rte_port_ethdev_reader_params {
+ /** NIC RX port ID */
+ uint8_t port_id;
+
+ /** NIC RX queue ID */
+ uint16_t queue_id;
+};
+
+/** ethdev_reader port operations */
+extern struct rte_port_in_ops rte_port_ethdev_reader_ops;
+
+/** ethdev_writer port parameters */
+struct rte_port_ethdev_writer_params {
+ /** NIC TX port ID */
+ uint8_t port_id;
+
+ /** NIC TX queue ID */
+ uint16_t queue_id;
+
+ /** Recommended burst size to NIC TX queue. The actual burst size can be
+ bigger or smaller than this value. */
+ uint32_t tx_burst_sz;
+};
+
+/** ethdev_writer port operations */
+extern struct rte_port_out_ops rte_port_ethdev_writer_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
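A brief usage sketch for the two port types above (assumptions only, not part of the patch): the NIC port and its queue 0 are presumed to have been configured and started through the normal ethdev API before the ports are created, and error handling is omitted.

#include <rte_mbuf.h>
#include "rte_port_ethdev.h"

static void *
open_nic_reader(uint8_t port_id, int socket_id)
{
	struct rte_port_ethdev_reader_params params = {
		.port_id  = port_id,
		.queue_id = 0,
	};

	return rte_port_ethdev_reader_ops.f_create(&params, socket_id);
}

static int
poll_nic_reader(void *port, struct rte_mbuf **pkts)
{
	/* At most RTE_PORT_IN_BURST_SIZE_MAX packets per call. */
	return rte_port_ethdev_reader_ops.f_rx(port, pkts,
		RTE_PORT_IN_BURST_SIZE_MAX);
}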
diff --git a/src/dpdk_lib18/librte_port/rte_port_frag.c b/src/dpdk_lib18/librte_port/rte_port_frag.c
new file mode 100755
index 00000000..ff0ab9b8
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_frag.c
@@ -0,0 +1,241 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+
+#include <rte_ether.h>
+#include <rte_ip_frag.h>
+#include <rte_memory.h>
+
+#include "rte_port_frag.h"
+
+/* Default byte size for the IPv4 Maximum Transmission Unit (MTU).
+ * This value includes the size of IPv4 header. */
+#define IPV4_MTU_DEFAULT ETHER_MTU
+
+/* Max number of fragments per packet allowed */
+#define IPV4_MAX_FRAGS_PER_PACKET 0x80
+
+struct rte_port_ring_reader_ipv4_frag {
+ /* Input parameters */
+ struct rte_ring *ring;
+ uint32_t mtu;
+ uint32_t metadata_size;
+ struct rte_mempool *pool_direct;
+ struct rte_mempool *pool_indirect;
+
+ /* Internal buffers */
+ struct rte_mbuf *pkts[RTE_PORT_IN_BURST_SIZE_MAX];
+ struct rte_mbuf *frags[IPV4_MAX_FRAGS_PER_PACKET];
+ uint32_t n_pkts;
+ uint32_t pos_pkts;
+ uint32_t n_frags;
+ uint32_t pos_frags;
+} __rte_cache_aligned;
+
+static void *
+rte_port_ring_reader_ipv4_frag_create(void *params, int socket_id)
+{
+ struct rte_port_ring_reader_ipv4_frag_params *conf =
+ (struct rte_port_ring_reader_ipv4_frag_params *) params;
+ struct rte_port_ring_reader_ipv4_frag *port;
+
+ /* Check input parameters */
+ if (conf == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Parameter conf is NULL\n", __func__);
+ return NULL;
+ }
+ if (conf->ring == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Parameter ring is NULL\n", __func__);
+ return NULL;
+ }
+ if (conf->mtu == 0) {
+ RTE_LOG(ERR, PORT, "%s: Parameter mtu is invalid\n", __func__);
+ return NULL;
+ }
+ if (conf->pool_direct == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Parameter pool_direct is NULL\n",
+ __func__);
+ return NULL;
+ }
+ if (conf->pool_indirect == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Parameter pool_indirect is NULL\n",
+ __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->ring = conf->ring;
+ port->mtu = conf->mtu;
+ port->metadata_size = conf->metadata_size;
+ port->pool_direct = conf->pool_direct;
+ port->pool_indirect = conf->pool_indirect;
+
+ port->n_pkts = 0;
+ port->pos_pkts = 0;
+ port->n_frags = 0;
+ port->pos_frags = 0;
+
+ return port;
+}
+
+static int
+rte_port_ring_reader_ipv4_frag_rx(void *port,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts)
+{
+ struct rte_port_ring_reader_ipv4_frag *p =
+ (struct rte_port_ring_reader_ipv4_frag *) port;
+ uint32_t n_pkts_out;
+
+ n_pkts_out = 0;
+
+ /* Get packets from the "frag" buffer */
+ if (p->n_frags >= n_pkts) {
+ memcpy(pkts, &p->frags[p->pos_frags], n_pkts * sizeof(void *));
+ p->pos_frags += n_pkts;
+ p->n_frags -= n_pkts;
+
+ return n_pkts;
+ }
+
+ memcpy(pkts, &p->frags[p->pos_frags], p->n_frags * sizeof(void *));
+ n_pkts_out = p->n_frags;
+ p->n_frags = 0;
+
+ /* Look to "pkts" buffer to get more packets */
+ for ( ; ; ) {
+ struct rte_mbuf *pkt;
+ uint32_t n_pkts_to_provide, i;
+ int status;
+
+ /* If "pkts" buffer is empty, read packet burst from ring */
+ if (p->n_pkts == 0) {
+ p->n_pkts = rte_ring_sc_dequeue_burst(p->ring,
+ (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX);
+ if (p->n_pkts == 0)
+ return n_pkts_out;
+ p->pos_pkts = 0;
+ }
+
+ /* Read next packet from "pkts" buffer */
+ pkt = p->pkts[p->pos_pkts++];
+ p->n_pkts--;
+
+ /* If not jumbo, pass current packet to output */
+ if (pkt->pkt_len <= IPV4_MTU_DEFAULT) {
+ pkts[n_pkts_out++] = pkt;
+
+ n_pkts_to_provide = n_pkts - n_pkts_out;
+ if (n_pkts_to_provide == 0)
+ return n_pkts;
+
+ continue;
+ }
+
+ /* Fragment current packet into the "frags" buffer */
+ status = rte_ipv4_fragment_packet(
+ pkt,
+ p->frags,
+ IPV4_MAX_FRAGS_PER_PACKET,
+ p->mtu,
+ p->pool_direct,
+ p->pool_indirect
+ );
+
+ if (status < 0) {
+ rte_pktmbuf_free(pkt);
+ continue;
+ }
+
+ p->n_frags = (uint32_t) status;
+ p->pos_frags = 0;
+
+ /* Copy meta-data from input jumbo packet to its fragments */
+ for (i = 0; i < p->n_frags; i++) {
+ uint8_t *src = RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
+ uint8_t *dst =
+ RTE_MBUF_METADATA_UINT8_PTR(p->frags[i], 0);
+
+ memcpy(dst, src, p->metadata_size);
+ }
+
+ /* Free input jumbo packet */
+ rte_pktmbuf_free(pkt);
+
+ /* Get packets from "frag" buffer */
+ n_pkts_to_provide = n_pkts - n_pkts_out;
+ if (p->n_frags >= n_pkts_to_provide) {
+ memcpy(&pkts[n_pkts_out], p->frags,
+ n_pkts_to_provide * sizeof(void *));
+ p->n_frags -= n_pkts_to_provide;
+ p->pos_frags += n_pkts_to_provide;
+
+ return n_pkts;
+ }
+
+ memcpy(&pkts[n_pkts_out], p->frags,
+ p->n_frags * sizeof(void *));
+ n_pkts_out += p->n_frags;
+ p->n_frags = 0;
+ }
+}
+
+static int
+rte_port_ring_reader_ipv4_frag_free(void *port)
+{
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Parameter port is NULL\n", __func__);
+ return -1;
+ }
+
+ rte_free(port);
+
+ return 0;
+}
+
+/*
+ * Summary of port operations
+ */
+struct rte_port_in_ops rte_port_ring_reader_ipv4_frag_ops = {
+ .f_create = rte_port_ring_reader_ipv4_frag_create,
+ .f_free = rte_port_ring_reader_ipv4_frag_free,
+ .f_rx = rte_port_ring_reader_ipv4_frag_rx,
+};
diff --git a/src/dpdk_lib18/librte_port/rte_port_frag.h b/src/dpdk_lib18/librte_port/rte_port_frag.h
new file mode 100755
index 00000000..dfd70c01
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_frag.h
@@ -0,0 +1,94 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_PORT_IP_FRAG_H__
+#define __INCLUDE_RTE_PORT_IP_FRAG_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Port for IPv4 Fragmentation
+ *
+ * This port is built on top of pre-initialized single consumer rte_ring. In
+ * order to minimize the amount of packets stored in the ring at any given
+ * time, the IP fragmentation functionality is executed on ring read operation,
+ * hence this port is implemented as an input port. A regular ring_writer port
+ * can be created to write to the same ring.
+ *
+ * The packets written to the ring are either complete IP datagrams or jumbo
+ * frames (i.e. IP packets with length bigger than provided MTU value). The
+ * packets read from the ring are all non-jumbo frames. The complete IP
+ * datagrams written to the ring are not changed. The jumbo frames are
+ * fragmented into several IP packets with length less or equal to MTU.
+ *
+ ***/
+
+#include <stdint.h>
+
+#include <rte_ring.h>
+
+#include "rte_port.h"
+
+/** ring_reader_ipv4_frag port parameters */
+struct rte_port_ring_reader_ipv4_frag_params {
+ /** Underlying single consumer ring that has to be pre-initialized. */
+ struct rte_ring *ring;
+
+ /** Maximum Transmission Unit (MTU). Maximum IP packet size (in bytes). */
+ uint32_t mtu;
+
+ /** Size of application dependent meta-data stored per each input packet
+ that has to be copied to each of the fragments originating from the
+ same input IP datagram. */
+ uint32_t metadata_size;
+
+ /** Pre-initialized buffer pool used for allocating direct buffers for
+ the output fragments. */
+ struct rte_mempool *pool_direct;
+
+ /** Pre-initialized buffer pool used for allocating indirect buffers for
+ the output fragments. */
+ struct rte_mempool *pool_indirect;
+};
+
+/** ring_reader_ipv4_frag port operations */
+extern struct rte_port_in_ops rte_port_ring_reader_ipv4_frag_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
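The sketch below (not part of the patch) shows how the fragmentation reader might be wired up; the ring and both mempools are assumed to have been created elsewhere, and ETHER_MTU from rte_ether.h is used so that every output fragment fits a standard Ethernet MTU.

#include <rte_ether.h>
#include "rte_port_frag.h"

static void *
open_frag_reader(struct rte_ring *ring,
	struct rte_mempool *direct_pool,
	struct rte_mempool *indirect_pool,
	uint32_t metadata_size,
	int socket_id)
{
	struct rte_port_ring_reader_ipv4_frag_params params = {
		.ring          = ring,
		.mtu           = ETHER_MTU,
		.metadata_size = metadata_size,
		.pool_direct   = direct_pool,
		.pool_indirect = indirect_pool,
	};

	return rte_port_ring_reader_ipv4_frag_ops.f_create(&params, socket_id);
}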
diff --git a/src/dpdk_lib18/librte_port/rte_port_ras.c b/src/dpdk_lib18/librte_port/rte_port_ras.c
new file mode 100755
index 00000000..b6ab67ab
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_ras.c
@@ -0,0 +1,252 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+
+#include <rte_ether.h>
+#include <rte_ip_frag.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+
+#include "rte_port_ras.h"
+
+#ifndef IPV4_RAS_N_BUCKETS
+#define IPV4_RAS_N_BUCKETS 4094
+#endif
+
+#ifndef IPV4_RAS_N_ENTRIES_PER_BUCKET
+#define IPV4_RAS_N_ENTRIES_PER_BUCKET 8
+#endif
+
+#ifndef IPV4_RAS_N_ENTRIES
+#define IPV4_RAS_N_ENTRIES (IPV4_RAS_N_BUCKETS * IPV4_RAS_N_ENTRIES_PER_BUCKET)
+#endif
+
+struct rte_port_ring_writer_ipv4_ras {
+ struct rte_mbuf *tx_buf[RTE_PORT_IN_BURST_SIZE_MAX];
+ struct rte_ring *ring;
+ uint32_t tx_burst_sz;
+ uint32_t tx_buf_count;
+ struct rte_ip_frag_tbl *frag_tbl;
+ struct rte_ip_frag_death_row death_row;
+};
+
+static void *
+rte_port_ring_writer_ipv4_ras_create(void *params, int socket_id)
+{
+ struct rte_port_ring_writer_ipv4_ras_params *conf =
+ (struct rte_port_ring_writer_ipv4_ras_params *) params;
+ struct rte_port_ring_writer_ipv4_ras *port;
+ uint64_t frag_cycles;
+
+ /* Check input parameters */
+ if (conf == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Parameter conf is NULL\n", __func__);
+ return NULL;
+ }
+ if (conf->ring == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Parameter ring is NULL\n", __func__);
+ return NULL;
+ }
+ if ((conf->tx_burst_sz == 0) ||
+ (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
+ RTE_LOG(ERR, PORT, "%s: Parameter tx_burst_sz is invalid\n",
+ __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Failed to allocate socket\n", __func__);
+ return NULL;
+ }
+
+ /* Create fragmentation table */
+ frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S * MS_PER_S;
+ frag_cycles *= 100;
+
+ port->frag_tbl = rte_ip_frag_table_create(
+ IPV4_RAS_N_BUCKETS,
+ IPV4_RAS_N_ENTRIES_PER_BUCKET,
+ IPV4_RAS_N_ENTRIES,
+ frag_cycles,
+ socket_id);
+
+ if (port->frag_tbl == NULL) {
+ RTE_LOG(ERR, PORT, "%s: rte_ip_frag_table_create failed\n",
+ __func__);
+ rte_free(port);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->ring = conf->ring;
+ port->tx_burst_sz = conf->tx_burst_sz;
+ port->tx_buf_count = 0;
+
+ return port;
+}
+
+static inline void
+send_burst(struct rte_port_ring_writer_ipv4_ras *p)
+{
+ uint32_t nb_tx;
+
+ nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
+ p->tx_buf_count);
+
+ for ( ; nb_tx < p->tx_buf_count; nb_tx++)
+ rte_pktmbuf_free(p->tx_buf[nb_tx]);
+
+ p->tx_buf_count = 0;
+}
+
+static inline void
+process_one(struct rte_port_ring_writer_ipv4_ras *p, struct rte_mbuf *pkt)
+{
+ /* Assume there is no ethernet header */
+ struct ipv4_hdr *pkt_hdr = (struct ipv4_hdr *)
+ (rte_pktmbuf_mtod(pkt, unsigned char *));
+
+ /* Get "Do not fragment" flag and fragment offset */
+ uint16_t frag_field = rte_be_to_cpu_16(pkt_hdr->fragment_offset);
+ uint16_t frag_offset = (uint16_t)(frag_field & IPV4_HDR_OFFSET_MASK);
+ uint16_t frag_flag = (uint16_t)(frag_field & IPV4_HDR_MF_FLAG);
+
+ /* Pass non-fragmented packets through; otherwise try to reassemble */
+ if ((frag_flag == 0) && (frag_offset == 0))
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ else {
+ struct rte_mbuf *mo;
+ struct rte_ip_frag_tbl *tbl = p->frag_tbl;
+ struct rte_ip_frag_death_row *dr = &p->death_row;
+
+ /* Process this fragment */
+ mo = rte_ipv4_frag_reassemble_packet(tbl, dr, pkt, rte_rdtsc(), pkt_hdr);
+ if (mo != NULL)
+ p->tx_buf[p->tx_buf_count++] = mo;
+
+ rte_ip_frag_free_death_row(&p->death_row, 3);
+ }
+}
+
+static int
+rte_port_ring_writer_ipv4_ras_tx(void *port, struct rte_mbuf *pkt)
+{
+ struct rte_port_ring_writer_ipv4_ras *p =
+ (struct rte_port_ring_writer_ipv4_ras *) port;
+
+ process_one(p, pkt);
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+
+ return 0;
+}
+
+static int
+rte_port_ring_writer_ipv4_ras_tx_bulk(void *port,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
+ struct rte_port_ring_writer_ipv4_ras *p =
+ (struct rte_port_ring_writer_ipv4_ras *) port;
+
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+
+ process_one(p, pkt);
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+ }
+ } else {
+ for ( ; pkts_mask; ) {
+ uint32_t pkt_index = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pkt_index;
+ struct rte_mbuf *pkt = pkts[pkt_index];
+
+ process_one(p, pkt);
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+
+ pkts_mask &= ~pkt_mask;
+ }
+ }
+
+ return 0;
+}
+
+static int
+rte_port_ring_writer_ipv4_ras_flush(void *port)
+{
+ struct rte_port_ring_writer_ipv4_ras *p =
+ (struct rte_port_ring_writer_ipv4_ras *) port;
+
+ if (p->tx_buf_count > 0)
+ send_burst(p);
+
+ return 0;
+}
+
+static int
+rte_port_ring_writer_ipv4_ras_free(void *port)
+{
+ struct rte_port_ring_writer_ipv4_ras *p =
+ (struct rte_port_ring_writer_ipv4_ras *) port;
+
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Parameter port is NULL\n", __func__);
+ return -1;
+ }
+
+ rte_port_ring_writer_ipv4_ras_flush(port);
+ rte_ip_frag_table_destroy(p->frag_tbl);
+ rte_free(port);
+
+ return 0;
+}
+
+/*
+ * Summary of port operations
+ */
+struct rte_port_out_ops rte_port_ring_writer_ipv4_ras_ops = {
+ .f_create = rte_port_ring_writer_ipv4_ras_create,
+ .f_free = rte_port_ring_writer_ipv4_ras_free,
+ .f_tx = rte_port_ring_writer_ipv4_ras_tx,
+ .f_tx_bulk = rte_port_ring_writer_ipv4_ras_tx_bulk,
+ .f_flush = rte_port_ring_writer_ipv4_ras_flush,
+};
diff --git a/src/dpdk_lib18/librte_port/rte_port_ras.h b/src/dpdk_lib18/librte_port/rte_port_ras.h
new file mode 100755
index 00000000..c6ed688c
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_ras.h
@@ -0,0 +1,83 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_PORT_RAS_H__
+#define __INCLUDE_RTE_PORT_RAS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Port for IPv4 Reassembly
+ *
+ * This port is built on top of pre-initialized single producer rte_ring. In
+ * order to minimize the amount of packets stored in the ring at any given
+ * time, the IP reassembly functionality is executed on ring write operation,
+ * hence this port is implemented as an output port. A regular ring_reader port
+ * can be created to read from the same ring.
+ *
+ * The packets written to the ring are either complete IP datagrams or IP
+ * fragments. The packets read from the ring are all complete IP datagrams,
+ * either jumbo frames (i.e. IP packets with length bigger than MTU) or not.
+ * The complete IP datagrams written to the ring are not changed. The IP
+ * fragments written to the ring are first reassembled into complete IP
+ * datagrams, or dropped on error or IP reassembly time-out.
+ *
+ ***/
+
+#include <stdint.h>
+
+#include <rte_ring.h>
+
+#include "rte_port.h"
+
+/** ring_writer_ipv4_ras port parameters */
+struct rte_port_ring_writer_ipv4_ras_params {
+ /** Underlying single producer ring that has to be pre-initialized. */
+ struct rte_ring *ring;
+
+ /** Recommended burst size to ring. The actual burst size can be bigger
+ or smaller than this value. */
+ uint32_t tx_burst_sz;
+};
+
+/** ring_writer_ipv4_ras port operations */
+extern struct rte_port_out_ops rte_port_ring_writer_ipv4_ras_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
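A corresponding sketch for the reassembly side (assumptions only, not part of the patch): fragments or complete datagrams are pushed through f_tx, buffered and reassembled internally, and flushed to the pre-initialized single producer ring.

#include <rte_mbuf.h>
#include "rte_port_ras.h"

static void *
open_ras_writer(struct rte_ring *ring, int socket_id)
{
	struct rte_port_ring_writer_ipv4_ras_params params = {
		.ring        = ring,
		.tx_burst_sz = 32, /* must not exceed RTE_PORT_IN_BURST_SIZE_MAX */
	};

	return rte_port_ring_writer_ipv4_ras_ops.f_create(&params, socket_id);
}

static void
push_packet(void *ras_port, struct rte_mbuf *pkt)
{
	rte_port_ring_writer_ipv4_ras_ops.f_tx(ras_port, pkt);
	rte_port_ring_writer_ipv4_ras_ops.f_flush(ras_port);
}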
diff --git a/src/dpdk_lib18/librte_port/rte_port_ring.c b/src/dpdk_lib18/librte_port/rte_port_ring.c
new file mode 100755
index 00000000..fa3d77b4
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_ring.c
@@ -0,0 +1,237 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_ring.h>
+#include <rte_malloc.h>
+
+#include "rte_port_ring.h"
+
+/*
+ * Port RING Reader
+ */
+struct rte_port_ring_reader {
+ struct rte_ring *ring;
+};
+
+static void *
+rte_port_ring_reader_create(void *params, int socket_id)
+{
+ struct rte_port_ring_reader_params *conf =
+ (struct rte_port_ring_reader_params *) params;
+ struct rte_port_ring_reader *port;
+
+ /* Check input parameters */
+ if (conf == NULL) {
+ RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->ring = conf->ring;
+
+ return port;
+}
+
+static int
+rte_port_ring_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
+{
+ struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
+
+ return rte_ring_sc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+}
+
+static int
+rte_port_ring_reader_free(void *port)
+{
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_free(port);
+
+ return 0;
+}
+
+/*
+ * Port RING Writer
+ */
+struct rte_port_ring_writer {
+ struct rte_mbuf *tx_buf[RTE_PORT_IN_BURST_SIZE_MAX];
+ struct rte_ring *ring;
+ uint32_t tx_burst_sz;
+ uint32_t tx_buf_count;
+};
+
+static void *
+rte_port_ring_writer_create(void *params, int socket_id)
+{
+ struct rte_port_ring_writer_params *conf =
+ (struct rte_port_ring_writer_params *) params;
+ struct rte_port_ring_writer *port;
+
+ /* Check input parameters */
+ if ((conf == NULL) ||
+ (conf->ring == NULL) ||
+ (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
+ RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->ring = conf->ring;
+ port->tx_burst_sz = conf->tx_burst_sz;
+ port->tx_buf_count = 0;
+
+ return port;
+}
+
+static inline void
+send_burst(struct rte_port_ring_writer *p)
+{
+ uint32_t nb_tx;
+
+ nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
+ p->tx_buf_count);
+
+ for ( ; nb_tx < p->tx_buf_count; nb_tx++)
+ rte_pktmbuf_free(p->tx_buf[nb_tx]);
+
+ p->tx_buf_count = 0;
+}
+
+static int
+rte_port_ring_writer_tx(void *port, struct rte_mbuf *pkt)
+{
+ struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
+
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+
+ return 0;
+}
+
+static int
+rte_port_ring_writer_tx_bulk(void *port,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
+ struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
+
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+ }
+ } else {
+ for ( ; pkts_mask; ) {
+ uint32_t pkt_index = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pkt_index;
+ struct rte_mbuf *pkt = pkts[pkt_index];
+
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+ pkts_mask &= ~pkt_mask;
+ }
+ }
+
+ return 0;
+}
+
+static int
+rte_port_ring_writer_flush(void *port)
+{
+ struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
+
+ if (p->tx_buf_count > 0)
+ send_burst(p);
+
+ return 0;
+}
+
+static int
+rte_port_ring_writer_free(void *port)
+{
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_port_ring_writer_flush(port);
+ rte_free(port);
+
+ return 0;
+}
+
+/*
+ * Summary of port operations
+ */
+struct rte_port_in_ops rte_port_ring_reader_ops = {
+ .f_create = rte_port_ring_reader_create,
+ .f_free = rte_port_ring_reader_free,
+ .f_rx = rte_port_ring_reader_rx,
+};
+
+struct rte_port_out_ops rte_port_ring_writer_ops = {
+ .f_create = rte_port_ring_writer_create,
+ .f_free = rte_port_ring_writer_free,
+ .f_tx = rte_port_ring_writer_tx,
+ .f_tx_bulk = rte_port_ring_writer_tx_bulk,
+ .f_flush = rte_port_ring_writer_flush,
+};
diff --git a/src/dpdk_lib18/librte_port/rte_port_ring.h b/src/dpdk_lib18/librte_port/rte_port_ring.h
new file mode 100755
index 00000000..009dcf85
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_ring.h
@@ -0,0 +1,82 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_PORT_RING_H__
+#define __INCLUDE_RTE_PORT_RING_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Port Ring
+ *
+ * ring_reader: input port built on top of pre-initialized single consumer ring
+ * ring_writer: output port built on top of pre-initialized single producer ring
+ *
+ ***/
+
+#include <stdint.h>
+
+#include <rte_ring.h>
+
+#include "rte_port.h"
+
+/** ring_reader port parameters */
+struct rte_port_ring_reader_params {
+ /** Underlying single consumer ring that has to be pre-initialized */
+ struct rte_ring *ring;
+};
+
+/** ring_reader port operations */
+extern struct rte_port_in_ops rte_port_ring_reader_ops;
+
+/** ring_writer port parameters */
+struct rte_port_ring_writer_params {
+ /** Underlying single producer ring that has to be pre-initialized */
+ struct rte_ring *ring;
+
+ /** Recommended burst size to ring. The actual burst size can be
+ bigger or smaller than this value. */
+ uint32_t tx_burst_sz;
+};
+
+/** ring_writer port operations */
+extern struct rte_port_out_ops rte_port_ring_writer_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
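Since the two ring ports are symmetric, a single-threaded round trip over one ring is the simplest way to exercise them. The following sketch is illustrative only (not part of the patch): the ring and mbuf are assumed to exist already and error handling is omitted.

#include <rte_mbuf.h>
#include <rte_ring.h>
#include "rte_port_ring.h"

static int
ring_port_round_trip(struct rte_ring *r, struct rte_mbuf *pkt, int socket_id)
{
	struct rte_port_ring_writer_params wp = { .ring = r, .tx_burst_sz = 32 };
	struct rte_port_ring_reader_params rp = { .ring = r };
	struct rte_mbuf *out[RTE_PORT_IN_BURST_SIZE_MAX];
	void *w, *rd;
	int n;

	w = rte_port_ring_writer_ops.f_create(&wp, socket_id);
	rd = rte_port_ring_reader_ops.f_create(&rp, socket_id);

	rte_port_ring_writer_ops.f_tx(w, pkt);	/* buffered locally ... */
	rte_port_ring_writer_ops.f_flush(w);	/* ... then enqueued to the ring */
	n = rte_port_ring_reader_ops.f_rx(rd, out, RTE_PORT_IN_BURST_SIZE_MAX);

	rte_port_ring_writer_ops.f_free(w);
	rte_port_ring_reader_ops.f_free(rd);

	return n; /* expected to be 1 */
}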
diff --git a/src/dpdk_lib18/librte_port/rte_port_sched.c b/src/dpdk_lib18/librte_port/rte_port_sched.c
new file mode 100755
index 00000000..2107f4c8
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_sched.c
@@ -0,0 +1,239 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+
+#include "rte_port_sched.h"
+
+/*
+ * Reader
+ */
+struct rte_port_sched_reader {
+ struct rte_sched_port *sched;
+};
+
+static void *
+rte_port_sched_reader_create(void *params, int socket_id)
+{
+ struct rte_port_sched_reader_params *conf =
+ (struct rte_port_sched_reader_params *) params;
+ struct rte_port_sched_reader *port;
+
+ /* Check input parameters */
+ if ((conf == NULL) ||
+ (conf->sched == NULL)) {
+ RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->sched = conf->sched;
+
+ return port;
+}
+
+static int
+rte_port_sched_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
+{
+ struct rte_port_sched_reader *p = (struct rte_port_sched_reader *) port;
+
+ return rte_sched_port_dequeue(p->sched, pkts, n_pkts);
+}
+
+static int
+rte_port_sched_reader_free(void *port)
+{
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_free(port);
+
+ return 0;
+}
+
+/*
+ * Writer
+ */
+struct rte_port_sched_writer {
+ struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
+ struct rte_sched_port *sched;
+ uint32_t tx_burst_sz;
+ uint32_t tx_buf_count;
+ uint64_t bsz_mask;
+};
+
+static void *
+rte_port_sched_writer_create(void *params, int socket_id)
+{
+ struct rte_port_sched_writer_params *conf =
+ (struct rte_port_sched_writer_params *) params;
+ struct rte_port_sched_writer *port;
+
+ /* Check input parameters */
+ if ((conf == NULL) ||
+ (conf->sched == NULL) ||
+ (conf->tx_burst_sz == 0) ||
+ (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
+ (!rte_is_power_of_2(conf->tx_burst_sz))) {
+ RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->sched = conf->sched;
+ port->tx_burst_sz = conf->tx_burst_sz;
+ port->tx_buf_count = 0;
+ port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
+
+ return port;
+}
+
+static int
+rte_port_sched_writer_tx(void *port, struct rte_mbuf *pkt)
+{
+ struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
+
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ if (p->tx_buf_count >= p->tx_burst_sz) {
+ rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
+ p->tx_buf_count = 0;
+ }
+
+ return 0;
+}
+
+static int
+rte_port_sched_writer_tx_bulk(void *port,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
+ struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
+ uint64_t bsz_mask = p->bsz_mask;
+ uint32_t tx_buf_count = p->tx_buf_count;
+ uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
+ ((pkts_mask & bsz_mask) ^ bsz_mask);
+
+ if (expr == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+
+ if (tx_buf_count) {
+ rte_sched_port_enqueue(p->sched, p->tx_buf,
+ tx_buf_count);
+ p->tx_buf_count = 0;
+ }
+
+ rte_sched_port_enqueue(p->sched, pkts, n_pkts);
+ } else {
+ for ( ; pkts_mask; ) {
+ uint32_t pkt_index = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pkt_index;
+ struct rte_mbuf *pkt = pkts[pkt_index];
+
+ p->tx_buf[tx_buf_count++] = pkt;
+ pkts_mask &= ~pkt_mask;
+ }
+ p->tx_buf_count = tx_buf_count;
+
+ if (tx_buf_count >= p->tx_burst_sz) {
+ rte_sched_port_enqueue(p->sched, p->tx_buf,
+ tx_buf_count);
+ p->tx_buf_count = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int
+rte_port_sched_writer_flush(void *port)
+{
+ struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
+
+ if (p->tx_buf_count) {
+ rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
+ p->tx_buf_count = 0;
+ }
+
+ return 0;
+}
+
+static int
+rte_port_sched_writer_free(void *port)
+{
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_port_sched_writer_flush(port);
+ rte_free(port);
+
+ return 0;
+}
+
+/*
+ * Summary of port operations
+ */
+struct rte_port_in_ops rte_port_sched_reader_ops = {
+ .f_create = rte_port_sched_reader_create,
+ .f_free = rte_port_sched_reader_free,
+ .f_rx = rte_port_sched_reader_rx,
+};
+
+struct rte_port_out_ops rte_port_sched_writer_ops = {
+ .f_create = rte_port_sched_writer_create,
+ .f_free = rte_port_sched_writer_free,
+ .f_tx = rte_port_sched_writer_tx,
+ .f_tx_bulk = rte_port_sched_writer_tx_bulk,
+ .f_flush = rte_port_sched_writer_flush,
+};
diff --git a/src/dpdk_lib18/librte_port/rte_port_sched.h b/src/dpdk_lib18/librte_port/rte_port_sched.h
new file mode 100755
index 00000000..555415ab
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_sched.h
@@ -0,0 +1,82 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_PORT_SCHED_H__
+#define __INCLUDE_RTE_PORT_SCHED_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Port Hierarchical Scheduler
+ *
+ * sched_reader: input port built on top of pre-initialized rte_sched_port
+ * sched_writer: output port built on top of pre-initialized rte_sched_port
+ *
+ ***/
+
+#include <stdint.h>
+
+#include <rte_sched.h>
+
+#include "rte_port.h"
+
+/** sched_reader port parameters */
+struct rte_port_sched_reader_params {
+ /** Underlying pre-initialized rte_sched_port */
+ struct rte_sched_port *sched;
+};
+
+/** sched_reader port operations */
+extern struct rte_port_in_ops rte_port_sched_reader_ops;
+
+/** sched_writer port parameters */
+struct rte_port_sched_writer_params {
+ /** Underlying pre-initialized rte_sched_port */
+ struct rte_sched_port *sched;
+
+ /** Recommended burst size. The actual burst size can be bigger or
+ smaller than this value. */
+ uint32_t tx_burst_sz;
+};
+
+/** sched_writer port operations */
+extern struct rte_port_out_ops rte_port_sched_writer_ops;
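+
+/*
+ * Illustrative usage sketch (not part of the original API; 'sched' and
+ * 'app_socket_id' are placeholders for an application-provided,
+ * pre-initialized rte_sched_port and NUMA socket id):
+ *
+ *   struct rte_port_sched_writer_params wp = {
+ *           .sched = sched,
+ *           .tx_burst_sz = 32,  // power of 2, <= RTE_PORT_IN_BURST_SIZE_MAX
+ *   };
+ *   void *wport = rte_port_sched_writer_ops.f_create(&wp, app_socket_id);
+ *   // enqueue mbufs via f_tx()/f_tx_bulk(), then f_flush() and f_free()
+ */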
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_port/rte_port_source_sink.c b/src/dpdk_lib18/librte_port/rte_port_source_sink.c
new file mode 100755
index 00000000..b9a25bb0
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_source_sink.c
@@ -0,0 +1,158 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+#include "rte_port_source_sink.h"
+
+/*
+ * Port SOURCE
+ */
+struct rte_port_source {
+ struct rte_mempool *mempool;
+};
+
+static void *
+rte_port_source_create(void *params, int socket_id)
+{
+ struct rte_port_source_params *p =
+ (struct rte_port_source_params *) params;
+ struct rte_port_source *port;
+
+ /* Check input arguments */
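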
+ if ((p == NULL) || (p->mempool == NULL)) {
+ RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->mempool = (struct rte_mempool *) p->mempool;
+
+ return port;
+}
+
+static int
+rte_port_source_free(void *port)
+{
+ /* Check input parameters */
+ if (port == NULL)
+ return 0;
+
+ rte_free(port);
+
+ return 0;
+}
+
+static int
+rte_port_source_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
+{
+ struct rte_port_source *p = (struct rte_port_source *) port;
+
+ if (rte_mempool_get_bulk(p->mempool, (void **) pkts, n_pkts) != 0)
+ return 0;
+
+ return n_pkts;
+}
+
+/*
+ * Port SINK
+ */
+static void *
+rte_port_sink_create(__rte_unused void *params, __rte_unused int socket_id)
+{
+ return (void *) 1;
+}
+
+static int
+rte_port_sink_tx(__rte_unused void *port, struct rte_mbuf *pkt)
+{
+ rte_pktmbuf_free(pkt);
+
+ return 0;
+}
+
+static int
+rte_port_sink_tx_bulk(__rte_unused void *port, struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
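+ /* Fast path: a contiguous mask (0...01...1) lets the packets be freed by
+ * simple index; otherwise walk the set bits one by one. */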
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+
+ rte_pktmbuf_free(pkt);
+ }
+ } else {
+ for ( ; pkts_mask; ) {
+ uint32_t pkt_index = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pkt_index;
+ struct rte_mbuf *pkt = pkts[pkt_index];
+
+ rte_pktmbuf_free(pkt);
+ pkts_mask &= ~pkt_mask;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Summary of port operations
+ */
+struct rte_port_in_ops rte_port_source_ops = {
+ .f_create = rte_port_source_create,
+ .f_free = rte_port_source_free,
+ .f_rx = rte_port_source_rx,
+};
+
+struct rte_port_out_ops rte_port_sink_ops = {
+ .f_create = rte_port_sink_create,
+ .f_free = NULL,
+ .f_tx = rte_port_sink_tx,
+ .f_tx_bulk = rte_port_sink_tx_bulk,
+ .f_flush = NULL,
+};
diff --git a/src/dpdk_lib18/librte_port/rte_port_source_sink.h b/src/dpdk_lib18/librte_port/rte_port_source_sink.h
new file mode 100755
index 00000000..0f9be799
--- /dev/null
+++ b/src/dpdk_lib18/librte_port/rte_port_source_sink.h
@@ -0,0 +1,70 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_PORT_SOURCE_SINK_H__
+#define __INCLUDE_RTE_PORT_SOURCE_SINK_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Port Source/Sink
+ *
+ * source: input port that can be used to generate packets
+ * sink: output port that drops all packets written to it
+ *
+ ***/
+
+#include "rte_port.h"
+
+/** source port parameters */
+struct rte_port_source_params {
+ /** Pre-initialized buffer pool */
+ struct rte_mempool *mempool;
+};
+
+/** source port operations */
+extern struct rte_port_in_ops rte_port_source_ops;
+
+/** sink port parameters: NONE */
+
+/** sink port operations */
+extern struct rte_port_out_ops rte_port_sink_ops;
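+
+/*
+ * Illustrative usage sketch (placeholder names 'pool' and 'app_socket_id'
+ * are assumptions, not part of the original sources):
+ *
+ *   struct rte_port_source_params sp = { .mempool = pool };
+ *   void *src = rte_port_source_ops.f_create(&sp, app_socket_id);
+ *   void *snk = rte_port_sink_ops.f_create(NULL, app_socket_id);
+ *
+ *   struct rte_mbuf *pkts[32];
+ *   uint32_t n = rte_port_source_ops.f_rx(src, pkts, 32);
+ *   // drop everything pulled from the source (contiguous mask of n bits)
+ *   rte_port_sink_ops.f_tx_bulk(snk, pkts, (1ULL << n) - 1);
+ */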
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_power/Makefile b/src/dpdk_lib18/librte_power/Makefile
new file mode 100755
index 00000000..d672a5a6
--- /dev/null
+++ b/src/dpdk_lib18/librte_power/Makefile
@@ -0,0 +1,49 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_power.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -fno-strict-aliasing
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_POWER) := rte_power.c rte_power_acpi_cpufreq.c
+SRCS-$(CONFIG_RTE_LIBRTE_POWER) += rte_power_kvm_vm.c guest_channel.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_POWER)-include := rte_power.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_POWER) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_power/channel_commands.h b/src/dpdk_lib18/librte_power/channel_commands.h
new file mode 100755
index 00000000..7e78a8b3
--- /dev/null
+++ b/src/dpdk_lib18/librte_power/channel_commands.h
@@ -0,0 +1,77 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CHANNEL_COMMANDS_H_
+#define CHANNEL_COMMANDS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/* Maximum number of CPUs */
+#define CHANNEL_CMDS_MAX_CPUS 64
+#if CHANNEL_CMDS_MAX_CPUS > 64
+#error Maximum number of cores is 64, overflow is guaranteed to \
+ cause problems with VM Power Management
+#endif
+
+/* Maximum number of channels per VM */
+#define CHANNEL_CMDS_MAX_VM_CHANNELS 64
+
+
+/* Valid Commands */
+#define CPU_POWER 1
+#define CPU_POWER_CONNECT 2
+
+/* CPU Power Command Scaling */
+#define CPU_POWER_SCALE_UP 1
+#define CPU_POWER_SCALE_DOWN 2
+#define CPU_POWER_SCALE_MAX 3
+#define CPU_POWER_SCALE_MIN 4
+
+struct channel_packet {
+ uint64_t resource_id; /**< core_num, device */
+ uint32_t unit; /**< scale down/up/min/max */
+ uint32_t command; /**< Power, IO, etc */
+};
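+
+/*
+ * Illustrative sketch (not from the original sources): a guest asking the
+ * host to scale up the frequency of vCPU 2 would populate the packet as:
+ *
+ *   struct channel_packet req = {
+ *           .resource_id = 2,                 // core number
+ *           .unit        = CPU_POWER_SCALE_UP,
+ *           .command     = CPU_POWER,
+ *   };
+ */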
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* CHANNEL_COMMANDS_H_ */
diff --git a/src/dpdk_lib18/librte_power/guest_channel.c b/src/dpdk_lib18/librte_power/guest_channel.c
new file mode 100755
index 00000000..22956657
--- /dev/null
+++ b/src/dpdk_lib18/librte_power/guest_channel.c
@@ -0,0 +1,162 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+
+
+#include <rte_log.h>
+#include <rte_config.h>
+
+#include "guest_channel.h"
+#include "channel_commands.h"
+
+#define RTE_LOGTYPE_GUEST_CHANNEL RTE_LOGTYPE_USER1
+
+static int global_fds[RTE_MAX_LCORE];
+
+int
+guest_channel_host_connect(const char *path, unsigned lcore_id)
+{
+ int flags, ret;
+ struct channel_packet pkt;
+ char fd_path[PATH_MAX];
+ int fd = -1;
+
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, GUEST_CHANNEL, "Channel(%u) is out of range 0...%d\n",
+ lcore_id, RTE_MAX_LCORE-1);
+ return -1;
+ }
+ /* check if path is already open */
+ if (global_fds[lcore_id] != 0) {
+ RTE_LOG(ERR, GUEST_CHANNEL, "Channel(%u) is already open with fd %d\n",
+ lcore_id, global_fds[lcore_id]);
+ return -1;
+ }
+
+ snprintf(fd_path, PATH_MAX, "%s.%u", path, lcore_id);
+ RTE_LOG(INFO, GUEST_CHANNEL, "Opening channel '%s' for lcore %u\n",
+ fd_path, lcore_id);
+ fd = open(fd_path, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, GUEST_CHANNEL, "Unable to to connect to '%s' with error "
+ "%s\n", fd_path, strerror(errno));
+ return -1;
+ }
+
+ flags = fcntl(fd, F_GETFL, 0);
+ if (flags < 0) {
+ RTE_LOG(ERR, GUEST_CHANNEL, "Failed on fcntl get flags for file %s\n",
+ fd_path);
+ goto error;
+ }
+
+ flags |= O_NONBLOCK;
+ if (fcntl(fd, F_SETFL, flags) < 0) {
+ RTE_LOG(ERR, GUEST_CHANNEL, "Failed on setting non-blocking mode for "
+ "file %s", fd_path);
+ goto error;
+ }
+ /* QEMU needs a delay after connection */
+ sleep(1);
+
+ /* Send a test packet; this command is ignored by the host, but a successful
+ * send indicates that the host endpoint is monitoring.
+ */
+ pkt.command = CPU_POWER_CONNECT;
+ global_fds[lcore_id] = fd;
+ ret = guest_channel_send_msg(&pkt, lcore_id);
+ if (ret != 0) {
+ RTE_LOG(ERR, GUEST_CHANNEL, "Error on channel '%s' communications "
+ "test: %s\n", fd_path, strerror(ret));
+ goto error;
+ }
+ RTE_LOG(INFO, GUEST_CHANNEL, "Channel '%s' is now connected\n", fd_path);
+ return 0;
+error:
+ close(fd);
+ global_fds[lcore_id] = 0;
+ return -1;
+}
+
+int
+guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id)
+{
+ int ret, buffer_len = sizeof(*pkt);
+ void *buffer = pkt;
+
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, GUEST_CHANNEL, "Channel(%u) is out of range 0...%d\n",
+ lcore_id, RTE_MAX_LCORE-1);
+ return -1;
+ }
+
+ if (global_fds[lcore_id] == 0) {
+ RTE_LOG(ERR, GUEST_CHANNEL, "Channel is not connected\n");
+ return -1;
+ }
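+ /* Write the whole packet, retrying on partial writes and EINTR; any other
+ * write error is returned to the caller as errno. */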
+ while (buffer_len > 0) {
+ ret = write(global_fds[lcore_id], buffer, buffer_len);
+ if (ret == buffer_len)
+ return 0;
+ if (ret == -1) {
+ if (errno == EINTR)
+ continue;
+ return errno;
+ }
+ buffer = (char *)buffer + ret;
+ buffer_len -= ret;
+ }
+ return 0;
+}
+
+void
+guest_channel_host_disconnect(unsigned lcore_id)
+{
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, GUEST_CHANNEL, "Channel(%u) is out of range 0...%d\n",
+ lcore_id, RTE_MAX_LCORE-1);
+ return;
+ }
+ if (global_fds[lcore_id] == 0)
+ return;
+ close(global_fds[lcore_id]);
+ global_fds[lcore_id] = 0;
+}
diff --git a/src/dpdk_lib18/librte_power/guest_channel.h b/src/dpdk_lib18/librte_power/guest_channel.h
new file mode 100755
index 00000000..9e18af52
--- /dev/null
+++ b/src/dpdk_lib18/librte_power/guest_channel.h
@@ -0,0 +1,89 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _GUEST_CHANNEL_H
+#define _GUEST_CHANNEL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <channel_commands.h>
+
+/**
+ * Connect to the Virtio-Serial VM end-point located in path. It is
+ * thread safe for unique lcore_ids. This function must only be called once from
+ * each lcore.
+ *
+ * @param path
+ * The path to the serial device on the filesystem
+ * @param lcore_id
+ * lcore_id.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int guest_channel_host_connect(const char *path, unsigned lcore_id);
+
+/**
+ * Disconnect from an already connected Virtio-Serial Endpoint.
+ *
+ *
+ * @param lcore_id
+ * lcore_id.
+ *
+ */
+void guest_channel_host_disconnect(unsigned lcore_id);
+
+/**
+ * Send a message contained in pkt over the Virtio-Serial to the host endpoint.
+ *
+ * @param pkt
+ * Pointer to a populated struct guest_agent_pkt
+ *
+ * @param lcore_id
+ * lcore_id.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on channel not connected.
+ * - errno on write to channel error.
+ */
+int guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id);
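+
+/*
+ * Illustrative usage sketch (assumes the virtio-serial path used by
+ * rte_power_kvm_vm.c; 'lcore' is a placeholder):
+ *
+ *   struct channel_packet req = { .resource_id = lcore,
+ *                                 .unit = CPU_POWER_SCALE_DOWN,
+ *                                 .command = CPU_POWER };
+ *   if (guest_channel_host_connect(
+ *                   "/dev/virtio-ports/virtio.serial.port.poweragent",
+ *                   lcore) == 0) {
+ *           guest_channel_send_msg(&req, lcore);
+ *           guest_channel_host_disconnect(lcore);
+ *   }
+ */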
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_power/rte_power.c b/src/dpdk_lib18/librte_power/rte_power.c
new file mode 100755
index 00000000..998ed1c9
--- /dev/null
+++ b/src/dpdk_lib18/librte_power/rte_power.c
@@ -0,0 +1,143 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+
+#include "rte_power.h"
+#include "rte_power_acpi_cpufreq.h"
+#include "rte_power_kvm_vm.h"
+#include "rte_power_common.h"
+
+enum power_management_env global_default_env = PM_ENV_NOT_SET;
+
+volatile uint32_t global_env_cfg_status = 0;
+
+/* function pointers */
+rte_power_freqs_t rte_power_freqs = NULL;
+rte_power_get_freq_t rte_power_get_freq = NULL;
+rte_power_set_freq_t rte_power_set_freq = NULL;
+rte_power_freq_change_t rte_power_freq_up = NULL;
+rte_power_freq_change_t rte_power_freq_down = NULL;
+rte_power_freq_change_t rte_power_freq_max = NULL;
+rte_power_freq_change_t rte_power_freq_min = NULL;
+
+int
+rte_power_set_env(enum power_management_env env)
+{
+ if (rte_atomic32_cmpset(&global_env_cfg_status, 0, 1) == 0) {
+ return 0;
+ }
+ if (env == PM_ENV_ACPI_CPUFREQ) {
+ rte_power_freqs = rte_power_acpi_cpufreq_freqs;
+ rte_power_get_freq = rte_power_acpi_cpufreq_get_freq;
+ rte_power_set_freq = rte_power_acpi_cpufreq_set_freq;
+ rte_power_freq_up = rte_power_acpi_cpufreq_freq_up;
+ rte_power_freq_down = rte_power_acpi_cpufreq_freq_down;
+ rte_power_freq_min = rte_power_acpi_cpufreq_freq_min;
+ rte_power_freq_max = rte_power_acpi_cpufreq_freq_max;
+ } else if (env == PM_ENV_KVM_VM) {
+ rte_power_freqs = rte_power_kvm_vm_freqs;
+ rte_power_get_freq = rte_power_kvm_vm_get_freq;
+ rte_power_set_freq = rte_power_kvm_vm_set_freq;
+ rte_power_freq_up = rte_power_kvm_vm_freq_up;
+ rte_power_freq_down = rte_power_kvm_vm_freq_down;
+ rte_power_freq_min = rte_power_kvm_vm_freq_min;
+ rte_power_freq_max = rte_power_kvm_vm_freq_max;
+ } else {
+ RTE_LOG(ERR, POWER, "Invalid Power Management Environment(%d) set\n",
+ env);
+ rte_power_unset_env();
+ return -1;
+ }
+ global_default_env = env;
+ return 0;
+
+}
+
+void
+rte_power_unset_env(void)
+{
+ if (rte_atomic32_cmpset(&global_env_cfg_status, 1, 0) != 0)
+ global_default_env = PM_ENV_NOT_SET;
+}
+
+enum power_management_env
+rte_power_get_env(void) {
+ return global_default_env;
+}
+
+int
+rte_power_init(unsigned lcore_id)
+{
+ int ret = -1;
+
+ if (global_default_env == PM_ENV_ACPI_CPUFREQ) {
+ return rte_power_acpi_cpufreq_init(lcore_id);
+ }
+ if (global_default_env == PM_ENV_KVM_VM) {
+ return rte_power_kvm_vm_init(lcore_id);
+ }
+ /* Auto detect Environment */
+ RTE_LOG(INFO, POWER, "Attempting to initialise ACPI cpufreq power "
+ "management...\n");
+ ret = rte_power_acpi_cpufreq_init(lcore_id);
+ if (ret == 0) {
+ rte_power_set_env(PM_ENV_ACPI_CPUFREQ);
+ goto out;
+ }
+
+ RTE_LOG(INFO, POWER, "Attempting to initialise VM power management...\n");
+ ret = rte_power_kvm_vm_init(lcore_id);
+ if (ret == 0) {
+ rte_power_set_env(PM_ENV_KVM_VM);
+ goto out;
+ }
+ RTE_LOG(ERR, POWER, "Unable to set Power Management Environment for lcore "
+ "%u\n", lcore_id);
+out:
+ return ret;
+}
+
+int
+rte_power_exit(unsigned lcore_id)
+{
+ if (global_default_env == PM_ENV_ACPI_CPUFREQ)
+ return rte_power_acpi_cpufreq_exit(lcore_id);
+ if (global_default_env == PM_ENV_KVM_VM)
+ return rte_power_kvm_vm_exit(lcore_id);
+
+ RTE_LOG(ERR, POWER, "Environment has not been set, unable to exit "
+ "gracefully\n");
+ return -1;
+
+}
diff --git a/src/dpdk_lib18/librte_power/rte_power.h b/src/dpdk_lib18/librte_power/rte_power.h
new file mode 100755
index 00000000..93380693
--- /dev/null
+++ b/src/dpdk_lib18/librte_power/rte_power.h
@@ -0,0 +1,251 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_POWER_H
+#define _RTE_POWER_H
+
+/**
+ * @file
+ * RTE Power Management
+ */
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Power Management Environment State */
+enum power_management_env {PM_ENV_NOT_SET, PM_ENV_ACPI_CPUFREQ, PM_ENV_KVM_VM};
+
+/**
+ * Set the default power management implementation. If this is not called prior
+ * to rte_power_init(), then auto-detect of the environment will take place.
+ * It is not thread safe.
+ *
+ * @param env
+ * The environment in which to initialise Power Management.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_power_set_env(enum power_management_env env);
+
+/**
+ * Unset the global environment configuration.
+ * This can only be called after all threads have completed.
+ *
+ * @param None.
+ *
+ * @return
+ * None.
+ */
+void rte_power_unset_env(void);
+
+/**
+ * Get the default power management implementation.
+ *
+ * @param None.
+ *
+ * @return
+ * power_management_env The configured environment.
+ */
+enum power_management_env rte_power_get_env(void);
+
+/**
+ * Initialize power management for a specific lcore. If rte_power_set_env() has
+ * not been called then an auto-detect of the environment will start and
+ * initialise the corresponding resources.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_power_init(unsigned lcore_id);
+
+/**
+ * Exit power management on a specific lcore. This will call the environment
+ * dependent exit function.
+ *
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_power_exit(unsigned lcore_id);
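+
+/*
+ * Illustrative usage sketch (a minimal sketch only; 'lcore' is a placeholder
+ * and error handling is omitted):
+ *
+ *   rte_power_set_env(PM_ENV_ACPI_CPUFREQ); // optional, otherwise auto-detected
+ *   if (rte_power_init(lcore) == 0) {
+ *           rte_power_freq_min(lcore);       // run at the lowest frequency
+ *           // ... workload ...
+ *           rte_power_freq_max(lcore);       // back to the highest frequency
+ *           rte_power_exit(lcore);
+ *   }
+ */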
+
+/**
+ * Get the available frequencies of a specific lcore.
+ * Function pointer definition. Review each environment's
+ * specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ * @param freqs
+ * The buffer array to save the frequencies.
+ * @param num
+ * The number of frequencies to get.
+ *
+ * @return
+ * The number of available frequencies.
+ */
+typedef uint32_t (*rte_power_freqs_t)(unsigned lcore_id, uint32_t *freqs,
+ uint32_t num);
+
+extern rte_power_freqs_t rte_power_freqs;
+
+/**
+ * Return the current index of available frequencies of a specific lcore.
+ * Function pointer definition. Review each environment's
+ * specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * The current index of available frequencies.
+ */
+typedef uint32_t (*rte_power_get_freq_t)(unsigned lcore_id);
+
+extern rte_power_get_freq_t rte_power_get_freq;
+
+/**
+ * Set the new frequency for a specific lcore by indicating the index of
+ * available frequencies.
+ * Function pointer definition. Review each environment's
+ * specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ * @param index
+ * The index of available frequencies.
+ *
+ * @return
+ * - 1 on success with frequency changed.
+ * - 0 on success without frequency changed.
+ * - Negative on error.
+ */
+typedef int (*rte_power_set_freq_t)(unsigned lcore_id, uint32_t index);
+
+extern rte_power_set_freq_t rte_power_set_freq;
+
+/**
+ * Function pointer definition for generic frequency change functions. Review
+ * each environment's specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success with frequency changed.
+ * - 0 on success without frequency changed.
+ * - Negative on error.
+ */
+typedef int (*rte_power_freq_change_t)(unsigned lcore_id);
+
+/**
+ * Scale up the frequency of a specific lcore according to the available
+ * frequencies.
+ * Review each environment's specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success with frequency changed.
+ * - 0 on success without frequency changed.
+ * - Negative on error.
+ */
+extern rte_power_freq_change_t rte_power_freq_up;
+
+/**
+ * Scale down the frequency of a specific lcore according to the available
+ * frequencies.
+ * Review each environment's specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success with frequency changed.
+ * - 0 on success without frequency changed.
+ * - Negative on error.
+ */
+
+extern rte_power_freq_change_t rte_power_freq_down;
+
+/**
+ * Scale up the frequency of a specific lcore to the highest according to the
+ * available frequencies.
+ * Review each environment's specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success with frequency changed.
+ * - 0 on success without frequency changed.
+ * - Negative on error.
+ */
+extern rte_power_freq_change_t rte_power_freq_max;
+
+/**
+ * Scale down the frequency of a specific lcore to the lowest according to the
+ * available frequencies.
+ * Review each environment's specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success with frequency changed.
+ * - 0 on success without frequency changed.
+ * - Negative on error.
+ */
+extern rte_power_freq_change_t rte_power_freq_min;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.c b/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.c
new file mode 100755
index 00000000..a56c9b59
--- /dev/null
+++ b/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.c
@@ -0,0 +1,545 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <signal.h>
+#include <limits.h>
+
+#include <rte_memcpy.h>
+#include <rte_atomic.h>
+
+#include "rte_power_acpi_cpufreq.h"
+#include "rte_power_common.h"
+
+#ifdef RTE_LIBRTE_POWER_DEBUG
+#define POWER_DEBUG_TRACE(fmt, args...) do { \
+ RTE_LOG(ERR, POWER, "%s: " fmt, __func__, ## args); \
+} while (0)
+#else
+#define POWER_DEBUG_TRACE(fmt, args...)
+#endif
+
+#define FOPEN_OR_ERR_RET(f, retval) do { \
+ if ((f) == NULL) { \
+ RTE_LOG(ERR, POWER, "File not openned\n"); \
+ return retval; \
+ } \
+} while (0)
+
+#define FOPS_OR_NULL_GOTO(ret, label) do { \
+ if ((ret) == NULL) { \
+ RTE_LOG(ERR, POWER, "fgets returns nothing\n"); \
+ goto label; \
+ } \
+} while (0)
+
+#define FOPS_OR_ERR_GOTO(ret, label) do { \
+ if ((ret) < 0) { \
+ RTE_LOG(ERR, POWER, "File operations failed\n"); \
+ goto label; \
+ } \
+} while (0)
+
+#define STR_SIZE 1024
+#define POWER_CONVERT_TO_DECIMAL 10
+
+#define POWER_GOVERNOR_USERSPACE "userspace"
+#define POWER_SYSFILE_GOVERNOR \
+ "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_governor"
+#define POWER_SYSFILE_AVAIL_FREQ \
+ "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_available_frequencies"
+#define POWER_SYSFILE_SETSPEED \
+ "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_setspeed"
+
+enum power_state {
+ POWER_IDLE = 0,
+ POWER_ONGOING,
+ POWER_USED,
+ POWER_UNKNOWN
+};
+
+/**
+ * Power info per lcore.
+ */
+struct rte_power_info {
+ unsigned lcore_id; /**< Logical core id */
+ uint32_t freqs[RTE_MAX_LCORE_FREQS]; /**< Frequency array */
+ uint32_t nb_freqs; /**< number of available freqs */
+ FILE *f; /**< FD of scaling_setspeed */
+ char governor_ori[32]; /**< Original governor name */
+ uint32_t curr_idx; /**< Freq index in freqs array */
+ volatile uint32_t state; /**< Power in use state */
+} __rte_cache_aligned;
+
+static struct rte_power_info lcore_power_info[RTE_MAX_LCORE];
+
+/**
+ * Set a specific frequency for a specific logical core, according to the index
+ * of supported frequencies.
+ */
+static int
+set_freq_internal(struct rte_power_info *pi, uint32_t idx)
+{
+ if (idx >= RTE_MAX_LCORE_FREQS || idx >= pi->nb_freqs) {
+ RTE_LOG(ERR, POWER, "Invalid frequency index %u, which "
+ "should be less than %u\n", idx, pi->nb_freqs);
+ return -1;
+ }
+
+ /* Check if it is the same as current */
+ if (idx == pi->curr_idx)
+ return 0;
+
+ POWER_DEBUG_TRACE("Freqency[%u] %u to be set for lcore %u\n",
+ idx, pi->freqs[idx], pi->lcore_id);
+ if (fseek(pi->f, 0, SEEK_SET) < 0) {
+ RTE_LOG(ERR, POWER, "Fail to set file position indicator to 0 "
+ "for setting frequency for lcore %u\n", pi->lcore_id);
+ return -1;
+ }
+ if (fprintf(pi->f, "%u", pi->freqs[idx]) < 0) {
+ RTE_LOG(ERR, POWER, "Fail to write new frequency for "
+ "lcore %u\n", pi->lcore_id);
+ return -1;
+ }
+ fflush(pi->f);
+ pi->curr_idx = idx;
+
+ return 1;
+}
+
+/**
+ * Check the current scaling governor by reading the sys file, and set it to
+ * 'userspace' by writing the sys file if it is not already. The original
+ * governor will be saved for rolling back.
+ */
+static int
+power_set_governor_userspace(struct rte_power_info *pi)
+{
+ FILE *f;
+ int ret = -1;
+ char buf[BUFSIZ];
+ char fullpath[PATH_MAX];
+ char *s;
+ int val;
+
+ snprintf(fullpath, sizeof(fullpath), POWER_SYSFILE_GOVERNOR,
+ pi->lcore_id);
+ f = fopen(fullpath, "rw+");
+ FOPEN_OR_ERR_RET(f, ret);
+
+ s = fgets(buf, sizeof(buf), f);
+ FOPS_OR_NULL_GOTO(s, out);
+
+ /* Check if current governor is userspace */
+ if (strncmp(buf, POWER_GOVERNOR_USERSPACE,
+ sizeof(POWER_GOVERNOR_USERSPACE)) == 0) {
+ ret = 0;
+ POWER_DEBUG_TRACE("Power management governor of lcore %u is "
+ "already userspace\n", pi->lcore_id);
+ goto out;
+ }
+ /* Save the original governor */
+ snprintf(pi->governor_ori, sizeof(pi->governor_ori), "%s", buf);
+
+ /* Write 'userspace' to the governor */
+ val = fseek(f, 0, SEEK_SET);
+ FOPS_OR_ERR_GOTO(val, out);
+
+ val = fputs(POWER_GOVERNOR_USERSPACE, f);
+ FOPS_OR_ERR_GOTO(val, out);
+
+ ret = 0;
+ RTE_LOG(INFO, POWER, "Power management governor of lcore %u has been "
+ "set to user space successfully\n", pi->lcore_id);
+out:
+ fclose(f);
+
+ return ret;
+}
+
+/**
+ * Get the available frequencies of the specific lcore by reading the
+ * sys file.
+ */
+static int
+power_get_available_freqs(struct rte_power_info *pi)
+{
+ FILE *f;
+ int ret = -1, i, count;
+ char *p;
+ char buf[BUFSIZ];
+ char fullpath[PATH_MAX];
+ char *freqs[RTE_MAX_LCORE_FREQS];
+ char *s;
+
+ snprintf(fullpath, sizeof(fullpath), POWER_SYSFILE_AVAIL_FREQ,
+ pi->lcore_id);
+ f = fopen(fullpath, "r");
+ FOPEN_OR_ERR_RET(f, ret);
+
+ s = fgets(buf, sizeof(buf), f);
+ FOPS_OR_NULL_GOTO(s, out);
+
+ /* Strip the line break if there is */
+ p = strchr(buf, '\n');
+ if (p != NULL)
+ *p = 0;
+
+ /* Split string into at most RTE_MAX_LCORE_FREQS frequencies */
+ count = rte_strsplit(buf, sizeof(buf), freqs,
+ RTE_MAX_LCORE_FREQS, ' ');
+ if (count <= 0) {
+ RTE_LOG(ERR, POWER, "No available frequency in "
+ ""POWER_SYSFILE_AVAIL_FREQ"\n", pi->lcore_id);
+ goto out;
+ }
+ if (count >= RTE_MAX_LCORE_FREQS) {
+ RTE_LOG(ERR, POWER, "Too many available frequencies : %d\n",
+ count);
+ goto out;
+ }
+
+ /* Store the available frequencies into power context */
+ for (i = 0, pi->nb_freqs = 0; i < count; i++) {
+ POWER_DEBUG_TRACE("Lcore %u frequency[%d]: %s\n", pi->lcore_id,
+ i, freqs[i]);
+ pi->freqs[pi->nb_freqs++] = strtoul(freqs[i], &p,
+ POWER_CONVERT_TO_DECIMAL);
+ }
+
+ ret = 0;
+ POWER_DEBUG_TRACE("%d frequencie(s) of lcore %u are available\n",
+ count, pi->lcore_id);
+out:
+ fclose(f);
+
+ return ret;
+}
+
+/**
+ * Open the sys file that will later be used to set the lcore frequency.
+ */
+static int
+power_init_for_setting_freq(struct rte_power_info *pi)
+{
+ FILE *f;
+ char fullpath[PATH_MAX];
+ char buf[BUFSIZ];
+ uint32_t i, freq;
+ char *s;
+
+ snprintf(fullpath, sizeof(fullpath), POWER_SYSFILE_SETSPEED,
+ pi->lcore_id);
+ f = fopen(fullpath, "rw+");
+ FOPEN_OR_ERR_RET(f, -1);
+
+ s = fgets(buf, sizeof(buf), f);
+ FOPS_OR_NULL_GOTO(s, out);
+
+ freq = strtoul(buf, NULL, POWER_CONVERT_TO_DECIMAL);
+ for (i = 0; i < pi->nb_freqs; i++) {
+ if (freq == pi->freqs[i]) {
+ pi->curr_idx = i;
+ pi->f = f;
+ return 0;
+ }
+ }
+
+out:
+ fclose(f);
+
+ return -1;
+}
+
+int
+rte_power_acpi_cpufreq_init(unsigned lcore_id)
+{
+ struct rte_power_info *pi;
+
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Lcore id %u can not exceeds %u\n",
+ lcore_id, RTE_MAX_LCORE - 1U);
+ return -1;
+ }
+
+ pi = &lcore_power_info[lcore_id];
+ if (rte_atomic32_cmpset(&(pi->state), POWER_IDLE, POWER_ONGOING)
+ == 0) {
+ RTE_LOG(INFO, POWER, "Power management of lcore %u is "
+ "in use\n", lcore_id);
+ return -1;
+ }
+
+ pi->lcore_id = lcore_id;
+ /* Check and set the governor */
+ if (power_set_governor_userspace(pi) < 0) {
+ RTE_LOG(ERR, POWER, "Cannot set governor of lcore %u to "
+ "userspace\n", lcore_id);
+ goto fail;
+ }
+
+ /* Get the available frequencies */
+ if (power_get_available_freqs(pi) < 0) {
+ RTE_LOG(ERR, POWER, "Cannot get available frequencies of "
+ "lcore %u\n", lcore_id);
+ goto fail;
+ }
+
+ /* Init for setting lcore frequency */
+ if (power_init_for_setting_freq(pi) < 0) {
+ RTE_LOG(ERR, POWER, "Cannot init for setting frequency for "
+ "lcore %u\n", lcore_id);
+ goto fail;
+ }
+
+ /* Set freq to max by default */
+ if (rte_power_acpi_cpufreq_freq_max(lcore_id) < 0) {
+ RTE_LOG(ERR, POWER, "Cannot set frequency of lcore %u "
+ "to max\n", lcore_id);
+ goto fail;
+ }
+
+ RTE_LOG(INFO, POWER, "Initialized successfully for lcore %u "
+ "power manamgement\n", lcore_id);
+ rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_USED);
+
+ return 0;
+
+fail:
+ rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_UNKNOWN);
+
+ return -1;
+}
+
+/**
+ * Check the governor and then set the original governor back if needed, by
+ * writing the sys file.
+ */
+static int
+power_set_governor_original(struct rte_power_info *pi)
+{
+ FILE *f;
+ int ret = -1;
+ char buf[BUFSIZ];
+ char fullpath[PATH_MAX];
+ char *s;
+ int val;
+
+ snprintf(fullpath, sizeof(fullpath), POWER_SYSFILE_GOVERNOR,
+ pi->lcore_id);
+ f = fopen(fullpath, "rw+");
+ FOPEN_OR_ERR_RET(f, ret);
+
+ s = fgets(buf, sizeof(buf), f);
+ FOPS_OR_NULL_GOTO(s, out);
+
+ /* Check if the governor to be set is the same as current */
+ if (strncmp(buf, pi->governor_ori, sizeof(pi->governor_ori)) == 0) {
+ ret = 0;
+ POWER_DEBUG_TRACE("Power management governor of lcore %u "
+ "has already been set to %s\n",
+ pi->lcore_id, pi->governor_ori);
+ goto out;
+ }
+
+ /* Write back the original governor */
+ val = fseek(f, 0, SEEK_SET);
+ FOPS_OR_ERR_GOTO(val, out);
+
+ val = fputs(pi->governor_ori, f);
+ FOPS_OR_ERR_GOTO(val, out);
+
+ ret = 0;
+ RTE_LOG(INFO, POWER, "Power management governor of lcore %u "
+ "has been set back to %s successfully\n",
+ pi->lcore_id, pi->governor_ori);
+out:
+ fclose(f);
+
+ return ret;
+}
+
+int
+rte_power_acpi_cpufreq_exit(unsigned lcore_id)
+{
+ struct rte_power_info *pi;
+
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Lcore id %u can not exceeds %u\n",
+ lcore_id, RTE_MAX_LCORE - 1U);
+ return -1;
+ }
+ pi = &lcore_power_info[lcore_id];
+ if (rte_atomic32_cmpset(&(pi->state), POWER_USED, POWER_ONGOING)
+ == 0) {
+ RTE_LOG(INFO, POWER, "Power management of lcore %u is "
+ "not used\n", lcore_id);
+ return -1;
+ }
+
+ /* Close FD of setting freq */
+ fclose(pi->f);
+ pi->f = NULL;
+
+ /* Set the governor back to the original */
+ if (power_set_governor_original(pi) < 0) {
+ RTE_LOG(ERR, POWER, "Cannot set the governor of %u back "
+ "to the original\n", lcore_id);
+ goto fail;
+ }
+
+ RTE_LOG(INFO, POWER, "Power management of lcore %u has exited from "
+ "'userspace' mode and been set back to the "
+ "original\n", lcore_id);
+ rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_IDLE);
+
+ return 0;
+
+fail:
+ rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_UNKNOWN);
+
+ return -1;
+}
+
+uint32_t
+rte_power_acpi_cpufreq_freqs(unsigned lcore_id, uint32_t *freqs, uint32_t num)
+{
+ struct rte_power_info *pi;
+
+ if (lcore_id >= RTE_MAX_LCORE || !freqs) {
+ RTE_LOG(ERR, POWER, "Invalid input parameter\n");
+ return 0;
+ }
+
+ pi = &lcore_power_info[lcore_id];
+ if (num < pi->nb_freqs) {
+ RTE_LOG(ERR, POWER, "Buffer size is not enough\n");
+ return 0;
+ }
+ rte_memcpy(freqs, pi->freqs, pi->nb_freqs * sizeof(uint32_t));
+
+ return pi->nb_freqs;
+}
+
+uint32_t
+rte_power_acpi_cpufreq_get_freq(unsigned lcore_id)
+{
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
+ return RTE_POWER_INVALID_FREQ_INDEX;
+ }
+
+ return lcore_power_info[lcore_id].curr_idx;
+}
+
+int
+rte_power_acpi_cpufreq_set_freq(unsigned lcore_id, uint32_t index)
+{
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
+ return -1;
+ }
+
+ return set_freq_internal(&(lcore_power_info[lcore_id]), index);
+}
+
+int
+rte_power_acpi_cpufreq_freq_down(unsigned lcore_id)
+{
+ struct rte_power_info *pi;
+
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
+ return -1;
+ }
+
+ pi = &lcore_power_info[lcore_id];
+ if (pi->curr_idx + 1 == pi->nb_freqs)
+ return 0;
+
+ /* Frequencies in the array are from high to low. */
+ return set_freq_internal(pi, pi->curr_idx + 1);
+}
+
+int
+rte_power_acpi_cpufreq_freq_up(unsigned lcore_id)
+{
+ struct rte_power_info *pi;
+
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
+ return -1;
+ }
+
+ pi = &lcore_power_info[lcore_id];
+ if (pi->curr_idx == 0)
+ return 0;
+
+ /* Frequencies in the array are from high to low. */
+ return set_freq_internal(pi, pi->curr_idx - 1);
+}
+
+int
+rte_power_acpi_cpufreq_freq_max(unsigned lcore_id)
+{
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
+ return -1;
+ }
+
+ /* Frequencies in the array are from high to low. */
+ return set_freq_internal(&lcore_power_info[lcore_id], 0);
+}
+
+int
+rte_power_acpi_cpufreq_freq_min(unsigned lcore_id)
+{
+ struct rte_power_info *pi;
+
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
+ return -1;
+ }
+
+ pi = &lcore_power_info[lcore_id];
+
+ /* Frequencies in the array are from high to low. */
+ return set_freq_internal(pi, pi->nb_freqs - 1);
+}
diff --git a/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.h b/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.h
new file mode 100755
index 00000000..68578e9b
--- /dev/null
+++ b/src/dpdk_lib18/librte_power/rte_power_acpi_cpufreq.h
@@ -0,0 +1,192 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_POWER_ACPI_CPUFREQ_H
+#define _RTE_POWER_ACPI_CPUFREQ_H
+
+/**
+ * @file
+ * RTE Power Management via userspace ACPI cpufreq
+ */
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Initialize power management for a specific lcore. It will check and set the
+ * governor to userspace for the lcore, get the available frequencies, and
+ * prepare to set a new lcore frequency.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_power_acpi_cpufreq_init(unsigned lcore_id);
+
+/**
+ * Exit power management on a specific lcore. It will set the governor back to
+ * the one that was in use before initialization.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_power_acpi_cpufreq_exit(unsigned lcore_id);
+
+/**
+ * Get the available frequencies of a specific lcore. The return value will be
+ * the smaller of the total number of available frequencies and the buffer
+ * size (num). The index of available frequencies used in other interfaces
+ * should be in the range of 0 to this return value.
+ * It should be protected outside of this function for thread safety.
+ *
+ * @param lcore_id
+ * lcore id.
+ * @param freqs
+ * The buffer array to save the frequencies.
+ * @param num
+ * The number of frequencies to get.
+ *
+ * @return
+ * The number of available frequencies.
+ */
+uint32_t rte_power_acpi_cpufreq_freqs(unsigned lcore_id, uint32_t *freqs,
+ uint32_t num);
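+
+/*
+ * Illustrative sketch (assumptions: 'lcore' has already been passed to
+ * rte_power_acpi_cpufreq_init() and RTE_MAX_LCORE_FREQS comes from the DPDK
+ * build configuration; error handling is omitted):
+ *
+ *   uint32_t freqs[RTE_MAX_LCORE_FREQS];
+ *   uint32_t n = rte_power_acpi_cpufreq_freqs(lcore, freqs,
+ *                   RTE_MAX_LCORE_FREQS);
+ *   if (n > 0)
+ *           rte_power_acpi_cpufreq_set_freq(lcore, n - 1); // lowest frequency
+ */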
+
+/**
+ * Return the current index of available frequencies of a specific lcore. It
+ * will return 'RTE_POWER_INVALID_FREQ_INDEX = (~0)' if error.
+ * It should be protected outside of this function for thread safety.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * The current index of available frequencies.
+ */
+uint32_t rte_power_acpi_cpufreq_get_freq(unsigned lcore_id);
+
+/**
+ * Set the new frequency for a specific lcore by indicating the index of
+ * available frequencies.
+ * It should be protected outside of this function for thread safety.
+ *
+ * @param lcore_id
+ * lcore id.
+ * @param index
+ * The index of available frequencies.
+ *
+ * @return
+ * - 1 on success with frequency changed.
+ * - 0 on success without frequency changed.
+ * - Negative on error.
+ */
+int rte_power_acpi_cpufreq_set_freq(unsigned lcore_id, uint32_t index);
+
+/**
+ * Scale up the frequency of a specific lcore according to the available
+ * frequencies.
+ * It should be protected outside of this function for thread safety.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success with frequency changed.
+ * - 0 on success without frequency changed.
+ * - Negative on error.
+ */
+int rte_power_acpi_cpufreq_freq_up(unsigned lcore_id);
+
+/**
+ * Scale down the frequency of a specific lcore according to the available
+ * frequencies.
+ * It should be protected outside of this function for thread safety.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success with frequency changed.
+ * - 0 on success without frequency changed.
+ * - Negative on error.
+ */
+int rte_power_acpi_cpufreq_freq_down(unsigned lcore_id);
+
+/**
+ * Scale up the frequency of a specific lcore to the highest according to the
+ * available frequencies.
+ * It should be protected outside of this function for thread safety.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success with frequency changed.
+ * - 0 on success without frequency changed.
+ * - Negative on error.
+ */
+int rte_power_acpi_cpufreq_freq_max(unsigned lcore_id);
+
+/**
+ * Scale down the frequency of a specific lcore to the lowest according to the
+ * available frequencies.
+ * It should be protected outside of this function for thread safety.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success with frequency changed.
+ * - 0 on success without frequency changed.
+ * - Negative on error.
+ */
+int rte_power_acpi_cpufreq_freq_min(unsigned lcore_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_power/rte_power_common.h b/src/dpdk_lib18/librte_power/rte_power_common.h
new file mode 100755
index 00000000..64bd168f
--- /dev/null
+++ b/src/dpdk_lib18/librte_power/rte_power_common.h
@@ -0,0 +1,39 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_POWER_COMMON_H_
+#define RTE_POWER_COMMON_H_
+
+#define RTE_POWER_INVALID_FREQ_INDEX (~0)
+
+#endif /* RTE_POWER_COMMON_H_ */
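
The only definition here is the invalid-index sentinel shared by the power backends. A tiny, hypothetical helper showing how a caller might test against it (note the cast, since the macro expands to ~0):

    static inline int
    freq_index_is_valid(uint32_t idx)
    {
        /* RTE_POWER_INVALID_FREQ_INDEX expands to ~0, so compare as uint32_t */
        return idx != (uint32_t)RTE_POWER_INVALID_FREQ_INDEX;
    }
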
diff --git a/src/dpdk_lib18/librte_power/rte_power_kvm_vm.c b/src/dpdk_lib18/librte_power/rte_power_kvm_vm.c
new file mode 100755
index 00000000..11596c39
--- /dev/null
+++ b/src/dpdk_lib18/librte_power/rte_power_kvm_vm.c
@@ -0,0 +1,136 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <errno.h>
+#include <string.h>
+
+#include <rte_log.h>
+#include <rte_config.h>
+
+#include "guest_channel.h"
+#include "channel_commands.h"
+#include "rte_power_kvm_vm.h"
+#include "rte_power_common.h"
+
+#define FD_PATH "/dev/virtio-ports/virtio.serial.port.poweragent"
+
+static struct channel_packet pkt[CHANNEL_CMDS_MAX_VM_CHANNELS];
+
+
+int
+rte_power_kvm_vm_init(unsigned lcore_id)
+{
+ if (lcore_id >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
+ RTE_LOG(ERR, POWER, "Core(%u) is out of range 0...%d\n",
+ lcore_id, CHANNEL_CMDS_MAX_VM_CHANNELS-1);
+ return -1;
+ }
+ pkt[lcore_id].command = CPU_POWER;
+ pkt[lcore_id].resource_id = lcore_id;
+ return guest_channel_host_connect(FD_PATH, lcore_id);
+}
+
+int
+rte_power_kvm_vm_exit(unsigned lcore_id)
+{
+ guest_channel_host_disconnect(lcore_id);
+ return 0;
+}
+
+uint32_t
+rte_power_kvm_vm_freqs(__attribute__((unused)) unsigned lcore_id,
+ __attribute__((unused)) uint32_t *freqs,
+ __attribute__((unused)) uint32_t num)
+{
+ RTE_LOG(ERR, POWER, "rte_power_freqs is not implemented "
+ "for Virtual Machine Power Management\n");
+ return -ENOTSUP;
+}
+
+uint32_t
+rte_power_kvm_vm_get_freq(__attribute__((unused)) unsigned lcore_id)
+{
+ RTE_LOG(ERR, POWER, "rte_power_get_freq is not implemented "
+ "for Virtual Machine Power Management\n");
+ return -ENOTSUP;
+}
+
+int
+rte_power_kvm_vm_set_freq(__attribute__((unused)) unsigned lcore_id,
+ __attribute__((unused)) uint32_t index)
+{
+ RTE_LOG(ERR, POWER, "rte_power_set_freq is not implemented "
+ "for Virtual Machine Power Management\n");
+ return -ENOTSUP;
+}
+
+static inline int
+send_msg(unsigned lcore_id, uint32_t scale_direction)
+{
+ int ret;
+
+ if (lcore_id >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
+ RTE_LOG(ERR, POWER, "Core(%u) is out of range 0...%d\n",
+ lcore_id, CHANNEL_CMDS_MAX_VM_CHANNELS-1);
+ return -1;
+ }
+ pkt[lcore_id].unit = scale_direction;
+ ret = guest_channel_send_msg(&pkt[lcore_id], lcore_id);
+ if (ret == 0)
+ return 1;
+ RTE_LOG(DEBUG, POWER, "Error sending message: %s\n", strerror(ret));
+ return -1;
+}
+
+int
+rte_power_kvm_vm_freq_up(unsigned lcore_id)
+{
+ return send_msg(lcore_id, CPU_POWER_SCALE_UP);
+}
+
+int
+rte_power_kvm_vm_freq_down(unsigned lcore_id)
+{
+ return send_msg(lcore_id, CPU_POWER_SCALE_DOWN);
+}
+
+int
+rte_power_kvm_vm_freq_max(unsigned lcore_id)
+{
+ return send_msg(lcore_id, CPU_POWER_SCALE_MAX);
+}
+
+int
+rte_power_kvm_vm_freq_min(unsigned lcore_id)
+{
+ return send_msg(lcore_id, CPU_POWER_SCALE_MIN);
+}
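
Taken together, the functions above imply a simple guest-side sequence: open the per-lcore channel, send a scaling request to the host monitor, then close the channel. A sketch, assuming the virtio-serial device at FD_PATH has been provisioned in the VM:

    static int
    vm_boost_lcore(unsigned lcore_id)
    {
        int ret;

        if (rte_power_kvm_vm_init(lcore_id) < 0)
            return -1;                              /* channel could not be opened */
        ret = rte_power_kvm_vm_freq_max(lcore_id);  /* 1 on success, negative on error */
        rte_power_kvm_vm_exit(lcore_id);
        return ret;
    }
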
diff --git a/src/dpdk_lib18/librte_power/rte_power_kvm_vm.h b/src/dpdk_lib18/librte_power/rte_power_kvm_vm.h
new file mode 100755
index 00000000..dcbc878a
--- /dev/null
+++ b/src/dpdk_lib18/librte_power/rte_power_kvm_vm.h
@@ -0,0 +1,179 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_POWER_KVM_VM_H
+#define _RTE_POWER_KVM_VM_H
+
+/**
+ * @file
+ * RTE Power Management KVM VM
+ */
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Initialize power management for a specific lcore.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_power_kvm_vm_init(unsigned lcore_id);
+
+/**
+ * Exit power management on a specific lcore.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_power_kvm_vm_exit(unsigned lcore_id);
+
+/**
+ * Get the available frequencies of a specific lcore.
+ * It is not currently supported for VM Power Management.
+ *
+ * @param lcore_id
+ * lcore id.
+ * @param freqs
+ * The buffer array to save the frequencies.
+ * @param num
+ * The number of frequencies to get.
+ *
+ * @return
+ * -ENOTSUP
+ */
+uint32_t rte_power_kvm_vm_freqs(unsigned lcore_id, uint32_t *freqs,
+ uint32_t num);
+
+/**
+ * Return the current index of available frequencies of a specific lcore.
+ * It is not currently supported for VM Power Management.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * -ENOTSUP
+ */
+uint32_t rte_power_kvm_vm_get_freq(unsigned lcore_id);
+
+/**
+ * Set the new frequency for a specific lcore by indicating the index of
+ * available frequencies.
+ * It is not currently supported for VM Power Management.
+ *
+ * @param lcore_id
+ * lcore id.
+ * @param index
+ * The index of available frequencies.
+ *
+ * @return
+ * -ENOTSUP
+ */
+int rte_power_kvm_vm_set_freq(unsigned lcore_id, uint32_t index);
+
+/**
+ * Scale up the frequency of a specific lcore. This request is forwarded to the
+ * host monitor.
+ * It should be protected outside of this function for thread safety.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int rte_power_kvm_vm_freq_up(unsigned lcore_id);
+
+/**
+ * Scale down the frequency of a specific lcore according to the available
+ * frequencies.
+ * It should be protected outside of this function for thread safety.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int rte_power_kvm_vm_freq_down(unsigned lcore_id);
+
+/**
+ * Scale up the frequency of a specific lcore to the highest according to the
+ * available frequencies.
+ * It should be protected outside of this function for thread safety.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int rte_power_kvm_vm_freq_max(unsigned lcore_id);
+
+/**
+ * Scale down the frequency of a specific lcore to the lowest according to the
+ * available frequencies.
+ * It should be protected outside of this function for thread safety.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int rte_power_kvm_vm_freq_min(unsigned lcore_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
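
Since the VM backend cannot jump to an absolute frequency index (set_freq returns -ENOTSUP), a caller that wants to move several steps has to issue repeated up or down requests. A hypothetical helper, stepping upward only:

    static int
    vm_step_up(unsigned lcore_id, int steps)
    {
        int i, ret = 0;

        for (i = 0; i < steps; i++) {
            ret = rte_power_kvm_vm_freq_up(lcore_id);   /* 1 on success, -1 on error */
            if (ret < 0)
                break;
        }
        return ret;
    }
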
diff --git a/src/dpdk_lib18/librte_ring/Makefile b/src/dpdk_lib18/librte_ring/Makefile
new file mode 100755
index 00000000..2380a43c
--- /dev/null
+++ b/src/dpdk_lib18/librte_ring/Makefile
@@ -0,0 +1,48 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_ring.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_RING) := rte_ring.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_RING)-include := rte_ring.h
+
+# this lib needs eal and rte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_RING) += lib/librte_eal lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_ring/rte_ring.c b/src/dpdk_lib18/librte_ring/rte_ring.c
new file mode 100755
index 00000000..f5899c4c
--- /dev/null
+++ b/src/dpdk_lib18/librte_ring/rte_ring.c
@@ -0,0 +1,338 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Derived from FreeBSD's bufring.c
+ *
+ **************************************************************************
+ *
+ * Copyright (c) 2007,2008 Kip Macy kmacy@freebsd.org
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. The name of Kip Macy nor the names of other
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ***************************************************************************/
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_atomic.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_spinlock.h>
+
+#include "rte_ring.h"
+
+TAILQ_HEAD(rte_ring_list, rte_tailq_entry);
+
+/* true if x is a power of 2 */
+#define POWEROF2(x) ((((x)-1) & (x)) == 0)
+
+/* return the size of memory occupied by a ring */
+ssize_t
+rte_ring_get_memsize(unsigned count)
+{
+ ssize_t sz;
+
+ /* count must be a power of 2 */
+ if ((!POWEROF2(count)) || (count > RTE_RING_SZ_MASK)) {
+ RTE_LOG(ERR, RING,
+ "Requested size is invalid, must be power of 2, and "
+ "do not exceed the size limit %u\n", RTE_RING_SZ_MASK);
+ return -EINVAL;
+ }
+
+ sz = sizeof(struct rte_ring) + count * sizeof(void *);
+ sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+ return sz;
+}
+
+int
+rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
+ unsigned flags)
+{
+ /* compilation-time checks */
+ RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
+ RTE_CACHE_LINE_MASK) != 0);
+#ifdef RTE_RING_SPLIT_PROD_CONS
+ RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
+ RTE_CACHE_LINE_MASK) != 0);
+#endif
+ RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
+ RTE_CACHE_LINE_MASK) != 0);
+#ifdef RTE_LIBRTE_RING_DEBUG
+ RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
+ RTE_CACHE_LINE_MASK) != 0);
+#endif
+
+ /* init the ring structure */
+ memset(r, 0, sizeof(*r));
+ snprintf(r->name, sizeof(r->name), "%s", name);
+ r->flags = flags;
+ r->prod.watermark = count;
+ r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
+ r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
+ r->prod.size = r->cons.size = count;
+ r->prod.mask = r->cons.mask = count-1;
+ r->prod.head = r->cons.head = 0;
+ r->prod.tail = r->cons.tail = 0;
+
+ return 0;
+}
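
These two functions support rings placed in caller-owned memory (such rings are not registered in the tailq, so rte_ring_lookup() will not find them). A sketch using rte_zmalloc() for the buffer; the allocator choice is illustrative only, any cache-aligned area of the reported size works:

    static struct rte_ring *
    make_private_ring(const char *name, unsigned count)
    {
        ssize_t sz = rte_ring_get_memsize(count);
        struct rte_ring *r;

        if (sz < 0)
            return NULL;                            /* count was not a power of 2 */
        r = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
        if (r != NULL &&
            rte_ring_init(r, name, count, RING_F_SP_ENQ | RING_F_SC_DEQ) != 0) {
            rte_free(r);
            r = NULL;
        }
        return r;
    }
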
+
+/* create the ring */
+struct rte_ring *
+rte_ring_create(const char *name, unsigned count, int socket_id,
+ unsigned flags)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ struct rte_ring *r;
+ struct rte_tailq_entry *te;
+ const struct rte_memzone *mz;
+ ssize_t ring_size;
+ int mz_flags = 0;
+ struct rte_ring_list* ring_list = NULL;
+
+ /* check that we have an initialised tail queue */
+ if ((ring_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ ring_size = rte_ring_get_memsize(count);
+ if (ring_size < 0) {
+ rte_errno = ring_size;
+ return NULL;
+ }
+
+ te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, RING, "Cannot reserve memory for tailq\n");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ snprintf(mz_name, sizeof(mz_name), "%s%s", RTE_RING_MZ_PREFIX, name);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* reserve a memory zone for this ring. If we can't get rte_config or
+ * we are secondary process, the memzone_reserve function will set
+ * rte_errno for us appropriately - hence no check in this function */
+ mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
+ if (mz != NULL) {
+ r = mz->addr;
+ /* no need to check return value here, we already checked the
+ * arguments above */
+ rte_ring_init(r, name, count, flags);
+
+ te->data = (void *) r;
+
+ TAILQ_INSERT_TAIL(ring_list, te, next);
+ } else {
+ r = NULL;
+ RTE_LOG(ERR, RING, "Cannot reserve memory\n");
+ rte_free(te);
+ }
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return r;
+}
+
+/*
+ * change the high water mark. If *count* is 0, water marking is
+ * disabled
+ */
+int
+rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
+{
+ if (count >= r->prod.size)
+ return -EINVAL;
+
+ /* if count is 0, disable the watermarking */
+ if (count == 0)
+ count = r->prod.size;
+
+ r->prod.watermark = count;
+ return 0;
+}
+
+/* dump the status of the ring on the console */
+void
+rte_ring_dump(FILE *f, const struct rte_ring *r)
+{
+#ifdef RTE_LIBRTE_RING_DEBUG
+ struct rte_ring_debug_stats sum;
+ unsigned lcore_id;
+#endif
+
+ fprintf(f, "ring <%s>@%p\n", r->name, r);
+ fprintf(f, " flags=%x\n", r->flags);
+ fprintf(f, " size=%"PRIu32"\n", r->prod.size);
+ fprintf(f, " ct=%"PRIu32"\n", r->cons.tail);
+ fprintf(f, " ch=%"PRIu32"\n", r->cons.head);
+ fprintf(f, " pt=%"PRIu32"\n", r->prod.tail);
+ fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
+ fprintf(f, " used=%u\n", rte_ring_count(r));
+ fprintf(f, " avail=%u\n", rte_ring_free_count(r));
+ if (r->prod.watermark == r->prod.size)
+ fprintf(f, " watermark=0\n");
+ else
+ fprintf(f, " watermark=%"PRIu32"\n", r->prod.watermark);
+
+ /* sum and dump statistics */
+#ifdef RTE_LIBRTE_RING_DEBUG
+ memset(&sum, 0, sizeof(sum));
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk;
+ sum.enq_success_objs += r->stats[lcore_id].enq_success_objs;
+ sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk;
+ sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs;
+ sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk;
+ sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs;
+ sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk;
+ sum.deq_success_objs += r->stats[lcore_id].deq_success_objs;
+ sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
+ sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
+ }
+ fprintf(f, " size=%"PRIu32"\n", r->prod.size);
+ fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
+ fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
+ fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
+ fprintf(f, " enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs);
+ fprintf(f, " enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk);
+ fprintf(f, " enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs);
+ fprintf(f, " deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk);
+ fprintf(f, " deq_success_objs=%"PRIu64"\n", sum.deq_success_objs);
+ fprintf(f, " deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk);
+ fprintf(f, " deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs);
+#else
+ fprintf(f, " no statistics available\n");
+#endif
+}
+
+/* dump the status of all rings on the console */
+void
+rte_ring_list_dump(FILE *f)
+{
+ const struct rte_tailq_entry *te;
+ struct rte_ring_list *ring_list;
+
+ /* check that we have an initialised tail queue */
+ if ((ring_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ TAILQ_FOREACH(te, ring_list, next) {
+ rte_ring_dump(f, (struct rte_ring *) te->data);
+ }
+
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+}
+
+/* search a ring from its name */
+struct rte_ring *
+rte_ring_lookup(const char *name)
+{
+ struct rte_tailq_entry *te;
+ struct rte_ring *r = NULL;
+ struct rte_ring_list *ring_list;
+
+ /* check that we have an initialized tail queue */
+ if ((ring_list =
+ RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING, rte_ring_list)) == NULL) {
+ rte_errno = E_RTE_NO_TAILQ;
+ return NULL;
+ }
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ TAILQ_FOREACH(te, ring_list, next) {
+ r = (struct rte_ring *) te->data;
+ if (strncmp(name, r->name, RTE_RING_NAMESIZE) == 0)
+ break;
+ }
+
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ return r;
+}
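
rte_ring_lookup() is how another part of the application, or a secondary process sharing the same hugepage memory, attaches to a ring created elsewhere. A short usage sketch:

    static struct rte_ring *
    attach_ring(const char *name)
    {
        struct rte_ring *r = rte_ring_lookup(name);

        if (r == NULL)      /* rte_errno is ENOENT when the name is unknown */
            RTE_LOG(ERR, RING, "ring <%s> not found\n", name);
        return r;
    }
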
diff --git a/src/dpdk_lib18/librte_ring/rte_ring.h b/src/dpdk_lib18/librte_ring/rte_ring.h
new file mode 100755
index 00000000..7cd5f2d4
--- /dev/null
+++ b/src/dpdk_lib18/librte_ring/rte_ring.h
@@ -0,0 +1,1214 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Derived from FreeBSD's bufring.h
+ *
+ **************************************************************************
+ *
+ * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. The name of Kip Macy nor the names of other
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ***************************************************************************/
+
+#ifndef _RTE_RING_H_
+#define _RTE_RING_H_
+
+/**
+ * @file
+ * RTE Ring
+ *
+ * The Ring Manager is a fixed-size queue, implemented as a table of
+ * pointers. Head and tail pointers are modified atomically, allowing
+ * concurrent access to it. It has the following features:
+ *
+ * - FIFO (First In First Out)
+ * - Maximum size is fixed; the pointers are stored in a table.
+ * - Lockless implementation.
+ * - Multi- or single-consumer dequeue.
+ * - Multi- or single-producer enqueue.
+ * - Bulk dequeue.
+ * - Bulk enqueue.
+ *
+ * Note: the ring implementation is not preemptible. An lcore must not
+ * be interrupted by another task that uses the same ring.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+
+enum rte_ring_queue_behavior {
+ RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
+ RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from the ring */
+};
+
+#ifdef RTE_LIBRTE_RING_DEBUG
+/**
+ * A structure that stores the ring statistics (per-lcore).
+ */
+struct rte_ring_debug_stats {
+ uint64_t enq_success_bulk; /**< Successful enqueues number. */
+ uint64_t enq_success_objs; /**< Objects successfully enqueued. */
+ uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */
+ uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */
+ uint64_t enq_fail_bulk; /**< Failed enqueues number. */
+ uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */
+ uint64_t deq_success_bulk; /**< Successful dequeues number. */
+ uint64_t deq_success_objs; /**< Objects successfully dequeued. */
+ uint64_t deq_fail_bulk; /**< Failed dequeues number. */
+ uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */
+} __rte_cache_aligned;
+#endif
+
+#define RTE_RING_NAMESIZE 32 /**< The maximum length of a ring name. */
+#define RTE_RING_MZ_PREFIX "RG_"
+
+/**
+ * An RTE ring structure.
+ *
+ * The producer and the consumer each have a head and a tail index. These
+ * indexes are not constrained to the range 0..size(ring): they range over the
+ * full 32-bit space, and their value is masked whenever the ring[] field is
+ * accessed. As a result, subtractions between two index values are done in
+ * modulo-2^32 arithmetic, so overflow of the indexes is not a problem.
+ */
+struct rte_ring {
+ char name[RTE_RING_NAMESIZE]; /**< Name of the ring. */
+ int flags; /**< Flags supplied at creation. */
+
+ /** Ring producer status. */
+ struct prod {
+ uint32_t watermark; /**< Maximum items before EDQUOT. */
+ uint32_t sp_enqueue; /**< True, if single producer. */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
+ volatile uint32_t head; /**< Producer head. */
+ volatile uint32_t tail; /**< Producer tail. */
+ } prod __rte_cache_aligned;
+
+ /** Ring consumer status. */
+ struct cons {
+ uint32_t sc_dequeue; /**< True, if single consumer. */
+ uint32_t size; /**< Size of the ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
+ volatile uint32_t head; /**< Consumer head. */
+ volatile uint32_t tail; /**< Consumer tail. */
+#ifdef RTE_RING_SPLIT_PROD_CONS
+ } cons __rte_cache_aligned;
+#else
+ } cons;
+#endif
+
+#ifdef RTE_LIBRTE_RING_DEBUG
+ struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
+#endif
+
+ void * ring[0] __rte_cache_aligned; /**< Memory space of ring starts here.
+ * not volatile, so we need to be
+ * careful about compiler re-ordering */
+};
+
+#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
+#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
+#define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceed for burst ops */
+#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
+
+/**
+ * @internal When debug is enabled, store ring statistics.
+ * @param r
+ * A pointer to the ring.
+ * @param name
+ * The name of the statistics field to increment in the ring.
+ * @param n
+ * The number of objects to add to the statistics.
+ */
+#ifdef RTE_LIBRTE_RING_DEBUG
+#define __RING_STAT_ADD(r, name, n) do { \
+ unsigned __lcore_id = rte_lcore_id(); \
+ r->stats[__lcore_id].name##_objs += n; \
+ r->stats[__lcore_id].name##_bulk += 1; \
+ } while(0)
+#else
+#define __RING_STAT_ADD(r, name, n) do {} while(0)
+#endif
+
+/**
+ * Calculate the memory size needed for a ring
+ *
+ * This function returns the number of bytes needed for a ring, given
+ * the number of elements in it. This value is the sum of the size of
+ * the structure rte_ring and the size of the memory needed by the
+ * objects pointers. The value is aligned to a cache line size.
+ *
+ * @param count
+ * The number of elements in the ring (must be a power of 2).
+ * @return
+ * - The memory size needed for the ring on success.
+ * - -EINVAL if count is not a power of 2.
+ */
+ssize_t rte_ring_get_memsize(unsigned count);
+
+/**
+ * Initialize a ring structure.
+ *
+ * Initialize a ring structure in the memory pointed to by "r". The size of the
+ * memory area must be large enough to store the ring structure and the
+ * object table. It is advised to use rte_ring_get_memsize() to get the
+ * appropriate size.
+ *
+ * The ring size is set to *count*, which must be a power of two. Water
+ * marking is disabled by default. The real usable ring size is
+ * *count-1* instead of *count* to differentiate a free ring from an
+ * empty ring.
+ *
+ * The ring is not added in RTE_TAILQ_RING global list. Indeed, the
+ * memory given by the caller may not be shareable among dpdk
+ * processes.
+ *
+ * @param r
+ * The pointer to the ring structure followed by the objects table.
+ * @param name
+ * The name of the ring.
+ * @param count
+ * The number of elements in the ring (must be a power of 2).
+ * @param flags
+ * An OR of the following:
+ * - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ * is "single-producer". Otherwise, it is "multi-producers".
+ * - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ * is "single-consumer". Otherwise, it is "multi-consumers".
+ * @return
+ * 0 on success, or a negative value on error.
+ */
+int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
+ unsigned flags);
+
+/**
+ * Create a new ring named *name* in memory.
+ *
+ * This function uses ``memzone_reserve()`` to allocate memory. Then it
+ * calls rte_ring_init() to initialize an empty ring.
+ *
+ * The new ring size is set to *count*, which must be a power of
+ * two. Water marking is disabled by default. The real usable ring size
+ * is *count-1* instead of *count* to differentiate a free ring from an
+ * empty ring.
+ *
+ * The ring is added in RTE_TAILQ_RING list.
+ *
+ * @param name
+ * The name of the ring.
+ * @param count
+ * The size of the ring (must be a power of 2).
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * An OR of the following:
+ * - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ * is "single-producer". Otherwise, it is "multi-producers".
+ * - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ * is "single-consumer". Otherwise, it is "multi-consumers".
+ * @return
+ * On success, the pointer to the new allocated ring. NULL on error with
+ * rte_errno set appropriately. Possible errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - E_RTE_NO_TAILQ - no tailq list could be got for the ring list
+ * - EINVAL - count provided is not a power of 2
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_ring *rte_ring_create(const char *name, unsigned count,
+ int socket_id, unsigned flags);
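
A hedged usage sketch for the call documented above: a 1024-slot single-producer/single-consumer ring on any NUMA socket, with failure reported through rte_errno (this assumes the caller includes <rte_errno.h>):

    static struct rte_ring *
    create_spsc_ring(void)
    {
        struct rte_ring *r = rte_ring_create("example_ring", 1024, SOCKET_ID_ANY,
                                             RING_F_SP_ENQ | RING_F_SC_DEQ);

        if (r == NULL)
            printf("ring creation failed, rte_errno=%d\n", rte_errno);
        return r;
    }
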
+
+/**
+ * Change the high water mark.
+ *
+ * If *count* is 0, water marking is disabled. Otherwise, it is set to the
+ * *count* value. The *count* value must be greater than 0 and less
+ * than the ring size.
+ *
+ * This function can be called at any time (not necessarily at
+ * initialization).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param count
+ * The new water mark value.
+ * @return
+ * - 0: Success; water mark changed.
+ * - -EINVAL: Invalid water mark value.
+ */
+int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
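
The watermark interacts with the enqueue return codes documented below: once occupancy passes the mark, enqueues still succeed but report -EDQUOT. A sketch of using that as a back-pressure signal:

    static int
    enqueue_with_backpressure(struct rte_ring *r, void *obj)
    {
        int ret = rte_ring_enqueue(r, obj);

        if (ret == -EDQUOT)
            return 1;       /* stored, but the ring is above its high-water mark */
        return ret;         /* 0 on success, -ENOBUFS if the ring is full */
    }
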
+
+/**
+ * Dump the status of the ring to the console.
+ *
+ * @param f
+ * A pointer to a file for output
+ * @param r
+ * A pointer to the ring structure.
+ */
+void rte_ring_dump(FILE *f, const struct rte_ring *r);
+
+/* the actual enqueue of pointers on the ring.
+ * Placed here since identical code is needed in both
+ * the single- and multi-producer enqueue functions */
+#define ENQUEUE_PTRS() do { \
+ const uint32_t size = r->prod.size; \
+ uint32_t idx = prod_head & mask; \
+ if (likely(idx + n < size)) { \
+ for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
+ r->ring[idx] = obj_table[i]; \
+ r->ring[idx+1] = obj_table[i+1]; \
+ r->ring[idx+2] = obj_table[i+2]; \
+ r->ring[idx+3] = obj_table[i+3]; \
+ } \
+ switch (n & 0x3) { \
+ case 3: r->ring[idx++] = obj_table[i++]; \
+ case 2: r->ring[idx++] = obj_table[i++]; \
+ case 1: r->ring[idx++] = obj_table[i++]; \
+ } \
+ } else { \
+ for (i = 0; idx < size; i++, idx++)\
+ r->ring[idx] = obj_table[i]; \
+ for (idx = 0; i < n; i++, idx++) \
+ r->ring[idx] = obj_table[i]; \
+ } \
+} while(0)
+
+/* the actual copy of pointers on the ring to obj_table.
+ * Placed here since identical code is needed in both
+ * the single- and multi-consumer dequeue functions */
+#define DEQUEUE_PTRS() do { \
+ uint32_t idx = cons_head & mask; \
+ const uint32_t size = r->cons.size; \
+ if (likely(idx + n < size)) { \
+ for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
+ obj_table[i] = r->ring[idx]; \
+ obj_table[i+1] = r->ring[idx+1]; \
+ obj_table[i+2] = r->ring[idx+2]; \
+ obj_table[i+3] = r->ring[idx+3]; \
+ } \
+ switch (n & 0x3) { \
+ case 3: obj_table[i++] = r->ring[idx++]; \
+ case 2: obj_table[i++] = r->ring[idx++]; \
+ case 1: obj_table[i++] = r->ring[idx++]; \
+ } \
+ } else { \
+ for (i = 0; idx < size; i++, idx++) \
+ obj_table[i] = r->ring[idx]; \
+ for (idx = 0; i < n; i++, idx++) \
+ obj_table[i] = r->ring[idx]; \
+ } \
+} while (0)
+
+/**
+ * @internal Enqueue several objects on the ring (multi-producers safe).
+ *
+ * This function uses a "compare and set" instruction to move the
+ * producer index atomically.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the ring from the obj_table.
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items to the ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
+ * @return
+ * Depends on the behavior value
+ * if behavior = RTE_RING_QUEUE_FIXED
+ * - 0: Success; objects enqueued.
+ * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ * high water mark is exceeded.
+ * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ * if behavior = RTE_RING_QUEUE_VARIABLE
+ * - n: Actual number of objects enqueued.
+ */
+static inline int __attribute__((always_inline))
+__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
+ unsigned n, enum rte_ring_queue_behavior behavior)
+{
+ uint32_t prod_head, prod_next;
+ uint32_t cons_tail, free_entries;
+ const unsigned max = n;
+ int success;
+ unsigned i;
+ uint32_t mask = r->prod.mask;
+ int ret;
+
+ /* move prod.head atomically */
+ do {
+ /* Reset n to the initial burst count */
+ n = max;
+
+ prod_head = r->prod.head;
+ cons_tail = r->cons.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * prod_head > cons_tail). So 'free_entries' is always between 0
+ * and size(ring)-1. */
+ free_entries = (mask + cons_tail - prod_head);
+
+ /* check that we have enough room in ring */
+ if (unlikely(n > free_entries)) {
+ if (behavior == RTE_RING_QUEUE_FIXED) {
+ __RING_STAT_ADD(r, enq_fail, n);
+ return -ENOBUFS;
+ }
+ else {
+ /* No free entry available */
+ if (unlikely(free_entries == 0)) {
+ __RING_STAT_ADD(r, enq_fail, n);
+ return 0;
+ }
+
+ n = free_entries;
+ }
+ }
+
+ prod_next = prod_head + n;
+ success = rte_atomic32_cmpset(&r->prod.head, prod_head,
+ prod_next);
+ } while (unlikely(success == 0));
+
+ /* write entries in ring */
+ ENQUEUE_PTRS();
+ rte_compiler_barrier();
+
+ /* if we exceed the watermark */
+ if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
+ (int)(n | RTE_RING_QUOT_EXCEED);
+ __RING_STAT_ADD(r, enq_quota, n);
+ }
+ else {
+ ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ __RING_STAT_ADD(r, enq_success, n);
+ }
+
+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->prod.tail != prod_head))
+ rte_pause();
+
+ r->prod.tail = prod_next;
+ return ret;
+}
+
+/**
+ * @internal Enqueue several objects on a ring (NOT multi-producers safe).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the ring from the obj_table.
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items to the ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
+ * @return
+ * Depends on the behavior value
+ * if behavior = RTE_RING_QUEUE_FIXED
+ * - 0: Success; objects enqueued.
+ * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ * high water mark is exceeded.
+ * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ * if behavior = RTE_RING_QUEUE_VARIABLE
+ * - n: Actual number of objects enqueued.
+ */
+static inline int __attribute__((always_inline))
+__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
+ unsigned n, enum rte_ring_queue_behavior behavior)
+{
+ uint32_t prod_head, cons_tail;
+ uint32_t prod_next, free_entries;
+ unsigned i;
+ uint32_t mask = r->prod.mask;
+ int ret;
+
+ prod_head = r->prod.head;
+ cons_tail = r->cons.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * prod_head > cons_tail). So 'free_entries' is always between 0
+ * and size(ring)-1. */
+ free_entries = mask + cons_tail - prod_head;
+
+ /* check that we have enough room in ring */
+ if (unlikely(n > free_entries)) {
+ if (behavior == RTE_RING_QUEUE_FIXED) {
+ __RING_STAT_ADD(r, enq_fail, n);
+ return -ENOBUFS;
+ }
+ else {
+ /* No free entry available */
+ if (unlikely(free_entries == 0)) {
+ __RING_STAT_ADD(r, enq_fail, n);
+ return 0;
+ }
+
+ n = free_entries;
+ }
+ }
+
+ prod_next = prod_head + n;
+ r->prod.head = prod_next;
+
+ /* write entries in ring */
+ ENQUEUE_PTRS();
+ rte_compiler_barrier();
+
+ /* if we exceed the watermark */
+ if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
+ (int)(n | RTE_RING_QUOT_EXCEED);
+ __RING_STAT_ADD(r, enq_quota, n);
+ }
+ else {
+ ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ __RING_STAT_ADD(r, enq_success, n);
+ }
+
+ r->prod.tail = prod_next;
+ return ret;
+}
+
+/**
+ * @internal Dequeue several objects from a ring (multi-consumers safe). When
+ * the requested number of objects exceeds what is available, only the actual
+ * number of objects is dequeued.
+ *
+ * This function uses a "compare and set" instruction to move the
+ * consumer index atomically.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to dequeue from the ring to the obj_table.
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
+ * @return
+ * Depends on the behavior value
+ * if behavior = RTE_RING_QUEUE_FIXED
+ * - 0: Success; objects dequeued.
+ * - -ENOENT: Not enough entries in the ring to dequeue; no object is
+ * dequeued.
+ * if behavior = RTE_RING_QUEUE_VARIABLE
+ * - n: Actual number of objects dequeued.
+ */
+
+static inline int __attribute__((always_inline))
+__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
+ unsigned n, enum rte_ring_queue_behavior behavior)
+{
+ uint32_t cons_head, prod_tail;
+ uint32_t cons_next, entries;
+ const unsigned max = n;
+ int success;
+ unsigned i;
+ uint32_t mask = r->prod.mask;
+
+ /* move cons.head atomically */
+ do {
+ /* Restore n as it may change every loop */
+ n = max;
+
+ cons_head = r->cons.head;
+ prod_tail = r->prod.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * cons_head > prod_tail). So 'entries' is always between 0
+ * and size(ring)-1. */
+ entries = (prod_tail - cons_head);
+
+ /* Set the actual entries for dequeue */
+ if (n > entries) {
+ if (behavior == RTE_RING_QUEUE_FIXED) {
+ __RING_STAT_ADD(r, deq_fail, n);
+ return -ENOENT;
+ }
+ else {
+ if (unlikely(entries == 0)){
+ __RING_STAT_ADD(r, deq_fail, n);
+ return 0;
+ }
+
+ n = entries;
+ }
+ }
+
+ cons_next = cons_head + n;
+ success = rte_atomic32_cmpset(&r->cons.head, cons_head,
+ cons_next);
+ } while (unlikely(success == 0));
+
+ /* copy in table */
+ DEQUEUE_PTRS();
+ rte_compiler_barrier();
+
+ /*
+ * If there are other dequeues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->cons.tail != cons_head))
+ rte_pause();
+
+ __RING_STAT_ADD(r, deq_success, n);
+ r->cons.tail = cons_next;
+
+ return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+}
+
+/**
+ * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
+ * When the requested number of objects exceeds what is available, only the
+ * actual number of objects is dequeued.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to dequeue from the ring to the obj_table.
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
+ * @return
+ * Depends on the behavior value
+ * if behavior = RTE_RING_QUEUE_FIXED
+ * - 0: Success; objects dequeued.
+ * - -ENOENT: Not enough entries in the ring to dequeue; no object is
+ * dequeued.
+ * if behavior = RTE_RING_QUEUE_VARIABLE
+ * - n: Actual number of objects dequeued.
+ */
+static inline int __attribute__((always_inline))
+__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
+ unsigned n, enum rte_ring_queue_behavior behavior)
+{
+ uint32_t cons_head, prod_tail;
+ uint32_t cons_next, entries;
+ unsigned i;
+ uint32_t mask = r->prod.mask;
+
+ cons_head = r->cons.head;
+ prod_tail = r->prod.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * cons_head > prod_tail). So 'entries' is always between 0
+ * and size(ring)-1. */
+ entries = prod_tail - cons_head;
+
+ if (n > entries) {
+ if (behavior == RTE_RING_QUEUE_FIXED) {
+ __RING_STAT_ADD(r, deq_fail, n);
+ return -ENOENT;
+ }
+ else {
+ if (unlikely(entries == 0)){
+ __RING_STAT_ADD(r, deq_fail, n);
+ return 0;
+ }
+
+ n = entries;
+ }
+ }
+
+ cons_next = cons_head + n;
+ r->cons.head = cons_next;
+
+ /* copy in table */
+ DEQUEUE_PTRS();
+ rte_compiler_barrier();
+
+ __RING_STAT_ADD(r, deq_success, n);
+ r->cons.tail = cons_next;
+ return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+}
+
+/**
+ * Enqueue several objects on the ring (multi-producers safe).
+ *
+ * This function uses a "compare and set" instruction to move the
+ * producer index atomically.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the ring from the obj_table.
+ * @return
+ * - 0: Success; objects enqueued.
+ * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ * high water mark is exceeded.
+ * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+ unsigned n)
+{
+ return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+}
+
+/**
+ * Enqueue several objects on a ring (NOT multi-producers safe).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the ring from the obj_table.
+ * @return
+ * - 0: Success; objects enqueued.
+ * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ * high water mark is exceeded.
+ * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+ unsigned n)
+{
+ return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+}
+
+/**
+ * Enqueue several objects on a ring.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * ring creation time (see flags).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the ring from the obj_table.
+ * @return
+ * - 0: Success; objects enqueued.
+ * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ * high water mark is exceeded.
+ * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+ unsigned n)
+{
+ if (r->prod.sp_enqueue)
+ return rte_ring_sp_enqueue_bulk(r, obj_table, n);
+ else
+ return rte_ring_mp_enqueue_bulk(r, obj_table, n);
+}
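
A small usage sketch for the all-or-nothing bulk enqueue above: -ENOBUFS means the caller still owns every object, while 0 and -EDQUOT both mean the ring has taken them:

    static int
    push_batch(struct rte_ring *r, void **objs, unsigned n)
    {
        int ret = rte_ring_enqueue_bulk(r, objs, n);

        if (ret == -ENOBUFS)
            return -1;      /* nothing was enqueued, the caller keeps the batch */
        return 0;           /* 0 or -EDQUOT: all n objects are now in the ring */
    }
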
+
+/**
+ * Enqueue one object on a ring (multi-producers safe).
+ *
+ * This function uses a "compare and set" instruction to move the
+ * producer index atomically.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj
+ * A pointer to the object to be added.
+ * @return
+ * - 0: Success; objects enqueued.
+ * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ * high water mark is exceeded.
+ * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
+{
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+}
+
+/**
+ * Enqueue one object on a ring (NOT multi-producers safe).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj
+ * A pointer to the object to be added.
+ * @return
+ * - 0: Success; objects enqueued.
+ * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ * high water mark is exceeded.
+ * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
+{
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1);
+}
+
+/**
+ * Enqueue one object on a ring.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version, depending on the default behaviour that was specified at
+ * ring creation time (see flags).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj
+ * A pointer to the object to be added.
+ * @return
+ * - 0: Success; objects enqueued.
+ * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ * high water mark is exceeded.
+ * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_enqueue(struct rte_ring *r, void *obj)
+{
+ if (r->prod.sp_enqueue)
+ return rte_ring_sp_enqueue(r, obj);
+ else
+ return rte_ring_mp_enqueue(r, obj);
+}
+
+/**
+ * Dequeue several objects from a ring (multi-consumers safe).
+ *
+ * This function uses a "compare and set" instruction to move the
+ * consumer index atomically.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to dequeue from the ring to the obj_table.
+ * @return
+ * - 0: Success; objects dequeued.
+ * - -ENOENT: Not enough entries in the ring to dequeue; no object is
+ * dequeued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+{
+ return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+}
+
+/**
+ * Dequeue several objects from a ring (NOT multi-consumers safe).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to dequeue from the ring to the obj_table,
+ * must be strictly positive.
+ * @return
+ * - 0: Success; objects dequeued.
+ * - -ENOENT: Not enough entries in the ring to dequeue; no object is
+ * dequeued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+{
+ return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+}
+
+/**
+ * Dequeue several objects from a ring.
+ *
+ * This function calls the multi-consumers or the single-consumer
+ * version, depending on the default behaviour that was specified at
+ * ring creation time (see flags).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to dequeue from the ring to the obj_table.
+ * @return
+ * - 0: Success; objects dequeued.
+ * - -ENOENT: Not enough entries in the ring to dequeue, no object is
+ * dequeued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+{
+ if (r->cons.sc_dequeue)
+ return rte_ring_sc_dequeue_bulk(r, obj_table, n);
+ else
+ return rte_ring_mc_dequeue_bulk(r, obj_table, n);
+}
+
+/**
+ * Dequeue one object from a ring (multi-consumers safe).
+ *
+ * This function uses a "compare and set" instruction to move the
+ * consumer index atomically.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_p
+ * A pointer to a void * pointer (object) that will be filled.
+ * @return
+ * - 0: Success; objects dequeued.
+ * - -ENOENT: Not enough entries in the ring to dequeue; no object is
+ * dequeued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
+{
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
+}
+
+/**
+ * Dequeue one object from a ring (NOT multi-consumers safe).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_p
+ * A pointer to a void * pointer (object) that will be filled.
+ * @return
+ * - 0: Success; objects dequeued.
+ * - -ENOENT: Not enough entries in the ring to dequeue, no object is
+ * dequeued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
+{
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
+}
+
+/**
+ * Dequeue one object from a ring.
+ *
+ * This function calls the multi-consumers or the single-consumer
+ * version depending on the default behaviour that was specified at
+ * ring creation time (see flags).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_p
+ * A pointer to a void * pointer (object) that will be filled.
+ * @return
+ * - 0: Success, objects dequeued.
+ * - -ENOENT: Not enough entries in the ring to dequeue, no object is
+ * dequeued.
+ */
+static inline int __attribute__((always_inline))
+rte_ring_dequeue(struct rte_ring *r, void **obj_p)
+{
+ if (r->cons.sc_dequeue)
+ return rte_ring_sc_dequeue(r, obj_p);
+ else
+ return rte_ring_mc_dequeue(r, obj_p);
+}
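
A sketch of the common single-object drain loop built on the call above; process_object() stands in for whatever the application does with each pointer and is purely a placeholder:

    static unsigned
    drain_ring(struct rte_ring *r)
    {
        void *obj;
        unsigned n = 0;

        while (rte_ring_dequeue(r, &obj) == 0) {
            process_object(obj);    /* placeholder for application processing */
            n++;
        }
        return n;                   /* loop stops on -ENOENT, i.e. empty ring */
    }
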
+
+/**
+ * Test if a ring is full.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * - 1: The ring is full.
+ * - 0: The ring is not full.
+ */
+static inline int
+rte_ring_full(const struct rte_ring *r)
+{
+ uint32_t prod_tail = r->prod.tail;
+ uint32_t cons_tail = r->cons.tail;
+ return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
+}
+
+/**
+ * Test if a ring is empty.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * - 1: The ring is empty.
+ * - 0: The ring is not empty.
+ */
+static inline int
+rte_ring_empty(const struct rte_ring *r)
+{
+ uint32_t prod_tail = r->prod.tail;
+ uint32_t cons_tail = r->cons.tail;
+ return !!(cons_tail == prod_tail);
+}
+
+/**
+ * Return the number of entries in a ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The number of entries in the ring.
+ */
+static inline unsigned
+rte_ring_count(const struct rte_ring *r)
+{
+ uint32_t prod_tail = r->prod.tail;
+ uint32_t cons_tail = r->cons.tail;
+ return ((prod_tail - cons_tail) & r->prod.mask);
+}
+
+/**
+ * Return the number of free entries in a ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The number of free entries in the ring.
+ */
+static inline unsigned
+rte_ring_free_count(const struct rte_ring *r)
+{
+ uint32_t prod_tail = r->prod.tail;
+ uint32_t cons_tail = r->cons.tail;
+ return ((cons_tail - prod_tail - 1) & r->prod.mask);
+}
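+
+/*
+ * Illustrative note (not part of the upstream header): because one slot is
+ * used to distinguish "full" from "empty", a ring created with a power-of-two
+ * size of 1024 has mask 1023 and can hold at most 1023 objects. For example,
+ * right after creation prod.tail == cons.tail, so rte_ring_count() returns
+ * (0 & 1023) = 0 and rte_ring_free_count() returns ((-1) & 1023) = 1023.
+ */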
+
+/**
+ * Dump the status of all rings to a file.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_ring_list_dump(FILE *f);
+
+/**
+ * Search for a ring by its name.
+ *
+ * @param name
+ * The name of the ring.
+ * @return
+ * The pointer to the ring matching the name, or NULL if not found,
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ */
+struct rte_ring *rte_ring_lookup(const char *name);
+
+/**
+ * Enqueue several objects on the ring (multi-producers safe).
+ *
+ * This function uses a "compare and set" instruction to move the
+ * producer index atomically.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the ring from the obj_table.
+ * @return
+ * - n: Actual number of objects enqueued.
+ */
+static inline unsigned __attribute__((always_inline))
+rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
+ unsigned n)
+{
+ return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+}
+
+/**
+ * Enqueue several objects on a ring (NOT multi-producers safe).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the ring from the obj_table.
+ * @return
+ * - n: Actual number of objects enqueued.
+ */
+static inline unsigned __attribute__((always_inline))
+rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
+ unsigned n)
+{
+ return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+}
+
+/**
+ * Enqueue several objects on a ring.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * ring creation time (see flags).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the ring from the obj_table.
+ * @return
+ * - n: Actual number of objects enqueued.
+ */
+static inline unsigned __attribute__((always_inline))
+rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
+ unsigned n)
+{
+ if (r->prod.sp_enqueue)
+ return rte_ring_sp_enqueue_burst(r, obj_table, n);
+ else
+ return rte_ring_mp_enqueue_burst(r, obj_table, n);
+}
+
+/**
+ * Dequeue several objects from a ring (multi-consumers safe). When the
+ * requested number of objects exceeds the number of available objects, only
+ * the available objects are dequeued.
+ *
+ * This function uses a "compare and set" instruction to move the
+ * consumer index atomically.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to dequeue from the ring to the obj_table.
+ * @return
+ * - n: Actual number of objects dequeued, 0 if ring is empty
+ */
+static inline unsigned __attribute__((always_inline))
+rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+{
+ return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+}
+
+/**
+ * Dequeue several objects from a ring (NOT multi-consumers safe). When the
+ * requested number of objects exceeds the number of available objects, only
+ * the available objects are dequeued.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to dequeue from the ring to the obj_table.
+ * @return
+ * - n: Actual number of objects dequeued, 0 if ring is empty
+ */
+static inline unsigned __attribute__((always_inline))
+rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+{
+ return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+}
+
+/**
+ * Dequeue multiple objects from a ring up to a maximum number.
+ *
+ * This function calls the multi-consumers or the single-consumer
+ * version, depending on the default behaviour that was specified at
+ * ring creation time (see flags).
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to dequeue from the ring to the obj_table.
+ * @return
+ * - Number of objects dequeued
+ */
+static inline unsigned __attribute__((always_inline))
+rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+{
+ if (r->cons.sc_dequeue)
+ return rte_ring_sc_dequeue_burst(r, obj_table, n);
+ else
+ return rte_ring_mc_dequeue_burst(r, obj_table, n);
+}
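+
+/*
+ * Illustrative usage sketch (not part of the upstream header; names and sizes
+ * are hypothetical): a producer/consumer pair moving object pointers through
+ * a ring with the burst variants, which return the number of objects actually
+ * transferred instead of failing when the full request cannot be satisfied.
+ *
+ *   struct rte_ring *r = rte_ring_create("sketch_ring", 1024, rte_socket_id(), 0);
+ *   void *objs[32];
+ *   unsigned n;
+ *
+ *   n = rte_ring_enqueue_burst(r, objs, 32);  // n objects enqueued, n <= 32
+ *   n = rte_ring_dequeue_burst(r, objs, 32);  // n objects dequeued, n <= 32
+ */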
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RING_H_ */
diff --git a/src/dpdk_lib18/librte_sched/Makefile b/src/dpdk_lib18/librte_sched/Makefile
new file mode 100755
index 00000000..1a25b211
--- /dev/null
+++ b/src/dpdk_lib18/librte_sched/Makefile
@@ -0,0 +1,56 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_sched.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS_rte_red.o := -D_GNU_SOURCE
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_SCHED) += rte_sched.c rte_red.c rte_approx.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_SCHED)-include := rte_sched.h rte_bitmap.h rte_sched_common.h rte_red.h rte_approx.h
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_SCHED) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_SCHED) += lib/librte_net lib/librte_timer
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_sched/rte_approx.c b/src/dpdk_lib18/librte_sched/rte_approx.c
new file mode 100755
index 00000000..771c9518
--- /dev/null
+++ b/src/dpdk_lib18/librte_sched/rte_approx.c
@@ -0,0 +1,196 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+
+#include "rte_approx.h"
+
+/*
+ * Based on paper "Approximating Rational Numbers by Fractions" by Michal
+ * Forisek forisek@dcs.fmph.uniba.sk
+ *
+ * Given a rational number alpha with 0 < alpha < 1 and a precision d, the goal
+ * is to find positive integers p, q such that alpha - d < p/q < alpha + d, and
+ * q is minimal.
+ *
+ * http://people.ksp.sk/~misof/publications/2007approx.pdf
+ */
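+
+/*
+ * Worked example (illustrative): approximating alpha = 0.3333 with precision
+ * d = 0.0005 means finding the fraction p/q with the smallest q inside the
+ * open interval (0.3328, 0.3338). No fraction with q = 1 or q = 2 falls in
+ * that interval, while 1/3 = 0.3333... does, so the expected result is
+ * p = 1, q = 3.
+ */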
+
+/* fraction comparison: compare (a/b) and (c/d) */
+static inline uint32_t
+less(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
+{
+ return (a*d < b*c);
+}
+
+static inline uint32_t
+less_or_equal(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
+{
+ return (a*d <= b*c);
+}
+
+/* check whether a/b is a valid approximation */
+static inline uint32_t
+matches(uint32_t a, uint32_t b,
+ uint32_t alpha_num, uint32_t d_num, uint32_t denum)
+{
+ if (less_or_equal(a, b, alpha_num - d_num, denum))
+ return 0;
+
+ if (less(a, b, alpha_num + d_num, denum))
+ return 1;
+
+ return 0;
+}
+
+static inline void
+find_exact_solution_left(uint32_t p_a, uint32_t q_a, uint32_t p_b, uint32_t q_b,
+ uint32_t alpha_num, uint32_t d_num, uint32_t denum, uint32_t *p, uint32_t *q)
+{
+ uint32_t k_num = denum * p_b - (alpha_num + d_num) * q_b;
+ uint32_t k_denum = (alpha_num + d_num) * q_a - denum * p_a;
+ uint32_t k = (k_num / k_denum) + 1;
+
+ *p = p_b + k * p_a;
+ *q = q_b + k * q_a;
+}
+
+static inline void
+find_exact_solution_right(uint32_t p_a, uint32_t q_a, uint32_t p_b, uint32_t q_b,
+ uint32_t alpha_num, uint32_t d_num, uint32_t denum, uint32_t *p, uint32_t *q)
+{
+ uint32_t k_num = - denum * p_b + (alpha_num - d_num) * q_b;
+ uint32_t k_denum = - (alpha_num - d_num) * q_a + denum * p_a;
+ uint32_t k = (k_num / k_denum) + 1;
+
+ *p = p_b + k * p_a;
+ *q = q_b + k * q_a;
+}
+
+static int
+find_best_rational_approximation(uint32_t alpha_num, uint32_t d_num, uint32_t denum, uint32_t *p, uint32_t *q)
+{
+ uint32_t p_a, q_a, p_b, q_b;
+
+ /* check assumptions on the inputs */
+ if (!((0 < d_num) && (d_num < alpha_num) && (alpha_num < denum) && (d_num + alpha_num < denum))) {
+ return -1;
+ }
+
+ /* set initial bounds for the search */
+ p_a = 0;
+ q_a = 1;
+ p_b = 1;
+ q_b = 1;
+
+ while (1) {
+ uint32_t new_p_a, new_q_a, new_p_b, new_q_b;
+ uint32_t x_num, x_denum, x;
+ int aa, bb;
+
+ /* compute the number of steps to the left */
+ x_num = denum * p_b - alpha_num * q_b;
+ x_denum = - denum * p_a + alpha_num * q_a;
+ x = (x_num + x_denum - 1) / x_denum; /* x = ceil(x_num / x_denum) */
+
+ /* check whether we have a valid approximation */
+ aa = matches(p_b + x * p_a, q_b + x * q_a, alpha_num, d_num, denum);
+ bb = matches(p_b + (x-1) * p_a, q_b + (x - 1) * q_a, alpha_num, d_num, denum);
+ if (aa || bb) {
+ find_exact_solution_left(p_a, q_a, p_b, q_b, alpha_num, d_num, denum, p, q);
+ return 0;
+ }
+
+ /* update the interval */
+ new_p_a = p_b + (x - 1) * p_a ;
+ new_q_a = q_b + (x - 1) * q_a;
+ new_p_b = p_b + x * p_a ;
+ new_q_b = q_b + x * q_a;
+
+ p_a = new_p_a ;
+ q_a = new_q_a;
+ p_b = new_p_b ;
+ q_b = new_q_b;
+
+ /* compute the number of steps to the right */
+ x_num = alpha_num * q_b - denum * p_b;
+ x_denum = - alpha_num * q_a + denum * p_a;
+ x = (x_num + x_denum - 1) / x_denum; /* x = ceil(x_num / x_denum) */
+
+ /* check whether we have a valid approximation */
+ aa = matches(p_b + x * p_a, q_b + x * q_a, alpha_num, d_num, denum);
+ bb = matches(p_b + (x - 1) * p_a, q_b + (x - 1) * q_a, alpha_num, d_num, denum);
+ if (aa || bb) {
+ find_exact_solution_right(p_a, q_a, p_b, q_b, alpha_num, d_num, denum, p, q);
+ return 0;
+ }
+
+ /* update the interval */
+ new_p_a = p_b + (x - 1) * p_a;
+ new_q_a = q_b + (x - 1) * q_a;
+ new_p_b = p_b + x * p_a;
+ new_q_b = q_b + x * q_a;
+
+ p_a = new_p_a;
+ q_a = new_q_a;
+ p_b = new_p_b;
+ q_b = new_q_b;
+ }
+}
+
+int rte_approx(double alpha, double d, uint32_t *p, uint32_t *q)
+{
+ uint32_t alpha_num, d_num, denum;
+
+ /* Check input arguments */
+ if (!((0.0 < d) && (d < alpha) && (alpha < 1.0))) {
+ return -1;
+ }
+
+ if ((p == NULL) || (q == NULL)) {
+ return -2;
+ }
+
+ /* Compute alpha_num, d_num and denum */
+ denum = 1;
+ while (d < 1) {
+ alpha *= 10;
+ d *= 10;
+ denum *= 10;
+ }
+ alpha_num = (uint32_t) alpha;
+ d_num = (uint32_t) d;
+
+ /* Perform approximation */
+ return find_best_rational_approximation(alpha_num, d_num, denum, p, q);
+}
diff --git a/src/dpdk_lib18/librte_sched/rte_approx.h b/src/dpdk_lib18/librte_sched/rte_approx.h
new file mode 100755
index 00000000..09f30a87
--- /dev/null
+++ b/src/dpdk_lib18/librte_sched/rte_approx.h
@@ -0,0 +1,75 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_APPROX_H__
+#define __INCLUDE_RTE_APPROX_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Rational Approximation
+ *
+ * Given a rational number alpha with 0 < alpha < 1 and a precision d, the goal
+ * is to find positive integers p, q such that alpha - d < p/q < alpha + d, and
+ * q is minimal.
+ *
+ ***/
+
+#include <stdint.h>
+
+/**
+ * Find best rational approximation
+ *
+ * @param alpha
+ * Rational number to approximate
+ * @param d
+ * Precision for the rational approximation
+ * @param p
+ * Pointer to pre-allocated space where the numerator of the rational
+ * approximation will be stored when operation is successful
+ * @param q
+ * Pointer to pre-allocated space where the denominator of the rational
+ * approximation will be stored when operation is successful
+ * @return
+ * 0 upon success, error code otherwise
+ */
+int rte_approx(double alpha, double d, uint32_t *p, uint32_t *q);
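+
+/*
+ * Illustrative call sketch (not part of the upstream header): the caller
+ * supplies alpha and d in (0, 1) and receives the fraction p/q with minimal
+ * denominator inside (alpha - d, alpha + d).
+ *
+ *   uint32_t p, q;
+ *
+ *   if (rte_approx(0.3333, 0.0005, &p, &q) == 0)
+ *       printf("%u/%u\n", p, q);  // with these hypothetical inputs: 1/3
+ */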
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_APPROX_H__ */
diff --git a/src/dpdk_lib18/librte_sched/rte_bitmap.h b/src/dpdk_lib18/librte_sched/rte_bitmap.h
new file mode 100755
index 00000000..95f3c0d3
--- /dev/null
+++ b/src/dpdk_lib18/librte_sched/rte_bitmap.h
@@ -0,0 +1,563 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_BITMAP_H__
+#define __INCLUDE_RTE_BITMAP_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Bitmap
+ *
+ * The bitmap component provides a mechanism to manage large arrays of bits
+ * through bit get/set/clear and bit array scan operations.
+ *
+ * The bitmap scan operation is optimized for 64-bit CPUs using 64-byte cache
+ * lines. The bitmap is hierarchically organized using two arrays (array1 and
+ * array2), with each bit in array1 being associated with a full cache line
+ * (512 bits) of bitmap bits, which are stored in array2: the bit in array1 is
+ * set only when there is at least one bit set within its associated array2
+ * bits, otherwise the bit in array1 is cleared. The read and write operations
+ * for array1 and array2 are always done in slabs of 64 bits.
+ *
+ * This bitmap is not thread safe. For lock free operation on a specific bitmap
+ * instance, a single writer thread performing bit set/clear operations is
+ * allowed; only the writer thread can do bitmap scan operations, while several
+ * reader threads may perform bit get operations in parallel with the writer
+ * thread. When the use of locking primitives is acceptable, serialization of
+ * the bit set/clear and bitmap scan operations needs to be enforced by the
+ * caller, while the bit get operation does not require locking the bitmap.
+ *
+ ***/
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_branch_prediction.h>
+#include <rte_prefetch.h>
+
+#ifndef RTE_BITMAP_OPTIMIZATIONS
+#define RTE_BITMAP_OPTIMIZATIONS 1
+#endif
+#if RTE_BITMAP_OPTIMIZATIONS
+#include <tmmintrin.h>
+#endif
+
+/* Slab */
+#define RTE_BITMAP_SLAB_BIT_SIZE 64
+#define RTE_BITMAP_SLAB_BIT_SIZE_LOG2 6
+#define RTE_BITMAP_SLAB_BIT_MASK (RTE_BITMAP_SLAB_BIT_SIZE - 1)
+
+/* Cache line (CL) */
+#define RTE_BITMAP_CL_BIT_SIZE (RTE_CACHE_LINE_SIZE * 8)
+#define RTE_BITMAP_CL_BIT_SIZE_LOG2 9
+#define RTE_BITMAP_CL_BIT_MASK (RTE_BITMAP_CL_BIT_SIZE - 1)
+
+#define RTE_BITMAP_CL_SLAB_SIZE (RTE_BITMAP_CL_BIT_SIZE / RTE_BITMAP_SLAB_BIT_SIZE)
+#define RTE_BITMAP_CL_SLAB_SIZE_LOG2 3
+#define RTE_BITMAP_CL_SLAB_MASK (RTE_BITMAP_CL_SLAB_SIZE - 1)
+
+/** Bitmap data structure */
+struct rte_bitmap {
+ /* Context for array1 and array2 */
+ uint64_t *array1; /**< Bitmap array1 */
+ uint64_t *array2; /**< Bitmap array2 */
+ uint32_t array1_size; /**< Number of 64-bit slabs in array1 that are actually used */
+ uint32_t array2_size; /**< Number of 64-bit slabs in array2 */
+
+ /* Context for the "scan next" operation */
+ uint32_t index1; /**< Bitmap scan: Index of current array1 slab */
+ uint32_t offset1; /**< Bitmap scan: Offset of current bit within current array1 slab */
+ uint32_t index2; /**< Bitmap scan: Index of current array2 slab */
+ uint32_t go2; /**< Bitmap scan: Go/stop condition for current array2 cache line */
+
+ /* Storage space for array1 and array2 */
+ uint8_t memory[0];
+};
+
+static inline void
+__rte_bitmap_index1_inc(struct rte_bitmap *bmp)
+{
+ bmp->index1 = (bmp->index1 + 1) & (bmp->array1_size - 1);
+}
+
+static inline uint64_t
+__rte_bitmap_mask1_get(struct rte_bitmap *bmp)
+{
+ return ((~1lu) << bmp->offset1);
+}
+
+static inline void
+__rte_bitmap_index2_set(struct rte_bitmap *bmp)
+{
+ bmp->index2 = (((bmp->index1 << RTE_BITMAP_SLAB_BIT_SIZE_LOG2) + bmp->offset1) << RTE_BITMAP_CL_SLAB_SIZE_LOG2);
+}
+
+#if RTE_BITMAP_OPTIMIZATIONS
+
+static inline int
+rte_bsf64(uint64_t slab, uint32_t *pos)
+{
+ if (likely(slab == 0)) {
+ return 0;
+ }
+
+ *pos = __builtin_ctzll(slab);
+ return 1;
+}
+
+#else
+
+static inline int
+rte_bsf64(uint64_t slab, uint32_t *pos)
+{
+ uint64_t mask;
+ uint32_t i;
+
+ if (likely(slab == 0)) {
+ return 0;
+ }
+
+ for (i = 0, mask = 1; i < RTE_BITMAP_SLAB_BIT_SIZE; i ++, mask <<= 1) {
+ if (unlikely(slab & mask)) {
+ *pos = i;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#endif
+
+static inline uint32_t
+__rte_bitmap_get_memory_footprint(uint32_t n_bits,
+ uint32_t *array1_byte_offset, uint32_t *array1_slabs,
+ uint32_t *array2_byte_offset, uint32_t *array2_slabs)
+{
+ uint32_t n_slabs_context, n_slabs_array1, n_cache_lines_context_and_array1;
+ uint32_t n_cache_lines_array2;
+ uint32_t n_bytes_total;
+
+ n_cache_lines_array2 = (n_bits + RTE_BITMAP_CL_BIT_SIZE - 1) / RTE_BITMAP_CL_BIT_SIZE;
+ n_slabs_array1 = (n_cache_lines_array2 + RTE_BITMAP_SLAB_BIT_SIZE - 1) / RTE_BITMAP_SLAB_BIT_SIZE;
+ n_slabs_array1 = rte_align32pow2(n_slabs_array1);
+ n_slabs_context = (sizeof(struct rte_bitmap) + (RTE_BITMAP_SLAB_BIT_SIZE / 8) - 1) / (RTE_BITMAP_SLAB_BIT_SIZE / 8);
+ n_cache_lines_context_and_array1 = (n_slabs_context + n_slabs_array1 + RTE_BITMAP_CL_SLAB_SIZE - 1) / RTE_BITMAP_CL_SLAB_SIZE;
+ n_bytes_total = (n_cache_lines_context_and_array1 + n_cache_lines_array2) * RTE_CACHE_LINE_SIZE;
+
+ if (array1_byte_offset) {
+ *array1_byte_offset = n_slabs_context * (RTE_BITMAP_SLAB_BIT_SIZE / 8);
+ }
+ if (array1_slabs) {
+ *array1_slabs = n_slabs_array1;
+ }
+ if (array2_byte_offset) {
+ *array2_byte_offset = n_cache_lines_context_and_array1 * RTE_CACHE_LINE_SIZE;
+ }
+ if (array2_slabs) {
+ *array2_slabs = n_cache_lines_array2 * RTE_BITMAP_CL_SLAB_SIZE;
+ }
+
+ return n_bytes_total;
+}
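+
+/*
+ * Worked example (illustrative, assuming a 64-bit build with 64-byte cache
+ * lines): for n_bits = 1 << 20, array2 needs 1M / 512 = 2048 cache lines
+ * (128 KiB) and array1 needs 2048 / 64 = 32 slabs (256 bytes, already a power
+ * of two), so the total footprint is the 2048 array2 cache lines plus a few
+ * leading cache lines holding the rte_bitmap context and array1.
+ */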
+
+static inline void
+__rte_bitmap_scan_init(struct rte_bitmap *bmp)
+{
+ bmp->index1 = bmp->array1_size - 1;
+ bmp->offset1 = RTE_BITMAP_SLAB_BIT_SIZE - 1;
+ __rte_bitmap_index2_set(bmp);
+ bmp->index2 += RTE_BITMAP_CL_SLAB_SIZE;
+
+ bmp->go2 = 0;
+}
+
+/**
+ * Bitmap memory footprint calculation
+ *
+ * @param n_bits
+ * Number of bits in the bitmap
+ * @return
+ * Bitmap memory footprint measured in bytes on success, 0 on error
+ */
+static inline uint32_t
+rte_bitmap_get_memory_footprint(uint32_t n_bits) {
+ /* Check input arguments */
+ if (n_bits == 0) {
+ return 0;
+ }
+
+ return __rte_bitmap_get_memory_footprint(n_bits, NULL, NULL, NULL, NULL);
+}
+
+/**
+ * Bitmap initialization
+ *
+ * @param n_bits
+ * Number of pre-allocated bits in array2. Must be non-zero and multiple of 512.
+ * @param mem
+ * Base address of pre-allocated, cache line aligned memory for the bitmap
+ * @param mem_size
+ * Size of the pre-allocated memory in bytes
+ * @return
+ * Handle to bitmap instance upon success, NULL otherwise
+ */
+static inline struct rte_bitmap *
+rte_bitmap_init(uint32_t n_bits, uint8_t *mem, uint32_t mem_size)
+{
+ struct rte_bitmap *bmp;
+ uint32_t array1_byte_offset, array1_slabs, array2_byte_offset, array2_slabs;
+ uint32_t size;
+
+ /* Check input arguments */
+ if (n_bits == 0) {
+ return NULL;
+ }
+
+ if ((mem == NULL) || (((uintptr_t) mem) & RTE_CACHE_LINE_MASK)) {
+ return NULL;
+ }
+
+ size = __rte_bitmap_get_memory_footprint(n_bits,
+ &array1_byte_offset, &array1_slabs,
+ &array2_byte_offset, &array2_slabs);
+ if (mem_size < size) {
+ return NULL;
+ }
+
+ /* Setup bitmap */
+ memset(mem, 0, size);
+ bmp = (struct rte_bitmap *) mem;
+
+ bmp->array1 = (uint64_t *) &mem[array1_byte_offset];
+ bmp->array1_size = array1_slabs;
+ bmp->array2 = (uint64_t *) &mem[array2_byte_offset];
+ bmp->array2_size = array2_slabs;
+
+ __rte_bitmap_scan_init(bmp);
+
+ return bmp;
+}
+
+/**
+ * Bitmap free
+ *
+ * @param bmp
+ * Handle to bitmap instance
+ * @return
+ * 0 upon success, error code otherwise
+ */
+static inline int
+rte_bitmap_free(struct rte_bitmap *bmp)
+{
+ /* Check input arguments */
+ if (bmp == NULL) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Bitmap reset
+ *
+ * @param bmp
+ * Handle to bitmap instance
+ */
+static inline void
+rte_bitmap_reset(struct rte_bitmap *bmp)
+{
+ memset(bmp->array1, 0, bmp->array1_size * sizeof(uint64_t));
+ memset(bmp->array2, 0, bmp->array2_size * sizeof(uint64_t));
+ __rte_bitmap_scan_init(bmp);
+}
+
+/**
+ * Bitmap location prefetch into CPU L1 cache
+ *
+ * @param bmp
+ * Handle to bitmap instance
+ * @param pos
+ * Bit position
+ */
+static inline void
+rte_bitmap_prefetch0(struct rte_bitmap *bmp, uint32_t pos)
+{
+ uint64_t *slab2;
+ uint32_t index2;
+
+ index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
+ slab2 = bmp->array2 + index2;
+ rte_prefetch0((void *) slab2);
+}
+
+/**
+ * Bitmap bit get
+ *
+ * @param bmp
+ * Handle to bitmap instance
+ * @param pos
+ * Bit position
+ * @return
+ * 0 when bit is cleared, non-zero when bit is set
+ */
+static inline uint64_t
+rte_bitmap_get(struct rte_bitmap *bmp, uint32_t pos)
+{
+ uint64_t *slab2;
+ uint32_t index2, offset2;
+
+ index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
+ offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK;
+ slab2 = bmp->array2 + index2;
+ return ((*slab2) & (1lu << offset2));
+}
+
+/**
+ * Bitmap bit set
+ *
+ * @param bmp
+ * Handle to bitmap instance
+ * @param pos
+ * Bit position
+ */
+static inline void
+rte_bitmap_set(struct rte_bitmap *bmp, uint32_t pos)
+{
+ uint64_t *slab1, *slab2;
+ uint32_t index1, index2, offset1, offset2;
+
+ /* Set bit in array2 slab and set bit in array1 slab */
+ index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
+ offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK;
+ index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2);
+ offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK;
+ slab2 = bmp->array2 + index2;
+ slab1 = bmp->array1 + index1;
+
+ *slab2 |= 1lu << offset2;
+ *slab1 |= 1lu << offset1;
+}
+
+/**
+ * Bitmap slab set
+ *
+ * @param bmp
+ * Handle to bitmap instance
+ * @param pos
+ * Bit position identifying the array2 slab
+ * @param slab
+ * Value to be assigned to the 64-bit slab in array2
+ */
+static inline void
+rte_bitmap_set_slab(struct rte_bitmap *bmp, uint32_t pos, uint64_t slab)
+{
+ uint64_t *slab1, *slab2;
+ uint32_t index1, index2, offset1;
+
+ /* Set bits in array2 slab and set bit in array1 slab */
+ index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
+ index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2);
+ offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK;
+ slab2 = bmp->array2 + index2;
+ slab1 = bmp->array1 + index1;
+
+ *slab2 |= slab;
+ *slab1 |= 1lu << offset1;
+}
+
+static inline uint64_t
+__rte_bitmap_line_not_empty(uint64_t *slab2)
+{
+ uint64_t v1, v2, v3, v4;
+
+ v1 = slab2[0] | slab2[1];
+ v2 = slab2[2] | slab2[3];
+ v3 = slab2[4] | slab2[5];
+ v4 = slab2[6] | slab2[7];
+ v1 |= v2;
+ v3 |= v4;
+
+ return (v1 | v3);
+}
+
+/**
+ * Bitmap bit clear
+ *
+ * @param bmp
+ * Handle to bitmap instance
+ * @param pos
+ * Bit position
+ */
+static inline void
+rte_bitmap_clear(struct rte_bitmap *bmp, uint32_t pos)
+{
+ uint64_t *slab1, *slab2;
+ uint32_t index1, index2, offset1, offset2;
+
+ /* Clear bit in array2 slab */
+ index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
+ offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK;
+ slab2 = bmp->array2 + index2;
+
+ /* Return if array2 slab is not all-zeros */
+ *slab2 &= ~(1lu << offset2);
+ if (*slab2){
+ return;
+ }
+
+ /* Check the entire cache line of array2 for all-zeros */
+ index2 &= ~ RTE_BITMAP_CL_SLAB_MASK;
+ slab2 = bmp->array2 + index2;
+ if (__rte_bitmap_line_not_empty(slab2)) {
+ return;
+ }
+
+ /* The array2 cache line is all-zeros, so clear bit in array1 slab */
+ index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2);
+ offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK;
+ slab1 = bmp->array1 + index1;
+ *slab1 &= ~(1lu << offset1);
+
+ return;
+}
+
+static inline int
+__rte_bitmap_scan_search(struct rte_bitmap *bmp)
+{
+ uint64_t value1;
+ uint32_t i;
+
+ /* Check current array1 slab */
+ value1 = bmp->array1[bmp->index1];
+ value1 &= __rte_bitmap_mask1_get(bmp);
+
+ if (rte_bsf64(value1, &bmp->offset1)) {
+ return 1;
+ }
+
+ __rte_bitmap_index1_inc(bmp);
+ bmp->offset1 = 0;
+
+ /* Look for another array1 slab */
+ for (i = 0; i < bmp->array1_size; i ++, __rte_bitmap_index1_inc(bmp)) {
+ value1 = bmp->array1[bmp->index1];
+
+ if (rte_bsf64(value1, &bmp->offset1)) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static inline void
+__rte_bitmap_scan_read_init(struct rte_bitmap *bmp)
+{
+ __rte_bitmap_index2_set(bmp);
+ bmp->go2 = 1;
+ rte_prefetch1((void *)(bmp->array2 + bmp->index2 + 8));
+}
+
+static inline int
+__rte_bitmap_scan_read(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab)
+{
+ uint64_t *slab2;
+
+ slab2 = bmp->array2 + bmp->index2;
+ for ( ; bmp->go2 ; bmp->index2 ++, slab2 ++, bmp->go2 = bmp->index2 & RTE_BITMAP_CL_SLAB_MASK) {
+ if (*slab2) {
+ *pos = bmp->index2 << RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
+ *slab = *slab2;
+
+ bmp->index2 ++;
+ slab2 ++;
+ bmp->go2 = bmp->index2 & RTE_BITMAP_CL_SLAB_MASK;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Bitmap scan (with automatic wrap-around)
+ *
+ * @param bmp
+ * Handle to bitmap instance
+ * @param pos
+ * When function call returns 1, pos contains the position of the next set
+ * bit, otherwise not modified
+ * @param slab
+ * When function call returns 1, slab contains the value of the entire 64-bit
+ * slab where the bit indicated by pos is located. Slabs are always 64-bit
+ * aligned, so the position of the first bit of the slab (this bit is not
+ * necessarily set) is pos / 64. Once a slab has been returned by the bitmap
+ * scan operation, the internal pointers of the bitmap are updated to point
+ * after this slab, so the same slab will not be returned again if it
+ * contains more than one bit which is set. When function call returns 0,
+ * slab is not modified.
+ * @return
+ * 0 if there is no bit set in the bitmap, 1 otherwise
+ */
+static inline int
+rte_bitmap_scan(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab)
+{
+ /* Return data from current array2 line if available */
+ if (__rte_bitmap_scan_read(bmp, pos, slab)) {
+ return 1;
+ }
+
+ /* Look for non-empty array2 line */
+ if (__rte_bitmap_scan_search(bmp)) {
+ __rte_bitmap_scan_read_init(bmp);
+ __rte_bitmap_scan_read(bmp, pos, slab);
+ return 1;
+ }
+
+ /* Empty bitmap */
+ return 0;
+}
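+
+/*
+ * Illustrative usage sketch (not part of the upstream header; allocation
+ * details are hypothetical): create a bitmap for n bits, set a few bits and
+ * drain them with the wrap-around scan.
+ *
+ *   uint32_t n_bits = 1 << 20;
+ *   uint32_t size = rte_bitmap_get_memory_footprint(n_bits);
+ *   uint8_t *mem = rte_malloc("bmp", size, RTE_CACHE_LINE_SIZE);
+ *   struct rte_bitmap *bmp = rte_bitmap_init(n_bits, mem, size);
+ *   uint32_t pos;
+ *   uint64_t slab;
+ *
+ *   rte_bitmap_set(bmp, 7);
+ *   rte_bitmap_set(bmp, 1000);
+ *
+ *   while (rte_bitmap_scan(bmp, &pos, &slab)) {
+ *       // process every set bit in the returned 64-bit slab
+ *       while (slab) {
+ *           uint32_t bit = pos + __builtin_ctzll(slab);
+ *           rte_bitmap_clear(bmp, bit);
+ *           slab &= slab - 1;
+ *       }
+ *   }
+ */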
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_BITMAP_H__ */
diff --git a/src/dpdk_lib18/librte_sched/rte_red.c b/src/dpdk_lib18/librte_sched/rte_red.c
new file mode 100755
index 00000000..fdf40576
--- /dev/null
+++ b/src/dpdk_lib18/librte_sched/rte_red.c
@@ -0,0 +1,158 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <math.h>
+#include "rte_red.h"
+#include <rte_random.h>
+#include <rte_common.h>
+
+#ifdef __INTEL_COMPILER
+#pragma warning(disable:2259) /* conversion may lose significant bits */
+#endif
+
+static int rte_red_init_done = 0; /**< Flag to indicate that global initialisation is done */
+uint32_t rte_red_rand_val = 0; /**< Random value cache */
+uint32_t rte_red_rand_seed = 0; /**< Seed for random number generation */
+
+/**
+ * table[i] = log2(1-Wq) * Scale * -1
+ * Wq = 1/(2^i)
+ */
+uint16_t rte_red_log2_1_minus_Wq[RTE_RED_WQ_LOG2_NUM];
+
+/**
+ * table[i] = 2^(i/16) * Scale
+ */
+uint16_t rte_red_pow2_frac_inv[16];
+
+/**
+ * @brief Initialize tables used to compute average
+ * queue size when queue is empty.
+ */
+static void
+__rte_red_init_tables(void)
+{
+ uint32_t i = 0;
+ double scale = 0.0;
+ double table_size = 0.0;
+
+ scale = (double)(1 << RTE_RED_SCALING);
+ table_size = (double)(RTE_DIM(rte_red_pow2_frac_inv));
+
+ for (i = 0; i < RTE_DIM(rte_red_pow2_frac_inv); i++) {
+ double m = (double)i;
+
+ rte_red_pow2_frac_inv[i] = (uint16_t) round(scale / pow(2, m / table_size));
+ }
+
+ scale = 1024.0;
+
+ RTE_RED_ASSERT(RTE_RED_WQ_LOG2_NUM == RTE_DIM(rte_red_log2_1_minus_Wq));
+
+ for (i = RTE_RED_WQ_LOG2_MIN; i <= RTE_RED_WQ_LOG2_MAX; i++) {
+ double n = (double)i;
+ double Wq = pow(2, -n);
+ uint32_t index = i - RTE_RED_WQ_LOG2_MIN;
+
+ rte_red_log2_1_minus_Wq[index] = (uint16_t) round(-1.0 * scale * log2(1.0 - Wq));
+ /**
+ * A table entry of zero corresponds to a Wq of zero,
+ * which is not valid (avg would remain constant no
+ * matter how long the queue is empty), so we have
+ * to check for zero and round up to one.
+ */
+ if (rte_red_log2_1_minus_Wq[index] == 0) {
+ rte_red_log2_1_minus_Wq[index] = 1;
+ }
+ }
+}
+
+int
+rte_red_rt_data_init(struct rte_red *red)
+{
+ if (red == NULL)
+ return -1;
+
+ red->avg = 0;
+ red->count = 0;
+ red->q_time = 0;
+ return 0;
+}
+
+int
+rte_red_config_init(struct rte_red_config *red_cfg,
+ const uint16_t wq_log2,
+ const uint16_t min_th,
+ const uint16_t max_th,
+ const uint16_t maxp_inv)
+{
+ if (red_cfg == NULL) {
+ return -1;
+ }
+ if (max_th > RTE_RED_MAX_TH_MAX) {
+ return -2;
+ }
+ if (min_th >= max_th) {
+ return -3;
+ }
+ if (wq_log2 > RTE_RED_WQ_LOG2_MAX) {
+ return -4;
+ }
+ if (wq_log2 < RTE_RED_WQ_LOG2_MIN) {
+ return -5;
+ }
+ if (maxp_inv < RTE_RED_MAXP_INV_MIN) {
+ return -6;
+ }
+ if (maxp_inv > RTE_RED_MAXP_INV_MAX) {
+ return -7;
+ }
+
+ /**
+ * Initialize the RED module if not already done
+ */
+ if (!rte_red_init_done) {
+ rte_red_rand_seed = rte_rand();
+ rte_red_rand_val = rte_fast_rand();
+ __rte_red_init_tables();
+ rte_red_init_done = 1;
+ }
+
+ red_cfg->min_th = ((uint32_t) min_th) << (wq_log2 + RTE_RED_SCALING);
+ red_cfg->max_th = ((uint32_t) max_th) << (wq_log2 + RTE_RED_SCALING);
+ red_cfg->pa_const = (2 * (max_th - min_th) * maxp_inv) << RTE_RED_SCALING;
+ red_cfg->maxp_inv = maxp_inv;
+ red_cfg->wq_log2 = wq_log2;
+
+ return 0;
+}
diff --git a/src/dpdk_lib18/librte_sched/rte_red.h b/src/dpdk_lib18/librte_sched/rte_red.h
new file mode 100755
index 00000000..0d8412ff
--- /dev/null
+++ b/src/dpdk_lib18/librte_sched/rte_red.h
@@ -0,0 +1,453 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTE_RED_H_INCLUDED__
+#define __RTE_RED_H_INCLUDED__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Random Early Detection (RED)
+ *
+ *
+ ***/
+
+#include <stdint.h>
+#include <limits.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_branch_prediction.h>
+
+#define RTE_RED_SCALING 10 /**< Fraction size for fixed-point */
+#define RTE_RED_S (1 << 22) /**< Packet size multiplied by number of leaf queues */
+#define RTE_RED_MAX_TH_MAX 1023 /**< Max threshold limit in fixed point format */
+#define RTE_RED_WQ_LOG2_MIN 1 /**< Min inverse filter weight value */
+#define RTE_RED_WQ_LOG2_MAX 12 /**< Max inverse filter weight value */
+#define RTE_RED_MAXP_INV_MIN 1 /**< Min inverse mark probability value */
+#define RTE_RED_MAXP_INV_MAX 255 /**< Max inverse mark probability value */
+#define RTE_RED_2POW16 (1<<16) /**< 2 power 16 */
+#define RTE_RED_INT16_NBITS (sizeof(uint16_t) * CHAR_BIT)
+#define RTE_RED_WQ_LOG2_NUM (RTE_RED_WQ_LOG2_MAX - RTE_RED_WQ_LOG2_MIN + 1)
+
+#ifdef RTE_RED_DEBUG
+
+#define RTE_RED_ASSERT(exp) \
+if (!(exp)) { \
+ rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
+}
+
+#else
+
+#define RTE_RED_ASSERT(exp) do { } while(0)
+
+#endif /* RTE_RED_DEBUG */
+
+/**
+ * Externs
+ *
+ */
+extern uint32_t rte_red_rand_val;
+extern uint32_t rte_red_rand_seed;
+extern uint16_t rte_red_log2_1_minus_Wq[RTE_RED_WQ_LOG2_NUM];
+extern uint16_t rte_red_pow2_frac_inv[16];
+
+/**
+ * RED configuration parameters passed by user
+ *
+ */
+struct rte_red_params {
+ uint16_t min_th; /**< Minimum threshold for queue (min_th) */
+ uint16_t max_th; /**< Maximum threshold for queue (max_th) */
+ uint16_t maxp_inv; /**< Inverse of packet marking probability maximum value (maxp = 1 / maxp_inv) */
+ uint16_t wq_log2; /**< Negated log2 of queue weight (wq = 1 / (2 ^ wq_log2)) */
+};
+
+/**
+ * RED configuration parameters
+ */
+struct rte_red_config {
+ uint32_t min_th; /**< min_th scaled in fixed-point format */
+ uint32_t max_th; /**< max_th scaled in fixed-point format */
+ uint32_t pa_const; /**< Precomputed constant value used for pa calculation (scaled in fixed-point format) */
+ uint8_t maxp_inv; /**< maxp_inv */
+ uint8_t wq_log2; /**< wq_log2 */
+};
+
+/**
+ * RED run-time data
+ */
+struct rte_red {
+ uint32_t avg; /**< Average queue size (avg), scaled in fixed-point format */
+ uint32_t count; /**< Number of packets since last marked packet (count) */
+ uint64_t q_time; /**< Start of the queue idle time (q_time) */
+};
+
+/**
+ * @brief Initialises run-time data
+ *
+ * @param [in,out] data pointer to RED runtime data
+ *
+ * @return Operation status
+ * @retval 0 success
+ * @retval !0 error
+ */
+int
+rte_red_rt_data_init(struct rte_red *red);
+
+/**
+ * @brief Configures a single RED configuration parameter structure.
+ *
+ * @param [in,out] config pointer to a RED configuration parameter structure
+ * @param [in] wq_log2 log2 of the filter weight, valid range is:
+ * RTE_RED_WQ_LOG2_MIN <= wq_log2 <= RTE_RED_WQ_LOG2_MAX
+ * @param [in] min_th queue minimum threshold in number of packets
+ * @param [in] max_th queue maximum threshold in number of packets
+ * @param [in] maxp_inv inverse maximum mark probability
+ *
+ * @return Operation status
+ * @retval 0 success
+ * @retval !0 error
+ */
+int
+rte_red_config_init(struct rte_red_config *red_cfg,
+ const uint16_t wq_log2,
+ const uint16_t min_th,
+ const uint16_t max_th,
+ const uint16_t maxp_inv);
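+
+/*
+ * Illustrative configuration sketch (the numeric values are hypothetical):
+ * thresholds of 32/128 packets, maximum mark probability 1/10 and EWMA
+ * weight wq = 1/512.
+ *
+ *   struct rte_red_config cfg;
+ *   struct rte_red red;
+ *
+ *   rte_red_config_init(&cfg, 9, 32, 128, 10);  // wq_log2, min_th, max_th, maxp_inv
+ *   rte_red_rt_data_init(&red);
+ *
+ *   // per enqueue attempt: q is the current queue length in packets, time is
+ *   // the port time stamp in bytes; 0 means enqueue, non-zero means drop
+ *   int verdict = rte_red_enqueue(&cfg, &red, q, time);
+ */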
+
+/**
+ * @brief Generate random number for RED
+ *
+ * Implementation based on:
+ * http://software.intel.com/en-us/articles/fast-random-number-generator-on-the-intel-pentiumr-4-processor/
+ *
+ * 10 bit shift has been found through empirical tests (was 16).
+ *
+ * @return Random number between 0 and (2^22 - 1)
+ */
+static inline uint32_t
+rte_fast_rand(void)
+{
+ rte_red_rand_seed = (214013 * rte_red_rand_seed) + 2531011;
+ return (rte_red_rand_seed >> 10);
+}
+
+/**
+ * @brief calculate factor to scale average queue size when queue
+ * becomes empty
+ *
+ * @param [in] wq_log2 filter weight exponent, where EWMA filter weight wq = 1/(2 ^ wq_log2)
+ * @param [in] m exponent in the computed value (1 - wq) ^ m
+ *
+ * @return computed value
+ * @retval ((1 - wq) ^ m) scaled in fixed-point format
+ */
+static inline uint16_t
+__rte_red_calc_qempty_factor(uint8_t wq_log2, uint16_t m)
+{
+ uint32_t n = 0;
+ uint32_t f = 0;
+
+ /**
+ * Basic math tells us that:
+ * a^b = 2^(b * log2(a) )
+ *
+ * in our case:
+ * a = (1-Wq)
+ * b = m
+ * Wq = 1/ (2^log2n)
+ *
+ * So we are computing this equation:
+ * factor = 2 ^ ( m * log2(1-Wq))
+ *
+ * First we are computing:
+ * n = m * log2(1-Wq)
+ *
+ * To avoid dealing with signed numbers log2 values are positive
+ * but they should be negative because (1-Wq) is always < 1.
+ * Contents of log2 table values are also scaled for precision.
+ */
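+
+ /*
+ * Worked example (illustrative): for wq = 1/512 (wq_log2 = 9) and
+ * m = 1000 missed packet slots, m * log2(1 - wq) ~= -2.82, so the
+ * factor is roughly 2^-2.82 ~= 0.14, i.e. the stale average is scaled
+ * down to about 14% of its previous value.
+ */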
+
+ n = m * rte_red_log2_1_minus_Wq[wq_log2 - RTE_RED_WQ_LOG2_MIN];
+
+ /**
+ * The tricky part is computing 2^n, for this I split n into
+ * integer part and fraction part.
+ * f - is fraction part of n
+ * n - is integer part of original n
+ *
+ * Now using basic math we compute 2^n:
+ * 2^(f+n) = 2^f * 2^n
+ * 2^f - we use lookup table
+ * 2^n - can be replaced with bit shift right operations
+ */
+
+ f = (n >> 6) & 0xf;
+ n >>= 10;
+
+ if (n < RTE_RED_SCALING)
+ return (uint16_t) ((rte_red_pow2_frac_inv[f] + (1 << (n - 1))) >> n);
+
+ return 0;
+}
+
+/**
+ * @brief Updates queue average in condition when queue is empty
+ *
+ * Note: packet is never dropped in this particular case.
+ *
+ * @param [in] config pointer to a RED configuration parameter structure
+ * @param [in,out] data pointer to RED runtime data
+ * @param [in] time current time stamp
+ *
+ * @return Operation status
+ * @retval 0 enqueue the packet
+ * @retval 1 drop the packet based on max threshold criterion
+ * @retval 2 drop the packet based on mark probability criterion
+ */
+static inline int
+rte_red_enqueue_empty(const struct rte_red_config *red_cfg,
+ struct rte_red *red,
+ const uint64_t time)
+{
+ uint64_t time_diff = 0, m = 0;
+
+ RTE_RED_ASSERT(red_cfg != NULL);
+ RTE_RED_ASSERT(red != NULL);
+
+ red->count ++;
+
+ /**
+ * We compute avg but we don't compare avg against
+ * min_th or max_th, nor calculate drop probability
+ */
+ time_diff = time - red->q_time;
+
+ /**
+ * m is the number of packets that might have arrived while the queue was empty.
+ * In this case we have time stamps provided by scheduler in byte units (bytes
+ * transmitted on network port). Such time stamp translates into time units as
+ * port speed is fixed but such approach simplifies the code.
+ */
+ m = time_diff / RTE_RED_S;
+
+ /**
+ * Check that m will fit into 16-bit unsigned integer
+ */
+ if (m >= RTE_RED_2POW16) {
+ red->avg = 0;
+ } else {
+ red->avg = (red->avg >> RTE_RED_SCALING) * __rte_red_calc_qempty_factor(red_cfg->wq_log2, (uint16_t) m);
+ }
+
+ return 0;
+}
+
+/**
+ * Drop probability (Sally Floyd and Van Jacobson):
+ *
+ * pb = (1 / maxp_inv) * (avg - min_th) / (max_th - min_th)
+ * pa = pb / (2 - count * pb)
+ *
+ *
+ * (1 / maxp_inv) * (avg - min_th)
+ * ---------------------------------
+ * max_th - min_th
+ * pa = -----------------------------------------------
+ * count * (1 / maxp_inv) * (avg - min_th)
+ * 2 - -----------------------------------------
+ * max_th - min_th
+ *
+ *
+ * avg - min_th
+ * pa = -----------------------------------------------------------
+ * 2 * (max_th - min_th) * maxp_inv - count * (avg - min_th)
+ *
+ *
+ * We define pa_const as: pa_const = 2 * (max_th - min_th) * maxp_inv. Then:
+ *
+ *
+ * avg - min_th
+ * pa = -----------------------------------
+ * pa_const - count * (avg - min_th)
+ */
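+
+/*
+ * Worked example (illustrative numbers): with min_th = 32, max_th = 128,
+ * maxp_inv = 10, avg = 80 and count = 5, the original formulas give
+ * pb = (1/10) * (80 - 32) / (128 - 32) = 0.05 and
+ * pa = 0.05 / (2 - 5 * 0.05) ~= 0.0286, while the rearranged form gives
+ * pa = (80 - 32) / (2 * 96 * 10 - 5 * 48) = 48 / 1680 ~= 0.0286, i.e. the
+ * same value without any division until the final step.
+ */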
+
+/**
+ * @brief make a decision to drop or enqueue a packet based on mark probability
+ * criteria
+ *
+ * @param [in] config pointer to structure defining RED parameters
+ * @param [in,out] data pointer to RED runtime data
+ *
+ * @return operation status
+ * @retval 0 enqueue the packet
+ * @retval 1 drop the packet
+ */
+static inline int
+__rte_red_drop(const struct rte_red_config *red_cfg, struct rte_red *red)
+{
+ uint32_t pa_num = 0; /* numerator of drop-probability */
+ uint32_t pa_den = 0; /* denominator of drop-probability */
+ uint32_t pa_num_count = 0;
+
+ pa_num = (red->avg - red_cfg->min_th) >> (red_cfg->wq_log2);
+
+ pa_num_count = red->count * pa_num;
+
+ if (red_cfg->pa_const <= pa_num_count)
+ return 1;
+
+ pa_den = red_cfg->pa_const - pa_num_count;
+
+ /* If drop, generate and save random number to be used next time */
+ if (unlikely((rte_red_rand_val % pa_den) < pa_num)) {
+ rte_red_rand_val = rte_fast_rand();
+
+ return 1;
+ }
+
+ /* No drop */
+ return 0;
+}
+
+/**
+ * @brief Decides if new packet should be enqueued or dropped in queue non-empty case
+ *
+ * @param [in] config pointer to a RED configuration parameter structure
+ * @param [in,out] data pointer to RED runtime data
+ * @param [in] q current queue size (measured in packets)
+ *
+ * @return Operation status
+ * @retval 0 enqueue the packet
+ * @retval 1 drop the packet based on max threshold criterion
+ * @retval 2 drop the packet based on mark probability criterion
+ */
+static inline int
+rte_red_enqueue_nonempty(const struct rte_red_config *red_cfg,
+ struct rte_red *red,
+ const unsigned q)
+{
+ RTE_RED_ASSERT(red_cfg != NULL);
+ RTE_RED_ASSERT(red != NULL);
+
+ /**
+ * EWMA filter (Sally Floyd and Van Jacobson):
+ * avg = (1 - wq) * avg + wq * q
+ * avg = avg + q * wq - avg * wq
+ *
+ * We select: wq = 2^(-n). Let scaled version of avg be: avg_s = avg * 2^(N+n). We get:
+ * avg_s = avg_s + q * 2^N - avg_s * 2^(-n)
+ *
+ * By using shift left/right operations, we get:
+ * avg_s = avg_s + (q << N) - (avg_s >> n)
+ * avg_s += (q << N) - (avg_s >> n)
+ */
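+
+ /*
+ * Worked example (illustrative): with RTE_RED_SCALING = 10 and
+ * wq_log2 = 9 (wq = 1/512), an average of 2 packets is stored as
+ * avg_s = 2 << 19 = 1048576. Enqueuing with q = 4 adds
+ * (4 << 10) - (1048576 >> 9) = 4096 - 2048 = 2048, giving
+ * avg_s = 1050624, i.e. avg = 2 + 2/512 = 2.0039 as expected from
+ * (1 - wq) * 2 + wq * 4.
+ */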
+
+ /* avg update */
+ red->avg += (q << RTE_RED_SCALING) - (red->avg >> red_cfg->wq_log2);
+
+ /* avg < min_th: do not mark the packet */
+ if (red->avg < red_cfg->min_th) {
+ red->count ++;
+ return 0;
+ }
+
+ /* min_th <= avg < max_th: mark the packet with pa probability */
+ if (red->avg < red_cfg->max_th) {
+ if (!__rte_red_drop(red_cfg, red)) {
+ red->count ++;
+ return 0;
+ }
+
+ red->count = 0;
+ return 2;
+ }
+
+ /* max_th <= avg: always mark the packet */
+ red->count = 0;
+ return 1;
+}
+
+/**
+ * @brief Decides if new packet should be enqueued or dropped
+ * Updates run time data based on new queue size value.
+ * Based on new queue average and RED configuration parameters
+ * gives verdict whether to enqueue or drop the packet.
+ *
+ * @param [in] config pointer to a RED configuration parameter structure
+ * @param [in,out] data pointer to RED runtime data
+ * @param [in] q updated queue size in packets
+ * @param [in] time current time stamp
+ *
+ * @return Operation status
+ * @retval 0 enqueue the packet
+ * @retval 1 drop the packet based on max threshold criteria
+ * @retval 2 drop the packet based on mark probability criteria
+ */
+static inline int
+rte_red_enqueue(const struct rte_red_config *red_cfg,
+ struct rte_red *red,
+ const unsigned q,
+ const uint64_t time)
+{
+ RTE_RED_ASSERT(red_cfg != NULL);
+ RTE_RED_ASSERT(red != NULL);
+
+ if (q != 0) {
+ return rte_red_enqueue_nonempty(red_cfg, red, q);
+ } else {
+ return rte_red_enqueue_empty(red_cfg, red, time);
+ }
+}
+
+/**
+ * @brief Callback to record the time when the queue became empty
+ *
+ * @param [in,out] data pointer to RED runtime data
+ * @param [in] time current time stamp
+ */
+static inline void
+rte_red_mark_queue_empty(struct rte_red *red, const uint64_t time)
+{
+ red->q_time = time;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_RED_H_INCLUDED__ */
diff --git a/src/dpdk_lib18/librte_sched/rte_sched.c b/src/dpdk_lib18/librte_sched/rte_sched.c
new file mode 100755
index 00000000..95dee273
--- /dev/null
+++ b/src/dpdk_lib18/librte_sched/rte_sched.c
@@ -0,0 +1,2150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_branch_prediction.h>
+#include <rte_mbuf.h>
+
+#include "rte_sched.h"
+#include "rte_bitmap.h"
+#include "rte_sched_common.h"
+#include "rte_approx.h"
+
+#ifdef __INTEL_COMPILER
+#pragma warning(disable:2259) /* conversion may lose significant bits */
+#endif
+
+#ifndef RTE_SCHED_DEBUG
+#define RTE_SCHED_DEBUG 0
+#endif
+
+#ifndef RTE_SCHED_OPTIMIZATIONS
+#define RTE_SCHED_OPTIMIZATIONS 0
+#endif
+
+#if RTE_SCHED_OPTIMIZATIONS
+#include <immintrin.h>
+#endif
+
+#define RTE_SCHED_ENQUEUE 1
+
+#define RTE_SCHED_TS 1
+
+#if RTE_SCHED_TS == 0 /* Infinite credits. Traffic shaping disabled. */
+#define RTE_SCHED_TS_CREDITS_UPDATE 0
+#define RTE_SCHED_TS_CREDITS_CHECK 0
+#else /* Real Credits. Full traffic shaping implemented. */
+#define RTE_SCHED_TS_CREDITS_UPDATE 1
+#define RTE_SCHED_TS_CREDITS_CHECK 1
+#endif
+
+#ifndef RTE_SCHED_TB_RATE_CONFIG_ERR
+#define RTE_SCHED_TB_RATE_CONFIG_ERR (1e-7)
+#endif
+
+#define RTE_SCHED_WRR 1
+
+#ifndef RTE_SCHED_WRR_SHIFT
+#define RTE_SCHED_WRR_SHIFT 3
+#endif
+
+#ifndef RTE_SCHED_PORT_N_GRINDERS
+#define RTE_SCHED_PORT_N_GRINDERS 8
+#endif
+#if (RTE_SCHED_PORT_N_GRINDERS == 0) || (RTE_SCHED_PORT_N_GRINDERS & (RTE_SCHED_PORT_N_GRINDERS - 1))
+#error Number of grinders must be non-zero and a power of 2
+#endif
+#if (RTE_SCHED_OPTIMIZATIONS && (RTE_SCHED_PORT_N_GRINDERS != 8))
+#error Number of grinders must be 8 when RTE_SCHED_OPTIMIZATIONS is set
+#endif
+
+#define RTE_SCHED_GRINDER_PCACHE_SIZE (64 / RTE_SCHED_QUEUES_PER_PIPE)
+
+#define RTE_SCHED_PIPE_INVALID UINT32_MAX
+
+#define RTE_SCHED_BMP_POS_INVALID UINT32_MAX
+
+struct rte_sched_subport {
+ /* Token bucket (TB) */
+ uint64_t tb_time; /* time of last update */
+ uint32_t tb_period;
+ uint32_t tb_credits_per_period;
+ uint32_t tb_size;
+ uint32_t tb_credits;
+
+ /* Traffic classes (TCs) */
+ uint64_t tc_time; /* time of next update */
+ uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t tc_period;
+
+ /* TC oversubscription */
+ uint32_t tc_ov_wm;
+ uint32_t tc_ov_wm_min;
+ uint32_t tc_ov_wm_max;
+ uint8_t tc_ov_period_id;
+ uint8_t tc_ov;
+ uint32_t tc_ov_n;
+ double tc_ov_rate;
+
+ /* Statistics */
+ struct rte_sched_subport_stats stats;
+};
+
+struct rte_sched_pipe_profile {
+ /* Token bucket (TB) */
+ uint32_t tb_period;
+ uint32_t tb_credits_per_period;
+ uint32_t tb_size;
+
+ /* Pipe traffic classes */
+ uint32_t tc_period;
+ uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint8_t tc_ov_weight;
+
+ /* Pipe queues */
+ uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_PIPE];
+};
+
+struct rte_sched_pipe {
+ /* Token bucket (TB) */
+ uint64_t tb_time; /* time of last update */
+ uint32_t tb_credits;
+
+ /* Pipe profile and flags */
+ uint32_t profile;
+
+ /* Traffic classes (TCs) */
+ uint64_t tc_time; /* time of next update */
+ uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+
+ /* Weighted Round Robin (WRR) */
+ uint8_t wrr_tokens[RTE_SCHED_QUEUES_PER_PIPE];
+
+ /* TC oversubscription */
+ uint32_t tc_ov_credits;
+ uint8_t tc_ov_period_id;
+ uint8_t reserved[3];
+} __rte_cache_aligned;
+
+struct rte_sched_queue {
+ uint16_t qw;
+ uint16_t qr;
+};
+
+struct rte_sched_queue_extra {
+ struct rte_sched_queue_stats stats;
+#ifdef RTE_SCHED_RED
+ struct rte_red red;
+#endif
+};
+
+enum grinder_state {
+ e_GRINDER_PREFETCH_PIPE = 0,
+ e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS,
+ e_GRINDER_PREFETCH_MBUF,
+ e_GRINDER_READ_MBUF
+};
+
+struct rte_sched_grinder {
+ /* Pipe cache */
+ uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
+ uint32_t pcache_qindex[RTE_SCHED_GRINDER_PCACHE_SIZE];
+ uint32_t pcache_w;
+ uint32_t pcache_r;
+
+ /* Current pipe */
+ enum grinder_state state;
+ uint32_t productive;
+ uint32_t pindex;
+ struct rte_sched_subport *subport;
+ struct rte_sched_pipe *pipe;
+ struct rte_sched_pipe_profile *pipe_params;
+
+ /* TC cache */
+ uint8_t tccache_qmask[4];
+ uint32_t tccache_qindex[4];
+ uint32_t tccache_w;
+ uint32_t tccache_r;
+
+ /* Current TC */
+ uint32_t tc_index;
+ struct rte_sched_queue *queue[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ struct rte_mbuf **qbase[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t qindex[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint16_t qsize;
+ uint32_t qmask;
+ uint32_t qpos;
+ struct rte_mbuf *pkt;
+
+ /* WRR */
+ uint16_t wrr_tokens[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
+ uint16_t wrr_mask[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
+ uint8_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
+};
+
+struct rte_sched_port {
+ /* User parameters */
+ uint32_t n_subports_per_port;
+ uint32_t n_pipes_per_subport;
+ uint32_t rate;
+ uint32_t mtu;
+ uint32_t frame_overhead;
+ uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t n_pipe_profiles;
+ uint32_t pipe_tc3_rate_max;
+#ifdef RTE_SCHED_RED
+ struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][e_RTE_METER_COLORS];
+#endif
+
+ /* Timing */
+ uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */
+ uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */
+ uint64_t time; /* Current NIC TX time measured in bytes */
+ double cycles_per_byte; /* CPU cycles per byte */
+
+ /* Scheduling loop detection */
+ uint32_t pipe_loop;
+ uint32_t pipe_exhaustion;
+
+ /* Bitmap */
+ struct rte_bitmap *bmp;
+ uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;
+
+ /* Grinders */
+ struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
+ uint32_t busy_grinders;
+ struct rte_mbuf **pkts_out;
+ uint32_t n_pkts_out;
+
+ /* Queue base calculation */
+ uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
+ uint32_t qsize_sum;
+
+ /* Large data structures */
+ struct rte_sched_subport *subport;
+ struct rte_sched_pipe *pipe;
+ struct rte_sched_queue *queue;
+ struct rte_sched_queue_extra *queue_extra;
+ struct rte_sched_pipe_profile *pipe_profiles;
+ uint8_t *bmp_array;
+ struct rte_mbuf **queue_array;
+ uint8_t memory[0] __rte_cache_aligned;
+} __rte_cache_aligned;
+
+enum rte_sched_port_array {
+ e_RTE_SCHED_PORT_ARRAY_SUBPORT = 0,
+ e_RTE_SCHED_PORT_ARRAY_PIPE,
+ e_RTE_SCHED_PORT_ARRAY_QUEUE,
+ e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA,
+ e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES,
+ e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY,
+ e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY,
+ e_RTE_SCHED_PORT_ARRAY_TOTAL,
+};
+
+#ifdef RTE_SCHED_COLLECT_STATS
+
+static inline uint32_t
+rte_sched_port_queues_per_subport(struct rte_sched_port *port)
+{
+ return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport;
+}
+
+#endif
+
+static inline uint32_t
+rte_sched_port_queues_per_port(struct rte_sched_port *port)
+{
+ return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport * port->n_subports_per_port;
+}
+
+static int
+rte_sched_port_check_params(struct rte_sched_port_params *params)
+{
+ uint32_t i, j;
+
+ if (params == NULL) {
+ return -1;
+ }
+
+ /* socket */
+ if ((params->socket < 0) || (params->socket >= RTE_MAX_NUMA_NODES)) {
+ return -3;
+ }
+
+ /* rate */
+ if (params->rate == 0) {
+ return -4;
+ }
+
+ /* mtu */
+ if (params->mtu == 0) {
+ return -5;
+ }
+
+ /* n_subports_per_port: non-zero, power of 2 */
+ if ((params->n_subports_per_port == 0) || (!rte_is_power_of_2(params->n_subports_per_port))) {
+ return -6;
+ }
+
+ /* n_pipes_per_subport: non-zero, power of 2 */
+ if ((params->n_pipes_per_subport == 0) || (!rte_is_power_of_2(params->n_pipes_per_subport))) {
+ return -7;
+ }
+
+ /* qsize: non-zero, power of 2, no bigger than 32K (due to 16-bit read/write pointers) */
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
+ uint16_t qsize = params->qsize[i];
+
+ if ((qsize == 0) || (!rte_is_power_of_2(qsize))) {
+ return -8;
+ }
+ }
+
+ /* pipe_profiles and n_pipe_profiles */
+ if ((params->pipe_profiles == NULL) ||
+ (params->n_pipe_profiles == 0) ||
+ (params->n_pipe_profiles > RTE_SCHED_PIPE_PROFILES_PER_PORT)) {
+ return -9;
+ }
+
+ for (i = 0; i < params->n_pipe_profiles; i ++) {
+ struct rte_sched_pipe_params *p = params->pipe_profiles + i;
+
+ /* TB rate: non-zero, not greater than port rate */
+ if ((p->tb_rate == 0) || (p->tb_rate > params->rate)) {
+ return -10;
+ }
+
+ /* TB size: non-zero */
+ if (p->tb_size == 0) {
+ return -11;
+ }
+
+ /* TC rate: non-zero, not greater than pipe (TB) rate */
+ for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
+ if ((p->tc_rate[j] == 0) || (p->tc_rate[j] > p->tb_rate)) {
+ return -12;
+ }
+ }
+
+ /* TC period: non-zero */
+ if (p->tc_period == 0) {
+ return -13;
+ }
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ /* TC3 oversubscription weight: non-zero */
+ if (p->tc_ov_weight == 0) {
+ return -14;
+ }
+#endif
+
+ /* Queue WRR weights: non-zero */
+ for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j ++) {
+ if (p->wrr_weights[j] == 0) {
+ return -15;
+ }
+ }
+ }
+
+ return 0;
+}
+
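+/* The port and all of its internal arrays live in one contiguous allocation
+ * (see rte_sched_port_config()). This helper returns the byte offset of the
+ * requested array inside that block, with each array rounded up to a cache
+ * line; passing e_RTE_SCHED_PORT_ARRAY_TOTAL yields the total size. */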
+static uint32_t
+rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sched_port_array array)
+{
+ uint32_t n_subports_per_port = params->n_subports_per_port;
+ uint32_t n_pipes_per_subport = params->n_pipes_per_subport;
+ uint32_t n_pipes_per_port = n_pipes_per_subport * n_subports_per_port;
+ uint32_t n_queues_per_port = RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport * n_subports_per_port;
+
+ uint32_t size_subport = n_subports_per_port * sizeof(struct rte_sched_subport);
+ uint32_t size_pipe = n_pipes_per_port * sizeof(struct rte_sched_pipe);
+ uint32_t size_queue = n_queues_per_port * sizeof(struct rte_sched_queue);
+ uint32_t size_queue_extra = n_queues_per_port * sizeof(struct rte_sched_queue_extra);
+ uint32_t size_pipe_profiles = RTE_SCHED_PIPE_PROFILES_PER_PORT * sizeof(struct rte_sched_pipe_profile);
+ uint32_t size_bmp_array = rte_bitmap_get_memory_footprint(n_queues_per_port);
+ uint32_t size_per_pipe_queue_array, size_queue_array;
+
+ uint32_t base, i;
+
+ size_per_pipe_queue_array = 0;
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
+ size_per_pipe_queue_array += RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * params->qsize[i] * sizeof(struct rte_mbuf *);
+ }
+ size_queue_array = n_pipes_per_port * size_per_pipe_queue_array;
+
+ base = 0;
+
+ if (array == e_RTE_SCHED_PORT_ARRAY_SUBPORT) return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_subport);
+
+ if (array == e_RTE_SCHED_PORT_ARRAY_PIPE) return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
+
+ if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE) return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_queue);
+
+ if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA) return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);
+
+ if (array == e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES) return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);
+
+ if (array == e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY) return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);
+
+ if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY) return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);
+
+ return base;
+}
+
+uint32_t
+rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
+{
+ uint32_t size0, size1;
+ int status;
+
+ status = rte_sched_port_check_params(params);
+ if (status != 0) {
+ RTE_LOG(INFO, SCHED, "Port scheduler params check failed (%d)\n", status);
+
+ return 0;
+ }
+
+ size0 = sizeof(struct rte_sched_port);
+ size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);
+
+ return (size0 + size1);
+}
+
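+/* Precompute, for each of the 16 queues of a pipe, the offset (in mbuf
+ * pointers) of that queue within the pipe's slice of the queue array, plus
+ * the total per-pipe size. As an illustration (values assumed), with
+ * qsize = {64, 64, 64, 64} the offsets are 0, 64, 128, ..., 960 and
+ * qsize_sum is 1024. */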
+static void
+rte_sched_port_config_qsize(struct rte_sched_port *port)
+{
+ /* TC 0 */
+ port->qsize_add[0] = 0;
+ port->qsize_add[1] = port->qsize_add[0] + port->qsize[0];
+ port->qsize_add[2] = port->qsize_add[1] + port->qsize[0];
+ port->qsize_add[3] = port->qsize_add[2] + port->qsize[0];
+
+ /* TC 1 */
+ port->qsize_add[4] = port->qsize_add[3] + port->qsize[0];
+ port->qsize_add[5] = port->qsize_add[4] + port->qsize[1];
+ port->qsize_add[6] = port->qsize_add[5] + port->qsize[1];
+ port->qsize_add[7] = port->qsize_add[6] + port->qsize[1];
+
+ /* TC 2 */
+ port->qsize_add[8] = port->qsize_add[7] + port->qsize[1];
+ port->qsize_add[9] = port->qsize_add[8] + port->qsize[2];
+ port->qsize_add[10] = port->qsize_add[9] + port->qsize[2];
+ port->qsize_add[11] = port->qsize_add[10] + port->qsize[2];
+
+ /* TC 3 */
+ port->qsize_add[12] = port->qsize_add[11] + port->qsize[2];
+ port->qsize_add[13] = port->qsize_add[12] + port->qsize[3];
+ port->qsize_add[14] = port->qsize_add[13] + port->qsize[3];
+ port->qsize_add[15] = port->qsize_add[14] + port->qsize[3];
+
+ port->qsize_sum = port->qsize_add[15] + port->qsize[3];
+}
+
+static void
+rte_sched_port_log_pipe_profile(struct rte_sched_port *port, uint32_t i)
+{
+ struct rte_sched_pipe_profile *p = port->pipe_profiles + i;
+
+ RTE_LOG(INFO, SCHED, "Low level config for pipe profile %u:\n"
+ "\tToken bucket: period = %u, credits per period = %u, size = %u\n"
+ "\tTraffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
+ "\tTraffic class 3 oversubscription: weight = %hhu\n"
+ "\tWRR cost: [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu], [%hhu, %hhu, %hhu, %hhu]\n",
+ i,
+
+ /* Token bucket */
+ p->tb_period,
+ p->tb_credits_per_period,
+ p->tb_size,
+
+ /* Traffic classes */
+ p->tc_period,
+ p->tc_credits_per_period[0],
+ p->tc_credits_per_period[1],
+ p->tc_credits_per_period[2],
+ p->tc_credits_per_period[3],
+
+ /* Traffic class 3 oversubscription */
+ p->tc_ov_weight,
+
+ /* WRR */
+ p->wrr_cost[ 0], p->wrr_cost[ 1], p->wrr_cost[ 2], p->wrr_cost[ 3],
+ p->wrr_cost[ 4], p->wrr_cost[ 5], p->wrr_cost[ 6], p->wrr_cost[ 7],
+ p->wrr_cost[ 8], p->wrr_cost[ 9], p->wrr_cost[10], p->wrr_cost[11],
+ p->wrr_cost[12], p->wrr_cost[13], p->wrr_cost[14], p->wrr_cost[15]);
+}
+
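+/* The scheduler keeps time in bytes, so periods given in milliseconds are
+ * converted to byte counts at the given rate (bytes per second). For
+ * example (assumed values), 10 ms at 1,250,000,000 bytes/s is 12,500,000
+ * bytes. */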
+static inline uint64_t
+rte_sched_time_ms_to_bytes(uint32_t time_ms, uint32_t rate)
+{
+ uint64_t time = time_ms;
+ time = (time * rate) / 1000;
+
+ return time;
+}
+
+static void
+rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte_sched_port_params *params)
+{
+ uint32_t i, j;
+
+ for (i = 0; i < port->n_pipe_profiles; i ++) {
+ struct rte_sched_pipe_params *src = params->pipe_profiles + i;
+ struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;
+
+ /* Token Bucket */
+ if (src->tb_rate == params->rate) {
+ dst->tb_credits_per_period = 1;
+ dst->tb_period = 1;
+ } else {
+ double tb_rate = ((double) src->tb_rate) / ((double) params->rate);
+ double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
+
+ rte_approx(tb_rate, d, &dst->tb_credits_per_period, &dst->tb_period);
+ }
+ dst->tb_size = src->tb_size;
+
+ /* Traffic Classes */
+ dst->tc_period = (uint32_t) rte_sched_time_ms_to_bytes(src->tc_period, params->rate);
+ for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
+ dst->tc_credits_per_period[j] = (uint32_t) rte_sched_time_ms_to_bytes(src->tc_period, src->tc_rate[j]);
+ }
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ dst->tc_ov_weight = src->tc_ov_weight;
+#endif
+
+ /* WRR */
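+ /* Cost = lcd / weight, where lcd is the lowest common denominator (i.e. the
+ * LCM) of the four queue weights of the TC. As an illustration (values
+ * assumed, not taken from this file), weights {1, 2, 4, 8} give lcd = 8 and
+ * costs {8, 4, 2, 1}: a queue with twice the weight is charged half as much
+ * per byte and therefore receives twice the bandwidth. */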
+ for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
+ uint32_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
+ uint32_t lcd, lcd1, lcd2;
+ uint32_t qindex;
+
+ qindex = j * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+
+ wrr_cost[0] = src->wrr_weights[qindex];
+ wrr_cost[1] = src->wrr_weights[qindex + 1];
+ wrr_cost[2] = src->wrr_weights[qindex + 2];
+ wrr_cost[3] = src->wrr_weights[qindex + 3];
+
+ lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
+ lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
+ lcd = rte_get_lcd(lcd1, lcd2);
+
+ wrr_cost[0] = lcd / wrr_cost[0];
+ wrr_cost[1] = lcd / wrr_cost[1];
+ wrr_cost[2] = lcd / wrr_cost[2];
+ wrr_cost[3] = lcd / wrr_cost[3];
+
+ dst->wrr_cost[qindex] = (uint8_t) wrr_cost[0];
+ dst->wrr_cost[qindex + 1] = (uint8_t) wrr_cost[1];
+ dst->wrr_cost[qindex + 2] = (uint8_t) wrr_cost[2];
+ dst->wrr_cost[qindex + 3] = (uint8_t) wrr_cost[3];
+ }
+
+ rte_sched_port_log_pipe_profile(port, i);
+ }
+
+ port->pipe_tc3_rate_max = 0;
+ for (i = 0; i < port->n_pipe_profiles; i ++) {
+ struct rte_sched_pipe_params *src = params->pipe_profiles + i;
+ uint32_t pipe_tc3_rate = src->tc_rate[3];
+
+ if (port->pipe_tc3_rate_max < pipe_tc3_rate) {
+ port->pipe_tc3_rate_max = pipe_tc3_rate;
+ }
+ }
+}
+
+struct rte_sched_port *
+rte_sched_port_config(struct rte_sched_port_params *params)
+{
+ struct rte_sched_port *port = NULL;
+ uint32_t mem_size, bmp_mem_size, n_queues_per_port, i;
+
+ /* Check user parameters. Determine the amount of memory to allocate */
+ mem_size = rte_sched_port_get_memory_footprint(params);
+ if (mem_size == 0) {
+ return NULL;
+ }
+
+ /* Allocate memory to store the data structures */
+ port = rte_zmalloc("qos_params", mem_size, RTE_CACHE_LINE_SIZE);
+ if (port == NULL) {
+ return NULL;
+ }
+
+ /* User parameters */
+ port->n_subports_per_port = params->n_subports_per_port;
+ port->n_pipes_per_subport = params->n_pipes_per_subport;
+ port->rate = params->rate;
+ port->mtu = params->mtu + params->frame_overhead;
+ port->frame_overhead = params->frame_overhead;
+ memcpy(port->qsize, params->qsize, sizeof(params->qsize));
+ port->n_pipe_profiles = params->n_pipe_profiles;
+
+#ifdef RTE_SCHED_RED
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ uint32_t j;
+
+ for (j = 0; j < e_RTE_METER_COLORS; j++) {
+ if (rte_red_config_init(&port->red_config[i][j],
+ params->red_params[i][j].wq_log2,
+ params->red_params[i][j].min_th,
+ params->red_params[i][j].max_th,
+ params->red_params[i][j].maxp_inv) != 0) {
+ return NULL;
+ }
+ }
+ }
+#endif
+
+ /* Timing */
+ port->time_cpu_cycles = rte_get_tsc_cycles();
+ port->time_cpu_bytes = 0;
+ port->time = 0;
+ port->cycles_per_byte = ((double) rte_get_tsc_hz()) / ((double) params->rate);
+
+ /* Scheduling loop detection */
+ port->pipe_loop = RTE_SCHED_PIPE_INVALID;
+ port->pipe_exhaustion = 0;
+
+ /* Grinders */
+ port->busy_grinders = 0;
+ port->pkts_out = NULL;
+ port->n_pkts_out = 0;
+
+ /* Queue base calculation */
+ rte_sched_port_config_qsize(port);
+
+ /* Large data structures */
+ port->subport = (struct rte_sched_subport *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_SUBPORT));
+ port->pipe = (struct rte_sched_pipe *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE));
+ port->queue = (struct rte_sched_queue *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE));
+ port->queue_extra = (struct rte_sched_queue_extra *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
+ port->pipe_profiles = (struct rte_sched_pipe_profile *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
+ port->bmp_array = port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
+ port->queue_array = (struct rte_mbuf **) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));
+
+ /* Pipe profile table */
+ rte_sched_port_config_pipe_profile_table(port, params);
+
+ /* Bitmap */
+ n_queues_per_port = rte_sched_port_queues_per_port(port);
+ bmp_mem_size = rte_bitmap_get_memory_footprint(n_queues_per_port);
+ port->bmp = rte_bitmap_init(n_queues_per_port, port->bmp_array, bmp_mem_size);
+ if (port->bmp == NULL) {
+ RTE_LOG(INFO, SCHED, "Bitmap init error\n");
+ return NULL;
+ }
+ for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
+ port->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
+ }
+
+ return port;
+}
+
+void
+rte_sched_port_free(struct rte_sched_port *port)
+{
+ /* Check user parameters */
+ if (port == NULL){
+ return;
+ }
+
+ rte_bitmap_free(port->bmp);
+ rte_free(port);
+}
+
+static void
+rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
+{
+ struct rte_sched_subport *s = port->subport + i;
+
+ RTE_LOG(INFO, SCHED, "Low level config for subport %u:\n"
+ "\tToken bucket: period = %u, credits per period = %u, size = %u\n"
+ "\tTraffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
+ "\tTraffic class 3 oversubscription: wm min = %u, wm max = %u\n",
+ i,
+
+ /* Token bucket */
+ s->tb_period,
+ s->tb_credits_per_period,
+ s->tb_size,
+
+ /* Traffic classes */
+ s->tc_period,
+ s->tc_credits_per_period[0],
+ s->tc_credits_per_period[1],
+ s->tc_credits_per_period[2],
+ s->tc_credits_per_period[3],
+
+ /* Traffic class 3 oversubscription */
+ s->tc_ov_wm_min,
+ s->tc_ov_wm_max);
+}
+
+int
+rte_sched_subport_config(struct rte_sched_port *port,
+ uint32_t subport_id,
+ struct rte_sched_subport_params *params)
+{
+ struct rte_sched_subport *s;
+ uint32_t i;
+
+ /* Check user parameters */
+ if ((port == NULL) ||
+ (subport_id >= port->n_subports_per_port) ||
+ (params == NULL)) {
+ return -1;
+ }
+
+ if ((params->tb_rate == 0) || (params->tb_rate > port->rate)) {
+ return -2;
+ }
+
+ if (params->tb_size == 0) {
+ return -3;
+ }
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
+ if ((params->tc_rate[i] == 0) || (params->tc_rate[i] > params->tb_rate)) {
+ return -4;
+ }
+ }
+
+ if (params->tc_period == 0) {
+ return -5;
+ }
+
+ s = port->subport + subport_id;
+
+ /* Token Bucket (TB) */
+ if (params->tb_rate == port->rate) {
+ s->tb_credits_per_period = 1;
+ s->tb_period = 1;
+ } else {
+ double tb_rate = ((double) params->tb_rate) / ((double) port->rate);
+ double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
+
+ rte_approx(tb_rate, d, &s->tb_credits_per_period, &s->tb_period);
+ }
+ s->tb_size = params->tb_size;
+ s->tb_time = port->time;
+ s->tb_credits = s->tb_size / 2;
+
+ /* Traffic Classes (TCs) */
+ s->tc_period = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
+ s->tc_credits_per_period[i] = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, params->tc_rate[i]);
+ }
+ s->tc_time = port->time + s->tc_period;
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
+ s->tc_credits[i] = s->tc_credits_per_period[i];
+ }
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ /* TC oversubscription */
+ s->tc_ov_wm_min = port->mtu;
+ s->tc_ov_wm_max = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, port->pipe_tc3_rate_max);
+ s->tc_ov_wm = s->tc_ov_wm_max;
+ s->tc_ov_period_id = 0;
+ s->tc_ov = 0;
+ s->tc_ov_n = 0;
+ s->tc_ov_rate = 0;
+#endif
+
+ rte_sched_port_log_subport_config(port, subport_id);
+
+ return 0;
+}
+
+int
+rte_sched_pipe_config(struct rte_sched_port *port,
+ uint32_t subport_id,
+ uint32_t pipe_id,
+ int32_t pipe_profile)
+{
+ struct rte_sched_subport *s;
+ struct rte_sched_pipe *p;
+ struct rte_sched_pipe_profile *params;
+ uint32_t deactivate, profile, i;
+
+ /* Check user parameters */
+ profile = (uint32_t) pipe_profile;
+ deactivate = (pipe_profile < 0);
+ if ((port == NULL) ||
+ (subport_id >= port->n_subports_per_port) ||
+ (pipe_id >= port->n_pipes_per_subport) ||
+ ((!deactivate) && (profile >= port->n_pipe_profiles))) {
+ return -1;
+ }
+
+ /* Check that subport configuration is valid */
+ s = port->subport + subport_id;
+ if (s->tb_period == 0) {
+ return -2;
+ }
+
+ p = port->pipe + (subport_id * port->n_pipes_per_subport + pipe_id);
+
+ /* Handle the case when pipe already has a valid configuration */
+ if (p->tb_time) {
+ params = port->pipe_profiles + p->profile;
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ double subport_tc3_rate = ((double) s->tc_credits_per_period[3]) / ((double) s->tc_period);
+ double pipe_tc3_rate = ((double) params->tc_credits_per_period[3]) / ((double) params->tc_period);
+ uint32_t tc3_ov = s->tc_ov;
+
+ /* Unplug pipe from its subport */
+ s->tc_ov_n -= params->tc_ov_weight;
+ s->tc_ov_rate -= pipe_tc3_rate;
+ s->tc_ov = s->tc_ov_rate > subport_tc3_rate;
+
+ if (s->tc_ov != tc3_ov) {
+ RTE_LOG(INFO, SCHED, "Subport %u TC3 oversubscription is OFF (%.4lf >= %.4lf)\n",
+ subport_id, subport_tc3_rate, s->tc_ov_rate);
+ }
+#endif
+
+ /* Reset the pipe */
+ memset(p, 0, sizeof(struct rte_sched_pipe));
+ }
+
+ if (deactivate) {
+ return 0;
+ }
+
+ /* Apply the new pipe configuration */
+ p->profile = profile;
+ params = port->pipe_profiles + p->profile;
+
+ /* Token Bucket (TB) */
+ p->tb_time = port->time;
+ p->tb_credits = params->tb_size / 2;
+
+ /* Traffic Classes (TCs) */
+ p->tc_time = port->time + params->tc_period;
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
+ p->tc_credits[i] = params->tc_credits_per_period[i];
+ }
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ {
+ /* Subport TC3 oversubscription */
+ double subport_tc3_rate = ((double) s->tc_credits_per_period[3]) / ((double) s->tc_period);
+ double pipe_tc3_rate = ((double) params->tc_credits_per_period[3]) / ((double) params->tc_period);
+ uint32_t tc3_ov = s->tc_ov;
+
+ s->tc_ov_n += params->tc_ov_weight;
+ s->tc_ov_rate += pipe_tc3_rate;
+ s->tc_ov = s->tc_ov_rate > subport_tc3_rate;
+
+ if (s->tc_ov != tc3_ov) {
+ RTE_LOG(INFO, SCHED, "Subport %u TC3 oversubscription is ON (%.4lf < %.4lf)\n",
+ subport_id, subport_tc3_rate, s->tc_ov_rate);
+ }
+ p->tc_ov_period_id = s->tc_ov_period_id;
+ p->tc_ov_credits = s->tc_ov_wm;
+ }
+#endif
+
+ return 0;
+}
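+
+/* Illustrative configuration sequence (a sketch, not code from this file;
+ * field names are the ones referenced by rte_sched_port_check_params() and
+ * rte_sched_subport_config(), all numeric values are assumed examples):
+ *
+ *	struct rte_sched_pipe_params pipe_prof = {
+ *		.tb_rate = 305175, .tb_size = 1000000,
+ *		.tc_rate = {305175, 305175, 305175, 305175}, .tc_period = 40,
+ *		.wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ *	};	// plus .tc_ov_weight when RTE_SCHED_SUBPORT_TC_OV is enabled
+ *	struct rte_sched_port_params pp = {
+ *		.socket = 0, .rate = 1250000000, .mtu = 1522, .frame_overhead = 24,
+ *		.n_subports_per_port = 1, .n_pipes_per_subport = 4096,
+ *		.qsize = {64, 64, 64, 64},
+ *		.pipe_profiles = &pipe_prof, .n_pipe_profiles = 1,
+ *	};
+ *	struct rte_sched_subport_params sp = {
+ *		.tb_rate = 1250000000, .tb_size = 1000000,
+ *		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+ *		.tc_period = 10,
+ *	};
+ *	struct rte_sched_port *port = rte_sched_port_config(&pp);
+ *	rte_sched_subport_config(port, 0, &sp);
+ *	rte_sched_pipe_config(port, 0, 0, 0);	// pipe 0 of subport 0, profile 0
+ */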
+
+int
+rte_sched_subport_read_stats(struct rte_sched_port *port,
+ uint32_t subport_id,
+ struct rte_sched_subport_stats *stats,
+ uint32_t *tc_ov)
+{
+ struct rte_sched_subport *s;
+
+ /* Check user parameters */
+ if ((port == NULL) ||
+ (subport_id >= port->n_subports_per_port) ||
+ (stats == NULL) ||
+ (tc_ov == NULL)) {
+ return -1;
+ }
+ s = port->subport + subport_id;
+
+ /* Copy subport stats and clear */
+ memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
+ memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
+
+ /* Subport TC oversubscription status */
+ *tc_ov = s->tc_ov;
+
+ return 0;
+}
+
+int
+rte_sched_queue_read_stats(struct rte_sched_port *port,
+ uint32_t queue_id,
+ struct rte_sched_queue_stats *stats,
+ uint16_t *qlen)
+{
+ struct rte_sched_queue *q;
+ struct rte_sched_queue_extra *qe;
+
+ /* Check user parameters */
+ if ((port == NULL) ||
+ (queue_id >= rte_sched_port_queues_per_port(port)) ||
+ (stats == NULL) ||
+ (qlen == NULL)) {
+ return -1;
+ }
+ q = port->queue + queue_id;
+ qe = port->queue_extra + queue_id;
+
+ /* Copy queue stats and clear */
+ memcpy(stats, &qe->stats, sizeof(struct rte_sched_queue_stats));
+ memset(&qe->stats, 0, sizeof(struct rte_sched_queue_stats));
+
+ /* Queue length */
+ *qlen = q->qw - q->qr;
+
+ return 0;
+}
+
+static inline uint32_t
+rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue)
+{
+ uint32_t result;
+
+ result = subport * port->n_pipes_per_subport + pipe;
+ result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
+ result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
+
+ return result;
+}
+
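+/* Queue index layout (from rte_sched_port_qindex() above): bits [1:0] select
+ * the queue within the traffic class, bits [3:2] the traffic class, and the
+ * upper bits the pipe within the port. Hence qindex >> 4 is the pipe index
+ * and qindex & 0xF is the queue position inside the pipe. */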
+static inline struct rte_mbuf **
+rte_sched_port_qbase(struct rte_sched_port *port, uint32_t qindex)
+{
+ uint32_t pindex = qindex >> 4;
+ uint32_t qpos = qindex & 0xF;
+
+ return (port->queue_array + pindex * port->qsize_sum + port->qsize_add[qpos]);
+}
+
+static inline uint16_t
+rte_sched_port_qsize(struct rte_sched_port *port, uint32_t qindex)
+{
+ uint32_t tc = (qindex >> 2) & 0x3;
+
+ return port->qsize[tc];
+}
+
+#if RTE_SCHED_DEBUG
+
+static inline int
+rte_sched_port_queue_is_empty(struct rte_sched_port *port, uint32_t qindex)
+{
+ struct rte_sched_queue *queue = port->queue + qindex;
+
+ return (queue->qr == queue->qw);
+}
+
+static inline int
+rte_sched_port_queue_is_full(struct rte_sched_port *port, uint32_t qindex)
+{
+ struct rte_sched_queue *queue = port->queue + qindex;
+ uint16_t qsize = rte_sched_port_qsize(port, qindex);
+ uint16_t qlen = queue->qw - queue->qr;
+
+ return (qlen >= qsize);
+}
+
+#endif /* RTE_SCHED_DEBUG */
+
+#ifdef RTE_SCHED_COLLECT_STATS
+
+static inline void
+rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+{
+ struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
+ uint32_t tc_index = (qindex >> 2) & 0x3;
+ uint32_t pkt_len = pkt->pkt_len;
+
+ s->stats.n_pkts_tc[tc_index] += 1;
+ s->stats.n_bytes_tc[tc_index] += pkt_len;
+}
+
+static inline void
+rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+{
+ struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
+ uint32_t tc_index = (qindex >> 2) & 0x3;
+ uint32_t pkt_len = pkt->pkt_len;
+
+ s->stats.n_pkts_tc_dropped[tc_index] += 1;
+ s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
+}
+
+static inline void
+rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+{
+ struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
+ uint32_t pkt_len = pkt->pkt_len;
+
+ qe->stats.n_pkts += 1;
+ qe->stats.n_bytes += pkt_len;
+}
+
+static inline void
+rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+{
+ struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
+ uint32_t pkt_len = pkt->pkt_len;
+
+ qe->stats.n_pkts_dropped += 1;
+ qe->stats.n_bytes_dropped += pkt_len;
+}
+
+#endif /* RTE_SCHED_COLLECT_STATS */
+
+#ifdef RTE_SCHED_RED
+
+static inline int
+rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint32_t qindex, uint16_t qlen)
+{
+ struct rte_sched_queue_extra *qe;
+ struct rte_red_config *red_cfg;
+ struct rte_red *red;
+ uint32_t tc_index;
+ enum rte_meter_color color;
+
+ tc_index = (qindex >> 2) & 0x3;
+ color = rte_sched_port_pkt_read_color(pkt);
+ red_cfg = &port->red_config[tc_index][color];
+
+ qe = port->queue_extra + qindex;
+ red = &qe->red;
+
+ return rte_red_enqueue(red_cfg, red, qlen, port->time);
+}
+
+static inline void
+rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port, uint32_t qindex)
+{
+ struct rte_sched_queue_extra *qe;
+ struct rte_red *red;
+
+ qe = port->queue_extra + qindex;
+ red = &qe->red;
+
+ rte_red_mark_queue_empty(red, port->time);
+}
+
+#else
+
+#define rte_sched_port_red_drop(port, pkt, qindex, qlen) 0
+
+#define rte_sched_port_set_queue_empty_timestamp(port, qindex)
+
+#endif /* RTE_SCHED_RED */
+
+#if RTE_SCHED_DEBUG
+
+static inline int
+debug_pipe_is_empty(struct rte_sched_port *port, uint32_t pindex)
+{
+ uint32_t qindex, i;
+
+ qindex = pindex << 4;
+
+ for (i = 0; i < 16; i ++){
+ uint32_t queue_empty = rte_sched_port_queue_is_empty(port, qindex + i);
+ uint32_t bmp_bit_clear = (rte_bitmap_get(port->bmp, qindex + i) == 0);
+
+ if (queue_empty != bmp_bit_clear){
+ rte_panic("Queue status mismatch for queue %u of pipe %u\n", i, pindex);
+ }
+
+ if (!queue_empty){
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static inline void
+debug_check_queue_slab(struct rte_sched_port *port, uint32_t bmp_pos, uint64_t bmp_slab)
+{
+ uint64_t mask;
+ uint32_t i, panic;
+
+ if (bmp_slab == 0){
+ rte_panic("Empty slab at position %u\n", bmp_pos);
+ }
+
+ panic = 0;
+ for (i = 0, mask = 1; i < 64; i ++, mask <<= 1) {
+ if (mask & bmp_slab){
+ if (rte_sched_port_queue_is_empty(port, bmp_pos + i)) {
+ printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
+ panic = 1;
+ }
+ }
+ }
+
+ if (panic){
+ rte_panic("Empty queues in slab 0x%" PRIx64 "starting at position %u\n",
+ bmp_slab, bmp_pos);
+ }
+}
+
+#endif /* RTE_SCHED_DEBUG */
+
+static inline uint32_t
+rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_port *port, struct rte_mbuf *pkt)
+{
+ struct rte_sched_queue *q;
+#ifdef RTE_SCHED_COLLECT_STATS
+ struct rte_sched_queue_extra *qe;
+#endif
+ uint32_t subport, pipe, traffic_class, queue, qindex;
+
+ rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);
+
+ qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
+ q = port->queue + qindex;
+ rte_prefetch0(q);
+#ifdef RTE_SCHED_COLLECT_STATS
+ qe = port->queue_extra + qindex;
+ rte_prefetch0(qe);
+#endif
+
+ return qindex;
+}
+
+static inline void
+rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf **qbase)
+{
+ struct rte_sched_queue *q;
+ struct rte_mbuf **q_qw;
+ uint16_t qsize;
+
+ q = port->queue + qindex;
+ qsize = rte_sched_port_qsize(port, qindex);
+ q_qw = qbase + (q->qw & (qsize - 1));
+
+ rte_prefetch0(q_qw);
+ rte_bitmap_prefetch0(port->bmp, qindex);
+}
+
+static inline int
+rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf **qbase, struct rte_mbuf *pkt)
+{
+ struct rte_sched_queue *q;
+ uint16_t qsize;
+ uint16_t qlen;
+
+ q = port->queue + qindex;
+ qsize = rte_sched_port_qsize(port, qindex);
+ qlen = q->qw - q->qr;
+
+ /* Drop the packet (and update drop stats) when queue is full */
+ if (unlikely(rte_sched_port_red_drop(port, pkt, qindex, qlen) || (qlen >= qsize))) {
+#ifdef RTE_SCHED_COLLECT_STATS
+ /* Update drop statistics before freeing the mbuf: the helpers below read pkt->pkt_len */
+ rte_sched_port_update_subport_stats_on_drop(port, qindex, pkt);
+ rte_sched_port_update_queue_stats_on_drop(port, qindex, pkt);
+#endif
+ rte_pktmbuf_free(pkt);
+ return 0;
+ }
+
+ /* Enqueue packet */
+ qbase[q->qw & (qsize - 1)] = pkt;
+ q->qw ++;
+
+ /* Activate queue in the port bitmap */
+ rte_bitmap_set(port->bmp, qindex);
+
+ /* Statistics */
+#ifdef RTE_SCHED_COLLECT_STATS
+ rte_sched_port_update_subport_stats(port, qindex, pkt);
+ rte_sched_port_update_queue_stats(port, qindex, pkt);
+#endif
+
+ return 1;
+}
+
+#if RTE_SCHED_ENQUEUE == 0
+
+int
+rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
+{
+ uint32_t result, i;
+
+ result = 0;
+
+ for (i = 0; i < n_pkts; i ++) {
+ struct rte_mbuf *pkt;
+ struct rte_mbuf **q_base;
+ uint32_t subport, pipe, traffic_class, queue, qindex;
+
+ pkt = pkts[i];
+
+ rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);
+
+ qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
+
+ q_base = rte_sched_port_qbase(port, qindex);
+
+ result += rte_sched_port_enqueue_qwa(port, qindex, q_base, pkt);
+ }
+
+ return result;
+}
+
+#else
+
+/* The enqueue function implements a 4-level pipeline with each stage processing
+ * two different packets. The purpose of using a pipeline is to hide the latency
+ * of prefetching the data structures. The naming convention is presented in the
+ * diagram below, where pXY stands for packet Y (0 or 1) of the pair currently
+ * handled by pipeline stage X:
+ *
+ * p00 _______ p10 _______ p20 _______ p30 _______
+ * ----->| |----->| |----->| |----->| |----->
+ * | 0 | | 1 | | 2 | | 3 |
+ * ----->|_______|----->|_______|----->|_______|----->|_______|----->
+ * p01 p11 p21 p31
+ *
+ ***/
+int
+rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
+{
+ struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21, *pkt30, *pkt31, *pkt_last;
+ struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base, **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
+ uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
+ uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
+ uint32_t result, i;
+
+ result = 0;
+
+ /* Less than 6 input packets available, which is not enough to feed the pipeline */
+ if (unlikely(n_pkts < 6)) {
+ struct rte_mbuf **q_base[5];
+ uint32_t q[5];
+
+ /* Prefetch the mbuf structure of each packet */
+ for (i = 0; i < n_pkts; i ++) {
+ rte_prefetch0(pkts[i]);
+ }
+
+ /* Prefetch the queue structure for each queue */
+ for (i = 0; i < n_pkts; i ++) {
+ q[i] = rte_sched_port_enqueue_qptrs_prefetch0(port, pkts[i]);
+ }
+
+ /* Prefetch the write pointer location of each queue */
+ for (i = 0; i < n_pkts; i ++) {
+ q_base[i] = rte_sched_port_qbase(port, q[i]);
+ rte_sched_port_enqueue_qwa_prefetch0(port, q[i], q_base[i]);
+ }
+
+ /* Write each packet to its queue */
+ for (i = 0; i < n_pkts; i ++) {
+ result += rte_sched_port_enqueue_qwa(port, q[i], q_base[i], pkts[i]);
+ }
+
+ return result;
+ }
+
+ /* Feed the first 3 stages of the pipeline (6 packets needed) */
+ pkt20 = pkts[0];
+ pkt21 = pkts[1];
+ rte_prefetch0(pkt20);
+ rte_prefetch0(pkt21);
+
+ pkt10 = pkts[2];
+ pkt11 = pkts[3];
+ rte_prefetch0(pkt10);
+ rte_prefetch0(pkt11);
+
+ q20 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt20);
+ q21 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt21);
+
+ pkt00 = pkts[4];
+ pkt01 = pkts[5];
+ rte_prefetch0(pkt00);
+ rte_prefetch0(pkt01);
+
+ q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
+ q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);
+
+ q20_base = rte_sched_port_qbase(port, q20);
+ q21_base = rte_sched_port_qbase(port, q21);
+ rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
+ rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);
+
+ /* Run the pipeline */
+ for (i = 6; i < (n_pkts & (~1)); i += 2) {
+ /* Propagate stage inputs */
+ pkt30 = pkt20;
+ pkt31 = pkt21;
+ pkt20 = pkt10;
+ pkt21 = pkt11;
+ pkt10 = pkt00;
+ pkt11 = pkt01;
+ q30 = q20;
+ q31 = q21;
+ q20 = q10;
+ q21 = q11;
+ q30_base = q20_base;
+ q31_base = q21_base;
+
+ /* Stage 0: Get packets in */
+ pkt00 = pkts[i];
+ pkt01 = pkts[i + 1];
+ rte_prefetch0(pkt00);
+ rte_prefetch0(pkt01);
+
+ /* Stage 1: Prefetch queue structure storing queue pointers */
+ q10 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt10);
+ q11 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt11);
+
+ /* Stage 2: Prefetch queue write location */
+ q20_base = rte_sched_port_qbase(port, q20);
+ q21_base = rte_sched_port_qbase(port, q21);
+ rte_sched_port_enqueue_qwa_prefetch0(port, q20, q20_base);
+ rte_sched_port_enqueue_qwa_prefetch0(port, q21, q21_base);
+
+ /* Stage 3: Write packet to queue and activate queue */
+ r30 = rte_sched_port_enqueue_qwa(port, q30, q30_base, pkt30);
+ r31 = rte_sched_port_enqueue_qwa(port, q31, q31_base, pkt31);
+ result += r30 + r31;
+ }
+
+ /* Drain the pipeline (exactly 6 packets). Handle the last packet in the case
+ * of an odd number of input packets. */
+ pkt_last = pkts[n_pkts - 1];
+ rte_prefetch0(pkt_last);
+
+ q00 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt00);
+ q01 = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt01);
+
+ q10_base = rte_sched_port_qbase(port, q10);
+ q11_base = rte_sched_port_qbase(port, q11);
+ rte_sched_port_enqueue_qwa_prefetch0(port, q10, q10_base);
+ rte_sched_port_enqueue_qwa_prefetch0(port, q11, q11_base);
+
+ r20 = rte_sched_port_enqueue_qwa(port, q20, q20_base, pkt20);
+ r21 = rte_sched_port_enqueue_qwa(port, q21, q21_base, pkt21);
+ result += r20 + r21;
+
+ q_last = rte_sched_port_enqueue_qptrs_prefetch0(port, pkt_last);
+
+ q00_base = rte_sched_port_qbase(port, q00);
+ q01_base = rte_sched_port_qbase(port, q01);
+ rte_sched_port_enqueue_qwa_prefetch0(port, q00, q00_base);
+ rte_sched_port_enqueue_qwa_prefetch0(port, q01, q01_base);
+
+ r10 = rte_sched_port_enqueue_qwa(port, q10, q10_base, pkt10);
+ r11 = rte_sched_port_enqueue_qwa(port, q11, q11_base, pkt11);
+ result += r10 + r11;
+
+ q_last_base = rte_sched_port_qbase(port, q_last);
+ rte_sched_port_enqueue_qwa_prefetch0(port, q_last, q_last_base);
+
+ r00 = rte_sched_port_enqueue_qwa(port, q00, q00_base, pkt00);
+ r01 = rte_sched_port_enqueue_qwa(port, q01, q01_base, pkt01);
+ result += r00 + r01;
+
+ if (n_pkts & 1) {
+ r_last = rte_sched_port_enqueue_qwa(port, q_last, q_last_base, pkt_last);
+ result += r_last;
+ }
+
+ return result;
+}
+
+#endif /* RTE_SCHED_ENQUEUE */
+
+#if RTE_SCHED_TS_CREDITS_UPDATE == 0
+
+#define grinder_credits_update(port, pos)
+
+#elif !defined(RTE_SCHED_SUBPORT_TC_OV)
+
+static inline void
+grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ struct rte_sched_subport *subport = grinder->subport;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ struct rte_sched_pipe_profile *params = grinder->pipe_params;
+ uint64_t n_periods;
+
+ /* Subport TB */
+ n_periods = (port->time - subport->tb_time) / subport->tb_period;
+ subport->tb_credits += n_periods * subport->tb_credits_per_period;
+ subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
+ subport->tb_time += n_periods * subport->tb_period;
+
+ /* Pipe TB */
+ n_periods = (port->time - pipe->tb_time) / params->tb_period;
+ pipe->tb_credits += n_periods * params->tb_credits_per_period;
+ pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
+ pipe->tb_time += n_periods * params->tb_period;
+
+ /* Subport TCs */
+ if (unlikely(port->time >= subport->tc_time)) {
+ subport->tc_credits[0] = subport->tc_credits_per_period[0];
+ subport->tc_credits[1] = subport->tc_credits_per_period[1];
+ subport->tc_credits[2] = subport->tc_credits_per_period[2];
+ subport->tc_credits[3] = subport->tc_credits_per_period[3];
+ subport->tc_time = port->time + subport->tc_period;
+ }
+
+ /* Pipe TCs */
+ if (unlikely(port->time >= pipe->tc_time)) {
+ pipe->tc_credits[0] = params->tc_credits_per_period[0];
+ pipe->tc_credits[1] = params->tc_credits_per_period[1];
+ pipe->tc_credits[2] = params->tc_credits_per_period[2];
+ pipe->tc_credits[3] = params->tc_credits_per_period[3];
+ pipe->tc_time = port->time + params->tc_period;
+ }
+}
+
+#else
+
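+/* Adapt the subport TC3 oversubscription watermark once per subport TC
+ * period: if TC3 consumption exceeded its estimated allowance (the TC3
+ * per-period budget minus what TC0..TC2 consumed, less one MTU of slack),
+ * the watermark is lowered by 1/128th of its value, otherwise it is raised
+ * by 1/128th + 1; the result is clamped to [tc_ov_wm_min, tc_ov_wm_max]. */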
+static inline uint32_t
+grinder_tc_ov_credits_update(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ struct rte_sched_subport *subport = grinder->subport;
+ uint32_t tc_ov_consumption[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t tc_ov_consumption_max;
+ uint32_t tc_ov_wm = subport->tc_ov_wm;
+
+ if (subport->tc_ov == 0) {
+ return subport->tc_ov_wm_max;
+ }
+
+ tc_ov_consumption[0] = subport->tc_credits_per_period[0] - subport->tc_credits[0];
+ tc_ov_consumption[1] = subport->tc_credits_per_period[1] - subport->tc_credits[1];
+ tc_ov_consumption[2] = subport->tc_credits_per_period[2] - subport->tc_credits[2];
+ tc_ov_consumption[3] = subport->tc_credits_per_period[3] - subport->tc_credits[3];
+
+ tc_ov_consumption_max = subport->tc_credits_per_period[3] -
+ (tc_ov_consumption[0] + tc_ov_consumption[1] + tc_ov_consumption[2]);
+
+ if (tc_ov_consumption[3] > (tc_ov_consumption_max - port->mtu)) {
+ tc_ov_wm -= tc_ov_wm >> 7;
+ if (tc_ov_wm < subport->tc_ov_wm_min) {
+ tc_ov_wm = subport->tc_ov_wm_min;
+ }
+ return tc_ov_wm;
+ }
+
+ tc_ov_wm += (tc_ov_wm >> 7) + 1;
+ if (tc_ov_wm > subport->tc_ov_wm_max) {
+ tc_ov_wm = subport->tc_ov_wm_max;
+ }
+ return tc_ov_wm;
+}
+
+static inline void
+grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ struct rte_sched_subport *subport = grinder->subport;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ struct rte_sched_pipe_profile *params = grinder->pipe_params;
+ uint64_t n_periods;
+
+ /* Subport TB */
+ n_periods = (port->time - subport->tb_time) / subport->tb_period;
+ subport->tb_credits += n_periods * subport->tb_credits_per_period;
+ subport->tb_credits = rte_sched_min_val_2_u32(subport->tb_credits, subport->tb_size);
+ subport->tb_time += n_periods * subport->tb_period;
+
+ /* Pipe TB */
+ n_periods = (port->time - pipe->tb_time) / params->tb_period;
+ pipe->tb_credits += n_periods * params->tb_credits_per_period;
+ pipe->tb_credits = rte_sched_min_val_2_u32(pipe->tb_credits, params->tb_size);
+ pipe->tb_time += n_periods * params->tb_period;
+
+ /* Subport TCs */
+ if (unlikely(port->time >= subport->tc_time)) {
+ subport->tc_ov_wm = grinder_tc_ov_credits_update(port, pos);
+
+ subport->tc_credits[0] = subport->tc_credits_per_period[0];
+ subport->tc_credits[1] = subport->tc_credits_per_period[1];
+ subport->tc_credits[2] = subport->tc_credits_per_period[2];
+ subport->tc_credits[3] = subport->tc_credits_per_period[3];
+
+ subport->tc_time = port->time + subport->tc_period;
+ subport->tc_ov_period_id ++;
+ }
+
+ /* Pipe TCs */
+ if (unlikely(port->time >= pipe->tc_time)) {
+ pipe->tc_credits[0] = params->tc_credits_per_period[0];
+ pipe->tc_credits[1] = params->tc_credits_per_period[1];
+ pipe->tc_credits[2] = params->tc_credits_per_period[2];
+ pipe->tc_credits[3] = params->tc_credits_per_period[3];
+ pipe->tc_time = port->time + params->tc_period;
+ }
+
+ /* Pipe TCs - Oversubscription */
+ if (unlikely(pipe->tc_ov_period_id != subport->tc_ov_period_id)) {
+ pipe->tc_ov_credits = subport->tc_ov_wm * params->tc_ov_weight;
+
+ pipe->tc_ov_period_id = subport->tc_ov_period_id;
+ }
+}
+
+#endif /* RTE_SCHED_TS_CREDITS_UPDATE, RTE_SCHED_SUBPORT_TC_OV */
+
+#if RTE_SCHED_TS_CREDITS_CHECK
+
+#ifndef RTE_SCHED_SUBPORT_TC_OV
+
+static inline int
+grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ struct rte_sched_subport *subport = grinder->subport;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ struct rte_mbuf *pkt = grinder->pkt;
+ uint32_t tc_index = grinder->tc_index;
+ uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+ uint32_t subport_tb_credits = subport->tb_credits;
+ uint32_t subport_tc_credits = subport->tc_credits[tc_index];
+ uint32_t pipe_tb_credits = pipe->tb_credits;
+ uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
+ int enough_credits;
+
+ /* Check queue credits */
+ enough_credits = (pkt_len <= subport_tb_credits) &&
+ (pkt_len <= subport_tc_credits) &&
+ (pkt_len <= pipe_tb_credits) &&
+ (pkt_len <= pipe_tc_credits);
+
+ if (!enough_credits) {
+ return 0;
+ }
+
+ /* Update port credits */
+ subport->tb_credits -= pkt_len;
+ subport->tc_credits[tc_index] -= pkt_len;
+ pipe->tb_credits -= pkt_len;
+ pipe->tc_credits[tc_index] -= pkt_len;
+
+ return 1;
+}
+
+#else
+
+static inline int
+grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ struct rte_sched_subport *subport = grinder->subport;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ struct rte_mbuf *pkt = grinder->pkt;
+ uint32_t tc_index = grinder->tc_index;
+ uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+ uint32_t subport_tb_credits = subport->tb_credits;
+ uint32_t subport_tc_credits = subport->tc_credits[tc_index];
+ uint32_t pipe_tb_credits = pipe->tb_credits;
+ uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
+ uint32_t pipe_tc_ov_mask1[] = {UINT32_MAX, UINT32_MAX, UINT32_MAX, pipe->tc_ov_credits};
+ uint32_t pipe_tc_ov_mask2[] = {0, 0, 0, UINT32_MAX};
+ uint32_t pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
+ int enough_credits;
+
+ /* Check pipe and subport credits */
+ enough_credits = (pkt_len <= subport_tb_credits) &&
+ (pkt_len <= subport_tc_credits) &&
+ (pkt_len <= pipe_tb_credits) &&
+ (pkt_len <= pipe_tc_credits) &&
+ (pkt_len <= pipe_tc_ov_credits);
+
+ if (!enough_credits) {
+ return 0;
+ }
+
+ /* Update pipe and subport credits */
+ subport->tb_credits -= pkt_len;
+ subport->tc_credits[tc_index] -= pkt_len;
+ pipe->tb_credits -= pkt_len;
+ pipe->tc_credits[tc_index] -= pkt_len;
+ pipe->tc_ov_credits -= pipe_tc_ov_mask2[tc_index] & pkt_len;
+
+ return 1;
+}
+
+#endif /* RTE_SCHED_SUBPORT_TC_OV */
+
+#endif /* RTE_SCHED_TS_CREDITS_CHECK */
+
+static inline int
+grinder_schedule(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
+ struct rte_mbuf *pkt = grinder->pkt;
+ uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+
+#if RTE_SCHED_TS_CREDITS_CHECK
+ if (!grinder_credits_check(port, pos)) {
+ return 0;
+ }
+#endif
+
+ /* Advance port time */
+ port->time += pkt_len;
+
+ /* Send packet */
+ port->pkts_out[port->n_pkts_out ++] = pkt;
+ queue->qr ++;
+ grinder->wrr_tokens[grinder->qpos] += pkt_len * grinder->wrr_cost[grinder->qpos];
+ if (queue->qr == queue->qw) {
+ uint32_t qindex = grinder->qindex[grinder->qpos];
+
+ rte_bitmap_clear(port->bmp, qindex);
+ grinder->qmask &= ~(1 << grinder->qpos);
+ grinder->wrr_mask[grinder->qpos] = 0;
+ rte_sched_port_set_queue_empty_timestamp(port, qindex);
+ }
+
+ /* Reset pipe loop detection */
+ port->pipe_loop = RTE_SCHED_PIPE_INVALID;
+ grinder->productive = 1;
+
+ return 1;
+}
+
+#if RTE_SCHED_OPTIMIZATIONS
+
+static inline int
+grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
+{
+ __m128i index = _mm_set1_epi32 (base_pipe);
+ __m128i pipes = _mm_load_si128((__m128i *)port->grinder_base_bmp_pos);
+ __m128i res = _mm_cmpeq_epi32(pipes, index);
+ pipes = _mm_load_si128((__m128i *)(port->grinder_base_bmp_pos + 4));
+ pipes = _mm_cmpeq_epi32(pipes, index);
+ res = _mm_or_si128(res, pipes);
+
+ if (_mm_testz_si128(res, res))
+ return 0;
+
+ return 1;
+}
+
+#else
+
+static inline int
+grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
+{
+ uint32_t i;
+
+ for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
+ if (port->grinder_base_bmp_pos[i] == base_pipe) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#endif /* RTE_SCHED_OPTIMIZATIONS */
+
+static inline void
+grinder_pcache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ uint16_t w[4];
+
+ grinder->pcache_w = 0;
+ grinder->pcache_r = 0;
+
+ w[0] = (uint16_t) bmp_slab;
+ w[1] = (uint16_t) (bmp_slab >> 16);
+ w[2] = (uint16_t) (bmp_slab >> 32);
+ w[3] = (uint16_t) (bmp_slab >> 48);
+
+ grinder->pcache_qmask[grinder->pcache_w] = w[0];
+ grinder->pcache_qindex[grinder->pcache_w] = bmp_pos;
+ grinder->pcache_w += (w[0] != 0);
+
+ grinder->pcache_qmask[grinder->pcache_w] = w[1];
+ grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 16;
+ grinder->pcache_w += (w[1] != 0);
+
+ grinder->pcache_qmask[grinder->pcache_w] = w[2];
+ grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 32;
+ grinder->pcache_w += (w[2] != 0);
+
+ grinder->pcache_qmask[grinder->pcache_w] = w[3];
+ grinder->pcache_qindex[grinder->pcache_w] = bmp_pos + 48;
+ grinder->pcache_w += (w[3] != 0);
+}
+
+static inline void
+grinder_tccache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t qindex, uint16_t qmask)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ uint8_t b[4];
+
+ grinder->tccache_w = 0;
+ grinder->tccache_r = 0;
+
+ b[0] = (uint8_t) (qmask & 0xF);
+ b[1] = (uint8_t) ((qmask >> 4) & 0xF);
+ b[2] = (uint8_t) ((qmask >> 8) & 0xF);
+ b[3] = (uint8_t) ((qmask >> 12) & 0xF);
+
+ grinder->tccache_qmask[grinder->tccache_w] = b[0];
+ grinder->tccache_qindex[grinder->tccache_w] = qindex;
+ grinder->tccache_w += (b[0] != 0);
+
+ grinder->tccache_qmask[grinder->tccache_w] = b[1];
+ grinder->tccache_qindex[grinder->tccache_w] = qindex + 4;
+ grinder->tccache_w += (b[1] != 0);
+
+ grinder->tccache_qmask[grinder->tccache_w] = b[2];
+ grinder->tccache_qindex[grinder->tccache_w] = qindex + 8;
+ grinder->tccache_w += (b[2] != 0);
+
+ grinder->tccache_qmask[grinder->tccache_w] = b[3];
+ grinder->tccache_qindex[grinder->tccache_w] = qindex + 12;
+ grinder->tccache_w += (b[3] != 0);
+}
+
+static inline int
+grinder_next_tc(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ struct rte_mbuf **qbase;
+ uint32_t qindex;
+ uint16_t qsize;
+
+ if (grinder->tccache_r == grinder->tccache_w) {
+ return 0;
+ }
+
+ qindex = grinder->tccache_qindex[grinder->tccache_r];
+ qbase = rte_sched_port_qbase(port, qindex);
+ qsize = rte_sched_port_qsize(port, qindex);
+
+ grinder->tc_index = (qindex >> 2) & 0x3;
+ grinder->qmask = grinder->tccache_qmask[grinder->tccache_r];
+ grinder->qsize = qsize;
+
+ grinder->qindex[0] = qindex;
+ grinder->qindex[1] = qindex + 1;
+ grinder->qindex[2] = qindex + 2;
+ grinder->qindex[3] = qindex + 3;
+
+ grinder->queue[0] = port->queue + qindex;
+ grinder->queue[1] = port->queue + qindex + 1;
+ grinder->queue[2] = port->queue + qindex + 2;
+ grinder->queue[3] = port->queue + qindex + 3;
+
+ grinder->qbase[0] = qbase;
+ grinder->qbase[1] = qbase + qsize;
+ grinder->qbase[2] = qbase + 2 * qsize;
+ grinder->qbase[3] = qbase + 3 * qsize;
+
+ grinder->tccache_r ++;
+ return 1;
+}
+
+static inline int
+grinder_next_pipe(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ uint32_t pipe_qindex;
+ uint16_t pipe_qmask;
+
+ if (grinder->pcache_r < grinder->pcache_w) {
+ pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
+ pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
+ grinder->pcache_r ++;
+ } else {
+ uint64_t bmp_slab = 0;
+ uint32_t bmp_pos = 0;
+
+ /* Get another non-empty pipe group */
+ if (unlikely(rte_bitmap_scan(port->bmp, &bmp_pos, &bmp_slab) <= 0)) {
+ return 0;
+ }
+
+#if RTE_SCHED_DEBUG
+ debug_check_queue_slab(port, bmp_pos, bmp_slab);
+#endif
+
+ /* Return if the pipe group is already being handled by one of the other grinders */
+ port->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
+ if (unlikely(grinder_pipe_exists(port, bmp_pos))) {
+ return 0;
+ }
+ port->grinder_base_bmp_pos[pos] = bmp_pos;
+
+ /* Install new pipe group into grinder's pipe cache */
+ grinder_pcache_populate(port, pos, bmp_pos, bmp_slab);
+
+ pipe_qmask = grinder->pcache_qmask[0];
+ pipe_qindex = grinder->pcache_qindex[0];
+ grinder->pcache_r = 1;
+ }
+
+ /* Install new pipe in the grinder */
+ grinder->pindex = pipe_qindex >> 4;
+ grinder->subport = port->subport + (grinder->pindex / port->n_pipes_per_subport);
+ grinder->pipe = port->pipe + grinder->pindex;
+ grinder->pipe_params = NULL; /* to be set after the pipe structure is prefetched */
+ grinder->productive = 0;
+
+ grinder_tccache_populate(port, pos, pipe_qindex, pipe_qmask);
+ grinder_next_tc(port, pos);
+
+ /* Check for pipe exhaustion */
+ if (grinder->pindex == port->pipe_loop) {
+ port->pipe_exhaustion = 1;
+ port->pipe_loop = RTE_SCHED_PIPE_INVALID;
+ }
+
+ return 1;
+}
+
+#if RTE_SCHED_WRR == 0
+
+#define grinder_wrr_load(a,b)
+
+#define grinder_wrr_store(a,b)
+
+static inline void
+grinder_wrr(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ uint64_t slab = grinder->qmask;
+
+ if (rte_bsf64(slab, &grinder->qpos) == 0) {
+ rte_panic("grinder wrr\n");
+ }
+}
+
+#elif RTE_SCHED_WRR == 1
+
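+/* WRR within the active traffic class: the per-queue token counters are
+ * loaded from the pipe (shifted up by RTE_SCHED_WRR_SHIFT bits), inactive
+ * queues are forced to the maximum value via wrr_mask, and grinder_wrr()
+ * always picks the queue with the fewest tokens. Each scheduled packet adds
+ * pkt_len * wrr_cost[q] to its queue's counter (see grinder_schedule()),
+ * so lower-cost (higher-weight) queues are selected more often. */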
+static inline void
+grinder_wrr_load(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ struct rte_sched_pipe_profile *pipe_params = grinder->pipe_params;
+ uint32_t tc_index = grinder->tc_index;
+ uint32_t qmask = grinder->qmask;
+ uint32_t qindex;
+
+ qindex = tc_index * 4;
+
+ grinder->wrr_tokens[0] = ((uint16_t) pipe->wrr_tokens[qindex]) << RTE_SCHED_WRR_SHIFT;
+ grinder->wrr_tokens[1] = ((uint16_t) pipe->wrr_tokens[qindex + 1]) << RTE_SCHED_WRR_SHIFT;
+ grinder->wrr_tokens[2] = ((uint16_t) pipe->wrr_tokens[qindex + 2]) << RTE_SCHED_WRR_SHIFT;
+ grinder->wrr_tokens[3] = ((uint16_t) pipe->wrr_tokens[qindex + 3]) << RTE_SCHED_WRR_SHIFT;
+
+ grinder->wrr_mask[0] = (qmask & 0x1) * 0xFFFF;
+ grinder->wrr_mask[1] = ((qmask >> 1) & 0x1) * 0xFFFF;
+ grinder->wrr_mask[2] = ((qmask >> 2) & 0x1) * 0xFFFF;
+ grinder->wrr_mask[3] = ((qmask >> 3) & 0x1) * 0xFFFF;
+
+ grinder->wrr_cost[0] = pipe_params->wrr_cost[qindex];
+ grinder->wrr_cost[1] = pipe_params->wrr_cost[qindex + 1];
+ grinder->wrr_cost[2] = pipe_params->wrr_cost[qindex + 2];
+ grinder->wrr_cost[3] = pipe_params->wrr_cost[qindex + 3];
+}
+
+static inline void
+grinder_wrr_store(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ struct rte_sched_pipe *pipe = grinder->pipe;
+ uint32_t tc_index = grinder->tc_index;
+ uint32_t qindex;
+
+ qindex = tc_index * 4;
+
+ pipe->wrr_tokens[qindex] = (uint8_t) ((grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >> RTE_SCHED_WRR_SHIFT);
+ pipe->wrr_tokens[qindex + 1] = (uint8_t) ((grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >> RTE_SCHED_WRR_SHIFT);
+ pipe->wrr_tokens[qindex + 2] = (uint8_t) ((grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >> RTE_SCHED_WRR_SHIFT);
+ pipe->wrr_tokens[qindex + 3] = (uint8_t) ((grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >> RTE_SCHED_WRR_SHIFT);
+}
+
+static inline void
+grinder_wrr(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ uint16_t wrr_tokens_min;
+
+ grinder->wrr_tokens[0] |= ~grinder->wrr_mask[0];
+ grinder->wrr_tokens[1] |= ~grinder->wrr_mask[1];
+ grinder->wrr_tokens[2] |= ~grinder->wrr_mask[2];
+ grinder->wrr_tokens[3] |= ~grinder->wrr_mask[3];
+
+ grinder->qpos = rte_min_pos_4_u16(grinder->wrr_tokens);
+ wrr_tokens_min = grinder->wrr_tokens[grinder->qpos];
+
+ grinder->wrr_tokens[0] -= wrr_tokens_min;
+ grinder->wrr_tokens[1] -= wrr_tokens_min;
+ grinder->wrr_tokens[2] -= wrr_tokens_min;
+ grinder->wrr_tokens[3] -= wrr_tokens_min;
+}
+
+#else
+
+#error Invalid value for RTE_SCHED_WRR
+
+#endif /* RTE_SCHED_WRR */
+
+#define grinder_evict(port, pos)
+
+static inline void
+grinder_prefetch_pipe(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+
+ rte_prefetch0(grinder->pipe);
+ rte_prefetch0(grinder->queue[0]);
+}
+
+static inline void
+grinder_prefetch_tc_queue_arrays(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ uint16_t qsize, qr[4];
+
+ qsize = grinder->qsize;
+ qr[0] = grinder->queue[0]->qr & (qsize - 1);
+ qr[1] = grinder->queue[1]->qr & (qsize - 1);
+ qr[2] = grinder->queue[2]->qr & (qsize - 1);
+ qr[3] = grinder->queue[3]->qr & (qsize - 1);
+
+ rte_prefetch0(grinder->qbase[0] + qr[0]);
+ rte_prefetch0(grinder->qbase[1] + qr[1]);
+
+ grinder_wrr_load(port, pos);
+ grinder_wrr(port, pos);
+
+ rte_prefetch0(grinder->qbase[2] + qr[2]);
+ rte_prefetch0(grinder->qbase[3] + qr[3]);
+}
+
+static inline void
+grinder_prefetch_mbuf(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+ uint32_t qpos = grinder->qpos;
+ struct rte_mbuf **qbase = grinder->qbase[qpos];
+ uint16_t qsize = grinder->qsize;
+ uint16_t qr = grinder->queue[qpos]->qr & (qsize - 1);
+
+ grinder->pkt = qbase[qr];
+ rte_prefetch0(grinder->pkt);
+
+ if (unlikely((qr & 0x7) == 7)) {
+ uint16_t qr_next = (grinder->queue[qpos]->qr + 1) & (qsize - 1);
+
+ rte_prefetch0(qbase + qr_next);
+ }
+}
+
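+/* Dequeue state machine, one instance per grinder: prefetch the next active
+ * pipe, then its TC queue arrays, then the head mbuf of the selected queue,
+ * and finally try to schedule that packet. Returns 1 only when a packet was
+ * actually sent in this step. */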
+static inline uint32_t
+grinder_handle(struct rte_sched_port *port, uint32_t pos)
+{
+ struct rte_sched_grinder *grinder = port->grinder + pos;
+
+ switch (grinder->state) {
+ case e_GRINDER_PREFETCH_PIPE:
+ {
+ if (grinder_next_pipe(port, pos)) {
+ grinder_prefetch_pipe(port, pos);
+ port->busy_grinders ++;
+
+ grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
+ return 0;
+ }
+
+ return 0;
+ }
+
+ case e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS:
+ {
+ struct rte_sched_pipe *pipe = grinder->pipe;
+
+ grinder->pipe_params = port->pipe_profiles + pipe->profile;
+ grinder_prefetch_tc_queue_arrays(port, pos);
+ grinder_credits_update(port, pos);
+
+ grinder->state = e_GRINDER_PREFETCH_MBUF;
+ return 0;
+ }
+
+ case e_GRINDER_PREFETCH_MBUF:
+ {
+ grinder_prefetch_mbuf(port, pos);
+
+ grinder->state = e_GRINDER_READ_MBUF;
+ return 0;
+ }
+
+ case e_GRINDER_READ_MBUF:
+ {
+ uint32_t result = 0;
+
+ result = grinder_schedule(port, pos);
+
+ /* Look for next packet within the same TC */
+ if (result && grinder->qmask) {
+ grinder_wrr(port, pos);
+ grinder_prefetch_mbuf(port, pos);
+
+ return 1;
+ }
+ grinder_wrr_store(port, pos);
+
+ /* Look for another active TC within same pipe */
+ if (grinder_next_tc(port, pos)) {
+ grinder_prefetch_tc_queue_arrays(port, pos);
+
+ grinder->state = e_GRINDER_PREFETCH_MBUF;
+ return result;
+ }
+ if ((grinder->productive == 0) && (port->pipe_loop == RTE_SCHED_PIPE_INVALID)) {
+ port->pipe_loop = grinder->pindex;
+ }
+ grinder_evict(port, pos);
+
+ /* Look for another active pipe */
+ if (grinder_next_pipe(port, pos)) {
+ grinder_prefetch_pipe(port, pos);
+
+ grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
+ return result;
+ }
+
+ /* No active pipe found */
+ port->busy_grinders --;
+
+ grinder->state = e_GRINDER_PREFETCH_PIPE;
+ return result;
+ }
+
+ default:
+ rte_panic("Algorithmic error (invalid state)\n");
+ return 0;
+ }
+}
+
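+/* The scheduler clock (port->time) is kept in bytes. On every dequeue call
+ * the elapsed TSC cycles since the previous call are converted to bytes via
+ * cycles_per_byte, and port->time is advanced to at least that value;
+ * grinder_schedule() then moves it further ahead by the length of each
+ * packet it sends. */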
+static inline void
+rte_sched_port_time_resync(struct rte_sched_port *port)
+{
+ uint64_t cycles = rte_get_tsc_cycles();
+ uint64_t cycles_diff = cycles - port->time_cpu_cycles;
+ double bytes_diff = ((double) cycles_diff) / port->cycles_per_byte;
+
+ /* Advance port time */
+ port->time_cpu_cycles = cycles;
+ port->time_cpu_bytes += (uint64_t) bytes_diff;
+ if (port->time < port->time_cpu_bytes) {
+ port->time = port->time_cpu_bytes;
+ }
+
+ /* Reset pipe loop detection */
+ port->pipe_loop = RTE_SCHED_PIPE_INVALID;
+}
+
+static inline int
+rte_sched_port_exceptions(struct rte_sched_port *port, int second_pass)
+{
+ int exceptions;
+
+ /* Check if any exception flag is set */
+ exceptions = (second_pass && port->busy_grinders == 0) ||
+ (port->pipe_exhaustion == 1);
+
+ /* Clear exception flags */
+ port->pipe_exhaustion = 0;
+
+ return exceptions;
+}
+
+int
+rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
+{
+ uint32_t i, count;
+
+ port->pkts_out = pkts;
+ port->n_pkts_out = 0;
+
+ rte_sched_port_time_resync(port);
+
+ /* Take each queue in the grinder one step further */
+ for (i = 0, count = 0; ; i ++) {
+ count += grinder_handle(port, i & (RTE_SCHED_PORT_N_GRINDERS - 1));
+ if ((count == n_pkts) ||
+ rte_sched_port_exceptions(port, i >= RTE_SCHED_PORT_N_GRINDERS)) {
+ break;
+ }
+ }
+
+ return count;
+}
diff --git a/src/dpdk_lib18/librte_sched/rte_sched.h b/src/dpdk_lib18/librte_sched/rte_sched.h
new file mode 100755
index 00000000..e6bba22e
--- /dev/null
+++ b/src/dpdk_lib18/librte_sched/rte_sched.h
@@ -0,0 +1,442 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_SCHED_H__
+#define __INCLUDE_RTE_SCHED_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Hierarchical Scheduler
+ *
+ * The hierarchical scheduler prioritizes the transmission of packets from different
+ * users and traffic classes according to the Service Level Agreements (SLAs) defined
+ * for the current network node.
+ *
+ * The scheduler supports thousands of packet queues grouped under a 5-level hierarchy:
+ * 1. Port:
+ * - Typical usage: output Ethernet port;
+ * - Multiple ports are scheduled in round robin order with equal priority;
+ * 2. Subport:
+ * - Typical usage: group of users;
+ * - Traffic shaping using the token bucket algorithm (one bucket per subport);
+ * - Upper limit enforced per traffic class at subport level;
+ * - Lower priority traffic classes able to reuse subport bandwidth currently
+ * unused by higher priority traffic classes of the same subport;
+ * - When any subport traffic class is oversubscribed (configuration time
+ * event), the usage of subport member pipes with high demand for that
+ * traffic class is truncated to a dynamically adjusted value with no
+ * impact to low demand pipes;
+ * 3. Pipe:
+ * - Typical usage: individual user/subscriber;
+ * - Traffic shaping using the token bucket algorithm (one bucket per pipe);
+ * 4. Traffic class:
+ * - Traffic classes of the same pipe handled in strict priority order;
+ * - Upper limit enforced per traffic class at the pipe level;
+ * - Lower priority traffic classes able to reuse pipe bandwidth currently
+ * unused by higher priority traffic classes of the same pipe;
+ * 5. Queue:
+ * - Typical usage: queue hosting packets from one or multiple connections
+ * of same traffic class belonging to the same user;
+ * - Weighted Round Robin (WRR) is used to service the queues within same
+ * pipe traffic class.
+ *
+ ***/
+
+#include <sys/types.h>
+#include <rte_mbuf.h>
+#include <rte_meter.h>
+
+/** Random Early Detection (RED) */
+#ifdef RTE_SCHED_RED
+#include "rte_red.h"
+#endif
+
+/** Number of traffic classes per pipe (as well as subport). Cannot be changed. */
+#define RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE 4
+
+/** Number of queues per pipe traffic class. Cannot be changed. */
+#define RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS 4
+
+/** Number of queues per pipe. */
+#define RTE_SCHED_QUEUES_PER_PIPE \
+ (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * \
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+
+/** Maximum number of pipe profiles that can be defined per port. Compile-time configurable.*/
+#ifndef RTE_SCHED_PIPE_PROFILES_PER_PORT
+#define RTE_SCHED_PIPE_PROFILES_PER_PORT 256
+#endif
+
+/** Ethernet framing overhead. Overhead fields per Ethernet frame:
+ 1. Preamble: 7 bytes;
+ 2. Start of Frame Delimiter (SFD): 1 byte;
+ 3. Frame Check Sequence (FCS): 4 bytes;
+ 4. Inter Frame Gap (IFG): 12 bytes.
+The FCS is considered overhead only if not included in the packet length (field pkt_len
+of struct rte_mbuf). */
+#ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT
+#define RTE_SCHED_FRAME_OVERHEAD_DEFAULT 24
+#endif
+
+/** Subport configuration parameters. The period and credits_per_period parameters are measured
+in bytes, with one byte meaning the time duration associated with the transmission of one byte
+on the physical medium of the output port, with pipe or pipe traffic class rate (measured as
+percentage of output port rate) determined as credits_per_period divided by period. One credit
+represents one byte. */
+struct rte_sched_subport_params {
+ /* Subport token bucket */
+ uint32_t tb_rate; /**< Subport token bucket rate (measured in bytes per second) */
+ uint32_t tb_size; /**< Subport token bucket size (measured in credits) */
+
+ /* Subport traffic classes */
+ uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Subport traffic class rates (measured in bytes per second) */
+ uint32_t tc_period; /**< Enforcement period for traffic class rates (measured in milliseconds) */
+};
+
+/** Subport statistics */
+struct rte_sched_subport_stats {
+ /* Packets */
+ uint32_t n_pkts_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of packets successfully written to current
+ subport for each traffic class */
+ uint32_t n_pkts_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of packets dropped by the current
+ subport for each traffic class due to subport queues being full or congested*/
+
+ /* Bytes */
+ uint32_t n_bytes_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of bytes successfully written to current
+ subport for each traffic class*/
+ uint32_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Number of bytes dropped by the current
+ subport for each traffic class due to subport queues being full or congested */
+};
+
+/** Pipe configuration parameters. The period and credits_per_period parameters are measured
+in bytes, with one byte meaning the time duration associated with the transmission of one byte
+on the physical medium of the output port, with pipe or pipe traffic class rate (measured as
+percentage of output port rate) determined as credits_per_period divided by period. One credit
+represents one byte. */
+struct rte_sched_pipe_params {
+ /* Pipe token bucket */
+ uint32_t tb_rate; /**< Pipe token bucket rate (measured in bytes per second) */
+ uint32_t tb_size; /**< Pipe token bucket size (measured in credits) */
+
+ /* Pipe traffic classes */
+ uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Pipe traffic class rates (measured in bytes per second) */
+ uint32_t tc_period; /**< Enforcement period for pipe traffic class rates (measured in milliseconds) */
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ uint8_t tc_ov_weight; /**< Weight for the current pipe in the event of subport traffic class 3 oversubscription */
+#endif
+
+ /* Pipe queues */
+ uint8_t wrr_weights[RTE_SCHED_QUEUES_PER_PIPE]; /**< WRR weights for the queues of the current pipe */
+};
+
+/** Queue statistics */
+struct rte_sched_queue_stats {
+ /* Packets */
+ uint32_t n_pkts; /**< Number of packets successfully written to current queue */
+ uint32_t n_pkts_dropped; /**< Number of packets dropped due to current queue being full or congested */
+
+ /* Bytes */
+ uint32_t n_bytes; /**< Number of bytes successfully written to current queue */
+ uint32_t n_bytes_dropped; /**< Number of bytes dropped due to current queue being full or congested */
+};
+
+/** Port configuration parameters. */
+struct rte_sched_port_params {
+ const char *name; /**< Literal string to be associated to the current port scheduler instance */
+ int socket; /**< CPU socket ID where the memory for port scheduler should be allocated */
+ uint32_t rate; /**< Output port rate (measured in bytes per second) */
+ uint32_t mtu; /**< Maximum Ethernet frame size (measured in bytes). Should not include the framing overhead. */
+ uint32_t frame_overhead; /**< Framing overhead per packet (measured in bytes) */
+ uint32_t n_subports_per_port; /**< Number of subports for the current port scheduler instance*/
+ uint32_t n_pipes_per_subport; /**< Number of pipes for each port scheduler subport */
+ uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; /**< Packet queue size for each traffic class. All queues
+ within the same pipe traffic class have the same size. Queues from
+ different pipes serving the same traffic class have the same size. */
+ struct rte_sched_pipe_params *pipe_profiles; /**< Pipe profile table defined for current port scheduler instance.
+ Every pipe of the current port scheduler is configured using one of the
+ profiles from this table. */
+ uint32_t n_pipe_profiles; /**< Number of profiles in the pipe profile table */
+#ifdef RTE_SCHED_RED
+ struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][e_RTE_METER_COLORS]; /**< RED parameters */
+#endif
+};
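+
+/*
+ * Illustrative configuration sketch (all numeric values below are
+ * hypothetical and stand for a 10 GbE port with one subport and 4096 pipes
+ * sharing a single pipe profile):
+ *
+ *	static struct rte_sched_pipe_params pipe_profile = {
+ *		.tb_rate = 305175, .tb_size = 1000000,
+ *		.tc_rate = {305175, 305175, 305175, 305175}, .tc_period = 40,
+ *		.wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ *	};
+ *
+ *	static struct rte_sched_port_params port_params = {
+ *		.name = "sched_port_0", .socket = 0,
+ *		.rate = 1250000000, .mtu = 1522,
+ *		.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
+ *		.n_subports_per_port = 1, .n_pipes_per_subport = 4096,
+ *		.qsize = {64, 64, 64, 64},
+ *		.pipe_profiles = &pipe_profile, .n_pipe_profiles = 1,
+ *	};
+ *
+ *	struct rte_sched_port *port = rte_sched_port_config(&port_params);
+ *
+ * Subports and pipes are then set up with rte_sched_subport_config() and
+ * rte_sched_pipe_config() before any packets are enqueued.
+ */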
+
+/** Path through the scheduler hierarchy used by the scheduler enqueue operation to
+identify the destination queue for the current packet. Stored in the field hash.sched
+of struct rte_mbuf of each packet, typically written by the classification stage and read by
+scheduler enqueue.*/
+struct rte_sched_port_hierarchy {
+ uint32_t queue:2; /**< Queue ID (0 .. 3) */
+ uint32_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
+ uint32_t pipe:20; /**< Pipe ID */
+ uint32_t subport:6; /**< Subport ID */
+ uint32_t color:2; /**< Color */
+};
+
+/*
+ * Configuration
+ *
+ ***/
+
+/**
+ * Hierarchical scheduler port configuration
+ *
+ * @param params
+ * Port scheduler configuration parameter structure
+ * @return
+ * Handle to port scheduler instance upon success or NULL otherwise.
+ */
+struct rte_sched_port *
+rte_sched_port_config(struct rte_sched_port_params *params);
+
+/**
+ * Hierarchical scheduler port free
+ *
+ * @param port
+ * Handle to port scheduler instance
+ */
+void
+rte_sched_port_free(struct rte_sched_port *port);
+
+/**
+ * Hierarchical scheduler subport configuration
+ *
+ * @param port
+ * Handle to port scheduler instance
+ * @param subport_id
+ * Subport ID
+ * @param params
+ * Subport configuration parameters
+ * @return
+ * 0 upon success, error code otherwise
+ */
+int
+rte_sched_subport_config(struct rte_sched_port *port,
+ uint32_t subport_id,
+ struct rte_sched_subport_params *params);
+
+/**
+ * Hierarchical scheduler pipe configuration
+ *
+ * @param port
+ * Handle to port scheduler instance
+ * @param subport_id
+ * Subport ID
+ * @param pipe_id
+ * Pipe ID within subport
+ * @param pipe_profile
+ * ID of port-level pre-configured pipe profile
+ * @return
+ * 0 upon success, error code otherwise
+ */
+int
+rte_sched_pipe_config(struct rte_sched_port *port,
+ uint32_t subport_id,
+ uint32_t pipe_id,
+ int32_t pipe_profile);
+
+/**
+ * Hierarchical scheduler memory footprint size per port
+ *
+ * @param params
+ * Port scheduler configuration parameter structure
+ * @return
+ * Memory footprint size in bytes upon success, 0 otherwise
+ */
+uint32_t
+rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params);
+
+/*
+ * Statistics
+ *
+ ***/
+
+/**
+ * Hierarchical scheduler subport statistics read
+ *
+ * @param port
+ * Handle to port scheduler instance
+ * @param subport_id
+ * Subport ID
+ * @param stats
+ * Pointer to pre-allocated subport statistics structure where the statistics
+ * counters should be stored
+ * @param tc_ov
+ * Pointer to pre-allocated 4-entry array where the oversubscription status for
+ * each of the 4 subport traffic classes should be stored.
+ * @return
+ * 0 upon success, error code otherwise
+ */
+int
+rte_sched_subport_read_stats(struct rte_sched_port *port,
+ uint32_t subport_id,
+ struct rte_sched_subport_stats *stats,
+ uint32_t *tc_ov);
+
+/**
+ * Hierarchical scheduler queue statistics read
+ *
+ * @param port
+ * Handle to port scheduler instance
+ * @param queue_id
+ * Queue ID within port scheduler
+ * @param stats
+ * Pointer to pre-allocated queue statistics structure where the statistics
+ * counters should be stored
+ * @param qlen
+ * Pointer to pre-allocated variable where the current queue length should be stored.
+ * @return
+ * 0 upon success, error code otherwise
+ */
+int
+rte_sched_queue_read_stats(struct rte_sched_port *port,
+ uint32_t queue_id,
+ struct rte_sched_queue_stats *stats,
+ uint16_t *qlen);
+
+/*
+ * Run-time
+ *
+ ***/
+
+/**
+ * Scheduler hierarchy path write to packet descriptor. Typically called by the
+ * packet classification stage.
+ *
+ * @param pkt
+ * Packet descriptor handle
+ * @param subport
+ * Subport ID
+ * @param pipe
+ * Pipe ID within subport
+ * @param traffic_class
+ * Traffic class ID within pipe (0 .. 3)
+ * @param queue
+ * Queue ID within pipe traffic class (0 .. 3)
+ */
+static inline void
+rte_sched_port_pkt_write(struct rte_mbuf *pkt,
+ uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue, enum rte_meter_color color)
+{
+ struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+
+ sched->color = (uint32_t) color;
+ sched->subport = subport;
+ sched->pipe = pipe;
+ sched->traffic_class = traffic_class;
+ sched->queue = queue;
+}
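+
+/*
+ * Illustrative sketch: a classifier marking a packet for subport 0, pipe 3,
+ * traffic class 1, queue 2 (all IDs hypothetical) would call:
+ *
+ *	rte_sched_port_pkt_write(pkt, 0, 3, 1, 2, e_RTE_METER_GREEN);
+ */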
+
+/**
+ * Scheduler hierarchy path read from packet descriptor (struct rte_mbuf). Typically
+ * called as part of the hierarchical scheduler enqueue operation. The subport,
+ * pipe, traffic class and queue parameters need to be pre-allocated by the caller.
+ *
+ * @param pkt
+ * Packet descriptor handle
+ * @param subport
+ * Subport ID
+ * @param pipe
+ * Pipe ID within subport
+ * @param traffic_class
+ * Traffic class ID within pipe (0 .. 3)
+ * @param queue
+ * Queue ID within pipe traffic class (0 .. 3)
+ *
+ */
+static inline void
+rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
+{
+ struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+
+ *subport = sched->subport;
+ *pipe = sched->pipe;
+ *traffic_class = sched->traffic_class;
+ *queue = sched->queue;
+}
+
+static inline enum rte_meter_color
+rte_sched_port_pkt_read_color(struct rte_mbuf *pkt)
+{
+ struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+
+ return (enum rte_meter_color) sched->color;
+}
+
+/**
+ * Hierarchical scheduler port enqueue. Writes up to n_pkts to port scheduler and
+ * returns the number of packets actually written. For each packet, the port scheduler
+ * queue to write the packet to is identified by reading the hierarchy path from the
+ * packet descriptor; if the queue is full or congested and the packet is not written
+ * to the queue, then the packet is automatically dropped without any action required
+ * from the caller.
+ *
+ * @param port
+ * Handle to port scheduler instance
+ * @param pkts
+ * Array storing the packet descriptor handles
+ * @param n_pkts
+ * Number of packets to enqueue from the pkts array into the port scheduler
+ * @return
+ * Number of packets successfully enqueued
+ */
+int
+rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
+
+/**
+ * Hierarchical scheduler port dequeue. Reads up to n_pkts from the port scheduler
+ * and stores them in the pkts array and returns the number of packets actually read.
+ * The pkts array needs to be pre-allocated by the caller with at least n_pkts entries.
+ *
+ * @param port
+ * Handle to port scheduler instance
+ * @param pkts
+ * Pre-allocated packet descriptor array where the packets dequeued from the port
+ * scheduler should be stored
+ * @param n_pkts
+ * Number of packets to dequeue from the port scheduler
+ * @return
+ * Number of packets successfully dequeued and placed in the pkts array
+ */
+int
+rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts);
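+
+/*
+ * Illustrative run-time sketch (burst size and variable names are
+ * hypothetical; rx_pkts/n_rx would come from the classification stage):
+ *
+ *	struct rte_mbuf *tx_pkts[32];
+ *	uint32_t n_enq, n_deq;
+ *
+ *	n_enq = rte_sched_port_enqueue(port, rx_pkts, n_rx);
+ *	n_deq = rte_sched_port_dequeue(port, tx_pkts, 32);
+ *
+ * The dequeued mbufs would then typically be handed to the NIC TX burst API.
+ */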
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_SCHED_H__ */
diff --git a/src/dpdk_lib18/librte_sched/rte_sched_common.h b/src/dpdk_lib18/librte_sched/rte_sched_common.h
new file mode 100755
index 00000000..8920adec
--- /dev/null
+++ b/src/dpdk_lib18/librte_sched/rte_sched_common.h
@@ -0,0 +1,129 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_SCHED_COMMON_H__
+#define __INCLUDE_RTE_SCHED_COMMON_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+
+#define __rte_aligned_16 __attribute__((__aligned__(16)))
+
+static inline uint32_t
+rte_sched_min_val_2_u32(uint32_t x, uint32_t y)
+{
+ return (x < y)? x : y;
+}
+
+#if 0
+static inline uint32_t
+rte_min_pos_4_u16(uint16_t *x)
+{
+ uint32_t pos0, pos1;
+
+ pos0 = (x[0] <= x[1])? 0 : 1;
+ pos1 = (x[2] <= x[3])? 2 : 3;
+
+ return (x[pos0] <= x[pos1])? pos0 : pos1;
+}
+
+#else
+
+/* simplified version to remove branches with CMOV instruction */
+static inline uint32_t
+rte_min_pos_4_u16(uint16_t *x)
+{
+ uint32_t pos0 = 0;
+ uint32_t pos1 = 2;
+
+ if (x[1] <= x[0]) pos0 = 1;
+ if (x[3] <= x[2]) pos1 = 3;
+ if (x[pos1] <= x[pos0]) pos0 = pos1;
+
+ return pos0;
+}
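+
+/*
+ * Example: for x = {7, 3, 9, 5} both the disabled variant above and this
+ * branchless version return 1, since x[1] == 3 is the smallest of the four
+ * values; on ties the branchless version favours the higher index.
+ */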
+
+#endif
+
+/*
+ * Compute the Greatest Common Divisor (GCD) of two numbers.
+ * This implementation uses Euclid's algorithm:
+ * gcd(a, 0) = a
+ * gcd(a, b) = gcd(b, a mod b)
+ *
+ */
+static inline uint32_t
+rte_get_gcd(uint32_t a, uint32_t b)
+{
+ uint32_t c;
+
+ if (a == 0)
+ return b;
+ if (b == 0)
+ return a;
+
+ if (a < b) {
+ c = a;
+ a = b;
+ b = c;
+ }
+
+ while (b != 0) {
+ c = a % b;
+ a = b;
+ b = c;
+ }
+
+ return a;
+}
+
+/*
+ * Compute the least common multiple (called LCD here) of two numbers.
+ * This implementation computes GCD first:
+ * LCD(a, b) = (a * b) / GCD(a, b)
+ *
+ */
+static inline uint32_t
+rte_get_lcd(uint32_t a, uint32_t b)
+{
+ return (a * b) / rte_get_gcd(a, b);
+}
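+
+/*
+ * Example: rte_get_gcd(12, 18) == 6, so rte_get_lcd(12, 18) == (12 * 18) / 6
+ * == 36. The scheduler uses these helpers, for instance, when reducing the
+ * WRR weights of a pipe profile to a common scale.
+ */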
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_SCHED_COMMON_H__ */
diff --git a/src/dpdk_lib18/librte_table/Makefile b/src/dpdk_lib18/librte_table/Makefile
new file mode 100755
index 00000000..dd684ccb
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/Makefile
@@ -0,0 +1,82 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_table.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_lpm.c
+SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_lpm_ipv6.c
+ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
+SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_acl.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_hash_key8.c
+SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_hash_key16.c
+SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_hash_key32.c
+SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_hash_ext.c
+SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_hash_lru.c
+SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_array.c
+SRCS-$(CONFIG_RTE_LIBRTE_TABLE) += rte_table_stub.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_lpm.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_lpm_ipv6.h
+ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
+SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_acl.h
+endif
+SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_hash.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_lru.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_array.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_stub.h
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) := lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_port
+DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_lpm
+ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
+DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_acl
+endif
+DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_hash
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_table/rte_lru.h b/src/dpdk_lib18/librte_table/rte_lru.h
new file mode 100755
index 00000000..e87e062d
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_lru.h
@@ -0,0 +1,213 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_LRU_H__
+#define __INCLUDE_RTE_LRU_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#ifdef __INTEL_COMPILER
+#define GCC_VERSION (0)
+#else
+#define GCC_VERSION (__GNUC__ * 10000+__GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
+#endif
+
+#ifndef RTE_TABLE_HASH_LRU_STRATEGY
+#ifdef __SSE4_2__
+#define RTE_TABLE_HASH_LRU_STRATEGY 2
+#else /* if no SSE, use simple scalar version */
+#define RTE_TABLE_HASH_LRU_STRATEGY 1
+#endif
+#endif
+
+#ifndef RTE_ARCH_X86_64
+#undef RTE_TABLE_HASH_LRU_STRATEGY
+#define RTE_TABLE_HASH_LRU_STRATEGY 1
+#endif
+
+#if (RTE_TABLE_HASH_LRU_STRATEGY < 0) || (RTE_TABLE_HASH_LRU_STRATEGY > 3)
+#error Invalid value for RTE_TABLE_HASH_LRU_STRATEGY
+#endif
+
+#if RTE_TABLE_HASH_LRU_STRATEGY == 0
+
+#define lru_init(bucket) \
+do \
+ bucket = bucket; \
+while (0)
+
+#define lru_pos(bucket) (bucket->lru_list & 0xFFFFLLU)
+
+#define lru_update(bucket, mru_val) \
+do { \
+ bucket = bucket; \
+ mru_val = mru_val; \
+} while (0)
+
+#elif RTE_TABLE_HASH_LRU_STRATEGY == 1
+
+#define lru_init(bucket) \
+do \
+ bucket->lru_list = 0x0000000100020003LLU; \
+while (0)
+
+#define lru_pos(bucket) (bucket->lru_list & 0xFFFFLLU)
+
+#define lru_update(bucket, mru_val) \
+do { \
+ uint64_t x, pos, x0, x1, x2, mask; \
+ \
+ x = bucket->lru_list; \
+ \
+ pos = 4; \
+ if ((x >> 48) == ((uint64_t) mru_val)) \
+ pos = 3; \
+ \
+ if (((x >> 32) & 0xFFFFLLU) == ((uint64_t) mru_val)) \
+ pos = 2; \
+ \
+ if (((x >> 16) & 0xFFFFLLU) == ((uint64_t) mru_val)) \
+ pos = 1; \
+ \
+ if ((x & 0xFFFFLLU) == ((uint64_t) mru_val)) \
+ pos = 0; \
+ \
+ \
+ pos <<= 4; \
+ mask = (~0LLU) << pos; \
+ x0 = x & (~mask); \
+ x1 = (x >> 16) & mask; \
+ x2 = (x << (48 - pos)) & (0xFFFFLLU << 48); \
+ x = x0 | x1 | x2; \
+ \
+ if (pos != 64) \
+ bucket->lru_list = x; \
+} while (0)
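+
+/*
+ * In this strategy the 64-bit lru_list packs four 16-bit entry indices,
+ * least recently used in bits 0..15 and most recently used in bits 48..63.
+ * For example, starting from the initial list 0x0000000100020003 (order 3,
+ * 2, 1, 0 from LRU to MRU), lru_update(bucket, 2) yields 0x0002000000010003:
+ * entry 2 becomes MRU while entry 3 remains the eviction candidate returned
+ * by lru_pos().
+ */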
+
+#elif RTE_TABLE_HASH_LRU_STRATEGY == 2
+
+#if GCC_VERSION > 40306
+#include <x86intrin.h>
+#else
+#include <emmintrin.h>
+#include <smmintrin.h>
+#include <xmmintrin.h>
+#endif
+
+#define lru_init(bucket) \
+do \
+ bucket->lru_list = 0x0000000100020003LLU; \
+while (0)
+
+#define lru_pos(bucket) (bucket->lru_list & 0xFFFFLLU)
+
+#define lru_update(bucket, mru_val) \
+do { \
+ /* set up the masks for all possible shuffles, depends on pos */\
+ static uint64_t masks[10] = { \
+ /* Shuffle order; Make Zero (see _mm_shuffle_epi8 manual) */\
+ 0x0100070605040302, 0x8080808080808080, \
+ 0x0302070605040100, 0x8080808080808080, \
+ 0x0504070603020100, 0x8080808080808080, \
+ 0x0706050403020100, 0x8080808080808080, \
+ 0x0706050403020100, 0x8080808080808080}; \
+ /* load up one register with repeats of mru_val */ \
+ uint64_t mru2 = mru_val; \
+ uint64_t mru3 = mru2 | (mru2 << 16); \
+ uint64_t lru = bucket->lru_list; \
+ /* XOR to cause the word we're looking for to go to zero */ \
+ uint64_t mru = lru ^ ((mru3 << 32) | mru3); \
+ __m128i c = _mm_cvtsi64_si128(mru); \
+ __m128i b = _mm_cvtsi64_si128(lru); \
+ /* Find the minimum value (first zero word, if it's in there) */\
+ __m128i d = _mm_minpos_epu16(c); \
+ /* Second word is the index to found word (first word is the value) */\
+ unsigned pos = _mm_extract_epi16(d, 1); \
+ /* move the recently used location to top of list */ \
+ __m128i k = _mm_shuffle_epi8(b, *((__m128i *) &masks[2 * pos]));\
+ /* Finally, update the original list with the reordered data */ \
+ bucket->lru_list = _mm_extract_epi64(k, 0); \
+ /* Phwew! */ \
+} while (0)
+
+#elif RTE_TABLE_HASH_LRU_STRATEGY == 3
+
+#if GCC_VERSION > 40306
+#include <x86intrin.h>
+#else
+#include <emmintrin.h>
+#include <smmintrin.h>
+#include <xmmintrin.h>
+#endif
+
+#define lru_init(bucket) \
+do \
+ bucket->lru_list = ~0LLU; \
+while (0)
+
+
+static inline int
+f_lru_pos(uint64_t lru_list)
+{
+ __m128i lst = _mm_set_epi64x((uint64_t)-1, lru_list);
+ __m128i min = _mm_minpos_epu16(lst);
+ return _mm_extract_epi16(min, 1);
+}
+#define lru_pos(bucket) f_lru_pos(bucket->lru_list)
+
+#define lru_update(bucket, mru_val) \
+do { \
+ const uint64_t orvals[] = {0xFFFFLLU, 0xFFFFLLU << 16, \
+ 0xFFFFLLU << 32, 0xFFFFLLU << 48, 0LLU}; \
+ const uint64_t decs[] = {0x1000100010001LLU, 0}; \
+ __m128i lru = _mm_cvtsi64_si128(bucket->lru_list); \
+ __m128i vdec = _mm_cvtsi64_si128(decs[mru_val>>2]); \
+ lru = _mm_subs_epu16(lru, vdec); \
+ bucket->lru_list = _mm_extract_epi64(lru, 0) | orvals[mru_val]; \
+} while (0)
+
+#else
+
+#error "Incorrect value for RTE_TABLE_HASH_LRU_STRATEGY"
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_table/rte_table.h b/src/dpdk_lib18/librte_table/rte_table.h
new file mode 100755
index 00000000..d57bc336
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table.h
@@ -0,0 +1,202 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_TABLE_H__
+#define __INCLUDE_RTE_TABLE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Table
+ *
+ * This tool is part of the Intel DPDK Packet Framework tool suite and provides
+ * a standard interface to implement different types of lookup tables for data
+ * plane processing.
+ *
+ * Virtually any search algorithm that can uniquely associate data to a lookup
+ * key can be fitted under this lookup table abstraction. For the flow table
+ * use-case, the lookup key is an n-tuple of packet fields that uniquely
+ * identifies a traffic flow, while data represents actions and action
+ * meta-data associated with the same traffic flow.
+ *
+ ***/
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+#include <rte_port.h>
+
+/**
+ * Lookup table create
+ *
+ * @param params
+ * Parameters for lookup table creation. The underlying data structure is
+ * different for each lookup table type.
+ * @param socket_id
+ * CPU socket ID (e.g. for memory allocation purpose)
+ * @param entry_size
+ * Data size of each lookup table entry (measured in bytes)
+ * @return
+ * Handle to lookup table instance
+ */
+typedef void* (*rte_table_op_create)(void *params, int socket_id,
+ uint32_t entry_size);
+
+/**
+ * Lookup table free
+ *
+ * @param table
+ * Handle to lookup table instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_table_op_free)(void *table);
+
+/**
+ * Lookup table entry add
+ *
+ * @param table
+ * Handle to lookup table instance
+ * @param key
+ * Lookup key
+ * @param entry
+ * Data to be associated with the current key. This parameter has to point to
+ * a valid memory buffer where the first entry_size bytes (table create
+ * parameter) are populated with the data.
+ * @param key_found
+ * After successful invocation, *key_found is set to a value different than 0
+ * if the current key is already present in the table and to 0 if not. This
+ * pointer has to be set to a valid memory location before the table entry add
+ * function is called.
+ * @param entry_ptr
+ * After successful invocation, *entry_ptr stores the handle to the table
+ * entry containing the data associated with the current key. This handle can
+ * be used to perform further read-write accesses to this entry. This handle
+ * is valid until the key is deleted from the table or the same key is
+ * re-added to the table, typically to associate it with different data. This
+ * pointer has to be set to a valid memory location before the function is
+ * called.
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_table_op_entry_add)(
+ void *table,
+ void *key,
+ void *entry,
+ int *key_found,
+ void **entry_ptr);
+
+/**
+ * Lookup table entry delete
+ *
+ * @param table
+ * Handle to lookup table instance
+ * @param key
+ * Lookup key
+ * @param key_found
+ * After successful invocation, *key_found is set to a value different than 0
+ * if the current key was present in the table before the delete operation
+ * was performed and to 0 if not. This pointer has to be set to a valid
+ * memory location before the table entry delete function is called.
+ * @param entry
+ * After successful invocation, if the key is found in the table (*key_found
+ * is different than 0 after function call is completed) and entry points to
+ * a valid buffer (entry is set to a value different than NULL before the
+ * function is called), then the first entry_size bytes (table create
+ * parameter) in *entry store a copy of table entry that contained the data
+ * associated with the current key before the key was deleted.
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_table_op_entry_delete)(
+ void *table,
+ void *key,
+ int *key_found,
+ void *entry);
+
+/**
+ * Lookup table lookup
+ *
+ * @param table
+ * Handle to lookup table instance
+ * @param pkts
+ * Burst of input packets specified as array of up to 64 pointers to struct
+ * rte_mbuf
+ * @param pkts_mask
+ * 64-bit bitmask specifying which packets in the input burst are valid. When
+ * pkts_mask bit n is set, then element n of pkts array is pointing to a
+ * valid packet. Otherwise, element n of pkts array does not point to a valid
+ * packet, therefore it will not be accessed.
+ * @param lookup_hit_mask
+ * Once the table lookup operation is completed, this 64-bit bitmask
+ * specifies which of the valid packets in the input burst resulted in lookup
+ * hit. For each valid input packet (pkts_mask bit n is set), the following
+ * are true on lookup hit: lookup_hit_mask bit n is set, element n of entries
+ * array is valid and it points to the lookup table entry that was hit. For
+ * each valid input packet (pkts_mask bit n is set), the following are true
+ * on lookup miss: lookup_hit_mask bit n is not set and element n of entries
+ * array is not valid.
+ * @param entries
+ * Once the table lookup operation is completed, this array provides the
+ * lookup table entries that were hit, as described above. It is required
+ * that this array is always pre-allocated by the caller of this function
+ * with exactly 64 elements. The implementation is allowed to speculatively
+ * modify the elements of this array, so elements marked as invalid in
+ * lookup_hit_mask once the table lookup operation is completed might have
+ * been modified by this function.
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_table_op_lookup)(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries);
+
+/** Lookup table interface defining the lookup table operation */
+struct rte_table_ops {
+ rte_table_op_create f_create; /**< Create */
+ rte_table_op_free f_free; /**< Free */
+ rte_table_op_entry_add f_add; /**< Entry add */
+ rte_table_op_entry_delete f_delete; /**< Entry delete */
+ rte_table_op_lookup f_lookup; /**< Lookup */
+};
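+
+/*
+ * Illustrative sketch of driving a table through this interface (error
+ * handling omitted; params, my_key, my_entry, pkts, pkts_mask and entries
+ * are hypothetical placeholders whose layout depends on the table type):
+ *
+ *	void *tbl = ops->f_create(&params, socket_id, entry_size);
+ *	int key_found;
+ *	void *entry_ptr;
+ *	uint64_t lookup_hit_mask;
+ *
+ *	ops->f_add(tbl, &my_key, &my_entry, &key_found, &entry_ptr);
+ *	ops->f_lookup(tbl, pkts, pkts_mask, &lookup_hit_mask, entries);
+ */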
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_table/rte_table_acl.c b/src/dpdk_lib18/librte_table/rte_table_acl.c
new file mode 100755
index 00000000..4416311b
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_acl.c
@@ -0,0 +1,491 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include "rte_table_acl.h"
+#include <rte_ether.h>
+
+struct rte_table_acl {
+ /* Low-level ACL table */
+ char name[2][RTE_ACL_NAMESIZE];
+ struct rte_acl_param acl_params; /* for creating low level acl table */
+ struct rte_acl_config cfg; /* Holds the field definitions (metadata) */
+ struct rte_acl_ctx *ctx;
+ uint32_t name_id;
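+ /* Two ACL context names are kept so that, on each rule add/delete, a new
+ low-level context can be built under the alternate name and swapped in
+ only after the build succeeds; name_id selects the name used for the
+ next build. */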
+
+ /* Input parameters */
+ uint32_t n_rules;
+ uint32_t entry_size;
+
+ /* Internal tables */
+ uint8_t *action_table;
+ struct rte_acl_rule **acl_rule_list; /* Array of pointers to rules */
+ uint8_t *acl_rule_memory; /* Memory to store the rules */
+
+ /* Memory to store the action table and stack of free entries */
+ uint8_t memory[0] __rte_cache_aligned;
+};
+
+
+static void *
+rte_table_acl_create(
+ void *params,
+ int socket_id,
+ uint32_t entry_size)
+{
+ struct rte_table_acl_params *p = (struct rte_table_acl_params *) params;
+ struct rte_table_acl *acl;
+ uint32_t action_table_size, acl_rule_list_size, acl_rule_memory_size;
+ uint32_t total_size;
+
+ RTE_BUILD_BUG_ON(((sizeof(struct rte_table_acl) % RTE_CACHE_LINE_SIZE)
+ != 0));
+
+ /* Check input parameters */
+ if (p == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid value for params\n", __func__);
+ return NULL;
+ }
+ if (p->name == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid value for name\n", __func__);
+ return NULL;
+ }
+ if (p->n_rules == 0) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid value for n_rules\n",
+ __func__);
+ return NULL;
+ }
+ if ((p->n_rule_fields == 0) ||
+ (p->n_rule_fields > RTE_ACL_MAX_FIELDS)) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid value for n_rule_fields\n",
+ __func__);
+ return NULL;
+ }
+
+ entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));
+
+ /* Memory allocation */
+ action_table_size = RTE_CACHE_LINE_ROUNDUP(p->n_rules * entry_size);
+ acl_rule_list_size =
+ RTE_CACHE_LINE_ROUNDUP(p->n_rules * sizeof(struct rte_acl_rule *));
+ acl_rule_memory_size = RTE_CACHE_LINE_ROUNDUP(p->n_rules *
+ RTE_ACL_RULE_SZ(p->n_rule_fields));
+ total_size = sizeof(struct rte_table_acl) + action_table_size +
+ acl_rule_list_size + acl_rule_memory_size;
+
+ acl = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (acl == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for ACL table\n",
+ __func__, total_size);
+ return NULL;
+ }
+
+ acl->action_table = &acl->memory[0];
+ acl->acl_rule_list =
+ (struct rte_acl_rule **) &acl->memory[action_table_size];
+ acl->acl_rule_memory = (uint8_t *)
+ &acl->memory[action_table_size + acl_rule_list_size];
+
+ /* Initialization of internal fields */
+ snprintf(acl->name[0], RTE_ACL_NAMESIZE, "%s_a", p->name);
+ snprintf(acl->name[1], RTE_ACL_NAMESIZE, "%s_b", p->name);
+ acl->name_id = 1;
+
+ acl->acl_params.name = acl->name[acl->name_id];
+ acl->acl_params.socket_id = socket_id;
+ acl->acl_params.rule_size = RTE_ACL_RULE_SZ(p->n_rule_fields);
+ acl->acl_params.max_rule_num = p->n_rules;
+
+ acl->cfg.num_categories = 1;
+ acl->cfg.num_fields = p->n_rule_fields;
+ memcpy(&acl->cfg.defs[0], &p->field_format[0],
+ p->n_rule_fields * sizeof(struct rte_acl_field_def));
+
+ acl->ctx = NULL;
+
+ acl->n_rules = p->n_rules;
+ acl->entry_size = entry_size;
+
+ return acl;
+}
+
+static int
+rte_table_acl_free(void *table)
+{
+ struct rte_table_acl *acl = (struct rte_table_acl *) table;
+
+ /* Check input parameters */
+ if (table == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Free previously allocated resources */
+ if (acl->ctx != NULL)
+ rte_acl_free(acl->ctx);
+
+ rte_free(acl);
+
+ return 0;
+}
+
+RTE_ACL_RULE_DEF(rte_pipeline_acl_rule, RTE_ACL_MAX_FIELDS);
+
+static int
+rte_table_acl_build(struct rte_table_acl *acl, struct rte_acl_ctx **acl_ctx)
+{
+ struct rte_acl_ctx *ctx = NULL;
+ uint32_t n_rules, i;
+ int status;
+
+ /* Create low level ACL table */
+ ctx = rte_acl_create(&acl->acl_params);
+ if (ctx == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: Cannot create low level ACL table\n",
+ __func__);
+ return -1;
+ }
+
+ /* Add rules to low level ACL table */
+ n_rules = 0;
+ for (i = 1; i < acl->n_rules; i++) {
+ if (acl->acl_rule_list[i] != NULL) {
+ status = rte_acl_add_rules(ctx, acl->acl_rule_list[i],
+ 1);
+ if (status != 0) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot add rule to low level ACL table\n",
+ __func__);
+ rte_acl_free(ctx);
+ return -1;
+ }
+
+ n_rules++;
+ }
+ }
+
+ if (n_rules == 0) {
+ rte_acl_free(ctx);
+ *acl_ctx = NULL;
+ return 0;
+ }
+
+ /* Build low level ACL table */
+ status = rte_acl_build(ctx, &acl->cfg);
+ if (status != 0) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot build the low level ACL table\n",
+ __func__);
+ rte_acl_free(ctx);
+ return -1;
+ }
+
+ rte_acl_dump(ctx);
+
+ *acl_ctx = ctx;
+ return 0;
+}
+
+static int
+rte_table_acl_entry_add(
+ void *table,
+ void *key,
+ void *entry,
+ int *key_found,
+ void **entry_ptr)
+{
+ struct rte_table_acl *acl = (struct rte_table_acl *) table;
+ struct rte_table_acl_rule_add_params *rule =
+ (struct rte_table_acl_rule_add_params *) key;
+ struct rte_pipeline_acl_rule acl_rule;
+ struct rte_acl_rule *rule_location;
+ struct rte_acl_ctx *ctx;
+ uint32_t free_pos, free_pos_valid, i;
+ int status;
+
+ /* Check input parameters */
+ if (table == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (key == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: key parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (entry == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (key_found == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: key_found parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (entry_ptr == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: entry_ptr parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (rule->priority > RTE_ACL_MAX_PRIORITY) {
+ RTE_LOG(ERR, TABLE, "%s: Priority is too high\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Setup rule data structure */
+ memset(&acl_rule, 0, sizeof(acl_rule));
+ acl_rule.data.category_mask = 1;
+ acl_rule.data.priority = RTE_ACL_MAX_PRIORITY - rule->priority;
+ acl_rule.data.userdata = 0; /* To be set up later */
+ memcpy(&acl_rule.field[0],
+ &rule->field_value[0],
+ acl->cfg.num_fields * sizeof(struct rte_acl_field));
+
+ /* Look to see if the rule exists already in the table */
+ free_pos = 0;
+ free_pos_valid = 0;
+ for (i = 1; i < acl->n_rules; i++) {
+ if (acl->acl_rule_list[i] == NULL) {
+ if (free_pos_valid == 0) {
+ free_pos = i;
+ free_pos_valid = 1;
+ }
+
+ continue;
+ }
+
+ /* Compare the key fields */
+ status = memcmp(&acl->acl_rule_list[i]->field[0],
+ &rule->field_value[0],
+ acl->cfg.num_fields * sizeof(struct rte_acl_field));
+
+ /* Rule found: update data associated with the rule */
+ if (status == 0) {
+ *key_found = 1;
+ *entry_ptr = &acl->memory[i * acl->entry_size];
+ memcpy(*entry_ptr, entry, acl->entry_size);
+
+ return 0;
+ }
+ }
+
+ /* Return if max rules */
+ if (free_pos_valid == 0) {
+ RTE_LOG(ERR, TABLE, "%s: Max number of rules reached\n",
+ __func__);
+ return -ENOSPC;
+ }
+
+ /* Add the new rule to the rule set */
+ acl_rule.data.userdata = free_pos;
+ rule_location = (struct rte_acl_rule *)
+ &acl->acl_rule_memory[free_pos * acl->acl_params.rule_size];
+ memcpy(rule_location, &acl_rule, acl->acl_params.rule_size);
+ acl->acl_rule_list[free_pos] = rule_location;
+
+ /* Build low level ACL table */
+ acl->name_id ^= 1;
+ acl->acl_params.name = acl->name[acl->name_id];
+ status = rte_table_acl_build(acl, &ctx);
+ if (status != 0) {
+ /* Roll back changes */
+ acl->acl_rule_list[free_pos] = NULL;
+ acl->name_id ^= 1;
+
+ return -EINVAL;
+ }
+
+ /* Commit changes */
+ if (acl->ctx != NULL)
+ rte_acl_free(acl->ctx);
+ acl->ctx = ctx;
+ *key_found = 0;
+ *entry_ptr = &acl->memory[free_pos * acl->entry_size];
+ memcpy(*entry_ptr, entry, acl->entry_size);
+
+ return 0;
+}
+
+static int
+rte_table_acl_entry_delete(
+ void *table,
+ void *key,
+ int *key_found,
+ void *entry)
+{
+ struct rte_table_acl *acl = (struct rte_table_acl *) table;
+ struct rte_table_acl_rule_delete_params *rule =
+ (struct rte_table_acl_rule_delete_params *) key;
+ struct rte_acl_rule *deleted_rule = NULL;
+ struct rte_acl_ctx *ctx;
+ uint32_t pos, pos_valid, i;
+ int status;
+
+ /* Check input parameters */
+ if (table == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (key == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: key parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (key_found == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: key_found parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Look for the rule in the table */
+ pos = 0;
+ pos_valid = 0;
+ for (i = 1; i < acl->n_rules; i++) {
+ if (acl->acl_rule_list[i] != NULL) {
+ /* Compare the key fields */
+ status = memcmp(&acl->acl_rule_list[i]->field[0],
+ &rule->field_value[0], acl->cfg.num_fields *
+ sizeof(struct rte_acl_field));
+
+ /* Rule found: remove from table */
+ if (status == 0) {
+ pos = i;
+ pos_valid = 1;
+
+ deleted_rule = acl->acl_rule_list[i];
+ acl->acl_rule_list[i] = NULL;
+ }
+ }
+ }
+
+ /* Return if rule not found */
+ if (pos_valid == 0) {
+ *key_found = 0;
+ return 0;
+ }
+
+ /* Build low level ACL table */
+ acl->name_id ^= 1;
+ acl->acl_params.name = acl->name[acl->name_id];
+ status = rte_table_acl_build(acl, &ctx);
+ if (status != 0) {
+ /* Roll back changes */
+ acl->acl_rule_list[pos] = deleted_rule;
+ acl->name_id ^= 1;
+
+ return -EINVAL;
+ }
+
+ /* Commit changes */
+ if (acl->ctx != NULL)
+ rte_acl_free(acl->ctx);
+
+ acl->ctx = ctx;
+ *key_found = 1;
+ if (entry != NULL)
+ memcpy(entry, &acl->memory[pos * acl->entry_size],
+ acl->entry_size);
+
+ return 0;
+}
+
+static int
+rte_table_acl_lookup(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_acl *acl = (struct rte_table_acl *) table;
+ const uint8_t *pkts_data[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint32_t results[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t pkts_out_mask;
+ uint32_t n_pkts, i, j;
+
+ /* Input conversion */
+ for (i = 0, j = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
+ __builtin_clzll(pkts_mask)); i++) {
+ uint64_t pkt_mask = 1LLU << i;
+
+ if (pkt_mask & pkts_mask) {
+ pkts_data[j] = rte_pktmbuf_mtod(pkts[i], uint8_t *);
+ j++;
+ }
+ }
+ n_pkts = j;
+
+ /* Low-level ACL table lookup */
+ if (acl->ctx != NULL)
+ rte_acl_classify(acl->ctx, pkts_data, results, n_pkts, 1);
+ else
+ n_pkts = 0;
+
+ /* Output conversion */
+ pkts_out_mask = 0;
+ for (i = 0; i < n_pkts; i++) {
+ uint32_t action_table_pos = results[i];
+ uint32_t pkt_pos = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pkt_pos;
+
+ pkts_mask &= ~pkt_mask;
+
+ if (action_table_pos != RTE_ACL_INVALID_USERDATA) {
+ pkts_out_mask |= pkt_mask;
+ entries[pkt_pos] = (void *)
+ &acl->memory[action_table_pos *
+ acl->entry_size];
+ rte_prefetch0(entries[pkt_pos]);
+ }
+ }
+
+ *lookup_hit_mask = pkts_out_mask;
+
+ return 0;
+}
+
+struct rte_table_ops rte_table_acl_ops = {
+ .f_create = rte_table_acl_create,
+ .f_free = rte_table_acl_free,
+ .f_add = rte_table_acl_entry_add,
+ .f_delete = rte_table_acl_entry_delete,
+ .f_lookup = rte_table_acl_lookup,
+};
diff --git a/src/dpdk_lib18/librte_table/rte_table_acl.h b/src/dpdk_lib18/librte_table/rte_table_acl.h
new file mode 100755
index 00000000..a9cc0328
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_acl.h
@@ -0,0 +1,95 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_TABLE_ACL_H__
+#define __INCLUDE_RTE_TABLE_ACL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Table ACL
+ *
+ * This table uses the Access Control List (ACL) algorithm to uniquely
+ * associate data to lookup keys.
+ *
+ * Use-cases: Firewall rule database, etc.
+ *
+ ***/
+
+#include <stdint.h>
+
+#include "rte_acl.h"
+
+#include "rte_table.h"
+
+/** ACL table parameters */
+struct rte_table_acl_params {
+ /** Name */
+ const char *name;
+
+ /** Maximum number of ACL rules in the table */
+ uint32_t n_rules;
+
+ /** Number of fields in the ACL rule specification */
+ uint32_t n_rule_fields;
+
+ /** Format specification of the fields of the ACL rule */
+ struct rte_acl_field_def field_format[RTE_ACL_MAX_FIELDS];
+};
+
+/** ACL rule specification for entry add operation */
+struct rte_table_acl_rule_add_params {
+ /** ACL rule priority, with 0 as the highest priority */
+ int32_t priority;
+
+ /** Values for the fields of the ACL rule to be added to the table */
+ struct rte_acl_field field_value[RTE_ACL_MAX_FIELDS];
+};
+
+/** ACL rule specification for entry delete operation */
+struct rte_table_acl_rule_delete_params {
+ /** Values for the fields of the ACL rule to be deleted from table */
+ struct rte_acl_field field_value[RTE_ACL_MAX_FIELDS];
+};
+
+/** ACL table operations */
+extern struct rte_table_ops rte_table_acl_ops;
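+
+/*
+ * Illustrative sketch (values hypothetical; field_format[] would then be
+ * filled with one struct rte_acl_field_def per rule field, e.g. to describe
+ * an IPv4 5-tuple):
+ *
+ *	struct rte_table_acl_params acl_params = {
+ *		.name = "acl_fw",
+ *		.n_rules = 1024,
+ *		.n_rule_fields = 5,
+ *	};
+ *
+ *	void *tbl = rte_table_acl_ops.f_create(&acl_params, 0, entry_size);
+ */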
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_table/rte_table_array.c b/src/dpdk_lib18/librte_table/rte_table_array.c
new file mode 100755
index 00000000..c0310700
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_array.c
@@ -0,0 +1,205 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include "rte_table_array.h"
+
+struct rte_table_array {
+ /* Input parameters */
+ uint32_t entry_size;
+ uint32_t n_entries;
+ uint32_t offset;
+
+ /* Internal fields */
+ uint32_t entry_pos_mask;
+
+ /* Internal table */
+ uint8_t array[0] __rte_cache_aligned;
+} __rte_cache_aligned;
+
+static void *
+rte_table_array_create(void *params, int socket_id, uint32_t entry_size)
+{
+ struct rte_table_array_params *p =
+ (struct rte_table_array_params *) params;
+ struct rte_table_array *t;
+ uint32_t total_cl_size, total_size;
+
+ /* Check input parameters */
+ if ((p == NULL) ||
+ (p->n_entries == 0) ||
+ (!rte_is_power_of_2(p->n_entries)) ||
+ ((p->offset & 0x3) != 0)) {
+ return NULL;
+ }
+
+ /* Memory allocation */
+ total_cl_size = (sizeof(struct rte_table_array) +
+ RTE_CACHE_LINE_SIZE) / RTE_CACHE_LINE_SIZE;
+ total_cl_size += (p->n_entries * entry_size +
+ RTE_CACHE_LINE_SIZE) / RTE_CACHE_LINE_SIZE;
+ total_size = total_cl_size * RTE_CACHE_LINE_SIZE;
+ t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ if (t == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for array table\n",
+ __func__, total_size);
+ return NULL;
+ }
+
+ /* Memory initialization */
+ t->entry_size = entry_size;
+ t->n_entries = p->n_entries;
+ t->offset = p->offset;
+ t->entry_pos_mask = t->n_entries - 1;
+
+ return t;
+}
+
+static int
+rte_table_array_free(void *table)
+{
+ struct rte_table_array *t = (struct rte_table_array *) table;
+
+ /* Check input parameters */
+ if (t == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Free previously allocated resources */
+ rte_free(t);
+
+ return 0;
+}
+
+static int
+rte_table_array_entry_add(
+ void *table,
+ void *key,
+ void *entry,
+ int *key_found,
+ void **entry_ptr)
+{
+ struct rte_table_array *t = (struct rte_table_array *) table;
+ struct rte_table_array_key *k = (struct rte_table_array_key *) key;
+ uint8_t *table_entry;
+
+ /* Check input parameters */
+ if (table == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (key == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: key parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (entry == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (key_found == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: key_found parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (entry_ptr == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: entry_ptr parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ table_entry = &t->array[k->pos * t->entry_size];
+ memcpy(table_entry, entry, t->entry_size);
+ *key_found = 1;
+ *entry_ptr = (void *) table_entry;
+
+ return 0;
+}
+
+static int
+rte_table_array_lookup(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_array *t = (struct rte_table_array *) table;
+
+ *lookup_hit_mask = pkts_mask;
+
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+ uint32_t entry_pos = RTE_MBUF_METADATA_UINT32(pkt,
+ t->offset) & t->entry_pos_mask;
+
+ entries[i] = (void *) &t->array[entry_pos *
+ t->entry_size];
+ }
+ } else {
+ for ( ; pkts_mask; ) {
+ uint32_t pkt_index = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pkt_index;
+ struct rte_mbuf *pkt = pkts[pkt_index];
+ uint32_t entry_pos = RTE_MBUF_METADATA_UINT32(pkt,
+ t->offset) & t->entry_pos_mask;
+
+ entries[pkt_index] = (void *) &t->array[entry_pos *
+ t->entry_size];
+ pkts_mask &= ~pkt_mask;
+ }
+ }
+
+ return 0;
+}
+
+struct rte_table_ops rte_table_array_ops = {
+ .f_create = rte_table_array_create,
+ .f_free = rte_table_array_free,
+ .f_add = rte_table_array_entry_add,
+ .f_delete = NULL,
+ .f_lookup = rte_table_array_lookup,
+};
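
As a usage illustration (not part of the patch), the array table above can be created and populated through the generic ops structure; the entry type, sizes and meta-data offset here are assumptions.

#include <stdint.h>
#include "rte_table_array.h"

struct my_entry {                      /* hypothetical per-entry user data */
	uint32_t out_port;
};

static void *
setup_array_table(int socket_id)
{
	struct rte_table_array_params p = {
		.n_entries = 256,      /* must be a power of two */
		.offset = 0,           /* meta-data offset of the 32-bit index (illustrative) */
	};
	struct rte_table_array_key k = { .pos = 7 };
	struct my_entry e = { .out_port = 3 };
	int key_found;
	void *entry_ptr;
	void *t;

	t = rte_table_array_ops.f_create(&p, socket_id, sizeof(e));
	if (t == NULL)
		return NULL;

	/* Every index is implicitly present, so the add operation simply
	 * copies the entry into slot 7 and reports key_found = 1. */
	rte_table_array_ops.f_add(t, &k, &e, &key_found, &entry_ptr);

	return t;
}
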
diff --git a/src/dpdk_lib18/librte_table/rte_table_array.h b/src/dpdk_lib18/librte_table/rte_table_array.h
new file mode 100755
index 00000000..9521119e
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_array.h
@@ -0,0 +1,76 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_TABLE_ARRAY_H__
+#define __INCLUDE_RTE_TABLE_ARRAY_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Table Array
+ *
+ * Simple array indexing. Lookup key is the array entry index.
+ *
+ ***/
+
+#include <stdint.h>
+
+#include "rte_table.h"
+
+/** Array table parameters */
+struct rte_table_array_params {
+ /** Number of array entries. Has to be a power of two. */
+ uint32_t n_entries;
+
+ /** Byte offset within input packet meta-data where lookup key (i.e. the
+ array entry index) is located. */
+ uint32_t offset;
+};
+
+/** Array table key format */
+struct rte_table_array_key {
+ /** Array entry index */
+ uint32_t pos;
+};
+
+/** Array table operations */
+extern struct rte_table_ops rte_table_array_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
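
The lookup side, sketched below for illustration only (table handle and packet burst assumed to come from the caller), always reports a hit for every valid packet and returns one entry pointer per packet.

#include <stdint.h>
#include <rte_mbuf.h>
#include "rte_table_array.h"

static void
array_lookup_burst_example(void *t, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	/* Bit i of pkts_mask marks pkts[i] as valid; n_pkts <= 64 assumed. */
	uint64_t pkts_mask = (n_pkts == 64) ?
		UINT64_MAX : ((1LLU << n_pkts) - 1);
	uint64_t hit_mask = 0;
	void *entries[64];

	/* entries[i] points at the user data of the slot selected by the
	 * per-packet index read from the configured meta-data offset. */
	rte_table_array_ops.f_lookup(t, pkts, pkts_mask, &hit_mask, entries);
}
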
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash.h b/src/dpdk_lib18/librte_table/rte_table_hash.h
new file mode 100755
index 00000000..9181942a
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_hash.h
@@ -0,0 +1,350 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_TABLE_HASH_H__
+#define __INCLUDE_RTE_TABLE_HASH_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Table Hash
+ *
+ * These tables use the exact match criterion to uniquely associate data to
+ * lookup keys.
+ *
+ * Use-cases: Flow classification table, Address Resolution Protocol (ARP) table
+ *
+ * Hash table types:
+ * 1. Entry add strategy on bucket full:
+ * a. Least Recently Used (LRU): One of the existing keys in the bucket is
+ * deleted and the new key is added in its place. The number of keys in
+ * each bucket never grows bigger than 4. The logic to pick the key to
+ * be dropped from the bucket is LRU. The hash table lookup operation
+ * maintains the order in which the keys in the same bucket are hit, so
+ * every time a key is hit, it becomes the new Most Recently Used (MRU)
+ * key, i.e. the most unlikely candidate for drop. When a key is added
+ * to the bucket, it also becomes the new MRU key. When a key needs to
+ * be picked and dropped, the most likely candidate for drop, i.e. the
+ * current LRU key, is always picked. The LRU logic requires maintaining
+ * specific data structures per each bucket.
+ * b. Extendible bucket (ext): The bucket is extended with space for 4 more
+ * keys. This is done by allocating additional memory at table init time,
+ * which is used to create a pool of free keys (the size of this pool is
+ * configurable and always a multiple of 4). On key add operation, the
+ * allocation of a group of 4 keys only happens successfully within the
+ * limit of free keys, otherwise the key add operation fails. On key
+ * delete operation, a group of 4 keys is freed back to the pool of free
+ * keys when the key to be deleted is the only key that was used within
+ * its group of 4 keys at that time. On key lookup operation, if the
+ * current bucket is in extended state and a match is not found in the
+ * first group of 4 keys, the search continues beyond the first group of
+ * 4 keys, potentially until all keys in this bucket are examined. The
+ * extendible bucket logic requires maintaining specific data structures
+ * per table and per each bucket.
+ * 2. Key signature computation:
+ * a. Pre-computed key signature: The key lookup operation is split between
+ * two CPU cores. The first CPU core (typically the CPU core performing
+ * packet RX) extracts the key from the input packet, computes the key
+ * signature and saves both the key and the key signature in the packet
+ * buffer as packet meta-data. The second CPU core reads both the key and
+ * the key signature from the packet meta-data and performs the bucket
+ * search step of the key lookup operation.
+ * b. Key signature computed on lookup (do-sig): The same CPU core reads
+ * the key from the packet meta-data, uses it to compute the key
+ * signature and also performs the bucket search step of the key lookup
+ * operation.
+ * 3. Key size:
+ * a. Configurable key size
+ * b. Single key size (8-byte, 16-byte or 32-byte key size)
+ *
+ ***/
+#include <stdint.h>
+
+#include "rte_table.h"
+
+/** Hash function */
+typedef uint64_t (*rte_table_hash_op_hash)(
+ void *key,
+ uint32_t key_size,
+ uint64_t seed);
+
+/**
+ * Hash tables with configurable key size
+ *
+ */
+/** Extendible bucket hash table parameters */
+struct rte_table_hash_ext_params {
+ /** Key size (number of bytes) */
+ uint32_t key_size;
+
+ /** Maximum number of keys */
+ uint32_t n_keys;
+
+ /** Number of hash table buckets. Each bucket stores up to 4 keys. */
+ uint32_t n_buckets;
+
+ /** Number of hash table bucket extensions. Each bucket extension has
+ space for 4 keys and each bucket can have 0, 1 or more extensions. */
+ uint32_t n_buckets_ext;
+
+ /** Hash function */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed value for the hash function */
+ uint64_t seed;
+
+ /** Byte offset within packet meta-data where the 4-byte key signature
+ is located. Valid for pre-computed key signature tables, ignored for
+ do-sig tables. */
+ uint32_t signature_offset;
+
+ /** Byte offset within packet meta-data where the key is located */
+ uint32_t key_offset;
+};
+
+/** Extendible bucket hash table operations for pre-computed key signature */
+extern struct rte_table_ops rte_table_hash_ext_ops;
+
+/** Extendible bucket hash table operations for key signature computed on
+ lookup ("do-sig") */
+extern struct rte_table_ops rte_table_hash_ext_dosig_ops;
+
+/** LRU hash table parameters */
+struct rte_table_hash_lru_params {
+ /** Key size (number of bytes) */
+ uint32_t key_size;
+
+ /** Maximum number of keys */
+ uint32_t n_keys;
+
+ /** Number of hash table buckets. Each bucket stores up to 4 keys. */
+ uint32_t n_buckets;
+
+ /** Hash function */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed value for the hash function */
+ uint64_t seed;
+
+ /** Byte offset within packet meta-data where the 4-byte key signature
+ is located. Valid for pre-computed key signature tables, ignored for
+ do-sig tables. */
+ uint32_t signature_offset;
+
+ /** Byte offset within packet meta-data where the key is located */
+ uint32_t key_offset;
+};
+
+/** LRU hash table operations for pre-computed key signature */
+extern struct rte_table_ops rte_table_hash_lru_ops;
+
+/** LRU hash table operations for key signature computed on lookup ("do-sig") */
+extern struct rte_table_ops rte_table_hash_lru_dosig_ops;
+
+/**
+ * 8-byte key hash tables
+ *
+ */
+/** LRU hash table parameters */
+struct rte_table_hash_key8_lru_params {
+ /** Maximum number of entries (and keys) in the table */
+ uint32_t n_entries;
+
+ /** Hash function */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed for the hash function */
+ uint64_t seed;
+
+ /** Byte offset within packet meta-data where the 4-byte key signature
+ is located. Valid for pre-computed key signature tables, ignored for
+ do-sig tables. */
+ uint32_t signature_offset;
+
+ /** Byte offset within packet meta-data where the key is located */
+ uint32_t key_offset;
+};
+
+/** LRU hash table operations for pre-computed key signature */
+extern struct rte_table_ops rte_table_hash_key8_lru_ops;
+
+/** LRU hash table operations for key signature computed on lookup ("do-sig") */
+extern struct rte_table_ops rte_table_hash_key8_lru_dosig_ops;
+
+/** Extendible bucket hash table parameters */
+struct rte_table_hash_key8_ext_params {
+ /** Maximum number of entries (and keys) in the table */
+ uint32_t n_entries;
+
+ /** Number of entries (and keys) for hash table bucket extensions. Each
+ bucket is extended in increments of 4 keys. */
+ uint32_t n_entries_ext;
+
+ /** Hash function */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed for the hash function */
+ uint64_t seed;
+
+ /** Byte offset within packet meta-data where the 4-byte key signature
+ is located. Valid for pre-computed key signature tables, ignored for
+ do-sig tables. */
+ uint32_t signature_offset;
+
+ /** Byte offset within packet meta-data where the key is located */
+ uint32_t key_offset;
+};
+
+/** Extendible bucket hash table operations for pre-computed key signature */
+extern struct rte_table_ops rte_table_hash_key8_ext_ops;
+
+/** Extendible bucket hash table operations for key signature computed on
+ lookup ("do-sig") */
+extern struct rte_table_ops rte_table_hash_key8_ext_dosig_ops;
+
+/**
+ * 16-byte key hash tables
+ *
+ */
+/** LRU hash table parameters */
+struct rte_table_hash_key16_lru_params {
+ /** Maximum number of entries (and keys) in the table */
+ uint32_t n_entries;
+
+ /** Hash function */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed for the hash function */
+ uint64_t seed;
+
+ /** Byte offset within packet meta-data where the 4-byte key signature
+ is located. Valid for pre-computed key signature tables, ignored for
+ do-sig tables. */
+ uint32_t signature_offset;
+
+ /** Byte offset within packet meta-data where the key is located */
+ uint32_t key_offset;
+};
+
+/** LRU hash table operations for pre-computed key signature */
+extern struct rte_table_ops rte_table_hash_key16_lru_ops;
+
+/** Extendible bucket hash table parameters */
+struct rte_table_hash_key16_ext_params {
+ /** Maximum number of entries (and keys) in the table */
+ uint32_t n_entries;
+
+ /** Number of entries (and keys) for hash table bucket extensions. Each
+ bucket is extended in increments of 4 keys. */
+ uint32_t n_entries_ext;
+
+ /** Hash function */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed for the hash function */
+ uint64_t seed;
+
+ /** Byte offset within packet meta-data where the 4-byte key signature
+ is located. Valid for pre-computed key signature tables, ignored for
+ do-sig tables. */
+ uint32_t signature_offset;
+
+ /** Byte offset within packet meta-data where the key is located */
+ uint32_t key_offset;
+};
+
+/** Extendible bucket operations for pre-computed key signature */
+extern struct rte_table_ops rte_table_hash_key16_ext_ops;
+
+/**
+ * 32-byte key hash tables
+ *
+ */
+/** LRU hash table parameters */
+struct rte_table_hash_key32_lru_params {
+ /** Maximum number of entries (and keys) in the table */
+ uint32_t n_entries;
+
+ /** Hash function */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed for the hash function */
+ uint64_t seed;
+
+ /** Byte offset within packet meta-data where the 4-byte key signature
+ is located. Valid for pre-computed key signature tables, ignored for
+ do-sig tables. */
+ uint32_t signature_offset;
+
+ /** Byte offset within packet meta-data where the key is located */
+ uint32_t key_offset;
+};
+
+/** LRU hash table operations for pre-computed key signature */
+extern struct rte_table_ops rte_table_hash_key32_lru_ops;
+
+/** Extendible bucket hash table parameters */
+struct rte_table_hash_key32_ext_params {
+ /** Maximum number of entries (and keys) in the table */
+ uint32_t n_entries;
+
+ /** Number of entries (and keys) for hash table bucket extensions. Each
+ bucket is extended in increments of 4 keys. */
+ uint32_t n_entries_ext;
+
+ /** Hash function */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed for the hash function */
+ uint64_t seed;
+
+ /** Byte offset within packet meta-data where the 4-byte key signature
+ is located. Valid for pre-computed key signature tables, ignored for
+ do-sig tables. */
+ uint32_t signature_offset;
+
+ /** Byte offset within packet meta-data where the key is located */
+ uint32_t key_offset;
+};
+
+/** Extendible bucket hash table operations */
+extern struct rte_table_ops rte_table_hash_key32_ext_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
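
To illustrate the parameter structures above, a hedged sketch of configuring a 16-byte-key LRU table follows (not part of the patch): the hash callback is a deliberately weak placeholder, and the meta-data offsets are assumptions about the application's packet layout.

#include <stdint.h>
#include "rte_table_hash.h"

/* Placeholder callback matching rte_table_hash_op_hash; a real deployment
 * would plug in a stronger (e.g. CRC-based) hash function. */
static uint64_t
toy_hash16(void *key, uint32_t key_size, uint64_t seed)
{
	uint64_t *k = key;

	(void) key_size;               /* fixed 16-byte key assumed */
	return k[0] ^ k[1] ^ seed;
}

static void *
setup_key16_lru(int socket_id, uint32_t entry_size)
{
	struct rte_table_hash_key16_lru_params p = {
		.n_entries = 1 << 16,          /* capacity target */
		.f_hash = toy_hash16,
		.seed = 0,
		.signature_offset = 0,         /* 4-byte aligned (illustrative layout) */
		.key_offset = 32,              /* 8-byte aligned (illustrative layout) */
	};

	return rte_table_hash_key16_lru_ops.f_create(&p, socket_id, entry_size);
}
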
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash_ext.c b/src/dpdk_lib18/librte_table/rte_table_hash_ext.c
new file mode 100755
index 00000000..66e416bb
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_hash_ext.c
@@ -0,0 +1,1122 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include "rte_table_hash.h"
+
+#define KEYS_PER_BUCKET 4
+
+struct bucket {
+ union {
+ uintptr_t next;
+ uint64_t lru_list;
+ };
+ uint16_t sig[KEYS_PER_BUCKET];
+ uint32_t key_pos[KEYS_PER_BUCKET];
+};
+
+#define BUCKET_NEXT(bucket) \
+ ((void *) ((bucket)->next & (~1LU)))
+
+#define BUCKET_NEXT_VALID(bucket) \
+ ((bucket)->next & 1LU)
+
+#define BUCKET_NEXT_SET(bucket, bucket_next) \
+do \
+ (bucket)->next = (((uintptr_t) ((void *) (bucket_next))) | 1LU);\
+while (0)
+
+#define BUCKET_NEXT_SET_NULL(bucket) \
+do \
+ (bucket)->next = 0; \
+while (0)
+
+#define BUCKET_NEXT_COPY(bucket, bucket2) \
+do \
+ (bucket)->next = (bucket2)->next; \
+while (0)
+
+struct grinder {
+ struct bucket *bkt;
+ uint64_t sig;
+ uint64_t match;
+ uint32_t key_index;
+};
+
+struct rte_table_hash {
+ /* Input parameters */
+ uint32_t key_size;
+ uint32_t entry_size;
+ uint32_t n_keys;
+ uint32_t n_buckets;
+ uint32_t n_buckets_ext;
+ rte_table_hash_op_hash f_hash;
+ uint64_t seed;
+ uint32_t signature_offset;
+ uint32_t key_offset;
+
+ /* Internal */
+ uint64_t bucket_mask;
+ uint32_t key_size_shl;
+ uint32_t data_size_shl;
+ uint32_t key_stack_tos;
+ uint32_t bkt_ext_stack_tos;
+
+ /* Grinder */
+ struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];
+
+ /* Tables */
+ struct bucket *buckets;
+ struct bucket *buckets_ext;
+ uint8_t *key_mem;
+ uint8_t *data_mem;
+ uint32_t *key_stack;
+ uint32_t *bkt_ext_stack;
+
+ /* Table memory */
+ uint8_t memory[0] __rte_cache_aligned;
+};
+
+static int
+check_params_create(struct rte_table_hash_ext_params *params)
+{
+ uint32_t n_buckets_min;
+
+ /* key_size */
+ if ((params->key_size == 0) ||
+ (!rte_is_power_of_2(params->key_size))) {
+ RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_keys */
+ if ((params->n_keys == 0) ||
+ (!rte_is_power_of_2(params->n_keys))) {
+ RTE_LOG(ERR, TABLE, "%s: n_keys invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_buckets */
+	n_buckets_min = (params->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET;
+	if ((params->n_buckets == 0) ||
+		(!rte_is_power_of_2(params->n_buckets)) ||
+		(params->n_buckets < n_buckets_min)) {
+ RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* f_hash */
+ if (params->f_hash == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: f_hash invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* signature offset */
+ if ((params->signature_offset & 0x3) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: signature_offset invalid value\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* key offset */
+ if ((params->key_offset & 0x7) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: key_offset invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void *
+rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
+{
+ struct rte_table_hash_ext_params *p =
+ (struct rte_table_hash_ext_params *) params;
+ struct rte_table_hash *t;
+ uint32_t total_size, table_meta_sz;
+ uint32_t bucket_sz, bucket_ext_sz, key_sz;
+ uint32_t key_stack_sz, bkt_ext_stack_sz, data_sz;
+ uint32_t bucket_offset, bucket_ext_offset, key_offset;
+ uint32_t key_stack_offset, bkt_ext_stack_offset, data_offset;
+ uint32_t i;
+
+ /* Check input parameters */
+ if ((check_params_create(p) != 0) ||
+ (!rte_is_power_of_2(entry_size)) ||
+ ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+ (sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2)))
+ return NULL;
+
+ /* Memory allocation */
+ table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
+ bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
+ bucket_ext_sz =
+ RTE_CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(struct bucket));
+ key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
+ key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
+ bkt_ext_stack_sz =
+ RTE_CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(uint32_t));
+ data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
+ total_size = table_meta_sz + bucket_sz + bucket_ext_sz + key_sz +
+ key_stack_sz + bkt_ext_stack_sz + data_sz;
+
+ t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ if (t == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for hash table\n",
+ __func__, total_size);
+ return NULL;
+ }
+ RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table memory footprint is "
+ "%u bytes\n", __func__, p->key_size, total_size);
+
+ /* Memory initialization */
+ t->key_size = p->key_size;
+ t->entry_size = entry_size;
+ t->n_keys = p->n_keys;
+ t->n_buckets = p->n_buckets;
+ t->n_buckets_ext = p->n_buckets_ext;
+ t->f_hash = p->f_hash;
+ t->seed = p->seed;
+ t->signature_offset = p->signature_offset;
+ t->key_offset = p->key_offset;
+
+ /* Internal */
+ t->bucket_mask = t->n_buckets - 1;
+ t->key_size_shl = __builtin_ctzl(p->key_size);
+ t->data_size_shl = __builtin_ctzl(entry_size);
+
+ /* Tables */
+ bucket_offset = 0;
+ bucket_ext_offset = bucket_offset + bucket_sz;
+ key_offset = bucket_ext_offset + bucket_ext_sz;
+ key_stack_offset = key_offset + key_sz;
+ bkt_ext_stack_offset = key_stack_offset + key_stack_sz;
+ data_offset = bkt_ext_stack_offset + bkt_ext_stack_sz;
+
+ t->buckets = (struct bucket *) &t->memory[bucket_offset];
+ t->buckets_ext = (struct bucket *) &t->memory[bucket_ext_offset];
+ t->key_mem = &t->memory[key_offset];
+ t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
+ t->bkt_ext_stack = (uint32_t *) &t->memory[bkt_ext_stack_offset];
+ t->data_mem = &t->memory[data_offset];
+
+ /* Key stack */
+ for (i = 0; i < t->n_keys; i++)
+ t->key_stack[i] = t->n_keys - 1 - i;
+ t->key_stack_tos = t->n_keys;
+
+ /* Bucket ext stack */
+ for (i = 0; i < t->n_buckets_ext; i++)
+ t->bkt_ext_stack[i] = t->n_buckets_ext - 1 - i;
+ t->bkt_ext_stack_tos = t->n_buckets_ext;
+
+ return t;
+}
+
+static int
+rte_table_hash_ext_free(void *table)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+
+ /* Check input parameters */
+ if (t == NULL)
+ return -EINVAL;
+
+ rte_free(t);
+ return 0;
+}
+
+static int
+rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
+ int *key_found, void **entry_ptr)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct bucket *bkt0, *bkt, *bkt_prev;
+ uint64_t sig;
+ uint32_t bkt_index, i;
+
+ sig = t->f_hash(key, t->key_size, t->seed);
+ bkt_index = sig & t->bucket_mask;
+ bkt0 = &t->buckets[bkt_index];
+ sig = (sig >> 16) | 1LLU;
+
+ /* Key is present in the bucket */
+ for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
+ for (i = 0; i < KEYS_PER_BUCKET; i++) {
+ uint64_t bkt_sig = (uint64_t) bkt->sig[i];
+ uint32_t bkt_key_index = bkt->key_pos[i];
+ uint8_t *bkt_key =
+ &t->key_mem[bkt_key_index << t->key_size_shl];
+
+ if ((sig == bkt_sig) && (memcmp(key, bkt_key,
+ t->key_size) == 0)) {
+ uint8_t *data = &t->data_mem[bkt_key_index <<
+ t->data_size_shl];
+
+ memcpy(data, entry, t->entry_size);
+ *key_found = 1;
+ *entry_ptr = (void *) data;
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
+ bkt = BUCKET_NEXT(bkt))
+ for (i = 0; i < KEYS_PER_BUCKET; i++) {
+ uint64_t bkt_sig = (uint64_t) bkt->sig[i];
+
+ if (bkt_sig == 0) {
+ uint32_t bkt_key_index;
+ uint8_t *bkt_key, *data;
+
+ /* Allocate new key */
+ if (t->key_stack_tos == 0) /* No free keys */
+ return -ENOSPC;
+
+ bkt_key_index = t->key_stack[
+ --t->key_stack_tos];
+
+ /* Install new key */
+ bkt_key = &t->key_mem[bkt_key_index <<
+ t->key_size_shl];
+ data = &t->data_mem[bkt_key_index <<
+ t->data_size_shl];
+
+ bkt->sig[i] = (uint16_t) sig;
+ bkt->key_pos[i] = bkt_key_index;
+ memcpy(bkt_key, key, t->key_size);
+ memcpy(data, entry, t->entry_size);
+
+ *key_found = 0;
+ *entry_ptr = (void *) data;
+ return 0;
+ }
+ }
+
+ /* Bucket full: extend bucket */
+ if ((t->bkt_ext_stack_tos > 0) && (t->key_stack_tos > 0)) {
+ uint32_t bkt_key_index;
+ uint8_t *bkt_key, *data;
+
+ /* Allocate new bucket ext */
+ bkt_index = t->bkt_ext_stack[--t->bkt_ext_stack_tos];
+ bkt = &t->buckets_ext[bkt_index];
+
+ /* Chain the new bucket ext */
+ BUCKET_NEXT_SET(bkt_prev, bkt);
+ BUCKET_NEXT_SET_NULL(bkt);
+
+ /* Allocate new key */
+ bkt_key_index = t->key_stack[--t->key_stack_tos];
+ bkt_key = &t->key_mem[bkt_key_index << t->key_size_shl];
+
+ data = &t->data_mem[bkt_key_index << t->data_size_shl];
+
+ /* Install new key into bucket */
+ bkt->sig[0] = (uint16_t) sig;
+ bkt->key_pos[0] = bkt_key_index;
+ memcpy(bkt_key, key, t->key_size);
+ memcpy(data, entry, t->entry_size);
+
+ *key_found = 0;
+ *entry_ptr = (void *) data;
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static int
+rte_table_hash_ext_entry_delete(void *table, void *key, int *key_found,
+void *entry)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct bucket *bkt0, *bkt, *bkt_prev;
+ uint64_t sig;
+ uint32_t bkt_index, i;
+
+ sig = t->f_hash(key, t->key_size, t->seed);
+ bkt_index = sig & t->bucket_mask;
+ bkt0 = &t->buckets[bkt_index];
+ sig = (sig >> 16) | 1LLU;
+
+ /* Key is present in the bucket */
+ for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
+ bkt = BUCKET_NEXT(bkt))
+ for (i = 0; i < KEYS_PER_BUCKET; i++) {
+ uint64_t bkt_sig = (uint64_t) bkt->sig[i];
+ uint32_t bkt_key_index = bkt->key_pos[i];
+ uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
+ t->key_size_shl];
+
+ if ((sig == bkt_sig) && (memcmp(key, bkt_key,
+ t->key_size) == 0)) {
+ uint8_t *data = &t->data_mem[bkt_key_index <<
+ t->data_size_shl];
+
+ /* Uninstall key from bucket */
+ bkt->sig[i] = 0;
+ *key_found = 1;
+ if (entry)
+ memcpy(entry, data, t->entry_size);
+
+ /* Free key */
+ t->key_stack[t->key_stack_tos++] =
+ bkt_key_index;
+
+				/* Check if bucket is unused */
+ if ((bkt_prev != NULL) &&
+ (bkt->sig[0] == 0) && (bkt->sig[1] == 0) &&
+ (bkt->sig[2] == 0) && (bkt->sig[3] == 0)) {
+ /* Unchain bucket */
+ BUCKET_NEXT_COPY(bkt_prev, bkt);
+
+ /* Clear bucket */
+ memset(bkt, 0, sizeof(struct bucket));
+
+ /* Free bucket back to buckets ext */
+ bkt_index = bkt - t->buckets_ext;
+ t->bkt_ext_stack[t->bkt_ext_stack_tos++]
+ = bkt_index;
+ }
+
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ *key_found = 0;
+ return 0;
+}
+
+static int rte_table_hash_ext_lookup_unoptimized(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries,
+ int dosig)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+ uint64_t pkts_mask_out = 0;
+
+ for ( ; pkts_mask; ) {
+ struct bucket *bkt0, *bkt;
+ struct rte_mbuf *pkt;
+ uint8_t *key;
+ uint64_t pkt_mask, sig;
+ uint32_t pkt_index, bkt_index, i;
+
+ pkt_index = __builtin_ctzll(pkts_mask);
+ pkt_mask = 1LLU << pkt_index;
+ pkts_mask &= ~pkt_mask;
+
+ pkt = pkts[pkt_index];
+ key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
+ if (dosig)
+ sig = (uint64_t) t->f_hash(key, t->key_size, t->seed);
+ else
+ sig = RTE_MBUF_METADATA_UINT32(pkt,
+ t->signature_offset);
+
+ bkt_index = sig & t->bucket_mask;
+ bkt0 = &t->buckets[bkt_index];
+ sig = (sig >> 16) | 1LLU;
+
+ /* Key is present in the bucket */
+ for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
+ for (i = 0; i < KEYS_PER_BUCKET; i++) {
+ uint64_t bkt_sig = (uint64_t) bkt->sig[i];
+ uint32_t bkt_key_index = bkt->key_pos[i];
+ uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
+ t->key_size_shl];
+
+ if ((sig == bkt_sig) && (memcmp(key, bkt_key,
+ t->key_size) == 0)) {
+ uint8_t *data = &t->data_mem[
+ bkt_key_index << t->data_size_shl];
+
+ pkts_mask_out |= pkt_mask;
+ entries[pkt_index] = (void *) data;
+ break;
+ }
+ }
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+}
+
+/***
+ *
+ * mask = match bitmask
+ * match = at least one match
+ * match_many = more than one match
+ * match_pos = position of first match
+ *
+ *----------------------------------------
+ * mask match match_many match_pos
+ *----------------------------------------
+ * 0000 0 0 00
+ * 0001 1 0 00
+ * 0010 1 0 01
+ * 0011 1 1 00
+ *----------------------------------------
+ * 0100 1 0 10
+ * 0101 1 1 00
+ * 0110 1 1 01
+ * 0111 1 1 00
+ *----------------------------------------
+ * 1000 1 0 11
+ * 1001 1 1 00
+ * 1010 1 1 01
+ * 1011 1 1 00
+ *----------------------------------------
+ * 1100 1 1 10
+ * 1101 1 1 00
+ * 1110 1 1 01
+ * 1111 1 1 00
+ *----------------------------------------
+ *
+ * match = 1111_1111_1111_1110
+ * match_many = 1111_1110_1110_1000
+ * match_pos = 0001_0010_0001_0011__0001_0010_0001_0000
+ *
+ * match = 0xFFFELLU
+ * match_many = 0xFEE8LLU
+ * match_pos = 0x12131210LLU
+ *
+ ***/
+
+#define LUT_MATCH 0xFFFELLU
+#define LUT_MATCH_MANY 0xFEE8LLU
+#define LUT_MATCH_POS 0x12131210LLU
+
+#define lookup_cmp_sig(mbuf_sig, bucket, match, match_many, match_pos) \
+{ \
+ uint64_t bucket_sig[4], mask[4], mask_all; \
+ \
+ bucket_sig[0] = bucket->sig[0]; \
+ bucket_sig[1] = bucket->sig[1]; \
+ bucket_sig[2] = bucket->sig[2]; \
+ bucket_sig[3] = bucket->sig[3]; \
+ \
+ bucket_sig[0] ^= mbuf_sig; \
+ bucket_sig[1] ^= mbuf_sig; \
+ bucket_sig[2] ^= mbuf_sig; \
+ bucket_sig[3] ^= mbuf_sig; \
+ \
+ mask[0] = 0; \
+ mask[1] = 0; \
+ mask[2] = 0; \
+ mask[3] = 0; \
+ \
+ if (bucket_sig[0] == 0) \
+ mask[0] = 1; \
+ if (bucket_sig[1] == 0) \
+ mask[1] = 2; \
+ if (bucket_sig[2] == 0) \
+ mask[2] = 4; \
+ if (bucket_sig[3] == 0) \
+ mask[3] = 8; \
+ \
+ mask_all = (mask[0] | mask[1]) | (mask[2] | mask[3]); \
+ \
+ match = (LUT_MATCH >> mask_all) & 1; \
+ match_many = (LUT_MATCH_MANY >> mask_all) & 1; \
+ match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3; \
+}
+
+#define lookup_cmp_key(mbuf, key, match_key, f) \
+{ \
+ uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset);\
+ uint64_t *bkt_key = (uint64_t *) key; \
+ \
+ switch (f->key_size) { \
+ case 8: \
+ { \
+ uint64_t xor = pkt_key[0] ^ bkt_key[0]; \
+ match_key = 0; \
+ if (xor == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ case 16: \
+ { \
+ uint64_t xor[2], or; \
+ \
+ xor[0] = pkt_key[0] ^ bkt_key[0]; \
+ xor[1] = pkt_key[1] ^ bkt_key[1]; \
+ or = xor[0] | xor[1]; \
+ match_key = 0; \
+ if (or == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ case 32: \
+ { \
+ uint64_t xor[4], or; \
+ \
+ xor[0] = pkt_key[0] ^ bkt_key[0]; \
+ xor[1] = pkt_key[1] ^ bkt_key[1]; \
+ xor[2] = pkt_key[2] ^ bkt_key[2]; \
+ xor[3] = pkt_key[3] ^ bkt_key[3]; \
+ or = xor[0] | xor[1] | xor[2] | xor[3]; \
+ match_key = 0; \
+ if (or == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ case 64: \
+ { \
+ uint64_t xor[8], or; \
+ \
+ xor[0] = pkt_key[0] ^ bkt_key[0]; \
+ xor[1] = pkt_key[1] ^ bkt_key[1]; \
+ xor[2] = pkt_key[2] ^ bkt_key[2]; \
+ xor[3] = pkt_key[3] ^ bkt_key[3]; \
+ xor[4] = pkt_key[4] ^ bkt_key[4]; \
+ xor[5] = pkt_key[5] ^ bkt_key[5]; \
+ xor[6] = pkt_key[6] ^ bkt_key[6]; \
+ xor[7] = pkt_key[7] ^ bkt_key[7]; \
+ or = xor[0] | xor[1] | xor[2] | xor[3] | \
+ xor[4] | xor[5] | xor[6] | xor[7]; \
+ match_key = 0; \
+ if (or == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ default: \
+ match_key = 0; \
+ if (memcmp(pkt_key, bkt_key, f->key_size) == 0) \
+ match_key = 1; \
+ } \
+}
+
+#define lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index) \
+{ \
+ uint64_t pkt00_mask, pkt01_mask; \
+ struct rte_mbuf *mbuf00, *mbuf01; \
+ \
+ pkt00_index = __builtin_ctzll(pkts_mask); \
+ pkt00_mask = 1LLU << pkt00_index; \
+ pkts_mask &= ~pkt00_mask; \
+ mbuf00 = pkts[pkt00_index]; \
+ \
+ pkt01_index = __builtin_ctzll(pkts_mask); \
+ pkt01_mask = 1LLU << pkt01_index; \
+ pkts_mask &= ~pkt01_mask; \
+ mbuf01 = pkts[pkt01_index]; \
+ \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
+}
+
+#define lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask, pkt00_index, \
+ pkt01_index) \
+{ \
+ uint64_t pkt00_mask, pkt01_mask; \
+ struct rte_mbuf *mbuf00, *mbuf01; \
+ \
+ pkt00_index = __builtin_ctzll(pkts_mask); \
+ pkt00_mask = 1LLU << pkt00_index; \
+ pkts_mask &= ~pkt00_mask; \
+ mbuf00 = pkts[pkt00_index]; \
+ \
+ pkt01_index = __builtin_ctzll(pkts_mask); \
+ if (pkts_mask == 0) \
+ pkt01_index = pkt00_index; \
+ pkt01_mask = 1LLU << pkt01_index; \
+ pkts_mask &= ~pkt01_mask; \
+ mbuf01 = pkts[pkt01_index]; \
+ \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
+}
+
+#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index) \
+{ \
+ struct grinder *g10, *g11; \
+ uint64_t sig10, sig11, bkt10_index, bkt11_index; \
+ struct rte_mbuf *mbuf10, *mbuf11; \
+ struct bucket *bkt10, *bkt11, *buckets = t->buckets; \
+ uint64_t bucket_mask = t->bucket_mask; \
+ uint32_t signature_offset = t->signature_offset; \
+ \
+ mbuf10 = pkts[pkt10_index]; \
+ sig10 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf10, signature_offset);\
+ bkt10_index = sig10 & bucket_mask; \
+ bkt10 = &buckets[bkt10_index]; \
+ \
+ mbuf11 = pkts[pkt11_index]; \
+ sig11 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf11, signature_offset);\
+ bkt11_index = sig11 & bucket_mask; \
+ bkt11 = &buckets[bkt11_index]; \
+ \
+ rte_prefetch0(bkt10); \
+ rte_prefetch0(bkt11); \
+ \
+ g10 = &g[pkt10_index]; \
+ g10->sig = sig10; \
+ g10->bkt = bkt10; \
+ \
+ g11 = &g[pkt11_index]; \
+ g11->sig = sig11; \
+ g11->bkt = bkt11; \
+}
+
+#define lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index) \
+{ \
+ struct grinder *g10, *g11; \
+ uint64_t sig10, sig11, bkt10_index, bkt11_index; \
+ struct rte_mbuf *mbuf10, *mbuf11; \
+ struct bucket *bkt10, *bkt11, *buckets = t->buckets; \
+ uint8_t *key10, *key11; \
+ uint64_t bucket_mask = t->bucket_mask; \
+ rte_table_hash_op_hash f_hash = t->f_hash; \
+ uint64_t seed = t->seed; \
+ uint32_t key_size = t->key_size; \
+ uint32_t key_offset = t->key_offset; \
+ \
+ mbuf10 = pkts[pkt10_index]; \
+ key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset); \
+ sig10 = (uint64_t) f_hash(key10, key_size, seed); \
+ bkt10_index = sig10 & bucket_mask; \
+ bkt10 = &buckets[bkt10_index]; \
+ \
+ mbuf11 = pkts[pkt11_index]; \
+ key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset); \
+ sig11 = (uint64_t) f_hash(key11, key_size, seed); \
+ bkt11_index = sig11 & bucket_mask; \
+ bkt11 = &buckets[bkt11_index]; \
+ \
+ rte_prefetch0(bkt10); \
+ rte_prefetch0(bkt11); \
+ \
+ g10 = &g[pkt10_index]; \
+ g10->sig = sig10; \
+ g10->bkt = bkt10; \
+ \
+ g11 = &g[pkt11_index]; \
+ g11->sig = sig11; \
+ g11->bkt = bkt11; \
+}
+
+#define lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many)\
+{ \
+ struct grinder *g20, *g21; \
+ uint64_t sig20, sig21; \
+ struct bucket *bkt20, *bkt21; \
+ uint8_t *key20, *key21, *key_mem = t->key_mem; \
+ uint64_t match20, match21, match_many20, match_many21; \
+ uint64_t match_pos20, match_pos21; \
+ uint32_t key20_index, key21_index, key_size_shl = t->key_size_shl;\
+ \
+ g20 = &g[pkt20_index]; \
+ sig20 = g20->sig; \
+ bkt20 = g20->bkt; \
+ sig20 = (sig20 >> 16) | 1LLU; \
+ lookup_cmp_sig(sig20, bkt20, match20, match_many20, match_pos20);\
+ match20 <<= pkt20_index; \
+ match_many20 |= BUCKET_NEXT_VALID(bkt20); \
+ match_many20 <<= pkt20_index; \
+ key20_index = bkt20->key_pos[match_pos20]; \
+ key20 = &key_mem[key20_index << key_size_shl]; \
+ \
+ g21 = &g[pkt21_index]; \
+ sig21 = g21->sig; \
+ bkt21 = g21->bkt; \
+ sig21 = (sig21 >> 16) | 1LLU; \
+ lookup_cmp_sig(sig21, bkt21, match21, match_many21, match_pos21);\
+ match21 <<= pkt21_index; \
+ match_many21 |= BUCKET_NEXT_VALID(bkt21); \
+ match_many21 <<= pkt21_index; \
+ key21_index = bkt21->key_pos[match_pos21]; \
+ key21 = &key_mem[key21_index << key_size_shl]; \
+ \
+ rte_prefetch0(key20); \
+ rte_prefetch0(key21); \
+ \
+ pkts_mask_match_many |= match_many20 | match_many21; \
+ \
+ g20->match = match20; \
+ g20->key_index = key20_index; \
+ \
+ g21->match = match21; \
+ g21->key_index = key21_index; \
+}
+
+#define lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out, \
+ entries) \
+{ \
+ struct grinder *g30, *g31; \
+ struct rte_mbuf *mbuf30, *mbuf31; \
+ uint8_t *key30, *key31, *key_mem = t->key_mem; \
+ uint8_t *data30, *data31, *data_mem = t->data_mem; \
+ uint64_t match30, match31, match_key30, match_key31, match_keys;\
+ uint32_t key30_index, key31_index; \
+ uint32_t key_size_shl = t->key_size_shl; \
+ uint32_t data_size_shl = t->data_size_shl; \
+ \
+ mbuf30 = pkts[pkt30_index]; \
+ g30 = &g[pkt30_index]; \
+ match30 = g30->match; \
+ key30_index = g30->key_index; \
+ key30 = &key_mem[key30_index << key_size_shl]; \
+ lookup_cmp_key(mbuf30, key30, match_key30, t); \
+ match_key30 <<= pkt30_index; \
+ match_key30 &= match30; \
+ data30 = &data_mem[key30_index << data_size_shl]; \
+ entries[pkt30_index] = data30; \
+ \
+ mbuf31 = pkts[pkt31_index]; \
+ g31 = &g[pkt31_index]; \
+ match31 = g31->match; \
+ key31_index = g31->key_index; \
+ key31 = &key_mem[key31_index << key_size_shl]; \
+ lookup_cmp_key(mbuf31, key31, match_key31, t); \
+ match_key31 <<= pkt31_index; \
+ match_key31 &= match31; \
+ data31 = &data_mem[key31_index << data_size_shl]; \
+ entries[pkt31_index] = data31; \
+ \
+ rte_prefetch0(data30); \
+ rte_prefetch0(data31); \
+ \
+ match_keys = match_key30 | match_key31; \
+ pkts_mask_out |= match_keys; \
+}
+
+/***
+* The lookup function implements a 4-stage pipeline, with each stage processing
+* two different packets. The purpose of pipelined implementation is to hide the
+* latency of prefetching the data structures and loosen the data dependency
+* between instructions.
+*
+* p00 _______ p10 _______ p20 _______ p30 _______
+*----->| |----->| |----->| |----->| |----->
+* | 0 | | 1 | | 2 | | 3 |
+*----->|_______|----->|_______|----->|_______|----->|_______|----->
+* p01 p11 p21 p31
+*
+* The naming convention is:
+* pXY = packet Y of stage X, X = 0 .. 3, Y = 0 .. 1
+*
+***/
+static int rte_table_hash_ext_lookup(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct grinder *g = t->grinders;
+ uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
+ uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
+ uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
+ int status = 0;
+
+	/* Cannot run the pipeline with fewer than 7 packets */
+ if (__builtin_popcountll(pkts_mask) < 7)
+ return rte_table_hash_ext_lookup_unoptimized(table, pkts,
+ pkts_mask, lookup_hit_mask, entries, 0);
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline feed */
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline feed */
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /*
+ * Pipeline run
+ *
+ */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
+ pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index,
+ pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
+ pkts_mask_out, entries);
+ }
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Slow path */
+ pkts_mask_match_many &= ~pkts_mask_out;
+ if (pkts_mask_match_many) {
+ uint64_t pkts_mask_out_slow = 0;
+
+ status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
+ pkts_mask_match_many, &pkts_mask_out_slow, entries, 0);
+ pkts_mask_out |= pkts_mask_out_slow;
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return status;
+}
+
+static int rte_table_hash_ext_lookup_dosig(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct grinder *g = t->grinders;
+ uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
+ uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
+ uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
+ int status = 0;
+
+	/* Cannot run the pipeline with fewer than 7 packets */
+ if (__builtin_popcountll(pkts_mask) < 7)
+ return rte_table_hash_ext_lookup_unoptimized(table, pkts,
+ pkts_mask, lookup_hit_mask, entries, 1);
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline feed */
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline feed */
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /*
+ * Pipeline run
+ *
+ */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
+ pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index,
+ pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
+ pkts_mask_out, entries);
+ }
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Slow path */
+ pkts_mask_match_many &= ~pkts_mask_out;
+ if (pkts_mask_match_many) {
+ uint64_t pkts_mask_out_slow = 0;
+
+ status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
+ pkts_mask_match_many, &pkts_mask_out_slow, entries, 1);
+ pkts_mask_out |= pkts_mask_out_slow;
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return status;
+}
+
+struct rte_table_ops rte_table_hash_ext_ops = {
+ .f_create = rte_table_hash_ext_create,
+ .f_free = rte_table_hash_ext_free,
+ .f_add = rte_table_hash_ext_entry_add,
+ .f_delete = rte_table_hash_ext_entry_delete,
+ .f_lookup = rte_table_hash_ext_lookup,
+};
+
+struct rte_table_ops rte_table_hash_ext_dosig_ops = {
+ .f_create = rte_table_hash_ext_create,
+ .f_free = rte_table_hash_ext_free,
+ .f_add = rte_table_hash_ext_entry_add,
+ .f_delete = rte_table_hash_ext_entry_delete,
+ .f_lookup = rte_table_hash_ext_lookup_dosig,
+};
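
A sizing sketch for the extendible-bucket table above (not part of the patch): with 4 keys per bucket, n_buckets is typically chosen as n_keys / 4 and a smaller extension pool absorbs bucket overflow. All numbers, offsets and the entry_size power-of-two assumption below are illustrative.

#include <stdint.h>
#include "rte_table_hash.h"

static void *
setup_hash_ext(int socket_id, uint32_t entry_size,
	rte_table_hash_op_hash f_hash)
{
	struct rte_table_hash_ext_params p = {
		.key_size = 16,                  /* power of two, as required */
		.n_keys = 1 << 16,               /* power of two, as required */
		.n_buckets = (1 << 16) / 4,      /* 4 keys per bucket */
		.n_buckets_ext = (1 << 16) / 16, /* overflow pool of bucket extensions */
		.f_hash = f_hash,
		.seed = 0,
		.signature_offset = 0,           /* used only by the pre-computed sig variant */
		.key_offset = 32,                /* 8-byte aligned key location (illustrative) */
	};

	/* The "dosig" ops variant ignores signature_offset and hashes the
	 * key on lookup instead of reading a pre-computed signature. */
	return rte_table_hash_ext_dosig_ops.f_create(&p, socket_id, entry_size);
}
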
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash_key16.c b/src/dpdk_lib18/librte_table/rte_table_hash_key16.c
new file mode 100755
index 00000000..ee5f639b
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_hash_key16.c
@@ -0,0 +1,1101 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include "rte_table_hash.h"
+#include "rte_lru.h"
+
+#define RTE_TABLE_HASH_KEY_SIZE 16
+
+#define RTE_BUCKET_ENTRY_VALID 0x1LLU
+
+struct rte_bucket_4_16 {
+ /* Cache line 0 */
+ uint64_t signature[4 + 1];
+ uint64_t lru_list;
+ struct rte_bucket_4_16 *next;
+ uint64_t next_valid;
+
+ /* Cache line 1 */
+ uint64_t key[4][2];
+
+ /* Cache line 2 */
+ uint8_t data[0];
+};
+
+struct rte_table_hash {
+ /* Input parameters */
+ uint32_t n_buckets;
+ uint32_t n_entries_per_bucket;
+ uint32_t key_size;
+ uint32_t entry_size;
+ uint32_t bucket_size;
+ uint32_t signature_offset;
+ uint32_t key_offset;
+ rte_table_hash_op_hash f_hash;
+ uint64_t seed;
+
+ /* Extendible buckets */
+ uint32_t n_buckets_ext;
+ uint32_t stack_pos;
+ uint32_t *stack;
+
+ /* Lookup table */
+ uint8_t memory[0] __rte_cache_aligned;
+};
+
+static int
+check_params_create_lru(struct rte_table_hash_key16_lru_params *params) {
+ /* n_entries */
+ if (params->n_entries == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* signature offset */
+ if ((params->signature_offset & 0x3) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid signature_offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* key offset */
+ if ((params->key_offset & 0x7) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid key_offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* f_hash */
+ if (params->f_hash == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: f_hash function pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void *
+rte_table_hash_create_key16_lru(void *params,
+ int socket_id,
+ uint32_t entry_size)
+{
+ struct rte_table_hash_key16_lru_params *p =
+ (struct rte_table_hash_key16_lru_params *) params;
+ struct rte_table_hash *f;
+ uint32_t n_buckets, n_entries_per_bucket,
+ key_size, bucket_size_cl, total_size, i;
+
+ /* Check input parameters */
+ if ((check_params_create_lru(p) != 0) ||
+ ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+ ((sizeof(struct rte_bucket_4_16) % RTE_CACHE_LINE_SIZE) != 0))
+ return NULL;
+ n_entries_per_bucket = 4;
+ key_size = 16;
+
+ /* Memory allocation */
+ n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
+ n_entries_per_bucket);
+ bucket_size_cl = (sizeof(struct rte_bucket_4_16) + n_entries_per_bucket
+ * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
+ total_size = sizeof(struct rte_table_hash) + n_buckets *
+ bucket_size_cl * RTE_CACHE_LINE_SIZE;
+
+ f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for hash table\n",
+ __func__, total_size);
+ return NULL;
+ }
+ RTE_LOG(INFO, TABLE,
+ "%s: Hash table memory footprint is %u bytes\n",
+ __func__, total_size);
+
+ /* Memory initialization */
+ f->n_buckets = n_buckets;
+ f->n_entries_per_bucket = n_entries_per_bucket;
+ f->key_size = key_size;
+ f->entry_size = entry_size;
+ f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
+ f->signature_offset = p->signature_offset;
+ f->key_offset = p->key_offset;
+ f->f_hash = p->f_hash;
+ f->seed = p->seed;
+
+ for (i = 0; i < n_buckets; i++) {
+ struct rte_bucket_4_16 *bucket;
+
+ bucket = (struct rte_bucket_4_16 *) &f->memory[i *
+ f->bucket_size];
+ lru_init(bucket);
+ }
+
+ return f;
+}
+
+static int
+rte_table_hash_free_key16_lru(void *table)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+
+ /* Check input parameters */
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_free(f);
+ return 0;
+}
+
+static int
+rte_table_hash_entry_add_key16_lru(
+ void *table,
+ void *key,
+ void *entry,
+ int *key_found,
+ void **entry_ptr)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_16 *bucket;
+ uint64_t signature, pos;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket = (struct rte_bucket_4_16 *)
+ &f->memory[bucket_index * f->bucket_size];
+ signature |= RTE_BUCKET_ENTRY_VALID;
+
+ /* Key is present in the bucket */
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if ((bucket_signature == signature) &&
+ (memcmp(key, bucket_key, f->key_size) == 0)) {
+ uint8_t *bucket_data = &bucket->data[i * f->entry_size];
+
+ memcpy(bucket_data, entry, f->entry_size);
+ lru_update(bucket, i);
+ *key_found = 1;
+ *entry_ptr = (void *) bucket_data;
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if (bucket_signature == 0) {
+ uint8_t *bucket_data = &bucket->data[i * f->entry_size];
+
+ bucket->signature[i] = signature;
+ memcpy(bucket_key, key, f->key_size);
+ memcpy(bucket_data, entry, f->entry_size);
+ lru_update(bucket, i);
+ *key_found = 0;
+ *entry_ptr = (void *) bucket_data;
+
+ return 0;
+ }
+ }
+
+ /* Bucket full: replace LRU entry */
+ pos = lru_pos(bucket);
+ bucket->signature[pos] = signature;
+ memcpy(bucket->key[pos], key, f->key_size);
+ memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
+ lru_update(bucket, pos);
+ *key_found = 0;
+ *entry_ptr = (void *) &bucket->data[pos * f->entry_size];
+
+ return 0;
+}
+
+static int
+rte_table_hash_entry_delete_key16_lru(
+ void *table,
+ void *key,
+ int *key_found,
+ void *entry)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_16 *bucket;
+ uint64_t signature;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket = (struct rte_bucket_4_16 *)
+ &f->memory[bucket_index * f->bucket_size];
+ signature |= RTE_BUCKET_ENTRY_VALID;
+
+ /* Key is present in the bucket */
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if ((bucket_signature == signature) &&
+ (memcmp(key, bucket_key, f->key_size) == 0)) {
+ uint8_t *bucket_data = &bucket->data[i * f->entry_size];
+
+ bucket->signature[i] = 0;
+ *key_found = 1;
+ if (entry)
+ memcpy(entry, bucket_data, f->entry_size);
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ *key_found = 0;
+ return 0;
+}
+
+static int
+check_params_create_ext(struct rte_table_hash_key16_ext_params *params) {
+ /* n_entries */
+ if (params->n_entries == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_entries_ext */
+ if (params->n_entries_ext == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* signature offset */
+ if ((params->signature_offset & 0x3) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid signature offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* key offset */
+ if ((params->key_offset & 0x7) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid key offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* f_hash */
+ if (params->f_hash == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: f_hash function pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void *
+rte_table_hash_create_key16_ext(void *params,
+ int socket_id,
+ uint32_t entry_size)
+{
+ struct rte_table_hash_key16_ext_params *p =
+ (struct rte_table_hash_key16_ext_params *) params;
+ struct rte_table_hash *f;
+ uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket, key_size,
+ bucket_size_cl, stack_size_cl, total_size, i;
+
+ /* Check input parameters */
+ if ((check_params_create_ext(p) != 0) ||
+ ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+ ((sizeof(struct rte_bucket_4_16) % RTE_CACHE_LINE_SIZE) != 0))
+ return NULL;
+
+ n_entries_per_bucket = 4;
+ key_size = 16;
+
+ /* Memory allocation */
+ n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
+ n_entries_per_bucket);
+ n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
+ n_entries_per_bucket;
+ bucket_size_cl = (sizeof(struct rte_bucket_4_16) + n_entries_per_bucket
+ * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
+ stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
+ / RTE_CACHE_LINE_SIZE;
+ total_size = sizeof(struct rte_table_hash) +
+ ((n_buckets + n_buckets_ext) * bucket_size_cl + stack_size_cl) *
+ RTE_CACHE_LINE_SIZE;
+
+ f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for hash table\n",
+ __func__, total_size);
+ return NULL;
+ }
+ RTE_LOG(INFO, TABLE,
+ "%s: Hash table memory footprint is %u bytes\n",
+ __func__, total_size);
+
+ /* Memory initialization */
+ f->n_buckets = n_buckets;
+ f->n_entries_per_bucket = n_entries_per_bucket;
+ f->key_size = key_size;
+ f->entry_size = entry_size;
+ f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
+ f->signature_offset = p->signature_offset;
+ f->key_offset = p->key_offset;
+ f->f_hash = p->f_hash;
+ f->seed = p->seed;
+
+ f->n_buckets_ext = n_buckets_ext;
+ f->stack_pos = n_buckets_ext;
+ f->stack = (uint32_t *)
+ &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];
+
+ for (i = 0; i < n_buckets_ext; i++)
+ f->stack[i] = i;
+
+ return f;
+}
+
+static int
+rte_table_hash_free_key16_ext(void *table)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+
+ /* Check input parameters */
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_free(f);
+ return 0;
+}
+
+static int
+rte_table_hash_entry_add_key16_ext(
+ void *table,
+ void *key,
+ void *entry,
+ int *key_found,
+ void **entry_ptr)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
+ uint64_t signature;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket0 = (struct rte_bucket_4_16 *)
+ &f->memory[bucket_index * f->bucket_size];
+ signature |= RTE_BUCKET_ENTRY_VALID;
+
+ /* Key is present in the bucket */
+ for (bucket = bucket0; bucket != NULL; bucket = bucket->next)
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if ((bucket_signature == signature) &&
+ (memcmp(key, bucket_key, f->key_size) == 0)) {
+ uint8_t *bucket_data = &bucket->data[i *
+ f->entry_size];
+
+ memcpy(bucket_data, entry, f->entry_size);
+ *key_found = 1;
+ *entry_ptr = (void *) bucket_data;
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
+ bucket_prev = bucket, bucket = bucket->next)
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if (bucket_signature == 0) {
+ uint8_t *bucket_data = &bucket->data[i *
+ f->entry_size];
+
+ bucket->signature[i] = signature;
+ memcpy(bucket_key, key, f->key_size);
+ memcpy(bucket_data, entry, f->entry_size);
+ *key_found = 0;
+ *entry_ptr = (void *) bucket_data;
+
+ return 0;
+ }
+ }
+
+ /* Bucket full: extend bucket */
+ if (f->stack_pos > 0) {
+ bucket_index = f->stack[--f->stack_pos];
+
+ bucket = (struct rte_bucket_4_16 *) &f->memory[(f->n_buckets +
+ bucket_index) * f->bucket_size];
+ bucket_prev->next = bucket;
+ bucket_prev->next_valid = 1;
+
+ bucket->signature[0] = signature;
+ memcpy(bucket->key[0], key, f->key_size);
+ memcpy(&bucket->data[0], entry, f->entry_size);
+ *key_found = 0;
+ *entry_ptr = (void *) &bucket->data[0];
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static int
+rte_table_hash_entry_delete_key16_ext(
+ void *table,
+ void *key,
+ int *key_found,
+ void *entry)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
+ uint64_t signature;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket0 = (struct rte_bucket_4_16 *)
+ &f->memory[bucket_index * f->bucket_size];
+ signature |= RTE_BUCKET_ENTRY_VALID;
+
+ /* Key is present in the bucket */
+ for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
+ bucket_prev = bucket, bucket = bucket->next)
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if ((bucket_signature == signature) &&
+ (memcmp(key, bucket_key, f->key_size) == 0)) {
+ uint8_t *bucket_data = &bucket->data[i *
+ f->entry_size];
+
+ bucket->signature[i] = 0;
+ *key_found = 1;
+ if (entry)
+ memcpy(entry, bucket_data,
+ f->entry_size);
+
+ if ((bucket->signature[0] == 0) &&
+ (bucket->signature[1] == 0) &&
+ (bucket->signature[2] == 0) &&
+ (bucket->signature[3] == 0) &&
+ (bucket_prev != NULL)) {
+ bucket_prev->next = bucket->next;
+ bucket_prev->next_valid =
+ bucket->next_valid;
+
+ memset(bucket, 0,
+ sizeof(struct rte_bucket_4_16));
+ bucket_index = (bucket -
+ ((struct rte_bucket_4_16 *)
+ f->memory)) - f->n_buckets;
+ f->stack[f->stack_pos++] = bucket_index;
+ }
+
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ *key_found = 0;
+ return 0;
+}
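
Editorial note (not part of the patch): in the _ext delete path above, once an entry is cleared the code checks whether all four signatures of that chained bucket are now zero; if so, and the bucket is not the head bucket of the chain (bucket_prev != NULL), it is unlinked from its predecessor, zeroed, and its index is pushed back onto the free-bucket stack so a later f_add can reuse it.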
+
+#define lookup_key16_cmp(key_in, bucket, pos) \
+{ \
+ uint64_t xor[4][2], or[4], signature[4]; \
+ \
+ signature[0] = (~bucket->signature[0]) & 1; \
+ signature[1] = (~bucket->signature[1]) & 1; \
+ signature[2] = (~bucket->signature[2]) & 1; \
+ signature[3] = (~bucket->signature[3]) & 1; \
+ \
+ xor[0][0] = key_in[0] ^ bucket->key[0][0]; \
+ xor[0][1] = key_in[1] ^ bucket->key[0][1]; \
+ \
+ xor[1][0] = key_in[0] ^ bucket->key[1][0]; \
+ xor[1][1] = key_in[1] ^ bucket->key[1][1]; \
+ \
+ xor[2][0] = key_in[0] ^ bucket->key[2][0]; \
+ xor[2][1] = key_in[1] ^ bucket->key[2][1]; \
+ \
+ xor[3][0] = key_in[0] ^ bucket->key[3][0]; \
+ xor[3][1] = key_in[1] ^ bucket->key[3][1]; \
+ \
+ or[0] = xor[0][0] | xor[0][1] | signature[0]; \
+ or[1] = xor[1][0] | xor[1][1] | signature[1]; \
+ or[2] = xor[2][0] | xor[2][1] | signature[2]; \
+ or[3] = xor[3][0] | xor[3][1] | signature[3]; \
+ \
+ pos = 4; \
+ if (or[0] == 0) \
+ pos = 0; \
+ if (or[1] == 0) \
+ pos = 1; \
+ if (or[2] == 0) \
+ pos = 2; \
+ if (or[3] == 0) \
+ pos = 3; \
+}
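
Editorial note (not part of the patch): lookup_key16_cmp compares the 16-byte input key against all four bucket slots without per-slot branches — a slot is a hit only when both 64-bit XOR words are zero and the slot's valid bit (RTE_BUCKET_ENTRY_VALID) is set, and pos comes out as the matching slot index, or 4 when nothing matches. A plain-loop sketch of the same check, assuming the rte_bucket_4_16 layout defined earlier in this file (the function name is illustrative only):

/* Illustrative scalar equivalent of lookup_key16_cmp (not part of the patch). */
static inline uint32_t
key16_cmp_scalar(const uint64_t key_in[2], const struct rte_bucket_4_16 *bkt)
{
	uint32_t i, pos = 4;	/* 4 means "no match" */

	for (i = 0; i < 4; i++) {
		int valid = bkt->signature[i] & 1;	/* RTE_BUCKET_ENTRY_VALID */
		int equal = (key_in[0] == bkt->key[i][0]) &&
			    (key_in[1] == bkt->key[i][1]);

		if (valid && equal)
			pos = i;
	}
	return pos;
}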
+
+#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask) \
+{ \
+ uint64_t pkt_mask; \
+ \
+ pkt0_index = __builtin_ctzll(pkts_mask); \
+ pkt_mask = 1LLU << pkt0_index; \
+ pkts_mask &= ~pkt_mask; \
+ \
+ mbuf0 = pkts[pkt0_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, 0)); \
+}
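
Editorial note (not part of the patch): lookup1_stage0 and the lookup2_stage0* macros below all drain packets from pkts_mask one set bit at a time — __builtin_ctzll picks the lowest pending packet index and clearing that bit consumes it. A minimal standalone sketch of this mask-draining idiom:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pkts_mask = 0x2D; /* packets 0, 2, 3 and 5 pending */

	while (pkts_mask) {
		uint32_t pkt_index = __builtin_ctzll(pkts_mask);
		uint64_t pkt_mask = 1LLU << pkt_index;

		pkts_mask &= ~pkt_mask;	/* consume this packet */
		printf("processing packet %u\n", pkt_index);
	}
	return 0;
}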
+
+#define lookup1_stage1(mbuf1, bucket1, f) \
+{ \
+ uint64_t signature; \
+ uint32_t bucket_index; \
+ \
+ signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
+ bucket_index = signature & (f->n_buckets - 1); \
+ bucket1 = (struct rte_bucket_4_16 *) \
+ &f->memory[bucket_index * f->bucket_size]; \
+ rte_prefetch0(bucket1); \
+ rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
+}
+
+#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
+ pkts_mask_out, entries, f) \
+{ \
+ void *a; \
+ uint64_t pkt_mask; \
+ uint64_t *key; \
+ uint32_t pos; \
+ \
+ key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
+ \
+ lookup_key16_cmp(key, bucket2, pos); \
+ \
+ pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
+ pkts_mask_out |= pkt_mask; \
+ \
+ a = (void *) &bucket2->data[pos * f->entry_size]; \
+ rte_prefetch0(a); \
+ entries[pkt2_index] = a; \
+ lru_update(bucket2, pos); \
+}
+
+#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out, entries, \
+ buckets_mask, buckets, keys, f) \
+{ \
+ struct rte_bucket_4_16 *bucket_next; \
+ void *a; \
+ uint64_t pkt_mask, bucket_mask; \
+ uint64_t *key; \
+ uint32_t pos; \
+ \
+ key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
+ \
+ lookup_key16_cmp(key, bucket2, pos); \
+ \
+ pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
+ pkts_mask_out |= pkt_mask; \
+ \
+ a = (void *) &bucket2->data[pos * f->entry_size]; \
+ rte_prefetch0(a); \
+ entries[pkt2_index] = a; \
+ \
+ bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
+ buckets_mask |= bucket_mask; \
+ bucket_next = bucket2->next; \
+ buckets[pkt2_index] = bucket_next; \
+ keys[pkt2_index] = key; \
+}
+
+#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, entries,\
+ buckets_mask, f) \
+{ \
+ struct rte_bucket_4_16 *bucket, *bucket_next; \
+ void *a; \
+ uint64_t pkt_mask, bucket_mask; \
+ uint64_t *key; \
+ uint32_t pos; \
+ \
+ bucket = buckets[pkt_index]; \
+ key = keys[pkt_index]; \
+ \
+ lookup_key16_cmp(key, bucket, pos); \
+ \
+ pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
+ pkts_mask_out |= pkt_mask; \
+ \
+ a = (void *) &bucket->data[pos * f->entry_size]; \
+ rte_prefetch0(a); \
+ entries[pkt_index] = a; \
+ \
+ bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
+ buckets_mask |= bucket_mask; \
+ bucket_next = bucket->next; \
+ rte_prefetch0(bucket_next); \
+ rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
+ buckets[pkt_index] = bucket_next; \
+ keys[pkt_index] = key; \
+}
+
+#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
+ pkts, pkts_mask) \
+{ \
+ uint64_t pkt00_mask, pkt01_mask; \
+ \
+ pkt00_index = __builtin_ctzll(pkts_mask); \
+ pkt00_mask = 1LLU << pkt00_index; \
+ pkts_mask &= ~pkt00_mask; \
+ \
+ mbuf00 = pkts[pkt00_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
+ \
+ pkt01_index = __builtin_ctzll(pkts_mask); \
+ pkt01_mask = 1LLU << pkt01_index; \
+ pkts_mask &= ~pkt01_mask; \
+ \
+ mbuf01 = pkts[pkt01_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
+}
+
+#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
+ mbuf00, mbuf01, pkts, pkts_mask) \
+{ \
+ uint64_t pkt00_mask, pkt01_mask; \
+ \
+ pkt00_index = __builtin_ctzll(pkts_mask); \
+ pkt00_mask = 1LLU << pkt00_index; \
+ pkts_mask &= ~pkt00_mask; \
+ \
+ mbuf00 = pkts[pkt00_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
+ \
+ pkt01_index = __builtin_ctzll(pkts_mask); \
+ if (pkts_mask == 0) \
+ pkt01_index = pkt00_index; \
+ pkt01_mask = 1LLU << pkt01_index; \
+ pkts_mask &= ~pkt01_mask; \
+ \
+ mbuf01 = pkts[pkt01_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
+}
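
Editorial note (not part of the patch): the _with_odd_support variant handles an odd trailing packet — if extracting the first packet empties pkts_mask, the second slot reuses pkt00_index, so the two-packet stages still get valid operands and the last packet is simply processed twice; the duplicate is harmless because re-setting the same bit in pkts_mask_out and rewriting the same entries[] slot are idempotent.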
+
+#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
+{ \
+ uint64_t signature10, signature11; \
+ uint32_t bucket10_index, bucket11_index; \
+ \
+ signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
+ bucket10_index = signature10 & (f->n_buckets - 1); \
+ bucket10 = (struct rte_bucket_4_16 *) \
+ &f->memory[bucket10_index * f->bucket_size]; \
+ rte_prefetch0(bucket10); \
+ rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
+ \
+ signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
+ bucket11_index = signature11 & (f->n_buckets - 1); \
+ bucket11 = (struct rte_bucket_4_16 *) \
+ &f->memory[bucket11_index * f->bucket_size]; \
+ rte_prefetch0(bucket11); \
+ rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
+}
+
+#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
+ bucket20, bucket21, pkts_mask_out, entries, f) \
+{ \
+ void *a20, *a21; \
+ uint64_t pkt20_mask, pkt21_mask; \
+ uint64_t *key20, *key21; \
+ uint32_t pos20, pos21; \
+ \
+ key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
+ key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
+ \
+ lookup_key16_cmp(key20, bucket20, pos20); \
+ lookup_key16_cmp(key21, bucket21, pos21); \
+ \
+ pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
+ pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
+ pkts_mask_out |= pkt20_mask | pkt21_mask; \
+ \
+ a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
+ a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
+ rte_prefetch0(a20); \
+ rte_prefetch0(a21); \
+ entries[pkt20_index] = a20; \
+ entries[pkt21_index] = a21; \
+ lru_update(bucket20, pos20); \
+ lru_update(bucket21, pos21); \
+}
+
+#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
+ bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f) \
+{ \
+ struct rte_bucket_4_16 *bucket20_next, *bucket21_next; \
+ void *a20, *a21; \
+ uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
+ uint64_t *key20, *key21; \
+ uint32_t pos20, pos21; \
+ \
+ key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
+ key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
+ \
+ lookup_key16_cmp(key20, bucket20, pos20); \
+ lookup_key16_cmp(key21, bucket21, pos21); \
+ \
+ pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
+ pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
+ pkts_mask_out |= pkt20_mask | pkt21_mask; \
+ \
+ a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
+ a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
+ rte_prefetch0(a20); \
+ rte_prefetch0(a21); \
+ entries[pkt20_index] = a20; \
+ entries[pkt21_index] = a21; \
+ \
+ bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
+ bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
+ buckets_mask |= bucket20_mask | bucket21_mask; \
+ bucket20_next = bucket20->next; \
+ bucket21_next = bucket21->next; \
+ buckets[pkt20_index] = bucket20_next; \
+ buckets[pkt21_index] = bucket21_next; \
+ keys[pkt20_index] = key20; \
+ keys[pkt21_index] = key21; \
+}
+
+static int
+rte_table_hash_lookup_key16_lru(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
+ struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
+ uint32_t pkt00_index, pkt01_index, pkt10_index;
+ uint32_t pkt11_index, pkt20_index, pkt21_index;
+ uint64_t pkts_mask_out = 0;
+
+ /* Cannot run the pipeline with less than 5 packets */
+ if (__builtin_popcountll(pkts_mask) < 5) {
+ for ( ; pkts_mask; ) {
+ struct rte_bucket_4_16 *bucket;
+ struct rte_mbuf *mbuf;
+ uint32_t pkt_index;
+
+ lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
+ lookup1_stage1(mbuf, bucket, f);
+ lookup1_stage2_lru(pkt_index, mbuf, bucket,
+ pkts_mask_out, entries, f);
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+ }
+
+ /*
+ * Pipeline fill
+ *
+ */
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline feed */
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /*
+ * Pipeline run
+ *
+ */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
+ mbuf00, mbuf01, pkts, pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries, f);
+ }
+
+ /*
+ * Pipeline flush
+ *
+ */
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries, f);
+
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries, f);
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+} /* rte_table_hash_lookup_key16_lru() */
+
+static int
+rte_table_hash_lookup_key16_ext(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
+ struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
+ uint32_t pkt00_index, pkt01_index, pkt10_index;
+ uint32_t pkt11_index, pkt20_index, pkt21_index;
+ uint64_t pkts_mask_out = 0, buckets_mask = 0;
+ struct rte_bucket_4_16 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
+
+ /* Cannot run the pipeline with less than 5 packets */
+ if (__builtin_popcountll(pkts_mask) < 5) {
+ for ( ; pkts_mask; ) {
+ struct rte_bucket_4_16 *bucket;
+ struct rte_mbuf *mbuf;
+ uint32_t pkt_index;
+
+ lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
+ lookup1_stage1(mbuf, bucket, f);
+ lookup1_stage2_ext(pkt_index, mbuf, bucket,
+ pkts_mask_out, entries, buckets_mask,
+ buckets, keys, f);
+ }
+
+ goto grind_next_buckets;
+ }
+
+ /*
+ * Pipeline fill
+ *
+ */
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline feed */
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /*
+ * Pipeline run
+ *
+ */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
+ mbuf00, mbuf01, pkts, pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+ }
+
+ /*
+ * Pipeline flush
+ *
+ */
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+
+grind_next_buckets:
+ /* Grind next buckets */
+ for ( ; buckets_mask; ) {
+ uint64_t buckets_mask_next = 0;
+
+ for ( ; buckets_mask; ) {
+ uint64_t pkt_mask;
+ uint32_t pkt_index;
+
+ pkt_index = __builtin_ctzll(buckets_mask);
+ pkt_mask = 1LLU << pkt_index;
+ buckets_mask &= ~pkt_mask;
+
+ lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
+ entries, buckets_mask_next, f);
+ }
+
+ buckets_mask = buckets_mask_next;
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+} /* rte_table_hash_lookup_key16_ext() */
+
+struct rte_table_ops rte_table_hash_key16_lru_ops = {
+ .f_create = rte_table_hash_create_key16_lru,
+ .f_free = rte_table_hash_free_key16_lru,
+ .f_add = rte_table_hash_entry_add_key16_lru,
+ .f_delete = rte_table_hash_entry_delete_key16_lru,
+ .f_lookup = rte_table_hash_lookup_key16_lru,
+};
+
+struct rte_table_ops rte_table_hash_key16_ext_ops = {
+ .f_create = rte_table_hash_create_key16_ext,
+ .f_free = rte_table_hash_free_key16_ext,
+ .f_add = rte_table_hash_entry_add_key16_ext,
+ .f_delete = rte_table_hash_entry_delete_key16_ext,
+ .f_lookup = rte_table_hash_lookup_key16_ext,
+};
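
Editorial note (not part of the patch): the two rte_table_ops tables above are the only exported handles for this file; creation, add/delete and lookup all go through them. A rough usage sketch for the key16 LRU variant, based only on the parameter fields validated in check_params_create_lru() and the f_create/f_add signatures above — the hash callback, offsets and sizes below are placeholders, not values taken from this patch:

/* Hypothetical setup of a key16 LRU table (illustration only). */
#include <stdint.h>
#include <rte_table_hash.h>

static uint64_t
my_hash(void *key, uint32_t key_size, uint64_t seed)
{
	/* placeholder hash; a real application would plug in a proper one */
	uint8_t *p = key;
	uint64_t h = seed;
	uint32_t i;

	for (i = 0; i < key_size; i++)
		h = h * 31 + p[i];
	return h;
}

void example_key16_lru(int socket_id)
{
	struct rte_table_hash_key16_lru_params params = {
		.n_entries = 1 << 16,
		.signature_offset = 0,	/* placeholder mbuf metadata offsets */
		.key_offset = 32,
		.f_hash = my_hash,
		.seed = 0,
	};
	uint8_t key[16] = {0}, entry[8] = {0};
	void *table, *entry_ptr;
	int key_found;

	table = rte_table_hash_key16_lru_ops.f_create(&params, socket_id,
		sizeof(entry));
	if (table == NULL)
		return;

	rte_table_hash_key16_lru_ops.f_add(table, key, entry,
		&key_found, &entry_ptr);

	/* f_lookup() is then driven per burst with pkts[] and pkts_mask */

	rte_table_hash_key16_lru_ops.f_free(table);
}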
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash_key32.c b/src/dpdk_lib18/librte_table/rte_table_hash_key32.c
new file mode 100755
index 00000000..da0ce6af
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_hash_key32.c
@@ -0,0 +1,1121 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include "rte_table_hash.h"
+#include "rte_lru.h"
+
+#define RTE_TABLE_HASH_KEY_SIZE 32
+
+#define RTE_BUCKET_ENTRY_VALID 0x1LLU
+
+struct rte_bucket_4_32 {
+ /* Cache line 0 */
+ uint64_t signature[4 + 1];
+ uint64_t lru_list;
+ struct rte_bucket_4_32 *next;
+ uint64_t next_valid;
+
+ /* Cache lines 1 and 2 */
+ uint64_t key[4][4];
+
+ /* Cache line 3 */
+ uint8_t data[0];
+};
+
+struct rte_table_hash {
+ /* Input parameters */
+ uint32_t n_buckets;
+ uint32_t n_entries_per_bucket;
+ uint32_t key_size;
+ uint32_t entry_size;
+ uint32_t bucket_size;
+ uint32_t signature_offset;
+ uint32_t key_offset;
+ rte_table_hash_op_hash f_hash;
+ uint64_t seed;
+
+ /* Extendible buckets */
+ uint32_t n_buckets_ext;
+ uint32_t stack_pos;
+ uint32_t *stack;
+
+ /* Lookup table */
+ uint8_t memory[0] __rte_cache_aligned;
+};
+
+static int
+check_params_create_lru(struct rte_table_hash_key32_lru_params *params) {
+ /* n_entries */
+ if (params->n_entries == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* signature offset */
+ if ((params->signature_offset & 0x3) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid signature offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* key offset */
+ if ((params->key_offset & 0x7) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid key offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* f_hash */
+ if (params->f_hash == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void *
+rte_table_hash_create_key32_lru(void *params,
+ int socket_id,
+ uint32_t entry_size)
+{
+ struct rte_table_hash_key32_lru_params *p =
+ (struct rte_table_hash_key32_lru_params *) params;
+ struct rte_table_hash *f;
+ uint32_t n_buckets, n_entries_per_bucket, key_size, bucket_size_cl;
+ uint32_t total_size, i;
+
+ /* Check input parameters */
+ if ((check_params_create_lru(p) != 0) ||
+ ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+ ((sizeof(struct rte_bucket_4_32) % RTE_CACHE_LINE_SIZE) != 0)) {
+ return NULL;
+ }
+ n_entries_per_bucket = 4;
+ key_size = 32;
+
+ /* Memory allocation */
+ n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
+ n_entries_per_bucket);
+ bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
+ * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
+ total_size = sizeof(struct rte_table_hash) + n_buckets *
+ bucket_size_cl * RTE_CACHE_LINE_SIZE;
+
+ f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for hash table\n",
+ __func__, total_size);
+ return NULL;
+ }
+ RTE_LOG(INFO, TABLE,
+ "%s: Hash table memory footprint is %u bytes\n", __func__,
+ total_size);
+
+ /* Memory initialization */
+ f->n_buckets = n_buckets;
+ f->n_entries_per_bucket = n_entries_per_bucket;
+ f->key_size = key_size;
+ f->entry_size = entry_size;
+ f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
+ f->signature_offset = p->signature_offset;
+ f->key_offset = p->key_offset;
+ f->f_hash = p->f_hash;
+ f->seed = p->seed;
+
+ for (i = 0; i < n_buckets; i++) {
+ struct rte_bucket_4_32 *bucket;
+
+ bucket = (struct rte_bucket_4_32 *) &f->memory[i *
+ f->bucket_size];
+ bucket->lru_list = 0x0000000100020003LLU;
+ }
+
+ return f;
+}
+
+static int
+rte_table_hash_free_key32_lru(void *table)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+
+ /* Check input parameters */
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_free(f);
+ return 0;
+}
+
+static int
+rte_table_hash_entry_add_key32_lru(
+ void *table,
+ void *key,
+ void *entry,
+ int *key_found,
+ void **entry_ptr)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_32 *bucket;
+ uint64_t signature, pos;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket = (struct rte_bucket_4_32 *)
+ &f->memory[bucket_index * f->bucket_size];
+ signature |= RTE_BUCKET_ENTRY_VALID;
+
+ /* Key is present in the bucket */
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if ((bucket_signature == signature) &&
+ (memcmp(key, bucket_key, f->key_size) == 0)) {
+ uint8_t *bucket_data = &bucket->data[i * f->entry_size];
+
+ memcpy(bucket_data, entry, f->entry_size);
+ lru_update(bucket, i);
+ *key_found = 1;
+ *entry_ptr = (void *) bucket_data;
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if (bucket_signature == 0) {
+ uint8_t *bucket_data = &bucket->data[i * f->entry_size];
+
+ bucket->signature[i] = signature;
+ memcpy(bucket_key, key, f->key_size);
+ memcpy(bucket_data, entry, f->entry_size);
+ lru_update(bucket, i);
+ *key_found = 0;
+ *entry_ptr = (void *) bucket_data;
+
+ return 0;
+ }
+ }
+
+ /* Bucket full: replace LRU entry */
+ pos = lru_pos(bucket);
+ bucket->signature[pos] = signature;
+ memcpy(bucket->key[pos], key, f->key_size);
+ memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
+ lru_update(bucket, pos);
+ *key_found = 0;
+ *entry_ptr = (void *) &bucket->data[pos * f->entry_size];
+
+ return 0;
+}
+
+static int
+rte_table_hash_entry_delete_key32_lru(
+ void *table,
+ void *key,
+ int *key_found,
+ void *entry)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_32 *bucket;
+ uint64_t signature;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket = (struct rte_bucket_4_32 *)
+ &f->memory[bucket_index * f->bucket_size];
+ signature |= RTE_BUCKET_ENTRY_VALID;
+
+ /* Key is present in the bucket */
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if ((bucket_signature == signature) &&
+ (memcmp(key, bucket_key, f->key_size) == 0)) {
+ uint8_t *bucket_data = &bucket->data[i * f->entry_size];
+
+ bucket->signature[i] = 0;
+ *key_found = 1;
+ if (entry)
+ memcpy(entry, bucket_data, f->entry_size);
+
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ *key_found = 0;
+ return 0;
+}
+
+static int
+check_params_create_ext(struct rte_table_hash_key32_ext_params *params) {
+ /* n_entries */
+ if (params->n_entries == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_entries_ext */
+ if (params->n_entries_ext == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* signature offset */
+ if ((params->signature_offset & 0x3) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid signature offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* key offset */
+ if ((params->key_offset & 0x7) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid key offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* f_hash */
+ if (params->f_hash == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void *
+rte_table_hash_create_key32_ext(void *params,
+ int socket_id,
+ uint32_t entry_size)
+{
+ struct rte_table_hash_key32_ext_params *p =
+ (struct rte_table_hash_key32_ext_params *) params;
+ struct rte_table_hash *f;
+ uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket;
+ uint32_t key_size, bucket_size_cl, stack_size_cl, total_size, i;
+
+ /* Check input parameters */
+ if ((check_params_create_ext(p) != 0) ||
+ ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+ ((sizeof(struct rte_bucket_4_32) % RTE_CACHE_LINE_SIZE) != 0))
+ return NULL;
+
+ n_entries_per_bucket = 4;
+ key_size = 32;
+
+ /* Memory allocation */
+ n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
+ n_entries_per_bucket);
+ n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
+ n_entries_per_bucket;
+ bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
+ * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
+ stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
+ / RTE_CACHE_LINE_SIZE;
+ total_size = sizeof(struct rte_table_hash) +
+ ((n_buckets + n_buckets_ext) * bucket_size_cl + stack_size_cl) *
+ RTE_CACHE_LINE_SIZE;
+
+ f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for hash table\n",
+ __func__, total_size);
+ return NULL;
+ }
+ RTE_LOG(INFO, TABLE,
+ "%s: Hash table memory footprint is %u bytes\n", __func__,
+ total_size);
+
+ /* Memory initialization */
+ f->n_buckets = n_buckets;
+ f->n_entries_per_bucket = n_entries_per_bucket;
+ f->key_size = key_size;
+ f->entry_size = entry_size;
+ f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
+ f->signature_offset = p->signature_offset;
+ f->key_offset = p->key_offset;
+ f->f_hash = p->f_hash;
+ f->seed = p->seed;
+
+ f->n_buckets_ext = n_buckets_ext;
+ f->stack_pos = n_buckets_ext;
+ f->stack = (uint32_t *)
+ &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];
+
+ for (i = 0; i < n_buckets_ext; i++)
+ f->stack[i] = i;
+
+ return f;
+}
+
+static int
+rte_table_hash_free_key32_ext(void *table)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+
+ /* Check input parameters */
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_free(f);
+ return 0;
+}
+
+static int
+rte_table_hash_entry_add_key32_ext(
+ void *table,
+ void *key,
+ void *entry,
+ int *key_found,
+ void **entry_ptr)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
+ uint64_t signature;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket0 = (struct rte_bucket_4_32 *)
+ &f->memory[bucket_index * f->bucket_size];
+ signature |= RTE_BUCKET_ENTRY_VALID;
+
+ /* Key is present in the bucket */
+ for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if ((bucket_signature == signature) &&
+ (memcmp(key, bucket_key, f->key_size) == 0)) {
+ uint8_t *bucket_data = &bucket->data[i *
+ f->entry_size];
+
+ memcpy(bucket_data, entry, f->entry_size);
+ *key_found = 1;
+ *entry_ptr = (void *) bucket_data;
+
+ return 0;
+ }
+ }
+ }
+
+ /* Key is not present in the bucket */
+ for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
+ bucket_prev = bucket, bucket = bucket->next)
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if (bucket_signature == 0) {
+ uint8_t *bucket_data = &bucket->data[i *
+ f->entry_size];
+
+ bucket->signature[i] = signature;
+ memcpy(bucket_key, key, f->key_size);
+ memcpy(bucket_data, entry, f->entry_size);
+ *key_found = 0;
+ *entry_ptr = (void *) bucket_data;
+
+ return 0;
+ }
+ }
+
+ /* Bucket full: extend bucket */
+ if (f->stack_pos > 0) {
+ bucket_index = f->stack[--f->stack_pos];
+
+ bucket = (struct rte_bucket_4_32 *)
+ &f->memory[(f->n_buckets + bucket_index) *
+ f->bucket_size];
+ bucket_prev->next = bucket;
+ bucket_prev->next_valid = 1;
+
+ bucket->signature[0] = signature;
+ memcpy(bucket->key[0], key, f->key_size);
+ memcpy(&bucket->data[0], entry, f->entry_size);
+ *key_found = 0;
+ *entry_ptr = (void *) &bucket->data[0];
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static int
+rte_table_hash_entry_delete_key32_ext(
+ void *table,
+ void *key,
+ int *key_found,
+ void *entry)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
+ uint64_t signature;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket0 = (struct rte_bucket_4_32 *)
+ &f->memory[bucket_index * f->bucket_size];
+ signature |= RTE_BUCKET_ENTRY_VALID;
+
+ /* Key is present in the bucket */
+ for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
+ bucket_prev = bucket, bucket = bucket->next)
+ for (i = 0; i < 4; i++) {
+ uint64_t bucket_signature = bucket->signature[i];
+ uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+
+ if ((bucket_signature == signature) &&
+ (memcmp(key, bucket_key, f->key_size) == 0)) {
+ uint8_t *bucket_data = &bucket->data[i *
+ f->entry_size];
+
+ bucket->signature[i] = 0;
+ *key_found = 1;
+ if (entry)
+ memcpy(entry, bucket_data,
+ f->entry_size);
+
+ if ((bucket->signature[0] == 0) &&
+ (bucket->signature[1] == 0) &&
+ (bucket->signature[2] == 0) &&
+ (bucket->signature[3] == 0) &&
+ (bucket_prev != NULL)) {
+ bucket_prev->next = bucket->next;
+ bucket_prev->next_valid =
+ bucket->next_valid;
+
+ memset(bucket, 0,
+ sizeof(struct rte_bucket_4_32));
+ bucket_index = (bucket -
+ ((struct rte_bucket_4_32 *)
+ f->memory)) - f->n_buckets;
+ f->stack[f->stack_pos++] = bucket_index;
+ }
+
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ *key_found = 0;
+ return 0;
+}
+
+#define lookup_key32_cmp(key_in, bucket, pos) \
+{ \
+ uint64_t xor[4][4], or[4], signature[4]; \
+ \
+ signature[0] = ((~bucket->signature[0]) & 1); \
+ signature[1] = ((~bucket->signature[1]) & 1); \
+ signature[2] = ((~bucket->signature[2]) & 1); \
+ signature[3] = ((~bucket->signature[3]) & 1); \
+ \
+ xor[0][0] = key_in[0] ^ bucket->key[0][0]; \
+ xor[0][1] = key_in[1] ^ bucket->key[0][1]; \
+ xor[0][2] = key_in[2] ^ bucket->key[0][2]; \
+ xor[0][3] = key_in[3] ^ bucket->key[0][3]; \
+ \
+ xor[1][0] = key_in[0] ^ bucket->key[1][0]; \
+ xor[1][1] = key_in[1] ^ bucket->key[1][1]; \
+ xor[1][2] = key_in[2] ^ bucket->key[1][2]; \
+ xor[1][3] = key_in[3] ^ bucket->key[1][3]; \
+ \
+ xor[2][0] = key_in[0] ^ bucket->key[2][0]; \
+ xor[2][1] = key_in[1] ^ bucket->key[2][1]; \
+ xor[2][2] = key_in[2] ^ bucket->key[2][2]; \
+ xor[2][3] = key_in[3] ^ bucket->key[2][3]; \
+ \
+ xor[3][0] = key_in[0] ^ bucket->key[3][0]; \
+ xor[3][1] = key_in[1] ^ bucket->key[3][1]; \
+ xor[3][2] = key_in[2] ^ bucket->key[3][2]; \
+ xor[3][3] = key_in[3] ^ bucket->key[3][3]; \
+ \
+ or[0] = xor[0][0] | xor[0][1] | xor[0][2] | xor[0][3] | signature[0];\
+ or[1] = xor[1][0] | xor[1][1] | xor[1][2] | xor[1][3] | signature[1];\
+ or[2] = xor[2][0] | xor[2][1] | xor[2][2] | xor[2][3] | signature[2];\
+ or[3] = xor[3][0] | xor[3][1] | xor[3][2] | xor[3][3] | signature[3];\
+ \
+ pos = 4; \
+ if (or[0] == 0) \
+ pos = 0; \
+ if (or[1] == 0) \
+ pos = 1; \
+ if (or[2] == 0) \
+ pos = 2; \
+ if (or[3] == 0) \
+ pos = 3; \
+}
+
+#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask) \
+{ \
+ uint64_t pkt_mask; \
+ \
+ pkt0_index = __builtin_ctzll(pkts_mask); \
+ pkt_mask = 1LLU << pkt0_index; \
+ pkts_mask &= ~pkt_mask; \
+ \
+ mbuf0 = pkts[pkt0_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, 0)); \
+}
+
+#define lookup1_stage1(mbuf1, bucket1, f) \
+{ \
+ uint64_t signature; \
+ uint32_t bucket_index; \
+ \
+ signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
+ bucket_index = signature & (f->n_buckets - 1); \
+ bucket1 = (struct rte_bucket_4_32 *) \
+ &f->memory[bucket_index * f->bucket_size]; \
+ rte_prefetch0(bucket1); \
+ rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
+ rte_prefetch0((void *)(((uintptr_t) bucket1) + 2 * RTE_CACHE_LINE_SIZE));\
+}
+
+#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
+ pkts_mask_out, entries, f) \
+{ \
+ void *a; \
+ uint64_t pkt_mask; \
+ uint64_t *key; \
+ uint32_t pos; \
+ \
+ key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
+ \
+ lookup_key32_cmp(key, bucket2, pos); \
+ \
+ pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
+ pkts_mask_out |= pkt_mask; \
+ \
+ a = (void *) &bucket2->data[pos * f->entry_size]; \
+ rte_prefetch0(a); \
+ entries[pkt2_index] = a; \
+ lru_update(bucket2, pos); \
+}
+
+#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out,\
+ entries, buckets_mask, buckets, keys, f) \
+{ \
+ struct rte_bucket_4_32 *bucket_next; \
+ void *a; \
+ uint64_t pkt_mask, bucket_mask; \
+ uint64_t *key; \
+ uint32_t pos; \
+ \
+ key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
+ \
+ lookup_key32_cmp(key, bucket2, pos); \
+ \
+ pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
+ pkts_mask_out |= pkt_mask; \
+ \
+ a = (void *) &bucket2->data[pos * f->entry_size]; \
+ rte_prefetch0(a); \
+ entries[pkt2_index] = a; \
+ \
+ bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
+ buckets_mask |= bucket_mask; \
+ bucket_next = bucket2->next; \
+ buckets[pkt2_index] = bucket_next; \
+ keys[pkt2_index] = key; \
+}
+
+#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, \
+ entries, buckets_mask, f) \
+{ \
+ struct rte_bucket_4_32 *bucket, *bucket_next; \
+ void *a; \
+ uint64_t pkt_mask, bucket_mask; \
+ uint64_t *key; \
+ uint32_t pos; \
+ \
+ bucket = buckets[pkt_index]; \
+ key = keys[pkt_index]; \
+ \
+ lookup_key32_cmp(key, bucket, pos); \
+ \
+ pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
+ pkts_mask_out |= pkt_mask; \
+ \
+ a = (void *) &bucket->data[pos * f->entry_size]; \
+ rte_prefetch0(a); \
+ entries[pkt_index] = a; \
+ \
+ bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
+ buckets_mask |= bucket_mask; \
+ bucket_next = bucket->next; \
+ rte_prefetch0(bucket_next); \
+ rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
+ rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
+ 2 * RTE_CACHE_LINE_SIZE)); \
+ buckets[pkt_index] = bucket_next; \
+ keys[pkt_index] = key; \
+}
+
+#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
+ pkts, pkts_mask) \
+{ \
+ uint64_t pkt00_mask, pkt01_mask; \
+ \
+ pkt00_index = __builtin_ctzll(pkts_mask); \
+ pkt00_mask = 1LLU << pkt00_index; \
+ pkts_mask &= ~pkt00_mask; \
+ \
+ mbuf00 = pkts[pkt00_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
+ \
+ pkt01_index = __builtin_ctzll(pkts_mask); \
+ pkt01_mask = 1LLU << pkt01_index; \
+ pkts_mask &= ~pkt01_mask; \
+ \
+ mbuf01 = pkts[pkt01_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
+}
+
+#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
+ mbuf00, mbuf01, pkts, pkts_mask) \
+{ \
+ uint64_t pkt00_mask, pkt01_mask; \
+ \
+ pkt00_index = __builtin_ctzll(pkts_mask); \
+ pkt00_mask = 1LLU << pkt00_index; \
+ pkts_mask &= ~pkt00_mask; \
+ \
+ mbuf00 = pkts[pkt00_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
+ \
+ pkt01_index = __builtin_ctzll(pkts_mask); \
+ if (pkts_mask == 0) \
+ pkt01_index = pkt00_index; \
+ \
+ pkt01_mask = 1LLU << pkt01_index; \
+ pkts_mask &= ~pkt01_mask; \
+ \
+ mbuf01 = pkts[pkt01_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
+}
+
+#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
+{ \
+ uint64_t signature10, signature11; \
+ uint32_t bucket10_index, bucket11_index; \
+ \
+ signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
+ bucket10_index = signature10 & (f->n_buckets - 1); \
+ bucket10 = (struct rte_bucket_4_32 *) \
+ &f->memory[bucket10_index * f->bucket_size]; \
+ rte_prefetch0(bucket10); \
+ rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
+ rte_prefetch0((void *)(((uintptr_t) bucket10) + 2 * RTE_CACHE_LINE_SIZE));\
+ \
+ signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
+ bucket11_index = signature11 & (f->n_buckets - 1); \
+ bucket11 = (struct rte_bucket_4_32 *) \
+ &f->memory[bucket11_index * f->bucket_size]; \
+ rte_prefetch0(bucket11); \
+ rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
+ rte_prefetch0((void *)(((uintptr_t) bucket11) + 2 * RTE_CACHE_LINE_SIZE));\
+}
+
+#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
+ bucket20, bucket21, pkts_mask_out, entries, f) \
+{ \
+ void *a20, *a21; \
+ uint64_t pkt20_mask, pkt21_mask; \
+ uint64_t *key20, *key21; \
+ uint32_t pos20, pos21; \
+ \
+ key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
+ key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
+ \
+ lookup_key32_cmp(key20, bucket20, pos20); \
+ lookup_key32_cmp(key21, bucket21, pos21); \
+ \
+ pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
+ pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
+ pkts_mask_out |= pkt20_mask | pkt21_mask; \
+ \
+ a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
+ a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
+ rte_prefetch0(a20); \
+ rte_prefetch0(a21); \
+ entries[pkt20_index] = a20; \
+ entries[pkt21_index] = a21; \
+ lru_update(bucket20, pos20); \
+ lru_update(bucket21, pos21); \
+}
+
+#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
+ bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f)\
+{ \
+ struct rte_bucket_4_32 *bucket20_next, *bucket21_next; \
+ void *a20, *a21; \
+ uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
+ uint64_t *key20, *key21; \
+ uint32_t pos20, pos21; \
+ \
+ key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
+ key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
+ \
+ lookup_key32_cmp(key20, bucket20, pos20); \
+ lookup_key32_cmp(key21, bucket21, pos21); \
+ \
+ pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
+ pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
+ pkts_mask_out |= pkt20_mask | pkt21_mask; \
+ \
+ a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
+ a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
+ rte_prefetch0(a20); \
+ rte_prefetch0(a21); \
+ entries[pkt20_index] = a20; \
+ entries[pkt21_index] = a21; \
+ \
+ bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
+ bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
+ buckets_mask |= bucket20_mask | bucket21_mask; \
+ bucket20_next = bucket20->next; \
+ bucket21_next = bucket21->next; \
+ buckets[pkt20_index] = bucket20_next; \
+ buckets[pkt21_index] = bucket21_next; \
+ keys[pkt20_index] = key20; \
+ keys[pkt21_index] = key21; \
+}
+
+static int
+rte_table_hash_lookup_key32_lru(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
+ struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
+ uint32_t pkt00_index, pkt01_index, pkt10_index;
+ uint32_t pkt11_index, pkt20_index, pkt21_index;
+ uint64_t pkts_mask_out = 0;
+
+ /* Cannot run the pipeline with less than 5 packets */
+ if (__builtin_popcountll(pkts_mask) < 5) {
+ for ( ; pkts_mask; ) {
+ struct rte_bucket_4_32 *bucket;
+ struct rte_mbuf *mbuf;
+ uint32_t pkt_index;
+
+ lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
+ lookup1_stage1(mbuf, bucket, f);
+ lookup1_stage2_lru(pkt_index, mbuf, bucket,
+ pkts_mask_out, entries, f);
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+ }
+
+ /*
+ * Pipeline fill
+ *
+ */
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline feed */
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /*
+ * Pipeline run
+ *
+ */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
+ mbuf00, mbuf01, pkts, pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index,
+ mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out,
+ entries, f);
+ }
+
+ /*
+ * Pipeline flush
+ *
+ */
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index,
+ mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);
+
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index,
+ mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+} /* rte_table_hash_lookup_key32_lru() */
+
+static int
+rte_table_hash_lookup_key32_ext(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
+ struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
+ uint32_t pkt00_index, pkt01_index, pkt10_index;
+ uint32_t pkt11_index, pkt20_index, pkt21_index;
+ uint64_t pkts_mask_out = 0, buckets_mask = 0;
+ struct rte_bucket_4_32 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
+
+ /* Cannot run the pipeline with less than 5 packets */
+ if (__builtin_popcountll(pkts_mask) < 5) {
+ for ( ; pkts_mask; ) {
+ struct rte_bucket_4_32 *bucket;
+ struct rte_mbuf *mbuf;
+ uint32_t pkt_index;
+
+ lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
+ lookup1_stage1(mbuf, bucket, f);
+ lookup1_stage2_ext(pkt_index, mbuf, bucket,
+ pkts_mask_out, entries, buckets_mask, buckets,
+ keys, f);
+ }
+
+ goto grind_next_buckets;
+ }
+
+ /*
+ * Pipeline fill
+ *
+ */
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline feed */
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /*
+ * Pipeline run
+ *
+ */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
+ mbuf00, mbuf01, pkts, pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+ }
+
+ /*
+ * Pipeline flush
+ *
+ */
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+
+grind_next_buckets:
+ /* Grind next buckets */
+ for ( ; buckets_mask; ) {
+ uint64_t buckets_mask_next = 0;
+
+ for ( ; buckets_mask; ) {
+ uint64_t pkt_mask;
+ uint32_t pkt_index;
+
+ pkt_index = __builtin_ctzll(buckets_mask);
+ pkt_mask = 1LLU << pkt_index;
+ buckets_mask &= ~pkt_mask;
+
+ lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
+ entries, buckets_mask_next, f);
+ }
+
+ buckets_mask = buckets_mask_next;
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+} /* rte_table_hash_lookup_key32_ext() */
+
+struct rte_table_ops rte_table_hash_key32_lru_ops = {
+ .f_create = rte_table_hash_create_key32_lru,
+ .f_free = rte_table_hash_free_key32_lru,
+ .f_add = rte_table_hash_entry_add_key32_lru,
+ .f_delete = rte_table_hash_entry_delete_key32_lru,
+ .f_lookup = rte_table_hash_lookup_key32_lru,
+};
+
+struct rte_table_ops rte_table_hash_key32_ext_ops = {
+ .f_create = rte_table_hash_create_key32_ext,
+ .f_free = rte_table_hash_free_key32_ext,
+ .f_add = rte_table_hash_entry_add_key32_ext,
+ .f_delete = rte_table_hash_entry_delete_key32_ext,
+ .f_lookup = rte_table_hash_lookup_key32_ext,
+};
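+
+/*
+ * Illustrative usage sketch: a client picks one of the ops tables above and
+ * drives it through the generic rte_table_ops interface. The parameter
+ * field names below follow the pattern of the create/check functions in
+ * this library (n_entries, f_hash, seed, signature_offset, key_offset) and
+ * are an assumption, not authoritative API documentation; signature_offset
+ * must be 4-byte aligned and key_offset 8-byte aligned.
+ *
+ *	struct rte_table_hash_key32_lru_params p = {
+ *		.n_entries = 1 << 16,
+ *		.f_hash = app_hash_fn,
+ *		.seed = 0,
+ *		.signature_offset = 0,
+ *		.key_offset = 32,
+ *	};
+ *	void *tbl = rte_table_hash_key32_lru_ops.f_create(&p, socket_id,
+ *		entry_size);
+ *	uint64_t hit_mask;
+ *	void *entries[RTE_PORT_IN_BURST_SIZE_MAX];
+ *	rte_table_hash_key32_lru_ops.f_lookup(tbl, pkts, pkts_mask,
+ *		&hit_mask, entries);
+ */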
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash_key8.c b/src/dpdk_lib18/librte_table/rte_table_hash_key8.c
new file mode 100755
index 00000000..443ca7da
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_hash_key8.c
@@ -0,0 +1,1399 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include "rte_table_hash.h"
+#include "rte_lru.h"
+
+#define RTE_TABLE_HASH_KEY_SIZE 8
+
+struct rte_bucket_4_8 {
+ /* Cache line 0 */
+ uint64_t signature;
+ uint64_t lru_list;
+ struct rte_bucket_4_8 *next;
+ uint64_t next_valid;
+
+ uint64_t key[4];
+
+ /* Cache line 1 */
+ uint8_t data[0];
+};
+
+struct rte_table_hash {
+ /* Input parameters */
+ uint32_t n_buckets;
+ uint32_t n_entries_per_bucket;
+ uint32_t key_size;
+ uint32_t entry_size;
+ uint32_t bucket_size;
+ uint32_t signature_offset;
+ uint32_t key_offset;
+ rte_table_hash_op_hash f_hash;
+ uint64_t seed;
+
+ /* Extendible buckets */
+ uint32_t n_buckets_ext;
+ uint32_t stack_pos;
+ uint32_t *stack;
+
+ /* Lookup table */
+ uint8_t memory[0] __rte_cache_aligned;
+};
+
+static int
+check_params_create_lru(struct rte_table_hash_key8_lru_params *params) {
+ /* n_entries */
+ if (params->n_entries == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* signature offset */
+ if ((params->signature_offset & 0x3) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid signature_offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* key offset */
+ if ((params->key_offset & 0x7) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid key_offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* f_hash */
+ if (params->f_hash == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void *
+rte_table_hash_create_key8_lru(void *params, int socket_id, uint32_t entry_size)
+{
+ struct rte_table_hash_key8_lru_params *p =
+ (struct rte_table_hash_key8_lru_params *) params;
+ struct rte_table_hash *f;
+ uint32_t n_buckets, n_entries_per_bucket, key_size, bucket_size_cl;
+ uint32_t total_size, i;
+
+ /* Check input parameters */
+ if ((check_params_create_lru(p) != 0) ||
+ ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+ ((sizeof(struct rte_bucket_4_8) % RTE_CACHE_LINE_SIZE) != 0)) {
+ return NULL;
+ }
+ n_entries_per_bucket = 4;
+ key_size = 8;
+
+ /* Memory allocation */
+ n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
+ n_entries_per_bucket);
+ bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
+ entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
+ total_size = sizeof(struct rte_table_hash) + n_buckets *
+ bucket_size_cl * RTE_CACHE_LINE_SIZE;
+
+ f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for hash table\n",
+ __func__, total_size);
+ return NULL;
+ }
+ RTE_LOG(INFO, TABLE,
+ "%s: Hash table memory footprint is %u bytes\n",
+ __func__, total_size);
+
+ /* Memory initialization */
+ f->n_buckets = n_buckets;
+ f->n_entries_per_bucket = n_entries_per_bucket;
+ f->key_size = key_size;
+ f->entry_size = entry_size;
+ f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
+ f->signature_offset = p->signature_offset;
+ f->key_offset = p->key_offset;
+ f->f_hash = p->f_hash;
+ f->seed = p->seed;
+
+ for (i = 0; i < n_buckets; i++) {
+ struct rte_bucket_4_8 *bucket;
+
+ bucket = (struct rte_bucket_4_8 *) &f->memory[i *
+ f->bucket_size];
+ bucket->lru_list = 0x0000000100020003LLU;
+ }
+
+ return f;
+}
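+
+/*
+ * Worked sizing example for the create function above (illustrative values,
+ * assuming a 64-byte RTE_CACHE_LINE_SIZE): with p->n_entries = 1000 and
+ * entry_size = 16,
+ *
+ *	n_buckets      = rte_align32pow2((1000 + 3) / 4) = 256
+ *	bucket_size_cl = (sizeof(struct rte_bucket_4_8) + 4 * 16 + 63) / 64
+ *	               = (64 + 64 + 63) / 64 = 2 cache lines per bucket
+ *	total_size     = sizeof(struct rte_table_hash) + 256 * 2 * 64 bytes
+ *
+ * The lru_list seed value 0x0000000100020003 packs the four entry indices
+ * 0..3 into 16-bit fields, giving every bucket a fully ordered LRU list for
+ * lru_pos()/lru_update() (rte_lru.h) to maintain on each hit.
+ */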
+
+static int
+rte_table_hash_free_key8_lru(void *table)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+
+ /* Check input parameters */
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_free(f);
+ return 0;
+}
+
+static int
+rte_table_hash_entry_add_key8_lru(
+ void *table,
+ void *key,
+ void *entry,
+ int *key_found,
+ void **entry_ptr)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_8 *bucket;
+ uint64_t signature, mask, pos;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket = (struct rte_bucket_4_8 *)
+ &f->memory[bucket_index * f->bucket_size];
+
+ /* Key is present in the bucket */
+ for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
+ uint64_t bucket_signature = bucket->signature;
+ uint64_t bucket_key = bucket->key[i];
+
+ if ((bucket_signature & mask) &&
+ (*((uint64_t *) key) == bucket_key)) {
+ uint8_t *bucket_data = &bucket->data[i * f->entry_size];
+
+ memcpy(bucket_data, entry, f->entry_size);
+ lru_update(bucket, i);
+ *key_found = 1;
+ *entry_ptr = (void *) bucket_data;
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
+ uint64_t bucket_signature = bucket->signature;
+
+ if ((bucket_signature & mask) == 0) {
+ uint8_t *bucket_data = &bucket->data[i * f->entry_size];
+
+ bucket->signature |= mask;
+ bucket->key[i] = *((uint64_t *) key);
+ memcpy(bucket_data, entry, f->entry_size);
+ lru_update(bucket, i);
+ *key_found = 0;
+ *entry_ptr = (void *) bucket_data;
+
+ return 0;
+ }
+ }
+
+ /* Bucket full: replace LRU entry */
+ pos = lru_pos(bucket);
+ bucket->key[pos] = *((uint64_t *) key);
+ memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
+ lru_update(bucket, pos);
+ *key_found = 0;
+ *entry_ptr = (void *) &bucket->data[pos * f->entry_size];
+
+ return 0;
+}
+
+static int
+rte_table_hash_entry_delete_key8_lru(
+ void *table,
+ void *key,
+ int *key_found,
+ void *entry)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_8 *bucket;
+ uint64_t signature, mask;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket = (struct rte_bucket_4_8 *)
+ &f->memory[bucket_index * f->bucket_size];
+
+ /* Key is present in the bucket */
+ for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
+ uint64_t bucket_signature = bucket->signature;
+ uint64_t bucket_key = bucket->key[i];
+
+ if ((bucket_signature & mask) &&
+ (*((uint64_t *) key) == bucket_key)) {
+ uint8_t *bucket_data = &bucket->data[i * f->entry_size];
+
+ bucket->signature &= ~mask;
+ *key_found = 1;
+ if (entry)
+ memcpy(entry, bucket_data, f->entry_size);
+
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ *key_found = 0;
+ return 0;
+}
+
+static int
+check_params_create_ext(struct rte_table_hash_key8_ext_params *params) {
+ /* n_entries */
+ if (params->n_entries == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_entries_ext */
+ if (params->n_entries_ext == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* signature offset */
+ if ((params->signature_offset & 0x3) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid signature_offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* key offset */
+ if ((params->key_offset & 0x7) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: invalid key_offset\n", __func__);
+ return -EINVAL;
+ }
+
+ /* f_hash */
+ if (params->f_hash == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void *
+rte_table_hash_create_key8_ext(void *params, int socket_id, uint32_t entry_size)
+{
+ struct rte_table_hash_key8_ext_params *p =
+ (struct rte_table_hash_key8_ext_params *) params;
+ struct rte_table_hash *f;
+ uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket, key_size;
+ uint32_t bucket_size_cl, stack_size_cl, total_size, i;
+
+ /* Check input parameters */
+ if ((check_params_create_ext(p) != 0) ||
+ ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+ ((sizeof(struct rte_bucket_4_8) % RTE_CACHE_LINE_SIZE) != 0))
+ return NULL;
+
+ n_entries_per_bucket = 4;
+ key_size = 8;
+
+ /* Memory allocation */
+ n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
+ n_entries_per_bucket);
+ n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
+ n_entries_per_bucket;
+ bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
+ entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
+ stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
+ / RTE_CACHE_LINE_SIZE;
+ total_size = sizeof(struct rte_table_hash) + ((n_buckets +
+ n_buckets_ext) * bucket_size_cl + stack_size_cl) *
+ RTE_CACHE_LINE_SIZE;
+
+ f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for hash table\n",
+ __func__, total_size);
+ return NULL;
+ }
+ RTE_LOG(INFO, TABLE,
+ "%s: Hash table memory footprint is %u bytes\n",
+ __func__, total_size);
+
+ /* Memory initialization */
+ f->n_buckets = n_buckets;
+ f->n_entries_per_bucket = n_entries_per_bucket;
+ f->key_size = key_size;
+ f->entry_size = entry_size;
+ f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
+ f->signature_offset = p->signature_offset;
+ f->key_offset = p->key_offset;
+ f->f_hash = p->f_hash;
+ f->seed = p->seed;
+
+ f->n_buckets_ext = n_buckets_ext;
+ f->stack_pos = n_buckets_ext;
+ f->stack = (uint32_t *)
+ &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];
+
+ for (i = 0; i < n_buckets_ext; i++)
+ f->stack[i] = i;
+
+ return f;
+}
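+
+/*
+ * Memory layout produced by the extendible-bucket create function above
+ * (sketch): one flat allocation holds, in order, the table header, the
+ * n_buckets primary buckets, the n_buckets_ext extension buckets, and the
+ * free-index stack:
+ *
+ *	[rte_table_hash][bucket 0 .. n_buckets-1][ext 0 .. n_buckets_ext-1][stack]
+ *
+ * The stack is preloaded with every extension bucket index, so
+ * stack_pos == n_buckets_ext means "all extension buckets free".
+ */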
+
+static int
+rte_table_hash_free_key8_ext(void *table)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+
+ /* Check input parameters */
+ if (f == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_free(f);
+ return 0;
+}
+
+static int
+rte_table_hash_entry_add_key8_ext(
+ void *table,
+ void *key,
+ void *entry,
+ int *key_found,
+ void **entry_ptr)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_8 *bucket0, *bucket, *bucket_prev;
+ uint64_t signature;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket0 = (struct rte_bucket_4_8 *)
+ &f->memory[bucket_index * f->bucket_size];
+
+ /* Key is present in the bucket */
+ for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
+ uint64_t mask;
+
+ for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
+ uint64_t bucket_signature = bucket->signature;
+ uint64_t bucket_key = bucket->key[i];
+
+ if ((bucket_signature & mask) &&
+ (*((uint64_t *) key) == bucket_key)) {
+ uint8_t *bucket_data = &bucket->data[i *
+ f->entry_size];
+
+ memcpy(bucket_data, entry, f->entry_size);
+ *key_found = 1;
+ *entry_ptr = (void *) bucket_data;
+ return 0;
+ }
+ }
+ }
+
+ /* Key is not present in the bucket */
+ for (bucket_prev = NULL, bucket = bucket0;
+ bucket != NULL; bucket_prev = bucket, bucket = bucket->next) {
+ uint64_t mask;
+
+ for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
+ uint64_t bucket_signature = bucket->signature;
+
+ if ((bucket_signature & mask) == 0) {
+ uint8_t *bucket_data = &bucket->data[i *
+ f->entry_size];
+
+ bucket->signature |= mask;
+ bucket->key[i] = *((uint64_t *) key);
+ memcpy(bucket_data, entry, f->entry_size);
+ *key_found = 0;
+ *entry_ptr = (void *) bucket_data;
+
+ return 0;
+ }
+ }
+ }
+
+ /* Bucket full: extend bucket */
+ if (f->stack_pos > 0) {
+ bucket_index = f->stack[--f->stack_pos];
+
+ bucket = (struct rte_bucket_4_8 *) &f->memory[(f->n_buckets +
+ bucket_index) * f->bucket_size];
+ bucket_prev->next = bucket;
+ bucket_prev->next_valid = 1;
+
+ bucket->signature = 1;
+ bucket->key[0] = *((uint64_t *) key);
+ memcpy(&bucket->data[0], entry, f->entry_size);
+ *key_found = 0;
+ *entry_ptr = (void *) &bucket->data[0];
+ return 0;
+ }
+
+ return -ENOSPC;
+}
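+
+/*
+ * Illustrative trace of the bucket-extension path above: if a primary
+ * bucket already holds 4 valid keys, the add pops a free extension bucket
+ * index from f->stack (say index 7), links it behind the primary bucket via
+ * next/next_valid, and stores the new key in slot 0 of that extension
+ * bucket (signature = 1, i.e. only bit 0 set). Once every extension bucket
+ * has been handed out (stack_pos == 0), the add fails with -ENOSPC.
+ */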
+
+static int
+rte_table_hash_entry_delete_key8_ext(
+ void *table,
+ void *key,
+ int *key_found,
+ void *entry)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_8 *bucket0, *bucket, *bucket_prev;
+ uint64_t signature;
+ uint32_t bucket_index, i;
+
+ signature = f->f_hash(key, f->key_size, f->seed);
+ bucket_index = signature & (f->n_buckets - 1);
+ bucket0 = (struct rte_bucket_4_8 *)
+ &f->memory[bucket_index * f->bucket_size];
+
+ /* Key is present in the bucket */
+ for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
+ bucket_prev = bucket, bucket = bucket->next) {
+ uint64_t mask;
+
+ for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
+ uint64_t bucket_signature = bucket->signature;
+ uint64_t bucket_key = bucket->key[i];
+
+ if ((bucket_signature & mask) &&
+ (*((uint64_t *) key) == bucket_key)) {
+ uint8_t *bucket_data = &bucket->data[i *
+ f->entry_size];
+
+ bucket->signature &= ~mask;
+ *key_found = 1;
+ if (entry)
+ memcpy(entry, bucket_data,
+ f->entry_size);
+
+ if ((bucket->signature == 0) &&
+ (bucket_prev != NULL)) {
+ bucket_prev->next = bucket->next;
+ bucket_prev->next_valid =
+ bucket->next_valid;
+
+ memset(bucket, 0,
+ sizeof(struct rte_bucket_4_8));
+ bucket_index = (bucket -
+ ((struct rte_bucket_4_8 *)
+ f->memory)) - f->n_buckets;
+ f->stack[f->stack_pos++] = bucket_index;
+ }
+
+ return 0;
+ }
+ }
+ }
+
+ /* Key is not present in the bucket */
+ *key_found = 0;
+ return 0;
+}
+
+#define lookup_key8_cmp(key_in, bucket, pos) \
+{ \
+ uint64_t xor[4], signature; \
+ \
+ signature = ~bucket->signature; \
+ \
+ xor[0] = (key_in[0] ^ bucket->key[0]) | (signature & 1);\
+ xor[1] = (key_in[0] ^ bucket->key[1]) | (signature & 2);\
+ xor[2] = (key_in[0] ^ bucket->key[2]) | (signature & 4);\
+ xor[3] = (key_in[0] ^ bucket->key[3]) | (signature & 8);\
+ \
+ pos = 4; \
+ if (xor[0] == 0) \
+ pos = 0; \
+ if (xor[1] == 0) \
+ pos = 1; \
+ if (xor[2] == 0) \
+ pos = 2; \
+ if (xor[3] == 0) \
+ pos = 3; \
+}
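+
+/*
+ * How lookup_key8_cmp() rejects empty slots (worked example): with
+ * bucket->signature = 0x5 (slots 0 and 2 valid), signature = ~0x5 has bits
+ * 1 and 3 set, so xor[1] and xor[3] are forced non-zero by the
+ * "| (signature & bit)" term regardless of the key bytes. Only valid slots
+ * whose 8-byte key equals key_in[0] can produce xor[i] == 0 and win the
+ * branchless pos selection at the end of the macro.
+ */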
+
+#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask) \
+{ \
+ uint64_t pkt_mask; \
+ \
+ pkt0_index = __builtin_ctzll(pkts_mask); \
+ pkt_mask = 1LLU << pkt0_index; \
+ pkts_mask &= ~pkt_mask; \
+ \
+ mbuf0 = pkts[pkt0_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, 0)); \
+}
+
+#define lookup1_stage1(mbuf1, bucket1, f) \
+{ \
+ uint64_t signature; \
+ uint32_t bucket_index; \
+ \
+ signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
+ bucket_index = signature & (f->n_buckets - 1); \
+ bucket1 = (struct rte_bucket_4_8 *) \
+ &f->memory[bucket_index * f->bucket_size]; \
+ rte_prefetch0(bucket1); \
+}
+
+#define lookup1_stage1_dosig(mbuf1, bucket1, f) \
+{ \
+ uint64_t *key; \
+ uint64_t signature; \
+ uint32_t bucket_index; \
+ \
+ key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset);\
+ signature = f->f_hash(key, RTE_TABLE_HASH_KEY_SIZE, f->seed);\
+ bucket_index = signature & (f->n_buckets - 1); \
+ bucket1 = (struct rte_bucket_4_8 *) \
+ &f->memory[bucket_index * f->bucket_size]; \
+ rte_prefetch0(bucket1); \
+}
+
+#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
+ pkts_mask_out, entries, f) \
+{ \
+ void *a; \
+ uint64_t pkt_mask; \
+ uint64_t *key; \
+ uint32_t pos; \
+ \
+ key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
+ \
+ lookup_key8_cmp(key, bucket2, pos); \
+ \
+ pkt_mask = ((bucket2->signature >> pos) & 1LLU) << pkt2_index;\
+ pkts_mask_out |= pkt_mask; \
+ \
+ a = (void *) &bucket2->data[pos * f->entry_size]; \
+ rte_prefetch0(a); \
+ entries[pkt2_index] = a; \
+ lru_update(bucket2, pos); \
+}
+
+#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out,\
+ entries, buckets_mask, buckets, keys, f) \
+{ \
+ struct rte_bucket_4_8 *bucket_next; \
+ void *a; \
+ uint64_t pkt_mask, bucket_mask; \
+ uint64_t *key; \
+ uint32_t pos; \
+ \
+ key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
+ \
+ lookup_key8_cmp(key, bucket2, pos); \
+ \
+ pkt_mask = ((bucket2->signature >> pos) & 1LLU) << pkt2_index;\
+ pkts_mask_out |= pkt_mask; \
+ \
+ a = (void *) &bucket2->data[pos * f->entry_size]; \
+ rte_prefetch0(a); \
+ entries[pkt2_index] = a; \
+ \
+ bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
+ buckets_mask |= bucket_mask; \
+ bucket_next = bucket2->next; \
+ buckets[pkt2_index] = bucket_next; \
+ keys[pkt2_index] = key; \
+}
+
+#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, entries,\
+ buckets_mask, f) \
+{ \
+ struct rte_bucket_4_8 *bucket, *bucket_next; \
+ void *a; \
+ uint64_t pkt_mask, bucket_mask; \
+ uint64_t *key; \
+ uint32_t pos; \
+ \
+ bucket = buckets[pkt_index]; \
+ key = keys[pkt_index]; \
+ \
+ lookup_key8_cmp(key, bucket, pos); \
+ \
+ pkt_mask = ((bucket->signature >> pos) & 1LLU) << pkt_index;\
+ pkts_mask_out |= pkt_mask; \
+ \
+ a = (void *) &bucket->data[pos * f->entry_size]; \
+ rte_prefetch0(a); \
+ entries[pkt_index] = a; \
+ \
+ bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
+ buckets_mask |= bucket_mask; \
+ bucket_next = bucket->next; \
+ rte_prefetch0(bucket_next); \
+ buckets[pkt_index] = bucket_next; \
+ keys[pkt_index] = key; \
+}
+
+#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
+ pkts, pkts_mask) \
+{ \
+ uint64_t pkt00_mask, pkt01_mask; \
+ \
+ pkt00_index = __builtin_ctzll(pkts_mask); \
+ pkt00_mask = 1LLU << pkt00_index; \
+ pkts_mask &= ~pkt00_mask; \
+ \
+ mbuf00 = pkts[pkt00_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
+ \
+ pkt01_index = __builtin_ctzll(pkts_mask); \
+ pkt01_mask = 1LLU << pkt01_index; \
+ pkts_mask &= ~pkt01_mask; \
+ \
+ mbuf01 = pkts[pkt01_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
+}
+
+#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
+ mbuf00, mbuf01, pkts, pkts_mask) \
+{ \
+ uint64_t pkt00_mask, pkt01_mask; \
+ \
+ pkt00_index = __builtin_ctzll(pkts_mask); \
+ pkt00_mask = 1LLU << pkt00_index; \
+ pkts_mask &= ~pkt00_mask; \
+ \
+ mbuf00 = pkts[pkt00_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
+ \
+ pkt01_index = __builtin_ctzll(pkts_mask); \
+ if (pkts_mask == 0) \
+ pkt01_index = pkt00_index; \
+ \
+ pkt01_mask = 1LLU << pkt01_index; \
+ pkts_mask &= ~pkt01_mask; \
+ \
+ mbuf01 = pkts[pkt01_index]; \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
+}
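+
+/*
+ * Note on the _with_odd_support variant above (sketch): when only one
+ * packet remains in pkts_mask, pkt01_index is redirected to pkt00_index,
+ * so the same mbuf is fed through both lanes of the 2-wide pipeline stage.
+ * The duplicate lookup is harmless because the per-packet result is simply
+ * written to the same entries[] slot and pkts_mask_out bit twice.
+ */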
+
+#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
+{ \
+ uint64_t signature10, signature11; \
+ uint32_t bucket10_index, bucket11_index; \
+ \
+ signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
+ bucket10_index = signature10 & (f->n_buckets - 1); \
+ bucket10 = (struct rte_bucket_4_8 *) \
+ &f->memory[bucket10_index * f->bucket_size]; \
+ rte_prefetch0(bucket10); \
+ \
+ signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
+ bucket11_index = signature11 & (f->n_buckets - 1); \
+ bucket11 = (struct rte_bucket_4_8 *) \
+ &f->memory[bucket11_index * f->bucket_size]; \
+ rte_prefetch0(bucket11); \
+}
+
+#define lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f)\
+{ \
+ uint64_t *key10, *key11; \
+ uint64_t signature10, signature11; \
+ uint32_t bucket10_index, bucket11_index; \
+ rte_table_hash_op_hash f_hash = f->f_hash; \
+ uint64_t seed = f->seed; \
+ uint32_t key_offset = f->key_offset; \
+ \
+ key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, key_offset);\
+ key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, key_offset);\
+ \
+ signature10 = f_hash(key10, RTE_TABLE_HASH_KEY_SIZE, seed);\
+ bucket10_index = signature10 & (f->n_buckets - 1); \
+ bucket10 = (struct rte_bucket_4_8 *) \
+ &f->memory[bucket10_index * f->bucket_size]; \
+ rte_prefetch0(bucket10); \
+ \
+ signature11 = f_hash(key11, RTE_TABLE_HASH_KEY_SIZE, seed);\
+ bucket11_index = signature11 & (f->n_buckets - 1); \
+ bucket11 = (struct rte_bucket_4_8 *) \
+ &f->memory[bucket11_index * f->bucket_size]; \
+ rte_prefetch0(bucket11); \
+}
+
+#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
+ bucket20, bucket21, pkts_mask_out, entries, f) \
+{ \
+ void *a20, *a21; \
+ uint64_t pkt20_mask, pkt21_mask; \
+ uint64_t *key20, *key21; \
+ uint32_t pos20, pos21; \
+ \
+ key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
+ key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
+ \
+ lookup_key8_cmp(key20, bucket20, pos20); \
+ lookup_key8_cmp(key21, bucket21, pos21); \
+ \
+ pkt20_mask = ((bucket20->signature >> pos20) & 1LLU) << pkt20_index;\
+ pkt21_mask = ((bucket21->signature >> pos21) & 1LLU) << pkt21_index;\
+ pkts_mask_out |= pkt20_mask | pkt21_mask; \
+ \
+ a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
+ a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
+ rte_prefetch0(a20); \
+ rte_prefetch0(a21); \
+ entries[pkt20_index] = a20; \
+ entries[pkt21_index] = a21; \
+ lru_update(bucket20, pos20); \
+ lru_update(bucket21, pos21); \
+}
+
+#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
+ bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f)\
+{ \
+ struct rte_bucket_4_8 *bucket20_next, *bucket21_next; \
+ void *a20, *a21; \
+ uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
+ uint64_t *key20, *key21; \
+ uint32_t pos20, pos21; \
+ \
+ key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
+ key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
+ \
+ lookup_key8_cmp(key20, bucket20, pos20); \
+ lookup_key8_cmp(key21, bucket21, pos21); \
+ \
+ pkt20_mask = ((bucket20->signature >> pos20) & 1LLU) << pkt20_index;\
+ pkt21_mask = ((bucket21->signature >> pos21) & 1LLU) << pkt21_index;\
+ pkts_mask_out |= pkt20_mask | pkt21_mask; \
+ \
+ a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
+ a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
+ rte_prefetch0(a20); \
+ rte_prefetch0(a21); \
+ entries[pkt20_index] = a20; \
+ entries[pkt21_index] = a21; \
+ \
+ bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
+ bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
+ buckets_mask |= bucket20_mask | bucket21_mask; \
+ bucket20_next = bucket20->next; \
+ bucket21_next = bucket21->next; \
+ buckets[pkt20_index] = bucket20_next; \
+ buckets[pkt21_index] = bucket21_next; \
+ keys[pkt20_index] = key20; \
+ keys[pkt21_index] = key21; \
+}
+
+static int
+rte_table_hash_lookup_key8_lru(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
+ struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
+ uint32_t pkt00_index, pkt01_index, pkt10_index,
+ pkt11_index, pkt20_index, pkt21_index;
+ uint64_t pkts_mask_out = 0;
+
+ /* Cannot run the pipeline with less than 5 packets */
+ if (__builtin_popcountll(pkts_mask) < 5) {
+ for ( ; pkts_mask; ) {
+ struct rte_bucket_4_8 *bucket;
+ struct rte_mbuf *mbuf;
+ uint32_t pkt_index;
+
+ lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
+ lookup1_stage1(mbuf, bucket, f);
+ lookup1_stage2_lru(pkt_index, mbuf, bucket,
+ pkts_mask_out, entries, f);
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+ }
+
+ /*
+ * Pipeline fill
+ *
+ */
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline feed */
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /*
+ * Pipeline run
+ *
+ */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
+ mbuf00, mbuf01, pkts, pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries, f);
+ }
+
+ /*
+ * Pipeline flush
+ *
+ */
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries, f);
+
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries, f);
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+} /* rte_table_hash_lookup_key8_lru() */
+
+static int
+rte_table_hash_lookup_key8_lru_dosig(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
+ struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
+ uint32_t pkt00_index, pkt01_index, pkt10_index;
+ uint32_t pkt11_index, pkt20_index, pkt21_index;
+ uint64_t pkts_mask_out = 0;
+
+ /* Cannot run the pipeline with less than 5 packets */
+ if (__builtin_popcountll(pkts_mask) < 5) {
+ for ( ; pkts_mask; ) {
+ struct rte_bucket_4_8 *bucket;
+ struct rte_mbuf *mbuf;
+ uint32_t pkt_index;
+
+ lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
+ lookup1_stage1_dosig(mbuf, bucket, f);
+ lookup1_stage2_lru(pkt_index, mbuf, bucket,
+ pkts_mask_out, entries, f);
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+ }
+
+ /*
+ * Pipeline fill
+ *
+ */
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline feed */
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /*
+ * Pipeline run
+ *
+ */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
+ mbuf00, mbuf01, pkts, pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries, f);
+ }
+
+ /*
+ * Pipeline flush
+ *
+ */
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries, f);
+
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries, f);
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+} /* rte_table_hash_lookup_key8_lru_dosig() */
+
+static int
+rte_table_hash_lookup_key8_ext(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
+ struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
+ uint32_t pkt00_index, pkt01_index, pkt10_index;
+ uint32_t pkt11_index, pkt20_index, pkt21_index;
+ uint64_t pkts_mask_out = 0, buckets_mask = 0;
+ struct rte_bucket_4_8 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
+
+ /* Cannot run the pipeline with less than 5 packets */
+ if (__builtin_popcountll(pkts_mask) < 5) {
+ for ( ; pkts_mask; ) {
+ struct rte_bucket_4_8 *bucket;
+ struct rte_mbuf *mbuf;
+ uint32_t pkt_index;
+
+ lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
+ lookup1_stage1(mbuf, bucket, f);
+ lookup1_stage2_ext(pkt_index, mbuf, bucket,
+ pkts_mask_out, entries, buckets_mask, buckets,
+ keys, f);
+ }
+
+ goto grind_next_buckets;
+ }
+
+ /*
+ * Pipeline fill
+ *
+ */
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline feed */
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /*
+ * Pipeline run
+ *
+ */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
+ mbuf00, mbuf01, pkts, pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+ }
+
+ /*
+ * Pipeline flush
+ *
+ */
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+
+grind_next_buckets:
+ /* Grind next buckets */
+ for ( ; buckets_mask; ) {
+ uint64_t buckets_mask_next = 0;
+
+ for ( ; buckets_mask; ) {
+ uint64_t pkt_mask;
+ uint32_t pkt_index;
+
+ pkt_index = __builtin_ctzll(buckets_mask);
+ pkt_mask = 1LLU << pkt_index;
+ buckets_mask &= ~pkt_mask;
+
+ lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
+ entries, buckets_mask_next, f);
+ }
+
+ buckets_mask = buckets_mask_next;
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+} /* rte_table_hash_lookup_key8_ext() */
+
+static int
+rte_table_hash_lookup_key8_ext_dosig(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
+ struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
+ uint32_t pkt00_index, pkt01_index, pkt10_index;
+ uint32_t pkt11_index, pkt20_index, pkt21_index;
+ uint64_t pkts_mask_out = 0, buckets_mask = 0;
+ struct rte_bucket_4_8 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
+
+ /* Cannot run the pipeline with less than 5 packets */
+ if (__builtin_popcountll(pkts_mask) < 5) {
+ for ( ; pkts_mask; ) {
+ struct rte_bucket_4_8 *bucket;
+ struct rte_mbuf *mbuf;
+ uint32_t pkt_index;
+
+ lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
+ lookup1_stage1_dosig(mbuf, bucket, f);
+ lookup1_stage2_ext(pkt_index, mbuf, bucket,
+ pkts_mask_out, entries, buckets_mask,
+ buckets, keys, f);
+ }
+
+ goto grind_next_buckets;
+ }
+
+ /*
+ * Pipeline fill
+ *
+ */
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline feed */
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
+ pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /*
+ * Pipeline run
+ *
+ */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
+ mbuf00, mbuf01, pkts, pkts_mask);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+ }
+
+ /*
+ * Pipeline flush
+ *
+ */
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ mbuf10 = mbuf00;
+ mbuf11 = mbuf01;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+
+ /* Pipeline feed */
+ bucket20 = bucket10;
+ bucket21 = bucket11;
+ mbuf20 = mbuf10;
+ mbuf21 = mbuf11;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
+ bucket20, bucket21, pkts_mask_out, entries,
+ buckets_mask, buckets, keys, f);
+
+grind_next_buckets:
+ /* Grind next buckets */
+ for ( ; buckets_mask; ) {
+ uint64_t buckets_mask_next = 0;
+
+ for ( ; buckets_mask; ) {
+ uint64_t pkt_mask;
+ uint32_t pkt_index;
+
+ pkt_index = __builtin_ctzll(buckets_mask);
+ pkt_mask = 1LLU << pkt_index;
+ buckets_mask &= ~pkt_mask;
+
+ lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
+ entries, buckets_mask_next, f);
+ }
+
+ buckets_mask = buckets_mask_next;
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+} /* rte_table_hash_lookup_key8_ext_dosig() */
+
+struct rte_table_ops rte_table_hash_key8_lru_ops = {
+ .f_create = rte_table_hash_create_key8_lru,
+ .f_free = rte_table_hash_free_key8_lru,
+ .f_add = rte_table_hash_entry_add_key8_lru,
+ .f_delete = rte_table_hash_entry_delete_key8_lru,
+ .f_lookup = rte_table_hash_lookup_key8_lru,
+};
+
+struct rte_table_ops rte_table_hash_key8_lru_dosig_ops = {
+ .f_create = rte_table_hash_create_key8_lru,
+ .f_free = rte_table_hash_free_key8_lru,
+ .f_add = rte_table_hash_entry_add_key8_lru,
+ .f_delete = rte_table_hash_entry_delete_key8_lru,
+ .f_lookup = rte_table_hash_lookup_key8_lru_dosig,
+};
+
+struct rte_table_ops rte_table_hash_key8_ext_ops = {
+ .f_create = rte_table_hash_create_key8_ext,
+ .f_free = rte_table_hash_free_key8_ext,
+ .f_add = rte_table_hash_entry_add_key8_ext,
+ .f_delete = rte_table_hash_entry_delete_key8_ext,
+ .f_lookup = rte_table_hash_lookup_key8_ext,
+};
+
+struct rte_table_ops rte_table_hash_key8_ext_dosig_ops = {
+ .f_create = rte_table_hash_create_key8_ext,
+ .f_free = rte_table_hash_free_key8_ext,
+ .f_add = rte_table_hash_entry_add_key8_ext,
+ .f_delete = rte_table_hash_entry_delete_key8_ext,
+ .f_lookup = rte_table_hash_lookup_key8_ext_dosig,
+};
diff --git a/src/dpdk_lib18/librte_table/rte_table_hash_lru.c b/src/dpdk_lib18/librte_table/rte_table_hash_lru.c
new file mode 100755
index 00000000..c9a8afd7
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_hash_lru.c
@@ -0,0 +1,1065 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include "rte_table_hash.h"
+#include "rte_lru.h"
+
+#define KEYS_PER_BUCKET 4
+
+struct bucket {
+ union {
+ struct bucket *next;
+ uint64_t lru_list;
+ };
+ uint16_t sig[KEYS_PER_BUCKET];
+ uint32_t key_pos[KEYS_PER_BUCKET];
+};
+
+struct grinder {
+ struct bucket *bkt;
+ uint64_t sig;
+ uint64_t match;
+ uint64_t match_pos;
+ uint32_t key_index;
+};
+
+struct rte_table_hash {
+ /* Input parameters */
+ uint32_t key_size;
+ uint32_t entry_size;
+ uint32_t n_keys;
+ uint32_t n_buckets;
+ rte_table_hash_op_hash f_hash;
+ uint64_t seed;
+ uint32_t signature_offset;
+ uint32_t key_offset;
+
+ /* Internal */
+ uint64_t bucket_mask;
+ uint32_t key_size_shl;
+ uint32_t data_size_shl;
+ uint32_t key_stack_tos;
+
+ /* Grinder */
+ struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];
+
+ /* Tables */
+ struct bucket *buckets;
+ uint8_t *key_mem;
+ uint8_t *data_mem;
+ uint32_t *key_stack;
+
+ /* Table memory */
+ uint8_t memory[0] __rte_cache_aligned;
+};
+
+static int
+check_params_create(struct rte_table_hash_lru_params *params)
+{
+ uint32_t n_buckets_min;
+
+ /* key_size */
+ if ((params->key_size == 0) ||
+ (!rte_is_power_of_2(params->key_size))) {
+ RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_keys */
+ if ((params->n_keys == 0) ||
+ (!rte_is_power_of_2(params->n_keys))) {
+ RTE_LOG(ERR, TABLE, "%s: n_keys invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_buckets */
+	n_buckets_min = (params->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET;
+	if ((params->n_buckets == 0) ||
+		(!rte_is_power_of_2(params->n_buckets)) ||
+ (params->n_buckets < n_buckets_min)) {
+ RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* f_hash */
+ if (params->f_hash == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: f_hash invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* signature offset */
+ if ((params->signature_offset & 0x3) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: signature_offset invalid value\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* key offset */
+ if ((params->key_offset & 0x7) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: key_offset invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void *
+rte_table_hash_lru_create(void *params, int socket_id, uint32_t entry_size)
+{
+ struct rte_table_hash_lru_params *p =
+ (struct rte_table_hash_lru_params *) params;
+ struct rte_table_hash *t;
+ uint32_t total_size, table_meta_sz;
+ uint32_t bucket_sz, key_sz, key_stack_sz, data_sz;
+ uint32_t bucket_offset, key_offset, key_stack_offset, data_offset;
+ uint32_t i;
+
+ /* Check input parameters */
+ if ((check_params_create(p) != 0) ||
+ (!rte_is_power_of_2(entry_size)) ||
+ ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+ (sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2))) {
+ return NULL;
+ }
+
+ /* Memory allocation */
+ table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
+ bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
+ key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
+ key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
+ data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
+ total_size = table_meta_sz + bucket_sz + key_sz + key_stack_sz +
+ data_sz;
+
+ t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ if (t == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for hash table\n",
+ __func__, total_size);
+ return NULL;
+ }
+ RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table memory footprint is "
+ "%u bytes\n", __func__, p->key_size, total_size);
+
+ /* Memory initialization */
+ t->key_size = p->key_size;
+ t->entry_size = entry_size;
+ t->n_keys = p->n_keys;
+ t->n_buckets = p->n_buckets;
+ t->f_hash = p->f_hash;
+ t->seed = p->seed;
+ t->signature_offset = p->signature_offset;
+ t->key_offset = p->key_offset;
+
+ /* Internal */
+ t->bucket_mask = t->n_buckets - 1;
+ t->key_size_shl = __builtin_ctzl(p->key_size);
+ t->data_size_shl = __builtin_ctzl(entry_size);
+
+ /* Tables */
+ bucket_offset = 0;
+ key_offset = bucket_offset + bucket_sz;
+ key_stack_offset = key_offset + key_sz;
+ data_offset = key_stack_offset + key_stack_sz;
+
+ t->buckets = (struct bucket *) &t->memory[bucket_offset];
+ t->key_mem = &t->memory[key_offset];
+ t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
+ t->data_mem = &t->memory[data_offset];
+
+ /* Key stack */
+ for (i = 0; i < t->n_keys; i++)
+ t->key_stack[i] = t->n_keys - 1 - i;
+ t->key_stack_tos = t->n_keys;
+
+ /* LRU */
+ for (i = 0; i < t->n_buckets; i++) {
+ struct bucket *bkt = &t->buckets[i];
+
+ lru_init(bkt);
+ }
+
+ return t;
+}
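+
+/*
+ * Layout sketch for the variable-key-size LRU table created above: buckets,
+ * key memory, the key index stack and entry data live in one allocation,
+ * addressed through the offsets computed here. Because key_size and
+ * entry_size are powers of 2, key_size_shl/data_size_shl hold their log2,
+ * so key index i maps to key_mem[i << key_size_shl] and
+ * data_mem[i << data_size_shl] without multiplications. The key stack is a
+ * free list preloaded with all n_keys indices (key_stack_tos == n_keys).
+ */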
+
+static int
+rte_table_hash_lru_free(void *table)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+
+ /* Check input parameters */
+ if (t == NULL)
+ return -EINVAL;
+
+ rte_free(t);
+ return 0;
+}
+
+static int
+rte_table_hash_lru_entry_add(void *table, void *key, void *entry,
+ int *key_found, void **entry_ptr)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct bucket *bkt;
+ uint64_t sig;
+ uint32_t bkt_index, i;
+
+ sig = t->f_hash(key, t->key_size, t->seed);
+ bkt_index = sig & t->bucket_mask;
+ bkt = &t->buckets[bkt_index];
+ sig = (sig >> 16) | 1LLU;
+
+ /* Key is present in the bucket */
+ for (i = 0; i < KEYS_PER_BUCKET; i++) {
+ uint64_t bkt_sig = (uint64_t) bkt->sig[i];
+ uint32_t bkt_key_index = bkt->key_pos[i];
+ uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
+ t->key_size_shl];
+
+ if ((sig == bkt_sig) && (memcmp(key, bkt_key, t->key_size)
+ == 0)) {
+ uint8_t *data = &t->data_mem[bkt_key_index <<
+ t->data_size_shl];
+
+ memcpy(data, entry, t->entry_size);
+ lru_update(bkt, i);
+ *key_found = 1;
+ *entry_ptr = (void *) data;
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ for (i = 0; i < KEYS_PER_BUCKET; i++) {
+ uint64_t bkt_sig = (uint64_t) bkt->sig[i];
+
+ if (bkt_sig == 0) {
+ uint32_t bkt_key_index;
+ uint8_t *bkt_key, *data;
+
+ /* Allocate new key */
+ if (t->key_stack_tos == 0) {
+ /* No keys available */
+ return -ENOSPC;
+ }
+ bkt_key_index = t->key_stack[--t->key_stack_tos];
+
+ /* Install new key */
+ bkt_key = &t->key_mem[bkt_key_index << t->key_size_shl];
+ data = &t->data_mem[bkt_key_index << t->data_size_shl];
+
+ bkt->sig[i] = (uint16_t) sig;
+ bkt->key_pos[i] = bkt_key_index;
+ memcpy(bkt_key, key, t->key_size);
+ memcpy(data, entry, t->entry_size);
+ lru_update(bkt, i);
+
+ *key_found = 0;
+ *entry_ptr = (void *) data;
+ return 0;
+ }
+ }
+
+ /* Bucket full */
+ {
+ uint64_t pos = lru_pos(bkt);
+ uint32_t bkt_key_index = bkt->key_pos[pos];
+ uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
+ t->key_size_shl];
+ uint8_t *data = &t->data_mem[bkt_key_index << t->data_size_shl];
+
+ bkt->sig[pos] = (uint16_t) sig;
+ memcpy(bkt_key, key, t->key_size);
+ memcpy(data, entry, t->entry_size);
+ lru_update(bkt, pos);
+
+ *key_found = 0;
+ *entry_ptr = (void *) data;
+ return 0;
+ }
+}
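+
+/*
+ * Signature handling above (worked example): the full hash first selects
+ * the bucket, then is compressed with sig = (sig >> 16) | 1 before being
+ * stored in the 16-bit bkt->sig[] slot. OR-ing in bit 0 guarantees that a
+ * stored signature is never 0, so sig[i] == 0 can safely mean "slot i is
+ * free" both here and in the lookup paths.
+ */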
+
+static int
+rte_table_hash_lru_entry_delete(void *table, void *key, int *key_found,
+ void *entry)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct bucket *bkt;
+ uint64_t sig;
+ uint32_t bkt_index, i;
+
+ sig = t->f_hash(key, t->key_size, t->seed);
+ bkt_index = sig & t->bucket_mask;
+ bkt = &t->buckets[bkt_index];
+ sig = (sig >> 16) | 1LLU;
+
+ /* Key is present in the bucket */
+ for (i = 0; i < KEYS_PER_BUCKET; i++) {
+ uint64_t bkt_sig = (uint64_t) bkt->sig[i];
+ uint32_t bkt_key_index = bkt->key_pos[i];
+ uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
+ t->key_size_shl];
+
+ if ((sig == bkt_sig) &&
+ (memcmp(key, bkt_key, t->key_size) == 0)) {
+ uint8_t *data = &t->data_mem[bkt_key_index <<
+ t->data_size_shl];
+
+ bkt->sig[i] = 0;
+ t->key_stack[t->key_stack_tos++] = bkt_key_index;
+ *key_found = 1;
+			if (entry)
+				memcpy(entry, data, t->entry_size);
+ return 0;
+ }
+ }
+
+ /* Key is not present in the bucket */
+ *key_found = 0;
+ return 0;
+}
+
+static int rte_table_hash_lru_lookup_unoptimized(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries,
+ int dosig)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+ uint64_t pkts_mask_out = 0;
+
+ for ( ; pkts_mask; ) {
+ struct bucket *bkt;
+ struct rte_mbuf *pkt;
+ uint8_t *key;
+ uint64_t pkt_mask, sig;
+ uint32_t pkt_index, bkt_index, i;
+
+ pkt_index = __builtin_ctzll(pkts_mask);
+ pkt_mask = 1LLU << pkt_index;
+ pkts_mask &= ~pkt_mask;
+
+ pkt = pkts[pkt_index];
+ key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
+ if (dosig)
+ sig = (uint64_t) t->f_hash(key, t->key_size, t->seed);
+ else
+ sig = RTE_MBUF_METADATA_UINT32(pkt,
+ t->signature_offset);
+
+ bkt_index = sig & t->bucket_mask;
+ bkt = &t->buckets[bkt_index];
+ sig = (sig >> 16) | 1LLU;
+
+ /* Key is present in the bucket */
+ for (i = 0; i < KEYS_PER_BUCKET; i++) {
+ uint64_t bkt_sig = (uint64_t) bkt->sig[i];
+ uint32_t bkt_key_index = bkt->key_pos[i];
+ uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
+ t->key_size_shl];
+
+ if ((sig == bkt_sig) && (memcmp(key, bkt_key,
+ t->key_size) == 0)) {
+ uint8_t *data = &t->data_mem[bkt_key_index <<
+ t->data_size_shl];
+
+ lru_update(bkt, i);
+ pkts_mask_out |= pkt_mask;
+ entries[pkt_index] = (void *) data;
+ break;
+ }
+ }
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return 0;
+}
+
+/***
+*
+* mask = match bitmask
+* match = at least one match
+* match_many = more than one match
+* match_pos = position of first match
+*
+* ----------------------------------------
+* mask match match_many match_pos
+* ----------------------------------------
+* 0000 0 0 00
+* 0001 1 0 00
+* 0010 1 0 01
+* 0011 1 1 00
+* ----------------------------------------
+* 0100 1 0 10
+* 0101 1 1 00
+* 0110 1 1 01
+* 0111 1 1 00
+* ----------------------------------------
+* 1000 1 0 11
+* 1001 1 1 00
+* 1010 1 1 01
+* 1011 1 1 00
+* ----------------------------------------
+* 1100 1 1 10
+* 1101 1 1 00
+* 1110 1 1 01
+* 1111 1 1 00
+* ----------------------------------------
+*
+* match = 1111_1111_1111_1110
+* match_many = 1111_1110_1110_1000
+* match_pos = 0001_0010_0001_0011__0001_0010_0001_0000
+*
+* match = 0xFFFELLU
+* match_many = 0xFEE8LLU
+* match_pos = 0x12131210LLU
+*
+***/
+
+#define LUT_MATCH 0xFFFELLU
+#define LUT_MATCH_MANY 0xFEE8LLU
+#define LUT_MATCH_POS 0x12131210LLU
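+
+/*
+ * Worked example of the lookup tables above: for mask_all = 0110b (slots 1
+ * and 2 matched the signature),
+ *
+ *	match      = (0xFFFELLU >> 6) & 1        = 1  (at least one match)
+ *	match_many = (0xFEE8LLU >> 6) & 1        = 1  (more than one match)
+ *	match_pos  = (0x12131210LLU >> 12) & 3   = 1  (first match in slot 1)
+ *
+ * i.e. each 4-bit mask indexes one bit (or one 2-bit field for match_pos)
+ * of the precomputed truth tables, replacing branches in lookup_cmp_sig()
+ * below.
+ */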
+
+#define lookup_cmp_sig(mbuf_sig, bucket, match, match_many, match_pos)\
+{ \
+ uint64_t bucket_sig[4], mask[4], mask_all; \
+ \
+ bucket_sig[0] = bucket->sig[0]; \
+ bucket_sig[1] = bucket->sig[1]; \
+ bucket_sig[2] = bucket->sig[2]; \
+ bucket_sig[3] = bucket->sig[3]; \
+ \
+ bucket_sig[0] ^= mbuf_sig; \
+ bucket_sig[1] ^= mbuf_sig; \
+ bucket_sig[2] ^= mbuf_sig; \
+ bucket_sig[3] ^= mbuf_sig; \
+ \
+ mask[0] = 0; \
+ mask[1] = 0; \
+ mask[2] = 0; \
+ mask[3] = 0; \
+ \
+ if (bucket_sig[0] == 0) \
+ mask[0] = 1; \
+ if (bucket_sig[1] == 0) \
+ mask[1] = 2; \
+ if (bucket_sig[2] == 0) \
+ mask[2] = 4; \
+ if (bucket_sig[3] == 0) \
+ mask[3] = 8; \
+ \
+ mask_all = (mask[0] | mask[1]) | (mask[2] | mask[3]); \
+ \
+ match = (LUT_MATCH >> mask_all) & 1; \
+ match_many = (LUT_MATCH_MANY >> mask_all) & 1; \
+ match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3; \
+}
+
+#define lookup_cmp_key(mbuf, key, match_key, f) \
+{ \
+ uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset);\
+ uint64_t *bkt_key = (uint64_t *) key; \
+ \
+ switch (f->key_size) { \
+ case 8: \
+ { \
+ uint64_t xor = pkt_key[0] ^ bkt_key[0]; \
+ match_key = 0; \
+ if (xor == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ case 16: \
+ { \
+ uint64_t xor[2], or; \
+ \
+ xor[0] = pkt_key[0] ^ bkt_key[0]; \
+ xor[1] = pkt_key[1] ^ bkt_key[1]; \
+ or = xor[0] | xor[1]; \
+ match_key = 0; \
+ if (or == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ case 32: \
+ { \
+ uint64_t xor[4], or; \
+ \
+ xor[0] = pkt_key[0] ^ bkt_key[0]; \
+ xor[1] = pkt_key[1] ^ bkt_key[1]; \
+ xor[2] = pkt_key[2] ^ bkt_key[2]; \
+ xor[3] = pkt_key[3] ^ bkt_key[3]; \
+ or = xor[0] | xor[1] | xor[2] | xor[3]; \
+ match_key = 0; \
+ if (or == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ case 64: \
+ { \
+ uint64_t xor[8], or; \
+ \
+ xor[0] = pkt_key[0] ^ bkt_key[0]; \
+ xor[1] = pkt_key[1] ^ bkt_key[1]; \
+ xor[2] = pkt_key[2] ^ bkt_key[2]; \
+ xor[3] = pkt_key[3] ^ bkt_key[3]; \
+ xor[4] = pkt_key[4] ^ bkt_key[4]; \
+ xor[5] = pkt_key[5] ^ bkt_key[5]; \
+ xor[6] = pkt_key[6] ^ bkt_key[6]; \
+ xor[7] = pkt_key[7] ^ bkt_key[7]; \
+ or = xor[0] | xor[1] | xor[2] | xor[3] | \
+ xor[4] | xor[5] | xor[6] | xor[7]; \
+ match_key = 0; \
+ if (or == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ default: \
+ match_key = 0; \
+ if (memcmp(pkt_key, bkt_key, f->key_size) == 0) \
+ match_key = 1; \
+ } \
+}
+
+#define lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index)\
+{ \
+ uint64_t pkt00_mask, pkt01_mask; \
+ struct rte_mbuf *mbuf00, *mbuf01; \
+ \
+ pkt00_index = __builtin_ctzll(pkts_mask); \
+ pkt00_mask = 1LLU << pkt00_index; \
+ pkts_mask &= ~pkt00_mask; \
+ mbuf00 = pkts[pkt00_index]; \
+ \
+ pkt01_index = __builtin_ctzll(pkts_mask); \
+ pkt01_mask = 1LLU << pkt01_index; \
+ pkts_mask &= ~pkt01_mask; \
+ mbuf01 = pkts[pkt01_index]; \
+ \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
+}
+
+#define lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask, pkt00_index, \
+ pkt01_index) \
+{ \
+ uint64_t pkt00_mask, pkt01_mask; \
+ struct rte_mbuf *mbuf00, *mbuf01; \
+ \
+ pkt00_index = __builtin_ctzll(pkts_mask); \
+ pkt00_mask = 1LLU << pkt00_index; \
+ pkts_mask &= ~pkt00_mask; \
+ mbuf00 = pkts[pkt00_index]; \
+ \
+ pkt01_index = __builtin_ctzll(pkts_mask); \
+ if (pkts_mask == 0) \
+ pkt01_index = pkt00_index; \
+ \
+ pkt01_mask = 1LLU << pkt01_index; \
+ pkts_mask &= ~pkt01_mask; \
+ mbuf01 = pkts[pkt01_index]; \
+ \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
+ rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
+}
+
+#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index) \
+{ \
+ struct grinder *g10, *g11; \
+ uint64_t sig10, sig11, bkt10_index, bkt11_index; \
+ struct rte_mbuf *mbuf10, *mbuf11; \
+ struct bucket *bkt10, *bkt11, *buckets = t->buckets; \
+ uint64_t bucket_mask = t->bucket_mask; \
+ uint32_t signature_offset = t->signature_offset; \
+ \
+ mbuf10 = pkts[pkt10_index]; \
+ sig10 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf10, signature_offset);\
+ bkt10_index = sig10 & bucket_mask; \
+ bkt10 = &buckets[bkt10_index]; \
+ \
+ mbuf11 = pkts[pkt11_index]; \
+ sig11 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf11, signature_offset);\
+ bkt11_index = sig11 & bucket_mask; \
+ bkt11 = &buckets[bkt11_index]; \
+ \
+ rte_prefetch0(bkt10); \
+ rte_prefetch0(bkt11); \
+ \
+ g10 = &g[pkt10_index]; \
+ g10->sig = sig10; \
+ g10->bkt = bkt10; \
+ \
+ g11 = &g[pkt11_index]; \
+ g11->sig = sig11; \
+ g11->bkt = bkt11; \
+}
+
+#define lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index)\
+{ \
+ struct grinder *g10, *g11; \
+ uint64_t sig10, sig11, bkt10_index, bkt11_index; \
+ struct rte_mbuf *mbuf10, *mbuf11; \
+ struct bucket *bkt10, *bkt11, *buckets = t->buckets; \
+ uint8_t *key10, *key11; \
+ uint64_t bucket_mask = t->bucket_mask; \
+ rte_table_hash_op_hash f_hash = t->f_hash; \
+ uint64_t seed = t->seed; \
+ uint32_t key_size = t->key_size; \
+ uint32_t key_offset = t->key_offset; \
+ \
+ mbuf10 = pkts[pkt10_index]; \
+ key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset);\
+ sig10 = (uint64_t) f_hash(key10, key_size, seed); \
+ bkt10_index = sig10 & bucket_mask; \
+ bkt10 = &buckets[bkt10_index]; \
+ \
+ mbuf11 = pkts[pkt11_index]; \
+ key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset);\
+ sig11 = (uint64_t) f_hash(key11, key_size, seed); \
+ bkt11_index = sig11 & bucket_mask; \
+ bkt11 = &buckets[bkt11_index]; \
+ \
+ rte_prefetch0(bkt10); \
+ rte_prefetch0(bkt11); \
+ \
+ g10 = &g[pkt10_index]; \
+ g10->sig = sig10; \
+ g10->bkt = bkt10; \
+ \
+ g11 = &g[pkt11_index]; \
+ g11->sig = sig11; \
+ g11->bkt = bkt11; \
+}
+
+#define lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many)\
+{ \
+ struct grinder *g20, *g21; \
+ uint64_t sig20, sig21; \
+ struct bucket *bkt20, *bkt21; \
+ uint8_t *key20, *key21, *key_mem = t->key_mem; \
+ uint64_t match20, match21, match_many20, match_many21; \
+ uint64_t match_pos20, match_pos21; \
+ uint32_t key20_index, key21_index, key_size_shl = t->key_size_shl;\
+ \
+ g20 = &g[pkt20_index]; \
+ sig20 = g20->sig; \
+ bkt20 = g20->bkt; \
+ sig20 = (sig20 >> 16) | 1LLU; \
+ lookup_cmp_sig(sig20, bkt20, match20, match_many20, match_pos20);\
+ match20 <<= pkt20_index; \
+ match_many20 <<= pkt20_index; \
+ key20_index = bkt20->key_pos[match_pos20]; \
+ key20 = &key_mem[key20_index << key_size_shl]; \
+ \
+ g21 = &g[pkt21_index]; \
+ sig21 = g21->sig; \
+ bkt21 = g21->bkt; \
+ sig21 = (sig21 >> 16) | 1LLU; \
+ lookup_cmp_sig(sig21, bkt21, match21, match_many21, match_pos21);\
+ match21 <<= pkt21_index; \
+ match_many21 <<= pkt21_index; \
+ key21_index = bkt21->key_pos[match_pos21]; \
+ key21 = &key_mem[key21_index << key_size_shl]; \
+ \
+ rte_prefetch0(key20); \
+ rte_prefetch0(key21); \
+ \
+ pkts_mask_match_many |= match_many20 | match_many21; \
+ \
+ g20->match = match20; \
+ g20->match_pos = match_pos20; \
+ g20->key_index = key20_index; \
+ \
+ g21->match = match21; \
+ g21->match_pos = match_pos21; \
+ g21->key_index = key21_index; \
+}
+
+#define lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out, \
+ entries) \
+{ \
+ struct grinder *g30, *g31; \
+ struct rte_mbuf *mbuf30, *mbuf31; \
+ struct bucket *bkt30, *bkt31; \
+ uint8_t *key30, *key31, *key_mem = t->key_mem; \
+ uint8_t *data30, *data31, *data_mem = t->data_mem; \
+ uint64_t match30, match31, match_pos30, match_pos31; \
+ uint64_t match_key30, match_key31, match_keys; \
+ uint32_t key30_index, key31_index; \
+ uint32_t key_size_shl = t->key_size_shl; \
+ uint32_t data_size_shl = t->data_size_shl; \
+ \
+ mbuf30 = pkts[pkt30_index]; \
+ g30 = &g[pkt30_index]; \
+ bkt30 = g30->bkt; \
+ match30 = g30->match; \
+ match_pos30 = g30->match_pos; \
+ key30_index = g30->key_index; \
+ key30 = &key_mem[key30_index << key_size_shl]; \
+ lookup_cmp_key(mbuf30, key30, match_key30, t); \
+ match_key30 <<= pkt30_index; \
+ match_key30 &= match30; \
+ data30 = &data_mem[key30_index << data_size_shl]; \
+ entries[pkt30_index] = data30; \
+ \
+ mbuf31 = pkts[pkt31_index]; \
+ g31 = &g[pkt31_index]; \
+ bkt31 = g31->bkt; \
+ match31 = g31->match; \
+ match_pos31 = g31->match_pos; \
+ key31_index = g31->key_index; \
+ key31 = &key_mem[key31_index << key_size_shl]; \
+ lookup_cmp_key(mbuf31, key31, match_key31, t); \
+ match_key31 <<= pkt31_index; \
+ match_key31 &= match31; \
+ data31 = &data_mem[key31_index << data_size_shl]; \
+ entries[pkt31_index] = data31; \
+ \
+ rte_prefetch0(data30); \
+ rte_prefetch0(data31); \
+ \
+ match_keys = match_key30 | match_key31; \
+ pkts_mask_out |= match_keys; \
+ \
+ if (match_key30 == 0) \
+ match_pos30 = 4; \
+ lru_update(bkt30, match_pos30); \
+ \
+ if (match_key31 == 0) \
+ match_pos31 = 4; \
+ lru_update(bkt31, match_pos31); \
+}
+
+/***
+* The lookup function implements a 4-stage pipeline, with each stage processing
+* two different packets. The purpose of the pipelined implementation is to
+* hide the latency of prefetching the data structures and to loosen the data
+* dependencies between instructions.
+*
+* p00 _______ p10 _______ p20 _______ p30 _______
+* ----->| |----->| |----->| |----->| |----->
+* | 0 | | 1 | | 2 | | 3 |
+* ----->|_______|----->|_______|----->|_______|----->|_______|----->
+* p01 p11 p21 p31
+*
+* The naming convention is:
+* pXY = packet Y of stage X, X = 0 .. 3, Y = 0 .. 1
+*
+***/
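+/*
+ * Illustrative unrolling (an example derived from the code below, not part
+ * of the implementation): for a burst of eight packets p0..p7, the pipeline
+ * executes roughly as follows, two packets per stage per step:
+ *
+ *   prologue: stage0(p0,p1)
+ *             stage0(p2,p3) stage1(p0,p1)
+ *             stage0(p4,p5) stage1(p2,p3) stage2(p0,p1)
+ *   steady:   stage0(p6,p7) stage1(p4,p5) stage2(p2,p3) stage3(p0,p1)
+ *   epilogue: stage1(p6,p7) stage2(p4,p5) stage3(p2,p3)
+ *             stage2(p6,p7) stage3(p4,p5)
+ *             stage3(p6,p7)
+ */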
+static int rte_table_hash_lru_lookup(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct grinder *g = t->grinders;
+ uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
+ uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
+ uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
+ int status = 0;
+
+ /* Cannot run the pipeline with less than 7 packets */
+ if (__builtin_popcountll(pkts_mask) < 7)
+ return rte_table_hash_lru_lookup_unoptimized(table, pkts,
+ pkts_mask, lookup_hit_mask, entries, 0);
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline feed */
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline feed */
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /* Pipeline run */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
+ pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index,
+ pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
+ pkts_mask_out, entries);
+ }
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Slow path */
+ pkts_mask_match_many &= ~pkts_mask_out;
+ if (pkts_mask_match_many) {
+ uint64_t pkts_mask_out_slow = 0;
+
+ status = rte_table_hash_lru_lookup_unoptimized(table, pkts,
+ pkts_mask_match_many, &pkts_mask_out_slow, entries, 0);
+ pkts_mask_out |= pkts_mask_out_slow;
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return status;
+}
+
+static int rte_table_hash_lru_lookup_dosig(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct grinder *g = t->grinders;
+ uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
+ uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
+ uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
+ int status = 0;
+
+ /* Cannot run the pipeline with less than 7 packets */
+ if (__builtin_popcountll(pkts_mask) < 7)
+ return rte_table_hash_lru_lookup_unoptimized(table, pkts,
+ pkts_mask, lookup_hit_mask, entries, 1);
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline feed */
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline feed */
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /* Pipeline run */
+ for ( ; pkts_mask; ) {
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 0 */
+ lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
+ pkt00_index, pkt01_index);
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index,
+ pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
+ pkts_mask_out, entries);
+ }
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+ pkt10_index = pkt00_index;
+ pkt11_index = pkt01_index;
+
+ /* Pipeline stage 1 */
+ lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+ pkt20_index = pkt10_index;
+ pkt21_index = pkt11_index;
+
+ /* Pipeline stage 2 */
+ lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Pipeline feed */
+ pkt30_index = pkt20_index;
+ pkt31_index = pkt21_index;
+
+ /* Pipeline stage 3 */
+ lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
+ entries);
+
+ /* Slow path */
+ pkts_mask_match_many &= ~pkts_mask_out;
+ if (pkts_mask_match_many) {
+ uint64_t pkts_mask_out_slow = 0;
+
+ status = rte_table_hash_lru_lookup_unoptimized(table, pkts,
+ pkts_mask_match_many, &pkts_mask_out_slow, entries, 1);
+ pkts_mask_out |= pkts_mask_out_slow;
+ }
+
+ *lookup_hit_mask = pkts_mask_out;
+ return status;
+}
+
+struct rte_table_ops rte_table_hash_lru_ops = {
+ .f_create = rte_table_hash_lru_create,
+ .f_free = rte_table_hash_lru_free,
+ .f_add = rte_table_hash_lru_entry_add,
+ .f_delete = rte_table_hash_lru_entry_delete,
+ .f_lookup = rte_table_hash_lru_lookup,
+};
+
+struct rte_table_ops rte_table_hash_lru_dosig_ops = {
+ .f_create = rte_table_hash_lru_create,
+ .f_free = rte_table_hash_lru_free,
+ .f_add = rte_table_hash_lru_entry_add,
+ .f_delete = rte_table_hash_lru_entry_delete,
+ .f_lookup = rte_table_hash_lru_lookup_dosig,
+};
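+
+/*
+ * Minimal usage sketch (illustrative only; "table", "pkts" and "pkts_mask"
+ * are assumed to be provided by the caller, e.g. by a pipeline input port):
+ *
+ *     uint64_t hit_mask = 0;
+ *     void *entries[RTE_PORT_IN_BURST_SIZE_MAX];
+ *
+ *     rte_table_hash_lru_ops.f_lookup(table, pkts, pkts_mask,
+ *         &hit_mask, entries);
+ *
+ * Bit i of hit_mask is set when pkts[i] matches a stored key; entries[i]
+ * then points to the data associated with that key.
+ */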
diff --git a/src/dpdk_lib18/librte_table/rte_table_lpm.c b/src/dpdk_lib18/librte_table/rte_table_lpm.c
new file mode 100755
index 00000000..64c684d0
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_lpm.c
@@ -0,0 +1,348 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_lpm.h>
+
+#include "rte_table_lpm.h"
+
+#define RTE_TABLE_LPM_MAX_NEXT_HOPS 256
+
+struct rte_table_lpm {
+ /* Input parameters */
+ uint32_t entry_size;
+ uint32_t entry_unique_size;
+ uint32_t n_rules;
+ uint32_t offset;
+
+ /* Handle to low-level LPM table */
+ struct rte_lpm *lpm;
+
+ /* Next Hop Table (NHT) */
+ uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
+ uint8_t nht[0] __rte_cache_aligned;
+};
+
+static void *
+rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
+{
+ struct rte_table_lpm_params *p = (struct rte_table_lpm_params *) params;
+ struct rte_table_lpm *lpm;
+ uint32_t total_size, nht_size;
+
+ /* Check input parameters */
+ if (p == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: NULL input parameters\n", __func__);
+ return NULL;
+ }
+ if (p->n_rules == 0) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
+ return NULL;
+ }
+ if (p->entry_unique_size == 0) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
+ __func__);
+ return NULL;
+ }
+ if (p->entry_unique_size > entry_size) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
+ __func__);
+ return NULL;
+ }
+ if ((p->offset & 0x3) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid offset\n", __func__);
+ return NULL;
+ }
+
+ entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));
+
+ /* Memory allocation */
+ nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
+ total_size = sizeof(struct rte_table_lpm) + nht_size;
+ lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (lpm == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for LPM table\n",
+ __func__, total_size);
+ return NULL;
+ }
+
+ /* LPM low-level table creation */
+ lpm->lpm = rte_lpm_create("LPM", socket_id, p->n_rules, 0);
+ if (lpm->lpm == NULL) {
+ rte_free(lpm);
+ RTE_LOG(ERR, TABLE, "Unable to create low-level LPM table\n");
+ return NULL;
+ }
+
+ /* Memory initialization */
+ lpm->entry_size = entry_size;
+ lpm->entry_unique_size = p->entry_unique_size;
+ lpm->n_rules = p->n_rules;
+ lpm->offset = p->offset;
+
+ return lpm;
+}
+
+static int
+rte_table_lpm_free(void *table)
+{
+ struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
+
+ /* Check input parameters */
+ if (lpm == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Free previously allocated resources */
+ rte_lpm_free(lpm->lpm);
+ rte_free(lpm);
+
+ return 0;
+}
+
+static int
+nht_find_free(struct rte_table_lpm *lpm, uint32_t *pos)
+{
+ uint32_t i;
+
+ for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
+ if (lpm->nht_users[i] == 0) {
+ *pos = i;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nht_find_existing(struct rte_table_lpm *lpm, void *entry, uint32_t *pos)
+{
+ uint32_t i;
+
+ for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
+ uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];
+
+ if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
+ lpm->entry_unique_size) == 0)) {
+ *pos = i;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+rte_table_lpm_entry_add(
+ void *table,
+ void *key,
+ void *entry,
+ int *key_found,
+ void **entry_ptr)
+{
+ struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
+ struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
+ uint32_t nht_pos, nht_pos0_valid;
+ int status;
+ uint8_t nht_pos0 = 0;
+
+ /* Check input parameters */
+ if (lpm == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (ip_prefix == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (entry == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
+ RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n",
+ __func__, ip_prefix->depth);
+ return -EINVAL;
+ }
+
+ /* Check if rule is already present in the table */
+ status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
+ ip_prefix->depth, &nht_pos0);
+ nht_pos0_valid = status > 0;
+
+ /* Find existing or free NHT entry */
+ if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
+ uint8_t *nht_entry;
+
+ if (nht_find_free(lpm, &nht_pos) == 0) {
+ RTE_LOG(ERR, TABLE, "%s: NHT full\n", __func__);
+ return -1;
+ }
+
+ nht_entry = &lpm->nht[nht_pos * lpm->entry_size];
+ memcpy(nht_entry, entry, lpm->entry_size);
+ }
+
+ /* Add rule to low level LPM table */
+ if (rte_lpm_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth,
+ (uint8_t) nht_pos) < 0) {
+ RTE_LOG(ERR, TABLE, "%s: LPM rule add failed\n", __func__);
+ return -1;
+ }
+
+ /* Commit NHT changes */
+ lpm->nht_users[nht_pos]++;
+ lpm->nht_users[nht_pos0] -= nht_pos0_valid;
+
+ *key_found = nht_pos0_valid;
+ *entry_ptr = (void *) &lpm->nht[nht_pos * lpm->entry_size];
+ return 0;
+}
+
+static int
+rte_table_lpm_entry_delete(
+ void *table,
+ void *key,
+ int *key_found,
+ void *entry)
+{
+ struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
+ struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
+ uint8_t nht_pos;
+ int status;
+
+ /* Check input parameters */
+ if (lpm == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (ip_prefix == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
+ RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n", __func__,
+ ip_prefix->depth);
+ return -EINVAL;
+ }
+
+ /* Return if rule is not present in the table */
+ status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
+ ip_prefix->depth, &nht_pos);
+ if (status < 0) {
+ RTE_LOG(ERR, TABLE, "%s: LPM algorithmic error\n", __func__);
+ return -1;
+ }
+ if (status == 0) {
+ *key_found = 0;
+ return 0;
+ }
+
+ /* Delete rule from the low-level LPM table */
+ status = rte_lpm_delete(lpm->lpm, ip_prefix->ip, ip_prefix->depth);
+ if (status) {
+ RTE_LOG(ERR, TABLE, "%s: LPM rule delete failed\n", __func__);
+ return -1;
+ }
+
+ /* Commit NHT changes */
+ lpm->nht_users[nht_pos]--;
+
+ *key_found = 1;
+ if (entry)
+ memcpy(entry, &lpm->nht[nht_pos * lpm->entry_size],
+ lpm->entry_size);
+
+ return 0;
+}
+
+static int
+rte_table_lpm_lookup(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
+ uint64_t pkts_out_mask = 0;
+ uint32_t i;
+
+ pkts_out_mask = 0;
+ for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
+ __builtin_clzll(pkts_mask)); i++) {
+ uint64_t pkt_mask = 1LLU << i;
+
+ if (pkt_mask & pkts_mask) {
+ struct rte_mbuf *pkt = pkts[i];
+ uint32_t ip = rte_bswap32(
+ RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
+ int status;
+ uint8_t nht_pos;
+
+ status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
+ if (status == 0) {
+ pkts_out_mask |= pkt_mask;
+ entries[i] = (void *) &lpm->nht[nht_pos *
+ lpm->entry_size];
+ }
+ }
+ }
+
+ *lookup_hit_mask = pkts_out_mask;
+
+ return 0;
+}
+
+struct rte_table_ops rte_table_lpm_ops = {
+ .f_create = rte_table_lpm_create,
+ .f_free = rte_table_lpm_free,
+ .f_add = rte_table_lpm_entry_add,
+ .f_delete = rte_table_lpm_entry_delete,
+ .f_lookup = rte_table_lpm_lookup,
+};
diff --git a/src/dpdk_lib18/librte_table/rte_table_lpm.h b/src/dpdk_lib18/librte_table/rte_table_lpm.h
new file mode 100755
index 00000000..c08c9580
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_lpm.h
@@ -0,0 +1,115 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_TABLE_LPM_H__
+#define __INCLUDE_RTE_TABLE_LPM_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Table LPM for IPv4
+ *
+ * This table uses the Longest Prefix Match (LPM) algorithm to uniquely
+ * associate data to lookup keys.
+ *
+ * Use-case: IP routing table. Routes that are added to the table associate a
+ * next hop to an IP prefix. The IP prefix is specified as an IP address and
+ * depth and covers a multitude of lookup keys (i.e. destination IP addresses)
+ * that all share the same data (i.e. next hop). The next hop information
+ * typically contains the output interface ID, the IP address of the next hop
+ * station (which is part of the same IP network the output interface is
+ * connected to) and other flags and counters.
+ *
+ * The LPM primitive only allows associating an 8-bit number (next hop ID) to
+ * an IP prefix, while a routing table can potentially contain thousands of
+ * routes or even more. This means that the same next hop ID (and next hop
+ * information) has to be shared by multiple routes, which makes sense, as
+ * multiple remote networks could be reached through the same next hop.
+ * Therefore, when a route is added or updated, the LPM table has to check
+ * whether the same next hop is already in use before using a new next hop ID
+ * for this route.
+ *
+ * The comparison between different next hops is done for the first
+ * “entry_unique_size” bytes of the next hop information (configurable
+ * parameter), which have to uniquely identify the next hop; therefore, the user
+ * has to carefully manage the format of the LPM table entry (i.e. the next
+ * hop information) so that any next hop data that changes value during
+ * run-time (e.g. counters) is placed outside of this area.
+ *
+ ***/
+
+#include <stdint.h>
+
+#include "rte_table.h"
+
+/** LPM table parameters */
+struct rte_table_lpm_params {
+ /** Maximum number of LPM rules (i.e. IP routes) */
+ uint32_t n_rules;
+
+ /** Number of bytes at the start of the table entry that uniquely
+ identify the entry. Cannot be bigger than table entry size. */
+ uint32_t entry_unique_size;
+
+ /** Byte offset within input packet meta-data where lookup key (i.e.
+ the destination IP address) is located. */
+ uint32_t offset;
+};
+
+/** LPM table rule (i.e. route), specified as IP prefix. While the key used by
+the lookup operation is the destination IP address (read from the input packet
+meta-data), the entry add and entry delete operations work with LPM rules, with
+each rule covering a multitude of lookup keys (destination IP addresses)
+that share the same data (next hop). */
+struct rte_table_lpm_key {
+ /** IP address */
+ uint32_t ip;
+
+ /** IP address depth. The most significant "depth" bits of the IP
+ address specify the network part of the IP address, while the rest of
+ the bits specify the host part of the address and are ignored for the
+ purpose of route specification. */
+ uint8_t depth;
+};
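+
+/*
+ * Minimal usage sketch (illustrative only, error handling omitted; the entry
+ * layout "struct my_next_hop" and the meta-data offset value are assumptions
+ * of this example, not part of the API):
+ *
+ *     struct my_next_hop { uint32_t port_id; } nh = { .port_id = 3 };
+ *
+ *     struct rte_table_lpm_params p = {
+ *         .n_rules = 1024,
+ *         .entry_unique_size = sizeof(nh),
+ *         .offset = 128,
+ *     };
+ *     void *t = rte_table_lpm_ops.f_create(&p, 0, sizeof(nh));
+ *
+ *     struct rte_table_lpm_key k = { .ip = 0x0a000000, .depth = 24 };
+ *     int key_found;
+ *     void *entry_ptr;
+ *     rte_table_lpm_ops.f_add(t, &k, &nh, &key_found, &entry_ptr);
+ */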
+
+/** LPM table operations */
+extern struct rte_table_ops rte_table_lpm_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.c b/src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.c
new file mode 100755
index 00000000..ce4ddc0b
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.c
@@ -0,0 +1,362 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_lpm6.h>
+
+#include "rte_table_lpm_ipv6.h"
+
+#define RTE_TABLE_LPM_MAX_NEXT_HOPS 256
+
+struct rte_table_lpm_ipv6 {
+ /* Input parameters */
+ uint32_t entry_size;
+ uint32_t entry_unique_size;
+ uint32_t n_rules;
+ uint32_t offset;
+
+ /* Handle to low-level LPM table */
+ struct rte_lpm6 *lpm;
+
+ /* Next Hop Table (NHT) */
+ uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
+ uint8_t nht[0] __rte_cache_aligned;
+};
+
+static void *
+rte_table_lpm_ipv6_create(void *params, int socket_id, uint32_t entry_size)
+{
+ struct rte_table_lpm_ipv6_params *p =
+ (struct rte_table_lpm_ipv6_params *) params;
+ struct rte_table_lpm_ipv6 *lpm;
+ struct rte_lpm6_config lpm6_config;
+ uint32_t total_size, nht_size;
+
+ /* Check input parameters */
+ if (p == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: NULL input parameters\n", __func__);
+ return NULL;
+ }
+ if (p->n_rules == 0) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
+ return NULL;
+ }
+ if (p->number_tbl8s == 0) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid number_tbl8s\n", __func__);
+ return NULL;
+ }
+ if (p->entry_unique_size == 0) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
+ __func__);
+ return NULL;
+ }
+ if (p->entry_unique_size > entry_size) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
+ __func__);
+ return NULL;
+ }
+ if ((p->offset & 0x3) != 0) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid offset\n", __func__);
+ return NULL;
+ }
+
+ entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));
+
+ /* Memory allocation */
+ nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
+ total_size = sizeof(struct rte_table_lpm_ipv6) + nht_size;
+ lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (lpm == NULL) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %u bytes for LPM IPv6 table\n",
+ __func__, total_size);
+ return NULL;
+ }
+
+ /* LPM low-level table creation */
+ lpm6_config.max_rules = p->n_rules;
+ lpm6_config.number_tbl8s = p->number_tbl8s;
+ lpm6_config.flags = 0;
+ lpm->lpm = rte_lpm6_create("LPM IPv6", socket_id, &lpm6_config);
+ if (lpm->lpm == NULL) {
+ rte_free(lpm);
+ RTE_LOG(ERR, TABLE,
+ "Unable to create low-level LPM IPv6 table\n");
+ return NULL;
+ }
+
+ /* Memory initialization */
+ lpm->entry_size = entry_size;
+ lpm->entry_unique_size = p->entry_unique_size;
+ lpm->n_rules = p->n_rules;
+ lpm->offset = p->offset;
+
+ return lpm;
+}
+
+static int
+rte_table_lpm_ipv6_free(void *table)
+{
+ struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;
+
+ /* Check input parameters */
+ if (lpm == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Free previously allocated resources */
+ rte_lpm6_free(lpm->lpm);
+ rte_free(lpm);
+
+ return 0;
+}
+
+static int
+nht_find_free(struct rte_table_lpm_ipv6 *lpm, uint32_t *pos)
+{
+ uint32_t i;
+
+ for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
+ if (lpm->nht_users[i] == 0) {
+ *pos = i;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nht_find_existing(struct rte_table_lpm_ipv6 *lpm, void *entry, uint32_t *pos)
+{
+ uint32_t i;
+
+ for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
+ uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];
+
+ if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
+ lpm->entry_unique_size) == 0)) {
+ *pos = i;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+rte_table_lpm_ipv6_entry_add(
+ void *table,
+ void *key,
+ void *entry,
+ int *key_found,
+ void **entry_ptr)
+{
+ struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;
+ struct rte_table_lpm_ipv6_key *ip_prefix =
+ (struct rte_table_lpm_ipv6_key *) key;
+ uint32_t nht_pos, nht_pos0_valid;
+ int status;
+ uint8_t nht_pos0 = 0;
+
+ /* Check input parameters */
+ if (lpm == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (ip_prefix == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (entry == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((ip_prefix->depth == 0) || (ip_prefix->depth > 128)) {
+ RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n", __func__,
+ ip_prefix->depth);
+ return -EINVAL;
+ }
+
+ /* Check if rule is already present in the table */
+ status = rte_lpm6_is_rule_present(lpm->lpm, ip_prefix->ip,
+ ip_prefix->depth, &nht_pos0);
+ nht_pos0_valid = status > 0;
+
+ /* Find existing or free NHT entry */
+ if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
+ uint8_t *nht_entry;
+
+ if (nht_find_free(lpm, &nht_pos) == 0) {
+ RTE_LOG(ERR, TABLE, "%s: NHT full\n", __func__);
+ return -1;
+ }
+
+ nht_entry = &lpm->nht[nht_pos * lpm->entry_size];
+ memcpy(nht_entry, entry, lpm->entry_size);
+ }
+
+ /* Add rule to low level LPM table */
+ if (rte_lpm6_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth,
+ (uint8_t) nht_pos) < 0) {
+ RTE_LOG(ERR, TABLE, "%s: LPM IPv6 rule add failed\n", __func__);
+ return -1;
+ }
+
+ /* Commit NHT changes */
+ lpm->nht_users[nht_pos]++;
+ lpm->nht_users[nht_pos0] -= nht_pos0_valid;
+
+ *key_found = nht_pos0_valid;
+ *entry_ptr = (void *) &lpm->nht[nht_pos * lpm->entry_size];
+ return 0;
+}
+
+static int
+rte_table_lpm_ipv6_entry_delete(
+ void *table,
+ void *key,
+ int *key_found,
+ void *entry)
+{
+ struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;
+ struct rte_table_lpm_ipv6_key *ip_prefix =
+ (struct rte_table_lpm_ipv6_key *) key;
+ uint8_t nht_pos;
+ int status;
+
+ /* Check input parameters */
+ if (lpm == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (ip_prefix == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: ip_prefix parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if ((ip_prefix->depth == 0) || (ip_prefix->depth > 128)) {
+ RTE_LOG(ERR, TABLE, "%s: invalid depth (%d)\n", __func__,
+ ip_prefix->depth);
+ return -EINVAL;
+ }
+
+ /* Return if rule is not present in the table */
+ status = rte_lpm6_is_rule_present(lpm->lpm, ip_prefix->ip,
+ ip_prefix->depth, &nht_pos);
+ if (status < 0) {
+ RTE_LOG(ERR, TABLE, "%s: LPM IPv6 algorithmic error\n",
+ __func__);
+ return -1;
+ }
+ if (status == 0) {
+ *key_found = 0;
+ return 0;
+ }
+
+ /* Delete rule from the low-level LPM table */
+ status = rte_lpm6_delete(lpm->lpm, ip_prefix->ip, ip_prefix->depth);
+ if (status) {
+ RTE_LOG(ERR, TABLE, "%s: LPM IPv6 rule delete failed\n",
+ __func__);
+ return -1;
+ }
+
+ /* Commit NHT changes */
+ lpm->nht_users[nht_pos]--;
+
+ *key_found = 1;
+ if (entry)
+ memcpy(entry, &lpm->nht[nht_pos * lpm->entry_size],
+ lpm->entry_size);
+
+ return 0;
+}
+
+static int
+rte_table_lpm_ipv6_lookup(
+ void *table,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ void **entries)
+{
+ struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;
+ uint64_t pkts_out_mask = 0;
+ uint32_t i;
+
+ pkts_out_mask = 0;
+ for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
+ __builtin_clzll(pkts_mask)); i++) {
+ uint64_t pkt_mask = 1LLU << i;
+
+ if (pkt_mask & pkts_mask) {
+ struct rte_mbuf *pkt = pkts[i];
+ uint8_t *ip = RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ lpm->offset);
+ int status;
+ uint8_t nht_pos;
+
+ status = rte_lpm6_lookup(lpm->lpm, ip, &nht_pos);
+ if (status == 0) {
+ pkts_out_mask |= pkt_mask;
+ entries[i] = (void *) &lpm->nht[nht_pos *
+ lpm->entry_size];
+ }
+ }
+ }
+
+ *lookup_hit_mask = pkts_out_mask;
+
+ return 0;
+}
+
+struct rte_table_ops rte_table_lpm_ipv6_ops = {
+ .f_create = rte_table_lpm_ipv6_create,
+ .f_free = rte_table_lpm_ipv6_free,
+ .f_add = rte_table_lpm_ipv6_entry_add,
+ .f_delete = rte_table_lpm_ipv6_entry_delete,
+ .f_lookup = rte_table_lpm_ipv6_lookup,
+};
diff --git a/src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.h b/src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.h
new file mode 100755
index 00000000..91fb0d8e
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_lpm_ipv6.h
@@ -0,0 +1,119 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_TABLE_LPM_IPV6_H__
+#define __INCLUDE_RTE_TABLE_LPM_IPV6_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Table LPM for IPv6
+ *
+ * This table uses the Longest Prefix Match (LPM) algorithm to uniquely
+ * associate data to lookup keys.
+ *
+ * Use-case: IP routing table. Routes that are added to the table associate a
+ * next hop to an IP prefix. The IP prefix is specified as an IP address and
+ * depth and covers a multitude of lookup keys (i.e. destination IP addresses)
+ * that all share the same data (i.e. next hop). The next hop information
+ * typically contains the output interface ID, the IP address of the next hop
+ * station (which is part of the same IP network the output interface is
+ * connected to) and other flags and counters.
+ *
+ * The LPM primitive only allows associating an 8-bit number (next hop ID) to
+ * an IP prefix, while a routing table can potentially contain thousands of
+ * routes or even more. This means that the same next hop ID (and next hop
+ * information) has to be shared by multiple routes, which makes sense, as
+ * multiple remote networks could be reached through the same next hop.
+ * Therefore, when a route is added or updated, the LPM table has to check
+ * whether the same next hop is already in use before using a new next hop ID
+ * for this route.
+ *
+ * The comparison between different next hops is done for the first
+ * “entry_unique_size” bytes of the next hop information (configurable
+ * parameter), which have to uniquely identify the next hop; therefore, the user
+ * has to carefully manage the format of the LPM table entry (i.e. the next
+ * hop information) so that any next hop data that changes value during
+ * run-time (e.g. counters) is placed outside of this area.
+ *
+ ***/
+
+#include <stdint.h>
+
+#include "rte_table.h"
+
+#define RTE_LPM_IPV6_ADDR_SIZE 16
+
+/** LPM table parameters */
+struct rte_table_lpm_ipv6_params {
+ /** Maximum number of LPM rules (i.e. IP routes) */
+ uint32_t n_rules;
+
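+ /** Number of tbl8 tables to allocate in the low-level LPM6 table */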
+ uint32_t number_tbl8s;
+
+ /** Number of bytes at the start of the table entry that uniquely
+ identify the entry. Cannot be bigger than table entry size. */
+ uint32_t entry_unique_size;
+
+ /** Byte offset within input packet meta-data where lookup key (i.e.
+ the destination IP address) is located. */
+ uint32_t offset;
+};
+
+/** LPM table rule (i.e. route), specified as IP prefix. While the key used by
+the lookup operation is the destination IP address (read from the input packet
+meta-data), the entry add and entry delete operations work with LPM rules, with
+each rule covering a multitude of lookup keys (destination IP addresses)
+that share the same data (next hop). */
+struct rte_table_lpm_ipv6_key {
+ /** IP address */
+ uint8_t ip[RTE_LPM_IPV6_ADDR_SIZE];
+
+ /** IP address depth. The most significant "depth" bits of the IP
+ address specify the network part of the IP address, while the rest of
+ the bits specify the host part of the address and are ignored for the
+ purpose of route specification. */
+ uint8_t depth;
+};
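+
+/*
+ * Minimal configuration sketch (illustrative only; the values shown are
+ * assumptions of this example, not defaults of the library):
+ *
+ *     struct rte_table_lpm_ipv6_params p = {
+ *         .n_rules = 1024,
+ *         .number_tbl8s = 1 << 16,
+ *         .entry_unique_size = 4,
+ *         .offset = 128,
+ *     };
+ *     void *t = rte_table_lpm_ipv6_ops.f_create(&p, 0, 8);
+ */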
+
+/** LPM table operations */
+extern struct rte_table_ops rte_table_lpm_ipv6_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_table/rte_table_stub.c b/src/dpdk_lib18/librte_table/rte_table_stub.c
new file mode 100755
index 00000000..876b7e49
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_stub.c
@@ -0,0 +1,65 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mbuf.h>
+
+#include "rte_table_stub.h"
+
+static void *
+rte_table_stub_create(__rte_unused void *params,
+ __rte_unused int socket_id,
+ __rte_unused uint32_t entry_size)
+{
+ return (void *) 1;
+}
+
+static int
+rte_table_stub_lookup(
+ __rte_unused void *table,
+ __rte_unused struct rte_mbuf **pkts,
+ __rte_unused uint64_t pkts_mask,
+ uint64_t *lookup_hit_mask,
+ __rte_unused void **entries)
+{
+ *lookup_hit_mask = 0;
+
+ return 0;
+}
+
+struct rte_table_ops rte_table_stub_ops = {
+ .f_create = rte_table_stub_create,
+ .f_free = NULL,
+ .f_add = NULL,
+ .f_delete = NULL,
+ .f_lookup = rte_table_stub_lookup,
+};
diff --git a/src/dpdk_lib18/librte_table/rte_table_stub.h b/src/dpdk_lib18/librte_table/rte_table_stub.h
new file mode 100755
index 00000000..e75340b0
--- /dev/null
+++ b/src/dpdk_lib18/librte_table/rte_table_stub.h
@@ -0,0 +1,62 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_TABLE_STUB_H__
+#define __INCLUDE_RTE_TABLE_STUB_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Table Stub
+ *
+ * The stub table lookup operation produces lookup miss for all input packets.
+ *
+ ***/
+
+#include <stdint.h>
+
+#include "rte_table.h"
+
+/** Stub table parameters: NONE */
+
+/** Stub table operations */
+extern struct rte_table_ops rte_table_stub_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/dpdk_lib18/librte_timer/Makefile b/src/dpdk_lib18/librte_timer/Makefile
new file mode 100755
index 00000000..07eb0c63
--- /dev/null
+++ b/src/dpdk_lib18/librte_timer/Makefile
@@ -0,0 +1,48 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_timer.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_TIMER) := rte_timer.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_TIMER)-include := rte_timer.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_TIMER) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_timer/rte_timer.c b/src/dpdk_lib18/librte_timer/rte_timer.c
new file mode 100755
index 00000000..269a992b
--- /dev/null
+++ b/src/dpdk_lib18/librte_timer/rte_timer.c
@@ -0,0 +1,610 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_spinlock.h>
+#include <rte_random.h>
+
+#include "rte_timer.h"
+
+LIST_HEAD(rte_timer_list, rte_timer);
+
+struct priv_timer {
+ struct rte_timer pending_head; /**< dummy timer instance to head up list */
+ rte_spinlock_t list_lock; /**< lock to protect list access */
+
+ /** per-core variable that is true if a timer was updated on this
+ * core since last reset of the variable */
+ int updated;
+
+ /** track the current depth of the skiplist */
+ unsigned curr_skiplist_depth;
+
+ unsigned prev_lcore; /**< used for lcore round robin */
+
+#ifdef RTE_LIBRTE_TIMER_DEBUG
+ /** per-lcore statistics */
+ struct rte_timer_debug_stats stats;
+#endif
+} __rte_cache_aligned;
+
+/** per-lcore private info for timers */
+static struct priv_timer priv_timer[RTE_MAX_LCORE];
+
+/* when debug is enabled, store some statistics */
+#ifdef RTE_LIBRTE_TIMER_DEBUG
+#define __TIMER_STAT_ADD(name, n) do { \
+ unsigned __lcore_id = rte_lcore_id(); \
+ priv_timer[__lcore_id].stats.name += (n); \
+ } while(0)
+#else
+#define __TIMER_STAT_ADD(name, n) do {} while(0)
+#endif
+
+/* Init the timer library. */
+void
+rte_timer_subsystem_init(void)
+{
+ unsigned lcore_id;
+
+ /* since priv_timer is static, it's zeroed by default, so only init some
+ * fields.
+ */
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++) {
+ rte_spinlock_init(&priv_timer[lcore_id].list_lock);
+ priv_timer[lcore_id].prev_lcore = lcore_id;
+ }
+}
+
+/* Initialize the timer handle tim for use */
+void
+rte_timer_init(struct rte_timer *tim)
+{
+ union rte_timer_status status;
+
+ status.state = RTE_TIMER_STOP;
+ status.owner = RTE_TIMER_NO_OWNER;
+ tim->status.u32 = status.u32;
+}
+
+/*
+ * if the timer is pending or stopped (or running on the same core as
+ * us), mark timer as configuring, and on success return the previous
+ * status of the timer
+ */
+static int
+timer_set_config_state(struct rte_timer *tim,
+ union rte_timer_status *ret_prev_status)
+{
+ union rte_timer_status prev_status, status;
+ int success = 0;
+ unsigned lcore_id;
+
+ lcore_id = rte_lcore_id();
+
+ /* wait until the timer is in the correct status before updating,
+ * and mark it as being configured */
+ while (success == 0) {
+ prev_status.u32 = tim->status.u32;
+
+ /* timer is running on another core, exit */
+ if (prev_status.state == RTE_TIMER_RUNNING &&
+ (unsigned)prev_status.owner != lcore_id)
+ return -1;
+
+ /* timer is being configured on another core */
+ if (prev_status.state == RTE_TIMER_CONFIG)
+ return -1;
+
+ /* here, we know that timer is stopped or pending,
+ * mark it atomically as being configured */
+ status.state = RTE_TIMER_CONFIG;
+ status.owner = (int16_t)lcore_id;
+ success = rte_atomic32_cmpset(&tim->status.u32,
+ prev_status.u32,
+ status.u32);
+ }
+
+ ret_prev_status->u32 = prev_status.u32;
+ return 0;
+}
+
+/*
+ * if timer is pending, mark timer as running
+ */
+static int
+timer_set_running_state(struct rte_timer *tim)
+{
+ union rte_timer_status prev_status, status;
+ unsigned lcore_id = rte_lcore_id();
+ int success = 0;
+
+ /* wait until the timer is in the correct status before updating,
+ * and mark it as running */
+ while (success == 0) {
+ prev_status.u32 = tim->status.u32;
+
+ /* timer is not pending anymore */
+ if (prev_status.state != RTE_TIMER_PENDING)
+ return -1;
+
+ /* here, we know that the timer is pending,
+ * mark it atomically as running */
+ status.state = RTE_TIMER_RUNNING;
+ status.owner = (int16_t)lcore_id;
+ success = rte_atomic32_cmpset(&tim->status.u32,
+ prev_status.u32,
+ status.u32);
+ }
+
+ return 0;
+}
+
+/*
+ * Return a skiplist level for a new entry.
+ * This probabilistically gives a level with p=1/4 that an entry at level n
+ * will also appear at level n+1.
+ */
+static uint32_t
+timer_get_skiplist_level(unsigned curr_depth)
+{
+#ifdef RTE_LIBRTE_TIMER_DEBUG
+ static uint32_t i, count = 0;
+ static uint32_t levels[MAX_SKIPLIST_DEPTH] = {0};
+#endif
+
+ /* probability value is 1/4, i.e. all at level 0, 1 in 4 is at level 1,
+ * 1 in 16 at level 2, 1 in 64 at level 3, etc. Calculated using lowest
+ * bit position of a (pseudo)random number.
+ */
+ uint32_t rand = rte_rand() & (UINT32_MAX - 1);
+ uint32_t level = rand == 0 ? MAX_SKIPLIST_DEPTH : (rte_bsf32(rand)-1) / 2;
+
+ /* limit the levels used to one above our current level, so we don't,
+ * for instance, have a level 0 and a level 7 without anything between
+ */
+ if (level > curr_depth)
+ level = curr_depth;
+ if (level >= MAX_SKIPLIST_DEPTH)
+ level = MAX_SKIPLIST_DEPTH-1;
+#ifdef RTE_LIBRTE_TIMER_DEBUG
+ count ++;
+ levels[level]++;
+ if (count % 10000 == 0)
+ for (i = 0; i < MAX_SKIPLIST_DEPTH; i++)
+ printf("Level %u: %u\n", (unsigned)i, (unsigned)levels[i]);
+#endif
+ return level;
+}
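+
+/*
+ * Worked example for the mapping above (illustrative): bit 0 of the random
+ * value is masked off, so its lowest set bit is at position 1 or higher.
+ * Positions 1-2 map to level 0, positions 3-4 to level 1, positions 5-6 to
+ * level 2, and so on; each pair of positions is hit with 1/4 the probability
+ * of the previous pair, which yields the intended p = 1/4 per level.
+ */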
+
+/*
+ * For a given time value, get the entries at each level which
+ * are <= that time value.
+ */
+static void
+timer_get_prev_entries(uint64_t time_val, unsigned tim_lcore,
+ struct rte_timer **prev)
+{
+ unsigned lvl = priv_timer[tim_lcore].curr_skiplist_depth;
+ prev[lvl] = &priv_timer[tim_lcore].pending_head;
+ while(lvl != 0) {
+ lvl--;
+ prev[lvl] = prev[lvl+1];
+ while (prev[lvl]->sl_next[lvl] &&
+ prev[lvl]->sl_next[lvl]->expire <= time_val)
+ prev[lvl] = prev[lvl]->sl_next[lvl];
+ }
+}
+
+/*
+ * Given a timer node in the skiplist, find the previous entries for it at
+ * all skiplist levels.
+ */
+static void
+timer_get_prev_entries_for_node(struct rte_timer *tim, unsigned tim_lcore,
+ struct rte_timer **prev)
+{
+ int i;
+ /* to get a specific entry in the list, look for entries just lower than
+ * its expiry time, and then advance each level individually if necessary
+ */
+ timer_get_prev_entries(tim->expire - 1, tim_lcore, prev);
+ for (i = priv_timer[tim_lcore].curr_skiplist_depth - 1; i >= 0; i--) {
+ while (prev[i]->sl_next[i] != NULL &&
+ prev[i]->sl_next[i] != tim &&
+ prev[i]->sl_next[i]->expire <= tim->expire)
+ prev[i] = prev[i]->sl_next[i];
+ }
+}
+
+/*
+ * add in list, lock if needed
+ * timer must be in config state
+ * timer must not be in a list
+ */
+static void
+timer_add(struct rte_timer *tim, unsigned tim_lcore, int local_is_locked)
+{
+ unsigned lcore_id = rte_lcore_id();
+ unsigned lvl;
+ struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
+
+ /* if timer needs to be scheduled on another core, we need to
+ * lock the list; if it is on local core, we need to lock if
+ * we are not called from rte_timer_manage() */
+ if (tim_lcore != lcore_id || !local_is_locked)
+ rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);
+
+ /* find where exactly this element goes in the list of elements
+ * for each depth. */
+ timer_get_prev_entries(tim->expire, tim_lcore, prev);
+
+ /* now assign it a new level and add at that level */
+ const unsigned tim_level = timer_get_skiplist_level(
+ priv_timer[tim_lcore].curr_skiplist_depth);
+ if (tim_level == priv_timer[tim_lcore].curr_skiplist_depth)
+ priv_timer[tim_lcore].curr_skiplist_depth++;
+
+ lvl = tim_level;
+ while (lvl > 0) {
+ tim->sl_next[lvl] = prev[lvl]->sl_next[lvl];
+ prev[lvl]->sl_next[lvl] = tim;
+ lvl--;
+ }
+ tim->sl_next[0] = prev[0]->sl_next[0];
+ prev[0]->sl_next[0] = tim;
+
+ /* save the lowest list entry into the expire field of the dummy hdr
+ * NOTE: this is not atomic on 32-bit*/
+ priv_timer[tim_lcore].pending_head.expire = priv_timer[tim_lcore].\
+ pending_head.sl_next[0]->expire;
+
+ if (tim_lcore != lcore_id || !local_is_locked)
+ rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);
+}
+
+/*
+ * del from list, lock if needed
+ * timer must be in config state
+ * timer must be in a list
+ */
+static void
+timer_del(struct rte_timer *tim, union rte_timer_status prev_status,
+ int local_is_locked)
+{
+ unsigned lcore_id = rte_lcore_id();
+ unsigned prev_owner = prev_status.owner;
+ int i;
+ struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
+
+ /* if the timer is pending on another core, we need to lock the
+ * list; if it is on the local core, we need to lock if we are not
+ * called from rte_timer_manage() */
+ if (prev_owner != lcore_id || !local_is_locked)
+ rte_spinlock_lock(&priv_timer[prev_owner].list_lock);
+
+ /* save the lowest list entry into the expire field of the dummy hdr.
+ * NOTE: this is not atomic on 32-bit */
+ if (tim == priv_timer[prev_owner].pending_head.sl_next[0])
+ priv_timer[prev_owner].pending_head.expire =
+ ((tim->sl_next[0] == NULL) ? 0 : tim->sl_next[0]->expire);
+
+ /* adjust pointers from previous entries to point past this */
+ timer_get_prev_entries_for_node(tim, prev_owner, prev);
+ for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--) {
+ if (prev[i]->sl_next[i] == tim)
+ prev[i]->sl_next[i] = tim->sl_next[i];
+ }
+
+ /* in case we deleted last entry at a level, adjust down max level */
+ for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
+ if (priv_timer[prev_owner].pending_head.sl_next[i] == NULL)
+ priv_timer[prev_owner].curr_skiplist_depth --;
+ else
+ break;
+
+ if (prev_owner != lcore_id || !local_is_locked)
+ rte_spinlock_unlock(&priv_timer[prev_owner].list_lock);
+}
+
+/* Reset and start the timer associated with the timer handle (private func) */
+static int
+__rte_timer_reset(struct rte_timer *tim, uint64_t expire,
+ uint64_t period, unsigned tim_lcore,
+ rte_timer_cb_t fct, void *arg,
+ int local_is_locked)
+{
+ union rte_timer_status prev_status, status;
+ int ret;
+ unsigned lcore_id = rte_lcore_id();
+
+ /* round robin for tim_lcore */
+ if (tim_lcore == (unsigned)LCORE_ID_ANY) {
+ tim_lcore = rte_get_next_lcore(priv_timer[lcore_id].prev_lcore,
+ 0, 1);
+ priv_timer[lcore_id].prev_lcore = tim_lcore;
+ }
+
+ /* wait until the timer is in the correct state before updating,
+ * and mark it as being configured */
+ ret = timer_set_config_state(tim, &prev_status);
+ if (ret < 0)
+ return -1;
+
+ __TIMER_STAT_ADD(reset, 1);
+ if (prev_status.state == RTE_TIMER_RUNNING) {
+ priv_timer[lcore_id].updated = 1;
+ }
+
+ /* remove it from list */
+ if (prev_status.state == RTE_TIMER_PENDING) {
+ timer_del(tim, prev_status, local_is_locked);
+ __TIMER_STAT_ADD(pending, -1);
+ }
+
+ tim->period = period;
+ tim->expire = expire;
+ tim->f = fct;
+ tim->arg = arg;
+
+ __TIMER_STAT_ADD(pending, 1);
+ timer_add(tim, tim_lcore, local_is_locked);
+
+ /* update state: as we are in CONFIG state, only we can modify
+ * the state, so we don't need to use cmpset() here */
+ rte_wmb();
+ status.state = RTE_TIMER_PENDING;
+ status.owner = (int16_t)tim_lcore;
+ tim->status.u32 = status.u32;
+
+ return 0;
+}
+
+/* Reset and start the timer associated with the timer handle tim */
+int
+rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
+ enum rte_timer_type type, unsigned tim_lcore,
+ rte_timer_cb_t fct, void *arg)
+{
+ uint64_t cur_time = rte_get_timer_cycles();
+ uint64_t period;
+
+ if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) &&
+ !rte_lcore_is_enabled(tim_lcore)))
+ return -1;
+
+ if (type == PERIODICAL)
+ period = ticks;
+ else
+ period = 0;
+
+ __rte_timer_reset(tim, cur_time + ticks, period, tim_lcore,
+ fct, arg, 0);
+
+ return 0;
+}
+
+/* loop until rte_timer_reset() succeeds */
+void
+rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
+ enum rte_timer_type type, unsigned tim_lcore,
+ rte_timer_cb_t fct, void *arg)
+{
+ while (rte_timer_reset(tim, ticks, type, tim_lcore,
+ fct, arg) != 0);
+}
+
+/* Stop the timer associated with the timer handle tim */
+int
+rte_timer_stop(struct rte_timer *tim)
+{
+ union rte_timer_status prev_status, status;
+ unsigned lcore_id = rte_lcore_id();
+ int ret;
+
+ /* wait until the timer is in the correct state before updating,
+ * and mark it as being configured */
+ ret = timer_set_config_state(tim, &prev_status);
+ if (ret < 0)
+ return -1;
+
+ __TIMER_STAT_ADD(stop, 1);
+ if (prev_status.state == RTE_TIMER_RUNNING) {
+ priv_timer[lcore_id].updated = 1;
+ }
+
+ /* remove it from list */
+ if (prev_status.state == RTE_TIMER_PENDING) {
+ timer_del(tim, prev_status, 0);
+ __TIMER_STAT_ADD(pending, -1);
+ }
+
+ /* mark timer as stopped */
+ rte_wmb();
+ status.state = RTE_TIMER_STOP;
+ status.owner = RTE_TIMER_NO_OWNER;
+ tim->status.u32 = status.u32;
+
+ return 0;
+}
+
+/* loop until rte_timer_stop() succeeds */
+void
+rte_timer_stop_sync(struct rte_timer *tim)
+{
+ while (rte_timer_stop(tim) != 0)
+ rte_pause();
+}
+
+/* Test the PENDING status of the timer handle tim */
+int
+rte_timer_pending(struct rte_timer *tim)
+{
+ return tim->status.state == RTE_TIMER_PENDING;
+}
+
+/* must be called periodically; runs all timers that have expired */
+void rte_timer_manage(void)
+{
+ union rte_timer_status status;
+ struct rte_timer *tim, *next_tim;
+ unsigned lcore_id = rte_lcore_id();
+ struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
+ uint64_t cur_time;
+ int i, ret;
+
+ __TIMER_STAT_ADD(manage, 1);
+ /* optimize for the case where per-cpu list is empty */
+ if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL)
+ return;
+ cur_time = rte_get_timer_cycles();
+
+#ifdef RTE_ARCH_X86_64
+ /* on 64-bit the value cached in pending_head.expire will be updated
+ * atomically, so we can consult it for a quick check here outside the
+ * lock */
+ if (likely(priv_timer[lcore_id].pending_head.expire > cur_time))
+ return;
+#endif
+
+ /* browse ordered list, add expired timers in 'expired' list */
+ rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
+
+ /* if nothing to do just unlock and return */
+ if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL ||
+ priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time)
+ goto done;
+
+ /* save start of list of expired timers */
+ tim = priv_timer[lcore_id].pending_head.sl_next[0];
+
+ /* break the existing list at current time point */
+ timer_get_prev_entries(cur_time, lcore_id, prev);
+ for (i = priv_timer[lcore_id].curr_skiplist_depth -1; i >= 0; i--) {
+ priv_timer[lcore_id].pending_head.sl_next[i] = prev[i]->sl_next[i];
+ if (prev[i]->sl_next[i] == NULL)
+ priv_timer[lcore_id].curr_skiplist_depth--;
+ prev[i]->sl_next[i] = NULL;
+ }
+
+ /* now scan expired list and call callbacks */
+ for ( ; tim != NULL; tim = next_tim) {
+ next_tim = tim->sl_next[0];
+
+ ret = timer_set_running_state(tim);
+
+ /* this timer was not pending, continue */
+ if (ret < 0)
+ continue;
+
+ rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
+
+ priv_timer[lcore_id].updated = 0;
+
+ /* execute callback function with list unlocked */
+ tim->f(tim, tim->arg);
+
+ rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
+ __TIMER_STAT_ADD(pending, -1);
+ /* the timer was stopped or reloaded by the callback
+ * function, we have nothing to do here */
+ if (priv_timer[lcore_id].updated == 1)
+ continue;
+
+ if (tim->period == 0) {
+ /* remove from done list and mark timer as stopped */
+ status.state = RTE_TIMER_STOP;
+ status.owner = RTE_TIMER_NO_OWNER;
+ rte_wmb();
+ tim->status.u32 = status.u32;
+ }
+ else {
+ /* keep it in list and mark timer as pending */
+ status.state = RTE_TIMER_PENDING;
+ __TIMER_STAT_ADD(pending, 1);
+ status.owner = (int16_t)lcore_id;
+ rte_wmb();
+ tim->status.u32 = status.u32;
+ __rte_timer_reset(tim, cur_time + tim->period,
+ tim->period, lcore_id, tim->f, tim->arg, 1);
+ }
+ }
+
+ /* update the next to expire timer value */
+ priv_timer[lcore_id].pending_head.expire =
+ (priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
+ priv_timer[lcore_id].pending_head.sl_next[0]->expire;
+done:
+ /* job finished, unlock the list lock */
+ rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
+}
+
+/* dump statistics about timers */
+void rte_timer_dump_stats(FILE *f)
+{
+#ifdef RTE_LIBRTE_TIMER_DEBUG
+ struct rte_timer_debug_stats sum;
+ unsigned lcore_id;
+
+ memset(&sum, 0, sizeof(sum));
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ sum.reset += priv_timer[lcore_id].stats.reset;
+ sum.stop += priv_timer[lcore_id].stats.stop;
+ sum.manage += priv_timer[lcore_id].stats.manage;
+ sum.pending += priv_timer[lcore_id].stats.pending;
+ }
+ fprintf(f, "Timer statistics:\n");
+ fprintf(f, " reset = %"PRIu64"\n", sum.reset);
+ fprintf(f, " stop = %"PRIu64"\n", sum.stop);
+ fprintf(f, " manage = %"PRIu64"\n", sum.manage);
+ fprintf(f, " pending = %"PRIu64"\n", sum.pending);
+#else
+ fprintf(f, "No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n");
+#endif
+}
diff --git a/src/dpdk_lib18/librte_timer/rte_timer.h b/src/dpdk_lib18/librte_timer/rte_timer.h
new file mode 100755
index 00000000..4907cf5a
--- /dev/null
+++ b/src/dpdk_lib18/librte_timer/rte_timer.h
@@ -0,0 +1,335 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_TIMER_H_
+#define _RTE_TIMER_H_
+
+/**
+ * @file
+ * RTE Timer
+ *
+ * This library provides a timer service to RTE Data Plane execution
+ * units that allows the execution of callback functions asynchronously.
+ *
+ * - Timers can be periodic or single (one-shot).
+ * - The timers can be loaded from one core and executed on another. This has
+ * to be specified in the call to rte_timer_reset().
+ * - High precision is possible. NOTE: this depends on how frequently
+ * rte_timer_manage(), which checks timer expiration for the local core,
+ * is called.
+ * - If not used in an application, it can be disabled at compilation time
+ * by simply not calling rte_timer_manage(), avoiding any overhead.
+ *
+ * The timer library uses the rte_get_hpet_cycles() function that
+ * uses the HPET, when available, to provide a reliable time reference. [HPET
+ * routines are provided by EAL, which falls back to the chip TSC (time-
+ * stamp counter) when HPET is not available.]
+ *
+ * This library provides an interface to add, delete and restart a
+ * timer. The API is based on the BSD callout(9) API with a few
+ * differences.
+ *
+ * See the RTE architecture documentation for more information about the
+ * design of this library.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_TIMER_STOP 0 /**< State: timer is stopped. */
+#define RTE_TIMER_PENDING 1 /**< State: timer is scheduled. */
+#define RTE_TIMER_RUNNING 2 /**< State: timer function is running. */
+#define RTE_TIMER_CONFIG 3 /**< State: timer is being configured. */
+
+#define RTE_TIMER_NO_OWNER -1 /**< Timer has no owner. */
+
+/**
+ * Timer type: Periodic or single (one-shot).
+ */
+enum rte_timer_type {
+ SINGLE,
+ PERIODICAL
+};
+
+/**
+ * Timer status: A union of the state (stopped, pending, running,
+ * config) and an owner (the id of the lcore that owns the timer).
+ */
+union rte_timer_status {
+ struct {
+ uint16_t state; /**< Stop, pending, running, config. */
+ int16_t owner; /**< The lcore that owns the timer. */
+ };
+ uint32_t u32; /**< To atomic-set status + owner. */
+};
+
+#ifdef RTE_LIBRTE_TIMER_DEBUG
+/**
+ * A structure that stores the timer statistics (per-lcore).
+ */
+struct rte_timer_debug_stats {
+ uint64_t reset; /**< Number of successful calls to rte_timer_reset(). */
+ uint64_t stop; /**< Number of successful calls to rte_timer_stop(). */
+ uint64_t manage; /**< Number of calls to rte_timer_manage(). */
+ uint64_t pending; /**< Number of pending/running timers. */
+};
+#endif
+
+struct rte_timer;
+
+/**
+ * Callback function type for timer expiry.
+ */
+typedef void (rte_timer_cb_t)(struct rte_timer *, void *);
+
+#define MAX_SKIPLIST_DEPTH 10
+
+/**
+ * A structure describing a timer in RTE.
+ */
+struct rte_timer
+{
+ uint64_t expire; /**< Time at which the timer expires. */
+ struct rte_timer *sl_next[MAX_SKIPLIST_DEPTH];
+ volatile union rte_timer_status status; /**< Status of timer. */
+ uint64_t period; /**< Period of timer (0 if not periodic). */
+ rte_timer_cb_t *f; /**< Callback function. */
+ void *arg; /**< Argument to callback function. */
+};
+
+
+#ifdef __cplusplus
+/**
+ * A C++ static initializer for a timer structure.
+ */
+#define RTE_TIMER_INITIALIZER { \
+ 0, \
+ {NULL}, \
+ {{RTE_TIMER_STOP, RTE_TIMER_NO_OWNER}}, \
+ 0, \
+ NULL, \
+ NULL, \
+ }
+#else
+/**
+ * A static initializer for a timer structure.
+ */
+#define RTE_TIMER_INITIALIZER { \
+ .status = {{ \
+ .state = RTE_TIMER_STOP, \
+ .owner = RTE_TIMER_NO_OWNER, \
+ }}, \
+ }
+#endif
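
As a usage note (application side, not part of this header), a timer declared at file scope can be initialized with this macro instead of calling rte_timer_init() at run time; for a stopped, unowned timer the two are equivalent.

/* hypothetical application-level declaration, equivalent to calling
 * rte_timer_init(&stats_timer) before first use */
static struct rte_timer stats_timer = RTE_TIMER_INITIALIZER;
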
+
+/**
+ * Initialize the timer library.
+ *
+ * Initializes internal variables (list, locks and so on) for the RTE
+ * timer library.
+ */
+void rte_timer_subsystem_init(void);
+
+/**
+ * Initialize a timer handle.
+ *
+ * The rte_timer_init() function initializes the timer handle *tim*
+ * for use. No operations can be performed on a timer before it is
+ * initialized.
+ *
+ * @param tim
+ * The timer to initialize.
+ */
+void rte_timer_init(struct rte_timer *tim);
+
+/**
+ * Reset and start the timer associated with the timer handle.
+ *
+ * The rte_timer_reset() function resets and starts the timer
+ * associated with the timer handle *tim*. When the timer expires after
+ * *ticks* HPET cycles, the function specified by *fct* will be called
+ * with the argument *arg* on core *tim_lcore*.
+ *
+ * If the timer associated with the timer handle is already running
+ * (in the RUNNING state), the function will fail. The user has to check
+ * the return value of the function to detect this case.
+ *
+ * If the timer is being configured on another core (the CONFIG state),
+ * it will also fail.
+ *
+ * If the timer is pending or stopped, it will be rescheduled with the
+ * new parameters.
+ *
+ * @param tim
+ * The timer handle.
+ * @param ticks
+ * The number of cycles (see rte_get_hpet_hz()) before the callback
+ * function is called.
+ * @param type
+ * The type can be either:
+ * - PERIODICAL: The timer is automatically reloaded after execution
+ * (returns to the PENDING state)
+ * - SINGLE: The timer is one-shot, that is, the timer goes to a
+ * STOPPED state after execution.
+ * @param tim_lcore
+ * The ID of the lcore where the timer callback function has to be
+ * executed. If tim_lcore is LCORE_ID_ANY, the timer library will
+ * launch it on a different core for each call (round-robin).
+ * @param fct
+ * The callback function of the timer.
+ * @param arg
+ * The user argument of the callback function.
+ * @return
+ * - 0: Success; the timer is scheduled.
+ * - (-1): Timer is in the RUNNING or CONFIG state.
+ */
+int rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
+ enum rte_timer_type type, unsigned tim_lcore,
+ rte_timer_cb_t fct, void *arg);
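
A minimal usage sketch, assuming the EAL and rte_timer_subsystem_init() have already run; the statistics callback and timer names are hypothetical application code, not part of this header. It arms a periodic one-second timer on the calling lcore.

#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_timer.h>

static struct rte_timer stats_tim;   /* hypothetical application timer */

static void stats_cb(struct rte_timer *tim, void *arg)
{
	(void)tim;
	(void)arg;
	/* runs once per second on the lcore that calls rte_timer_manage() */
}

static int arm_stats_timer(void)
{
	uint64_t hz = rte_get_timer_hz();   /* timer cycles per second */

	rte_timer_init(&stats_tim);
	/* PERIODICAL: automatically re-armed after each expiry */
	return rte_timer_reset(&stats_tim, hz, PERIODICAL,
			       rte_lcore_id(), stats_cb, NULL);
}

The return value still has to be checked: the call fails if the timer is currently in the RUNNING or CONFIG state, which is why rte_timer_reset_sync() exists for callers that can afford to spin.
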
+
+
+/**
+ * Loop until rte_timer_reset() succeeds.
+ *
+ * Reset and start the timer associated with the timer handle. This call
+ * always succeeds. See rte_timer_reset() for details.
+ *
+ * @param tim
+ * The timer handle.
+ * @param ticks
+ * The number of cycles (see rte_get_hpet_hz()) before the callback
+ * function is called.
+ * @param type
+ * The type can be either:
+ * - PERIODICAL: The timer is automatically reloaded after execution
+ * (returns to the PENDING state)
+ * - SINGLE: The timer is one-shot, that is, the timer goes to a
+ * STOPPED state after execution.
+ * @param tim_lcore
+ * The ID of the lcore where the timer callback function has to be
+ * executed. If tim_lcore is LCORE_ID_ANY, the timer library will
+ * launch it on a different core for each call (round-robin).
+ * @param fct
+ * The callback function of the timer.
+ * @param arg
+ * The user argument of the callback function.
+ */
+void
+rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
+ enum rte_timer_type type, unsigned tim_lcore,
+ rte_timer_cb_t fct, void *arg);
+
+/**
+ * Stop a timer.
+ *
+ * The rte_timer_stop() function stops the timer associated with the
+ * timer handle *tim*. It may fail if the timer is currently running or
+ * being configured.
+ *
+ * If the timer is pending or stopped (for instance, already expired),
+ * the function will succeed. The timer handle tim must have been
+ * initialized using rte_timer_init(), otherwise, undefined behavior
+ * will occur.
+ *
+ * This function can be called safely from a timer callback. If it
+ * succeeds, the timer is not referenced anymore by the timer library
+ * and the timer structure can be freed (even in the callback
+ * function).
+ *
+ * @param tim
+ * The timer handle.
+ * @return
+ * - 0: Success; the timer is stopped.
+ * - (-1): The timer is in the RUNNING or CONFIG state.
+ */
+int rte_timer_stop(struct rte_timer *tim);
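
A hedged illustration of the note above, using a hypothetical heap-allocated wrapper chosen purely for the example: a SINGLE timer that stops itself inside its own callback is no longer referenced by the library, so its enclosing allocation can be released right there.

#include <stdlib.h>
#include <rte_timer.h>

/* hypothetical one-shot job that owns its timer */
struct oneshot_job {
	struct rte_timer tim;
	void (*work)(void *);
	void *ctx;
};

static void oneshot_cb(struct rte_timer *tim, void *arg)
{
	struct oneshot_job *job = arg;

	job->work(job->ctx);
	/* on success the library holds no further reference to the timer,
	 * so the whole job can be freed even though we are in its callback */
	if (rte_timer_stop(tim) == 0)
		free(job);
}
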
+
+
+/**
+ * Loop until rte_timer_stop() succeeds.
+ *
+ * After a call to this function, the timer identified by *tim* is
+ * stopped. See rte_timer_stop() for details.
+ *
+ * @param tim
+ * The timer handle.
+ */
+void rte_timer_stop_sync(struct rte_timer *tim);
+
+/**
+ * Test if a timer is pending.
+ *
+ * The rte_timer_pending() function tests the PENDING status
+ * of the timer handle *tim*. A PENDING timer is one that has been
+ * scheduled and whose function has not yet been called.
+ *
+ * @param tim
+ * The timer handle.
+ * @return
+ * - 0: The timer is not pending.
+ * - 1: The timer is pending.
+ */
+int rte_timer_pending(struct rte_timer *tim);
+
+/**
+ * Manage the timer list and execute callback functions.
+ *
+ * This function must be called periodically from each core's
+ * main loop. It browses the list of pending timers and runs all
+ * timers that have expired.
+ *
+ * The precision of the timer depends on the call frequency of this
+ * function. However, the more often the function is called, the more
+ * CPU resources it will use.
+ */
+void rte_timer_manage(void);
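
The polling side, again as an application-level sketch under the same assumptions: each lcore's main loop calls rte_timer_manage() at a chosen interval, trading timer precision against cycles spent scanning the pending list. The ~100 us resolution and the packet-work placeholder are illustrative only.

#include <rte_cycles.h>
#include <rte_timer.h>

static int lcore_main_loop(void *arg)
{
	const uint64_t resolution = rte_get_timer_hz() / 10000; /* ~100 us */
	uint64_t prev_tsc = rte_get_timer_cycles();
	uint64_t cur_tsc;

	(void)arg;
	for (;;) {
		cur_tsc = rte_get_timer_cycles();
		if (cur_tsc - prev_tsc >= resolution) {
			rte_timer_manage();   /* run expired timers on this lcore */
			prev_tsc = cur_tsc;
		}
		/* ... packet RX/TX work for this lcore ... */
	}
	return 0;
}
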
+
+/**
+ * Dump statistics about timers.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_timer_dump_stats(FILE *f);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_TIMER_H_ */
diff --git a/src/dpdk_lib18/librte_vhost/Makefile b/src/dpdk_lib18/librte_vhost/Makefile
new file mode 100755
index 00000000..c008d64a
--- /dev/null
+++ b/src/dpdk_lib18/librte_vhost/Makefile
@@ -0,0 +1,50 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_vhost.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -D_FILE_OFFSET_BITS=64 -lfuse
+LDFLAGS += -lfuse
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_VHOST) := vhost-net-cdev.c virtio-net.c vhost_rxtx.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_VHOST)-include += rte_virtio_net.h
+
+# dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_mbuf
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/dpdk_lib18/librte_vhost/eventfd_link/Makefile b/src/dpdk_lib18/librte_vhost/eventfd_link/Makefile
new file mode 100755
index 00000000..fc3927bf
--- /dev/null
+++ b/src/dpdk_lib18/librte_vhost/eventfd_link/Makefile
@@ -0,0 +1,39 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+obj-m += eventfd_link.o
+
+
+all:
+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
+
+clean:
+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
diff --git a/src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.c b/src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.c
new file mode 100755
index 00000000..7755dd67
--- /dev/null
+++ b/src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.c
@@ -0,0 +1,195 @@
+/*-
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ */
+
+#include <linux/eventfd.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/mmu_context.h>
+#include <linux/sched.h>
+#include <asm/mmu_context.h>
+#include <linux/fdtable.h>
+
+#include "eventfd_link.h"
+
+
+/*
+ * get_files_struct is copied from fs/file.c
+ */
+struct files_struct *
+get_files_struct(struct task_struct *task)
+{
+ struct files_struct *files;
+
+ task_lock(task);
+ files = task->files;
+ if (files)
+ atomic_inc(&files->count);
+ task_unlock(task);
+
+ return files;
+}
+
+/*
+ * put_files_struct is extracted from fs/file.c
+ */
+void
+put_files_struct(struct files_struct *files)
+{
+ if (atomic_dec_and_test(&files->count))
+ BUG();
+}
+
+
+static long
+eventfd_link_ioctl(struct file *f, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *) arg;
+ struct task_struct *task_target = NULL;
+ struct file *file;
+ struct files_struct *files;
+ struct fdtable *fdt;
+ struct eventfd_copy eventfd_copy;
+
+ switch (ioctl) {
+ case EVENTFD_COPY:
+ if (copy_from_user(&eventfd_copy, argp,
+ sizeof(struct eventfd_copy)))
+ return -EFAULT;
+
+ /*
+ * Find the task struct for the target pid
+ */
+ task_target =
+ pid_task(find_vpid(eventfd_copy.target_pid), PIDTYPE_PID);
+ if (task_target == NULL) {
+ pr_debug("Failed to get mem ctx for target pid\n");
+ return -EFAULT;
+ }
+
+ files = get_files_struct(current);
+ if (files == NULL) {
+ pr_debug("Failed to get files struct\n");
+ return -EFAULT;
+ }
+
+ rcu_read_lock();
+ file = fcheck_files(files, eventfd_copy.source_fd);
+ if (file) {
+ if (file->f_mode & FMODE_PATH ||
+ !atomic_long_inc_not_zero(&file->f_count))
+ file = NULL;
+ }
+ rcu_read_unlock();
+ put_files_struct(files);
+
+ if (file == NULL) {
+ pr_debug("Failed to get file from source pid\n");
+ return 0;
+ }
+
+ /*
+ * Release the existing eventfd in the source process
+ */
+ spin_lock(&files->file_lock);
+ filp_close(file, files);
+ fdt = files_fdtable(files);
+ fdt->fd[eventfd_copy.source_fd] = NULL;
+ spin_unlock(&files->file_lock);
+
+ /*
+ * Find the file struct associated with the target fd.
+ */
+
+ files = get_files_struct(task_target);
+ if (files == NULL) {
+ pr_debug("Failed to get files struct\n");
+ return -EFAULT;
+ }
+
+ rcu_read_lock();
+ file = fcheck_files(files, eventfd_copy.target_fd);
+ if (file) {
+ if (file->f_mode & FMODE_PATH ||
+ !atomic_long_inc_not_zero(&file->f_count))
+ file = NULL;
+ }
+ rcu_read_unlock();
+ put_files_struct(files);
+
+ if (file == NULL) {
+ pr_debug("Failed to get file from target pid\n");
+ return 0;
+ }
+
+ /*
+ * Install the file struct from the target process into the
+ * file descriptor of the source process.
+ */
+
+ fd_install(eventfd_copy.source_fd, file);
+
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static const struct file_operations eventfd_link_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = eventfd_link_ioctl,
+};
+
+
+static struct miscdevice eventfd_link_misc = {
+ .name = "eventfd-link",
+ .fops = &eventfd_link_fops,
+};
+
+static int __init
+eventfd_link_init(void)
+{
+ return misc_register(&eventfd_link_misc);
+}
+
+module_init(eventfd_link_init);
+
+static void __exit
+eventfd_link_exit(void)
+{
+ misc_deregister(&eventfd_link_misc);
+}
+
+module_exit(eventfd_link_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anthony Fee");
+MODULE_DESCRIPTION("Link eventfd");
+MODULE_ALIAS("devname:eventfd-link");
diff --git a/src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.h b/src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.h
new file mode 100755
index 00000000..ea619ec0
--- /dev/null
+++ b/src/dpdk_lib18/librte_vhost/eventfd_link/eventfd_link.h
@@ -0,0 +1,76 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _EVENTFD_LINK_H_
+#define _EVENTFD_LINK_H_
+
+/*
+ * ioctl to copy an fd entry in calling process to an fd in a target process
+ */
+#define EVENTFD_COPY 1
+
+/*
+ * arguments for the EVENTFD_COPY ioctl
+ */
+struct eventfd_copy {
+ unsigned target_fd; /* fd in the target process */
+ unsigned source_fd; /* fd in the calling process */
+ pid_t target_pid; /* pid of the target process */
+};
+#endif /* _EVENTFD_LINK_H_ */
diff --git a/src/dpdk_lib18/librte_vhost/libvirt/qemu-wrap.py b/src/dpdk_lib18/librte_vhost/libvirt/qemu-wrap.py
new file mode 100755
index 00000000..e2d68a0e
--- /dev/null
+++ b/src/dpdk_lib18/librte_vhost/libvirt/qemu-wrap.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+#/*
+# * BSD LICENSE
+# *
+# * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# * All rights reserved.
+# *
+# * Redistribution and use in source and binary forms, with or without
+# * modification, are permitted provided that the following conditions
+# * are met:
+# *
+# * * Redistributions of source code must retain the above copyright
+# * notice, this list of conditions and the following disclaimer.
+# * * Redistributions in binary form must reproduce the above copyright
+# * notice, this list of conditions and the following disclaimer in
+# * the documentation and/or other materials provided with the
+# * distribution.
+# * * Neither the name of Intel Corporation nor the names of its
+# * contributors may be used to endorse or promote products derived
+# * from this software without specific prior written permission.
+# *
+# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# */
+
+#####################################################################
+# This script is designed to modify the call to the QEMU emulator
+# to support userspace vhost when starting a guest machine through
+# libvirt with vhost enabled. The steps to enable this are as follows
+# and should be run as root:
+#
+# 1. Place this script in libvirtd's binary search PATH ($PATH)
+# A good location would be in the same directory that the QEMU
+# binary is located
+#
+# 2. Ensure that the script has the same owner/group and file
+# permissions as the QEMU binary
+#
+# 3. Update the VM xml file using "virsh edit VM.xml"
+#
+# 3.a) Set the VM to use the launch script
+#
+# Set the emulator path contained in the
+# <emulator></emulator> tags
+#
+# e.g. replace <emulator>/usr/bin/qemu-kvm</emulator>
+# with <emulator>/usr/bin/qemu-wrap.py</emulator>
+#
+# 3.b) Set the VM's devices to use vhost-net offload
+#
+# <interface type="network">
+# <model type="virtio"/>
+# <driver name="vhost"/>
+# </interface>
+#
+# 4. Enable libvirt to access our userspace device file by adding it to
+# the device controller cgroup for libvirtd using the following steps
+#
+# 4.a) In /etc/libvirt/qemu.conf add/edit the following lines:
+# 1) cgroup_controllers = [ ... "devices", ... ]
+# 2) clear_emulator_capabilities = 0
+# 3) user = "root"
+# 4) group = "root"
+# 5) cgroup_device_acl = [
+# "/dev/null", "/dev/full", "/dev/zero",
+# "/dev/random", "/dev/urandom",
+# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+# "/dev/rtc", "/dev/hpet", "/dev/net/tun",
+# "/dev/<devbase-name>-<index>",
+# ]
+#
+# 4.b) Disable SELinux or set to permissive mode
+#
+# 4.c) Mount cgroup device controller
+# "mkdir /dev/cgroup"
+# "mount -t cgroup none /dev/cgroup -o devices"
+#
+# 4.d) Set hugetlbfs_mount variable - ( Optional )
+# VMs using userspace vhost must use hugepage backed
+# memory. This can be enabled in the libvirt XML
+# config by adding a memory backing section to the
+# XML config e.g.
+# <memoryBacking>
+# <hugepages/>
+# </memoryBacking>
+# This memory backing section should be added after the
+# <memory> and <currentMemory> sections. This will add
+# flags "-mem-prealloc -mem-path <path>" to the QEMU
+# command line. The hugetlbfs_mount variable can be used
+# to override the default <path> passed through by libvirt.
+#
+# if "-mem-prealloc" or "-mem-path <path>" are not passed
+# through and a vhost device is detected then these options will
+# be automatically added by this script. This script will detect
+# the system hugetlbfs mount point to be used for <path>. The
+# default <path> for this script can be overridden by the
+# hugetlbfs_dir variable in the configuration section of this script.
+#
+#
+# 4.e) Restart the libvirtd system process
+# e.g. on Fedora "systemctl restart libvirtd.service"
+#
+#
+# 4.f) Edit the Configuration Parameters section of this script
+# to point to the correct emulator location and set any
+# additional options
+#
+# The script modifies the libvirtd Qemu call by modifying/adding
+# options based on the configuration parameters below.
+# NOTE:
+# emul_path and us_vhost_path must be set
+# All other parameters are optional
+#####################################################################
+
+
+#############################################
+# Configuration Parameters
+#############################################
+#Path to QEMU binary
+emul_path = "/usr/local/bin/qemu-system-x86_64"
+
+#Path to userspace vhost device file
+# This filename should match the --dev-basename --dev-index parameters of
+# the command used to launch the userspace vhost sample application e.g.
+# if the sample app launch command is:
+# ./build/vhost-switch ..... --dev-basename usvhost --dev-index 1
+# then this variable should be set to:
+# us_vhost_path = "/dev/usvhost-1"
+us_vhost_path = "/dev/usvhost-1"
+
+#List of additional user defined emulation options. These options will
+#be added to all Qemu calls
+emul_opts_user = []
+
+#List of additional user defined emulation options for vhost only.
+#These options will only be added to vhost enabled guests
+emul_opts_user_vhost = []
+
+#For all VHOST enabled VMs, the VM memory is preallocated from hugetlbfs
+# Set this variable to one to enable this option for all VMs
+use_huge_all = 0
+
+#Instead of autodetecting, override the hugetlbfs directory by setting
+#this variable
+hugetlbfs_dir = ""
+
+#############################################
+
+
+#############################################
+# ****** Do Not Modify Below this Line ******
+#############################################
+
+import sys, os, subprocess
+
+
+#List of open userspace vhost file descriptors
+fd_list = []
+
+#additional virtio device flags when using userspace vhost
+vhost_flags = [ "csum=off",
+ "gso=off",
+ "guest_tso4=off",
+ "guest_tso6=off",
+ "guest_ecn=off"
+ ]
+
+
+#############################################
+# Find the system hugefile mount point.
+# Note:
+# if multiple hugetlbfs mount points exist
+# then the first one found will be used
+#############################################
+def find_huge_mount():
+
+ if (len(hugetlbfs_dir)):
+ return hugetlbfs_dir
+
+ huge_mount = ""
+
+ if (os.access("/proc/mounts", os.F_OK)):
+ f = open("/proc/mounts", "r")
+ line = f.readline()
+ while line:
+ line_split = line.split(" ")
+ if line_split[2] == 'hugetlbfs':
+ huge_mount = line_split[1]
+ break
+ line = f.readline()
+ else:
+ print "/proc/mounts not found"
+ exit (1)
+
+ f.close()
+ if len(huge_mount) == 0:
+ print "Failed to find hugetlbfs mount point"
+ exit (1)
+
+ return huge_mount
+
+
+#############################################
+# Get a userspace Vhost file descriptor
+#############################################
+def get_vhost_fd():
+
+ if (os.access(us_vhost_path, os.F_OK)):
+ fd = os.open( us_vhost_path, os.O_RDWR)
+ else:
+ print ("US-Vhost file %s not found" %us_vhost_path)
+ exit (1)
+
+ return fd
+
+
+#############################################
+# Check for vhostfd. if found then replace
+# with our own vhost fd and append any vhost
+# flags onto the end
+#############################################
+def modify_netdev_arg(arg):
+
+ global fd_list
+ vhost_in_use = 0
+ s = ''
+ new_opts = []
+ netdev_opts = arg.split(",")
+
+ for opt in netdev_opts:
+ #check if vhost is used
+ if "vhost" == opt[:5]:
+ vhost_in_use = 1
+ else:
+ new_opts.append(opt)
+
+ #if using vhost append vhost options
+ if vhost_in_use == 1:
+ #append vhost on option
+ new_opts.append('vhost=on')
+ #append vhostfd option
+ new_fd = get_vhost_fd()
+ new_opts.append('vhostfd=' + str(new_fd))
+ fd_list.append(new_fd)
+
+ #concatenate all options
+ for opt in new_opts:
+ if len(s) > 0:
+ s+=','
+
+ s+=opt
+
+ return s
+
+
+#############################################
+# Main
+#############################################
+def main():
+
+ global fd_list
+ global vhost_in_use
+ new_args = []
+ num_cmd_args = len(sys.argv)
+ emul_call = ''
+ mem_prealloc_set = 0
+ mem_path_set = 0
+ num = 0;
+
+ #parse the parameters
+ while (num < num_cmd_args):
+ arg = sys.argv[num]
+
+ #Check netdev +1 parameter for vhostfd
+ if arg == '-netdev':
+ num_vhost_devs = len(fd_list)
+ new_args.append(arg)
+
+ num+=1
+ arg = sys.argv[num]
+ mod_arg = modify_netdev_arg(arg)
+ new_args.append(mod_arg)
+
+ #append vhost flags if this is a vhost device
+ # and -device is the next arg
+ # i.e -device -opt1,-opt2,...,-opt3,%vhost
+ if (num_vhost_devs < len(fd_list)):
+ num+=1
+ arg = sys.argv[num]
+ if arg == '-device':
+ new_args.append(arg)
+ num+=1
+ new_arg = sys.argv[num]
+ for flag in vhost_flags:
+ new_arg = ''.join([new_arg,',',flag])
+ new_args.append(new_arg)
+ else:
+ new_args.append(arg)
+ elif arg == '-mem-prealloc':
+ mem_prealloc_set = 1
+ new_args.append(arg)
+ elif arg == '-mem-path':
+ mem_path_set = 1
+ new_args.append(arg)
+
+ else:
+ new_args.append(arg)
+
+ num+=1
+
+ #Set Qemu binary location
+ emul_call+=emul_path
+ emul_call+=" "
+
+ #Add prealloc mem options if using vhost and not already added
+ if ((len(fd_list) > 0) and (mem_prealloc_set == 0)):
+ emul_call += "-mem-prealloc "
+
+ #Add mempath mem options if using vhost and not already added
+ if ((len(fd_list) > 0) and (mem_path_set == 0)):
+ #Detect and add hugetlbfs mount point
+ mp = find_huge_mount()
+ mp = "".join(["-mem-path ", mp])
+ emul_call += mp
+ emul_call += " "
+
+
+ #add user options
+ for opt in emul_opts_user:
+ emul_call += opt
+ emul_call += " "
+
+ #Add user vhost-only options
+ if len(fd_list) > 0:
+ for opt in emul_opts_user_vhost:
+ emul_call += opt
+ emul_call += " "
+
+ #Add updated libvirt options
+ iter_args = iter(new_args)
+ #skip 1st arg i.e. call to this script
+ next(iter_args)
+ for arg in iter_args:
+ emul_call+=str(arg)
+ emul_call+= " "
+
+ #Call QEMU
+ subprocess.call(emul_call, shell=True)
+
+
+ #Close usvhost files
+ for fd in fd_list:
+ os.close(fd)
+
+
+if __name__ == "__main__":
+ main()
+
diff --git a/src/dpdk_lib18/librte_vhost/rte_virtio_net.h b/src/dpdk_lib18/librte_vhost/rte_virtio_net.h
new file mode 100755
index 00000000..0bf07c72
--- /dev/null
+++ b/src/dpdk_lib18/librte_vhost/rte_virtio_net.h
@@ -0,0 +1,215 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VIRTIO_NET_H_
+#define _VIRTIO_NET_H_
+
+/**
+ * @file
+ * Interface to vhost net
+ */
+
+#include <stdint.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_net.h>
+#include <sys/eventfd.h>
+#include <sys/socket.h>
+#include <linux/if.h>
+
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+/* Used to indicate that the device is running on a data core */
+#define VIRTIO_DEV_RUNNING 1
+
+/* Backend value set by guest. */
+#define VIRTIO_DEV_STOPPED -1
+
+
+/* Enum for virtqueue management. */
+enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
+
+#define BUF_VECTOR_MAX 256
+
+/**
+ * Structure contains buffer address, length and descriptor index
+ * from vring to do scatter RX.
+ */
+struct buf_vector {
+ uint64_t buf_addr;
+ uint32_t buf_len;
+ uint32_t desc_idx;
+};
+
+/**
+ * Structure contains variables relevant to RX/TX virtqueues.
+ */
+struct vhost_virtqueue {
+ struct vring_desc *desc; /**< Virtqueue descriptor ring. */
+ struct vring_avail *avail; /**< Virtqueue available ring. */
+ struct vring_used *used; /**< Virtqueue used ring. */
+ uint32_t size; /**< Size of descriptor ring. */
+ uint32_t backend; /**< Backend value to determine if device should be started/stopped. */
+ uint16_t vhost_hlen; /**< Vhost header length (varies depending on RX merge buffers). */
+ volatile uint16_t last_used_idx; /**< Last index used on the available ring */
+ volatile uint16_t last_used_idx_res; /**< Used for multiple devices reserving buffers. */
+ eventfd_t callfd; /**< Currently unused as polling mode is enabled. */
+ eventfd_t kickfd; /**< Used to notify the guest (trigger interrupt). */
+ struct buf_vector buf_vec[BUF_VECTOR_MAX]; /**< for scatter RX. */
+} __rte_cache_aligned;
+
+/**
+ * Device structure contains all configuration information relating to the device.
+ */
+struct virtio_net {
+ struct vhost_virtqueue *virtqueue[VIRTIO_QNUM]; /**< Contains all virtqueue information. */
+ struct virtio_memory *mem; /**< QEMU memory and memory region information. */
+ uint64_t features; /**< Negotiated feature set. */
+ uint64_t device_fh; /**< device identifier. */
+ uint32_t flags; /**< Device flags. Only used to check if device is running on data core. */
+ char ifname[IFNAMSIZ]; /**< Name of the tap device. */
+ void *priv; /**< private context */
+} __rte_cache_aligned;
+
+/**
+ * Information relating to memory regions including offsets to addresses in QEMU's memory file.
+ */
+struct virtio_memory_regions {
+ uint64_t guest_phys_address; /**< Base guest physical address of region. */
+ uint64_t guest_phys_address_end; /**< End guest physical address of region. */
+ uint64_t memory_size; /**< Size of region. */
+ uint64_t userspace_address; /**< Base userspace address of region. */
+ uint64_t address_offset; /**< Offset of region for address translation. */
+};
+
+
+/**
+ * Memory structure includes region and mapping information.
+ */
+struct virtio_memory {
+ uint64_t base_address; /**< Base QEMU userspace address of the memory file. */
+ uint64_t mapped_address; /**< Mapped address of memory file base in our application's memory space. */
+ uint64_t mapped_size; /**< Total size of memory file. */
+ uint32_t nregions; /**< Number of memory regions. */
+ struct virtio_memory_regions regions[0]; /**< Memory region information. */
+};
+
+/**
+ * Device operations to add/remove device.
+ */
+struct virtio_net_device_ops {
+ int (*new_device)(struct virtio_net *); /**< Add device. */
+ void (*destroy_device)(volatile struct virtio_net *); /**< Remove device. */
+};
+
+static inline uint16_t __attribute__((always_inline))
+rte_vring_available_entries(struct virtio_net *dev, uint16_t queue_id)
+{
+ struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+ return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx_res;
+}
+
+/**
+ * Function to convert guest physical addresses to vhost virtual addresses.
+ * This is used to convert guest virtio buffer addresses.
+ */
+static inline uint64_t __attribute__((always_inline))
+gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
+{
+ struct virtio_memory_regions *region;
+ uint32_t regionidx;
+ uint64_t vhost_va = 0;
+
+ for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
+ region = &dev->mem->regions[regionidx];
+ if ((guest_pa >= region->guest_phys_address) &&
+ (guest_pa <= region->guest_phys_address_end)) {
+ vhost_va = region->address_offset + guest_pa;
+ break;
+ }
+ }
+ return vhost_va;
+}
+
+/**
+ * Disable features in feature_mask. Returns 0 on success.
+ */
+int rte_vhost_feature_disable(uint64_t feature_mask);
+
+/**
+ * Enable features in feature_mask. Returns 0 on success.
+ */
+int rte_vhost_feature_enable(uint64_t feature_mask);
+
+/* Returns currently supported vhost features */
+uint64_t rte_vhost_feature_get(void);
+
+int rte_vhost_enable_guest_notification(struct virtio_net *dev, uint16_t queue_id, int enable);
+
+/* Register vhost driver. dev_name could be different for multiple instance support. */
+int rte_vhost_driver_register(const char *dev_name);
+
+/* Register callbacks. */
+int rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const);
+/* Start vhost driver session blocking loop. */
+int rte_vhost_driver_session_start(void);
+
+/**
+ * This function adds buffers to the virtio device's RX virtqueue. Buffers can
+ * be received from the physical port or from another virtual device. A packet
+ * count is returned to indicate the number of packets that were successfully
+ * added to the RX queue.
+ * @param queue_id
+ * virtio queue index in mq case
+ * @return
+ * num of packets enqueued
+ */
+uint16_t rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count);
+
+/**
+ * This function gets guest buffers from the virtio device TX virtqueue,
+ * constructs host mbufs, copies guest buffer content into them and
+ * stores them in pkts to be processed.
+ * @param mbuf_pool
+ * mbuf_pool where host mbuf is allocated.
+ * @param queue_id
+ * virtio queue index in mq case.
+ * @return
+ * num of packets dequeued
+ */
+uint16_t rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
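
A short forwarding sketch built only on the two burst calls above; the device handle, mempool and echo behaviour are assumptions for illustration, not part of this header. Both calls copy data between the guest rings and host mbufs, so the caller keeps ownership of the mbufs it dequeued and must free them itself.

#include <rte_mbuf.h>
#include <rte_virtio_net.h>

#define BURST_SIZE 32

/* hypothetical per-device step: drain the guest TX ring and echo the
 * burst straight back into the guest RX ring */
static void echo_one_device(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t nb_rx, i;

	nb_rx = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool,
					pkts, BURST_SIZE);
	if (nb_rx == 0)
		return;

	(void)rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts, nb_rx);

	/* enqueue copied the data into the guest ring; the host mbufs
	 * remain ours and are freed unconditionally */
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);
}
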
+
+#endif /* _VIRTIO_NET_H_ */
diff --git a/src/dpdk_lib18/librte_vhost/vhost-net-cdev.c b/src/dpdk_lib18/librte_vhost/vhost-net-cdev.c
new file mode 100755
index 00000000..57c76cb0
--- /dev/null
+++ b/src/dpdk_lib18/librte_vhost/vhost-net-cdev.c
@@ -0,0 +1,389 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <fuse/cuse_lowlevel.h>
+#include <linux/limits.h>
+#include <linux/vhost.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+#include <rte_virtio_net.h>
+
+#include "vhost-net-cdev.h"
+
+#define FUSE_OPT_DUMMY "\0\0"
+#define FUSE_OPT_FORE "-f\0\0"
+#define FUSE_OPT_NOMULTI "-s\0\0"
+
+static const uint32_t default_major = 231;
+static const uint32_t default_minor = 1;
+static const char cuse_device_name[] = "/dev/cuse";
+static const char default_cdev[] = "vhost-net";
+
+static struct fuse_session *session;
+static struct vhost_net_device_ops const *ops;
+
+/*
+ * Returns vhost_device_ctx from given fuse_req_t. The index is populated later
+ * when the device is added to the device linked list.
+ */
+static struct vhost_device_ctx
+fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)
+{
+ struct vhost_device_ctx ctx;
+ struct fuse_ctx const *const req_ctx = fuse_req_ctx(req);
+
+ ctx.pid = req_ctx->pid;
+ ctx.fh = fi->fh;
+
+ return ctx;
+}
+
+/*
+ * When the device is created in QEMU it gets initialised here and
+ * added to the device linked list.
+ */
+static void
+vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
+{
+ struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
+ int err = 0;
+
+ err = ops->new_device(ctx);
+ if (err == -1) {
+ fuse_reply_err(req, EPERM);
+ return;
+ }
+
+ fi->fh = err;
+
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "(%"PRIu64") Device configuration started\n", fi->fh);
+ fuse_reply_open(req, fi);
+}
+
+/*
+ * When QEMU is shut down or killed, the device gets released.
+ */
+static void
+vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
+{
+ int err = 0;
+ struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
+
+ ops->destroy_device(ctx);
+ RTE_LOG(INFO, VHOST_CONFIG, "(%"PRIu64") Device released\n", ctx.fh);
+ fuse_reply_err(req, err);
+}
+
+/*
+ * Boilerplate code for CUSE IOCTL
+ * Implicit arguments: ctx, req, result.
+ */
+#define VHOST_IOCTL(func) do { \
+ result = (func)(ctx); \
+ fuse_reply_ioctl(req, result, NULL, 0); \
+} while (0)
+
+/*
+ * Boilerplate IOCTL RETRY
+ * Implicit arguments: req.
+ */
+#define VHOST_IOCTL_RETRY(size_r, size_w) do { \
+ struct iovec iov_r = { arg, (size_r) }; \
+ struct iovec iov_w = { arg, (size_w) }; \
+ fuse_reply_ioctl_retry(req, &iov_r, \
+ (size_r) ? 1 : 0, &iov_w, (size_w) ? 1 : 0);\
+} while (0)
+
+/*
+ * Boilerplate code for CUSE Read IOCTL
+ * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
+ */
+#define VHOST_IOCTL_R(type, var, func) do { \
+ if (!in_bufsz) { \
+ VHOST_IOCTL_RETRY(sizeof(type), 0);\
+ } else { \
+ (var) = *(const type*)in_buf; \
+ result = func(ctx, &(var)); \
+ fuse_reply_ioctl(req, result, NULL, 0);\
+ } \
+} while (0)
+
+/*
+ * Boilerplate code for CUSE Write IOCTL
+ * Implicit arguments: ctx, req, result, out_bufsz.
+ */
+#define VHOST_IOCTL_W(type, var, func) do { \
+ if (!out_bufsz) { \
+ VHOST_IOCTL_RETRY(0, sizeof(type));\
+ } else { \
+ result = (func)(ctx, &(var));\
+ fuse_reply_ioctl(req, result, &(var), sizeof(type));\
+ } \
+} while (0)
+
+/*
+ * Boilerplate code for CUSE Read/Write IOCTL
+ * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
+ */
+#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do { \
+ if (!in_bufsz) { \
+ VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2));\
+ } else { \
+ (var1) = *(const type1*) (in_buf); \
+ result = (func)(ctx, (var1), &(var2)); \
+ fuse_reply_ioctl(req, result, &(var2), sizeof(type2));\
+ } \
+} while (0)
+
+/*
+ * The IOCTLs are handled using CUSE/FUSE in userspace. Depending on the type
+ * of IOCTL a buffer is requested to read or to write. This request is handled
+ * by FUSE and the buffer is then given to CUSE.
+ */
+static void
+vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
+ struct fuse_file_info *fi, __rte_unused unsigned flags,
+ const void *in_buf, size_t in_bufsz, size_t out_bufsz)
+{
+ struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
+ struct vhost_vring_file file;
+ struct vhost_vring_state state;
+ struct vhost_vring_addr addr;
+ uint64_t features;
+ uint32_t index;
+ int result = 0;
+
+ switch (cmd) {
+ case VHOST_NET_SET_BACKEND:
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
+ break;
+
+ case VHOST_GET_FEATURES:
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
+ VHOST_IOCTL_W(uint64_t, features, ops->get_features);
+ break;
+
+ case VHOST_SET_FEATURES:
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
+ VHOST_IOCTL_R(uint64_t, features, ops->set_features);
+ break;
+
+ case VHOST_RESET_OWNER:
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
+ VHOST_IOCTL(ops->reset_owner);
+ break;
+
+ case VHOST_SET_OWNER:
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
+ VHOST_IOCTL(ops->set_owner);
+ break;
+
+ case VHOST_SET_MEM_TABLE:
+ /* TODO: fix race condition. */
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
+ static struct vhost_memory mem_temp;
+
+ switch (in_bufsz) {
+ case 0:
+ VHOST_IOCTL_RETRY(sizeof(struct vhost_memory), 0);
+ break;
+
+ case sizeof(struct vhost_memory):
+ mem_temp = *(const struct vhost_memory *) in_buf;
+
+ if (mem_temp.nregions > 0) {
+ VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) +
+ (sizeof(struct vhost_memory_region) *
+ mem_temp.nregions), 0);
+ } else {
+ result = -1;
+ fuse_reply_ioctl(req, result, NULL, 0);
+ }
+ break;
+
+ default:
+ result = ops->set_mem_table(ctx,
+ in_buf, mem_temp.nregions);
+ if (result)
+ fuse_reply_err(req, EINVAL);
+ else
+ fuse_reply_ioctl(req, result, NULL, 0);
+ }
+ break;
+
+ case VHOST_SET_VRING_NUM:
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_state, state,
+ ops->set_vring_num);
+ break;
+
+ case VHOST_SET_VRING_BASE:
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_state, state,
+ ops->set_vring_base);
+ break;
+
+ case VHOST_GET_VRING_BASE:
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
+ VHOST_IOCTL_RW(uint32_t, index,
+ struct vhost_vring_state, state, ops->get_vring_base);
+ break;
+
+ case VHOST_SET_VRING_ADDR:
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_addr, addr,
+ ops->set_vring_addr);
+ break;
+
+ case VHOST_SET_VRING_KICK:
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_file, file,
+ ops->set_vring_kick);
+ break;
+
+ case VHOST_SET_VRING_CALL:
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_file, file,
+ ops->set_vring_call);
+ break;
+
+ default:
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
+ result = -1;
+ fuse_reply_ioctl(req, result, NULL, 0);
+ }
+
+ if (result < 0)
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
+ else
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
+}
+
+/*
+ * The structure holding the open, release and ioctl function pointers is populated.
+ */
+static const struct cuse_lowlevel_ops vhost_net_ops = {
+ .open = vhost_net_open,
+ .release = vhost_net_release,
+ .ioctl = vhost_net_ioctl,
+};
+
+/*
+ * cuse_info is populated and used to register the cuse device.
+ * vhost_net_device_ops is also passed when the device is registered by the application.
+ */
+int
+rte_vhost_driver_register(const char *dev_name)
+{
+ struct cuse_info cuse_info;
+ char device_name[PATH_MAX] = "";
+ char char_device_name[PATH_MAX] = "";
+ const char *device_argv[] = { device_name };
+
+ char fuse_opt_dummy[] = FUSE_OPT_DUMMY;
+ char fuse_opt_fore[] = FUSE_OPT_FORE;
+ char fuse_opt_nomulti[] = FUSE_OPT_NOMULTI;
+ char *fuse_argv[] = {fuse_opt_dummy, fuse_opt_fore, fuse_opt_nomulti};
+
+ if (access(cuse_device_name, R_OK | W_OK) < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "char device %s can't be accessed, maybe not exist\n",
+ cuse_device_name);
+ return -1;
+ }
+
+ /*
+ * The device name is created. This is passed to QEMU so that it can
+ * register the device with our application.
+ */
+ snprintf(device_name, PATH_MAX, "DEVNAME=%s", dev_name);
+ snprintf(char_device_name, PATH_MAX, "/dev/%s", dev_name);
+
+ /* Check if device already exists. */
+ if (access(char_device_name, F_OK) != -1) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "char device %s already exists\n", char_device_name);
+ return -1;
+ }
+
+ memset(&cuse_info, 0, sizeof(cuse_info));
+ cuse_info.dev_major = default_major;
+ cuse_info.dev_minor = default_minor;
+ cuse_info.dev_info_argc = 1;
+ cuse_info.dev_info_argv = device_argv;
+ cuse_info.flags = CUSE_UNRESTRICTED_IOCTL;
+
+ ops = get_virtio_net_callbacks();
+
+ session = cuse_lowlevel_setup(3, fuse_argv,
+ &cuse_info, &vhost_net_ops, 0, NULL);
+ if (session == NULL)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * The CUSE session is launched allowing the application to receive open,
+ * release and ioctl calls.
+ */
+int
+rte_vhost_driver_session_start(void)
+{
+ fuse_session_loop(session);
+
+ return 0;
+}
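
Taken together, an application registers its data-path callbacks, registers the character device, and then blocks in the session loop. A hedged sketch of that start-up sequence (it assumes the rte_vhost_driver_callback_register() API declared in rte_virtio_net.h; the app_-prefixed names are illustrative application code, not part of this library):

	/* Application callbacks; exact prototypes come from rte_virtio_net.h. */
	static int
	app_new_device(struct virtio_net *dev)
	{
		(void)dev;		/* e.g. add the device to a data core */
		return 0;
	}

	static void
	app_destroy_device(volatile struct virtio_net *dev)
	{
		(void)dev;		/* e.g. remove the device from its core */
	}

	static const struct virtio_net_device_ops app_ops = {
		.new_device     = app_new_device,
		.destroy_device = app_destroy_device,
	};

	/* Sketch: typical application start-up for the CUSE vhost driver. */
	static int
	app_start_vhost(void)
	{
		rte_vhost_driver_callback_register(&app_ops);

		if (rte_vhost_driver_register("vhost-net") != 0)
			return -1;

		/* Blocks, dispatching open/release/ioctl until the session ends. */
		return rte_vhost_driver_session_start();
	}
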
diff --git a/src/dpdk_lib18/librte_vhost/vhost-net-cdev.h b/src/dpdk_lib18/librte_vhost/vhost-net-cdev.h
new file mode 100755
index 00000000..03a5c575
--- /dev/null
+++ b/src/dpdk_lib18/librte_vhost/vhost-net-cdev.h
@@ -0,0 +1,113 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VHOST_NET_CDEV_H_
+#define _VHOST_NET_CDEV_H_
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <linux/vhost.h>
+
+#include <rte_log.h>
+
+/* Macros for printing using RTE_LOG */
+#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
+#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+#define VHOST_MAX_PRINT_BUFF 6072
+#define LOG_LEVEL RTE_LOG_DEBUG
+#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
+#define PRINT_PACKET(device, addr, size, header) do { \
+ char *pkt_addr = (char *)(addr); \
+ unsigned int index; \
+ char packet[VHOST_MAX_PRINT_BUFF]; \
+ \
+ if ((header)) \
+ snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
+ else \
+ snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
+ for (index = 0; index < (size); index++) { \
+ snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
+ "%02hhx ", pkt_addr[index]); \
+ } \
+ snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
+ \
+ LOG_DEBUG(VHOST_DATA, "%s", packet); \
+} while (0)
+#else
+#define LOG_LEVEL RTE_LOG_INFO
+#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
+#define PRINT_PACKET(device, addr, size, header) do {} while (0)
+#endif
+
+
+/*
+ * Structure used to identify device context.
+ */
+struct vhost_device_ctx {
+ pid_t pid; /* PID of process calling the IOCTL. */
+ uint64_t fh; /* Populated with fi->fh to track the device index. */
+};
+
+/*
+ * Structure contains function pointers to be defined in virtio-net.c. These
+ * functions are called in CUSE context and are used to configure devices.
+ */
+struct vhost_net_device_ops {
+ int (*new_device)(struct vhost_device_ctx);
+ void (*destroy_device)(struct vhost_device_ctx);
+
+ int (*get_features)(struct vhost_device_ctx, uint64_t *);
+ int (*set_features)(struct vhost_device_ctx, uint64_t *);
+
+ int (*set_mem_table)(struct vhost_device_ctx, const void *, uint32_t);
+
+ int (*set_vring_num)(struct vhost_device_ctx, struct vhost_vring_state *);
+ int (*set_vring_addr)(struct vhost_device_ctx, struct vhost_vring_addr *);
+ int (*set_vring_base)(struct vhost_device_ctx, struct vhost_vring_state *);
+ int (*get_vring_base)(struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);
+
+ int (*set_vring_kick)(struct vhost_device_ctx, struct vhost_vring_file *);
+ int (*set_vring_call)(struct vhost_device_ctx, struct vhost_vring_file *);
+
+ int (*set_backend)(struct vhost_device_ctx, struct vhost_vring_file *);
+
+ int (*set_owner)(struct vhost_device_ctx);
+ int (*reset_owner)(struct vhost_device_ctx);
+};
+
+
+struct vhost_net_device_ops const *get_virtio_net_callbacks(void);
+#endif /* _VHOST_NET_CDEV_H_ */
diff --git a/src/dpdk_lib18/librte_vhost/vhost_rxtx.c b/src/dpdk_lib18/librte_vhost/vhost_rxtx.c
new file mode 100755
index 00000000..ccfd82f4
--- /dev/null
+++ b/src/dpdk_lib18/librte_vhost/vhost_rxtx.c
@@ -0,0 +1,730 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <linux/virtio_net.h>
+
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_virtio_net.h>
+
+#include "vhost-net-cdev.h"
+
+#define MAX_PKT_BURST 32
+
+/**
+ * This function adds buffers to the virtio device's RX virtqueue. Buffers can
+ * be received from the physical port or from another virtio device. A packet
+ * count is returned to indicate the number of packets that are successfully
+ * added to the RX queue. This function works when mergeable is disabled.
+ */
+static inline uint32_t __attribute__((always_inline))
+virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ struct vhost_virtqueue *vq;
+ struct vring_desc *desc;
+ struct rte_mbuf *buff;
+ /* The virtio_hdr is initialised to 0. */
+ struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
+ uint64_t buff_addr = 0;
+ uint64_t buff_hdr_addr = 0;
+ uint32_t head[MAX_PKT_BURST], packet_len = 0;
+ uint32_t head_idx, packet_success = 0;
+ uint16_t avail_idx, res_cur_idx;
+ uint16_t res_base_idx, res_end_idx;
+ uint16_t free_entries;
+ uint8_t success = 0;
+
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+ if (unlikely(queue_id != VIRTIO_RXQ)) {
+ LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+ return 0;
+ }
+
+ vq = dev->virtqueue[VIRTIO_RXQ];
+ count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
+
+ /*
+ * As many data cores may want access to available buffers,
+ * they need to be reserved.
+ */
+ do {
+ res_base_idx = vq->last_used_idx_res;
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+
+ free_entries = (avail_idx - res_base_idx);
+ /* Check that we have enough buffers. */
+ if (unlikely(count > free_entries))
+ count = free_entries;
+
+ if (count == 0)
+ return 0;
+
+ res_end_idx = res_base_idx + count;
+ /* vq->last_used_idx_res is atomically updated. */
+ /* TODO: Allow to disable cmpset if no concurrency in application. */
+ success = rte_atomic16_cmpset(&vq->last_used_idx_res,
+ res_base_idx, res_end_idx);
+ } while (unlikely(success == 0));
+ res_cur_idx = res_base_idx;
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
+ dev->device_fh, res_cur_idx, res_end_idx);
+
+ /* Prefetch available ring to retrieve indexes. */
+ rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
+
+ /* Retrieve all of the head indexes first to avoid caching issues. */
+ for (head_idx = 0; head_idx < count; head_idx++)
+ head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) &
+ (vq->size - 1)];
+
+ /* Prefetch descriptor index. */
+ rte_prefetch0(&vq->desc[head[packet_success]]);
+
+ while (res_cur_idx != res_end_idx) {
+ /* Get descriptor from available ring */
+ desc = &vq->desc[head[packet_success]];
+
+ buff = pkts[packet_success];
+
+ /* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
+ buff_addr = gpa_to_vva(dev, desc->addr);
+ /* Prefetch buffer address. */
+ rte_prefetch0((void *)(uintptr_t)buff_addr);
+
+ /* Copy virtio_hdr to packet and increment buffer address */
+ buff_hdr_addr = buff_addr;
+ packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;
+
+ /*
+ * If the descriptors are chained the header and data are
+ * placed in separate buffers.
+ */
+ if (desc->flags & VRING_DESC_F_NEXT) {
+ desc->len = vq->vhost_hlen;
+ desc = &vq->desc[desc->next];
+ /* Buffer address translation. */
+ buff_addr = gpa_to_vva(dev, desc->addr);
+ desc->len = rte_pktmbuf_data_len(buff);
+ } else {
+ buff_addr += vq->vhost_hlen;
+ desc->len = packet_len;
+ }
+
+ /* Update used ring with desc information */
+ vq->used->ring[res_cur_idx & (vq->size - 1)].id =
+ head[packet_success];
+ vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
+
+ /* Copy mbuf data to buffer */
+ /* FIXME for sg mbuf and the case that desc couldn't hold the mbuf data */
+ rte_memcpy((void *)(uintptr_t)buff_addr,
+ rte_pktmbuf_mtod(buff, const void *),
+ rte_pktmbuf_data_len(buff));
+ PRINT_PACKET(dev, (uintptr_t)buff_addr,
+ rte_pktmbuf_data_len(buff), 0);
+
+ res_cur_idx++;
+ packet_success++;
+
+ rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
+ (const void *)&virtio_hdr, vq->vhost_hlen);
+
+ PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);
+
+ if (res_cur_idx < res_end_idx) {
+ /* Prefetch descriptor index. */
+ rte_prefetch0(&vq->desc[head[packet_success]]);
+ }
+ }
+
+ rte_compiler_barrier();
+
+ /* Wait until it's our turn to add our buffer to the used ring. */
+ while (unlikely(vq->last_used_idx != res_base_idx))
+ rte_pause();
+
+ *(volatile uint16_t *)&vq->used->idx += count;
+ vq->last_used_idx = res_end_idx;
+
+ /* Kick the guest if necessary. */
+ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ eventfd_write((int)vq->kickfd, 1);
+ return count;
+}
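
The compare-and-set loop at the top of virtio_dev_rx() is what lets several data cores enqueue to the same virtqueue concurrently: each core snapshots last_used_idx_res, computes the slice it wants, and claims it only if no other core advanced the index in the meantime. A reduced sketch of the same reservation pattern, using only the rte_atomic16_cmpset() primitive (function and parameter names are illustrative):

	/* Sketch: reserve up to `count` entries of a shared ring index. */
	static uint16_t
	reserve_ring_entries(volatile uint16_t *res_idx, uint16_t avail_idx,
			     uint16_t count, uint16_t *base_out)
	{
		uint16_t base, free_entries;

		do {
			base = *res_idx;
			free_entries = (uint16_t)(avail_idx - base);
			if (count > free_entries)
				count = free_entries;
			if (count == 0)
				return 0;
			/* Succeeds only if nobody else moved the index. */
		} while (rte_atomic16_cmpset(res_idx, base,
					     (uint16_t)(base + count)) == 0);

		*base_out = base;
		return count;
	}
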
+
+static inline uint32_t __attribute__((always_inline))
+copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
+ uint16_t res_end_idx, struct rte_mbuf *pkt)
+{
+ uint32_t vec_idx = 0;
+ uint32_t entry_success = 0;
+ struct vhost_virtqueue *vq;
+ /* The virtio_hdr is initialised to 0. */
+ struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {
+ {0, 0, 0, 0, 0, 0}, 0};
+ uint16_t cur_idx = res_base_idx;
+ uint64_t vb_addr = 0;
+ uint64_t vb_hdr_addr = 0;
+ uint32_t seg_offset = 0;
+ uint32_t vb_offset = 0;
+ uint32_t seg_avail;
+ uint32_t vb_avail;
+ uint32_t cpy_len, entry_len;
+
+ if (pkt == NULL)
+ return 0;
+
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| "
+ "End Index %d\n",
+ dev->device_fh, cur_idx, res_end_idx);
+
+ /*
+ * Convert from gpa to vva
+ * (guest physical addr -> vhost virtual addr)
+ */
+ vq = dev->virtqueue[VIRTIO_RXQ];
+ vb_addr =
+ gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+ vb_hdr_addr = vb_addr;
+
+ /* Prefetch buffer address. */
+ rte_prefetch0((void *)(uintptr_t)vb_addr);
+
+ virtio_hdr.num_buffers = res_end_idx - res_base_idx;
+
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") RX: Num merge buffers %d\n",
+ dev->device_fh, virtio_hdr.num_buffers);
+
+ rte_memcpy((void *)(uintptr_t)vb_hdr_addr,
+ (const void *)&virtio_hdr, vq->vhost_hlen);
+
+ PRINT_PACKET(dev, (uintptr_t)vb_hdr_addr, vq->vhost_hlen, 1);
+
+ seg_avail = rte_pktmbuf_data_len(pkt);
+ vb_offset = vq->vhost_hlen;
+ vb_avail =
+ vq->buf_vec[vec_idx].buf_len - vq->vhost_hlen;
+
+ entry_len = vq->vhost_hlen;
+
+ if (vb_avail == 0) {
+ uint32_t desc_idx =
+ vq->buf_vec[vec_idx].desc_idx;
+ vq->desc[desc_idx].len = vq->vhost_hlen;
+
+ if ((vq->desc[desc_idx].flags
+ & VRING_DESC_F_NEXT) == 0) {
+ /* Update used ring with desc information */
+ vq->used->ring[cur_idx & (vq->size - 1)].id
+ = vq->buf_vec[vec_idx].desc_idx;
+ vq->used->ring[cur_idx & (vq->size - 1)].len
+ = entry_len;
+
+ entry_len = 0;
+ cur_idx++;
+ entry_success++;
+ }
+
+ vec_idx++;
+ vb_addr =
+ gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+
+ /* Prefetch buffer address. */
+ rte_prefetch0((void *)(uintptr_t)vb_addr);
+ vb_offset = 0;
+ vb_avail = vq->buf_vec[vec_idx].buf_len;
+ }
+
+ cpy_len = RTE_MIN(vb_avail, seg_avail);
+
+ while (cpy_len > 0) {
+ /* Copy mbuf data to vring buffer */
+ rte_memcpy((void *)(uintptr_t)(vb_addr + vb_offset),
+ (const void *)(rte_pktmbuf_mtod(pkt, char*) + seg_offset),
+ cpy_len);
+
+ PRINT_PACKET(dev,
+ (uintptr_t)(vb_addr + vb_offset),
+ cpy_len, 0);
+
+ seg_offset += cpy_len;
+ vb_offset += cpy_len;
+ seg_avail -= cpy_len;
+ vb_avail -= cpy_len;
+ entry_len += cpy_len;
+
+ if (seg_avail != 0) {
+ /*
+ * The virtio buffer in this vring
+ * entry has reached its end, but
+ * the mbuf segment is not finished yet.
+ */
+ if ((vq->desc[vq->buf_vec[vec_idx].desc_idx].flags &
+ VRING_DESC_F_NEXT) == 0) {
+ /* Update used ring with desc information */
+ vq->used->ring[cur_idx & (vq->size - 1)].id
+ = vq->buf_vec[vec_idx].desc_idx;
+ vq->used->ring[cur_idx & (vq->size - 1)].len
+ = entry_len;
+ entry_len = 0;
+ cur_idx++;
+ entry_success++;
+ }
+
+ vec_idx++;
+ vb_addr = gpa_to_vva(dev,
+ vq->buf_vec[vec_idx].buf_addr);
+ vb_offset = 0;
+ vb_avail = vq->buf_vec[vec_idx].buf_len;
+ cpy_len = RTE_MIN(vb_avail, seg_avail);
+ } else {
+ /*
+ * The current segment is complete; continue to
+ * check whether the whole packet is complete.
+ */
+ pkt = pkt->next;
+ if (pkt != NULL) {
+ /*
+ * There are more segments.
+ */
+ if (vb_avail == 0) {
+ /*
+ * The current buffer from the vring is
+ * used up; fetch the next buffer
+ * from buf_vec.
+ */
+ uint32_t desc_idx =
+ vq->buf_vec[vec_idx].desc_idx;
+ vq->desc[desc_idx].len = vb_offset;
+
+ if ((vq->desc[desc_idx].flags &
+ VRING_DESC_F_NEXT) == 0) {
+ uint16_t wrapped_idx =
+ cur_idx & (vq->size - 1);
+ /*
+ * Update used ring with the
+ * descriptor information
+ */
+ vq->used->ring[wrapped_idx].id
+ = desc_idx;
+ vq->used->ring[wrapped_idx].len
+ = entry_len;
+ entry_success++;
+ entry_len = 0;
+ cur_idx++;
+ }
+
+ /* Get next buffer from buf_vec. */
+ vec_idx++;
+ vb_addr = gpa_to_vva(dev,
+ vq->buf_vec[vec_idx].buf_addr);
+ vb_avail =
+ vq->buf_vec[vec_idx].buf_len;
+ vb_offset = 0;
+ }
+
+ seg_offset = 0;
+ seg_avail = rte_pktmbuf_data_len(pkt);
+ cpy_len = RTE_MIN(vb_avail, seg_avail);
+ } else {
+ /*
+ * The whole packet has been copied.
+ */
+ uint32_t desc_idx =
+ vq->buf_vec[vec_idx].desc_idx;
+ vq->desc[desc_idx].len = vb_offset;
+
+ while (vq->desc[desc_idx].flags &
+ VRING_DESC_F_NEXT) {
+ desc_idx = vq->desc[desc_idx].next;
+ vq->desc[desc_idx].len = 0;
+ }
+
+ /* Update used ring with desc information */
+ vq->used->ring[cur_idx & (vq->size - 1)].id
+ = vq->buf_vec[vec_idx].desc_idx;
+ vq->used->ring[cur_idx & (vq->size - 1)].len
+ = entry_len;
+ entry_len = 0;
+ cur_idx++;
+ entry_success++;
+ seg_avail = 0;
+ cpy_len = RTE_MIN(vb_avail, seg_avail);
+ }
+ }
+ }
+
+ return entry_success;
+}
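
For mergeable RX the guest-visible header carries, in addition to the ordinary virtio_net_hdr, the number of descriptor chains the packet was scattered across; copy_from_mbuf_to_vring() fills num_buffers with res_end_idx - res_base_idx before copying any payload. For reference, a sketch of the layout this relies on (mirroring struct virtio_net_hdr_mrg_rxbuf from linux/virtio_net.h; shown for illustration only, not redefined by the library):

	/* Sketch of the header prepended to each mergeable RX packet. */
	struct example_mrg_rxbuf_hdr {
		struct virtio_net_hdr hdr; /* the ordinary virtio-net header */
		uint16_t num_buffers;      /* descriptor chains used for this packet */
	};
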
+
+/*
+ * This function works for mergeable RX.
+ */
+static inline uint32_t __attribute__((always_inline))
+virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ struct vhost_virtqueue *vq;
+ uint32_t pkt_idx = 0, entry_success = 0;
+ uint16_t avail_idx, res_cur_idx;
+ uint16_t res_base_idx, res_end_idx;
+ uint8_t success = 0;
+
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
+ dev->device_fh);
+ if (unlikely(queue_id != VIRTIO_RXQ)) {
+ LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+ }
+
+ vq = dev->virtqueue[VIRTIO_RXQ];
+ count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
+
+ if (count == 0)
+ return 0;
+
+ for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+ uint32_t secure_len = 0;
+ uint16_t need_cnt;
+ uint32_t vec_idx = 0;
+ uint32_t pkt_len = pkts[pkt_idx]->pkt_len + vq->vhost_hlen;
+ uint16_t i, id;
+
+ do {
+ /*
+ * As many data cores may want access to available
+ * buffers, they need to be reserved.
+ */
+ res_base_idx = vq->last_used_idx_res;
+ res_cur_idx = res_base_idx;
+
+ do {
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+ if (unlikely(res_cur_idx == avail_idx)) {
+ LOG_DEBUG(VHOST_DATA,
+ "(%"PRIu64") Failed "
+ "to get enough desc from "
+ "vring\n",
+ dev->device_fh);
+ return pkt_idx;
+ } else {
+ uint16_t wrapped_idx =
+ (res_cur_idx) & (vq->size - 1);
+ uint32_t idx =
+ vq->avail->ring[wrapped_idx];
+ uint8_t next_desc;
+
+ do {
+ next_desc = 0;
+ secure_len += vq->desc[idx].len;
+ if (vq->desc[idx].flags &
+ VRING_DESC_F_NEXT) {
+ idx = vq->desc[idx].next;
+ next_desc = 1;
+ }
+ } while (next_desc);
+
+ res_cur_idx++;
+ }
+ } while (pkt_len > secure_len);
+
+ /* vq->last_used_idx_res is atomically updated. */
+ success = rte_atomic16_cmpset(&vq->last_used_idx_res,
+ res_base_idx,
+ res_cur_idx);
+ } while (success == 0);
+
+ id = res_base_idx;
+ need_cnt = res_cur_idx - res_base_idx;
+
+ for (i = 0; i < need_cnt; i++, id++) {
+ uint16_t wrapped_idx = id & (vq->size - 1);
+ uint32_t idx = vq->avail->ring[wrapped_idx];
+ uint8_t next_desc;
+ do {
+ next_desc = 0;
+ vq->buf_vec[vec_idx].buf_addr =
+ vq->desc[idx].addr;
+ vq->buf_vec[vec_idx].buf_len =
+ vq->desc[idx].len;
+ vq->buf_vec[vec_idx].desc_idx = idx;
+ vec_idx++;
+
+ if (vq->desc[idx].flags & VRING_DESC_F_NEXT) {
+ idx = vq->desc[idx].next;
+ next_desc = 1;
+ }
+ } while (next_desc);
+ }
+
+ res_end_idx = res_cur_idx;
+
+ entry_success = copy_from_mbuf_to_vring(dev, res_base_idx,
+ res_end_idx, pkts[pkt_idx]);
+
+ rte_compiler_barrier();
+
+ /*
+ * Wait until it's our turn to add our buffer
+ * to the used ring.
+ */
+ while (unlikely(vq->last_used_idx != res_base_idx))
+ rte_pause();
+
+ *(volatile uint16_t *)&vq->used->idx += entry_success;
+ vq->last_used_idx = res_end_idx;
+
+ /* Kick the guest if necessary. */
+ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ eventfd_write((int)vq->kickfd, 1);
+ }
+
+ return count;
+}
+
+uint16_t
+rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ if (unlikely(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)))
+ return virtio_dev_merge_rx(dev, queue_id, pkts, count);
+ else
+ return virtio_dev_rx(dev, queue_id, pkts, count);
+}
+
+uint16_t
+rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+{
+ struct rte_mbuf *m, *prev;
+ struct vhost_virtqueue *vq;
+ struct vring_desc *desc;
+ uint64_t vb_addr = 0;
+ uint32_t head[MAX_PKT_BURST];
+ uint32_t used_idx;
+ uint32_t i;
+ uint16_t free_entries, entry_success = 0;
+ uint16_t avail_idx;
+
+ if (unlikely(queue_id != VIRTIO_TXQ)) {
+ LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+ return 0;
+ }
+
+ vq = dev->virtqueue[VIRTIO_TXQ];
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+
+ /* If there are no available buffers then return. */
+ if (vq->last_used_idx == avail_idx)
+ return 0;
+
+ LOG_DEBUG(VHOST_DATA, "%s (%"PRIu64")\n", __func__,
+ dev->device_fh);
+
+ /* Prefetch available ring to retrieve head indexes. */
+ rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
+
+ /* Get the number of free entries in the ring. */
+ free_entries = (avail_idx - vq->last_used_idx);
+
+ free_entries = RTE_MIN(free_entries, count);
+ /* Limit to MAX_PKT_BURST. */
+ free_entries = RTE_MIN(free_entries, MAX_PKT_BURST);
+
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
+ dev->device_fh, free_entries);
+ /* Retrieve all of the head indexes first to avoid caching issues. */
+ for (i = 0; i < free_entries; i++)
+ head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
+
+ /* Prefetch descriptor index. */
+ rte_prefetch0(&vq->desc[head[entry_success]]);
+ rte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);
+
+ while (entry_success < free_entries) {
+ uint32_t vb_avail, vb_offset;
+ uint32_t seg_avail, seg_offset;
+ uint32_t cpy_len;
+ uint32_t seg_num = 0;
+ struct rte_mbuf *cur;
+ uint8_t alloc_err = 0;
+
+ desc = &vq->desc[head[entry_success]];
+
+ /* Discard first buffer as it is the virtio header */
+ desc = &vq->desc[desc->next];
+
+ /* Buffer address translation. */
+ vb_addr = gpa_to_vva(dev, desc->addr);
+ /* Prefetch buffer address. */
+ rte_prefetch0((void *)(uintptr_t)vb_addr);
+
+ used_idx = vq->last_used_idx & (vq->size - 1);
+
+ if (entry_success < (free_entries - 1)) {
+ /* Prefetch descriptor index. */
+ rte_prefetch0(&vq->desc[head[entry_success+1]]);
+ rte_prefetch0(&vq->used->ring[(used_idx + 1) & (vq->size - 1)]);
+ }
+
+ /* Update used index buffer information. */
+ vq->used->ring[used_idx].id = head[entry_success];
+ vq->used->ring[used_idx].len = 0;
+
+ vb_offset = 0;
+ vb_avail = desc->len;
+ /* Allocate an mbuf and populate the structure. */
+ m = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(m == NULL)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "Failed to allocate memory for mbuf.\n");
+ return entry_success;
+ }
+ seg_offset = 0;
+ seg_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
+ cpy_len = RTE_MIN(vb_avail, seg_avail);
+
+ PRINT_PACKET(dev, (uintptr_t)vb_addr, desc->len, 0);
+
+ seg_num++;
+ cur = m;
+ prev = m;
+ while (cpy_len != 0) {
+ rte_memcpy((void *)(rte_pktmbuf_mtod(cur, char *) + seg_offset),
+ (void *)((uintptr_t)(vb_addr + vb_offset)),
+ cpy_len);
+
+ seg_offset += cpy_len;
+ vb_offset += cpy_len;
+ vb_avail -= cpy_len;
+ seg_avail -= cpy_len;
+
+ if (vb_avail != 0) {
+ /*
+ * The segment has reached its end,
+ * while the virtio buffer in TX vring has
+ * more data to be copied.
+ */
+ cur->data_len = seg_offset;
+ m->pkt_len += seg_offset;
+ /* Allocate mbuf and populate the structure. */
+ cur = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(cur == NULL)) {
+ RTE_LOG(ERR, VHOST_DATA, "Failed to "
+ "allocate memory for mbuf.\n");
+ rte_pktmbuf_free(m);
+ alloc_err = 1;
+ break;
+ }
+
+ seg_num++;
+ prev->next = cur;
+ prev = cur;
+ seg_offset = 0;
+ seg_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
+ } else {
+ if (desc->flags & VRING_DESC_F_NEXT) {
+ /*
+ * There are more virtio buffers in
+ * same vring entry need to be copied.
+ */
+ if (seg_avail == 0) {
+ /*
+ * The current segment has no
+ * room to accommodate more
+ * data.
+ */
+ cur->data_len = seg_offset;
+ m->pkt_len += seg_offset;
+ /*
+ * Allocate an mbuf and
+ * populate the structure.
+ */
+ cur = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(cur == NULL)) {
+ RTE_LOG(ERR,
+ VHOST_DATA,
+ "Failed to "
+ "allocate memory "
+ "for mbuf\n");
+ rte_pktmbuf_free(m);
+ alloc_err = 1;
+ break;
+ }
+ seg_num++;
+ prev->next = cur;
+ prev = cur;
+ seg_offset = 0;
+ seg_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
+ }
+
+ desc = &vq->desc[desc->next];
+
+ /* Buffer address translation. */
+ vb_addr = gpa_to_vva(dev, desc->addr);
+ /* Prefetch buffer address. */
+ rte_prefetch0((void *)(uintptr_t)vb_addr);
+ vb_offset = 0;
+ vb_avail = desc->len;
+
+ PRINT_PACKET(dev, (uintptr_t)vb_addr,
+ desc->len, 0);
+ } else {
+ /* The whole packet completes. */
+ cur->data_len = seg_offset;
+ m->pkt_len += seg_offset;
+ vb_avail = 0;
+ }
+ }
+
+ cpy_len = RTE_MIN(vb_avail, seg_avail);
+ }
+
+ if (unlikely(alloc_err == 1))
+ break;
+
+ m->nb_segs = seg_num;
+
+ pkts[entry_success] = m;
+ vq->last_used_idx++;
+ entry_success++;
+ }
+
+ rte_compiler_barrier();
+ vq->used->idx += entry_success;
+ /* Kick guest if required. */
+ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ eventfd_write((int)vq->kickfd, 1);
+ return entry_success;
+}
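
These two burst calls are the library's data-path entry points: a polling core drains the guest's TX ring with rte_vhost_dequeue_burst() and feeds the guest's RX ring with rte_vhost_enqueue_burst(). A minimal, hedged usage sketch (dev and mbuf_pool are assumed to be set up by the application; here the packets are simply echoed back to the guest):

	/* Sketch: poll one device and echo its packets back to it. */
	static void
	app_poll_device(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
	{
		struct rte_mbuf *pkts[MAX_PKT_BURST];
		uint16_t nb_rx, nb_tx, i;

		/* Packets the guest transmitted (its TX queue is our receive side). */
		nb_rx = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool,
						pkts, MAX_PKT_BURST);
		if (nb_rx == 0)
			return;

		/* Hand them back through the guest's RX queue. */
		nb_tx = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts, nb_rx);

		/* Free anything the guest had no room for. */
		for (i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);
	}
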
diff --git a/src/dpdk_lib18/librte_vhost/virtio-net.c b/src/dpdk_lib18/librte_vhost/virtio-net.c
new file mode 100755
index 00000000..b041849d
--- /dev/null
+++ b/src/dpdk_lib18/librte_vhost/virtio-net.c
@@ -0,0 +1,1163 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fuse/cuse_lowlevel.h>
+#include <linux/vhost.h>
+#include <linux/virtio_net.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <sys/socket.h>
+#include <linux/if_tun.h>
+#include <linux/if.h>
+
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+#include <rte_memory.h>
+#include <rte_virtio_net.h>
+
+#include "vhost-net-cdev.h"
+#include "eventfd_link/eventfd_link.h"
+
+/*
+ * Device linked list structure for configuration.
+ */
+struct virtio_net_config_ll {
+ struct virtio_net dev; /* Virtio device.*/
+ struct virtio_net_config_ll *next; /* Next dev on linked list.*/
+};
+
+const char eventfd_cdev[] = "/dev/eventfd-link";
+
+/* device ops to add/remove device to/from data core. */
+static struct virtio_net_device_ops const *notify_ops;
+/* root address of the linked list of managed virtio devices */
+static struct virtio_net_config_ll *ll_root;
+
+/* Features supported by this lib. */
+#define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
+ (1ULL << VIRTIO_NET_F_CTRL_RX))
+static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
+
+/* Line size for reading maps file. */
+static const uint32_t BUFSIZE = PATH_MAX;
+
+/* Size of prot char array in procmap. */
+#define PROT_SZ 5
+
+/* Number of elements in procmap struct. */
+#define PROCMAP_SZ 8
+
+/* Structure containing information gathered from maps file. */
+struct procmap {
+ uint64_t va_start; /* Start virtual address in file. */
+ uint64_t len; /* Size of file. */
+ uint64_t pgoff; /* Not used. */
+ uint32_t maj; /* Not used. */
+ uint32_t min; /* Not used. */
+ uint32_t ino; /* Not used. */
+ char prot[PROT_SZ]; /* Not used. */
+ char fname[PATH_MAX]; /* File name. */
+};
+
+/*
+ * Converts QEMU virtual address to Vhost virtual address. This function is
+ * used to convert the ring addresses to our address space.
+ */
+static uint64_t
+qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
+{
+ struct virtio_memory_regions *region;
+ uint64_t vhost_va = 0;
+ uint32_t regionidx = 0;
+
+ /* Find the region where the address lives. */
+ for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
+ region = &dev->mem->regions[regionidx];
+ if ((qemu_va >= region->userspace_address) &&
+ (qemu_va <= region->userspace_address +
+ region->memory_size)) {
+ vhost_va = dev->mem->mapped_address + qemu_va -
+ dev->mem->base_address;
+ break;
+ }
+ }
+ return vhost_va;
+}
+
+/*
+ * Locate the file containing QEMU's memory space and
+ * map it to our address space.
+ */
+static int
+host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
+ pid_t pid, uint64_t addr)
+{
+ struct dirent *dptr = NULL;
+ struct procmap procmap;
+ DIR *dp = NULL;
+ int fd;
+ int i;
+ char memfile[PATH_MAX];
+ char mapfile[PATH_MAX];
+ char procdir[PATH_MAX];
+ char resolved_path[PATH_MAX];
+ char *path = NULL;
+ FILE *fmap;
+ void *map;
+ uint8_t found = 0;
+ char line[BUFSIZE];
+ char dlm[] = "- : ";
+ char *str, *sp, *in[PROCMAP_SZ];
+ char *end = NULL;
+
+ /* Path where mem files are located. */
+ snprintf(procdir, PATH_MAX, "/proc/%u/fd/", pid);
+ /* Maps file used to locate mem file. */
+ snprintf(mapfile, PATH_MAX, "/proc/%u/maps", pid);
+
+ fmap = fopen(mapfile, "r");
+ if (fmap == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to open maps file for pid %d\n",
+ dev->device_fh, pid);
+ return -1;
+ }
+
+ /* Read through the maps file until we find our base_address. */
+ while (fgets(line, BUFSIZE, fmap) != 0) {
+ str = line;
+ errno = 0;
+ /* Split line into fields. */
+ for (i = 0; i < PROCMAP_SZ; i++) {
+ in[i] = strtok_r(str, &dlm[i], &sp);
+ if ((in[i] == NULL) || (errno != 0)) {
+ fclose(fmap);
+ return -1;
+ }
+ str = NULL;
+ }
+
+ /* Convert/Copy each field as needed. */
+ procmap.va_start = strtoull(in[0], &end, 16);
+ if ((in[0] == '\0') || (end == NULL) || (*end != '\0') ||
+ (errno != 0)) {
+ fclose(fmap);
+ return -1;
+ }
+
+ procmap.len = strtoull(in[1], &end, 16);
+ if ((in[1] == '\0') || (end == NULL) || (*end != '\0') ||
+ (errno != 0)) {
+ fclose(fmap);
+ return -1;
+ }
+
+ procmap.pgoff = strtoull(in[3], &end, 16);
+ if ((in[3] == '\0') || (end == NULL) || (*end != '\0') ||
+ (errno != 0)) {
+ fclose(fmap);
+ return -1;
+ }
+
+ procmap.maj = strtoul(in[4], &end, 16);
+ if ((in[4] == '\0') || (end == NULL) || (*end != '\0') ||
+ (errno != 0)) {
+ fclose(fmap);
+ return -1;
+ }
+
+ procmap.min = strtoul(in[5], &end, 16);
+ if ((in[5] == '\0') || (end == NULL) || (*end != '\0') ||
+ (errno != 0)) {
+ fclose(fmap);
+ return -1;
+ }
+
+ procmap.ino = strtoul(in[6], &end, 16);
+ if ((in[6] == '\0') || (end == NULL) || (*end != '\0') ||
+ (errno != 0)) {
+ fclose(fmap);
+ return -1;
+ }
+
+ memcpy(&procmap.prot, in[2], PROT_SZ);
+ memcpy(&procmap.fname, in[7], PATH_MAX);
+
+ if (procmap.va_start == addr) {
+ procmap.len = procmap.len - procmap.va_start;
+ found = 1;
+ break;
+ }
+ }
+ fclose(fmap);
+
+ if (!found) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to find memory file in pid %d maps file\n",
+ dev->device_fh, pid);
+ return -1;
+ }
+
+ /* Find the guest memory file among the process fds. */
+ dp = opendir(procdir);
+ if (dp == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Cannot open pid %d process directory\n",
+ dev->device_fh, pid);
+ return -1;
+ }
+
+ found = 0;
+
+ /* Read the fd directory contents. */
+ while (NULL != (dptr = readdir(dp))) {
+ snprintf(memfile, PATH_MAX, "/proc/%u/fd/%s",
+ pid, dptr->d_name);
+ path = realpath(memfile, resolved_path);
+ if ((path == NULL) && (strlen(resolved_path) == 0)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to resolve fd directory\n",
+ dev->device_fh);
+ closedir(dp);
+ return -1;
+ }
+ if (strncmp(resolved_path, procmap.fname,
+ strnlen(procmap.fname, PATH_MAX)) == 0) {
+ found = 1;
+ break;
+ }
+ }
+
+ closedir(dp);
+
+ if (found == 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to find memory file for pid %d\n",
+ dev->device_fh, pid);
+ return -1;
+ }
+ /* Open the shared memory file and map the memory into this process. */
+ fd = open(memfile, O_RDWR);
+
+ if (fd == -1) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to open %s for pid %d\n",
+ dev->device_fh, memfile, pid);
+ return -1;
+ }
+
+ map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE,
+ MAP_POPULATE|MAP_SHARED, fd, 0);
+ close(fd);
+
+ if (map == MAP_FAILED) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Error mapping the file %s for pid %d\n",
+ dev->device_fh, memfile, pid);
+ return -1;
+ }
+
+ /* Store the memory address and size in the device data structure */
+ mem->mapped_address = (uint64_t)(uintptr_t)map;
+ mem->mapped_size = procmap.len;
+
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n",
+ dev->device_fh,
+ memfile, resolved_path,
+ (unsigned long long)mem->mapped_size, map);
+
+ return 0;
+}
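
host_memory_map() splits each line of /proc/<pid>/maps into the eight fields of the standard maps format: start address, end address, permissions, offset, device major, device minor, inode and file name; only the start address, the derived length and the file name are used afterwards. An illustrative (not real) line for a hugepage-backed QEMU guest memory file might look like:

	7f2b40000000-7f2bc0000000 rw-s 00000000 00:2c 12345                      /dev/hugepages/qemu_back_mem.mem

The entry whose start address equals the base address passed in from set_mem_table() identifies the file that is then located again through /proc/<pid>/fd/, opened, and mmap()ed into this process.
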
+
+/*
+ * Retrieves an entry from the devices configuration linked list.
+ */
+static struct virtio_net_config_ll *
+get_config_ll_entry(struct vhost_device_ctx ctx)
+{
+ struct virtio_net_config_ll *ll_dev = ll_root;
+
+ /* Loop through linked list until the device_fh is found. */
+ while (ll_dev != NULL) {
+ if (ll_dev->dev.device_fh == ctx.fh)
+ return ll_dev;
+ ll_dev = ll_dev->next;
+ }
+
+ return NULL;
+}
+
+/*
+ * Searches the configuration core linked list and
+ * retrieves the device if it exists.
+ */
+static struct virtio_net *
+get_device(struct vhost_device_ctx ctx)
+{
+ struct virtio_net_config_ll *ll_dev;
+
+ ll_dev = get_config_ll_entry(ctx);
+
+ if (ll_dev)
+ return &ll_dev->dev;
+
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
+ return NULL;
+}
+
+/*
+ * Add entry containing a device to the device configuration linked list.
+ */
+static void
+add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
+{
+ struct virtio_net_config_ll *ll_dev = ll_root;
+
+ /* If ll_dev is NULL then this is the first device; it is handled in the else branch below. */
+ if (ll_dev) {
+ /* If the 1st device_fh != 0 then we insert our device here. */
+ if (ll_dev->dev.device_fh != 0) {
+ new_ll_dev->dev.device_fh = 0;
+ new_ll_dev->next = ll_dev;
+ ll_root = new_ll_dev;
+ } else {
+ /*
+ * Increment through the list until we find an unused
+ * device_fh. Insert the device at that entry.
+ */
+ while ((ll_dev->next != NULL) &&
+ (ll_dev->dev.device_fh ==
+ (ll_dev->next->dev.device_fh - 1)))
+ ll_dev = ll_dev->next;
+
+ new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
+ new_ll_dev->next = ll_dev->next;
+ ll_dev->next = new_ll_dev;
+ }
+ } else {
+ ll_root = new_ll_dev;
+ ll_root->dev.device_fh = 0;
+ }
+
+}
+
+/*
+ * Unmap any memory, close any file descriptors and
+ * free any memory owned by a device.
+ */
+static void
+cleanup_device(struct virtio_net *dev)
+{
+ /* Unmap QEMU memory file if mapped. */
+ if (dev->mem) {
+ munmap((void *)(uintptr_t)dev->mem->mapped_address,
+ (size_t)dev->mem->mapped_size);
+ free(dev->mem);
+ }
+
+ /* Close any event notifiers opened by device. */
+ if (dev->virtqueue[VIRTIO_RXQ]->callfd)
+ close((int)dev->virtqueue[VIRTIO_RXQ]->callfd);
+ if (dev->virtqueue[VIRTIO_RXQ]->kickfd)
+ close((int)dev->virtqueue[VIRTIO_RXQ]->kickfd);
+ if (dev->virtqueue[VIRTIO_TXQ]->callfd)
+ close((int)dev->virtqueue[VIRTIO_TXQ]->callfd);
+ if (dev->virtqueue[VIRTIO_TXQ]->kickfd)
+ close((int)dev->virtqueue[VIRTIO_TXQ]->kickfd);
+}
+
+/*
+ * Release virtqueues and device memory.
+ */
+static void
+free_device(struct virtio_net_config_ll *ll_dev)
+{
+ /* Free any malloc'd memory */
+ free(ll_dev->dev.virtqueue[VIRTIO_RXQ]);
+ free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
+ free(ll_dev);
+}
+
+/*
+ * Remove an entry from the device configuration linked list.
+ */
+static struct virtio_net_config_ll *
+rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
+ struct virtio_net_config_ll *ll_dev_last)
+{
+ /* First remove the device and then clean it up. */
+ if (ll_dev == ll_root) {
+ ll_root = ll_dev->next;
+ cleanup_device(&ll_dev->dev);
+ free_device(ll_dev);
+ return ll_root;
+ } else {
+ if (likely(ll_dev_last != NULL)) {
+ ll_dev_last->next = ll_dev->next;
+ cleanup_device(&ll_dev->dev);
+ free_device(ll_dev);
+ return ll_dev_last->next;
+ } else {
+ cleanup_device(&ll_dev->dev);
+ free_device(ll_dev);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Remove entry from config_ll failed\n");
+ return NULL;
+ }
+ }
+}
+
+/*
+ * Initialise all variables in device structure.
+ */
+static void
+init_device(struct virtio_net *dev)
+{
+ uint64_t vq_offset;
+
+ /*
+ * Virtqueues have already been malloced so
+ * we don't want to set them to NULL.
+ */
+ vq_offset = offsetof(struct virtio_net, mem);
+
+ /* Set everything to 0. */
+ memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
+ (sizeof(struct virtio_net) - (size_t)vq_offset));
+ memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
+ memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));
+
+ /* Backends are set to -1 indicating an inactive device. */
+ dev->virtqueue[VIRTIO_RXQ]->backend = VIRTIO_DEV_STOPPED;
+ dev->virtqueue[VIRTIO_TXQ]->backend = VIRTIO_DEV_STOPPED;
+}
+
+/*
+ * Function is called from the CUSE open function. The device structure is
+ * initialised and a new entry is added to the device configuration linked
+ * list.
+ */
+static int
+new_device(struct vhost_device_ctx ctx)
+{
+ struct virtio_net_config_ll *new_ll_dev;
+ struct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;
+
+ /* Setup device and virtqueues. */
+ new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
+ if (new_ll_dev == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to allocate memory for dev.\n",
+ ctx.fh);
+ return -1;
+ }
+
+ virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
+ if (virtqueue_rx == NULL) {
+ free(new_ll_dev);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to allocate memory for rxq.\n",
+ ctx.fh);
+ return -1;
+ }
+
+ virtqueue_tx = malloc(sizeof(struct vhost_virtqueue));
+ if (virtqueue_tx == NULL) {
+ free(virtqueue_rx);
+ free(new_ll_dev);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to allocate memory for txq.\n",
+ ctx.fh);
+ return -1;
+ }
+
+ new_ll_dev->dev.virtqueue[VIRTIO_RXQ] = virtqueue_rx;
+ new_ll_dev->dev.virtqueue[VIRTIO_TXQ] = virtqueue_tx;
+
+ /* Initialise device and virtqueues. */
+ init_device(&new_ll_dev->dev);
+
+ new_ll_dev->next = NULL;
+
+ /* Add entry to device configuration linked list. */
+ add_config_ll_entry(new_ll_dev);
+
+ return new_ll_dev->dev.device_fh;
+}
+
+/*
+ * Function is called from the CUSE release function. This function will
+ * cleanup the device and remove it from device configuration linked list.
+ */
+static void
+destroy_device(struct vhost_device_ctx ctx)
+{
+ struct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;
+ struct virtio_net_config_ll *ll_dev_cur = ll_root;
+
+ /* Find the linked list entry for the device to be removed. */
+ ll_dev_cur_ctx = get_config_ll_entry(ctx);
+ while (ll_dev_cur != NULL) {
+ /*
+ * If the entry matching this context is found,
+ * it is removed from the list.
+ */
+ if (ll_dev_cur == ll_dev_cur_ctx) {
+ /*
+ * If the device is running on a data core then call
+ * the function to remove it from the data core.
+ */
+ if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
+ notify_ops->destroy_device(&(ll_dev_cur->dev));
+ ll_dev_cur = rm_config_ll_entry(ll_dev_cur,
+ ll_dev_last);
+ } else {
+ ll_dev_last = ll_dev_cur;
+ ll_dev_cur = ll_dev_cur->next;
+ }
+ }
+}
+
+/*
+ * Called from CUSE IOCTL: VHOST_SET_OWNER
+ * This function just returns success at the moment unless
+ * the device hasn't been initialised.
+ */
+static int
+set_owner(struct vhost_device_ctx ctx)
+{
+ struct virtio_net *dev;
+
+ dev = get_device(ctx);
+ if (dev == NULL)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Called from CUSE IOCTL: VHOST_RESET_OWNER
+ */
+static int
+reset_owner(struct vhost_device_ctx ctx)
+{
+ struct virtio_net_config_ll *ll_dev;
+
+ ll_dev = get_config_ll_entry(ctx);
+
+ cleanup_device(&ll_dev->dev);
+ init_device(&ll_dev->dev);
+
+ return 0;
+}
+
+/*
+ * Called from CUSE IOCTL: VHOST_GET_FEATURES
+ * The features that we support are requested.
+ */
+static int
+get_features(struct vhost_device_ctx ctx, uint64_t *pu)
+{
+ struct virtio_net *dev;
+
+ dev = get_device(ctx);
+ if (dev == NULL)
+ return -1;
+
+ /* Send our supported features. */
+ *pu = VHOST_FEATURES;
+ return 0;
+}
+
+/*
+ * Called from CUSE IOCTL: VHOST_SET_FEATURES
+ * We receive the negotiated features supported by us and the virtio device.
+ */
+static int
+set_features(struct vhost_device_ctx ctx, uint64_t *pu)
+{
+ struct virtio_net *dev;
+
+ dev = get_device(ctx);
+ if (dev == NULL)
+ return -1;
+ if (*pu & ~VHOST_FEATURES)
+ return -1;
+
+ /* Store the negotiated feature list for the device. */
+ dev->features = *pu;
+
+ /* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
+ if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") Mergeable RX buffers enabled\n",
+ dev->device_fh);
+ dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ } else {
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") Mergeable RX buffers disabled\n",
+ dev->device_fh);
+ dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr);
+ dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr);
+ }
+ return 0;
+}
+
+
+/*
+ * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE
+ * This function creates and populates the memory structure for the device.
+ * This includes storing offsets used to translate buffer addresses.
+ */
+static int
+set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
+ uint32_t nregions)
+{
+ struct virtio_net *dev;
+ struct vhost_memory_region *mem_regions;
+ struct virtio_memory *mem;
+ uint64_t size = offsetof(struct vhost_memory, regions);
+ uint32_t regionidx, valid_regions;
+
+ dev = get_device(ctx);
+ if (dev == NULL)
+ return -1;
+
+ if (dev->mem) {
+ munmap((void *)(uintptr_t)dev->mem->mapped_address,
+ (size_t)dev->mem->mapped_size);
+ free(dev->mem);
+ }
+
+ /* Malloc the memory structure depending on the number of regions. */
+ mem = calloc(1, sizeof(struct virtio_memory) +
+ (sizeof(struct virtio_memory_regions) * nregions));
+ if (mem == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to allocate memory for dev->mem.\n",
+ dev->device_fh);
+ return -1;
+ }
+
+ mem->nregions = nregions;
+
+ mem_regions = (void *)(uintptr_t)
+ ((uint64_t)(uintptr_t)mem_regions_addr + size);
+
+ for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
+ /* Populate the region structure for each region. */
+ mem->regions[regionidx].guest_phys_address =
+ mem_regions[regionidx].guest_phys_addr;
+ mem->regions[regionidx].guest_phys_address_end =
+ mem->regions[regionidx].guest_phys_address +
+ mem_regions[regionidx].memory_size;
+ mem->regions[regionidx].memory_size =
+ mem_regions[regionidx].memory_size;
+ mem->regions[regionidx].userspace_address =
+ mem_regions[regionidx].userspace_addr;
+
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
+ regionidx,
+ (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
+ (void *)(uintptr_t)mem->regions[regionidx].userspace_address,
+ mem->regions[regionidx].memory_size);
+
+ /* Set the base address mapping. */
+ if (mem->regions[regionidx].guest_phys_address == 0x0) {
+ mem->base_address =
+ mem->regions[regionidx].userspace_address;
+ /* Map VM memory file */
+ if (host_memory_map(dev, mem, ctx.pid,
+ mem->base_address) != 0) {
+ free(mem);
+ return -1;
+ }
+ }
+ }
+
+ /* Check that we have a valid base address. */
+ if (mem->base_address == 0) {
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find base address of qemu memory file.\n", dev->device_fh);
+ free(mem);
+ return -1;
+ }
+
+ /*
+ * Check if all of our regions have valid mappings.
+ * Usually one does not exist in the QEMU memory file.
+ */
+ valid_regions = mem->nregions;
+ for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
+ if ((mem->regions[regionidx].userspace_address <
+ mem->base_address) ||
+ (mem->regions[regionidx].userspace_address >
+ (mem->base_address + mem->mapped_size)))
+ valid_regions--;
+ }
+
+ /*
+ * If a region does not have a valid mapping,
+ * we rebuild our memory struct to contain only valid entries.
+ */
+ if (valid_regions != mem->nregions) {
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\n",
+ dev->device_fh);
+
+ /*
+ * Re-populate the memory structure with only valid regions.
+ * Invalid regions are over-written with memmove.
+ */
+ valid_regions = 0;
+
+ for (regionidx = mem->nregions; 0 != regionidx--;) {
+ if ((mem->regions[regionidx].userspace_address <
+ mem->base_address) ||
+ (mem->regions[regionidx].userspace_address >
+ (mem->base_address + mem->mapped_size))) {
+ memmove(&mem->regions[regionidx],
+ &mem->regions[regionidx + 1],
+ sizeof(struct virtio_memory_regions) *
+ valid_regions);
+ } else {
+ valid_regions++;
+ }
+ }
+ }
+ mem->nregions = valid_regions;
+ dev->mem = mem;
+
+ /*
+ * Calculate the address offset for each region.
+ * This offset is used to identify the vhost virtual address
+ * corresponding to a QEMU guest physical address.
+ */
+ for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
+ dev->mem->regions[regionidx].address_offset =
+ dev->mem->regions[regionidx].userspace_address -
+ dev->mem->base_address +
+ dev->mem->mapped_address -
+ dev->mem->regions[regionidx].guest_phys_address;
+
+ }
+ return 0;
+}
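
The per-region address_offset computed above is what the data path's gpa_to_vva() translation (declared in rte_virtio_net.h) relies on: a guest physical address that falls inside a region is turned into a vhost virtual address by adding that region's offset. A hedged sketch of that lookup over the structures populated here (the function name is illustrative):

	/* Sketch: translate a guest physical address using the region offsets. */
	static uint64_t
	example_gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
	{
		struct virtio_memory_regions *region;
		uint32_t i;

		for (i = 0; i < dev->mem->nregions; i++) {
			region = &dev->mem->regions[i];
			if ((guest_pa >= region->guest_phys_address) &&
			    (guest_pa < region->guest_phys_address_end))
				return guest_pa + region->address_offset;
		}
		return 0; /* address not covered by any region */
	}
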
+
+/*
+ * Called from CUSE IOCTL: VHOST_SET_VRING_NUM
+ * The virtio device sends us the size of the descriptor ring.
+ */
+static int
+set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
+{
+ struct virtio_net *dev;
+
+ dev = get_device(ctx);
+ if (dev == NULL)
+ return -1;
+
+ /* State->index refers to the queue index. The txq is 1, rxq is 0. */
+ dev->virtqueue[state->index]->size = state->num;
+
+ return 0;
+}
+
+/*
+ * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
+ * The virtio device sends us the desc, used and avail ring addresses.
+ * This function then converts these to our address space.
+ */
+static int
+set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(ctx);
+ if (dev == NULL)
+ return -1;
+
+ /* addr->index refers to the queue index. The txq is 1, rxq is 0. */
+ vq = dev->virtqueue[addr->index];
+
+ /* The addresses are converted from QEMU virtual to Vhost virtual. */
+ vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
+ addr->desc_user_addr);
+ if (vq->desc == 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to find desc ring address.\n",
+ dev->device_fh);
+ return -1;
+ }
+
+ vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
+ addr->avail_user_addr);
+ if (vq->avail == 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to find avail ring address.\n",
+ dev->device_fh);
+ return -1;
+ }
+
+ vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
+ addr->used_user_addr);
+ if (vq->used == 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to find used ring address.\n",
+ dev->device_fh);
+ return -1;
+ }
+
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n",
+ dev->device_fh, vq->desc);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n",
+ dev->device_fh, vq->avail);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n",
+ dev->device_fh, vq->used);
+
+ return 0;
+}
+
+/*
+ * Called from CUSE IOCTL: VHOST_SET_VRING_BASE
+ * The virtio device sends us the available ring last used index.
+ */
+static int
+set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
+{
+ struct virtio_net *dev;
+
+ dev = get_device(ctx);
+ if (dev == NULL)
+ return -1;
+
+ /* State->index refers to the queue index. The txq is 1, rxq is 0. */
+ dev->virtqueue[state->index]->last_used_idx = state->num;
+ dev->virtqueue[state->index]->last_used_idx_res = state->num;
+
+ return 0;
+}
+
+/*
+ * Called from CUSE IOCTL: VHOST_GET_VRING_BASE
+ * We send the virtio device our available ring last used index.
+ */
+static int
+get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
+ struct vhost_vring_state *state)
+{
+ struct virtio_net *dev;
+
+ dev = get_device(ctx);
+ if (dev == NULL)
+ return -1;
+
+ state->index = index;
+ /* State->index refers to the queue index. The txq is 1, rxq is 0. */
+ state->num = dev->virtqueue[state->index]->last_used_idx;
+
+ return 0;
+}
+
+/*
+ * This function uses the eventfd_link kernel module to copy an eventfd file
+ * descriptor provided by QEMU into our process space.
+ */
+static int
+eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
+{
+ int eventfd_link, ret;
+
+ /* Open the character device to the kernel module. */
+ eventfd_link = open(eventfd_cdev, O_RDWR);
+ if (eventfd_link < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") eventfd_link module is not loaded\n",
+ dev->device_fh);
+ return -1;
+ }
+
+ /* Call the IOCTL to copy the eventfd. */
+ ret = ioctl(eventfd_link, EVENTFD_COPY, eventfd_copy);
+ close(eventfd_link);
+
+ if (ret < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") EVENTFD_COPY ioctl failed\n",
+ dev->device_fh);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
+ * The virtio device sends an eventfd to interrupt the guest. This fd gets
+ * copied into our process space.
+ */
+static int
+set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
+{
+ struct virtio_net *dev;
+ struct eventfd_copy eventfd_kick;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(ctx);
+ if (dev == NULL)
+ return -1;
+
+ /* file->index refers to the queue index. The txq is 1, rxq is 0. */
+ vq = dev->virtqueue[file->index];
+
+ if (vq->kickfd)
+ close((int)vq->kickfd);
+
+ /* Populate the eventfd_copy structure and call eventfd_copy. */
+ vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ eventfd_kick.source_fd = vq->kickfd;
+ eventfd_kick.target_fd = file->fd;
+ eventfd_kick.target_pid = ctx.pid;
+
+ if (eventfd_copy(dev, &eventfd_kick))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
+ * The virtio device sends an eventfd that it can use to notify us.
+ * This fd gets copied into our process space.
+ */
+static int
+set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
+{
+ struct virtio_net *dev;
+ struct eventfd_copy eventfd_call;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(ctx);
+ if (dev == NULL)
+ return -1;
+
+ /* file->index refers to the queue index. The txq is 1, rxq is 0. */
+ vq = dev->virtqueue[file->index];
+
+ if (vq->callfd)
+ close((int)vq->callfd);
+
+ /* Populate the eventfd_copy structure and call eventfd_copy. */
+ vq->callfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ eventfd_call.source_fd = vq->callfd;
+ eventfd_call.target_fd = file->fd;
+ eventfd_call.target_pid = ctx.pid;
+
+ if (eventfd_copy(dev, &eventfd_call))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Function to get the tap device name from the provided file descriptor and
+ * save it in the device structure.
+ */
+static int
+get_ifname(struct virtio_net *dev, int tap_fd, int pid)
+{
+ struct eventfd_copy fd_tap;
+ struct ifreq ifr;
+ uint32_t size, ifr_size;
+ int ret;
+
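+ /*
+ * Reuse the eventfd_copy mechanism to duplicate the tap fd from the QEMU
+ * process into ours, so that the TUNGETIFF ioctl can be issued locally.
+ */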
+ fd_tap.source_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ fd_tap.target_fd = tap_fd;
+ fd_tap.target_pid = pid;
+
+ if (eventfd_copy(dev, &fd_tap))
+ return -1;
+
+ ret = ioctl(fd_tap.source_fd, TUNGETIFF, &ifr);
+
+ if (close(fd_tap.source_fd) < 0)
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") fd close failed\n",
+ dev->device_fh);
+
+ if (ret >= 0) {
+ ifr_size = strnlen(ifr.ifr_name, sizeof(ifr.ifr_name));
+ size = ifr_size > sizeof(dev->ifname) ?
+ sizeof(dev->ifname) : ifr_size;
+
+ strncpy(dev->ifname, ifr.ifr_name, size);
+ } else
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") TUNGETIFF ioctl failed\n",
+ dev->device_fh);
+
+ return 0;
+}
+
+/*
+ * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
+ * To complete device initialisation when the virtio driver is loaded,
+ * we are provided with a valid fd for a tap device (not used by us).
+ * If this happens then we can add the device to a data core.
+ * When the virtio driver is removed we get fd=-1.
+ * At that point we remove the device from the data core.
+ * The device will still exist in the device configuration linked list.
+ */
+static int
+set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
+{
+ struct virtio_net *dev;
+
+ dev = get_device(ctx);
+ if (dev == NULL)
+ return -1;
+
+ /* file->index refers to the queue index. The txq is 1, rxq is 0. */
+ dev->virtqueue[file->index]->backend = file->fd;
+
+ /*
+ * If the device isn't already running and both backend fds are set,
+ * we add the device.
+ */
+ if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
+ if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
+ ((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED)) {
+ get_ifname(dev, file->fd, ctx.pid);
+ return notify_ops->new_device(dev);
+ }
+ /* Otherwise we remove it. */
+ } else
+ if (file->fd == VIRTIO_DEV_STOPPED)
+ notify_ops->destroy_device(dev);
+ return 0;
+}
+
+/*
+ * Function pointers are set for the device operations to allow CUSE to call
+ * functions when an IOCTL, device_add or device_release is received.
+ */
+static const struct vhost_net_device_ops vhost_device_ops = {
+ .new_device = new_device,
+ .destroy_device = destroy_device,
+
+ .get_features = get_features,
+ .set_features = set_features,
+
+ .set_mem_table = set_mem_table,
+
+ .set_vring_num = set_vring_num,
+ .set_vring_addr = set_vring_addr,
+ .set_vring_base = set_vring_base,
+ .get_vring_base = get_vring_base,
+
+ .set_vring_kick = set_vring_kick,
+ .set_vring_call = set_vring_call,
+
+ .set_backend = set_backend,
+
+ .set_owner = set_owner,
+ .reset_owner = reset_owner,
+};
+
+/*
+ * Called by main to setup callbacks when registering CUSE device.
+ */
+struct vhost_net_device_ops const *
+get_virtio_net_callbacks(void)
+{
+ return &vhost_device_ops;
+}
+
+int rte_vhost_enable_guest_notification(struct virtio_net *dev,
+ uint16_t queue_id, int enable)
+{
+ if (enable) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "guest notification isn't supported.\n");
+ return -1;
+ }
+
+ dev->virtqueue[queue_id]->used->flags =
+ enable ? 0 : VRING_USED_F_NO_NOTIFY;
+ return 0;
+}
+
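+/*
+ * Feature negotiation helpers. VHOST_FEATURES is the feature bitmask offered
+ * to the guest: rte_vhost_feature_disable() clears bits unconditionally,
+ * while rte_vhost_feature_enable() only sets bits that are part of
+ * VHOST_SUPPORTED_FEATURES.
+ */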
+uint64_t rte_vhost_feature_get(void)
+{
+ return VHOST_FEATURES;
+}
+
+int rte_vhost_feature_disable(uint64_t feature_mask)
+{
+ VHOST_FEATURES = VHOST_FEATURES & ~feature_mask;
+ return 0;
+}
+
+int rte_vhost_feature_enable(uint64_t feature_mask)
+{
+ if ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) {
+ VHOST_FEATURES = VHOST_FEATURES | feature_mask;
+ return 0;
+ }
+ return -1;
+}
+
+/*
+ * Register ops so that we can add/remove device to data core.
+ */
+int
+rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops)
+{
+ notify_ops = ops;
+
+ return 0;
+}
diff --git a/src/global_io_mode.cpp b/src/global_io_mode.cpp
new file mode 100755
index 00000000..23b2c4a3
--- /dev/null
+++ b/src/global_io_mode.cpp
@@ -0,0 +1,128 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#include "global_io_mode.h"
+#include "utl_term_io.h"
+#include <stdlib.h>
+
+
+void CTrexGlobalIoMode::set_mode(CliDumpMode mode){
+ switch (mode) {
+ case cdDISABLE:
+ m_g_mode=gDISABLE;
+ m_g_disable_first=false;
+ break;
+ case cdNORMAL:
+ Reset();
+ break;
+ case cdSHORT:
+ m_g_mode=gNORMAL;
+ m_pp_mode=ppDISABLE;
+ m_ap_mode=apENABLE;
+ m_l_mode=lDISABLE;
+ m_rc_mode=rcDISABLE;
+ break;
+ }
+}
+
+
+bool CTrexGlobalIoMode::handle_io_modes(void){
+ int c=utl_termio_try_getch();
+ if (c) {
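+ /* Ctrl-C (ETX, 0x03) requests termination of the main loop. */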
+ if (c==3) {
+ return true;
+ }
+ switch (c) {
+ case ccHELP:
+ if (m_g_mode==gHELP) {
+ m_g_mode=gNORMAL;
+ }else{
+ m_g_mode=gHELP;
+ }
+ break;
+ case ccGDISABLE:
+ if (m_g_mode==gDISABLE) {
+ m_g_mode=gNORMAL;
+ }else{
+ m_g_mode=gDISABLE;
+ m_g_disable_first=true;
+ }
+ break;
+ case ccGNORAML:
+ Reset();
+ break;
+ case ccGPP:
+ m_g_mode=gNORMAL;
+ m_pp_mode++;
+ if (m_pp_mode==ppLAST) {
+ m_pp_mode = ppDISABLE;
+ }
+ break;
+ case ccGAP:
+ m_g_mode=gNORMAL;
+ m_ap_mode++;
+ if (m_ap_mode == apLAST) {
+ m_ap_mode = apDISABLE;
+ }
+ break;
+ case ccGL:
+ m_g_mode=gNORMAL;
+ m_l_mode++;
+ if (m_l_mode == lLAST) {
+ m_l_mode = lDISABLE;
+ }
+ break;
+ case ccGRC:
+ m_g_mode=gNORMAL;
+ m_rc_mode++;
+ if (m_rc_mode == rcLAST) {
+ m_rc_mode = rcDISABLE;
+ }
+ break;
+ }
+ }
+ return false;
+}
+
+void CTrexGlobalIoMode::Dump(FILE *fd){
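+ /* ANSI escape sequences: clear the screen and reposition the cursor before printing the current modes. */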
+ fprintf(fd,"\033[2J");
+ fprintf(fd,"\033[2H");
+ fprintf(fd," global: %d \n",(int)m_g_mode);
+ fprintf(fd," pp : %d \n",(int)m_pp_mode);
+ fprintf(fd," ap : %d \n",(int)m_ap_mode);
+ fprintf(fd," l : %d \n",(int)m_l_mode);
+ fprintf(fd," rc : %d \n",(int)m_rc_mode);
+}
+
+void CTrexGlobalIoMode::DumpHelp(FILE *fd){
+ fprintf(fd,"Help for Interactive Commands - Trex \n" );
+ fprintf(fd," d : Toggle, Disable all -> Noraml \n");
+ fprintf(fd," n : Default mode all in Normal mode \n");
+ fprintf(fd," h : Toggle, Help->Normal \n");
+ fprintf(fd,"\n");
+ fprintf(fd," p : Per ports Toggle mode, disable -> table -> normal \n");
+ fprintf(fd," a : Global ports Toggle mode, disable -> enable \n");
+ fprintf(fd," l : Latency Toggle mode, disable -> enable -> enhanced \n");
+ fprintf(fd," r : Rx check Toggle mode, disable -> enable -> enhanced \n");
+ fprintf(fd," Press h or 1 to go back to Normal mode \n");
+}
+
+
+
diff --git a/src/global_io_mode.h b/src/global_io_mode.h
new file mode 100755
index 00000000..bbdcb6ef
--- /dev/null
+++ b/src/global_io_mode.h
@@ -0,0 +1,121 @@
+#ifndef GLOBAL_IO_MODE_H
+#define GLOBAL_IO_MODE_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <stdint.h>
+#include <stdio.h>
+
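+/*
+ * Holds the state of the interactive console output: a global mode plus
+ * per-port, all-port, latency and rx-check sub-modes, each cycled by a
+ * single-key command (see handle_io_modes()).
+ */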
+class CTrexGlobalIoMode {
+public:
+ CTrexGlobalIoMode(){
+ m_g_disable_first=false;
+ }
+
+ void Reset(){
+ m_g_mode=gNORMAL;
+ m_pp_mode=ppTABLE;
+ m_ap_mode=apENABLE;
+ m_l_mode=lENABLE;
+ m_rc_mode=rcENABLE;
+ }
+ /* Interactive key map (see Chars below):
+ d - disable all -> normal
+ h - help -> normal
+ n - reset to normal
+ p - cycle per-port counters mode
+ a - cycle all-port counters mode
+ l - cycle latency mode
+ r - cycle rx-check mode
+ */
+ enum Chars{
+ ccHELP='h',
+ ccGDISABLE='d',
+ ccGNORAML='n',
+ ccGPP='p',
+ ccGAP='a',
+ ccGL='l',
+ ccGRC='r'
+ };
+
+ enum CliDumpMode {
+ cdDISABLE=0, // no print at all
+ cdNORMAL=1, // normal
+ cdSHORT =2 // short only all ports info
+ };
+
+
+ enum Global {
+ gDISABLE=0, // no print at all
+ gHELP=1, // help
+ gNORMAL=2 // normal
+ };
+
+ typedef uint8_t Global_t;
+
+ enum PerPortCountersMode {
+ ppDISABLE=0,
+ ppTABLE =1,
+ ppSTANDARD=2,
+ ppLAST =3
+ };
+ typedef uint8_t PerPortCountersMode_t;
+
+ enum AllPortCountersMode {
+ apDISABLE=0,
+ apENABLE =1,
+ apLAST =2
+ };
+ typedef uint8_t AllPortCountersMode_t;
+
+ enum LatecnyMode {
+ lDISABLE =0,
+ lENABLE =1,
+ lENABLE_Extended=2,
+ lLAST =3
+ };
+ typedef uint8_t LatecnyMode_t;
+
+ enum RxCheckMode {
+ rcDISABLE =0,
+ rcENABLE =1,
+ rcENABLE_Extended=2,
+ rcLAST =3
+ };
+ typedef uint8_t RxCheckMode_t;
+
+ Global_t m_g_mode;
+ bool m_g_disable_first;
+ PerPortCountersMode_t m_pp_mode;
+ AllPortCountersMode_t m_ap_mode;
+ LatecnyMode_t m_l_mode;
+ RxCheckMode_t m_rc_mode;
+
+public:
+ void set_mode(CliDumpMode mode);
+ /* return true if we need to terminate */
+ bool handle_io_modes();
+
+ void Dump(FILE *fd);
+ void DumpHelp(FILE *fd);
+};
+
+
+#endif
diff --git a/src/gtest/Makefile b/src/gtest/Makefile
new file mode 100755
index 00000000..6468bcf8
--- /dev/null
+++ b/src/gtest/Makefile
@@ -0,0 +1,40 @@
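+# Standalone Makefile for the tuple generator unit test. The bundled gtest
+# sources live in ../common and are linked together with tuple_gen.cpp and
+# tuple_gen_test.cpp into the tuple_gen_test binary.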
+GTEST_DIR = ../common
+USER_DIR = .
+CPPFLAGS += -isystem ../
+
+CXXFLAGS += -g -Wall -Wextra -pthread
+
+TESTS = tuple_gen_test
+
+GTEST_HEADERS = $(GTEST_DIR)/gtest.h
+
+all : $(TESTS)
+
+clean :
+ rm -f $(TESTS) gtest.a gtest_main.a *.o
+
+GTEST_SRCS_ = $(GTEST_HEADERS)
+
+gtest-all.o : $(GTEST_SRCS_)
+ $(CXX) $(CPPFLAGS) -I$(GTEST_DIR) $(CXXFLAGS) -c \
+ $(GTEST_DIR)/gtest-all.cc
+
+gtest_main.o : $(GTEST_SRCS_)
+ $(CXX) $(CPPFLAGS) -I$(GTEST_DIR) $(CXXFLAGS) -c \
+ $(GTEST_DIR)/gtest_main.cc
+
+gtest.a : gtest-all.o
+ $(AR) $(ARFLAGS) $@ $^
+
+gtest_main.a : gtest-all.o gtest_main.o
+ $(AR) $(ARFLAGS) $@ $^
+
+tuple_gen.o : ../tuple_gen.cpp ../tuple_gen.h $(GTEST_HEADERS)
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c ../tuple_gen.cpp
+
+tuple_gen_test.o : ./tuple_gen_test.cpp \
+ ../tuple_gen.h $(GTEST_HEADERS)
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c ./tuple_gen_test.cpp
+
+tuple_gen_test : tuple_gen.o tuple_gen_test.o gtest_main.a
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) -lpthread $^ -o $@
diff --git a/src/gtest/nat_test.cpp b/src/gtest/nat_test.cpp
new file mode 100755
index 00000000..aa34cec2
--- /dev/null
+++ b/src/gtest/nat_test.cpp
@@ -0,0 +1,5 @@
+#include "../bp_sim.h"
+#include <common/gtest.h>
+#include <common/basic_utils.h>
+#include "../nat_check.h"
+
diff --git a/src/gtest/tuple_gen_test.cpp b/src/gtest/tuple_gen_test.cpp
new file mode 100755
index 00000000..6419ced1
--- /dev/null
+++ b/src/gtest/tuple_gen_test.cpp
@@ -0,0 +1,700 @@
+/*
+ Wenxian Li
+
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#include "../bp_sim.h"
+#include <common/gtest.h>
+#include <common/basic_utils.h>
+
+/* TEST case for CClientInfo*/
+
+class CClientInfoUT {
+public:
+ CClientInfoUT(CClientInfo *a) {
+ obj = a;
+ }
+
+ uint16_t m_head_port_get() {
+ return obj->m_head_port;
+ }
+
+ void m_head_port_set(uint16_t head) {
+ obj->m_head_port = head;
+ }
+
+ bool is_port_legal(uint16_t port) {
+ return obj->is_port_legal(port);
+ }
+ bool is_port_free(uint16_t port) {
+ return obj->is_port_available(port);
+ }
+ void m_bitmap_port_set_bit(uint16_t port) {
+ obj->m_bitmap_port[port] = 1;
+ }
+ void m_bitmap_port_reset_bit(uint16_t port) {
+ obj->m_bitmap_port[port] = 0;
+ }
+ uint8_t m_bitmap_port_get_bit(uint16_t port) {
+ return obj->m_bitmap_port[port];
+ }
+ void get_next_free_port_by_bit() {
+ obj->get_next_free_port_by_bit();
+ }
+
+ CClientInfo *obj;
+};
+
+TEST(CClientInfoTest, Constructors) {
+ CClientInfo c;
+ CClientInfoUT client(&c);
+ EXPECT_EQ(MIN_PORT, client.m_head_port_get());
+
+ CClientInfo c2;
+ CClientInfoUT client2(&c2);
+ EXPECT_EQ(MIN_PORT, client2.m_head_port_get());
+}
+
+TEST(CClientInfoTest, is_port_legal) {
+ CClientInfo c;
+ CClientInfoUT client(&c);
+ EXPECT_TRUE(client.is_port_legal(MIN_PORT));
+ EXPECT_FALSE(client.is_port_legal(MIN_PORT-1));
+ EXPECT_TRUE(client.is_port_legal(MAX_PORT-1));
+ EXPECT_FALSE(client.is_port_legal(MAX_PORT));
+ EXPECT_FALSE(client.is_port_legal(MAX_PORT+1));
+}
+
+TEST(CClientInfoTest, is_port_free) {
+ CClientInfo c;
+ CClientInfoUT client(&c);
+ client.m_bitmap_port_set_bit(2000);
+ EXPECT_FALSE(client.is_port_free(2000));
+ client.m_bitmap_port_reset_bit(2000);
+ EXPECT_TRUE(client.is_port_free(2000));
+}
+
+
+
+TEST(CClientInfoTest,get_next_free_port_by_bit) {
+ CClientInfo c;
+ CClientInfoUT client(&c);
+ client.m_head_port_set(200);
+ client.get_next_free_port_by_bit();
+ EXPECT_EQ(MIN_PORT, client.m_head_port_get());
+ for(int idx=1024;idx<2000;idx++) {
+ client.m_bitmap_port_set_bit(idx);
+ }
+ client.get_next_free_port_by_bit();
+ EXPECT_EQ(1044, client.m_head_port_get());
+}
+
+TEST(CClientInfoTest, get_new_free_port) {
+ CClientInfo c;
+ CClientInfoUT client(&c);
+
+ EXPECT_EQ(1024, client.obj->get_new_free_port());
+ EXPECT_EQ(PORT_IN_USE, client.m_bitmap_port_get_bit(1024));
+
+ client.m_bitmap_port_reset_bit(1024);
+ EXPECT_EQ(PORT_FREE, client.m_bitmap_port_get_bit(1024));
+ client.m_head_port_set(MAX_PORT-1);
+ client.m_bitmap_port_set_bit(MAX_PORT-1);
+
+ EXPECT_EQ(1024, client.obj->get_new_free_port());
+ EXPECT_EQ(PORT_IN_USE, client.m_bitmap_port_get_bit(1024));
+ client.m_head_port_set(1024);
+ EXPECT_EQ(1025, client.obj->get_new_free_port());
+ EXPECT_EQ(PORT_IN_USE, client.m_bitmap_port_get_bit(1025));
+
+ for(int i=1024;i<1200;i++) {
+ client.m_bitmap_port_set_bit(i);
+ }
+ client.m_head_port_set(1024);
+ EXPECT_EQ(ILLEGAL_PORT, client.obj->get_new_free_port());
+
+}
+
+
+TEST(CClientInfoTest, return_port) {
+ CClientInfo c;
+ CClientInfoUT client(&c);
+ client.m_bitmap_port_set_bit(2000);
+ client.obj->return_port(2000);
+ EXPECT_EQ(PORT_FREE, client.m_bitmap_port_get_bit(2000));
+}
+
+TEST(CClientInfoLTest, get_new_free_port) {
+ CClientInfoL c;
+ for(int i=0;i<10;i++) {
+ EXPECT_EQ(1024+i, c.get_new_free_port());
+ }
+ c.return_all_ports();
+ for(int i=0;i<10;i++) {
+ EXPECT_EQ(1024+i, c.get_new_free_port());
+ }
+}
+
+
+
+/* Unit tests for CTupleGeneratorSmart */
+TEST(tuple_gen,GenerateTuple) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x10000f01, 0x30000001, 0x40000001,
+ MAX_PORT, MAX_PORT);
+ CTupleBase result;
+ uint32_t result_src;
+ uint32_t result_dest;
+ uint16_t result_port;
+
+ for(int i=0;i<10;i++) {
+ gen.GenerateTuple(result);
+ printf(" C:%x S:%x P:%d \n",result.getClient(),result.getServer(),result.getClientPort());
+
+ result_src = result.getClient();
+ result_dest = result.getServer();
+ result_port = result.getClientPort();
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i));
+ EXPECT_EQ(result_dest, (uint32_t) (((0x30000001+i)) ) );
+ EXPECT_EQ(result_port, 1024);
+ }
+
+ gen.Delete();
+// EXPECT_EQ((size_t)0, gen.m_clients.size());
+}
+
+TEST(tuple_gen,GenerateTuple2) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
+ MAX_PORT, MAX_PORT);
+ CTupleBase result;
+ uint32_t result_src;
+ uint32_t result_dest;
+ uint16_t result_port;
+
+ for(int i=0;i<200;i++) {
+ gen.GenerateTuple(result);
+ // gen.Dump(stdout);
+ // fprintf(stdout, "i:%d\n",i);
+ result_src = result.getClient();
+ result_dest = result.getServer();
+ result_port = result.getClientPort();
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%15));
+ EXPECT_EQ(result_dest, (uint32_t)((0x30000001+i) ) );
+ EXPECT_EQ(result_port, 1024+i/15);
+ }
+
+ gen.Delete();
+// EXPECT_EQ((size_t)0, gen.m_clients.size());
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
+ MAX_PORT,MAX_PORT);
+ for(int i=0;i<200;i++) {
+ gen.GenerateTuple(result);
+ // gen.Dump(stdout);
+ // fprintf(stdout, "i:%d\n",i);
+ result_src = result.getClient();
+ result_dest = result.getServer();
+ result_port = result.getClientPort();
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%15));
+ EXPECT_EQ(result_dest, (uint32_t) (((0x30000001+i)) ) );
+ EXPECT_EQ(result_port, 1024+i/15);
+ }
+
+
+
+}
+
+TEST(tuple_gen,GenerateTupleMac) {
+ CFlowGenList fl;
+ fl.Create();
+ fl.load_from_mac_file("avl/mac_uit.yaml");
+ fl.m_yaml_info.m_tuple_gen.m_clients_ip_start = 0x10000001;
+ fl.m_yaml_info.m_tuple_gen.m_clients_ip_end = 0x1000000f;
+
+
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
+ MAX_PORT, MAX_PORT, &fl);
+ CTupleBase result;
+ uint32_t result_src;
+ uint32_t result_dest;
+ uint16_t result_port;
+
+ for(int i=0;i<10;i++) {
+ gen.GenerateTuple(result);
+ printf(" C:%x S:%x P:%d \n",result.getClient(),result.getServer(),result.getClientPort());
+
+ result_src = result.getClient();
+ result_dest = result.getServer();
+ result_port = result.getClientPort();
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%2));
+ EXPECT_EQ(result_dest, (uint32_t) (((0x30000001+i)) ) );
+ EXPECT_EQ(result_port, 1024+i/2);
+ }
+
+ gen.Delete();
+// EXPECT_EQ((size_t)0, gen.m_clients.size());
+}
+
+
+
+TEST(tuple_gen,GenerateTupleEx) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
+ MAX_PORT, MAX_PORT);
+ CTupleBase result;
+ uint32_t result_src;
+ uint32_t result_dest;
+ uint16_t result_port;
+ uint16_t ex_port[2];
+ for(int i=0;i<20;i++) {
+
+ gen.GenerateTupleEx(result,2,ex_port);
+ fprintf(stdout, "i:%d\n",i);
+ result_src = result.getClient();
+ result_dest = result.getServer();
+ result_port = result.getClientPort();
+
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%15));
+ EXPECT_EQ(result_dest, (uint32_t)(((0x30000001+i)) ));
+
+ EXPECT_EQ(result_port, 1024+(i/15)*3);
+ EXPECT_EQ(ex_port[0], 1025+(i/15)*3);
+ EXPECT_EQ(ex_port[1], 1026+(i/15)*3);
+ }
+
+ gen.Delete();
+}
+
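+/*
+ * split_clients() is expected to partition the client/server IP ranges across
+ * threads and to shift dual-interface threads by m_dual_interface_mask.
+ */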
+TEST(tuple_gen,split1) {
+ CClientPortion portion;
+
+ CTupleGenYamlInfo fi;
+ fi.m_clients_ip_start =0x10000000;
+ fi.m_clients_ip_end =0x100000ff;
+
+ fi.m_servers_ip_start =0x20000000;
+ fi.m_servers_ip_end =0x200000ff;
+
+ fi.m_dual_interface_mask =0x01000000;
+
+ split_clients(0,
+ 1,
+ 0,
+ fi,
+ portion);
+ EXPECT_EQ(portion.m_client_start, (uint32_t)(0x10000000));
+ EXPECT_EQ(portion.m_client_end, (uint32_t)(0x100000ff ));
+ EXPECT_EQ(portion.m_server_start , (uint32_t)(0x20000000));
+ EXPECT_EQ(portion.m_server_end , (uint32_t)(0x200000ff));
+ printf(" %x %x %x %x \n",portion.m_client_start,portion.m_client_end,portion.m_server_start,portion.m_server_end);
+
+ split_clients(2,
+ 4,
+ 1,
+ fi,
+ portion);
+
+ EXPECT_EQ(portion.m_client_start, (uint32_t)(0x11000080));
+ EXPECT_EQ(portion.m_client_end, (uint32_t)(0x110000bf ));
+ EXPECT_EQ(portion.m_server_start , (uint32_t)(0x21000080));
+ EXPECT_EQ(portion.m_server_end , (uint32_t)(0x210000bf));
+ printf(" %x %x %x %x \n",portion.m_client_start,portion.m_client_end,portion.m_server_start,portion.m_server_end);
+}
+
+TEST(tuple_gen,split2) {
+ CClientPortion portion;
+
+ CTupleGenYamlInfo fi;
+ fi.m_clients_ip_start =0x10000000;
+ fi.m_clients_ip_end =0x100001ff;
+
+ fi.m_servers_ip_start =0x20000000;
+ fi.m_servers_ip_end =0x200001ff;
+
+ fi.m_dual_interface_mask =0x01000000;
+
+ int i;
+ for (i=0; i<8; i++) {
+ split_clients(i,
+ 8,
+ (i&1),
+ fi,
+ portion);
+
+
+ if ( (i&1) ) {
+ EXPECT_EQ(portion.m_client_start, (uint32_t)(0x11000000)+(0x40*i));
+ EXPECT_EQ(portion.m_client_end, (uint32_t)(0x11000000 +(0x40*i+0x40-1)));
+ EXPECT_EQ(portion.m_server_start , (uint32_t)(0x21000000)+ (0x40*i) );
+ EXPECT_EQ(portion.m_server_end , (uint32_t)(0x21000000)+(0x40*i+0x40-1) );
+ }else{
+ EXPECT_EQ(portion.m_client_start, (uint32_t)(0x10000000)+ (0x40*i) );
+ EXPECT_EQ(portion.m_client_end, (uint32_t)(0x10000000 + (0x40*i+0x40-1) ) );
+ EXPECT_EQ(portion.m_server_start , (uint32_t)(0x20000000) + (0x40*i) );
+ EXPECT_EQ(portion.m_server_end , (uint32_t)(0x20000000) + (0x40*i+0x40-1) );
+ }
+ printf(" %x %x %x %x \n",portion.m_client_start,portion.m_client_end,portion.m_server_start,portion.m_server_end);
+ }
+}
+
+
+
+
+
+TEST(tuple_gen,template1) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
+ MAX_PORT, MAX_PORT);
+ CTupleTemplateGeneratorSmart template_1;
+ template_1.Create(&gen);
+ template_1.SetSingleServer(true,0x12121212,0,0);
+ CTupleBase result;
+
+
+ int i;
+ for (i=0; i<10; i++) {
+ template_1.GenerateTuple(result);
+ uint32_t result_src = result.getClient();
+ uint32_t result_dest = result.getServer();
+ uint16_t result_port = result.getClientPort();
+ //printf(" %x %x %x \n",result_src,result_dest,result_port);
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i));
+ EXPECT_EQ(result_dest, (uint32_t)(((0x12121212)) ));
+ }
+
+ template_1.Delete();
+ gen.Delete();
+}
+
+TEST(tuple_gen,template2) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
+ MAX_PORT, MAX_PORT);
+ CTupleTemplateGeneratorSmart template_1;
+ template_1.Create(&gen);
+ template_1.SetW(10);
+
+ CTupleBase result;
+
+
+ int i;
+ for (i=0; i<20; i++) {
+ template_1.GenerateTuple(result);
+ uint32_t result_src = result.getClient();
+ uint32_t result_dest = result.getServer();
+ uint16_t result_port = result.getClientPort();
+ //printf(" %x %x %x \n",result_src,result_dest,result_port);
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+(i/10)));
+ EXPECT_EQ(result_dest, (uint32_t)(((0x30000001+ (i/10) )) ));
+ EXPECT_EQ(result_port, 1024+(i%10));
+ }
+
+ template_1.Delete();
+ gen.Delete();
+}
+
+
+TEST(tuple_gen,no_free) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x10000001, 0x30000001, 0x300000ff,
+ MAX_PORT, MAX_PORT);
+ CTupleTemplateGeneratorSmart template_1;
+ template_1.Create(&gen);
+
+ CTupleBase result;
+
+
+ int i;
+ for (i=0; i<65557; i++) {
+ template_1.GenerateTuple(result);
+ uint32_t result_src = result.getClient();
+ uint32_t result_dest = result.getServer();
+ uint16_t result_port = result.getClientPort();
+ }
+ // should have error
+ EXPECT_TRUE((gen.getErrorAllocationCounter()>0)?true:false);
+
+ template_1.Delete();
+ gen.Delete();
+}
+
+TEST(tuple_gen,try_to_free) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x10000001, 0x30000001, 0x300000ff,
+ MAX_PORT, MAX_PORT);
+ CTupleTemplateGeneratorSmart template_1;
+ template_1.Create(&gen);
+
+ CTupleBase result;
+
+
+ int i;
+ for (i=0; i<65557; i++) {
+ template_1.GenerateTuple(result);
+ uint32_t result_src = result.getClient();
+ uint32_t result_dest = result.getServer();
+ uint16_t result_port = result.getClientPort();
+ gen.FreePort(result_src,result_port);
+ }
+ // should not have error
+ EXPECT_FALSE((gen.getErrorAllocationCounter()>0)?true:false);
+
+ template_1.Delete();
+ gen.Delete();
+}
+
+
+
+/* tuple generator using CClientInfoL*/
+TEST(tuple_gen_2,GenerateTuple) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x10000f01, 0x30000001, 0x40000001,
+ 0,0);
+ CTupleBase result;
+ uint32_t result_src;
+ uint32_t result_dest;
+ uint16_t result_port;
+
+ for(int i=0;i<10;i++) {
+ gen.GenerateTuple(result);
+ printf(" C:%x S:%x P:%d \n",result.getClient(),result.getServer(),result.getClientPort());
+
+ result_src = result.getClient();
+ result_dest = result.getServer();
+ result_port = result.getClientPort();
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i));
+ EXPECT_EQ(result_dest, (uint32_t) (((0x30000001+i)) ) );
+ EXPECT_EQ(result_port, 1024);
+ }
+
+ gen.Delete();
+// EXPECT_EQ((size_t)0, gen.m_clients.size());
+}
+
+TEST(tuple_gen_2,GenerateTuple2) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
+ 0,0);
+ CTupleBase result;
+ uint32_t result_src;
+ uint32_t result_dest;
+ uint16_t result_port;
+
+ for(int i=0;i<200;i++) {
+ gen.GenerateTuple(result);
+ // gen.Dump(stdout);
+ // fprintf(stdout, "i:%d\n",i);
+ result_src = result.getClient();
+ result_dest = result.getServer();
+ result_port = result.getClientPort();
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%15));
+ EXPECT_EQ(result_dest, (uint32_t)((0x30000001+i) ) );
+ EXPECT_EQ(result_port, 1024+i/15);
+ }
+
+ gen.Delete();
+// EXPECT_EQ((size_t)0, gen.m_clients.size());
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
+ 0,0);
+ for(int i=0;i<200;i++) {
+ gen.GenerateTuple(result);
+ // gen.Dump(stdout);
+ // fprintf(stdout, "i:%d\n",i);
+ result_src = result.getClient();
+ result_dest = result.getServer();
+ result_port = result.getClientPort();
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%15));
+ EXPECT_EQ(result_dest, (uint32_t) (((0x30000001+i)) ) );
+ EXPECT_EQ(result_port, 1024+i/15);
+ }
+
+
+
+}
+
+
+
+TEST(tuple_gen_2,GenerateTupleEx) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
+ 0,0);
+ CTupleBase result;
+ uint32_t result_src;
+ uint32_t result_dest;
+ uint16_t result_port;
+ uint16_t ex_port[2];
+ for(int i=0;i<20;i++) {
+
+ gen.GenerateTupleEx(result,2,ex_port);
+ fprintf(stdout, "i:%d\n",i);
+ result_src = result.getClient();
+ result_dest = result.getServer();
+ result_port = result.getClientPort();
+
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i%15));
+ EXPECT_EQ(result_dest, (uint32_t)(((0x30000001+i)) ));
+
+ EXPECT_EQ(result_port, 1024+(i/15)*3);
+ EXPECT_EQ(ex_port[0], 1025+(i/15)*3);
+ EXPECT_EQ(ex_port[1], 1026+(i/15)*3);
+ }
+
+ gen.Delete();
+}
+
+TEST(tuple_gen_2,template1) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
+ 0,0);
+ CTupleTemplateGeneratorSmart template_1;
+ template_1.Create(&gen);
+ template_1.SetSingleServer(true,0x12121212,0,0);
+ CTupleBase result;
+
+
+ int i;
+ for (i=0; i<10; i++) {
+ template_1.GenerateTuple(result);
+ uint32_t result_src = result.getClient();
+ uint32_t result_dest = result.getServer();
+ uint16_t result_port = result.getClientPort();
+ //printf(" %x %x %x \n",result_src,result_dest,result_port);
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+i));
+ EXPECT_EQ(result_dest, (uint32_t)(((0x12121212)) ));
+ EXPECT_EQ(result_port, 1024);
+ }
+
+ template_1.Delete();
+ gen.Delete();
+}
+
+TEST(tuple_gen_2,template2) {
+ CTupleGeneratorSmart gen;
+ gen.Create(1, 1,cdSEQ_DIST,
+ 0x10000001, 0x1000000f, 0x30000001, 0x40000001,
+ 0,0);
+ CTupleTemplateGeneratorSmart template_1;
+ template_1.Create(&gen);
+ template_1.SetW(10);
+
+ CTupleBase result;
+
+
+ int i;
+ for (i=0; i<20; i++) {
+ template_1.GenerateTuple(result);
+ uint32_t result_src = result.getClient();
+ uint32_t result_dest = result.getServer();
+ uint16_t result_port = result.getClientPort();
+ //printf(" %x %x %x \n",result_src,result_dest,result_port);
+ EXPECT_EQ(result_src, (uint32_t)(0x10000001+(i/10)));
+ EXPECT_EQ(result_dest, (uint32_t)(((0x30000001+ (i/10) )) ));
+ EXPECT_EQ(result_port, 1024+(i%10));
+ }
+
+ template_1.Delete();
+ gen.Delete();
+}
+
+
+TEST(tuple_gen_yaml,yam_reader1) {
+
+ CTupleGenYamlInfo fi;
+
+ try {
+ std::ifstream fin((char *)"cap2/tuple_gen.yaml");
+ YAML::Parser parser(fin);
+ YAML::Node doc;
+
+ parser.GetNextDocument(doc);
+ for(unsigned i=0;i<doc.size();i++) {
+ doc[i] >> fi;
+ break;
+ }
+ } catch ( const std::exception& e ) {
+ std::cout << e.what() << "\n";
+ exit(-1);
+ }
+ fi.Dump(stdout);
+}
+
+TEST(tuple_gen_yaml,yam_is_valid) {
+
+ CTupleGenYamlInfo fi;
+
+ fi.m_clients_ip_start = 0x10000001;
+ fi.m_clients_ip_end = 0x100000ff;
+
+ fi.m_servers_ip_start = 0x10000001;
+ fi.m_servers_ip_end = 0x100001ff;
+
+ EXPECT_EQ(fi.is_valid(8,true)?1:0, 1);
+ EXPECT_EQ(fi.m_servers_ip_start, 0x10000001);
+ EXPECT_EQ(fi.m_servers_ip_end, 0x100001fe);
+
+ printf(" start:%x end:%x \n",fi.m_servers_ip_start,fi.m_servers_ip_end);
+
+ fi.m_clients_ip_start = 0x10000001;
+ fi.m_clients_ip_end = 0x100000ff;
+
+ fi.m_servers_ip_start = 0x10000001;
+ fi.m_servers_ip_end = 0x10000009;
+
+ EXPECT_EQ(fi.is_valid(8,true)?1:0, 0);
+
+ fi.m_clients_ip_start = 0x10000001;
+ fi.m_clients_ip_end = 0x100000ff;
+
+ fi.m_servers_ip_start = 0x10000001;
+ fi.m_servers_ip_end = 0x100003ff;
+
+ EXPECT_EQ(fi.is_valid(8,true)?1:0, 1);
+ EXPECT_EQ(fi.m_servers_ip_start, 0x10000001);
+ EXPECT_EQ(fi.m_servers_ip_end, 0x100003fc);
+
+ printf(" start:%x end:%x \n",fi.m_servers_ip_start,fi.m_servers_ip_end);
+
+
+}
+
+
+
+
+
+
+
+/*GTEST_API_ int main(int argc, char **argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+} */
diff --git a/src/l2fwd/main.c b/src/l2fwd/main.c
new file mode 100755
index 00000000..e684234b
--- /dev/null
+++ b/src/l2fwd/main.c
@@ -0,0 +1,715 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
+
+#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define NB_MBUF 8192
+
+#define MAX_PKT_BURST 32
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_TX_DESC_DEFAULT 512
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
+
+/* mask of enabled ports */
+static uint32_t l2fwd_enabled_port_mask = 0;
+
+/* list of enabled ports */
+static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
+
+static unsigned int l2fwd_rx_queue_per_lcore = 1;
+
+struct mbuf_table {
+ unsigned len;
+ struct rte_mbuf *m_table[MAX_PKT_BURST];
+};
+
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+struct lcore_queue_conf {
+ unsigned n_rx_port;
+ unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+ struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
+
+} __rte_cache_aligned;
+struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+static const struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .split_hdr_size = 0,
+ .header_split = 0, /**< Header Split disabled */
+ .hw_ip_checksum = 0, /**< IP checksum offload disabled */
+ .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+ .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
+ .hw_strip_crc = 0, /**< CRC stripping by hardware disabled */
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+};
+
+struct rte_mempool * l2fwd_pktmbuf_pool = NULL;
+
+/* Per-port statistics struct */
+struct l2fwd_port_statistics {
+ uint64_t tx;
+ uint64_t rx;
+ uint64_t dropped;
+} __rte_cache_aligned;
+struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
+
+/* A tsc-based timer responsible for triggering statistics printout */
+#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
+#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
+
+/* Print out statistics on packets dropped */
+static void
+print_stats(void)
+{
+ uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+ unsigned portid;
+
+ total_packets_dropped = 0;
+ total_packets_tx = 0;
+ total_packets_rx = 0;
+
+ const char clr[] = { 27, '[', '2', 'J', '\0' };
+ const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, topLeft);
+
+ printf("\nPort statistics ====================================");
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ /* skip disabled ports */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ printf("\nStatistics for port %u ------------------------------"
+ "\nPackets sent: %24"PRIu64
+ "\nPackets received: %20"PRIu64
+ "\nPackets dropped: %21"PRIu64,
+ portid,
+ port_statistics[portid].tx,
+ port_statistics[portid].rx,
+ port_statistics[portid].dropped);
+
+ total_packets_dropped += port_statistics[portid].dropped;
+ total_packets_tx += port_statistics[portid].tx;
+ total_packets_rx += port_statistics[portid].rx;
+ }
+ printf("\nAggregate statistics ==============================="
+ "\nTotal packets sent: %18"PRIu64
+ "\nTotal packets received: %14"PRIu64
+ "\nTotal packets dropped: %15"PRIu64,
+ total_packets_tx,
+ total_packets_rx,
+ total_packets_dropped);
+ printf("\n====================================================\n");
+}
+
+/* Send the burst of packets on an output interface */
+static int
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+{
+ struct rte_mbuf **m_table;
+ unsigned ret;
+ unsigned queueid =0;
+
+ m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+
+ ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n);
+ port_statistics[port].tx += ret;
+ if (unlikely(ret < n)) {
+ port_statistics[port].dropped += (n - ret);
+ do {
+ rte_pktmbuf_free(m_table[ret]);
+ } while (++ret < n);
+ }
+
+ return 0;
+}
+
+/* Enqueue packets for TX and prepare them to be sent */
+static int
+l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
+{
+ unsigned lcore_id, len;
+ struct lcore_queue_conf *qconf;
+
+ lcore_id = rte_lcore_id();
+
+ qconf = &lcore_queue_conf[lcore_id];
+ len = qconf->tx_mbufs[port].len;
+ qconf->tx_mbufs[port].m_table[len] = m;
+ len++;
+
+ /* enough pkts to be sent */
+ if (unlikely(len == MAX_PKT_BURST)) {
+ l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
+ len = 0;
+ }
+
+ qconf->tx_mbufs[port].len = len;
+ return 0;
+}
+
+static void
+l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
+{
+ struct ether_hdr *eth;
+ void *tmp;
+ unsigned dst_port;
+
+ dst_port = l2fwd_dst_ports[portid];
+ eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+ /* 02:00:00:00:00:xx */
+ tmp = &eth->d_addr.addr_bytes[0];
+ *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
+
+ /* src addr */
+ ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);
+
+ l2fwd_send_packet(m, (uint8_t) dst_port);
+}
+
+/* main processing loop */
+static void
+l2fwd_main_loop(void)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *m;
+ unsigned lcore_id;
+ uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
+ unsigned i, j, portid, nb_rx;
+ struct lcore_queue_conf *qconf;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
+
+ prev_tsc = 0;
+ timer_tsc = 0;
+
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_queue_conf[lcore_id];
+
+ if (qconf->n_rx_port == 0) {
+ RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ portid = qconf->rx_port_list[i];
+ RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
+ portid);
+ }
+
+ while (1) {
+
+ cur_tsc = rte_rdtsc();
+
+ /*
+ * TX burst queue drain
+ */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ if (qconf->tx_mbufs[portid].len == 0)
+ continue;
+ l2fwd_send_burst(&lcore_queue_conf[lcore_id],
+ qconf->tx_mbufs[portid].len,
+ (uint8_t) portid);
+ qconf->tx_mbufs[portid].len = 0;
+ }
+
+ /* if timer is enabled */
+ if (timer_period > 0) {
+
+ /* advance the timer */
+ timer_tsc += diff_tsc;
+
+ /* if timer has reached its timeout */
+ if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
+
+ /* do this only on master core */
+ if (lcore_id == rte_get_master_lcore()) {
+ print_stats();
+ /* reset the timer */
+ timer_tsc = 0;
+ }
+ }
+ }
+
+ prev_tsc = cur_tsc;
+ }
+
+ /*
+ * Read packet from RX queues
+ */
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ portid = qconf->rx_port_list[i];
+ nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ pkts_burst, MAX_PKT_BURST);
+
+ port_statistics[portid].rx += nb_rx;
+
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
+ rte_prefetch0(rte_pktmbuf_mtod(m, void *));
+ l2fwd_simple_forward(m, portid);
+ }
+ }
+ }
+}
+
+static int
+l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
+{
+ l2fwd_main_loop();
+ return 0;
+}
+
+/* display usage */
+static void
+l2fwd_usage(const char *prgname)
+{
+ printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
+ " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+ " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
+ " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
+ prgname);
+}
+
+static int
+l2fwd_parse_portmask(const char *portmask)
+{
+ char *end = NULL;
+ unsigned long pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(portmask, &end, 16);
+ if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (pm == 0)
+ return -1;
+
+ return pm;
+}
+
+static unsigned int
+l2fwd_parse_nqueue(const char *q_arg)
+{
+ char *end = NULL;
+ unsigned long n;
+
+ /* parse decimal string */
+ n = strtoul(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return 0;
+ if (n == 0)
+ return 0;
+ if (n >= MAX_RX_QUEUE_PER_LCORE)
+ return 0;
+
+ return n;
+}
+
+static int
+l2fwd_parse_timer_period(const char *q_arg)
+{
+ char *end = NULL;
+ int n;
+
+ /* parse number string */
+ n = strtol(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+ if (n >= MAX_TIMER_PERIOD)
+ return -1;
+
+ return n;
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+l2fwd_parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+ static struct option lgopts[] = {
+ {NULL, 0, 0, 0}
+ };
+
+ argvopt = argv;
+
+ while ((opt = getopt_long(argc, argvopt, "p:q:T:",
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ /* portmask */
+ case 'p':
+ l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
+ if (l2fwd_enabled_port_mask == 0) {
+ printf("invalid portmask\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* nqueue */
+ case 'q':
+ l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
+ if (l2fwd_rx_queue_per_lcore == 0) {
+ printf("invalid queue number\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* timer period */
+ case 'T':
+ timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
+ if (timer_period < 0) {
+ printf("invalid timer period\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* long options */
+ case 0:
+ l2fwd_usage(prgname);
+ return -1;
+
+ default:
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ ret = optind-1;
+ optind = 0; /* reset getopt lib */
+ return ret;
+}
+
+/* Check the link status of all ports in up to 9s, and print them finally */
+static void
+check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint8_t portid, count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+
+ printf("\nChecking link status");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ all_ports_up = 1;
+ for (portid = 0; portid < port_num; portid++) {
+ if ((port_mask & (1 << portid)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get_nowait(portid, &link);
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf("Port %d Link Up - speed %u "
+ "Mbps - %s\n", (uint8_t)portid,
+ (unsigned)link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex\n"));
+ else
+ printf("Port %d Link Down\n",
+ (uint8_t)portid);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == 0) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ struct lcore_queue_conf *qconf;
+ struct rte_eth_dev_info dev_info;
+ int ret;
+ uint8_t nb_ports;
+ uint8_t nb_ports_available;
+ uint8_t portid, last_port;
+ unsigned lcore_id, rx_lcore_id;
+ unsigned nb_ports_in_mask = 0;
+
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+ argc -= ret;
+ argv += ret;
+
+ /* parse application arguments (after the EAL ones) */
+ ret = l2fwd_parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+
+ /* create the mbuf pool */
+ l2fwd_pktmbuf_pool =
+ rte_mempool_create("mbuf_pool", NB_MBUF,
+ MBUF_SIZE, 32,
+ sizeof(struct rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, NULL,
+ rte_pktmbuf_init, NULL,
+ rte_socket_id(), 0);
+ if (l2fwd_pktmbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+
+ nb_ports = rte_eth_dev_count();
+ if (nb_ports == 0)
+ rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
+ if (nb_ports > RTE_MAX_ETHPORTS)
+ nb_ports = RTE_MAX_ETHPORTS;
+
+ /* reset l2fwd_dst_ports */
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
+ l2fwd_dst_ports[portid] = 0;
+ last_port = 0;
+
+ /*
+ * Each logical core is assigned a dedicated TX queue on each port.
+ */
+ for (portid = 0; portid < nb_ports; portid++) {
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ if (nb_ports_in_mask % 2) {
+ l2fwd_dst_ports[portid] = last_port;
+ l2fwd_dst_ports[last_port] = portid;
+ }
+ else
+ last_port = portid;
+
+ nb_ports_in_mask++;
+
+ rte_eth_dev_info_get(portid, &dev_info);
+ }
+ if (nb_ports_in_mask % 2) {
+ printf("Notice: odd number of ports in portmask.\n");
+ l2fwd_dst_ports[last_port] = last_port;
+ }
+
+ rx_lcore_id = 0;
+ qconf = NULL;
+
+ /* Initialize the port/queue configuration of each logical core */
+ for (portid = 0; portid < nb_ports; portid++) {
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ /* get the lcore_id for this port */
+ while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+ lcore_queue_conf[rx_lcore_id].n_rx_port ==
+ l2fwd_rx_queue_per_lcore) {
+ rx_lcore_id++;
+ if (rx_lcore_id >= RTE_MAX_LCORE)
+ rte_exit(EXIT_FAILURE, "Not enough cores\n");
+ }
+
+ if (qconf != &lcore_queue_conf[rx_lcore_id])
+ /* Assigned a new logical core in the loop above. */
+ qconf = &lcore_queue_conf[rx_lcore_id];
+
+ qconf->rx_port_list[qconf->n_rx_port] = portid;
+ qconf->n_rx_port++;
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
+ }
+
+ nb_ports_available = nb_ports;
+
+ /* Initialise each port */
+ for (portid = 0; portid < nb_ports; portid++) {
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
+ printf("Skipping disabled port %u\n", (unsigned) portid);
+ nb_ports_available--;
+ continue;
+ }
+ /* init port */
+ printf("Initializing port %u... ", (unsigned) portid);
+ fflush(stdout);
+ ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
+ rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
+
+ /* init one RX queue */
+ fflush(stdout);
+ ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
+ rte_eth_dev_socket_id(portid),
+ NULL,
+ l2fwd_pktmbuf_pool);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
+ /* init one TX queue on each port */
+ fflush(stdout);
+ ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
+ rte_eth_dev_socket_id(portid),
+ NULL);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
+ /* Start device */
+ ret = rte_eth_dev_start(portid);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
+ ret, (unsigned) portid);
+
+ printf("done: \n");
+
+ rte_eth_promiscuous_enable(portid);
+
+ printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+ (unsigned) portid,
+ l2fwd_ports_eth_addr[portid].addr_bytes[0],
+ l2fwd_ports_eth_addr[portid].addr_bytes[1],
+ l2fwd_ports_eth_addr[portid].addr_bytes[2],
+ l2fwd_ports_eth_addr[portid].addr_bytes[3],
+ l2fwd_ports_eth_addr[portid].addr_bytes[4],
+ l2fwd_ports_eth_addr[portid].addr_bytes[5]);
+
+ /* initialize port stats */
+ memset(&port_statistics, 0, sizeof(port_statistics));
+ }
+
+ if (!nb_ports_available) {
+ rte_exit(EXIT_FAILURE,
+ "All available ports are disabled. Please set portmask.\n");
+ }
+
+ check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);
+
+ /* launch per-lcore init on every lcore */
+ rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (rte_eal_wait_lcore(lcore_id) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
diff --git a/src/main.cpp b/src/main.cpp
new file mode 100755
index 00000000..96789cdd
--- /dev/null
+++ b/src/main.cpp
@@ -0,0 +1,783 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include "bp_sim.h"
+#include "os_time.h"
+
+
+#include <common/arg/SimpleGlob.h>
+#include <common/arg/SimpleOpt.h>
+
+// An enum for all the option types
+enum { OPT_HELP, OPT_CFG, OPT_NODE_DUMP, OP_STATS,
+ OPT_FILE_OUT, OPT_UT, OPT_PCAP, OPT_IPV6, OPT_MAC_FILE};
+
+
+/* these are the argument types:
+ SO_NONE -- no argument needed
+ SO_REQ_SEP -- single required argument
+ SO_MULTI -- multiple arguments needed
+*/
+static CSimpleOpt::SOption parser_options[] =
+{
+ { OPT_HELP, "-?", SO_NONE },
+ { OPT_HELP, "-h", SO_NONE },
+ { OPT_HELP, "--help", SO_NONE },
+ { OPT_UT, "--ut", SO_NONE },
+ { OP_STATS, "-s", SO_NONE },
+ { OPT_CFG, "-f", SO_REQ_SEP},
+ { OPT_MAC_FILE, "--mac", SO_REQ_SEP},
+ { OPT_FILE_OUT , "-o", SO_REQ_SEP },
+ { OPT_NODE_DUMP , "-v", SO_REQ_SEP },
+ { OPT_PCAP, "--pcap", SO_NONE },
+ { OPT_IPV6, "--ipv6", SO_NONE },
+
+
+ SO_END_OF_OPTIONS
+};
+
+
+
+
+static int usage(){
+
+ printf(" Usage: bp_sim [OPTION] -f cfg.yaml -o outfile.erf \n");
+ printf(" \n");
+ printf(" \n");
+ printf(" options \n");
+ printf(" -v [1-3] verbose mode \n");
+ printf(" 1 show only stats \n");
+ printf(" 2 run preview do not write to file \n");
+ printf(" 3 run preview write stats file \n");
+ printf(" Note in case of verbose mode you don't need to add the output file \n");
+ printf(" \n");
+ printf(" Warning : This program can generate huge-files (TB ) watch out! try this only on local drive \n");
+ printf(" \n");
+ printf(" --pcap export the file in pcap mode \n");
+ printf(" Examples: ");
+ printf(" 1) preview show csv stats \n");
+ printf(" #>bp_sim -f cfg.yaml -v 1 \n");
+ printf(" \n ");
+ printf(" 2) more detail preview preview show csv stats \n");
+ printf(" #>bp_sim -f cfg.yaml -v 2 \n");
+ printf(" \n ");
+ printf(" 3) more detail preview plus stats \n");
+ printf(" #>bp_sim -f cfg.yaml -v 3 \n");
+ printf(" \n ");
+ printf(" 4) do the job ! \n");
+ printf(" #>bp_sim -f cfg.yaml -o outfile.erf \n");
+ printf("\n");
+ printf("\n");
+ printf(" Copyright (C) 2015 by hhaim Cisco-System for IL dev-test \n");
+ printf(" version : 1.0 beta \n");
+ return (0);
+}
+
+int gtest_main(int argc, char **argv) ;
+
+static int parse_options(int argc, char *argv[], CParserOption* po ) {
+ CSimpleOpt args(argc, argv, parser_options);
+
+ int a=0;
+ int node_dump=0;
+ po->preview.clean();
+ po->preview.setFileWrite(true);
+ int res1;
+
+ while ( args.Next() ){
+ if (args.LastError() == SO_SUCCESS) {
+ switch (args.OptionId()) {
+ case OPT_UT :
+ res1=gtest_main(argc, argv);
+ exit(res1);
+ break;
+ case OPT_HELP:
+ usage();
+ return -1;
+ case OPT_CFG:
+ po->cfg_file = args.OptionArg();
+ break;
+ case OPT_MAC_FILE:
+ po->mac_file = args.OptionArg();
+ break;
+ case OPT_FILE_OUT:
+ po->out_file = args.OptionArg();
+ break;
+ case OPT_IPV6:
+ po->preview.set_ipv6_mode_enable(true);
+ break;
+ case OPT_NODE_DUMP:
+ a=atoi(args.OptionArg());
+ node_dump=1;
+ po->preview.setFileWrite(false);
+ break;
+ case OPT_PCAP:
+ po->preview.set_pcap_mode_enable(true);
+ break;
+ default:
+ usage();
+ return -1;
+ break;
+ } // End of switch
+ }// End of IF
+ else {
+ usage();
+ return -1;
+ }
+ } // End of while
+
+
+ if ((po->cfg_file =="") ) {
+ printf("Invalid combination of parameters you must add -f with configuration file \n");
+ usage();
+ return -1;
+ }
+
+ if ( node_dump ){
+ po->preview.setVMode(a);
+ }else{
+ if (po->out_file=="" ){
+ printf("Invalid combination of parameters you must give output file iwth -o \n");
+ usage();
+ return -1;
+ }
+ }
+ return 0;
+}
+
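+/* Number of simulator threads spawned by the Linux pthread test path below. */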
+int cores=1;
+
+/*
+
+int curent_time(){
+
+ time_init();
+
+ int i;
+ for (i=0; i<100000000; i++){
+ now=now_sec();
+ }
+ return (0);
+}*/
+
+#ifdef LINUX
+
+
+
+#include <pthread.h>
+
+void delay(int msec){
+
+ if (msec == 0)
+ {//a caller that requested 0 probably wanted the minimal delay,
+ //but due to a scaling problem it came out as 0, so give the minimum delay instead
+ //printf("\n\n\nERROR-Task delay ticks == 0 found in task %s task id = %d\n\n\n\n",
+ // SANB_TaskName(SANB_TaskIdSelf()), SANB_TaskIdSelf());
+ msec =1;
+
+ }
+
+ struct timespec time1, remain; // 2 sec max delay
+ time1.tv_sec=msec/1000;
+ time1.tv_nsec=(msec - (time1.tv_sec*1000))*1000000;
+
+ nanosleep(&time1,&remain);
+}
+
+
+struct per_thread_t {
+ pthread_t tid;
+};
+
+#define MAX_THREADS 200
+static per_thread_t tr_info[MAX_THREADS];
+
+
+//////////////
+
+struct test_t_info1 {
+ CPreviewMode * preview_info;
+ CFlowGenListPerThread * thread_info;
+ uint32_t thread_id;
+};
+
+void * thread_task(void *info){
+
+ test_t_info1 * obj =(test_t_info1 *)info;
+
+ CFlowGenListPerThread * lpt=obj->thread_info;
+
+ printf("start thread %d \n",obj->thread_id);
+ //delay(obj->thread_id *3000);
+ printf("-->start thread %d \n",obj->thread_id);
+ if (1/*obj->thread_id ==3*/) {
+
+ char buf[100];
+ sprintf(buf,"my%d.erf",obj->thread_id);
+ volatile int i;
+ lpt->generate_erf(buf,*obj->preview_info);
+ lpt->m_node_gen.DumpHist(stdout);
+ printf("end thread %d \n",obj->thread_id);
+ }
+ return NULL;
+}
+
+
+void test_load_list_of_cap_files_linux(CParserOption * op){
+
+ CFlowGenList fl;
+ //CNullIF erf_vif;
+ //CErfIF erf_vif;
+
+ fl.Create();
+
+ fl.load_from_yaml(op->cfg_file,cores);
+ fl.DumpPktSize();
+
+
+ fl.generate_p_thread_info(cores);
+ CFlowGenListPerThread * lpt;
+
+ /* set the ERF file */
+ //fl.set_vif_all(&erf_vif);
+
+ int i;
+ for (i=0; i<cores; i++) {
+ lpt=fl.m_threads_info[i];
+ test_t_info1 * obj = new test_t_info1();
+ obj->preview_info =&op->preview;
+ obj->thread_info = fl.m_threads_info[i];
+ obj->thread_id = i;
+ CNullIF * erf_vif = new CNullIF();
+ //CErfIF * erf_vif = new CErfIF();
+
+ lpt->set_vif(erf_vif);
+
+ assert(pthread_create( &tr_info[i].tid, NULL, thread_task, obj)==0);
+ }
+
+ for (i=0; i<cores; i++) {
+ /* wait for all of them to stop */
+ assert(pthread_join((pthread_t)tr_info[i].tid,NULL )==0);
+ }
+
+ printf("compare files \n");
+ for (i=1; i<cores; i++) {
+
+ CErfCmp cmp;
+ char buf[100];
+ sprintf(buf,"my%d.erf",i);
+ char buf1[100];
+ sprintf(buf1,"my%d.erf",0);
+ if ( cmp.compare(std::string(buf),std::string(buf1)) != true ) {
+ printf(" ERROR cap file is not ex !! \n");
+ assert(0);
+ }
+ printf(" thread %d is ok \n",i);
+ }
+
+ fl.Delete();
+}
+
+
+#endif
+
+/*************************************************************/
+void test_load_list_of_cap_files(CParserOption * op){
+
+ CFlowGenList fl;
+ CNullIF erf_vif;
+
+ fl.Create();
+
+ #define NUM 1
+
+ fl.load_from_yaml(op->cfg_file,NUM);
+ fl.DumpPktSize();
+
+
+ fl.generate_p_thread_info(NUM);
+ CFlowGenListPerThread * lpt;
+
+ /* set the ERF file */
+ //fl.set_vif_all(&erf_vif);
+
+ int i;
+ for (i=0; i<NUM; i++) {
+ lpt=fl.m_threads_info[i];
+ char buf[100];
+ sprintf(buf,"my%d.erf",i);
+ lpt->generate_erf(buf,op->preview);
+ lpt->m_node_gen.DumpHist(stdout);
+ }
+ //sprintf(buf,"my%d.erf",7);
+ //lpt=fl.m_threads_info[7];
+
+ //fl.Dump(stdout);
+ fl.Delete();
+}
+
+int load_list_of_cap_files(CParserOption * op){
+ CFlowGenList fl;
+ fl.Create();
+ fl.load_from_yaml(op->cfg_file,1);
+ if ( op->preview.getVMode() >0 ) {
+ fl.DumpCsv(stdout);
+ }
+ uint32_t start= os_get_time_msec();
+
+ CErfIF erf_vif;
+ //CNullIF erf_vif;
+
+ fl.generate_p_thread_info(1);
+ CFlowGenListPerThread * lpt;
+ lpt=fl.m_threads_info[0];
+ lpt->set_vif(&erf_vif);
+
+ if ( (op->preview.getVMode() >1) || op->preview.getFileWrite() ) {
+ lpt->generate_erf(op->out_file,op->preview);
+ }
+
+ lpt->m_node_gen.DumpHist(stdout);
+
+ uint32_t stop= os_get_time_msec();
+    printf(" delta time = %u, freq = %u \n",stop-start,os_get_time_freq());
+ fl.Delete();
+ return (0);
+}
+
+
+int test_dns(){
+
+ time_init();
+ CGlobalInfo::init_pools(1000);
+
+ CParserOption po ;
+
+ //po.cfg_file = "cap2/dns.yaml";
+ //po.cfg_file = "cap2/sfr3.yaml";
+ po.cfg_file = "cap2/sfr.yaml";
+
+ po.preview.setVMode(0);
+ po.preview.setFileWrite(true);
+ #ifdef LINUX
+ test_load_list_of_cap_files_linux(&po);
+ #else
+ test_load_list_of_cap_files(&po);
+ #endif
+ return (0);
+}
+
+void test_pkt_mbuf(void);
+
+void test_compare_files(void);
+
+#if 0
+static int b=0;
+static int c=0;
+static int d=0;
+
+int test_instructions(){
+ int i;
+ for (i=0; i<100000;i++) {
+ b+=b+1;
+ c+=+b+c+1;
+ d+=+(b*2+1);
+ }
+ return (b+c+d);
+}
+
+#include <valgrind/callgrind.h>
+#endif
+
+
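+/* after packet pkt_id grew by size_change bytes, fix up the rest of the flow:
+   bump the TCP sequence number of later packets sent from the same side and the
+   ack number of packets from the opposite side (nothing to do for UDP flows) */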
+void update_tcp_seq_num(CCapFileFlowInfo * obj,
+ int pkt_id,
+ int size_change){
+ CFlowPktInfo * pkt=obj->GetPacket(pkt_id);
+ if ( pkt->m_pkt_indication.m_desc.IsUdp() ){
+ /* nothing to do */
+ return;
+ }
+
+ bool o_init=pkt->m_pkt_indication.m_desc.IsInitSide();
+ TCPHeader * tcp ;
+ int s= (int)obj->Size();
+ int i;
+
+ for (i=pkt_id+1; i<s; i++) {
+ uint32_t seq;
+ uint32_t ack;
+
+ pkt=obj->GetPacket(i);
+ tcp=pkt->m_pkt_indication.l4.m_tcp;
+ bool init=pkt->m_pkt_indication.m_desc.IsInitSide();
+ if (init == o_init) {
+ /* same dir update the seq number */
+ tcp->setSeqNumber (tcp->getSeqNumber ()+size_change);
+
+ }else{
+ /* update the ack number */
+ tcp->setAckNumber (tcp->getAckNumber ()+size_change);
+ }
+ }
+}
+
+
+
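+/* enlarge packet pkt_id by 'size' bytes: append payload, keep the trailing CRC bytes,
+   patch the IPv4 total-length field and fix the TCP seq/ack numbers that follow */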
+void change_pkt_len(CCapFileFlowInfo * obj,int pkt_id, int size ){
+ CFlowPktInfo * pkt=obj->GetPacket(pkt_id);
+
+    /* enlarge the packet by 'size' bytes */
+
+ char * p=pkt->m_packet->append(size);
+    /* fill the added bytes with 0x0a */
+ memmove(p+size-4,p-4,4); /* CRCbytes */
+ memset(p-4,0x0a,size);
+
+ /* refresh the pointers */
+ pkt->m_pkt_indication.RefreshPointers();
+
+ IPHeader * ipv4 = pkt->m_pkt_indication.l3.m_ipv4;
+ ipv4->updateTotalLength (ipv4->getTotalLength()+size );
+
+ /* update seq numbers if needed */
+ update_tcp_seq_num(obj,pkt_id,size);
+}
+
+void dump_tcp_seq_num_(CCapFileFlowInfo * obj){
+ int s= (int)obj->Size();
+ int i;
+ uint32_t i_seq;
+ uint32_t r_seq;
+
+ CFlowPktInfo * pkt=obj->GetPacket(0);
+ TCPHeader * tcp = pkt->m_pkt_indication.l4.m_tcp;
+ i_seq=tcp->getSeqNumber ();
+
+ pkt=obj->GetPacket(1);
+ tcp = pkt->m_pkt_indication.l4.m_tcp;
+ r_seq=tcp->getSeqNumber ();
+
+ for (i=2; i<s; i++) {
+ uint32_t seq;
+ uint32_t ack;
+
+ pkt=obj->GetPacket(i);
+ tcp=pkt->m_pkt_indication.l4.m_tcp;
+ bool init=pkt->m_pkt_indication.m_desc.IsInitSide();
+ seq=tcp->getSeqNumber ();
+ ack=tcp->getAckNumber ();
+ if (init) {
+ seq=seq-i_seq;
+ ack=ack-r_seq;
+ }else{
+ seq=seq-r_seq;
+ ack=ack-i_seq;
+ }
+ printf(" %4d ",i);
+ if (!init) {
+ printf(" ");
+ }
+ printf(" %s seq: %4d ack : %4d \n",init?"I":"R",seq,ack);
+ }
+}
+
+
+int manipolate_capfile() {
+ time_init();
+ CGlobalInfo::init_pools(1000);
+
+ CCapFileFlowInfo flow_info;
+ flow_info.Create();
+
+ int res=flow_info.load_cap_file("avl/delay_10_rtsp_0.pcap",0,0);
+
+ change_pkt_len(&flow_info,4-1 ,6);
+ change_pkt_len(&flow_info,5-1 ,6);
+ change_pkt_len(&flow_info,6-1 ,6+2);
+ change_pkt_len(&flow_info,7-1 ,4);
+ change_pkt_len(&flow_info,8-1 ,6+2);
+ change_pkt_len(&flow_info,9-1 ,4);
+ change_pkt_len(&flow_info,10-1,6);
+ change_pkt_len(&flow_info,13-1,6);
+ change_pkt_len(&flow_info,16-1,6);
+ change_pkt_len(&flow_info,19-1,6);
+
+ flow_info.save_to_erf("exp/c.pcap",1);
+
+ return (1);
+}
+
+int manipolate_capfile_sip() {
+ time_init();
+ CGlobalInfo::init_pools(1000);
+
+ CCapFileFlowInfo flow_info;
+ flow_info.Create();
+
+ int res=flow_info.load_cap_file("avl/delay_10_sip_0.pcap",0,0);
+
+ change_pkt_len(&flow_info,1-1 ,6+6);
+ change_pkt_len(&flow_info,2-1 ,6+6);
+
+ flow_info.save_to_erf("exp/delay_10_sip_0_fixed.pcap",1);
+
+ return (1);
+}
+
+int manipolate_capfile_sip1() {
+ time_init();
+ CGlobalInfo::init_pools(1000);
+
+ CCapFileFlowInfo flow_info;
+ flow_info.Create();
+
+ int res=flow_info.load_cap_file("avl/delay_sip_0.pcap",0,0);
+ CFlowPktInfo * pkt=flow_info.GetPacket(1);
+
+ change_pkt_len(&flow_info,1-1 ,6+6+10);
+
+ change_pkt_len(&flow_info,2-1 ,6+6+10);
+
+ flow_info.save_to_erf("exp/delay_sip_0_fixed_1.pcap",1);
+
+ return (1);
+}
+
+
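+/* merge helper: a cursor over one cap file, shifting every packet timestamp by
+   m_offset relative to the file's first packet, with an optional packet limit */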
+class CMergeCapFileRec {
+public:
+
+ CCapFileFlowInfo m_cap;
+
+ int m_index;
+ int m_limit_number_of_packets; /* limit number of packets */
+ bool m_stop; /* Do we have more packets */
+
+ double m_offset; /* offset should be positive */
+ double m_start_time;
+
+public:
+ bool Create(std::string cap_file,double offset);
+ void Delete();
+ void IncPacket();
+ bool GetCurPacket(double & time);
+ CPacketIndication * GetUpdatedPacket();
+
+ void Dump(FILE *fd,int _id);
+};
+
+
+void CMergeCapFileRec::Dump(FILE *fd,int _id){
+ double time;
+ bool stop=GetCurPacket(time);
+ fprintf (fd," id:%2d stop : %d index:%4d %3.4f \n",_id,stop?1:0,m_index,time);
+}
+
+
+CPacketIndication * CMergeCapFileRec::GetUpdatedPacket(){
+ double t1;
+ assert(GetCurPacket(t1)==false);
+ CFlowPktInfo * pkt = m_cap.GetPacket(m_index);
+ pkt->m_pkt_indication.m_packet->set_new_time(t1);
+ return (&pkt->m_pkt_indication);
+}
+
+
+bool CMergeCapFileRec::GetCurPacket(double & time){
+ if (m_stop) {
+ return(true);
+ }
+ CFlowPktInfo * pkt = m_cap.GetPacket(m_index);
+ time= (pkt->m_packet->get_time() -m_start_time + m_offset);
+ return (false);
+}
+
+void CMergeCapFileRec::IncPacket(){
+ m_index++;
+ if ( (m_limit_number_of_packets) && (m_index > m_limit_number_of_packets ) ) {
+ m_stop=true;
+ return;
+ }
+
+ if ( m_index == (int)m_cap.Size() ) {
+ m_stop=true;
+ }
+}
+
+void CMergeCapFileRec::Delete(){
+ m_cap.Delete();
+}
+
+bool CMergeCapFileRec::Create(std::string cap_file,
+ double offset){
+ m_cap.Create();
+ m_cap.load_cap_file(cap_file,0,0);
+ CFlowPktInfo * pkt = m_cap.GetPacket(0);
+
+ m_index=0;
+ m_stop=false;
+ m_limit_number_of_packets =0;
+ m_start_time = pkt->m_packet->get_time() ;
+ m_offset = offset;
+    return (true);
+}
+
+
+
+#define MERGE_CAP_FILES (2)
+
+class CMergeCapFile {
+public:
+ bool Create();
+ void Delete();
+ bool run_merge(std::string to_cap_file);
+private:
+ void append(int _cap_id);
+
+public:
+ CMergeCapFileRec m[MERGE_CAP_FILES];
+ CCapFileFlowInfo m_results;
+};
+
+bool CMergeCapFile::Create(){
+ m_results.Create();
+ return(true);
+}
+
+void CMergeCapFile::Delete(){
+ m_results.Delete();
+}
+
+void CMergeCapFile::append(int _cap_id){
+ CPacketIndication * lp=m[_cap_id].GetUpdatedPacket();
+ lp->m_packet->Dump(stdout,0);
+ m_results.Append(lp);
+}
+
+
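+/* merge loop: on each iteration pick the input whose current packet has the smallest
+   (offset-adjusted) timestamp, append it to m_results and advance that input, until
+   all inputs are exhausted; the merged stream is then written via save_to_erf() */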
+bool CMergeCapFile::run_merge(std::string to_cap_file){
+
+ int i=0;
+ int cnt=0;
+ while ( true ) {
+ int min_index=0;
+ double min_time;
+
+ fprintf(stdout," --------------\n",cnt);
+ fprintf(stdout," pkt : %d \n",cnt);
+ for (i=0; i<MERGE_CAP_FILES; i++) {
+ m[i].Dump(stdout,i);
+ }
+ fprintf(stdout," --------------\n",cnt);
+
+ bool valid = false;
+ for (i=0; i<MERGE_CAP_FILES; i++) {
+ double t1;
+ if ( m[i].GetCurPacket(t1) == false ){
+ /* not in stop */
+ if (!valid) {
+ min_time = t1;
+ min_index = i;
+ valid=true;
+ }else{
+ if (t1 < min_time) {
+ min_time=t1;
+ min_index = i;
+ }
+ }
+
+ }
+ }
+
+ /* nothing to do */
+ if (valid==false) {
+ fprintf(stdout,"nothing to do \n");
+ break;
+ }
+
+ cnt++;
+ fprintf(stdout," choose id %d \n",min_index);
+ append(min_index);
+ m[min_index].IncPacket();
+ };
+
+ m_results.save_to_erf(to_cap_file,1);
+    return (true);
+}
+
+
+
+int merge_3_cap_files() {
+ time_init();
+ CGlobalInfo::init_pools(1000);
+
+ CMergeCapFile merger;
+ merger.Create();
+ merger.m[0].Create("exp/c.pcap",0.001);
+ merger.m[1].Create("avl/delay_10_rtp_160k_0.pcap",0.31);
+ merger.m[2].Create("avl/delay_10_rtp_160k_1.pcap",0.311);
+
+ //merger.m[1].Create("avl/delay_10_rtp_250k_0_0.pcap",0.31);
+ //merger.m[1].m_limit_number_of_packets =6;
+ //merger.m[2].Create("avl/delay_10_rtp_250k_1_0.pcap",0.311);
+ //merger.m[2].m_limit_number_of_packets =6;
+
+ merger.run_merge("exp/delay_10_rtp_160k_full.pcap");
+
+ return (0);
+}
+
+int merge_2_cap_files_sip() {
+ time_init();
+ CGlobalInfo::init_pools(1000);
+
+ CMergeCapFile merger;
+ merger.Create();
+ merger.m[0].Create("exp/delay_sip_0_fixed_1.pcap",0.001);
+ merger.m[1].Create("avl/delay_video_call_rtp_0.pcap",0.51);
+ //merger.m[1].m_limit_number_of_packets=7;
+
+ //merger.m[1].Create("avl/delay_10_rtp_250k_0_0.pcap",0.31);
+ //merger.m[1].m_limit_number_of_packets =6;
+ //merger.m[2].Create("avl/delay_10_rtp_250k_1_0.pcap",0.311);
+ //merger.m[2].m_limit_number_of_packets =6;
+
+ merger.run_merge("avl/delay_10_sip_video_call_full.pcap");
+
+ return (0);
+}
+
+
+int main(int argc , char * argv[]){
+ time_init();
+ CGlobalInfo::m_socket.Create(0);
+
+ CGlobalInfo::init_pools(1000);
+ assert( CMsgIns::Ins()->Create(4) );
+
+ if ( parse_options(argc, argv, &CGlobalInfo::m_options ) != 0){
+ exit(-1);
+ }
+ return (load_list_of_cap_files(&CGlobalInfo::m_options));
+}
+
+
+
diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp
new file mode 100755
index 00000000..22ecc52f
--- /dev/null
+++ b/src/main_dpdk.cpp
@@ -0,0 +1,5041 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// DPDK c++ issue
+#define UINT8_MAX 255
+#define UINT16_MAX 0xFFFF
+// DPDK c++ issue
+
+#include <pwd.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_random.h>
+#include "bp_sim.h"
+#include "os_time.h"
+#include <common/arg/SimpleGlob.h>
+#include <common/arg/SimpleOpt.h>
+#include <common/basic_utils.h>
+
+extern "C" {
+ #include <dpdk_lib18/librte_pmd_ixgbe/ixgbe/ixgbe_type.h>
+}
+#include <dpdk_lib18/librte_pmd_e1000/e1000/e1000_regs.h>
+#include <zmq.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <assert.h>
+#include <pthread.h>
+#include "global_io_mode.h"
+#include "utl_term_io.h"
+#include "msg_manager.h"
+#include "platform_cfg.h"
+
+#define VERSION "1.73"
+
+
+#define RX_CHECK_MIX_SAMPLE_RATE 8
+#define RX_CHECK_MIX_SAMPLE_RATE_1G 2
+
+
+#define SOCKET0 0
+
+#define BP_MAX_PKT 32
+#define MAX_PKT_BURST 32
+
+
+#define BP_MAX_PORTS (MAX_LATENCY_PORTS)
+#define BP_MAX_CORES 32
+#define BP_MAX_TX_QUEUE 16
+#define BP_MASTER_AND_LATENCY 2
+
+#define RTE_TEST_RX_DESC_DEFAULT 64
+#define RTE_TEST_RX_LATENCY_DESC_DEFAULT (1*1024)
+
+#define RTE_TEST_RX_DESC_VM_DEFAULT 512
+#define RTE_TEST_TX_DESC_VM_DEFAULT 512
+
+typedef struct rte_mbuf * (*rte_mbuf_convert_to_one_seg_t)(struct rte_mbuf *m);
+struct rte_mbuf * rte_mbuf_convert_to_one_seg(struct rte_mbuf *m);
+extern "C" int vmxnet3_xmit_set_callback(rte_mbuf_convert_to_one_seg_t cb);
+
+
+
+#define RTE_TEST_TX_DESC_DEFAULT 512
+#define RTE_TEST_RX_DESC_DROP 0
+
+
+
+static inline int get_vm_one_queue_enable(){
+ return (CGlobalInfo::m_options.preview.get_vm_one_queue_enable() ?1:0);
+}
+
+static inline int get_is_latency_thread_enable(){
+ return (CGlobalInfo::m_options.is_latency_enabled() ?1:0);
+}
+
+
+
+struct port_cfg_t;
+class CPhyEthIF;
+class CPhyEthIFStats ;
+
+
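+/* per-NIC driver hooks: each supported DPDK PMD supplies its own FDIR/global
+   configuration, RX filter rules, drop-queue setup, extended statistics and
+   link-stabilization behaviour through this interface */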
+class CTRexExtendedDriverBase {
+public:
+ virtual int get_min_sample_rate(void)=0;
+ virtual void update_configuration(port_cfg_t * cfg)=0;
+ virtual void update_global_config_fdir(port_cfg_t * cfg)=0;
+
+ virtual bool is_hardware_filter_is_supported(){
+ return(false);
+ }
+ virtual int configure_rx_filter_rules(CPhyEthIF * _if)=0;
+
+ virtual bool is_hardware_support_drop_queue(){
+ return(false);
+ }
+
+ virtual int configure_drop_queue(CPhyEthIF * _if)=0;
+ virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats)=0;
+ virtual void clear_extended_stats(CPhyEthIF * _if)=0;
+ virtual int wait_for_stable_link()=0;
+};
+
+
+class CTRexExtendedDriverBase1G : public CTRexExtendedDriverBase {
+
+public:
+ CTRexExtendedDriverBase1G(){
+ }
+
+ static CTRexExtendedDriverBase * create(){
+ return ( new CTRexExtendedDriverBase1G() );
+ }
+
+ virtual void update_global_config_fdir(port_cfg_t * cfg);
+
+ virtual int get_min_sample_rate(void){
+ return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
+ }
+ virtual void update_configuration(port_cfg_t * cfg);
+
+ virtual bool is_hardware_filter_is_supported(){
+ return (true);
+ }
+
+ virtual int configure_rx_filter_rules(CPhyEthIF * _if);
+
+ virtual bool is_hardware_support_drop_queue(){
+ return(true);
+ }
+
+ virtual int configure_drop_queue(CPhyEthIF * _if);
+
+ virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
+
+ virtual void clear_extended_stats(CPhyEthIF * _if);
+
+ virtual int wait_for_stable_link();
+};
+
+class CTRexExtendedDriverBase1GVm : public CTRexExtendedDriverBase {
+
+public:
+ CTRexExtendedDriverBase1GVm(){
+ /* we are working in mode that we have 1 queue for rx and one queue for tx*/
+ CGlobalInfo::m_options.preview.set_vm_one_queue_enable(true);
+ }
+
+ static CTRexExtendedDriverBase * create(){
+ return ( new CTRexExtendedDriverBase1GVm() );
+ }
+
+ virtual void update_global_config_fdir(port_cfg_t * cfg){
+
+ }
+
+ virtual int get_min_sample_rate(void){
+ return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
+ }
+ virtual void update_configuration(port_cfg_t * cfg);
+
+ virtual bool is_hardware_filter_is_supported(){
+ return (true);
+ }
+
+ virtual int configure_rx_filter_rules(CPhyEthIF * _if);
+
+ virtual bool is_hardware_support_drop_queue(){
+ return(false);
+ }
+
+ virtual int configure_drop_queue(CPhyEthIF * _if);
+
+
+ virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
+
+ virtual void clear_extended_stats(CPhyEthIF * _if);
+
+ virtual int wait_for_stable_link();
+};
+
+
+class CTRexExtendedDriverBase10G : public CTRexExtendedDriverBase {
+public:
+ CTRexExtendedDriverBase10G(){
+ }
+ static CTRexExtendedDriverBase * create(){
+ return ( new CTRexExtendedDriverBase10G() );
+ }
+
+ virtual void update_global_config_fdir(port_cfg_t * cfg);
+
+ virtual int get_min_sample_rate(void){
+ return (RX_CHECK_MIX_SAMPLE_RATE);
+ }
+ virtual void update_configuration(port_cfg_t * cfg);
+
+ virtual bool is_hardware_filter_is_supported(){
+ return (true);
+ }
+
+ virtual int configure_rx_filter_rules(CPhyEthIF * _if);
+
+ virtual bool is_hardware_support_drop_queue(){
+ return(true);
+ }
+ virtual int configure_drop_queue(CPhyEthIF * _if);
+
+ virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
+ virtual void clear_extended_stats(CPhyEthIF * _if);
+ virtual int wait_for_stable_link();
+};
+
+class CTRexExtendedDriverBase40G : public CTRexExtendedDriverBase10G {
+public:
+ CTRexExtendedDriverBase40G(){
+ }
+
+ static CTRexExtendedDriverBase * create(){
+ return ( new CTRexExtendedDriverBase40G() );
+ }
+
+ virtual void update_global_config_fdir(port_cfg_t * cfg){
+ }
+
+ virtual void update_configuration(port_cfg_t * cfg);
+
+ virtual int configure_rx_filter_rules(CPhyEthIF * _if);
+
+ virtual bool is_hardware_filter_is_supported(){
+ return (true);
+ }
+
+ virtual bool is_hardware_support_drop_queue(){
+ return(true);
+ }
+ virtual int configure_drop_queue(CPhyEthIF * _if);
+
+
+
+ virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
+ virtual void clear_extended_stats(CPhyEthIF * _if);
+ virtual int wait_for_stable_link();
+private:
+ void add_rules(CPhyEthIF * _if,
+ enum rte_eth_flow_type type,
+ uint8_t ttl);
+};
+
+typedef CTRexExtendedDriverBase * (*create_object_t) (void);
+
+
+class CTRexExtendedDriverRec {
+public:
+ std::string m_driver_name;
+ create_object_t m_constructor;
+};
+
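+/* singleton registry mapping a DPDK PMD driver name (e.g. rte_ixgbe_pmd) to the
+   factory of the matching CTRexExtendedDriverBase implementation; the driver name
+   must be set once at init time before get_drv() can be used */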
+class CTRexExtendedDriverDb {
+public:
+ bool is_driver_exists(std::string name);
+
+
+
+ void set_driver_name(std::string name){
+ m_driver_was_set=true;
+ m_driver_name=name;
+ printf(" set driver name %s \n",name.c_str());
+ m_drv=create_driver(m_driver_name);
+ assert(m_drv);
+ }
+
+ CTRexExtendedDriverBase * get_drv(){
+ if (!m_driver_was_set) {
+ printf(" ERROR too early to use this object !\n");
+ printf(" need to set the right driver \n");
+ assert(0);
+ }
+ assert(m_drv);
+ return (m_drv);
+ }
+
+public:
+
+ static CTRexExtendedDriverDb * Ins();
+
+private:
+ CTRexExtendedDriverBase * create_driver(std::string name);
+
+ CTRexExtendedDriverDb(){
+ register_driver(std::string("rte_ixgbe_pmd"),CTRexExtendedDriverBase10G::create);
+ register_driver(std::string("rte_igb_pmd"),CTRexExtendedDriverBase1G::create);
+ register_driver(std::string("rte_i40e_pmd"),CTRexExtendedDriverBase40G::create);
+
+ /* virtual devices */
+ register_driver(std::string("rte_em_pmd"),CTRexExtendedDriverBase1GVm::create);
+ register_driver(std::string("rte_vmxnet3_pmd"),CTRexExtendedDriverBase1GVm::create);
+ register_driver(std::string("rte_virtio_pmd"),CTRexExtendedDriverBase1GVm::create);
+ register_driver(std::string("rte_enic_pmd"),CTRexExtendedDriverBase1GVm::create);
+
+
+
+
+ m_driver_was_set=false;
+ m_drv=0;
+ m_driver_name="";
+ }
+ void register_driver(std::string name,create_object_t func);
+ static CTRexExtendedDriverDb * m_ins;
+ bool m_driver_was_set;
+ std::string m_driver_name;
+ CTRexExtendedDriverBase * m_drv;
+ std::vector <CTRexExtendedDriverRec*> m_list;
+
+};
+
+CTRexExtendedDriverDb * CTRexExtendedDriverDb::m_ins;
+
+
+void CTRexExtendedDriverDb::register_driver(std::string name,
+ create_object_t func){
+ CTRexExtendedDriverRec * rec;
+ rec = new CTRexExtendedDriverRec();
+ rec->m_driver_name=name;
+ rec->m_constructor=func;
+ m_list.push_back(rec);
+}
+
+
+bool CTRexExtendedDriverDb::is_driver_exists(std::string name){
+ int i;
+ for (i=0; i<(int)m_list.size(); i++) {
+ if (m_list[i]->m_driver_name == name) {
+ return (true);
+ }
+ }
+ return (false);
+}
+
+
+CTRexExtendedDriverBase * CTRexExtendedDriverDb::create_driver(std::string name){
+ int i;
+ for (i=0; i<(int)m_list.size(); i++) {
+ if (m_list[i]->m_driver_name == name) {
+ return ( m_list[i]->m_constructor() );
+ }
+ }
+ return( (CTRexExtendedDriverBase *)0);
+}
+
+
+
+CTRexExtendedDriverDb * CTRexExtendedDriverDb::Ins(){
+ if (!m_ins) {
+ m_ins = new CTRexExtendedDriverDb();
+ }
+ return (m_ins);
+}
+
+static CTRexExtendedDriverBase * get_ex_drv(){
+
+ return ( CTRexExtendedDriverDb::Ins()->get_drv());
+}
+
+static inline int get_min_sample_rate(void){
+ return ( get_ex_drv()->get_min_sample_rate());
+}
+
+
+
+
+
+
+
+#define MAX_DPDK_ARGS 40
+static CPlatformYamlInfo global_platform_cfg_info;
+static int global_dpdk_args_num ;
+static char * global_dpdk_args[MAX_DPDK_ARGS];
+static char global_cores_str[100];
+static char global_prefix_str[100];
+static char global_loglevel_str[20];
+
+
+
+
+// cores =0==1,1*2,2,3,4,5,6
+// An enum for all the option types
+enum { OPT_HELP,
+ OPT_CFG,
+ OPT_NODE_DUMP,
+ OPT_UT,
+ OPT_FILE_OUT,
+ OPT_REAL_TIME,
+ OPT_CORES,
+ OPT_SINGLE_CORE,
+ OPT_FLIP_CLIENT_SERVER,
+ OPT_FLOW_FLIP_CLIENT_SERVER,
+ OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,
+ OPT_BW_FACTOR,
+ OPT_DURATION,
+ OPT_PLATFORM_FACTOR,
+ OPT_PUB_DISABLE,
+ OPT_LIMT_NUM_OF_PORTS,
+ OPT_PLAT_CFG_FILE,
+
+
+ OPT_LATENCY,
+ OPT_NO_CLEAN_FLOW_CLOSE,
+ OPT_LATENCY_MASK,
+ OPT_ONLY_LATENCY,
+ OPT_1G_MODE,
+ OPT_LATENCY_PREVIEW ,
+ OPT_PCAP,
+ OPT_RX_CHECK,
+ OPT_IO_MODE,
+ OPT_IPV6,
+ OPT_LEARN,
+ OPT_LEARN_VERIFY,
+ OPT_NO_FLOW_CONTROL,
+ OPT_RX_CHECK_HOPS,
+ OPT_MAC_FILE,
+ OPT_NO_KEYBOARD_INPUT,
+ OPT_VLAN,
+ OPT_VIRT_ONE_TX_RX_QUEUE,
+ OPT_PREFIX,
+ OPT_MAC_SPLIT
+
+};
+
+
+
+
+
+/* these are the argument types:
+ SO_NONE -- no argument needed
+ SO_REQ_SEP -- single required argument
+ SO_MULTI -- multiple arguments needed
+*/
+static CSimpleOpt::SOption parser_options[] =
+{
+ { OPT_HELP, "-?", SO_NONE },
+ { OPT_HELP, "-h", SO_NONE },
+ { OPT_HELP, "--help", SO_NONE },
+ { OPT_UT, "--ut", SO_NONE },
+ { OPT_CFG, "-f", SO_REQ_SEP},
+ { OPT_PLAT_CFG_FILE,"--cfg", SO_REQ_SEP},
+ { OPT_REAL_TIME , "-r", SO_NONE },
+ { OPT_SINGLE_CORE , "-s", SO_NONE },
+ { OPT_FILE_OUT , "-o" , SO_REQ_SEP},
+ { OPT_FLIP_CLIENT_SERVER,"--flip",SO_NONE },
+ { OPT_FLOW_FLIP_CLIENT_SERVER,"-p",SO_NONE },
+ { OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,"-e",SO_NONE },
+
+ { OPT_NO_CLEAN_FLOW_CLOSE,"--nc",SO_NONE },
+
+ { OPT_LIMT_NUM_OF_PORTS,"--limit-ports", SO_REQ_SEP },
+ { OPT_CORES , "-c", SO_REQ_SEP },
+ { OPT_NODE_DUMP , "-v", SO_REQ_SEP },
+ { OPT_LATENCY , "-l", SO_REQ_SEP },
+
+ { OPT_DURATION , "-d", SO_REQ_SEP },
+ { OPT_PLATFORM_FACTOR , "-pm", SO_REQ_SEP },
+
+ { OPT_PUB_DISABLE , "-pubd", SO_NONE },
+
+
+ { OPT_BW_FACTOR , "-m", SO_REQ_SEP },
+ { OPT_LATENCY_MASK , "--lm", SO_REQ_SEP },
+ { OPT_ONLY_LATENCY, "--lo", SO_NONE },
+
+ { OPT_1G_MODE, "-1g", SO_NONE },
+ { OPT_LATENCY_PREVIEW , "-k", SO_REQ_SEP },
+ { OPT_PCAP, "--pcap", SO_NONE },
+ { OPT_RX_CHECK, "--rx-check", SO_REQ_SEP },
+ { OPT_IO_MODE, "--iom", SO_REQ_SEP },
+ { OPT_RX_CHECK_HOPS, "--hops", SO_REQ_SEP },
+ { OPT_IPV6, "--ipv6", SO_NONE },
+ { OPT_LEARN, "--learn", SO_NONE },
+ { OPT_LEARN_VERIFY, "--learn-verify", SO_NONE },
+ { OPT_NO_FLOW_CONTROL, "--no-flow-control", SO_NONE },
+ { OPT_VLAN, "--vlan", SO_NONE },
+ { OPT_MAC_FILE, "--mac", SO_REQ_SEP },
+ { OPT_NO_KEYBOARD_INPUT ,"--no-key", SO_NONE },
+ { OPT_VIRT_ONE_TX_RX_QUEUE, "--vm-sim", SO_NONE },
+ { OPT_PREFIX, "--prefix", SO_REQ_SEP },
+ { OPT_MAC_SPLIT, "--mac-spread", SO_REQ_SEP },
+
+ SO_END_OF_OPTIONS
+};
+
+
+
+
+static int usage(){
+
+ printf(" Usage: t-rex-64 [OPTION] -f cfg.yaml -c cores \n");
+ printf(" \n");
+ printf(" \n");
+ printf(" options \n");
+ printf(" -f [file] : YAML file with template configuration \n");
+ printf(" \n\n");
+ printf(" --mac [file] : YAML file with <client ip, mac addr> configuration \n");
+ printf(" \n\n");
+ printf(" -r : realtime enable \n");
+ printf(" \n\n");
+    printf(" -c [number of cores]   : 1,2,3,4,5 number of dual cores + master; 1 means 1 master and 2 cores \n");
+ printf(" \n");
+ printf(" -s : run only one data path core\n");
+ printf(" \n");
+ printf(" --flip : flow will be sent from client->server and server->client for maximum throughput \n");
+ printf(" \n");
+    printf(" -p  : flow-flip; send all packets of a flow from the same interface, based on the client ip \n");
+    printf(" -e  : like -p but complies with the generator rules  \n");
+
+ printf(" \n");
+    printf(" -l [pkt/sec]  : run the latency daemon at this rate  \n");
+    printf("    e.g. -l 1000 runs 1000 pkt/sec from each interface; zero means disable the latency check  \n");
+ printf(" --lm : latency mask \n");
+ printf(" 0x1 only port 0 will send traffic \n");
+ printf(" --lo :only latency test \n");
+
+ printf(" \n");
+
+    printf(" --limit-ports  : limit the number of ports; must be even, e.g. 2,4  \n");
+    printf(" \n");
+    printf(" --nc  : if set, will not close all the flows; faster \n");
+ printf(" \n");
+ printf(" -d : duration of the test in sec \n");
+ printf(" \n");
+    printf(" -pm  : platform factor; in case you have a splitter in the setup you can multiply the total results by this factor  \n");
+    printf("    e.g. -pm 2.0 will multiply all the bps results by this factor   \n");
+ printf(" \n");
+ printf(" -pubd : disable monitors publishers \n");
+
+ printf(" -m : factor of bandwidth \n");
+ printf(" \n");
+ printf(" -1g : 1G trex \n");
+ printf(" \n");
+ printf(" -k [sec] : run latency test before starting the test. it will wait for x sec sending packet and x sec after that \n");
+ printf(" \n");
+
+ printf(" --cfg [platform_yaml] : load and configure platform using this file see example in cfg/cfg_examplexx.yaml file \n");
+ printf(" this file is used to configure/mask interfaces cores affinity and mac addr \n");
+ printf(" you can copy this file to /etc/trex_cfg.yaml \n");
+ printf(" \n");
+
+ printf(" --ipv6 : work in ipv6 mode \n");
+
+ printf(" --learn : Work in NAT environments, learn the dynamic NAT translation and ALG \n");
+ printf(" --learn-verify : Learn the translation, but intended for verification of the mechanism in cases that NAT does not exist \n");
+ printf(" \n");
+
+ printf(" -v [1-3] : verbose mode ( works only on the debug image ! ) \n");
+ printf(" 1 show only stats \n");
+ printf(" 2 run preview do not write to file \n");
+ printf(" 3 run preview write stats file \n");
+ printf(" Note in case of verbose mode you don't need to add the output file \n");
+ printf(" \n");
+ printf(" Warning : This program can generate huge-files (TB ) watch out! try this only on local drive \n");
+ printf(" \n");
+ printf(" \n");
+    printf(" --rx-check  [sample]   :  enable the rx check thread; using this thread we sample flows at 1/sample and check order, latency and more  \n");
+    printf("                            this feature consumes another thread  \n");
+ printf(" \n");
+ printf(" --hops [hops] : If rx check is enabled, the hop number can be assigned. The default number of hops is 1\n");
+ printf(" --iom [mode] : io mode for interactive mode [0- silent, 1- normal , 2- short] \n");
+    printf("                            this feature consumes another thread  \n");
+ printf(" \n");
+ printf(" --no-key : daemon mode, don't get input from keyboard \n");
+    printf(" --no-flow-control  : By default TRex disables flow-control; with this flag it does not touch it \n");
+ printf(" --prefix : for multi trex, each instance should have a different name \n");
+    printf(" --mac-spread  : Spread the destination mac by this factor. e.g. 2 will generate the traffic to 2 devices: DEST-MAC, DEST-MAC+1  \n");
+ printf(" maximum is up to 128 devices \n");
+
+ printf(" simulation mode : \n");
+ printf(" Using this mode you can generate the traffic into a pcap file and learn how trex works \n");
+ printf(" With this version you must be SUDO to use this mode ( I know this is not normal ) \n");
+ printf(" you can use the Linux CEL version of t-rex to do it without super user \n");
+ printf(" \n");
+ printf(" -o [capfile_name] simulate trex into pcap file \n");
+ printf(" --pcap export the file in pcap mode \n");
+ printf(" t-rex-64 -d 10 -f cfg.yaml -o my.pcap --pcap # export 10 sec of what Trex will do on real-time to a file my.pcap \n");
+ printf(" --vm-sim : simulate vm with driver of one input queue and one output queue \n");
+ printf(" \n");
+ printf(" Examples: ");
+ printf(" basic trex run for 10 sec and multiplier of x10 \n");
+ printf(" #>t-rex-64 -f cfg.yaml -m 10 -d 10 \n");
+ printf(" \n ");
+
+ printf(" preview show csv stats \n");
+ printf(" #>t-rex-64 -c 1 -f cfg.yaml -v 1 -p -m 10 -d 10 --nc -l 1000\n");
+ printf(" \n ");
+
+    printf("  run with one core and the --flip option \n");
+ printf(" #>t-rex-64 -f cfg.yaml -c 1 --flip \n");
+
+ printf("\n");
+ printf("\n");
+ printf(" Copyright (C) 2012 by hhaim Cisco-System POC for Israel dev-test \n");
+ printf(" version : %s \n",VERSION);
+
+ return (0);
+}
+
+
+int gtest_main(int argc, char **argv) ;
+
+static int parse_options(int argc, char *argv[], CParserOption* po, bool first_time ) {
+ CSimpleOpt args(argc, argv, parser_options);
+
+ bool latency_was_set=false;
+ int a=0;
+ int node_dump=0;
+
+ po->preview.setFileWrite(true);
+ po->preview.setRealTime(true);
+ int res1;
+ uint32_t tmp_data;
+
+
+ while ( args.Next() ){
+ if (args.LastError() == SO_SUCCESS) {
+ switch (args.OptionId()) {
+ case OPT_UT :
+ printf(" Supported only in simulation \n");
+ res1=0;
+ exit(res1);
+ break;
+ case OPT_HELP:
+ usage();
+ return -1;
+ case OPT_CFG:
+ po->cfg_file = args.OptionArg();
+ break;
+ case OPT_NO_KEYBOARD_INPUT :
+ po->preview.set_no_keyboard(true);
+ break;
+ case OPT_MAC_FILE :
+ po->mac_file = args.OptionArg();
+ break;
+ case OPT_PLAT_CFG_FILE :
+ po->platform_cfg_file = args.OptionArg();
+ break;
+ case OPT_SINGLE_CORE :
+ po->preview.setSingleCore(true);
+ break;
+ case OPT_IPV6:
+ po->preview.set_ipv6_mode_enable(true);
+ break;
+ case OPT_VLAN:
+ po->preview.set_vlan_mode_enable(true);
+ break;
+
+ case OPT_LEARN :
+ po->preview.set_lean_mode_enable(true);
+ break;
+
+ case OPT_LEARN_VERIFY :
+ po->preview.set_lean_mode_enable(true);
+ po->preview.set_lean_and_verify_mode_enable(true);
+ break;
+
+ case OPT_REAL_TIME :
+                printf(" warning: -r is deprecated, real time is not needed anymore, it is the default \n");
+ po->preview.setRealTime(true);
+ break;
+ case OPT_NO_FLOW_CONTROL:
+ po->preview.set_disable_flow_control_setting(true);
+ break;
+
+ case OPT_LIMT_NUM_OF_PORTS :
+ po->m_expected_portd =atoi(args.OptionArg());
+ break;
+ case OPT_CORES :
+ po->preview.setCores(atoi(args.OptionArg()));
+ break;
+ case OPT_FLIP_CLIENT_SERVER :
+ po->preview.setClientServerFlip(true);
+ break;
+ case OPT_NO_CLEAN_FLOW_CLOSE :
+ po->preview.setNoCleanFlowClose(true);
+ break;
+ case OPT_FLOW_FLIP_CLIENT_SERVER :
+ po->preview.setClientServerFlowFlip(true);
+ break;
+ case OPT_FLOW_FLIP_CLIENT_SERVER_SIDE:
+ po->preview.setClientServerFlowFlipAddr(true);
+ break;
+ case OPT_FILE_OUT:
+ po->out_file = args.OptionArg();
+ break;
+ case OPT_NODE_DUMP:
+ a=atoi(args.OptionArg());
+ node_dump=1;
+ po->preview.setFileWrite(false);
+ break;
+ case OPT_BW_FACTOR :
+ sscanf(args.OptionArg(),"%f", &po->m_factor);
+ break;
+ case OPT_DURATION :
+ sscanf(args.OptionArg(),"%f", &po->m_duration);
+ break;
+ case OPT_PUB_DISABLE:
+ po->preview.set_zmq_publish_enable(false);
+ break;
+ case OPT_PLATFORM_FACTOR:
+ sscanf(args.OptionArg(),"%f", &po->m_platform_factor);
+ break;
+ case OPT_LATENCY :
+ latency_was_set=true;
+ sscanf(args.OptionArg(),"%d", &po->m_latency_rate);
+ break;
+ case OPT_LATENCY_MASK :
+ sscanf(args.OptionArg(),"%x", &po->m_latency_mask);
+ break;
+ case OPT_ONLY_LATENCY :
+ po->preview.setOnlyLatency(true);
+ break;
+ case OPT_1G_MODE :
+ po->preview.set_1g_mode(true);
+ break;
+
+ case OPT_LATENCY_PREVIEW :
+ sscanf(args.OptionArg(),"%d", &po->m_latency_prev);
+ break;
+
+ case OPT_PCAP:
+ po->preview.set_pcap_mode_enable(true);
+ break;
+
+ case OPT_RX_CHECK :
+ sscanf(args.OptionArg(),"%d", &tmp_data);
+ po->m_rx_check_sampe=(uint16_t)tmp_data;
+ po->preview.set_rx_check_enable(true);
+ break;
+ case OPT_RX_CHECK_HOPS :
+ sscanf(args.OptionArg(),"%d", &tmp_data);
+ po->m_rx_check_hops = (uint16_t)tmp_data;
+ break;
+ case OPT_IO_MODE :
+ sscanf(args.OptionArg(),"%d", &tmp_data);
+ po->m_io_mode=(uint16_t)tmp_data;
+ break;
+
+ case OPT_VIRT_ONE_TX_RX_QUEUE:
+ po->preview.set_vm_one_queue_enable(true);
+ break;
+
+ case OPT_PREFIX:
+ po->prefix = args.OptionArg();
+ break;
+
+ case OPT_MAC_SPLIT:
+ sscanf(args.OptionArg(),"%d", &tmp_data);
+ po->m_mac_splitter = (uint8_t)tmp_data;
+ po->preview.set_mac_ip_features_enable(true);
+ po->preview.setDestMacSplit(true);
+ break;
+
+ default:
+ usage();
+ return -1;
+ break;
+ } // End of switch
+ }// End of IF
+ else {
+ usage();
+ return -1;
+ }
+ } // End of while
+
+
+ if ((po->cfg_file =="") ) {
+        printf("Invalid combination of parameters; you must add -f with a configuration file \n");
+ return -1;
+ }
+
+ if ( po->m_mac_splitter > 128 ){
+        printf("maximum mac spreading is 128, you set it to %d \n",po->m_mac_splitter);
+ return -1;
+ }
+
+ if ( po->preview.get_learn_mode_enable() ){
+ if ( po->preview.get_ipv6_mode_enable() ){
+            printf("--learn mode is not supported with --ipv6, because there is no such thing as NAT66 (ipv6-ipv6) \n");
+            printf("if you think it is important, open a defect \n");
+ return -1;
+ }
+ if ( po->is_latency_disabled() ){
+ /* set latency thread */
+ po->m_latency_rate =1000;
+ }
+ }
+
+ if (po->preview.get_is_rx_check_enable() && ( po->is_latency_disabled() ) ) {
+        printf(" rx check must be enabled with latency check. try adding '-l 1000' \n");
+ return -1;
+ }
+
+ if ( node_dump ){
+ po->preview.setVMode(a);
+ }
+
+    /* if we have a platform factor we need to divide by it so we can still work with a normalized yaml profile */
+ po->m_factor = po->m_factor/po->m_platform_factor;
+
+ uint32_t cores=po->preview.getCores();
+ if ( cores > ((BP_MAX_CORES)/2-1) ) {
+ printf(" ERROR maximum cores are : %d \n",((BP_MAX_CORES)/2-1));
+ return -1;
+ }
+
+
+ if ( first_time ){
+ /* only first time read the configuration file */
+ if ( po->platform_cfg_file.length() >0 ) {
+ if ( node_dump ){
+ printf("load platform configuration file from %s \n",po->platform_cfg_file.c_str());
+ }
+ global_platform_cfg_info.load_from_yaml_file(po->platform_cfg_file);
+ if ( node_dump ){
+ global_platform_cfg_info.Dump(stdout);
+ }
+ }else{
+ if ( utl_is_file_exists("/etc/trex_cfg.yaml") ){
+ printf("found configuration file at /etc/trex_cfg.yaml \n");
+ global_platform_cfg_info.load_from_yaml_file("/etc/trex_cfg.yaml");
+ if ( node_dump ){
+ global_platform_cfg_info.Dump(stdout);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+int main_test(int argc , char * argv[]);
+
+
+
+
+void delay(int msec){
+
+ if (msec == 0)
+    {//a caller that requested 0 probably wanted the minimal delay,
+     //but because of a scaling problem got 0, so we will give the minimum delay
+ //printf("\n\n\nERROR-Task delay ticks == 0 found in task %s task id = %d\n\n\n\n",
+ // SANB_TaskName(SANB_TaskIdSelf()), SANB_TaskIdSelf());
+ msec =1;
+
+ }
+
+ struct timespec time1, remain; // 2 sec max delay
+ time1.tv_sec=msec/1000;
+ time1.tv_nsec=(msec - (time1.tv_sec*1000))*1000000;
+
+ nanosleep(&time1,&remain);
+}
+
+
+
+static const char * default_argv[] = {"xx","-c", "0x7", "-n","2","-b","0000:0b:01.01"};
+static int argv_num = 7;
+
+
+
+
+
+#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
+#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
+#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
+#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
+#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
+
+#define TX_WTHRESH_1G 1 /**< Default values of TX write-back threshold reg. */
+#define TX_PTHRESH_1G 1 /**< Default values of TX prefetch threshold reg. */
+
+
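+/* per-port DPDK configuration template: default RX/TX ring thresholds, a separate
+   RX config used for the drop queue (rx_drop_en=1), jumbo frames up to 2000 bytes,
+   plus driver-specific FDIR tweaks applied through the CTRexExtendedDriverBase hooks */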
+struct port_cfg_t {
+ public:
+ port_cfg_t(){
+ memset(&m_port_conf,0,sizeof(rte_eth_conf));
+ memset(&m_rx_conf,0,sizeof(rte_eth_rxconf));
+        memset(&m_tx_conf,0,sizeof(rte_eth_txconf));
+ memset(&m_rx_drop_conf,0,sizeof(rte_eth_rxconf));
+
+
+ m_rx_conf.rx_thresh.pthresh = RX_PTHRESH;
+ m_rx_conf.rx_thresh.hthresh = RX_HTHRESH;
+ m_rx_conf.rx_thresh.wthresh = RX_WTHRESH;
+ m_rx_conf.rx_free_thresh =32;
+
+ m_rx_drop_conf.rx_thresh.pthresh = 0;
+ m_rx_drop_conf.rx_thresh.hthresh = 0;
+ m_rx_drop_conf.rx_thresh.wthresh = 0;
+ m_rx_drop_conf.rx_free_thresh =32;
+ m_rx_drop_conf.rx_drop_en=1;
+
+ m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
+ m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
+ m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
+
+ m_port_conf.rxmode.jumbo_frame=1;
+ m_port_conf.rxmode.max_rx_pkt_len =2000;
+ m_port_conf.rxmode.hw_strip_crc=1;
+ }
+
+
+
+ inline void update_var(void){
+ get_ex_drv()->update_configuration(this);
+ }
+
+ inline void update_global_config_fdir(void){
+ get_ex_drv()->update_global_config_fdir(this);
+ }
+
+ /* enable FDIR */
+ inline void update_global_config_fdir_10g_1g(void){
+ m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT;
+ m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
+ m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
+ /* Offset of flexbytes field in RX packets (in 16-bit word units). */
+ /* Note: divide by 2 to convert byte offset to word offset */
+ if ( CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
+ m_port_conf.fdir_conf.flexbytes_offset=(14+6)/2;
+ }else{
+ m_port_conf.fdir_conf.flexbytes_offset=(14+8)/2;
+ }
+
+ /* Increment offset 4 bytes for the case where we add VLAN */
+ if ( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ){
+ m_port_conf.fdir_conf.flexbytes_offset+=(4/2);
+ }
+ m_port_conf.fdir_conf.drop_queue=1;
+ }
+
+ inline void update_global_config_fdir_40g(void){
+ m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT;
+ m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
+ m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
+ /* Offset of flexbytes field in RX packets (in 16-bit word units). */
+ /* Note: divide by 2 to convert byte offset to word offset */
+ #if 0
+ if ( CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
+ m_port_conf.fdir_conf.flexbytes_offset=(14+6)/2;
+ }else{
+ m_port_conf.fdir_conf.flexbytes_offset=(14+8)/2;
+ }
+
+ /* Increment offset 4 bytes for the case where we add VLAN */
+ if ( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ){
+ m_port_conf.fdir_conf.flexbytes_offset+=(4/2);
+ }
+ #endif
+
+ // TBD Flow Director does not work with XL710 yet we need to understand why
+ #if 0
+ struct rte_eth_fdir_flex_conf * lp = &m_port_conf.fdir_conf.flex_conf;
+
+ //lp->nb_flexmasks=1;
+ //lp->flex_mask[0].flow_type=RTE_ETH_FLOW_TYPE_SCTPV4;
+ //memset(lp->flex_mask[0].mask,0xff,RTE_ETH_FDIR_MAX_FLEXLEN);
+
+ lp->nb_payloads=1;
+ lp->flex_set[0].type = RTE_ETH_L3_PAYLOAD;
+ lp->flex_set[0].src_offset[0]=8;
+
+ //m_port_conf.fdir_conf.drop_queue=1;
+ #endif
+ }
+
+ struct rte_eth_conf m_port_conf;
+ struct rte_eth_rxconf m_rx_conf;
+ struct rte_eth_rxconf m_rx_drop_conf;
+ struct rte_eth_txconf m_tx_conf;
+};
+
+
+/* this object is per core / per port / per queue
+ each core will have 2 ports to send to
+
+
+ port0 port1
+
+ 0,1,2,3,..15 out queue ( per core ) 0,1,2,3,..15 out queue ( per core )
+
+*/
+
+
+typedef struct cnt_name_ {
+ uint32_t offset;
+ char * name;
+}cnt_name_t ;
+
+#define MY_REG(a) {a,(char *)#a}
+
+
+class CPhyEthIFStats {
+
+public:
+ uint64_t ipackets; /**< Total number of successfully received packets. */
+ uint64_t ibytes; /**< Total number of successfully received bytes. */
+
+ uint64_t f_ipackets; /**< Total number of successfully received packets - filter SCTP*/
+ uint64_t f_ibytes; /**< Total number of successfully received bytes. - filter SCTP */
+
+ uint64_t opackets; /**< Total number of successfully transmitted packets.*/
+ uint64_t obytes; /**< Total number of successfully transmitted bytes. */
+
+ uint64_t ierrors; /**< Total number of erroneous received packets. */
+ uint64_t oerrors; /**< Total number of failed transmitted packets. */
+ uint64_t imcasts; /**< Total number of multicast received packets. */
+ uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */
+
+
+public:
+ void Clear();
+ void Dump(FILE *fd);
+ void DumpAll(FILE *fd);
+};
+
+void CPhyEthIFStats::Clear(){
+
+ ipackets =0;
+ ibytes =0 ;
+
+ f_ipackets=0;
+ f_ibytes=0;
+
+ opackets=0;
+ obytes=0;
+
+ ierrors=0;
+ oerrors=0;
+ imcasts=0;
+ rx_nombuf=0;
+}
+
+
+void CPhyEthIFStats::DumpAll(FILE *fd){
+
+ #define DP_A4(f) printf(" %-40s : %llu \n",#f,f)
+ #define DP_A(f) if (f) printf(" %-40s : %llu \n",#f,f)
+ DP_A4(opackets);
+ DP_A4(obytes);
+ DP_A4(ipackets);
+ DP_A4(ibytes);
+ DP_A(ierrors);
+ DP_A(oerrors);
+
+}
+
+
+void CPhyEthIFStats::Dump(FILE *fd){
+
+ DP_A(opackets);
+ DP_A(obytes);
+
+ DP_A(f_ipackets);
+ DP_A(f_ibytes);
+
+ DP_A(ipackets);
+ DP_A(ibytes);
+ DP_A(ierrors);
+ DP_A(oerrors);
+ DP_A(imcasts);
+ DP_A(rx_nombuf);
+}
+
+
+
+class CPhyEthIF {
+public:
+ CPhyEthIF (){
+ m_port_id=0;
+ m_rx_queue=0;
+ }
+ bool Create(uint8_t portid){
+ m_port_id = portid;
+ m_last_rx_rate = 0.0;
+ m_last_tx_rate = 0.0;
+ m_last_pps=0.0;
+ return (true);
+ }
+ void Delete();
+
+ void set_rx_queue(uint8_t rx_queue){
+ m_rx_queue=rx_queue;
+ }
+
+
+ void configure(uint16_t nb_rx_queue,
+ uint16_t nb_tx_queue,
+ const struct rte_eth_conf *eth_conf);
+
+ void macaddr_get(struct ether_addr *mac_addr);
+
+ void get_stats(CPhyEthIFStats *stats);
+
+ void get_stats_1g(CPhyEthIFStats *stats);
+
+
+ void rx_queue_setup(uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+ void tx_queue_setup(uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+ void configure_rx_drop_queue();
+
+ void configure_rx_duplicate_rules();
+
+ void start();
+
+ void stop();
+
+ void update_link_status();
+
+ bool is_link_up(){
+ return (m_link.link_status?true:false);
+ }
+
+ void dump_link(FILE *fd);
+
+ void disable_flow_control();
+
+ void set_promiscuous(bool enable);
+
+ void add_mac(char * mac);
+
+
+ bool get_promiscuous();
+
+ void dump_stats(FILE *fd);
+
+ void update_counters();
+
+
+ void stats_clear();
+
+ uint8_t get_port_id(){
+ return (m_port_id);
+ }
+
+ float get_last_tx_rate(){
+ return (m_last_tx_rate);
+ }
+
+ float get_last_rx_rate(){
+ return (m_last_rx_rate);
+ }
+
+ float get_last_pps_rate(){
+ return (m_last_pps);
+ }
+
+ CPhyEthIFStats & get_stats(){
+ return ( m_stats );
+ }
+
+ void flush_rx_queue(void);
+
+public:
+
+ inline uint16_t tx_burst(uint16_t queue_id,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+ inline uint16_t rx_burst(uint16_t queue_id,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+
+ inline uint32_t pci_reg_read(uint32_t reg_off){
+ void *reg_addr;
+ uint32_t reg_v;
+ reg_addr = (void *)((char *)m_dev_info.pci_dev->mem_resource[0].addr +
+ reg_off);
+ reg_v = *((volatile uint32_t *)reg_addr);
+ return rte_le_to_cpu_32(reg_v);
+ }
+
+
+ inline void pci_reg_write(uint32_t reg_off,
+ uint32_t reg_v){
+ void *reg_addr;
+
+ reg_addr = (void *)((char *)m_dev_info.pci_dev->mem_resource[0].addr +
+ reg_off);
+ *((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v);
+ }
+
+ void dump_stats_extended(FILE *fd);
+
+ uint8_t get_rte_port_id(void){
+ return ( m_port_id );
+ }
+private:
+ uint8_t m_port_id;
+ uint8_t m_rx_queue;
+ struct rte_eth_link m_link;
+ uint64_t m_sw_try_tx_pkt;
+ uint64_t m_sw_tx_drop_pkt;
+ CBwMeasure m_bw_tx;
+ CBwMeasure m_bw_rx;
+ CPPSMeasure m_pps_tx;
+
+ CPhyEthIFStats m_stats;
+
+ float m_last_rx_rate;
+ float m_last_tx_rate;
+ float m_last_pps;
+public:
+ struct rte_eth_dev_info m_dev_info;
+};
+
+
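+/* drain leftover packets from the RX queue: poll a bounded number of 32-packet
+   bursts, free every mbuf, and warn if the queue still does not empty */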
+void CPhyEthIF::flush_rx_queue(void){
+
+ rte_mbuf_t * rx_pkts[32];
+ int j=0;
+ uint16_t cnt=0;
+
+ while (true) {
+ j++;
+ cnt = rx_burst(m_rx_queue,rx_pkts,32);
+ if ( cnt ) {
+ int i;
+ for (i=0; i<(int)cnt;i++) {
+ rte_mbuf_t * m=rx_pkts[i];
+ /*printf("rx--\n");
+ rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
+ rte_pktmbuf_free(m);
+ }
+ }
+ if ( ((cnt==0) && (j>10)) || (j>15) ) {
+ break;
+ }
+ }
+ if (cnt>0) {
+ printf(" Warning can't flush rx-queue for port %d \n",(int)get_port_id());
+ }
+}
+
+
+void CPhyEthIF::dump_stats_extended(FILE *fd){
+
+ cnt_name_t reg[]={
+ MY_REG(IXGBE_GPTC), /* total packet */
+ MY_REG(IXGBE_GOTCL), /* total bytes */
+ MY_REG(IXGBE_GOTCH),
+
+ MY_REG(IXGBE_GPRC),
+ MY_REG(IXGBE_GORCL),
+ MY_REG(IXGBE_GORCH),
+
+
+
+ MY_REG(IXGBE_RXNFGPC),
+ MY_REG(IXGBE_RXNFGBCL),
+ MY_REG(IXGBE_RXNFGBCH),
+ MY_REG(IXGBE_RXDGPC ),
+ MY_REG(IXGBE_RXDGBCL ),
+ MY_REG(IXGBE_RXDGBCH ),
+ MY_REG(IXGBE_RXDDGPC ),
+ MY_REG(IXGBE_RXDDGBCL ),
+ MY_REG(IXGBE_RXDDGBCH ),
+ MY_REG(IXGBE_RXLPBKGPC ),
+ MY_REG(IXGBE_RXLPBKGBCL),
+ MY_REG(IXGBE_RXLPBKGBCH ),
+ MY_REG(IXGBE_RXDLPBKGPC ),
+ MY_REG(IXGBE_RXDLPBKGBCL),
+ MY_REG(IXGBE_RXDLPBKGBCH ),
+ MY_REG(IXGBE_TXDGPC ),
+ MY_REG(IXGBE_TXDGBCL ),
+ MY_REG(IXGBE_TXDGBCH ),
+ MY_REG(IXGBE_FDIRUSTAT ),
+ MY_REG(IXGBE_FDIRFSTAT ),
+ MY_REG(IXGBE_FDIRMATCH ),
+ MY_REG(IXGBE_FDIRMISS )
+
+ };
+    fprintf (fd," extended counters \n");
+ int i;
+ for (i=0; i<sizeof(reg)/sizeof(reg[0]); i++) {
+ cnt_name_t *lp=&reg[i];
+ uint32_t c=pci_reg_read(lp->offset);
+ if (c) {
+ fprintf (fd," %s : %d \n",lp->name,c);
+ }
+ }
+}
+
+
+
+void CPhyEthIF::configure(uint16_t nb_rx_queue,
+ uint16_t nb_tx_queue,
+ const struct rte_eth_conf *eth_conf){
+ int ret;
+ ret = rte_eth_dev_configure(m_port_id,
+ nb_rx_queue,
+ nb_tx_queue,
+ eth_conf);
+
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot configure device: "
+ "err=%d, port=%u\n",
+ ret, m_port_id);
+
+ /* get device info */
+ rte_eth_dev_info_get(m_port_id, &m_dev_info);
+
+}
+
+
+/*
+
+rx-queue 0 - default - all traffic except SCTP
+             will be dropped as the queue is disabled
+
+
+rx-queue 1 - SCTP traffic will go to here
+
+ pci_reg_write(IXGBE_L34T_IMIR(0),(1<<21));
+
+*/
+
+void CPhyEthIF::configure_rx_duplicate_rules(){
+
+ if ( get_is_rx_filter_enable() ){
+
+ if ( get_ex_drv()->is_hardware_filter_is_supported()==false ){
+ printf(" ERROR this feature is not supported with current hardware \n");
+ exit(1);
+ }
+ get_ex_drv()->configure_rx_filter_rules(this);
+ }
+}
+
+
+void CPhyEthIF::configure_rx_drop_queue(){
+
+ if ( get_vm_one_queue_enable() ) {
+ return;
+ }
+ if ( CGlobalInfo::m_options.is_latency_disabled()==false ) {
+ if ( (!get_ex_drv()->is_hardware_support_drop_queue()) ) {
+ printf(" ERROR latency feature is not supported with current hardware \n");
+ exit(1);
+ }
+ }
+ get_ex_drv()->configure_drop_queue(this);
+}
+
+
+void CPhyEthIF::rx_queue_setup(uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool){
+
+ int ret = rte_eth_rx_queue_setup(m_port_id , rx_queue_id,
+ nb_rx_desc,
+ socket_id,
+ rx_conf,
+ mb_pool);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
+ "err=%d, port=%u\n",
+ ret, m_port_id);
+}
+
+
+
+void CPhyEthIF::tx_queue_setup(uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf){
+
+ int ret = rte_eth_tx_queue_setup( m_port_id,
+ tx_queue_id,
+ nb_tx_desc,
+ socket_id,
+ tx_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
+ "err=%d, port=%u queue=%u\n",
+ ret, m_port_id, tx_queue_id);
+
+}
+
+
+void CPhyEthIF::stop(){
+ rte_eth_dev_stop(m_port_id);
+}
+
+
+void CPhyEthIF::start(){
+
+ get_ex_drv()->clear_extended_stats(this);
+
+ int ret;
+
+ m_bw_tx.reset();
+ m_bw_rx.reset();
+
+ m_stats.Clear();
+ int i;
+ for (i=0;i<10; i++ ) {
+ ret = rte_eth_dev_start(m_port_id);
+ if (ret==0) {
+ return;
+ }
+ delay(1000);
+ }
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
+ "err=%d, port=%u\n",
+ ret, m_port_id);
+
+}
+
+void CPhyEthIF::disable_flow_control(){
+ if ( get_vm_one_queue_enable() ){
+ return;
+ }
+ int ret;
+ if ( !CGlobalInfo::m_options.preview.get_is_disable_flow_control_setting() ){
+ // see trex-64 issue with loopback on the same NIC
+ struct rte_eth_fc_conf fc_conf;
+ memset(&fc_conf,0,sizeof(fc_conf));
+ fc_conf.mode=RTE_FC_NONE;
+ fc_conf.autoneg=1;
+ fc_conf.pause_time=100;
+ int i;
+ for (i=0; i<5; i++) {
+ ret=rte_eth_dev_flow_ctrl_set(m_port_id,&fc_conf);
+ if (ret==0) {
+ break;
+ }
+ delay(1000);
+ }
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_flow_ctrl_set: "
+                     "err=%d, port=%u\n probably the link is down, please check your link activity or leave flow-control untouched using the CLI flag --no-flow-control \n",
+ ret, m_port_id);
+ }
+}
+
+
+
+void CPhyEthIF::dump_link(FILE *fd){
+ fprintf(fd,"port : %d \n",(int)m_port_id);
+ fprintf(fd,"------------\n");
+
+ fprintf(fd,"link : ");
+ if (m_link.link_status) {
+ fprintf(fd," link : Link Up - speed %u Mbps - %s\n",
+ (unsigned) m_link.link_speed,
+ (m_link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                   ("full-duplex") : ("half-duplex"));
+ } else {
+ fprintf(fd," Link Down\n");
+ }
+ fprintf(fd,"promiscuous : %d \n",get_promiscuous());
+}
+
+void CPhyEthIF::update_link_status(){
+ rte_eth_link_get(m_port_id, &m_link);
+}
+
+void CPhyEthIF::add_mac(char * mac){
+ struct ether_addr mac_addr;
+ int i=0;
+ for (i=0; i<6;i++) {
+ mac_addr.addr_bytes[i] =mac[i];
+ }
+ rte_eth_dev_mac_addr_add(m_port_id, &mac_addr,0);
+}
+
+void CPhyEthIF::set_promiscuous(bool enable){
+ if (enable) {
+ rte_eth_promiscuous_enable(m_port_id);
+ }else{
+ rte_eth_promiscuous_disable(m_port_id);
+ }
+}
+
+bool CPhyEthIF::get_promiscuous(){
+ int ret=rte_eth_promiscuous_get(m_port_id);
+ if (ret<0) {
+ rte_exit(EXIT_FAILURE, "rte_eth_promiscuous_get: "
+ "err=%d, port=%u\n",
+ ret, m_port_id);
+
+ }
+ return ( ret?true:false);
+}
+
+
+void CPhyEthIF::macaddr_get(struct ether_addr *mac_addr){
+ rte_eth_macaddr_get(m_port_id , mac_addr);
+}
+
+void CPhyEthIF::get_stats_1g(CPhyEthIFStats *stats){
+
+ int i;
+ uint64_t t=0;
+
+
+ stats->ipackets += pci_reg_read(E1000_GPRC) ;
+
+ stats->ibytes += (pci_reg_read(E1000_GORCL) );
+ stats->ibytes += (((uint64_t)pci_reg_read(E1000_GORCH))<<32);
+
+
+ stats->opackets += pci_reg_read(E1000_GPTC);
+ stats->obytes += pci_reg_read(E1000_GOTCL) ;
+    stats->obytes     += ( (((uint64_t)pci_reg_read(E1000_GOTCH))<<32) );
+
+ stats->f_ipackets += 0;
+ stats->f_ibytes += 0;
+
+
+ stats->ierrors += ( pci_reg_read(E1000_RNBC) +
+ pci_reg_read(E1000_CRCERRS) +
+ pci_reg_read(E1000_ALGNERRC ) +
+ pci_reg_read(E1000_SYMERRS ) +
+ pci_reg_read(E1000_RXERRC ) +
+
+ pci_reg_read(E1000_ROC)+
+ pci_reg_read(E1000_RUC)+
+ pci_reg_read(E1000_RJC) +
+
+ pci_reg_read(E1000_XONRXC)+
+ pci_reg_read(E1000_XONTXC)+
+ pci_reg_read(E1000_XOFFRXC)+
+ pci_reg_read(E1000_XOFFTXC)+
+ pci_reg_read(E1000_FCRUC)
+ );
+
+ stats->oerrors += 0;
+ stats->imcasts = 0;
+ stats->rx_nombuf = 0;
+
+ m_last_tx_rate = m_bw_tx.add(stats->obytes);
+ m_last_rx_rate = m_bw_rx.add(stats->ibytes);
+ m_last_pps = m_pps_tx.add(stats->opackets);
+
+}
+
+void CPhyEthIF::get_stats(CPhyEthIFStats *stats){
+
+ get_ex_drv()->get_extended_stats(this,stats);
+
+ m_last_tx_rate = m_bw_tx.add(stats->obytes);
+ m_last_rx_rate = m_bw_rx.add(stats->ibytes);
+ m_last_pps = m_pps_tx.add(stats->opackets);
+}
+
+
+void dump_hw_state(FILE *fd,struct ixgbe_hw_stats *hs ){
+
+ #define DP_A1(f) if (hs->f) fprintf(fd," %-40s : %llu \n",#f,hs->f)
+ #define DP_A2(f,m) for (i=0;i<m; i++) { if (hs->f[i]) fprintf(fd," %-40s[%d] : %llu \n",#f,i,hs->f[i]); }
+ int i;
+
+ //for (i=0;i<8; i++) { if (hs->mpc[i]) fprintf(fd," %-40s[%d] : %llu \n","mpc",i,hs->mpc[i]); }
+ DP_A2(mpc,8);
+ DP_A1(crcerrs);
+ DP_A1(illerrc);
+ //DP_A1(errbc);
+ DP_A1(mspdc);
+ DP_A1(mpctotal);
+ DP_A1(mlfc);
+ DP_A1(mrfc);
+ DP_A1(rlec);
+ //DP_A1(lxontxc);
+ //DP_A1(lxonrxc);
+ //DP_A1(lxofftxc);
+ //DP_A1(lxoffrxc);
+ //DP_A2(pxontxc,8);
+ //DP_A2(pxonrxc,8);
+ //DP_A2(pxofftxc,8);
+ //DP_A2(pxoffrxc,8);
+
+ //DP_A1(prc64);
+ //DP_A1(prc127);
+ //DP_A1(prc255);
+ // DP_A1(prc511);
+ //DP_A1(prc1023);
+ //DP_A1(prc1522);
+
+ DP_A1(gprc);
+ DP_A1(bprc);
+ DP_A1(mprc);
+ DP_A1(gptc);
+ DP_A1(gorc);
+ DP_A1(gotc);
+ DP_A2(rnbc,8);
+ DP_A1(ruc);
+ DP_A1(rfc);
+ DP_A1(roc);
+ DP_A1(rjc);
+ DP_A1(mngprc);
+ DP_A1(mngpdc);
+ DP_A1(mngptc);
+ DP_A1(tor);
+ DP_A1(tpr);
+ DP_A1(tpt);
+ DP_A1(ptc64);
+ DP_A1(ptc127);
+ DP_A1(ptc255);
+ DP_A1(ptc511);
+ DP_A1(ptc1023);
+ DP_A1(ptc1522);
+ DP_A1(mptc);
+ DP_A1(bptc);
+ DP_A1(xec);
+ DP_A2(qprc,16)
+ DP_A2(qptc,16);
+ DP_A2(qbrc,16);
+ DP_A2(qbtc,16);
+ DP_A2(qprdc,16);
+ DP_A2(pxon2offc,8);
+ DP_A1(fdirustat_add);
+ DP_A1(fdirustat_remove);
+ DP_A1(fdirfstat_fadd);
+ DP_A1(fdirfstat_fremove);
+ DP_A1(fdirmatch);
+ DP_A1(fdirmiss);
+ DP_A1(fccrc);
+ DP_A1(fclast);
+ DP_A1(fcoerpdc);
+ DP_A1(fcoeprc);
+ DP_A1(fcoeptc);
+ DP_A1(fcoedwrc);
+ DP_A1(fcoedwtc);
+ DP_A1(fcoe_noddp);
+ DP_A1(fcoe_noddp_ext_buff);
+ DP_A1(ldpcec);
+ DP_A1(pcrc8ec);
+ DP_A1(b2ospc);
+ DP_A1(b2ogprc);
+ DP_A1(o2bgptc);
+ DP_A1(o2bspc);
+}
+
+
+void CPhyEthIF::update_counters(){
+ get_stats(&m_stats);
+}
+
+void CPhyEthIF::dump_stats(FILE *fd){
+
+ update_counters();
+
+ fprintf(fd,"port : %d \n",(int)m_port_id);
+ fprintf(fd,"------------\n");
+ m_stats.DumpAll(fd);
+ //m_stats.Dump(fd);
+ printf (" Tx : %.1fMb/sec \n",m_last_tx_rate);
+ //printf (" Rx : %.1fMb/sec \n",m_last_rx_rate);
+}
+
+void CPhyEthIF::stats_clear(){
+ rte_eth_stats_reset(m_port_id);
+ m_stats.Clear();
+}
+
+inline uint16_t CPhyEthIF::tx_burst(uint16_t queue_id,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts){
+ uint16_t ret = rte_eth_tx_burst(m_port_id, queue_id, tx_pkts, nb_pkts);
+ return (ret);
+}
+
+
+inline uint16_t CPhyEthIF::rx_burst(uint16_t queue_id,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts){
+ return (rte_eth_rx_burst(m_port_id, queue_id,
+ rx_pkts, nb_pkts));
+
+}
+
+
+
+
+class CCorePerPort {
+public:
+ CCorePerPort (){
+ m_tx_queue_id=0;
+ m_len=0;
+ int i;
+ for (i=0; i<MAX_PKT_BURST; i++) {
+ m_table[i]=0;
+ }
+ m_port=0;
+ }
+ uint16_t m_tx_queue_id;
+ uint16_t m_len;
+ rte_mbuf_t * m_table[MAX_PKT_BURST];
+ CPhyEthIF * m_port;
+};
+
+
+#define MAX_MBUF_CACHE 100
+
+
+/* per core/gbe queue port for transmit */
+class CCoreEthIF : public CVirtualIF {
+
+public:
+
+ CCoreEthIF(){
+ m_mbuf_cache=0;
+ }
+
+public:
+ bool Create(uint8_t core_id,
+ uint16_t tx_client_queue_id,
+ CPhyEthIF * tx_client_port,
+
+ uint16_t tx_server_queue_id,
+ CPhyEthIF * tx_server_port);
+ void Delete();
+
+ virtual int open_file(std::string file_name){
+ return (0);
+ }
+
+ virtual int close_file(void){
+ return (flush_tx_queue());
+ }
+
+ virtual int send_node(CGenNode * node);
+ virtual void send_one_pkt(pkt_dir_t dir, rte_mbuf_t *m);
+
+ virtual int flush_tx_queue(void);
+
+ __attribute__ ((noinline)) void flush_rx_queue();
+ __attribute__ ((noinline)) void update_mac_addr(CGenNode * node,uint8_t *p);
+
+ bool process_rx_pkt(pkt_dir_t dir,rte_mbuf_t * m);
+
+
+public:
+ void GetCoreCounters(CVirtualIFPerSideStats *stats);
+ void DumpCoreStats(FILE *fd);
+ void DumpIfStats(FILE *fd);
+ static void DumpIfCfgHeader(FILE *fd);
+ void DumpIfCfg(FILE *fd);
+
+ socket_id_t get_socket_id(){
+ return ( CGlobalInfo::m_socket.port_to_socket( m_ports[0].m_port->get_port_id() ) );
+ }
+
+private:
+
+ int send_burst(CCorePerPort * lp_port,
+ uint16_t len,
+ CVirtualIFPerSideStats * lp_stats);
+ int send_pkt(CCorePerPort * lp_port,
+ rte_mbuf_t *m,
+ CVirtualIFPerSideStats * lp_stats);
+
+
+
+private:
+ uint8_t m_core_id;
+ uint16_t m_mbuf_cache;
+ CCorePerPort m_ports[CS_NUM]; /* each core has 2 tx queues 1. client side and server side */
+ CNodeRing * m_ring_to_rx;
+};
+
+bool CCoreEthIF::Create(uint8_t core_id,
+ uint16_t tx_client_queue_id,
+ CPhyEthIF * tx_client_port,
+
+ uint16_t tx_server_queue_id,
+ CPhyEthIF * tx_server_port){
+ m_ports[CLIENT_SIDE].m_tx_queue_id = tx_client_queue_id;
+ m_ports[CLIENT_SIDE].m_port = tx_client_port;
+
+ m_ports[SERVER_SIDE].m_tx_queue_id = tx_server_queue_id;
+ m_ports[SERVER_SIDE].m_port = tx_server_port;
+ m_core_id = core_id;
+
+ CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
+ m_ring_to_rx = rx_dp->getRingDpToCp(core_id-1);
+ assert( m_ring_to_rx);
+ return (true);
+}
+
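+/* decide whether a received packet belongs to the latency/rx-check path: latency
+   packets, and TTL-marked packets when the rx filter is enabled, are wrapped in a
+   node and queued to the rx core; returns true only if mbuf ownership was passed on,
+   otherwise the caller frees the packet */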
+bool CCoreEthIF::process_rx_pkt(pkt_dir_t dir,
+ rte_mbuf_t * m){
+
+ CSimplePacketParser parser(m);
+ if ( !parser.Parse() ){
+ return false;
+ }
+ bool send=false;
+ if ( parser.IsLatencyPkt() ){
+ send=true;
+
+ }else{
+ if ( get_is_rx_filter_enable() ){
+ uint8_t max_ttl = 0xff - get_rx_check_hops();
+ uint8_t pkt_ttl = parser.getTTl();
+ if ( (pkt_ttl==max_ttl) || (pkt_ttl==(max_ttl-1) ) ) {
+ send=true;
+ }
+ }
+ }
+
+
+ if (send) {
+ CGenNodeLatencyPktInfo * node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
+ if ( node ) {
+ node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
+ node->m_dir = dir;
+ node->m_latency_offset = 0xdead;
+ node->m_pkt = m;
+ if ( m_ring_to_rx->Enqueue((CGenNode*)node)==0 ){
+ }else{
+ CGlobalInfo::free_node((CGenNode *)node);
+ send=false;
+ }
+
+ #ifdef LATENCY_QUEUE_TRACE_
+ printf("rx to cp --\n");
+ rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));
+ #endif
+ }else{
+ send=false;
+ }
+ }
+ return (send);
+}
+
+
+
+void CCoreEthIF::flush_rx_queue(void){
+ pkt_dir_t dir ;
+ bool is_latency=get_is_latency_thread_enable();
+ for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
+ CCorePerPort * lp_port=&m_ports[dir];
+ CPhyEthIF * lp=lp_port->m_port;
+
+ rte_mbuf_t * rx_pkts[32];
+ int j=0;
+
+ while (true) {
+ j++;
+ uint16_t cnt =lp->rx_burst(0,rx_pkts,32);
+ if ( cnt ) {
+ int i;
+ for (i=0; i<(int)cnt;i++) {
+ rte_mbuf_t * m=rx_pkts[i];
+ if ( is_latency ){
+ if (!process_rx_pkt(dir,m)){
+ rte_pktmbuf_free(m);
+ }
+ }else{
+ rte_pktmbuf_free(m);
+ }
+ }
+ }
+ if ((cnt<5) || j>10 ) {
+ break;
+ }
+ }
+ }
+}
+
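+/* push any pending mbufs of both the client and the server side to the NIC;
+ in VM/one-queue mode also try to drain the RX queue from the same core */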
+int CCoreEthIF::flush_tx_queue(void){
+ /* flush both sides */
+ pkt_dir_t dir ;
+ for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
+ CCorePerPort * lp_port=&m_ports[dir];
+ CVirtualIFPerSideStats * lp_stats= &m_stats[dir];
+ if ( likely(lp_port->m_len > 0) ) {
+ send_burst(lp_port,lp_port->m_len,lp_stats);
+ lp_port->m_len = 0;
+ }
+ }
+
+ if ( unlikely( get_vm_one_queue_enable() ) ){
+ /* try drain the rx packets */
+ flush_rx_queue();
+ }
+ return (0);
+}
+
+
+void CCoreEthIF::GetCoreCounters(CVirtualIFPerSideStats *stats){
+ stats->Clear();
+ pkt_dir_t dir ;
+ for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
+ stats->Add(&m_stats[dir]);
+ }
+}
+
+void CCoreEthIF::DumpCoreStats(FILE *fd){
+ fprintf (fd,"------------------------ \n");
+ fprintf (fd," per core stats core id : %d \n",m_core_id);
+ fprintf (fd,"------------------------ \n");
+
+ CVirtualIFPerSideStats stats;
+ GetCoreCounters(&stats);
+ stats.Dump(stdout);
+}
+
+void CCoreEthIF::DumpIfCfgHeader(FILE *fd){
+ fprintf (fd," core , c-port, c-queue , s-port, s-queue \n");
+ fprintf (fd," ------------------------------------------\n");
+}
+
+void CCoreEthIF::DumpIfCfg(FILE *fd){
+ fprintf (fd," %d, %u , %u , %u , %u \n",m_core_id,
+ m_ports[CLIENT_SIDE].m_port->get_port_id(),
+ m_ports[CLIENT_SIDE].m_tx_queue_id,
+ m_ports[SERVER_SIDE].m_port->get_port_id(),
+ m_ports[SERVER_SIDE].m_tx_queue_id
+ );
+}
+
+
+void CCoreEthIF::DumpIfStats(FILE *fd){
+
+ fprintf (fd,"------------------------ \n");
+ fprintf (fd," per core per if stats id : %d \n",m_core_id);
+ fprintf (fd,"------------------------ \n");
+
+ const char * t[]={"client","server"};
+ pkt_dir_t dir ;
+ for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
+ CCorePerPort * lp=&m_ports[dir];
+ CVirtualIFPerSideStats * lpstats = &m_stats[dir];
+ fprintf (fd," port %d, queue id :%d - %s \n",lp->m_port->get_port_id(),lp->m_tx_queue_id,t[dir] );
+ fprintf (fd," ---------------------------- \n");
+ lpstats->Dump(fd);
+ }
+}
+
+#define DELAY_IF_NEEDED
+
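+/* transmit a table of mbufs on the port's TX queue; with DELAY_IF_NEEDED the
+ call busy-waits (1 usec steps) on a full queue and counts the stalls, and any
+ packets that still cannot be sent are freed and counted as drops */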
+int CCoreEthIF::send_burst(CCorePerPort * lp_port,
+ uint16_t len,
+ CVirtualIFPerSideStats * lp_stats){
+
+ uint16_t ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,lp_port->m_table,len);
+ #ifdef DELAY_IF_NEEDED
+ while ( unlikely( ret<len ) ){
+ rte_delay_us(1);
+ //rte_pause();
+ //rte_pause();
+ lp_stats->m_tx_queue_full += 1;
+ uint16_t ret1=lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,
+ &lp_port->m_table[ret],
+ len-ret);
+ ret+=ret1;
+ }
+ #endif
+
+ /* CPU produced a burst of packets, more than TX can send; the leftovers must be dropped */
+ if ( unlikely(ret < len) ) {
+ lp_stats->m_tx_drop += (len-ret);
+ uint16_t i;
+ for (i=ret; i<len;i++) {
+ rte_mbuf_t * m=lp_port->m_table[i];
+ rte_pktmbuf_free(m);
+ }
+ }
+ return (0);
+}
+
+
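+/* account the packet (the +4 bytes presumably cover the Ethernet FCS), append
+ the mbuf to the per-port table and flush it once a full MAX_PKT_BURST batch
+ has accumulated */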
+int CCoreEthIF::send_pkt(CCorePerPort * lp_port,
+ rte_mbuf_t *m,
+ CVirtualIFPerSideStats * lp_stats
+ ){
+
+ lp_stats->m_tx_pkt +=1;
+ lp_stats->m_tx_bytes += (rte_pktmbuf_pkt_len(m)+4);
+
+ uint16_t len = lp_port->m_len;
+ lp_port->m_table[len]=m;
+ len++;
+ /* enough pkts to be sent */
+ if (unlikely(len == MAX_PKT_BURST)) {
+ send_burst(lp_port, MAX_PKT_BURST,lp_stats);
+ len = 0;
+ }
+ lp_port->m_len = len;
+ return (0);
+}
+
+
+
+void CCoreEthIF::send_one_pkt(pkt_dir_t dir,
+ rte_mbuf_t *m){
+ CCorePerPort * lp_port=&m_ports[dir];
+ CVirtualIFPerSideStats * lp_stats = &m_stats[dir];
+ send_pkt(lp_port,m,lp_stats);
+ /* flush */
+ send_burst(lp_port,lp_port->m_len,lp_stats);
+ lp_port->m_len = 0;
+}
+
+
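+/* optional L2 rewrites on the generated packet: spread the destination MAC by
+ source IP (mac splitter), take the source MAC from the MAC/IP mapping file,
+ or override the source MAC with the packet's source IP */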
+void CCoreEthIF::update_mac_addr(CGenNode * node,uint8_t *p){
+
+ if ( CGlobalInfo::m_options.preview.getDestMacSplit() ) {
+ p[5]+= (node->m_src_ip % CGlobalInfo::m_options.m_mac_splitter);
+ }
+
+ if ( unlikely( CGlobalInfo::m_options.preview.get_mac_ip_mapping_enable() ) ) {
+ /* mac mapping file is configured
+ */
+ if (node->m_src_mac.inused==INUSED) {
+ memcpy(p+6, &node->m_src_mac.mac, sizeof(uint8_t)*6);
+ }
+ } else if ( unlikely( CGlobalInfo::m_options.preview.get_mac_ip_overide_enable() ) ){
+ /* client side */
+ if ( node->is_initiator_pkt() ){
+ *((uint32_t*)(p+6))=PKT_NTOHL(node->m_src_ip);
+ }
+ }
+}
+
+
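+/* main TX entry per generated node: reuse a cached mbuf when available,
+ otherwise build a new one from the flow template, optionally tag it with a
+ VLAN, rewrite the MAC addresses, add rx-check instrumentation, and possibly
+ cache the mbuf before queueing it for transmission */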
+int CCoreEthIF::send_node(CGenNode * node){
+
+ if ( unlikely( node->get_cache_mbuf() !=NULL ) ) {
+ pkt_dir_t dir;
+ rte_mbuf_t * m=node->get_cache_mbuf();
+ dir=(pkt_dir_t)node->get_mbuf_cache_dir();
+ CCorePerPort * lp_port=&m_ports[dir];
+ CVirtualIFPerSideStats * lp_stats = &m_stats[dir];
+ rte_pktmbuf_refcnt_update(m,1);
+ send_pkt(lp_port,m,lp_stats);
+ return (0);
+ }
+
+
+ CFlowPktInfo * lp=node->m_pkt_info;
+ rte_mbuf_t * m=lp->generate_new_mbuf(node);
+
+ pkt_dir_t dir;
+ bool single_port;
+
+ dir = node->cur_interface_dir();
+ single_port = node->get_is_all_flow_from_same_dir() ;
+
+ if ( unlikely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
+ /* which vlan to choose 0 or 1*/
+ uint8_t vlan_port = (node->m_src_ip &1);
+
+ /* set the vlan */
+ m->ol_flags = PKT_TX_VLAN_PKT;
+ m->l2_len =14;
+ uint16_t vlan_id = CGlobalInfo::m_options.m_vlan_port[vlan_port];
+
+
+ if (likely( vlan_id >0 ) ) {
+ m->vlan_tci = vlan_id;
+ dir = dir ^ vlan_port;
+ }else{
+ /* both from the same dir but with VLAN0 */
+ m->vlan_tci = CGlobalInfo::m_options.m_vlan_port[0];
+ dir = dir ^ 0;
+ }
+ }
+
+ CCorePerPort * lp_port=&m_ports[dir];
+ CVirtualIFPerSideStats * lp_stats = &m_stats[dir];
+
+ if (unlikely(m==0)) {
+ lp_stats->m_tx_alloc_error++;
+ return(0);
+ }
+
+ /* update mac addr dest/src 12 bytes */
+ uint8_t *p=rte_pktmbuf_mtod(m, uint8_t*);
+ uint8_t p_id=lp_port->m_port->get_port_id();
+
+
+ memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
+
+ /* if customer enables both mac_file and get_mac_ip_overide,
+ * we will apply mac_file.
+ */
+ if ( unlikely(CGlobalInfo::m_options.preview.get_mac_ip_features_enable() ) ) {
+ update_mac_addr(node,p);
+ }
+
+ if ( unlikely( node->is_rx_check_enabled() ) ) {
+ lp_stats->m_tx_rx_check_pkt++;
+ lp->do_generate_new_mbuf_rxcheck(m,node,dir,single_port);
+ lp_stats->m_template.inc_template( node->get_template_id( ));
+ }else{
+ // cache only if it is not a sampled (rx-check) packet, as that is a more complex mbuf struct
+ if ( unlikely( node->can_cache_mbuf() ) ) {
+ if ( !CGlobalInfo::m_options.preview.isMbufCacheDisabled() ){
+ m_mbuf_cache++;
+ if (m_mbuf_cache < MAX_MBUF_CACHE) {
+ /* limit the number of object to cache */
+ node->set_mbuf_cache_dir( dir);
+ node->set_cache_mbuf(m);
+ rte_pktmbuf_refcnt_update(m,1);
+ }
+ }
+ }
+ }
+
+ /*printf("send packet -- \n");
+ rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
+
+ /* send the packet */
+ send_pkt(lp_port,m,lp_stats);
+ return (0);
+}
+
+
+
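+/* latency port backed directly by a physical NIC: TX/RX go through dedicated
+ hardware queues of the CPhyEthIF */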
+class CLatencyHWPort : public CPortLatencyHWBase {
+public:
+ void Create(CPhyEthIF * p,
+ uint8_t tx_queue,
+ uint8_t rx_queue){
+ m_port=p;
+ m_tx_queue_id=tx_queue;
+ m_rx_queue_id=rx_queue;
+ }
+
+ virtual int tx(rte_mbuf_t * m){
+ rte_mbuf_t * tx_pkts[2];
+ tx_pkts[0]=m;
+ if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
+ /* vlan mode is the default */
+ /* set the vlan */
+ m->ol_flags = PKT_TX_VLAN_PKT;
+ m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
+ m->l2_len =14;
+ }
+ uint16_t res=m_port->tx_burst(m_tx_queue_id,tx_pkts,1);
+ if ( res == 0 ) {
+ rte_pktmbuf_free(m);
+ //printf(" queue is full for latency packet !!\n");
+ return (-1);
+
+ }
+ #if 0
+ fprintf(stdout," ==> %f.03 send packet ..\n",now_sec());
+ uint8_t *p1=rte_pktmbuf_mtod(m, uint8_t*);
+ uint16_t pkt_size1=rte_pktmbuf_pkt_len(m);
+ utl_DumpBuffer(stdout,p1,pkt_size1,0);
+ #endif
+
+ return (0);
+ }
+ virtual rte_mbuf_t * rx(){
+ rte_mbuf_t * rx_pkts[1];
+ uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,1);
+ if (cnt) {
+ return (rx_pkts[0]);
+ }else{
+ return (0);
+ }
+ }
+
+ virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts){
+ uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,nb_pkts);
+ return (cnt);
+ }
+
+
+private:
+ CPhyEthIF * m_port;
+ uint8_t m_tx_queue_id ;
+ uint8_t m_rx_queue_id;
+};
+
+
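+/* latency port for the VM/one-queue case: TX is forwarded to a data-path core
+ through a node ring instead of touching the NIC directly; RX is not handled here */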
+class CLatencyVmPort : public CPortLatencyHWBase {
+public:
+ void Create(uint8_t port_index,CNodeRing * ring,
+ CLatencyManager * mgr){
+ m_dir = (port_index%2);
+ m_ring_to_dp = ring;
+ m_mgr = mgr;
+ }
+
+ virtual int tx(rte_mbuf_t * m){
+ if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
+ /* vlan mode is the default */
+ /* set the vlan */
+ m->ol_flags = PKT_TX_VLAN_PKT;
+ m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
+ m->l2_len =14;
+ }
+
+ /* allocate node */
+ CGenNodeLatencyPktInfo * node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
+ if ( node ) {
+ node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
+ node->m_dir = m_dir;
+ node->m_pkt = m;
+ node->m_latency_offset = m_mgr->get_latency_header_offset();
+
+ if ( m_ring_to_dp->Enqueue((CGenNode*)node) ==0 ){
+ return (0);
+ }
+ }
+ return (-1);
+ }
+
+ virtual rte_mbuf_t * rx(){
+ return (0);
+ }
+
+ virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts){
+ return (0);
+ }
+
+
+private:
+ uint8_t m_dir;
+ CNodeRing * m_ring_to_dp; /* ring dp -> latency thread */
+ CLatencyManager * m_mgr;
+};
+
+
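+/* thin wrapper over a ZMQ PUB socket, used to publish the JSON stats
+ snapshots; can be created in a disabled (no-op) state */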
+class CZMqPublisher {
+public:
+ CZMqPublisher(){
+ m_context=0;
+ m_publisher=0;
+ }
+
+ bool Create(uint16_t port,bool disable);
+ void Delete();
+ void publish_json(std::string & s);
+private:
+ void show_zmq_last_error(char *s);
+private:
+ void * m_context;
+ void * m_publisher;
+};
+
+void CZMqPublisher::show_zmq_last_error(char *s){
+ printf(" ERROR %s \n",s);
+ printf(" ZMQ: %s",zmq_strerror (zmq_errno ()));
+ exit(-1);
+}
+
+
+bool CZMqPublisher::Create(uint16_t port,bool disable){
+
+ if (disable) {
+ return(true);
+ }
+ m_context = zmq_ctx_new ();
+ if ( m_context == 0 ) {
+ show_zmq_last_error((char *)"can't create ZMQ context");
+ }
+ m_publisher = zmq_socket (m_context, ZMQ_PUB);
+ if ( m_publisher == 0 ) {
+ show_zmq_last_error((char *)"can't create ZMQ socket");
+ }
+ char buffer[100];
+ sprintf(buffer,"tcp://*:%d",port);
+ int rc=zmq_bind (m_publisher, buffer);
+ if (rc != 0 ) {
+ sprintf(buffer,"can't bind to ZMQ socket %d",port);
+ show_zmq_last_error(buffer);
+ }
+ printf("zmq publisher at: %s \n",buffer);
+ return (true);
+}
+
+
+void CZMqPublisher::Delete(){
+ if (m_publisher) {
+ zmq_close (m_publisher);
+ }
+ if (m_context) {
+ zmq_ctx_destroy (m_context);
+ }
+}
+
+
+void CZMqPublisher::publish_json(std::string & s){
+ if ( m_publisher ){
+ int size = zmq_send (m_publisher, s.c_str(), s.length(), 0);
+ assert(size == (int)s.length());
+ }
+}
+
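+/* per-port counters as read from the NIC, plus the last measured TX rate in bps */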
+class CPerPortStats {
+public:
+ uint64_t opackets;
+ uint64_t obytes;
+ uint64_t ipackets;
+ uint64_t ibytes;
+ uint64_t ierrors;
+ uint64_t oerrors;
+
+ float m_total_tx_bps;
+};
+
+class CGlobalStats {
+public:
+ enum DumpFormat {
+ dmpSTANDARD,
+ dmpTABLE
+ };
+
+ uint64_t m_total_tx_pkts;
+ uint64_t m_total_rx_pkts;
+ uint64_t m_total_tx_bytes;
+ uint64_t m_total_rx_bytes;
+
+ uint64_t m_total_alloc_error;
+ uint64_t m_total_queue_full;
+ uint64_t m_total_queue_drop;
+
+ uint64_t m_total_clients;
+ uint64_t m_total_servers;
+ uint64_t m_active_sockets;
+
+ uint64_t m_total_nat_time_out;
+ uint64_t m_total_nat_no_fid ;
+ uint64_t m_total_nat_active ;
+ uint64_t m_total_nat_open ;
+ uint64_t m_total_nat_learn_error ;
+
+ CPerTxthreadTemplateInfo m_template;
+
+ float m_socket_util;
+
+ float m_platform_factor;
+ float m_tx_bps;
+ float m_rx_bps;
+ float m_tx_pps;
+ float m_tx_cps;
+ float m_tx_expected_cps;
+ float m_tx_expected_pps;
+ float m_tx_expected_bps;
+ float m_rx_drop_bps;
+ float m_active_flows;
+ float m_open_flows;
+ float m_cpu_util;
+ uint8_t m_threads;
+
+ uint32_t m_num_of_ports;
+ CPerPortStats m_port[BP_MAX_PORTS];
+public:
+ void Dump(FILE *fd,DumpFormat mode);
+ void DumpAllPorts(FILE *fd);
+ void dump_json(std::string & json);
+private:
+ std::string get_field(std::string name,float &f);
+ std::string get_field(std::string name,uint64_t &f);
+ std::string get_field_port(int port,std::string name,float &f);
+ std::string get_field_port(int port,std::string name,uint64_t &f);
+
+};
+
+std::string CGlobalStats::get_field(std::string name,float &f){
+ char buff[200];
+ sprintf(buff,"\"%s\":%.1f,",name.c_str(),f);
+ return (std::string(buff));
+}
+
+std::string CGlobalStats::get_field(std::string name,uint64_t &f){
+ char buff[200];
+ sprintf(buff,"\"%s\":%llu,",name.c_str(),f);
+ return (std::string(buff));
+}
+
+std::string CGlobalStats::get_field_port(int port,std::string name,float &f){
+ char buff[200];
+ sprintf(buff,"\"%s-%d\":%.1f,",name.c_str(),port,f);
+ return (std::string(buff));
+}
+
+std::string CGlobalStats::get_field_port(int port,std::string name,uint64_t &f){
+ char buff[200];
+ sprintf(buff,"\"%s-%d\":%llu,",name.c_str(),port,f);
+ return (std::string(buff));
+}
+
+
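+/* dump_json builds a flat JSON object; the GET_FIELD/GET_FIELD_PORT macros
+ stringify the member name (via #f) so it becomes the JSON key */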
+void CGlobalStats::dump_json(std::string & json){
+ json="{\"name\":\"trex-global\",\"type\":0,\"data\":{";
+
+ #define GET_FIELD(f) get_field(std::string(#f),f)
+ #define GET_FIELD_PORT(p,f) get_field_port(p,std::string(#f),lp->f)
+
+ json+=GET_FIELD(m_cpu_util);
+ json+=GET_FIELD(m_platform_factor);
+ json+=GET_FIELD(m_tx_bps);
+ json+=GET_FIELD(m_rx_bps);
+ json+=GET_FIELD(m_tx_pps);
+ json+=GET_FIELD(m_tx_cps);
+ json+=GET_FIELD(m_tx_expected_cps);
+ json+=GET_FIELD(m_tx_expected_pps);
+ json+=GET_FIELD(m_tx_expected_bps);
+ json+=GET_FIELD(m_rx_drop_bps);
+ json+=GET_FIELD(m_active_flows);
+ json+=GET_FIELD(m_open_flows);
+
+ json+=GET_FIELD(m_total_tx_pkts);
+ json+=GET_FIELD(m_total_rx_pkts);
+ json+=GET_FIELD(m_total_tx_bytes);
+ json+=GET_FIELD(m_total_rx_bytes);
+
+ json+=GET_FIELD(m_total_clients);
+ json+=GET_FIELD(m_total_servers);
+ json+=GET_FIELD(m_active_sockets);
+ json+=GET_FIELD(m_socket_util);
+
+ json+=GET_FIELD(m_total_nat_time_out);
+ json+=GET_FIELD(m_total_nat_no_fid );
+ json+=GET_FIELD(m_total_nat_active );
+ json+=GET_FIELD(m_total_nat_open );
+ json+=GET_FIELD(m_total_nat_learn_error);
+
+ int i;
+ for (i=0; i<(int)m_num_of_ports; i++) {
+ CPerPortStats * lp=&m_port[i];
+ json+=GET_FIELD_PORT(i,opackets) ;
+ json+=GET_FIELD_PORT(i,obytes) ;
+ json+=GET_FIELD_PORT(i,ipackets) ;
+ json+=GET_FIELD_PORT(i,ibytes) ;
+ json+=GET_FIELD_PORT(i,ierrors) ;
+ json+=GET_FIELD_PORT(i,oerrors) ;
+ json+=GET_FIELD_PORT(i,m_total_tx_bps);
+ }
+ json+=m_template.dump_as_json("template");
+ json+="\"unknown\":0}}" ;
+}
+
+void CGlobalStats::DumpAllPorts(FILE *fd){
+
+ //fprintf (fd," Total-Tx-Pkts : %s \n",double_to_human_str((double)m_total_tx_pkts,"pkts",KBYE_1000).c_str());
+ //fprintf (fd," Total-Rx-Pkts : %s \n",double_to_human_str((double)m_total_rx_pkts,"pkts",KBYE_1000).c_str());
+
+ //fprintf (fd," Total-Tx-Bytes : %s \n",double_to_human_str((double)m_total_tx_bytes,"bytes",KBYE_1000).c_str());
+ //fprintf (fd," Total-Rx-Bytes : %s \n",double_to_human_str((double)m_total_rx_bytes,"bytes",KBYE_1000).c_str());
+
+
+
+ fprintf (fd," Cpu Utilization : %2.1f %% %2.1f Gb/core \n",m_cpu_util,(2*(m_tx_bps/1e9)*100.0/(m_cpu_util*m_threads)));
+ fprintf (fd," Platform_factor : %2.1f \n",m_platform_factor);
+ fprintf (fd," Total-Tx : %s ",double_to_human_str(m_tx_bps,"bps",KBYE_1000).c_str());
+ if ( CGlobalInfo::is_learn_mode() ) {
+ fprintf (fd," Nat_time_out : %8llu \n",m_total_nat_time_out);
+ }else{
+ fprintf (fd,"\n");
+ }
+
+
+ fprintf (fd," Total-Rx : %s ",double_to_human_str(m_rx_bps,"bps",KBYE_1000).c_str());
+ if ( CGlobalInfo::is_learn_mode() ) {
+ fprintf (fd," Nat_no_fid : %8llu \n",m_total_nat_no_fid);
+ }else{
+ fprintf (fd,"\n");
+ }
+
+ fprintf (fd," Total-PPS : %s ",double_to_human_str(m_tx_pps,"pps",KBYE_1000).c_str());
+ if ( CGlobalInfo::is_learn_mode() ) {
+ fprintf (fd," Total_nat_active: %8llu \n",m_total_nat_active);
+ }else{
+ fprintf (fd,"\n");
+ }
+
+ fprintf (fd," Total-CPS : %s ",double_to_human_str(m_tx_cps,"cps",KBYE_1000).c_str());
+ if ( CGlobalInfo::is_learn_mode() ) {
+ fprintf (fd," Total_nat_open : %8llu \n",m_total_nat_open);
+ }else{
+ fprintf (fd,"\n");
+ }
+ fprintf (fd,"\n");
+ fprintf (fd," Expected-PPS : %s ",double_to_human_str(m_tx_expected_pps,"pps",KBYE_1000).c_str());
+ if ( CGlobalInfo::is_learn_verify_mode() ) {
+ fprintf (fd," Nat_learn_errors: %8llu \n",m_total_nat_learn_error);
+ }else{
+ fprintf (fd,"\n");
+ }
+ fprintf (fd," Expected-CPS : %s \n",double_to_human_str(m_tx_expected_cps,"cps",KBYE_1000).c_str());
+ fprintf (fd," Expected-BPS : %s \n",double_to_human_str(m_tx_expected_bps,"bps",KBYE_1000).c_str());
+ fprintf (fd,"\n");
+ fprintf (fd," Active-flows : %8llu Clients : %8llu Socket-util : %3.4f %% \n",(uint64_t)m_active_flows,m_total_clients,m_socket_util);
+ fprintf (fd," Open-flows : %8llu Servers : %8llu Socket : %8llu Socket/Clients : %.1f \n",
+ (uint64_t)m_open_flows,
+ m_total_servers,
+ m_active_sockets,
+ (float)m_active_sockets/(float)m_total_clients);
+
+ if (m_total_alloc_error) {
+ fprintf (fd," Total_alloc_err : %llu \n",(uint64_t)m_total_alloc_error);
+ }
+ if ( m_total_queue_full ){
+ fprintf (fd," Total_queue_full : %llu \n",(uint64_t)m_total_queue_full);
+ }
+ if (m_total_queue_drop) {
+ fprintf (fd," Total_queue_drop : %llu \n",(uint64_t)m_total_queue_drop);
+ }
+
+ //m_template.Dump(fd);
+
+ fprintf (fd," drop-rate : %s \n",double_to_human_str(m_rx_drop_bps,"bps",KBYE_1000).c_str() );
+}
+
+
+void CGlobalStats::Dump(FILE *fd,DumpFormat mode){
+ int i;
+ int port_to_show=m_num_of_ports;
+ if (port_to_show>4) {
+ port_to_show=4;
+ fprintf (fd," per port - limited to 4 \n");
+ }
+
+
+ if ( mode== dmpSTANDARD ){
+ fprintf (fd," --------------- \n");
+ for (i=0; i<(int)port_to_show; i++) {
+ CPerPortStats * lp=&m_port[i];
+ fprintf(fd,"port : %d \n",(int)i);
+ fprintf(fd,"------------\n");
+ #define GS_DP_A4(f) fprintf(fd," %-40s : %llu \n",#f,lp->f)
+ #define GS_DP_A(f) if (lp->f) fprintf(fd," %-40s : %llu \n",#f,lp->f)
+ GS_DP_A4(opackets);
+ GS_DP_A4(obytes);
+ GS_DP_A4(ipackets);
+ GS_DP_A4(ibytes);
+ GS_DP_A(ierrors);
+ GS_DP_A(oerrors);
+ fprintf (fd," Tx : %s \n",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
+ }
+ }else{
+ fprintf(fd," %10s ","ports");
+ for (i=0; i<(int)port_to_show; i++) {
+ fprintf(fd,"| %15d ",i);
+ }
+ fprintf(fd,"\n");
+ fprintf(fd," -----------------------------------------------------------------------------------------\n");
+ std::string names[]={"opackets","obytes","ipackets","ibytes","ierrors","oerrors","Tx Bw"
+ };
+ for (i=0; i<7; i++) {
+ fprintf(fd," %10s ",names[i].c_str());
+ int j=0;
+ for (j=0; j<port_to_show;j++) {
+ CPerPortStats * lp=&m_port[j];
+ uint64_t cnt;
+ switch (i) {
+ case 0:
+ cnt=lp->opackets;
+ fprintf(fd,"| %15lu ",cnt);
+
+ break;
+ case 1:
+ cnt=lp->obytes;
+ fprintf(fd,"| %15lu ",cnt);
+
+ break;
+ case 2:
+ cnt=lp->ipackets;
+ fprintf(fd,"| %15lu ",cnt);
+
+ break;
+ case 3:
+ cnt=lp->ibytes;
+ fprintf(fd,"| %15lu ",cnt);
+
+ break;
+ case 4:
+ cnt=lp->ierrors;
+ fprintf(fd,"| %15lu ",cnt);
+
+ break;
+ case 5:
+ cnt=lp->oerrors;
+ fprintf(fd,"| %15lu ",cnt);
+
+ break;
+ case 6:
+ fprintf(fd,"| %15s ",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
+ break;
+ default:
+ cnt=0xffffff;
+ }
+ } /* ports */
+ fprintf(fd, "\n");
+ }/* fields*/
+ }
+
+
+}
+
+
+
+
+
+
+
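+/* top-level object of the DPDK data path: owns the physical ports, the
+ per-core CCoreEthIF objects, the latency manager and the ZMQ publisher, and
+ drives probe/configure/start as well as stats collection and dumping */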
+struct CGlobalPortCfg {
+
+public:
+ CGlobalPortCfg (){
+ m_max_ports=4;
+ m_max_cores=1;
+ m_cores_to_dual_ports=0;
+ m_max_queues_per_port=0;
+ m_test =NULL;
+ m_fl_was_init=false;
+ m_expected_pps=0.0;
+ m_expected_cps=0.0;
+ m_expected_bps=0.0;
+ }
+public:
+
+ bool Create();
+ void Delete();
+
+ int ixgbe_prob_init();
+ int cores_prob_init();
+ int queues_prob_init();
+ int ixgbe_start();
+ int ixgbe_rx_queue_flush();
+ int ixgbe_configure_mg();
+
+
+ bool is_all_links_are_up(bool dump=false);
+ int set_promisc_all(bool enable);
+
+ int reset_counters();
+
+
+public:
+ int start_send_master();
+ int run_in_core(virtual_thread_id_t virt_core_id);
+ int stop_core(virtual_thread_id_t virt_core_id);
+
+ int core_for_latency(){
+ if ( (!get_is_latency_thread_enable()) ){
+ return (-1);
+ }else{
+ return ( m_max_cores - 1 );
+ }
+
+ }
+
+ int run_in_laterncy_core();
+
+ int run_in_master();
+ int stop_master();
+
+
+ /* return the minimum number of DP cores needed to support the active ports;
+ this is the case of c==1, i.e. m_cores_mul==1
+ */
+ int get_base_num_cores(){
+ return (m_max_ports>>1);
+ }
+
+ int get_cores_tx(){
+ /* core 0 is the master; the last core is the latency core (when the
+ latency thread is enabled); everything in between is a TX (DP) core */
+ if ( (!get_is_latency_thread_enable()) ){
+ return (m_max_cores - 1 );
+ }else{
+ return (m_max_cores - BP_MASTER_AND_LATENCY );
+ }
+ }
+
+
+
+
+public:
+ int test_send();
+
+
+
+ int test_send1();
+ int rcv_send(int port,int queue_id);
+ int rcv_send_all(int queue_id);
+
+private:
+ bool is_all_cores_finished();
+
+ int test_send_pkts(uint16_t queue_id,
+ int pkt,
+ int port);
+
+
+ int create_pkt(uint8_t *pkt,int pkt_size);
+ int create_udp_pkt();
+ int create_sctp_pkt();
+
+
+
+public:
+ void dump_stats(FILE *fd,
+ std::string & json,CGlobalStats::DumpFormat format);
+
+ void dump_template_info(std::string & json);
+
+ bool sanity_check();
+
+ void update_stats(void);
+ void get_stats(CGlobalStats & stats);
+
+
+ void dump_post_test_stats(FILE *fd);
+
+ void dump_config(FILE *fd);
+
+public:
+ port_cfg_t m_port_cfg;
+
+ /*
+ example 1:
+ req=4, m_max_ports=4, c=1, l=1
+ ==>
+ m_max_cores = 4/2+1+1 = 4;
+ m_cores_mul = 1
+ */
+
+ uint32_t m_max_ports; /* active number of ports; supported options are 2,4,8,10,12 */
+ uint32_t m_max_cores; /* current number of cores, including master and latency ==> 1 (master) + c*(m_max_ports>>1) + 1 (latency) */
+ uint32_t m_cores_mul; /* cores multiplier: given c=4, m_cores_mul=4 */
+
+ uint32_t m_max_queues_per_port;
+ uint32_t m_cores_to_dual_ports; /* number of ports that will handle dual ports */
+ uint16_t m_latency_tx_queue_id;
+
+ // statistic
+ CPPSMeasure m_cps;
+ float m_expected_pps;
+ float m_expected_cps;
+ float m_expected_bps;//bps
+ float m_last_total_cps;
+
+
+
+ CPhyEthIF m_ports[BP_MAX_PORTS];
+ CCoreEthIF m_cores_vif[BP_MAX_CORES]; /* counted from 1, 2, 3 ...; core zero is reserved */
+
+
+ CParserOption m_po ;
+ CFlowGenList m_fl;
+ bool m_fl_was_init;
+
+ volatile uint8_t m_signal[BP_MAX_CORES] __rte_cache_aligned ;
+
+ CLatencyManager m_mg;
+ CTrexGlobalIoMode m_io_modes;
+
+private:
+
+private:
+ rte_mbuf_t * m_test;
+ uint64_t m_test_drop;
+
+ CLatencyHWPort m_latency_vports[BP_MAX_PORTS]; /* real hardware driver */
+ CLatencyVmPort m_latency_vm_vports[BP_MAX_PORTS]; /* vm driver */
+
+ CLatencyPktInfo m_latency_pkt;
+ CZMqPublisher m_zmq_publisher;
+};
+
+
+
+int CGlobalPortCfg::test_send1(){
+
+ CParserOption po ;
+ CFlowGenList fl;
+
+ po.cfg_file = "cap2/dns.yaml";
+ //po.cfg_file = "cap2/sfr3.yaml";
+ //po.cfg_file = "cap2/sfr4.yaml";
+ //po.cfg_file = "cap2/sfr.yaml";
+
+ po.preview.setVMode(3);
+ po.preview.setFileWrite(true);
+
+ fl.Create();
+
+ fl.load_from_yaml(po.cfg_file,1);
+ //fl.DumpPktSize();
+
+ fl.generate_p_thread_info(1);
+ CFlowGenListPerThread * lpt;
+
+ int i;
+ for (i=0; i<1; i++) {
+ lpt = fl.m_threads_info[i];
+ //CNullIF * erf_vif = new CNullIF();
+ CVirtualIF * erf_vif = &m_cores_vif[0];
+ lpt->set_vif(erf_vif);
+ lpt->generate_erf("hey",po.preview);
+ lpt->m_node_gen.DumpHist(stdout);
+ lpt->DumpStats(stdout);
+ }
+
+ m_cores_vif[0].flush_tx_queue();
+ delay(1000);
+ //fprintf(stdout," drop : %llu \n",m_test_drop);
+
+ m_cores_vif[0].DumpCoreStats(stdout);
+ m_cores_vif[0].DumpIfStats(stdout);
+
+ fl.Delete();
+ return (0);
+}
+
+
+int CGlobalPortCfg::rcv_send(int port,int queue_id){
+
+ CPhyEthIF * lp=&m_ports[port];
+ rte_mbuf_t * rx_pkts[32];
+ printf(" test rx port:%d queue:%d \n",port,queue_id);
+ printf(" --------------\n");
+ uint16_t cnt=lp->rx_burst(queue_id,rx_pkts,32);
+
+ int i;
+ for (i=0; i<(int)cnt;i++) {
+ rte_mbuf_t * m=rx_pkts[i];
+ int pkt_size=rte_pktmbuf_pkt_len(m);
+ char *p=rte_pktmbuf_mtod(m, char*);
+ utl_DumpBuffer(stdout,p,pkt_size,0);
+ rte_pktmbuf_free(m);
+ }
+ return (0);
+}
+
+int CGlobalPortCfg::rcv_send_all(int queue_id){
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ rcv_send(i,queue_id);
+ }
+ return (0);
+}
+
+
+
+
+int CGlobalPortCfg::test_send(){
+ int i;
+
+ CPhyEthIF * lp=&m_ports[0];
+
+ //set_promisc_all(true);
+ //create_sctp_pkt();
+ create_udp_pkt();
+
+ CRx_check_header rx_check_header;
+ rx_check_header.m_time_stamp=0x1234567;
+ rx_check_header.m_option_type=RX_CHECK_V4_OPT_TYPE;
+ rx_check_header.m_option_len=RX_CHECK_V4_OPT_LEN;
+ rx_check_header.m_magic=2;
+ rx_check_header.m_pkt_id=7;
+ rx_check_header.m_flow_id=9;
+ rx_check_header.m_flags=11;
+
+
+ assert(m_test);
+ for (i=0; i<1; i++) {
+ //test_send_pkts(0,1,0);
+ //test_send_pkts(m_latency_tx_queue_id,12,0);
+ //test_send_pkts(m_latency_tx_queue_id,1,1);
+ //test_send_pkts(m_latency_tx_queue_id,1,2);
+ //test_send_pkts(m_latency_tx_queue_id,1,3);
+ test_send_pkts(0,1,0);
+ test_send_pkts(0,1,1);
+ //test_send_pkts(2,1,0);
+
+
+ //test_send_pkts(0,1,1);
+ //test_send_pkts(0,1,2);
+ //test_send_pkts(0,1,3);
+
+ /*test_send_pkts(2,1,0);
+ test_send_pkts(2,1,1);
+ test_send_pkts(2,1,2);
+ test_send_pkts(2,1,3);*/
+
+ /*delay(1000);
+ fprintf(stdout," --------------------------------\n");
+ fprintf(stdout," after sending to port %d \n",i);
+ fprintf(stdout," --------------------------------\n");
+ dump_stats(stdout);
+ fprintf(stdout," --------------------------------\n");*/
+ }
+ //test_send_pkts(m_latency_tx_queue_id,1,1);
+ //test_send_pkts(m_latency_tx_queue_id,1,2);
+ //test_send_pkts(m_latency_tx_queue_id,1,3);
+
+
+ printf(" ---------\n");
+ printf(" rx queue 0 \n");
+ printf(" ---------\n");
+ rcv_send_all(0);
+ printf("\n\n");
+
+ printf(" ---------\n");
+ printf(" rx queue 1 \n");
+ printf(" ---------\n");
+ rcv_send_all(1);
+ printf(" ---------\n");
+
+ delay(1000);
+
+ #if 1
+ int j=0;
+ for (j=0; j<m_max_ports; j++) {
+ CPhyEthIF * lp=&m_ports[j];
+ printf(" port : %d \n",j);
+ printf(" ----------\n");
+
+ lp->update_counters();
+ lp->get_stats().Dump(stdout);
+ lp->dump_stats_extended(stdout);
+ }
+ /*for (j=0; j<4; j++) {
+ CPhyEthIF * lp=&m_ports[j];
+ lp->dump_stats_extended(stdout);
+ }*/
+ #endif
+
+ fprintf(stdout," drop : %llu \n",m_test_drop);
+ return (0);
+}
+
+
+
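+/* canned raw frames (Ethernet + IPv4 headers followed by a payload) used by
+ the test_send()/create_pkt() self-test path; sctp_pkt1 appears to carry the
+ latency-probe style magic/counter/seq fields */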
+const uint8_t udp_pkt[]={
+ 0x00,0x00,0x00,0x01,0x00,0x00,
+ 0x00,0x00,0x00,0x01,0x00,0x00,
+ 0x08,0x00,
+
+ 0x45,0x00,0x00,0x81,
+ 0xaf,0x7e,0x00,0x00,
+ 0x12,0x11,0xd9,0x23,
+ 0x01,0x01,0x01,0x01,
+ 0x3d,0xad,0x72,0x1b,
+
+ 0x11,0x11,
+ 0x11,0x11,
+
+ 0x00,0x6d,
+ 0x00,0x00,
+
+ 0x64,0x31,0x3a,0x61,
+ 0x64,0x32,0x3a,0x69,0x64,
+ 0x32,0x30,0x3a,0xd0,0x0e,
+ 0xa1,0x4b,0x7b,0xbd,0xbd,
+ 0x16,0xc6,0xdb,0xc4,0xbb,0x43,
+ 0xf9,0x4b,0x51,0x68,0x33,0x72,
+ 0x20,0x39,0x3a,0x69,0x6e,0x66,0x6f,
+ 0x5f,0x68,0x61,0x73,0x68,0x32,0x30,0x3a,0xee,0xc6,0xa3,
+ 0xd3,0x13,0xa8,0x43,0x06,0x03,0xd8,0x9e,0x3f,0x67,0x6f,
+ 0xe7,0x0a,0xfd,0x18,0x13,0x8d,0x65,0x31,0x3a,0x71,0x39,
+ 0x3a,0x67,0x65,0x74,0x5f,0x70,0x65,0x65,0x72,0x73,0x31,
+ 0x3a,0x74,0x38,0x3a,0x3d,0xeb,0x0c,0xbf,0x0d,0x6a,0x0d,
+ 0xa5,0x31,0x3a,0x79,0x31,0x3a,0x71,0x65,0x87,0xa6,0x7d,
+ 0xe7
+};
+
+
+const uint8_t sctp_pkt1[]={
+
+ 0x00,0x00,0x00,0x01,0x00,0x00,
+ 0x00,0x00,0x00,0x01,0x00,0x00,
+ 0x08,0x00,
+
+ 0x45,0x02,0x00,0x30,
+ 0x00,0x00,0x40,0x00,
+ 0x40,0x84,0xbd,0x04,
+ 0x01,0x01,0x01,0x01, //sIP
+ 0x02,0x02,0x02,0x02, //DIP
+
+ 0x80,0x44,//SPORT
+ 0x00,0x50,//DPORT
+
+ 0x00,0x00,0x00,0x00, //checksum
+
+ 0x11,0x22,0x33,0x44, // magic
+ 0x00,0x00,0x00,0x00, //64 bit counter
+ 0x00,0x00,0x00,0x00,
+ 0x00,0x01,0xa0,0x00, //seq
+ 0x00,0x00,0x00,0x00,
+
+};
+
+
+
+
+
+int CGlobalPortCfg::create_pkt(uint8_t *pkt,int pkt_size){
+ rte_mempool_t * mp= CGlobalInfo::m_mem_pool[0].m_big_mbuf_pool ;
+
+ rte_mbuf_t * m=rte_pktmbuf_alloc(mp);
+ if ( unlikely(m==0) ) {
+ printf("ERROR no packets \n");
+ return (0);
+ }
+ char *p=rte_pktmbuf_append(m, pkt_size);
+ assert(p);
+ /* set pkt data */
+ memcpy(p,pkt,pkt_size);
+ //m->ol_flags = PKT_TX_VLAN_PKT;
+ //m->pkt.vlan_tci =200;
+
+ m_test = m;
+
+ return (0);
+}
+
+int CGlobalPortCfg::create_udp_pkt(){
+ return (create_pkt((uint8_t*)udp_pkt,sizeof(udp_pkt)));
+}
+
+int CGlobalPortCfg::create_sctp_pkt(){
+ return (create_pkt((uint8_t*)sctp_pkt1,sizeof(sctp_pkt1)));
+}
+
+
+/* test by sending 10 packets ...*/
+int CGlobalPortCfg::test_send_pkts(uint16_t queue_id,
+ int pkt,
+ int port){
+
+ CPhyEthIF * lp=&m_ports[port];
+ rte_mbuf_t * tx_pkts[32];
+ if (pkt >32 ) {
+ pkt =32;
+ }
+
+ int i;
+ for (i=0; i<pkt; i++) {
+ rte_mbuf_refcnt_update(m_test,1);
+ tx_pkts[i]=m_test;
+ }
+ uint16_t res=lp->tx_burst(queue_id,tx_pkts,pkt);
+ if ((pkt-res)>0) {
+ m_test_drop+=(pkt-res);
+ }
+ return (0);
+}
+
+
+
+
+
+int CGlobalPortCfg::set_promisc_all(bool enable){
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CPhyEthIF * _if=&m_ports[i];
+ _if->set_promiscuous(enable);
+ }
+ return (0);
+}
+
+
+
+int CGlobalPortCfg::reset_counters(){
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CPhyEthIF * _if=&m_ports[i];
+ _if->stats_clear();
+ }
+ return (0);
+}
+
+
+bool CGlobalPortCfg::is_all_links_are_up(bool dump){
+ bool all_link_are=true;
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CPhyEthIF * _if=&m_ports[i];
+ _if->update_link_status();
+ if ( dump ){
+ _if->dump_stats(stdout);
+ }
+ if ( _if->is_link_up() == false){
+ all_link_are=false;
+ break;
+ }
+ }
+ return (all_link_are);
+}
+
+
+
+int CGlobalPortCfg::ixgbe_rx_queue_flush(){
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CPhyEthIF * _if=&m_ports[i];
+ _if->flush_rx_queue();
+ }
+ return (0);
+}
+
+
+int CGlobalPortCfg::ixgbe_configure_mg(void){
+ int i;
+ CLatencyManagerCfg mg_cfg;
+ mg_cfg.m_max_ports = m_max_ports;
+
+ uint32_t latency_rate=CGlobalInfo::m_options.m_latency_rate;
+
+ if ( latency_rate ) {
+ mg_cfg.m_cps = (double)latency_rate ;
+ }else{
+ mg_cfg.m_cps = 100.0;
+ }
+
+ if ( get_vm_one_queue_enable() ) {
+ /* vm mode, indirect queues */
+ for (i=0; i<m_max_ports; i++) {
+
+ CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
+
+ uint8_t thread_id = (i>>1);
+
+ CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
+ m_latency_vm_vports[i].Create((uint8_t)i,r,&m_mg);
+
+ mg_cfg.m_ports[i] =&m_latency_vm_vports[i];
+ }
+
+ }else{
+ for (i=0; i<m_max_ports; i++) {
+ CPhyEthIF * _if=&m_ports[i];
+ _if->dump_stats(stdout);
+ m_latency_vports[i].Create(_if,m_latency_tx_queue_id,1);
+
+ mg_cfg.m_ports[i] =&m_latency_vports[i];
+ }
+ }
+
+
+ m_mg.Create(&mg_cfg);
+ m_mg.set_mask(CGlobalInfo::m_options.m_latency_mask);
+ return (0);
+}
+
+
+int CGlobalPortCfg::ixgbe_start(void){
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+
+ CPhyEthIF * _if=&m_ports[i];
+ _if->Create((uint8_t)i);
+ /* last TX queue is for latency check */
+ if ( get_vm_one_queue_enable() ) {
+ /* one tx one rx */
+ _if->configure(1,
+ 1,
+ &m_port_cfg.m_port_conf);
+
+ /* will not be used */
+ m_latency_tx_queue_id= m_cores_to_dual_ports;
+
+ socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i);
+ assert(CGlobalInfo::m_mem_pool[socket_id].m_big_mbuf_pool);
+
+
+
+ _if->set_rx_queue(0);
+ _if->rx_queue_setup(0,
+ RTE_TEST_RX_DESC_VM_DEFAULT,
+ socket_id,
+ &m_port_cfg.m_rx_conf,
+ CGlobalInfo::m_mem_pool[socket_id].m_big_mbuf_pool);
+
+ int qid;
+ for ( qid=0; qid<(m_max_queues_per_port); qid++) {
+ _if->tx_queue_setup((uint16_t)qid,
+ RTE_TEST_TX_DESC_VM_DEFAULT ,
+ socket_id,
+ &m_port_cfg.m_tx_conf);
+
+ }
+
+ }else{
+ _if->configure(2,
+ m_cores_to_dual_ports+1,
+ &m_port_cfg.m_port_conf);
+
+ /* the latency queue for SCTP */
+ m_latency_tx_queue_id= m_cores_to_dual_ports;
+
+ socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i);
+ assert(CGlobalInfo::m_mem_pool[socket_id].m_big_mbuf_pool);
+
+
+ /* drop queue */
+ _if->rx_queue_setup(0,
+ RTE_TEST_RX_DESC_DEFAULT,
+ socket_id,
+ &m_port_cfg.m_rx_conf,
+ CGlobalInfo::m_mem_pool[socket_id].m_big_mbuf_pool);
+
+
+ /* set the filter queue */
+ _if->set_rx_queue(1);
+ /* sctp ring is 1 */
+ _if->rx_queue_setup(1,
+ RTE_TEST_RX_LATENCY_DESC_DEFAULT,
+ socket_id,
+ &m_port_cfg.m_rx_conf,
+ CGlobalInfo::m_mem_pool[socket_id].m_big_mbuf_pool);
+
+ int qid;
+ for ( qid=0; qid<(m_max_queues_per_port+1); qid++) {
+ _if->tx_queue_setup((uint16_t)qid,
+ RTE_TEST_TX_DESC_DEFAULT ,
+ socket_id,
+ &m_port_cfg.m_tx_conf);
+
+ }
+
+ }
+
+
+ _if->stats_clear();
+
+ _if->start();
+ _if->configure_rx_drop_queue();
+ _if->configure_rx_duplicate_rules();
+
+ _if->disable_flow_control();
+
+ _if->update_link_status();
+
+ _if->dump_link(stdout);
+
+ _if->add_mac((char *)CGlobalInfo::m_options.get_src_mac_addr(i));
+
+ fflush(stdout);
+ }
+
+ if ( !is_all_links_are_up() ){
+ /* wait for ports to be stable */
+ get_ex_drv()->wait_for_stable_link();
+
+ if ( !is_all_links_are_up(true) ){
+ rte_exit(EXIT_FAILURE, " "
+ " one of the link is down \n");
+ }
+ }
+
+ ixgbe_rx_queue_flush();
+
+
+ ixgbe_configure_mg();
+
+
+ /* core 0 - control
+ core 1 - port 0-0,1-0,
+ core 2 - port 2-0,3-0,
+ core 3 - port 0-1,1-1,
+ core 4 - port 2-1,3-1,
+
+ */
+ int port_offset=0;
+ int queue_offset=0;
+ for (i=0; i<get_cores_tx(); i++) {
+ int j=(i+1);
+ int queue_id=((j-1)/get_base_num_cores() ); /* for the first min core queue 0 , then queue 1 etc */
+ m_cores_vif[j].Create(j,
+ queue_id,
+ &m_ports[port_offset], /* 0,2*/
+ queue_id,
+ &m_ports[port_offset+1] /*1,3*/
+ );
+ port_offset+=2;
+ if (port_offset == m_max_ports) {
+ port_offset = 0;
+ }
+ }
+
+ fprintf(stdout," -------------------------------\n");
+ CCoreEthIF::DumpIfCfgHeader(stdout);
+ for (i=0; i<get_cores_tx(); i++) {
+ m_cores_vif[i+1].DumpIfCfg(stdout);
+ }
+ fprintf(stdout," -------------------------------\n");
+ return (0);
+}
+
+
+bool CGlobalPortCfg::Create(){
+
+ if ( !m_zmq_publisher.Create( CGlobalInfo::m_options.m_zmq_port,
+ !CGlobalInfo::m_options.preview.get_zmq_publish_enable() ) ){
+ return (false);
+ }
+
+
+ /* We load the YAML twice;
+ this is the first pass, used only to update the global flags */
+ CFlowsYamlInfo pre_yaml_info;
+ pre_yaml_info.load_from_yaml_file(CGlobalInfo::m_options.cfg_file);
+
+ if ( pre_yaml_info.m_vlan_info.m_enable ){
+ CGlobalInfo::m_options.preview.set_vlan_mode_enable(true);
+ }
+ /* End update pre flags */
+
+ ixgbe_prob_init();
+ cores_prob_init();
+ queues_prob_init();
+ /* allocate rings */
+ assert( CMsgIns::Ins()->Create(get_cores_tx()) );
+
+ if ( sizeof(CGenNodeNatInfo) != sizeof(CGenNode) ) {
+ printf("ERROR sizeof(CGenNodeNatInfo) %d != sizeof(CGenNode) %d must be the same size \n",sizeof(CGenNodeNatInfo),sizeof(CGenNode));
+ assert(0);
+ }
+
+ if ( sizeof(CGenNodeLatencyPktInfo) != sizeof(CGenNode) ) {
+ printf("ERROR sizeof(CGenNodeLatencyPktInfo) %d != sizeof(CGenNode) %d must be the same size \n",sizeof(CGenNodeLatencyPktInfo),sizeof(CGenNode));
+ assert(0);
+ }
+
+ /* allocate the memory */
+
+ uint32_t rx_mbuf = 0 ;
+
+ if ( get_vm_one_queue_enable() ) {
+ rx_mbuf = (m_max_ports * RTE_TEST_RX_DESC_VM_DEFAULT);
+ }else{
+ rx_mbuf = (m_max_ports * (RTE_TEST_RX_LATENCY_DESC_DEFAULT+RTE_TEST_RX_DESC_DEFAULT));
+ }
+
+ CGlobalInfo::init_pools(rx_mbuf);
+ ixgbe_start();
+ dump_config(stdout);
+ return (true);
+
+}
+void CGlobalPortCfg::Delete(){
+ m_zmq_publisher.Delete();
+}
+
+
+
+int CGlobalPortCfg::ixgbe_prob_init(void){
+
+ uint8_t nb_ports;
+
+
+ m_max_ports = rte_eth_dev_count();
+ if (m_max_ports == 0)
+ rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
+ printf(" number of ports founded : %d \n",m_max_ports);
+
+
+
+ if ( CGlobalInfo::m_options.get_expected_ports() >BP_MAX_PORTS ){
+ rte_exit(EXIT_FAILURE, " maximum ports supported are %d, use the configuration file to set the expected number of ports \n",BP_MAX_PORTS);
+ }
+
+ if ( CGlobalInfo::m_options.get_expected_ports() > m_max_ports ){
+ rte_exit(EXIT_FAILURE, " there are %d ports you expected more %d,use the configuration file to set the expected number of ports \n",
+ m_max_ports,
+ CGlobalInfo::m_options.get_expected_ports());
+ }
+ if (CGlobalInfo::m_options.get_expected_ports() < m_max_ports ) {
+ /* limit the number of ports */
+ m_max_ports=CGlobalInfo::m_options.get_expected_ports();
+ }
+ assert(m_max_ports <= BP_MAX_PORTS);
+
+ if ( m_max_ports %2 !=0 ) {
+ rte_exit(EXIT_FAILURE, " numbe of ports %d should be even, mask the one port in the configuration file \n, ",
+ m_max_ports);
+
+ }
+
+ struct rte_eth_dev_info dev_info;
+ rte_eth_dev_info_get((uint8_t) 0,&dev_info);
+
+ if ( CGlobalInfo::m_options.preview.getVMode() > 0){
+ printf("\n\n");
+ printf("if_index : %d \n",dev_info.if_index);
+ printf("driver name : %s \n",dev_info.driver_name);
+ printf("min_rx_bufsize : %d \n",dev_info.min_rx_bufsize);
+ printf("max_rx_pktlen : %d \n",dev_info.max_rx_pktlen);
+ printf("max_rx_queues : %d \n",dev_info.max_rx_queues);
+ printf("max_tx_queues : %d \n",dev_info.max_tx_queues);
+ printf("max_mac_addrs : %d \n",dev_info.max_mac_addrs);
+
+ printf("rx_offload_capa : %x \n",dev_info.rx_offload_capa);
+ printf("tx_offload_capa : %x \n",dev_info.tx_offload_capa);
+ }
+
+
+
+ if ( !CTRexExtendedDriverDb::Ins()->is_driver_exists(dev_info.driver_name) ){
+ printf(" ERROR driver name %s is not supported \n",dev_info.driver_name);
+ }
+
+ int i;
+ struct rte_eth_dev_info dev_info1;
+
+ for (i=1; i<m_max_ports; i++) {
+ rte_eth_dev_info_get((uint8_t) i,&dev_info1);
+ if ( strcmp(dev_info1.driver_name,dev_info.driver_name)!=0) {
+ printf(" ERROR all device should have the same type %s != %s \n",dev_info1.driver_name,dev_info.driver_name);
+ exit(1);
+ }
+ }
+
+ CTRexExtendedDriverDb::Ins()->set_driver_name(dev_info.driver_name);
+
+ /* register driver callback to convert multi-seg mbufs to a single seg */
+ if (strcmp(dev_info.driver_name,"rte_vmxnet3_pmd")==0 ) {
+ vmxnet3_xmit_set_callback(rte_mbuf_convert_to_one_seg);
+ }
+
+
+ m_port_cfg.update_var();
+
+ if ( get_is_rx_filter_enable() ){
+ m_port_cfg.update_global_config_fdir();
+ }
+
+ if ( get_vm_one_queue_enable() ) {
+ /* verify that we have only one thread/core per dual-interface */
+ if ( CGlobalInfo::m_options.preview.getCores()>1 ) {
+ printf(" ERROR the number of cores should be 1 when the driver supports only one tx queue and one rx queue \n");
+ exit(1);
+ }
+ }
+ return (0);
+}
+
+int CGlobalPortCfg::cores_prob_init(){
+ m_max_cores = rte_lcore_count();
+ assert(m_max_cores>0);
+ return (0);
+}
+
+int CGlobalPortCfg::queues_prob_init(){
+
+ if (m_max_cores < 2) {
+ rte_exit(EXIT_FAILURE, "number of cores should be at least 3 \n");
+ }
+
+ if ( !( (m_max_ports == 4) || (m_max_ports == 2) || (m_max_ports == 8) || (m_max_ports == 6) ) ){
+ rte_exit(EXIT_FAILURE, "supported number of ports are 2-8 you have %d \n",m_max_ports);
+ }
+
+ assert((m_max_ports>>1) <= get_cores_tx() );
+
+
+
+
+ m_cores_mul = CGlobalInfo::m_options.preview.getCores();
+
+ m_cores_to_dual_ports = m_cores_mul;
+
+ /* core 0 - control
+ -core 1 - port 0/1
+ -core 2 - port 2/3
+ -core 3 - port 0/1
+ -core 4 - port 2/3
+
+ m_cores_to_dual_ports = 2;
+ */
+
+ /* number of queues - one per core per dual-port */
+ m_max_queues_per_port = m_cores_to_dual_ports;
+
+ if (m_max_queues_per_port > BP_MAX_TX_QUEUE) {
+ rte_exit(EXIT_FAILURE,
+ "maximum number of queue should be maximum %d \n",BP_MAX_TX_QUEUE);
+ }
+
+ assert(m_max_queues_per_port>0);
+ return (0);
+}
+
+
+void CGlobalPortCfg::dump_config(FILE *fd){
+ fprintf(fd," number of ports : %u \n",m_max_ports);
+ fprintf(fd," max cores for 2 ports : %u \n",m_cores_to_dual_ports);
+ fprintf(fd," max queue per port : %u \n",m_max_queues_per_port);
+}
+
+
+
+void CGlobalPortCfg::dump_post_test_stats(FILE *fd){
+ uint64_t pkt_out=0;
+ uint64_t pkt_out_bytes=0;
+ uint64_t pkt_in_bytes=0;
+ uint64_t pkt_in=0;
+ uint64_t sw_pkt_out=0;
+ uint64_t sw_pkt_out_err=0;
+ uint64_t sw_pkt_out_bytes=0;
+
+ int i;
+ for (i=0; i<get_cores_tx(); i++) {
+ CCoreEthIF * erf_vif = &m_cores_vif[i+1];
+ CVirtualIFPerSideStats stats;
+ erf_vif->GetCoreCounters(&stats);
+ sw_pkt_out += stats.m_tx_pkt;
+ sw_pkt_out_err += stats.m_tx_drop +stats.m_tx_queue_full +stats.m_tx_alloc_error ;
+ sw_pkt_out_bytes +=stats.m_tx_bytes;
+ }
+
+
+ for (i=0; i<m_max_ports; i++) {
+ CPhyEthIF * _if=&m_ports[i];
+ pkt_in +=_if->get_stats().ipackets;
+ pkt_in_bytes +=_if->get_stats().ibytes;
+ pkt_out +=_if->get_stats().opackets;
+ pkt_out_bytes +=_if->get_stats().obytes;
+ }
+ if ( !CGlobalInfo::m_options.is_latency_disabled() ){
+ sw_pkt_out += m_mg.get_total_pkt();
+ sw_pkt_out_bytes +=m_mg.get_total_bytes();
+ }
+
+
+ fprintf (fd," summary stats \n");
+ fprintf (fd," -------------- \n");
+
+ fprintf (fd," Total-pkt-drop : %d pkts \n",(int64_t)(pkt_out-pkt_in));
+ fprintf (fd," Total-tx-bytes : %llu bytes \n",pkt_out_bytes);
+ fprintf (fd," Total-tx-sw-bytes : %llu bytes \n",sw_pkt_out_bytes);
+ fprintf (fd," Total-rx-bytes : %llu byte \n",pkt_in_bytes);
+
+ fprintf (fd," \n");
+
+ fprintf (fd," Total-tx-pkt : %llu pkts \n",pkt_out);
+ fprintf (fd," Total-rx-pkt : %llu pkts \n",pkt_in);
+ fprintf (fd," Total-sw-tx-pkt : %llu pkts \n",sw_pkt_out);
+ fprintf (fd," Total-sw-err : %llu pkts \n",sw_pkt_out_err);
+
+
+ if ( !CGlobalInfo::m_options.is_latency_disabled() ){
+ fprintf (fd," maximum-latency : %.0f usec \n",m_mg.get_max_latency());
+ fprintf (fd," average-latency : %.0f usec \n",m_mg.get_avr_latency());
+ fprintf (fd," latency-any-error : %s \n",m_mg.is_any_error()?"ERROR":"OK");
+ }
+
+
+}
+
+
+void CGlobalPortCfg::update_stats(){
+
+ int i;
+ for (i=0; i<m_max_ports; i++) {
+ CPhyEthIF * _if=&m_ports[i];
+ _if->update_counters();
+ }
+ uint64_t total_open_flows=0;
+
+
+ CFlowGenListPerThread * lpt;
+ for (i=0; i<get_cores_tx(); i++) {
+ lpt = m_fl.m_threads_info[i];
+ total_open_flows += lpt->m_stats.m_total_open_flows ;
+ }
+ m_last_total_cps = m_cps.add(total_open_flows);
+ m_fl.Update();
+
+}
+
+
+void CGlobalPortCfg::get_stats(CGlobalStats & stats){
+
+ int i;
+ float total_tx=0.0;
+ float total_rx=0.0;
+ float total_pps=0.0;
+
+ stats.m_total_tx_pkts = 0;
+ stats.m_total_rx_pkts = 0;
+ stats.m_total_tx_bytes = 0;
+ stats.m_total_rx_bytes = 0;
+ stats.m_total_alloc_error=0;
+ stats.m_total_queue_full=0;
+ stats.m_total_queue_drop=0;
+
+
+ stats.m_num_of_ports = m_max_ports;
+ stats.m_cpu_util = m_fl.GetCpuUtil();
+ stats.m_threads = m_fl.m_threads_info.size();
+
+ for (i=0; i<m_max_ports; i++) {
+ CPhyEthIF * _if=&m_ports[i];
+ CPerPortStats * stp=&stats.m_port[i];
+
+ CPhyEthIFStats & st =_if->get_stats();
+
+ stp->opackets = st.opackets;
+ stp->obytes = st.obytes;
+ stp->ipackets = st.ipackets;
+ stp->ibytes = st.ibytes;
+ stp->ierrors = st.ierrors;
+ stp->oerrors = st.oerrors;
+ stp->m_total_tx_bps = _if->get_last_tx_rate()*_1Mb_DOUBLE;
+
+ stats.m_total_tx_pkts += st.opackets;
+ stats.m_total_rx_pkts += st.ipackets;
+ stats.m_total_tx_bytes += st.obytes;
+ stats.m_total_rx_bytes += st.ibytes;
+
+ total_tx +=_if->get_last_tx_rate();
+ total_rx +=_if->get_last_rx_rate();
+ total_pps +=_if->get_last_pps_rate();
+
+ }
+
+ uint64_t total_open_flows=0;
+ uint64_t total_active_flows=0;
+
+ uint64_t total_clients=0;
+ uint64_t total_servers=0;
+ uint64_t active_sockets=0;
+ uint64_t total_sockets=0;
+
+
+ uint64_t total_nat_time_out =0;
+ uint64_t total_nat_no_fid =0;
+ uint64_t total_nat_active =0;
+ uint64_t total_nat_open =0;
+ uint64_t total_nat_learn_error=0;
+
+
+ CFlowGenListPerThread * lpt;
+ stats.m_template.Clear();
+
+ for (i=0; i<get_cores_tx(); i++) {
+ lpt = m_fl.m_threads_info[i];
+ total_open_flows += lpt->m_stats.m_total_open_flows ;
+ total_active_flows += (lpt->m_stats.m_total_open_flows-lpt->m_stats.m_total_close_flows) ;
+
+ stats.m_total_alloc_error += lpt->m_node_gen.m_v_if->m_stats[0].m_tx_alloc_error+
+ lpt->m_node_gen.m_v_if->m_stats[1].m_tx_alloc_error;
+ stats.m_total_queue_full +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_queue_full+
+ lpt->m_node_gen.m_v_if->m_stats[1].m_tx_queue_full;
+
+ stats.m_total_queue_drop +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_drop+
+ lpt->m_node_gen.m_v_if->m_stats[1].m_tx_drop;
+
+ stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[0].m_template);
+ stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[1].m_template);
+
+
+ total_clients += lpt->m_smart_gen.getTotalClients();
+ total_servers += lpt->m_smart_gen.getTotalServers();
+ active_sockets += lpt->m_smart_gen.ActiveSockets();
+ total_sockets += lpt->m_smart_gen.MaxSockets();
+
+ total_nat_time_out +=lpt->m_stats.m_nat_flow_timeout;
+ total_nat_no_fid +=lpt->m_stats.m_nat_lookup_no_flow_id ;
+ total_nat_active +=lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_remove_flow_id;
+ total_nat_open +=lpt->m_stats.m_nat_lookup_add_flow_id;
+ total_nat_learn_error +=lpt->m_stats.m_nat_flow_learn_error;
+ }
+
+ stats.m_total_nat_time_out = total_nat_time_out;
+ stats.m_total_nat_no_fid = total_nat_no_fid;
+ stats.m_total_nat_active = total_nat_active;
+ stats.m_total_nat_open = total_nat_open;
+ stats.m_total_nat_learn_error = total_nat_learn_error;
+
+ stats.m_total_clients = total_clients;
+ stats.m_total_servers = total_servers;
+ stats.m_active_sockets = active_sockets;
+ stats.m_socket_util =100.0*(double)active_sockets/(double)total_sockets;
+
+
+ float drop_rate=total_tx-total_rx;
+ if ( (drop_rate<0.0) || (drop_rate < 0.1*total_tx ) ) {
+ drop_rate=0.0;
+ }
+ float pf =CGlobalInfo::m_options.m_platform_factor;
+ stats.m_platform_factor = pf;
+
+ stats.m_active_flows = total_active_flows*pf;
+ stats.m_open_flows = total_open_flows*pf;
+ stats.m_rx_drop_bps = drop_rate*pf *_1Mb_DOUBLE;
+
+ stats.m_tx_bps = total_tx*pf*_1Mb_DOUBLE;
+ stats.m_rx_bps = total_rx*pf*_1Mb_DOUBLE;
+ stats.m_tx_pps = total_pps*pf;
+ stats.m_tx_cps = m_last_total_cps*pf;
+
+ stats.m_tx_expected_cps = m_expected_cps*pf;
+ stats.m_tx_expected_pps = m_expected_pps*pf;
+ stats.m_tx_expected_bps = m_expected_bps*pf;
+}
+
+bool CGlobalPortCfg::sanity_check(){
+
+ CFlowGenListPerThread * lpt;
+ uint32_t errors=0;
+ int i;
+ for (i=0; i<get_cores_tx(); i++) {
+ lpt = m_fl.m_threads_info[i];
+ errors += lpt->m_smart_gen.getErrorAllocationCounter();
+ }
+
+ if ( errors ) {
+ printf(" ERRORs sockets allocation errors! \n");
+ printf(" you should allocate more clients in the pool \n");
+ return(true);
+ }
+ return ( false);
+}
+
+
+/* dump the template info */
+void CGlobalPortCfg::dump_template_info(std::string & json){
+ CFlowGenListPerThread * lpt = m_fl.m_threads_info[0];
+ CFlowsYamlInfo * yaml_info=&lpt->m_yaml_info;
+
+ json="{\"name\":\"template_info\",\"type\":0,\"data\":[";
+ int i;
+ for (i=0; i<yaml_info->m_vec.size()-1; i++) {
+ CFlowYamlInfo * r=&yaml_info->m_vec[i] ;
+ json+="\""+ r->m_name+"\"";
+ json+=",";
+ }
+ json+="\""+yaml_info->m_vec[i].m_name+"\"";
+ json+="]}" ;
+}
+
+void CGlobalPortCfg::dump_stats(FILE *fd,std::string & json,
+ CGlobalStats::DumpFormat format){
+ CGlobalStats stats;
+ update_stats();
+ get_stats(stats);
+ if (format==CGlobalStats::dmpTABLE) {
+ if ( m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ){
+ switch (m_io_modes.m_pp_mode ){
+ case CTrexGlobalIoMode::ppDISABLE:
+ fprintf(fd,"\n+Per port stats disabled \n");
+ break;
+ case CTrexGlobalIoMode::ppTABLE:
+ fprintf(fd,"\n-Per port stats table \n");
+ stats.Dump(fd,CGlobalStats::dmpTABLE);
+ break;
+ case CTrexGlobalIoMode::ppSTANDARD:
+ fprintf(fd,"\n-Per port stats - standard\n");
+ stats.Dump(fd,CGlobalStats::dmpSTANDARD);
+ break;
+ };
+
+ switch (m_io_modes.m_ap_mode ){
+ case CTrexGlobalIoMode::apDISABLE:
+ fprintf(fd,"\n+Global stats disabled \n");
+ break;
+ case CTrexGlobalIoMode::apENABLE:
+ fprintf(fd,"\n-Global stats enabled \n");
+ stats.DumpAllPorts(fd);
+ break;
+ };
+ }
+ }else{
+ /* at exit, always dump it in standard mode for scripts */
+ stats.Dump(fd,format);
+ stats.DumpAllPorts(fd);
+ }
+ stats.dump_json(json);
+}
+
+
+int CGlobalPortCfg::run_in_master(){
+
+ std::string json;
+ bool was_stopped=false;
+ while ( true ) {
+
+ if ( CGlobalInfo::m_options.preview.get_no_keyboard() ==false ){
+ if ( m_io_modes.handle_io_modes() ){
+ was_stopped=true;
+ break;
+ }
+ }
+
+ if ( sanity_check() ){
+ printf(" Test was stopped \n");
+ was_stopped=true;
+ break;
+ }
+ if (m_io_modes.m_g_mode != CTrexGlobalIoMode::gDISABLE ) {
+ fprintf(stdout,"\033[2J");
+ fprintf(stdout,"\033[2H");
+
+ }else{
+ if ( m_io_modes.m_g_disable_first ){
+ m_io_modes.m_g_disable_first=false;
+ fprintf(stdout,"\033[2J");
+ fprintf(stdout,"\033[2H");
+ printf("clean !!!\n");
+ fflush(stdout);
+ }
+ }
+
+
+ if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gHELP ) {
+ m_io_modes.DumpHelp(stdout);
+ }
+
+ dump_stats(stdout,json,CGlobalStats::dmpTABLE);
+
+ if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ) {
+ fprintf (stdout," current time : %.1f sec \n",now_sec());
+ float d= CGlobalInfo::m_options.m_duration - now_sec();
+ if (d<0) {
+ d=0;
+
+ }
+ fprintf (stdout," test duration : %.1f sec \n",d);
+ }
+
+ m_zmq_publisher.publish_json(json);
+
+ /* generator json: all cores are the same, just sample the first one */
+ m_fl.m_threads_info[0]->m_node_gen.dump_json(json);
+ m_zmq_publisher.publish_json(json);
+
+ dump_template_info(json);
+ m_zmq_publisher.publish_json(json);
+
+
+
+ if ( !CGlobalInfo::m_options.is_latency_disabled() ){
+ m_mg.update();
+
+ if ( m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ){
+ switch (m_io_modes.m_l_mode) {
+ case CTrexGlobalIoMode::lDISABLE:
+ fprintf(stdout,"\n+Latency stats disabled \n");
+ break;
+ case CTrexGlobalIoMode::lENABLE:
+ fprintf(stdout,"\n-Latency stats enabled \n");
+ m_mg.DumpShort(stdout);
+ break;
+ case CTrexGlobalIoMode::lENABLE_Extended:
+ fprintf(stdout,"\n-Latency stats extended \n");
+ m_mg.Dump(stdout);
+ break;
+ }
+
+ if ( get_is_rx_check_mode() ) {
+
+ switch (m_io_modes.m_rc_mode) {
+ case CTrexGlobalIoMode::rcDISABLE:
+ fprintf(stdout,"\n+Rx Check stats disabled \n");
+ break;
+ case CTrexGlobalIoMode::rcENABLE:
+ fprintf(stdout,"\n-Rx Check stats enabled \n");
+ m_mg.DumpShortRxCheck(stdout);
+ break;
+ case CTrexGlobalIoMode::rcENABLE_Extended:
+ fprintf(stdout,"\n-Rx Check stats enhanced \n");
+ m_mg.DumpRxCheck(stdout);
+ break;
+ }
+
+ m_mg.rx_check_dump_json(json );
+ m_zmq_publisher.publish_json(json);
+
+ }/* rx checked */
+
+ }
+
+ /* backward compatible */
+ m_mg.dump_json(json );
+ m_zmq_publisher.publish_json(json);
+
+ /* more info */
+ m_mg.dump_json_v2(json );
+ m_zmq_publisher.publish_json(json);
+
+ }
+
+ delay(500);
+
+ if ( is_all_cores_finished() ) {
+ break;
+ }
+ }
+ m_mg.stop();
+ delay(1000);
+ if ( was_stopped ){
+ /* we should stop latency and exit to stop agents */
+ exit(-1);
+ }
+ return (0);
+}
+
+
+
+int CGlobalPortCfg::run_in_laterncy_core(void){
+ if ( !CGlobalInfo::m_options.is_latency_disabled() ){
+ m_mg.start(0);
+ }
+ return (0);
+}
+
+
+int CGlobalPortCfg::stop_core(virtual_thread_id_t virt_core_id){
+ m_signal[virt_core_id]=1;
+ return (0);
+}
+
+int CGlobalPortCfg::run_in_core(virtual_thread_id_t virt_core_id){
+
+ CPreviewMode *lp=&CGlobalInfo::m_options.preview;
+ if ( lp->getSingleCore() &&
+ (virt_core_id==2 ) &&
+ (lp-> getCores() ==1) ){
+ printf(" bypass this core \n");
+ m_signal[virt_core_id]=1;
+ return (0);
+ }
+
+ assert(m_fl_was_init);
+ CFlowGenListPerThread * lpt;
+ lpt = m_fl.m_threads_info[virt_core_id-1];
+ lpt->generate_erf(CGlobalInfo::m_options.out_file,*lp);
+ //lpt->m_node_gen.DumpHist(stdout);
+ //lpt->DumpStats(stdout);
+
+ m_signal[virt_core_id]=1;
+ return (0);
+}
+
+
+int CGlobalPortCfg::stop_master(){
+
+ delay(1000);
+ std::string json;
+ fprintf(stdout," ==================\n");
+ fprintf(stdout," interface sum \n");
+ fprintf(stdout," ==================\n");
+ dump_stats(stdout,json,CGlobalStats::dmpSTANDARD);
+ fprintf(stdout," ==================\n");
+ fprintf(stdout," \n\n");
+
+ fprintf(stdout," ==================\n");
+ fprintf(stdout," interface sum \n");
+ fprintf(stdout," ==================\n");
+
+ CFlowGenListPerThread * lpt;
+ uint64_t total_tx_rx_check=0;
+
+ int i;
+ for (i=0; i<get_cores_tx(); i++) {
+ lpt = m_fl.m_threads_info[i];
+ CCoreEthIF * erf_vif = &m_cores_vif[i+1];
+
+ erf_vif->DumpCoreStats(stdout);
+ erf_vif->DumpIfStats(stdout);
+ total_tx_rx_check+=erf_vif->m_stats[CLIENT_SIDE].m_tx_rx_check_pkt+
+ erf_vif->m_stats[SERVER_SIDE].m_tx_rx_check_pkt;
+ }
+
+ fprintf(stdout," ==================\n");
+ fprintf(stdout," generators \n");
+ fprintf(stdout," ==================\n");
+ for (i=0; i<get_cores_tx(); i++) {
+ lpt = m_fl.m_threads_info[i];
+ lpt->m_node_gen.DumpHist(stdout);
+ lpt->DumpStats(stdout);
+ }
+ if ( !CGlobalInfo::m_options.is_latency_disabled() ){
+ fprintf(stdout," ==================\n");
+ fprintf(stdout," latency \n");
+ fprintf(stdout," ==================\n");
+ m_mg.DumpShort(stdout);
+ m_mg.Dump(stdout);
+ m_mg.DumpShortRxCheck(stdout);
+ m_mg.DumpRxCheck(stdout);
+ m_mg.DumpRxCheckVerification(stdout,total_tx_rx_check);
+ }
+
+ dump_stats(stdout,json,CGlobalStats::dmpSTANDARD);
+ dump_post_test_stats(stdout);
+ m_fl.Delete();
+
+ return (0);
+}
+
+bool CGlobalPortCfg::is_all_cores_finished(){
+ int i;
+ for (i=0; i<get_cores_tx(); i++) {
+ if ( m_signal[i+1]==0){
+ return (false);
+ }
+ }
+ return (true);
+}
+
+
+
+int CGlobalPortCfg::start_send_master(){
+ int i;
+ for (i=0; i<BP_MAX_CORES; i++) {
+ m_signal[i]=0;
+ }
+
+ m_fl.Create();
+ m_fl.load_from_yaml(CGlobalInfo::m_options.cfg_file,get_cores_tx());
+ if (CGlobalInfo::m_options.mac_file != "") {
+ CGlobalInfo::m_options.preview.set_mac_ip_mapping_enable(true);
+ m_fl.load_from_mac_file(CGlobalInfo::m_options.mac_file);
+ m_fl.is_mac_info_configured = true;
+ } else {
+ m_fl.is_mac_info_configured = false;
+ }
+
+ m_expected_pps = m_fl.get_total_pps();
+ m_expected_cps = 1000.0*m_fl.get_total_kcps();
+ m_expected_bps = m_fl.get_total_tx_bps();
+ if ( m_fl.get_total_repeat_flows() > 2000) {
+ /* disable flows cache */
+ CGlobalInfo::m_options.preview.setDisableMbufCache(true);
+ }
+
+ CTupleGenYamlInfo * tg=&m_fl.m_yaml_info.m_tuple_gen;
+
+ m_mg.set_ip( tg->m_clients_ip_start,
+ tg->m_servers_ip_start,
+ tg->m_dual_interface_mask
+ );
+
+
+ if ( CGlobalInfo::m_options.preview.getVMode() >0 ) {
+ m_fl.DumpCsv(stdout);
+ for (i=0; i<100; i++) {
+ fprintf(stdout,"\n");
+ }
+ fflush(stdout);
+ }
+
+ m_fl.generate_p_thread_info(get_cores_tx());
+ CFlowGenListPerThread * lpt;
+
+ for (i=0; i<get_cores_tx(); i++) {
+ lpt = m_fl.m_threads_info[i];
+ //CNullIF * erf_vif = new CNullIF();
+ CVirtualIF * erf_vif = &m_cores_vif[i+1];
+ lpt->set_vif(erf_vif);
+ /* socket id */
+ lpt->m_node_gen.m_socket_id =m_cores_vif[i+1].get_socket_id();
+
+ }
+ m_fl_was_init=true;
+
+ return (0);
+}
+
+
+////////////////////////////////////////////
+
+static CGlobalPortCfg ports_cfg;
+
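+/* lcore entry points (intended to be launched on every DPDK lcore): dispatch
+ by the physical thread's role - latency thread, master, or data-path core */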
+static int latency_one_lcore(__attribute__((unused)) void *dummy)
+{
+ CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
+ physical_thread_id_t phy_id =rte_lcore_id();
+
+
+ if ( lpsock->thread_phy_is_latency( phy_id ) ){
+ ports_cfg.run_in_laterncy_core();
+ }else{
+
+ if ( lpsock->thread_phy_is_master( phy_id ) ) {
+ ports_cfg.run_in_master();
+ delay(1);
+ }else{
+ delay((uint32_t)(1000.0*CGlobalInfo::m_options.m_duration));
+ /* this core has stopped */
+ ports_cfg.m_signal[ lpsock->thread_phy_to_virt( phy_id ) ]=1;
+ }
+ }
+ return 0;
+}
+
+
+
+
+static int slave_one_lcore(__attribute__((unused)) void *dummy)
+{
+ CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
+ physical_thread_id_t phy_id =rte_lcore_id();
+
+
+ if ( lpsock->thread_phy_is_latency( phy_id ) ){
+ ports_cfg.run_in_laterncy_core();
+ }else{
+ if ( lpsock->thread_phy_is_master( phy_id ) ) {
+ ports_cfg.run_in_master();
+ delay(1);
+ }else{
+ ports_cfg.run_in_core( lpsock->thread_phy_to_virt( phy_id ) );
+ }
+ }
+ return 0;
+}
+
+
+
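+/* build a core bit-mask: bit 0 (master) plus (cores-1) consecutive bits
+ starting at bit offset+1 */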
+uint32_t get_cores_mask(uint32_t cores,int offset){
+ int i;
+
+ uint32_t res=1;
+
+ uint32_t mask=(1<<(offset+1));
+ for (i=0; i<(cores-1); i++) {
+ res |= mask ;
+ mask = mask <<1;
+ }
+ return (res);
+}
+
+
+
+
+int main(int argc , char * argv[]){
+
+ return ( main_test(argc , argv));
+}
+
+
+int update_global_info_from_platform_file(){
+
+ CPlatformYamlInfo *cg=&global_platform_cfg_info;
+
+ CGlobalInfo::m_socket.Create(&cg->m_platform);
+
+
+ if (!cg->m_info_exist) {
+ /* nothing to do ! */
+ return 0;
+ }
+
+ CGlobalInfo::m_options.prefix =cg->m_prefix;
+ CGlobalInfo::m_options.preview.setCores(cg->m_thread_per_dual_if);
+
+ if ( cg->m_port_limit_exist ){
+ CGlobalInfo::m_options.m_expected_portd =cg->m_port_limit;
+ }
+
+ if ( cg->m_enable_zmq_pub_exist ){
+ CGlobalInfo::m_options.preview.set_zmq_publish_enable(cg->m_enable_zmq_pub);
+ CGlobalInfo::m_options.m_zmq_port = cg->m_zmq_pub_port;
+ }
+ if ( cg->m_telnet_exist ){
+ CGlobalInfo::m_options.m_telnet_port = cg->m_telnet_port;
+ }
+
+ if ( cg->m_mac_info_exist ){
+ int i;
+ /* copy the file info */
+
+ int port_size=cg->m_mac_info.size();
+
+ if ( port_size > BP_MAX_PORTS ){
+ port_size = BP_MAX_PORTS;
+ }
+ for (i=0; i<port_size; i++){
+ cg->m_mac_info[i].copy_src(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.src) ;
+ cg->m_mac_info[i].copy_dest(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.dest) ;
+ }
+ }
+
+ /* mul by interface type */
+ float mul=1.0;
+ if (cg->m_port_bandwidth_gb<10) {
+ cg->m_port_bandwidth_gb=10.0;
+ }
+
+ mul = mul*(float)cg->m_port_bandwidth_gb/10.0;
+ mul= mul * (float)cg->m_port_limit/2.0;
+
+ CGlobalInfo::m_memory_cfg.set(cg->m_memory,mul);
+ CGlobalInfo::m_memory_cfg.set_number_of_dp_cors(
+ CGlobalInfo::m_options.get_number_of_dp_cores_needed() );
+
+ return (0);
+}
+
+
+int update_dpdk_args(void){
+
+ uint32_t cores_number;
+ CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
+ CParserOption * lpop= &CGlobalInfo::m_options;
+
+ lpsock->set_latency_thread_is_enabled(get_is_latency_thread_enable());
+ lpsock->set_number_of_threads_per_ports(lpop->preview.getCores() );
+ lpsock->set_number_of_dual_ports(lpop->get_expected_dual_ports());
+ if ( !lpsock->sanity_check() ){
+ printf(" ERROR in configuration file \n");
+ return (-1);
+ }
+
+ if ( CGlobalInfo::m_options.preview.getVMode() > 0 ) {
+ lpsock->dump(stdout);
+ }
+
+
+ sprintf(global_cores_str,"0x%x",lpsock->get_cores_mask());
+
+ /* set the DPDK options */
+ global_dpdk_args_num =7;
+
+ global_dpdk_args[0]=(char *)"xx";
+ global_dpdk_args[1]=(char *)"-c";
+ global_dpdk_args[2]=(char *)global_cores_str;
+ global_dpdk_args[3]=(char *)"-n";
+ global_dpdk_args[4]=(char *)"4";
+
+ if ( CGlobalInfo::m_options.preview.getVMode() == 0 ) {
+ global_dpdk_args[5]=(char *)"--log-level";
+ sprintf(global_loglevel_str,"%d",1);
+ global_dpdk_args[6]=(char *)global_loglevel_str;
+ }else{
+ global_dpdk_args[5]=(char *)"--log-level";
+ sprintf(global_loglevel_str,"%d",CGlobalInfo::m_options.preview.getVMode()+1);
+ global_dpdk_args[6]=(char *)global_loglevel_str;
+ }
+
+ global_dpdk_args_num = 7;
+
+ /* add white list */
+ for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
+ global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
+ global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_if_list[i].c_str();
+ }
+
+
+
+ if ( lpop->prefix.length() ){
+ global_dpdk_args[global_dpdk_args_num++]=(char *)"--file-prefix";
+ sprintf(global_prefix_str,"%s",lpop->prefix.c_str());
+ global_dpdk_args[global_dpdk_args_num++]=(char *)global_prefix_str;
+ global_dpdk_args[global_dpdk_args_num++]=(char *)"-m";
+ if (global_platform_cfg_info.m_limit_memory.length()) {
+ global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_limit_memory.c_str();
+ }else{
+ global_dpdk_args[global_dpdk_args_num++]=(char *)"1024";
+ }
+ }
+
+
+ if ( CGlobalInfo::m_options.preview.getVMode() > 0 ) {
+ printf("args \n");
+ int i;
+ for (i=0; i<global_dpdk_args_num; i++) {
+ printf(" %s \n",global_dpdk_args[i]);
+ }
+ }
+ return (0);
+}
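+
+/* For reference, an illustrative EAL command line produced above (all values
+   are examples only, not real devices):
+     xx -c 0xe -n 4 --log-level 1 -w 03:00.0 -w 03:00.1 --file-prefix trex -m 1024
+*/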
+
+
+int sim_load_list_of_cap_files(CParserOption * op){
+
+ CFlowGenList fl;
+ fl.Create();
+ fl.load_from_yaml(op->cfg_file,1);
+ if ( op->preview.getVMode() >0 ) {
+ fl.DumpCsv(stdout);
+ }
+ uint32_t start= os_get_time_msec();
+
+ CErfIF erf_vif;
+
+ fl.generate_p_thread_info(1);
+ CFlowGenListPerThread * lpt;
+ lpt=fl.m_threads_info[0];
+ lpt->set_vif(&erf_vif);
+
+ if ( (op->preview.getVMode() >1) || op->preview.getFileWrite() ) {
+ lpt->generate_erf(op->out_file,op->preview);
+ }
+
+ lpt->m_node_gen.DumpHist(stdout);
+
+ uint32_t stop= os_get_time_msec();
+ printf(" d time = %ul %ul \n",stop-start,os_get_time_freq());
+ fl.Delete();
+ return (0);
+}
+
+
+
+
+
+
+
+
+int main_test(int argc , char * argv[]){
+
+ utl_termio_init();
+
+ int ret;
+ unsigned lcore_id;
+ printf("Starting T-Rex %s please wait ... \n",VERSION);
+
+ CGlobalInfo::m_options.preview.clean();
+
+ if ( parse_options(argc, argv, &CGlobalInfo::m_options,true ) != 0){
+ exit(-1);
+ }
+
+ update_global_info_from_platform_file();
+
+ /* it is not a mistake; give the user higher priority over the configuration file */
+ parse_options(argc, argv, &CGlobalInfo::m_options ,false);
+
+
+ if ( CGlobalInfo::m_options.preview.getVMode() > 0){
+ CGlobalInfo::m_options.dump(stdout);
+ CGlobalInfo::m_memory_cfg.Dump(stdout);
+ }
+
+ update_dpdk_args();
+
+ CParserOption * po=&CGlobalInfo::m_options;
+
+
+ if ( CGlobalInfo::m_options.preview.getVMode() == 0 ) {
+ rte_set_log_level(1);
+
+ }
+ uid_t uid;
+ uid = geteuid ();
+ if ( uid != 0 ) {
+ printf("ERROR you must run with superuser priviliges \n");
+ printf("User id : %d \n",uid);
+ printf("try 'sudo' %s \n",argv[0]);
+ return (-1);
+ }
+
+
+
+ ret = rte_eal_init(global_dpdk_args_num, (char **)global_dpdk_args);
+ if (ret < 0){
+ printf(" You might need to run ./trex-cfg once \n");
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+ }
+
+
+ time_init();
+
+ /* check if we are in simulation mode */
+ if ( CGlobalInfo::m_options.out_file != "" ){
+ printf(" t-rex simulation mode into %s \n",CGlobalInfo::m_options.out_file.c_str());
+ return ( sim_load_list_of_cap_files(&CGlobalInfo::m_options) );
+ }
+
+
+ if ( !ports_cfg.Create() ){
+ exit(1);
+ }
+
+ if (po->preview.get_is_rx_check_enable() && (po->m_rx_check_sampe< get_min_sample_rate()) ) {
+ po->m_rx_check_sampe = get_min_sample_rate();
+ printf("Warning rx check sample rate should be lower than %d setting it to %d\n",get_min_sample_rate(),get_min_sample_rate());
+ }
+
+ /* set dump mode */
+ ports_cfg.m_io_modes.set_mode((CTrexGlobalIoMode::CliDumpMode)CGlobalInfo::m_options.m_io_mode);
+
+ if ( !CGlobalInfo::m_options.is_latency_disabled()
+ && (CGlobalInfo::m_options.m_latency_prev>0) ){
+ uint32_t pkts = CGlobalInfo::m_options.m_latency_prev*
+ CGlobalInfo::m_options.m_latency_rate;
+ printf("Start prev latency check - hack for Keren for %d sec \n",CGlobalInfo::m_options.m_latency_prev);
+ ports_cfg.m_mg.start(pkts);
+ printf("Delay now you can call command \n");
+ delay(CGlobalInfo::m_options.m_latency_prev* 1000);
+ printf("Finish wating \n");
+ ports_cfg.m_mg.reset();
+ ports_cfg.reset_counters();
+ }
+
+ ports_cfg.start_send_master();
+
+ // TBD remove
+ //ports_cfg.test_latency();
+ /* test sending */
+ //while (1) {
+ //}
+
+
+ /* TBD_FDIR */
+ #if 0
+ printf(" test_send \n");
+ ports_cfg.test_send();
+ while (1) {
+ delay(10000);
+ }
+ #endif
+
+
+
+
+ //ports_cfg.test_latency();
+ //return (0);
+
+
+ if ( CGlobalInfo::m_options.preview.getOnlyLatency() ){
+ rte_eal_mp_remote_launch(latency_one_lcore, NULL, CALL_MASTER);
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (rte_eal_wait_lcore(lcore_id) < 0)
+ return -1;
+ }
+ ports_cfg.stop_master();
+
+ return (0);
+ }
+
+ if ( CGlobalInfo::m_options.preview.getSingleCore() ) {
+ ports_cfg.run_in_core(1);
+ ports_cfg.stop_master();
+ return (0);
+ }
+
+ rte_eal_mp_remote_launch(slave_one_lcore, NULL, CALL_MASTER);
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (rte_eal_wait_lcore(lcore_id) < 0)
+ return -1;
+ }
+ ports_cfg.stop_master();
+ ports_cfg.Delete();
+ utl_termio_reset();
+
+ return (0);
+}
+
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////////////
+// driver section
+//////////////////////////////////////////////////////////////////////////////////////////////
+
+int CTRexExtendedDriverBase1G::wait_for_stable_link(){
+ int i;
+ printf(" wait 10 sec ");
+ fflush(stdout);
+ for (i=0; i<10; i++) {
+ delay(1000);
+ printf(".");
+ fflush(stdout);
+ }
+ printf("\n");
+ fflush(stdout);
+ return(0);
+}
+
+int CTRexExtendedDriverBase1G::configure_drop_queue(CPhyEthIF * _if){
+ _if->pci_reg_write( E1000_RXDCTL(0) , 0);
+
+ /* enable filter to pass packet to rx queue 1 */
+
+ _if->pci_reg_write( E1000_IMIR(0), 0x00020000);
+
+ _if->pci_reg_write( E1000_IMIREXT(0), 0x00081000);
+
+ _if->pci_reg_write( E1000_TTQF(0), 0x00000084 /* protocol */
+ | 0x00008100 /* enable */
+ | 0xE0010000 /* RX queue is 1 */
+ );
+ return (0);
+}
+
+void CTRexExtendedDriverBase1G::update_configuration(port_cfg_t * cfg){
+
+ cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH_1G;
+ cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
+ cfg->m_tx_conf.tx_thresh.wthresh = 0;
+}
+
+void CTRexExtendedDriverBase1G::update_global_config_fdir(port_cfg_t * cfg){
+ cfg->update_global_config_fdir_10g_1g();
+}
+
+
+int CTRexExtendedDriverBase1G::configure_rx_filter_rules(CPhyEthIF * _if){
+
+ uint16_t hops = get_rx_check_hops();
+ uint16_t v4_hops = (hops << 8)&0xff00;
+
+ /* 16 : 12 MAC , (2)0x0800,2 | DW0 , DW1
+ 6 bytes , TTL , PROTO | DW2=0 , DW3=0x0000FF06
+ */
+ int i;
+ // IPv4: bytes being compared are {TTL, Protocol}
+ uint16_t ff_rules_v4[4]={
+ 0xFF06 - v4_hops,
+ 0xFE11 - v4_hops,
+ 0xFF11 - v4_hops,
+ 0xFE06 - v4_hops,
+ } ;
+ // IPv6: bytes being compared are {NextHdr, HopLimit}
+ uint16_t ff_rules_v6[2]={
+ 0x3CFF - hops,
+ 0x3CFE - hops,
+ } ;
+ uint16_t *ff_rules;
+ uint16_t num_rules;
+ uint32_t mask=0;
+ int rule_id;
+
+ if ( CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
+ ff_rules = &ff_rules_v6[0];
+ num_rules = sizeof(ff_rules_v6)/sizeof(ff_rules_v6[0]);
+ }else{
+ ff_rules = &ff_rules_v4[0];
+ num_rules = sizeof(ff_rules_v4)/sizeof(ff_rules_v4[0]);
+ }
+
+ uint8_t len = 24;
+ for (rule_id=0; rule_id<num_rules; rule_id++ ) {
+ /* clear rule all */
+ for (i=0; i<0xff; i+=4) {
+ _if->pci_reg_write( (E1000_FHFT(rule_id)+i) , 0);
+ }
+
+ if ( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ){
+ len += 8;
+ if ( CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
+ // IPv6 VLAN: NextHdr/HopLimit offset = 0x18
+ _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , PKT_NTOHS(ff_rules[rule_id]) );
+ _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x03); /* MASK */
+ }else{
+ // IPv4 VLAN: TTL/Protocol offset = 0x1A
+ _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
+ _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x0C); /* MASK */
+ }
+ }else{
+ if ( CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
+ // IPv6: NextHdr/HopLimit offset = 0x14
+ _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , PKT_NTOHS(ff_rules[rule_id]) );
+ _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0x30); /* MASK */
+ }else{
+ // IPv4: TTL/Protocol offset = 0x16
+ _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
+ _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0xC0); /* MASK */
+ }
+ }
+
+ // FLEX_PRIO[18:16] = 1, RQUEUE[10:8] = 1
+ _if->pci_reg_write( (E1000_FHFT(rule_id)+0xFC) , (1<<16) | (1<<8) | len);
+
+ mask |=(1<<rule_id);
+ }
+
+ /* enable all rules */
+ _if->pci_reg_write(E1000_WUFC, (mask<<16) | (1<<14) );
+ return (0);
+}
+
+
+void CTRexExtendedDriverBase1G::get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats){
+
+ int i;
+ uint64_t t=0;
+
+ stats->ipackets += _if->pci_reg_read(E1000_GPRC) ;
+
+ stats->ibytes += (_if->pci_reg_read(E1000_GORCL) );
+ stats->ibytes += (((uint64_t)_if->pci_reg_read(E1000_GORCH))<<32);
+
+
+ stats->opackets += _if->pci_reg_read(E1000_GPTC);
+ stats->obytes += _if->pci_reg_read(E1000_GOTCL) ;
+ stats->obytes += ( (((uint64_t)_if->pci_reg_read(E1000_GOTCH))<<32) );
+
+ stats->f_ipackets += 0;
+ stats->f_ibytes += 0;
+
+
+ stats->ierrors += ( _if->pci_reg_read(E1000_RNBC) +
+ _if->pci_reg_read(E1000_CRCERRS) +
+ _if->pci_reg_read(E1000_ALGNERRC ) +
+ _if->pci_reg_read(E1000_SYMERRS ) +
+ _if->pci_reg_read(E1000_RXERRC ) +
+
+ _if->pci_reg_read(E1000_ROC)+
+ _if->pci_reg_read(E1000_RUC)+
+ _if->pci_reg_read(E1000_RJC) +
+
+ _if->pci_reg_read(E1000_XONRXC)+
+ _if->pci_reg_read(E1000_XONTXC)+
+ _if->pci_reg_read(E1000_XOFFRXC)+
+ _if->pci_reg_read(E1000_XOFFTXC)+
+ _if->pci_reg_read(E1000_FCRUC)
+ );
+
+ stats->oerrors += 0;
+ stats->imcasts = 0;
+ stats->rx_nombuf = 0;
+}
+
+void CTRexExtendedDriverBase1G::clear_extended_stats(CPhyEthIF * _if){
+}
+
+
+
+void CTRexExtendedDriverBase10G::clear_extended_stats(CPhyEthIF * _if){
+ _if->pci_reg_read(IXGBE_RXNFGPC);
+}
+
+void CTRexExtendedDriverBase10G::update_global_config_fdir(port_cfg_t * cfg){
+ cfg->update_global_config_fdir_10g_1g();
+}
+
+void CTRexExtendedDriverBase10G::update_configuration(port_cfg_t * cfg){
+ cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
+ cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
+ cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
+}
+
+int CTRexExtendedDriverBase10G::configure_rx_filter_rules(CPhyEthIF * _if){
+ /* 10Gb/sec 82599 */
+ uint8_t port_id=_if->get_rte_port_id();
+
+ uint16_t hops = get_rx_check_hops();
+ uint16_t v4_hops = (hops << 8)&0xff00;
+
+
+ /* set the mask only for flex-data */
+ rte_fdir_masks fdir_mask;
+ memset(&fdir_mask,0,sizeof(rte_fdir_masks));
+ fdir_mask.flexbytes=1;
+ //fdir_mask.dst_port_mask=0xffff; /* enable of
+ int res;
+ res=rte_eth_dev_fdir_set_masks(port_id,&fdir_mask);
+ if (res!=0) {
+ rte_exit(EXIT_FAILURE, " ERROR rte_eth_dev_fdir_set_masks : %d \n",res);
+ }
+
+
+ // IPv4: bytes being compared are {TTL, Protocol}
+ uint16_t ff_rules_v4[4]={
+ 0xFF11 - v4_hops,
+ 0xFE11 - v4_hops,
+ 0xFF06 - v4_hops,
+ 0xFE06 - v4_hops,
+ } ;
+ // IPv6: bytes being compared are {NextHdr, HopLimit}
+ uint16_t ff_rules_v6[4]={
+ 0x3CFF - hops,
+ 0x3CFE - hops,
+ 0x3CFF - hops,
+ 0x3CFE - hops,
+ } ;
+ const rte_l4type ff_rules_type[4]={
+ RTE_FDIR_L4TYPE_UDP,
+ RTE_FDIR_L4TYPE_UDP,
+ RTE_FDIR_L4TYPE_TCP,
+ RTE_FDIR_L4TYPE_TCP
+ } ;
+
+ uint16_t *ff_rules;
+ uint16_t num_rules;
+ int rule_id;
+
+ assert (sizeof(ff_rules_v4) == sizeof(ff_rules_v6));
+ num_rules = sizeof(ff_rules_v4)/sizeof(ff_rules_v4[0]);
+ if ( CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
+ ff_rules = &ff_rules_v6[0];
+ }else{
+ ff_rules = &ff_rules_v4[0];
+ }
+
+ for (rule_id=0; rule_id<num_rules; rule_id++ ) {
+
+ rte_fdir_filter fdir_filter;
+ uint16_t ff_rule = ff_rules[rule_id];
+ memset(&fdir_filter,0,sizeof(rte_fdir_filter));
+ /* TOS/PROTO */
+ if ( CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
+ fdir_filter.iptype = RTE_FDIR_IPTYPE_IPV6;
+ }else{
+ fdir_filter.iptype = RTE_FDIR_IPTYPE_IPV4;
+ }
+ fdir_filter.flex_bytes = PKT_NTOHS(ff_rule);
+ fdir_filter.l4type = ff_rules_type[rule_id];
+
+ res=rte_eth_dev_fdir_add_perfect_filter(port_id,
+ &fdir_filter,
+ rule_id, 1,0);
+ if (res!=0) {
+ rte_exit(EXIT_FAILURE, " ERROR rte_eth_dev_fdir_add_perfect_filter : %d\n",res);
+ }
+ }
+ return (0);
+}
+
+int CTRexExtendedDriverBase10G::configure_drop_queue(CPhyEthIF * _if){
+
+ /* enable rule 0 SCTP -> queue 1 for latency */
+ /* 1<<21 means that queue 1 is for SCTP */
+ _if->pci_reg_write(IXGBE_L34T_IMIR(0),(1<<21));
+
+ _if->pci_reg_write(IXGBE_FTQF(0),
+ IXGBE_FTQF_PROTOCOL_SCTP|
+ (IXGBE_FTQF_PRIORITY_MASK<<IXGBE_FTQF_PRIORITY_SHIFT)|
+ ((0x0f)<<IXGBE_FTQF_5TUPLE_MASK_SHIFT)|IXGBE_FTQF_QUEUE_ENABLE);
+
+ /* disable queue zero - default all traffic will go to here and will be dropped */
+
+ _if->pci_reg_write( IXGBE_RXDCTL(0) , 0);
+ return (0);
+}
+
+void CTRexExtendedDriverBase10G::get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats){
+
+ int i;
+ uint64_t t=0;
+ for (i=0; i<8;i++) {
+ t+=_if->pci_reg_read(IXGBE_MPC(i));
+ }
+
+ stats->ipackets += _if->pci_reg_read(IXGBE_GPRC) ;
+
+ stats->ibytes += (_if->pci_reg_read(IXGBE_GORCL) +(((uint64_t)_if->pci_reg_read(IXGBE_GORCH))<<32));
+
+
+
+ stats->opackets += _if->pci_reg_read(IXGBE_GPTC);
+ stats->obytes += (_if->pci_reg_read(IXGBE_GOTCL) +(((uint64_t)_if->pci_reg_read(IXGBE_GOTCH))<<32));
+
+ stats->f_ipackets += _if->pci_reg_read(IXGBE_RXDGPC);
+ stats->f_ibytes += (_if->pci_reg_read(IXGBE_RXDGBCL) +(((uint64_t)_if->pci_reg_read(IXGBE_RXDGBCH))<<32));
+
+
+ stats->ierrors += ( _if->pci_reg_read(IXGBE_RLEC) +
+ _if->pci_reg_read(IXGBE_ERRBC) +
+ _if->pci_reg_read(IXGBE_CRCERRS) +
+ _if->pci_reg_read(IXGBE_ILLERRC ) +
+ _if->pci_reg_read(IXGBE_ROC)+
+ _if->pci_reg_read(IXGBE_RUC)+t);
+
+ stats->oerrors += 0;
+ stats->imcasts = 0;
+ stats->rx_nombuf = 0;
+
+}
+
+int CTRexExtendedDriverBase10G::wait_for_stable_link(){
+ delay(2000);
+ return (0);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+
+void CTRexExtendedDriverBase40G::clear_extended_stats(CPhyEthIF * _if){
+
+ rte_eth_stats_reset(_if->get_port_id());
+
+}
+
+void CTRexExtendedDriverBase40G::update_configuration(port_cfg_t * cfg){
+ cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
+ cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
+ cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
+ cfg->update_global_config_fdir_40g();
+}
+
+
+
+void CTRexExtendedDriverBase40G::add_rules(CPhyEthIF * _if,
+ enum rte_eth_flow_type type,
+ uint8_t ttl){
+ uint8_t port_id = _if->get_port_id();
+ int ret=rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
+
+ if ( ret !=0 ){
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_filter_supported "
+ "err=%d, port=%u \n",
+ ret, port_id);
+ }
+
+ struct rte_eth_fdir_filter filter;
+
+ memset(&filter,0,sizeof(struct rte_eth_fdir_filter));
+
+ filter.action.rx_queue =1;
+ filter.action.behavior =RTE_ETH_FDIR_ACCEPT;
+ filter.action.report_status =RTE_ETH_FDIR_NO_REPORT_STATUS;
+ filter.soft_id=0;
+
+ filter.input.flow_type = type;
+ filter.input.ttl=ttl;
+
+ /* any SCTP move to queue number 1 */
+ ret=rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+ RTE_ETH_FILTER_ADD, (void*)&filter);
+
+ if ( ret !=0 ){
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_filter_ctrl"
+ "err=%d, port=%u \n",
+ ret, port_id);
+ }
+}
+
+
+int CTRexExtendedDriverBase40G::configure_rx_filter_rules(CPhyEthIF * _if){
+ uint16_t hops = get_rx_check_hops();
+ int i;
+ for (i=0; i<2; i++) {
+ uint8_t ttl=0xff-i-hops;
+ add_rules(_if,RTE_ETH_FLOW_TYPE_UDPV4,ttl);
+ add_rules(_if,RTE_ETH_FLOW_TYPE_TCPV4,ttl);
+ add_rules(_if,RTE_ETH_FLOW_TYPE_UDPV6,ttl);
+ add_rules(_if,RTE_ETH_FLOW_TYPE_TCPV6,ttl);
+ }
+ return (0);
+}
+
+
+int CTRexExtendedDriverBase40G::configure_drop_queue(CPhyEthIF * _if){
+
+ add_rules(_if,RTE_ETH_FLOW_TYPE_SCTPV4,0);
+ return (0);
+}
+
+void CTRexExtendedDriverBase40G::get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats){
+
+ struct rte_eth_stats stats1;
+ rte_eth_stats_get(_if->get_port_id(), &stats1);
+
+
+ stats->ipackets = stats1.ipackets;
+ stats->ibytes = stats1.ibytes;
+
+ stats->opackets = stats1.opackets;
+ stats->obytes = stats1.obytes;
+
+ stats->f_ipackets = 0;
+ stats->f_ibytes = 0;
+
+
+ stats->ierrors = stats1.ierrors + stats1.imissed + stats1.ibadcrc +
+ stats1.ibadlen +
+ stats1.ierrors +
+ stats1.oerrors +
+ stats1.imcasts +
+ stats1.rx_nombuf +
+ stats1.tx_pause_xon +
+ stats1.rx_pause_xon +
+ stats1.tx_pause_xoff+
+ stats1.rx_pause_xoff ;
+
+
+ stats->oerrors = stats1.oerrors;
+ stats->imcasts = 0;
+ stats->rx_nombuf = stats1.rx_nombuf;
+
+}
+
+int CTRexExtendedDriverBase40G::wait_for_stable_link(){
+ delay(2000);
+ return (0);
+}
+
+/////////////////////////////////////////////////////////////////////
+
+
+void CTRexExtendedDriverBase1GVm::update_configuration(port_cfg_t * cfg){
+ struct rte_eth_dev_info dev_info;
+ rte_eth_dev_info_get((uint8_t) 0,&dev_info);
+
+ cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH_1G;
+ cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
+ cfg->m_tx_conf.tx_thresh.wthresh = 0;
+ cfg->m_tx_conf.txq_flags=dev_info.default_txconf.txq_flags;
+
+}
+
+
+int CTRexExtendedDriverBase1GVm::configure_rx_filter_rules(CPhyEthIF * _if){
+ return (0);
+}
+
+void CTRexExtendedDriverBase1GVm::clear_extended_stats(CPhyEthIF * _if){
+
+ rte_eth_stats_reset(_if->get_port_id());
+
+}
+
+int CTRexExtendedDriverBase1GVm::configure_drop_queue(CPhyEthIF * _if){
+
+
+ return (0);
+}
+
+void CTRexExtendedDriverBase1GVm::get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats){
+
+ struct rte_eth_stats stats1;
+ rte_eth_stats_get(_if->get_port_id(), &stats1);
+
+
+ stats->ipackets = stats1.ipackets;
+ stats->ibytes = stats1.ibytes;
+
+ stats->opackets = stats1.opackets;
+ stats->obytes = stats1.obytes;
+
+ stats->f_ipackets = 0;
+ stats->f_ibytes = 0;
+
+
+ stats->ierrors = stats1.ierrors + stats1.imissed + stats1.ibadcrc +
+ stats1.ibadlen +
+ stats1.ierrors +
+ stats1.oerrors +
+ stats1.imcasts +
+ stats1.rx_nombuf +
+ stats1.tx_pause_xon +
+ stats1.rx_pause_xon +
+ stats1.tx_pause_xoff+
+ stats1.rx_pause_xoff ;
+
+
+ stats->oerrors = stats1.oerrors;
+ stats->imcasts = 0;
+ stats->rx_nombuf = stats1.rx_nombuf;
+
+}
+
+int CTRexExtendedDriverBase1GVm::wait_for_stable_link(){
+ delay(10);
+ return (0);
+}
+
+
+
+/**
+ * convert chain of mbuf to one big mbuf
+ *
+ * @param m
+ *
+ * @return
+ */
+struct rte_mbuf * rte_mbuf_convert_to_one_seg(struct rte_mbuf *m){
+ unsigned int len;
+ struct rte_mbuf * r;
+ struct rte_mbuf * old_m;
+ old_m=m;
+
+ len=rte_pktmbuf_pkt_len(m);
+ /* allocate one big mbuf*/
+ r = CGlobalInfo::pktmbuf_alloc(0,len);
+ assert(r);
+ if (r==0) {
+ rte_pktmbuf_free(m);
+ return(r);
+ }
+ char *p=rte_pktmbuf_append(r,len);
+
+ while ( m ) {
+ len = m->data_len;
+ assert(len);
+ memcpy(p,(char *)m->buf_addr, len);
+ p+=len;
+ m = m->next;
+ }
+ rte_pktmbuf_free(old_m);
+ return(r);
+}
+
+
diff --git a/src/msg_manager.cpp b/src/msg_manager.cpp
new file mode 100755
index 00000000..4db96583
--- /dev/null
+++ b/src/msg_manager.cpp
@@ -0,0 +1,98 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "msg_manager.h"
+#include "bp_sim.h"
+#include <stdio.h>
+#include <string>
+
+/*TBD: need to fix socket_id for NUMA */
+
+bool CMessagingManager::Create(uint8_t num_dp_threads){
+ m_num_dp_threads=num_dp_threads;
+ assert(m_dp_to_cp==0);
+ assert(m_cp_to_dp==0);
+ m_cp_to_dp = new CNodeRing[num_dp_threads] ;
+ m_dp_to_cp = new CNodeRing[num_dp_threads];
+ int i;
+ for (i=0; i<num_dp_threads; i++) {
+ CNodeRing * lp;
+ char name[100];
+
+ lp=getRingCpToDp(i);
+ sprintf(name,"cp_to_dp_%d",i);
+ assert(lp->Create(std::string(name),1024,0)==true);
+
+ lp=getRingDpToCp(i);
+ sprintf(name,"dp_to_cp_%d",i);
+ assert(lp->Create(std::string(name),1024,0)==true);
+
+ }
+ assert(m_dp_to_cp);
+ assert(m_cp_to_dp);
+ return (true);
+}
+void CMessagingManager::Delete(){
+ if (m_dp_to_cp) {
+ m_dp_to_cp->Delete();
+ delete []m_dp_to_cp;
+ }
+ if (m_cp_to_dp) {
+ m_cp_to_dp->Delete();
+ delete []m_cp_to_dp;
+ }
+
+}
+
+CNodeRing * CMessagingManager::getRingCpToDp(uint8_t thread_id){
+ assert(thread_id<m_num_dp_threads);
+ return (&m_cp_to_dp[thread_id]);
+}
+
+CNodeRing * CMessagingManager::getRingDpToCp(uint8_t thread_id){
+ assert(thread_id<m_num_dp_threads);
+ return (&m_dp_to_cp[thread_id]);
+
+}
+
+
+void CMsgIns::Free(){
+ if (m_ins) {
+ delete m_ins;
+ }
+}
+
+CMsgIns * CMsgIns::Ins(void){
+ if (!m_ins) {
+ m_ins= new CMsgIns();
+ }
+ assert(m_ins);
+ return (m_ins);
+}
+
+bool CMsgIns::Create(uint8_t num_threads){
+ return ( m_rx_dp.Create(num_threads) );
+}
+
+
+CMsgIns * CMsgIns::m_ins=0;
+
+
diff --git a/src/msg_manager.h b/src/msg_manager.h
new file mode 100755
index 00000000..b25660bb
--- /dev/null
+++ b/src/msg_manager.h
@@ -0,0 +1,109 @@
+#ifndef CMSG_MANAGER_H
+#define CMSG_MANAGER_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include "CRing.h"
+
+
+/* messages from CP->DP Ids */
+
+#define NAT_MSG (7)
+#define LATENCY_PKT_SEND_MSG (8)
+
+/*
+
+e.g DP with 4 threads
+will look like this
+
+ cp_to_dp
+
+ master :push
+ dpx : pop
+
+ - --> dp0
+cp - --> dp1
+ - --> dp2
+ - --> dp3
+
+ dp_to_cp
+
+ cp : pop
+ dpx : push
+
+
+ <- -- dp0
+cp <- -- dp1
+ <- -- dp2
+ <- -- dp3
+
+
+*/
+
+class CGenNode ;
+typedef CTRingSp<CGenNode> CNodeRing;
+
+/* CP == latency thread
+ DP == traffic pkt generator */
+class CMessagingManager {
+public:
+ CMessagingManager(){
+ m_cp_to_dp=0;
+ m_dp_to_cp=0;
+ m_num_dp_threads=0;
+ }
+ bool Create(uint8_t num_dp_threads);
+ void Delete();
+ CNodeRing * getRingCpToDp(uint8_t thread_id);
+ CNodeRing * getRingDpToCp(uint8_t thread_id);
+ uint8_t get_num_threads(){
+ return (m_num_dp_threads);
+ }
+private:
+ CNodeRing * m_cp_to_dp;
+ CNodeRing * m_dp_to_cp;
+ uint8_t m_num_dp_threads;
+};
+
+
+class CMsgIns {
+public:
+ static CMsgIns * Ins();
+ static void Free();
+ bool Create(uint8_t num_threads);
+public:
+ CMessagingManager * getRxDp(){
+ return (&m_rx_dp);
+ }
+ uint8_t get_num_threads(){
+ return (m_rx_dp.get_num_threads());
+ }
+
+private:
+ CMessagingManager m_rx_dp;
+
+private:
+ /* one instance */
+ static CMsgIns * m_ins;
+};
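+
+/* Minimal usage sketch (illustrative only; assumes "node" is a CGenNode*
+   obtained from the global allocator):
+
+   CMsgIns::Ins()->Create(4);                              // 4 DP threads
+   CNodeRing *ring = CMsgIns::Ins()->getRxDp()->getRingCpToDp(0);
+   if (ring->Enqueue(node) != 0) {
+       // ring is full - retry later
+   }
+*/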
+
+#endif
diff --git a/src/nat_check.cpp b/src/nat_check.cpp
new file mode 100755
index 00000000..676c1292
--- /dev/null
+++ b/src/nat_check.cpp
@@ -0,0 +1,197 @@
+#include <stdint.h>
+#include "nat_check.h"
+#include "bp_sim.h"
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+void CGenNodeNatInfo::dump(FILE *fd){
+
+ fprintf(fd," msg_type : %d \n",m_msg_type);
+ int i;
+ for (i=0; i<m_cnt; i++) {
+ CNatFlowInfo * lp=&m_data[i];
+ fprintf (fd," id:%d , external ip:%08x:%x , ex_port: %04x , fid: %d \n",i,lp->m_external_ip,lp->m_external_ip_server,lp->m_external_port,lp->m_fid);
+ }
+}
+
+void CGenNodeNatInfo::init(){
+ m_msg_type= CGenNodeMsgBase::NAT_FIRST;
+ m_pad=0;
+ m_cnt=0;
+}
+
+
+void CNatStats::reset(){
+ m_total_rx=0;
+ m_total_msg=0;
+ m_err_no_valid_thread_id=0;
+ m_err_no_valid_proto=0;
+ m_err_queue_full=0;
+}
+
+
+CNatPerThreadInfo * CNatRxManager::get_thread_info(uint8_t thread_id){
+ if (thread_id<m_max_threads) {
+ return (&m_per_thread[thread_id]);
+ }
+ m_stats.m_err_no_valid_thread_id++;
+ return (0);
+}
+
+
+bool CNatRxManager::Create(){
+
+ m_max_threads = CMsgIns::Ins()->get_num_threads() ;
+ m_per_thread = new CNatPerThreadInfo[m_max_threads];
+ CMessagingManager * lpm=CMsgIns::Ins()->getRxDp();
+
+ int i;
+ for (i=0; i<m_max_threads; i++) {
+ m_per_thread[i].m_ring=lpm->getRingCpToDp(i);
+ assert(m_per_thread[i].m_ring);
+ }
+
+
+
+ return (true);
+}
+
+void CNatRxManager::Delete(){
+ if (m_per_thread) {
+ delete [] m_per_thread;
+ m_per_thread=0;
+ }
+}
+
+void delay(int msec);
+
+
+
+/* check this every 1msec */
+void CNatRxManager::handle_aging(){
+ int i;
+ dsec_t now=now_sec();
+ for (i=0; i<m_max_threads; i++) {
+ CNatPerThreadInfo * thread_info=get_thread_info( i );
+ if ( thread_info->m_cur_nat_msg ){
+ if ( now - thread_info->m_last_time > MAX_TIME_MSG_IN_QUEUE_SEC ){
+ flush_node(thread_info);
+ }
+ }
+ }
+}
+
+void CNatRxManager::flush_node(CNatPerThreadInfo * thread_info){
+ // try send
+ int cnt=0;
+ while (true) {
+ if ( thread_info->m_ring->Enqueue((CGenNode*)thread_info->m_cur_nat_msg) == 0 ){
+ #ifdef NAT_TRACE_
+ printf("send message \n");
+ #endif
+ break;
+ }
+ m_stats.m_err_queue_full++;
+ delay(1);
+ cnt++;
+ if (cnt>10) {
+ printf(" ERROR queue from rx->dp is stuck, somthing is wrong here \n");
+ exit(1);
+ }
+ }
+ /* msg will be freed by the sink */
+ thread_info->m_cur_nat_msg=0;
+}
+
+
+void CNatRxManager::handle_packet_ipv4(CNatOption * option,
+ IPHeader * ipv4){
+
+ CNatPerThreadInfo * thread_info=get_thread_info(option->get_thread_id());
+ if (!thread_info) {
+ return;
+ }
+ /* Extract info from the packet ! */
+ uint32_t ext_ip = ipv4->getSourceIp();
+ uint32_t ext_ip_server = ipv4->getDestIp();
+ uint8_t proto = ipv4->getProtocol();
+ /* must be TCP/UDP this is the only supported proto */
+ if (!( (proto==6) || (proto==17) )){
+ m_stats.m_err_no_valid_proto++;
+ return;
+ }
+ /* we support only TCP/UDP so take the source port , post IP header */
+ UDPHeader * udp= (UDPHeader *) (((char *)ipv4)+ ipv4->getHeaderLength());
+ uint16_t ext_port = udp->getSourcePort();
+ #ifdef NAT_TRACE_
+ printf("rx msg ext ip : %08x:%08x ext port : %04x flow_id : %d \n",ext_ip,ext_ip_server,ext_port,option->get_fid());
+ #endif
+
+
+ CGenNodeNatInfo * node=thread_info->m_cur_nat_msg;
+ if ( !node ){
+ node = (CGenNodeNatInfo * )CGlobalInfo::create_node();
+ assert(node);
+ node->init();
+ thread_info->m_cur_nat_msg = node;
+ thread_info->m_last_time = now_sec();
+ }
+ /* get message */
+ CNatFlowInfo * msg=node->get_next_msg();
+
+ /* fill the message */
+ msg->m_external_ip = ext_ip;
+ msg->m_external_ip_server = ext_ip_server;
+ msg->m_external_port = ext_port;
+ msg->m_fid = option->get_fid();
+ msg->m_pad = 0xee;
+
+ if ( node->is_full() ){
+ flush_node(thread_info);
+ }
+}
+
+
+#define MYDP(f) if (f) fprintf(fd," %-40s: %llu \n",#f,f)
+#define MYDP_A(f) fprintf(fd," %-40s: %llu \n",#f,f)
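+/* MYDP prints a counter only when it is non-zero, MYDP_A prints it
+   unconditionally; #f stringifies the field name so the output is
+   self-describing. */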
+
+
+
+void CNatStats::Dump(FILE *fd){
+ MYDP(m_total_rx);
+ MYDP(m_total_msg);
+ MYDP(m_err_no_valid_thread_id);
+ MYDP(m_err_no_valid_proto);
+ MYDP(m_err_queue_full);
+}
+
+
+void CNatRxManager::Dump(FILE *fd){
+ m_stats.Dump(stdout);
+}
+
+void CNatRxManager::DumpShort(FILE *fd){
+ fprintf(fd,"nat check msgs: %lu, errors: %lu \n",m_stats.m_total_msg,m_stats.get_errs() );
+}
+
+
+
diff --git a/src/nat_check.h b/src/nat_check.h
new file mode 100755
index 00000000..b67c523c
--- /dev/null
+++ b/src/nat_check.h
@@ -0,0 +1,164 @@
+#ifndef NAT_CHECK_H
+#define NAT_CHECK_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "rx_check_header.h"
+#include "msg_manager.h"
+#include <common/Network/Packet/TcpHeader.h>
+#include <common/Network/Packet/UdpHeader.h>
+#include <common/Network/Packet/IPHeader.h>
+#include <common/Network/Packet/IPv6Header.h>
+#include <common/Network/Packet/EthernetHeader.h>
+
+
+
+// 2msec timeout
+#define MAX_TIME_MSG_IN_QUEUE_SEC ( 0.002 )
+
+struct CNatFlowInfo {
+ uint32_t m_external_ip;
+ uint32_t m_external_ip_server;
+ uint32_t m_fid;
+ uint16_t m_external_port;
+ uint16_t m_pad;
+};
+
+#if __x86_64__
+/* size of 64 bytes */
+ #define MAX_NAT_FLOW_INFO (7)
+ #define MAX_PKT_MSG_INFO (26)
+
+#else
+ #define MAX_NAT_FLOW_INFO (8)
+ #define MAX_PKT_MSG_INFO (30)
+#endif
+
+
+/*
+ !!! WARNING - CGenNodeNatInfo !!
+
+ this struct must be the same size as CGenNode because the allocator is global.
+
+*/
+struct CGenNodeMsgBase {
+ enum {
+ NAT_FIRST = NAT_MSG,
+ LATENCY_PKT = LATENCY_PKT_SEND_MSG
+ } msg_types;
+
+public:
+ uint8_t m_msg_type; /* msg type */
+};
+
+
+struct CGenNodeNatInfo : public CGenNodeMsgBase {
+ uint8_t m_pad;
+ uint16_t m_cnt;
+ //uint32_t m_pad2;
+ #if __x86_64__
+ uint32_t m_pad3;
+ #endif
+ CNatFlowInfo m_data[MAX_NAT_FLOW_INFO];
+
+public:
+ CNatFlowInfo * get_next_msg(){
+ CNatFlowInfo * lp=&m_data[m_cnt];
+ m_cnt++;
+ return (lp);
+ }
+
+ void init();
+
+ bool is_full(){
+ return (m_cnt==MAX_NAT_FLOW_INFO?true:false);
+ }
+ void dump(FILE *fd);
+};
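+
+/* A compile-time guard one could add where both types are fully defined
+   (sketch only; CGenNode is only forward-declared in msg_manager.h):
+
+   static_assert(sizeof(CGenNodeNatInfo) == sizeof(CGenNode),
+                 "CGenNodeNatInfo must be the same size as CGenNode");
+*/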
+
+struct CGenNodeLatencyPktInfo : public CGenNodeMsgBase {
+ uint8_t m_dir;
+ uint16_t m_latency_offset;
+ #if __x86_64__
+ uint32_t m_pad3;
+ #endif
+ struct rte_mbuf * m_pkt;
+
+ uint32_t m_pad4[MAX_PKT_MSG_INFO];
+};
+
+
+/* per-thread ring info for NAT messages;
+ batch as many messages as possible into one node before sending */
+class CNatPerThreadInfo {
+public:
+ CNatPerThreadInfo(){
+ m_last_time=0;
+ m_cur_nat_msg=0;
+ m_ring=0;
+ }
+public:
+ dsec_t m_last_time;
+ CGenNodeNatInfo * m_cur_nat_msg;
+ CNodeRing * m_ring;
+};
+
+
+class CNatStats {
+public:
+ void reset();
+public:
+ uint64_t m_total_rx;
+ uint64_t m_total_msg;
+ /* errors */
+ uint64_t m_err_no_valid_thread_id;
+ uint64_t m_err_no_valid_proto;
+ uint64_t m_err_queue_full;
+public:
+ uint64_t get_errs(){
+ return (m_err_no_valid_thread_id+m_err_no_valid_proto+m_err_queue_full);
+ }
+ void Dump(FILE *fd);
+};
+
+
+class CNatRxManager {
+
+public:
+ bool Create();
+ void Delete();
+ void handle_packet_ipv4(CNatOption * option,
+ IPHeader * ipv4);
+ void handle_aging();
+ void Dump(FILE *fd);
+ void DumpShort(FILE *fd);
+private:
+ CNatPerThreadInfo * get_thread_info(uint8_t thread_id);
+ void flush_node(CNatPerThreadInfo * thread_info);
+
+private:
+ uint8_t m_max_threads;
+ CNatPerThreadInfo * m_per_thread;
+ CNatStats m_stats;
+};
+
+
+#endif
diff --git a/src/os_time.cpp b/src/os_time.cpp
new file mode 100755
index 00000000..706ab4d8
--- /dev/null
+++ b/src/os_time.cpp
@@ -0,0 +1,125 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include "os_time.h"
+#include <stdio.h>
+hr_time_t start_time;
+
+#ifdef WIN32
+
+#include <windows.h>
+uint32_t os_get_time_msec(){
+ return (GetTickCount());
+}
+uint32_t os_get_time_freq(){
+ return (1000);
+}
+
+
+typedef union {
+ struct {
+ uint32_t low;
+ uint32_t high;
+ } h;
+ hr_time_t x;
+} timer_hl_t;
+
+//hr_time_t os_get_hr_freq(void);
+
+hr_time_t os_get_hr_freq(void){
+ return (3000000000);
+}
+
+
+hr_time_t os_get_hr_tick_64(void) {
+ uint32_t _low,_high;
+ __asm {
+ mov ecx,0 ;select Counter0
+
+ _emit 0x0F ;RDPMC - get beginning value of Counter0 to edx:eax
+ _emit 0x31
+
+ mov _high,edx ;save beginning value
+ mov _low,eax
+ }
+
+ timer_hl_t x;
+
+ x.h.low = _low;
+ x.h.high = _high;
+
+ return x.x;
+}
+
+uint32_t os_get_hr_tick_32(void) {
+ uint32_t _low,_high;
+ __asm {
+ mov ecx,0 ;select Counter0
+
+ _emit 0x0F ;RDPMC - get beginning value of Counter0 to edx:eax
+ _emit 0x31
+
+ mov _high,edx ;save beginning value
+ mov _low,eax
+ }
+ return _low;
+}
+
+#else
+
+
+#include <time.h>
+#include <sys/times.h>
+#include <unistd.h>
+
+// Returns time in system clock ticks...
+uint32_t SANB_tickGet()
+{
+ struct tms buffer; // we don't really use that
+ clock_t ticks = times(&buffer);
+ return (uint32_t)ticks;
+}
+
+// ... and the tick rate comes from sysconf(_SC_CLK_TCK).
+int SANB_sysClkRateGet()
+{
+ int rate = sysconf(_SC_CLK_TCK);
+ if (rate == -1)
+ {
+ fprintf(stderr,"SANB_sysClkRateGet, sysconf FAILED, something very basic is wrong....!\n");
+ }
+ return rate;
+}
+
+uint32_t os_get_time_msec(){
+ return (SANB_tickGet());
+}
+
+uint32_t os_get_time_freq(){
+ return (SANB_sysClkRateGet());
+}
+
+
+
+#endif
+
+
diff --git a/src/os_time.h b/src/os_time.h
new file mode 100755
index 00000000..153ee3e3
--- /dev/null
+++ b/src/os_time.h
@@ -0,0 +1,134 @@
+#ifndef OS_TIME_H
+#define OS_TIME_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <stdint.h>
+
+typedef uint64_t hr_time_t; // time in high res tick
+typedef uint32_t hr_time_32_t; // time in high res tick
+typedef double dsec_t; //time in sec double
+
+uint32_t os_get_time_msec();
+uint32_t os_get_time_freq();
+
+
+
+#ifdef LINUX
+
+#ifdef RTE_DPDK
+
+
+//extern "C" uint64_t rte_get_hpet_hz(void);
+
+#include "rte_cycles.h"
+
+static inline hr_time_t os_get_hr_tick_64(void){
+ return (rte_rdtsc());
+}
+
+static inline hr_time_32_t os_get_hr_tick_32(void){
+ return ( (uint32_t)os_get_hr_tick_64());
+}
+
+static inline hr_time_t os_get_hr_freq(void){
+ return (rte_get_tsc_hz() );
+}
+
+
+
+#else
+
+
+/* read the rdtsc register for ticks
+ works for 64-bit as well
+*/
+static inline void platform_time_get_highres_tick_64(uint64_t* t)
+{
+ uint32_t lo, hi;
+ __asm__ __volatile__ ( // serialize
+ "xorl %%eax,%%eax \n cpuid"
+ ::: "%rax", "%rbx", "%rcx", "%rdx");
+ /* We cannot use "=A", since this would use %rax on x86_64 and return
+ only the lower 32bits of the TSC */
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ *t = (uint64_t)hi << 32 | lo;
+}
+
+static inline uint32_t platform_time_get_highres_tick_32()
+{
+ uint64_t t;
+ platform_time_get_highres_tick_64(&t);
+ return ((uint32_t)t);
+}
+
+
+static inline hr_time_t os_get_hr_freq(void){
+ return (3000000000ULL);
+}
+
+static inline hr_time_t os_get_hr_tick_64(void) {
+ hr_time_t res;
+ platform_time_get_highres_tick_64(&res);
+ return (res);
+}
+
+static inline uint32_t os_get_hr_tick_32(void) {
+ return (platform_time_get_highres_tick_32());
+}
+
+#endif
+
+#else
+
+hr_time_t os_get_hr_tick_64(void);
+hr_time_32_t os_get_hr_tick_32(void);
+hr_time_t os_get_hr_freq(void);
+#endif
+
+
+/* convert delta time */
+static inline hr_time_t ptime_convert_dsec_hr(dsec_t dsec){
+ return((hr_time_t)(dsec*(double)os_get_hr_freq()) );
+}
+
+/* convert delta time */
+static inline dsec_t ptime_convert_hr_dsec(hr_time_t hrt){
+ return ((dsec_t)((double)hrt/(double)os_get_hr_freq() ));
+}
+
+
+extern hr_time_t start_time;
+
+static inline void time_init(){
+ start_time=os_get_hr_tick_64();
+}
+
+/* should be fixed; need to move to a high-resolution tick */
+static inline dsec_t now_sec(void){
+ hr_time_t d=os_get_hr_tick_64() - start_time;
+ return ( ptime_convert_hr_dsec(d) );
+}
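+
+/* Typical usage (sketch): measure elapsed wall-clock time in seconds.
+
+   time_init();                    // once, at program start
+   dsec_t t0 = now_sec();
+   // ... work ...
+   dsec_t elapsed = now_sec() - t0;
+*/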
+
+
+
+
+#endif
diff --git a/src/pal/linux/CRing.h b/src/pal/linux/CRing.h
new file mode 100755
index 00000000..cf69422e
--- /dev/null
+++ b/src/pal/linux/CRing.h
@@ -0,0 +1,98 @@
+#ifndef C_RING_H
+#define C_RING_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <assert.h>
+#include <stdint.h>
+#include <string>
+#include <queue>
+
+
+
+typedef std::queue<void *> my_stl_queue_t;
+
+class CRingSp {
+public:
+ CRingSp(){
+ m_queue=0;
+ }
+
+ bool Create(std::string name,
+ uint16_t cnt,
+ int socket_id){
+ m_queue = new my_stl_queue_t();
+ assert(m_queue);
+ return(true);
+ }
+
+ void Delete(void){
+ if (m_queue) {
+ delete m_queue ;
+ m_queue=0;
+ }
+ }
+
+ int Enqueue(void *obj){
+ m_queue->push(obj);
+ return (0);
+ }
+
+ int Dequeue(void * & obj){
+ if ( !m_queue->empty() ){
+ obj= m_queue->front();
+ m_queue->pop();
+ return (0);
+ }else{
+ return (1);
+ }
+ }
+
+ bool isFull(void){
+ return (false);
+ }
+
+ bool isEmpty(void){
+ return ( m_queue->empty() ?true:false);
+ }
+
+private:
+ my_stl_queue_t * m_queue;
+};
+
+template <class T>
+class CTRingSp : public CRingSp {
+public:
+ int Enqueue(T *obj){
+ return ( CRingSp::Enqueue((void*)obj) );
+ }
+
+ int Dequeue(T * & obj){
+ return (CRingSp::Dequeue(*((void **)&obj)));
+ }
+};
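+
+/* Minimal usage sketch (illustrative only; in this Linux stub the name, size
+   and socket_id passed to Create() are ignored):
+
+   CTRingSp<int> ring;
+   ring.Create("example", 1024, 0);
+   int value = 5;
+   ring.Enqueue(&value);            // returns 0 on success
+   int *out = 0;
+   if (ring.Dequeue(out) == 0) {
+       // *out == 5
+   }
+   ring.Delete();
+*/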
+
+
+
+
+#endif
+
diff --git a/src/pal/linux/mbuf.cpp b/src/pal/linux/mbuf.cpp
new file mode 100755
index 00000000..7eca8fd5
--- /dev/null
+++ b/src/pal/linux/mbuf.cpp
@@ -0,0 +1,425 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "mbuf.h"
+#include <stdio.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include "sanb_atomic.h"
+
+
+#define RTE_MBUF_TO_BADDR(mb) (((struct rte_mbuf *)(mb)) + 1)
+#define RTE_MBUF_FROM_BADDR(ba) (((struct rte_mbuf *)(ba)) - 1)
+
+
+void rte_pktmbuf_detach(struct rte_mbuf *m);
+
+
+
+void utl_rte_check(rte_mempool_t * mp){
+ assert(mp->magic == MAGIC0);
+ assert(mp->magic2 == MAGIC2);
+}
+
+void utl_rte_pktmbuf_check(struct rte_mbuf *m){
+ utl_rte_check(m->pool);
+ assert(m->magic == MAGIC0);
+ assert(m->magic2== MAGIC2);
+}
+
+rte_mempool_t * utl_rte_mempool_create_non_pkt(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id ,
+ int socket_id){
+ rte_mempool_t * p=new rte_mempool_t();
+ assert(p);
+ p->elt_size =elt_size;
+ p->size=n;
+ p->magic=MAGIC0;
+ p->magic2=MAGIC2;
+ return p;
+}
+
+rte_mempool_t * utl_rte_mempool_create(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id,
+ int socket_id
+ ){
+
+ rte_mempool_t * p=new rte_mempool_t();
+ assert(p);
+ p->size=n;
+ p->elt_size =elt_size;
+ p->magic=MAGIC0;
+ p->magic2=MAGIC2;
+ return p;
+}
+
+
+uint16_t rte_mbuf_refcnt_update(rte_mbuf_t *m, int16_t value)
+{
+ utl_rte_pktmbuf_check(m);
+ uint32_t a=sanb_atomic_add_return_32_old(&m->refcnt_reserved,1);
+ return (a);
+}
+
+
+
+
+void rte_pktmbuf_reset(struct rte_mbuf *m)
+{
+ utl_rte_pktmbuf_check(m);
+ m->next = NULL;
+ m->pkt_len = 0;
+ m->nb_segs = 1;
+ m->in_port = 0xff;
+ m->refcnt_reserved=1;
+
+ #if RTE_PKTMBUF_HEADROOM > 0
+ m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
+ RTE_PKTMBUF_HEADROOM : m->buf_len;
+ #else
+ m->data_off = RTE_PKTMBUF_HEADROOM ;
+ #endif
+
+ m->data_len = 0;
+}
+
+
+rte_mbuf_t *rte_pktmbuf_alloc(rte_mempool_t *mp){
+
+ uint16_t buf_len;
+
+ utl_rte_check(mp);
+
+ buf_len = mp->elt_size ;
+
+ rte_mbuf_t *m =(rte_mbuf_t *)malloc(buf_len );
+ assert(m);
+
+ m->magic = MAGIC0;
+ m->magic2 = MAGIC2;
+ m->pool = mp;
+ m->refcnt_reserved =0;
+
+ m->buf_len = buf_len;
+ m->buf_addr =(char *)((char *)m+sizeof(rte_mbuf_t)+RTE_PKTMBUF_HEADROOM) ;
+
+ rte_pktmbuf_reset(m);
+ return (m);
+}
+
+
+
+void rte_pktmbuf_free_seg(rte_mbuf_t *m){
+
+ utl_rte_pktmbuf_check(m);
+ uint32_t old=sanb_atomic_dec2zero32(&m->refcnt_reserved);
+ if (old == 1) {
+ struct rte_mbuf *md = RTE_MBUF_FROM_BADDR(m->buf_addr);
+
+ if ( md != m ) {
+ rte_pktmbuf_detach(m);
+ if (rte_mbuf_refcnt_update(md, -1) == 0)
+ free(md);
+ }
+
+ free(m);
+ }
+}
+
+
+
+void rte_pktmbuf_free(rte_mbuf_t *m){
+
+ rte_mbuf_t *m_next;
+
+ utl_rte_pktmbuf_check(m);
+
+ while (m != NULL) {
+ m_next = m->next;
+ rte_pktmbuf_free_seg(m);
+ m = m_next;
+ }
+}
+
+static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
+{
+ struct rte_mbuf *m2 = (struct rte_mbuf *)m;
+ utl_rte_pktmbuf_check(m);
+
+
+ while (m2->next != NULL)
+ m2 = m2->next;
+ return m2;
+}
+
+static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
+{
+ return m->data_off;
+}
+
+static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
+{
+ return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
+ m->data_len);
+}
+
+
+char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
+{
+ void *tail;
+ struct rte_mbuf *m_last;
+ utl_rte_pktmbuf_check(m);
+
+
+ m_last = rte_pktmbuf_lastseg(m);
+ if (len > rte_pktmbuf_tailroom(m_last))
+ return NULL;
+
+ tail = (char*) m_last->buf_addr + m_last->data_len;
+ m_last->data_len = (uint16_t)(m_last->data_len + len);
+ m->pkt_len = (m->pkt_len + len);
+ return (char*) tail;
+}
+
+
+char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
+{
+ utl_rte_pktmbuf_check(m);
+
+ if (len > m->data_len)
+ return NULL;
+
+ m->data_len = (uint16_t)(m->data_len - len);
+ m->data_off += len;
+ m->pkt_len = (m->pkt_len - len);
+ return (char *)m->buf_addr + m->data_off;
+}
+
+
+int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
+{
+ struct rte_mbuf *m_last;
+ utl_rte_pktmbuf_check(m);
+
+ m_last = rte_pktmbuf_lastseg(m);
+ if (len > m_last->data_len)
+ return -1;
+
+ m_last->data_len = (uint16_t)(m_last->data_len - len);
+ m->pkt_len = (m->pkt_len - len);
+ return 0;
+}
+
+
+static void
+rte_pktmbuf_hexdump(const void *buf, unsigned int len)
+{
+ unsigned int i, out, ofs;
+ const unsigned char *data = (unsigned char *)buf;
+#define LINE_LEN 80
+ char line[LINE_LEN];
+
+ printf(" dump data at 0x%p, len=%u\n", data, len);
+ ofs = 0;
+ while (ofs < len) {
+ out = snprintf(line, LINE_LEN, " %08X", ofs);
+ for (i = 0; ofs+i < len && i < 16; i++)
+ out += snprintf(line+out, LINE_LEN - out, " %02X",
+ data[ofs+i]&0xff);
+ for (; i <= 16; i++)
+ out += snprintf(line+out, LINE_LEN - out, " ");
+ for (i = 0; ofs < len && i < 16; i++, ofs++) {
+ unsigned char c = data[ofs];
+ if (!isascii(c) || !isprint(c))
+ c = '.';
+ out += snprintf(line+out, LINE_LEN - out, "%c", c);
+ }
+ printf("%s\n", line);
+ }
+}
+
+
+int rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p){
+ utl_rte_check(mp);
+ uint16_t buf_len;
+ buf_len = mp->elt_size ;
+ *obj_p=(void *)::malloc(buf_len);
+ return (0);
+}
+
+void rte_mempool_sp_put(struct rte_mempool *mp, void *obj){
+ free(obj);
+}
+
+
+void rte_exit(int exit_code, const char *format, ...){
+ exit(exit_code);
+}
+
+
+
+void
+rte_pktmbuf_dump(const struct rte_mbuf *m, unsigned dump_len)
+{
+ unsigned int len;
+ unsigned nb_segs;
+
+
+ printf("dump mbuf at 0x%p, phys=0x%p, buf_len=%u\n",
+ m, m->buf_addr, (unsigned)m->buf_len);
+ printf(" pkt_len=%u, nb_segs=%u, "
+ "in_port=%u\n", m->pkt_len,
+ (unsigned)m->nb_segs, (unsigned)m->in_port);
+ nb_segs = m->nb_segs;
+
+ while (m && nb_segs != 0) {
+
+ printf(" segment at 0x%p, data=0x%p, data_len=%u\n",
+ m, m->buf_addr, (unsigned)m->data_len);
+ len = dump_len;
+ if (len > m->data_len)
+ len = m->data_len;
+ if (len != 0)
+ rte_pktmbuf_hexdump(m->buf_addr, len);
+ dump_len -= len;
+ m = m->next;
+ nb_segs --;
+ }
+}
+
+
+rte_mbuf_t * utl_rte_pktmbuf_add_after2(rte_mbuf_t *m1,rte_mbuf_t *m2){
+ utl_rte_pktmbuf_check(m1);
+ utl_rte_pktmbuf_check(m2);
+
+ m1->next=m2;
+ m1->pkt_len += m2->data_len;
+ m1->nb_segs = m2->nb_segs + 1;
+ return (m1);
+}
+
+
+
+void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
+{
+
+ rte_mbuf_refcnt_update(md, 1);
+ mi->buf_addr = md->buf_addr;
+ mi->buf_len = md->buf_len;
+ mi->data_off = md->data_off;
+
+ mi->next = NULL;
+ mi->pkt_len = mi->data_len;
+ mi->nb_segs = 1;
+}
+
+void rte_pktmbuf_detach(struct rte_mbuf *m)
+{
+ const struct rte_mempool *mp = m->pool;
+ void *buf = RTE_MBUF_TO_BADDR(m);
+ uint32_t buf_len = mp->elt_size - sizeof(*m);
+
+ m->buf_addr = buf;
+ m->buf_len = (uint16_t)buf_len;
+
+ #if RTE_PKTMBUF_HEADROOM > 0
+ m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
+ RTE_PKTMBUF_HEADROOM : m->buf_len;
+ #else
+ m->data_off = RTE_PKTMBUF_HEADROOM ;
+ #endif
+
+
+ m->data_len = 0;
+}
+
+
+
+
+
+rte_mbuf_t * utl_rte_pktmbuf_add_after(rte_mbuf_t *m1,rte_mbuf_t *m2){
+
+ utl_rte_pktmbuf_check(m1);
+ utl_rte_pktmbuf_check(m2);
+
+ rte_mbuf_refcnt_update(m2,1);
+ m1->next=m2;
+ m1->pkt_len += m2->data_len;
+ m1->nb_segs = m2->nb_segs + 1;
+ return (m1);
+}
+
+
+uint64_t rte_rand(void){
+ return ( rand() );
+}
+
+
+
+
+
+#ifdef ONLY_A_TEST
+
+
+#define CONST_NB_MBUF 2048
+#define CONST_MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+
+
+void test_pkt_mbuf(){
+
+ rte_mempool_t * mp1=utl_rte_mempool_create("big-const",
+ CONST_NB_MBUF,
+ CONST_MBUF_SIZE,
+ 32);
+ rte_mbuf_t * m1 = rte_pktmbuf_alloc(mp1);
+ rte_mbuf_t * m2 = rte_pktmbuf_alloc(mp1);
+
+ char *p=rte_pktmbuf_append(m1, 10);
+ int i;
+
+ for (i=0; i<10;i++) {
+ p[i]=i;
+ }
+
+ p=rte_pktmbuf_append(m2, 10);
+
+ for (i=0; i<10;i++) {
+ p[i]=0x55+i;
+ }
+
+ rte_pktmbuf_dump(m1, m1->pkt_len);
+ rte_pktmbuf_dump(m2, m1->pkt_len);
+
+ rte_pktmbuf_free(m1);
+ rte_pktmbuf_free(m2);
+}
+
+
+
+
+#endif
diff --git a/src/pal/linux/mbuf.h b/src/pal/linux/mbuf.h
new file mode 100755
index 00000000..693b095a
--- /dev/null
+++ b/src/pal/linux/mbuf.h
@@ -0,0 +1,192 @@
+#ifndef MBUF_H
+#define MBUF_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <stdint.h>
+#include <string.h>
+
+#define MAGIC0 0xAABBCCDD
+#define MAGIC2 0x11223344
+
+struct rte_mempool {
+ uint32_t magic;
+ uint32_t elt_size;
+ uint32_t magic2;
+ uint32_t _id;
+ int size;
+};
+
+
+
+
+struct rte_mbuf {
+ uint32_t magic;
+ struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
+ void *buf_addr; /**< Virtual address of segment buffer. */
+ uint16_t buf_len; /**< Length of segment buffer. */
+ uint16_t data_off;
+
+ struct rte_mbuf *next; /**< Next segment of scattered packet. */
+ uint16_t data_len; /**< Amount of data in segment buffer. */
+
+ /* these fields are valid for first segment only */
+ uint8_t nb_segs; /**< Number of segments. */
+ uint8_t in_port; /**< Input port. */
+ uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
+
+ uint32_t magic2;
+ uint32_t refcnt_reserved; /**< Do not use this field */
+} ;
+
+
+typedef struct rte_mbuf rte_mbuf_t;
+
+typedef struct rte_mempool rte_mempool_t;
+
+#define RTE_PKTMBUF_HEADROOM 0
+
+rte_mempool_t * utl_rte_mempool_create(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id ,
+ int socket_id
+ );
+
+rte_mempool_t * utl_rte_mempool_create_non_pkt(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id ,
+ int socket_id);
+
+inline unsigned rte_mempool_count(rte_mempool_t *mp){
+ return (10);
+}
+
+
+
+void rte_pktmbuf_free(rte_mbuf_t *m);
+
+rte_mbuf_t *rte_pktmbuf_alloc(rte_mempool_t *mp);
+
+char *rte_pktmbuf_append(rte_mbuf_t *m, uint16_t len);
+
+char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len);
+
+int rte_pktmbuf_trim(rte_mbuf_t *m, uint16_t len);
+
+void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md);
+
+
+
+
+void rte_pktmbuf_free_seg(rte_mbuf_t *m);
+
+uint16_t rte_mbuf_refcnt_update(rte_mbuf_t *m, int16_t value);
+
+rte_mbuf_t * utl_rte_pktmbuf_add_after(rte_mbuf_t *m1,rte_mbuf_t *m2);
+rte_mbuf_t * utl_rte_pktmbuf_add_after2(rte_mbuf_t *m1,rte_mbuf_t *m2);
+
+void rte_pktmbuf_dump(const struct rte_mbuf *m, unsigned dump_len);
+
+
+int rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p);
+
+void rte_mempool_sp_put(struct rte_mempool *mp, void *obj);
+
+inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p){
+ return (rte_mempool_sc_get(mp, obj_p));
+}
+
+inline void rte_mempool_put(struct rte_mempool *mp, void *obj){
+ rte_mempool_sp_put(mp, obj);
+}
+
+
+static inline void *
+rte_memcpy(void *dst, const void *src, size_t n)
+{
+ return (memcpy(dst, src, n));
+}
+
+
+
+void rte_exit(int exit_code, const char *format, ...);
+
+static inline unsigned
+rte_lcore_to_socket_id(unsigned lcore_id){
+ return (0);
+}
+
+#define rte_pktmbuf_mtod(m, t) ((t)((char *)(m)->buf_addr + (m)->data_off))
+
+/**
+ * A macro that returns the length of the packet.
+ *
+ * The value can be read or assigned.
+ *
+ * @param m
+ * The packet mbuf.
+ */
+#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
+
+/**
+ * A macro that returns the length of the segment.
+ *
+ * The value can be read or assigned.
+ *
+ * @param m
+ * The packet mbuf.
+ */
+#define rte_pktmbuf_data_len(m) ((m)->data_len)
+
+
+uint64_t rte_rand(void);
+
+
+static inline void utl_rte_pktmbuf_add_last(rte_mbuf_t *m,rte_mbuf_t *m_last){
+
+ // two cases are supported:
+ // 1. a single mbuf
+ // 2. two mbufs where the last one is indirect
+
+ if ( m->next == NULL ) {
+ utl_rte_pktmbuf_add_after2(m,m_last);
+ }else{
+ m->next->next=m_last;
+ m->pkt_len += m_last->data_len;
+ m->nb_segs = 3;
+ }
+}
+
+
+
+
+#define __rte_cache_aligned
+
+#define CACHE_LINE_SIZE 64
+
+#define SOCKET_ID_ANY 0
+
+#endif
diff --git a/src/pal/linux/pal_utl.cpp b/src/pal/linux/pal_utl.cpp
new file mode 100755
index 00000000..de864dbd
--- /dev/null
+++ b/src/pal/linux/pal_utl.cpp
@@ -0,0 +1,29 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include "pal_utl.h"
+
+
+
+
+
diff --git a/src/pal/linux/pal_utl.h b/src/pal/linux/pal_utl.h
new file mode 100755
index 00000000..38152850
--- /dev/null
+++ b/src/pal/linux/pal_utl.h
@@ -0,0 +1,70 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#ifndef PAL_UTL_H
+#define PAL_UTL_H
+
+#include <stdint.h>
+
+#define PAL_MSB(x) (((x) >> 8) & 0xff) /* most signif byte of 2-byte integer */
+#define PAL_LSB(x) ((x) & 0xff) /* least signif byte of 2-byte integer*/
+#define PAL_MSW(x) (((x) >> 16) & 0xffff) /* most signif word of 2-word integer */
+#define PAL_LSW(x) ((x) & 0xffff) /* least signif byte of 2-word integer*/
+
+/* swap the MSW with the LSW of a 32 bit integer */
+#define PAL_WORDSWAP(x) (PAL_MSW(x) | (PAL_LSW(x) << 16))
+
+#define PAL_LLSB(x) ((x) & 0xff) /* 32bit word byte/word swap macros */
+#define PAL_LNLSB(x) (((x) >> 8) & 0xff)
+#define PAL_LNMSB(x) (((x) >> 16) & 0xff)
+#define PAL_LMSB(x) (((x) >> 24) & 0xff)
+#define PAL_LONGSWAP(x) ((PAL_LLSB(x) << 24) | \
+ (PAL_LNLSB(x) << 16)| \
+ (PAL_LNMSB(x) << 8) | \
+ (PAL_LMSB(x)))
+
+#define PAL_NTOHL(x) ((uint32_t)(PAL_LONGSWAP((uint32_t)x)))
+#define PAL_NTOHS(x) ((uint16_t)(((PAL_MSB((uint16_t)x))) | (PAL_LSB((uint16_t)x) << 8)))
+
+#define PAL_HTONS(x) (PAL_NTOHS(x))
+#define PAL_HTONL(x) (PAL_NTOHL(x))
+
+static inline uint64_t pal_ntohl64(uint64_t x){
+ uint32_t high=(uint32_t)((x&0xffffffff00000000ULL)>>32);
+ uint32_t low=(uint32_t)((x&0xffffffff));
+ uint64_t res=((uint64_t)(PAL_LONGSWAP(low)))<<32 | PAL_LONGSWAP(high);
+ return (res);
+}
+
+#define PAL_NTOHLL(x) (pal_ntohl64(x))
+
+
+#define unlikely(a) (a)
+#define likely(a) (a)
+
+static inline void rte_pause (void)
+{
+}
+
+
+
+#endif
+
+
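
A short self-check for the byte-swap macros above. It assumes a little-endian host, which is what these unconditional swaps already target; the include name is the header just added.

#include <assert.h>
#include <stdint.h>
#include "pal_utl.h"

int main(void) {
    /* on a little-endian host PAL_NTOHS/PAL_NTOHL/PAL_NTOHLL behave like ntohs/ntohl/be64toh */
    assert(PAL_NTOHS((uint16_t)0x1234) == (uint16_t)0x3412);
    assert(PAL_NTOHL(0x0A0B0C0DU) == 0x0D0C0B0AU);
    assert(PAL_NTOHLL(0x1122334455667788ULL) == 0x8877665544332211ULL);
    return 0;
}
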
diff --git a/src/pal/linux/sanb_atomic.h b/src/pal/linux/sanb_atomic.h
new file mode 100755
index 00000000..8d24f8f4
--- /dev/null
+++ b/src/pal/linux/sanb_atomic.h
@@ -0,0 +1,175 @@
+#ifndef SANB_ATOMIC_
+#define SANB_ATOMIC_
+#include <stdlib.h>
+
+#define FORCE_NON_INILINE __attribute__((noinline))
+
+static inline void sanb_smp_store_memory_barrier (void)
+{
+ asm volatile ("sfence":::"memory");
+}
+
+static inline void sanb_smp_load_memory_barrier (void)
+{
+ asm volatile ("lfence":::"memory");
+}
+
+static inline void sanb_smp_memory_barrier (void)
+{
+ asm volatile ("mfence":::"memory");
+}
+
+
+static inline bool
+sanb_atomic_compare_and_set_32 (uint32_t old_value, uint32_t new_value,
+ volatile uint32_t *addr)
+{
+ long result;
+#if __WORDSIZE == 64 || defined(__x86_64__)
+ asm volatile (" lock cmpxchgl %2, 0(%3);" /* do the atomic operation */
+ " sete %b0;" /* on success the ZF=1, copy that to */
+ /* the low order byte of %eax (AKA %al)*/
+ " movzbq %b0, %0;"/* zero extend %al to all of %eax */
+ : "=a" (result)
+ : "0" (old_value), "q" (new_value), "r" (addr)
+ : "memory" );
+#else
+ asm volatile (" lock cmpxchgl %2, 0(%3);" /* do the atomic operation */
+ " sete %b0;" /* on success the ZF=1, copy that to */
+ /* the low order byte of %eax (AKA %al)*/
+ " movzbl %b0, %0;"/* zero extend %al to all of %eax */
+ : "=a" (result)
+ : "0" (old_value), "q" (new_value), "r" (addr)
+ : "memory" );
+#endif
+ return (bool)result;
+}
+
+
+
+/*
+ * FIXME: on some processors the cmpxchg8b() instruction does not exist. On
+ * those processors this will cause a seg-fault. The only way to implement
+ * this operation on such a processor is to use a global lock.
+ */
+static inline bool
+sanb_atomic_compare_and_set_64 (uint64_t old_value, uint64_t new_value,
+ volatile void *ptr)
+{
+ volatile uint64_t *addr = (volatile uint64_t *)ptr;
+#if __WORDSIZE == 64 || defined(__x86_64__)
+ uint64_t prev;
+ asm volatile (" lock cmpxchgq %2, 0(%3);" /* do the atomic operation */
+ : "=a" (prev) /* result will be stored in RAX */
+ : "0" (old_value), "q" (new_value), "r" (addr)
+ : "memory");
+ return (bool) (prev == old_value);
+#else
+ uint64_t result;
+ asm volatile (" movl 0(%2), %%ebx;" /* load ECX:EBX with new_value */
+ " movl 4(%2), %%ecx;"
+ " lock cmpxchg8b 0(%3);" /* do the atomic operation */
+ " sete %b0;\n" /* on success the ZF=1, copy that to */
+ /* the low order byte of %eax (AKA %al)*/
+ " movzbl %b0, %0;"/* zero extend %al to all of %eax */
+ : "=A" (result) /* result will be stored in EDX:EAX */
+ : "0" (old_value), "r" (&new_value), "r" (addr)
+ : "memory", "ecx", "ebx" );
+ return (bool) result;
+#endif
+}
+
+
+static inline void
+sanb_atomic_add_32 (volatile uint32_t *addr, uint32_t increment)
+{
+ asm volatile (" lock addl %0, 0(%1);"
+ :
+ : "q" (increment), "r" (addr)
+ : "memory" );
+}
+
+
+static inline void
+sanb_atomic_subtract_32 (volatile uint32_t *addr, uint32_t decrement)
+{
+ asm volatile (" lock subl %0, 0(%1);"
+ :
+ : "q" (decrement), "r" (addr)
+ : "memory" );
+}
+
+
+/*
+ * It is not possible to do an atomic 64 bit add in 32-bit mode. Fortunately
+ * it is possible to do a 64-bit cmpxchg, so we can use that to implement a
+ * 64-bit atomic_add.
+ */
+static inline void
+sanb_atomic_add_64 (volatile void *ptr, uint64_t increment)
+{
+ volatile uint64_t *addr = (volatile uint64_t *)ptr;
+ uint64_t original;
+
+ do {
+ original = *addr;
+ } while (!sanb_atomic_compare_and_set_64(original, original + increment, addr));
+}
+
+
+
+static inline uint32_t sanb_atomic_dec2zero32(volatile void *ptr){
+ volatile uint32_t *addr = (volatile uint32_t *)ptr;
+ uint32_t original;
+ do {
+ original = *addr;
+ } while (!sanb_atomic_compare_and_set_32(original, original ? (original - 1):0, addr));
+ return (original);
+}
+
+
+
+static inline uint32_t
+sanb_atomic_add_return_32_old (volatile uint32_t *addr, uint32_t increment)
+{
+ uint32_t original;
+
+ asm volatile (" lock xaddl %1, 0(%2);"
+ : "=r" (original)
+ : "0" (increment), "q" (addr)
+ : "memory" );
+ return original ;
+}
+
+
+static inline uint64_t
+sanb_atomic_add_return_64_old (volatile void *ptr, uint64_t increment)
+{
+ volatile uint64_t *addr = (volatile uint64_t *)ptr;
+ uint64_t original;
+
+ do {
+ original = *addr;
+ } while (!sanb_atomic_compare_and_set_64(original, original + increment, addr));
+ return original ;
+}
+
+
+
+static inline void*
+sanb_mem_read_ptr(void **v) {
+
+ volatile void **p=(volatile void **)v;
+ return ((void *)(*p));
+}
+
+static inline void
+sanb_mem_write_ptr(void **v,void *value) {
+
+ volatile void **p=(volatile void **)v;
+ *p = value;
+}
+
+
+
+#endif
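
The compare-and-set retry loop used by sanb_atomic_add_64() above generalizes to any read-modify-write that has no single locked instruction. A small sketch of that pattern, using only primitives defined in this header; the helper name is illustrative and not part of the patch.

#include <stdint.h>
#include "sanb_atomic.h"   /* the header above; path assumed */

/* Sketch: atomically raise *ptr to at least `candidate` (an "atomic max").
   The CAS loop retries whenever another writer updates the value between the
   read of `original` and the compare-and-set. */
static inline void sanb_atomic_max_64(volatile void *ptr, uint64_t candidate) {
    volatile uint64_t *addr = (volatile uint64_t *)ptr;
    uint64_t original;
    do {
        original = *addr;
        if (original >= candidate) {
            return;   /* already large enough, nothing to publish */
        }
    } while (!sanb_atomic_compare_and_set_64(original, candidate, addr));
}
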
diff --git a/src/pal/linux_dpdk/CRing.h b/src/pal/linux_dpdk/CRing.h
new file mode 100755
index 00000000..8325c1b6
--- /dev/null
+++ b/src/pal/linux_dpdk/CRing.h
@@ -0,0 +1,89 @@
+#ifndef C_RING_H
+#define C_RING_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <assert.h>
+#include <stdint.h>
+#include <string>
+#include <rte_ring.h>
+
+
+
+class CRingSp {
+public:
+ CRingSp(){
+ m_ring=0;
+ }
+
+ bool Create(std::string name,
+ uint16_t cnt,
+ int socket_id){
+ m_ring=rte_ring_create((char *)name.c_str(),
+ cnt,
+ socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ assert(m_ring);
+ return(true);
+ }
+
+ void Delete(void){
+        // the DPDK ring memory cannot be freed here; it comes from reserved (huge-page) memory
+ }
+
+ int Enqueue(void *obj){
+ return (rte_ring_sp_enqueue(m_ring,obj));
+ }
+
+ int Dequeue(void * & obj){
+ return(rte_ring_mc_dequeue(m_ring,(void **)&obj));
+ }
+
+ bool isFull(void){
+ return ( rte_ring_full(m_ring)?true:false );
+ }
+
+ bool isEmpty(void){
+ return ( rte_ring_empty(m_ring)?true:false );
+ }
+
+private:
+ struct rte_ring * m_ring;
+};
+
+template <class T>
+class CTRingSp : public CRingSp {
+public:
+ int Enqueue(T *obj){
+ return ( CRingSp::Enqueue((void*)obj) );
+ }
+
+ int Dequeue(T * & obj){
+ return (CRingSp::Dequeue(*((void **)&obj)));
+ }
+};
+
+
+
+
+#endif
+
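
A minimal usage sketch for the typed ring wrapper above. It assumes DPDK's EAL is already initialized (Create() asserts if rte_ring_create() fails) and that the requested size is a power of two, as DPDK requires; the Job struct and names are illustrative only.

#include "CRing.h"

struct Job {
    uint32_t m_id;
};

void ring_example(void) {
    CTRingSp<Job> ring;
    ring.Create("job-ring", 1024, 0 /* socket id */);   /* 1024 is a power of two */

    Job *job = new Job();
    job->m_id = 17;
    if (ring.Enqueue(job) != 0) {
        delete job;                 /* ring full, the caller keeps ownership */
    }

    Job *out = NULL;
    if (ring.Dequeue(out) == 0) {   /* rte_ring dequeue returns 0 on success */
        delete out;
    }
}
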
diff --git a/src/pal/linux_dpdk/dpdk180/rte_config.h b/src/pal/linux_dpdk/dpdk180/rte_config.h
new file mode 100755
index 00000000..68dd7a7b
--- /dev/null
+++ b/src/pal/linux_dpdk/dpdk180/rte_config.h
@@ -0,0 +1,234 @@
+#ifndef __RTE_CONFIG_H
+#define __RTE_CONFIG_H
+#undef RTE_EXEC_ENV
+#define RTE_EXEC_ENV "linuxapp"
+#undef RTE_EXEC_ENV_LINUXAPP
+#define RTE_EXEC_ENV_LINUXAPP 1
+#undef RTE_FORCE_INTRINSICS
+#undef RTE_BUILD_SHARED_LIB
+#undef RTE_BUILD_COMBINE_LIBS
+#undef RTE_LIBNAME
+#define RTE_LIBNAME "intel_dpdk"
+#undef RTE_LIBRTE_EAL
+#define RTE_LIBRTE_EAL 1
+#undef RTE_MAX_LCORE
+#define RTE_MAX_LCORE 32
+#undef RTE_MAX_NUMA_NODES
+#define RTE_MAX_NUMA_NODES 8
+#undef RTE_MAX_MEMSEG
+#define RTE_MAX_MEMSEG 256
+#undef RTE_MAX_MEMZONE
+#define RTE_MAX_MEMZONE 2560
+#undef RTE_MAX_TAILQ
+#define RTE_MAX_TAILQ 32
+#undef RTE_LOG_LEVEL
+#define RTE_LOG_LEVEL 8
+#undef RTE_LOG_HISTORY
+#define RTE_LOG_HISTORY 256
+#undef RTE_LIBEAL_USE_HPET
+#undef RTE_EAL_ALLOW_INV_SOCKET_ID
+#undef RTE_EAL_ALWAYS_PANIC_ON_ERROR
+#undef RTE_EAL_IGB_UIO
+#define RTE_EAL_IGB_UIO 1
+#undef RTE_EAL_VFIO
+#define RTE_EAL_VFIO 1
+#undef RTE_PCI_CONFIG
+#undef RTE_PCI_EXTENDED_TAG
+#define RTE_PCI_EXTENDED_TAG ""
+#undef RTE_PCI_MAX_READ_REQUEST_SIZE
+#define RTE_PCI_MAX_READ_REQUEST_SIZE 0
+#undef RTE_LIBRTE_EAL_LINUXAPP
+#define RTE_LIBRTE_EAL_LINUXAPP 1
+#undef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
+#define RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT 1
+#undef RTE_LIBRTE_KVARGS
+#define RTE_LIBRTE_KVARGS 1
+#undef RTE_LIBRTE_ETHER
+#define RTE_LIBRTE_ETHER 1
+#undef RTE_LIBRTE_ETHDEV_DEBUG
+#undef RTE_MAX_ETHPORTS
+#define RTE_MAX_ETHPORTS 32
+#undef RTE_LIBRTE_IEEE1588
+#undef RTE_ETHDEV_QUEUE_STAT_CNTRS
+#define RTE_ETHDEV_QUEUE_STAT_CNTRS 16
+#undef RTE_NIC_BYPASS
+#undef RTE_LIBRTE_EM_PMD
+#define RTE_LIBRTE_EM_PMD 1
+#undef RTE_LIBRTE_IGB_PMD
+#define RTE_LIBRTE_IGB_PMD 1
+#undef RTE_LIBRTE_E1000_DEBUG_INIT
+#undef RTE_LIBRTE_E1000_DEBUG_RX
+#undef RTE_LIBRTE_E1000_DEBUG_TX
+#undef RTE_LIBRTE_E1000_DEBUG_TX_FREE
+#undef RTE_LIBRTE_E1000_DEBUG_DRIVER
+#undef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
+#undef RTE_LIBRTE_IXGBE_PMD
+#define RTE_LIBRTE_IXGBE_PMD 1
+#undef RTE_LIBRTE_IXGBE_DEBUG_INIT
+#undef RTE_LIBRTE_IXGBE_DEBUG_RX
+#undef RTE_LIBRTE_IXGBE_DEBUG_TX
+#undef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+#undef RTE_LIBRTE_IXGBE_DEBUG_DRIVER
+#undef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
+#undef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+#define RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC 1
+#undef RTE_IXGBE_INC_VECTOR
+#define RTE_IXGBE_INC_VECTOR 1
+#undef RTE_IXGBE_RX_OLFLAGS_ENABLE
+#define RTE_IXGBE_RX_OLFLAGS_ENABLE 1
+#undef RTE_LIBRTE_I40E_PMD
+#define RTE_LIBRTE_I40E_PMD 1
+#undef RTE_LIBRTE_I40E_DEBUG_INIT
+#undef RTE_LIBRTE_I40E_DEBUG_RX
+#undef RTE_LIBRTE_I40E_DEBUG_TX
+#undef RTE_LIBRTE_I40E_DEBUG_TX_FREE
+#undef RTE_LIBRTE_I40E_DEBUG_DRIVER
+#undef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+#define RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC 1
+#undef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#undef RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
+#undef RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM 4
+#undef RTE_LIBRTE_I40E_ITR_INTERVAL
+#define RTE_LIBRTE_I40E_ITR_INTERVAL -1
+#undef RTE_LIBRTE_ENIC_PMD
+#define RTE_LIBRTE_ENIC_PMD 1
+#undef RTE_LIBRTE_VIRTIO_PMD
+#define RTE_LIBRTE_VIRTIO_PMD 1
+#undef RTE_LIBRTE_VIRTIO_DEBUG_INIT
+#undef RTE_LIBRTE_VIRTIO_DEBUG_RX
+#undef RTE_LIBRTE_VIRTIO_DEBUG_TX
+#undef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER
+#undef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
+#undef RTE_LIBRTE_VMXNET3_PMD
+#define RTE_LIBRTE_VMXNET3_PMD 1
+#undef RTE_LIBRTE_VMXNET3_DEBUG_INIT
+#undef RTE_LIBRTE_VMXNET3_DEBUG_RX
+#undef RTE_LIBRTE_VMXNET3_DEBUG_TX
+#undef RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE
+#undef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+#undef RTE_LIBRTE_PMD_RING
+#define RTE_LIBRTE_PMD_RING 1
+#undef RTE_PMD_RING_MAX_RX_RINGS
+#define RTE_PMD_RING_MAX_RX_RINGS 16
+#undef RTE_PMD_RING_MAX_TX_RINGS
+#define RTE_PMD_RING_MAX_TX_RINGS 16
+#undef RTE_LIBRTE_PMD_PCAP
+#undef RTE_LIBRTE_PMD_BOND
+#define RTE_LIBRTE_PMD_BOND 1
+#undef RTE_LIBRTE_PMD_AF_PACKET
+#define RTE_LIBRTE_PMD_AF_PACKET 1
+#undef RTE_LIBRTE_PMD_XENVIRT
+#undef RTE_PMD_PACKET_PREFETCH
+#define RTE_PMD_PACKET_PREFETCH 1
+#undef RTE_LIBRTE_RING
+#define RTE_LIBRTE_RING 1
+#undef RTE_LIBRTE_RING_DEBUG
+#undef RTE_RING_SPLIT_PROD_CONS
+#undef RTE_LIBRTE_MEMPOOL
+#define RTE_LIBRTE_MEMPOOL 1
+#undef RTE_MEMPOOL_CACHE_MAX_SIZE
+#define RTE_MEMPOOL_CACHE_MAX_SIZE 512
+#undef RTE_LIBRTE_MEMPOOL_DEBUG
+#undef RTE_LIBRTE_MBUF
+#define RTE_LIBRTE_MBUF 1
+#undef RTE_LIBRTE_MBUF_DEBUG
+#undef RTE_MBUF_REFCNT
+#define RTE_MBUF_REFCNT 1
+#undef RTE_MBUF_REFCNT_ATOMIC
+#define RTE_MBUF_REFCNT_ATOMIC 1
+#undef RTE_PKTMBUF_HEADROOM
+#define RTE_PKTMBUF_HEADROOM 0
+#undef RTE_LIBRTE_TIMER
+#define RTE_LIBRTE_TIMER 1
+#undef RTE_LIBRTE_TIMER_DEBUG
+#undef RTE_LIBRTE_MALLOC
+#define RTE_LIBRTE_MALLOC 1
+#undef RTE_LIBRTE_MALLOC_DEBUG
+#undef RTE_MALLOC_MEMZONE_SIZE
+#define RTE_MALLOC_MEMZONE_SIZE 11M
+#undef RTE_LIBRTE_CFGFILE
+#define RTE_LIBRTE_CFGFILE 1
+#undef RTE_LIBRTE_CMDLINE
+#define RTE_LIBRTE_CMDLINE 1
+#undef RTE_LIBRTE_CMDLINE_DEBUG
+#undef RTE_LIBRTE_HASH
+#define RTE_LIBRTE_HASH 1
+#undef RTE_LIBRTE_HASH_DEBUG
+#undef RTE_LIBRTE_LPM
+#define RTE_LIBRTE_LPM 1
+#undef RTE_LIBRTE_LPM_DEBUG
+#undef RTE_LIBRTE_ACL
+#define RTE_LIBRTE_ACL 1
+#undef RTE_LIBRTE_ACL_DEBUG
+#undef RTE_LIBRTE_ACL_STANDALONE
+#undef RTE_LIBRTE_POWER
+#define RTE_LIBRTE_POWER 1
+#undef RTE_LIBRTE_POWER_DEBUG
+#undef RTE_MAX_LCORE_FREQS
+#define RTE_MAX_LCORE_FREQS 64
+#undef RTE_LIBRTE_NET
+#define RTE_LIBRTE_NET 1
+#undef RTE_LIBRTE_IP_FRAG
+#define RTE_LIBRTE_IP_FRAG 1
+#undef RTE_LIBRTE_IP_FRAG_DEBUG
+#undef RTE_LIBRTE_IP_FRAG_MAX_FRAG
+#define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
+#undef RTE_LIBRTE_IP_FRAG_TBL_STAT
+#undef RTE_LIBRTE_METER
+#define RTE_LIBRTE_METER 1
+#undef RTE_LIBRTE_SCHED
+#define RTE_LIBRTE_SCHED 1
+#undef RTE_SCHED_RED
+#undef RTE_SCHED_COLLECT_STATS
+#undef RTE_SCHED_SUBPORT_TC_OV
+#undef RTE_SCHED_PORT_N_GRINDERS
+#define RTE_SCHED_PORT_N_GRINDERS 8
+#undef RTE_LIBRTE_DISTRIBUTOR
+#define RTE_LIBRTE_DISTRIBUTOR 1
+#undef RTE_LIBRTE_PORT
+#define RTE_LIBRTE_PORT 1
+#undef RTE_LIBRTE_TABLE
+#define RTE_LIBRTE_TABLE 1
+#undef RTE_LIBRTE_PIPELINE
+#define RTE_LIBRTE_PIPELINE 1
+#undef RTE_LIBRTE_KNI
+#define RTE_LIBRTE_KNI 1
+#undef RTE_KNI_KO_DEBUG
+#undef RTE_KNI_VHOST
+#undef RTE_KNI_VHOST_MAX_CACHE_SIZE
+#define RTE_KNI_VHOST_MAX_CACHE_SIZE 1024
+#undef RTE_KNI_VHOST_VNET_HDR_EN
+#undef RTE_KNI_VHOST_DEBUG_RX
+#undef RTE_KNI_VHOST_DEBUG_TX
+#undef RTE_LIBRTE_VHOST
+#undef RTE_LIBRTE_VHOST_DEBUG
+#undef RTE_LIBRTE_XEN_DOM0
+#undef RTE_INSECURE_FUNCTION_WARNING
+#undef RTE_APP_TEST
+#define RTE_APP_TEST 1
+#undef RTE_TEST_PMD
+#define RTE_TEST_PMD 1
+#undef RTE_TEST_PMD_RECORD_CORE_CYCLES
+#undef RTE_TEST_PMD_RECORD_BURST_STATS
+#undef RTE_MACHINE
+#define RTE_MACHINE "native"
+#undef RTE_ARCH
+#define RTE_ARCH "x86_64"
+#undef RTE_ARCH_X86_64
+#define RTE_ARCH_X86_64 1
+#undef RTE_ARCH_64
+#define RTE_ARCH_64 1
+#undef RTE_TOOLCHAIN
+#define RTE_TOOLCHAIN "gcc"
+#undef RTE_TOOLCHAIN_GCC
+#define RTE_TOOLCHAIN_GCC 1
+
+//#define RTE_LIBRTE_IXGBE_DEBUG_INIT
+//#define RTE_LIBRTE_IXGBE_DEBUG_RX
+//#define RTE_LIBRTE_IXGBE_DEBUG_TX
+//#define RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+//#define RTE_LIBRTE_IXGBE_DEBUG_DRIVER
+
+#endif /* __RTE_CONFIG_H */
diff --git a/src/pal/linux_dpdk/mbuf.cpp b/src/pal/linux_dpdk/mbuf.cpp
new file mode 100755
index 00000000..dd78617f
--- /dev/null
+++ b/src/pal/linux_dpdk/mbuf.cpp
@@ -0,0 +1,75 @@
+#include "mbuf.h"
+
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+rte_mempool_t * utl_rte_mempool_create(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id,
+ uint32_t socket_id ){
+ char buffer[100];
+ sprintf(buffer,"%s-%d",name,socket_id);
+
+ rte_mempool_t * res=
+ rte_mempool_create(buffer, n,
+ elt_size, cache_size,
+ sizeof(struct rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, NULL,
+ rte_pktmbuf_init, NULL,
+ socket_id, 0);
+ if (res == NULL){
+        printf(" ERROR there is not enough huge-page memory in your system \n");
+ rte_exit(EXIT_FAILURE, "Cannot init mbuf pool %s\n",name);
+ }
+ return res;
+}
+
+rte_mempool_t * utl_rte_mempool_create_non_pkt(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id ,
+ int socket_id){
+ char buffer[100];
+ sprintf(buffer,"%s-%d",name,socket_id);
+
+ rte_mempool_t * res=
+ rte_mempool_create(buffer, n,
+ elt_size,
+ cache_size,
+ 0,
+ NULL, NULL,
+ NULL, NULL,
+ socket_id, 0);
+ if (res == NULL) {
+        printf(" ERROR there is not enough huge-page memory in your system \n");
+ rte_exit(EXIT_FAILURE, "Cannot init nodes mbuf pool %s\n",name);
+ }
+ return res;
+}
+
+
+
+
+
diff --git a/src/pal/linux_dpdk/mbuf.h b/src/pal/linux_dpdk/mbuf.h
new file mode 100755
index 00000000..cde01077
--- /dev/null
+++ b/src/pal/linux_dpdk/mbuf.h
@@ -0,0 +1,83 @@
+#ifndef MBUF_H
+#define MBUF_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+#include <rte_random.h>
+
+typedef struct rte_mbuf rte_mbuf_t;
+
+typedef struct rte_mempool rte_mempool_t;
+
+rte_mempool_t * utl_rte_mempool_create(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id,
+ uint32_t socket_id );
+
+rte_mempool_t * utl_rte_mempool_create_non_pkt(const char *name,
+ unsigned n,
+ unsigned elt_size,
+ unsigned cache_size,
+ uint32_t _id ,
+ int socket_id);
+
+
+static inline rte_mbuf_t * utl_rte_pktmbuf_add_after(rte_mbuf_t *m1,rte_mbuf_t *m2){
+
+ rte_mbuf_refcnt_update(m2,1);
+ m1->next=m2;
+
+ m1->pkt_len += m2->data_len;
+ m1->nb_segs = m2->nb_segs + 1;
+ return (m1);
+}
+
+static inline rte_mbuf_t * utl_rte_pktmbuf_add_after2(rte_mbuf_t *m1,rte_mbuf_t *m2){
+
+ m1->next=m2;
+ m1->pkt_len += m2->data_len;
+ m1->nb_segs = m2->nb_segs + 1;
+ return (m1);
+}
+
+static inline void utl_rte_pktmbuf_add_last(rte_mbuf_t *m,rte_mbuf_t *m_last){
+
+    // there are two supported cases:
+    // 1. a single mbuf
+    // 2. two mbufs, where the last one is indirect
+
+ if ( m->next == NULL ) {
+ utl_rte_pktmbuf_add_after2(m,m_last);
+ }else{
+ m->next->next=m_last;
+ m->pkt_len += m_last->data_len;
+ m->nb_segs = 3;
+ }
+}
+
+
+
+#endif
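
A sketch of how the helpers above might be combined: create a pool once, allocate a per-packet header mbuf from it, and hang a shared payload segment behind it. The pool name, count and element size are assumptions for illustration; in a DPDK build the element size must cover struct rte_mbuf plus headroom and data room.

#include "mbuf.h"

/* Sketch only; names and sizes are illustrative. */
rte_mbuf_t *build_packet(rte_mbuf_t *shared_payload) {
    static rte_mempool_t *pool =
        utl_rte_mempool_create("hdr-pool",
                               8192,   /* number of mbufs        */
                               2048,   /* element size, assumed  */
                               32,     /* per-core cache         */
                               0,      /* pool id                */
                               0);     /* socket 0, assumed      */

    rte_mbuf_t *hdr = rte_pktmbuf_alloc(pool);   /* first segment, per packet */
    if (hdr == NULL) {
        return NULL;
    }
    /* utl_rte_pktmbuf_add_after() bumps the payload refcount, so one payload
       mbuf can sit behind many packets; add_after2() skips that update. */
    return utl_rte_pktmbuf_add_after(hdr, shared_payload);
}
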
diff --git a/src/pal/linux_dpdk/pal_utl.cpp b/src/pal/linux_dpdk/pal_utl.cpp
new file mode 100755
index 00000000..de864dbd
--- /dev/null
+++ b/src/pal/linux_dpdk/pal_utl.cpp
@@ -0,0 +1,29 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include "pal_utl.h"
+
+
+
+
+
diff --git a/src/pal/linux_dpdk/pal_utl.h b/src/pal/linux_dpdk/pal_utl.h
new file mode 100755
index 00000000..13403e6c
--- /dev/null
+++ b/src/pal/linux_dpdk/pal_utl.h
@@ -0,0 +1,45 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#ifndef PAL_UTL_H
+#define PAL_UTL_H
+
+#include <stdint.h>
+#include <rte_byteorder.h>
+#include <rte_memcpy.h>
+
+#define PAL_WORDSWAP(x) rte_bswap16(x)
+
+
+#define PAL_NTOHL(x) ( (uint32_t)( rte_bswap32(x) ) )
+#define PAL_NTOHS(x) ( (uint16_t)( rte_bswap16(x) ) )
+
+#define PAL_HTONS(x) (PAL_NTOHS(x))
+#define PAL_HTONL(x) (PAL_NTOHL(x))
+
+#define pal_ntohl64(x) rte_bswap64(x)
+
+#define PAL_NTOHLL(x) ( rte_bswap64(x) )
+
+
+
+#endif
+
+
diff --git a/src/pal/linux_dpdk/x86_64-default-linuxapp-gcc/include/rte_config.h b/src/pal/linux_dpdk/x86_64-default-linuxapp-gcc/include/rte_config.h
new file mode 100755
index 00000000..fdb5b994
--- /dev/null
+++ b/src/pal/linux_dpdk/x86_64-default-linuxapp-gcc/include/rte_config.h
@@ -0,0 +1,72 @@
+#define RTE_EXEC_ENV "linuxapp"
+#define RTE_EXEC_ENV_LINUXAPP 1
+#define RTE_MACHINE "native"
+#define RTE_ARCH "x86_64"
+#define RTE_ARCH_X86_64 1
+#define RTE_TOOLCHAIN "gcc"
+#define RTE_TOOLCHAIN_GCC 1
+#undef RTE_LIBC
+#undef RTE_LIBC_NEWLIB_SRC
+#undef RTE_LIBC_NEWLIB_BIN
+#undef RTE_LIBC_NETINCS
+#undef RTE_LIBGLOSS
+#define RTE_LIBRTE_EAL 1
+#define RTE_MAX_LCORE 32
+#define RTE_MAX_NUMA_NODES 8
+#define RTE_MAX_MEMSEG 32
+#define RTE_MAX_MEMZONE 512
+#define RTE_MAX_TAILQ 32
+#define RTE_LOG_LEVEL 8
+#define RTE_LOG_HISTORY 256
+#undef RTE_LIBEAL_USE_HPET
+#undef RTE_EAL_ALLOW_INV_SOCKET_ID
+#undef RTE_EAL_ALWAYS_PANIC_ON_ERROR
+#define RTE_LIBRTE_EAL_LINUXAPP 1
+#undef RTE_LIBRTE_EAL_BAREMETAL
+#define RTE_LIBRTE_ETHER 1
+#undef RTE_LIBRTE_ETHDEV_DEBUG
+#define RTE_MAX_ETHPORTS 32
+#undef RTE_LIBRTE_IEEE1588
+#define RTE_LIBRTE_IGB_PMD 1
+#undef RTE_LIBRTE_IGB_DEBUG_INIT
+#undef RTE_LIBRTE_IGB_DEBUG_RX
+#undef RTE_LIBRTE_IGB_DEBUG_TX
+#undef RTE_LIBRTE_IGB_DEBUG_TX_FREE
+#undef RTE_LIBRTE_IGB_DEBUG_DRIVER
+#define RTE_LIBRTE_IXGBE_PMD 1
+#undef RTE_LIBRTE_IXGBE_DEBUG_INIT
+#undef RTE_LIBRTE_IXGBE_DEBUG_RX
+#undef RTE_LIBRTE_IXGBE_DEBUG_TX
+#undef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+#undef RTE_LIBRTE_IXGBE_DEBUG_DRIVER
+#define RTE_PMD_PACKET_PREFETCH 1
+#define RTE_LIBRTE_RING 1
+#undef RTE_LIBRTE_RING_DEBUG
+#define RTE_LIBRTE_MEMPOOL 1
+#define RTE_MEMPOOL_CACHE_MAX_SIZE 512
+#undef RTE_LIBRTE_MEMPOOL_DEBUG
+#define RTE_LIBRTE_MBUF 1
+#undef RTE_LIBRTE_MBUF_DEBUG
+#define RTE_MBUF_SCATTER_GATHER 1
+#define RTE_MBUF_REFCNT_ATOMIC 1
+#define RTE_PKTMBUF_HEADROOM 0
+#define RTE_LIBRTE_TIMER 1
+#undef RTE_LIBRTE_TIMER_DEBUG
+#define RTE_LIBRTE_MALLOC 1
+#undef RTE_LIBRTE_MALLOC_DEBUG
+#define RTE_MALLOC_MEMZONE_SIZE 11M
+#define RTE_MALLOC_PER_NUMA_NODE 1
+#define RTE_LIBRTE_CMDLINE 1
+#define RTE_LIBRTE_HASH 1
+#undef RTE_LIBRTE_HASH_DEBUG
+#undef RTE_LIBRTE_HASH_USE_MEMZONE
+#define RTE_LIBRTE_LPM 1
+#undef RTE_LIBRTE_LPM_DEBUG
+#define RTE_LIBRTE_NET 1
+#define RTE_APP_TEST 1
+#define RTE_APP_CHKINCS 1
+#define RTE_TEST_PMD 1
+#undef RTE_TEST_PMD_RECORD_CORE_CYCLES
+#undef RTE_TEST_PMD_RECORD_BURST_STATS
+#undef RTE_LIBRTE_GCOV
+#undef RTE_INSECURE_FUNCTION_WARNING
diff --git a/src/platform_cfg.cpp b/src/platform_cfg.cpp
new file mode 100755
index 00000000..a226a9ac
--- /dev/null
+++ b/src/platform_cfg.cpp
@@ -0,0 +1,453 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "platform_cfg.h"
+#include <common/basic_utils.h>
+#include <stdlib.h>
+#include <iostream>
+#include <fstream>
+
+
+void CPlatformMemoryYamlInfo::reset(){
+ int i;
+ i=0;
+ for (i=0; i<MBUF_SIZE; i++) {
+ m_mbuf[i] = CONST_NB_MBUF_2_10G;
+ }
+ m_mbuf[MBUF_64] = m_mbuf[MBUF_64]*2;
+ m_mbuf[MBUF_2048] = CONST_NB_MBUF_2_10G/2;
+
+ m_mbuf[TRAFFIC_MBUF_64] = m_mbuf[MBUF_64]*2;
+ m_mbuf[TRAFFIC_MBUF_2048] = CONST_NB_MBUF_2_10G*4;
+
+ m_mbuf[MBUF_DP_FLOWS] = (1024*1024/2);
+ m_mbuf[MBUF_GLOBAL_FLOWS] =(10*1024/2);
+}
+const std::string names []={
+ "MBUF_64",
+ "MBUF_128",
+ "MBUF_256",
+ "MBUF_512",
+ "MBUF_1024",
+ "MBUF_2048",
+
+ "TRAFFIC_MBUF_64",
+ "TRAFFIC_MBUF_128",
+ "TRAFFIC_MBUF_256",
+ "TRAFFIC_MBUF_512",
+ "TRAFFIC_MBUF_1024",
+ "TRAFFIC_MBUF_2048",
+
+ "MBUF_DP_FLOWS",
+ "MBUF_GLOBAL_FLOWS"
+
+ };
+
+const std::string * get_mbuf_names(void){
+ return names;
+}
+
+
+
+void CPlatformDualIfYamlInfo::Dump(FILE *fd){
+ fprintf(fd," socket : %d \n",m_socket);
+ int i;
+ fprintf(fd," [ ");
+ for (i=0; i<m_threads.size(); i++) {
+ fprintf(fd," %d ",(int)m_threads[i]);
+ }
+ fprintf(fd," ] \n");
+}
+
+
+
+void CPlatformCoresYamlInfo::Dump(FILE *fd){
+ if ( m_is_exists == false){
+ fprintf(fd," no platform info \n");
+ return;
+ }
+ fprintf(fd," master thread : %d \n",m_master_thread);
+ fprintf(fd," latency thread : %d \n",m_latency_thread);
+ int i;
+ for (i=0; i<m_dual_if.size(); i++) {
+        fprintf(fd," dual_if  : %d \n",i);
+ CPlatformDualIfYamlInfo * lp=&m_dual_if[i];
+ lp->Dump(fd);
+ }
+}
+
+void operator >> (const YAML::Node& node, CPlatformDualIfYamlInfo & plat_info) {
+ node["socket"] >> plat_info.m_socket;
+ const YAML::Node& threads = node["threads"];
+ /* fill the vector*/
+ for(unsigned i=0;i<threads.size();i++) {
+ uint32_t fi;
+ const YAML::Node & node = threads;
+ node[i] >> fi;
+ plat_info.m_threads.push_back(fi);
+ }
+}
+
+
+void operator >> (const YAML::Node& node, CPlatformCoresYamlInfo & plat_info) {
+ node["master_thread_id"] >> plat_info.m_master_thread;
+ node["latency_thread_id"] >> plat_info.m_latency_thread;
+
+ const YAML::Node& dual_info = node["dual_if"];
+ for(unsigned i=0;i<dual_info.size();i++) {
+ CPlatformDualIfYamlInfo fi;
+ dual_info[i] >> fi;
+ plat_info.m_dual_if.push_back(fi);
+ }
+}
+
+
+void CPlatformMemoryYamlInfo::Dump(FILE *fd){
+
+ fprintf(fd," memory per 2x10G ports \n");
+ const std::string * names =get_mbuf_names();
+
+ int i=0;
+ for (i=0; i<MBUF_SIZE; i++) {
+        fprintf(fd," %-40s  : %u \n",names[i].c_str(),m_mbuf[i]);
+ }
+}
+
+
+
+void CMacYamlInfo::copy_dest(char *p){
+ assert(m_dest_base.size() == 6);
+ int i;
+ for (i=0; i<m_dest_base.size(); i++) {
+ p[i]=m_dest_base[i];
+ }
+}
+
+void CMacYamlInfo::copy_src(char *p){
+ assert(m_src_base.size() == 6);
+ int i;
+ for (i=0; i<m_src_base.size(); i++) {
+ p[i]=m_src_base[i];
+ }
+}
+
+
+void CMacYamlInfo::Dump(FILE *fd){
+ if (m_dest_base.size() != 6) {
+ fprintf(fd,"ERROR in dest mac addr \n");
+ return;
+ }
+ if (m_src_base.size() != 6) {
+        fprintf(fd,"ERROR in src mac addr \n");
+ return;
+ }
+    fprintf (fd," src     : ");
+    dump_mac_vector( m_src_base,fd);
+    fprintf (fd," dest    : ");
+    dump_mac_vector( m_dest_base,fd);
+
+}
+
+
+
+
+void operator >> (const YAML::Node& node, CMacYamlInfo & mac_info) {
+
+ const YAML::Node& dmac = node["dest_mac"];
+ for(unsigned i=0;i<dmac.size();i++) {
+ uint32_t fi;
+ const YAML::Node & node =dmac;
+ node[i] >> fi;
+ mac_info.m_dest_base.push_back(fi);
+ }
+
+ const YAML::Node& smac = node["src_mac"];
+    for(unsigned i=0;i<smac.size();i++) {
+ uint32_t fi;
+ const YAML::Node & node =smac;
+ node[i] >> fi;
+ mac_info.m_src_base.push_back(fi);
+ }
+}
+
+void operator >> (const YAML::Node& node, CPlatformMemoryYamlInfo & plat_info) {
+ try {
+ node["mbuf_64"] >> plat_info.m_mbuf[MBUF_64];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["mbuf_128"] >> plat_info.m_mbuf[MBUF_128];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["mbuf_256"] >> plat_info.m_mbuf[MBUF_256];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["mbuf_512"] >> plat_info.m_mbuf[MBUF_512];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["mbuf_1024"] >> plat_info.m_mbuf[MBUF_1024];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["mbuf_2048"] >> plat_info.m_mbuf[MBUF_2048];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["traffic_mbuf_64"] >> plat_info.m_mbuf[TRAFFIC_MBUF_64];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["traffic_mbuf_128"] >> plat_info.m_mbuf[TRAFFIC_MBUF_128];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["traffic_mbuf_256"] >> plat_info.m_mbuf[TRAFFIC_MBUF_256];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["traffic_mbuf_512"] >> plat_info.m_mbuf[TRAFFIC_MBUF_512];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["traffic_mbuf_1024"] >> plat_info.m_mbuf[TRAFFIC_MBUF_1024];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["traffic_mbuf_2048"] >> plat_info.m_mbuf[TRAFFIC_MBUF_2048];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["dp_flows"] >> plat_info.m_mbuf[MBUF_DP_FLOWS];
+ } catch ( const std::exception& e ) {
+ }
+
+ try {
+ node["global_flows"] >> plat_info.m_mbuf[MBUF_GLOBAL_FLOWS];
+ } catch ( const std::exception& e ) {
+ }
+}
+
+
+void operator >> (const YAML::Node& node, CPlatformYamlInfo & plat_info) {
+ try {
+ node["port_limit"] >> plat_info.m_port_limit;
+ plat_info.m_port_limit_exist=true;
+ } catch ( const std::exception& e ) {
+ plat_info.m_port_limit=0xffffffff;
+ }
+
+
+ try {
+ const YAML::Node& interface_mask = node["interface_mask"];
+ for(unsigned i=0;i<interface_mask.size();i++) {
+ std::string fi;
+ const YAML::Node & node = interface_mask;
+ node[i] >> fi;
+ plat_info.m_if_mask.push_back(fi);
+ }
+ plat_info.m_if_mask_exist=true;
+ } catch ( const std::exception& e ) {
+
+ }
+
+
+ try {
+ node["enable_zmq_pub"] >> plat_info.m_enable_zmq_pub;
+ node["zmq_pub_port"] >> plat_info.m_zmq_pub_port;
+ plat_info.m_enable_zmq_pub_exist = true;
+ } catch ( const std::exception& e ) {
+ plat_info.m_enable_zmq_pub_exist = false;
+ }
+
+ /* must have interfaces */
+ const YAML::Node& interfaces = node["interfaces"];
+ for(unsigned i=0;i<interfaces.size();i++) {
+ std::string fi;
+ const YAML::Node & node = interfaces;
+ node[i] >> fi;
+ plat_info.m_if_list.push_back(fi);
+ }
+
+ try {
+ node["prefix"] >> plat_info.m_prefix;
+ } catch ( const std::exception& e ) {
+ }
+ try {
+ node["limit_memory"] >> plat_info.m_limit_memory;
+ } catch ( const std::exception& e ) {
+ }
+ try {
+ node["c"] >> plat_info.m_thread_per_dual_if;
+ } catch ( const std::exception& e ) {
+ }
+
+
+
+ try {
+ node["telnet_port"] >> plat_info.m_telnet_port;
+ plat_info.m_telnet_exist=true;
+ } catch ( const std::exception& e ) {
+ plat_info.m_telnet_port=4501;
+ }
+
+ try {
+ node["port_bandwidth_gb"] >> plat_info.m_port_bandwidth_gb;
+ } catch ( const std::exception& e ) {
+ }
+
+ if ( node.FindValue("memory") ){
+ node["memory"] >> plat_info.m_memory;
+ }
+
+ if ( node.FindValue("platform") ){
+ node["platform"] >> plat_info.m_platform;
+ plat_info.m_platform.m_is_exists=true;
+ }
+
+ try {
+ const YAML::Node& mac_info = node["port_info"];
+ for(unsigned i=0;i<mac_info.size();i++) {
+ CMacYamlInfo fi;
+ const YAML::Node & node =mac_info;
+ node[i] >> fi;
+ plat_info.m_mac_info.push_back(fi);
+ }
+ plat_info.m_mac_info_exist = true;
+ }catch ( const std::exception& e ) {
+ }
+}
+
+int CPlatformYamlInfo::load_from_yaml_file(std::string file_name){
+ reset();
+ m_info_exist =true;
+
+ if ( !utl_is_file_exists(file_name) ){
+        printf(" ERROR file %s does not exist \n",file_name.c_str());
+ exit(-1);
+ }
+
+ try {
+ std::ifstream fin((char *)file_name.c_str());
+ YAML::Parser parser(fin);
+ YAML::Node doc;
+
+ parser.GetNextDocument(doc);
+ for(unsigned i=0;i<doc.size();i++) {
+ doc[i] >> *this;
+ break;
+ }
+ } catch ( const std::exception& e ) {
+ std::cout << e.what() << "\n";
+ exit(-1);
+ }
+ return (0);
+}
+
+
+std::string CPlatformYamlInfo::get_use_if_comma_seperated(){
+ std::string s="";
+ int i;
+ for (i=0; i<(int)m_if_list.size()-1; i++) {
+ s+=m_if_list[i]+",";
+ }
+ s+=m_if_list[i];
+ return (s);
+}
+
+
+void CPlatformYamlInfo::Dump(FILE *fd){
+ if ( m_info_exist ==false ){
+ fprintf(fd," file info does not exist \n");
+ return;
+ }
+
+
+ if (m_port_limit_exist && (m_port_limit != 0xffffffff)) {
+ fprintf(fd," port limit : %d \n",m_port_limit);
+ }else{
+ fprintf(fd," port limit : not configured \n");
+ }
+    fprintf(fd," port_bandwidth_gb    : %u \n",m_port_bandwidth_gb);
+
+ if ( m_if_mask_exist && m_if_mask.size() ) {
+ fprintf(fd," if_mask : ");
+ int i;
+ for (i=0; i<(int)m_if_mask.size(); i++) {
+ fprintf(fd," %s,",m_if_mask[i].c_str());
+ }
+        fprintf(fd,"\n");
+
+ }else{
+ fprintf(fd," if_mask : None \n");
+ }
+
+ if ( m_prefix.length() ){
+ fprintf(fd," prefix : %s \n",m_prefix.c_str());
+ }
+ if ( m_limit_memory.length() ){
+ fprintf(fd," limit_memory : %s \n",m_limit_memory.c_str());
+ }
+ fprintf(fd," thread_per_dual_if : %d \n",(int)m_thread_per_dual_if);
+
+ fprintf(fd," if : ");
+ int i;
+ for (i=0; i<(int)m_if_list.size(); i++) {
+ fprintf(fd," %s,",m_if_list[i].c_str());
+ }
+ fprintf(fd,"\n");
+
+ if ( m_enable_zmq_pub_exist ){
+ fprintf(fd," enable_zmq_pub : %d \n",m_enable_zmq_pub?1:0);
+ fprintf(fd," zmq_pub_port : %d \n",m_zmq_pub_port);
+ }
+ if ( m_telnet_exist ){
+ fprintf(fd," telnet_port : %d \n",m_telnet_port);
+ }
+
+ if ( m_mac_info_exist ){
+ int i;
+ for (i=0; i<(int)m_mac_info.size(); i++) {
+ m_mac_info[i].Dump(fd);
+ }
+ }
+ m_memory.Dump(fd);
+ m_platform.Dump(fd);
+}
+
+
+
+
+
diff --git a/src/platform_cfg.h b/src/platform_cfg.h
new file mode 100755
index 00000000..2f335471
--- /dev/null
+++ b/src/platform_cfg.h
@@ -0,0 +1,235 @@
+#ifndef CPLATFORM_CFG_H
+#define CPLATFORM_CFG_H
+
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <yaml-cpp/yaml.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <vector>
+#include <string>
+
+
+#define CONST_NB_MBUF_2_10G (16380/4)
+
+typedef enum { MBUF_64 =0, // per dual port, per NUMA
+
+ MBUF_128 =1,
+ MBUF_256 =2,
+ MBUF_512 =3,
+ MBUF_1024 =4,
+ MBUF_2048 =5,
+
+ // per NUMA
+ TRAFFIC_MBUF_64 =6,
+ TRAFFIC_MBUF_128 =7,
+ TRAFFIC_MBUF_256 =8,
+ TRAFFIC_MBUF_512 =9,
+ TRAFFIC_MBUF_1024 =10,
+ TRAFFIC_MBUF_2048 =11,
+
+ MBUF_DP_FLOWS =12,
+ MBUF_GLOBAL_FLOWS =13,
+ MBUF_SIZE =14
+ } mbuf_sizes_t;
+
+const std::string * get_mbuf_names(void);
+
+/*
+#- port_limit      : 2         # this option can limit the number of ports of the platform
+ cpu_mask_offset : 4 # the offset of the cpu affinity
+  interface_mask     : [ "0000:11:00.00", "0000:11:00.01" ]   # interfaces that should be masked out and not considered
+ scan_only_1g : true
+ enable_zmq_pub : true # enable publisher for stats data
+ zmq_pub_port : 4500
+  telnet_port : 4501 # the telnet port, in case telnet is enabled (interactive mode)
+  port_info       :  # set the mac addr
+ - dest_mac : [0x0,0x0,0x0,0x1,0x0,0x00] # port 0
+ src_mac : [0x0,0x0,0x0,0x1,0x0,0x00]
+
+ #for system of 1Gb/sec NIC or VM enable this
+   port_bandwidth_gb : 10   #  port bandwidth in Gb/sec; for a VM put 1 here, for XL710 put 40
+# memory configuration for 2x10Gb/sec system
+ memory :
+ mbuf_64 : 16380
+ mbuf_128 : 8190
+ mbuf_256 : 8190
+ mbuf_512 : 8190
+ mbuf_1024 : 8190
+ mbuf_2048 : 2049
+
+ traffic_mbuf_128 : 8190
+ traffic_mbuf_256 : 8190
+ traffic_mbuf_512 : 8190
+ traffic_mbuf_1024 : 8190
+ traffic_mbuf_2048 : 2049
+
+ dp_flows : 1048576
+ global_flows : 10240
+
+*/
+
+
+
+struct CMacYamlInfo {
+ std::vector <uint8_t> m_dest_base;
+ std::vector <uint8_t> m_src_base;
+ void Dump(FILE *fd);
+
+ void copy_dest(char *p);
+ void copy_src(char *p);
+
+ void dump_mac_vector( std::vector<uint8_t> & v,FILE *fd){
+ int i;
+ for (i=0; i<5; i++) {
+ fprintf(fd,"%02x:",v[i]);
+ }
+ fprintf(fd,"%02x\n",v[5]);
+ }
+};
+
+/*
+ platform :
+ master_core : 0
+ latency_core : 5
+ dual_if :
+ - socket : 0
+ threads : [1,2,3,4]
+ - socket : 1
+ threads : [16,17,18,16]
+
+*/
+
+struct CPlatformDualIfYamlInfo {
+public:
+ uint32_t m_socket;
+ std::vector <uint8_t> m_threads;
+public:
+ void Dump(FILE *fd);
+};
+
+struct CPlatformCoresYamlInfo {
+public:
+
+ CPlatformCoresYamlInfo(){
+ m_is_exists=false;
+ }
+ bool m_is_exists;
+ uint32_t m_master_thread;
+ uint32_t m_latency_thread;
+ std::vector <CPlatformDualIfYamlInfo> m_dual_if;
+public:
+ void Dump(FILE *fd);
+};
+
+
+
+struct CPlatformMemoryYamlInfo {
+
+
+public:
+
+ CPlatformMemoryYamlInfo(){
+ reset();
+ }
+     uint32_t         m_mbuf[MBUF_SIZE]; // sized relative to traffic, normalized to 2x10G ports
+
+public:
+ void Dump(FILE *fd);
+ void reset();
+};
+
+
+struct CPlatformYamlInfo {
+public:
+ CPlatformYamlInfo(){
+ reset();
+ }
+
+ void reset(){
+
+ m_if_mask.clear();
+ m_mac_info.clear();
+ m_if_list.clear();
+
+ m_info_exist=false;
+ m_port_limit_exist=false;
+ m_port_limit=0xffffffff;
+
+ m_if_mask_exist=false;
+
+ m_enable_zmq_pub_exist=false;
+ m_enable_zmq_pub=true;
+ m_zmq_pub_port=4500;
+
+
+ m_telnet_exist=false;
+ m_telnet_port=4502 ;
+
+ m_mac_info_exist=false;
+ m_port_bandwidth_gb = 10;
+ m_memory.reset();
+ m_prefix="";
+ m_limit_memory="" ;
+ m_thread_per_dual_if=1;
+
+ }
+
+ bool m_info_exist; /* file exist ?*/
+
+ bool m_port_limit_exist;
+ uint32_t m_port_limit;
+
+
+ bool m_if_mask_exist;
+ std::vector <std::string> m_if_mask;
+
+ std::vector <std::string> m_if_list;
+
+ std::string m_prefix;
+ std::string m_limit_memory;
+ uint32_t m_thread_per_dual_if;
+
+ uint32_t m_port_bandwidth_gb;
+
+ bool m_enable_zmq_pub_exist;
+ bool m_enable_zmq_pub;
+ uint16_t m_zmq_pub_port;
+
+
+ bool m_telnet_exist;
+ uint16_t m_telnet_port;
+
+ bool m_mac_info_exist;
+ std::vector <CMacYamlInfo> m_mac_info;
+ CPlatformMemoryYamlInfo m_memory;
+ CPlatformCoresYamlInfo m_platform;
+
+public:
+ std::string get_use_if_comma_seperated();
+ void Dump(FILE *fd);
+ int load_from_yaml_file(std::string file_name);
+};
+
+
+
+#endif
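
The commented-out YAML fragments above document the platform configuration file this header parses. A minimal sketch of loading and inspecting such a file; the default path is only an example, and load_from_yaml_file() exits the process if the file is missing or malformed.

#include "platform_cfg.h"

int main(int argc, char *argv[]) {
    CPlatformYamlInfo cfg;

    const char *path = (argc > 1) ? argv[1] : "/etc/trex_cfg.yaml";   /* example path */
    cfg.load_from_yaml_file(path);

    cfg.Dump(stdout);
    printf("interfaces: %s \n", cfg.get_use_if_comma_seperated().c_str());
    return 0;
}
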
diff --git a/src/rx_check.cpp b/src/rx_check.cpp
new file mode 100755
index 00000000..67ce89e1
--- /dev/null
+++ b/src/rx_check.cpp
@@ -0,0 +1,536 @@
+#include "rx_check.h"
+#include "utl_json.h"
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+void CRxCheckFlowTableStats::Clear(){
+ m_total_rx_bytes=0;
+ m_total_rx=0;
+ m_lookup=0;
+ m_found=0;
+ m_fif=0;
+ m_add=0;
+ m_remove=0;
+ m_active=0;
+ m_err_no_magic=0;
+ m_err_drop=0;
+ m_err_aged=0;
+ m_err_no_magic=0;
+ m_err_wrong_pkt_id=0;
+ m_err_fif_seen_twice=0;
+ m_err_open_with_no_fif_pkt=0;
+ m_err_oo_dup=0;
+ m_err_oo_early=0;
+ m_err_oo_late=0;
+ m_err_flow_length_changed=0;
+
+}
+
+#define MYDP(f) if (f) fprintf(fd," %-40s: %llu \n",#f,f)
+#define MYDP_A(f) fprintf(fd," %-40s: %llu \n",#f,f)
+#define MYDP_J(f) json+=add_json(#f,f);
+#define MYDP_J_LAST(f) json+=add_json(#f,f,true);
+
+
+
+void CRxCheckFlowTableStats::Dump(FILE *fd){
+ MYDP (m_total_rx_bytes);
+ MYDP (m_total_rx);
+ MYDP (m_lookup);
+ MYDP (m_found);
+ MYDP (m_fif);
+ MYDP (m_add);
+ MYDP (m_remove);
+ MYDP_A (m_active);
+ MYDP (m_err_no_magic);
+ MYDP (m_err_drop);
+ MYDP (m_err_aged);
+ MYDP (m_err_no_magic);
+ MYDP (m_err_wrong_pkt_id);
+ MYDP (m_err_fif_seen_twice);
+ MYDP (m_err_open_with_no_fif_pkt);
+ MYDP (m_err_oo_dup);
+ MYDP (m_err_oo_early);
+ MYDP (m_err_oo_late);
+ MYDP (m_err_flow_length_changed);
+}
+
+void CRxCheckFlowTableStats::dump_json(std::string & json){
+ json+="\"stats\" : {";
+
+ MYDP_J (m_total_rx_bytes);
+ MYDP_J (m_total_rx);
+ MYDP_J (m_lookup);
+ MYDP_J (m_found);
+ MYDP_J (m_fif);
+ MYDP_J (m_add);
+ MYDP_J (m_remove);
+ MYDP_J (m_active);
+ MYDP_J (m_err_no_magic);
+ MYDP_J (m_err_drop);
+ MYDP_J (m_err_aged);
+ MYDP_J (m_err_no_magic);
+ MYDP_J (m_err_wrong_pkt_id);
+ MYDP_J (m_err_fif_seen_twice);
+ MYDP_J (m_err_open_with_no_fif_pkt);
+ MYDP_J (m_err_oo_dup);
+ MYDP_J (m_err_oo_early);
+ MYDP_J (m_err_oo_late);
+
+ /* must be last */
+ MYDP_J_LAST (m_err_flow_length_changed);
+ json+="},";
+}
+
+
+
+bool CRxCheckFlowTableMap::Create(int max_size){
+ return (true);
+}
+
+void CRxCheckFlowTableMap::Delete(){
+ remove_all();
+}
+
+bool CRxCheckFlowTableMap::remove(uint64_t key ){
+ CRxCheckFlow *lp = lookup(key);
+ if ( lp ) {
+ delete lp;
+ m_map.erase(key);
+ return(true);
+ }else{
+ return(false);
+ }
+}
+
+
+CRxCheckFlow * CRxCheckFlowTableMap::lookup(uint64_t key ){
+ rx_check_flow_map_t::iterator iter;
+ iter = m_map.find(key);
+ if (iter != m_map.end() ) {
+ return ( (*iter).second );
+ }else{
+ return (( CRxCheckFlow*)0);
+ }
+}
+
+CRxCheckFlow * CRxCheckFlowTableMap::add(uint64_t key ){
+ CRxCheckFlow * flow = new CRxCheckFlow();
+ m_map.insert(rx_check_flow_map_t::value_type(key,flow));
+ return (flow);
+
+}
+
+
+void CRxCheckFlowTableMap::dump_all(FILE *fd){
+
+ rx_check_flow_map_iter_t it;
+ for (it= m_map.begin(); it != m_map.end(); ++it) {
+ CRxCheckFlow *lp = it->second;
+        printf ("flow_id: %llu \n",(unsigned long long)lp->m_flow_id);
+ }
+}
+
+
+void CRxCheckFlowTableMap::remove_all(){
+ if ( m_map.empty() )
+ return;
+
+ rx_check_flow_map_iter_t it;
+ for (it= m_map.begin(); it != m_map.end(); ++it) {
+ CRxCheckFlow *lp = it->second;
+ delete lp;
+ }
+ m_map.clear();
+}
+
+uint64_t CRxCheckFlowTableMap::count(){
+ return ( m_map.size());
+}
+
+
+#ifdef FT_TEST
+
+void test_flowtable (){
+ CRxCheckFlowTableMap map;
+ map.Create(1000);
+ CRxCheckFlow * lp;
+
+ lp=map.lookup(1);
+ assert(lp==0);
+ lp=map.add(2);
+ lp->m_flow_id = 7;
+    printf("%p\n", (void *)lp);
+    lp=map.lookup(2);
+    printf("%p\n", (void *)lp);
+ assert(lp);
+ map.dump_all(stdout);
+}
+
+#endif
+
+
+
+void RxCheckManager::tw_handle(){
+
+ m_tw.try_handle_events(m_cur_time);
+}
+
+
+void RxCheckManager::tw_drain(){
+ m_on_drain=true;
+ m_tw.drain_all();
+ m_on_drain=false;
+}
+
+
+std::string CPerTxthreadTemplateInfo::dump_as_json(std::string name){
+ std::string json="\"template\":[";
+ int i;
+ for (i=0;i<MAX_TEMPLATES_STATS;i++){
+ char buff[200];
+ sprintf(buff,"%llu",m_template_info[i]);
+ json+=std::string(buff);
+ if ( i < MAX_TEMPLATES_STATS-1) {
+ json+=std::string(",");
+ }
+ }
+ json+="]," ;
+ return (json);
+}
+
+
+void CPerTxthreadTemplateInfo::Add(CPerTxthreadTemplateInfo * obj){
+ int i;
+ for (i=0; i<MAX_TEMPLATES_STATS; i++) {
+ m_template_info[i]+=obj->m_template_info[i];
+ }
+}
+
+
+void CPerTxthreadTemplateInfo::Dump(FILE *fd){
+ int i;
+ for (i=0; i<MAX_TEMPLATES_STATS; i++) {
+ if (m_template_info[i]) {
+            fprintf (fd," template id: %d %llu \n",i,(unsigned long long)m_template_info[i]);
+ }
+ }
+}
+
+
+bool RxCheckManager::Create(){
+ m_ft.Create(100000);
+ m_stats.Clear();
+ m_hist.Create();
+ m_cur_time=0.00000001;
+ m_on_drain=false;
+ return (true);
+
+}
+
+
+void RxCheckManager::handle_packet(CRx_check_header * rxh){
+ //rxh->dump(stdout);
+ m_stats.m_total_rx++;
+ if ( rxh->m_magic != RX_CHECK_MAGIC ){
+ m_stats.m_err_no_magic++;
+ update_template_err(rxh->m_template_id);
+ return;
+ }
+ if ((rxh->m_pkt_id+1) > rxh->m_flow_size ){
+ m_stats.m_err_wrong_pkt_id++;
+ update_template_err(rxh->m_template_id);
+ return;
+ }
+
+ m_cur_time=now_sec();
+
+ uint64_t d = (os_get_hr_tick_32() - rxh->m_time_stamp );
+ double dt= ptime_convert_hr_dsec(d);
+ m_hist.Add(dt);
+ // calc jitter per template
+ CPerTemplateInfo *lpt= get_template(rxh->m_template_id);
+ lpt->calc(dt);
+ lpt->inc_rx_counter();
+
+ CRxCheckFlow * lf;
+ /* lookup */
+ lf=m_ft.lookup(rxh->m_flow_id);
+ m_stats.m_lookup++;
+
+ if ((m_stats.m_lookup & 0xff)==0) {
+ /* handle aging from time to time */
+
+ tw_handle() ;
+ }
+
+ bool any_err=false;
+ if ( rxh->is_fif_dir() ) {
+ if (lf==0) {
+            /* valid: this is the first-in-flow (FIF) packet, so no flow entry is expected yet */
+ lf=m_ft.add(rxh->m_flow_id);
+ assert(lf);
+ lf->m_aging_timer_handle.m_object1=this;
+
+ lf->m_flow_id=rxh->m_flow_id;
+ if (rxh->get_both_dir()) {
+ lf->set_both_dir();
+ }
+
+ CRxCheckFlowPerDir *lpd=&lf->m_dir[rxh->get_dir()];
+ lpd->set_fif_seen(rxh->m_flow_size);
+ m_stats.m_fif++;
+ m_stats.m_add++;
+ m_stats.m_active++;
+ }else{
+ m_stats.m_found++;
+ CRxCheckFlowPerDir *lpd=&lf->m_dir[rxh->get_dir()];
+ if ( lpd->is_fif_seen() ){
+ lf->m_oo_err++;
+ any_err=true;
+ m_stats.m_err_fif_seen_twice++;
+ if ( lpd->m_flow_size != rxh->m_flow_size ){
+ m_stats.m_err_flow_length_changed++;
+ lf->m_oo_err++;
+ }
+ lpd->m_pkts++;
+ }else{
+ /* first in direction , we are OK */
+ lpd->set_fif_seen(rxh->m_flow_size);
+ }
+ }
+ }else{
+ /* NON FIF */
+ if (lf==0) {
+            /* a flow entry does not exist, yet this is not the first packet of the flow */
+            /* the first packet was probably dropped */
+ lf=m_ft.add(rxh->m_flow_id);
+ assert(lf);
+ lf->m_aging_timer_handle.m_object1=this;
+ if (rxh->get_both_dir()) {
+ lf->set_both_dir();
+ }
+
+ lf->m_flow_id=rxh->m_flow_id;
+ CRxCheckFlowPerDir * lpd=&lf->m_dir[rxh->get_dir()];
+
+ lpd->set_fif_seen(rxh->m_flow_size);
+
+ m_stats.m_add++;
+ m_stats.m_active++;
+ m_stats.m_err_open_with_no_fif_pkt++;
+ any_err=true;
+ lf->m_oo_err++;
+ }else{
+ m_stats.m_found++;
+
+ CRxCheckFlowPerDir *lpd=&lf->m_dir[rxh->get_dir()];
+ if ( !lpd->is_fif_seen() ){
+ // init this dir
+ lpd->set_fif_seen(rxh->m_flow_size);
+
+ }else{
+ if ( lpd->m_flow_size != rxh->m_flow_size ){
+ m_stats.m_err_flow_length_changed++;
+ lf->m_oo_err++;
+ any_err=true;
+ }
+
+ /* check seq number */
+ uint16_t c_seq=lpd->m_seq;
+ if ((c_seq) != rxh->m_pkt_id) {
+ /* out of order issue */
+ lf->m_oo_err++;
+ any_err=true;
+
+ if (c_seq-1 == rxh->m_pkt_id) {
+ m_stats.m_err_oo_dup++;
+ }else{
+ if ((c_seq ) < rxh->m_pkt_id ) {
+ m_stats.m_err_oo_late++;
+ }else{
+ m_stats.m_err_oo_early++;
+ }
+ }
+ }
+ /* reset the seq */
+ lpd->m_seq=rxh->m_pkt_id+1;
+ lpd->m_pkts++;
+ }
+
+ }
+ }
+
+ if (any_err) {
+ update_template_err(rxh->m_template_id);
+ }
+
+ m_tw.restart_timer(&lf->m_aging_timer_handle,m_cur_time+std::max(rxh->m_aging_sec,(uint16_t)5));
+    /* terminate the flow if needed */
+ if ( lf->is_all_pkts_seen() ){
+        /* handle the flow termination */
+ m_tw.stop_timer(&lf->m_aging_timer_handle);
+ lf->set_aged_correctly();
+ on_flow_end(lf);
+ }
+
+}
+
+void RxCheckManager::update_template_err(uint8_t template_id){
+ get_template(template_id)->inc_error_counter();
+}
+
+
+bool RxCheckManager::on_flow_end(CRxCheckFlow * lp){
+ m_stats.m_remove++;
+ m_stats.m_active--;
+ if ( !m_on_drain ){
+ uint16_t exp=lp->get_total_pkt_expected();
+ uint16_t seen=lp->get_total_pkt_seen();
+
+ if ( exp > seen ){
+ m_stats.m_err_drop +=(exp - seen);
+ }
+ if (!lp->is_aged_correctly()) {
+ m_stats.m_err_aged++;
+ }
+ }
+ m_ft.remove(lp->m_flow_id);
+ return(true);
+}
+
+
+
+void RxCheckManager::Delete(){
+ m_ft.Delete();
+ m_hist.Delete();
+}
+
+void flow_aging_callback(CFlowTimerHandle * t){
+ CRxCheckFlow * lp = (CRxCheckFlow *)t->m_object;
+ RxCheckManager * lpm = (RxCheckManager *)t->m_object1;
+ assert(lp);
+ assert(lpm);
+ assert(t->m_id == 0x1234);
+ lpm->on_flow_end(lp);
+}
+
+void RxCheckManager::template_dump_json(std::string & json){
+ json+="\"template\" : [";
+ int i;
+ bool is_first=true;
+ for (i=0; i<MAX_TEMPLATES_STATS;i++ ) {
+ CPerTemplateInfo * lp=get_template(i);
+ if ( is_first==true ){
+ is_first=false;
+ }else{
+ json+=",";
+ }
+ json+="{";
+ json+=add_json("id",(uint32_t)i);
+ json+=add_json("val",(uint64_t)lp->get_error_counter());
+ json+=add_json("rx_pkts",(uint64_t)lp->get_rx_counter());
+ json+=add_json("jitter",(uint64_t)lp->get_jitter_usec(),true);
+ json+="}";
+ }
+ json+="],";
+
+}
+
+uint32_t RxCheckManager::getTemplateMaxJitter(){
+ uint32_t res=0;
+ int i;
+ for (i=0; i<MAX_TEMPLATES_STATS;i++ ) {
+ CPerTemplateInfo * lp=get_template(i);
+ uint32_t jitter=lp->get_jitter_usec();
+ if ( jitter > res ) {
+ res =jitter;
+ }
+ }
+ return ( res );
+}
+
+void RxCheckManager::DumpTemplate(FILE *fd,bool verbose){
+ int i;
+ bool has_template=false;
+ int cnt=0;
+ for (i=0; i<MAX_TEMPLATES_STATS;i++ ) {
+ CPerTemplateInfo * lp=get_template(i);
+ if (verbose || (lp->get_error_counter()>0)) {
+ has_template=true;
+ if (cnt==0){
+ fprintf(fd,"\n");
+ }
+            fprintf(fd,"[id:%2d   val:%8llu,rx:%8llu], ",i,(unsigned long long)lp->get_error_counter(),(unsigned long long)lp->get_rx_counter());
+ cnt++;
+ if (cnt>5) {
+ cnt=0;
+ }
+ }
+ }
+ if ( has_template ){
+ fprintf(fd,"\n");
+ }
+}
+
+void RxCheckManager::DumpTemplateFull(FILE *fd){
+ int i;
+ for (i=0; i<MAX_TEMPLATES_STATS;i++ ) {
+ CPerTemplateInfo * lp=get_template(i);
+        fprintf(fd," template_id_%2d , errors:%8llu, jitter: %u rx : %llu \n",i,(unsigned long long)lp->get_error_counter(),lp->get_jitter_usec(),(unsigned long long)lp->get_rx_counter() );
+ }
+}
+
+
+void RxCheckManager::DumpShort(FILE *fd){
+ m_hist.update();
+ fprintf(fd,"------------------------------------------------------------------------------------------------------------\n");
+ fprintf(fd,"rx check: avg/max/jitter latency, %8.0f ,%8.0f, %8d ",m_hist.get_average_latency(),m_hist.get_max_latency(),getTemplateMaxJitter());
+ fprintf(fd," | ");
+ m_hist.DumpWinMax(fd);
+ DumpTemplate(fd,false);
+ fprintf(fd,"\n");
+ fprintf(fd,"---\n");
+    fprintf(fd," active flows: %8llu, fif: %8llu, drop: %8llu, errors: %8llu \n",(unsigned long long)m_stats.m_active,(unsigned long long)m_stats.m_fif,(unsigned long long)m_stats.m_err_drop,(unsigned long long)m_stats.get_total_err());
+ fprintf(fd,"------------------------------------------------------------------------------------------------------------\n");
+
+}
+
+void RxCheckManager::Dump(FILE *fd){
+ m_stats.Dump(fd);
+ m_hist.DumpWinMax(fd);
+ m_hist.Dump(fd);
+ DumpTemplateFull(fd);
+ fprintf(fd," ager :\n");
+ m_tw.Dump(fd);
+}
+
+void RxCheckManager::dump_json(std::string & json){
+
+ json="{\"name\":\"rx-check\",\"type\":0,\"data\":{";
+ m_stats.dump_json(json);
+ m_hist.dump_json("latency_hist",json);
+ template_dump_json(json);
+ m_tw.dump_json(json);
+ json+="\"unknown\":0}}" ;
+}
+
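
For readability, the sequence-number check that handle_packet() above applies to a non-FIF packet can be read as the small pure function below. It only restates the branches of the code above and is not part of the patch.

#include <stdint.h>

enum rx_seq_verdict {
    RX_SEQ_OK,      /* in order */
    RX_SEQ_DUP,     /* counted as m_err_oo_dup:   the previous packet id shows up again        */
    RX_SEQ_LATE,    /* counted as m_err_oo_late:  expected id is smaller than the received one */
    RX_SEQ_EARLY    /* counted as m_err_oo_early: expected id is larger than the received one  */
};

/* `expected` is the per-direction m_seq, `pkt_id` comes from the rx-check header. */
static inline enum rx_seq_verdict classify_seq(uint16_t expected, uint16_t pkt_id) {
    if (expected == pkt_id) {
        return RX_SEQ_OK;
    }
    if ((uint16_t)(expected - 1) == pkt_id) {
        return RX_SEQ_DUP;
    }
    return (expected < pkt_id) ? RX_SEQ_LATE : RX_SEQ_EARLY;
}
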
diff --git a/src/rx_check.h b/src/rx_check.h
new file mode 100755
index 00000000..6f9763a2
--- /dev/null
+++ b/src/rx_check.h
@@ -0,0 +1,427 @@
+#ifndef RX_CHECK_H
+#define RX_CHECK_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include "timer_wheel_pq.h"
+#include "rx_check_header.h"
+#include "time_histogram.h"
+#include "utl_jitter.h"
+
+
+
+typedef enum {
+ CLIENT_SIDE=0,
+ SERVER_SIDE=1,
+ CS_NUM=2
+} pkt_dir_enum_t;
+
+typedef uint8_t pkt_dir_t ;
+
+
+void flow_aging_callback(CFlowTimerHandle * t);
+
+class CRxCheckFlowPerDir {
+public:
+ CRxCheckFlowPerDir(){
+ m_flags=0;
+ m_pkts=0;
+ m_seq=0;
+ m_flow_size=0;
+
+ }
+ uint16_t m_flow_size; // how many packets in this direction
+ uint16_t m_pkts;
+ uint16_t m_seq;
+private:
+ uint16_t m_flags;
+public:
+
+ void set_fif_seen(uint16_t flow_per_dir){
+ m_pkts=1;
+ m_seq=1;
+ m_flags |=2;
+ m_flow_size=flow_per_dir;
+ }
+ bool is_fif_seen(){
+ return ( (m_flags & 2) ==2 ?true:false);
+ }
+
+ void set_init_not_from_first_packet(){
+ m_flags |=1;
+ }
+ bool is_init_not_from_first_pkt(){
+        return ( (m_flags & 1) ==1 ?true:false);
+ }
+};
+
+class CRxCheckFlow {
+public:
+ CRxCheckFlow(){
+ m_aging_timer_handle.m_callback =flow_aging_callback;
+ m_aging_timer_handle.m_object = (void *)this;
+ m_aging_timer_handle.m_id= 0x1234;
+ m_oo_err=0;
+ m_flags=0;
+ }
+
+
+public:
+ /* timestamp of FIF */
+ uint64_t m_flow_id; /* key*/
+ CRxCheckFlowPerDir m_dir[CS_NUM];
+ CFlowTimerHandle m_aging_timer_handle;
+ uint16_t m_oo_err; /* out of order issue */
+ uint16_t m_flags;
+public:
+
+ uint16_t get_total_pkt_seen(void){
+ return (m_dir[0].m_pkts+
+ m_dir[1].m_pkts);
+ }
+ uint16_t get_total_pkt_expected(void){
+ return (m_dir[0].m_flow_size+
+ m_dir[1].m_flow_size);
+ }
+
+ bool is_all_pkts_seen(void){
+ int i;
+ int c=0;
+ for (i=0; i<2; i++) {
+ if ( (m_dir[i].m_pkts!=m_dir[i].m_flow_size) ){
+ return (false);
+ }
+ if (m_dir[i].m_flow_size>0) {
+ c++;
+ }
+ }
+ int expc=is_both_dir()?2:1;
+ if ( expc == c ) {
+ return ( (m_oo_err==0)?true:false );
+ }
+ return (false);
+ }
+
+ void set_both_dir(){
+ m_flags |=2;
+ }
+ bool is_both_dir(){
+ return ( (m_flags & 2) == 2 ?true:false);
+ }
+
+
+
+ void set_aged_correctly(){
+ m_flags |=4;
+ }
+ bool is_aged_correctly(){
+ return ( (m_flags & 4) ==4 ?true:false);
+ }
+
+};
+
+
+
+
+class CRxCheckFlowTableStats {
+public:
+
+ uint64_t m_total_rx;
+ uint64_t m_total_rx_bytes;
+
+ uint64_t m_lookup;
+ uint64_t m_found;
+ uint64_t m_fif;
+ uint64_t m_add;
+ uint64_t m_remove;
+ uint64_t m_active;
+ uint64_t m_err_drop;
+ uint64_t m_err_aged;
+
+
+ uint64_t m_err_no_magic;
+ uint64_t m_err_wrong_pkt_id;
+ uint64_t m_err_fif_seen_twice;
+ uint64_t m_err_open_with_no_fif_pkt;
+
+    uint64_t  m_err_oo_dup;   /* got the same packet id twice, e.g. expected 1, got 0 */
+
+    uint64_t  m_err_oo_early; /* missed a packet, e.g. expected 1, got 2 */
+    uint64_t  m_err_oo_late;  /* early packet, e.g. expected 7, got 6 */
+
+    uint64_t  m_err_flow_length_changed; /* the advertised flow length changed between packets of the same flow */
+
+ uint64_t get_total_err(void){
+ return (m_err_drop+m_err_aged+
+ m_err_no_magic+
+ m_err_wrong_pkt_id+
+ m_err_fif_seen_twice+
+ m_err_open_with_no_fif_pkt+
+ m_err_oo_dup+
+ m_err_oo_early+
+ m_err_oo_late+m_err_flow_length_changed);
+
+ }
+
+public:
+ void Clear();
+ void Dump(FILE *fd);
+ void dump_json(std::string & json);
+};
+
+
+
+typedef CRxCheckFlow * rx_check_flow_ptr;
+typedef std::map<uint64_t, rx_check_flow_ptr, std::less<uint64_t> > rx_check_flow_map_t;
+typedef rx_check_flow_map_t::iterator rx_check_flow_map_iter_t;
+
+
+class CRxCheckFlowTableMap {
+public:
+ virtual bool Create(int max_size);
+ virtual void Delete();
+ virtual bool remove(uint64_t fid );
+ virtual CRxCheckFlow * lookup(uint64_t fid );
+ virtual CRxCheckFlow * add(uint64_t fid );
+ virtual void remove_all(void);
+ void dump_all(FILE *fd);
+ uint64_t count(void);
+public:
+ rx_check_flow_map_t m_map;
+};
+
+
+
+class uint64_tHashEnv
+{
+public:
+ static uint32_t Hash(uint64_t x)
+ {
+ return ( (x >>40) ^ (x & 0xffffffff));
+ }
+};
+
+template<class T>
+class CMyFSA {
+public:
+ bool Create(uint32_t size, bool supportGetNext, bool ctorRequired){
+ return(true);
+ }
+
+ void Delete(){
+ }
+ void Reset(){
+ }
+
+ T * GetNewItem(){
+ return (new T());
+ }
+
+ void ReturnItem(T *obj){
+ delete obj;
+ }
+};
+
+
+
+#if 0
+
+typedef CHashEntry<uint64_t,CRxCheckFlow> rx_c_hash_ent_t;
+typedef CCloseHash<uint64_t,CRxCheckFlow,uint64_tHashEnv,CMyFSA<rx_c_hash_ent_t> > rx_c_hash_t;
+
+
+class CRxCheckFlowTableHash {
+public:
+ bool Create(int max_size){
+ return ( m_hash.Create(max_size,0,false,false,true) );
+ }
+ void Delete(){
+ m_hash.Delete();
+ }
+ bool remove(uint64_t fid ) {
+ return(m_hash.Remove(fid)==hsOK?true:false);
+ }
+ CRxCheckFlow * lookup(uint64_t fid ){
+ rx_c_hash_ent_t *lp=m_hash.Find(fid);
+ if (lp) {
+ return (&lp->value);
+ }else{
+ return ((CRxCheckFlow *)NULL);
+ }
+ }
+ CRxCheckFlow * add(uint64_t fid ){
+ rx_c_hash_ent_t *lp;
+ assert(m_hash.Insert(fid,lp)==hsOK);
+ return (&lp->value);
+ }
+
+ void remove_all(void){
+
+ }
+ void dump_all(FILE *fd){
+ m_hash.Dump(fd);
+ }
+ uint64_t count(void){
+ return ( m_hash.GetSize());
+
+ }
+public:
+
+ rx_c_hash_t m_hash;
+};
+
+#endif
+
+
+// must be a power of 2
+#define MAX_TEMPLATES_STATS 32
+
+#define MAX_TEMPLATES_STATS_MASK (MAX_TEMPLATES_STATS-1)
+
+class CPerTxthreadTemplateInfo {
+
+public:
+ CPerTxthreadTemplateInfo(){
+ Clear();
+ }
+ void Clear(){
+ memset(m_template_info,0,sizeof(m_template_info));
+ }
+ void Dump(FILE *fd);
+
+ void Add(CPerTxthreadTemplateInfo * obj);
+
+ void inc_template(uint8_t index){
+ if (index<MAX_TEMPLATES_STATS) {
+ m_template_info[index]++;
+ }else{
+ m_template_info[MAX_TEMPLATES_STATS-1]++;
+ }
+ }
+
+ std::string dump_as_json(std::string name);
+
+ uint64_t m_template_info[MAX_TEMPLATES_STATS];
+};
+
+class CPerTemplateInfo {
+public:
+ CPerTemplateInfo() {
+ reset();
+ }
+
+ void reset(){
+ m_errors=0;
+ m_rx_pkts=0;
+ m_jitter.reset();
+ }
+
+ void calc(double dtime){
+ m_jitter.calc(dtime);
+ }
+
+ uint32_t get_jitter_usec(){
+ return ((uint32_t)(m_jitter.get_jitter()*1000000.0));
+ }
+
+ void inc_error_counter(void){
+ m_errors++;
+
+ }
+
+ uint64_t get_error_counter(){
+ return (m_errors);
+ }
+
+ void inc_rx_counter(void){
+ m_rx_pkts++;
+ }
+
+ uint64_t get_rx_counter(){
+ return (m_rx_pkts);
+ }
+
+
+private:
+ uint64_t m_rx_pkts;
+ CJitter m_jitter;
+ uint64_t m_errors;
+};
+
+class RxCheckManager {
+
+public:
+ bool Create();
+ void Delete();
+ void handle_packet(CRx_check_header * rxh);
+ void Dump(FILE *fd);
+ void DumpShort(FILE *fd);
+ void DumpTemplate(FILE *fd,bool verbose);
+ void DumpTemplateFull(FILE *fd);
+
+ uint32_t getTemplateMaxJitter();
+
+ void template_dump_json(std::string & json);
+
+ uint64_t getTotalRx(void){
+ return ( m_stats.m_total_rx );
+ }
+
+ void tw_drain();
+ void tw_handle();
+
+ void dump_json(std::string & json );
+
+protected:
+ void update_template_err(uint8_t template_id);
+
+ CPerTemplateInfo * get_template(uint8_t index){
+ uint8_t _id;
+ if ( index < MAX_TEMPLATES_STATS_MASK ){
+ _id=index;
+ }else{
+ _id=MAX_TEMPLATES_STATS_MASK;
+ }
+ return (&m_template_info[_id]);
+ }
+
+ bool on_flow_end(CRxCheckFlow * lp);
+ friend void flow_aging_callback(CFlowTimerHandle * t);
+public:
+
+ CTimerWheel m_tw;
+ CRxCheckFlowTableMap m_ft;
+ CRxCheckFlowTableStats m_stats;
+
+ CTimeHistogram m_hist;
+ CPerTemplateInfo m_template_info[MAX_TEMPLATES_STATS];
+ bool m_on_drain;
+public:
+ dsec_t m_cur_time;
+
+};
+
+
+
+
+
+#endif
diff --git a/src/rx_check_header.cpp b/src/rx_check_header.cpp
new file mode 100755
index 00000000..8ee580db
--- /dev/null
+++ b/src/rx_check_header.cpp
@@ -0,0 +1,52 @@
+#include "rx_check_header.h"
+#include <common/basic_utils.h>
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+
+void CRx_check_header::dump(FILE *fd){
+
+ fprintf(fd," time_stamp : %x \n",m_time_stamp);
+ uint64_t d = (os_get_hr_tick_32() - m_time_stamp );
+ dsec_t dd= ptime_convert_hr_dsec(d);
+
+ fprintf(fd," time_stamp : %f \n",dd);
+ fprintf(fd," magic : %x \n",m_magic);
+ fprintf(fd," pkt_id : %x \n",m_pkt_id);
+ fprintf(fd," size : %x \n",m_flow_size);
+
+ fprintf(fd," flow_id : %lx \n",m_flow_id);
+ fprintf(fd," flags : %x \n",m_flags);
+}
+
+
+
+void CNatOption::dump(FILE *fd){
+
+ fprintf(fd," op : %x \n",get_option_type());
+ fprintf(fd," ol : %x \n",get_option_len());
+ fprintf(fd," thread_id : %x \n",get_thread_id());
+ fprintf(fd," magic : %x \n",get_magic());
+ fprintf(fd," fid : %x \n",get_fid());
+ utl_DumpBuffer(stdout,(void *)&u.m_data[0],8,0);
+}
+
diff --git a/src/rx_check_header.h b/src/rx_check_header.h
new file mode 100755
index 00000000..3ac5dd1f
--- /dev/null
+++ b/src/rx_check_header.h
@@ -0,0 +1,212 @@
+#ifndef RX_CHECK_HEADER
+#define RX_CHECK_HEADER
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <stdint.h>
+#include <stdio.h>
+#include <common/bitMan.h>
+#include <common/Network/Packet/CPktCmn.h>
+
+
+#include "os_time.h"
+
+#define RX_CHECK_LEN (sizeof(struct CRx_check_header))
+
+// IPv4 option type:
+// bit[0] = copy flag (0=do not copy to fragment)
+// bit[1:2] = option class (2=debugging)
+// bit[3:7] = option number (23=TRex)
+#define RX_CHECK_V4_OPT_TYPE 0x57
+#define RX_CHECK_V4_OPT_LEN RX_CHECK_LEN
+
+// IPv6 extension header:
+// type = 60 (destination options)
+#define RX_CHECK_V6_OPT_TYPE 0x3c
+#define RX_CHECK_V6_OPT_LEN ((RX_CHECK_LEN - 8) / 8)
+
+// IPv6 subfield option type:
+// bit[0:1] = unrecognized option action (00=skip option)
+// bit[2] = change allowed flag (0=no changes enroute)
+// bit[3:7] = option number (23=TRex)
+#define RX_CHECK_V6_SF_OPT_TYPE 0x17
+#define RX_CHECK_V6_SF_OPT_LEN (RX_CHECK_LEN - 2)
+
+// Magic field overlays IPv6 subfield option type
+#define RX_CHECK_MAGIC (((RX_CHECK_V6_SF_OPT_LEN & 0xff) << 8) | \
+ RX_CHECK_V6_SF_OPT_TYPE)
+
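
A minimal check of the bit layouts described above (illustration only, not part of the patch; it assumes sizeof(struct CRx_check_header) is 24 bytes, as the struct comment below states):

    #include <cstdint>
    #include <cassert>

    int main() {
        // IPv4 option type byte: copy flag | class (2 bits) | number (5 bits), MSB first.
        uint8_t v4_type = (0u << 7) | (2u << 5) | 23u;      // == 0x57 == RX_CHECK_V4_OPT_TYPE
        assert(v4_type == 0x57);

        // IPv6 destination-option subfield type: action (2 bits) | change flag | number (5 bits).
        uint8_t v6_sf_type = (0u << 6) | (0u << 5) | 23u;   // == 0x17 == RX_CHECK_V6_SF_OPT_TYPE
        assert(v6_sf_type == 0x17);

        // IPv6 extension-header length is counted in 8-byte units, excluding the first 8 bytes.
        unsigned rx_check_len = 24;                         // assumed sizeof(struct CRx_check_header)
        assert((rx_check_len - 8) / 8 == 2);                // == RX_CHECK_V6_OPT_LEN
        return 0;
    }
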
+struct CRx_check_header {
+ // 24 bytes, watch alignment and keep this in 8 byte increments
+ uint8_t m_option_type;
+ uint8_t m_option_len;
+ uint16_t m_magic;
+
+ uint32_t m_time_stamp;
+ uint64_t m_flow_id; // include thread_id , template_id
+ uint16_t m_pkt_id; /* pkt_id inside the flow - zero based: 0,1,2.. */
+ uint16_t m_flow_size; /* how many packets in flow */
+
+ uint8_t m_flags;
+ uint8_t m_template_id;
+ uint16_t m_aging_sec;
+
+public:
+ bool is_fif_dir(void){
+ return (m_pkt_id==0?true:false);
+ }
+ bool is_eof_dir(void){
+ return ( (m_pkt_id==m_flow_size-1)?true:false);
+ }
+
+ void set_dir(int dir){
+ btSetMaskBit8(m_flags,0,0,dir?1:0);
+ }
+
+ int get_dir(void){
+ return (btGetMaskBit8(m_flags,0,0) ? 1:0);
+ }
+
+ /* mark whether we expect to see both sides of the flow; this is known offline */
+ void set_both_dir(int both){
+ btSetMaskBit8(m_flags,1,1,both?1:0);
+ }
+
+ int get_both_dir(void){
+ return (btGetMaskBit8(m_flags,1,1) ? 1:0);
+ }
+
+
+
+ void dump(FILE *fd);
+};
+
+
+class CNatOption {
+public:
+ enum {
+ noIPV4_OPTION = 0x10, /* dummy IPV4 option */
+ noOPTION_LEN = 0x8,
+ noIPV4_MAGIC = 0xEE,
+ noIPV4_MAGIC_RX = 0xED,
+
+ noIPV6_OPTION_LEN = (noOPTION_LEN/8)-1,
+ noIPV6_OPTION = 0x3C, /*IPv6-Opts Destination Options for IPv6 RFC 2460*/
+ };
+ void set_option_type(uint8_t id){
+ u.m_data[0]=id;
+ }
+ uint8_t get_option_type(){
+ return (u.m_data[0]);
+ }
+
+ void set_option_len(uint8_t len){
+ u.m_data[1]=len;
+ }
+ uint8_t get_option_len(){
+ return ( u.m_data[1]);
+ }
+
+ void set_thread_id(uint8_t thread_id){
+ u.m_data[3]=thread_id;
+ }
+ uint8_t get_thread_id(){
+ return (u.m_data[3]);
+ }
+
+ void set_magic(uint8_t magic){
+ u.m_data[2]=magic;
+ }
+
+ uint8_t get_magic(){
+ return (u.m_data[2]);
+ }
+
+ void set_rx_check(bool enable){
+ if (enable) {
+ set_magic(CNatOption::noIPV4_MAGIC_RX);
+ }else{
+ set_magic(CNatOption::noIPV4_MAGIC);
+ }
+ }
+ bool is_rx_check(){
+ if (get_magic() ==CNatOption::noIPV4_MAGIC_RX) {
+ return(true);
+ }else{
+ return (false);
+ }
+ }
+
+
+ void set_fid(uint32_t fid){
+ u.m_data_uint32[1]=fid;
+ }
+ uint32_t get_fid(){
+ return (u.m_data_uint32[1]);
+ }
+
+ bool is_valid_ipv4_magic_op0(void){
+ return ( ( PKT_NTOHL( u.m_data_uint32[0] )& 0xFFFFFF00 ) ==
+ (CNatOption::noIPV4_OPTION <<24) + (CNatOption::noOPTION_LEN<<16) + (CNatOption::noIPV4_MAGIC<<8) ?true:false);
+ }
+
+ bool is_valid_ipv4_magic_op1(void){
+ return ( ( PKT_NTOHL( u.m_data_uint32[0] )& 0xFFFFFF00 ) ==
+ (CNatOption::noIPV4_OPTION <<24) + (CNatOption::noOPTION_LEN<<16) + (CNatOption::noIPV4_MAGIC_RX<<8) ?true:false);
+ }
+
+ bool is_valid_ipv4_magic(void){
+ return (is_valid_ipv4_magic_op0() ||is_valid_ipv4_magic_op1() );
+ }
+
+
+ bool is_valid_ipv6_magic(void){
+ return ( ( PKT_NTOHL( u.m_data_uint32[0] )& 0x00FFFF00 ) ==
+ (CNatOption::noIPV6_OPTION_LEN<<16) + (CNatOption::noIPV4_MAGIC<<8) ?true:false);
+
+ }
+
+ void set_init_ipv4_header(){
+ set_option_type(CNatOption::noIPV4_OPTION);
+ set_option_len(CNatOption::noOPTION_LEN);
+ set_magic(CNatOption::noIPV4_MAGIC);
+ }
+
+ void set_init_ipv6_header(void){
+ set_option_len(noIPV6_OPTION_LEN);
+ set_magic(CNatOption::noIPV4_MAGIC);
+ }
+
+ void dump(FILE *fd);
+
+
+private:
+ union u_ {
+ uint8_t m_data[8];
+ uint32_t m_data_uint32[2];
+ } u;
+};
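
A short usage sketch of the accessors above (illustration only, not part of the patch; the validity check relies on PKT_NTOHL reading the four option bytes in network order, as the class assumes):

    CNatOption opt;
    opt.set_init_ipv4_header();          // type 0x10, len 8, magic 0xEE
    opt.set_thread_id(3);
    opt.set_fid(0x12345678);
    assert(opt.is_valid_ipv4_magic());   // matches the op0 (0xEE) form
    opt.set_rx_check(true);              // switch the magic to 0xED
    assert(opt.is_rx_check());
    assert(opt.is_valid_ipv4_magic());   // now matches the op1 (0xED) form
    opt.dump(stdout);
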
+
+
+#endif
+
+
diff --git a/src/time_histogram.cpp b/src/time_histogram.cpp
new file mode 100755
index 00000000..f1b47e59
--- /dev/null
+++ b/src/time_histogram.cpp
@@ -0,0 +1,251 @@
+#include "time_histogram.h"
+#include <string.h>
+#include "utl_json.h"
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+
+
+void CTimeHistogram::Reset(){
+ m_max_dt=0.0;
+ m_cnt =0;
+ m_high_cnt =0;
+ m_max_win_dt=0;
+ m_max_win_last_dt=0;
+ m_average=0.0;
+ memset(&m_max_ar[0],0,sizeof(m_max_ar));
+ m_win_cnt=0;
+
+ int i;
+ int j;
+ for (i=0;i<HISTOGRAM_SIZE; i++) {
+ for (j=0; j<HISTOGRAM_SIZE_LOG;j++) {
+ m_hcnt[j][i]=0;
+ }
+ }
+ for (i=0;i<HISTOGRAM_SIZE; i++) {
+ for (j=0; j<HISTOGRAM_SIZE_LOG;j++) {
+ m_hcnt_shadow[j][i]=0;
+ }
+ }
+ m_cnt_shadow=0;
+ m_high_cnt_shadow=0;
+}
+
+bool CTimeHistogram::Create(){
+ m_min_delta =10.0/1000000.0;
+ Reset();
+ return (true);
+}
+void CTimeHistogram::Delete(){
+}
+
+bool CTimeHistogram::Add(dsec_t dt){
+
+ m_cnt++;
+ if (dt < m_min_delta) {
+ return false;
+ }
+ m_high_cnt++;
+
+ if ( m_max_dt < dt){
+ m_max_dt = dt;
+ }
+ if ( m_max_win_dt < dt){
+ m_max_win_dt = dt;
+ }
+
+ uint32_t d_10usec=(uint32_t)(dt*100000.0);
+ // level 0 buckets: index 0 = 10-19 usec, index 1 = 20-29 usec, index 2 = 30-39 usec, ...
+ // each further level multiplies the bucket width by 10
+
+ int j;
+ for (j=0; j<HISTOGRAM_SIZE_LOG; j++) {
+ uint32_t low = d_10usec % 10;
+ uint32_t high = d_10usec / 10;
+ if (high == 0 ) {
+ if (low>0) {
+ low=low-1;
+ }
+ m_hcnt[j][low]++;
+ break;
+ }else{
+ d_10usec =high;
+ }
+ }
+
+ return true;
+}
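
For clarity, a standalone re-implementation of the bucketing above, showing where two samples land (illustration only, not part of the patch; the constant 5 mirrors HISTOGRAM_SIZE_LOG):

    #include <cstdint>
    #include <cassert>

    // Mimics the loop in CTimeHistogram::Add: returns true plus the (level, bucket)
    // a latency sample falls into, or false for samples too large to bucket.
    static bool bucket_of(double dt_sec, int &level, int &bucket) {
        uint32_t d_10usec = (uint32_t)(dt_sec * 100000.0);   // sample in 10-usec units
        for (int j = 0; j < 5 /* HISTOGRAM_SIZE_LOG */; j++) {
            uint32_t low  = d_10usec % 10;
            uint32_t high = d_10usec / 10;
            if (high == 0) {
                if (low > 0) low--;
                level = j; bucket = (int)low;
                return true;
            }
            d_10usec = high;
        }
        return false;
    }

    int main() {
        int j, i;
        assert(bucket_of(45e-6, j, i)  && j == 0 && i == 3);  // reported by Dump() as h[40]
        assert(bucket_of(350e-6, j, i) && j == 1 && i == 2);  // reported by Dump() as h[300]
        return 0;
    }
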
+
+
+void CTimeHistogram::update(){
+
+ m_max_ar[m_win_cnt]=m_max_win_dt;
+ m_max_win_last_dt=m_max_win_dt;
+ m_max_win_dt=0.0;
+ m_win_cnt++;
+ if (m_win_cnt==HISTOGRAM_QUEUE_SIZE) {
+ m_win_cnt=0;
+ }
+ update_average();
+}
+
+double CTimeHistogram::get_cur_average(){
+ int i,j;
+ uint64_t d_cnt;
+ uint64_t d_cnt_high;
+
+ d_cnt = m_cnt - m_cnt_shadow;
+ m_cnt_shadow =m_cnt;
+ d_cnt_high = m_high_cnt - m_high_cnt_shadow;
+ m_high_cnt_shadow =m_high_cnt;
+
+ uint64_t low_events = d_cnt - d_cnt_high;
+ uint64_t sum= low_events;
+ double s = ((double)low_events * 5.0);
+
+
+ for (j=0; j<HISTOGRAM_SIZE_LOG; j++) {
+ for (i=0; i<HISTOGRAM_SIZE; i++) {
+ uint64_t cnt=m_hcnt[j][i];
+ if (cnt > 0 ) {
+ uint64_t d= cnt - m_hcnt_shadow[j][i];
+ sum += d;
+ s+= ((double)d)*1.5*get_base_usec(j,i);
+ m_hcnt_shadow[j][i] = cnt;
+ }
+ }
+ }
+
+ double c_average;
+ if ( sum > 0 ) {
+ c_average=s/(double)sum;
+ }else{
+ c_average=0.0;
+ }
+ return c_average;
+}
+
+void CTimeHistogram::update_average(){
+ double c_average=get_cur_average();
+
+ // low pass filter
+ m_average = 0.5*m_average + 0.5*c_average;
+}
+
+dsec_t CTimeHistogram::get_total_average(){
+ return (get_cur_average());
+}
+
+dsec_t CTimeHistogram::get_average_latency(){
+ return (m_average);
+}
+
+
+uint32_t CTimeHistogram::get_usec(dsec_t d){
+ return (uint32_t)(d*1000000.0);
+}
+
+void CTimeHistogram::DumpWinMax(FILE *fd){
+
+ int i;
+ uint32_t ci=m_win_cnt;
+
+ for (i=0; i<HISTOGRAM_QUEUE_SIZE-1; i++) {
+ dsec_t d=get_usec(m_max_ar[ci]);
+ ci++;
+ if (ci>HISTOGRAM_QUEUE_SIZE-1) {
+ ci=0;
+ }
+ fprintf(fd," %.0f ",d);
+ }
+}
+
+void CTimeHistogram::Dump(FILE *fd){
+ fprintf (fd," min_delta : %u usec \n",get_usec(m_min_delta));
+ fprintf (fd," cnt : %lu \n",m_cnt);
+ fprintf (fd," high_cnt : %lu \n",m_high_cnt);
+ fprintf (fd," max_d_time : %u usec\n",get_usec(m_max_dt));
+ //fprintf (fd," average : %.0f usec\n", get_total_average());
+ fprintf (fd," sliding_average : %.0f usec\n", get_average_latency());
+ fprintf (fd," percent : %.1f %%\n",(100.0*(double)m_high_cnt/(double)m_cnt));
+
+ fprintf (fd," histogram \n");
+ fprintf (fd," -----------\n");
+ int i;
+ int j;
+ int base=10;
+ for (j=0; j<HISTOGRAM_SIZE_LOG; j++) {
+ for (i=0; i<HISTOGRAM_SIZE; i++) {
+ if (m_hcnt[j][i] >0 ) {
+ fprintf (fd," h[%d] : %lu \n",(base*(i+1)),m_hcnt[j][i]);
+ }
+ }
+ base=base*10;
+ }
+}
+
+/*
+ { "histogram" : [ {} ,{} ] }
+
+*/
+
+void CTimeHistogram::dump_json(std::string name,std::string & json ){
+ char buff[200];
+ sprintf(buff,"\"%s\":{",name.c_str());
+ json+=std::string(buff);
+
+ json+=add_json("min_usec",get_usec(m_min_delta));
+ json+=add_json("max_usec",get_usec(m_max_dt));
+ json+=add_json("high_cnt",m_high_cnt);
+ json+=add_json("cnt",m_cnt);
+ //json+=add_json("t_avg",get_total_average());
+ json+=add_json("s_avg",get_average_latency());
+ int i;
+ int j;
+ uint32_t base=10;
+
+ json+=" \"histogram\": [";
+ bool first=true;
+ for (j=0; j<HISTOGRAM_SIZE_LOG; j++) {
+ for (i=0; i<HISTOGRAM_SIZE; i++) {
+ if (m_hcnt[j][i] >0 ) {
+ if ( first ){
+ first=false;
+ }else{
+ json+=",";
+ }
+ json+="{";
+ json+=add_json("key",(base*(i+1)));
+ json+=add_json("val",m_hcnt[j][i],true);
+ json+="}";
+ }
+ }
+ base=base*10;
+ }
+ json+=" ] } ,";
+
+}
+
+
+
diff --git a/src/time_histogram.h b/src/time_histogram.h
new file mode 100755
index 00000000..e733c65f
--- /dev/null
+++ b/src/time_histogram.h
@@ -0,0 +1,92 @@
+#ifndef C_TIME_HISTOGRAM_H
+#define C_TIME_HISTOGRAM_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <stdint.h>
+#include "os_time.h"
+#include <stdio.h>
+#include <math.h>
+#include "mbuf.h"
+#include <string>
+
+
+class CTimeHistogram {
+public:
+ enum {
+ HISTOGRAM_SIZE=9,
+ HISTOGRAM_SIZE_LOG=5,
+ HISTOGRAM_QUEUE_SIZE=14,
+ };
+ bool Create(void);
+ void Delete();
+ void Reset();
+ bool Add(dsec_t dt);
+ void Dump(FILE *fd);
+ void DumpWinMax(FILE *fd);
+ /* should be called each 1 sec */
+ void update();
+ dsec_t get_average_latency();
+ /* get average of total data */
+ dsec_t get_total_average();
+
+
+ dsec_t get_max_latency(){
+ return (get_usec(m_max_dt));
+ }
+
+ dsec_t get_max_latency_last_update(){
+ return ( get_usec(m_max_win_last_dt) );
+ }
+
+ void dump_json(std::string name,std::string & json );
+
+
+private:
+ uint32_t get_usec(dsec_t d);
+ double get_cur_average();
+ void update_average();
+
+ double get_base_usec(int j,int i){
+ double base=pow(10.0,(double)j+1.0);
+ return ( base * ((double)i+1.0) );
+ }
+
+public:
+ dsec_t m_min_delta;/* set to 10usec*/
+ uint64_t m_cnt;
+ uint64_t m_high_cnt;
+ dsec_t m_max_dt;
+ dsec_t m_max_win_dt;
+ dsec_t m_max_win_last_dt;
+ dsec_t m_average; /* moving average */
+
+ uint32_t m_win_cnt;
+ dsec_t m_max_ar[HISTOGRAM_QUEUE_SIZE];
+
+ uint64_t m_hcnt[HISTOGRAM_SIZE_LOG][HISTOGRAM_SIZE] __rte_cache_aligned ;
+ uint64_t m_hcnt_shadow[HISTOGRAM_SIZE_LOG][HISTOGRAM_SIZE] __rte_cache_aligned ; // control-side shadow copy, used to compute per-window deltas
+ uint64_t m_cnt_shadow;
+ uint64_t m_high_cnt_shadow;
+};
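
A minimal usage sketch of the class above (illustration only, not part of the patch), following the one-second update contract noted next to update():

    CTimeHistogram hist;
    hist.Create();
    for (int i = 0; i < 1000; i++) {
        hist.Add(20e-6 + (i % 10) * 5e-6);   // synthetic 20..65 usec latency samples
    }
    hist.update();                           // to be called once per second
    hist.Dump(stdout);
    std::string json;
    hist.dump_json("latency", json);
    hist.Delete();
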
+
+#endif
diff --git a/src/timer_wheel_pq.cpp b/src/timer_wheel_pq.cpp
new file mode 100755
index 00000000..172d061e
--- /dev/null
+++ b/src/timer_wheel_pq.cpp
@@ -0,0 +1,355 @@
+#include "timer_wheel_pq.h"
+#include "utl_json.h"
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#define DP(a) fprintf(fd," %-40s : %d \n",#a,(int)a)
+#define DP_OUT(a) fprintf(stdout," %-40s : %d \n",#a,(int)a)
+#define DP_J(f) json+=add_json(#f,f);
+#define DP_J_LAST(f) json+=add_json(#f,f,true);
+
+
+void CTimerWheel::Dump(FILE *fd){
+ DP(m_st_alloc);
+ DP(m_st_free);
+ DP(m_st_start);
+ DP(m_st_stop);
+ DP(m_st_handle);
+ uint64_t m_active=m_st_alloc-m_st_free;
+ DP(m_active);
+}
+
+
+void CTimerWheel::dump_json(std::string & json ){
+ json+="\"timer_w\" : {";
+ DP_J(m_st_alloc);
+ DP_J(m_st_free);
+ DP_J(m_st_start);
+ DP_J(m_st_stop);
+ DP_J(m_st_handle);
+ uint64_t m_active=m_st_alloc-m_st_free;
+ /* MUST BE LAST */
+ DP_J_LAST(m_active);
+ json+="},";
+}
+
+
+
+void CTimerWheel::restart_timer(CFlowTimerHandle * timer,
+ double new_time){
+
+
+ m_st_start++;
+ if (timer->m_timer == 0){
+ /* first time add the new time*/
+ CFlowTimer * t = new CFlowTimer();
+ m_st_alloc++;
+ t->m_time = new_time;
+ t->m_flow = timer;
+ timer->m_timer = t;
+ m_pq.push(t);
+ }
+ else{
+ CFlowTimer * t = timer->m_timer;
+ if (new_time > t->m_time){
+ t->m_updated_time = new_time;
+ }else{
+ t->m_flow = 0;/* detach the old timer; it is discarded when it expires */
+ CFlowTimer * t = new CFlowTimer(); /* alloc new one */
+ m_st_alloc++;
+ t->m_time = new_time;
+ t->m_flow = timer;
+ timer->m_timer = t;
+ m_pq.push(t);
+ }
+ }
+}
+
+void CTimerWheel::stop_timer(CFlowTimerHandle * timer){
+
+ CFlowTimer * t = timer->m_timer;
+ if (t){
+ m_st_stop++;
+ t->m_flow = 0;
+ timer->m_timer = 0;
+ }
+};
+
+
+
+bool CTimerWheel::peek_top_time(double & time){
+
+ while (!m_pq.empty()) {
+ CFlowTimer * timer = m_pq.top();
+ if (!timer->is_valid()){
+ m_pq.pop();
+ m_st_free++;
+ delete timer;
+ }
+ else{
+ if (timer->m_updated_time > 0.0 && (timer->m_updated_time > timer->m_time )) {
+ timer->m_time = timer->m_updated_time;
+ m_pq.pop();
+ m_pq.push(timer);
+
+ } else{
+ assert(timer->m_flow);
+ time= timer->m_time;
+ return (true);
+ }
+ }
+ }
+ return (false);
+}
+
+void CTimerWheel::drain_all(void){
+
+ double tw_time;
+ while (true) {
+ if ( peek_top_time(tw_time) ){
+ handle();
+ }else{
+ break;
+ }
+ }
+}
+
+
+void CTimerWheel::try_handle_events(double now){
+ double min_time;
+ while (true) {
+ if ( peek_top_time(min_time) ){
+ if (min_time < now ) {
+ handle();
+ }else{
+ break;
+ }
+ }else{
+ break;
+ }
+ }
+}
+
+
+bool CTimerWheel::handle(){
+
+ while (!m_pq.empty()) {
+ CFlowTimer * timer = m_pq.top();
+ if (!timer->is_valid()){
+ m_pq.pop();
+ m_st_free++;
+ delete timer;
+ }
+ else{
+ if (timer->m_updated_time > 0.0 && (timer->m_updated_time > timer->m_time ) ) {
+ timer->m_time = timer->m_updated_time;
+ m_pq.pop();
+ m_pq.push(timer);
+
+ } else{
+ assert(timer->m_flow);
+ CFlowTimerHandle * flow =timer->m_flow;
+ m_st_handle++;
+ if ( flow->m_callback ){
+ flow->m_callback(flow);
+ }
+ timer->m_flow=0;/* stop the timer */
+ flow->m_timer=0;
+ m_pq.pop();
+ m_st_free++;
+ delete timer;
+ return(true);
+ }
+ }
+ }
+ return(false);
+}
+
+
+#ifdef TW_TESTS
+
+
+void flow_callback(CFlowTimerHandle * timer_handle);
+
+class CTestFlow {
+public:
+ CTestFlow(){
+ flow_id = 0;
+ m_timer_handle.m_callback=flow_callback;
+ m_timer_handle.m_object = (void *)this;
+ m_timer_handle.m_id = 0x1234;
+ }
+
+ uint32_t flow_id;
+ CFlowTimerHandle m_timer_handle;
+public:
+ void OnTimeOut(){
+ printf(" timeout %d \n",flow_id);
+ }
+};
+
+void flow_callback(CFlowTimerHandle * t){
+ CTestFlow * lp=(CTestFlow *)t->m_object;
+ assert(lp);
+ assert(t->m_id==0x1234);
+ lp->OnTimeOut();
+}
+
+CTimerWheel my_tw;
+
+
+void tw_test1(){
+
+ CTestFlow f1;
+ CTestFlow f2;
+ CTestFlow f3;
+ CTestFlow f4;
+
+ f1.flow_id=1;
+ f2.flow_id=2;
+ f3.flow_id=3;
+ f4.flow_id=4;
+ double time;
+ assert(my_tw.peek_top_time(time)==false);
+ my_tw.restart_timer(&f1.m_timer_handle,10.0);
+ my_tw.restart_timer(&f2.m_timer_handle,5.0);
+ my_tw.restart_timer(&f3.m_timer_handle,1.0);
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ assert(time ==1.0);
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ assert(time ==1.0);
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ assert(time ==1.0);
+
+ assert(my_tw.handle());
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ assert(time ==5.0);
+
+ assert(my_tw.handle());
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ assert(time ==10.0);
+
+ assert(my_tw.handle());
+
+}
+
+void test2(){
+
+ CTestFlow f1;
+ CTestFlow f2;
+ CTestFlow f3;
+ CTestFlow f4;
+
+ f1.flow_id=1;
+ f2.flow_id=2;
+ f3.flow_id=3;
+ f4.flow_id=4;
+ double time;
+ assert(my_tw.peek_top_time(time)==false);
+ my_tw.restart_timer(&f1.m_timer_handle,10.0);
+ my_tw.restart_timer(&f2.m_timer_handle,5.0);
+ my_tw.restart_timer(&f3.m_timer_handle,1.0);
+ my_tw.stop_timer(&f1.m_timer_handle);
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ assert(time ==1.0);
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ assert(time ==1.0);
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ assert(time ==1.0);
+
+ assert(my_tw.handle());
+
+ assert(my_tw.peek_top_time(time)==true);
+ printf(" time %f \n",time);
+ assert(time ==5.0);
+
+ assert(my_tw.handle());
+
+ assert(my_tw.peek_top_time(time)==false);
+ my_tw.Dump(stdout);
+
+}
+
+void test3(){
+ int i;
+ for (i=0; i<100; i++) {
+ CTestFlow * f= new CTestFlow();
+ f->flow_id=(uint32_t)i;
+ my_tw.restart_timer(&f->m_timer_handle,100.0-(double)i);
+ }
+
+ double time;
+ while (true) {
+ if ( my_tw.peek_top_time(time) ){
+ printf(" %f \n",time);
+ assert(my_tw.handle());
+ }
+ else{
+ break;
+ }
+ }
+ my_tw.Dump(stdout);
+}
+
+void test4(){
+ int i;
+ for (i = 0; i<100; i++) {
+ CTestFlow * f = new CTestFlow();
+ f->flow_id = (uint32_t)i;
+ my_tw.restart_timer(&f->m_timer_handle, 500.0 - (double)i);
+ my_tw.restart_timer(&f->m_timer_handle, 1000.0 - (double)i);
+ my_tw.restart_timer(&f->m_timer_handle, 100.0 - (double)i);
+ my_tw.stop_timer(&f->m_timer_handle);
+ }
+
+
+ double time;
+ while (true) {
+ if (my_tw.peek_top_time(time)){
+ printf(" %f \n", time);
+ assert(my_tw.handle());
+ }
+ else{
+ break;
+ }
+ }
+ my_tw.Dump(stdout);
+}
+
+#endif
+
+
diff --git a/src/timer_wheel_pq.h b/src/timer_wheel_pq.h
new file mode 100755
index 00000000..cca5c07b
--- /dev/null
+++ b/src/timer_wheel_pq.h
@@ -0,0 +1,134 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef TW_WHEEL_PQ
+#define TW_WHEEL_PQ
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <algorithm>
+#include <queue>
+#include <assert.h>
+
+
+class CFlowTimerHandle;
+
+struct CFlowTimer {
+public:
+ CFlowTimer(){
+ m_updated_time = -1.0;
+
+ }
+ /* C1 */
+ /* time to expire */
+ double m_time;
+ /* time when timer was updated */
+ double m_updated_time;
+
+ CFlowTimerHandle * m_flow; /* back pointer to the flow */
+
+ bool is_valid(){
+ return (m_flow ? true : false);
+ }
+
+public:
+ bool operator <(const CFlowTimer * rsh) const {
+ return (m_time<rsh->m_time);
+ }
+ bool operator ==(const CFlowTimer * rsh) const {
+ return (m_time == rsh->m_time);
+ }
+ bool operator >(const CFlowTimer * rsh) const {
+ return (m_time>rsh->m_time);
+ }
+
+};
+
+struct CFlowTimerCompare
+{
+ bool operator() (const CFlowTimer * lhs, const CFlowTimer * rhs)
+ {
+ return lhs->m_time > rhs->m_time;
+ }
+};
+
+class CFlowTimerHandle;
+typedef void(*CallbackType_t)(CFlowTimerHandle * timer_handle);
+
+class CFlowTimerHandle {
+public:
+ CFlowTimerHandle(){
+ m_timer = 0;
+ m_object = 0;
+ m_object1=0;
+ m_callback = 0;
+ m_id = 0;
+ }
+ CFlowTimer * m_timer;
+ void * m_object;
+ void * m_object1;
+ CallbackType_t m_callback;
+ uint32_t m_id;
+};
+
+typedef CFlowTimer * timer_handle_t;
+
+typedef std::priority_queue<CFlowTimer *, std::vector<CFlowTimer *>, CFlowTimerCompare> tw_pqueue_t;
+
+class CTimerWheel {
+
+public:
+ CTimerWheel(){
+ m_st_alloc=0;
+ m_st_free=0;
+ m_st_start=0;
+ m_st_stop=0;
+ m_st_handle=0;
+ }
+public:
+ void restart_timer(CFlowTimerHandle * timer,double new_time);
+ void stop_timer(CFlowTimerHandle * timer);
+ bool peek_top_time(double & time);
+ void try_handle_events(double now);
+ void drain_all();
+
+ bool handle();
+public:
+ void Dump(FILE *fd);
+ void dump_json(std::string & json );
+
+private:
+ tw_pqueue_t m_pq;
+public:
+
+ uint32_t m_st_alloc;
+ uint32_t m_st_free;
+ uint32_t m_st_start;
+ uint32_t m_st_stop;
+ uint32_t m_st_handle;
+
+};
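
A small usage sketch of the callback pattern (illustration only, not part of the patch; it mirrors the TW_TESTS code in timer_wheel_pq.cpp, and MyFlow/my_timeout are hypothetical names):

    struct MyFlow {
        CFlowTimerHandle m_timer;
    };

    static void my_timeout(CFlowTimerHandle *h) {
        MyFlow *flow = (MyFlow *)h->m_object;
        (void)flow;                               // aging/cleanup work goes here
    }

    void example(CTimerWheel &tw, MyFlow &f, double now) {
        f.m_timer.m_callback = my_timeout;
        f.m_timer.m_object   = (void *)&f;
        tw.restart_timer(&f.m_timer, now + 2.0);  // (re)arm 2 seconds ahead
        tw.try_handle_events(now);                // fire every timer that already expired
    }
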
+
+
+#endif
diff --git a/src/tuple_gen.cpp b/src/tuple_gen.cpp
new file mode 100755
index 00000000..0faa6b63
--- /dev/null
+++ b/src/tuple_gen.cpp
@@ -0,0 +1,307 @@
+/*
+
+ Wenxian Li
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "tuple_gen.h"
+#include <string.h>
+#include "utl_yaml.h"
+
+
+
+
+/* simple tuple generation for one flow */
+void CTupleGeneratorSmart::GenerateTuple(CTupleBase & tuple) {
+ BP_ASSERT(m_was_init);
+ Generate_client_server();
+ m_was_generated = true;
+ m_result_client_port = GenerateOneClientPort(m_client_ip);
+ tuple.setClient(m_result_client_ip);
+ tuple.setServer(m_result_server_ip);
+ tuple.setClientPort(m_result_client_port);
+ tuple.setClientMac(&m_result_client_mac);
+// printf(" alloc %x %d mac:%x,%x\n",m_result_client_ip,m_result_client_port, m_result_client_mac.mac[0], m_result_client_mac.mac[1]);
+}
+
+
+
+
+/*
+ * allocate a base tuple with n extra ports, used by bundles such as SIP
+ * that, for example, need to allocate 3 ports for one client/server pair
+ */
+void CTupleGeneratorSmart::GenerateTupleEx(CTupleBase & tuple,
+ uint8_t extra_ports_no,
+ uint16_t * extra_ports) {
+ GenerateTuple(tuple) ;
+ for (int idx=0;idx<extra_ports_no;idx++) {
+ extra_ports[idx] = GenerateOneClientPort(m_client_ip);
+ }
+}
+
+void CTupleGeneratorSmart::Dump(FILE *fd){
+ fprintf(fd," id: %x, %x:%x - %x \n client:%x - %x, server:%x-%x\n",m_id,m_result_client_ip,m_result_server_ip,m_result_client_port,m_min_client_ip, m_max_client_ip, m_min_server_ip, m_max_server_ip);
+}
+
+
+void delay(int msec);
+
+bool CTupleGeneratorSmart::Create(uint32_t _id,
+ uint32_t thread_id,
+ IP_DIST_t dist,
+ uint32_t min_client,
+ uint32_t max_client,
+ uint32_t min_server,
+ uint32_t max_server,
+ double l_flow,
+ double t_cps,
+ CFlowGenList* fl_list){
+
+ m_active_alloc=0;
+ if (dist>=cdMAX_DIST) {
+ m_client_dist = cdSEQ_DIST;
+ } else {
+ m_client_dist = dist;
+ }
+ m_min_client_ip = min_client;
+ m_max_client_ip = max_client;
+ m_min_server_ip = min_server;
+ m_max_server_ip = max_server;
+ assert(m_max_client_ip>=m_min_client_ip);
+ assert(m_max_server_ip>=m_min_server_ip);
+ assert((m_max_client_ip- m_min_client_ip)<50000);
+
+ uint32_t total_clients = getTotalClients();
+ /*printf("\ntotal_clients:%d, longest_flow:%f sec, total_cps:%f\n",
+ total_clients, l_flow, t_cps);*/
+ m_client.resize(m_max_client_ip-m_min_client_ip+1);
+ if (fl_list == NULL || !is_mac_info_conf(fl_list)) {
+ if (total_clients > ((l_flow*t_cps/MAX_PORT))) {
+ for (int idx=0;idx<m_client.size();idx++)
+ m_client[idx] = new CClientInfoL();
+ } else {
+ for (int idx=0;idx<m_client.size();idx++)
+ m_client[idx] = new CClientInfo();
+ }
+ } else {
+ if (total_clients > ((l_flow*t_cps/MAX_PORT))) {
+ for (int idx=0;idx<m_client.size();idx++) {
+ m_client[idx] = new CClientInfoL(
+ get_mac_addr_by_ip(fl_list, min_client+idx));
+ }
+ } else {
+ for (int idx=0;idx<m_client.size();idx++)
+ m_client[idx] = new CClientInfo(
+ get_mac_addr_by_ip(fl_list, min_client+idx));
+ }
+ }
+ m_was_generated = false;
+ m_thread_id = thread_id;
+
+ m_id = _id;
+ m_was_init=true;
+ m_port_allocation_error=0;
+ return(true);
+}
+
+void CTupleGeneratorSmart::Delete(){
+ m_was_generated = false;
+ m_was_init=false;
+ m_client_dist = cdSEQ_DIST;
+
+ for (int idx=0;idx<m_client.size();idx++){
+ delete m_client[idx];
+ }
+ m_client.clear();
+}
+
+void CTupleGeneratorSmart::Generate_client_server(){
+ if (m_was_generated == false) {
+ /*first time */
+ m_was_generated = true;
+ m_cur_client_ip = m_min_client_ip;
+ m_cur_server_ip = m_min_server_ip;
+ }
+
+ uint32_t client_ip;
+ int i=0;
+ for (;i<100;i++) {
+ if (is_client_available(m_cur_client_ip)) {
+ break;
+ }
+ if (m_cur_client_ip >= m_max_client_ip) {
+ m_cur_client_ip = m_min_client_ip;
+ } else {
+ m_cur_client_ip++;
+ }
+ }
+ if (i>=100) {
+ printf(" ERROR ! sparse mac-ip files are not supported yet !\n");
+ exit(-1);
+ }
+
+ m_client_ip = m_cur_client_ip;
+ CClientInfoBase* client = get_client_by_ip(m_client_ip);
+ memcpy(&m_result_client_mac,
+ client->get_mac_addr(),
+ sizeof(mac_addr_align_t));
+ m_result_client_ip = m_client_ip;
+ m_result_server_ip = m_cur_server_ip ;
+/*
+printf("ip:%x,mac:%x,%x,%x,%x,%x,%x, inused:%x\n",m_client_ip,
+ m_result_client_mac.mac[0],
+ m_result_client_mac.mac[1],
+ m_result_client_mac.mac[2],
+ m_result_client_mac.mac[3],
+ m_result_client_mac.mac[4],
+ m_result_client_mac.mac[5],
+ m_result_client_mac.inused);
+*/
+ m_cur_client_ip ++;
+ m_cur_server_ip ++;
+ if (m_cur_client_ip > m_max_client_ip) {
+ m_cur_client_ip = m_min_client_ip;
+ }
+ if (m_cur_server_ip > m_max_server_ip) {
+ m_cur_server_ip = m_min_server_ip;
+ }
+}
+
+void CTupleGeneratorSmart::return_all_client_ports() {
+ for(int idx=0;idx<m_client.size();++idx) {
+ m_client.at(idx)->return_all_ports();
+ }
+}
+
+
+void CTupleGenYamlInfo::Dump(FILE *fd){
+ fprintf(fd," dist : %d \n",m_client_dist);
+ fprintf(fd," clients : %08x -%08x \n",m_clients_ip_start,m_clients_ip_end);
+ fprintf(fd," servers : %08x -%08x \n",m_servers_ip_start,m_servers_ip_end);
+ fprintf(fd," clients per gb : %d \n",m_number_of_clients_per_gb);
+ fprintf(fd," min clients : %d \n",m_min_clients);
+ fprintf(fd," tcp aging : %d sec \n",m_tcp_aging_sec);
+ fprintf(fd," udp aging : %d sec \n",m_udp_aging_sec);
+}
+
+
+
+void operator >> (const YAML::Node& node, CTupleGenYamlInfo & fi) {
+ std::string tmp;
+
+ try {
+ node["distribution"] >> tmp ;
+ if (tmp == "seq" ) {
+ fi.m_client_dist=cdSEQ_DIST;
+ }else{
+ if (tmp == "random") {
+ fi.m_client_dist=cdRANDOM_DIST;
+ }else{
+ if (tmp == "normal") {
+ fi.m_client_dist=cdNORMAL_DIST;
+ }
+ }
+ }
+ }catch ( const std::exception& e ) {
+ fi.m_client_dist=cdSEQ_DIST;
+ }
+ utl_yaml_read_ip_addr(node,"clients_start",fi.m_clients_ip_start);
+ utl_yaml_read_ip_addr(node,"clients_end",fi.m_clients_ip_end);
+ utl_yaml_read_ip_addr(node,"servers_start",fi.m_servers_ip_start);
+ utl_yaml_read_ip_addr(node,"servers_end",fi.m_servers_ip_end);
+ utl_yaml_read_uint32(node,"clients_per_gb",fi.m_number_of_clients_per_gb);
+ utl_yaml_read_uint32(node,"min_clients",fi.m_min_clients);
+ utl_yaml_read_ip_addr(node,"dual_port_mask",fi.m_dual_interface_mask);
+ utl_yaml_read_uint16(node,"tcp_aging",fi.m_tcp_aging_sec);
+ utl_yaml_read_uint16(node,"udp_aging",fi.m_udp_aging_sec);
+
+}
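
For reference, a generator section that this parser accepts could look as follows (illustration only, not part of the patch; the field names are taken from the reads above, the values and surrounding document structure are placeholders, and dotted-quad strings are assumed to be what utl_yaml_read_ip_addr expects):

    distribution   : "seq"        # one of seq / random / normal
    clients_start  : "16.0.0.1"
    clients_end    : "16.0.0.255"
    servers_start  : "48.0.0.1"
    servers_end    : "48.0.0.255"
    clients_per_gb : 20
    min_clients    : 100
    dual_port_mask : "1.0.0.0"
    tcp_aging      : 2
    udp_aging      : 5
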
+
+bool CTupleGenYamlInfo::is_valid(uint32_t num_threads,bool is_plugins){
+ if ( m_servers_ip_start > m_servers_ip_end ){
+ printf(" ERROR The servers_ip_end must not be smaller than servers_ip_start \n");
+ return(false);
+ }
+
+ if ( m_clients_ip_start > m_clients_ip_end ){
+ printf(" ERROR The clients_ip_end must not be smaller than clients_ip_start \n");
+ return(false);
+ }
+ uint32_t servers= (m_servers_ip_end - m_servers_ip_start +1);
+ if ( servers < num_threads ) {
+ printf(" ERROR The number of servers should be at least number of threads %d \n",num_threads);
+ return (false);
+ }
+
+ uint32_t clients= (m_clients_ip_end - m_clients_ip_start +1);
+ if ( clients < num_threads ) {
+ printf(" ERROR The number of clients should be at least number of threads %d \n",num_threads);
+ return (false);
+ }
+
+ /* defect for plugin */
+ if (is_plugins) {
+ if ( getTotalServers() < getTotalClients() ){
+ printf(" A plugin is configured. In that case, due to a limitation ( defect trex-54 ), \n");
+ printf(" the number of servers must be at least the number of clients \n");
+ return (false);
+ }
+
+ /* trim the number of servers so that it is an exact multiple of the number of clients */
+ uint32_t mul=getTotalServers() / getTotalClients();
+ uint32_t new_server_num=mul*getTotalClients();
+ m_servers_ip_end = m_servers_ip_start + new_server_num-1 ;
+
+ assert(getTotalServers() %getTotalClients() ==0);
+ }
+
+/* if (clients > 100000) {
+       printf(" The number of clients requested is %d maximum supported : %d \n",clients,100000);
+       return (false);
+   }
+*/
+ return (true);
+}
+
+
+/* split the clients and servers by dual_port_id and thread_id:
+   clients are split by thread_id and dual_port_id,
+   servers are split by dual_port_id */
+void split_clients(uint32_t thread_id,
+ uint32_t total_threads,
+ uint32_t dual_port_id,
+ CTupleGenYamlInfo & fi,
+ CClientPortion & portion){
+
+ uint32_t clients_chunk = fi.getTotalClients()/total_threads;
+ // FIXME need to fix this when fixing the server
+ uint32_t servers_chunk = fi.getTotalServers()/total_threads;
+
+ assert(clients_chunk>0);
+ assert(servers_chunk>0);
+
+ uint32_t dual_if_mask=(dual_port_id*fi.m_dual_interface_mask);
+
+ portion.m_client_start = fi.m_clients_ip_start + thread_id*clients_chunk + dual_if_mask;
+ portion.m_client_end = portion.m_client_start + clients_chunk -1 ;
+
+ portion.m_server_start = fi.m_servers_ip_start + thread_id*servers_chunk +dual_if_mask;
+ portion.m_server_end = portion.m_server_start + servers_chunk -1;
+}
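
A worked example of the split above (illustration only, not part of the patch; the pool sizes are assumptions):

    // Assume 128 clients starting at 16.0.0.1, 4 threads, dual_port_mask 1.0.0.0.
    uint32_t clients_start = 0x10000001;           // 16.0.0.1
    uint32_t clients_chunk = 128 / 4;              // 32 clients per thread
    uint32_t thread_id = 2, dual_port_id = 1;
    uint32_t dual_if_mask = dual_port_id * 0x01000000;
    uint32_t start = clients_start + thread_id * clients_chunk + dual_if_mask; // 17.0.0.65
    uint32_t end   = start + clients_chunk - 1;                                // 17.0.0.96
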
diff --git a/src/tuple_gen.h b/src/tuple_gen.h
new file mode 100755
index 00000000..96b9b01a
--- /dev/null
+++ b/src/tuple_gen.h
@@ -0,0 +1,620 @@
+#ifndef TUPLE_GEN_H_
+#define TUPLE_GEN_H_
+/*
+ Wenxian Li
+
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <vector>
+#include <algorithm>
+#include <map>
+#include <string>
+#include <iostream>
+#include <fstream>
+#include <string>
+#include <queue>
+#include "common/c_common.h"
+#include <bitset>
+#include <yaml-cpp/yaml.h>
+
+
+/*
+ * Class that handle the client info
+ */
+#define MAX_PORT (64000)
+#define MIN_PORT (1024)
+#define ILLEGAL_PORT (0)
+
+#define PORT_FREE (0)
+#define PORT_IN_USE (1)
+
+/*FIXME*/
+#define VLAN_SIZE (2)
+
+/* Client distribution */
+
+
+typedef enum {
+ cdSEQ_DIST = 0,
+ cdRANDOM_DIST = 1,
+ cdNORMAL_DIST = 2,
+ cdMAX_DIST = 3
+} IP_DIST_t ;
+
+typedef struct mac_addr_align_ {
+public:
+ uint8_t mac[6];
+ uint8_t inused;
+ uint8_t pad;
+} mac_addr_align_t;
+#define INUSED 0
+#define UNUSED 1
+
+/* For type 1, ports are generated by maintaining a 64K-bit array per client.
+ * In this case a large number of clients cannot be supported, because memory is exhausted.
+ *
+ * So a type 2 tuple generator was developed. It only maintains a 16-bit
+ * "current port" counter per client. Type 2 applies when:
+ * number of clients > (longest_flow*Total_CPS)/64K
+ *
+ * TRex decides which type to use automatically; it is transparent to users.
+ * */
+
+#define TYPE1 0
+#define TYPE2 1
+#define MAX_TYPE 3
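
A small sketch of the selection rule (illustration only, not part of the patch; the same comparison appears in CTupleGeneratorSmart::Create in tuple_gen.cpp, and the traffic figures are assumptions):

    double   longest_flow  = 60.0;       // sec
    double   total_cps     = 200000.0;   // new flows per second
    uint32_t total_clients = 1000;
    // Up to longest_flow*total_cps flows are concurrent, and each client owns at most
    // ~64K ports, so any pool with more than ~187 clients can use the cheap
    // per-client "current port" scheme (CClientInfoL); smaller pools need the
    // full port bitmap (CClientInfo).
    bool use_light_scheme = (total_clients > (uint32_t)(longest_flow * total_cps / MAX_PORT));
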
+
+class CClientInfoBase {
+ public:
+ virtual uint16_t get_new_free_port() = 0;
+ virtual void return_port(uint16_t a) = 0;
+ virtual void return_all_ports() = 0;
+ virtual bool is_client_available() = 0;
+ virtual mac_addr_align_t* get_mac_addr() = 0;
+};
+
+// CClientInfoL: a CClientInfo variant that supports a large number of clients
+class CClientInfoL : public CClientInfoBase {
+ mac_addr_align_t mac;
+ private:
+ uint16_t m_curr_port;
+ public:
+ CClientInfoL(mac_addr_align_t* mac_adr) {
+ m_curr_port = MIN_PORT;
+ if (mac_adr) {
+ mac = *mac_adr;
+ mac.inused = INUSED;
+ } else {
+ memset(&mac, 0, sizeof(mac_addr_align_t));
+ mac.inused = UNUSED;
+ }
+ }
+
+ CClientInfoL() {
+ m_curr_port = MIN_PORT;
+ memset(&mac, 0, sizeof(mac_addr_align_t));
+ mac.inused = INUSED;
+ }
+ mac_addr_align_t* get_mac_addr() {
+ return &mac;
+ }
+ uint16_t get_new_free_port() {
+ if (m_curr_port>MAX_PORT) {
+ m_curr_port = MIN_PORT;
+ }
+ return m_curr_port++;
+ }
+
+ void return_port(uint16_t a) {
+ }
+
+ void return_all_ports() {
+ m_curr_port = MIN_PORT;
+ }
+
+ bool is_client_available() {
+ if (mac.inused == INUSED) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+};
+
+
+class CClientInfo : public CClientInfoBase {
+ private:
+ std::bitset<MAX_PORT> m_bitmap_port;
+ uint16_t m_head_port;
+ mac_addr_align_t mac;
+ friend class CClientInfoUT;
+
+ private:
+ bool is_port_available(uint16_t port) {
+ if (!is_port_legal(port)) {
+ return false; /* an illegal port is never available */
+ }
+ return (m_bitmap_port[port] == PORT_FREE);
+ }
+
+ /*
+ * Return true if the port is legal
+ * false if the port is illegal.
+ */
+ bool is_port_legal(uint16_t port) {
+ if (port>=MAX_PORT || port < MIN_PORT) {
+ return false;
+ }
+ return true;
+ }
+
+ void m_head_port_set(uint16_t head) {
+ if (!is_port_legal(head)) {
+ return;
+ }
+ m_head_port = head;
+ }
+
+ // Try to find next free port
+ void get_next_free_port_by_bit() {
+ uint16_t cnt = 0;
+ if (!is_port_legal(m_head_port)) {
+ m_head_port = MIN_PORT;
+ }
+ while (true) {
+ if (is_port_available(m_head_port)) {
+ return;
+ }
+ cnt++;
+ if (cnt>20) {
+ /*FIXME: need to trigger some alarms?*/
+ return;
+ }
+ m_head_port++;
+ if (m_head_port>=MAX_PORT) {
+ m_head_port = MIN_PORT;
+ }
+ }
+ }
+
+
+ public:
+ CClientInfo() {
+ m_head_port = MIN_PORT;
+ m_bitmap_port.reset();
+ memset(&mac, 0, sizeof(mac_addr_align_t));
+ mac.inused = INUSED;
+ }
+ CClientInfo(mac_addr_align_t* mac_info) {
+ m_head_port = MIN_PORT;
+ m_bitmap_port.reset();
+ if (mac_info) {
+ mac = *mac_info;
+ mac.inused = INUSED;
+ } else {
+ memset(&mac, 0, sizeof(mac_addr_align_t));
+ mac.inused = UNUSED;
+ }
+
+ }
+
+ mac_addr_align_t* get_mac_addr() {
+ return &mac;
+ }
+
+ uint16_t get_new_free_port() {
+ uint16_t r;
+
+ get_next_free_port_by_bit();
+ if (!is_port_available(m_head_port)) {
+ m_head_port = MIN_PORT;
+ return ILLEGAL_PORT;
+ }
+
+ m_bitmap_port[m_head_port] = PORT_IN_USE;
+ r = m_head_port;
+ m_head_port++;
+ if (m_head_port>MAX_PORT) {
+ m_head_port = MIN_PORT;
+ }
+ return r;
+ }
+
+ void return_port(uint16_t a) {
+ assert(is_port_legal(a));
+ assert(m_bitmap_port[a]==PORT_IN_USE);
+ m_bitmap_port[a] = PORT_FREE;
+ }
+
+ void return_all_ports() {
+ m_head_port = MIN_PORT;
+ m_bitmap_port.reset();
+ }
+ bool is_client_available() {
+ if (mac.inused == INUSED) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+};
+
+class CTupleBase {
+public:
+ uint32_t getClient() {
+ return m_client_ip;
+ }
+ void setClient(uint32_t ip) {
+ m_client_ip = ip;
+ }
+ uint32_t getServer(){
+ return m_server_ip;
+ }
+ void setServer(uint32_t ip) {
+ m_server_ip = ip;
+ }
+ uint16_t getClientPort() {
+ return m_client_port;
+ }
+ void setClientPort(uint16_t port) {
+ m_client_port = port;
+ }
+ mac_addr_align_t* getClientMac() {
+ return &m_client_mac;
+ }
+ void setClientMac(mac_addr_align_t* mac_info) {
+ memcpy(&m_client_mac, mac_info, sizeof(mac_addr_align_t));
+ }
+private:
+ uint32_t m_client_ip;
+ uint32_t m_server_ip;
+ uint16_t m_client_port;
+ uint16_t pad1;
+ uint32_t pad2;
+ mac_addr_align_t m_client_mac;
+ uint32_t pad3[3];
+};
+
+
+
+class CFlowGenList;
+mac_addr_align_t * get_mac_addr_by_ip(CFlowGenList *fl_list,
+ uint32_t ip);
+bool is_mac_info_conf(CFlowGenList *fl_list);
+
+/* generate for each template */
+class CTupleGeneratorSmart {
+public:
+ /* simple tuple generation for one flow */
+ void GenerateTuple(CTupleBase & tuple);
+ /*
+ * allocate a base tuple with n extra ports, used by bundles such as SIP
+ * that, for example, need to allocate 3 ports for one client/server pair
+ */
+ void GenerateTupleEx(CTupleBase & tuple,uint8_t extra_ports_no,
+ uint16_t * extra_ports);
+
+ /* free client port */
+ void FreePort(uint32_t c_ip,
+ uint16_t port){
+ //printf(" free %x %d \n",c_ip,port);
+ m_active_alloc--;
+ CClientInfoBase* client = get_client_by_ip(c_ip);
+ client->return_port(port);
+ }
+
+ /* return true if this type of generator requires freeing the resource */
+ bool IsFreePortRequired(void){
+ return(true);
+ }
+
+ /* return the number of active sockets */
+ uint32_t ActiveSockets(void){
+ return (m_active_alloc);
+ }
+
+ uint32_t getTotalClients(void){
+ return (m_max_client_ip -m_min_client_ip +1);
+ }
+
+ uint32_t getTotalServers(void){
+ return (m_max_server_ip -m_min_server_ip +1);
+ }
+
+ uint32_t SocketsPerClient(void){
+ return (MAX_PORT -MIN_PORT+1);
+ }
+
+ uint32_t MaxSockets(void){
+ return (SocketsPerClient() * getTotalClients());
+ }
+
+
+public:
+ CTupleGeneratorSmart(){
+ m_was_init=false;
+ m_client_dist = cdSEQ_DIST;
+ }
+ bool Create(uint32_t _id,
+ uint32_t thread_id,
+ IP_DIST_t dist,
+ uint32_t min_client,
+ uint32_t max_client,
+ uint32_t min_server,
+ uint32_t max_server,
+ double longest_flow,
+ double total_cps,
+ CFlowGenList * fl_list = NULL);
+
+ void Delete();
+
+ void Dump(FILE *fd);
+
+ void SetClientDist(IP_DIST_t dist) {
+ m_client_dist = dist;
+ }
+
+ IP_DIST_t GetClientDist() {
+ return (m_client_dist);
+ }
+
+ inline uint32_t GetThreadId(){
+ return ( m_thread_id );
+ }
+
+ bool is_valid_client(uint32_t c_ip){
+ if ((c_ip>=m_min_client_ip) && (c_ip<=m_max_client_ip)) {
+ return(true);
+ }
+ printf("invalid client ip:%x, min_ip:%x, max_ip:%x\n",
+ c_ip, m_min_client_ip, m_max_client_ip);
+ return(false);
+ }
+
+ CClientInfoBase* get_client_by_ip(uint32_t c_ip){
+ BP_ASSERT( is_valid_client(c_ip) );
+ return m_client.at(c_ip-m_min_client_ip);
+ }
+
+ bool is_client_available (uint32_t c_ip) {
+ CClientInfoBase* client = get_client_by_ip(c_ip);
+ if (client) {
+ return client->is_client_available();
+ }
+ return false;
+ }
+
+ uint16_t GenerateOneClientPort(uint32_t c_ip) {
+ CClientInfoBase* client = get_client_by_ip(c_ip);
+ uint16_t port;
+ port = client->get_new_free_port();
+
+ //printf(" alloc extra %x %d \n",c_ip,port);
+ if (port==ILLEGAL_PORT) {
+ m_port_allocation_error++;
+ }
+ m_active_alloc++;
+ return (port);
+ }
+
+ uint32_t getErrorAllocationCounter(){
+ return ( m_port_allocation_error );
+ }
+
+private:
+ void return_all_client_ports();
+
+
+ void Generate_client_server();
+
+
+private:
+ std::vector<CClientInfoBase*> m_client;
+
+ uint32_t m_id;
+ bool m_was_generated;
+ bool m_was_init;
+
+ IP_DIST_t m_client_dist;
+
+ uint32_t m_cur_server_ip;
+ uint32_t m_cur_client_ip;
+ // client ip range; the allocation cursor wraps from max back to min
+ uint32_t m_min_client_ip;
+ uint32_t m_max_client_ip;
+
+ // min max server ip ( random )
+ uint32_t m_min_server_ip;
+ uint32_t m_max_server_ip;
+
+ uint32_t m_thread_id;
+
+ // result of the generator FIXME need to clean this
+ uint32_t m_client_ip;
+ uint32_t m_result_client_ip;
+ uint32_t m_result_server_ip;
+ uint32_t m_active_alloc;
+ mac_addr_align_t m_result_client_mac;
+ uint16_t m_result_client_port;
+
+ uint32_t m_port_allocation_error;
+
+};
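
A per-flow usage sketch (illustration only, not part of the patch; gen is assumed to be a CTupleGeneratorSmart on which Create() has already been called):

    CTupleBase t;
    gen.GenerateTuple(t);                 // picks client/server IPs and a client port
    /* ... build the flow from t.getClient(), t.getServer(), t.getClientPort() ... */
    if (gen.IsFreePortRequired()) {
        gen.FreePort(t.getClient(), t.getClientPort());   // when the flow terminates
    }
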
+
+
+class CTupleTemplateGeneratorSmart {
+public:
+ /* simple tuple generation for one flow */
+ void GenerateTuple(CTupleBase & tuple){
+ if (m_w==1) {
+ /* new client each tuple generate */
+ m_gen->GenerateTuple(tuple);
+ m_cache_client_ip=tuple.getClient();
+ }else{
+ if (m_cnt==0) {
+ m_gen->GenerateTuple(tuple);
+ m_cache_client_ip = tuple.getClient();
+ m_cache_server_ip = tuple.getServer();
+ }else{
+ tuple.setServer(m_cache_server_ip);
+ tuple.setClient(m_cache_client_ip);
+ tuple.setClientPort( m_gen->GenerateOneClientPort(m_cache_client_ip));
+ }
+ m_cnt++;
+ if (m_cnt>=m_w) {
+ m_cnt=0;
+ }
+ }
+ if ( m_is_single_server ) {
+ tuple.setServer(m_server_ip);
+ }
+ }
+
+ uint16_t GenerateOneSourcePort(){
+ return ( m_gen->GenerateOneClientPort(m_cache_client_ip) );
+ }
+
+ inline uint32_t GetThreadId(){
+ return ( m_gen->GetThreadId() );
+ }
+
+public:
+
+ bool Create( CTupleGeneratorSmart * gen
+ ){
+ m_gen=gen;
+ m_is_single_server=false;
+ m_server_ip=0;
+ SetW(1);
+ return (true);
+ }
+
+ void Delete(){
+ }
+public:
+ void SetW(uint16_t w){
+ m_w=w;
+ m_cnt=0;
+ }
+
+ uint16_t getW(){
+ return (m_w);
+ }
+
+
+ void SetSingleServer(bool is_single,
+ uint32_t server_ip,
+ uint32_t dual_port_index,
+ uint32_t dual_mask){
+ m_is_single_server = is_single;
+ m_server_ip = server_ip+dual_mask*dual_port_index;
+ }
+ bool IsSingleServer(){
+ return (m_is_single_server);
+ }
+
+private:
+ CTupleGeneratorSmart * m_gen;
+ bool m_is_single_server;
+ uint16_t m_w;
+ uint16_t m_cnt;
+ uint32_t m_server_ip;
+ uint32_t m_cache_client_ip;
+ uint32_t m_cache_server_ip;
+
+};
+
+
+/* YAML of generator */
+#if 0
+  - client_distribution : 'seq'    - sequential (e.g. client 0,1,2,3,4)
+                          'random' - random from the pool
+                          'normal' - need to give average and dev -- second phase
+
+ - client_pool_mask : 10.0.0.0-20.0.0.0
+ - server_pool_mask : 70.0.0.0-70.0.20.0
+ - number_of_clients_per_gb : 20
+  - dual_interface_mask : 1.0.0.0 // each dual-port pair adds this offset to the client pool
+#endif
+
+struct CTupleGenYamlInfo {
+ CTupleGenYamlInfo(){
+ m_client_dist=cdSEQ_DIST;
+ m_clients_ip_start =0x11000000;
+ m_clients_ip_end =0x21000000;
+
+ m_servers_ip_start = 0x30000000;
+ m_servers_ip_end = 0x40000000;
+ m_number_of_clients_per_gb=10;
+ m_min_clients=100;
+ m_dual_interface_mask=0x10000000;
+ m_tcp_aging_sec=2;
+ m_udp_aging_sec=5;
+ }
+
+ IP_DIST_t m_client_dist;
+ uint32_t m_clients_ip_start;
+ uint32_t m_clients_ip_end;
+
+ uint32_t m_servers_ip_start;
+ uint32_t m_servers_ip_end;
+ uint32_t m_number_of_clients_per_gb;
+ uint32_t m_min_clients;
+ uint32_t m_dual_interface_mask;
+ uint16_t m_tcp_aging_sec; /* 0 means there is no aging */
+ uint16_t m_udp_aging_sec;
+
+public:
+ void Dump(FILE *fd);
+ uint32_t getTotalClients(void){
+ return ( m_clients_ip_end-m_clients_ip_start+1);
+ }
+ uint32_t getTotalServers(void){
+ return ( m_servers_ip_end-m_servers_ip_start+1);
+ }
+
+
+ bool is_valid(uint32_t num_threads,bool is_plugins);
+};
+
+void operator >> (const YAML::Node& node, CTupleGenYamlInfo & fi) ;
+
+
+struct CClientPortion {
+ uint32_t m_client_start;
+ uint32_t m_client_end;
+ uint32_t m_server_start;
+ uint32_t m_server_end;
+};
+
+void split_clients(uint32_t thread_id,
+ uint32_t total_threads,
+ uint32_t dual_port_id,
+ CTupleGenYamlInfo & fi,
+ CClientPortion & portion);
+
+
+
+#endif //TUPLE_GEN_H_
diff --git a/src/utl_cpuu.cpp b/src/utl_cpuu.cpp
new file mode 100755
index 00000000..0701b845
--- /dev/null
+++ b/src/utl_cpuu.cpp
@@ -0,0 +1,57 @@
+#include "utl_cpuu.h"
+#include <stdio.h>
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+
+void CCpuUtlCp::Create(CCpuUtlDp * cdp){
+ m_dpcpu=cdp;
+ m_cpu_util=0.0;
+ m_last_total_cycles=m_dpcpu->get_total_cycles();
+ m_last_work_cycles =m_dpcpu->get_work_cycles();
+}
+
+void CCpuUtlCp::Delete(){
+
+}
+
+
+void CCpuUtlCp::Update(){
+ uint64_t t=m_dpcpu->get_total_cycles();
+ uint64_t w=m_dpcpu->get_work_cycles();
+ uint32_t acc_total_cycles = (uint32_t)(t - m_last_total_cycles);
+ uint32_t acc_work_cycles = (uint32_t)(w - m_last_work_cycles);
+
+ m_last_total_cycles = t ;
+ m_last_work_cycles = w;
+
+ double window_cpu_u = ((double)acc_work_cycles/(double)acc_total_cycles);
+
+ /* LPF*/
+ m_cpu_util = (m_cpu_util*0.75)+(window_cpu_u*0.25);
+}
+
+/* return cpu % */
+double CCpuUtlCp::GetVal(){
+ return (m_cpu_util*100);
+}
+
diff --git a/src/utl_cpuu.h b/src/utl_cpuu.h
new file mode 100755
index 00000000..e7bb50bb
--- /dev/null
+++ b/src/utl_cpuu.h
@@ -0,0 +1,76 @@
+#ifndef UTL_CPUU_H
+#define UTL_CPUU_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <stdint.h>
+#include "os_time.h"
+#include "mbuf.h"
+
+class CCpuUtlDp {
+
+public:
+ CCpuUtlDp(){
+ m_total_cycles=0;
+ m_data=0;
+ }
+ inline void start_work(){
+ m_data=os_get_hr_tick_64();
+ }
+ inline void revert(){
+ }
+ inline void commit(){
+ m_total_cycles+=(os_get_hr_tick_64()-m_data);
+ }
+ inline uint64_t get_total_cycles(void){
+ return ( os_get_hr_tick_64());
+ }
+
+ inline uint64_t get_work_cycles(void){
+ return ( m_total_cycles );
+ }
+
+private:
+ uint64_t m_total_cycles;
+ uint64_t m_data;
+
+} __rte_cache_aligned;
+
+class CCpuUtlCp {
+public:
+ void Create(CCpuUtlDp * cdp);
+ void Delete();
+ /* should be called each 1 sec */
+ void Update();
+ /* return cpu % */
+ double GetVal();
+
+private:
+ CCpuUtlDp * m_dpcpu;
+ double m_cpu_util;
+ uint64_t m_last_total_cycles;
+ uint64_t m_last_work_cycles;
+
+
+ // add filter
+};
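
A minimal sketch of how the two sides cooperate (illustration only, not part of the patch): the data path brackets each burst of real work with start_work()/commit(), and the control path samples once per second.

    CCpuUtlDp dp;
    CCpuUtlCp cp;
    cp.Create(&dp);

    // data-path loop, per iteration:
    //   dp.start_work();
    //   ... handle a burst of packets ...
    //   dp.commit();                 // or dp.revert() if nothing was done
    //
    // control path, every 1 sec:
    //   cp.Update();
    //   printf("cpu %.1f%%\n", cp.GetVal());
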
+
+#endif
diff --git a/src/utl_jitter.h b/src/utl_jitter.h
new file mode 100755
index 00000000..75c168ef
--- /dev/null
+++ b/src/utl_jitter.h
@@ -0,0 +1,86 @@
+#ifndef UTL_JITTER_H
+#define UTL_JITTER_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <stdint.h>
+
+class CJitter {
+
+public:
+ CJitter(){
+ reset();
+ }
+
+ void reset(){
+ m_old_transit=0.0;
+ m_jitter=0.0;
+ }
+
+ double calc(double transit){
+ double d = transit - m_old_transit;
+ m_old_transit = transit;
+ if ( d < 0){
+ d = -d;
+ }
+ m_jitter +=(1.0/16.0) * ((double)d - m_jitter);
+ return(m_jitter);
+ }
+ double get_jitter(){
+ return (m_jitter);
+ }
+
+private:
+ double m_old_transit;
+ double m_jitter;
+};
+
+class CJitterUint {
+
+public:
+ CJitterUint(){
+ reset();
+ }
+
+ void reset(){
+ m_old_transit=0;
+ m_jitter=0;
+ }
+
+ void calc(uint32_t transit){
+ int32_t d = transit - m_old_transit;
+ m_old_transit = transit;
+ if ( d < 0){
+ d = -d;
+ }
+ m_jitter += d - ((m_jitter + 8)>>4);
+ }
+ uint32_t get_jitter(){
+ return (m_jitter>>4);
+ }
+
+private:
+ int32_t m_old_transit; /* transit time in usec; expected to stay under ~30 msec */
+ int32_t m_jitter;
+};
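
A tiny sketch of how CJitter is fed (illustration only, not part of the patch); the 1/16 gain makes it behave like the RFC 3550-style interarrival-jitter estimator, converging toward the mean absolute change in transit time:

    CJitter j;
    double transit[] = {0.010, 0.012, 0.011, 0.015, 0.011};   // seconds
    for (double t : transit) {
        j.calc(t);            // note: the first sample is diffed against the initial 0.0
    }
    printf("jitter estimate: %.1f usec\n", j.get_jitter() * 1e6);
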
+
+#endif
diff --git a/src/utl_json.cpp b/src/utl_json.cpp
new file mode 100755
index 00000000..990346f5
--- /dev/null
+++ b/src/utl_json.cpp
@@ -0,0 +1,59 @@
+#include "utl_json.h"
+#include <stdio.h>
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+
+std::string add_json(std::string name, uint32_t counter,bool last){
+ char buff[200];
+ sprintf(buff,"\"%s\":%u",name.c_str(),counter);
+ std::string s= std::string(buff);
+ if (!last) {
+ s+=",";
+ }
+ return (s);
+}
+
+std::string add_json(std::string name, uint64_t counter,bool last){
+ char buff[200];
+ sprintf(buff,"\"%s\":%llu",name.c_str(),(unsigned long long)counter);
+ std::string s= std::string(buff);
+ if (!last) {
+ s+=",";
+ }
+ return (s);
+}
+
+std::string add_json(std::string name, double counter,bool last){
+ char buff[200];
+ sprintf(buff,"\"%s\":%.1f",name.c_str(),counter);
+ std::string s= std::string(buff);
+ if (!last) {
+ s+=",";
+ }
+ return (s);
+}
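
A short sketch of composing a fragment the same way the dump_json() methods in this patch do (illustration only, not part of the patch):

    std::string json = "{";
    json += add_json("cnt", (uint64_t)12345);
    json += add_json("max_usec", (uint32_t)870);
    json += add_json("s_avg", 35.2, true);    // last=true: no trailing comma
    json += "}";
    // -> {"cnt":12345,"max_usec":870,"s_avg":35.2}
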
+
+
+
+
+
diff --git a/src/utl_json.h b/src/utl_json.h
new file mode 100755
index 00000000..45b24193
--- /dev/null
+++ b/src/utl_json.h
@@ -0,0 +1,35 @@
+#ifndef UTL_JSON_H
+#define UTL_JSON_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#include <stdint.h>
+#include <string>
+
+std::string add_json(std::string name, uint32_t counter,bool last=false);
+
+std::string add_json(std::string name, uint64_t counter,bool last=false);
+
+std::string add_json(std::string name, double counter,bool last=false);
+
+
+#endif
+
+
diff --git a/src/utl_term_io.cpp b/src/utl_term_io.cpp
new file mode 100755
index 00000000..8e561188
--- /dev/null
+++ b/src/utl_term_io.cpp
@@ -0,0 +1,98 @@
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+#include "utl_term_io.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/select.h>
+#include <termios.h>
+#include <inttypes.h>
+#include <fcntl.h>
+
+static struct termios oldterm;
+struct termios orig_termios;
+
+static void exit_handler1(void){
+ tcsetattr(fileno(stdin), TCSANOW, &oldterm);
+}
+
+static void save_termio(void){
+ tcgetattr(0, &oldterm);
+}
+
+
+void reset_terminal_mode(void){
+ tcsetattr(0, TCSANOW, &orig_termios);
+}
+
+static void set_conio_terminal_mode(void){
+
+    struct termios term;
+
+    tcgetattr(0, &term);
+ term.c_lflag &= ~(ICANON | ECHO | ISIG);
+ tcsetattr(0, TCSANOW, &term);
+ setbuf(stdin, NULL);
+}
+
+static int kbhit(void) {
+ struct timeval tv = { 0L, 0L };
+ fd_set fds;
+ FD_ZERO(&fds);
+ FD_SET(0, &fds);
+ return select(1, &fds, NULL, NULL, &tv);
+}
+
+static int getch(void){
+ int r;
+ unsigned char c;
+ if ((r = read(0, &c, sizeof(c))) < 0) {
+ return r;
+ } else {
+ return c;
+ }
+}
+
+
+int utl_termio_init(){
+ atexit(exit_handler1);
+ save_termio();
+ set_conio_terminal_mode();
+ return (0);
+}
+
+
+int utl_termio_try_getch(void){
+ if ( kbhit() ){
+ return (getch());
+ }else{
+ return (0);
+ }
+}
+
+int utl_termio_reset(void){
+ tcsetattr(fileno(stdin), TCSANOW, &oldterm);
+ return (0);
+}
+
+
diff --git a/src/utl_term_io.h b/src/utl_term_io.h
new file mode 100755
index 00000000..89a208ed
--- /dev/null
+++ b/src/utl_term_io.h
@@ -0,0 +1,33 @@
+#ifndef UTL_TERM_IO_H
+#define UTL_TERM_IO_H
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+int utl_termio_init();
+
+/* return 0 if there is no char pending */
+int utl_termio_try_getch(void);
+
+int utl_termio_reset(void);
+
+
+
+#endif
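
The helpers above wrap a raw, non-blocking console: utl_termio_init() saves
the current termios and disables canonical mode and echo, utl_termio_try_getch()
polls for a pending key, and utl_termio_reset() restores the saved settings.
A minimal polling loop (the 'q'-to-quit convention here is illustrative):

    #include "utl_term_io.h"
    #include <unistd.h>

    void poll_keyboard(void) {
        utl_termio_init();
        while (true) {
            int ch = utl_termio_try_getch();   // 0 when no key is pending
            if (ch == 'q') {
                break;
            }
            usleep(10 * 1000);                 // avoid busy-spinning
        }
        utl_termio_reset();
    }
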
diff --git a/src/utl_yaml.cpp b/src/utl_yaml.cpp
new file mode 100755
index 00000000..237e85af
--- /dev/null
+++ b/src/utl_yaml.cpp
@@ -0,0 +1,124 @@
+#include "utl_yaml.h"
+#include <common/Network/Packet/CPktCmn.h>
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+
+#define INADDRSZ 4
+
+static int my_inet_pton4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ if ((pch = strchr(digits, ch)) != NULL) {
+ unsigned int _new = *tp * 10 + (pch - digits);
+
+ if (_new > 255)
+ return (0);
+ if (! saw_digit) {
+ if (++octets > 4)
+ return (0);
+ saw_digit = 1;
+ }
+ *tp = (unsigned char)_new;
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return (0);
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return (0);
+ }
+ if (octets < 4)
+ return (0);
+
+ memcpy(dst, tmp, INADDRSZ);
+ return (1);
+}
+
+
+bool utl_yaml_read_ip_addr(const YAML::Node& node,
+ std::string name,
+ uint32_t & val){
+ std::string tmp;
+ uint32_t ip;
+ bool res=false;
+ try {
+ node[name] >> tmp ;
+ if ( my_inet_pton4((char *)tmp.c_str(), (unsigned char *)&ip) ){
+ val=PKT_NTOHL(ip);
+ res=true;
+ }
+ }catch ( const std::exception& e ) {
+ }
+ return (res);
+}
+
+bool utl_yaml_read_uint32(const YAML::Node& node,
+ std::string name,
+ uint32_t & val){
+ bool res=false;
+
+ try {
+ node[name] >> val ;
+ res=true;
+ }catch ( const std::exception& e ) {
+ }
+ return (res);
+
+}
+
+bool utl_yaml_read_uint16(const YAML::Node& node,
+ std::string name,
+ uint16_t & val){
+ uint32_t val_tmp;
+ bool res=false;
+
+ try {
+ node[name] >> val_tmp ;
+ val = (uint16_t)val_tmp;
+ res=true;
+ }catch ( const std::exception& e ) {
+ }
+    return (res);
+}
+
+bool utl_yaml_read_bool(const YAML::Node& node,
+ std::string name,
+ bool & val){
+ bool res=false;
+ try {
+ node[name] >> val ;
+ res=true;
+ }catch ( const std::exception& e ) {
+ }
+ return( res);
+}
+
+
diff --git a/src/utl_yaml.h b/src/utl_yaml.h
new file mode 100755
index 00000000..71655488
--- /dev/null
+++ b/src/utl_yaml.h
@@ -0,0 +1,46 @@
+#ifndef UTL_YAML_
+#define UTL_YAML_
+/*
+ Hanoh Haim
+ Cisco Systems, Inc.
+*/
+
+/*
+Copyright (c) 2015-2015 Cisco Systems, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+#include <stdint.h>
+#include <yaml-cpp/yaml.h>
+
+
+bool utl_yaml_read_ip_addr(const YAML::Node& node,
+ std::string name,
+ uint32_t & val
+ );
+
+bool utl_yaml_read_uint32(const YAML::Node& node,
+ std::string name,
+ uint32_t & val);
+bool utl_yaml_read_uint16(const YAML::Node& node,
+ std::string name,
+ uint16_t & val);
+
+bool utl_yaml_read_bool(const YAML::Node& node,
+ std::string name,
+ bool & val);
+
+
+#endif
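
Each reader above returns true only when the key exists and parses, so callers
can keep their defaults on failure. A sketch of reading a few fields from a
node (the key names are hypothetical):

    #include "utl_yaml.h"

    void parse_port_node(const YAML::Node& node) {
        uint32_t dst_ip  = 0;
        uint16_t vlan    = 0;
        bool     enabled = false;
        utl_yaml_read_ip_addr(node, "dest_ip", dst_ip);  // "10.0.0.1" -> host-order uint32
        utl_yaml_read_uint16(node, "vlan", vlan);
        utl_yaml_read_bool(node, "enable", enabled);
    }
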
diff --git a/src/zmq/include/zmq.h b/src/zmq/include/zmq.h
new file mode 100755
index 00000000..f7b10db6
--- /dev/null
+++ b/src/zmq/include/zmq.h
@@ -0,0 +1,416 @@
+/*
+ Copyright (c) 2007-2013 Contributors as noted in the AUTHORS file
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ *************************************************************************
+ NOTE to contributors. This file comprises the principal public contract
+ for ZeroMQ API users (along with zmq_utils.h). Any change to this file
+ supplied in a stable release SHOULD not break existing applications.
+ In practice this means that the value of constants must not change, and
+ that old values may not be reused for new constants.
+ *************************************************************************
+*/
+
+#ifndef __ZMQ_H_INCLUDED__
+#define __ZMQ_H_INCLUDED__
+
+/* Version macros for compile-time API version detection */
+#define ZMQ_VERSION_MAJOR 4
+#define ZMQ_VERSION_MINOR 0
+#define ZMQ_VERSION_PATCH 3
+
+#define ZMQ_MAKE_VERSION(major, minor, patch) \
+ ((major) * 10000 + (minor) * 100 + (patch))
+#define ZMQ_VERSION \
+ ZMQ_MAKE_VERSION(ZMQ_VERSION_MAJOR, ZMQ_VERSION_MINOR, ZMQ_VERSION_PATCH)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !defined _WIN32_WCE
+#include <errno.h>
+#endif
+#include <stddef.h>
+#include <stdio.h>
+#if defined _WIN32
+#include <winsock2.h>
+#endif
+
+/* Handle DSO symbol visibility */
+#if defined _WIN32
+# if defined ZMQ_STATIC
+# define ZMQ_EXPORT
+# elif defined DLL_EXPORT
+# define ZMQ_EXPORT __declspec(dllexport)
+# else
+# define ZMQ_EXPORT __declspec(dllimport)
+# endif
+#else
+# if defined __SUNPRO_C || defined __SUNPRO_CC
+# define ZMQ_EXPORT __global
+# elif (defined __GNUC__ && __GNUC__ >= 4) || defined __INTEL_COMPILER
+# define ZMQ_EXPORT __attribute__ ((visibility("default")))
+# else
+# define ZMQ_EXPORT
+# endif
+#endif
+
+/* Define integer types needed for event interface */
+#if defined ZMQ_HAVE_SOLARIS || defined ZMQ_HAVE_OPENVMS
+# include <inttypes.h>
+#elif defined _MSC_VER && _MSC_VER < 1600
+# ifndef int32_t
+typedef __int32 int32_t;
+# endif
+# ifndef uint16_t
+typedef unsigned __int16 uint16_t;
+# endif
+# ifndef uint8_t
+typedef unsigned __int8 uint8_t;
+# endif
+#else
+# include <stdint.h>
+#endif
+
+
+/******************************************************************************/
+/* 0MQ errors. */
+/******************************************************************************/
+
+/* A number random enough not to collide with different errno ranges on */
+/* different OSes. The assumption is that error_t is at least 32-bit type. */
+#define ZMQ_HAUSNUMERO 156384712
+
+/* On Windows platform some of the standard POSIX errnos are not defined. */
+#ifndef ENOTSUP
+#define ENOTSUP (ZMQ_HAUSNUMERO + 1)
+#endif
+#ifndef EPROTONOSUPPORT
+#define EPROTONOSUPPORT (ZMQ_HAUSNUMERO + 2)
+#endif
+#ifndef ENOBUFS
+#define ENOBUFS (ZMQ_HAUSNUMERO + 3)
+#endif
+#ifndef ENETDOWN
+#define ENETDOWN (ZMQ_HAUSNUMERO + 4)
+#endif
+#ifndef EADDRINUSE
+#define EADDRINUSE (ZMQ_HAUSNUMERO + 5)
+#endif
+#ifndef EADDRNOTAVAIL
+#define EADDRNOTAVAIL (ZMQ_HAUSNUMERO + 6)
+#endif
+#ifndef ECONNREFUSED
+#define ECONNREFUSED (ZMQ_HAUSNUMERO + 7)
+#endif
+#ifndef EINPROGRESS
+#define EINPROGRESS (ZMQ_HAUSNUMERO + 8)
+#endif
+#ifndef ENOTSOCK
+#define ENOTSOCK (ZMQ_HAUSNUMERO + 9)
+#endif
+#ifndef EMSGSIZE
+#define EMSGSIZE (ZMQ_HAUSNUMERO + 10)
+#endif
+#ifndef EAFNOSUPPORT
+#define EAFNOSUPPORT (ZMQ_HAUSNUMERO + 11)
+#endif
+#ifndef ENETUNREACH
+#define ENETUNREACH (ZMQ_HAUSNUMERO + 12)
+#endif
+#ifndef ECONNABORTED
+#define ECONNABORTED (ZMQ_HAUSNUMERO + 13)
+#endif
+#ifndef ECONNRESET
+#define ECONNRESET (ZMQ_HAUSNUMERO + 14)
+#endif
+#ifndef ENOTCONN
+#define ENOTCONN (ZMQ_HAUSNUMERO + 15)
+#endif
+#ifndef ETIMEDOUT
+#define ETIMEDOUT (ZMQ_HAUSNUMERO + 16)
+#endif
+#ifndef EHOSTUNREACH
+#define EHOSTUNREACH (ZMQ_HAUSNUMERO + 17)
+#endif
+#ifndef ENETRESET
+#define ENETRESET (ZMQ_HAUSNUMERO + 18)
+#endif
+
+/* Native 0MQ error codes. */
+#define EFSM (ZMQ_HAUSNUMERO + 51)
+#define ENOCOMPATPROTO (ZMQ_HAUSNUMERO + 52)
+#define ETERM (ZMQ_HAUSNUMERO + 53)
+#define EMTHREAD (ZMQ_HAUSNUMERO + 54)
+
+/* Run-time API version detection */
+ZMQ_EXPORT void zmq_version (int *major, int *minor, int *patch);
+
+/* This function retrieves the errno as it is known to 0MQ library. The goal */
+/* of this function is to make the code 100% portable, including where 0MQ */
+/* compiled with certain CRT library (on Windows) is linked to an */
+/* application that uses different CRT library. */
+ZMQ_EXPORT int zmq_errno (void);
+
+/* Resolves system errors and 0MQ errors to human-readable string. */
+ZMQ_EXPORT const char *zmq_strerror (int errnum);
+
+/******************************************************************************/
+/* 0MQ infrastructure (a.k.a. context) initialisation & termination. */
+/******************************************************************************/
+
+/* New API */
+/* Context options */
+#define ZMQ_IO_THREADS 1
+#define ZMQ_MAX_SOCKETS 2
+
+/* Default for new contexts */
+#define ZMQ_IO_THREADS_DFLT 1
+#define ZMQ_MAX_SOCKETS_DFLT 1023
+
+ZMQ_EXPORT void *zmq_ctx_new (void);
+ZMQ_EXPORT int zmq_ctx_term (void *context);
+ZMQ_EXPORT int zmq_ctx_shutdown (void *ctx_);
+ZMQ_EXPORT int zmq_ctx_set (void *context, int option, int optval);
+ZMQ_EXPORT int zmq_ctx_get (void *context, int option);
+
+/* Old (legacy) API */
+ZMQ_EXPORT void *zmq_init (int io_threads);
+ZMQ_EXPORT int zmq_term (void *context);
+ZMQ_EXPORT int zmq_ctx_destroy (void *context);
+
+
+/******************************************************************************/
+/* 0MQ message definition. */
+/******************************************************************************/
+
+typedef struct zmq_msg_t {unsigned char _ [32];} zmq_msg_t;
+
+typedef void (zmq_free_fn) (void *data, void *hint);
+
+ZMQ_EXPORT int zmq_msg_init (zmq_msg_t *msg);
+ZMQ_EXPORT int zmq_msg_init_size (zmq_msg_t *msg, size_t size);
+ZMQ_EXPORT int zmq_msg_init_data (zmq_msg_t *msg, void *data,
+ size_t size, zmq_free_fn *ffn, void *hint);
+ZMQ_EXPORT int zmq_msg_send (zmq_msg_t *msg, void *s, int flags);
+ZMQ_EXPORT int zmq_msg_recv (zmq_msg_t *msg, void *s, int flags);
+ZMQ_EXPORT int zmq_msg_close (zmq_msg_t *msg);
+ZMQ_EXPORT int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);
+ZMQ_EXPORT int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);
+ZMQ_EXPORT void *zmq_msg_data (zmq_msg_t *msg);
+ZMQ_EXPORT size_t zmq_msg_size (zmq_msg_t *msg);
+ZMQ_EXPORT int zmq_msg_more (zmq_msg_t *msg);
+ZMQ_EXPORT int zmq_msg_get (zmq_msg_t *msg, int option);
+ZMQ_EXPORT int zmq_msg_set (zmq_msg_t *msg, int option, int optval);
+
+
+/******************************************************************************/
+/* 0MQ socket definition. */
+/******************************************************************************/
+
+/* Socket types. */
+#define ZMQ_PAIR 0
+#define ZMQ_PUB 1
+#define ZMQ_SUB 2
+#define ZMQ_REQ 3
+#define ZMQ_REP 4
+#define ZMQ_DEALER 5
+#define ZMQ_ROUTER 6
+#define ZMQ_PULL 7
+#define ZMQ_PUSH 8
+#define ZMQ_XPUB 9
+#define ZMQ_XSUB 10
+#define ZMQ_STREAM 11
+
+/* Deprecated aliases */
+#define ZMQ_XREQ ZMQ_DEALER
+#define ZMQ_XREP ZMQ_ROUTER
+
+/* Socket options. */
+#define ZMQ_AFFINITY 4
+#define ZMQ_IDENTITY 5
+#define ZMQ_SUBSCRIBE 6
+#define ZMQ_UNSUBSCRIBE 7
+#define ZMQ_RATE 8
+#define ZMQ_RECOVERY_IVL 9
+#define ZMQ_SNDBUF 11
+#define ZMQ_RCVBUF 12
+#define ZMQ_RCVMORE 13
+#define ZMQ_FD 14
+#define ZMQ_EVENTS 15
+#define ZMQ_TYPE 16
+#define ZMQ_LINGER 17
+#define ZMQ_RECONNECT_IVL 18
+#define ZMQ_BACKLOG 19
+#define ZMQ_RECONNECT_IVL_MAX 21
+#define ZMQ_MAXMSGSIZE 22
+#define ZMQ_SNDHWM 23
+#define ZMQ_RCVHWM 24
+#define ZMQ_MULTICAST_HOPS 25
+#define ZMQ_RCVTIMEO 27
+#define ZMQ_SNDTIMEO 28
+#define ZMQ_LAST_ENDPOINT 32
+#define ZMQ_ROUTER_MANDATORY 33
+#define ZMQ_TCP_KEEPALIVE 34
+#define ZMQ_TCP_KEEPALIVE_CNT 35
+#define ZMQ_TCP_KEEPALIVE_IDLE 36
+#define ZMQ_TCP_KEEPALIVE_INTVL 37
+#define ZMQ_TCP_ACCEPT_FILTER 38
+#define ZMQ_IMMEDIATE 39
+#define ZMQ_XPUB_VERBOSE 40
+#define ZMQ_ROUTER_RAW 41
+#define ZMQ_IPV6 42
+#define ZMQ_MECHANISM 43
+#define ZMQ_PLAIN_SERVER 44
+#define ZMQ_PLAIN_USERNAME 45
+#define ZMQ_PLAIN_PASSWORD 46
+#define ZMQ_CURVE_SERVER 47
+#define ZMQ_CURVE_PUBLICKEY 48
+#define ZMQ_CURVE_SECRETKEY 49
+#define ZMQ_CURVE_SERVERKEY 50
+#define ZMQ_PROBE_ROUTER 51
+#define ZMQ_REQ_CORRELATE 52
+#define ZMQ_REQ_RELAXED 53
+#define ZMQ_CONFLATE 54
+#define ZMQ_ZAP_DOMAIN 55
+
+/* Message options */
+#define ZMQ_MORE 1
+
+/* Send/recv options. */
+#define ZMQ_DONTWAIT 1
+#define ZMQ_SNDMORE 2
+
+/* Security mechanisms */
+#define ZMQ_NULL 0
+#define ZMQ_PLAIN 1
+#define ZMQ_CURVE 2
+
+/* Deprecated options and aliases */
+#define ZMQ_IPV4ONLY 31
+#define ZMQ_DELAY_ATTACH_ON_CONNECT ZMQ_IMMEDIATE
+#define ZMQ_NOBLOCK ZMQ_DONTWAIT
+#define ZMQ_FAIL_UNROUTABLE ZMQ_ROUTER_MANDATORY
+#define ZMQ_ROUTER_BEHAVIOR ZMQ_ROUTER_MANDATORY
+
+/******************************************************************************/
+/* 0MQ socket events and monitoring */
+/******************************************************************************/
+
+/* Socket transport events (tcp and ipc only) */
+#define ZMQ_EVENT_CONNECTED 1
+#define ZMQ_EVENT_CONNECT_DELAYED 2
+#define ZMQ_EVENT_CONNECT_RETRIED 4
+
+#define ZMQ_EVENT_LISTENING 8
+#define ZMQ_EVENT_BIND_FAILED 16
+
+#define ZMQ_EVENT_ACCEPTED 32
+#define ZMQ_EVENT_ACCEPT_FAILED 64
+
+#define ZMQ_EVENT_CLOSED 128
+#define ZMQ_EVENT_CLOSE_FAILED 256
+#define ZMQ_EVENT_DISCONNECTED 512
+#define ZMQ_EVENT_MONITOR_STOPPED 1024
+
+#define ZMQ_EVENT_ALL ( ZMQ_EVENT_CONNECTED | ZMQ_EVENT_CONNECT_DELAYED | \
+ ZMQ_EVENT_CONNECT_RETRIED | ZMQ_EVENT_LISTENING | \
+ ZMQ_EVENT_BIND_FAILED | ZMQ_EVENT_ACCEPTED | \
+ ZMQ_EVENT_ACCEPT_FAILED | ZMQ_EVENT_CLOSED | \
+ ZMQ_EVENT_CLOSE_FAILED | ZMQ_EVENT_DISCONNECTED | \
+ ZMQ_EVENT_MONITOR_STOPPED)
+
+/* Socket event data */
+typedef struct {
+ uint16_t event; // id of the event as bitfield
+ int32_t value ; // value is either error code, fd or reconnect interval
+} zmq_event_t;
+
+ZMQ_EXPORT void *zmq_socket (void *, int type);
+ZMQ_EXPORT int zmq_close (void *s);
+ZMQ_EXPORT int zmq_setsockopt (void *s, int option, const void *optval,
+ size_t optvallen);
+ZMQ_EXPORT int zmq_getsockopt (void *s, int option, void *optval,
+ size_t *optvallen);
+ZMQ_EXPORT int zmq_bind (void *s, const char *addr);
+ZMQ_EXPORT int zmq_connect (void *s, const char *addr);
+ZMQ_EXPORT int zmq_unbind (void *s, const char *addr);
+ZMQ_EXPORT int zmq_disconnect (void *s, const char *addr);
+ZMQ_EXPORT int zmq_send (void *s, const void *buf, size_t len, int flags);
+ZMQ_EXPORT int zmq_send_const (void *s, const void *buf, size_t len, int flags);
+ZMQ_EXPORT int zmq_recv (void *s, void *buf, size_t len, int flags);
+ZMQ_EXPORT int zmq_socket_monitor (void *s, const char *addr, int events);
+
+ZMQ_EXPORT int zmq_sendmsg (void *s, zmq_msg_t *msg, int flags);
+ZMQ_EXPORT int zmq_recvmsg (void *s, zmq_msg_t *msg, int flags);
+
+/* Experimental */
+struct iovec;
+
+ZMQ_EXPORT int zmq_sendiov (void *s, struct iovec *iov, size_t count, int flags);
+ZMQ_EXPORT int zmq_recviov (void *s, struct iovec *iov, size_t *count, int flags);
+
+/******************************************************************************/
+/* I/O multiplexing. */
+/******************************************************************************/
+
+#define ZMQ_POLLIN 1
+#define ZMQ_POLLOUT 2
+#define ZMQ_POLLERR 4
+
+typedef struct
+{
+ void *socket;
+#if defined _WIN32
+ SOCKET fd;
+#else
+ int fd;
+#endif
+ short events;
+ short revents;
+} zmq_pollitem_t;
+
+#define ZMQ_POLLITEMS_DFLT 16
+
+ZMQ_EXPORT int zmq_poll (zmq_pollitem_t *items, int nitems, long timeout);
+
+/* Built-in message proxy (3-way) */
+
+ZMQ_EXPORT int zmq_proxy (void *frontend, void *backend, void *capture);
+
+/* Encode a binary key as printable text using ZMQ RFC 32 */
+ZMQ_EXPORT char *zmq_z85_encode (char *dest, uint8_t *data, size_t size);
+
+/* Encode a binary key from printable text per ZMQ RFC 32 */
+ZMQ_EXPORT uint8_t *zmq_z85_decode (uint8_t *dest, char *string);
+
+/* Deprecated aliases */
+#define ZMQ_STREAMER 1
+#define ZMQ_FORWARDER 2
+#define ZMQ_QUEUE 3
+/* Deprecated method */
+ZMQ_EXPORT int zmq_device (int type, void *frontend, void *backend);
+
+#undef ZMQ_EXPORT
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
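
The header above is the standard libzmq 4.0 C API. As an illustration of the
request/reply pattern it declares (the endpoint and payload are hypothetical),
a minimal REP server would look like:

    #include <zmq.h>
    #include <stdio.h>

    int main(void) {
        void *ctx  = zmq_ctx_new();
        void *sock = zmq_socket(ctx, ZMQ_REP);
        zmq_bind(sock, "tcp://*:4500");
        char buf[256];
        int n = zmq_recv(sock, buf, sizeof(buf) - 1, 0);   /* may report truncation */
        if (n >= 0) {
            buf[n < 255 ? n : 255] = '\0';
            printf("request: %s\n", buf);
            zmq_send(sock, "ack", 3, 0);
        }
        zmq_close(sock);
        zmq_ctx_term(ctx);
        return 0;
    }
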
diff --git a/src/zmq/include/zmq_utils.h b/src/zmq/include/zmq_utils.h
new file mode 100755
index 00000000..9b14aa72
--- /dev/null
+++ b/src/zmq/include/zmq_utils.h
@@ -0,0 +1,105 @@
+/*
+ Copyright (c) 2007-2013 Contributors as noted in the AUTHORS file
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __ZMQ_UTILS_H_INCLUDED__
+#define __ZMQ_UTILS_H_INCLUDED__
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+/* Define integer types needed for event interface */
+#if defined ZMQ_HAVE_SOLARIS || defined ZMQ_HAVE_OPENVMS
+# include <inttypes.h>
+#elif defined _MSC_VER && _MSC_VER < 1600
+# ifndef int32_t
+typedef __int32 int32_t;
+# endif
+# ifndef uint16_t
+typedef unsigned __int16 uint16_t;
+# endif
+#else
+# include <stdint.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Handle DSO symbol visibility */
+#if defined _WIN32
+# if defined ZMQ_STATIC
+# define ZMQ_EXPORT
+# elif defined DLL_EXPORT
+# define ZMQ_EXPORT __declspec(dllexport)
+# else
+# define ZMQ_EXPORT __declspec(dllimport)
+# endif
+#else
+# if defined __SUNPRO_C || defined __SUNPRO_CC
+# define ZMQ_EXPORT __global
+# elif (defined __GNUC__ && __GNUC__ >= 4) || defined __INTEL_COMPILER
+# define ZMQ_EXPORT __attribute__ ((visibility("default")))
+# else
+# define ZMQ_EXPORT
+# endif
+#endif
+
+/* These functions are documented by man pages */
+
+/* Encode data with Z85 encoding. Returns encoded data */
+ZMQ_EXPORT char *zmq_z85_encode (char *dest, uint8_t *data, size_t size);
+
+/* Decode data with Z85 encoding. Returns decoded data */
+ZMQ_EXPORT uint8_t *zmq_z85_decode (uint8_t *dest, char *string);
+
+/* Generate z85-encoded public and private keypair with libsodium. */
+/* Returns 0 on success. */
+ZMQ_EXPORT int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);
+
+typedef void (zmq_thread_fn) (void*);
+
+/* These functions are not documented by man pages */
+
+/* Helper functions are used by perf tests so that they don't have to care */
+/* about minutiae of time-related functions on different OS platforms. */
+
+/* Starts the stopwatch. Returns the handle to the watch. */
+ZMQ_EXPORT void *zmq_stopwatch_start (void);
+
+/* Stops the stopwatch. Returns the number of microseconds elapsed since */
+/* the stopwatch was started. */
+ZMQ_EXPORT unsigned long zmq_stopwatch_stop (void *watch_);
+
+/* Sleeps for specified number of seconds. */
+ZMQ_EXPORT void zmq_sleep (int seconds_);
+
+/* Start a thread. Returns a handle to the thread. */
+ZMQ_EXPORT void *zmq_threadstart (zmq_thread_fn* func, void* arg);
+
+/* Wait for thread to complete then free up resources. */
+ZMQ_EXPORT void zmq_threadclose (void* thread);
+
+#undef ZMQ_EXPORT
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
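
The stopwatch helpers above are convenient for quick micro-benchmarks; a
small sketch:

    #include <zmq_utils.h>

    unsigned long time_one_call(void (*fn)(void)) {
        void *watch = zmq_stopwatch_start();
        fn();
        return zmq_stopwatch_stop(watch);   /* elapsed microseconds */
    }
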
diff --git a/src/zmq/libzmq.a b/src/zmq/libzmq.a
new file mode 100755
index 00000000..8c994993
--- /dev/null
+++ b/src/zmq/libzmq.a
Binary files differ
diff --git a/src/zmq/libzmq.la b/src/zmq/libzmq.la
new file mode 100755
index 00000000..2e5f984d
--- /dev/null
+++ b/src/zmq/libzmq.la
@@ -0,0 +1,41 @@
+# libzmq.la - a libtool library file
+# Generated by libtool (GNU libtool) 2.4.2 Debian-2.4.2-1.3ubuntu1
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='libzmq.so.3'
+
+# Names of this library.
+library_names='libzmq.so.3.1.0 libzmq.so.3 libzmq.so'
+
+# The name of the static archive.
+old_library='libzmq.a'
+
+# Linker flags that can not go in dependency_libs.
+inherited_linker_flags=''
+
+# Libraries that this one depends upon.
+dependency_libs=' -lrt -lpthread'
+
+# Names of additional weak libraries provided by this library
+weak_library_names=''
+
+# Version information for libzmq.
+current=4
+age=1
+revision=0
+
+# Is this an already installed library?
+installed=no
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=no
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir='/usr/local/lib'
diff --git a/src/zmq/libzmq.lai b/src/zmq/libzmq.lai
new file mode 100755
index 00000000..126d3d5e
--- /dev/null
+++ b/src/zmq/libzmq.lai
@@ -0,0 +1,41 @@
+# libzmq.la - a libtool library file
+# Generated by libtool (GNU libtool) 2.4.2 Debian-2.4.2-1.3ubuntu1
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='libzmq.so.3'
+
+# Names of this library.
+library_names='libzmq.so.3.1.0 libzmq.so.3 libzmq.so'
+
+# The name of the static archive.
+old_library='libzmq.a'
+
+# Linker flags that can not go in dependency_libs.
+inherited_linker_flags=''
+
+# Libraries that this one depends upon.
+dependency_libs=' -lrt -lpthread'
+
+# Names of additional weak libraries provided by this library
+weak_library_names=''
+
+# Version information for libzmq.
+current=4
+age=1
+revision=0
+
+# Is this an already installed library?
+installed=yes
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=no
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir='/usr/local/lib'
diff --git a/src/zmq/libzmq.so b/src/zmq/libzmq.so
new file mode 100755
index 00000000..16980c27
--- /dev/null
+++ b/src/zmq/libzmq.so
Binary files differ
diff --git a/src/zmq/libzmq.so.3 b/src/zmq/libzmq.so.3
new file mode 100755
index 00000000..16980c27
--- /dev/null
+++ b/src/zmq/libzmq.so.3
Binary files differ
diff --git a/src/zmq/libzmq.so.3.1.0 b/src/zmq/libzmq.so.3.1.0
new file mode 100755
index 00000000..16980c27
--- /dev/null
+++ b/src/zmq/libzmq.so.3.1.0
Binary files differ
diff --git a/yaml-cpp/CMakeLists.txt b/yaml-cpp/CMakeLists.txt
new file mode 100755
index 00000000..823ce201
--- /dev/null
+++ b/yaml-cpp/CMakeLists.txt
@@ -0,0 +1,282 @@
+###
+### CMake settings
+###
+## Due to Mac OSX we need to keep compatibility with CMake 2.6
+# see http://www.cmake.org/Wiki/CMake_Policies
+cmake_minimum_required(VERSION 2.6)
+# see http://www.cmake.org/cmake/help/cmake-2-8-docs.html#policy:CMP0012
+if(POLICY CMP0012)
+ cmake_policy(SET CMP0012 OLD)
+endif()
+# see http://www.cmake.org/cmake/help/cmake-2-8-docs.html#policy:CMP0015
+if(POLICY CMP0015)
+ cmake_policy(SET CMP0015 OLD)
+endif()
+
+include(CheckCXXCompilerFlag)
+
+
+###
+### Project settings
+###
+project(YAML_CPP)
+
+set(YAML_CPP_VERSION_MAJOR "0")
+set(YAML_CPP_VERSION_MINOR "3")
+set(YAML_CPP_VERSION_PATCH "0")
+set(YAML_CPP_VERSION "${YAML_CPP_VERSION_MAJOR}.${YAML_CPP_VERSION_MINOR}.${YAML_CPP_VERSION_PATCH}")
+
+enable_testing()
+
+
+###
+### Project options
+###
+## Project stuff
+option(YAML_CPP_BUILD_TOOLS "Enable testing and parse tools" ON)
+option(YAML_CPP_BUILD_CONTRIB "Enable contrib stuff in library" ON)
+
+## Build options
+# --> General
+# see http://www.cmake.org/cmake/help/cmake2.6docs.html#variable:BUILD_SHARED_LIBS
+# http://www.cmake.org/cmake/help/cmake2.6docs.html#command:add_library
+option(BUILD_SHARED_LIBS "Build Shared Libraries" OFF)
+
+# --> Apple
+option(APPLE_UNIVERSAL_BIN "Apple: Build universal binary" OFF)
+
+# --> Microsoft Visual C++
+# see http://msdn.microsoft.com/en-us/library/aa278396(v=VS.60).aspx
+# http://msdn.microsoft.com/en-us/library/2kzt1wy3(v=VS.71).aspx
+option(MSVC_SHARED_RT "MSVC: Build with shared runtime libs (/MD)" ON)
+option(MSVC_STHREADED_RT "MSVC: Build with single-threaded static runtime libs (/ML until VS .NET 2003)" OFF)
+
+###
+### Sources, headers, directories and libs
+###
+set(header_directory "include/yaml-cpp/")
+
+file(GLOB sources "src/[a-zA-Z]*.cpp")
+file(GLOB public_headers "include/yaml-cpp/[a-zA-Z]*.h")
+file(GLOB private_headers "src/[a-zA-Z]*.h")
+
+if(YAML_CPP_BUILD_CONTRIB)
+ file(GLOB contrib_sources "src/contrib/[a-zA-Z]*.cpp")
+ file(GLOB contrib_public_headers "include/yaml-cpp/contrib/[a-zA-Z]*.h")
+ file(GLOB contrib_private_headers "src/contrib/[a-zA-Z]*.h")
+else()
+ add_definitions(-DYAML_CPP_NO_CONTRIB)
+endif()
+
+if(VERBOSE)
+ message(STATUS "sources: ${sources}")
+ message(STATUS "public_headers: ${public_headers}")
+ message(STATUS "private_headers: ${private_headers}")
+ message(STATUS "contrib_sources: ${contrib_sources}")
+ message(STATUS "contrib_public_headers: ${contrib_public_headers}")
+ message(STATUS "contrib_private_headers: ${contrib_private_headers}")
+endif()
+
+include_directories(${YAML_CPP_SOURCE_DIR}/include)
+include_directories(${YAML_CPP_SOURCE_DIR}/src)
+
+
+###
+### General compilation settings
+###
+if(BUILD_SHARED_LIBS)
+ set(LABEL_SUFFIX "shared")
+else()
+ set(LABEL_SUFFIX "static")
+endif()
+
+if(APPLE)
+ if(APPLE_UNIVERSAL_BIN)
+ set(CMAKE_OSX_ARCHITECTURES ppc;i386)
+ endif()
+endif()
+
+if(IPHONE)
+ set(CMAKE_OSX_SYSROOT "iphoneos4.2")
+ set(CMAKE_OSX_ARCHITECTURES "armv6;armv7")
+endif()
+
+if(WIN32)
+ if(BUILD_SHARED_LIBS)
+ add_definitions(-D${PROJECT_NAME}_DLL) # use or build Windows DLL
+ endif()
+ if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+ set(CMAKE_INSTALL_PREFIX "C:/")
+ endif()
+endif()
+
+# GCC specialities
+if(CMAKE_COMPILER_IS_GNUCXX)
+ ### General stuff
+ if(WIN32)
+ set(CMAKE_SHARED_LIBRARY_PREFIX "") # DLLs do not have a "lib" prefix
+ set(CMAKE_IMPORT_LIBRARY_PREFIX "") # same for DLL import libs
+ set(CMAKE_LINK_DEF_FILE_FLAG "") # CMake workaround (2.8.3)
+ endif()
+
+ ### Project stuff
+ if(NOT CMAKE_CONFIGURATION_TYPES AND NOT CMAKE_BUILD_TYPE)
+ set(CMAKE_BUILD_TYPE Release)
+ endif()
+ #
+ set(CMAKE_CXX_FLAGS_RELEASE "-O2")
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2 -g")
+ set(CMAKE_CXX_FLAGS_DEBUG "-g")
+ set(CMAKE_CXX_FLAGS_MINSIZEREL "-Os")
+ #
+ set(GCC_EXTRA_OPTIONS "")
+ #
+ set(FLAG_TESTED "-Wextra")
+ check_cxx_compiler_flag(${FLAG_TESTED} FLAG_WEXTRA)
+ if(FLAG_WEXTRA)
+ set(GCC_EXTRA_OPTIONS "${GCC_EXTRA_OPTIONS} ${FLAG_TESTED}")
+ endif()
+ #
+ set(CMAKE_CXX_FLAGS "-Wall ${GCC_EXTRA_OPTIONS} -pedantic -Wno-long-long ${CMAKE_CXX_FLAGS}")
+ #
+ add_custom_target(debuggable $(MAKE) clean
+ COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug ${CMAKE_SOURCE_DIR}
+ COMMENT "Adjusting settings for debug compilation"
+ VERBATIM)
+ add_custom_target(releasable $(MAKE) clean
+ COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Release ${CMAKE_SOURCE_DIR}
+ COMMENT "Adjusting settings for release compilation"
+ VERBATIM)
+endif()
+
+# Microsoft VisualC++ specialities
+if(MSVC)
+ ### General stuff
+ # a) Change MSVC runtime library settings (/MD[d], /MT[d], /ML[d] (single-threaded until VS 2003))
+ # plus set lib suffix for later use and project label accordingly
+ # see http://msdn.microsoft.com/en-us/library/aa278396(v=VS.60).aspx
+ # http://msdn.microsoft.com/en-us/library/2kzt1wy3(v=VS.71).aspx
+ set(LIB_RT_SUFFIX "md") # CMake defaults to /MD for MSVC
+ set(LIB_RT_OPTION "/MD")
+ #
+ if(NOT MSVC_SHARED_RT) # User wants to have static runtime libraries (/MT, /ML)
+ if(MSVC_STHREADED_RT) # User wants to have old single-threaded static runtime libraries
+ set(LIB_RT_SUFFIX "ml")
+ set(LIB_RT_OPTION "/ML")
+ if(NOT ${MSVC_VERSION} LESS 1400)
+ message(FATAL_ERROR "Single-threaded static runtime libraries (/ML) only available until VS .NET 2003 (7.1).")
+ endif()
+ else()
+ set(LIB_RT_SUFFIX "mt")
+ set(LIB_RT_OPTION "/MT")
+ endif()
+
+ # correct linker options
+ foreach(flag_var CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
+ foreach(config_name "" DEBUG RELEASE MINSIZEREL RELWITHDEBINFO)
+ set(var_name "${flag_var}")
+ if(NOT "${config_name}" STREQUAL "")
+ set(var_name "${var_name}_${config_name}")
+ endif()
+ string(REPLACE "/MD" "${LIB_RT_OPTION}" ${var_name} "${${var_name}}")
+ endforeach()
+ endforeach()
+ endif()
+ #
+ set(LABEL_SUFFIX "${LABEL_SUFFIX} ${LIB_RT_SUFFIX}")
+
+ # b) Change prefix for static libraries
+ set(CMAKE_STATIC_LIBRARY_PREFIX "lib") # to distinguish static libraries from DLL import libs
+
+ # c) Correct suffixes for static libraries
+ if(NOT BUILD_SHARED_LIBS)
+ ### General stuff
+ set(LIB_TARGET_SUFFIX "${LIB_SUFFIX}${LIB_RT_SUFFIX}")
+ endif()
+
+ ### Project stuff
+ # /W3 = set warning level; see http://msdn.microsoft.com/en-us/library/thxezb7y.aspx
+ # /wd4127 = disable warning C4127 "conditional expression is constant"; see http://msdn.microsoft.com/en-us/library/6t66728h.aspx
+ # /wd4355 = disable warning C4355 "'this' : used in base member initializer list"; http://msdn.microsoft.com/en-us/library/3c594ae3.aspx
+ set(CMAKE_CXX_FLAGS "/W3 /wd4127 /wd4355 /D_SCL_SECURE_NO_WARNINGS ${CMAKE_CXX_FLAGS}")
+endif()
+
+
+###
+### General install settings
+###
+if(WIN32)
+ set(_library_dir bin) # .dll are in PATH, like executables
+else()
+ set(_library_dir lib)
+endif()
+
+set(INCLUDE_INSTALL_ROOT_DIR include)
+
+set(INCLUDE_INSTALL_DIR ${INCLUDE_INSTALL_ROOT_DIR}/yaml-cpp)
+set(LIB_INSTALL_DIR "${_library_dir}${LIB_SUFFIX}")
+
+set(_INSTALL_DESTINATIONS
+ RUNTIME DESTINATION bin
+ LIBRARY DESTINATION ${LIB_INSTALL_DIR}
+ ARCHIVE DESTINATION "lib${LIB_SUFFIX}"
+)
+
+
+###
+### Library
+###
+add_library(yaml-cpp
+ ${sources}
+ ${public_headers}
+ ${private_headers}
+ ${contrib_sources}
+ ${contrib_public_headers}
+ ${contrib_private_headers}
+)
+
+set_target_properties(yaml-cpp PROPERTIES
+ VERSION "${YAML_CPP_VERSION}"
+ SOVERSION "${YAML_CPP_VERSION_MAJOR}.${YAML_CPP_VERSION_MINOR}"
+ PROJECT_LABEL "yaml-cpp ${LABEL_SUFFIX}"
+)
+
+if(IPHONE)
+ set_target_properties(yaml-cpp PROPERTIES
+ XCODE_ATTRIBUTE_IPHONEOS_DEPLOYMENT_TARGET "3.0"
+ )
+endif()
+
+if(MSVC)
+ if(NOT BUILD_SHARED_LIBS)
+ # correct library names
+ set_target_properties(yaml-cpp PROPERTIES
+ DEBUG_POSTFIX "${LIB_TARGET_SUFFIX}d"
+ RELEASE_POSTFIX "${LIB_TARGET_SUFFIX}"
+ MINSIZEREL_POSTFIX "${LIB_TARGET_SUFFIX}"
+ RELWITHDEBINFO_POSTFIX "${LIB_TARGET_SUFFIX}"
+ )
+ endif()
+endif()
+
+install(TARGETS yaml-cpp ${_INSTALL_DESTINATIONS})
+install(
+ DIRECTORY ${header_directory}
+ DESTINATION ${INCLUDE_INSTALL_DIR}
+ FILES_MATCHING PATTERN "*.h"
+)
+
+if(UNIX)
+ set(PC_FILE ${CMAKE_BINARY_DIR}/yaml-cpp.pc)
+ configure_file("yaml-cpp.pc.cmake" ${PC_FILE} @ONLY)
+ install(FILES ${PC_FILE} DESTINATION ${LIB_INSTALL_DIR}/pkgconfig)
+endif()
+
+
+###
+### Extras
+###
+if(YAML_CPP_BUILD_TOOLS)
+ add_subdirectory(test)
+ add_subdirectory(util)
+endif()
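
The build file above produces the yaml-cpp 0.3.x library that the rest of the
tree links against. In that API generation, documents are read through
YAML::Parser and operator>> rather than the later Load() interface; a minimal
sketch (the file name and key are hypothetical):

    #include <yaml-cpp/yaml.h>
    #include <fstream>
    #include <string>

    int main() {
        std::ifstream fin("trex_cfg.yaml");
        YAML::Parser parser(fin);
        YAML::Node doc;
        while (parser.GetNextDocument(doc)) {
            std::string version;
            if (const YAML::Node *p = doc.FindValue("version")) {
                *p >> version;
            }
        }
        return 0;
    }
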
diff --git a/yaml-cpp/include/yaml-cpp/aliasmanager.h b/yaml-cpp/include/yaml-cpp/aliasmanager.h
new file mode 100755
index 00000000..e90c93dd
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/aliasmanager.h
@@ -0,0 +1,34 @@
+#ifndef ALIASMANAGER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define ALIASMANAGER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include "yaml-cpp/anchor.h"
+#include <map>
+
+namespace YAML
+{
+ class Node;
+
+ class AliasManager
+ {
+ public:
+ AliasManager();
+
+ void RegisterReference(const Node& node);
+ anchor_t LookupAnchor(const Node& node) const;
+
+ private:
+ anchor_t _CreateNewAnchor();
+
+ private:
+ typedef std::map<const Node*, anchor_t> AnchorByIdentity;
+ AnchorByIdentity m_anchorByIdentity;
+
+ anchor_t m_curAnchor;
+ };
+}
+
+#endif // ALIASMANAGER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/anchor.h b/yaml-cpp/include/yaml-cpp/anchor.h
new file mode 100755
index 00000000..433f2fa5
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/anchor.h
@@ -0,0 +1,16 @@
+#ifndef ANCHOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define ANCHOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include <cstddef>
+
+namespace YAML
+{
+ typedef std::size_t anchor_t;
+ const anchor_t NullAnchor = 0;
+}
+
+#endif // ANCHOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/binary.h b/yaml-cpp/include/yaml-cpp/binary.h
new file mode 100755
index 00000000..8504ebeb
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/binary.h
@@ -0,0 +1,66 @@
+#ifndef BASE64_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define BASE64_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include <string>
+#include <vector>
+
+namespace YAML
+{
+ class Node;
+
+ std::string EncodeBase64(const unsigned char *data, std::size_t size);
+ std::vector<unsigned char> DecodeBase64(const std::string& input);
+
+ class Binary {
+ public:
+ Binary(): m_unownedData(0), m_unownedSize(0) {}
+ Binary(const unsigned char *data, std::size_t size): m_unownedData(data), m_unownedSize(size) {}
+
+ bool owned() const { return !m_unownedData; }
+ std::size_t size() const { return owned() ? m_data.size() : m_unownedSize; }
+ const unsigned char *data() const { return owned() ? &m_data[0] : m_unownedData; }
+
+ void swap(std::vector<unsigned char>& rhs) {
+ if(m_unownedData) {
+ m_data.swap(rhs);
+ rhs.clear();
+ rhs.resize(m_unownedSize);
+ std::copy(m_unownedData, m_unownedData + m_unownedSize, &rhs[0]);
+ m_unownedData = 0;
+ m_unownedSize = 0;
+ } else {
+ m_data.swap(rhs);
+ }
+ }
+
+ bool operator == (const Binary& rhs) const {
+ const std::size_t s = size();
+ if(s != rhs.size())
+ return false;
+ const unsigned char *d1 = data();
+ const unsigned char *d2 = rhs.data();
+ for(std::size_t i=0;i<s;i++) {
+ if(*d1++ != *d2++)
+ return false;
+ }
+ return true;
+ }
+
+ bool operator != (const Binary& rhs) const {
+ return !(*this == rhs);
+ }
+
+ private:
+ std::vector<unsigned char> m_data;
+ const unsigned char *m_unownedData;
+ std::size_t m_unownedSize;
+ };
+
+ void operator >> (const Node& node, Binary& binary);
+}
+
+#endif // BASE64_H_62B23520_7C8E_11DE_8A39_0800200C9A66
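
EncodeBase64()/DecodeBase64() above are the standalone codecs behind YAML's
!!binary type; a tiny sketch:

    #include "yaml-cpp/binary.h"
    #include <iostream>

    int main() {
        const unsigned char raw[] = {0xde, 0xad, 0xbe, 0xef};
        std::string b64 = YAML::EncodeBase64(raw, sizeof(raw));
        std::cout << b64 << std::endl;   // prints "3q2+7w=="
        return 0;
    }
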
diff --git a/yaml-cpp/include/yaml-cpp/contrib/anchordict.h b/yaml-cpp/include/yaml-cpp/contrib/anchordict.h
new file mode 100755
index 00000000..e483dc4b
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/contrib/anchordict.h
@@ -0,0 +1,42 @@
+#ifndef ANCHORDICT_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define ANCHORDICT_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include <vector>
+
+#include "../anchor.h"
+
+namespace YAML
+{
+ /// AnchorDict
+ /// . An object that stores and retrieves values correlating to anchor_t
+ /// values.
+ /// . Efficient implementation that can make assumptions about how anchor_t
+ /// values are assigned by the Parser class.
+ template <class T>
+ class AnchorDict
+ {
+ public:
+ void Register(anchor_t anchor, T value)
+ {
+ if (anchor > m_data.size())
+ {
+ m_data.resize(anchor);
+ }
+ m_data[anchor - 1] = value;
+ }
+
+ T Get(anchor_t anchor) const
+ {
+ return m_data[anchor - 1];
+ }
+
+ private:
+ std::vector<T> m_data;
+ };
+}
+
+#endif // ANCHORDICT_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/contrib/graphbuilder.h b/yaml-cpp/include/yaml-cpp/contrib/graphbuilder.h
new file mode 100755
index 00000000..6739a12b
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/contrib/graphbuilder.h
@@ -0,0 +1,133 @@
+#ifndef GRAPHBUILDER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define GRAPHBUILDER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include "yaml-cpp/mark.h"
+#include <string>
+
+namespace YAML
+{
+ class Parser;
+
+ // GraphBuilderInterface
+ // . Abstraction of node creation
+ // . pParentNode is always NULL or the return value of one of the NewXXX()
+ // functions.
+ class GraphBuilderInterface
+ {
+ public:
+ // Create and return a new node with a null value.
+ virtual void *NewNull(const Mark& mark, void *pParentNode) = 0;
+
+ // Create and return a new node with the given tag and value.
+ virtual void *NewScalar(const Mark& mark, const std::string& tag, void *pParentNode, const std::string& value) = 0;
+
+ // Create and return a new sequence node
+ virtual void *NewSequence(const Mark& mark, const std::string& tag, void *pParentNode) = 0;
+ // Add pNode to pSequence. pNode was created with one of the NewXxx()
+ // functions and pSequence with NewSequence().
+ virtual void AppendToSequence(void *pSequence, void *pNode) = 0;
+        // Note that no more entries will be added to pSequence
+ virtual void SequenceComplete(void *pSequence) {(void)pSequence;}
+
+ // Create and return a new map node
+ virtual void *NewMap(const Mark& mark, const std::string& tag, void *pParentNode) = 0;
+ // Add the pKeyNode => pValueNode mapping to pMap. pKeyNode and pValueNode
+ // were created with one of the NewXxx() methods and pMap with NewMap().
+ virtual void AssignInMap(void *pMap, void *pKeyNode, void *pValueNode) = 0;
+ // Note that no more assignments will be made in pMap
+ virtual void MapComplete(void *pMap) {(void)pMap;}
+
+ // Return the node that should be used in place of an alias referencing
+ // pNode (pNode by default)
+ virtual void *AnchorReference(const Mark& mark, void *pNode) {(void)mark; return pNode;}
+ };
+
+ // Typesafe wrapper for GraphBuilderInterface. Assumes that Impl defines
+ // Node, Sequence, and Map types. Sequence and Map must derive from Node
+  // (unless Node is defined as void). Impl must also implement functions with
+ // all of the same names as the virtual functions in GraphBuilderInterface
+ // -- including the ones with default implementations -- but with the
+ // prototypes changed to accept an explicit Node*, Sequence*, or Map* where
+ // appropriate.
+ template <class Impl>
+ class GraphBuilder : public GraphBuilderInterface
+ {
+ public:
+ typedef typename Impl::Node Node;
+ typedef typename Impl::Sequence Sequence;
+ typedef typename Impl::Map Map;
+
+ GraphBuilder(Impl& impl) : m_impl(impl)
+ {
+ Map* pMap = NULL;
+ Sequence* pSeq = NULL;
+ Node* pNode = NULL;
+
+ // Type consistency checks
+ pNode = pMap;
+ pNode = pSeq;
+ }
+
+ GraphBuilderInterface& AsBuilderInterface() {return *this;}
+
+ virtual void *NewNull(const Mark& mark, void* pParentNode) {
+ return CheckType<Node>(m_impl.NewNull(mark, AsNode(pParentNode)));
+ }
+
+ virtual void *NewScalar(const Mark& mark, const std::string& tag, void *pParentNode, const std::string& value) {
+ return CheckType<Node>(m_impl.NewScalar(mark, tag, AsNode(pParentNode), value));
+ }
+
+ virtual void *NewSequence(const Mark& mark, const std::string& tag, void *pParentNode) {
+ return CheckType<Sequence>(m_impl.NewSequence(mark, tag, AsNode(pParentNode)));
+ }
+ virtual void AppendToSequence(void *pSequence, void *pNode) {
+ m_impl.AppendToSequence(AsSequence(pSequence), AsNode(pNode));
+ }
+ virtual void SequenceComplete(void *pSequence) {
+ m_impl.SequenceComplete(AsSequence(pSequence));
+ }
+
+ virtual void *NewMap(const Mark& mark, const std::string& tag, void *pParentNode) {
+ return CheckType<Map>(m_impl.NewMap(mark, tag, AsNode(pParentNode)));
+ }
+ virtual void AssignInMap(void *pMap, void *pKeyNode, void *pValueNode) {
+ m_impl.AssignInMap(AsMap(pMap), AsNode(pKeyNode), AsNode(pValueNode));
+ }
+ virtual void MapComplete(void *pMap) {
+ m_impl.MapComplete(AsMap(pMap));
+ }
+
+ virtual void *AnchorReference(const Mark& mark, void *pNode) {
+ return CheckType<Node>(m_impl.AnchorReference(mark, AsNode(pNode)));
+ }
+
+ private:
+ Impl& m_impl;
+
+ // Static check for pointer to T
+ template <class T, class U>
+ static T* CheckType(U* p) {return p;}
+
+ static Node *AsNode(void *pNode) {return static_cast<Node*>(pNode);}
+ static Sequence *AsSequence(void *pSeq) {return static_cast<Sequence*>(pSeq);}
+ static Map *AsMap(void *pMap) {return static_cast<Map*>(pMap);}
+ };
+
+ void *BuildGraphOfNextDocument(Parser& parser, GraphBuilderInterface& graphBuilder);
+
+ template <class Impl>
+ typename Impl::Node *BuildGraphOfNextDocument(Parser& parser, Impl& impl)
+ {
+ GraphBuilder<Impl> graphBuilder(impl);
+ return static_cast<typename Impl::Node *>(BuildGraphOfNextDocument(
+ parser, graphBuilder
+ ));
+ }
+}
+
+#endif // GRAPHBUILDER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/conversion.h b/yaml-cpp/include/yaml-cpp/conversion.h
new file mode 100755
index 00000000..1b557b56
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/conversion.h
@@ -0,0 +1,75 @@
+#ifndef CONVERSION_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define CONVERSION_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/null.h"
+#include "yaml-cpp/traits.h"
+#include <limits>
+#include <string>
+#include <sstream>
+
+namespace YAML
+{
+ // traits for conversion
+
+ template<typename T>
+ struct is_scalar_convertible { enum { value = is_numeric<T>::value }; };
+
+ template<> struct is_scalar_convertible<std::string> { enum { value = true }; };
+ template<> struct is_scalar_convertible<bool> { enum { value = true }; };
+ template<> struct is_scalar_convertible<_Null> { enum { value = true }; };
+
+ // actual conversion
+
+ inline bool Convert(const std::string& input, std::string& output) {
+ output = input;
+ return true;
+ }
+
+ YAML_CPP_API bool Convert(const std::string& input, bool& output);
+ YAML_CPP_API bool Convert(const std::string& input, _Null& output);
+
+ inline bool IsInfinity(const std::string& input) {
+ return input == ".inf" || input == ".Inf" || input == ".INF" || input == "+.inf" || input == "+.Inf" || input == "+.INF";
+ }
+
+ inline bool IsNegativeInfinity(const std::string& input) {
+ return input == "-.inf" || input == "-.Inf" || input == "-.INF";
+ }
+
+ inline bool IsNaN(const std::string& input) {
+ return input == ".nan" || input == ".NaN" || input == ".NAN";
+ }
+
+
+ template <typename T>
+ inline bool Convert(const std::string& input, T& output, typename enable_if<is_numeric<T> >::type * = 0) {
+ std::stringstream stream(input);
+ stream.unsetf(std::ios::dec);
+ if((stream >> output) && (stream >> std::ws).eof())
+ return true;
+
+ if(std::numeric_limits<T>::has_infinity) {
+ if(IsInfinity(input)) {
+ output = std::numeric_limits<T>::infinity();
+ return true;
+ } else if(IsNegativeInfinity(input)) {
+ output = -std::numeric_limits<T>::infinity();
+ return true;
+ }
+ }
+
+ if(std::numeric_limits<T>::has_quiet_NaN && IsNaN(input)) {
+ output = std::numeric_limits<T>::quiet_NaN();
+ return true;
+ }
+
+ return false;
+ }
+}
+
+#endif // CONVERSION_H_62B23520_7C8E_11DE_8A39_0800200C9A66
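
Convert() above is the scalar-to-type layer used by operator>>; it also accepts
the YAML 1.1 spellings of infinity and NaN. A small sketch of calling the
numeric overload directly:

    #include "yaml-cpp/conversion.h"
    #include <string>

    bool demo() {
        double d = 0.0;
        bool a = YAML::Convert(std::string("3.25"), d);   // true, d == 3.25
        bool b = YAML::Convert(std::string(".inf"), d);   // true, d == +infinity
        int i = 0;
        bool c = YAML::Convert(std::string("abc"), i);    // false, i unchanged
        return a && b && !c;
    }
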
diff --git a/yaml-cpp/include/yaml-cpp/dll.h b/yaml-cpp/include/yaml-cpp/dll.h
new file mode 100755
index 00000000..ea138401
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/dll.h
@@ -0,0 +1,28 @@
+#ifndef DLL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define DLL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+// The following ifdef block is the standard way of creating macros which make exporting
+// from a DLL simpler. All files within this DLL are compiled with the yaml_cpp_EXPORTS
+// symbol defined on the command line. this symbol should not be defined on any project
+// that uses this DLL. This way any other project whose source files include this file see
+// YAML_CPP_API functions as being imported from a DLL, whereas this DLL sees symbols
+// defined with this macro as being exported.
+#undef YAML_CPP_API
+
+#ifdef YAML_CPP_DLL // Using or Building YAML-CPP DLL (definition defined manually)
+ #ifdef yaml_cpp_EXPORTS // Building YAML-CPP DLL (definition created by CMake or defined manually)
+ // #pragma message( "Defining YAML_CPP_API for DLL export" )
+ #define YAML_CPP_API __declspec(dllexport)
+ #else // yaml_cpp_EXPORTS
+ // #pragma message( "Defining YAML_CPP_API for DLL import" )
+ #define YAML_CPP_API __declspec(dllimport)
+ #endif // yaml_cpp_EXPORTS
+#else //YAML_CPP_DLL
+#define YAML_CPP_API
+#endif // YAML_CPP_DLL
+
+#endif // DLL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/emitfromevents.h b/yaml-cpp/include/yaml-cpp/emitfromevents.h
new file mode 100755
index 00000000..e11ae640
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/emitfromevents.h
@@ -0,0 +1,45 @@
+#ifndef EMITFROMEVENTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define EMITFROMEVENTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include "yaml-cpp/eventhandler.h"
+#include <stack>
+
+namespace YAML
+{
+ class Emitter;
+
+ class EmitFromEvents: public EventHandler
+ {
+ public:
+ EmitFromEvents(Emitter& emitter);
+
+ virtual void OnDocumentStart(const Mark& mark);
+ virtual void OnDocumentEnd();
+
+ virtual void OnNull(const Mark& mark, anchor_t anchor);
+ virtual void OnAlias(const Mark& mark, anchor_t anchor);
+ virtual void OnScalar(const Mark& mark, const std::string& tag, anchor_t anchor, const std::string& value);
+
+ virtual void OnSequenceStart(const Mark& mark, const std::string& tag, anchor_t anchor);
+ virtual void OnSequenceEnd();
+
+ virtual void OnMapStart(const Mark& mark, const std::string& tag, anchor_t anchor);
+ virtual void OnMapEnd();
+
+ private:
+ void BeginNode();
+ void EmitProps(const std::string& tag, anchor_t anchor);
+
+ private:
+ Emitter& m_emitter;
+
+ struct State { enum value { WaitingForSequenceEntry, WaitingForKey, WaitingForValue }; };
+ std::stack<State::value> m_stateStack;
+ };
+}
+
+#endif // EMITFROMEVENTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/emitter.h b/yaml-cpp/include/yaml-cpp/emitter.h
new file mode 100755
index 00000000..1d7edf2f
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/emitter.h
@@ -0,0 +1,186 @@
+#ifndef EMITTER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define EMITTER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/dll.h"
+#include "yaml-cpp/binary.h"
+#include "yaml-cpp/emittermanip.h"
+#include "yaml-cpp/ostream.h"
+#include "yaml-cpp/noncopyable.h"
+#include "yaml-cpp/null.h"
+#include <memory>
+#include <string>
+#include <sstream>
+
+namespace YAML
+{
+ class EmitterState;
+
+ class YAML_CPP_API Emitter: private noncopyable
+ {
+ public:
+ Emitter();
+ ~Emitter();
+
+ // output
+ const char *c_str() const;
+ unsigned size() const;
+
+ // state checking
+ bool good() const;
+ const std::string GetLastError() const;
+
+ // global setters
+ bool SetOutputCharset(EMITTER_MANIP value);
+ bool SetStringFormat(EMITTER_MANIP value);
+ bool SetBoolFormat(EMITTER_MANIP value);
+ bool SetIntBase(EMITTER_MANIP value);
+ bool SetSeqFormat(EMITTER_MANIP value);
+ bool SetMapFormat(EMITTER_MANIP value);
+ bool SetIndent(unsigned n);
+ bool SetPreCommentIndent(unsigned n);
+ bool SetPostCommentIndent(unsigned n);
+ bool SetFloatPrecision(unsigned n);
+ bool SetDoublePrecision(unsigned n);
+
+ // local setters
+ Emitter& SetLocalValue(EMITTER_MANIP value);
+ Emitter& SetLocalIndent(const _Indent& indent);
+ Emitter& SetLocalPrecision(const _Precision& precision);
+
+ // overloads of write
+ Emitter& Write(const std::string& str);
+ Emitter& Write(bool b);
+ Emitter& Write(char ch);
+ Emitter& Write(const _Alias& alias);
+ Emitter& Write(const _Anchor& anchor);
+ Emitter& Write(const _Tag& tag);
+ Emitter& Write(const _Comment& comment);
+ Emitter& Write(const _Null& null);
+ Emitter& Write(const Binary& binary);
+
+ template <typename T>
+ Emitter& WriteIntegralType(T value);
+
+ template <typename T>
+ Emitter& WriteStreamable(T value);
+
+ private:
+ void PreWriteIntegralType(std::stringstream& str);
+ void PreWriteStreamable(std::stringstream& str);
+ void PostWriteIntegralType(const std::stringstream& str);
+ void PostWriteStreamable(const std::stringstream& str);
+
+ template<typename T> void SetStreamablePrecision(std::stringstream&) {}
+ unsigned GetFloatPrecision() const;
+ unsigned GetDoublePrecision() const;
+
+ private:
+ void PreAtomicWrite();
+ bool GotoNextPreAtomicState();
+ void PostAtomicWrite();
+ void EmitSeparationIfNecessary();
+
+ void EmitBeginDoc();
+ void EmitEndDoc();
+ void EmitBeginSeq();
+ void EmitEndSeq();
+ void EmitBeginMap();
+ void EmitEndMap();
+ void EmitKey();
+ void EmitValue();
+ void EmitNewline();
+ void EmitKindTag();
+ void EmitTag(bool verbatim, const _Tag& tag);
+
+ const char *ComputeFullBoolName(bool b) const;
+ bool CanEmitNewline() const;
+
+ private:
+ ostream m_stream;
+ std::auto_ptr <EmitterState> m_pState;
+ };
+
+ template <typename T>
+ inline Emitter& Emitter::WriteIntegralType(T value)
+ {
+ if(!good())
+ return *this;
+
+ std::stringstream str;
+ PreWriteIntegralType(str);
+ str << value;
+ PostWriteIntegralType(str);
+ return *this;
+ }
+
+ template <typename T>
+ inline Emitter& Emitter::WriteStreamable(T value)
+ {
+ if(!good())
+ return *this;
+
+ std::stringstream str;
+ PreWriteStreamable(str);
+ SetStreamablePrecision<T>(str);
+ str << value;
+ PostWriteStreamable(str);
+ return *this;
+ }
+
+ template<>
+ inline void Emitter::SetStreamablePrecision<float>(std::stringstream& str)
+ {
+ str.precision(GetFloatPrecision());
+ }
+
+ template<>
+ inline void Emitter::SetStreamablePrecision<double>(std::stringstream& str)
+ {
+ str.precision(GetDoublePrecision());
+ }
+
+ // overloads of insertion
+ inline Emitter& operator << (Emitter& emitter, const std::string& v) { return emitter.Write(v); }
+ inline Emitter& operator << (Emitter& emitter, bool v) { return emitter.Write(v); }
+ inline Emitter& operator << (Emitter& emitter, char v) { return emitter.Write(v); }
+ inline Emitter& operator << (Emitter& emitter, unsigned char v) { return emitter.Write(static_cast<char>(v)); }
+ inline Emitter& operator << (Emitter& emitter, const _Alias& v) { return emitter.Write(v); }
+ inline Emitter& operator << (Emitter& emitter, const _Anchor& v) { return emitter.Write(v); }
+ inline Emitter& operator << (Emitter& emitter, const _Tag& v) { return emitter.Write(v); }
+ inline Emitter& operator << (Emitter& emitter, const _Comment& v) { return emitter.Write(v); }
+ inline Emitter& operator << (Emitter& emitter, const _Null& v) { return emitter.Write(v); }
+ inline Emitter& operator << (Emitter& emitter, const Binary& b) { return emitter.Write(b); }
+
+ inline Emitter& operator << (Emitter& emitter, const char *v) { return emitter.Write(std::string(v)); }
+
+ inline Emitter& operator << (Emitter& emitter, int v) { return emitter.WriteIntegralType(v); }
+ inline Emitter& operator << (Emitter& emitter, unsigned int v) { return emitter.WriteIntegralType(v); }
+ inline Emitter& operator << (Emitter& emitter, short v) { return emitter.WriteIntegralType(v); }
+ inline Emitter& operator << (Emitter& emitter, unsigned short v) { return emitter.WriteIntegralType(v); }
+ inline Emitter& operator << (Emitter& emitter, long v) { return emitter.WriteIntegralType(v); }
+ inline Emitter& operator << (Emitter& emitter, unsigned long v) { return emitter.WriteIntegralType(v); }
+ inline Emitter& operator << (Emitter& emitter, long long v) { return emitter.WriteIntegralType(v); }
+ inline Emitter& operator << (Emitter& emitter, unsigned long long v) { return emitter.WriteIntegralType(v); }
+
+ inline Emitter& operator << (Emitter& emitter, float v) { return emitter.WriteStreamable(v); }
+ inline Emitter& operator << (Emitter& emitter, double v) { return emitter.WriteStreamable(v); }
+
+ inline Emitter& operator << (Emitter& emitter, EMITTER_MANIP value) {
+ return emitter.SetLocalValue(value);
+ }
+
+ inline Emitter& operator << (Emitter& emitter, _Indent indent) {
+ return emitter.SetLocalIndent(indent);
+ }
+
+ inline Emitter& operator << (Emitter& emitter, _Precision precision) {
+ return emitter.SetLocalPrecision(precision);
+ }
+}
+
+#endif // EMITTER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
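
The Emitter above is a stream-style writer driven by the manipulators declared
in emittermanip.h; a short sketch emitting a flat map (the key names are
hypothetical):

    #include <yaml-cpp/yaml.h>
    #include <iostream>

    int main() {
        YAML::Emitter out;
        out << YAML::BeginMap;
        out << YAML::Key << "port"    << YAML::Value << 4500;
        out << YAML::Key << "enabled" << YAML::Value << true;
        out << YAML::EndMap;
        std::cout << out.c_str() << std::endl;
        return 0;
    }
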
diff --git a/yaml-cpp/include/yaml-cpp/emittermanip.h b/yaml-cpp/include/yaml-cpp/emittermanip.h
new file mode 100755
index 00000000..a8ec64a4
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/emittermanip.h
@@ -0,0 +1,149 @@
+#ifndef EMITTERMANIP_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define EMITTERMANIP_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <string>
+
+namespace YAML
+{
+ enum EMITTER_MANIP {
+ // general manipulators
+ Auto,
+ TagByKind,
+ Newline,
+
+ // output character set
+ EmitNonAscii,
+ EscapeNonAscii,
+
+ // string manipulators
+ // Auto, // duplicate
+ SingleQuoted,
+ DoubleQuoted,
+ Literal,
+
+ // bool manipulators
+ YesNoBool, // yes, no
+ TrueFalseBool, // true, false
+ OnOffBool, // on, off
+ UpperCase, // TRUE, N
+ LowerCase, // f, yes
+ CamelCase, // No, Off
+ LongBool, // yes, On
+ ShortBool, // y, t
+
+ // int manipulators
+ Dec,
+ Hex,
+ Oct,
+
+ // document manipulators
+ BeginDoc,
+ EndDoc,
+
+ // sequence manipulators
+ BeginSeq,
+ EndSeq,
+ Flow,
+ Block,
+
+ // map manipulators
+ BeginMap,
+ EndMap,
+ Key,
+ Value,
+ // Flow, // duplicate
+ // Block, // duplicate
+ // Auto, // duplicate
+ LongKey
+ };
+
+ struct _Indent {
+ _Indent(int value_): value(value_) {}
+ int value;
+ };
+
+ inline _Indent Indent(int value) {
+ return _Indent(value);
+ }
+
+ struct _Alias {
+ _Alias(const std::string& content_): content(content_) {}
+ std::string content;
+ };
+
+ inline _Alias Alias(const std::string content) {
+ return _Alias(content);
+ }
+
+ struct _Anchor {
+ _Anchor(const std::string& content_): content(content_) {}
+ std::string content;
+ };
+
+ inline _Anchor Anchor(const std::string content) {
+ return _Anchor(content);
+ }
+
+ struct _Tag {
+ struct Type { enum value { Verbatim, PrimaryHandle, NamedHandle }; };
+
+ explicit _Tag(const std::string& prefix_, const std::string& content_, Type::value type_)
+ : prefix(prefix_), content(content_), type(type_)
+ {
+ }
+ std::string prefix;
+ std::string content;
+ Type::value type;
+ };
+
+ inline _Tag VerbatimTag(const std::string content) {
+ return _Tag("", content, _Tag::Type::Verbatim);
+ }
+
+ inline _Tag LocalTag(const std::string content) {
+ return _Tag("", content, _Tag::Type::PrimaryHandle);
+ }
+
+ inline _Tag LocalTag(const std::string& prefix, const std::string content) {
+ return _Tag(prefix, content, _Tag::Type::NamedHandle);
+ }
+
+ inline _Tag SecondaryTag(const std::string content) {
+ return _Tag("", content, _Tag::Type::NamedHandle);
+ }
+
+ struct _Comment {
+ _Comment(const std::string& content_): content(content_) {}
+ std::string content;
+ };
+
+ inline _Comment Comment(const std::string content) {
+ return _Comment(content);
+ }
+
+ struct _Precision {
+ _Precision(int floatPrecision_, int doublePrecision_): floatPrecision(floatPrecision_), doublePrecision(doublePrecision_) {}
+
+ int floatPrecision;
+ int doublePrecision;
+ };
+
+ inline _Precision FloatPrecision(int n) {
+ return _Precision(n, -1);
+ }
+
+ inline _Precision DoublePrecision(int n) {
+ return _Precision(-1, n);
+ }
+
+ inline _Precision Precision(int n) {
+ return _Precision(n, n);
+ }
+}
+
+#endif // EMITTERMANIP_H_62B23520_7C8E_11DE_8A39_0800200C9A66
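
A minimal usage sketch for the manipulators declared above, assuming the old-API YAML::Emitter added elsewhere in this patch; the keys and values are invented for illustration.

#include "yaml-cpp/yaml.h"
#include <iostream>

int main() {
    YAML::Emitter out;
    out << YAML::BeginMap;
    out << YAML::Key << "mode"    << YAML::Value << YAML::DoubleQuoted << "flow";
    out << YAML::Key << "enabled" << YAML::Value << YAML::OnOffBool << true;
    out << YAML::Key << "ratio"   << YAML::Value << YAML::FloatPrecision(3) << 0.12345f;
    out << YAML::EndMap;
    std::cout << out.c_str() << std::endl;   // block map with the requested local formats
    return 0;
}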
diff --git a/yaml-cpp/include/yaml-cpp/eventhandler.h b/yaml-cpp/include/yaml-cpp/eventhandler.h
new file mode 100755
index 00000000..3173a1fb
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/eventhandler.h
@@ -0,0 +1,36 @@
+#ifndef EVENTHANDLER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define EVENTHANDLER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include "yaml-cpp/anchor.h"
+#include <string>
+
+namespace YAML
+{
+ struct Mark;
+
+ class EventHandler
+ {
+ public:
+ virtual ~EventHandler() {}
+
+ virtual void OnDocumentStart(const Mark& mark) = 0;
+ virtual void OnDocumentEnd() = 0;
+
+ virtual void OnNull(const Mark& mark, anchor_t anchor) = 0;
+ virtual void OnAlias(const Mark& mark, anchor_t anchor) = 0;
+ virtual void OnScalar(const Mark& mark, const std::string& tag, anchor_t anchor, const std::string& value) = 0;
+
+ virtual void OnSequenceStart(const Mark& mark, const std::string& tag, anchor_t anchor) = 0;
+ virtual void OnSequenceEnd() = 0;
+
+ virtual void OnMapStart(const Mark& mark, const std::string& tag, anchor_t anchor) = 0;
+ virtual void OnMapEnd() = 0;
+ };
+}
+
+#endif // EVENTHANDLER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
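An illustrative sketch of implementing the EventHandler interface above and driving it with Parser::HandleNextDocument (declared later in this patch); the printing is invented for the example.

#include "yaml-cpp/yaml.h"
#include "yaml-cpp/eventhandler.h"
#include <iostream>
#include <sstream>

class PrintingHandler : public YAML::EventHandler {
public:
    virtual void OnDocumentStart(const YAML::Mark&) { std::cout << "doc start\n"; }
    virtual void OnDocumentEnd()                    { std::cout << "doc end\n"; }
    virtual void OnNull(const YAML::Mark&, YAML::anchor_t)      { std::cout << "null\n"; }
    virtual void OnAlias(const YAML::Mark&, YAML::anchor_t a)   { std::cout << "alias " << a << "\n"; }
    virtual void OnScalar(const YAML::Mark&, const std::string&, YAML::anchor_t, const std::string& v) { std::cout << "scalar: " << v << "\n"; }
    virtual void OnSequenceStart(const YAML::Mark&, const std::string&, YAML::anchor_t) { std::cout << "seq start\n"; }
    virtual void OnSequenceEnd()                    { std::cout << "seq end\n"; }
    virtual void OnMapStart(const YAML::Mark&, const std::string&, YAML::anchor_t) { std::cout << "map start\n"; }
    virtual void OnMapEnd()                         { std::cout << "map end\n"; }
};

int main() {
    std::stringstream input("ports: [0, 1]");
    YAML::Parser parser(input);
    PrintingHandler handler;
    while(parser.HandleNextDocument(handler)) {}   // one pass per document in the stream
    return 0;
}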
diff --git a/yaml-cpp/include/yaml-cpp/exceptions.h b/yaml-cpp/include/yaml-cpp/exceptions.h
new file mode 100755
index 00000000..394d5868
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/exceptions.h
@@ -0,0 +1,164 @@
+#ifndef EXCEPTIONS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define EXCEPTIONS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/mark.h"
+#include "yaml-cpp/traits.h"
+#include <stdexcept>
+#include <string>
+#include <sstream>
+
+namespace YAML
+{
+ // error messages
+ namespace ErrorMsg
+ {
+ const char * const YAML_DIRECTIVE_ARGS = "YAML directives must have exactly one argument";
+ const char * const YAML_VERSION = "bad YAML version: ";
+ const char * const YAML_MAJOR_VERSION = "YAML major version too large";
+ const char * const REPEATED_YAML_DIRECTIVE= "repeated YAML directive";
+ const char * const TAG_DIRECTIVE_ARGS = "TAG directives must have exactly two arguments";
+ const char * const REPEATED_TAG_DIRECTIVE = "repeated TAG directive";
+ const char * const CHAR_IN_TAG_HANDLE = "illegal character found while scanning tag handle";
+ const char * const TAG_WITH_NO_SUFFIX = "tag handle with no suffix";
+ const char * const END_OF_VERBATIM_TAG = "end of verbatim tag not found";
+ const char * const END_OF_MAP = "end of map not found";
+ const char * const END_OF_MAP_FLOW = "end of map flow not found";
+ const char * const END_OF_SEQ = "end of sequence not found";
+ const char * const END_OF_SEQ_FLOW = "end of sequence flow not found";
+ const char * const MULTIPLE_TAGS = "cannot assign multiple tags to the same node";
+ const char * const MULTIPLE_ANCHORS = "cannot assign multiple anchors to the same node";
+ const char * const MULTIPLE_ALIASES = "cannot assign multiple aliases to the same node";
+ const char * const ALIAS_CONTENT = "aliases can't have any content, *including* tags";
+ const char * const INVALID_HEX = "bad character found while scanning hex number";
+ const char * const INVALID_UNICODE = "invalid unicode: ";
+ const char * const INVALID_ESCAPE = "unknown escape character: ";
+ const char * const UNKNOWN_TOKEN = "unknown token";
+ const char * const DOC_IN_SCALAR = "illegal document indicator in scalar";
+ const char * const EOF_IN_SCALAR = "illegal EOF in scalar";
+ const char * const CHAR_IN_SCALAR = "illegal character in scalar";
+ const char * const TAB_IN_INDENTATION = "illegal tab when looking for indentation";
+ const char * const FLOW_END = "illegal flow end";
+ const char * const BLOCK_ENTRY = "illegal block entry";
+ const char * const MAP_KEY = "illegal map key";
+ const char * const MAP_VALUE = "illegal map value";
+ const char * const ALIAS_NOT_FOUND = "alias not found after *";
+ const char * const ANCHOR_NOT_FOUND = "anchor not found after &";
+ const char * const CHAR_IN_ALIAS = "illegal character found while scanning alias";
+ const char * const CHAR_IN_ANCHOR = "illegal character found while scanning anchor";
+ const char * const ZERO_INDENT_IN_BLOCK = "cannot set zero indentation for a block scalar";
+ const char * const CHAR_IN_BLOCK = "unexpected character in block scalar";
+ const char * const AMBIGUOUS_ANCHOR = "cannot assign the same alias to multiple nodes";
+ const char * const UNKNOWN_ANCHOR = "the referenced anchor is not defined";
+
+ const char * const INVALID_SCALAR = "invalid scalar";
+ const char * const KEY_NOT_FOUND = "key not found";
+ const char * const BAD_DEREFERENCE = "bad dereference";
+
+ const char * const UNMATCHED_GROUP_TAG = "unmatched group tag";
+ const char * const UNEXPECTED_END_SEQ = "unexpected end sequence token";
+ const char * const UNEXPECTED_END_MAP = "unexpected end map token";
+ const char * const SINGLE_QUOTED_CHAR = "invalid character in single-quoted string";
+ const char * const INVALID_ANCHOR = "invalid anchor";
+ const char * const INVALID_ALIAS = "invalid alias";
+ const char * const INVALID_TAG = "invalid tag";
+ const char * const EXPECTED_KEY_TOKEN = "expected key token";
+ const char * const EXPECTED_VALUE_TOKEN = "expected value token";
+ const char * const UNEXPECTED_KEY_TOKEN = "unexpected key token";
+ const char * const UNEXPECTED_VALUE_TOKEN = "unexpected value token";
+
+ template <typename T>
+ inline const std::string KEY_NOT_FOUND_WITH_KEY(const T&, typename disable_if<is_numeric<T> >::type * = 0) {
+ return KEY_NOT_FOUND;
+ }
+
+ inline const std::string KEY_NOT_FOUND_WITH_KEY(const std::string& key) {
+ std::stringstream stream;
+ stream << KEY_NOT_FOUND << ": " << key;
+ return stream.str();
+ }
+
+ template <typename T>
+ inline const std::string KEY_NOT_FOUND_WITH_KEY(const T& key, typename enable_if<is_numeric<T> >::type * = 0) {
+ std::stringstream stream;
+ stream << KEY_NOT_FOUND << ": " << key;
+ return stream.str();
+ }
+ }
+
+ class Exception: public std::runtime_error {
+ public:
+ Exception(const Mark& mark_, const std::string& msg_)
+ : std::runtime_error(build_what(mark_, msg_)), mark(mark_), msg(msg_) {}
+ virtual ~Exception() throw() {}
+
+ Mark mark;
+ std::string msg;
+
+ private:
+ static const std::string build_what(const Mark& mark, const std::string& msg) {
+ std::stringstream output;
+ output << "yaml-cpp: error at line " << mark.line+1 << ", column " << mark.column+1 << ": " << msg;
+ return output.str();
+ }
+ };
+
+ class ParserException: public Exception {
+ public:
+ ParserException(const Mark& mark_, const std::string& msg_)
+ : Exception(mark_, msg_) {}
+ };
+
+ class RepresentationException: public Exception {
+ public:
+ RepresentationException(const Mark& mark_, const std::string& msg_)
+ : Exception(mark_, msg_) {}
+ };
+
+ // representation exceptions
+ class InvalidScalar: public RepresentationException {
+ public:
+ InvalidScalar(const Mark& mark_)
+ : RepresentationException(mark_, ErrorMsg::INVALID_SCALAR) {}
+ };
+
+ class KeyNotFound: public RepresentationException {
+ public:
+ template <typename T>
+ KeyNotFound(const Mark& mark_, const T& key_)
+ : RepresentationException(mark_, ErrorMsg::KEY_NOT_FOUND_WITH_KEY(key_)) {}
+ };
+
+ template <typename T>
+ class TypedKeyNotFound: public KeyNotFound {
+ public:
+ TypedKeyNotFound(const Mark& mark_, const T& key_)
+ : KeyNotFound(mark_, key_), key(key_) {}
+ virtual ~TypedKeyNotFound() throw() {}
+
+ T key;
+ };
+
+ template <typename T>
+ inline TypedKeyNotFound <T> MakeTypedKeyNotFound(const Mark& mark, const T& key) {
+ return TypedKeyNotFound <T> (mark, key);
+ }
+
+ class BadDereference: public RepresentationException {
+ public:
+ BadDereference()
+ : RepresentationException(Mark::null(), ErrorMsg::BAD_DEREFERENCE) {}
+ };
+
+ class EmitterException: public Exception {
+ public:
+ EmitterException(const std::string& msg_)
+ : Exception(Mark::null(), msg_) {}
+ };
+}
+
+#endif // EXCEPTIONS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
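
An illustrative error-handling sketch for the exception hierarchy above, assuming the old-API Parser/Node from this patch; the malformed input is invented to force a ParserException.

#include "yaml-cpp/yaml.h"
#include <iostream>
#include <sstream>

int main() {
    std::stringstream input("key: [unclosed flow sequence");
    try {
        YAML::Parser parser(input);
        YAML::Node doc;
        parser.GetNextDocument(doc);
    } catch(const YAML::ParserException& e) {
        // what() already embeds the position; mark and msg are also available separately
        std::cerr << "parse failed at line " << e.mark.line + 1
                  << ", column " << e.mark.column + 1 << ": " << e.msg << std::endl;
        return 1;
    }
    return 0;
}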
diff --git a/yaml-cpp/include/yaml-cpp/iterator.h b/yaml-cpp/include/yaml-cpp/iterator.h
new file mode 100755
index 00000000..400ee340
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/iterator.h
@@ -0,0 +1,40 @@
+#ifndef ITERATOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define ITERATOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include "yaml-cpp/dll.h"
+#include <memory>
+
+namespace YAML
+{
+ class Node;
+ struct IterPriv;
+
+ class YAML_CPP_API Iterator
+ {
+ public:
+ Iterator();
+ Iterator(std::auto_ptr<IterPriv> pData);
+ Iterator(const Iterator& rhs);
+ ~Iterator();
+
+ Iterator& operator = (const Iterator& rhs);
+ Iterator& operator ++ ();
+ Iterator operator ++ (int);
+ const Node& operator * () const;
+ const Node *operator -> () const;
+ const Node& first() const;
+ const Node& second() const;
+
+ friend YAML_CPP_API bool operator == (const Iterator& it, const Iterator& jt);
+ friend YAML_CPP_API bool operator != (const Iterator& it, const Iterator& jt);
+
+ private:
+ std::auto_ptr<IterPriv> m_pData;
+ };
+}
+
+#endif // ITERATOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/ltnode.h b/yaml-cpp/include/yaml-cpp/ltnode.h
new file mode 100755
index 00000000..30b4f950
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/ltnode.h
@@ -0,0 +1,18 @@
+#ifndef LTNODE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define LTNODE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+namespace YAML
+{
+ class Node;
+
+ struct ltnode {
+ bool operator()(const Node *pNode1, const Node *pNode2) const;
+ };
+}
+
+#endif // LTNODE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/mark.h b/yaml-cpp/include/yaml-cpp/mark.h
new file mode 100755
index 00000000..7c80fbcb
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/mark.h
@@ -0,0 +1,26 @@
+#ifndef MARK_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define MARK_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/dll.h"
+
+namespace YAML
+{
+ struct YAML_CPP_API Mark {
+ Mark(): pos(0), line(0), column(0) {}
+
+ static const Mark null() { return Mark(-1, -1, -1); }
+
+ int pos;
+ int line, column;
+
+ private:
+ Mark(int pos_, int line_, int column_): pos(pos_), line(line_), column(column_) {}
+ };
+}
+
+#endif // MARK_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/node.h b/yaml-cpp/include/yaml-cpp/node.h
new file mode 100755
index 00000000..e78190e0
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/node.h
@@ -0,0 +1,135 @@
+#ifndef NODE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define NODE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/dll.h"
+#include "yaml-cpp/exceptions.h"
+#include "yaml-cpp/mark.h"
+#include "yaml-cpp/noncopyable.h"
+#include "yaml-cpp/conversion.h"
+#include "yaml-cpp/iterator.h"
+#include "yaml-cpp/ltnode.h"
+#include <iostream>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace YAML
+{
+ class AliasManager;
+ class Content;
+ class NodeOwnership;
+ class Scanner;
+ class Emitter;
+ class EventHandler;
+
+ struct NodeType { enum value { Null, Scalar, Sequence, Map }; };
+
+ class YAML_CPP_API Node: private noncopyable
+ {
+ public:
+ friend class NodeOwnership;
+ friend class NodeBuilder;
+
+ Node();
+ ~Node();
+
+ void Clear();
+ std::auto_ptr<Node> Clone() const;
+ void EmitEvents(EventHandler& eventHandler) const;
+ void EmitEvents(AliasManager& am, EventHandler& eventHandler) const;
+
+ NodeType::value Type() const { return m_type; }
+ bool IsAliased() const;
+
+ // file location of start of this node
+ const Mark GetMark() const { return m_mark; }
+
+ // accessors
+ Iterator begin() const;
+ Iterator end() const;
+ std::size_t size() const;
+
+ // extraction of scalars
+ bool GetScalar(std::string& s) const;
+
+ // we can specialize this for other values
+ template <typename T>
+ bool Read(T& value) const;
+
+ template <typename T>
+ const T to() const;
+
+ template <typename T>
+ friend YAML_CPP_API typename enable_if<is_scalar_convertible<T> >::type operator >> (const Node& node, T& value);
+
+ // retrieval for maps and sequences
+ template <typename T>
+ const Node *FindValue(const T& key) const;
+
+ template <typename T>
+ const Node& operator [] (const T& key) const;
+
+ // specific to maps
+ const Node *FindValue(const char *key) const;
+ const Node *FindValue(char *key) const;
+ const Node& operator [] (const char *key) const;
+ const Node& operator [] (char *key) const;
+
+ // for tags
+ const std::string& Tag() const { return m_tag; }
+
+ // emitting
+ friend YAML_CPP_API Emitter& operator << (Emitter& out, const Node& node);
+
+ // ordering
+ int Compare(const Node& rhs) const;
+ friend bool operator < (const Node& n1, const Node& n2);
+
+ private:
+ explicit Node(NodeOwnership& owner);
+ Node& CreateNode();
+
+ void Init(NodeType::value type, const Mark& mark, const std::string& tag);
+
+ void MarkAsAliased();
+ void SetScalarData(const std::string& data);
+ void Append(Node& node);
+ void Insert(Node& key, Node& value);
+
+ // helper for sequences
+ template <typename, bool> friend struct _FindFromNodeAtIndex;
+ const Node *FindAtIndex(std::size_t i) const;
+
+ // helper for maps
+ template <typename T>
+ const Node& GetValue(const T& key) const;
+
+ template <typename T>
+ const Node *FindValueForKey(const T& key) const;
+
+ private:
+ std::auto_ptr<NodeOwnership> m_pOwnership;
+
+ Mark m_mark;
+ std::string m_tag;
+
+ typedef std::vector<Node *> node_seq;
+ typedef std::map<Node *, Node *, ltnode> node_map;
+
+ NodeType::value m_type;
+ std::string m_scalarData;
+ node_seq m_seqData;
+ node_map m_mapData;
+ };
+}
+
+#include "yaml-cpp/nodeimpl.h"
+#include "yaml-cpp/nodereadimpl.h"
+
+#endif // NODE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
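
An illustrative lookup sketch for the old-API Node above: FindValue for optional keys (returns 0 when absent), operator[] for required ones (throws KeyNotFound), and to<T>() for scalar extraction; the document text is invented.

#include "yaml-cpp/yaml.h"
#include <iostream>
#include <sstream>

int main() {
    std::stringstream input("name: trex\nports: [0, 1]");
    YAML::Parser parser(input);
    YAML::Node doc;
    parser.GetNextDocument(doc);

    std::string name = doc["name"].to<std::string>();        // throws KeyNotFound if absent
    if(const YAML::Node *pPorts = doc.FindValue("ports")) {  // returns 0 if absent
        for(unsigned i = 0; i < pPorts->size(); i++)
            std::cout << name << " port " << (*pPorts)[i].to<int>() << "\n";
    }
    return 0;
}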
diff --git a/yaml-cpp/include/yaml-cpp/nodeimpl.h b/yaml-cpp/include/yaml-cpp/nodeimpl.h
new file mode 100755
index 00000000..5ca7ddba
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/nodeimpl.h
@@ -0,0 +1,85 @@
+#ifndef NODEIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define NODEIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/nodeutil.h"
+#include <cassert>
+
+namespace YAML
+{
+ // implementation of templated things
+ template <typename T>
+ inline const T Node::to() const {
+ T value;
+ *this >> value;
+ return value;
+ }
+
+ template <typename T>
+ inline typename enable_if<is_scalar_convertible<T> >::type operator >> (const Node& node, T& value) {
+ if(!ConvertScalar(node, value))
+ throw InvalidScalar(node.m_mark);
+ }
+
+ template <typename T>
+ inline const Node *Node::FindValue(const T& key) const {
+ switch(m_type) {
+ case NodeType::Null:
+ case NodeType::Scalar:
+ throw BadDereference();
+ case NodeType::Sequence:
+ return FindFromNodeAtIndex(*this, key);
+ case NodeType::Map:
+ return FindValueForKey(key);
+ }
+ assert(false);
+ throw BadDereference();
+ }
+
+ template <typename T>
+ inline const Node *Node::FindValueForKey(const T& key) const {
+ for(Iterator it=begin();it!=end();++it) {
+ T t;
+ if(it.first().Read(t)) {
+ if(key == t)
+ return &it.second();
+ }
+ }
+
+ return 0;
+ }
+
+ template <typename T>
+ inline const Node& Node::GetValue(const T& key) const {
+ if(const Node *pValue = FindValue(key))
+ return *pValue;
+ throw MakeTypedKeyNotFound(m_mark, key);
+ }
+
+ template <typename T>
+ inline const Node& Node::operator [] (const T& key) const {
+ return GetValue(key);
+ }
+
+ inline const Node *Node::FindValue(const char *key) const {
+ return FindValue(std::string(key));
+ }
+
+ inline const Node *Node::FindValue(char *key) const {
+ return FindValue(std::string(key));
+ }
+
+ inline const Node& Node::operator [] (const char *key) const {
+ return GetValue(std::string(key));
+ }
+
+ inline const Node& Node::operator [] (char *key) const {
+ return GetValue(std::string(key));
+ }
+}
+
+#endif // NODEIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/nodereadimpl.h b/yaml-cpp/include/yaml-cpp/nodereadimpl.h
new file mode 100755
index 00000000..6838dc5a
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/nodereadimpl.h
@@ -0,0 +1,86 @@
+#ifndef NODEREADIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define NODEREADIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+namespace YAML
+{
+ // implementation for Node::Read
+ // (the goal is to call ConvertScalar if we can, and fall back to operator >> if not)
+ // thanks to litb from stackoverflow.com
+ // http://stackoverflow.com/questions/1386183/how-to-call-a-templated-function-if-it-exists-and-something-else-otherwise/1386390#1386390
+
+ // Note: this doesn't work on gcc 3.2, but does on gcc 3.4 and above. I'm not sure about 3.3.
+
+#if __GNUC__ && (__GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ <= 3))
+ // trick doesn't work? Just fall back to ConvertScalar.
+ // This means that we can't use any user-defined types as keys in a map
+ template <typename T>
+ inline bool Node::Read(T& value) const {
+ return ConvertScalar(*this, value);
+ }
+#else
+ // usual case: the trick!
+ template<bool>
+ struct read_impl;
+
+ // ConvertScalar available
+ template<>
+ struct read_impl<true> {
+ template<typename T>
+ static bool read(const Node& node, T& value) {
+ return ConvertScalar(node, value);
+ }
+ };
+
+ // ConvertScalar not available
+ template<>
+ struct read_impl<false> {
+ template<typename T>
+ static bool read(const Node& node, T& value) {
+ try {
+ node >> value;
+ } catch(const Exception&) {
+ return false;
+ }
+ return true;
+ }
+ };
+
+ namespace fallback {
+ // sizeof > 1
+ struct flag { char c[2]; };
+ flag Convert(...);
+
+ int operator,(flag, flag);
+
+ template<typename T>
+ char operator,(flag, T const&);
+
+ char operator,(int, flag);
+ int operator,(char, flag);
+ }
+
+ template <typename T>
+ inline bool Node::Read(T& value) const {
+ using namespace fallback;
+
+ return read_impl<sizeof (fallback::flag(), Convert(std::string(), value), fallback::flag()) != 1>::read(*this, value);
+ }
+#endif // done with trick
+
+ // the main conversion function
+ template <typename T>
+ inline bool ConvertScalar(const Node& node, T& value) {
+ std::string scalar;
+ if(!node.GetScalar(scalar))
+ return false;
+
+ return Convert(scalar, value);
+ }
+}
+
+#endif // NODEREADIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
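
The comma-operator/sizeof detection trick above, shown standalone; the Widget type and the lone Convert overload are invented for the illustration, and only declarations are needed because sizeof is unevaluated.

#include <iostream>
#include <string>

struct Widget {};
bool Convert(const std::string&, int&);      // a "real" overload exists for int only

namespace fallback {
    struct flag { char c[2]; };              // sizeof > 1
    flag Convert(...);
    int  operator,(flag, flag);
    template<typename T> char operator,(flag, T const&);
    char operator,(int, flag);
    int  operator,(char, flag);
}

// mirrors Node::Read: true when Convert(std::string, T&) resolves to a real overload
template <typename T>
bool has_convert(T& value) {
    using namespace fallback;
    return sizeof (fallback::flag(), Convert(std::string(), value), fallback::flag()) != 1;
}

int main() {
    int n = 0;
    Widget w;
    std::cout << has_convert(n) << " " << has_convert(w) << std::endl;   // prints "1 0"
    return 0;
}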
diff --git a/yaml-cpp/include/yaml-cpp/nodeutil.h b/yaml-cpp/include/yaml-cpp/nodeutil.h
new file mode 100755
index 00000000..d0c01d27
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/nodeutil.h
@@ -0,0 +1,62 @@
+#ifndef NODEUTIL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define NODEUTIL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+namespace YAML
+{
+ template <typename T, typename U>
+ struct is_same_type {
+ enum { value = false };
+ };
+
+ template <typename T>
+ struct is_same_type<T, T> {
+ enum { value = true };
+ };
+
+ template <typename T, bool check>
+ struct is_index_type_with_check {
+ enum { value = false };
+ };
+
+ template <> struct is_index_type_with_check<std::size_t, false> { enum { value = true }; };
+
+#define MAKE_INDEX_TYPE(Type) \
+ template <> struct is_index_type_with_check<Type, is_same_type<Type, std::size_t>::value> { enum { value = true }; }
+
+ MAKE_INDEX_TYPE(int);
+ MAKE_INDEX_TYPE(unsigned);
+ MAKE_INDEX_TYPE(short);
+ MAKE_INDEX_TYPE(unsigned short);
+ MAKE_INDEX_TYPE(long);
+ MAKE_INDEX_TYPE(unsigned long);
+
+#undef MAKE_INDEX_TYPE
+
+ template <typename T>
+ struct is_index_type: public is_index_type_with_check<T, false> {};
+
+ // messing around with template stuff to get the right overload for operator [] for a sequence
+ template <typename T, bool b>
+ struct _FindFromNodeAtIndex {
+ const Node *pRet;
+ _FindFromNodeAtIndex(const Node&, const T&): pRet(0) {}
+ };
+
+ template <typename T>
+ struct _FindFromNodeAtIndex<T, true> {
+ const Node *pRet;
+ _FindFromNodeAtIndex(const Node& node, const T& key): pRet(node.FindAtIndex(static_cast<std::size_t>(key))) {}
+ };
+
+ template <typename T>
+ inline const Node *FindFromNodeAtIndex(const Node& node, const T& key) {
+ return _FindFromNodeAtIndex<T, is_index_type<T>::value>(node, key).pRet;
+ }
+}
+
+#endif // NODEUTIL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/noncopyable.h b/yaml-cpp/include/yaml-cpp/noncopyable.h
new file mode 100755
index 00000000..8e61e433
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/noncopyable.h
@@ -0,0 +1,25 @@
+#ifndef NONCOPYABLE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define NONCOPYABLE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include "yaml-cpp/dll.h"
+
+namespace YAML
+{
+ // this is basically boost::noncopyable
+ class YAML_CPP_API noncopyable
+ {
+ protected:
+ noncopyable() {}
+ ~noncopyable() {}
+
+ private:
+ noncopyable(const noncopyable&);
+ const noncopyable& operator = (const noncopyable&);
+ };
+}
+
+#endif // NONCOPYABLE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/null.h b/yaml-cpp/include/yaml-cpp/null.h
new file mode 100755
index 00000000..711f18c3
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/null.h
@@ -0,0 +1,25 @@
+#ifndef NULL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define NULL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/dll.h"
+
+namespace YAML
+{
+ class Node;
+
+ struct YAML_CPP_API _Null {};
+ inline bool operator == (const _Null&, const _Null&) { return true; }
+ inline bool operator != (const _Null&, const _Null&) { return false; }
+
+ YAML_CPP_API bool IsNull(const Node& node); // old API only
+
+ extern YAML_CPP_API _Null Null;
+}
+
+#endif // NULL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
diff --git a/yaml-cpp/include/yaml-cpp/ostream.h b/yaml-cpp/include/yaml-cpp/ostream.h
new file mode 100755
index 00000000..65839b1b
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/ostream.h
@@ -0,0 +1,40 @@
+#ifndef OSTREAM_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define OSTREAM_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <string>
+
+namespace YAML
+{
+ class ostream
+ {
+ public:
+ ostream();
+ ~ostream();
+
+ void reserve(unsigned size);
+ void put(char ch);
+ const char *str() const { return m_buffer; }
+
+ unsigned row() const { return m_row; }
+ unsigned col() const { return m_col; }
+ unsigned pos() const { return m_pos; }
+
+ private:
+ char *m_buffer;
+ unsigned m_pos;
+ unsigned m_size;
+
+ unsigned m_row, m_col;
+ };
+
+ ostream& operator << (ostream& out, const char *str);
+ ostream& operator << (ostream& out, const std::string& str);
+ ostream& operator << (ostream& out, char ch);
+}
+
+#endif // OSTREAM_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/parser.h b/yaml-cpp/include/yaml-cpp/parser.h
new file mode 100755
index 00000000..f71cdff4
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/parser.h
@@ -0,0 +1,51 @@
+#ifndef PARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define PARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/dll.h"
+#include "yaml-cpp/noncopyable.h"
+#include <ios>
+#include <memory>
+
+namespace YAML
+{
+ struct Directives;
+ struct Mark;
+ struct Token;
+ class EventHandler;
+ class Node;
+ class Scanner;
+
+ class YAML_CPP_API Parser: private noncopyable
+ {
+ public:
+ Parser();
+ Parser(std::istream& in);
+ ~Parser();
+
+ operator bool() const;
+
+ void Load(std::istream& in);
+ bool HandleNextDocument(EventHandler& eventHandler);
+
+ bool GetNextDocument(Node& document); // old API only
+
+ void PrintTokens(std::ostream& out);
+
+ private:
+ void ParseDirectives();
+ void HandleDirective(const Token& token);
+ void HandleYamlDirective(const Token& token);
+ void HandleTagDirective(const Token& token);
+
+ private:
+ std::auto_ptr<Scanner> m_pScanner;
+ std::auto_ptr<Directives> m_pDirectives;
+ };
+}
+
+#endif // PARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/stlemitter.h b/yaml-cpp/include/yaml-cpp/stlemitter.h
new file mode 100755
index 00000000..f8ff20ea
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/stlemitter.h
@@ -0,0 +1,51 @@
+#ifndef STLEMITTER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define STLEMITTER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <vector>
+#include <list>
+#include <set>
+#include <map>
+
+namespace YAML
+{
+ template<typename Seq>
+ inline Emitter& EmitSeq(Emitter& emitter, const Seq& seq) {
+ emitter << BeginSeq;
+ for(typename Seq::const_iterator it=seq.begin();it!=seq.end();++it)
+ emitter << *it;
+ emitter << EndSeq;
+ return emitter;
+ }
+
+ template<typename T>
+ inline Emitter& operator << (Emitter& emitter, const std::vector<T>& v) {
+ return EmitSeq(emitter, v);
+ }
+
+ template<typename T>
+ inline Emitter& operator << (Emitter& emitter, const std::list<T>& v) {
+ return EmitSeq(emitter, v);
+ }
+
+ template<typename T>
+ inline Emitter& operator << (Emitter& emitter, const std::set<T>& v) {
+ return EmitSeq(emitter, v);
+ }
+
+ template <typename K, typename V>
+ inline Emitter& operator << (Emitter& emitter, const std::map<K, V>& m) {
+ typedef typename std::map <K, V> map;
+ emitter << BeginMap;
+ for(typename map::const_iterator it=m.begin();it!=m.end();++it)
+ emitter << Key << it->first << Value << it->second;
+ emitter << EndMap;
+ return emitter;
+ }
+}
+
+#endif // STLEMITTER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
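
An illustrative sketch of the STL overloads above; nesting a vector inside a map recurses through the same operators. The container contents are invented.

#include "yaml-cpp/yaml.h"
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
    std::map<std::string, std::vector<int> > ports;   // C++03-style spacing, matching the library
    ports["client"].push_back(0);
    ports["server"].push_back(1);

    YAML::Emitter out;
    out << ports;          // map overload emits Key/Value pairs; vector overload emits each sequence
    std::cout << out.c_str() << std::endl;
    return 0;
}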
diff --git a/yaml-cpp/include/yaml-cpp/stlnode.h b/yaml-cpp/include/yaml-cpp/stlnode.h
new file mode 100755
index 00000000..40d4ae79
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/stlnode.h
@@ -0,0 +1,38 @@
+#ifndef STLNODE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define STLNODE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <vector>
+#include <map>
+
+namespace YAML
+{
+ template <typename T>
+ void operator >> (const Node& node, std::vector<T>& v)
+ {
+ v.clear();
+ v.resize(node.size());
+ for(unsigned i=0;i<node.size();++i)
+ node[i] >> v[i];
+ }
+
+
+ template <typename K, typename V>
+ void operator >> (const Node& node, std::map<K, V>& m)
+ {
+ m.clear();
+ for(Iterator it=node.begin();it!=node.end();++it) {
+ K k;
+ V v;
+ it.first() >> k;
+ it.second() >> v;
+ m[k] = v;
+ }
+ }
+}
+
+#endif // STLNODE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/include/yaml-cpp/traits.h b/yaml-cpp/include/yaml-cpp/traits.h
new file mode 100755
index 00000000..09eead44
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/traits.h
@@ -0,0 +1,57 @@
+#ifndef TRAITS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define TRAITS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+namespace YAML
+{
+ template <typename>
+ struct is_numeric { enum { value = false }; };
+
+ template <> struct is_numeric <char> { enum { value = true }; };
+ template <> struct is_numeric <unsigned char> { enum { value = true }; };
+ template <> struct is_numeric <int> { enum { value = true }; };
+ template <> struct is_numeric <unsigned int> { enum { value = true }; };
+ template <> struct is_numeric <long int> { enum { value = true }; };
+ template <> struct is_numeric <unsigned long int> { enum { value = true }; };
+ template <> struct is_numeric <short int> { enum { value = true }; };
+ template <> struct is_numeric <unsigned short int> { enum { value = true }; };
+#if defined(_MSC_VER) && (_MSC_VER < 1310)
+ template <> struct is_numeric <__int64> { enum { value = true }; };
+ template <> struct is_numeric <unsigned __int64> { enum { value = true }; };
+#else
+ template <> struct is_numeric <long long> { enum { value = true }; };
+ template <> struct is_numeric <unsigned long long> { enum { value = true }; };
+#endif
+ template <> struct is_numeric <float> { enum { value = true }; };
+ template <> struct is_numeric <double> { enum { value = true }; };
+ template <> struct is_numeric <long double> { enum { value = true }; };
+
+ template <bool, class T = void>
+ struct enable_if_c {
+ typedef T type;
+ };
+
+ template <class T>
+ struct enable_if_c<false, T> {};
+
+ template <class Cond, class T = void>
+ struct enable_if : public enable_if_c<Cond::value, T> {};
+
+ template <bool, class T = void>
+ struct disable_if_c {
+ typedef T type;
+ };
+
+ template <class T>
+ struct disable_if_c<true, T> {};
+
+ template <class Cond, class T = void>
+ struct disable_if : public disable_if_c<Cond::value, T> {};
+}
+
+#endif // TRAITS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
diff --git a/yaml-cpp/include/yaml-cpp/yaml.h b/yaml-cpp/include/yaml-cpp/yaml.h
new file mode 100755
index 00000000..29595553
--- /dev/null
+++ b/yaml-cpp/include/yaml-cpp/yaml.h
@@ -0,0 +1,23 @@
+#ifndef YAML_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define YAML_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#if defined(_MSC_VER)
+#pragma warning(disable:4786)
+#pragma warning(disable:4146)
+#pragma warning(disable:4244)
+#endif
+
+#include "yaml-cpp/parser.h"
+#include "yaml-cpp/emitter.h"
+#include "yaml-cpp/stlemitter.h"
+#include "yaml-cpp/exceptions.h"
+
+#include "yaml-cpp/node.h"
+#include "yaml-cpp/stlnode.h"
+#include "yaml-cpp/iterator.h"
+
+#endif // YAML_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/install.txt b/yaml-cpp/install.txt
new file mode 100755
index 00000000..93923624
--- /dev/null
+++ b/yaml-cpp/install.txt
@@ -0,0 +1,24 @@
+*** With CMake ***
+
+yaml-cpp uses CMake to support cross-platform building. In a UNIX-like system, the basic steps to build are:
+
+1. Download and install CMake (if you don't have root privileges, just install to a local directory, like ~/bin)
+
+2. From the source directory, run:
+
+mkdir build
+cd build
+cmake ..
+
+and then the usual
+
+make
+make install
+
+3. To clean up, just remove the 'build' directory.
+
+*** Without CMake ***
+
+If you don't want to use CMake, just add all .cpp files to a makefile. yaml-cpp does not need any special build settings, so no 'configure' file is necessary.
+
+(Note: this is pretty tedious. It's sooo much easier to use CMake.)
diff --git a/yaml-cpp/license.txt b/yaml-cpp/license.txt
new file mode 100755
index 00000000..5bd9e1a1
--- /dev/null
+++ b/yaml-cpp/license.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2008 Jesse Beder.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/yaml-cpp/src/aliasmanager.cpp b/yaml-cpp/src/aliasmanager.cpp
new file mode 100755
index 00000000..ed4d3b5a
--- /dev/null
+++ b/yaml-cpp/src/aliasmanager.cpp
@@ -0,0 +1,29 @@
+#include "yaml-cpp/aliasmanager.h"
+#include "yaml-cpp/node.h"
+#include <cassert>
+#include <sstream>
+
+namespace YAML
+{
+ AliasManager::AliasManager(): m_curAnchor(0)
+ {
+ }
+
+ void AliasManager::RegisterReference(const Node& node)
+ {
+ m_anchorByIdentity.insert(std::make_pair(&node, _CreateNewAnchor()));
+ }
+
+ anchor_t AliasManager::LookupAnchor(const Node& node) const
+ {
+ AnchorByIdentity::const_iterator it = m_anchorByIdentity.find(&node);
+ if(it == m_anchorByIdentity.end())
+ return 0;
+ return it->second;
+ }
+
+ anchor_t AliasManager::_CreateNewAnchor()
+ {
+ return ++m_curAnchor;
+ }
+}
diff --git a/yaml-cpp/src/binary.cpp b/yaml-cpp/src/binary.cpp
new file mode 100755
index 00000000..589eb089
--- /dev/null
+++ b/yaml-cpp/src/binary.cpp
@@ -0,0 +1,102 @@
+#include "yaml-cpp/binary.h"
+#include "yaml-cpp/node.h"
+
+namespace YAML
+{
+ static const char encoding[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+ std::string EncodeBase64(const unsigned char *data, std::size_t size)
+ {
+ const char PAD = '=';
+
+ std::string ret;
+ ret.resize(4 * size / 3 + 3);
+ char *out = &ret[0];
+
+ std::size_t chunks = size / 3;
+ std::size_t remainder = size % 3;
+
+ for(std::size_t i=0;i<chunks;i++, data += 3) {
+ *out++ = encoding[data[0] >> 2];
+ *out++ = encoding[((data[0] & 0x3) << 4) | (data[1] >> 4)];
+ *out++ = encoding[((data[1] & 0xf) << 2) | (data[2] >> 6)];
+ *out++ = encoding[data[2] & 0x3f];
+ }
+
+ switch(remainder) {
+ case 0:
+ break;
+ case 1:
+ *out++ = encoding[data[0] >> 2];
+ *out++ = encoding[((data[0] & 0x3) << 4)];
+ *out++ = PAD;
+ *out++ = PAD;
+ break;
+ case 2:
+ *out++ = encoding[data[0] >> 2];
+ *out++ = encoding[((data[0] & 0x3) << 4) | (data[1] >> 4)];
+ *out++ = encoding[((data[1] & 0xf) << 2)];
+ *out++ = PAD;
+ break;
+ }
+
+ ret.resize(out - &ret[0]);
+ return ret;
+ }
+
+ static const unsigned char decoding[] = {
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255, 62,255,255,255, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,255,255,255, 0,255,255,
+ 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,255,255,255,255,255,
+ 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+ };
+
+ std::vector<unsigned char> DecodeBase64(const std::string& input)
+ {
+ typedef std::vector<unsigned char> ret_type;
+ if(input.empty())
+ return ret_type();
+
+ ret_type ret(3 * input.size() / 4 + 1);
+ unsigned char *out = &ret[0];
+
+ unsigned value = 0;
+ for(std::size_t i=0;i<input.size();i++) {
+ unsigned char d = decoding[static_cast<unsigned>(input[i])];
+ if(d == 255)
+ return ret_type();
+
+ value = (value << 6) | d;
+ if(i % 4 == 3) {
+ *out++ = value >> 16;
+ if(i > 0 && input[i - 1] != '=')
+ *out++ = value >> 8;
+ if(input[i] != '=')
+ *out++ = value;
+ }
+ }
+
+ ret.resize(out - &ret[0]);
+ return ret;
+ }
+
+ void operator >> (const Node& node, Binary& binary)
+ {
+ std::string scalar;
+ node.GetScalar(scalar);
+ std::vector<unsigned char> data = DecodeBase64(scalar);
+ binary.swap(data);
+ }
+}
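An illustrative round-trip through the encoder/decoder above; the byte values are arbitrary, and the declarations are assumed to live in yaml-cpp/binary.h as included by this file.

#include "yaml-cpp/binary.h"
#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int main() {
    const unsigned char raw[] = { 0xde, 0xad, 0xbe, 0xef, 0x00 };
    std::string encoded = YAML::EncodeBase64(raw, sizeof(raw));      // yields "3q2+7wA=" under the table above
    std::vector<unsigned char> decoded = YAML::DecodeBase64(encoded);
    assert(decoded.size() == sizeof(raw));                           // DecodeBase64 returns an empty vector on bad input
    assert(std::equal(decoded.begin(), decoded.end(), raw));
    return 0;
}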
diff --git a/yaml-cpp/src/collectionstack.h b/yaml-cpp/src/collectionstack.h
new file mode 100755
index 00000000..4a986bc9
--- /dev/null
+++ b/yaml-cpp/src/collectionstack.h
@@ -0,0 +1,35 @@
+#ifndef COLLECTIONSTACK_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define COLLECTIONSTACK_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <stack>
+#include <cassert>
+
+namespace YAML
+{
+ struct CollectionType {
+ enum value { None, BlockMap, BlockSeq, FlowMap, FlowSeq, CompactMap };
+ };
+
+ class CollectionStack
+ {
+ public:
+ CollectionType::value GetCurCollectionType() const {
+ if(collectionStack.empty())
+ return CollectionType::None;
+ return collectionStack.top();
+ }
+
+ void PushCollectionType(CollectionType::value type) { collectionStack.push(type); }
+ void PopCollectionType(CollectionType::value type) { assert(type == GetCurCollectionType()); collectionStack.pop(); }
+
+ private:
+ std::stack<CollectionType::value> collectionStack;
+ };
+}
+
+#endif // COLLECTIONSTACK_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/contrib/graphbuilder.cpp b/yaml-cpp/src/contrib/graphbuilder.cpp
new file mode 100755
index 00000000..ab5159cc
--- /dev/null
+++ b/yaml-cpp/src/contrib/graphbuilder.cpp
@@ -0,0 +1,16 @@
+#include "yaml-cpp/parser.h"
+#include "yaml-cpp/contrib/graphbuilder.h"
+#include "graphbuilderadapter.h"
+
+namespace YAML
+{
+ void *BuildGraphOfNextDocument(Parser& parser, GraphBuilderInterface& graphBuilder)
+ {
+ GraphBuilderAdapter eventHandler(graphBuilder);
+ if (parser.HandleNextDocument(eventHandler)) {
+ return eventHandler.RootNode();
+ } else {
+ return NULL;
+ }
+ }
+}
diff --git a/yaml-cpp/src/contrib/graphbuilderadapter.cpp b/yaml-cpp/src/contrib/graphbuilderadapter.cpp
new file mode 100755
index 00000000..557e97c8
--- /dev/null
+++ b/yaml-cpp/src/contrib/graphbuilderadapter.cpp
@@ -0,0 +1,96 @@
+#include "graphbuilderadapter.h"
+
+namespace YAML
+{
+ int GraphBuilderAdapter::ContainerFrame::sequenceMarker;
+
+ void GraphBuilderAdapter::OnNull(const Mark& mark, anchor_t anchor)
+ {
+ void *pParent = GetCurrentParent();
+ void *pNode = m_builder.NewNull(mark, pParent);
+ RegisterAnchor(anchor, pNode);
+
+ DispositionNode(pNode);
+ }
+
+ void GraphBuilderAdapter::OnAlias(const Mark& mark, anchor_t anchor)
+ {
+ void *pReffedNode = m_anchors.Get(anchor);
+ DispositionNode(m_builder.AnchorReference(mark, pReffedNode));
+ }
+
+ void GraphBuilderAdapter::OnScalar(const Mark& mark, const std::string& tag, anchor_t anchor, const std::string& value)
+ {
+ void *pParent = GetCurrentParent();
+ void *pNode = m_builder.NewScalar(mark, tag, pParent, value);
+ RegisterAnchor(anchor, pNode);
+
+ DispositionNode(pNode);
+ }
+
+ void GraphBuilderAdapter::OnSequenceStart(const Mark& mark, const std::string& tag, anchor_t anchor)
+ {
+ void *pNode = m_builder.NewSequence(mark, tag, GetCurrentParent());
+ m_containers.push(ContainerFrame(pNode));
+ RegisterAnchor(anchor, pNode);
+ }
+
+ void GraphBuilderAdapter::OnSequenceEnd()
+ {
+ void *pSequence = m_containers.top().pContainer;
+ m_containers.pop();
+
+ DispositionNode(pSequence);
+ }
+
+ void GraphBuilderAdapter::OnMapStart(const Mark& mark, const std::string& tag, anchor_t anchor)
+ {
+ void *pNode = m_builder.NewMap(mark, tag, GetCurrentParent());
+ m_containers.push(ContainerFrame(pNode, m_pKeyNode));
+ m_pKeyNode = NULL;
+ RegisterAnchor(anchor, pNode);
+ }
+
+ void GraphBuilderAdapter::OnMapEnd()
+ {
+ void *pMap = m_containers.top().pContainer;
+ m_pKeyNode = m_containers.top().pPrevKeyNode;
+ m_containers.pop();
+ DispositionNode(pMap);
+ }
+
+ void *GraphBuilderAdapter::GetCurrentParent() const
+ {
+ if (m_containers.empty()) {
+ return NULL;
+ }
+ return m_containers.top().pContainer;
+ }
+
+ void GraphBuilderAdapter::RegisterAnchor(anchor_t anchor, void *pNode)
+ {
+ if (anchor) {
+ m_anchors.Register(anchor, pNode);
+ }
+ }
+
+ void GraphBuilderAdapter::DispositionNode(void *pNode)
+ {
+ if (m_containers.empty()) {
+ m_pRootNode = pNode;
+ return;
+ }
+
+ void *pContainer = m_containers.top().pContainer;
+ if (m_containers.top().isMap()) {
+ if (m_pKeyNode) {
+ m_builder.AssignInMap(pContainer, m_pKeyNode, pNode);
+ m_pKeyNode = NULL;
+ } else {
+ m_pKeyNode = pNode;
+ }
+ } else {
+ m_builder.AppendToSequence(pContainer, pNode);
+ }
+ }
+}
diff --git a/yaml-cpp/src/contrib/graphbuilderadapter.h b/yaml-cpp/src/contrib/graphbuilderadapter.h
new file mode 100755
index 00000000..3ef8ab6c
--- /dev/null
+++ b/yaml-cpp/src/contrib/graphbuilderadapter.h
@@ -0,0 +1,73 @@
+#ifndef GRAPHBUILDERADAPTER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define GRAPHBUILDERADAPTER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include <cstdlib>
+#include <map>
+#include <stack>
+#include "yaml-cpp/eventhandler.h"
+#include "yaml-cpp/contrib/anchordict.h"
+#include "yaml-cpp/contrib/graphbuilder.h"
+
+namespace YAML
+{
+ class GraphBuilderAdapter : public EventHandler
+ {
+ public:
+ GraphBuilderAdapter(GraphBuilderInterface& builder)
+ : m_builder(builder), m_pRootNode(NULL), m_pKeyNode(NULL)
+ {
+ }
+
+ virtual void OnDocumentStart(const Mark& mark) {(void)mark;}
+ virtual void OnDocumentEnd() {}
+
+ virtual void OnNull(const Mark& mark, anchor_t anchor);
+ virtual void OnAlias(const Mark& mark, anchor_t anchor);
+ virtual void OnScalar(const Mark& mark, const std::string& tag, anchor_t anchor, const std::string& value);
+
+ virtual void OnSequenceStart(const Mark& mark, const std::string& tag, anchor_t anchor);
+ virtual void OnSequenceEnd();
+
+ virtual void OnMapStart(const Mark& mark, const std::string& tag, anchor_t anchor);
+ virtual void OnMapEnd();
+
+ void *RootNode() const {return m_pRootNode;}
+
+ private:
+ struct ContainerFrame
+ {
+ ContainerFrame(void *pSequence)
+ : pContainer(pSequence), pPrevKeyNode(&sequenceMarker)
+ {}
+ ContainerFrame(void *pMap, void* pPrevKeyNode)
+ : pContainer(pMap), pPrevKeyNode(pPrevKeyNode)
+ {}
+
+ void *pContainer;
+ void *pPrevKeyNode;
+
+ bool isMap() const {return pPrevKeyNode != &sequenceMarker;}
+
+ private:
+ static int sequenceMarker;
+ };
+ typedef std::stack<ContainerFrame> ContainerStack;
+ typedef AnchorDict<void*> AnchorMap;
+
+ GraphBuilderInterface& m_builder;
+ ContainerStack m_containers;
+ AnchorMap m_anchors;
+ void *m_pRootNode;
+ void *m_pKeyNode;
+
+ void *GetCurrentParent() const;
+ void RegisterAnchor(anchor_t anchor, void *pNode);
+ void DispositionNode(void *pNode);
+ };
+}
+
+#endif // GRAPHBUILDERADAPTER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/conversion.cpp b/yaml-cpp/src/conversion.cpp
new file mode 100755
index 00000000..f81e1a0b
--- /dev/null
+++ b/yaml-cpp/src/conversion.cpp
@@ -0,0 +1,89 @@
+#include "yaml-cpp/conversion.h"
+#include <algorithm>
+
+////////////////////////////////////////////////////////////////
+// Specializations for converting a string to specific types
+
+namespace
+{
+ // we're not gonna mess with the mess that is all the isupper/etc. functions
+ bool IsLower(char ch) { return 'a' <= ch && ch <= 'z'; }
+ bool IsUpper(char ch) { return 'A' <= ch && ch <= 'Z'; }
+ char ToLower(char ch) { return IsUpper(ch) ? ch + 'a' - 'A' : ch; }
+
+ std::string tolower(const std::string& str)
+ {
+ std::string s(str);
+ std::transform(s.begin(), s.end(), s.begin(), ToLower);
+ return s;
+ }
+
+ template <typename T>
+ bool IsEntirely(const std::string& str, T func)
+ {
+ for(std::size_t i=0;i<str.size();i++)
+ if(!func(str[i]))
+ return false;
+
+ return true;
+ }
+
+ // IsFlexibleCase
+ // . Returns true if 'str' is:
+ // . UPPERCASE
+ // . lowercase
+ // . Capitalized
+ bool IsFlexibleCase(const std::string& str)
+ {
+ if(str.empty())
+ return true;
+
+ if(IsEntirely(str, IsLower))
+ return true;
+
+ bool firstcaps = IsUpper(str[0]);
+ std::string rest = str.substr(1);
+ return firstcaps && (IsEntirely(rest, IsLower) || IsEntirely(rest, IsUpper));
+ }
+}
+
+namespace YAML
+{
+ bool Convert(const std::string& input, bool& b)
+ {
+ // we can't use iostream bool extraction operators as they don't
+ // recognize all possible values in the table below (taken from
+ // http://yaml.org/type/bool.html)
+ static const struct {
+ std::string truename, falsename;
+ } names[] = {
+ { "y", "n" },
+ { "yes", "no" },
+ { "true", "false" },
+ { "on", "off" },
+ };
+
+ if(!IsFlexibleCase(input))
+ return false;
+
+ for(unsigned i=0;i<sizeof(names)/sizeof(names[0]);i++) {
+ if(names[i].truename == tolower(input)) {
+ b = true;
+ return true;
+ }
+
+ if(names[i].falsename == tolower(input)) {
+ b = false;
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool Convert(const std::string& input, _Null& /*output*/)
+ {
+ return input.empty() || input == "~" || input == "null" || input == "Null" || input == "NULL";
+ }
+}
+
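An illustrative sketch of the flexible-case bool parsing above, assuming Convert(const std::string&, bool&) is declared in yaml-cpp/conversion.h; the inputs are arbitrary.

#include "yaml-cpp/conversion.h"
#include <iostream>
#include <string>

int main() {
    bool b = false;
    bool ok = YAML::Convert(std::string("Yes"), b);
    std::cout << ok << " " << b << "\n";   // 1 1 : Capitalized form accepted
    ok = YAML::Convert(std::string("OFF"), b);
    std::cout << ok << " " << b << "\n";   // 1 0 : all-uppercase form accepted
    ok = YAML::Convert(std::string("yEs"), b);
    std::cout << ok << "\n";               // 0   : mixed case rejected by IsFlexibleCase
    return 0;
}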
diff --git a/yaml-cpp/src/directives.cpp b/yaml-cpp/src/directives.cpp
new file mode 100755
index 00000000..faf1483b
--- /dev/null
+++ b/yaml-cpp/src/directives.cpp
@@ -0,0 +1,24 @@
+#include "directives.h"
+
+namespace YAML
+{
+ Directives::Directives()
+ {
+ // version
+ version.isDefault = true;
+ version.major = 1;
+ version.minor = 2;
+ }
+
+ const std::string Directives::TranslateTagHandle(const std::string& handle) const
+ {
+ std::map <std::string, std::string>::const_iterator it = tags.find(handle);
+ if(it == tags.end()) {
+ if(handle == "!!")
+ return "tag:yaml.org,2002:";
+ return handle;
+ }
+
+ return it->second;
+ }
+}
diff --git a/yaml-cpp/src/directives.h b/yaml-cpp/src/directives.h
new file mode 100755
index 00000000..a3308f72
--- /dev/null
+++ b/yaml-cpp/src/directives.h
@@ -0,0 +1,29 @@
+#ifndef DIRECTIVES_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define DIRECTIVES_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <string>
+#include <map>
+
+namespace YAML
+{
+ struct Version {
+ bool isDefault;
+ int major, minor;
+ };
+
+ struct Directives {
+ Directives();
+
+ const std::string TranslateTagHandle(const std::string& handle) const;
+
+ Version version;
+ std::map<std::string, std::string> tags;
+ };
+}
+
+#endif // DIRECTIVES_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/emitfromevents.cpp b/yaml-cpp/src/emitfromevents.cpp
new file mode 100755
index 00000000..49fc10b2
--- /dev/null
+++ b/yaml-cpp/src/emitfromevents.cpp
@@ -0,0 +1,105 @@
+#include "yaml-cpp/emitfromevents.h"
+#include "yaml-cpp/emitter.h"
+#include "yaml-cpp/null.h"
+#include <cassert>
+#include <sstream>
+
+namespace {
+ std::string ToString(YAML::anchor_t anchor) {
+ std::stringstream stream;
+ stream << anchor;
+ return stream.str();
+ }
+}
+
+namespace YAML
+{
+ EmitFromEvents::EmitFromEvents(Emitter& emitter): m_emitter(emitter)
+ {
+ }
+
+ void EmitFromEvents::OnDocumentStart(const Mark&)
+ {
+ }
+
+ void EmitFromEvents::OnDocumentEnd()
+ {
+ }
+
+ void EmitFromEvents::OnNull(const Mark&, anchor_t anchor)
+ {
+ BeginNode();
+ EmitProps("", anchor);
+ m_emitter << Null;
+ }
+
+ void EmitFromEvents::OnAlias(const Mark&, anchor_t anchor)
+ {
+ BeginNode();
+ m_emitter << Alias(ToString(anchor));
+ }
+
+ void EmitFromEvents::OnScalar(const Mark&, const std::string& tag, anchor_t anchor, const std::string& value)
+ {
+ BeginNode();
+ EmitProps(tag, anchor);
+ m_emitter << value;
+ }
+
+ void EmitFromEvents::OnSequenceStart(const Mark&, const std::string& tag, anchor_t anchor)
+ {
+ BeginNode();
+ EmitProps(tag, anchor);
+ m_emitter << BeginSeq;
+ m_stateStack.push(State::WaitingForSequenceEntry);
+ }
+
+ void EmitFromEvents::OnSequenceEnd()
+ {
+ m_emitter << EndSeq;
+ assert(m_stateStack.top() == State::WaitingForSequenceEntry);
+ m_stateStack.pop();
+ }
+
+ void EmitFromEvents::OnMapStart(const Mark&, const std::string& tag, anchor_t anchor)
+ {
+ BeginNode();
+ EmitProps(tag, anchor);
+ m_emitter << BeginMap;
+ m_stateStack.push(State::WaitingForKey);
+ }
+
+ void EmitFromEvents::OnMapEnd()
+ {
+ m_emitter << EndMap;
+ assert(m_stateStack.top() == State::WaitingForKey);
+ m_stateStack.pop();
+ }
+
+ void EmitFromEvents::BeginNode()
+ {
+ if(m_stateStack.empty())
+ return;
+
+ switch(m_stateStack.top()) {
+ case State::WaitingForKey:
+ m_emitter << Key;
+ m_stateStack.top() = State::WaitingForValue;
+ break;
+ case State::WaitingForValue:
+ m_emitter << Value;
+ m_stateStack.top() = State::WaitingForKey;
+ break;
+ default:
+ break;
+ }
+ }
+
+ void EmitFromEvents::EmitProps(const std::string& tag, anchor_t anchor)
+ {
+ if(!tag.empty() && tag != "?")
+ m_emitter << VerbatimTag(tag);
+ if(anchor)
+ m_emitter << Anchor(ToString(anchor));
+ }
+}
diff --git a/yaml-cpp/src/emitter.cpp b/yaml-cpp/src/emitter.cpp
new file mode 100755
index 00000000..91f48da7
--- /dev/null
+++ b/yaml-cpp/src/emitter.cpp
@@ -0,0 +1,882 @@
+#include "yaml-cpp/emitter.h"
+#include "emitterstate.h"
+#include "emitterutils.h"
+#include "indentation.h"
+#include "yaml-cpp/exceptions.h"
+#include <sstream>
+
+namespace YAML
+{
+ Emitter::Emitter(): m_pState(new EmitterState)
+ {
+ }
+
+ Emitter::~Emitter()
+ {
+ }
+
+ const char *Emitter::c_str() const
+ {
+ return m_stream.str();
+ }
+
+ unsigned Emitter::size() const
+ {
+ return m_stream.pos();
+ }
+
+ // state checking
+ bool Emitter::good() const
+ {
+ return m_pState->good();
+ }
+
+ const std::string Emitter::GetLastError() const
+ {
+ return m_pState->GetLastError();
+ }
+
+ // global setters
+ bool Emitter::SetOutputCharset(EMITTER_MANIP value)
+ {
+ return m_pState->SetOutputCharset(value, GLOBAL);
+ }
+
+ bool Emitter::SetStringFormat(EMITTER_MANIP value)
+ {
+ return m_pState->SetStringFormat(value, GLOBAL);
+ }
+
+ bool Emitter::SetBoolFormat(EMITTER_MANIP value)
+ {
+ bool ok = false;
+ if(m_pState->SetBoolFormat(value, GLOBAL))
+ ok = true;
+ if(m_pState->SetBoolCaseFormat(value, GLOBAL))
+ ok = true;
+ if(m_pState->SetBoolLengthFormat(value, GLOBAL))
+ ok = true;
+ return ok;
+ }
+
+ bool Emitter::SetIntBase(EMITTER_MANIP value)
+ {
+ return m_pState->SetIntFormat(value, GLOBAL);
+ }
+
+ bool Emitter::SetSeqFormat(EMITTER_MANIP value)
+ {
+ return m_pState->SetFlowType(GT_SEQ, value, GLOBAL);
+ }
+
+ bool Emitter::SetMapFormat(EMITTER_MANIP value)
+ {
+ bool ok = false;
+ if(m_pState->SetFlowType(GT_MAP, value, GLOBAL))
+ ok = true;
+ if(m_pState->SetMapKeyFormat(value, GLOBAL))
+ ok = true;
+ return ok;
+ }
+
+ bool Emitter::SetIndent(unsigned n)
+ {
+ return m_pState->SetIndent(n, GLOBAL);
+ }
+
+ bool Emitter::SetPreCommentIndent(unsigned n)
+ {
+ return m_pState->SetPreCommentIndent(n, GLOBAL);
+ }
+
+ bool Emitter::SetPostCommentIndent(unsigned n)
+ {
+ return m_pState->SetPostCommentIndent(n, GLOBAL);
+ }
+
+ bool Emitter::SetFloatPrecision(unsigned n)
+ {
+ return m_pState->SetFloatPrecision(n, GLOBAL);
+ }
+
+ bool Emitter::SetDoublePrecision(unsigned n)
+ {
+ return m_pState->SetDoublePrecision(n, GLOBAL);
+ }
+
+ // SetLocalValue
+ // . Either start/end a group, or set a modifier locally
+ Emitter& Emitter::SetLocalValue(EMITTER_MANIP value)
+ {
+ if(!good())
+ return *this;
+
+ switch(value) {
+ case BeginDoc:
+ EmitBeginDoc();
+ break;
+ case EndDoc:
+ EmitEndDoc();
+ break;
+ case BeginSeq:
+ EmitBeginSeq();
+ break;
+ case EndSeq:
+ EmitEndSeq();
+ break;
+ case BeginMap:
+ EmitBeginMap();
+ break;
+ case EndMap:
+ EmitEndMap();
+ break;
+ case Key:
+ EmitKey();
+ break;
+ case Value:
+ EmitValue();
+ break;
+ case TagByKind:
+ EmitKindTag();
+ break;
+ case Newline:
+ EmitNewline();
+ break;
+ default:
+ m_pState->SetLocalValue(value);
+ break;
+ }
+ return *this;
+ }
+
+ Emitter& Emitter::SetLocalIndent(const _Indent& indent)
+ {
+ m_pState->SetIndent(indent.value, LOCAL);
+ return *this;
+ }
+
+ Emitter& Emitter::SetLocalPrecision(const _Precision& precision)
+ {
+ if(precision.floatPrecision >= 0)
+ m_pState->SetFloatPrecision(precision.floatPrecision, LOCAL);
+ if(precision.doublePrecision >= 0)
+ m_pState->SetDoublePrecision(precision.doublePrecision, LOCAL);
+ return *this;
+ }
+
+ // GotoNextPreAtomicState
+ // . Runs the state machine, emitting if necessary, and returns 'true' if done (i.e., ready to emit an atom)
+ bool Emitter::GotoNextPreAtomicState()
+ {
+ if(!good())
+ return true;
+
+ unsigned curIndent = m_pState->GetCurIndent();
+
+ EMITTER_STATE curState = m_pState->GetCurState();
+ switch(curState) {
+ // document-level
+ case ES_WAITING_FOR_DOC:
+ m_pState->SwitchState(ES_WRITING_DOC);
+ return true;
+ case ES_WRITING_DOC:
+ return true;
+ case ES_DONE_WITH_DOC:
+ EmitBeginDoc();
+ return false;
+
+ // block sequence
+ case ES_WAITING_FOR_BLOCK_SEQ_ENTRY:
+ m_stream << IndentTo(curIndent) << "-";
+ m_pState->RequireSoftSeparation();
+ m_pState->SwitchState(ES_WRITING_BLOCK_SEQ_ENTRY);
+ return true;
+ case ES_WRITING_BLOCK_SEQ_ENTRY:
+ return true;
+ case ES_DONE_WITH_BLOCK_SEQ_ENTRY:
+ m_stream << '\n';
+ m_pState->SwitchState(ES_WAITING_FOR_BLOCK_SEQ_ENTRY);
+ return false;
+
+ // flow sequence
+ case ES_WAITING_FOR_FLOW_SEQ_ENTRY:
+ m_pState->SwitchState(ES_WRITING_FLOW_SEQ_ENTRY);
+ return true;
+ case ES_WRITING_FLOW_SEQ_ENTRY:
+ return true;
+ case ES_DONE_WITH_FLOW_SEQ_ENTRY:
+ EmitSeparationIfNecessary();
+ m_stream << ',';
+ m_pState->RequireSoftSeparation();
+ m_pState->SwitchState(ES_WAITING_FOR_FLOW_SEQ_ENTRY);
+ return false;
+
+ // block map
+ case ES_WAITING_FOR_BLOCK_MAP_ENTRY:
+ m_pState->SetError(ErrorMsg::EXPECTED_KEY_TOKEN);
+ return true;
+ case ES_WAITING_FOR_BLOCK_MAP_KEY:
+ if(m_pState->CurrentlyInLongKey()) {
+ m_stream << IndentTo(curIndent) << '?';
+ m_pState->RequireSoftSeparation();
+ }
+ m_pState->SwitchState(ES_WRITING_BLOCK_MAP_KEY);
+ return true;
+ case ES_WRITING_BLOCK_MAP_KEY:
+ return true;
+ case ES_DONE_WITH_BLOCK_MAP_KEY:
+ m_pState->SetError(ErrorMsg::EXPECTED_VALUE_TOKEN);
+ return true;
+ case ES_WAITING_FOR_BLOCK_MAP_VALUE:
+ m_pState->SwitchState(ES_WRITING_BLOCK_MAP_VALUE);
+ return true;
+ case ES_WRITING_BLOCK_MAP_VALUE:
+ return true;
+ case ES_DONE_WITH_BLOCK_MAP_VALUE:
+ m_pState->SetError(ErrorMsg::EXPECTED_KEY_TOKEN);
+ return true;
+
+ // flow map
+ case ES_WAITING_FOR_FLOW_MAP_ENTRY:
+ m_pState->SetError(ErrorMsg::EXPECTED_KEY_TOKEN);
+ return true;
+ case ES_WAITING_FOR_FLOW_MAP_KEY:
+ EmitSeparationIfNecessary();
+ m_pState->SwitchState(ES_WRITING_FLOW_MAP_KEY);
+ if(m_pState->CurrentlyInLongKey()) {
+ m_stream << '?';
+ m_pState->RequireSoftSeparation();
+ }
+ return true;
+ case ES_WRITING_FLOW_MAP_KEY:
+ return true;
+ case ES_DONE_WITH_FLOW_MAP_KEY:
+ m_pState->SetError(ErrorMsg::EXPECTED_VALUE_TOKEN);
+ return true;
+ case ES_WAITING_FOR_FLOW_MAP_VALUE:
+ EmitSeparationIfNecessary();
+ m_stream << ':';
+ m_pState->RequireSoftSeparation();
+ m_pState->SwitchState(ES_WRITING_FLOW_MAP_VALUE);
+ return true;
+ case ES_WRITING_FLOW_MAP_VALUE:
+ return true;
+ case ES_DONE_WITH_FLOW_MAP_VALUE:
+ m_pState->SetError(ErrorMsg::EXPECTED_KEY_TOKEN);
+ return true;
+ default:
+ assert(false);
+ }
+
+ assert(false);
+ return true;
+ }
+
+ // PreAtomicWrite
+ // . Depending on the emitter state, write to the stream to get it
+ // in position to do an atomic write (e.g., scalar, sequence, or map)
+ void Emitter::PreAtomicWrite()
+ {
+ if(!good())
+ return;
+
+ while(!GotoNextPreAtomicState())
+ ;
+ }
+
+ // PostAtomicWrite
+ // . Clean up
+ void Emitter::PostAtomicWrite()
+ {
+ if(!good())
+ return;
+
+ EMITTER_STATE curState = m_pState->GetCurState();
+ switch(curState) {
+ // document-level
+ case ES_WRITING_DOC:
+ m_pState->SwitchState(ES_DONE_WITH_DOC);
+ break;
+
+ // block seq
+ case ES_WRITING_BLOCK_SEQ_ENTRY:
+ m_pState->SwitchState(ES_DONE_WITH_BLOCK_SEQ_ENTRY);
+ break;
+
+ // flow seq
+ case ES_WRITING_FLOW_SEQ_ENTRY:
+ m_pState->SwitchState(ES_DONE_WITH_FLOW_SEQ_ENTRY);
+ break;
+
+ // block map
+ case ES_WRITING_BLOCK_MAP_KEY:
+ if(!m_pState->CurrentlyInLongKey()) {
+ m_stream << ':';
+ m_pState->RequireSoftSeparation();
+ }
+ m_pState->SwitchState(ES_DONE_WITH_BLOCK_MAP_KEY);
+ break;
+ case ES_WRITING_BLOCK_MAP_VALUE:
+ m_pState->SwitchState(ES_DONE_WITH_BLOCK_MAP_VALUE);
+ break;
+
+ // flow map
+ case ES_WRITING_FLOW_MAP_KEY:
+ m_pState->SwitchState(ES_DONE_WITH_FLOW_MAP_KEY);
+ break;
+ case ES_WRITING_FLOW_MAP_VALUE:
+ m_pState->SwitchState(ES_DONE_WITH_FLOW_MAP_VALUE);
+ break;
+ default:
+ assert(false);
+ };
+
+ m_pState->ClearModifiedSettings();
+ }
+
+ // EmitSeparationIfNecessary
+ void Emitter::EmitSeparationIfNecessary()
+ {
+ if(!good())
+ return;
+
+ if(m_pState->RequiresSoftSeparation())
+ m_stream << ' ';
+ else if(m_pState->RequiresHardSeparation())
+ m_stream << '\n';
+ m_pState->UnsetSeparation();
+ }
+
+ // EmitBeginDoc
+ void Emitter::EmitBeginDoc()
+ {
+ if(!good())
+ return;
+
+ EMITTER_STATE curState = m_pState->GetCurState();
+ if(curState != ES_WAITING_FOR_DOC && curState != ES_WRITING_DOC && curState != ES_DONE_WITH_DOC) {
+ m_pState->SetError("Unexpected begin document");
+ return;
+ }
+
+ if(curState == ES_WRITING_DOC || curState == ES_DONE_WITH_DOC)
+ m_stream << '\n';
+ m_stream << "---\n";
+
+ m_pState->UnsetSeparation();
+ m_pState->SwitchState(ES_WAITING_FOR_DOC);
+ }
+
+ // EmitEndDoc
+ void Emitter::EmitEndDoc()
+ {
+ if(!good())
+ return;
+
+ EMITTER_STATE curState = m_pState->GetCurState();
+ if(curState != ES_WAITING_FOR_DOC && curState != ES_WRITING_DOC && curState != ES_DONE_WITH_DOC) {
+ m_pState->SetError("Unexpected end document");
+ return;
+ }
+
+ if(curState == ES_WRITING_DOC || curState == ES_DONE_WITH_DOC)
+ m_stream << '\n';
+ m_stream << "...\n";
+
+ m_pState->UnsetSeparation();
+ m_pState->SwitchState(ES_WAITING_FOR_DOC);
+ }
+
+ // EmitBeginSeq
+ void Emitter::EmitBeginSeq()
+ {
+ if(!good())
+ return;
+
+ // must have a long key if we're emitting a sequence
+ m_pState->StartLongKey();
+
+ PreAtomicWrite();
+
+ EMITTER_STATE curState = m_pState->GetCurState();
+ EMITTER_MANIP flowType = m_pState->GetFlowType(GT_SEQ);
+ if(flowType == Block) {
+ if(curState == ES_WRITING_BLOCK_SEQ_ENTRY ||
+ curState == ES_WRITING_BLOCK_MAP_KEY || curState == ES_WRITING_BLOCK_MAP_VALUE ||
+ curState == ES_WRITING_DOC
+ ) {
+ if(m_pState->RequiresHardSeparation() || curState != ES_WRITING_DOC) {
+ m_stream << "\n";
+ m_pState->UnsetSeparation();
+ }
+ }
+ m_pState->PushState(ES_WAITING_FOR_BLOCK_SEQ_ENTRY);
+ } else if(flowType == Flow) {
+ EmitSeparationIfNecessary();
+ m_stream << "[";
+ m_pState->PushState(ES_WAITING_FOR_FLOW_SEQ_ENTRY);
+ } else
+ assert(false);
+
+ m_pState->BeginGroup(GT_SEQ);
+ }
+
+ // EmitEndSeq
+ void Emitter::EmitEndSeq()
+ {
+ if(!good())
+ return;
+
+ if(m_pState->GetCurGroupType() != GT_SEQ)
+ return m_pState->SetError(ErrorMsg::UNEXPECTED_END_SEQ);
+
+ EMITTER_STATE curState = m_pState->GetCurState();
+ FLOW_TYPE flowType = m_pState->GetCurGroupFlowType();
+ if(flowType == FT_BLOCK) {
+ // Note: block sequences are *not* allowed to be empty, but we convert it
+ // to a flow sequence if it is
+ assert(curState == ES_DONE_WITH_BLOCK_SEQ_ENTRY || curState == ES_WAITING_FOR_BLOCK_SEQ_ENTRY);
+ if(curState == ES_WAITING_FOR_BLOCK_SEQ_ENTRY) {
+ // Note: only one of these will actually output anything for a given situation
+ EmitSeparationIfNecessary();
+ unsigned curIndent = m_pState->GetCurIndent();
+ m_stream << IndentTo(curIndent);
+
+ m_stream << "[]";
+ }
+ } else if(flowType == FT_FLOW) {
+ // Note: flow sequences are allowed to be empty
+ assert(curState == ES_DONE_WITH_FLOW_SEQ_ENTRY || curState == ES_WAITING_FOR_FLOW_SEQ_ENTRY);
+ m_stream << "]";
+ } else
+ assert(false);
+
+ m_pState->PopState();
+ m_pState->EndGroup(GT_SEQ);
+
+ PostAtomicWrite();
+ }
+
+ // EmitBeginMap
+ void Emitter::EmitBeginMap()
+ {
+ if(!good())
+ return;
+
+ // must have a long key if we're emitting a map
+ m_pState->StartLongKey();
+
+ PreAtomicWrite();
+
+ EMITTER_STATE curState = m_pState->GetCurState();
+ EMITTER_MANIP flowType = m_pState->GetFlowType(GT_MAP);
+ if(flowType == Block) {
+ if(curState == ES_WRITING_BLOCK_SEQ_ENTRY ||
+ curState == ES_WRITING_BLOCK_MAP_KEY || curState == ES_WRITING_BLOCK_MAP_VALUE ||
+ curState == ES_WRITING_DOC
+ ) {
+ if(m_pState->RequiresHardSeparation() || (curState != ES_WRITING_DOC && curState != ES_WRITING_BLOCK_SEQ_ENTRY)) {
+ m_stream << "\n";
+ m_pState->UnsetSeparation();
+ }
+ }
+ m_pState->PushState(ES_WAITING_FOR_BLOCK_MAP_ENTRY);
+ } else if(flowType == Flow) {
+ EmitSeparationIfNecessary();
+ m_stream << "{";
+ m_pState->PushState(ES_WAITING_FOR_FLOW_MAP_ENTRY);
+ } else
+ assert(false);
+
+ m_pState->BeginGroup(GT_MAP);
+ }
+
+ // EmitEndMap
+ void Emitter::EmitEndMap()
+ {
+ if(!good())
+ return;
+
+ if(m_pState->GetCurGroupType() != GT_MAP)
+ return m_pState->SetError(ErrorMsg::UNEXPECTED_END_MAP);
+
+ EMITTER_STATE curState = m_pState->GetCurState();
+ FLOW_TYPE flowType = m_pState->GetCurGroupFlowType();
+ if(flowType == FT_BLOCK) {
+ // Note: block maps are *not* allowed to be empty, but we convert it
+ // to a flow map ({}) if it is
+ assert(curState == ES_DONE_WITH_BLOCK_MAP_VALUE || curState == ES_WAITING_FOR_BLOCK_MAP_ENTRY);
+ if(curState == ES_WAITING_FOR_BLOCK_MAP_ENTRY) {
+ // Note: only one of these will actually output anything for a given situation
+ EmitSeparationIfNecessary();
+ unsigned curIndent = m_pState->GetCurIndent();
+ m_stream << IndentTo(curIndent);
+ m_stream << "{}";
+ }
+ } else if(flowType == FT_FLOW) {
+ // Note: flow maps are allowed to be empty
+ assert(curState == ES_DONE_WITH_FLOW_MAP_VALUE || curState == ES_WAITING_FOR_FLOW_MAP_ENTRY);
+ EmitSeparationIfNecessary();
+ m_stream << "}";
+ } else
+ assert(false);
+
+ m_pState->PopState();
+ m_pState->EndGroup(GT_MAP);
+
+ PostAtomicWrite();
+ }
+
+ // EmitKey
+ void Emitter::EmitKey()
+ {
+ if(!good())
+ return;
+
+ EMITTER_STATE curState = m_pState->GetCurState();
+ FLOW_TYPE flowType = m_pState->GetCurGroupFlowType();
+ if(curState != ES_WAITING_FOR_BLOCK_MAP_ENTRY && curState != ES_DONE_WITH_BLOCK_MAP_VALUE
+ && curState != ES_WAITING_FOR_FLOW_MAP_ENTRY && curState != ES_DONE_WITH_FLOW_MAP_VALUE)
+ return m_pState->SetError(ErrorMsg::UNEXPECTED_KEY_TOKEN);
+
+ if(flowType == FT_BLOCK) {
+ if(curState == ES_DONE_WITH_BLOCK_MAP_VALUE)
+ m_stream << '\n';
+ unsigned curIndent = m_pState->GetCurIndent();
+ m_stream << IndentTo(curIndent);
+ m_pState->UnsetSeparation();
+ m_pState->SwitchState(ES_WAITING_FOR_BLOCK_MAP_KEY);
+ } else if(flowType == FT_FLOW) {
+ EmitSeparationIfNecessary();
+ if(curState == ES_DONE_WITH_FLOW_MAP_VALUE) {
+ m_stream << ',';
+ m_pState->RequireSoftSeparation();
+ }
+ m_pState->SwitchState(ES_WAITING_FOR_FLOW_MAP_KEY);
+ } else
+ assert(false);
+
+ if(m_pState->GetMapKeyFormat() == LongKey)
+ m_pState->StartLongKey();
+ else if(m_pState->GetMapKeyFormat() == Auto)
+ m_pState->StartSimpleKey();
+ else
+ assert(false);
+ }
+
+ // EmitValue
+ void Emitter::EmitValue()
+ {
+ if(!good())
+ return;
+
+ EMITTER_STATE curState = m_pState->GetCurState();
+ FLOW_TYPE flowType = m_pState->GetCurGroupFlowType();
+ if(curState != ES_DONE_WITH_BLOCK_MAP_KEY && curState != ES_DONE_WITH_FLOW_MAP_KEY)
+ return m_pState->SetError(ErrorMsg::UNEXPECTED_VALUE_TOKEN);
+
+ if(flowType == FT_BLOCK) {
+ if(m_pState->CurrentlyInLongKey()) {
+ m_stream << '\n';
+ m_stream << IndentTo(m_pState->GetCurIndent());
+ m_stream << ':';
+ m_pState->RequireSoftSeparation();
+ }
+ m_pState->SwitchState(ES_WAITING_FOR_BLOCK_MAP_VALUE);
+ } else if(flowType == FT_FLOW) {
+ m_pState->SwitchState(ES_WAITING_FOR_FLOW_MAP_VALUE);
+ } else
+ assert(false);
+ }
+
+ // EmitNewline
+ void Emitter::EmitNewline()
+ {
+ if(!good())
+ return;
+
+ if(CanEmitNewline()) {
+ m_stream << '\n';
+ m_pState->UnsetSeparation();
+ }
+ }
+
+ bool Emitter::CanEmitNewline() const
+ {
+ FLOW_TYPE flowType = m_pState->GetCurGroupFlowType();
+ if(flowType == FT_BLOCK && m_pState->CurrentlyInLongKey())
+ return true;
+
+ EMITTER_STATE curState = m_pState->GetCurState();
+ return curState != ES_DONE_WITH_BLOCK_MAP_KEY && curState != ES_WAITING_FOR_BLOCK_MAP_VALUE && curState != ES_WRITING_BLOCK_MAP_VALUE;
+ }
+
+ // *******************************************************************************************
+ // overloads of Write
+
+ Emitter& Emitter::Write(const std::string& str)
+ {
+ if(!good())
+ return *this;
+
+ // literal scalars must use long keys
+ if(m_pState->GetStringFormat() == Literal && m_pState->GetCurGroupFlowType() != FT_FLOW)
+ m_pState->StartLongKey();
+
+ PreAtomicWrite();
+ EmitSeparationIfNecessary();
+
+ bool escapeNonAscii = m_pState->GetOutputCharset() == EscapeNonAscii;
+ EMITTER_MANIP strFmt = m_pState->GetStringFormat();
+ FLOW_TYPE flowType = m_pState->GetCurGroupFlowType();
+ unsigned curIndent = m_pState->GetCurIndent();
+
+ switch(strFmt) {
+ case Auto:
+ Utils::WriteString(m_stream, str, flowType == FT_FLOW, escapeNonAscii);
+ break;
+ case SingleQuoted:
+ if(!Utils::WriteSingleQuotedString(m_stream, str)) {
+ m_pState->SetError(ErrorMsg::SINGLE_QUOTED_CHAR);
+ return *this;
+ }
+ break;
+ case DoubleQuoted:
+ Utils::WriteDoubleQuotedString(m_stream, str, escapeNonAscii);
+ break;
+ case Literal:
+ if(flowType == FT_FLOW)
+ Utils::WriteString(m_stream, str, flowType == FT_FLOW, escapeNonAscii);
+ else
+ Utils::WriteLiteralString(m_stream, str, curIndent + m_pState->GetIndent());
+ break;
+ default:
+ assert(false);
+ }
+
+ PostAtomicWrite();
+ return *this;
+ }
+
+ void Emitter::PreWriteIntegralType(std::stringstream& str)
+ {
+ PreAtomicWrite();
+ EmitSeparationIfNecessary();
+
+ EMITTER_MANIP intFmt = m_pState->GetIntFormat();
+ switch(intFmt) {
+ case Dec:
+ str << std::dec;
+ break;
+ case Hex:
+ str << "0x";
+ str << std::hex;
+ break;
+ case Oct:
+ str << "0";
+ str << std::oct;
+ break;
+ default:
+ assert(false);
+ }
+ }
+
+ void Emitter::PreWriteStreamable(std::stringstream&)
+ {
+ PreAtomicWrite();
+ EmitSeparationIfNecessary();
+ }
+
+ unsigned Emitter::GetFloatPrecision() const
+ {
+ return m_pState->GetFloatPrecision();
+ }
+
+ unsigned Emitter::GetDoublePrecision() const
+ {
+ return m_pState->GetDoublePrecision();
+ }
+
+ void Emitter::PostWriteIntegralType(const std::stringstream& str)
+ {
+ m_stream << str.str();
+ PostAtomicWrite();
+ }
+
+ void Emitter::PostWriteStreamable(const std::stringstream& str)
+ {
+ m_stream << str.str();
+ PostAtomicWrite();
+ }
+
+ const char *Emitter::ComputeFullBoolName(bool b) const
+ {
+ const EMITTER_MANIP mainFmt = (m_pState->GetBoolLengthFormat() == ShortBool ? YesNoBool : m_pState->GetBoolFormat());
+ const EMITTER_MANIP caseFmt = m_pState->GetBoolCaseFormat();
+ switch(mainFmt) {
+ case YesNoBool:
+ switch(caseFmt) {
+ case UpperCase: return b ? "YES" : "NO";
+ case CamelCase: return b ? "Yes" : "No";
+ case LowerCase: return b ? "yes" : "no";
+ default: break;
+ }
+ break;
+ case OnOffBool:
+ switch(caseFmt) {
+ case UpperCase: return b ? "ON" : "OFF";
+ case CamelCase: return b ? "On" : "Off";
+ case LowerCase: return b ? "on" : "off";
+ default: break;
+ }
+ break;
+ case TrueFalseBool:
+ switch(caseFmt) {
+ case UpperCase: return b ? "TRUE" : "FALSE";
+ case CamelCase: return b ? "True" : "False";
+ case LowerCase: return b ? "true" : "false";
+ default: break;
+ }
+ break;
+ default:
+ break;
+ }
+ return b ? "y" : "n"; // should never get here, but it can't hurt to give these answers
+ }
+
+ Emitter& Emitter::Write(bool b)
+ {
+ if(!good())
+ return *this;
+
+ PreAtomicWrite();
+ EmitSeparationIfNecessary();
+
+ const char *name = ComputeFullBoolName(b);
+ if(m_pState->GetBoolLengthFormat() == ShortBool)
+ m_stream << name[0];
+ else
+ m_stream << name;
+
+ PostAtomicWrite();
+ return *this;
+ }
+
+ Emitter& Emitter::Write(char ch)
+ {
+ if(!good())
+ return *this;
+
+ PreAtomicWrite();
+ EmitSeparationIfNecessary();
+
+ Utils::WriteChar(m_stream, ch);
+
+ PostAtomicWrite();
+ return *this;
+ }
+
+ Emitter& Emitter::Write(const _Alias& alias)
+ {
+ if(!good())
+ return *this;
+
+ PreAtomicWrite();
+ EmitSeparationIfNecessary();
+ if(!Utils::WriteAlias(m_stream, alias.content)) {
+ m_pState->SetError(ErrorMsg::INVALID_ALIAS);
+ return *this;
+ }
+ PostAtomicWrite();
+ return *this;
+ }
+
+ Emitter& Emitter::Write(const _Anchor& anchor)
+ {
+ if(!good())
+ return *this;
+
+ PreAtomicWrite();
+ EmitSeparationIfNecessary();
+ if(!Utils::WriteAnchor(m_stream, anchor.content)) {
+ m_pState->SetError(ErrorMsg::INVALID_ANCHOR);
+ return *this;
+ }
+ m_pState->RequireHardSeparation();
+ // Note: no PostAtomicWrite() because we need another value for this node
+ return *this;
+ }
+
+ Emitter& Emitter::Write(const _Tag& tag)
+ {
+ if(!good())
+ return *this;
+
+ PreAtomicWrite();
+ EmitSeparationIfNecessary();
+
+ bool success = false;
+ if(tag.type == _Tag::Type::Verbatim)
+ success = Utils::WriteTag(m_stream, tag.content, true);
+ else if(tag.type == _Tag::Type::PrimaryHandle)
+ success = Utils::WriteTag(m_stream, tag.content, false);
+ else
+ success = Utils::WriteTagWithPrefix(m_stream, tag.prefix, tag.content);
+
+ if(!success) {
+ m_pState->SetError(ErrorMsg::INVALID_TAG);
+ return *this;
+ }
+
+ m_pState->RequireHardSeparation();
+ // Note: no PostAtomicWrite() because we need another value for this node
+ return *this;
+ }
+
+ void Emitter::EmitKindTag()
+ {
+ Write(LocalTag(""));
+ }
+
+ Emitter& Emitter::Write(const _Comment& comment)
+ {
+ if(!good())
+ return *this;
+
+ if(m_stream.col() > 0)
+ m_stream << Indentation(m_pState->GetPreCommentIndent());
+ Utils::WriteComment(m_stream, comment.content, m_pState->GetPostCommentIndent());
+ m_pState->RequireHardSeparation();
+ m_pState->ForceHardSeparation();
+
+ return *this;
+ }
+
+ Emitter& Emitter::Write(const _Null& /*null*/)
+ {
+ if(!good())
+ return *this;
+
+ PreAtomicWrite();
+ EmitSeparationIfNecessary();
+ m_stream << "~";
+ PostAtomicWrite();
+ return *this;
+ }
+
+ Emitter& Emitter::Write(const Binary& binary)
+ {
+ Write(SecondaryTag("binary"));
+
+ if(!good())
+ return *this;
+
+ PreAtomicWrite();
+ EmitSeparationIfNecessary();
+ Utils::WriteBinary(m_stream, binary);
+ PostAtomicWrite();
+ return *this;
+ }
+}
+
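The emitter above is driven entirely through EMITTER_MANIP values: SetLocalValue() dispatches BeginDoc/BeginSeq/BeginMap/Key/Value/etc. to the Emit*() handlers, which in turn walk the EMITTER_STATE machine. A minimal usage sketch of that manipulator interface (assuming the usual yaml-cpp umbrella header; the output comment is approximate):

    #include "yaml-cpp/yaml.h"
    #include <iostream>

    int main() {
        YAML::Emitter out;
        out << YAML::BeginMap;                        // pushes the block-map states
        out << YAML::Key << "ports" << YAML::Value;   // EmitKey()/EmitValue() drive the transitions
        out << YAML::Flow << YAML::BeginSeq << 0 << 1 << YAML::EndSeq;  // local Flow applies to this group only
        out << YAML::EndMap;
        if (!out.good())
            std::cerr << out.GetLastError() << std::endl;
        std::cout << out.c_str() << std::endl;        // roughly: ports: [0, 1]
        return 0;
    }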
diff --git a/yaml-cpp/src/emitterstate.cpp b/yaml-cpp/src/emitterstate.cpp
new file mode 100755
index 00000000..562e82c9
--- /dev/null
+++ b/yaml-cpp/src/emitterstate.cpp
@@ -0,0 +1,284 @@
+#include "emitterstate.h"
+#include "yaml-cpp/exceptions.h"
+#include <limits>
+
+namespace YAML
+{
+ EmitterState::EmitterState(): m_isGood(true), m_curIndent(0), m_requiresSoftSeparation(false), m_requiresHardSeparation(false)
+ {
+ // start up
+ m_stateStack.push(ES_WAITING_FOR_DOC);
+
+ // set default global manipulators
+ m_charset.set(EmitNonAscii);
+ m_strFmt.set(Auto);
+ m_boolFmt.set(TrueFalseBool);
+ m_boolLengthFmt.set(LongBool);
+ m_boolCaseFmt.set(LowerCase);
+ m_intFmt.set(Dec);
+ m_indent.set(2);
+ m_preCommentIndent.set(2);
+ m_postCommentIndent.set(1);
+ m_seqFmt.set(Block);
+ m_mapFmt.set(Block);
+ m_mapKeyFmt.set(Auto);
+ m_floatPrecision.set(6);
+ m_doublePrecision.set(15);
+ }
+
+ EmitterState::~EmitterState()
+ {
+ }
+
+ // SetLocalValue
+ // . We blindly try to set all possible formatters to this value
+ // . Only the ones that make sense will be accepted
+ void EmitterState::SetLocalValue(EMITTER_MANIP value)
+ {
+ SetOutputCharset(value, LOCAL);
+ SetStringFormat(value, LOCAL);
+ SetBoolFormat(value, LOCAL);
+ SetBoolCaseFormat(value, LOCAL);
+ SetBoolLengthFormat(value, LOCAL);
+ SetIntFormat(value, LOCAL);
+ SetFlowType(GT_SEQ, value, LOCAL);
+ SetFlowType(GT_MAP, value, LOCAL);
+ SetMapKeyFormat(value, LOCAL);
+ }
+
+ void EmitterState::BeginGroup(GROUP_TYPE type)
+ {
+ unsigned lastIndent = (m_groups.empty() ? 0 : m_groups.top().indent);
+ m_curIndent += lastIndent;
+
+ std::auto_ptr<Group> pGroup(new Group(type));
+
+ // transfer settings (which last until this group is done)
+ pGroup->modifiedSettings = m_modifiedSettings;
+
+ // set up group
+ pGroup->flow = GetFlowType(type);
+ pGroup->indent = GetIndent();
+ pGroup->usingLongKey = (GetMapKeyFormat() == LongKey ? true : false);
+
+ m_groups.push(pGroup);
+ }
+
+ void EmitterState::EndGroup(GROUP_TYPE type)
+ {
+ if(m_groups.empty())
+ return SetError(ErrorMsg::UNMATCHED_GROUP_TAG);
+
+ // get rid of the current group
+ {
+ std::auto_ptr<Group> pFinishedGroup = m_groups.pop();
+ if(pFinishedGroup->type != type)
+ return SetError(ErrorMsg::UNMATCHED_GROUP_TAG);
+ }
+
+ // reset old settings
+ unsigned lastIndent = (m_groups.empty() ? 0 : m_groups.top().indent);
+ assert(m_curIndent >= lastIndent);
+ m_curIndent -= lastIndent;
+
+ // some global settings that we changed may have been overridden
+ // by a local setting we just popped, so we need to restore them
+ m_globalModifiedSettings.restore();
+ }
+
+ GROUP_TYPE EmitterState::GetCurGroupType() const
+ {
+ if(m_groups.empty())
+ return GT_NONE;
+
+ return m_groups.top().type;
+ }
+
+ FLOW_TYPE EmitterState::GetCurGroupFlowType() const
+ {
+ if(m_groups.empty())
+ return FT_NONE;
+
+ return (m_groups.top().flow == Flow ? FT_FLOW : FT_BLOCK);
+ }
+
+ bool EmitterState::CurrentlyInLongKey()
+ {
+ if(m_groups.empty())
+ return false;
+ return m_groups.top().usingLongKey;
+ }
+
+ void EmitterState::StartLongKey()
+ {
+ if(!m_groups.empty())
+ m_groups.top().usingLongKey = true;
+ }
+
+ void EmitterState::StartSimpleKey()
+ {
+ if(!m_groups.empty())
+ m_groups.top().usingLongKey = false;
+ }
+
+ void EmitterState::ClearModifiedSettings()
+ {
+ m_modifiedSettings.clear();
+ }
+
+ bool EmitterState::SetOutputCharset(EMITTER_MANIP value, FMT_SCOPE scope)
+ {
+ switch(value) {
+ case EmitNonAscii:
+ case EscapeNonAscii:
+ _Set(m_charset, value, scope);
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool EmitterState::SetStringFormat(EMITTER_MANIP value, FMT_SCOPE scope)
+ {
+ switch(value) {
+ case Auto:
+ case SingleQuoted:
+ case DoubleQuoted:
+ case Literal:
+ _Set(m_strFmt, value, scope);
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool EmitterState::SetBoolFormat(EMITTER_MANIP value, FMT_SCOPE scope)
+ {
+ switch(value) {
+ case OnOffBool:
+ case TrueFalseBool:
+ case YesNoBool:
+ _Set(m_boolFmt, value, scope);
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool EmitterState::SetBoolLengthFormat(EMITTER_MANIP value, FMT_SCOPE scope)
+ {
+ switch(value) {
+ case LongBool:
+ case ShortBool:
+ _Set(m_boolLengthFmt, value, scope);
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool EmitterState::SetBoolCaseFormat(EMITTER_MANIP value, FMT_SCOPE scope)
+ {
+ switch(value) {
+ case UpperCase:
+ case LowerCase:
+ case CamelCase:
+ _Set(m_boolCaseFmt, value, scope);
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool EmitterState::SetIntFormat(EMITTER_MANIP value, FMT_SCOPE scope)
+ {
+ switch(value) {
+ case Dec:
+ case Hex:
+ case Oct:
+ _Set(m_intFmt, value, scope);
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool EmitterState::SetIndent(unsigned value, FMT_SCOPE scope)
+ {
+ if(value == 0)
+ return false;
+
+ _Set(m_indent, value, scope);
+ return true;
+ }
+
+ bool EmitterState::SetPreCommentIndent(unsigned value, FMT_SCOPE scope)
+ {
+ if(value == 0)
+ return false;
+
+ _Set(m_preCommentIndent, value, scope);
+ return true;
+ }
+
+ bool EmitterState::SetPostCommentIndent(unsigned value, FMT_SCOPE scope)
+ {
+ if(value == 0)
+ return false;
+
+ _Set(m_postCommentIndent, value, scope);
+ return true;
+ }
+
+ bool EmitterState::SetFlowType(GROUP_TYPE groupType, EMITTER_MANIP value, FMT_SCOPE scope)
+ {
+ switch(value) {
+ case Block:
+ case Flow:
+ _Set(groupType == GT_SEQ ? m_seqFmt : m_mapFmt, value, scope);
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ EMITTER_MANIP EmitterState::GetFlowType(GROUP_TYPE groupType) const
+ {
+ // force flow style if we're currently in a flow
+ FLOW_TYPE flowType = GetCurGroupFlowType();
+ if(flowType == FT_FLOW)
+ return Flow;
+
+ // otherwise, go with what's asked of us
+ return (groupType == GT_SEQ ? m_seqFmt.get() : m_mapFmt.get());
+ }
+
+ bool EmitterState::SetMapKeyFormat(EMITTER_MANIP value, FMT_SCOPE scope)
+ {
+ switch(value) {
+ case Auto:
+ case LongKey:
+ _Set(m_mapKeyFmt, value, scope);
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool EmitterState::SetFloatPrecision(int value, FMT_SCOPE scope)
+ {
+ if(value < 0 || value > std::numeric_limits<float>::digits10)
+ return false;
+ _Set(m_floatPrecision, value, scope);
+ return true;
+ }
+
+ bool EmitterState::SetDoublePrecision(int value, FMT_SCOPE scope)
+ {
+ if(value < 0 || value > std::numeric_limits<double>::digits10)
+ return false;
+ _Set(m_doublePrecision, value, scope);
+ return true;
+ }
+}
+
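The FMT_SCOPE split is what makes per-node manipulators temporary: a LOCAL _Set() records the change in m_modifiedSettings so it can be rolled back when the write or group finishes, while GLOBAL setters persist. In caller terms, a sketch using the Emitter interface defined above:

    YAML::Emitter out;
    out.SetSeqFormat(YAML::Flow);   // GLOBAL: every sequence is flow style from here on

    YAML::Emitter out2;
    out2 << YAML::BeginSeq;
    out2 << YAML::Flow << YAML::BeginSeq << 1 << 2 << YAML::EndSeq;  // LOCAL: only this nested seq
    out2 << YAML::BeginSeq << 3 << 4 << YAML::EndSeq;                // reverts to the Block default
    out2 << YAML::EndSeq;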
diff --git a/yaml-cpp/src/emitterstate.h b/yaml-cpp/src/emitterstate.h
new file mode 100755
index 00000000..5698e325
--- /dev/null
+++ b/yaml-cpp/src/emitterstate.h
@@ -0,0 +1,217 @@
+#ifndef EMITTERSTATE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define EMITTERSTATE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "ptr_stack.h"
+#include "setting.h"
+#include "yaml-cpp/emittermanip.h"
+#include <cassert>
+#include <vector>
+#include <stack>
+#include <memory>
+
+namespace YAML
+{
+ enum FMT_SCOPE {
+ LOCAL,
+ GLOBAL
+ };
+
+ enum GROUP_TYPE {
+ GT_NONE,
+ GT_SEQ,
+ GT_MAP
+ };
+
+ enum FLOW_TYPE {
+ FT_NONE,
+ FT_FLOW,
+ FT_BLOCK
+ };
+
+ enum NODE_STATE {
+ NS_START,
+ NS_READY_FOR_ATOM,
+ NS_END
+ };
+
+ enum EMITTER_STATE {
+ ES_WAITING_FOR_DOC,
+ ES_WRITING_DOC,
+ ES_DONE_WITH_DOC,
+
+ // block seq
+ ES_WAITING_FOR_BLOCK_SEQ_ENTRY,
+ ES_WRITING_BLOCK_SEQ_ENTRY,
+ ES_DONE_WITH_BLOCK_SEQ_ENTRY,
+
+ // flow seq
+ ES_WAITING_FOR_FLOW_SEQ_ENTRY,
+ ES_WRITING_FLOW_SEQ_ENTRY,
+ ES_DONE_WITH_FLOW_SEQ_ENTRY,
+
+ // block map
+ ES_WAITING_FOR_BLOCK_MAP_ENTRY,
+ ES_WAITING_FOR_BLOCK_MAP_KEY,
+ ES_WRITING_BLOCK_MAP_KEY,
+ ES_DONE_WITH_BLOCK_MAP_KEY,
+ ES_WAITING_FOR_BLOCK_MAP_VALUE,
+ ES_WRITING_BLOCK_MAP_VALUE,
+ ES_DONE_WITH_BLOCK_MAP_VALUE,
+
+ // flow map
+ ES_WAITING_FOR_FLOW_MAP_ENTRY,
+ ES_WAITING_FOR_FLOW_MAP_KEY,
+ ES_WRITING_FLOW_MAP_KEY,
+ ES_DONE_WITH_FLOW_MAP_KEY,
+ ES_WAITING_FOR_FLOW_MAP_VALUE,
+ ES_WRITING_FLOW_MAP_VALUE,
+ ES_DONE_WITH_FLOW_MAP_VALUE
+ };
+
+ class EmitterState
+ {
+ public:
+ EmitterState();
+ ~EmitterState();
+
+ // basic state checking
+ bool good() const { return m_isGood; }
+ const std::string GetLastError() const { return m_lastError; }
+ void SetError(const std::string& error) { m_isGood = false; m_lastError = error; }
+
+ // main state of the machine
+ EMITTER_STATE GetCurState() const { return m_stateStack.top(); }
+ void SwitchState(EMITTER_STATE state) { PopState(); PushState(state); }
+ void PushState(EMITTER_STATE state) { m_stateStack.push(state); }
+ void PopState() { m_stateStack.pop(); }
+
+ void SetLocalValue(EMITTER_MANIP value);
+
+ // group handling
+ void BeginGroup(GROUP_TYPE type);
+ void EndGroup(GROUP_TYPE type);
+
+ GROUP_TYPE GetCurGroupType() const;
+ FLOW_TYPE GetCurGroupFlowType() const;
+ int GetCurIndent() const { return m_curIndent; }
+
+ bool CurrentlyInLongKey();
+ void StartLongKey();
+ void StartSimpleKey();
+
+ bool RequiresSoftSeparation() const { return m_requiresSoftSeparation; }
+ bool RequiresHardSeparation() const { return m_requiresHardSeparation; }
+ void RequireSoftSeparation() { m_requiresSoftSeparation = true; }
+ void RequireHardSeparation() { m_requiresSoftSeparation = true; m_requiresHardSeparation = true; }
+ void ForceHardSeparation() { m_requiresSoftSeparation = false; }
+ void UnsetSeparation() { m_requiresSoftSeparation = false; m_requiresHardSeparation = false; }
+
+ void ClearModifiedSettings();
+
+ // formatters
+ bool SetOutputCharset(EMITTER_MANIP value, FMT_SCOPE scope);
+ EMITTER_MANIP GetOutputCharset() const { return m_charset.get(); }
+
+ bool SetStringFormat(EMITTER_MANIP value, FMT_SCOPE scope);
+ EMITTER_MANIP GetStringFormat() const { return m_strFmt.get(); }
+
+ bool SetBoolFormat(EMITTER_MANIP value, FMT_SCOPE scope);
+ EMITTER_MANIP GetBoolFormat() const { return m_boolFmt.get(); }
+
+ bool SetBoolLengthFormat(EMITTER_MANIP value, FMT_SCOPE scope);
+ EMITTER_MANIP GetBoolLengthFormat() const { return m_boolLengthFmt.get(); }
+
+ bool SetBoolCaseFormat(EMITTER_MANIP value, FMT_SCOPE scope);
+ EMITTER_MANIP GetBoolCaseFormat() const { return m_boolCaseFmt.get(); }
+
+ bool SetIntFormat(EMITTER_MANIP value, FMT_SCOPE scope);
+ EMITTER_MANIP GetIntFormat() const { return m_intFmt.get(); }
+
+ bool SetIndent(unsigned value, FMT_SCOPE scope);
+ int GetIndent() const { return m_indent.get(); }
+
+ bool SetPreCommentIndent(unsigned value, FMT_SCOPE scope);
+ int GetPreCommentIndent() const { return m_preCommentIndent.get(); }
+ bool SetPostCommentIndent(unsigned value, FMT_SCOPE scope);
+ int GetPostCommentIndent() const { return m_postCommentIndent.get(); }
+
+ bool SetFlowType(GROUP_TYPE groupType, EMITTER_MANIP value, FMT_SCOPE scope);
+ EMITTER_MANIP GetFlowType(GROUP_TYPE groupType) const;
+
+ bool SetMapKeyFormat(EMITTER_MANIP value, FMT_SCOPE scope);
+ EMITTER_MANIP GetMapKeyFormat() const { return m_mapKeyFmt.get(); }
+
+ bool SetFloatPrecision(int value, FMT_SCOPE scope);
+ unsigned GetFloatPrecision() const { return m_floatPrecision.get(); }
+ bool SetDoublePrecision(int value, FMT_SCOPE scope);
+ unsigned GetDoublePrecision() const { return m_doublePrecision.get(); }
+
+ private:
+ template <typename T>
+ void _Set(Setting<T>& fmt, T value, FMT_SCOPE scope);
+
+ private:
+ // basic state ok?
+ bool m_isGood;
+ std::string m_lastError;
+
+ // other state
+ std::stack<EMITTER_STATE> m_stateStack;
+
+ Setting<EMITTER_MANIP> m_charset;
+ Setting<EMITTER_MANIP> m_strFmt;
+ Setting<EMITTER_MANIP> m_boolFmt;
+ Setting<EMITTER_MANIP> m_boolLengthFmt;
+ Setting<EMITTER_MANIP> m_boolCaseFmt;
+ Setting<EMITTER_MANIP> m_intFmt;
+ Setting<unsigned> m_indent;
+ Setting<unsigned> m_preCommentIndent, m_postCommentIndent;
+ Setting<EMITTER_MANIP> m_seqFmt;
+ Setting<EMITTER_MANIP> m_mapFmt;
+ Setting<EMITTER_MANIP> m_mapKeyFmt;
+ Setting<int> m_floatPrecision;
+ Setting<int> m_doublePrecision;
+
+ SettingChanges m_modifiedSettings;
+ SettingChanges m_globalModifiedSettings;
+
+ struct Group {
+ Group(GROUP_TYPE type_): type(type_), usingLongKey(false), indent(0) {}
+
+ GROUP_TYPE type;
+ EMITTER_MANIP flow;
+ bool usingLongKey;
+ int indent;
+
+ SettingChanges modifiedSettings;
+ };
+
+ ptr_stack<Group> m_groups;
+ unsigned m_curIndent;
+ bool m_requiresSoftSeparation;
+ bool m_requiresHardSeparation;
+ };
+
+ template <typename T>
+ void EmitterState::_Set(Setting<T>& fmt, T value, FMT_SCOPE scope) {
+ switch(scope) {
+ case LOCAL:
+ m_modifiedSettings.push(fmt.set(value));
+ break;
+ case GLOBAL:
+ fmt.set(value);
+ m_globalModifiedSettings.push(fmt.set(value)); // this pushes an identity set, so when we restore,
+ // it restores to the value here, and not the previous one
+ break;
+ default:
+ assert(false);
+ }
+ }
+}
+
+#endif // EMITTERSTATE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
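Setting<T> and SettingChanges (declared in setting.h, not shown here) provide the save-and-restore behaviour that _Set() relies on: set() hands back an undo record, and SettingChanges::restore() replays those records when a group ends. A simplified, self-contained illustration of that idiom (not the library's actual classes):

    #include <functional>
    #include <utility>
    #include <vector>

    // Simplified stand-in for Setting<T>: set() returns an undo action restoring the old value.
    template <typename T>
    class Setting {
    public:
        explicit Setting(const T& v = T()): m_value(v) {}
        const T& get() const { return m_value; }
        std::function<void()> set(const T& v) {
            T old = m_value;
            m_value = v;
            return [this, old]() { m_value = old; };
        }
    private:
        T m_value;
    };

    // Simplified stand-in for SettingChanges: collects undo actions and replays them newest-first.
    class SettingChanges {
    public:
        void push(std::function<void()> undo) { m_undos.push_back(std::move(undo)); }
        void restore() {
            for (auto it = m_undos.rbegin(); it != m_undos.rend(); ++it) (*it)();
            m_undos.clear();
        }
        void clear() { m_undos.clear(); }
    private:
        std::vector<std::function<void()>> m_undos;
    };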
diff --git a/yaml-cpp/src/emitterutils.cpp b/yaml-cpp/src/emitterutils.cpp
new file mode 100755
index 00000000..3d184d6c
--- /dev/null
+++ b/yaml-cpp/src/emitterutils.cpp
@@ -0,0 +1,378 @@
+#include "emitterutils.h"
+#include "exp.h"
+#include "indentation.h"
+#include "yaml-cpp/binary.h"
+#include "yaml-cpp/exceptions.h"
+#include "stringsource.h"
+#include <sstream>
+#include <iomanip>
+
+namespace YAML
+{
+ namespace Utils
+ {
+ namespace {
+ enum {REPLACEMENT_CHARACTER = 0xFFFD};
+
+ bool IsAnchorChar(int ch) { // test for ns-anchor-char
+ switch (ch) {
+ case ',': case '[': case ']': case '{': case '}': // c-flow-indicator
+ case ' ': case '\t': // s-white
+ case 0xFEFF: // c-byte-order-mark
+ case 0xA: case 0xD: // b-char
+ return false;
+ case 0x85:
+ return true;
+ }
+
+ if (ch < 0x20)
+ return false;
+
+ if (ch < 0x7E)
+ return true;
+
+ if (ch < 0xA0)
+ return false;
+ if (ch >= 0xD800 && ch <= 0xDFFF)
+ return false;
+ if ((ch & 0xFFFE) == 0xFFFE)
+ return false;
+ if ((ch >= 0xFDD0) && (ch <= 0xFDEF))
+ return false;
+ if (ch > 0x10FFFF)
+ return false;
+
+ return true;
+ }
+
+ int Utf8BytesIndicated(char ch) {
+ int byteVal = static_cast<unsigned char>(ch);
+ switch (byteVal >> 4) {
+ case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7:
+ return 1;
+ case 12: case 13:
+ return 2;
+ case 14:
+ return 3;
+ case 15:
+ return 4;
+ default:
+ return -1;
+ }
+ }
+
+ bool IsTrailingByte(char ch) {
+ return (ch & 0xC0) == 0x80;
+ }
+
+ bool GetNextCodePointAndAdvance(int& codePoint, std::string::const_iterator& first, std::string::const_iterator last) {
+ if (first == last)
+ return false;
+
+ int nBytes = Utf8BytesIndicated(*first);
+ if (nBytes < 1) {
+ // Bad lead byte
+ ++first;
+ codePoint = REPLACEMENT_CHARACTER;
+ return true;
+ }
+
+ if (nBytes == 1) {
+ codePoint = *first++;
+ return true;
+ }
+
+ // Gather bits from trailing bytes
+ codePoint = static_cast<unsigned char>(*first) & ~(0xFF << (7 - nBytes));
+ ++first;
+ --nBytes;
+ for (; nBytes > 0; ++first, --nBytes) {
+ if ((first == last) || !IsTrailingByte(*first)) {
+ codePoint = REPLACEMENT_CHARACTER;
+ break;
+ }
+ codePoint <<= 6;
+ codePoint |= *first & 0x3F;
+ }
+
+ // Check for illegal code points
+ if (codePoint > 0x10FFFF)
+ codePoint = REPLACEMENT_CHARACTER;
+ else if (codePoint >= 0xD800 && codePoint <= 0xDFFF)
+ codePoint = REPLACEMENT_CHARACTER;
+ else if ((codePoint & 0xFFFE) == 0xFFFE)
+ codePoint = REPLACEMENT_CHARACTER;
+ else if (codePoint >= 0xFDD0 && codePoint <= 0xFDEF)
+ codePoint = REPLACEMENT_CHARACTER;
+ return true;
+ }
+
+ void WriteCodePoint(ostream& out, int codePoint) {
+ if (codePoint < 0 || codePoint > 0x10FFFF) {
+ codePoint = REPLACEMENT_CHARACTER;
+ }
+ if (codePoint < 0x7F) {
+ out << static_cast<char>(codePoint);
+ } else if (codePoint < 0x7FF) {
+ out << static_cast<char>(0xC0 | (codePoint >> 6))
+ << static_cast<char>(0x80 | (codePoint & 0x3F));
+ } else if (codePoint < 0xFFFF) {
+ out << static_cast<char>(0xE0 | (codePoint >> 12))
+ << static_cast<char>(0x80 | ((codePoint >> 6) & 0x3F))
+ << static_cast<char>(0x80 | (codePoint & 0x3F));
+ } else {
+ out << static_cast<char>(0xF0 | (codePoint >> 18))
+ << static_cast<char>(0x80 | ((codePoint >> 12) & 0x3F))
+ << static_cast<char>(0x80 | ((codePoint >> 6) & 0x3F))
+ << static_cast<char>(0x80 | (codePoint & 0x3F));
+ }
+ }
+
+ bool IsValidPlainScalar(const std::string& str, bool inFlow, bool allowOnlyAscii) {
+ if(str.empty())
+ return false;
+
+ // first check the start
+ const RegEx& start = (inFlow ? Exp::PlainScalarInFlow() : Exp::PlainScalar());
+ if(!start.Matches(str))
+ return false;
+
+ // and check the end for plain whitespace (which can't be faithfully kept in a plain scalar)
+ if(!str.empty() && *str.rbegin() == ' ')
+ return false;
+
+ // then check until something is disallowed
+ const RegEx& disallowed = (inFlow ? Exp::EndScalarInFlow() : Exp::EndScalar())
+ || (Exp::BlankOrBreak() + Exp::Comment())
+ || Exp::NotPrintable()
+ || Exp::Utf8_ByteOrderMark()
+ || Exp::Break()
+ || Exp::Tab();
+ StringCharSource buffer(str.c_str(), str.size());
+ while(buffer) {
+ if(disallowed.Matches(buffer))
+ return false;
+ if(allowOnlyAscii && (0x7F < static_cast<unsigned char>(buffer[0])))
+ return false;
+ ++buffer;
+ }
+
+ return true;
+ }
+
+ void WriteDoubleQuoteEscapeSequence(ostream& out, int codePoint) {
+ static const char hexDigits[] = "0123456789abcdef";
+
+ char escSeq[] = "\\U00000000";
+ int digits = 8;
+ if (codePoint < 0xFF) {
+ escSeq[1] = 'x';
+ digits = 2;
+ } else if (codePoint < 0xFFFF) {
+ escSeq[1] = 'u';
+ digits = 4;
+ }
+
+ // Write digits into the escape sequence
+ int i = 2;
+ for (; digits > 0; --digits, ++i) {
+ escSeq[i] = hexDigits[(codePoint >> (4 * (digits - 1))) & 0xF];
+ }
+
+ escSeq[i] = 0; // terminate with NUL character
+ out << escSeq;
+ }
+
+ bool WriteAliasName(ostream& out, const std::string& str) {
+ int codePoint;
+ for(std::string::const_iterator i = str.begin();
+ GetNextCodePointAndAdvance(codePoint, i, str.end());
+ )
+ {
+ if (!IsAnchorChar(codePoint))
+ return false;
+
+ WriteCodePoint(out, codePoint);
+ }
+ return true;
+ }
+ }
+
+ bool WriteString(ostream& out, const std::string& str, bool inFlow, bool escapeNonAscii)
+ {
+ if(IsValidPlainScalar(str, inFlow, escapeNonAscii)) {
+ out << str;
+ return true;
+ } else
+ return WriteDoubleQuotedString(out, str, escapeNonAscii);
+ }
+
+ bool WriteSingleQuotedString(ostream& out, const std::string& str)
+ {
+ out << "'";
+ int codePoint;
+ for(std::string::const_iterator i = str.begin();
+ GetNextCodePointAndAdvance(codePoint, i, str.end());
+ )
+ {
+ if (codePoint == '\n')
+ return false; // We can't handle a new line and the attendant indentation yet
+
+ if (codePoint == '\'')
+ out << "''";
+ else
+ WriteCodePoint(out, codePoint);
+ }
+ out << "'";
+ return true;
+ }
+
+ bool WriteDoubleQuotedString(ostream& out, const std::string& str, bool escapeNonAscii)
+ {
+ out << "\"";
+ int codePoint;
+ for(std::string::const_iterator i = str.begin();
+ GetNextCodePointAndAdvance(codePoint, i, str.end());
+ )
+ {
+ if (codePoint == '\"')
+ out << "\\\"";
+ else if (codePoint == '\\')
+ out << "\\\\";
+ else if (codePoint < 0x20 || (codePoint >= 0x80 && codePoint <= 0xA0)) // Control characters and non-breaking space
+ WriteDoubleQuoteEscapeSequence(out, codePoint);
+ else if (codePoint == 0xFEFF) // Byte order marks (ZWNBSP) should be escaped (YAML 1.2, sec. 5.2)
+ WriteDoubleQuoteEscapeSequence(out, codePoint);
+ else if (escapeNonAscii && codePoint > 0x7E)
+ WriteDoubleQuoteEscapeSequence(out, codePoint);
+ else
+ WriteCodePoint(out, codePoint);
+ }
+ out << "\"";
+ return true;
+ }
+
+ bool WriteLiteralString(ostream& out, const std::string& str, int indent)
+ {
+ out << "|\n";
+ out << IndentTo(indent);
+ int codePoint;
+ for(std::string::const_iterator i = str.begin();
+ GetNextCodePointAndAdvance(codePoint, i, str.end());
+ )
+ {
+ if (codePoint == '\n')
+ out << "\n" << IndentTo(indent);
+ else
+ WriteCodePoint(out, codePoint);
+ }
+ return true;
+ }
+
+ bool WriteChar(ostream& out, char ch)
+ {
+ if(('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z'))
+ out << ch;
+ else if((0x20 <= ch && ch <= 0x7e) || ch == ' ')
+ out << "\"" << ch << "\"";
+ else if(ch == '\t')
+ out << "\"\\t\"";
+ else if(ch == '\n')
+ out << "\"\\n\"";
+ else if(ch == '\b')
+ out << "\"\\b\"";
+ else {
+ out << "\"";
+ WriteDoubleQuoteEscapeSequence(out, ch);
+ out << "\"";
+ }
+ return true;
+ }
+
+ bool WriteComment(ostream& out, const std::string& str, int postCommentIndent)
+ {
+ const unsigned curIndent = out.col();
+ out << "#" << Indentation(postCommentIndent);
+ int codePoint;
+ for(std::string::const_iterator i = str.begin();
+ GetNextCodePointAndAdvance(codePoint, i, str.end());
+ )
+ {
+ if(codePoint == '\n')
+ out << "\n" << IndentTo(curIndent) << "#" << Indentation(postCommentIndent);
+ else
+ WriteCodePoint(out, codePoint);
+ }
+ return true;
+ }
+
+ bool WriteAlias(ostream& out, const std::string& str)
+ {
+ out << "*";
+ return WriteAliasName(out, str);
+ }
+
+ bool WriteAnchor(ostream& out, const std::string& str)
+ {
+ out << "&";
+ return WriteAliasName(out, str);
+ }
+
+ bool WriteTag(ostream& out, const std::string& str, bool verbatim)
+ {
+ out << (verbatim ? "!<" : "!");
+ StringCharSource buffer(str.c_str(), str.size());
+ const RegEx& reValid = verbatim ? Exp::URI() : Exp::Tag();
+ while(buffer) {
+ int n = reValid.Match(buffer);
+ if(n <= 0)
+ return false;
+
+ while(--n >= 0) {
+ out << buffer[0];
+ ++buffer;
+ }
+ }
+ if (verbatim)
+ out << ">";
+ return true;
+ }
+
+ bool WriteTagWithPrefix(ostream& out, const std::string& prefix, const std::string& tag)
+ {
+ out << "!";
+ StringCharSource prefixBuffer(prefix.c_str(), prefix.size());
+ while(prefixBuffer) {
+ int n = Exp::URI().Match(prefixBuffer);
+ if(n <= 0)
+ return false;
+
+ while(--n >= 0) {
+ out << prefixBuffer[0];
+ ++prefixBuffer;
+ }
+ }
+
+ out << "!";
+ StringCharSource tagBuffer(tag.c_str(), tag.size());
+ while(tagBuffer) {
+ int n = Exp::Tag().Match(tagBuffer);
+ if(n <= 0)
+ return false;
+
+ while(--n >= 0) {
+ out << tagBuffer[0];
+ ++tagBuffer;
+ }
+ }
+ return true;
+ }
+
+ bool WriteBinary(ostream& out, const Binary& binary)
+ {
+ WriteDoubleQuotedString(out, EncodeBase64(binary.data(), binary.size()), false);
+ return true;
+ }
+ }
+}
+
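WriteCodePoint() above picks the UTF-8 byte count from the code point's magnitude and packs the continuation bytes as 0x80 | (bits & 0x3F). For reference, the standard UTF-8 boundaries are U+007F / U+07FF / U+FFFF / U+10FFFF, as in this hypothetical standalone helper:

    #include <string>

    // Encode a single Unicode code point as UTF-8 bytes.
    std::string EncodeUtf8(int cp) {
        std::string out;
        if (cp <= 0x7F) {
            out += static_cast<char>(cp);                      // 1 byte: 0xxxxxxx
        } else if (cp <= 0x7FF) {
            out += static_cast<char>(0xC0 | (cp >> 6));        // 2 bytes: 110xxxxx 10xxxxxx
            out += static_cast<char>(0x80 | (cp & 0x3F));
        } else if (cp <= 0xFFFF) {
            out += static_cast<char>(0xE0 | (cp >> 12));       // 3 bytes
            out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
            out += static_cast<char>(0x80 | (cp & 0x3F));
        } else {
            out += static_cast<char>(0xF0 | (cp >> 18));       // 4 bytes, up to U+10FFFF
            out += static_cast<char>(0x80 | ((cp >> 12) & 0x3F));
            out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
            out += static_cast<char>(0x80 | (cp & 0x3F));
        }
        return out;  // e.g. EncodeUtf8(0x263A) == "\xE2\x98\xBA"
    }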
diff --git a/yaml-cpp/src/emitterutils.h b/yaml-cpp/src/emitterutils.h
new file mode 100755
index 00000000..0e270d69
--- /dev/null
+++ b/yaml-cpp/src/emitterutils.h
@@ -0,0 +1,32 @@
+#ifndef EMITTERUTILS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define EMITTERUTILS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/ostream.h"
+#include <string>
+
+namespace YAML
+{
+ class Binary;
+
+ namespace Utils
+ {
+ bool WriteString(ostream& out, const std::string& str, bool inFlow, bool escapeNonAscii);
+ bool WriteSingleQuotedString(ostream& out, const std::string& str);
+ bool WriteDoubleQuotedString(ostream& out, const std::string& str, bool escapeNonAscii);
+ bool WriteLiteralString(ostream& out, const std::string& str, int indent);
+ bool WriteChar(ostream& out, char ch);
+ bool WriteComment(ostream& out, const std::string& str, int postCommentIndent);
+ bool WriteAlias(ostream& out, const std::string& str);
+ bool WriteAnchor(ostream& out, const std::string& str);
+ bool WriteTag(ostream& out, const std::string& str, bool verbatim);
+ bool WriteTagWithPrefix(ostream& out, const std::string& prefix, const std::string& tag);
+ bool WriteBinary(ostream& out, const Binary& binary);
+ }
+}
+
+#endif // EMITTERUTILS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/exp.cpp b/yaml-cpp/src/exp.cpp
new file mode 100755
index 00000000..7bc54546
--- /dev/null
+++ b/yaml-cpp/src/exp.cpp
@@ -0,0 +1,113 @@
+#include "exp.h"
+#include "yaml-cpp/exceptions.h"
+#include <sstream>
+
+namespace YAML
+{
+ namespace Exp
+ {
+ unsigned ParseHex(const std::string& str, const Mark& mark)
+ {
+ unsigned value = 0;
+ for(std::size_t i=0;i<str.size();i++) {
+ char ch = str[i];
+ int digit = 0;
+ if('a' <= ch && ch <= 'f')
+ digit = ch - 'a' + 10;
+ else if('A' <= ch && ch <= 'F')
+ digit = ch - 'A' + 10;
+ else if('0' <= ch && ch <= '9')
+ digit = ch - '0';
+ else
+ throw ParserException(mark, ErrorMsg::INVALID_HEX);
+
+ value = (value << 4) + digit;
+ }
+
+ return value;
+ }
+
+ std::string Str(unsigned ch)
+ {
+ return std::string(1, static_cast<char>(ch));
+ }
+
+ // Escape
+ // . Translates the next 'codeLength' characters into a hex number and returns the result.
+ // . Throws if it's not actually hex.
+ std::string Escape(Stream& in, int codeLength)
+ {
+ // grab string
+ std::string str;
+ for(int i=0;i<codeLength;i++)
+ str += in.get();
+
+ // get the value
+ unsigned value = ParseHex(str, in.mark());
+
+ // legal unicode?
+ if((value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF) {
+ std::stringstream msg;
+ msg << ErrorMsg::INVALID_UNICODE << value;
+ throw ParserException(in.mark(), msg.str());
+ }
+
+ // now break it up into chars
+ if(value <= 0x7F)
+ return Str(value);
+ else if(value <= 0x7FF)
+ return Str(0xC0 + (value >> 6)) + Str(0x80 + (value & 0x3F));
+ else if(value <= 0xFFFF)
+ return Str(0xE0 + (value >> 12)) + Str(0x80 + ((value >> 6) & 0x3F)) + Str(0x80 + (value & 0x3F));
+ else
+ return Str(0xF0 + (value >> 18)) + Str(0x80 + ((value >> 12) & 0x3F)) +
+ Str(0x80 + ((value >> 6) & 0x3F)) + Str(0x80 + (value & 0x3F));
+ }
+
+ // Escape
+ // . Escapes the sequence starting at 'in' (it must begin with a '\' or single quote)
+ // and returns the result.
+ // . Throws if it's an unknown escape character.
+ std::string Escape(Stream& in)
+ {
+ // eat slash
+ char escape = in.get();
+
+ // switch on escape character
+ char ch = in.get();
+
+ // first do single quote, since it's easier
+ if(escape == '\'' && ch == '\'')
+ return "\'";
+
+ // now do the slash (we're not gonna check if it's a slash - you better pass one!)
+ switch(ch) {
+ case '0': return std::string(1, '\x00');
+ case 'a': return "\x07";
+ case 'b': return "\x08";
+ case 't':
+ case '\t': return "\x09";
+ case 'n': return "\x0A";
+ case 'v': return "\x0B";
+ case 'f': return "\x0C";
+ case 'r': return "\x0D";
+ case 'e': return "\x1B";
+ case ' ': return "\x20";
+ case '\"': return "\"";
+ case '\'': return "\'";
+ case '\\': return "\\";
+ case '/': return "/";
+ case 'N': return "\x85";
+ case '_': return "\xA0";
+ case 'L': return "\xE2\x80\xA8"; // LS (#x2028)
+ case 'P': return "\xE2\x80\xA9"; // PS (#x2029)
+ case 'x': return Escape(in, 2);
+ case 'u': return Escape(in, 4);
+ case 'U': return Escape(in, 8);
+ }
+
+ std::stringstream msg;
+ throw ParserException(in.mark(), std::string(ErrorMsg::INVALID_ESCAPE) + ch);
+ }
+ }
+}
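As a worked example: for the escaped scalar "\u263A", Escape(in) sees ch == 'u' and calls Escape(in, 4); ParseHex folds the four digits as value = value*16 + digit, giving 0x263A, which is then re-encoded as the three UTF-8 bytes 0xE2 0x98 0xBA (via Str(0xE0 + (value >> 12)) and so on). A value in the surrogate range 0xD800-0xDFFF, or above 0x10FFFF, instead raises ParserException with INVALID_UNICODE.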
diff --git a/yaml-cpp/src/exp.h b/yaml-cpp/src/exp.h
new file mode 100755
index 00000000..3e12aba4
--- /dev/null
+++ b/yaml-cpp/src/exp.h
@@ -0,0 +1,196 @@
+#ifndef EXP_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define EXP_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "regex.h"
+#include <string>
+#include <ios>
+#include "stream.h"
+
+namespace YAML
+{
+ ////////////////////////////////////////////////////////////////////////////////
+ // Here we store a bunch of expressions for matching different parts of the file.
+
+ namespace Exp
+ {
+ // misc
+ inline const RegEx& Space() {
+ static const RegEx e = RegEx(' ');
+ return e;
+ }
+ inline const RegEx& Tab() {
+ static const RegEx e = RegEx('\t');
+ return e;
+ }
+ inline const RegEx& Blank() {
+ static const RegEx e = Space() || Tab();
+ return e;
+ }
+ inline const RegEx& Break() {
+ static const RegEx e = RegEx('\n') || RegEx("\r\n");
+ return e;
+ }
+ inline const RegEx& BlankOrBreak() {
+ static const RegEx e = Blank() || Break();
+ return e;
+ }
+ inline const RegEx& Digit() {
+ static const RegEx e = RegEx('0', '9');
+ return e;
+ }
+ inline const RegEx& Alpha() {
+ static const RegEx e = RegEx('a', 'z') || RegEx('A', 'Z');
+ return e;
+ }
+ inline const RegEx& AlphaNumeric() {
+ static const RegEx e = Alpha() || Digit();
+ return e;
+ }
+ inline const RegEx& Word() {
+ static const RegEx e = AlphaNumeric() || RegEx('-');
+ return e;
+ }
+ inline const RegEx& Hex() {
+ static const RegEx e = Digit() || RegEx('A', 'F') || RegEx('a', 'f');
+ return e;
+ }
+ // Valid Unicode code points that are not part of c-printable (YAML 1.2, sec. 5.1)
+ inline const RegEx& NotPrintable() {
+ static const RegEx e = RegEx(0) ||
+ RegEx("\x01\x02\x03\x04\x05\x06\x07\x08\x0B\x0C\x7F", REGEX_OR) ||
+ RegEx(0x0E, 0x1F) ||
+ (RegEx('\xC2') + (RegEx('\x80', '\x84') || RegEx('\x86', '\x9F')));
+ return e;
+ }
+ inline const RegEx& Utf8_ByteOrderMark() {
+ static const RegEx e = RegEx("\xEF\xBB\xBF");
+ return e;
+ }
+
+ // actual tags
+
+ inline const RegEx& DocStart() {
+ static const RegEx e = RegEx("---") + (BlankOrBreak() || RegEx());
+ return e;
+ }
+ inline const RegEx& DocEnd() {
+ static const RegEx e = RegEx("...") + (BlankOrBreak() || RegEx());
+ return e;
+ }
+ inline const RegEx& DocIndicator() {
+ static const RegEx e = DocStart() || DocEnd();
+ return e;
+ }
+ inline const RegEx& BlockEntry() {
+ static const RegEx e = RegEx('-') + (BlankOrBreak() || RegEx());
+ return e;
+ }
+ inline const RegEx& Key() {
+ static const RegEx e = RegEx('?');
+ return e;
+ }
+ inline const RegEx& KeyInFlow() {
+ static const RegEx e = RegEx('?') + BlankOrBreak();
+ return e;
+ }
+ inline const RegEx& Value() {
+ static const RegEx e = RegEx(':') + (BlankOrBreak() || RegEx());
+ return e;
+ }
+ inline const RegEx& ValueInFlow() {
+ static const RegEx e = RegEx(':') + (BlankOrBreak() || RegEx(",}", REGEX_OR));
+ return e;
+ }
+ inline const RegEx& ValueInJSONFlow() {
+ static const RegEx e = RegEx(':');
+ return e;
+ }
+ inline const RegEx Comment() {
+ static const RegEx e = RegEx('#');
+ return e;
+ }
+ inline const RegEx& Anchor() {
+ static const RegEx e = !(RegEx("[]{},", REGEX_OR) || BlankOrBreak());
+ return e;
+ }
+ inline const RegEx& AnchorEnd() {
+ static const RegEx e = RegEx("?:,]}%@`", REGEX_OR) || BlankOrBreak();
+ return e;
+ }
+ inline const RegEx& URI() {
+ static const RegEx e = Word() || RegEx("#;/?:@&=+$,_.!~*'()[]", REGEX_OR) || (RegEx('%') + Hex() + Hex());
+ return e;
+ }
+ inline const RegEx& Tag() {
+ static const RegEx e = Word() || RegEx("#;/?:@&=+$_.~*'", REGEX_OR) || (RegEx('%') + Hex() + Hex());
+ return e;
+ }
+
+ // Plain scalar rules:
+ // . Cannot start with a blank.
+ // . Can never start with any of , [ ] { } # & * ! | > \' \" % @ `
+ // . In the block context - ? : must not be followed by a space.
+ // . In the flow context ? is illegal and : and - must not be followed by a space.
+ inline const RegEx& PlainScalar() {
+ static const RegEx e = !(BlankOrBreak() || RegEx(",[]{}#&*!|>\'\"%@`", REGEX_OR) || (RegEx("-?:", REGEX_OR) + (BlankOrBreak() || RegEx())));
+ return e;
+ }
+ inline const RegEx& PlainScalarInFlow() {
+ static const RegEx e = !(BlankOrBreak() || RegEx("?,[]{}#&*!|>\'\"%@`", REGEX_OR) || (RegEx("-:", REGEX_OR) + Blank()));
+ return e;
+ }
+ inline const RegEx& EndScalar() {
+ static const RegEx e = RegEx(':') + (BlankOrBreak() || RegEx());
+ return e;
+ }
+ inline const RegEx& EndScalarInFlow() {
+ static const RegEx e = (RegEx(':') + (BlankOrBreak() || RegEx() || RegEx(",]}", REGEX_OR))) || RegEx(",?[]{}", REGEX_OR);
+ return e;
+ }
+
+ inline const RegEx& EscSingleQuote() {
+ static const RegEx e = RegEx("\'\'");
+ return e;
+ }
+ inline const RegEx& EscBreak() {
+ static const RegEx e = RegEx('\\') + Break();
+ return e;
+ }
+
+ inline const RegEx& ChompIndicator() {
+ static const RegEx e = RegEx("+-", REGEX_OR);
+ return e;
+ }
+ inline const RegEx& Chomp() {
+ static const RegEx e = (ChompIndicator() + Digit()) || (Digit() + ChompIndicator()) || ChompIndicator() || Digit();
+ return e;
+ }
+
+ // and some functions
+ std::string Escape(Stream& in);
+ }
+
+ namespace Keys
+ {
+ const char Directive = '%';
+ const char FlowSeqStart = '[';
+ const char FlowSeqEnd = ']';
+ const char FlowMapStart = '{';
+ const char FlowMapEnd = '}';
+ const char FlowEntry = ',';
+ const char Alias = '*';
+ const char Anchor = '&';
+ const char Tag = '!';
+ const char LiteralScalar = '|';
+ const char FoldedScalar = '>';
+ const char VerbatimTagStart = '<';
+ const char VerbatimTagEnd = '>';
+ }
+}
+
+#endif // EXP_H_62B23520_7C8E_11DE_8A39_0800200C9A66
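The RegEx combinators used throughout this header compose like ordinary values: || is alternation, + is concatenation, ! is negation, and RegEx('a', 'z') is a character range. A small sketch, assuming Matches() tests against the start of its argument the way IsValidPlainScalar() uses it in emitterutils.cpp:

    using namespace YAML;
    const RegEx pctEncoded = RegEx('%') + Exp::Hex() + Exp::Hex();      // e.g. "%2F" in a URI
    bool a = Exp::Digit().Matches(std::string("7"));                    // true
    bool b = pctEncoded.Matches(std::string("%2F"));                    // true
    bool c = Exp::DocStart().Matches(std::string("--- "));              // true: "---" then a blank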
diff --git a/yaml-cpp/src/indentation.h b/yaml-cpp/src/indentation.h
new file mode 100755
index 00000000..25f684f8
--- /dev/null
+++ b/yaml-cpp/src/indentation.h
@@ -0,0 +1,38 @@
+#ifndef INDENTATION_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define INDENTATION_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/ostream.h"
+#include <iostream>
+
+namespace YAML
+{
+ struct Indentation {
+ Indentation(unsigned n_): n(n_) {}
+ unsigned n;
+ };
+
+ inline ostream& operator << (ostream& out, const Indentation& indent) {
+ for(unsigned i=0;i<indent.n;i++)
+ out << ' ';
+ return out;
+ }
+
+ struct IndentTo {
+ IndentTo(unsigned n_): n(n_) {}
+ unsigned n;
+ };
+
+ inline ostream& operator << (ostream& out, const IndentTo& indent) {
+ while(out.col() < indent.n)
+ out << ' ';
+ return out;
+ }
+}
+
+
+#endif // INDENTATION_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/iterator.cpp b/yaml-cpp/src/iterator.cpp
new file mode 100755
index 00000000..f4159e32
--- /dev/null
+++ b/yaml-cpp/src/iterator.cpp
@@ -0,0 +1,103 @@
+#include "yaml-cpp/node.h"
+#include "yaml-cpp/exceptions.h"
+#include "iterpriv.h"
+
+namespace YAML
+{
+ Iterator::Iterator(): m_pData(new IterPriv)
+ {
+ }
+
+ Iterator::Iterator(std::auto_ptr<IterPriv> pData): m_pData(pData)
+ {
+ }
+
+ Iterator::Iterator(const Iterator& rhs): m_pData(new IterPriv(*rhs.m_pData))
+ {
+ }
+
+ Iterator& Iterator::operator = (const Iterator& rhs)
+ {
+ if(this == &rhs)
+ return *this;
+
+ m_pData.reset(new IterPriv(*rhs.m_pData));
+ return *this;
+ }
+
+ Iterator::~Iterator()
+ {
+ }
+
+ Iterator& Iterator::operator ++ ()
+ {
+ if(m_pData->type == IterPriv::IT_SEQ)
+ ++m_pData->seqIter;
+ else if(m_pData->type == IterPriv::IT_MAP)
+ ++m_pData->mapIter;
+
+ return *this;
+ }
+
+ Iterator Iterator::operator ++ (int)
+ {
+ Iterator temp = *this;
+
+ if(m_pData->type == IterPriv::IT_SEQ)
+ ++m_pData->seqIter;
+ else if(m_pData->type == IterPriv::IT_MAP)
+ ++m_pData->mapIter;
+
+ return temp;
+ }
+
+ const Node& Iterator::operator * () const
+ {
+ if(m_pData->type == IterPriv::IT_SEQ)
+ return **m_pData->seqIter;
+
+ throw BadDereference();
+ }
+
+ const Node *Iterator::operator -> () const
+ {
+ if(m_pData->type == IterPriv::IT_SEQ)
+ return *m_pData->seqIter;
+
+ throw BadDereference();
+ }
+
+ const Node& Iterator::first() const
+ {
+ if(m_pData->type == IterPriv::IT_MAP)
+ return *m_pData->mapIter->first;
+
+ throw BadDereference();
+ }
+
+ const Node& Iterator::second() const
+ {
+ if(m_pData->type == IterPriv::IT_MAP)
+ return *m_pData->mapIter->second;
+
+ throw BadDereference();
+ }
+
+ bool operator == (const Iterator& it, const Iterator& jt)
+ {
+ if(it.m_pData->type != jt.m_pData->type)
+ return false;
+
+ if(it.m_pData->type == IterPriv::IT_SEQ)
+ return it.m_pData->seqIter == jt.m_pData->seqIter;
+ else if(it.m_pData->type == IterPriv::IT_MAP)
+ return it.m_pData->mapIter == jt.m_pData->mapIter;
+
+ return true;
+ }
+
+ bool operator != (const Iterator& it, const Iterator& jt)
+ {
+ return !(it == jt);
+ }
+}
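This is the old 0.3-style iterator interface: sequence nodes are dereferenced with * or ->, map nodes are read through first()/second(), and using the wrong accessor throws BadDereference. A typical traversal sketch, assuming a document already loaded into a Node and the stream-style extraction operators provided elsewhere in yaml-cpp:

    #include "yaml-cpp/yaml.h"
    #include <iostream>
    #include <string>

    void DumpMap(const YAML::Node& doc) {
        for (YAML::Iterator it = doc.begin(); it != doc.end(); ++it) {
            std::string key, value;
            it.first() >> key;      // key node of the current map entry
            it.second() >> value;   // value node (assumed scalar here)
            std::cout << key << ": " << value << "\n";
        }
    }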
diff --git a/yaml-cpp/src/iterpriv.h b/yaml-cpp/src/iterpriv.h
new file mode 100755
index 00000000..c511e8ac
--- /dev/null
+++ b/yaml-cpp/src/iterpriv.h
@@ -0,0 +1,33 @@
+#ifndef ITERPRIV_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define ITERPRIV_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/ltnode.h"
+#include <vector>
+#include <map>
+
+namespace YAML
+{
+ class Node;
+
+ // IterPriv
+ // . The implementation for iterators - essentially a union of sequence and map iterators.
+ struct IterPriv
+ {
+ IterPriv(): type(IT_NONE) {}
+ IterPriv(std::vector <Node *>::const_iterator it): type(IT_SEQ), seqIter(it) {}
+ IterPriv(std::map <Node *, Node *, ltnode>::const_iterator it): type(IT_MAP), mapIter(it) {}
+
+ enum ITER_TYPE { IT_NONE, IT_SEQ, IT_MAP };
+ ITER_TYPE type;
+
+ std::vector <Node *>::const_iterator seqIter;
+ std::map <Node *, Node *, ltnode>::const_iterator mapIter;
+ };
+}
+
+#endif // ITERPRIV_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/node.cpp b/yaml-cpp/src/node.cpp
new file mode 100755
index 00000000..360b4ad9
--- /dev/null
+++ b/yaml-cpp/src/node.cpp
@@ -0,0 +1,269 @@
+#include "yaml-cpp/node.h"
+#include "yaml-cpp/aliasmanager.h"
+#include "yaml-cpp/emitfromevents.h"
+#include "yaml-cpp/emitter.h"
+#include "yaml-cpp/eventhandler.h"
+#include "iterpriv.h"
+#include "nodebuilder.h"
+#include "nodeownership.h"
+#include "scanner.h"
+#include "tag.h"
+#include "token.h"
+#include <cassert>
+#include <stdexcept>
+
+namespace YAML
+{
+ bool ltnode::operator()(const Node *pNode1, const Node *pNode2) const {
+ return *pNode1 < *pNode2;
+ }
+
+ Node::Node(): m_pOwnership(new NodeOwnership), m_type(NodeType::Null)
+ {
+ }
+
+ Node::Node(NodeOwnership& owner): m_pOwnership(new NodeOwnership(&owner)), m_type(NodeType::Null)
+ {
+ }
+
+ Node::~Node()
+ {
+ Clear();
+ }
+
+ void Node::Clear()
+ {
+ m_pOwnership.reset(new NodeOwnership);
+ m_type = NodeType::Null;
+ m_tag.clear();
+ m_scalarData.clear();
+ m_seqData.clear();
+ m_mapData.clear();
+ }
+
+ bool Node::IsAliased() const
+ {
+ return m_pOwnership->IsAliased(*this);
+ }
+
+ Node& Node::CreateNode()
+ {
+ return m_pOwnership->Create();
+ }
+
+ std::auto_ptr<Node> Node::Clone() const
+ {
+ std::auto_ptr<Node> pNode(new Node);
+ NodeBuilder nodeBuilder(*pNode);
+ EmitEvents(nodeBuilder);
+ return pNode;
+ }
+
+ void Node::EmitEvents(EventHandler& eventHandler) const
+ {
+ eventHandler.OnDocumentStart(m_mark);
+ AliasManager am;
+ EmitEvents(am, eventHandler);
+ eventHandler.OnDocumentEnd();
+ }
+
+ void Node::EmitEvents(AliasManager& am, EventHandler& eventHandler) const
+ {
+ anchor_t anchor = NullAnchor;
+ if(IsAliased()) {
+ anchor = am.LookupAnchor(*this);
+ if(anchor) {
+ eventHandler.OnAlias(m_mark, anchor);
+ return;
+ }
+
+ am.RegisterReference(*this);
+ anchor = am.LookupAnchor(*this);
+ }
+
+ switch(m_type) {
+ case NodeType::Null:
+ eventHandler.OnNull(m_mark, anchor);
+ break;
+ case NodeType::Scalar:
+ eventHandler.OnScalar(m_mark, m_tag, anchor, m_scalarData);
+ break;
+ case NodeType::Sequence:
+ eventHandler.OnSequenceStart(m_mark, m_tag, anchor);
+ for(std::size_t i=0;i<m_seqData.size();i++)
+ m_seqData[i]->EmitEvents(am, eventHandler);
+ eventHandler.OnSequenceEnd();
+ break;
+ case NodeType::Map:
+ eventHandler.OnMapStart(m_mark, m_tag, anchor);
+ for(node_map::const_iterator it=m_mapData.begin();it!=m_mapData.end();++it) {
+ it->first->EmitEvents(am, eventHandler);
+ it->second->EmitEvents(am, eventHandler);
+ }
+ eventHandler.OnMapEnd();
+ break;
+ }
+ }
+
+ void Node::Init(NodeType::value type, const Mark& mark, const std::string& tag)
+ {
+ Clear();
+ m_mark = mark;
+ m_type = type;
+ m_tag = tag;
+ }
+
+ void Node::MarkAsAliased()
+ {
+ m_pOwnership->MarkAsAliased(*this);
+ }
+
+ void Node::SetScalarData(const std::string& data)
+ {
+ assert(m_type == NodeType::Scalar); // TODO: throw?
+ m_scalarData = data;
+ }
+
+ void Node::Append(Node& node)
+ {
+ assert(m_type == NodeType::Sequence); // TODO: throw?
+ m_seqData.push_back(&node);
+ }
+
+ void Node::Insert(Node& key, Node& value)
+ {
+ assert(m_type == NodeType::Map); // TODO: throw?
+ m_mapData[&key] = &value;
+ }
+
+ // begin
+ // . Returns an iterator to the beginning of this (sequence or map).
+ Iterator Node::begin() const
+ {
+ switch(m_type) {
+ case NodeType::Null:
+ case NodeType::Scalar:
+ return Iterator();
+ case NodeType::Sequence:
+ return Iterator(std::auto_ptr<IterPriv>(new IterPriv(m_seqData.begin())));
+ case NodeType::Map:
+ return Iterator(std::auto_ptr<IterPriv>(new IterPriv(m_mapData.begin())));
+ }
+
+ assert(false);
+ return Iterator();
+ }
+
+ // end
+ // . Returns an iterator to the end of this (sequence or map).
+ Iterator Node::end() const
+ {
+ switch(m_type) {
+ case NodeType::Null:
+ case NodeType::Scalar:
+ return Iterator();
+ case NodeType::Sequence:
+ return Iterator(std::auto_ptr<IterPriv>(new IterPriv(m_seqData.end())));
+ case NodeType::Map:
+ return Iterator(std::auto_ptr<IterPriv>(new IterPriv(m_mapData.end())));
+ }
+
+ assert(false);
+ return Iterator();
+ }
+
+ // size
+ // . Returns the size of a sequence or map node
+ // . Otherwise, returns zero.
+ std::size_t Node::size() const
+ {
+ switch(m_type) {
+ case NodeType::Null:
+ case NodeType::Scalar:
+ return 0;
+ case NodeType::Sequence:
+ return m_seqData.size();
+ case NodeType::Map:
+ return m_mapData.size();
+ }
+
+ assert(false);
+ return 0;
+ }
+
+ const Node *Node::FindAtIndex(std::size_t i) const
+ {
+ if(m_type == NodeType::Sequence)
+ return m_seqData[i];
+ return 0;
+ }
+
+ bool Node::GetScalar(std::string& s) const
+ {
+ switch(m_type) {
+ case NodeType::Null:
+ s = "~";
+ return true;
+ case NodeType::Scalar:
+ s = m_scalarData;
+ return true;
+ case NodeType::Sequence:
+ case NodeType::Map:
+ return false;
+ }
+
+ assert(false);
+ return false;
+ }
+
+ Emitter& operator << (Emitter& out, const Node& node)
+ {
+ EmitFromEvents emitFromEvents(out);
+ node.EmitEvents(emitFromEvents);
+ return out;
+ }
+
+ int Node::Compare(const Node& rhs) const
+ {
+ if(m_type != rhs.m_type)
+ return rhs.m_type - m_type;
+
+ switch(m_type) {
+ case NodeType::Null:
+ return 0;
+ case NodeType::Scalar:
+ return m_scalarData.compare(rhs.m_scalarData);
+ case NodeType::Sequence:
+ if(m_seqData.size() < rhs.m_seqData.size())
+ return 1;
+ else if(m_seqData.size() > rhs.m_seqData.size())
+ return -1;
+ for(std::size_t i=0;i<m_seqData.size();i++)
+ if(int cmp = m_seqData[i]->Compare(*rhs.m_seqData[i]))
+ return cmp;
+ return 0;
+ case NodeType::Map:
+ if(m_mapData.size() < rhs.m_mapData.size())
+ return 1;
+ else if(m_mapData.size() > rhs.m_mapData.size())
+ return -1;
+ node_map::const_iterator it = m_mapData.begin();
+ node_map::const_iterator jt = rhs.m_mapData.begin();
+ for(;it!=m_mapData.end() && jt!=rhs.m_mapData.end();it++, jt++) {
+ if(int cmp = it->first->Compare(*jt->first))
+ return cmp;
+ if(int cmp = it->second->Compare(*jt->second))
+ return cmp;
+ }
+ return 0;
+ }
+
+ assert(false);
+ return 0;
+ }
+
+ bool operator < (const Node& n1, const Node& n2)
+ {
+ return n1.Compare(n2) < 0;
+ }
+}
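A minimal usage sketch, separate from the diff above: the Emitter streaming operator defined in node.cpp can round-trip a parsed document back to text. It assumes the rest of the vendored yaml-cpp sources (old, pre-0.5 API) are built alongside these files; the input file name is hypothetical.

#include "yaml-cpp/parser.h"
#include "yaml-cpp/node.h"
#include "yaml-cpp/emitter.h"
#include <fstream>
#include <iostream>

// Illustrative only, not part of the commit: parse one document and re-emit it.
// Assumes the vendored yaml-cpp headers/sources are on the include and link
// path; "in.yaml" is a hypothetical file name.
int main() {
    std::ifstream fin("in.yaml");
    YAML::Parser parser(fin);
    YAML::Node doc;
    if(parser.GetNextDocument(doc)) {   // NodeBuilder populates 'doc' from parse events
        YAML::Emitter out;
        out << doc;                     // Node::EmitEvents() replays the tree into the emitter
        std::cout << out.c_str() << "\n";
    }
    return 0;
}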
diff --git a/yaml-cpp/src/nodebuilder.cpp b/yaml-cpp/src/nodebuilder.cpp
new file mode 100755
index 00000000..13a70326
--- /dev/null
+++ b/yaml-cpp/src/nodebuilder.cpp
@@ -0,0 +1,145 @@
+#include "nodebuilder.h"
+#include "yaml-cpp/mark.h"
+#include "yaml-cpp/node.h"
+#include <cassert>
+
+namespace YAML
+{
+ NodeBuilder::NodeBuilder(Node& root): m_root(root), m_initializedRoot(false), m_finished(false)
+ {
+ m_root.Clear();
+ m_anchors.push_back(0); // since the anchors start at 1
+ }
+
+ NodeBuilder::~NodeBuilder()
+ {
+ }
+
+ void NodeBuilder::OnDocumentStart(const Mark&)
+ {
+ }
+
+ void NodeBuilder::OnDocumentEnd()
+ {
+ assert(m_finished);
+ }
+
+ void NodeBuilder::OnNull(const Mark& mark, anchor_t anchor)
+ {
+ Node& node = Push(anchor);
+ node.Init(NodeType::Null, mark, "");
+ Pop();
+ }
+
+ void NodeBuilder::OnAlias(const Mark& /*mark*/, anchor_t anchor)
+ {
+ Node& node = *m_anchors[anchor];
+ Insert(node);
+ node.MarkAsAliased();
+ }
+
+ void NodeBuilder::OnScalar(const Mark& mark, const std::string& tag, anchor_t anchor, const std::string& value)
+ {
+ Node& node = Push(anchor);
+ node.Init(NodeType::Scalar, mark, tag);
+ node.SetScalarData(value);
+ Pop();
+ }
+
+ void NodeBuilder::OnSequenceStart(const Mark& mark, const std::string& tag, anchor_t anchor)
+ {
+ Node& node = Push(anchor);
+ node.Init(NodeType::Sequence, mark, tag);
+ }
+
+ void NodeBuilder::OnSequenceEnd()
+ {
+ Pop();
+ }
+
+ void NodeBuilder::OnMapStart(const Mark& mark, const std::string& tag, anchor_t anchor)
+ {
+ Node& node = Push(anchor);
+ node.Init(NodeType::Map, mark, tag);
+ m_didPushKey.push(false);
+ }
+
+ void NodeBuilder::OnMapEnd()
+ {
+ m_didPushKey.pop();
+ Pop();
+ }
+
+ Node& NodeBuilder::Push(anchor_t anchor)
+ {
+ Node& node = Push();
+ RegisterAnchor(anchor, node);
+ return node;
+ }
+
+ Node& NodeBuilder::Push()
+ {
+ if(!m_initializedRoot) {
+ m_initializedRoot = true;
+ return m_root;
+ }
+
+ Node& node = m_root.CreateNode();
+ m_stack.push(&node);
+ return node;
+ }
+
+ Node& NodeBuilder::Top()
+ {
+ return m_stack.empty() ? m_root : *m_stack.top();
+ }
+
+ void NodeBuilder::Pop()
+ {
+ assert(!m_finished);
+ if(m_stack.empty()) {
+ m_finished = true;
+ return;
+ }
+
+ Node& node = *m_stack.top();
+ m_stack.pop();
+ Insert(node);
+ }
+
+ void NodeBuilder::Insert(Node& node)
+ {
+ Node& curTop = Top();
+ switch(curTop.Type()) {
+ case NodeType::Null:
+ case NodeType::Scalar:
+ assert(false);
+ break;
+ case NodeType::Sequence:
+ curTop.Append(node);
+ break;
+ case NodeType::Map:
+ assert(!m_didPushKey.empty());
+ if(m_didPushKey.top()) {
+ assert(!m_pendingKeys.empty());
+
+ Node& key = *m_pendingKeys.top();
+ m_pendingKeys.pop();
+ curTop.Insert(key, node);
+ m_didPushKey.top() = false;
+ } else {
+ m_pendingKeys.push(&node);
+ m_didPushKey.top() = true;
+ }
+ break;
+ }
+ }
+
+ void NodeBuilder::RegisterAnchor(anchor_t anchor, Node& node)
+ {
+ if(anchor) {
+ assert(anchor == m_anchors.size());
+ m_anchors.push_back(&node);
+ }
+ }
+}
diff --git a/yaml-cpp/src/nodebuilder.h b/yaml-cpp/src/nodebuilder.h
new file mode 100755
index 00000000..9c1d16a0
--- /dev/null
+++ b/yaml-cpp/src/nodebuilder.h
@@ -0,0 +1,61 @@
+#ifndef NODEBUILDER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define NODEBUILDER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include "yaml-cpp/eventhandler.h"
+#include <map>
+#include <memory>
+#include <stack>
+#include <vector>
+
+namespace YAML
+{
+ class Node;
+
+ class NodeBuilder: public EventHandler
+ {
+ public:
+ explicit NodeBuilder(Node& root);
+ virtual ~NodeBuilder();
+
+ virtual void OnDocumentStart(const Mark& mark);
+ virtual void OnDocumentEnd();
+
+ virtual void OnNull(const Mark& mark, anchor_t anchor);
+ virtual void OnAlias(const Mark& mark, anchor_t anchor);
+ virtual void OnScalar(const Mark& mark, const std::string& tag, anchor_t anchor, const std::string& value);
+
+ virtual void OnSequenceStart(const Mark& mark, const std::string& tag, anchor_t anchor);
+ virtual void OnSequenceEnd();
+
+ virtual void OnMapStart(const Mark& mark, const std::string& tag, anchor_t anchor);
+ virtual void OnMapEnd();
+
+ private:
+ Node& Push(anchor_t anchor);
+ Node& Push();
+ Node& Top();
+ void Pop();
+
+ void Insert(Node& node);
+ void RegisterAnchor(anchor_t anchor, Node& node);
+
+ private:
+ Node& m_root;
+ bool m_initializedRoot;
+ bool m_finished;
+
+ std::stack<Node *> m_stack;
+ std::stack<Node *> m_pendingKeys;
+ std::stack<bool> m_didPushKey;
+
+ typedef std::vector<Node *> Anchors;
+ Anchors m_anchors;
+ };
+}
+
+#endif // NODEBUILDER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
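A sketch, separate from the diff: NodeBuilder is only one EventHandler; Parser::HandleNextDocument() can drive any other implementation of the same virtuals without materializing a Node tree. The handler below merely counts scalars, assumes the EventHandler interface matches the overrides listed above, and uses a hypothetical input file name.

#include "yaml-cpp/eventhandler.h"
#include "yaml-cpp/parser.h"
#include <cstddef>
#include <fstream>
#include <iostream>
#include <string>

// Illustrative only, not part of the commit: a custom event handler that
// counts scalar events instead of building nodes.
class ScalarCounter: public YAML::EventHandler {
public:
    ScalarCounter(): m_count(0) {}
    virtual void OnDocumentStart(const YAML::Mark&) {}
    virtual void OnDocumentEnd() {}
    virtual void OnNull(const YAML::Mark&, YAML::anchor_t) {}
    virtual void OnAlias(const YAML::Mark&, YAML::anchor_t) {}
    virtual void OnScalar(const YAML::Mark&, const std::string&, YAML::anchor_t, const std::string&) { ++m_count; }
    virtual void OnSequenceStart(const YAML::Mark&, const std::string&, YAML::anchor_t) {}
    virtual void OnSequenceEnd() {}
    virtual void OnMapStart(const YAML::Mark&, const std::string&, YAML::anchor_t) {}
    virtual void OnMapEnd() {}
    std::size_t count() const { return m_count; }
private:
    std::size_t m_count;
};

int main() {
    std::ifstream fin("doc.yaml");                 // hypothetical input file
    YAML::Parser parser(fin);
    ScalarCounter counter;
    while(parser.HandleNextDocument(counter)) {}   // one call per document in the stream
    std::cout << counter.count() << " scalars\n";
    return 0;
}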
diff --git a/yaml-cpp/src/nodeownership.cpp b/yaml-cpp/src/nodeownership.cpp
new file mode 100755
index 00000000..118edbc8
--- /dev/null
+++ b/yaml-cpp/src/nodeownership.cpp
@@ -0,0 +1,31 @@
+#include "nodeownership.h"
+#include "yaml-cpp/node.h"
+
+namespace YAML
+{
+ NodeOwnership::NodeOwnership(NodeOwnership *pOwner): m_pOwner(pOwner)
+ {
+ if(!m_pOwner)
+ m_pOwner = this;
+ }
+
+ NodeOwnership::~NodeOwnership()
+ {
+ }
+
+ Node& NodeOwnership::_Create()
+ {
+ m_nodes.push_back(std::auto_ptr<Node>(new Node));
+ return m_nodes.back();
+ }
+
+ void NodeOwnership::_MarkAsAliased(const Node& node)
+ {
+ m_aliasedNodes.insert(&node);
+ }
+
+ bool NodeOwnership::_IsAliased(const Node& node) const
+ {
+ return m_aliasedNodes.count(&node) > 0;
+ }
+}
diff --git a/yaml-cpp/src/nodeownership.h b/yaml-cpp/src/nodeownership.h
new file mode 100755
index 00000000..69870814
--- /dev/null
+++ b/yaml-cpp/src/nodeownership.h
@@ -0,0 +1,39 @@
+#ifndef NODE_OWNERSHIP_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define NODE_OWNERSHIP_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/noncopyable.h"
+#include "ptr_vector.h"
+#include <set>
+
+namespace YAML
+{
+ class Node;
+
+ class NodeOwnership: private noncopyable
+ {
+ public:
+ explicit NodeOwnership(NodeOwnership *pOwner = 0);
+ ~NodeOwnership();
+
+ Node& Create() { return m_pOwner->_Create(); }
+ void MarkAsAliased(const Node& node) { m_pOwner->_MarkAsAliased(node); }
+ bool IsAliased(const Node& node) const { return m_pOwner->_IsAliased(node); }
+
+ private:
+ Node& _Create();
+ void _MarkAsAliased(const Node& node);
+ bool _IsAliased(const Node& node) const;
+
+ private:
+ ptr_vector<Node> m_nodes;
+ std::set<const Node *> m_aliasedNodes;
+ NodeOwnership *m_pOwner;
+ };
+}
+
+#endif // NODE_OWNERSHIP_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/null.cpp b/yaml-cpp/src/null.cpp
new file mode 100755
index 00000000..08fa9aae
--- /dev/null
+++ b/yaml-cpp/src/null.cpp
@@ -0,0 +1,12 @@
+#include "yaml-cpp/null.h"
+#include "yaml-cpp/node.h"
+
+namespace YAML
+{
+ _Null Null;
+
+ bool IsNull(const Node& node)
+ {
+ return node.Read(Null);
+ }
+}
diff --git a/yaml-cpp/src/ostream.cpp b/yaml-cpp/src/ostream.cpp
new file mode 100755
index 00000000..a7f1e14b
--- /dev/null
+++ b/yaml-cpp/src/ostream.cpp
@@ -0,0 +1,63 @@
+#include "yaml-cpp/ostream.h"
+#include <cstring>
+
+namespace YAML
+{
+ ostream::ostream(): m_buffer(0), m_pos(0), m_size(0), m_row(0), m_col(0)
+ {
+ reserve(1024);
+ }
+
+ ostream::~ostream()
+ {
+ delete [] m_buffer;
+ }
+
+ void ostream::reserve(unsigned size)
+ {
+ if(size <= m_size)
+ return;
+
+ char *newBuffer = new char[size];
+ std::memset(newBuffer, 0, size * sizeof(char));
+ std::memcpy(newBuffer, m_buffer, m_size * sizeof(char));
+ delete [] m_buffer;
+ m_buffer = newBuffer;
+ m_size = size;
+ }
+
+ void ostream::put(char ch)
+ {
+ if(m_pos >= m_size - 1) // an extra space for the NULL terminator
+ reserve(m_size * 2);
+
+ m_buffer[m_pos] = ch;
+ m_pos++;
+
+ if(ch == '\n') {
+ m_row++;
+ m_col = 0;
+ } else
+ m_col++;
+ }
+
+ ostream& operator << (ostream& out, const char *str)
+ {
+ std::size_t length = std::strlen(str);
+ for(std::size_t i=0;i<length;i++)
+ out.put(str[i]);
+ return out;
+ }
+
+ ostream& operator << (ostream& out, const std::string& str)
+ {
+ out << str.c_str();
+ return out;
+ }
+
+ ostream& operator << (ostream& out, char ch)
+ {
+ out.put(ch);
+ return out;
+ }
+}
diff --git a/yaml-cpp/src/parser.cpp b/yaml-cpp/src/parser.cpp
new file mode 100755
index 00000000..b836823f
--- /dev/null
+++ b/yaml-cpp/src/parser.cpp
@@ -0,0 +1,152 @@
+#include "yaml-cpp/parser.h"
+#include "yaml-cpp/eventhandler.h"
+#include "yaml-cpp/exceptions.h"
+#include "yaml-cpp/node.h"
+#include "directives.h"
+#include "nodebuilder.h"
+#include "scanner.h"
+#include "singledocparser.h"
+#include "tag.h"
+#include "token.h"
+#include <sstream>
+#include <cstdio>
+
+namespace YAML
+{
+ Parser::Parser()
+ {
+ }
+
+ Parser::Parser(std::istream& in)
+ {
+ Load(in);
+ }
+
+ Parser::~Parser()
+ {
+ }
+
+ Parser::operator bool() const
+ {
+ return m_pScanner.get() && !m_pScanner->empty();
+ }
+
+ void Parser::Load(std::istream& in)
+ {
+ m_pScanner.reset(new Scanner(in));
+ m_pDirectives.reset(new Directives);
+ }
+
+ // HandleNextDocument
+ // . Handles the next document
+ // . Throws a ParserException on error.
+ // . Returns false if there are no more documents
+ bool Parser::HandleNextDocument(EventHandler& eventHandler)
+ {
+ if(!m_pScanner.get())
+ return false;
+
+ ParseDirectives();
+ if(m_pScanner->empty())
+ return false;
+
+ SingleDocParser sdp(*m_pScanner, *m_pDirectives);
+ sdp.HandleDocument(eventHandler);
+ return true;
+ }
+
+ // GetNextDocument
+ // . Reads the next document in the queue (of tokens).
+ // . Throws a ParserException on error.
+ bool Parser::GetNextDocument(Node& document)
+ {
+ NodeBuilder builder(document);
+ return HandleNextDocument(builder);
+ }
+
+ // ParseDirectives
+ // . Reads any directives that are next in the queue.
+ void Parser::ParseDirectives()
+ {
+ bool readDirective = false;
+
+ while(1) {
+ if(m_pScanner->empty())
+ break;
+
+ Token& token = m_pScanner->peek();
+ if(token.type != Token::DIRECTIVE)
+ break;
+
+ // we keep the directives from the last document if none are specified;
+ // but if any directives are specified, then we reset them
+ if(!readDirective)
+ m_pDirectives.reset(new Directives);
+
+ readDirective = true;
+ HandleDirective(token);
+ m_pScanner->pop();
+ }
+ }
+
+ void Parser::HandleDirective(const Token& token)
+ {
+ if(token.value == "YAML")
+ HandleYamlDirective(token);
+ else if(token.value == "TAG")
+ HandleTagDirective(token);
+ }
+
+ // HandleYamlDirective
+ // . Should be of the form 'major.minor' (like a version number)
+ void Parser::HandleYamlDirective(const Token& token)
+ {
+ if(token.params.size() != 1)
+ throw ParserException(token.mark, ErrorMsg::YAML_DIRECTIVE_ARGS);
+
+ if(!m_pDirectives->version.isDefault)
+ throw ParserException(token.mark, ErrorMsg::REPEATED_YAML_DIRECTIVE);
+
+ std::stringstream str(token.params[0]);
+ str >> m_pDirectives->version.major;
+ str.get();
+ str >> m_pDirectives->version.minor;
+ if(!str || str.peek() != EOF)
+ throw ParserException(token.mark, std::string(ErrorMsg::YAML_VERSION) + token.params[0]);
+
+ if(m_pDirectives->version.major > 1)
+ throw ParserException(token.mark, ErrorMsg::YAML_MAJOR_VERSION);
+
+ m_pDirectives->version.isDefault = false;
+ // TODO: warning on major == 1, minor > 2?
+ }
+
+ // HandleTagDirective
+ // . Should be of the form 'handle prefix', where 'handle' is converted to 'prefix' in the file.
+ void Parser::HandleTagDirective(const Token& token)
+ {
+ if(token.params.size() != 2)
+ throw ParserException(token.mark, ErrorMsg::TAG_DIRECTIVE_ARGS);
+
+ const std::string& handle = token.params[0];
+ const std::string& prefix = token.params[1];
+ if(m_pDirectives->tags.find(handle) != m_pDirectives->tags.end())
+ throw ParserException(token.mark, ErrorMsg::REPEATED_TAG_DIRECTIVE);
+
+ m_pDirectives->tags[handle] = prefix;
+ }
+
+ void Parser::PrintTokens(std::ostream& out)
+ {
+ if(!m_pScanner.get())
+ return;
+
+ while(1) {
+ if(m_pScanner->empty())
+ break;
+
+ out << m_pScanner->peek() << "\n";
+ m_pScanner->pop();
+ }
+ }
+}
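A sketch, separate from the diff, of the multi-document loop GetNextDocument() is designed for: it returns false once the stream runs out of documents. The example assumes each document is a flat map of scalars, that Node::GetScalar() and the iterator accessors are public in the vendored headers, and uses a hypothetical file name.

#include "yaml-cpp/parser.h"
#include "yaml-cpp/node.h"
#include <fstream>
#include <iostream>
#include <string>

// Illustrative only, not part of the commit: read every document from a
// stream and print the scalar key/value pairs of each top-level map.
int main() {
    std::ifstream fin("docs.yaml");                  // hypothetical input file
    YAML::Parser parser(fin);
    YAML::Node doc;
    while(parser.GetNextDocument(doc)) {             // one iteration per "---" document
        for(YAML::Iterator it = doc.begin(); it != doc.end(); ++it) {
            std::string key, value;
            // first()/second() are the map accessors; GetScalar() returns
            // false for non-scalar nodes, which we simply skip here.
            if(it.first().GetScalar(key) && it.second().GetScalar(value))
                std::cout << key << ": " << value << "\n";
        }
    }
    return 0;
}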
diff --git a/yaml-cpp/src/ptr_stack.h b/yaml-cpp/src/ptr_stack.h
new file mode 100755
index 00000000..bf454fb3
--- /dev/null
+++ b/yaml-cpp/src/ptr_stack.h
@@ -0,0 +1,46 @@
+#ifndef PTR_STACK_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define PTR_STACK_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include "yaml-cpp/noncopyable.h"
+#include <cstddef>
+#include <cstdlib>
+#include <memory>
+#include <vector>
+
+template <typename T>
+class ptr_stack: private YAML::noncopyable
+{
+public:
+ ptr_stack() {}
+ ~ptr_stack() { clear(); }
+
+ void clear() {
+ for(unsigned i=0;i<m_data.size();i++)
+ delete m_data[i];
+ m_data.clear();
+ }
+
+ std::size_t size() const { return m_data.size(); }
+ bool empty() const { return m_data.empty(); }
+
+ void push(std::auto_ptr<T> t) {
+ m_data.push_back(NULL);
+ m_data.back() = t.release();
+ }
+ std::auto_ptr<T> pop() {
+ std::auto_ptr<T> t(m_data.back());
+ m_data.pop_back();
+ return t;
+ }
+ T& top() { return *m_data.back(); }
+ const T& top() const { return *m_data.back(); }
+
+private:
+ std::vector<T*> m_data;
+};
+
+#endif // PTR_STACK_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/ptr_vector.h b/yaml-cpp/src/ptr_vector.h
new file mode 100755
index 00000000..7b936cb5
--- /dev/null
+++ b/yaml-cpp/src/ptr_vector.h
@@ -0,0 +1,47 @@
+#ifndef PTR_VECTOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define PTR_VECTOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include "yaml-cpp/noncopyable.h"
+#include <cstddef>
+#include <cstdlib>
+#include <memory>
+#include <vector>
+
+namespace YAML {
+
+ template <typename T>
+ class ptr_vector: private YAML::noncopyable
+ {
+ public:
+ ptr_vector() {}
+ ~ptr_vector() { clear(); }
+
+ void clear() {
+ for(unsigned i=0;i<m_data.size();i++)
+ delete m_data[i];
+ m_data.clear();
+ }
+
+ std::size_t size() const { return m_data.size(); }
+ bool empty() const { return m_data.empty(); }
+
+ void push_back(std::auto_ptr<T> t) {
+ m_data.push_back(NULL);
+ m_data.back() = t.release();
+ }
+ T& operator[](std::size_t i) { return *m_data[i]; }
+ const T& operator[](std::size_t i) const { return *m_data[i]; }
+
+ T& back() { return *m_data.back(); }
+ const T& back() const { return *m_data.back(); }
+
+ private:
+ std::vector<T*> m_data;
+ };
+}
+
+#endif // PTR_VECTOR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
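A sketch, separate from the diff, of the ownership convention shared by ptr_stack and ptr_vector: callers hand elements over through std::auto_ptr (these containers predate C++11's unique_ptr), and clear() or the destructor deletes every stored pointer.

#include "ptr_vector.h"
#include <iostream>
#include <memory>
#include <string>

// Illustrative only, not part of the commit; builds as C++03 (std::auto_ptr is
// deprecated in C++11 and removed in C++17).
int main() {
    YAML::ptr_vector<std::string> names;
    names.push_back(std::auto_ptr<std::string>(new std::string("alpha")));  // vector takes ownership
    names.push_back(std::auto_ptr<std::string>(new std::string("beta")));
    std::cout << names.size() << " " << names[1] << "\n";                   // prints: 2 beta
    return 0;
}   // ~ptr_vector() calls clear(), which deletes both strings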
diff --git a/yaml-cpp/src/regex.cpp b/yaml-cpp/src/regex.cpp
new file mode 100755
index 00000000..b35b1f43
--- /dev/null
+++ b/yaml-cpp/src/regex.cpp
@@ -0,0 +1,60 @@
+#include "regex.h"
+
+namespace YAML
+{
+ // constructors
+ RegEx::RegEx(): m_op(REGEX_EMPTY)
+ {
+ }
+
+ RegEx::RegEx(REGEX_OP op): m_op(op)
+ {
+ }
+
+ RegEx::RegEx(char ch): m_op(REGEX_MATCH), m_a(ch)
+ {
+ }
+
+ RegEx::RegEx(char a, char z): m_op(REGEX_RANGE), m_a(a), m_z(z)
+ {
+ }
+
+ RegEx::RegEx(const std::string& str, REGEX_OP op): m_op(op)
+ {
+ for(std::size_t i=0;i<str.size();i++)
+ m_params.push_back(RegEx(str[i]));
+ }
+
+ // combination constructors
+ RegEx operator ! (const RegEx& ex)
+ {
+ RegEx ret(REGEX_NOT);
+ ret.m_params.push_back(ex);
+ return ret;
+ }
+
+ RegEx operator || (const RegEx& ex1, const RegEx& ex2)
+ {
+ RegEx ret(REGEX_OR);
+ ret.m_params.push_back(ex1);
+ ret.m_params.push_back(ex2);
+ return ret;
+ }
+
+ RegEx operator && (const RegEx& ex1, const RegEx& ex2)
+ {
+ RegEx ret(REGEX_AND);
+ ret.m_params.push_back(ex1);
+ ret.m_params.push_back(ex2);
+ return ret;
+ }
+
+ RegEx operator + (const RegEx& ex1, const RegEx& ex2)
+ {
+ RegEx ret(REGEX_SEQ);
+ ret.m_params.push_back(ex1);
+ ret.m_params.push_back(ex2);
+ return ret;
+ }
+}
+
diff --git a/yaml-cpp/src/regex.h b/yaml-cpp/src/regex.h
new file mode 100755
index 00000000..8722e626
--- /dev/null
+++ b/yaml-cpp/src/regex.h
@@ -0,0 +1,67 @@
+#ifndef REGEX_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define REGEX_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <vector>
+#include <string>
+
+namespace YAML
+{
+ class Stream;
+
+ enum REGEX_OP { REGEX_EMPTY, REGEX_MATCH, REGEX_RANGE, REGEX_OR, REGEX_AND, REGEX_NOT, REGEX_SEQ };
+
+ // simplified regular expressions
+ // . Only straightforward matches (no repeated characters)
+ // . Only matches from start of string
+ class RegEx
+ {
+ public:
+ RegEx();
+ RegEx(char ch);
+ RegEx(char a, char z);
+ RegEx(const std::string& str, REGEX_OP op = REGEX_SEQ);
+ ~RegEx() {}
+
+ friend RegEx operator ! (const RegEx& ex);
+ friend RegEx operator || (const RegEx& ex1, const RegEx& ex2);
+ friend RegEx operator && (const RegEx& ex1, const RegEx& ex2);
+ friend RegEx operator + (const RegEx& ex1, const RegEx& ex2);
+
+ bool Matches(char ch) const;
+ bool Matches(const std::string& str) const;
+ bool Matches(const Stream& in) const;
+ template <typename Source> bool Matches(const Source& source) const;
+
+ int Match(const std::string& str) const;
+ int Match(const Stream& in) const;
+ template <typename Source> int Match(const Source& source) const;
+
+ private:
+ RegEx(REGEX_OP op);
+
+ template <typename Source> bool IsValidSource(const Source& source) const;
+ template <typename Source> int MatchUnchecked(const Source& source) const;
+
+ template <typename Source> int MatchOpEmpty(const Source& source) const;
+ template <typename Source> int MatchOpMatch(const Source& source) const;
+ template <typename Source> int MatchOpRange(const Source& source) const;
+ template <typename Source> int MatchOpOr(const Source& source) const;
+ template <typename Source> int MatchOpAnd(const Source& source) const;
+ template <typename Source> int MatchOpNot(const Source& source) const;
+ template <typename Source> int MatchOpSeq(const Source& source) const;
+
+ private:
+ REGEX_OP m_op;
+ char m_a, m_z;
+ std::vector <RegEx> m_params;
+ };
+}
+
+#include "regeximpl.h"
+
+#endif // REGEX_H_62B23520_7C8E_11DE_8A39_0800200C9A66
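A sketch, separate from the diff, of how these simplified regular expressions are composed: rather than parsing a pattern string, small RegEx values are combined with the overloaded operators, and matching is always anchored at the start of the input.

#include "regex.h"
#include <iostream>
#include <string>

// Illustrative only, not part of the commit; regex.cpp and the headers pulled
// in by regeximpl.h must be available to compile and link this.
int main() {
    using YAML::RegEx;
    RegEx digit('0', '9');                           // character range
    RegEx sign = RegEx('+') || RegEx('-');           // alternation
    RegEx signedDigit = sign + digit;                // sequence
    std::cout << signedDigit.Matches(std::string("-3x")) << "\n";  // 1: "-3" matches at the start
    std::cout << signedDigit.Matches(std::string("3x")) << "\n";   // 0: no leading sign
    return 0;
}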
diff --git a/yaml-cpp/src/regeximpl.h b/yaml-cpp/src/regeximpl.h
new file mode 100755
index 00000000..d5c20d74
--- /dev/null
+++ b/yaml-cpp/src/regeximpl.h
@@ -0,0 +1,186 @@
+#ifndef REGEXIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define REGEXIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "stream.h"
+#include "stringsource.h"
+#include "streamcharsource.h"
+
+namespace YAML
+{
+ // query matches
+ inline bool RegEx::Matches(char ch) const {
+ std::string str;
+ str += ch;
+ return Matches(str);
+ }
+
+ inline bool RegEx::Matches(const std::string& str) const {
+ return Match(str) >= 0;
+ }
+
+ inline bool RegEx::Matches(const Stream& in) const {
+ return Match(in) >= 0;
+ }
+
+ template <typename Source>
+ inline bool RegEx::Matches(const Source& source) const {
+ return Match(source) >= 0;
+ }
+
+ // Match
+ // . Matches the given string against this regular expression.
+ // . Returns the number of characters matched.
+ // . Returns -1 if no characters were matched (the reason for
+ // not returning zero is that we may have an empty regex
+ // which is ALWAYS successful at matching zero characters).
+ // . REMEMBER that we only match from the start of the buffer!
+ inline int RegEx::Match(const std::string& str) const
+ {
+ StringCharSource source(str.c_str(), str.size());
+ return Match(source);
+ }
+
+ inline int RegEx::Match(const Stream& in) const
+ {
+ StreamCharSource source(in);
+ return Match(source);
+ }
+
+ template <typename Source>
+ inline bool RegEx::IsValidSource(const Source& source) const
+ {
+ return source;
+ }
+
+ template<>
+ inline bool RegEx::IsValidSource<StringCharSource>(const StringCharSource& source) const
+ {
+ switch(m_op) {
+ case REGEX_MATCH:
+ case REGEX_RANGE:
+ return source;
+ default:
+ return true;
+ }
+ }
+
+ template <typename Source>
+ inline int RegEx::Match(const Source& source) const
+ {
+ return IsValidSource(source) ? MatchUnchecked(source) : -1;
+ }
+
+ template <typename Source>
+ inline int RegEx::MatchUnchecked(const Source& source) const
+ {
+ switch(m_op) {
+ case REGEX_EMPTY:
+ return MatchOpEmpty(source);
+ case REGEX_MATCH:
+ return MatchOpMatch(source);
+ case REGEX_RANGE:
+ return MatchOpRange(source);
+ case REGEX_OR:
+ return MatchOpOr(source);
+ case REGEX_AND:
+ return MatchOpAnd(source);
+ case REGEX_NOT:
+ return MatchOpNot(source);
+ case REGEX_SEQ:
+ return MatchOpSeq(source);
+ }
+
+ return -1;
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Operators
+ // Note: the convention MatchOp*<Source> is that we can assume IsValidSource(source).
+ // So we do all our checks *before* we call these functions
+
+ // EmptyOperator
+ template <typename Source>
+ inline int RegEx::MatchOpEmpty(const Source& source) const {
+ return source[0] == Stream::eof() ? 0 : -1;
+ }
+
+ template <>
+ inline int RegEx::MatchOpEmpty<StringCharSource>(const StringCharSource& source) const {
+ return !source ? 0 : -1; // the empty regex is only successful on the empty string
+ }
+
+ // MatchOperator
+ template <typename Source>
+ inline int RegEx::MatchOpMatch(const Source& source) const {
+ if(source[0] != m_a)
+ return -1;
+ return 1;
+ }
+
+ // RangeOperator
+ template <typename Source>
+ inline int RegEx::MatchOpRange(const Source& source) const {
+ if(m_a > source[0] || m_z < source[0])
+ return -1;
+ return 1;
+ }
+
+ // OrOperator
+ template <typename Source>
+ inline int RegEx::MatchOpOr(const Source& source) const {
+ for(std::size_t i=0;i<m_params.size();i++) {
+ int n = m_params[i].MatchUnchecked(source);
+ if(n >= 0)
+ return n;
+ }
+ return -1;
+ }
+
+ // AndOperator
+ // Note: 'AND' is a little funny, since we may be required to match things
+ // of different lengths. If we find a match, we return the length of
+ // the FIRST entry on the list.
+ template <typename Source>
+ inline int RegEx::MatchOpAnd(const Source& source) const {
+ int first = -1;
+ for(std::size_t i=0;i<m_params.size();i++) {
+ int n = m_params[i].MatchUnchecked(source);
+ if(n == -1)
+ return -1;
+ if(i == 0)
+ first = n;
+ }
+ return first;
+ }
+
+ // NotOperator
+ template <typename Source>
+ inline int RegEx::MatchOpNot(const Source& source) const {
+ if(m_params.empty())
+ return -1;
+ if(m_params[0].MatchUnchecked(source) >= 0)
+ return -1;
+ return 1;
+ }
+
+ // SeqOperator
+ template <typename Source>
+ inline int RegEx::MatchOpSeq(const Source& source) const {
+ int offset = 0;
+ for(std::size_t i=0;i<m_params.size();i++) {
+ int n = m_params[i].Match(source + offset); // note Match, not MatchUnchecked because we need to check validity after the offset
+ if(n == -1)
+ return -1;
+ offset += n;
+ }
+
+ return offset;
+ }
+}
+
+#endif // REGEXIMPL_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/scanner.cpp b/yaml-cpp/src/scanner.cpp
new file mode 100755
index 00000000..199ef25a
--- /dev/null
+++ b/yaml-cpp/src/scanner.cpp
@@ -0,0 +1,387 @@
+#include "scanner.h"
+#include "token.h"
+#include "yaml-cpp/exceptions.h"
+#include "exp.h"
+#include <cassert>
+#include <memory>
+
+namespace YAML
+{
+ Scanner::Scanner(std::istream& in)
+ : INPUT(in), m_startedStream(false), m_endedStream(false), m_simpleKeyAllowed(false), m_canBeJSONFlow(false)
+ {
+ }
+
+ Scanner::~Scanner()
+ {
+ }
+
+ // empty
+ // . Returns true if there are no more tokens to be read
+ bool Scanner::empty()
+ {
+ EnsureTokensInQueue();
+ return m_tokens.empty();
+ }
+
+ // pop
+ // . Simply removes the next token on the queue.
+ void Scanner::pop()
+ {
+ EnsureTokensInQueue();
+ if(!m_tokens.empty())
+ m_tokens.pop();
+ }
+
+ // peek
+ // . Returns (but does not remove) the next token on the queue.
+ Token& Scanner::peek()
+ {
+ EnsureTokensInQueue();
+ assert(!m_tokens.empty()); // should we be asserting here? I mean, we should really just be checking
+ // if it's empty before peeking.
+
+#if 0
+ static Token *pLast = 0;
+ if(pLast != &m_tokens.front())
+ std::cerr << "peek: " << m_tokens.front() << "\n";
+ pLast = &m_tokens.front();
+#endif
+
+ return m_tokens.front();
+ }
+
+ // EnsureTokensInQueue
+ // . Scan until there's a valid token at the front of the queue,
+ // or we're sure the queue is empty.
+ void Scanner::EnsureTokensInQueue()
+ {
+ while(1) {
+ if(!m_tokens.empty()) {
+ Token& token = m_tokens.front();
+
+ // if this guy's valid, then we're done
+ if(token.status == Token::VALID)
+ return;
+
+ // here's where we clean up the impossible tokens
+ if(token.status == Token::INVALID) {
+ m_tokens.pop();
+ continue;
+ }
+
+ // note: what's left are the unverified tokens
+ }
+
+ // no token? maybe we've actually finished
+ if(m_endedStream)
+ return;
+
+ // no? then scan...
+ ScanNextToken();
+ }
+ }
+
+ // ScanNextToken
+ // . The main scanning function; here we branch out and
+ // scan whatever the next token should be.
+ void Scanner::ScanNextToken()
+ {
+ if(m_endedStream)
+ return;
+
+ if(!m_startedStream)
+ return StartStream();
+
+ // get rid of whitespace, etc. (in between tokens it should be irrelevant)
+ ScanToNextToken();
+
+ // maybe need to end some blocks
+ PopIndentToHere();
+
+ // *****
+ // And now branch based on the next few characters!
+ // *****
+
+ // end of stream
+ if(!INPUT)
+ return EndStream();
+
+ if(INPUT.column() == 0 && INPUT.peek() == Keys::Directive)
+ return ScanDirective();
+
+ // document token
+ if(INPUT.column() == 0 && Exp::DocStart().Matches(INPUT))
+ return ScanDocStart();
+
+ if(INPUT.column() == 0 && Exp::DocEnd().Matches(INPUT))
+ return ScanDocEnd();
+
+ // flow start/end/entry
+ if(INPUT.peek() == Keys::FlowSeqStart || INPUT.peek() == Keys::FlowMapStart)
+ return ScanFlowStart();
+
+ if(INPUT.peek() == Keys::FlowSeqEnd || INPUT.peek() == Keys::FlowMapEnd)
+ return ScanFlowEnd();
+
+ if(INPUT.peek() == Keys::FlowEntry)
+ return ScanFlowEntry();
+
+ // block/map stuff
+ if(Exp::BlockEntry().Matches(INPUT))
+ return ScanBlockEntry();
+
+ if((InBlockContext() ? Exp::Key() : Exp::KeyInFlow()).Matches(INPUT))
+ return ScanKey();
+
+ if(GetValueRegex().Matches(INPUT))
+ return ScanValue();
+
+ // alias/anchor
+ if(INPUT.peek() == Keys::Alias || INPUT.peek() == Keys::Anchor)
+ return ScanAnchorOrAlias();
+
+ // tag
+ if(INPUT.peek() == Keys::Tag)
+ return ScanTag();
+
+ // special scalars
+ if(InBlockContext() && (INPUT.peek() == Keys::LiteralScalar || INPUT.peek() == Keys::FoldedScalar))
+ return ScanBlockScalar();
+
+ if(INPUT.peek() == '\'' || INPUT.peek() == '\"')
+ return ScanQuotedScalar();
+
+ // plain scalars
+ if((InBlockContext() ? Exp::PlainScalar() : Exp::PlainScalarInFlow()).Matches(INPUT))
+ return ScanPlainScalar();
+
+ // don't know what it is!
+ throw ParserException(INPUT.mark(), ErrorMsg::UNKNOWN_TOKEN);
+ }
+
+ // ScanToNextToken
+ // . Eats input until we reach the next token-like thing.
+ void Scanner::ScanToNextToken()
+ {
+ while(1) {
+ // first eat whitespace
+ while(INPUT && IsWhitespaceToBeEaten(INPUT.peek())) {
+ if(InBlockContext() && Exp::Tab().Matches(INPUT))
+ m_simpleKeyAllowed = false;
+ INPUT.eat(1);
+ }
+
+ // then eat a comment
+ if(Exp::Comment().Matches(INPUT)) {
+ // eat until line break
+ while(INPUT && !Exp::Break().Matches(INPUT))
+ INPUT.eat(1);
+ }
+
+ // if it's NOT a line break, then we're done!
+ if(!Exp::Break().Matches(INPUT))
+ break;
+
+ // otherwise, let's eat the line break and keep going
+ int n = Exp::Break().Match(INPUT);
+ INPUT.eat(n);
+
+ // oh yeah, and let's get rid of that simple key
+ InvalidateSimpleKey();
+
+ // new line - we may be able to accept a simple key now
+ if(InBlockContext())
+ m_simpleKeyAllowed = true;
+ }
+ }
+
+ ///////////////////////////////////////////////////////////////////////
+ // Misc. helpers
+
+ // IsWhitespaceToBeEaten
+ // . We can eat whitespace if it's a space or tab
+ // . Note: originally tabs in block context couldn't be eaten
+ // "where a simple key could be allowed
+ // (i.e., not at the beginning of a line, or following '-', '?', or ':')"
+ // I think this is wrong, since tabs can be non-content whitespace; it's just
+ // that they can't contribute to indentation, so once you've seen a tab in a
+ // line, you can't start a simple key
+ bool Scanner::IsWhitespaceToBeEaten(char ch)
+ {
+ if(ch == ' ')
+ return true;
+
+ if(ch == '\t')
+ return true;
+
+ return false;
+ }
+
+ // GetValueRegex
+ // . Get the appropriate regex to check if it's a value token
+ const RegEx& Scanner::GetValueRegex() const
+ {
+ if(InBlockContext())
+ return Exp::Value();
+
+ return m_canBeJSONFlow ? Exp::ValueInJSONFlow() : Exp::ValueInFlow();
+ }
+
+ // StartStream
+ // . Set the initial conditions for starting a stream.
+ void Scanner::StartStream()
+ {
+ m_startedStream = true;
+ m_simpleKeyAllowed = true;
+ std::auto_ptr<IndentMarker> pIndent(new IndentMarker(-1, IndentMarker::NONE));
+ m_indentRefs.push_back(pIndent);
+ m_indents.push(&m_indentRefs.back());
+ }
+
+ // EndStream
+ // . Close out the stream, finish up, etc.
+ void Scanner::EndStream()
+ {
+ // force newline
+ if(INPUT.column() > 0)
+ INPUT.ResetColumn();
+
+ PopAllIndents();
+ PopAllSimpleKeys();
+
+ m_simpleKeyAllowed = false;
+ m_endedStream = true;
+ }
+
+ Token *Scanner::PushToken(Token::TYPE type)
+ {
+ m_tokens.push(Token(type, INPUT.mark()));
+ return &m_tokens.back();
+ }
+
+ Token::TYPE Scanner::GetStartTokenFor(IndentMarker::INDENT_TYPE type) const
+ {
+ switch(type) {
+ case IndentMarker::SEQ: return Token::BLOCK_SEQ_START;
+ case IndentMarker::MAP: return Token::BLOCK_MAP_START;
+ case IndentMarker::NONE: assert(false); break;
+ }
+ assert(false);
+ throw std::runtime_error("yaml-cpp: internal error, invalid indent type");
+ }
+
+ // PushIndentTo
+ // . Pushes an indentation onto the stack, and enqueues the
+ // proper token (sequence start or mapping start).
+ // . Returns the indent marker it generates (if any).
+ Scanner::IndentMarker *Scanner::PushIndentTo(int column, IndentMarker::INDENT_TYPE type)
+ {
+ // are we in flow?
+ if(InFlowContext())
+ return 0;
+
+ std::auto_ptr<IndentMarker> pIndent(new IndentMarker(column, type));
+ IndentMarker& indent = *pIndent;
+ const IndentMarker& lastIndent = *m_indents.top();
+
+ // is this actually an indentation?
+ if(indent.column < lastIndent.column)
+ return 0;
+ if(indent.column == lastIndent.column && !(indent.type == IndentMarker::SEQ && lastIndent.type == IndentMarker::MAP))
+ return 0;
+
+ // push a start token
+ indent.pStartToken = PushToken(GetStartTokenFor(type));
+
+ // and then the indent
+ m_indents.push(&indent);
+ m_indentRefs.push_back(pIndent);
+ return &m_indentRefs.back();
+ }
+
+ // PopIndentToHere
+ // . Pops indentations off the stack until we reach the current indentation level,
+ // and enqueues the proper token each time.
+ // . Then pops all invalid indentations off.
+ void Scanner::PopIndentToHere()
+ {
+ // are we in flow?
+ if(InFlowContext())
+ return;
+
+ // now pop away
+ while(!m_indents.empty()) {
+ const IndentMarker& indent = *m_indents.top();
+ if(indent.column < INPUT.column())
+ break;
+ if(indent.column == INPUT.column() && !(indent.type == IndentMarker::SEQ && !Exp::BlockEntry().Matches(INPUT)))
+ break;
+
+ PopIndent();
+ }
+
+ while(!m_indents.empty() && m_indents.top()->status == IndentMarker::INVALID)
+ PopIndent();
+ }
+
+ // PopAllIndents
+ // . Pops all indentations (except for the base empty one) off the stack,
+ // and enqueues the proper token each time.
+ void Scanner::PopAllIndents()
+ {
+ // are we in flow?
+ if(InFlowContext())
+ return;
+
+ // now pop away
+ while(!m_indents.empty()) {
+ const IndentMarker& indent = *m_indents.top();
+ if(indent.type == IndentMarker::NONE)
+ break;
+
+ PopIndent();
+ }
+ }
+
+ // PopIndent
+ // . Pops a single indent, pushing the proper token
+ void Scanner::PopIndent()
+ {
+ const IndentMarker& indent = *m_indents.top();
+ m_indents.pop();
+
+ if(indent.status != IndentMarker::VALID) {
+ InvalidateSimpleKey();
+ return;
+ }
+
+ if(indent.type == IndentMarker::SEQ)
+ m_tokens.push(Token(Token::BLOCK_SEQ_END, INPUT.mark()));
+ else if(indent.type == IndentMarker::MAP)
+ m_tokens.push(Token(Token::BLOCK_MAP_END, INPUT.mark()));
+ }
+
+ // GetTopIndent
+ int Scanner::GetTopIndent() const
+ {
+ if(m_indents.empty())
+ return 0;
+ return m_indents.top()->column;
+ }
+
+ // ThrowParserException
+ // . Throws a ParserException with the current token location
+ // (if available).
+ // . Does not parse any more tokens.
+ void Scanner::ThrowParserException(const std::string& msg) const
+ {
+ Mark mark = Mark::null();
+ if(!m_tokens.empty()) {
+ const Token& token = m_tokens.front();
+ mark = token.mark;
+ }
+ throw ParserException(mark, msg);
+ }
+}
+
diff --git a/yaml-cpp/src/scanner.h b/yaml-cpp/src/scanner.h
new file mode 100755
index 00000000..bc8dcbe5
--- /dev/null
+++ b/yaml-cpp/src/scanner.h
@@ -0,0 +1,132 @@
+#ifndef SCANNER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define SCANNER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <ios>
+#include <string>
+#include <queue>
+#include <stack>
+#include <set>
+#include <map>
+#include "ptr_vector.h"
+#include "stream.h"
+#include "token.h"
+
+namespace YAML
+{
+ class Node;
+ class RegEx;
+
+ class Scanner
+ {
+ public:
+ Scanner(std::istream& in);
+ ~Scanner();
+
+ // token queue management (hopefully this looks kinda stl-ish)
+ bool empty();
+ void pop();
+ Token& peek();
+
+ private:
+ struct IndentMarker {
+ enum INDENT_TYPE { MAP, SEQ, NONE };
+ enum STATUS { VALID, INVALID, UNKNOWN };
+ IndentMarker(int column_, INDENT_TYPE type_): column(column_), type(type_), status(VALID), pStartToken(0) {}
+
+ int column;
+ INDENT_TYPE type;
+ STATUS status;
+ Token *pStartToken;
+ };
+
+ enum FLOW_MARKER { FLOW_MAP, FLOW_SEQ };
+
+ private:
+ // scanning
+ void EnsureTokensInQueue();
+ void ScanNextToken();
+ void ScanToNextToken();
+ void StartStream();
+ void EndStream();
+ Token *PushToken(Token::TYPE type);
+
+ bool InFlowContext() const { return !m_flows.empty(); }
+ bool InBlockContext() const { return m_flows.empty(); }
+ int GetFlowLevel() const { return m_flows.size(); }
+
+ Token::TYPE GetStartTokenFor(IndentMarker::INDENT_TYPE type) const;
+ IndentMarker *PushIndentTo(int column, IndentMarker::INDENT_TYPE type);
+ void PopIndentToHere();
+ void PopAllIndents();
+ void PopIndent();
+ int GetTopIndent() const;
+
+ // checking input
+ bool CanInsertPotentialSimpleKey() const;
+ bool ExistsActiveSimpleKey() const;
+ void InsertPotentialSimpleKey();
+ void InvalidateSimpleKey();
+ bool VerifySimpleKey();
+ void PopAllSimpleKeys();
+
+ void ThrowParserException(const std::string& msg) const;
+
+ bool IsWhitespaceToBeEaten(char ch);
+ const RegEx& GetValueRegex() const;
+
+ struct SimpleKey {
+ SimpleKey(const Mark& mark_, int flowLevel_);
+
+ void Validate();
+ void Invalidate();
+
+ Mark mark;
+ int flowLevel;
+ IndentMarker *pIndent;
+ Token *pMapStart, *pKey;
+ };
+
+ // and the tokens
+ void ScanDirective();
+ void ScanDocStart();
+ void ScanDocEnd();
+ void ScanBlockSeqStart();
+ void ScanBlockMapStart();
+ void ScanBlockEnd();
+ void ScanBlockEntry();
+ void ScanFlowStart();
+ void ScanFlowEnd();
+ void ScanFlowEntry();
+ void ScanKey();
+ void ScanValue();
+ void ScanAnchorOrAlias();
+ void ScanTag();
+ void ScanPlainScalar();
+ void ScanQuotedScalar();
+ void ScanBlockScalar();
+
+ private:
+ // the stream
+ Stream INPUT;
+
+ // the output (tokens)
+ std::queue<Token> m_tokens;
+
+ // state info
+ bool m_startedStream, m_endedStream;
+ bool m_simpleKeyAllowed;
+ bool m_canBeJSONFlow;
+ std::stack<SimpleKey> m_simpleKeys;
+ std::stack<IndentMarker *> m_indents;
+ ptr_vector<IndentMarker> m_indentRefs; // for "garbage collection"
+ std::stack<FLOW_MARKER> m_flows;
+ };
+}
+
+#endif // SCANNER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
diff --git a/yaml-cpp/src/scanscalar.cpp b/yaml-cpp/src/scanscalar.cpp
new file mode 100755
index 00000000..064c0867
--- /dev/null
+++ b/yaml-cpp/src/scanscalar.cpp
@@ -0,0 +1,214 @@
+#include "scanscalar.h"
+#include "scanner.h"
+#include "exp.h"
+#include "yaml-cpp/exceptions.h"
+#include "token.h"
+
+namespace YAML
+{
+ // ScanScalar
+ // . This is where the scalar magic happens.
+ //
+ // . We do the scanning in three phases:
+ // 1. Scan until newline
+ // 2. Eat newline
+ // 3. Scan leading blanks.
+ //
+ // . Depending on the parameters given, we store or stop
+ // at different places in the above flow.
+ std::string ScanScalar(Stream& INPUT, ScanScalarParams& params)
+ {
+ bool foundNonEmptyLine = false;
+ bool pastOpeningBreak = (params.fold == FOLD_FLOW);
+ bool emptyLine = false, moreIndented = false;
+ int foldedNewlineCount = 0;
+ bool foldedNewlineStartedMoreIndented = false;
+ std::size_t lastEscapedChar = std::string::npos;
+ std::string scalar;
+ params.leadingSpaces = false;
+
+ while(INPUT) {
+ // ********************************
+ // Phase #1: scan until line ending
+
+ std::size_t lastNonWhitespaceChar = scalar.size();
+ bool escapedNewline = false;
+ while(!params.end.Matches(INPUT) && !Exp::Break().Matches(INPUT)) {
+ if(!INPUT)
+ break;
+
+ // document indicator?
+ if(INPUT.column() == 0 && Exp::DocIndicator().Matches(INPUT)) {
+ if(params.onDocIndicator == BREAK)
+ break;
+ else if(params.onDocIndicator == THROW)
+ throw ParserException(INPUT.mark(), ErrorMsg::DOC_IN_SCALAR);
+ }
+
+ foundNonEmptyLine = true;
+ pastOpeningBreak = true;
+
+ // escaped newline? (only if we're escaping on slash)
+ if(params.escape == '\\' && Exp::EscBreak().Matches(INPUT)) {
+ // eat escape character and get out (but preserve trailing whitespace!)
+ INPUT.get();
+ lastNonWhitespaceChar = scalar.size();
+ lastEscapedChar = scalar.size();
+ escapedNewline = true;
+ break;
+ }
+
+ // escape this?
+ if(INPUT.peek() == params.escape) {
+ scalar += Exp::Escape(INPUT);
+ lastNonWhitespaceChar = scalar.size();
+ lastEscapedChar = scalar.size();
+ continue;
+ }
+
+ // otherwise, just add the damn character
+ char ch = INPUT.get();
+ scalar += ch;
+ if(ch != ' ' && ch != '\t')
+ lastNonWhitespaceChar = scalar.size();
+ }
+
+ // eof? if we're looking to eat something, then we throw
+ if(!INPUT) {
+ if(params.eatEnd)
+ throw ParserException(INPUT.mark(), ErrorMsg::EOF_IN_SCALAR);
+ break;
+ }
+
+ // doc indicator?
+ if(params.onDocIndicator == BREAK && INPUT.column() == 0 && Exp::DocIndicator().Matches(INPUT))
+ break;
+
+ // are we done via character match?
+ int n = params.end.Match(INPUT);
+ if(n >= 0) {
+ if(params.eatEnd)
+ INPUT.eat(n);
+ break;
+ }
+
+ // do we remove trailing whitespace?
+ if(params.fold == FOLD_FLOW)
+ scalar.erase(lastNonWhitespaceChar);
+
+ // ********************************
+ // Phase #2: eat line ending
+ n = Exp::Break().Match(INPUT);
+ INPUT.eat(n);
+
+ // ********************************
+ // Phase #3: scan initial spaces
+
+ // first the required indentation
+ while(INPUT.peek() == ' ' && (INPUT.column() < params.indent || (params.detectIndent && !foundNonEmptyLine)))
+ INPUT.eat(1);
+
+ // update indent if we're auto-detecting
+ if(params.detectIndent && !foundNonEmptyLine)
+ params.indent = std::max(params.indent, INPUT.column());
+
+ // and then the rest of the whitespace
+ while(Exp::Blank().Matches(INPUT)) {
+ // we check for tabs that masquerade as indentation
+ if(INPUT.peek() == '\t' && INPUT.column() < params.indent && params.onTabInIndentation == THROW)
+ throw ParserException(INPUT.mark(), ErrorMsg::TAB_IN_INDENTATION);
+
+ if(!params.eatLeadingWhitespace)
+ break;
+
+ INPUT.eat(1);
+ }
+
+ // was this an empty line?
+ bool nextEmptyLine = Exp::Break().Matches(INPUT);
+ bool nextMoreIndented = Exp::Blank().Matches(INPUT);
+ if(params.fold == FOLD_BLOCK && foldedNewlineCount == 0 && nextEmptyLine)
+ foldedNewlineStartedMoreIndented = moreIndented;
+
+ // for block scalars, we always start with a newline, so we should ignore it (not fold or keep)
+ if(pastOpeningBreak) {
+ switch(params.fold) {
+ case DONT_FOLD:
+ scalar += "\n";
+ break;
+ case FOLD_BLOCK:
+ if(!emptyLine && !nextEmptyLine && !moreIndented && !nextMoreIndented && INPUT.column() >= params.indent)
+ scalar += " ";
+ else if(nextEmptyLine)
+ foldedNewlineCount++;
+ else
+ scalar += "\n";
+
+ if(!nextEmptyLine && foldedNewlineCount > 0) {
+ scalar += std::string(foldedNewlineCount - 1, '\n');
+ if(foldedNewlineStartedMoreIndented || nextMoreIndented || !foundNonEmptyLine)
+ scalar += "\n";
+ foldedNewlineCount = 0;
+ }
+ break;
+ case FOLD_FLOW:
+ if(nextEmptyLine)
+ scalar += "\n";
+ else if(!emptyLine && !nextEmptyLine && !escapedNewline)
+ scalar += " ";
+ break;
+ }
+ }
+
+ emptyLine = nextEmptyLine;
+ moreIndented = nextMoreIndented;
+ pastOpeningBreak = true;
+
+ // are we done via indentation?
+ if(!emptyLine && INPUT.column() < params.indent) {
+ params.leadingSpaces = true;
+ break;
+ }
+ }
+
+ // post-processing
+ if(params.trimTrailingSpaces) {
+ std::size_t pos = scalar.find_last_not_of(' ');
+ if(lastEscapedChar != std::string::npos) {
+ if(pos < lastEscapedChar || pos == std::string::npos)
+ pos = lastEscapedChar;
+ }
+ if(pos < scalar.size())
+ scalar.erase(pos + 1);
+ }
+
+ switch(params.chomp) {
+ case CLIP: {
+ std::size_t pos = scalar.find_last_not_of('\n');
+ if(lastEscapedChar != std::string::npos) {
+ if(pos < lastEscapedChar || pos == std::string::npos)
+ pos = lastEscapedChar;
+ }
+ if(pos == std::string::npos)
+ scalar.erase();
+ else if(pos + 1 < scalar.size())
+ scalar.erase(pos + 2);
+ } break;
+ case STRIP: {
+ std::size_t pos = scalar.find_last_not_of('\n');
+ if(lastEscapedChar != std::string::npos) {
+ if(pos < lastEscapedChar || pos == std::string::npos)
+ pos = lastEscapedChar;
+ }
+ if(pos == std::string::npos)
+ scalar.erase();
+ else if(pos < scalar.size())
+ scalar.erase(pos + 1);
+ } break;
+ default:
+ break;
+ }
+
+ return scalar;
+ }
+}
diff --git a/yaml-cpp/src/scanscalar.h b/yaml-cpp/src/scanscalar.h
new file mode 100755
index 00000000..c198cb18
--- /dev/null
+++ b/yaml-cpp/src/scanscalar.h
@@ -0,0 +1,45 @@
+#ifndef SCANSCALAR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define SCANSCALAR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <string>
+#include "regex.h"
+#include "stream.h"
+
+namespace YAML
+{
+ enum CHOMP { STRIP = -1, CLIP, KEEP };
+ enum ACTION { NONE, BREAK, THROW };
+ enum FOLD { DONT_FOLD, FOLD_BLOCK, FOLD_FLOW };
+
+ struct ScanScalarParams {
+ ScanScalarParams(): eatEnd(false), indent(0), detectIndent(false), eatLeadingWhitespace(0), escape(0), fold(DONT_FOLD),
+ trimTrailingSpaces(0), chomp(CLIP), onDocIndicator(NONE), onTabInIndentation(NONE), leadingSpaces(false) {}
+
+ // input:
+ RegEx end; // what condition ends this scalar?
+ bool eatEnd; // should we eat that condition when we see it?
+ int indent; // what level of indentation should be eaten and ignored?
+ bool detectIndent; // should we try to autodetect the indent?
+ bool eatLeadingWhitespace; // should we continue eating this delicious indentation after 'indent' spaces?
+ char escape; // what character do we escape on (i.e., slash or single quote) (0 for none)
+ FOLD fold; // how do we fold line ends?
+ bool trimTrailingSpaces; // do we remove all trailing spaces (at the very end)
+ CHOMP chomp; // do we strip, clip, or keep trailing newlines (at the very end)
+ // Note: strip means kill all, clip means keep at most one, keep means keep all
+ ACTION onDocIndicator; // what do we do if we see a document indicator?
+ ACTION onTabInIndentation; // what do we do if we see a tab where we should be seeing indentation spaces
+
+ // output:
+ bool leadingSpaces;
+ };
+
+ std::string ScanScalar(Stream& INPUT, ScanScalarParams& info);
+}
+
+#endif // SCANSCALAR_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
diff --git a/yaml-cpp/src/scantag.cpp b/yaml-cpp/src/scantag.cpp
new file mode 100755
index 00000000..b71cbcc4
--- /dev/null
+++ b/yaml-cpp/src/scantag.cpp
@@ -0,0 +1,84 @@
+#include "scanner.h"
+#include "regex.h"
+#include "exp.h"
+#include "yaml-cpp/exceptions.h"
+
+namespace YAML
+{
+ const std::string ScanVerbatimTag(Stream& INPUT)
+ {
+ std::string tag;
+
+ // eat the start character
+ INPUT.get();
+
+ while(INPUT) {
+ if(INPUT.peek() == Keys::VerbatimTagEnd) {
+ // eat the end character
+ INPUT.get();
+ return tag;
+ }
+
+ int n = Exp::URI().Match(INPUT);
+ if(n <= 0)
+ break;
+
+ tag += INPUT.get(n);
+ }
+
+ throw ParserException(INPUT.mark(), ErrorMsg::END_OF_VERBATIM_TAG);
+ }
+
+ const std::string ScanTagHandle(Stream& INPUT, bool& canBeHandle)
+ {
+ std::string tag;
+ canBeHandle = true;
+ Mark firstNonWordChar;
+
+ while(INPUT) {
+ if(INPUT.peek() == Keys::Tag) {
+ if(!canBeHandle)
+ throw ParserException(firstNonWordChar, ErrorMsg::CHAR_IN_TAG_HANDLE);
+ break;
+ }
+
+ int n = 0;
+ if(canBeHandle) {
+ n = Exp::Word().Match(INPUT);
+ if(n <= 0) {
+ canBeHandle = false;
+ firstNonWordChar = INPUT.mark();
+ }
+ }
+
+ if(!canBeHandle)
+ n = Exp::Tag().Match(INPUT);
+
+ if(n <= 0)
+ break;
+
+ tag += INPUT.get(n);
+ }
+
+ return tag;
+ }
+
+ const std::string ScanTagSuffix(Stream& INPUT)
+ {
+ std::string tag;
+
+ while(INPUT) {
+ int n = Exp::Tag().Match(INPUT);
+ if(n <= 0)
+ break;
+
+ tag += INPUT.get(n);
+ }
+
+ if(tag.empty())
+ throw ParserException(INPUT.mark(), ErrorMsg::TAG_WITH_NO_SUFFIX);
+
+ return tag;
+ }
+}
+
diff --git a/yaml-cpp/src/scantag.h b/yaml-cpp/src/scantag.h
new file mode 100755
index 00000000..38437c03
--- /dev/null
+++ b/yaml-cpp/src/scantag.h
@@ -0,0 +1,20 @@
+#ifndef SCANTAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define SCANTAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <string>
+#include "stream.h"
+
+namespace YAML
+{
+ const std::string ScanVerbatimTag(Stream& INPUT);
+ const std::string ScanTagHandle(Stream& INPUT, bool& canBeHandle);
+ const std::string ScanTagSuffix(Stream& INPUT);
+}
+
+#endif // SCANTAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
diff --git a/yaml-cpp/src/scantoken.cpp b/yaml-cpp/src/scantoken.cpp
new file mode 100755
index 00000000..06d9cd62
--- /dev/null
+++ b/yaml-cpp/src/scantoken.cpp
@@ -0,0 +1,439 @@
+#include "scanner.h"
+#include "token.h"
+#include "yaml-cpp/exceptions.h"
+#include "exp.h"
+#include "scanscalar.h"
+#include "scantag.h"
+#include "tag.h"
+#include <sstream>
+
+namespace YAML
+{
+ ///////////////////////////////////////////////////////////////////////
+ // Specialization for scanning specific tokens
+
+ // Directive
+ // . Note: no semantic checking is done here (that's for the parser to do)
+ void Scanner::ScanDirective()
+ {
+ std::string name;
+ std::vector <std::string> params;
+
+ // pop indents and simple keys
+ PopAllIndents();
+ PopAllSimpleKeys();
+
+ m_simpleKeyAllowed = false;
+ m_canBeJSONFlow = false;
+
+ // store pos and eat indicator
+ Token token(Token::DIRECTIVE, INPUT.mark());
+ INPUT.eat(1);
+
+ // read name
+ while(INPUT && !Exp::BlankOrBreak().Matches(INPUT))
+ token.value += INPUT.get();
+
+ // read parameters
+ while(1) {
+ // first get rid of whitespace
+ while(Exp::Blank().Matches(INPUT))
+ INPUT.eat(1);
+
+ // break on newline or comment
+ if(!INPUT || Exp::Break().Matches(INPUT) || Exp::Comment().Matches(INPUT))
+ break;
+
+ // now read parameter
+ std::string param;
+ while(INPUT && !Exp::BlankOrBreak().Matches(INPUT))
+ param += INPUT.get();
+
+ token.params.push_back(param);
+ }
+
+ m_tokens.push(token);
+ }
+
+ // DocStart
+ void Scanner::ScanDocStart()
+ {
+ PopAllIndents();
+ PopAllSimpleKeys();
+ m_simpleKeyAllowed = false;
+ m_canBeJSONFlow = false;
+
+ // eat
+ Mark mark = INPUT.mark();
+ INPUT.eat(3);
+ m_tokens.push(Token(Token::DOC_START, mark));
+ }
+
+ // DocEnd
+ void Scanner::ScanDocEnd()
+ {
+ PopAllIndents();
+ PopAllSimpleKeys();
+ m_simpleKeyAllowed = false;
+ m_canBeJSONFlow = false;
+
+ // eat
+ Mark mark = INPUT.mark();
+ INPUT.eat(3);
+ m_tokens.push(Token(Token::DOC_END, mark));
+ }
+
+ // FlowStart
+ void Scanner::ScanFlowStart()
+ {
+ // flows can be simple keys
+ InsertPotentialSimpleKey();
+ m_simpleKeyAllowed = true;
+ m_canBeJSONFlow = false;
+
+ // eat
+ Mark mark = INPUT.mark();
+ char ch = INPUT.get();
+ FLOW_MARKER flowType = (ch == Keys::FlowSeqStart ? FLOW_SEQ : FLOW_MAP);
+ m_flows.push(flowType);
+ Token::TYPE type = (flowType == FLOW_SEQ ? Token::FLOW_SEQ_START : Token::FLOW_MAP_START);
+ m_tokens.push(Token(type, mark));
+ }
+
+ // FlowEnd
+ void Scanner::ScanFlowEnd()
+ {
+ if(InBlockContext())
+ throw ParserException(INPUT.mark(), ErrorMsg::FLOW_END);
+
+ // we might have a solo entry in the flow context
+ if(InFlowContext()) {
+ if(m_flows.top() == FLOW_MAP && VerifySimpleKey())
+ m_tokens.push(Token(Token::VALUE, INPUT.mark()));
+ else if(m_flows.top() == FLOW_SEQ)
+ InvalidateSimpleKey();
+ }
+
+ m_simpleKeyAllowed = false;
+ m_canBeJSONFlow = true;
+
+ // eat
+ Mark mark = INPUT.mark();
+ char ch = INPUT.get();
+
+ // check that it matches the start
+ FLOW_MARKER flowType = (ch == Keys::FlowSeqEnd ? FLOW_SEQ : FLOW_MAP);
+ if(m_flows.top() != flowType)
+ throw ParserException(mark, ErrorMsg::FLOW_END);
+ m_flows.pop();
+
+ Token::TYPE type = (flowType == FLOW_SEQ ? Token::FLOW_SEQ_END : Token::FLOW_MAP_END);
+ m_tokens.push(Token(type, mark));
+ }
+
+ // FlowEntry
+ void Scanner::ScanFlowEntry()
+ {
+ // we might have a solo entry in the flow context
+ if(InFlowContext()) {
+ if(m_flows.top() == FLOW_MAP && VerifySimpleKey())
+ m_tokens.push(Token(Token::VALUE, INPUT.mark()));
+ else if(m_flows.top() == FLOW_SEQ)
+ InvalidateSimpleKey();
+ }
+
+ m_simpleKeyAllowed = true;
+ m_canBeJSONFlow = false;
+
+ // eat
+ Mark mark = INPUT.mark();
+ INPUT.eat(1);
+ m_tokens.push(Token(Token::FLOW_ENTRY, mark));
+ }
+
+ // BlockEntry
+ void Scanner::ScanBlockEntry()
+ {
+ // we better be in the block context!
+ if(InFlowContext())
+ throw ParserException(INPUT.mark(), ErrorMsg::BLOCK_ENTRY);
+
+ // can we put it here?
+ if(!m_simpleKeyAllowed)
+ throw ParserException(INPUT.mark(), ErrorMsg::BLOCK_ENTRY);
+
+ PushIndentTo(INPUT.column(), IndentMarker::SEQ);
+ m_simpleKeyAllowed = true;
+ m_canBeJSONFlow = false;
+
+ // eat
+ Mark mark = INPUT.mark();
+ INPUT.eat(1);
+ m_tokens.push(Token(Token::BLOCK_ENTRY, mark));
+ }
+
+ // Key
+ void Scanner::ScanKey()
+ {
+ // handle keys differently in the block context (and manage indents)
+ if(InBlockContext()) {
+ if(!m_simpleKeyAllowed)
+ throw ParserException(INPUT.mark(), ErrorMsg::MAP_KEY);
+
+ PushIndentTo(INPUT.column(), IndentMarker::MAP);
+ }
+
+ // can only put a simple key here if we're in block context
+ m_simpleKeyAllowed = InBlockContext();
+
+ // eat
+ Mark mark = INPUT.mark();
+ INPUT.eat(1);
+ m_tokens.push(Token(Token::KEY, mark));
+ }
+
+ // Value
+ void Scanner::ScanValue()
+ {
+ // and check that simple key
+ bool isSimpleKey = VerifySimpleKey();
+ m_canBeJSONFlow = false;
+
+ if(isSimpleKey) {
+ // can't follow a simple key with another simple key (dunno why, though - it seems fine)
+ m_simpleKeyAllowed = false;
+ } else {
+ // handle values differently in the block context (and manage indents)
+ if(InBlockContext()) {
+ if(!m_simpleKeyAllowed)
+ throw ParserException(INPUT.mark(), ErrorMsg::MAP_VALUE);
+
+ PushIndentTo(INPUT.column(), IndentMarker::MAP);
+ }
+
+ // can only put a simple key here if we're in block context
+ m_simpleKeyAllowed = InBlockContext();
+ }
+
+ // eat
+ Mark mark = INPUT.mark();
+ INPUT.eat(1);
+ m_tokens.push(Token(Token::VALUE, mark));
+ }
+
+ // AnchorOrAlias
+ void Scanner::ScanAnchorOrAlias()
+ {
+ bool alias;
+ std::string name;
+
+ // insert a potential simple key
+ InsertPotentialSimpleKey();
+ m_simpleKeyAllowed = false;
+ m_canBeJSONFlow = false;
+
+ // eat the indicator
+ Mark mark = INPUT.mark();
+ char indicator = INPUT.get();
+ alias = (indicator == Keys::Alias);
+
+ // now eat the content
+ while(INPUT && Exp::Anchor().Matches(INPUT))
+ name += INPUT.get();
+
+ // we need to have read SOMETHING!
+ if(name.empty())
+ throw ParserException(INPUT.mark(), alias ? ErrorMsg::ALIAS_NOT_FOUND : ErrorMsg::ANCHOR_NOT_FOUND);
+
+ // and needs to end correctly
+ if(INPUT && !Exp::AnchorEnd().Matches(INPUT))
+ throw ParserException(INPUT.mark(), alias ? ErrorMsg::CHAR_IN_ALIAS : ErrorMsg::CHAR_IN_ANCHOR);
+
+ // and we're done
+ Token token(alias ? Token::ALIAS : Token::ANCHOR, mark);
+ token.value = name;
+ m_tokens.push(token);
+ }
+
+ // Tag
+ void Scanner::ScanTag()
+ {
+ // insert a potential simple key
+ InsertPotentialSimpleKey();
+ m_simpleKeyAllowed = false;
+ m_canBeJSONFlow = false;
+
+ Token token(Token::TAG, INPUT.mark());
+
+ // eat the indicator
+ INPUT.get();
+
+ if(INPUT && INPUT.peek() == Keys::VerbatimTagStart){
+ std::string tag = ScanVerbatimTag(INPUT);
+
+ token.value = tag;
+ token.data = Tag::VERBATIM;
+ } else {
+ bool canBeHandle;
+ token.value = ScanTagHandle(INPUT, canBeHandle);
+ if(!canBeHandle && token.value.empty())
+ token.data = Tag::NON_SPECIFIC;
+ else if(token.value.empty())
+ token.data = Tag::SECONDARY_HANDLE;
+ else
+ token.data = Tag::PRIMARY_HANDLE;
+
+ // is there a suffix?
+ if(canBeHandle && INPUT.peek() == Keys::Tag) {
+ // eat the indicator
+ INPUT.get();
+ token.params.push_back(ScanTagSuffix(INPUT));
+ token.data = Tag::NAMED_HANDLE;
+ }
+ }
+
+ m_tokens.push(token);
+ }
+
+ // PlainScalar
+ void Scanner::ScanPlainScalar()
+ {
+ std::string scalar;
+
+ // set up the scanning parameters
+ ScanScalarParams params;
+ params.end = (InFlowContext() ? Exp::EndScalarInFlow() : Exp::EndScalar()) || (Exp::BlankOrBreak() + Exp::Comment());
+ params.eatEnd = false;
+ params.indent = (InFlowContext() ? 0 : GetTopIndent() + 1);
+ params.fold = FOLD_FLOW;
+ params.eatLeadingWhitespace = true;
+ params.trimTrailingSpaces = true;
+ params.chomp = STRIP;
+ params.onDocIndicator = BREAK;
+ params.onTabInIndentation = THROW;
+
+ // insert a potential simple key
+ InsertPotentialSimpleKey();
+
+ Mark mark = INPUT.mark();
+ scalar = ScanScalar(INPUT, params);
+
+ // can have a simple key only if we ended the scalar by starting a new line
+ m_simpleKeyAllowed = params.leadingSpaces;
+ m_canBeJSONFlow = false;
+
+ // finally, check and see if we ended on an illegal character
+ //if(Exp::IllegalCharInScalar.Matches(INPUT))
+ // throw ParserException(INPUT.mark(), ErrorMsg::CHAR_IN_SCALAR);
+
+ Token token(Token::PLAIN_SCALAR, mark);
+ token.value = scalar;
+ m_tokens.push(token);
+ }
+
+ // QuotedScalar
+ void Scanner::ScanQuotedScalar()
+ {
+ std::string scalar;
+
+ // peek at single or double quote (don't eat because we need to preserve (for the time being) the input position)
+ char quote = INPUT.peek();
+ bool single = (quote == '\'');
+
+ // setup the scanning parameters
+ ScanScalarParams params;
+ params.end = (single ? RegEx(quote) && !Exp::EscSingleQuote() : RegEx(quote));
+ params.eatEnd = true;
+ params.escape = (single ? '\'' : '\\');
+ params.indent = 0;
+ params.fold = FOLD_FLOW;
+ params.eatLeadingWhitespace = true;
+ params.trimTrailingSpaces = false;
+ params.chomp = CLIP;
+ params.onDocIndicator = THROW;
+
+ // insert a potential simple key
+ InsertPotentialSimpleKey();
+
+ Mark mark = INPUT.mark();
+
+ // now eat that opening quote
+ INPUT.get();
+
+ // and scan
+ scalar = ScanScalar(INPUT, params);
+ m_simpleKeyAllowed = false;
+ m_canBeJSONFlow = true;
+
+ Token token(Token::NON_PLAIN_SCALAR, mark);
+ token.value = scalar;
+ m_tokens.push(token);
+ }
+
+ // BlockScalarToken
+ // . These need a little extra processing beforehand.
+ // . We need to scan the line where the indicator is (this doesn't count as part of the scalar),
+ // and then we need to figure out what level of indentation we'll be using.
+ void Scanner::ScanBlockScalar()
+ {
+ std::string scalar;
+
+ ScanScalarParams params;
+ params.indent = 1;
+ params.detectIndent = true;
+
+ // eat block indicator ('|' or '>')
+ Mark mark = INPUT.mark();
+ char indicator = INPUT.get();
+ params.fold = (indicator == Keys::FoldedScalar ? FOLD_BLOCK : DONT_FOLD);
+
+ // eat chomping/indentation indicators
+ params.chomp = CLIP;
+ int n = Exp::Chomp().Match(INPUT);
+ for(int i=0;i<n;i++) {
+ char ch = INPUT.get();
+ if(ch == '+')
+ params.chomp = KEEP;
+ else if(ch == '-')
+ params.chomp = STRIP;
+ else if(Exp::Digit().Matches(ch)) {
+ if(ch == '0')
+ throw ParserException(INPUT.mark(), ErrorMsg::ZERO_INDENT_IN_BLOCK);
+
+ params.indent = ch - '0';
+ params.detectIndent = false;
+ }
+ }
+
+ // now eat whitespace
+ while(Exp::Blank().Matches(INPUT))
+ INPUT.eat(1);
+
+ // and comments to the end of the line
+ if(Exp::Comment().Matches(INPUT))
+ while(INPUT && !Exp::Break().Matches(INPUT))
+ INPUT.eat(1);
+
+ // if it's not a line break, then we ran into a bad character inline
+ if(INPUT && !Exp::Break().Matches(INPUT))
+ throw ParserException(INPUT.mark(), ErrorMsg::CHAR_IN_BLOCK);
+
+ // set the initial indentation
+ if(GetTopIndent() >= 0)
+ params.indent += GetTopIndent();
+
+ params.eatLeadingWhitespace = false;
+ params.trimTrailingSpaces = false;
+ params.onTabInIndentation = THROW;
+
+ scalar = ScanScalar(INPUT, params);
+
+ // simple keys always ok after block scalars (since we're gonna start a new line anyways)
+ m_simpleKeyAllowed = true;
+ m_canBeJSONFlow = false;
+
+ Token token(Token::NON_PLAIN_SCALAR, mark);
+ token.value = scalar;
+ m_tokens.push(token);
+ }
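+
+ // Illustrative sketch (not part of yaml-cpp): how a block scalar header such as
+ // "|+2" or ">-" maps onto the ScanScalarParams set up above. The helper name and
+ // signature are hypothetical and the function is never called; only the mapping
+ // mirrors ScanBlockScalar.
+ static void IllustrateBlockScalarHeader(const std::string& header, ScanScalarParams& params)
+ {
+ params.fold = (header[0] == Keys::FoldedScalar ? FOLD_BLOCK : DONT_FOLD); // '>' folds, '|' is literal
+ params.chomp = CLIP; // default: clip to a single trailing newline
+ params.indent = 1;
+ params.detectIndent = true;
+ for(std::string::size_type i = 1; i < header.size(); i++) {
+ if(header[i] == '+')
+ params.chomp = KEEP; // keep all trailing newlines
+ else if(header[i] == '-')
+ params.chomp = STRIP; // strip trailing newlines
+ else if(header[i] >= '1' && header[i] <= '9') { // explicit indentation indicator ('0' is illegal)
+ params.indent = header[i] - '0';
+ params.detectIndent = false;
+ }
+ }
+ }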
+}
diff --git a/yaml-cpp/src/setting.h b/yaml-cpp/src/setting.h
new file mode 100755
index 00000000..806ccdae
--- /dev/null
+++ b/yaml-cpp/src/setting.h
@@ -0,0 +1,105 @@
+#ifndef SETTING_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define SETTING_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <memory>
+#include <vector>
+#include "yaml-cpp/noncopyable.h"
+
+namespace YAML
+{
+ class SettingChangeBase;
+
+ template <typename T>
+ class Setting
+ {
+ public:
+ Setting(): m_value() {}
+
+ const T get() const { return m_value; }
+ std::auto_ptr <SettingChangeBase> set(const T& value);
+ void restore(const Setting<T>& oldSetting) {
+ m_value = oldSetting.get();
+ }
+
+ private:
+ T m_value;
+ };
+
+ class SettingChangeBase
+ {
+ public:
+ virtual ~SettingChangeBase() {}
+ virtual void pop() = 0;
+ };
+
+ template <typename T>
+ class SettingChange: public SettingChangeBase
+ {
+ public:
+ SettingChange(Setting<T> *pSetting): m_pCurSetting(pSetting) {
+ // copy old setting to save its state
+ m_oldSetting = *pSetting;
+ }
+
+ virtual void pop() {
+ m_pCurSetting->restore(m_oldSetting);
+ }
+
+ private:
+ Setting<T> *m_pCurSetting;
+ Setting<T> m_oldSetting;
+ };
+
+ template <typename T>
+ inline std::auto_ptr <SettingChangeBase> Setting<T>::set(const T& value) {
+ std::auto_ptr <SettingChangeBase> pChange(new SettingChange<T> (this));
+ m_value = value;
+ return pChange;
+ }
+
+ class SettingChanges: private noncopyable
+ {
+ public:
+ SettingChanges() {}
+ ~SettingChanges() { clear(); }
+
+ void clear() {
+ restore();
+
+ for(setting_changes::const_iterator it=m_settingChanges.begin();it!=m_settingChanges.end();++it)
+ delete *it;
+ m_settingChanges.clear();
+ }
+
+ void restore() {
+ for(setting_changes::const_iterator it=m_settingChanges.begin();it!=m_settingChanges.end();++it)
+ (*it)->pop();
+ }
+
+ void push(std::auto_ptr <SettingChangeBase> pSettingChange) {
+ m_settingChanges.push_back(pSettingChange.release());
+ }
+
+ // like std::auto_ptr - assignment is transfer of ownership
+ SettingChanges& operator = (SettingChanges& rhs) {
+ if(this == &rhs)
+ return *this;
+
+ clear();
+ m_settingChanges = rhs.m_settingChanges;
+ rhs.m_settingChanges.clear();
+ return *this;
+ }
+
+ private:
+ typedef std::vector <SettingChangeBase *> setting_changes;
+ setting_changes m_settingChanges;
+ };
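+
+ // Usage sketch (illustrative, not part of the header): the emitter state keeps its
+ // formatting options in Setting<T> members and groups their undo records in a
+ // SettingChanges, so a nested collection can restore them all on exit:
+ //
+ // Setting<int> indent; // indent.get() == 0
+ // SettingChanges changes;
+ // changes.push(indent.set(4)); // indent.get() == 4, undo record saved
+ // changes.clear(); // restores the old value: indent.get() == 0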
+}
+
+#endif // SETTING_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/simplekey.cpp b/yaml-cpp/src/simplekey.cpp
new file mode 100755
index 00000000..857a9e0b
--- /dev/null
+++ b/yaml-cpp/src/simplekey.cpp
@@ -0,0 +1,139 @@
+#include "scanner.h"
+#include "token.h"
+#include "yaml-cpp/exceptions.h"
+#include "exp.h"
+
+namespace YAML
+{
+ Scanner::SimpleKey::SimpleKey(const Mark& mark_, int flowLevel_)
+ : mark(mark_), flowLevel(flowLevel_), pIndent(0), pMapStart(0), pKey(0)
+ {
+ }
+
+ void Scanner::SimpleKey::Validate()
+ {
+ // Note: pIndent will *not* be garbage here;
+ // we "garbage collect" them so we can
+ // always refer to them
+ if(pIndent)
+ pIndent->status = IndentMarker::VALID;
+ if(pMapStart)
+ pMapStart->status = Token::VALID;
+ if(pKey)
+ pKey->status = Token::VALID;
+ }
+
+ void Scanner::SimpleKey::Invalidate()
+ {
+ if(pIndent)
+ pIndent->status = IndentMarker::INVALID;
+ if(pMapStart)
+ pMapStart->status = Token::INVALID;
+ if(pKey)
+ pKey->status = Token::INVALID;
+ }
+
+ // CanInsertPotentialSimpleKey
+ bool Scanner::CanInsertPotentialSimpleKey() const
+ {
+ if(!m_simpleKeyAllowed)
+ return false;
+
+ return !ExistsActiveSimpleKey();
+ }
+
+ // ExistsActiveSimpleKey
+ // . Returns true if there's a potential simple key at our flow level
+ // (at most one is allowed per flow level, i.e., at the start of the current flow)
+ bool Scanner::ExistsActiveSimpleKey() const
+ {
+ if(m_simpleKeys.empty())
+ return false;
+
+ const SimpleKey& key = m_simpleKeys.top();
+ return key.flowLevel == GetFlowLevel();
+ }
+
+ // InsertPotentialSimpleKey
+ // . If we can, add a potential simple key to the queue,
+ // and save it on a stack.
+ void Scanner::InsertPotentialSimpleKey()
+ {
+ if(!CanInsertPotentialSimpleKey())
+ return;
+
+ SimpleKey key(INPUT.mark(), GetFlowLevel());
+
+ // first add a map start, if necessary
+ if(InBlockContext()) {
+ key.pIndent = PushIndentTo(INPUT.column(), IndentMarker::MAP);
+ if(key.pIndent) {
+ key.pIndent->status = IndentMarker::UNKNOWN;
+ key.pMapStart = key.pIndent->pStartToken;
+ key.pMapStart->status = Token::UNVERIFIED;
+ }
+ }
+
+ // then add the (now unverified) key
+ m_tokens.push(Token(Token::KEY, INPUT.mark()));
+ key.pKey = &m_tokens.back();
+ key.pKey->status = Token::UNVERIFIED;
+
+ m_simpleKeys.push(key);
+ }
+
+ // InvalidateSimpleKey
+ // . Automatically invalidate the simple key in our flow level
+ void Scanner::InvalidateSimpleKey()
+ {
+ if(m_simpleKeys.empty())
+ return;
+
+ // grab top key
+ SimpleKey& key = m_simpleKeys.top();
+ if(key.flowLevel != GetFlowLevel())
+ return;
+
+ key.Invalidate();
+ m_simpleKeys.pop();
+ }
+
+ // VerifySimpleKey
+ // . Determines whether the latest simple key to be added is valid,
+ // and if so, marks it as valid.
+ bool Scanner::VerifySimpleKey()
+ {
+ if(m_simpleKeys.empty())
+ return false;
+
+ // grab top key
+ SimpleKey key = m_simpleKeys.top();
+
+ // only validate if we're in the correct flow level
+ if(key.flowLevel != GetFlowLevel())
+ return false;
+
+ m_simpleKeys.pop();
+
+ bool isValid = true;
+
+ // needs to be less than 1024 characters and inline
+ if(INPUT.line() != key.mark.line || INPUT.pos() - key.mark.pos > 1024)
+ isValid = false;
+
+ // mark the key valid or invalid accordingly
+ if(isValid)
+ key.Validate();
+ else
+ key.Invalidate();
+
+ return isValid;
+ }
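+
+ // Worked example (illustrative): for the input "foo: bar", a speculative KEY token
+ // is queued when "foo" begins (InsertPotentialSimpleKey). When the ':' is scanned,
+ // ScanValue() calls VerifySimpleKey(), which accepts the key because it starts on
+ // the same line and spans at most 1024 characters, so the KEY token (and any
+ // implicit map start) is marked VALID. Had the ':' appeared on a later line, the
+ // same call would have marked them INVALID instead.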
+
+ void Scanner::PopAllSimpleKeys()
+ {
+ while(!m_simpleKeys.empty())
+ m_simpleKeys.pop();
+ }
+}
+
diff --git a/yaml-cpp/src/singledocparser.cpp b/yaml-cpp/src/singledocparser.cpp
new file mode 100755
index 00000000..47759c32
--- /dev/null
+++ b/yaml-cpp/src/singledocparser.cpp
@@ -0,0 +1,381 @@
+#include "singledocparser.h"
+#include "collectionstack.h"
+#include "directives.h"
+#include "yaml-cpp/eventhandler.h"
+#include "yaml-cpp/exceptions.h"
+#include "scanner.h"
+#include "tag.h"
+#include "token.h"
+#include <sstream>
+#include <cstdio>
+#include <algorithm>
+
+namespace YAML
+{
+ SingleDocParser::SingleDocParser(Scanner& scanner, const Directives& directives): m_scanner(scanner), m_directives(directives), m_pCollectionStack(new CollectionStack), m_curAnchor(0)
+ {
+ }
+
+ SingleDocParser::~SingleDocParser()
+ {
+ }
+
+ // HandleDocument
+ // . Handles the next document
+ // . Throws a ParserException on error.
+ void SingleDocParser::HandleDocument(EventHandler& eventHandler)
+ {
+ assert(!m_scanner.empty()); // guaranteed that there are tokens
+ assert(!m_curAnchor);
+
+ eventHandler.OnDocumentStart(m_scanner.peek().mark);
+
+ // eat doc start
+ if(m_scanner.peek().type == Token::DOC_START)
+ m_scanner.pop();
+
+ // recurse!
+ HandleNode(eventHandler);
+
+ eventHandler.OnDocumentEnd();
+
+ // and finally eat any doc ends we see
+ while(!m_scanner.empty() && m_scanner.peek().type == Token::DOC_END)
+ m_scanner.pop();
+ }
+
+ void SingleDocParser::HandleNode(EventHandler& eventHandler)
+ {
+ // an empty node *is* a possibility
+ if(m_scanner.empty()) {
+ eventHandler.OnNull(Mark::null(), NullAnchor);
+ return;
+ }
+
+ // save location
+ Mark mark = m_scanner.peek().mark;
+
+ // special case: a value node by itself must be a map, with no header
+ if(m_scanner.peek().type == Token::VALUE) {
+ eventHandler.OnMapStart(mark, "", NullAnchor);
+ HandleMap(eventHandler);
+ eventHandler.OnMapEnd();
+ return;
+ }
+
+ // special case: an alias node
+ if(m_scanner.peek().type == Token::ALIAS) {
+ eventHandler.OnAlias(mark, LookupAnchor(mark, m_scanner.peek().value));
+ m_scanner.pop();
+ return;
+ }
+
+ std::string tag;
+ anchor_t anchor;
+ ParseProperties(tag, anchor);
+
+ const Token& token = m_scanner.peek();
+
+ // add non-specific tags
+ if(tag.empty())
+ tag = (token.type == Token::NON_PLAIN_SCALAR ? "!" : "?");
+
+ // now split based on what kind of node we should be
+ switch(token.type) {
+ case Token::PLAIN_SCALAR:
+ case Token::NON_PLAIN_SCALAR:
+ eventHandler.OnScalar(mark, tag, anchor, token.value);
+ m_scanner.pop();
+ return;
+ case Token::FLOW_SEQ_START:
+ case Token::BLOCK_SEQ_START:
+ eventHandler.OnSequenceStart(mark, tag, anchor);
+ HandleSequence(eventHandler);
+ eventHandler.OnSequenceEnd();
+ return;
+ case Token::FLOW_MAP_START:
+ case Token::BLOCK_MAP_START:
+ eventHandler.OnMapStart(mark, tag, anchor);
+ HandleMap(eventHandler);
+ eventHandler.OnMapEnd();
+ return;
+ case Token::KEY:
+ // compact maps can only go in a flow sequence
+ if(m_pCollectionStack->GetCurCollectionType() == CollectionType::FlowSeq) {
+ eventHandler.OnMapStart(mark, tag, anchor);
+ HandleMap(eventHandler);
+ eventHandler.OnMapEnd();
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if(tag == "?")
+ eventHandler.OnNull(mark, anchor);
+ else
+ eventHandler.OnScalar(mark, tag, anchor, "");
+ }
+
+ void SingleDocParser::HandleSequence(EventHandler& eventHandler)
+ {
+ // split based on start token
+ switch(m_scanner.peek().type) {
+ case Token::BLOCK_SEQ_START: HandleBlockSequence(eventHandler); break;
+ case Token::FLOW_SEQ_START: HandleFlowSequence(eventHandler); break;
+ default: break;
+ }
+ }
+
+ void SingleDocParser::HandleBlockSequence(EventHandler& eventHandler)
+ {
+ // eat start token
+ m_scanner.pop();
+ m_pCollectionStack->PushCollectionType(CollectionType::BlockSeq);
+
+ while(1) {
+ if(m_scanner.empty())
+ throw ParserException(Mark::null(), ErrorMsg::END_OF_SEQ);
+
+ Token token = m_scanner.peek();
+ if(token.type != Token::BLOCK_ENTRY && token.type != Token::BLOCK_SEQ_END)
+ throw ParserException(token.mark, ErrorMsg::END_OF_SEQ);
+
+ m_scanner.pop();
+ if(token.type == Token::BLOCK_SEQ_END)
+ break;
+
+ // check for null
+ if(!m_scanner.empty()) {
+ const Token& token = m_scanner.peek();
+ if(token.type == Token::BLOCK_ENTRY || token.type == Token::BLOCK_SEQ_END) {
+ eventHandler.OnNull(token.mark, NullAnchor);
+ continue;
+ }
+ }
+
+ HandleNode(eventHandler);
+ }
+
+ m_pCollectionStack->PopCollectionType(CollectionType::BlockSeq);
+ }
+
+ void SingleDocParser::HandleFlowSequence(EventHandler& eventHandler)
+ {
+ // eat start token
+ m_scanner.pop();
+ m_pCollectionStack->PushCollectionType(CollectionType::FlowSeq);
+
+ while(1) {
+ if(m_scanner.empty())
+ throw ParserException(Mark::null(), ErrorMsg::END_OF_SEQ_FLOW);
+
+ // first check for end
+ if(m_scanner.peek().type == Token::FLOW_SEQ_END) {
+ m_scanner.pop();
+ break;
+ }
+
+ // then read the node
+ HandleNode(eventHandler);
+
+ // now eat the separator (or could be a sequence end, which we ignore - but if it's neither, then it's a bad node)
+ Token& token = m_scanner.peek();
+ if(token.type == Token::FLOW_ENTRY)
+ m_scanner.pop();
+ else if(token.type != Token::FLOW_SEQ_END)
+ throw ParserException(token.mark, ErrorMsg::END_OF_SEQ_FLOW);
+ }
+
+ m_pCollectionStack->PopCollectionType(CollectionType::FlowSeq);
+ }
+
+ void SingleDocParser::HandleMap(EventHandler& eventHandler)
+ {
+ // split based on start token
+ switch(m_scanner.peek().type) {
+ case Token::BLOCK_MAP_START: HandleBlockMap(eventHandler); break;
+ case Token::FLOW_MAP_START: HandleFlowMap(eventHandler); break;
+ case Token::KEY: HandleCompactMap(eventHandler); break;
+ case Token::VALUE: HandleCompactMapWithNoKey(eventHandler); break;
+ default: break;
+ }
+ }
+
+ void SingleDocParser::HandleBlockMap(EventHandler& eventHandler)
+ {
+ // eat start token
+ m_scanner.pop();
+ m_pCollectionStack->PushCollectionType(CollectionType::BlockMap);
+
+ while(1) {
+ if(m_scanner.empty())
+ throw ParserException(Mark::null(), ErrorMsg::END_OF_MAP);
+
+ Token token = m_scanner.peek();
+ if(token.type != Token::KEY && token.type != Token::VALUE && token.type != Token::BLOCK_MAP_END)
+ throw ParserException(token.mark, ErrorMsg::END_OF_MAP);
+
+ if(token.type == Token::BLOCK_MAP_END) {
+ m_scanner.pop();
+ break;
+ }
+
+ // grab key (if non-null)
+ if(token.type == Token::KEY) {
+ m_scanner.pop();
+ HandleNode(eventHandler);
+ } else {
+ eventHandler.OnNull(token.mark, NullAnchor);
+ }
+
+ // now grab value (optional)
+ if(!m_scanner.empty() && m_scanner.peek().type == Token::VALUE) {
+ m_scanner.pop();
+ HandleNode(eventHandler);
+ } else {
+ eventHandler.OnNull(token.mark, NullAnchor);
+ }
+ }
+
+ m_pCollectionStack->PopCollectionType(CollectionType::BlockMap);
+ }
+
+ void SingleDocParser::HandleFlowMap(EventHandler& eventHandler)
+ {
+ // eat start token
+ m_scanner.pop();
+ m_pCollectionStack->PushCollectionType(CollectionType::FlowMap);
+
+ while(1) {
+ if(m_scanner.empty())
+ throw ParserException(Mark::null(), ErrorMsg::END_OF_MAP_FLOW);
+
+ Token& token = m_scanner.peek();
+ // first check for end
+ if(token.type == Token::FLOW_MAP_END) {
+ m_scanner.pop();
+ break;
+ }
+
+ // grab key (if non-null)
+ if(token.type == Token::KEY) {
+ m_scanner.pop();
+ HandleNode(eventHandler);
+ } else {
+ eventHandler.OnNull(token.mark, NullAnchor);
+ }
+
+ // now grab value (optional)
+ if(!m_scanner.empty() && m_scanner.peek().type == Token::VALUE) {
+ m_scanner.pop();
+ HandleNode(eventHandler);
+ } else {
+ eventHandler.OnNull(token.mark, NullAnchor);
+ }
+
+ // now eat the separator (or could be a map end, which we ignore - but if it's neither, then it's a bad node)
+ Token& nextToken = m_scanner.peek();
+ if(nextToken.type == Token::FLOW_ENTRY)
+ m_scanner.pop();
+ else if(nextToken.type != Token::FLOW_MAP_END)
+ throw ParserException(nextToken.mark, ErrorMsg::END_OF_MAP_FLOW);
+ }
+
+ m_pCollectionStack->PopCollectionType(CollectionType::FlowMap);
+ }
+
+ // . Single "key: value" pair in a flow sequence
+ void SingleDocParser::HandleCompactMap(EventHandler& eventHandler)
+ {
+ m_pCollectionStack->PushCollectionType(CollectionType::CompactMap);
+
+ // grab key
+ Mark mark = m_scanner.peek().mark;
+ m_scanner.pop();
+ HandleNode(eventHandler);
+
+ // now grab value (optional)
+ if(!m_scanner.empty() && m_scanner.peek().type == Token::VALUE) {
+ m_scanner.pop();
+ HandleNode(eventHandler);
+ } else {
+ eventHandler.OnNull(mark, NullAnchor);
+ }
+
+ m_pCollectionStack->PopCollectionType(CollectionType::CompactMap);
+ }
+
+ // . Single ": value" pair in a flow sequence
+ void SingleDocParser::HandleCompactMapWithNoKey(EventHandler& eventHandler)
+ {
+ m_pCollectionStack->PushCollectionType(CollectionType::CompactMap);
+
+ // null key
+ eventHandler.OnNull(m_scanner.peek().mark, NullAnchor);
+
+ // grab value
+ m_scanner.pop();
+ HandleNode(eventHandler);
+
+ m_pCollectionStack->PopCollectionType(CollectionType::CompactMap);
+ }
+
+ // ParseProperties
+ // . Grabs any tag or anchor tokens and deals with them.
+ void SingleDocParser::ParseProperties(std::string& tag, anchor_t& anchor)
+ {
+ tag.clear();
+ anchor = NullAnchor;
+
+ while(1) {
+ if(m_scanner.empty())
+ return;
+
+ switch(m_scanner.peek().type) {
+ case Token::TAG: ParseTag(tag); break;
+ case Token::ANCHOR: ParseAnchor(anchor); break;
+ default: return;
+ }
+ }
+ }
+
+ void SingleDocParser::ParseTag(std::string& tag)
+ {
+ Token& token = m_scanner.peek();
+ if(!tag.empty())
+ throw ParserException(token.mark, ErrorMsg::MULTIPLE_TAGS);
+
+ Tag tagInfo(token);
+ tag = tagInfo.Translate(m_directives);
+ m_scanner.pop();
+ }
+
+ void SingleDocParser::ParseAnchor(anchor_t& anchor)
+ {
+ Token& token = m_scanner.peek();
+ if(anchor)
+ throw ParserException(token.mark, ErrorMsg::MULTIPLE_ANCHORS);
+
+ anchor = RegisterAnchor(token.value);
+ m_scanner.pop();
+ }
+
+ anchor_t SingleDocParser::RegisterAnchor(const std::string& name)
+ {
+ if(name.empty())
+ return NullAnchor;
+
+ return m_anchors[name] = ++m_curAnchor;
+ }
+
+ anchor_t SingleDocParser::LookupAnchor(const Mark& mark, const std::string& name) const
+ {
+ Anchors::const_iterator it = m_anchors.find(name);
+ if(it == m_anchors.end())
+ throw ParserException(mark, ErrorMsg::UNKNOWN_ANCHOR);
+
+ return it->second;
+ }
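+
+ // Worked example (illustrative): while parsing
+ //
+ // base: &defaults {a: 1}
+ // derived: *defaults
+ //
+ // the ANCHOR token "defaults" reaches ParseAnchor(); RegisterAnchor("defaults")
+ // stores it as anchor id 1 and OnMapStart() is emitted with that id. The later
+ // ALIAS token is resolved by LookupAnchor() and emits OnAlias(mark, 1); an alias
+ // whose name was never anchored throws ErrorMsg::UNKNOWN_ANCHOR.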
+}
diff --git a/yaml-cpp/src/singledocparser.h b/yaml-cpp/src/singledocparser.h
new file mode 100755
index 00000000..3798dccf
--- /dev/null
+++ b/yaml-cpp/src/singledocparser.h
@@ -0,0 +1,65 @@
+#ifndef SINGLEDOCPARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define SINGLEDOCPARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/anchor.h"
+#include "yaml-cpp/noncopyable.h"
+#include <string>
+#include <map>
+#include <memory>
+
+namespace YAML
+{
+ struct Directives;
+ struct Mark;
+ struct Token;
+ class CollectionStack;
+ class EventHandler;
+ class Node;
+ class Scanner;
+
+ class SingleDocParser: private noncopyable
+ {
+ public:
+ SingleDocParser(Scanner& scanner, const Directives& directives);
+ ~SingleDocParser();
+
+ void HandleDocument(EventHandler& eventHandler);
+
+ private:
+ void HandleNode(EventHandler& eventHandler);
+
+ void HandleSequence(EventHandler& eventHandler);
+ void HandleBlockSequence(EventHandler& eventHandler);
+ void HandleFlowSequence(EventHandler& eventHandler);
+
+ void HandleMap(EventHandler& eventHandler);
+ void HandleBlockMap(EventHandler& eventHandler);
+ void HandleFlowMap(EventHandler& eventHandler);
+ void HandleCompactMap(EventHandler& eventHandler);
+ void HandleCompactMapWithNoKey(EventHandler& eventHandler);
+
+ void ParseProperties(std::string& tag, anchor_t& anchor);
+ void ParseTag(std::string& tag);
+ void ParseAnchor(anchor_t& anchor);
+
+ anchor_t RegisterAnchor(const std::string& name);
+ anchor_t LookupAnchor(const Mark& mark, const std::string& name) const;
+
+ private:
+ Scanner& m_scanner;
+ const Directives& m_directives;
+ std::auto_ptr<CollectionStack> m_pCollectionStack;
+
+ typedef std::map<std::string, anchor_t> Anchors;
+ Anchors m_anchors;
+
+ anchor_t m_curAnchor;
+ };
+}
+
+#endif // SINGLEDOCPARSER_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/stream.cpp b/yaml-cpp/src/stream.cpp
new file mode 100755
index 00000000..447b67c1
--- /dev/null
+++ b/yaml-cpp/src/stream.cpp
@@ -0,0 +1,448 @@
+#include "stream.h"
+#include <iostream>
+#include "exp.h"
+
+#ifndef YAML_PREFETCH_SIZE
+#define YAML_PREFETCH_SIZE 2048
+#endif
+
+#define S_ARRAY_SIZE( A ) (sizeof(A)/sizeof(*(A)))
+#define S_ARRAY_END( A ) ((A) + S_ARRAY_SIZE(A))
+
+#define CP_REPLACEMENT_CHARACTER (0xFFFD)
+
+namespace YAML
+{
+ enum UtfIntroState {
+ uis_start,
+ uis_utfbe_b1,
+ uis_utf32be_b2,
+ uis_utf32be_bom3,
+ uis_utf32be,
+ uis_utf16be,
+ uis_utf16be_bom1,
+ uis_utfle_bom1,
+ uis_utf16le_bom2,
+ uis_utf32le_bom3,
+ uis_utf16le,
+ uis_utf32le,
+ uis_utf8_imp,
+ uis_utf16le_imp,
+ uis_utf32le_imp3,
+ uis_utf8_bom1,
+ uis_utf8_bom2,
+ uis_utf8,
+ uis_error
+ };
+
+ enum UtfIntroCharType {
+ uict00,
+ uictBB,
+ uictBF,
+ uictEF,
+ uictFE,
+ uictFF,
+ uictAscii,
+ uictOther,
+ uictMax
+ };
+
+ static bool s_introFinalState[] = {
+ false, //uis_start
+ false, //uis_utfbe_b1
+ false, //uis_utf32be_b2
+ false, //uis_utf32be_bom3
+ true, //uis_utf32be
+ true, //uis_utf16be
+ false, //uis_utf16be_bom1
+ false, //uis_utfle_bom1
+ false, //uis_utf16le_bom2
+ false, //uis_utf32le_bom3
+ true, //uis_utf16le
+ true, //uis_utf32le
+ false, //uis_utf8_imp
+ false, //uis_utf16le_imp
+ false, //uis_utf32le_imp3
+ false, //uis_utf8_bom1
+ false, //uis_utf8_bom2
+ true, //uis_utf8
+ true, //uis_error
+ };
+
+ static UtfIntroState s_introTransitions[][uictMax] = {
+ // uict00, uictBB, uictBF, uictEF, uictFE, uictFF, uictAscii, uictOther
+ {uis_utfbe_b1, uis_utf8, uis_utf8, uis_utf8_bom1, uis_utf16be_bom1, uis_utfle_bom1, uis_utf8_imp, uis_utf8},
+ {uis_utf32be_b2, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf16be, uis_utf8},
+ {uis_utf32be, uis_utf8, uis_utf8, uis_utf8, uis_utf32be_bom3, uis_utf8, uis_utf8, uis_utf8},
+ {uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf32be, uis_utf8, uis_utf8},
+ {uis_utf32be, uis_utf32be, uis_utf32be, uis_utf32be, uis_utf32be, uis_utf32be, uis_utf32be, uis_utf32be},
+ {uis_utf16be, uis_utf16be, uis_utf16be, uis_utf16be, uis_utf16be, uis_utf16be, uis_utf16be, uis_utf16be},
+ {uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf16be, uis_utf8, uis_utf8},
+ {uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf16le_bom2, uis_utf8, uis_utf8, uis_utf8},
+ {uis_utf32le_bom3, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le},
+ {uis_utf32le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le},
+ {uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le},
+ {uis_utf32le, uis_utf32le, uis_utf32le, uis_utf32le, uis_utf32le, uis_utf32le, uis_utf32le, uis_utf32le},
+ {uis_utf16le_imp, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8},
+ {uis_utf32le_imp3, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le},
+ {uis_utf32le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le, uis_utf16le},
+ {uis_utf8, uis_utf8_bom2, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8},
+ {uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8},
+ {uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8, uis_utf8},
+ };
+
+ static char s_introUngetCount[][uictMax] = {
+ // uict00, uictBB, uictBF, uictEF, uictFE, uictFF, uictAscii, uictOther
+ {0, 1, 1, 0, 0, 0, 0, 1},
+ {0, 2, 2, 2, 2, 2, 2, 2},
+ {3, 3, 3, 3, 0, 3, 3, 3},
+ {4, 4, 4, 4, 4, 0, 4, 4},
+ {1, 1, 1, 1, 1, 1, 1, 1},
+ {1, 1, 1, 1, 1, 1, 1, 1},
+ {2, 2, 2, 2, 2, 0, 2, 2},
+ {2, 2, 2, 2, 0, 2, 2, 2},
+ {0, 1, 1, 1, 1, 1, 1, 1},
+ {0, 2, 2, 2, 2, 2, 2, 2},
+ {1, 1, 1, 1, 1, 1, 1, 1},
+ {1, 1, 1, 1, 1, 1, 1, 1},
+ {0, 2, 2, 2, 2, 2, 2, 2},
+ {0, 3, 3, 3, 3, 3, 3, 3},
+ {4, 4, 4, 4, 4, 4, 4, 4},
+ {2, 0, 2, 2, 2, 2, 2, 2},
+ {3, 3, 0, 3, 3, 3, 3, 3},
+ {1, 1, 1, 1, 1, 1, 1, 1},
+ };
+
+ inline UtfIntroCharType IntroCharTypeOf(std::istream::int_type ch)
+ {
+ if (std::istream::traits_type::eof() == ch) {
+ return uictOther;
+ }
+
+ switch (ch) {
+ case 0: return uict00;
+ case 0xBB: return uictBB;
+ case 0xBF: return uictBF;
+ case 0xEF: return uictEF;
+ case 0xFE: return uictFE;
+ case 0xFF: return uictFF;
+ }
+
+ if ((ch > 0) && (ch < 0xFF)) {
+ return uictAscii;
+ }
+
+ return uictOther;
+ }
+
+ inline char Utf8Adjust(unsigned long ch, unsigned char lead_bits, unsigned char rshift)
+ {
+ const unsigned char header = ((1 << lead_bits) - 1) << (8 - lead_bits);
+ const unsigned char mask = (0xFF >> (lead_bits + 1));
+ return static_cast<char>(static_cast<unsigned char>(
+ header | ((ch >> rshift) & mask)
+ ));
+ }
+
+ inline void QueueUnicodeCodepoint(std::deque<char>& q, unsigned long ch)
+ {
+ // We are not allowed to queue the Stream::eof() codepoint, so
+ // replace it with CP_REPLACEMENT_CHARACTER
+ if (static_cast<unsigned long>(Stream::eof()) == ch)
+ {
+ ch = CP_REPLACEMENT_CHARACTER;
+ }
+
+ if (ch < 0x80)
+ {
+ q.push_back(Utf8Adjust(ch, 0, 0));
+ }
+ else if (ch < 0x800)
+ {
+ q.push_back(Utf8Adjust(ch, 2, 6));
+ q.push_back(Utf8Adjust(ch, 1, 0));
+ }
+ else if (ch < 0x10000)
+ {
+ q.push_back(Utf8Adjust(ch, 3, 12));
+ q.push_back(Utf8Adjust(ch, 1, 6));
+ q.push_back(Utf8Adjust(ch, 1, 0));
+ }
+ else
+ {
+ q.push_back(Utf8Adjust(ch, 4, 18));
+ q.push_back(Utf8Adjust(ch, 1, 12));
+ q.push_back(Utf8Adjust(ch, 1, 6));
+ q.push_back(Utf8Adjust(ch, 1, 0));
+ }
+ }
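+
+ // Worked example (illustrative): Utf8Adjust(ch, lead_bits, rshift) builds one UTF-8
+ // byte by setting the top 'lead_bits' bits, clearing the next bit, and filling the
+ // remaining low bits with (ch >> rshift). For U+20AC (the euro sign), which falls in
+ // the three-byte range [0x800, 0x10000):
+ //
+ // Utf8Adjust(0x20AC, 3, 12) == 0xE2
+ // Utf8Adjust(0x20AC, 1, 6) == 0x82
+ // Utf8Adjust(0x20AC, 1, 0) == 0xAC
+ //
+ // so QueueUnicodeCodepoint() pushes E2 82 AC, the UTF-8 encoding of U+20AC.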
+
+ Stream::Stream(std::istream& input)
+ : m_input(input),
+ m_pPrefetched(new unsigned char[YAML_PREFETCH_SIZE]),
+ m_nPrefetchedAvailable(0), m_nPrefetchedUsed(0)
+ {
+ typedef std::istream::traits_type char_traits;
+
+ if(!input)
+ return;
+
+ // Determine (or guess) the character-set by reading the BOM, if any. See
+ // the YAML specification for the determination algorithm.
+ char_traits::int_type intro[4];
+ int nIntroUsed = 0;
+ UtfIntroState state = uis_start;
+ for(; !s_introFinalState[state]; ) {
+ std::istream::int_type ch = input.get();
+ intro[nIntroUsed++] = ch;
+ UtfIntroCharType charType = IntroCharTypeOf(ch);
+ UtfIntroState newState = s_introTransitions[state][charType];
+ int nUngets = s_introUngetCount[state][charType];
+ if(nUngets > 0) {
+ input.clear();
+ for(; nUngets > 0; --nUngets) {
+ if(char_traits::eof() != intro[--nIntroUsed])
+ input.putback(char_traits::to_char_type(intro[nIntroUsed]));
+ }
+ }
+ state = newState;
+ }
+
+ switch (state) {
+ case uis_utf8: m_charSet = utf8; break;
+ case uis_utf16le: m_charSet = utf16le; break;
+ case uis_utf16be: m_charSet = utf16be; break;
+ case uis_utf32le: m_charSet = utf32le; break;
+ case uis_utf32be: m_charSet = utf32be; break;
+ default: m_charSet = utf8; break;
+ }
+
+ ReadAheadTo(0);
+ }
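+
+ // Summary of the intro state machine above (illustrative): per the YAML spec, the
+ // recognised byte-order marks and implicit encodings are
+ //
+ // 00 00 FE FF -> UTF-32BE (BOM) FE FF -> UTF-16BE (BOM)
+ // FF FE 00 00 -> UTF-32LE (BOM) FF FE -> UTF-16LE (BOM)
+ // EF BB BF -> UTF-8 (BOM)
+ // 00 00 00 xx -> UTF-32BE 00 xx -> UTF-16BE
+ // xx 00 00 00 -> UTF-32LE xx 00 -> UTF-16LE
+ // anything else -> UTF-8
+ //
+ // where xx is a non-zero byte; bytes that turn out not to belong to a BOM are pushed
+ // back via s_introUngetCount so the document content is left intact.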
+
+ Stream::~Stream()
+ {
+ delete[] m_pPrefetched;
+ }
+
+ char Stream::peek() const
+ {
+ if (m_readahead.empty())
+ {
+ return Stream::eof();
+ }
+
+ return m_readahead[0];
+ }
+
+ Stream::operator bool() const
+ {
+ return m_input.good() || (!m_readahead.empty() && m_readahead[0] != Stream::eof());
+ }
+
+ // get
+ // . Extracts a character from the stream and updates our position
+ char Stream::get()
+ {
+ char ch = peek();
+ AdvanceCurrent();
+ m_mark.column++;
+
+ if(ch == '\n') {
+ m_mark.column = 0;
+ m_mark.line++;
+ }
+
+ return ch;
+ }
+
+ // get
+ // . Extracts 'n' characters from the stream and updates our position
+ std::string Stream::get(int n)
+ {
+ std::string ret;
+ ret.reserve(n);
+ for(int i=0;i<n;i++)
+ ret += get();
+ return ret;
+ }
+
+ // eat
+ // . Eats 'n' characters and updates our position.
+ void Stream::eat(int n)
+ {
+ for(int i=0;i<n;i++)
+ get();
+ }
+
+ void Stream::AdvanceCurrent()
+ {
+ if (!m_readahead.empty())
+ {
+ m_readahead.pop_front();
+ m_mark.pos++;
+ }
+
+ ReadAheadTo(0);
+ }
+
+ bool Stream::_ReadAheadTo(size_t i) const
+ {
+ while (m_input.good() && (m_readahead.size() <= i))
+ {
+ switch (m_charSet)
+ {
+ case utf8: StreamInUtf8(); break;
+ case utf16le: StreamInUtf16(); break;
+ case utf16be: StreamInUtf16(); break;
+ case utf32le: StreamInUtf32(); break;
+ case utf32be: StreamInUtf32(); break;
+ }
+ }
+
+ // signal end of stream
+ if(!m_input.good())
+ m_readahead.push_back(Stream::eof());
+
+ return m_readahead.size() > i;
+ }
+
+ void Stream::StreamInUtf8() const
+ {
+ unsigned char b = GetNextByte();
+ if (m_input.good())
+ {
+ m_readahead.push_back(b);
+ }
+ }
+
+ void Stream::StreamInUtf16() const
+ {
+ unsigned long ch = 0;
+ unsigned char bytes[2];
+ int nBigEnd = (m_charSet == utf16be) ? 0 : 1;
+
+ bytes[0] = GetNextByte();
+ bytes[1] = GetNextByte();
+ if (!m_input.good())
+ {
+ return;
+ }
+ ch = (static_cast<unsigned long>(bytes[nBigEnd]) << 8) |
+ static_cast<unsigned long>(bytes[1 ^ nBigEnd]);
+
+ if (ch >= 0xDC00 && ch < 0xE000)
+ {
+ // Trailing (low) surrogate...ugh, wrong order
+ QueueUnicodeCodepoint(m_readahead, CP_REPLACEMENT_CHARACTER);
+ return;
+ }
+ else if (ch >= 0xD800 && ch < 0xDC00)
+ {
+ // ch is a leading (high) surrogate
+
+ // Four byte UTF-8 code point
+
+ // Read the trailing (low) surrogate
+ for (;;)
+ {
+ bytes[0] = GetNextByte();
+ bytes[1] = GetNextByte();
+ if (!m_input.good())
+ {
+ QueueUnicodeCodepoint(m_readahead, CP_REPLACEMENT_CHARACTER);
+ return;
+ }
+ unsigned long chLow = (static_cast<unsigned long>(bytes[nBigEnd]) << 8) |
+ static_cast<unsigned long>(bytes[1 ^ nBigEnd]);
+ if (chLow < 0xDC00 || chLow >= 0xE000)
+ {
+ // Trouble...not a low surrogate. Dump a REPLACEMENT CHARACTER into the stream.
+ QueueUnicodeCodepoint(m_readahead, CP_REPLACEMENT_CHARACTER);
+
+ // Deal with the next UTF-16 unit
+ if (chLow < 0xD800 || chLow >= 0xE000)
+ {
+ // Easiest case: queue the codepoint and return
+ QueueUnicodeCodepoint(m_readahead, ch);
+ return;
+ }
+ else
+ {
+ // Start the loop over with the new high surrogate
+ ch = chLow;
+ continue;
+ }
+ }
+
+ // Select the payload bits from the high surrogate
+ ch &= 0x3FF;
+ ch <<= 10;
+
+ // Include bits from low surrogate
+ ch |= (chLow & 0x3FF);
+
+ // Add the surrogacy offset
+ ch += 0x10000;
+ break;
+ }
+ }
+
+ QueueUnicodeCodepoint(m_readahead, ch);
+ }
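+
+ // Worked example (illustrative): the big-endian UTF-16 byte sequence D8 3D DE 00 is
+ // read as high surrogate 0xD83D and low surrogate 0xDE00, so
+ //
+ // ((0xD83D & 0x3FF) << 10 | (0xDE00 & 0x3FF)) + 0x10000 == 0x1F600
+ //
+ // and QueueUnicodeCodepoint() re-encodes U+1F600 as the UTF-8 bytes F0 9F 98 80.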
+
+ inline char* ReadBuffer(unsigned char* pBuffer)
+ {
+ return reinterpret_cast<char*>(pBuffer);
+ }
+
+ unsigned char Stream::GetNextByte() const
+ {
+ if (m_nPrefetchedUsed >= m_nPrefetchedAvailable)
+ {
+ std::streambuf *pBuf = m_input.rdbuf();
+ m_nPrefetchedAvailable = (size_t)pBuf->sgetn(ReadBuffer(m_pPrefetched),
+ YAML_PREFETCH_SIZE);
+ m_nPrefetchedUsed = 0;
+ if (!m_nPrefetchedAvailable)
+ {
+ m_input.setstate(std::ios_base::eofbit);
+ }
+
+ if (0 == m_nPrefetchedAvailable)
+ {
+ return 0;
+ }
+ }
+
+ return m_pPrefetched[m_nPrefetchedUsed++];
+ }
+
+ void Stream::StreamInUtf32() const
+ {
+ static int indexes[2][4] = {
+ {3, 2, 1, 0},
+ {0, 1, 2, 3}
+ };
+
+ unsigned long ch = 0;
+ unsigned char bytes[4];
+ int* pIndexes = (m_charSet == utf32be) ? indexes[1] : indexes[0];
+
+ bytes[0] = GetNextByte();
+ bytes[1] = GetNextByte();
+ bytes[2] = GetNextByte();
+ bytes[3] = GetNextByte();
+ if (!m_input.good())
+ {
+ return;
+ }
+
+ for (int i = 0; i < 4; ++i)
+ {
+ ch <<= 8;
+ ch |= bytes[pIndexes[i]];
+ }
+
+ QueueUnicodeCodepoint(m_readahead, ch);
+ }
+}
diff --git a/yaml-cpp/src/stream.h b/yaml-cpp/src/stream.h
new file mode 100755
index 00000000..87f48dc8
--- /dev/null
+++ b/yaml-cpp/src/stream.h
@@ -0,0 +1,79 @@
+#ifndef STREAM_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define STREAM_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/noncopyable.h"
+#include "yaml-cpp/mark.h"
+#include <cstddef>
+#include <deque>
+#include <ios>
+#include <iostream>
+#include <set>
+#include <string>
+
+namespace YAML
+{
+ class Stream: private noncopyable
+ {
+ public:
+ friend class StreamCharSource;
+
+ Stream(std::istream& input);
+ ~Stream();
+
+ operator bool() const;
+ bool operator !() const { return !static_cast <bool>(*this); }
+
+ char peek() const;
+ char get();
+ std::string get(int n);
+ void eat(int n = 1);
+
+ static char eof() { return 0x04; }
+
+ const Mark mark() const { return m_mark; }
+ int pos() const { return m_mark.pos; }
+ int line() const { return m_mark.line; }
+ int column() const { return m_mark.column; }
+ void ResetColumn() { m_mark.column = 0; }
+
+ private:
+ enum CharacterSet {utf8, utf16le, utf16be, utf32le, utf32be};
+
+ std::istream& m_input;
+ Mark m_mark;
+
+ CharacterSet m_charSet;
+ mutable std::deque<char> m_readahead;
+ unsigned char* const m_pPrefetched;
+ mutable size_t m_nPrefetchedAvailable;
+ mutable size_t m_nPrefetchedUsed;
+
+ void AdvanceCurrent();
+ char CharAt(size_t i) const;
+ bool ReadAheadTo(size_t i) const;
+ bool _ReadAheadTo(size_t i) const;
+ void StreamInUtf8() const;
+ void StreamInUtf16() const;
+ void StreamInUtf32() const;
+ unsigned char GetNextByte() const;
+ };
+
+ // CharAt
+ // . Unchecked access
+ inline char Stream::CharAt(size_t i) const {
+ return m_readahead[i];
+ }
+
+ inline bool Stream::ReadAheadTo(size_t i) const {
+ if(m_readahead.size() > i)
+ return true;
+ return _ReadAheadTo(i);
+ }
+}
+
+#endif // STREAM_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/streamcharsource.h b/yaml-cpp/src/streamcharsource.h
new file mode 100755
index 00000000..21fae4e1
--- /dev/null
+++ b/yaml-cpp/src/streamcharsource.h
@@ -0,0 +1,48 @@
+#ifndef STREAMCHARSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define STREAMCHARSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/noncopyable.h"
+#include <cstddef>
+
+namespace YAML
+{
+ class StreamCharSource
+ {
+ public:
+ StreamCharSource(const Stream& stream): m_offset(0), m_stream(stream) {}
+ StreamCharSource(const StreamCharSource& source): m_offset(source.m_offset), m_stream(source.m_stream) {}
+ ~StreamCharSource() {}
+
+ operator bool() const;
+ char operator [] (std::size_t i) const { return m_stream.CharAt(m_offset + i); }
+ bool operator !() const { return !static_cast<bool>(*this); }
+
+ const StreamCharSource operator + (int i) const;
+
+ private:
+ std::size_t m_offset;
+ const Stream& m_stream;
+
+ StreamCharSource& operator = (const StreamCharSource&); // non-assignable
+ };
+
+ inline StreamCharSource::operator bool() const {
+ return m_stream.ReadAheadTo(m_offset);
+ }
+
+ inline const StreamCharSource StreamCharSource::operator + (int i) const {
+ StreamCharSource source(*this);
+ if(static_cast<int> (source.m_offset) + i >= 0)
+ source.m_offset += i;
+ else
+ source.m_offset = 0;
+ return source;
+ }
+}
+
+#endif // STREAMCHARSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/stringsource.h b/yaml-cpp/src/stringsource.h
new file mode 100755
index 00000000..21be3c9a
--- /dev/null
+++ b/yaml-cpp/src/stringsource.h
@@ -0,0 +1,47 @@
+#ifndef STRINGSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define STRINGSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include <cstddef>
+
+namespace YAML
+{
+ class StringCharSource
+ {
+ public:
+ StringCharSource(const char *str, std::size_t size): m_str(str), m_size(size), m_offset(0) {}
+
+ operator bool() const { return m_offset < m_size; }
+ char operator [] (std::size_t i) const { return m_str[m_offset + i]; }
+ bool operator !() const { return !static_cast<bool>(*this); }
+
+ const StringCharSource operator + (int i) const {
+ StringCharSource source(*this);
+ if(static_cast<int> (source.m_offset) + i >= 0)
+ source.m_offset += i;
+ else
+ source.m_offset = 0;
+ return source;
+ }
+
+ StringCharSource& operator ++ () {
+ ++m_offset;
+ return *this;
+ }
+
+ StringCharSource& operator += (std::size_t offset) {
+ m_offset += offset;
+ return *this;
+ }
+ private:
+ const char *m_str;
+ std::size_t m_size;
+ std::size_t m_offset;
+ };
+}
+
+#endif // STRINGSOURCE_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/tag.cpp b/yaml-cpp/src/tag.cpp
new file mode 100755
index 00000000..82a47047
--- /dev/null
+++ b/yaml-cpp/src/tag.cpp
@@ -0,0 +1,52 @@
+#include "tag.h"
+#include "directives.h"
+#include "token.h"
+#include <cassert>
+#include <stdexcept>
+
+namespace YAML
+{
+ Tag::Tag(const Token& token): type(static_cast<TYPE>(token.data))
+ {
+ switch(type) {
+ case VERBATIM:
+ value = token.value;
+ break;
+ case PRIMARY_HANDLE:
+ value = token.value;
+ break;
+ case SECONDARY_HANDLE:
+ value = token.value;
+ break;
+ case NAMED_HANDLE:
+ handle = token.value;
+ value = token.params[0];
+ break;
+ case NON_SPECIFIC:
+ break;
+ default:
+ assert(false);
+ }
+ }
+
+ const std::string Tag::Translate(const Directives& directives)
+ {
+ switch(type) {
+ case VERBATIM:
+ return value;
+ case PRIMARY_HANDLE:
+ return directives.TranslateTagHandle("!") + value;
+ case SECONDARY_HANDLE:
+ return directives.TranslateTagHandle("!!") + value;
+ case NAMED_HANDLE:
+ return directives.TranslateTagHandle("!" + handle + "!") + value;
+ case NON_SPECIFIC:
+ // TODO:
+ return "!";
+ default:
+ assert(false);
+ }
+ throw std::runtime_error("yaml-cpp: internal error, bad tag type");
+ }
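+
+ // Worked example (illustrative, assuming the default %TAG directives): a token with
+ // data == SECONDARY_HANDLE and value == "str" (written "!!str" in the input)
+ // translates to "tag:yaml.org,2002:str", while PRIMARY_HANDLE with value "foo"
+ // ("!foo") stays "!foo" unless a %TAG directive remaps the "!" handle.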
+}
+
diff --git a/yaml-cpp/src/tag.h b/yaml-cpp/src/tag.h
new file mode 100755
index 00000000..5f77548d
--- /dev/null
+++ b/yaml-cpp/src/tag.h
@@ -0,0 +1,28 @@
+#ifndef TAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define TAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include <string>
+
+namespace YAML
+{
+ struct Token;
+ struct Directives;
+
+ struct Tag {
+ enum TYPE {
+ VERBATIM, PRIMARY_HANDLE, SECONDARY_HANDLE, NAMED_HANDLE, NON_SPECIFIC
+ };
+
+ Tag(const Token& token);
+ const std::string Translate(const Directives& directives);
+
+ TYPE type;
+ std::string handle, value;
+ };
+}
+
+#endif // TAG_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/src/token.h b/yaml-cpp/src/token.h
new file mode 100755
index 00000000..9807e258
--- /dev/null
+++ b/yaml-cpp/src/token.h
@@ -0,0 +1,85 @@
+#ifndef TOKEN_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define TOKEN_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+
+#include "yaml-cpp/mark.h"
+#include <iostream>
+#include <string>
+#include <vector>
+
+namespace YAML
+{
+ const std::string TokenNames[] = {
+ "DIRECTIVE",
+ "DOC_START",
+ "DOC_END",
+ "BLOCK_SEQ_START",
+ "BLOCK_MAP_START",
+ "BLOCK_SEQ_END",
+ "BLOCK_MAP_END",
+ "BLOCK_ENTRY",
+ "FLOW_SEQ_START",
+ "FLOW_MAP_START",
+ "FLOW_SEQ_END",
+ "FLOW_MAP_END",
+ "FLOW_MAP_COMPACT",
+ "FLOW_ENTRY",
+ "KEY",
+ "VALUE",
+ "ANCHOR",
+ "ALIAS",
+ "TAG",
+ "SCALAR"
+ };
+
+ struct Token {
+ // enums
+ enum STATUS { VALID, INVALID, UNVERIFIED };
+ enum TYPE {
+ DIRECTIVE,
+ DOC_START,
+ DOC_END,
+ BLOCK_SEQ_START,
+ BLOCK_MAP_START,
+ BLOCK_SEQ_END,
+ BLOCK_MAP_END,
+ BLOCK_ENTRY,
+ FLOW_SEQ_START,
+ FLOW_MAP_START,
+ FLOW_SEQ_END,
+ FLOW_MAP_END,
+ FLOW_MAP_COMPACT,
+ FLOW_ENTRY,
+ KEY,
+ VALUE,
+ ANCHOR,
+ ALIAS,
+ TAG,
+ PLAIN_SCALAR,
+ NON_PLAIN_SCALAR
+ };
+
+ // data
+ Token(TYPE type_, const Mark& mark_): status(VALID), type(type_), mark(mark_), data(0) {}
+
+ friend std::ostream& operator << (std::ostream& out, const Token& token) {
+ out << TokenNames[token.type] << std::string(": ") << token.value;
+ for(std::size_t i=0;i<token.params.size();i++)
+ out << std::string(" ") << token.params[i];
+ return out;
+ }
+
+ STATUS status;
+ TYPE type;
+ Mark mark;
+ std::string value;
+ std::vector <std::string> params;
+ int data;
+ };
+}
+
+#endif // TOKEN_H_62B23520_7C8E_11DE_8A39_0800200C9A66
diff --git a/yaml-cpp/test/CMakeLists.txt b/yaml-cpp/test/CMakeLists.txt
new file mode 100755
index 00000000..241c19ef
--- /dev/null
+++ b/yaml-cpp/test/CMakeLists.txt
@@ -0,0 +1,15 @@
+file(GLOB test_headers [a-z]*.h)
+file(GLOB test_sources [a-z]*.cpp)
+file(GLOB test_old_api_sources old-api/[a-z]*.cpp)
+
+list(APPEND test_sources ${test_old_api_sources})
+
+include_directories(${YAML_CPP_SOURCE_DIR}/test)
+
+add_executable(run-tests
+ ${test_sources}
+ ${test_headers}
+)
+target_link_libraries(run-tests yaml-cpp)
+
+add_test(yaml-reader-test run-tests)
diff --git a/yaml-cpp/test/emittertests.cpp b/yaml-cpp/test/emittertests.cpp
new file mode 100755
index 00000000..a7fdab67
--- /dev/null
+++ b/yaml-cpp/test/emittertests.cpp
@@ -0,0 +1,1148 @@
+#include "tests.h"
+#include "yaml-cpp/yaml.h"
+#include <iostream>
+
+namespace Test
+{
+ namespace Emitter {
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // correct emitting
+
+ void SimpleScalar(YAML::Emitter& out, std::string& desiredOutput) {
+ out << "Hello, World!";
+ desiredOutput = "Hello, World!";
+ }
+
+ void SimpleSeq(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::BeginSeq;
+ out << "eggs";
+ out << "bread";
+ out << "milk";
+ out << YAML::EndSeq;
+
+ desiredOutput = "- eggs\n- bread\n- milk";
+ }
+
+ void SimpleFlowSeq(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::Flow;
+ out << YAML::BeginSeq;
+ out << "Larry";
+ out << "Curly";
+ out << "Moe";
+ out << YAML::EndSeq;
+
+ desiredOutput = "[Larry, Curly, Moe]";
+ }
+
+ void EmptyFlowSeq(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::Flow;
+ out << YAML::BeginSeq;
+ out << YAML::EndSeq;
+
+ desiredOutput = "[]";
+ }
+
+ void NestedBlockSeq(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::BeginSeq;
+ out << "item 1";
+ out << YAML::BeginSeq << "subitem 1" << "subitem 2" << YAML::EndSeq;
+ out << YAML::EndSeq;
+
+ desiredOutput = "- item 1\n-\n - subitem 1\n - subitem 2";
+ }
+
+ void NestedFlowSeq(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::BeginSeq;
+ out << "one";
+ out << YAML::Flow << YAML::BeginSeq << "two" << "three" << YAML::EndSeq;
+ out << YAML::EndSeq;
+
+ desiredOutput = "- one\n- [two, three]";
+ }
+
+ void SimpleMap(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::BeginMap;
+ out << YAML::Key << "name";
+ out << YAML::Value << "Ryan Braun";
+ out << YAML::Key << "position";
+ out << YAML::Value << "3B";
+ out << YAML::EndMap;
+
+ desiredOutput = "name: Ryan Braun\nposition: 3B";
+ }
+
+ void SimpleFlowMap(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::Flow;
+ out << YAML::BeginMap;
+ out << YAML::Key << "shape";
+ out << YAML::Value << "square";
+ out << YAML::Key << "color";
+ out << YAML::Value << "blue";
+ out << YAML::EndMap;
+
+ desiredOutput = "{shape: square, color: blue}";
+ }
+
+ void MapAndList(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::BeginMap;
+ out << YAML::Key << "name";
+ out << YAML::Value << "Barack Obama";
+ out << YAML::Key << "children";
+ out << YAML::Value << YAML::BeginSeq << "Sasha" << "Malia" << YAML::EndSeq;
+ out << YAML::EndMap;
+
+ desiredOutput = "name: Barack Obama\nchildren:\n - Sasha\n - Malia";
+ }
+
+ void ListAndMap(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::BeginSeq;
+ out << "item 1";
+ out << YAML::BeginMap;
+ out << YAML::Key << "pens" << YAML::Value << 8;
+ out << YAML::Key << "pencils" << YAML::Value << 14;
+ out << YAML::EndMap;
+ out << "item 2";
+ out << YAML::EndSeq;
+
+ desiredOutput = "- item 1\n- pens: 8\n pencils: 14\n- item 2";
+ }
+
+ void NestedBlockMap(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::BeginMap;
+ out << YAML::Key << "name";
+ out << YAML::Value << "Fred";
+ out << YAML::Key << "grades";
+ out << YAML::Value;
+ out << YAML::BeginMap;
+ out << YAML::Key << "algebra" << YAML::Value << "A";
+ out << YAML::Key << "physics" << YAML::Value << "C+";
+ out << YAML::Key << "literature" << YAML::Value << "B";
+ out << YAML::EndMap;
+ out << YAML::EndMap;
+
+ desiredOutput = "name: Fred\ngrades:\n algebra: A\n physics: C+\n literature: B";
+ }
+
+ void NestedFlowMap(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::Flow;
+ out << YAML::BeginMap;
+ out << YAML::Key << "name";
+ out << YAML::Value << "Fred";
+ out << YAML::Key << "grades";
+ out << YAML::Value;
+ out << YAML::BeginMap;
+ out << YAML::Key << "algebra" << YAML::Value << "A";
+ out << YAML::Key << "physics" << YAML::Value << "C+";
+ out << YAML::Key << "literature" << YAML::Value << "B";
+ out << YAML::EndMap;
+ out << YAML::EndMap;
+
+ desiredOutput = "{name: Fred, grades: {algebra: A, physics: C+, literature: B}}";
+ }
+
+ void MapListMix(YAML::Emitter& out, std::string& desiredOutput) {
+ out << YAML::BeginMap;
+ out << YAML::Key << "name";
+ out << YAML::Value << "Bob";
+ out << YAML::Key << "position";
+ out << YAML::Value;
+ out << YAML::Flow << YAML::BeginSeq << 2 << 4 << YAML::EndSeq;
+ out << YAML::Key << "invincible" << YAML::Value << YAML::OnOffBool << false;
+ out << YAML::EndMap;
+
+ desiredOutput = "name: Bob\nposition: [2, 4]\ninvincible: off";
+ }
+
+ void SimpleLongKey(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::LongKey;
+ out << YAML::BeginMap;
+ out << YAML::Key << "height";
+ out << YAML::Value << "5'9\"";
+ out << YAML::Key << "weight";
+ out << YAML::Value << 145;
+ out << YAML::EndMap;
+
+ desiredOutput = "? height\n: 5'9\"\n? weight\n: 145";
+ }
+
+ void SingleLongKey(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::Key << "age";
+ out << YAML::Value << "24";
+ out << YAML::LongKey << YAML::Key << "height";
+ out << YAML::Value << "5'9\"";
+ out << YAML::Key << "weight";
+ out << YAML::Value << 145;
+ out << YAML::EndMap;
+
+ desiredOutput = "age: 24\n? height\n: 5'9\"\nweight: 145";
+ }
+
+ void ComplexLongKey(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::LongKey;
+ out << YAML::BeginMap;
+ out << YAML::Key << YAML::BeginSeq << 1 << 3 << YAML::EndSeq;
+ out << YAML::Value << "monster";
+ out << YAML::Key << YAML::Flow << YAML::BeginSeq << 2 << 0 << YAML::EndSeq;
+ out << YAML::Value << "demon";
+ out << YAML::EndMap;
+
+ desiredOutput = "?\n - 1\n - 3\n: monster\n? [2, 0]\n: demon";
+ }
+
+ void AutoLongKey(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::Key << YAML::BeginSeq << 1 << 3 << YAML::EndSeq;
+ out << YAML::Value << "monster";
+ out << YAML::Key << YAML::Flow << YAML::BeginSeq << 2 << 0 << YAML::EndSeq;
+ out << YAML::Value << "demon";
+ out << YAML::Key << "the origin";
+ out << YAML::Value << "angel";
+ out << YAML::EndMap;
+
+ desiredOutput = "?\n - 1\n - 3\n: monster\n? [2, 0]\n: demon\nthe origin: angel";
+ }
+
+ void ScalarFormat(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << "simple scalar";
+ out << YAML::SingleQuoted << "explicit single-quoted scalar";
+ out << YAML::DoubleQuoted << "explicit double-quoted scalar";
+ out << "auto-detected\ndouble-quoted scalar";
+ out << "a non-\"auto-detected\" double-quoted scalar";
+ out << YAML::Literal << "literal scalar\nthat may span\nmany, many\nlines and have \"whatever\" crazy\tsymbols that we like";
+ out << YAML::EndSeq;
+
+ desiredOutput = "- simple scalar\n- 'explicit single-quoted scalar'\n- \"explicit double-quoted scalar\"\n- \"auto-detected\\x0adouble-quoted scalar\"\n- a non-\"auto-detected\" double-quoted scalar\n- |\n literal scalar\n that may span\n many, many\n lines and have \"whatever\" crazy\tsymbols that we like";
+ }
+
+ void AutoLongKeyScalar(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::Key << YAML::Literal << "multi-line\nscalar";
+ out << YAML::Value << "and its value";
+ out << YAML::EndMap;
+
+ desiredOutput = "? |\n multi-line\n scalar\n: and its value";
+ }
+
+ void LongKeyFlowMap(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow;
+ out << YAML::BeginMap;
+ out << YAML::Key << "simple key";
+ out << YAML::Value << "and value";
+ out << YAML::LongKey << YAML::Key << "long key";
+ out << YAML::Value << "and its value";
+ out << YAML::EndMap;
+
+ desiredOutput = "{simple key: and value, ? long key: and its value}";
+ }
+
+ void BlockMapAsKey(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::Key;
+ out << YAML::BeginMap;
+ out << YAML::Key << "key" << YAML::Value << "value";
+ out << YAML::Key << "next key" << YAML::Value << "next value";
+ out << YAML::EndMap;
+ out << YAML::Value;
+ out << "total value";
+ out << YAML::EndMap;
+
+ desiredOutput = "?\n key: value\n next key: next value\n: total value";
+ }
+
+ void AliasAndAnchor(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << YAML::Anchor("fred");
+ out << YAML::BeginMap;
+ out << YAML::Key << "name" << YAML::Value << "Fred";
+ out << YAML::Key << "age" << YAML::Value << 42;
+ out << YAML::EndMap;
+ out << YAML::Alias("fred");
+ out << YAML::EndSeq;
+
+ desiredOutput = "- &fred\n name: Fred\n age: 42\n- *fred";
+ }
+
+ void AliasAndAnchorWithNull(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << YAML::Anchor("fred") << YAML::Null;
+ out << YAML::Alias("fred");
+ out << YAML::EndSeq;
+
+ desiredOutput = "- &fred ~\n- *fred";
+ }
+
+ void AliasAndAnchorInFlow(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow << YAML::BeginSeq;
+ out << YAML::Anchor("fred");
+ out << YAML::BeginMap;
+ out << YAML::Key << "name" << YAML::Value << "Fred";
+ out << YAML::Key << "age" << YAML::Value << 42;
+ out << YAML::EndMap;
+ out << YAML::Alias("fred");
+ out << YAML::EndSeq;
+
+ desiredOutput = "[&fred {name: Fred, age: 42}, *fred]";
+ }
+
+ void SimpleVerbatimTag(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::VerbatimTag("!foo") << "bar";
+
+ desiredOutput = "!<!foo> bar";
+ }
+
+ void VerbatimTagInBlockSeq(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << YAML::VerbatimTag("!foo") << "bar";
+ out << "baz";
+ out << YAML::EndSeq;
+
+ desiredOutput = "- !<!foo> bar\n- baz";
+ }
+
+ void VerbatimTagInFlowSeq(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow << YAML::BeginSeq;
+ out << YAML::VerbatimTag("!foo") << "bar";
+ out << "baz";
+ out << YAML::EndSeq;
+
+ desiredOutput = "[!<!foo> bar, baz]";
+ }
+
+ void VerbatimTagInFlowSeqWithNull(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow << YAML::BeginSeq;
+ out << YAML::VerbatimTag("!foo") << YAML::Null;
+ out << "baz";
+ out << YAML::EndSeq;
+
+ desiredOutput = "[!<!foo> ~, baz]";
+ }
+
+ void VerbatimTagInBlockMap(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::Key << YAML::VerbatimTag("!foo") << "bar";
+ out << YAML::Value << YAML::VerbatimTag("!waz") << "baz";
+ out << YAML::EndMap;
+
+ desiredOutput = "!<!foo> bar: !<!waz> baz";
+ }
+
+ void VerbatimTagInFlowMap(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow << YAML::BeginMap;
+ out << YAML::Key << YAML::VerbatimTag("!foo") << "bar";
+ out << YAML::Value << "baz";
+ out << YAML::EndMap;
+
+ desiredOutput = "{!<!foo> bar: baz}";
+ }
+
+ void VerbatimTagInFlowMapWithNull(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow << YAML::BeginMap;
+ out << YAML::Key << YAML::VerbatimTag("!foo") << YAML::Null;
+ out << YAML::Value << "baz";
+ out << YAML::EndMap;
+
+ desiredOutput = "{!<!foo> ~: baz}";
+ }
+
+ void VerbatimTagWithEmptySeq(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::VerbatimTag("!foo") << YAML::BeginSeq << YAML::EndSeq;
+
+ desiredOutput = "!<!foo>\n[]";
+ }
+
+ void VerbatimTagWithEmptyMap(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::VerbatimTag("!bar") << YAML::BeginMap << YAML::EndMap;
+
+ desiredOutput = "!<!bar>\n{}";
+ }
+
+ void VerbatimTagWithEmptySeqAndMap(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << YAML::VerbatimTag("!foo") << YAML::BeginSeq << YAML::EndSeq;
+ out << YAML::VerbatimTag("!bar") << YAML::BeginMap << YAML::EndMap;
+ out << YAML::EndSeq;
+
+ desiredOutput = "- !<!foo>\n []\n- !<!bar>\n {}";
+ }
+
+ void ByKindTagWithScalar(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << YAML::DoubleQuoted << "12";
+ out << "12";
+ out << YAML::TagByKind << "12";
+ out << YAML::EndSeq;
+
+ desiredOutput = "- \"12\"\n- 12\n- ! 12";
+ }
+
+ void LocalTagWithScalar(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::LocalTag("foo") << "bar";
+
+ desiredOutput = "!foo bar";
+ }
+
+ void BadLocalTag(YAML::Emitter& out, std::string& desiredError)
+ {
+ out << YAML::LocalTag("e!far") << "bar";
+
+ desiredError = "invalid tag";
+ }
+
+ void ComplexDoc(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::Key << "receipt";
+ out << YAML::Value << "Oz-Ware Purchase Invoice";
+ out << YAML::Key << "date";
+ out << YAML::Value << "2007-08-06";
+ out << YAML::Key << "customer";
+ out << YAML::Value;
+ out << YAML::BeginMap;
+ out << YAML::Key << "given";
+ out << YAML::Value << "Dorothy";
+ out << YAML::Key << "family";
+ out << YAML::Value << "Gale";
+ out << YAML::EndMap;
+ out << YAML::Key << "items";
+ out << YAML::Value;
+ out << YAML::BeginSeq;
+ out << YAML::BeginMap;
+ out << YAML::Key << "part_no";
+ out << YAML::Value << "A4786";
+ out << YAML::Key << "descrip";
+ out << YAML::Value << "Water Bucket (Filled)";
+ out << YAML::Key << "price";
+ out << YAML::Value << 1.47;
+ out << YAML::Key << "quantity";
+ out << YAML::Value << 4;
+ out << YAML::EndMap;
+ out << YAML::BeginMap;
+ out << YAML::Key << "part_no";
+ out << YAML::Value << "E1628";
+ out << YAML::Key << "descrip";
+ out << YAML::Value << "High Heeled \"Ruby\" Slippers";
+ out << YAML::Key << "price";
+ out << YAML::Value << 100.27;
+ out << YAML::Key << "quantity";
+ out << YAML::Value << 1;
+ out << YAML::EndMap;
+ out << YAML::EndSeq;
+ out << YAML::Key << "bill-to";
+ out << YAML::Value << YAML::Anchor("id001");
+ out << YAML::BeginMap;
+ out << YAML::Key << "street";
+ out << YAML::Value << YAML::Literal << "123 Tornado Alley\nSuite 16";
+ out << YAML::Key << "city";
+ out << YAML::Value << "East Westville";
+ out << YAML::Key << "state";
+ out << YAML::Value << "KS";
+ out << YAML::EndMap;
+ out << YAML::Key << "ship-to";
+ out << YAML::Value << YAML::Alias("id001");
+ out << YAML::EndMap;
+
+ desiredOutput = "receipt: Oz-Ware Purchase Invoice\ndate: 2007-08-06\ncustomer:\n given: Dorothy\n family: Gale\nitems:\n - part_no: A4786\n descrip: Water Bucket (Filled)\n price: 1.47\n quantity: 4\n - part_no: E1628\n descrip: High Heeled \"Ruby\" Slippers\n price: 100.27\n quantity: 1\nbill-to: &id001\n street: |\n 123 Tornado Alley\n Suite 16\n city: East Westville\n state: KS\nship-to: *id001";
+ }
+
+ void STLContainers(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ std::vector <int> primes;
+ primes.push_back(2);
+ primes.push_back(3);
+ primes.push_back(5);
+ primes.push_back(7);
+ primes.push_back(11);
+ primes.push_back(13);
+ out << YAML::Flow << primes;
+ std::map <std::string, int> ages;
+ ages["Daniel"] = 26;
+ ages["Jesse"] = 24;
+ out << ages;
+ out << YAML::EndSeq;
+
+ desiredOutput = "- [2, 3, 5, 7, 11, 13]\n- Daniel: 26\n Jesse: 24";
+ }
+
+ void SimpleComment(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::Key << "method";
+ out << YAML::Value << "least squares" << YAML::Comment("should we change this method?");
+ out << YAML::EndMap;
+
+ desiredOutput = "method: least squares # should we change this method?";
+ }
+
+ void MultiLineComment(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << "item 1" << YAML::Comment("really really long\ncomment that couldn't possibly\nfit on one line");
+ out << "item 2";
+ out << YAML::EndSeq;
+
+ desiredOutput = "- item 1 # really really long\n # comment that couldn't possibly\n # fit on one line\n- item 2";
+ }
+
+ void ComplexComments(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::LongKey << YAML::Key << "long key" << YAML::Comment("long key");
+ out << YAML::Value << "value";
+ out << YAML::EndMap;
+
+ desiredOutput = "? long key # long key\n: value";
+ }
+
+ void InitialComment(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Comment("A comment describing the purpose of the file.");
+ out << YAML::BeginMap << YAML::Key << "key" << YAML::Value << "value" << YAML::EndMap;
+
+ desiredOutput = "# A comment describing the purpose of the file.\nkey: value";
+ }
+
+ void InitialCommentWithDocIndicator(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginDoc << YAML::Comment("A comment describing the purpose of the file.");
+ out << YAML::BeginMap << YAML::Key << "key" << YAML::Value << "value" << YAML::EndMap;
+
+ desiredOutput = "---\n# A comment describing the purpose of the file.\nkey: value";
+ }
+
+ void CommentInFlowSeq(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow << YAML::BeginSeq << "foo" << YAML::Comment("foo!") << "bar" << YAML::EndSeq;
+
+ desiredOutput = "[foo # foo!\n, bar]";
+ }
+
+ void CommentInFlowMap(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow << YAML::BeginMap;
+ out << YAML::Key << "foo" << YAML::Value << "foo value";
+ out << YAML::Key << "bar" << YAML::Value << "bar value" << YAML::Comment("bar!");
+ out << YAML::Key << "baz" << YAML::Value << "baz value" << YAML::Comment("baz!");
+ out << YAML::EndMap;
+
+ desiredOutput = "{foo: foo value, bar: bar value # bar!\n, baz: baz value # baz!\n}";
+ }
+
+ void Indentation(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Indent(4);
+ out << YAML::BeginSeq;
+ out << YAML::BeginMap;
+ out << YAML::Key << "key 1" << YAML::Value << "value 1";
+ out << YAML::Key << "key 2" << YAML::Value << YAML::BeginSeq << "a" << "b" << "c" << YAML::EndSeq;
+ out << YAML::EndMap;
+ out << YAML::EndSeq;
+
+ desiredOutput = "- key 1: value 1\n key 2:\n - a\n - b\n - c";
+ }
+
+ void SimpleGlobalSettings(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out.SetIndent(4);
+ out.SetMapFormat(YAML::LongKey);
+
+ out << YAML::BeginSeq;
+ out << YAML::BeginMap;
+ out << YAML::Key << "key 1" << YAML::Value << "value 1";
+ out << YAML::Key << "key 2" << YAML::Value << YAML::Flow << YAML::BeginSeq << "a" << "b" << "c" << YAML::EndSeq;
+ out << YAML::EndMap;
+ out << YAML::EndSeq;
+
+ desiredOutput = "- ? key 1\n : value 1\n ? key 2\n : [a, b, c]";
+ }
+
+ void ComplexGlobalSettings(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << YAML::Block;
+ out << YAML::BeginMap;
+ out << YAML::Key << "key 1" << YAML::Value << "value 1";
+ out << YAML::Key << "key 2" << YAML::Value;
+ out.SetSeqFormat(YAML::Flow);
+ out << YAML::BeginSeq << "a" << "b" << "c" << YAML::EndSeq;
+ out << YAML::EndMap;
+ out << YAML::BeginMap;
+ out << YAML::Key << YAML::BeginSeq << 1 << 2 << YAML::EndSeq;
+ out << YAML::Value << YAML::BeginMap << YAML::Key << "a" << YAML::Value << "b" << YAML::EndMap;
+ out << YAML::EndMap;
+ out << YAML::EndSeq;
+
+ desiredOutput = "- key 1: value 1\n key 2: [a, b, c]\n- ? [1, 2]\n :\n a: b";
+ }
+
+ void Null(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << YAML::Null;
+ out << YAML::BeginMap;
+ out << YAML::Key << "null value" << YAML::Value << YAML::Null;
+ out << YAML::Key << YAML::Null << YAML::Value << "null key";
+ out << YAML::EndMap;
+ out << YAML::EndSeq;
+
+ desiredOutput = "- ~\n- null value: ~\n ~: null key";
+ }
+
+ void EscapedUnicode(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::EscapeNonAscii << "\x24 \xC2\xA2 \xE2\x82\xAC \xF0\xA4\xAD\xA2";
+
+ desiredOutput = "\"$ \\xa2 \\u20ac \\U00024b62\"";
+ }
+
+ void Unicode(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << "\x24 \xC2\xA2 \xE2\x82\xAC \xF0\xA4\xAD\xA2";
+ desiredOutput = "\x24 \xC2\xA2 \xE2\x82\xAC \xF0\xA4\xAD\xA2";
+ }
+
+ void DoubleQuotedUnicode(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::DoubleQuoted << "\x24 \xC2\xA2 \xE2\x82\xAC \xF0\xA4\xAD\xA2";
+ desiredOutput = "\"\x24 \xC2\xA2 \xE2\x82\xAC \xF0\xA4\xAD\xA2\"";
+ }
+
+ struct Foo {
+ Foo(): x(0) {}
+ Foo(int x_, const std::string& bar_): x(x_), bar(bar_) {}
+
+ int x;
+ std::string bar;
+ };
+
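+ // Emitter overload for the user-defined Foo type used by the UserType tests: writes it as a map with "x" and "bar" keys.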
+ YAML::Emitter& operator << (YAML::Emitter& out, const Foo& foo) {
+ out << YAML::BeginMap;
+ out << YAML::Key << "x" << YAML::Value << foo.x;
+ out << YAML::Key << "bar" << YAML::Value << foo.bar;
+ out << YAML::EndMap;
+ return out;
+ }
+
+ void UserType(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << Foo(5, "hello");
+ out << Foo(3, "goodbye");
+ out << YAML::EndSeq;
+
+ desiredOutput = "- x: 5\n bar: hello\n- x: 3\n bar: goodbye";
+ }
+
+ void UserTypeInContainer(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ std::vector<Foo> fv;
+ fv.push_back(Foo(5, "hello"));
+ fv.push_back(Foo(3, "goodbye"));
+ out << fv;
+
+ desiredOutput = "- x: 5\n bar: hello\n- x: 3\n bar: goodbye";
+ }
+
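+ // Generic overload for raw pointers: emits the pointee if the pointer is non-null, otherwise emits a YAML null.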
+ template <typename T>
+ YAML::Emitter& operator << (YAML::Emitter& out, const T *v) {
+ if(v)
+ out << *v;
+ else
+ out << YAML::Null;
+ return out;
+ }
+
+ void PointerToInt(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ int foo = 5;
+ int *bar = &foo;
+ int *baz = 0;
+ out << YAML::BeginSeq;
+ out << bar << baz;
+ out << YAML::EndSeq;
+
+ desiredOutput = "- 5\n- ~";
+ }
+
+ void PointerToUserType(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ Foo foo(5, "hello");
+ Foo *bar = &foo;
+ Foo *baz = 0;
+ out << YAML::BeginSeq;
+ out << bar << baz;
+ out << YAML::EndSeq;
+
+ desiredOutput = "- x: 5\n bar: hello\n- ~";
+ }
+
+ void NewlineAtEnd(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << "Hello" << YAML::Newline << YAML::Newline;
+ desiredOutput = "Hello\n\n";
+ }
+
+ void NewlineInBlockSequence(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << "a" << YAML::Newline << "b" << "c" << YAML::Newline << "d";
+ out << YAML::EndSeq;
+ desiredOutput = "- a\n\n- b\n- c\n\n- d";
+ }
+
+ void NewlineInFlowSequence(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow << YAML::BeginSeq;
+ out << "a" << YAML::Newline << "b" << "c" << YAML::Newline << "d";
+ out << YAML::EndSeq;
+ desiredOutput = "[a\n, b, c\n, d]";
+ }
+
+ void NewlineInBlockMap(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::Key << "a" << YAML::Value << "foo" << YAML::Newline;
+ out << YAML::Key << "b" << YAML::Newline << YAML::Value << "bar";
+ out << YAML::LongKey << YAML::Key << "c" << YAML::Newline << YAML::Value << "car";
+ out << YAML::EndMap;
+ desiredOutput = "a: foo\n\nb: bar\n? c\n\n: car";
+ }
+
+ void NewlineInFlowMap(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow << YAML::BeginMap;
+ out << YAML::Key << "a" << YAML::Value << "foo" << YAML::Newline;
+ out << YAML::Key << "b" << YAML::Value << "bar";
+ out << YAML::EndMap;
+ desiredOutput = "{a: foo\n, b: bar}";
+ }
+
+ void LotsOfNewlines(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << "a" << YAML::Newline;
+ out << YAML::BeginSeq;
+ out << "b" << "c" << YAML::Newline;
+ out << YAML::EndSeq;
+ out << YAML::Newline;
+ out << YAML::BeginMap;
+ out << YAML::Newline << YAML::Key << "d" << YAML::Value << YAML::Newline << "e";
+ out << YAML::LongKey << YAML::Key << "f" << YAML::Newline << YAML::Value << "foo";
+ out << YAML::EndMap;
+ out << YAML::EndSeq;
+ desiredOutput = "- a\n\n-\n - b\n - c\n\n\n-\n d: e\n ? f\n\n : foo";
+ }
+
+ void Binary(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Binary(reinterpret_cast<const unsigned char*>("Hello, World!"), 13);
+ desiredOutput = "!!binary \"SGVsbG8sIFdvcmxkIQ==\"";
+ }
+
+ void LongBinary(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Binary(reinterpret_cast<const unsigned char*>("Man is distinguished, not only by his reason, but by this singular passion from other animals, which is a lust of the mind, that by a perseverance of delight in the continued and indefatigable generation of knowledge, exceeds the short vehemence of any carnal pleasure.\n"), 270);
+ desiredOutput = "!!binary \"TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4K\"";
+ }
+
+ void EmptyBinary(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Binary(reinterpret_cast<const unsigned char *>(""), 0);
+ desiredOutput = "!!binary \"\"";
+ }
+
+ void ColonAtEndOfScalar(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << "a:";
+ desiredOutput = "\"a:\"";
+ }
+
+ void ColonAsScalar(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::Key << "apple" << YAML::Value << ":";
+ out << YAML::Key << "banana" << YAML::Value << ":";
+ out << YAML::EndMap;
+ desiredOutput = "apple: \":\"\nbanana: \":\"";
+ }
+
+ void ColonAtEndOfScalarInFlow(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow << YAML::BeginMap << YAML::Key << "C:" << YAML::Value << "C:" << YAML::EndMap;
+ desiredOutput = "{\"C:\": \"C:\"}";
+ }
+
+ void BoolFormatting(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << YAML::TrueFalseBool << YAML::UpperCase << true;
+ out << YAML::TrueFalseBool << YAML::CamelCase << true;
+ out << YAML::TrueFalseBool << YAML::LowerCase << true;
+ out << YAML::TrueFalseBool << YAML::UpperCase << false;
+ out << YAML::TrueFalseBool << YAML::CamelCase << false;
+ out << YAML::TrueFalseBool << YAML::LowerCase << false;
+ out << YAML::YesNoBool << YAML::UpperCase << true;
+ out << YAML::YesNoBool << YAML::CamelCase << true;
+ out << YAML::YesNoBool << YAML::LowerCase << true;
+ out << YAML::YesNoBool << YAML::UpperCase << false;
+ out << YAML::YesNoBool << YAML::CamelCase << false;
+ out << YAML::YesNoBool << YAML::LowerCase << false;
+ out << YAML::OnOffBool << YAML::UpperCase << true;
+ out << YAML::OnOffBool << YAML::CamelCase << true;
+ out << YAML::OnOffBool << YAML::LowerCase << true;
+ out << YAML::OnOffBool << YAML::UpperCase << false;
+ out << YAML::OnOffBool << YAML::CamelCase << false;
+ out << YAML::OnOffBool << YAML::LowerCase << false;
+ out << YAML::ShortBool << YAML::UpperCase << true;
+ out << YAML::ShortBool << YAML::CamelCase << true;
+ out << YAML::ShortBool << YAML::LowerCase << true;
+ out << YAML::ShortBool << YAML::UpperCase << false;
+ out << YAML::ShortBool << YAML::CamelCase << false;
+ out << YAML::ShortBool << YAML::LowerCase << false;
+ out << YAML::EndSeq;
+ desiredOutput =
+ "- TRUE\n- True\n- true\n- FALSE\n- False\n- false\n"
+ "- YES\n- Yes\n- yes\n- NO\n- No\n- no\n"
+ "- ON\n- On\n- on\n- OFF\n- Off\n- off\n"
+ "- Y\n- Y\n- y\n- N\n- N\n- n";
+ }
+
+ void DocStartAndEnd(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginDoc;
+ out << YAML::BeginSeq << 1 << 2 << 3 << YAML::EndSeq;
+ out << YAML::BeginDoc;
+ out << "Hi there!";
+ out << YAML::EndDoc;
+ out << YAML::EndDoc;
+ out << YAML::EndDoc;
+ out << YAML::BeginDoc;
+ out << YAML::VerbatimTag("foo") << "bar";
+ desiredOutput = "---\n- 1\n- 2\n- 3\n---\nHi there!\n...\n...\n...\n---\n!<foo> bar";
+ }
+
+ void ImplicitDocStart(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << "Hi";
+ out << "Bye";
+ out << "Oops";
+ desiredOutput = "Hi\n---\nBye\n---\nOops";
+ }
+
+ void EmptyString(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::Key << "key" << YAML::Value << "";
+ out << YAML::EndMap;
+ desiredOutput = "key: \"\"";
+ }
+
+ void SingleChar(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << 'a';
+ out << ':';
+ out << (char)0x10;
+ out << '\n';
+ out << ' ';
+ out << '\t';
+ out << YAML::EndSeq;
+ desiredOutput = "- a\n- \":\"\n- \"\\x10\"\n- \"\\n\"\n- \" \"\n- \"\\t\"";
+ }
+
+ void DefaultPrecision(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << 1.234f;
+ out << 3.14159265358979;
+ out << YAML::EndSeq;
+ desiredOutput = "- 1.234\n- 3.14159265358979";
+ }
+
+ void SetPrecision(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginSeq;
+ out << YAML::FloatPrecision(3) << 1.234f;
+ out << YAML::DoublePrecision(6) << 3.14159265358979;
+ out << YAML::EndSeq;
+ desiredOutput = "- 1.23\n- 3.14159";
+ }
+
+ void DashInBlockContext(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::BeginMap;
+ out << YAML::Key << "key" << YAML::Value << "-";
+ out << YAML::EndMap;
+ desiredOutput = "key: \"-\"";
+ }
+
+ void HexAndOct(YAML::Emitter& out, std::string& desiredOutput)
+ {
+ out << YAML::Flow << YAML::BeginSeq;
+ out << 31;
+ out << YAML::Hex << 31;
+ out << YAML::Oct << 31;
+ out << YAML::EndSeq;
+ desiredOutput = "[31, 0x1f, 037]";
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // incorrect emitting
+
+ void ExtraEndSeq(YAML::Emitter& out, std::string& desiredError)
+ {
+ desiredError = YAML::ErrorMsg::UNEXPECTED_END_SEQ;
+
+ out << YAML::BeginSeq;
+ out << "Hello";
+ out << "World";
+ out << YAML::EndSeq;
+ out << YAML::EndSeq;
+ }
+
+ void ExtraEndMap(YAML::Emitter& out, std::string& desiredError)
+ {
+ desiredError = YAML::ErrorMsg::UNEXPECTED_END_MAP;
+
+ out << YAML::BeginMap;
+ out << YAML::Key << "Hello" << YAML::Value << "World";
+ out << YAML::EndMap;
+ out << YAML::EndMap;
+ }
+
+ void BadSingleQuoted(YAML::Emitter& out, std::string& desiredError)
+ {
+ desiredError = YAML::ErrorMsg::SINGLE_QUOTED_CHAR;
+
+ out << YAML::SingleQuoted << "Hello\nWorld";
+ }
+
+ void InvalidAnchor(YAML::Emitter& out, std::string& desiredError)
+ {
+ desiredError = YAML::ErrorMsg::INVALID_ANCHOR;
+
+ out << YAML::BeginSeq;
+ out << YAML::Anchor("new\nline") << "Test";
+ out << YAML::EndSeq;
+ }
+
+ void InvalidAlias(YAML::Emitter& out, std::string& desiredError)
+ {
+ desiredError = YAML::ErrorMsg::INVALID_ALIAS;
+
+ out << YAML::BeginSeq;
+ out << YAML::Alias("new\nline");
+ out << YAML::EndSeq;
+ }
+
+ void MissingKey(YAML::Emitter& out, std::string& desiredError)
+ {
+ desiredError = YAML::ErrorMsg::EXPECTED_KEY_TOKEN;
+
+ out << YAML::BeginMap;
+ out << YAML::Key << "key" << YAML::Value << "value";
+ out << "missing key" << YAML::Value << "value";
+ out << YAML::EndMap;
+ }
+
+ void MissingValue(YAML::Emitter& out, std::string& desiredError)
+ {
+ desiredError = YAML::ErrorMsg::EXPECTED_VALUE_TOKEN;
+
+ out << YAML::BeginMap;
+ out << YAML::Key << "key" << "value";
+ out << YAML::EndMap;
+ }
+
+ void UnexpectedKey(YAML::Emitter& out, std::string& desiredError)
+ {
+ desiredError = YAML::ErrorMsg::UNEXPECTED_KEY_TOKEN;
+
+ out << YAML::BeginSeq;
+ out << YAML::Key << "hi";
+ out << YAML::EndSeq;
+ }
+
+ void UnexpectedValue(YAML::Emitter& out, std::string& desiredError)
+ {
+ desiredError = YAML::ErrorMsg::UNEXPECTED_VALUE_TOKEN;
+
+ out << YAML::BeginSeq;
+ out << YAML::Value << "hi";
+ out << YAML::EndSeq;
+ }
+ }
+
+ namespace {
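+ // Runs one emitter test case: compares the emitted string against the expected output and, on a match, re-parses it to confirm the emitter produced well-formed YAML.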
+ void RunEmitterTest(void (*test)(YAML::Emitter&, std::string&), const std::string& name, int& passed, int& total) {
+ YAML::Emitter out;
+ std::string desiredOutput;
+ test(out, desiredOutput);
+ std::string output = out.c_str();
+ std::string lastError = out.GetLastError();
+
+ if(output == desiredOutput) {
+ try {
+ std::stringstream stream(output);
+ YAML::Parser parser(stream);
+ YAML::Node node;
+ parser.GetNextDocument(node);
+ passed++;
+ } catch(const YAML::Exception& e) {
+ std::cout << "Emitter test failed: " << name << "\n";
+ std::cout << "Parsing output error: " << e.what() << "\n";
+ }
+ } else {
+ std::cout << "Emitter test failed: " << name << "\n";
+ std::cout << "Output:\n";
+ std::cout << output << "<<<\n";
+ std::cout << "Desired output:\n";
+ std::cout << desiredOutput << "<<<\n";
+ if(!out.good())
+ std::cout << "Emitter error: " << lastError << "\n";
+ }
+ total++;
+ }
+
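+ // Runs one negative emitter test case: the emitter must end up in a bad state with exactly the expected error message.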
+ void RunEmitterErrorTest(void (*test)(YAML::Emitter&, std::string&), const std::string& name, int& passed, int& total) {
+ YAML::Emitter out;
+ std::string desiredError;
+ test(out, desiredError);
+ std::string lastError = out.GetLastError();
+ if(!out.good() && lastError == desiredError) {
+ passed++;
+ } else {
+ std::cout << "Emitter test failed: " << name << "\n";
+ if(out.good())
+ std::cout << "No error detected\n";
+ else
+ std::cout << "Detected error: " << lastError << "\n";
+ std::cout << "Expected error: " << desiredError << "\n";
+ }
+ total++;
+ }
+ }
+
+ bool RunEmitterTests()
+ {
+ int passed = 0;
+ int total = 0;
+ RunEmitterTest(&Emitter::SimpleScalar, "simple scalar", passed, total);
+ RunEmitterTest(&Emitter::SimpleSeq, "simple seq", passed, total);
+ RunEmitterTest(&Emitter::SimpleFlowSeq, "simple flow seq", passed, total);
+ RunEmitterTest(&Emitter::EmptyFlowSeq, "empty flow seq", passed, total);
+ RunEmitterTest(&Emitter::NestedBlockSeq, "nested block seq", passed, total);
+ RunEmitterTest(&Emitter::NestedFlowSeq, "nested flow seq", passed, total);
+ RunEmitterTest(&Emitter::SimpleMap, "simple map", passed, total);
+ RunEmitterTest(&Emitter::SimpleFlowMap, "simple flow map", passed, total);
+ RunEmitterTest(&Emitter::MapAndList, "map and list", passed, total);
+ RunEmitterTest(&Emitter::ListAndMap, "list and map", passed, total);
+ RunEmitterTest(&Emitter::NestedBlockMap, "nested block map", passed, total);
+ RunEmitterTest(&Emitter::NestedFlowMap, "nested flow map", passed, total);
+ RunEmitterTest(&Emitter::MapListMix, "map list mix", passed, total);
+ RunEmitterTest(&Emitter::SimpleLongKey, "simple long key", passed, total);
+ RunEmitterTest(&Emitter::SingleLongKey, "single long key", passed, total);
+ RunEmitterTest(&Emitter::ComplexLongKey, "complex long key", passed, total);
+ RunEmitterTest(&Emitter::AutoLongKey, "auto long key", passed, total);
+ RunEmitterTest(&Emitter::ScalarFormat, "scalar format", passed, total);
+ RunEmitterTest(&Emitter::AutoLongKeyScalar, "auto long key scalar", passed, total);
+ RunEmitterTest(&Emitter::LongKeyFlowMap, "long key flow map", passed, total);
+ RunEmitterTest(&Emitter::BlockMapAsKey, "block map as key", passed, total);
+ RunEmitterTest(&Emitter::AliasAndAnchor, "alias and anchor", passed, total);
+ RunEmitterTest(&Emitter::AliasAndAnchorWithNull, "alias and anchor with null", passed, total);
+ RunEmitterTest(&Emitter::AliasAndAnchorInFlow, "alias and anchor in flow", passed, total);
+ RunEmitterTest(&Emitter::SimpleVerbatimTag, "simple verbatim tag", passed, total);
+ RunEmitterTest(&Emitter::VerbatimTagInBlockSeq, "verbatim tag in block seq", passed, total);
+ RunEmitterTest(&Emitter::VerbatimTagInFlowSeq, "verbatim tag in flow seq", passed, total);
+ RunEmitterTest(&Emitter::VerbatimTagInFlowSeqWithNull, "verbatim tag in flow seq with null", passed, total);
+ RunEmitterTest(&Emitter::VerbatimTagInBlockMap, "verbatim tag in block map", passed, total);
+ RunEmitterTest(&Emitter::VerbatimTagInFlowMap, "verbatim tag in flow map", passed, total);
+ RunEmitterTest(&Emitter::VerbatimTagInFlowMapWithNull, "verbatim tag in flow map with null", passed, total);
+ RunEmitterTest(&Emitter::VerbatimTagWithEmptySeq, "verbatim tag with empty seq", passed, total);
+ RunEmitterTest(&Emitter::VerbatimTagWithEmptyMap, "verbatim tag with empty map", passed, total);
+ RunEmitterTest(&Emitter::VerbatimTagWithEmptySeqAndMap, "verbatim tag with empty seq and map", passed, total);
+ RunEmitterTest(&Emitter::ByKindTagWithScalar, "by-kind tag with scalar", passed, total);
+ RunEmitterTest(&Emitter::LocalTagWithScalar, "local tag with scalar", passed, total);
+ RunEmitterTest(&Emitter::ComplexDoc, "complex doc", passed, total);
+ RunEmitterTest(&Emitter::STLContainers, "STL containers", passed, total);
+ RunEmitterTest(&Emitter::SimpleComment, "simple comment", passed, total);
+ RunEmitterTest(&Emitter::MultiLineComment, "multi-line comment", passed, total);
+ RunEmitterTest(&Emitter::ComplexComments, "complex comments", passed, total);
+ RunEmitterTest(&Emitter::InitialComment, "initial comment", passed, total);
+ RunEmitterTest(&Emitter::InitialCommentWithDocIndicator, "initial comment with doc indicator", passed, total);
+ RunEmitterTest(&Emitter::CommentInFlowSeq, "comment in flow seq", passed, total);
+ RunEmitterTest(&Emitter::CommentInFlowMap, "comment in flow map", passed, total);
+ RunEmitterTest(&Emitter::Indentation, "indentation", passed, total);
+ RunEmitterTest(&Emitter::SimpleGlobalSettings, "simple global settings", passed, total);
+ RunEmitterTest(&Emitter::ComplexGlobalSettings, "complex global settings", passed, total);
+ RunEmitterTest(&Emitter::Null, "null", passed, total);
+ RunEmitterTest(&Emitter::EscapedUnicode, "escaped unicode", passed, total);
+ RunEmitterTest(&Emitter::Unicode, "unicode", passed, total);
+ RunEmitterTest(&Emitter::DoubleQuotedUnicode, "double quoted unicode", passed, total);
+ RunEmitterTest(&Emitter::UserType, "user type", passed, total);
+ RunEmitterTest(&Emitter::UserTypeInContainer, "user type in container", passed, total);
+ RunEmitterTest(&Emitter::PointerToInt, "pointer to int", passed, total);
+ RunEmitterTest(&Emitter::PointerToUserType, "pointer to user type", passed, total);
+ RunEmitterTest(&Emitter::NewlineAtEnd, "newline at end", passed, total);
+ RunEmitterTest(&Emitter::NewlineInBlockSequence, "newline in block sequence", passed, total);
+ RunEmitterTest(&Emitter::NewlineInFlowSequence, "newline in flow sequence", passed, total);
+ RunEmitterTest(&Emitter::NewlineInBlockMap, "newline in block map", passed, total);
+ RunEmitterTest(&Emitter::NewlineInFlowMap, "newline in flow map", passed, total);
+ RunEmitterTest(&Emitter::LotsOfNewlines, "lots of newlines", passed, total);
+ RunEmitterTest(&Emitter::Binary, "binary", passed, total);
+ RunEmitterTest(&Emitter::LongBinary, "long binary", passed, total);
+ RunEmitterTest(&Emitter::EmptyBinary, "empty binary", passed, total);
+ RunEmitterTest(&Emitter::ColonAtEndOfScalar, "colon at end of scalar", passed, total);
+ RunEmitterTest(&Emitter::ColonAsScalar, "colon as scalar", passed, total);
+ RunEmitterTest(&Emitter::ColonAtEndOfScalarInFlow, "colon at end of scalar in flow", passed, total);
+ RunEmitterTest(&Emitter::BoolFormatting, "bool formatting", passed, total);
+ RunEmitterTest(&Emitter::DocStartAndEnd, "doc start and end", passed, total);
+ RunEmitterTest(&Emitter::ImplicitDocStart, "implicit doc start", passed, total);
+ RunEmitterTest(&Emitter::EmptyString, "empty string", passed, total);
+ RunEmitterTest(&Emitter::SingleChar, "single char", passed, total);
+ RunEmitterTest(&Emitter::DefaultPrecision, "default precision", passed, total);
+ RunEmitterTest(&Emitter::SetPrecision, "set precision", passed, total);
+ RunEmitterTest(&Emitter::DashInBlockContext, "dash in block context", passed, total);
+ RunEmitterTest(&Emitter::HexAndOct, "hex and oct", passed, total);
+
+ RunEmitterErrorTest(&Emitter::ExtraEndSeq, "extra EndSeq", passed, total);
+ RunEmitterErrorTest(&Emitter::ExtraEndMap, "extra EndMap", passed, total);
+ RunEmitterErrorTest(&Emitter::BadSingleQuoted, "bad single quoted string", passed, total);
+ RunEmitterErrorTest(&Emitter::InvalidAnchor, "invalid anchor", passed, total);
+ RunEmitterErrorTest(&Emitter::InvalidAlias, "invalid alias", passed, total);
+ RunEmitterErrorTest(&Emitter::MissingKey, "missing key", passed, total);
+ RunEmitterErrorTest(&Emitter::MissingValue, "missing value", passed, total);
+ RunEmitterErrorTest(&Emitter::UnexpectedKey, "unexpected key", passed, total);
+ RunEmitterErrorTest(&Emitter::UnexpectedValue, "unexpected value", passed, total);
+ RunEmitterErrorTest(&Emitter::BadLocalTag, "bad local tag", passed, total);
+
+ std::cout << "Emitter tests: " << passed << "/" << total << " passed\n";
+ return passed == total;
+ }
+}
+
diff --git a/yaml-cpp/test/emittertests.h b/yaml-cpp/test/emittertests.h
new file mode 100755
index 00000000..e7c6ac50
--- /dev/null
+++ b/yaml-cpp/test/emittertests.h
@@ -0,0 +1,13 @@
+#ifndef EMITTERTESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define EMITTERTESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+namespace Test {
+ bool RunEmitterTests();
+}
+
+#endif // EMITTERTESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
diff --git a/yaml-cpp/test/main.cpp b/yaml-cpp/test/main.cpp
new file mode 100755
index 00000000..64c69f10
--- /dev/null
+++ b/yaml-cpp/test/main.cpp
@@ -0,0 +1,7 @@
+#include "tests.h"
+
+int main()
+{
+ Test::RunAll();
+ return 0;
+}
diff --git a/yaml-cpp/test/nodetests.h b/yaml-cpp/test/nodetests.h
new file mode 100755
index 00000000..733e782e
--- /dev/null
+++ b/yaml-cpp/test/nodetests.h
@@ -0,0 +1,13 @@
+#ifndef NODETESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define NODETESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+namespace Test {
+ bool RunNodeTests();
+}
+
+#endif // NODETESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
diff --git a/yaml-cpp/test/old-api/parsertests.cpp b/yaml-cpp/test/old-api/parsertests.cpp
new file mode 100755
index 00000000..de7f1238
--- /dev/null
+++ b/yaml-cpp/test/old-api/parsertests.cpp
@@ -0,0 +1,1237 @@
+#include "tests.h"
+#include "yaml-cpp/yaml.h"
+#include <sstream>
+#include <algorithm>
+#include <iostream>
+
+namespace Test
+{
+ namespace Parser {
+ void SimpleScalar(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar = "Hello, World!";
+ desiredOutput = "Hello, World!";
+ }
+
+ void MultiLineScalar(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar =
+ "normal scalar, but\n"
+ "over several lines";
+ desiredOutput = "normal scalar, but over several lines";
+ }
+
+ void LiteralScalar(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar =
+ "|\n"
+ " literal scalar - so we can draw ASCII:\n"
+ " \n"
+ " - -\n"
+ " | - |\n"
+ " -----\n";
+ desiredOutput =
+ "literal scalar - so we can draw ASCII:\n"
+ "\n"
+ " - -\n"
+ " | - |\n"
+ " -----\n";
+ }
+
+ void FoldedScalar(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar =
+ ">\n"
+ " and a folded scalar... so we\n"
+ " can just keep writing various\n"
+ " things. And if we want to keep indentation:\n"
+ " \n"
+ " we just indent a little\n"
+ " see, this stays indented";
+ desiredOutput =
+ "and a folded scalar... so we"
+ " can just keep writing various"
+ " things. And if we want to keep indentation:\n"
+ "\n"
+ " we just indent a little\n"
+ " see, this stays indented";
+ }
+
+ void ChompedFoldedScalar(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar =
+ ">-\n"
+ " Here's a folded scalar\n"
+ " that gets chomped.";
+ desiredOutput =
+ "Here's a folded scalar"
+ " that gets chomped.";
+ }
+
+ void ChompedLiteralScalar(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar =
+ "|-\n"
+ " Here's a literal scalar\n"
+ " that gets chomped.";
+ desiredOutput =
+ "Here's a literal scalar\n"
+ "that gets chomped.";
+ }
+
+ void FoldedScalarWithIndent(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar =
+ ">2\n"
+ " Here's a folded scalar\n"
+ " that starts with some indentation.";
+ desiredOutput =
+ " Here's a folded scalar\n"
+ "that starts with some indentation.";
+ }
+
+ void ColonScalar(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar = "::vector";
+ desiredOutput = "::vector";
+ }
+
+ void QuotedScalar(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar = "\": - ()\"";
+ desiredOutput = ": - ()";
+ }
+
+ void CommaScalar(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar = "Up, up, and away!";
+ desiredOutput = "Up, up, and away!";
+ }
+
+ void DashScalar(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar = "-123";
+ desiredOutput = "-123";
+ }
+
+ void URLScalar(std::string& inputScalar, std::string& desiredOutput)
+ {
+ inputScalar = "http://example.com/foo#bar";
+ desiredOutput = "http://example.com/foo#bar";
+ }
+
+ bool SimpleSeq()
+ {
+ std::string input =
+ "- eggs\n"
+ "- bread\n"
+ "- milk";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ if(doc[0].to<std::string>() != "eggs")
+ return false;
+ if(doc[1].to<std::string>() != "bread")
+ return false;
+ if(doc[2].to<std::string>() != "milk")
+ return false;
+
+ return true;
+ }
+
+ bool SimpleMap()
+ {
+ std::string input =
+ "name: Prince Fielder\n"
+ "position: 1B\n"
+ "bats: L";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ doc["name"] >> output;
+ if(output != "Prince Fielder")
+ return false;
+ doc["position"] >> output;
+ if(output != "1B")
+ return false;
+ doc["bats"] >> output;
+ if(output != "L")
+ return false;
+
+ return true;
+ }
+
+ bool FlowSeq()
+ {
+ std::string input = "[ 2 , 3, 5 , 7, 11]";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ int output;
+ doc[0] >> output;
+ if(output != 2)
+ return false;
+ doc[1] >> output;
+ if(output != 3)
+ return false;
+ doc[2] >> output;
+ if(output != 5)
+ return false;
+ doc[3] >> output;
+ if(output != 7)
+ return false;
+ doc[4] >> output;
+ if(output != 11)
+ return false;
+
+ return true;
+ }
+
+ bool FlowMap()
+ {
+ std::string input = "{hr: 65, avg: 0.278}";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ doc["hr"] >> output;
+ if(output != "65")
+ return false;
+ doc["avg"] >> output;
+ if(output != "0.278")
+ return false;
+
+ return true;
+ }
+
+ bool FlowMapWithOmittedKey()
+ {
+ std::string input = "{: omitted key}";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ doc[YAML::Null] >> output;
+ if(output != "omitted key")
+ return false;
+
+ return true;
+ }
+
+ bool FlowMapWithOmittedValue()
+ {
+ std::string input = "{a: b, c:, d:}";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ doc["a"] >> output;
+ if(output != "b")
+ return false;
+ if(!IsNull(doc["c"]))
+ return false;
+ if(!IsNull(doc["d"]))
+ return false;
+
+ return true;
+ }
+
+ bool FlowMapWithSoloEntry()
+ {
+ std::string input = "{a: b, c, d: e}";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ doc["a"] >> output;
+ if(output != "b")
+ return false;
+ if(!IsNull(doc["c"]))
+ return false;
+ doc["d"] >> output;
+ if(output != "e")
+ return false;
+
+ return true;
+ }
+
+ bool FlowMapEndingWithSoloEntry()
+ {
+ std::string input = "{a: b, c}";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ doc["a"] >> output;
+ if(output != "b")
+ return false;
+ if(!IsNull(doc["c"]))
+ return false;
+
+ return true;
+ }
+
+ bool QuotedSimpleKeys()
+ {
+ std::string KeyValue[3] = { "\"double\": double\n", "'single': single\n", "plain: plain\n" };
+
+ int perm[3] = { 0, 1, 2 };
+ do {
+ std::string input = KeyValue[perm[0]] + KeyValue[perm[1]] + KeyValue[perm[2]];
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ doc["double"] >> output;
+ if(output != "double")
+ return false;
+ doc["single"] >> output;
+ if(output != "single")
+ return false;
+ doc["plain"] >> output;
+ if(output != "plain")
+ return false;
+ } while(std::next_permutation(perm, perm + 3));
+
+ return true;
+ }
+
+ bool CompressedMapAndSeq()
+ {
+ std::string input = "key:\n- one\n- two";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ const YAML::Node& seq = doc["key"];
+ if(seq.size() != 2)
+ return false;
+
+ std::string output;
+ seq[0] >> output;
+ if(output != "one")
+ return false;
+ seq[1] >> output;
+ if(output != "two")
+ return false;
+
+ return true;
+ }
+
+ bool NullBlockSeqEntry()
+ {
+ std::string input = "- hello\n-\n- world";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ doc[0] >> output;
+ if(output != "hello")
+ return false;
+ if(!IsNull(doc[1]))
+ return false;
+ doc[2] >> output;
+ if(output != "world")
+ return false;
+
+ return true;
+ }
+
+ bool NullBlockMapKey()
+ {
+ std::string input = ": empty key";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ doc[YAML::Null] >> output;
+ if(output != "empty key")
+ return false;
+
+ return true;
+ }
+
+ bool NullBlockMapValue()
+ {
+ std::string input = "empty value:";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ if(!IsNull(doc["empty value"]))
+ return false;
+
+ return true;
+ }
+
+ bool SimpleAlias()
+ {
+ std::string input = "- &alias test\n- *alias";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ doc[0] >> output;
+ if(output != "test")
+ return false;
+
+ doc[1] >> output;
+ if(output != "test")
+ return false;
+
+ if(doc.size() != 2)
+ return false;
+
+ return true;
+ }
+
+ bool AliasWithNull()
+ {
+ std::string input = "- &alias\n- *alias";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ if(!IsNull(doc[0]))
+ return false;
+
+ if(!IsNull(doc[1]))
+ return false;
+
+ if(doc.size() != 2)
+ return false;
+
+ return true;
+ }
+
+ bool AnchorInSimpleKey()
+ {
+ std::string input = "- &a b: c\n- *a";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ if(doc.size() != 2)
+ return false;
+
+ std::string output;
+ doc[0]["b"] >> output;
+ if(output != "c")
+ return false;
+
+ doc[1] >> output;
+ if(output != "b")
+ return false;
+
+ return true;
+ }
+
+ bool AliasAsSimpleKey()
+ {
+ std::string input = "- &a b\n- *a : c";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ if(doc.size() != 2)
+ return false;
+
+ std::string output;
+ doc[0] >> output;
+ if(output != "b")
+ return false;
+
+ doc[1]["b"] >> output;
+ if(output != "c")
+ return false;
+
+ return true;
+ }
+
+ bool ExplicitDoc()
+ {
+ std::string input = "---\n- one\n- two";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ if(doc.size() != 2)
+ return false;
+
+ std::string output;
+ doc[0] >> output;
+ if(output != "one")
+ return false;
+ doc[1] >> output;
+ if(output != "two")
+ return false;
+
+ return true;
+ }
+
+ bool MultipleDocs()
+ {
+ std::string input = "---\nname: doc1\n---\nname: doc2";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::string output;
+ doc["name"] >> output;
+ if(output != "doc1")
+ return false;
+
+ if(!parser)
+ return false;
+
+ parser.GetNextDocument(doc);
+ doc["name"] >> output;
+ if(output != "doc2")
+ return false;
+
+ return true;
+ }
+
+ bool ExplicitEndDoc()
+ {
+ std::string input = "- one\n- two\n...\n...";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ if(doc.size() != 2)
+ return false;
+
+ std::string output;
+ doc[0] >> output;
+ if(output != "one")
+ return false;
+ doc[1] >> output;
+ if(output != "two")
+ return false;
+
+ return true;
+ }
+
+ bool MultipleDocsWithSomeExplicitIndicators()
+ {
+ std::string input =
+ "- one\n- two\n...\n"
+ "---\nkey: value\n...\n...\n"
+ "- three\n- four\n"
+ "---\nkey: value";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ std::string output;
+
+ parser.GetNextDocument(doc);
+ if(doc.size() != 2)
+ return false;
+ doc[0] >> output;
+ if(output != "one")
+ return false;
+ doc[1] >> output;
+ if(output != "two")
+ return false;
+
+ parser.GetNextDocument(doc);
+ doc["key"] >> output;
+ if(output != "value")
+ return false;
+
+ parser.GetNextDocument(doc);
+ if(doc.size() != 2)
+ return false;
+ doc[0] >> output;
+ if(output != "three")
+ return false;
+ doc[1] >> output;
+ if(output != "four")
+ return false;
+
+ parser.GetNextDocument(doc);
+ doc["key"] >> output;
+ if(output != "value")
+ return false;
+
+ return true;
+ }
+
+ bool BlockKeyWithNullValue()
+ {
+ std::string input =
+ "key:\n"
+ "just a key: value";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+
+ parser.GetNextDocument(doc);
+ if(doc.size() != 2)
+ return false;
+ if(!IsNull(doc["key"]))
+ return false;
+ if(doc["just a key"].to<std::string>() != "value")
+ return false;
+
+ return true;
+ }
+
+ bool Bases()
+ {
+ std::string input =
+ "- 15\n"
+ "- 0x10\n"
+ "- 030\n"
+ "- 0xffffffff\n";
+
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+
+ parser.GetNextDocument(doc);
+ if(doc.size() != 4)
+ return false;
+ if(doc[0].to<int>() != 15)
+ return false;
+ if(doc[1].to<int>() != 0x10)
+ return false;
+ if(doc[2].to<int>() != 030)
+ return false;
+ if(doc[3].to<unsigned>() != 0xffffffff)
+ return false;
+ return true;
+ }
+
+ bool KeyNotFound()
+ {
+ std::string input = "key: value";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ try {
+ doc["bad key"];
+ } catch(const YAML::Exception& e) {
+ if(e.msg != std::string(YAML::ErrorMsg::KEY_NOT_FOUND) + ": bad key")
+ throw;
+ }
+
+ try {
+ doc[5];
+ } catch(const YAML::Exception& e) {
+ if(e.msg != std::string(YAML::ErrorMsg::KEY_NOT_FOUND) + ": 5")
+ throw;
+ }
+
+ try {
+ doc[2.5];
+ } catch(const YAML::Exception& e) {
+ if(e.msg != std::string(YAML::ErrorMsg::KEY_NOT_FOUND) + ": 2.5")
+ throw;
+ }
+
+ return true;
+ }
+
+ bool DuplicateKey()
+ {
+ std::string input = "{a: 1, b: 2, c: 3, a: 4}";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ if(doc["a"].to<int>() != 4)
+ return false;
+ if(doc["b"].to<int>() != 2)
+ return false;
+ if(doc["c"].to<int>() != 3)
+ return false;
+ return true;
+ }
+
+ void PrepareNodeForTagExam(YAML::Node& doc, const std::string& input)
+ {
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ parser.GetNextDocument(doc);
+ }
+
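+ // Thrown by ExpectedTagValue when a node's resolved tag differs from the tag the test expects.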
+ struct TagMismatch: public std::exception {
+ TagMismatch(const std::string& actualTag, const std::string& expectedTag) {
+ std::stringstream output;
+ output << "Tag has value \"" << actualTag << "\" but \"" << expectedTag << "\" was expected";
+ what_ = output.str();
+ }
+ virtual ~TagMismatch() throw() {}
+ virtual const char *what() const throw() { return what_.c_str(); }
+
+ private:
+ std::string what_;
+ };
+
+ bool ExpectedTagValue(YAML::Node& node, const char* tag)
+ {
+ if(node.Tag() == tag)
+ return true;
+
+ throw TagMismatch(node.Tag(), tag);
+ }
+
+ bool DefaultPlainScalarTag()
+ {
+ YAML::Node node;
+ PrepareNodeForTagExam(node, "--- 12");
+
+ return ExpectedTagValue(node, "?");
+ }
+
+ bool DefaultSingleQuotedScalarTag()
+ {
+ YAML::Node node;
+ PrepareNodeForTagExam(node, "--- '12'");
+
+ return ExpectedTagValue(node, "!");
+ }
+
+ bool ExplicitNonSpecificPlainScalarTag()
+ {
+ YAML::Node node;
+ PrepareNodeForTagExam(node, "--- ! 12");
+
+ return ExpectedTagValue(node, "!");
+ }
+
+ bool BasicLocalTag()
+ {
+ YAML::Node node;
+ PrepareNodeForTagExam(node, "--- !foo 12");
+
+ return ExpectedTagValue(node, "!foo");
+ }
+
+ bool VerbatimLocalTag()
+ {
+ YAML::Node node;
+ PrepareNodeForTagExam(node, "--- !<!foo> 12");
+
+ return ExpectedTagValue(node, "!foo");
+ }
+
+ bool StandardShortcutTag()
+ {
+ YAML::Node node;
+ PrepareNodeForTagExam(node, "--- !!int 12");
+
+ return ExpectedTagValue(node, "tag:yaml.org,2002:int");
+ }
+
+ bool VerbatimURITag()
+ {
+ YAML::Node node;
+ PrepareNodeForTagExam(node, "--- !<tag:yaml.org,2002:int> 12");
+
+ return ExpectedTagValue(node, "tag:yaml.org,2002:int");
+ }
+
+ bool DefaultSequenceTag()
+ {
+ YAML::Node node;
+ PrepareNodeForTagExam(node, "--- [12]");
+
+ return ExpectedTagValue(node, "?");
+ }
+
+ bool ExplicitNonSpecificSequenceTag()
+ {
+ YAML::Node node;
+ PrepareNodeForTagExam(node, "--- ! [12]");
+
+ return ExpectedTagValue(node, "!");
+ }
+
+ bool Infinity()
+ {
+ std::string input =
+ "- .inf\n"
+ "- .Inf\n"
+ "- .INF\n"
+ "- +.inf\n"
+ "- +.Inf\n"
+ "- +.INF\n"
+ "- -.inf\n"
+ "- -.Inf\n"
+ "- -.INF\n";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ for(unsigned i=0;i<doc.size();i++)
+ if(doc[i].to<double>() != (i < 6 ? +1 : -1) * std::numeric_limits<double>::infinity())
+ return false;
+ for(unsigned i=0;i<doc.size();i++)
+ if(doc[i].to<long double>() != (i < 6 ? +1 : -1) * std::numeric_limits<long double>::infinity())
+ return false;
+ for(unsigned i=0;i<doc.size();i++)
+ if(doc[i].to<float>() != (i < 6 ? +1 : -1) * std::numeric_limits<float>::infinity())
+ return false;
+ return true;
+ }
+
+ bool NaN()
+ {
+ std::string input =
+ "- .nan\n"
+ "- .NaN\n"
+ "- .NAN\n";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ for(unsigned i=0;i<doc.size();i++) {
+ double d;
+ doc[i] >> d;
+ if(d == d)
+ return false;
+ }
+ for(unsigned i=0;i<doc.size();i++) {
+ long double d;
+ doc[i] >> d;
+ if(d == d)
+ return false;
+ }
+ for(unsigned i=0;i<doc.size();i++) {
+ float d;
+ doc[i] >> d;
+ if(d == d)
+ return false;
+ }
+ return true;
+ }
+
+ bool NonConstKey()
+ {
+ std::string input = "{a: 1}";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ std::vector<char> key(2);
+ key[0] = 'a';
+ key[1] = '\0';
+ if(doc[&key[0]].to<int>() != 1)
+ return false;
+ return true;
+ }
+
+ bool SingleChar()
+ {
+ std::string input = "5";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ return doc.to<int>() == 5;
+ }
+
+ bool QuotedNewline()
+ {
+ std::string input = "foo: \"\\n\"";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ return doc["foo"].to<std::string>() == "\n";
+ }
+
+ bool DoubleAsInt()
+ {
+ std::string input = "1.5";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ try {
+ doc.to<int>();
+ } catch(const YAML::InvalidScalar& e) {
+ return true;
+ }
+
+ return false;
+ }
+
+ bool Binary()
+ {
+ std::string input = "[!!binary \"SGVsbG8sIFdvcmxkIQ==\", !!binary \"TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4K\"]";
+ std::stringstream stream(input);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ if(doc[0].to<YAML::Binary>() != YAML::Binary(reinterpret_cast<const unsigned char*>("Hello, World!"), 13))
+ return false;
+ if(doc[1].to<YAML::Binary>() != YAML::Binary(reinterpret_cast<const unsigned char*>("Man is distinguished, not only by his reason, but by this singular passion from other animals, which is a lust of the mind, that by a perseverance of delight in the continued and indefatigable generation of knowledge, exceeds the short vehemence of any carnal pleasure.\n"), 270))
+ return false;
+ return true;
+ }
+ }
+
+ namespace {
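+ // Parses the test's input scalar and checks that the parsed node reads back as the expected string.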
+ void RunScalarParserTest(void (*test)(std::string&, std::string&), const std::string& name, int& passed, int& total) {
+ std::string error;
+ std::string inputScalar, desiredOutput;
+ std::string output;
+ bool ok = true;
+ try {
+ test(inputScalar, desiredOutput);
+ std::stringstream stream(inputScalar);
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+ doc >> output;
+ } catch(const YAML::Exception& e) {
+ ok = false;
+ error = e.what();
+ }
+ if(ok && output == desiredOutput) {
+ passed++;
+ } else {
+ std::cout << "Parser test failed: " << name << "\n";
+ if(error != "")
+ std::cout << "Caught exception: " << error << "\n";
+ else {
+ std::cout << "Output:\n" << output << "<<<\n";
+ std::cout << "Desired output:\n" << desiredOutput << "<<<\n";
+ }
+ }
+ total++;
+ }
+
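+ // Runs a boolean parser test, treating any YAML or tag-mismatch exception as a failure.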
+ void RunParserTest(bool (*test)(), const std::string& name, int& passed, int& total) {
+ std::string error;
+ bool ok = true;
+ try {
+ ok = test();
+ } catch(const YAML::Exception& e) {
+ ok = false;
+ error = e.what();
+ } catch(const Parser::TagMismatch& e) {
+ ok = false;
+ error = e.what();
+ }
+ if(ok) {
+ passed++;
+ } else {
+ std::cout << "Parser test failed: " << name << "\n";
+ if(error != "")
+ std::cout << " Caught exception: " << error << "\n";
+ }
+ total++;
+ }
+
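+ // Encoding helpers: each EncodingFn writes a single Unicode code point to the stream in a specific encoding.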
+ typedef void (*EncodingFn)(std::ostream&, int);
+
+ inline char Byte(int ch)
+ {
+ return static_cast<char>(static_cast<unsigned char>(static_cast<unsigned int>(ch)));
+ }
+
+ void EncodeToUtf8(std::ostream& stream, int ch)
+ {
+ if (ch <= 0x7F)
+ {
+ stream << Byte(ch);
+ }
+ else if (ch <= 0x7FF)
+ {
+ stream << Byte(0xC0 | (ch >> 6));
+ stream << Byte(0x80 | (ch & 0x3F));
+ }
+ else if (ch <= 0xFFFF)
+ {
+ stream << Byte(0xE0 | (ch >> 12));
+ stream << Byte(0x80 | ((ch >> 6) & 0x3F));
+ stream << Byte(0x80 | (ch & 0x3F));
+ }
+ else if (ch <= 0x1FFFFF)
+ {
+ stream << Byte(0xF0 | (ch >> 18));
+ stream << Byte(0x80 | ((ch >> 12) & 0x3F));
+ stream << Byte(0x80 | ((ch >> 6) & 0x3F));
+ stream << Byte(0x80 | (ch & 0x3F));
+ }
+ }
+
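+ // Code points above U+FFFF are written as a UTF-16 surrogate pair; returns false for code points that fit in a single unit.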
+ bool SplitUtf16HighChar(std::ostream& stream, EncodingFn encoding, int ch)
+ {
+ int biasedValue = ch - 0x10000;
+ if (biasedValue < 0)
+ {
+ return false;
+ }
+ int high = 0xD800 | (biasedValue >> 10);
+ int low = 0xDC00 | (biasedValue & 0x3FF);
+ encoding(stream, high);
+ encoding(stream, low);
+ return true;
+ }
+
+ void EncodeToUtf16LE(std::ostream& stream, int ch)
+ {
+ if (!SplitUtf16HighChar(stream, &EncodeToUtf16LE, ch))
+ {
+ stream << Byte(ch & 0xFF) << Byte(ch >> 8);
+ }
+ }
+
+ void EncodeToUtf16BE(std::ostream& stream, int ch)
+ {
+ if (!SplitUtf16HighChar(stream, &EncodeToUtf16BE, ch))
+ {
+ stream << Byte(ch >> 8) << Byte(ch & 0xFF);
+ }
+ }
+
+ void EncodeToUtf32LE(std::ostream& stream, int ch)
+ {
+ stream << Byte(ch & 0xFF) << Byte((ch >> 8) & 0xFF)
+ << Byte((ch >> 16) & 0xFF) << Byte((ch >> 24) & 0xFF);
+ }
+
+ void EncodeToUtf32BE(std::ostream& stream, int ch)
+ {
+ stream << Byte((ch >> 24) & 0xFF) << Byte((ch >> 16) & 0xFF)
+ << Byte((ch >> 8) & 0xFF) << Byte(ch & 0xFF);
+ }
+
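+ // Builds a YAML document of literal block scalars in the target encoding (optionally preceded by a BOM) and keeps UTF-8 copies of each entry for comparison.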
+ class EncodingTester
+ {
+ public:
+ EncodingTester(EncodingFn encoding, bool declareEncoding)
+ {
+ if (declareEncoding)
+ {
+ encoding(m_yaml, 0xFEFF);
+ }
+
+ AddEntry(encoding, 0x0021, 0x007E); // Basic Latin
+ AddEntry(encoding, 0x00A1, 0x00FF); // Latin-1 Supplement
+ AddEntry(encoding, 0x0660, 0x06FF); // Arabic (largest contiguous block)
+
+ // CJK unified ideographs (multiple lines)
+ AddEntry(encoding, 0x4E00, 0x4EFF);
+ AddEntry(encoding, 0x4F00, 0x4FFF);
+ AddEntry(encoding, 0x5000, 0x51FF); // 512 character line
+ AddEntry(encoding, 0x5200, 0x54FF); // 768 character line
+ AddEntry(encoding, 0x5500, 0x58FF); // 1024 character line
+
+ AddEntry(encoding, 0x103A0, 0x103C3); // Old Persian
+
+ m_yaml.seekg(0, std::ios::beg);
+ }
+
+ std::istream& stream() {return m_yaml;}
+ const std::vector<std::string>& entries() {return m_entries;}
+
+ private:
+ std::stringstream m_yaml;
+ std::vector<std::string> m_entries;
+
+ void AddEntry(EncodingFn encoding, int startCh, int endCh)
+ {
+ encoding(m_yaml, '-');
+ encoding(m_yaml, ' ');
+ encoding(m_yaml, '|');
+ encoding(m_yaml, '\n');
+ encoding(m_yaml, ' ');
+ encoding(m_yaml, ' ');
+
+ std::stringstream entry;
+ for (int ch = startCh; ch <= endCh; ++ch)
+ {
+ encoding(m_yaml, ch);
+ EncodeToUtf8(entry, ch);
+ }
+ encoding(m_yaml, '\n');
+
+ m_entries.push_back(entry.str());
+ }
+ };
+
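+ // Parses the generated stream and compares the scalars it finds against the UTF-8 reference entries.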
+ void RunEncodingTest(EncodingFn encoding, bool declareEncoding, const std::string& name, int& passed, int& total)
+ {
+ EncodingTester tester(encoding, declareEncoding);
+ std::string error;
+ bool ok = true;
+ try {
+ YAML::Parser parser(tester.stream());
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+
+ YAML::Iterator itNode = doc.begin();
+ std::vector<std::string>::const_iterator itEntry = tester.entries().begin();
+ for (; (itNode != doc.end()) && (itEntry != tester.entries().end()); ++itNode, ++itEntry)
+ {
+ std::string stScalarValue;
+ if (!itNode->GetScalar(stScalarValue) || (stScalarValue != *itEntry))
+ {
+ break;
+ }
+ }
+
+ if ((itNode != doc.end()) || (itEntry != tester.entries().end()))
+ {
+ ok = false;
+ }
+ } catch(const YAML::Exception& e) {
+ ok = false;
+ error = e.msg;
+ }
+ if(ok) {
+ passed++;
+ } else {
+ std::cout << "Parser test failed: " << name << "\n";
+ if(error != "")
+ std::cout << " Caught exception: " << error << "\n";
+ }
+ total++;
+ }
+ }
+
+ bool RunParserTests()
+ {
+ int passed = 0;
+ int total = 0;
+ RunScalarParserTest(&Parser::SimpleScalar, "simple scalar", passed, total);
+ RunScalarParserTest(&Parser::MultiLineScalar, "multi-line scalar", passed, total);
+ RunScalarParserTest(&Parser::LiteralScalar, "literal scalar", passed, total);
+ RunScalarParserTest(&Parser::FoldedScalar, "folded scalar", passed, total);
+ RunScalarParserTest(&Parser::ChompedFoldedScalar, "chomped folded scalar", passed, total);
+ RunScalarParserTest(&Parser::ChompedLiteralScalar, "chomped literal scalar", passed, total);
+ RunScalarParserTest(&Parser::FoldedScalarWithIndent, "folded scalar with indent", passed, total);
+ RunScalarParserTest(&Parser::ColonScalar, "colon scalar", passed, total);
+ RunScalarParserTest(&Parser::QuotedScalar, "quoted scalar", passed, total);
+ RunScalarParserTest(&Parser::CommaScalar, "comma scalar", passed, total);
+ RunScalarParserTest(&Parser::DashScalar, "dash scalar", passed, total);
+ RunScalarParserTest(&Parser::URLScalar, "url scalar", passed, total);
+
+ RunParserTest(&Parser::SimpleSeq, "simple seq", passed, total);
+ RunParserTest(&Parser::SimpleMap, "simple map", passed, total);
+ RunParserTest(&Parser::FlowSeq, "flow seq", passed, total);
+ RunParserTest(&Parser::FlowMap, "flow map", passed, total);
+ RunParserTest(&Parser::FlowMapWithOmittedKey, "flow map with omitted key", passed, total);
+ RunParserTest(&Parser::FlowMapWithOmittedValue, "flow map with omitted value", passed, total);
+ RunParserTest(&Parser::FlowMapWithSoloEntry, "flow map with solo entry", passed, total);
+ RunParserTest(&Parser::FlowMapEndingWithSoloEntry, "flow map ending with solo entry", passed, total);
+ RunParserTest(&Parser::QuotedSimpleKeys, "quoted simple keys", passed, total);
+ RunParserTest(&Parser::CompressedMapAndSeq, "compressed map and seq", passed, total);
+ RunParserTest(&Parser::NullBlockSeqEntry, "null block seq entry", passed, total);
+ RunParserTest(&Parser::NullBlockMapKey, "null block map key", passed, total);
+ RunParserTest(&Parser::NullBlockMapValue, "null block map value", passed, total);
+ RunParserTest(&Parser::SimpleAlias, "simple alias", passed, total);
+ RunParserTest(&Parser::AliasWithNull, "alias with null", passed, total);
+ RunParserTest(&Parser::AnchorInSimpleKey, "anchor in simple key", passed, total);
+ RunParserTest(&Parser::AliasAsSimpleKey, "alias as simple key", passed, total);
+ RunParserTest(&Parser::ExplicitDoc, "explicit doc", passed, total);
+ RunParserTest(&Parser::MultipleDocs, "multiple docs", passed, total);
+ RunParserTest(&Parser::ExplicitEndDoc, "explicit end doc", passed, total);
+ RunParserTest(&Parser::MultipleDocsWithSomeExplicitIndicators, "multiple docs with some explicit indicators", passed, total);
+ RunParserTest(&Parser::BlockKeyWithNullValue, "block key with null value", passed, total);
+ RunParserTest(&Parser::Bases, "bases", passed, total);
+ RunParserTest(&Parser::KeyNotFound, "key not found", passed, total);
+ RunParserTest(&Parser::DuplicateKey, "duplicate key", passed, total);
+ RunParserTest(&Parser::DefaultPlainScalarTag, "default plain scalar tag", passed, total);
+ RunParserTest(&Parser::DefaultSingleQuotedScalarTag, "default single-quoted scalar tag", passed, total);
+ RunParserTest(&Parser::ExplicitNonSpecificPlainScalarTag, "explicit, non-specific plain scalar tag", passed, total);
+ RunParserTest(&Parser::BasicLocalTag, "basic local tag", passed, total);
+ RunParserTest(&Parser::VerbatimLocalTag, "verbatim local tag", passed, total);
+ RunParserTest(&Parser::StandardShortcutTag, "standard shortcut tag", passed, total);
+ RunParserTest(&Parser::VerbatimURITag, "verbatim URI tag", passed, total);
+ RunParserTest(&Parser::DefaultPlainScalarTag, "default plain scalar tag", passed, total);
+ RunParserTest(&Parser::DefaultSequenceTag, "default sequence tag", passed, total);
+ RunParserTest(&Parser::ExplicitNonSpecificSequenceTag, "explicit, non-specific sequence tag", passed, total);
+ RunParserTest(&Parser::Infinity, "infinity", passed, total);
+ RunParserTest(&Parser::NaN, "NaN", passed, total);
+ RunParserTest(&Parser::NonConstKey, "non const key", passed, total);
+ RunParserTest(&Parser::SingleChar, "single char", passed, total);
+ RunParserTest(&Parser::QuotedNewline, "quoted newline", passed, total);
+ RunParserTest(&Parser::DoubleAsInt, "double as int", passed, total);
+ RunParserTest(&Parser::Binary, "binary", passed, total);
+
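+ // Re-run the same test document through each Unicode encoding, with and
+ // without a byte order mark, to exercise the stream's encoding detection.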
+ RunEncodingTest(&EncodeToUtf8, false, "UTF-8, no BOM", passed, total);
+ RunEncodingTest(&EncodeToUtf8, true, "UTF-8 with BOM", passed, total);
+ RunEncodingTest(&EncodeToUtf16LE, false, "UTF-16LE, no BOM", passed, total);
+ RunEncodingTest(&EncodeToUtf16LE, true, "UTF-16LE with BOM", passed, total);
+ RunEncodingTest(&EncodeToUtf16BE, false, "UTF-16BE, no BOM", passed, total);
+ RunEncodingTest(&EncodeToUtf16BE, true, "UTF-16BE with BOM", passed, total);
+ RunEncodingTest(&EncodeToUtf32LE, false, "UTF-32LE, no BOM", passed, total);
+ RunEncodingTest(&EncodeToUtf32LE, true, "UTF-32LE with BOM", passed, total);
+ RunEncodingTest(&EncodeToUtf32BE, false, "UTF-32BE, no BOM", passed, total);
+ RunEncodingTest(&EncodeToUtf32BE, true, "UTF-32BE with BOM", passed, total);
+
+ std::cout << "Parser tests: " << passed << "/" << total << " passed\n";
+ return passed == total;
+ }
+}
+
diff --git a/yaml-cpp/test/old-api/spectests.cpp b/yaml-cpp/test/old-api/spectests.cpp
new file mode 100755
index 00000000..fb5505be
--- /dev/null
+++ b/yaml-cpp/test/old-api/spectests.cpp
@@ -0,0 +1,1456 @@
+#include "spectests.h"
+#include "specexamples.h"
+#include "yaml-cpp/yaml.h"
+#include <fstream>
+#include <sstream>
+#include <vector>
+#include <iostream>
+
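+// YAML_ASSERT turns a failed condition into a returned error string so the
+// harness can report exactly which check failed. PARSE wraps the old-API
+// boilerplate of building a std::stringstream, a YAML::Parser and a YAML::Node
+// for a spec example; PARSE_NEXT reads the next document from that parser.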
+#define YAML_ASSERT(cond) do { if(!(cond)) return " Assert failed: " #cond; } while(false)
+#define PARSE(doc, input) \
+ std::stringstream stream(input);\
+ YAML::Parser parser(stream);\
+ YAML::Node doc;\
+ parser.GetNextDocument(doc)
+#define PARSE_NEXT(doc) parser.GetNextDocument(doc)
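+// Note that PARSE declares `stream`, `parser` and `doc` in the enclosing
+// scope, so each test uses it once and pulls any further documents with
+// PARSE_NEXT (see TwoDocumentsInAStream, example 2.7).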
+
+namespace Test {
+ namespace Spec {
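+ // Each test corresponds to the numbered example in the YAML specification
+ // (SeqScalars is example 2.1, and so on) and asserts the structure and
+ // values that example is expected to produce.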
+ // 2.1
+ TEST SeqScalars() {
+ PARSE(doc, ex2_1);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc[0].to<std::string>() == "Mark McGwire");
+ YAML_ASSERT(doc[1].to<std::string>() == "Sammy Sosa");
+ YAML_ASSERT(doc[2].to<std::string>() == "Ken Griffey");
+ return true;
+ }
+
+ // 2.2
+ TEST MappingScalarsToScalars() {
+ PARSE(doc, ex2_2);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["hr"].to<std::string>() == "65");
+ YAML_ASSERT(doc["avg"].to<std::string>() == "0.278");
+ YAML_ASSERT(doc["rbi"].to<std::string>() == "147");
+ return true;
+ }
+
+ // 2.3
+ TEST MappingScalarsToSequences() {
+ PARSE(doc, ex2_3);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["american"].size() == 3);
+ YAML_ASSERT(doc["american"][0].to<std::string>() == "Boston Red Sox");
+ YAML_ASSERT(doc["american"][1].to<std::string>() == "Detroit Tigers");
+ YAML_ASSERT(doc["american"][2].to<std::string>() == "New York Yankees");
+ YAML_ASSERT(doc["national"].size() == 3);
+ YAML_ASSERT(doc["national"][0].to<std::string>() == "New York Mets");
+ YAML_ASSERT(doc["national"][1].to<std::string>() == "Chicago Cubs");
+ YAML_ASSERT(doc["national"][2].to<std::string>() == "Atlanta Braves");
+ return true;
+ }
+
+ // 2.4
+ TEST SequenceOfMappings()
+ {
+ PARSE(doc, ex2_4);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc[0].size() == 3);
+ YAML_ASSERT(doc[0]["name"].to<std::string>() == "Mark McGwire");
+ YAML_ASSERT(doc[0]["hr"].to<std::string>() == "65");
+ YAML_ASSERT(doc[0]["avg"].to<std::string>() == "0.278");
+ YAML_ASSERT(doc[1].size() == 3);
+ YAML_ASSERT(doc[1]["name"].to<std::string>() == "Sammy Sosa");
+ YAML_ASSERT(doc[1]["hr"].to<std::string>() == "63");
+ YAML_ASSERT(doc[1]["avg"].to<std::string>() == "0.288");
+ return true;
+ }
+
+ // 2.5
+ TEST SequenceOfSequences()
+ {
+ PARSE(doc, ex2_5);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc[0].size() == 3);
+ YAML_ASSERT(doc[0][0].to<std::string>() == "name");
+ YAML_ASSERT(doc[0][1].to<std::string>() == "hr");
+ YAML_ASSERT(doc[0][2].to<std::string>() == "avg");
+ YAML_ASSERT(doc[1].size() == 3);
+ YAML_ASSERT(doc[1][0].to<std::string>() == "Mark McGwire");
+ YAML_ASSERT(doc[1][1].to<std::string>() == "65");
+ YAML_ASSERT(doc[1][2].to<std::string>() == "0.278");
+ YAML_ASSERT(doc[2].size() == 3);
+ YAML_ASSERT(doc[2][0].to<std::string>() == "Sammy Sosa");
+ YAML_ASSERT(doc[2][1].to<std::string>() == "63");
+ YAML_ASSERT(doc[2][2].to<std::string>() == "0.288");
+ return true;
+ }
+
+ // 2.6
+ TEST MappingOfMappings()
+ {
+ PARSE(doc, ex2_6);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["Mark McGwire"].size() == 2);
+ YAML_ASSERT(doc["Mark McGwire"]["hr"].to<std::string>() == "65");
+ YAML_ASSERT(doc["Mark McGwire"]["avg"].to<std::string>() == "0.278");
+ YAML_ASSERT(doc["Sammy Sosa"].size() == 2);
+ YAML_ASSERT(doc["Sammy Sosa"]["hr"].to<std::string>() == "63");
+ YAML_ASSERT(doc["Sammy Sosa"]["avg"].to<std::string>() == "0.288");
+ return true;
+ }
+
+ // 2.7
+ TEST TwoDocumentsInAStream()
+ {
+ PARSE(doc, ex2_7);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc[0].to<std::string>() == "Mark McGwire");
+ YAML_ASSERT(doc[1].to<std::string>() == "Sammy Sosa");
+ YAML_ASSERT(doc[2].to<std::string>() == "Ken Griffey");
+
+ PARSE_NEXT(doc);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc[0].to<std::string>() == "Chicago Cubs");
+ YAML_ASSERT(doc[1].to<std::string>() == "St Louis Cardinals");
+ return true;
+ }
+
+ // 2.8
+ TEST PlayByPlayFeed()
+ {
+ PARSE(doc, ex2_8);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["time"].to<std::string>() == "20:03:20");
+ YAML_ASSERT(doc["player"].to<std::string>() == "Sammy Sosa");
+ YAML_ASSERT(doc["action"].to<std::string>() == "strike (miss)");
+
+ PARSE_NEXT(doc);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["time"].to<std::string>() == "20:03:47");
+ YAML_ASSERT(doc["player"].to<std::string>() == "Sammy Sosa");
+ YAML_ASSERT(doc["action"].to<std::string>() == "grand slam");
+ return true;
+ }
+
+ // 2.9
+ TEST SingleDocumentWithTwoComments()
+ {
+ PARSE(doc, ex2_9);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["hr"].size() == 2);
+ YAML_ASSERT(doc["hr"][0].to<std::string>() == "Mark McGwire");
+ YAML_ASSERT(doc["hr"][1].to<std::string>() == "Sammy Sosa");
+ YAML_ASSERT(doc["rbi"].size() == 2);
+ YAML_ASSERT(doc["rbi"][0].to<std::string>() == "Sammy Sosa");
+ YAML_ASSERT(doc["rbi"][1].to<std::string>() == "Ken Griffey");
+ return true;
+ }
+
+ // 2.10
+ TEST SimpleAnchor()
+ {
+ PARSE(doc, ex2_10);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["hr"].size() == 2);
+ YAML_ASSERT(doc["hr"][0].to<std::string>() == "Mark McGwire");
+ YAML_ASSERT(doc["hr"][1].to<std::string>() == "Sammy Sosa");
+ YAML_ASSERT(doc["rbi"].size() == 2);
+ YAML_ASSERT(doc["rbi"][0].to<std::string>() == "Sammy Sosa");
+ YAML_ASSERT(doc["rbi"][1].to<std::string>() == "Ken Griffey");
+ return true;
+ }
+
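+ // Compound-key helper: with operator== and operator>> defined, the old API
+ // can evaluate doc[Pair("Detroit Tigers", "Chicago cubs")] by converting
+ // each mapping key to a Pair and comparing for equality (example 2.11).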
+ struct Pair {
+ Pair() {}
+ Pair(const std::string& f, const std::string& s): first(f), second(s) {}
+ std::string first, second;
+ };
+
+ bool operator == (const Pair& p, const Pair& q) {
+ return p.first == q.first && p.second == q.second;
+ }
+
+ void operator >> (const YAML::Node& node, Pair& p) {
+ node[0] >> p.first;
+ node[1] >> p.second;
+ }
+
+ // 2.11
+ TEST MappingBetweenSequences()
+ {
+ PARSE(doc, ex2_11);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc[Pair("Detroit Tigers", "Chicago cubs")].size() == 1);
+ YAML_ASSERT(doc[Pair("Detroit Tigers", "Chicago cubs")][0].to<std::string>() == "2001-07-23");
+ YAML_ASSERT(doc[Pair("New York Yankees", "Atlanta Braves")].size() == 3);
+ YAML_ASSERT(doc[Pair("New York Yankees", "Atlanta Braves")][0].to<std::string>() == "2001-07-02");
+ YAML_ASSERT(doc[Pair("New York Yankees", "Atlanta Braves")][1].to<std::string>() == "2001-08-12");
+ YAML_ASSERT(doc[Pair("New York Yankees", "Atlanta Braves")][2].to<std::string>() == "2001-08-14");
+ return true;
+ }
+
+ // 2.12
+ TEST CompactNestedMapping()
+ {
+ PARSE(doc, ex2_12);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc[0].size() == 2);
+ YAML_ASSERT(doc[0]["item"].to<std::string>() == "Super Hoop");
+ YAML_ASSERT(doc[0]["quantity"].to<int>() == 1);
+ YAML_ASSERT(doc[1].size() == 2);
+ YAML_ASSERT(doc[1]["item"].to<std::string>() == "Basketball");
+ YAML_ASSERT(doc[1]["quantity"].to<int>() == 4);
+ YAML_ASSERT(doc[2].size() == 2);
+ YAML_ASSERT(doc[2]["item"].to<std::string>() == "Big Shoes");
+ YAML_ASSERT(doc[2]["quantity"].to<int>() == 1);
+ return true;
+ }
+
+ // 2.13
+ TEST InLiteralsNewlinesArePreserved()
+ {
+ PARSE(doc, ex2_13);
+ YAML_ASSERT(doc.to<std::string>() ==
+ "\\//||\\/||\n"
+ "// || ||__");
+ return true;
+ }
+
+ // 2.14
+ TEST InFoldedScalarsNewlinesBecomeSpaces()
+ {
+ PARSE(doc, ex2_14);
+ YAML_ASSERT(doc.to<std::string>() == "Mark McGwire's year was crippled by a knee injury.");
+ return true;
+ }
+
+ // 2.15
+ TEST FoldedNewlinesArePreservedForMoreIndentedAndBlankLines()
+ {
+ PARSE(doc, ex2_15);
+ YAML_ASSERT(doc.to<std::string>() ==
+ "Sammy Sosa completed another fine season with great stats.\n\n"
+ " 63 Home Runs\n"
+ " 0.288 Batting Average\n\n"
+ "What a year!");
+ return true;
+ }
+
+ // 2.16
+ TEST IndentationDeterminesScope()
+ {
+ PARSE(doc, ex2_16);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["name"].to<std::string>() == "Mark McGwire");
+ YAML_ASSERT(doc["accomplishment"].to<std::string>() == "Mark set a major league home run record in 1998.\n");
+ YAML_ASSERT(doc["stats"].to<std::string>() == "65 Home Runs\n0.278 Batting Average\n");
+ return true;
+ }
+
+ // 2.17
+ TEST QuotedScalars()
+ {
+ PARSE(doc, ex2_17);
+ YAML_ASSERT(doc.size() == 6);
+ YAML_ASSERT(doc["unicode"].to<std::string>() == "Sosa did fine.\xe2\x98\xba");
+ YAML_ASSERT(doc["control"].to<std::string>() == "\b1998\t1999\t2000\n");
+ YAML_ASSERT(doc["hex esc"].to<std::string>() == "\x0d\x0a is \r\n");
+ YAML_ASSERT(doc["single"].to<std::string>() == "\"Howdy!\" he cried.");
+ YAML_ASSERT(doc["quoted"].to<std::string>() == " # Not a 'comment'.");
+ YAML_ASSERT(doc["tie-fighter"].to<std::string>() == "|\\-*-/|");
+ return true;
+ }
+
+ // 2.18
+ TEST MultiLineFlowScalars()
+ {
+ PARSE(doc, ex2_18);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["plain"].to<std::string>() == "This unquoted scalar spans many lines.");
+ YAML_ASSERT(doc["quoted"].to<std::string>() == "So does this quoted scalar.\n");
+ return true;
+ }
+
+ // TODO: 2.19 - 2.22 schema tags
+
+ // 2.23
+ TEST VariousExplicitTags()
+ {
+ PARSE(doc, ex2_23);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["not-date"].Tag() == "tag:yaml.org,2002:str");
+ YAML_ASSERT(doc["not-date"].to<std::string>() == "2002-04-28");
+ YAML_ASSERT(doc["picture"].Tag() == "tag:yaml.org,2002:binary");
+ YAML_ASSERT(doc["picture"].to<std::string>() ==
+ "R0lGODlhDAAMAIQAAP//9/X\n"
+ "17unp5WZmZgAAAOfn515eXv\n"
+ "Pz7Y6OjuDg4J+fn5OTk6enp\n"
+ "56enmleECcgggoBADs=\n"
+ );
+ YAML_ASSERT(doc["application specific tag"].Tag() == "!something");
+ YAML_ASSERT(doc["application specific tag"].to<std::string>() ==
+ "The semantics of the tag\n"
+ "above may be different for\n"
+ "different documents."
+ );
+ return true;
+ }
+
+ // 2.24
+ TEST GlobalTags()
+ {
+ PARSE(doc, ex2_24);
+ YAML_ASSERT(doc.Tag() == "tag:clarkevans.com,2002:shape");
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc[0].Tag() == "tag:clarkevans.com,2002:circle");
+ YAML_ASSERT(doc[0].size() == 2);
+ YAML_ASSERT(doc[0]["center"].size() == 2);
+ YAML_ASSERT(doc[0]["center"]["x"].to<int>() == 73);
+ YAML_ASSERT(doc[0]["center"]["y"].to<int>() == 129);
+ YAML_ASSERT(doc[0]["radius"].to<int>() == 7);
+ YAML_ASSERT(doc[1].Tag() == "tag:clarkevans.com,2002:line");
+ YAML_ASSERT(doc[1].size() == 2);
+ YAML_ASSERT(doc[1]["start"].size() == 2);
+ YAML_ASSERT(doc[1]["start"]["x"].to<int>() == 73);
+ YAML_ASSERT(doc[1]["start"]["y"].to<int>() == 129);
+ YAML_ASSERT(doc[1]["finish"].size() == 2);
+ YAML_ASSERT(doc[1]["finish"]["x"].to<int>() == 89);
+ YAML_ASSERT(doc[1]["finish"]["y"].to<int>() == 102);
+ YAML_ASSERT(doc[2].Tag() == "tag:clarkevans.com,2002:label");
+ YAML_ASSERT(doc[2].size() == 3);
+ YAML_ASSERT(doc[2]["start"].size() == 2);
+ YAML_ASSERT(doc[2]["start"]["x"].to<int>() == 73);
+ YAML_ASSERT(doc[2]["start"]["y"].to<int>() == 129);
+ YAML_ASSERT(doc[2]["color"].to<std::string>() == "0xFFEEBB");
+ YAML_ASSERT(doc[2]["text"].to<std::string>() == "Pretty vector drawing.");
+ return true;
+ }
+
+ // 2.25
+ TEST UnorderedSets()
+ {
+ PARSE(doc, ex2_25);
+ YAML_ASSERT(doc.Tag() == "tag:yaml.org,2002:set");
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(IsNull(doc["Mark McGwire"]));
+ YAML_ASSERT(IsNull(doc["Sammy Sosa"]));
+ YAML_ASSERT(IsNull(doc["Ken Griffey"]));
+ return true;
+ }
+
+ // 2.26
+ TEST OrderedMappings()
+ {
+ PARSE(doc, ex2_26);
+ YAML_ASSERT(doc.Tag() == "tag:yaml.org,2002:omap");
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc[0].size() == 1);
+ YAML_ASSERT(doc[0]["Mark McGwire"].to<int>() == 65);
+ YAML_ASSERT(doc[1].size() == 1);
+ YAML_ASSERT(doc[1]["Sammy Sosa"].to<int>() == 63);
+ YAML_ASSERT(doc[2].size() == 1);
+ YAML_ASSERT(doc[2]["Ken Griffey"].to<int>() == 58);
+ return true;
+ }
+
+ // 2.27
+ TEST Invoice()
+ {
+ PARSE(doc, ex2_27);
+ YAML_ASSERT(doc.Tag() == "tag:clarkevans.com,2002:invoice");
+ YAML_ASSERT(doc.size() == 8);
+ YAML_ASSERT(doc["invoice"].to<int>() == 34843);
+ YAML_ASSERT(doc["date"].to<std::string>() == "2001-01-23");
+ YAML_ASSERT(doc["bill-to"].size() == 3);
+ YAML_ASSERT(doc["bill-to"]["given"].to<std::string>() == "Chris");
+ YAML_ASSERT(doc["bill-to"]["family"].to<std::string>() == "Dumars");
+ YAML_ASSERT(doc["bill-to"]["address"].size() == 4);
+ YAML_ASSERT(doc["bill-to"]["address"]["lines"].to<std::string>() == "458 Walkman Dr.\nSuite #292\n");
+ YAML_ASSERT(doc["bill-to"]["address"]["city"].to<std::string>() == "Royal Oak");
+ YAML_ASSERT(doc["bill-to"]["address"]["state"].to<std::string>() == "MI");
+ YAML_ASSERT(doc["bill-to"]["address"]["postal"].to<std::string>() == "48046");
+ YAML_ASSERT(doc["ship-to"].size() == 3);
+ YAML_ASSERT(doc["ship-to"]["given"].to<std::string>() == "Chris");
+ YAML_ASSERT(doc["ship-to"]["family"].to<std::string>() == "Dumars");
+ YAML_ASSERT(doc["ship-to"]["address"].size() == 4);
+ YAML_ASSERT(doc["ship-to"]["address"]["lines"].to<std::string>() == "458 Walkman Dr.\nSuite #292\n");
+ YAML_ASSERT(doc["ship-to"]["address"]["city"].to<std::string>() == "Royal Oak");
+ YAML_ASSERT(doc["ship-to"]["address"]["state"].to<std::string>() == "MI");
+ YAML_ASSERT(doc["ship-to"]["address"]["postal"].to<std::string>() == "48046");
+ YAML_ASSERT(doc["product"].size() == 2);
+ YAML_ASSERT(doc["product"][0].size() == 4);
+ YAML_ASSERT(doc["product"][0]["sku"].to<std::string>() == "BL394D");
+ YAML_ASSERT(doc["product"][0]["quantity"].to<int>() == 4);
+ YAML_ASSERT(doc["product"][0]["description"].to<std::string>() == "Basketball");
+ YAML_ASSERT(doc["product"][0]["price"].to<std::string>() == "450.00");
+ YAML_ASSERT(doc["product"][1].size() == 4);
+ YAML_ASSERT(doc["product"][1]["sku"].to<std::string>() == "BL4438H");
+ YAML_ASSERT(doc["product"][1]["quantity"].to<int>() == 1);
+ YAML_ASSERT(doc["product"][1]["description"].to<std::string>() == "Super Hoop");
+ YAML_ASSERT(doc["product"][1]["price"].to<std::string>() == "2392.00");
+ YAML_ASSERT(doc["tax"].to<std::string>() == "251.42");
+ YAML_ASSERT(doc["total"].to<std::string>() == "4443.52");
+ YAML_ASSERT(doc["comments"].to<std::string>() == "Late afternoon is best. Backup contact is Nancy Billsmer @ 338-4338.");
+ return true;
+ }
+
+ // 2.28
+ TEST LogFile()
+ {
+ PARSE(doc, ex2_28);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["Time"].to<std::string>() == "2001-11-23 15:01:42 -5");
+ YAML_ASSERT(doc["User"].to<std::string>() == "ed");
+ YAML_ASSERT(doc["Warning"].to<std::string>() == "This is an error message for the log file");
+
+ PARSE_NEXT(doc);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["Time"].to<std::string>() == "2001-11-23 15:02:31 -5");
+ YAML_ASSERT(doc["User"].to<std::string>() == "ed");
+ YAML_ASSERT(doc["Warning"].to<std::string>() == "A slightly different error message.");
+
+ PARSE_NEXT(doc);
+ YAML_ASSERT(doc.size() == 4);
+ YAML_ASSERT(doc["Date"].to<std::string>() == "2001-11-23 15:03:17 -5");
+ YAML_ASSERT(doc["User"].to<std::string>() == "ed");
+ YAML_ASSERT(doc["Fatal"].to<std::string>() == "Unknown variable \"bar\"");
+ YAML_ASSERT(doc["Stack"].size() == 2);
+ YAML_ASSERT(doc["Stack"][0].size() == 3);
+ YAML_ASSERT(doc["Stack"][0]["file"].to<std::string>() == "TopClass.py");
+ YAML_ASSERT(doc["Stack"][0]["line"].to<std::string>() == "23");
+ YAML_ASSERT(doc["Stack"][0]["code"].to<std::string>() == "x = MoreObject(\"345\\n\")\n");
+ YAML_ASSERT(doc["Stack"][1].size() == 3);
+ YAML_ASSERT(doc["Stack"][1]["file"].to<std::string>() == "MoreClass.py");
+ YAML_ASSERT(doc["Stack"][1]["line"].to<std::string>() == "58");
+ YAML_ASSERT(doc["Stack"][1]["code"].to<std::string>() == "foo = bar");
+ return true;
+ }
+
+ // TODO: 5.1 - 5.2 BOM
+
+ // 5.3
+ TEST BlockStructureIndicators()
+ {
+ PARSE(doc, ex5_3);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["sequence"].size() == 2);
+ YAML_ASSERT(doc["sequence"][0].to<std::string>() == "one");
+ YAML_ASSERT(doc["sequence"][1].to<std::string>() == "two");
+ YAML_ASSERT(doc["mapping"].size() == 2);
+ YAML_ASSERT(doc["mapping"]["sky"].to<std::string>() == "blue");
+ YAML_ASSERT(doc["mapping"]["sea"].to<std::string>() == "green");
+ return true;
+ }
+
+ // 5.4
+ TEST FlowStructureIndicators()
+ {
+ PARSE(doc, ex5_4);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["sequence"].size() == 2);
+ YAML_ASSERT(doc["sequence"][0].to<std::string>() == "one");
+ YAML_ASSERT(doc["sequence"][1].to<std::string>() == "two");
+ YAML_ASSERT(doc["mapping"].size() == 2);
+ YAML_ASSERT(doc["mapping"]["sky"].to<std::string>() == "blue");
+ YAML_ASSERT(doc["mapping"]["sea"].to<std::string>() == "green");
+ return true;
+ }
+
+ // 5.5
+ TEST CommentIndicator()
+ {
+ PARSE(doc, ex5_5);
+ YAML_ASSERT(doc.size() == 0);
+ return true;
+ }
+
+ // 5.6
+ TEST NodePropertyIndicators()
+ {
+ PARSE(doc, ex5_6);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["anchored"].to<std::string>() == "value"); // TODO: assert tag
+ YAML_ASSERT(doc["alias"].to<std::string>() == "value");
+ return true;
+ }
+
+ // 5.7
+ TEST BlockScalarIndicators()
+ {
+ PARSE(doc, ex5_7);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["literal"].to<std::string>() == "some\ntext\n");
+ YAML_ASSERT(doc["folded"].to<std::string>() == "some text\n");
+ return true;
+ }
+
+ // 5.8
+ TEST QuotedScalarIndicators()
+ {
+ PARSE(doc, ex5_8);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["single"].to<std::string>() == "text");
+ YAML_ASSERT(doc["double"].to<std::string>() == "text");
+ return true;
+ }
+
+ // TODO: 5.9 directive
+ // TODO: 5.10 reserved indicator
+
+ // 5.11
+ TEST LineBreakCharacters()
+ {
+ PARSE(doc, ex5_11);
+ YAML_ASSERT(doc.to<std::string>() == "Line break (no glyph)\nLine break (glyphed)\n");
+ return true;
+ }
+
+ // 5.12
+ TEST TabsAndSpaces()
+ {
+ PARSE(doc, ex5_12);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["quoted"].to<std::string>() == "Quoted\t");
+ YAML_ASSERT(doc["block"].to<std::string>() ==
+ "void main() {\n"
+ "\tprintf(\"Hello, world!\\n\");\n"
+ "}");
+ return true;
+ }
+
+ // 5.13
+ TEST EscapedCharacters()
+ {
+ PARSE(doc, ex5_13);
+ YAML_ASSERT(doc.to<std::string>() == "Fun with \x5C \x22 \x07 \x08 \x1B \x0C \x0A \x0D \x09 \x0B " + std::string("\x00", 1) + " \x20 \xA0 \x85 \xe2\x80\xa8 \xe2\x80\xa9 A A A");
+ return true;
+ }
+
+ // 5.14
+ TEST InvalidEscapedCharacters()
+ {
+ std::stringstream stream(ex5_14);
+ try {
+ YAML::Parser parser(stream);
+ YAML::Node doc;
+ parser.GetNextDocument(doc);
+ } catch(const YAML::ParserException& e) {
+ YAML_ASSERT(e.msg == std::string(YAML::ErrorMsg::INVALID_ESCAPE) + "c");
+ return true;
+ }
+
+ return false;
+ }
+
+ // 6.1
+ TEST IndentationSpaces()
+ {
+ PARSE(doc, ex6_1);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc["Not indented"].size() == 2);
+ YAML_ASSERT(doc["Not indented"]["By one space"].to<std::string>() == "By four\n spaces\n");
+ YAML_ASSERT(doc["Not indented"]["Flow style"].size() == 3);
+ YAML_ASSERT(doc["Not indented"]["Flow style"][0].to<std::string>() == "By two");
+ YAML_ASSERT(doc["Not indented"]["Flow style"][1].to<std::string>() == "Also by two");
+ YAML_ASSERT(doc["Not indented"]["Flow style"][2].to<std::string>() == "Still by two");
+ return true;
+ }
+
+ // 6.2
+ TEST IndentationIndicators()
+ {
+ PARSE(doc, ex6_2);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc["a"].size() == 2);
+ YAML_ASSERT(doc["a"][0].to<std::string>() == "b");
+ YAML_ASSERT(doc["a"][1].size() == 2);
+ YAML_ASSERT(doc["a"][1][0].to<std::string>() == "c");
+ YAML_ASSERT(doc["a"][1][1].to<std::string>() == "d");
+ return true;
+ }
+
+ // 6.3
+ TEST SeparationSpaces()
+ {
+ PARSE(doc, ex6_3);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc[0].size() == 1);
+ YAML_ASSERT(doc[0]["foo"].to<std::string>() == "bar");
+ YAML_ASSERT(doc[1].size() == 2);
+ YAML_ASSERT(doc[1][0].to<std::string>() == "baz");
+ YAML_ASSERT(doc[1][1].to<std::string>() == "baz");
+ return true;
+ }
+
+ // 6.4
+ TEST LinePrefixes()
+ {
+ PARSE(doc, ex6_4);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["plain"].to<std::string>() == "text lines");
+ YAML_ASSERT(doc["quoted"].to<std::string>() == "text lines");
+ YAML_ASSERT(doc["block"].to<std::string>() == "text\n \tlines\n");
+ return true;
+ }
+
+ // 6.5
+ TEST EmptyLines()
+ {
+ PARSE(doc, ex6_5);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["Folding"].to<std::string>() == "Empty line\nas a line feed");
+ YAML_ASSERT(doc["Chomping"].to<std::string>() == "Clipped empty lines\n");
+ return true;
+ }
+
+ // 6.6
+ TEST LineFolding()
+ {
+ PARSE(doc, ex6_6);
+ YAML_ASSERT(doc.to<std::string>() == "trimmed\n\n\nas space");
+ return true;
+ }
+
+ // 6.7
+ TEST BlockFolding()
+ {
+ PARSE(doc, ex6_7);
+ YAML_ASSERT(doc.to<std::string>() == "foo \n\n\t bar\n\nbaz\n");
+ return true;
+ }
+
+ // 6.8
+ TEST FlowFolding()
+ {
+ PARSE(doc, ex6_8);
+ YAML_ASSERT(doc.to<std::string>() == " foo\nbar\nbaz ");
+ return true;
+ }
+
+ // 6.9
+ TEST SeparatedComment()
+ {
+ PARSE(doc, ex6_9);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc["key"].to<std::string>() == "value");
+ return true;
+ }
+
+ // 6.10
+ TEST CommentLines()
+ {
+ PARSE(doc, ex6_10);
+ YAML_ASSERT(doc.size() == 0);
+ return true;
+ }
+
+ // 6.11
+ TEST MultiLineComments()
+ {
+ PARSE(doc, ex6_11);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc["key"].to<std::string>() == "value");
+ return true;
+ }
+
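+ // Like Pair above, StringMap lets a whole mapping act as a lookup key; it
+ // is used for the {JSON: like} key in SinglePairImplicitEntries (example
+ // 7.21).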
+ struct StringMap {
+ typedef std::map<std::string, std::string> Map;
+ Map _;
+ };
+
+ bool operator == (const StringMap& m, const StringMap& n) {
+ return m._ == n._;
+ }
+
+ void operator >> (const YAML::Node& node, StringMap& m) {
+ m._.clear();
+ for(YAML::Iterator it=node.begin();it!=node.end();++it) {
+ std::string key = it.first().to<std::string>();
+ std::string value = it.second().to<std::string>();
+ m._[key] = value;
+ }
+ }
+
+
+ // 6.12
+ TEST SeparationSpacesII()
+ {
+ PARSE(doc, ex6_12);
+ std::map<std::string, std::string> key;
+ key["first"] = "Sammy";
+ key["last"] = "Sosa";
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc[key].size() == 2);
+ YAML_ASSERT(doc[key]["hr"].to<int>() == 65);
+ YAML_ASSERT(doc[key]["avg"].to<std::string>() == "0.278");
+ return true;
+ }
+
+ // 6.13
+ TEST ReservedDirectives()
+ {
+ PARSE(doc, ex6_13);
+ return true;
+ }
+
+ // 6.14
+ TEST YAMLDirective()
+ {
+ PARSE(doc, ex6_14);
+ return true;
+ }
+
+ // 6.15
+ TEST InvalidRepeatedYAMLDirective()
+ {
+ try {
+ PARSE(doc, ex6_15);
+ } catch(const YAML::ParserException& e) {
+ if(e.msg == YAML::ErrorMsg::REPEATED_YAML_DIRECTIVE)
+ return true;
+
+ throw;
+ }
+
+ return " No exception was thrown";
+ }
+
+ // 6.16
+ TEST TagDirective()
+ {
+ PARSE(doc, ex6_16);
+ YAML_ASSERT(doc.Tag() == "tag:yaml.org,2002:str");
+ YAML_ASSERT(doc.to<std::string>() == "foo");
+ return true;
+ }
+
+ // 6.17
+ TEST InvalidRepeatedTagDirective()
+ {
+ try {
+ PARSE(doc, ex6_17);
+ } catch(const YAML::ParserException& e) {
+ if(e.msg == YAML::ErrorMsg::REPEATED_TAG_DIRECTIVE)
+ return true;
+
+ throw;
+ }
+
+ return " No exception was thrown";
+ }
+
+ // 6.18
+ TEST PrimaryTagHandle()
+ {
+ PARSE(doc, ex6_18);
+ YAML_ASSERT(doc.Tag() == "!foo");
+ YAML_ASSERT(doc.to<std::string>() == "bar");
+
+ PARSE_NEXT(doc);
+ YAML_ASSERT(doc.Tag() == "tag:example.com,2000:app/foo");
+ YAML_ASSERT(doc.to<std::string>() == "bar");
+ return true;
+ }
+
+ // 6.19
+ TEST SecondaryTagHandle()
+ {
+ PARSE(doc, ex6_19);
+ YAML_ASSERT(doc.Tag() == "tag:example.com,2000:app/int");
+ YAML_ASSERT(doc.to<std::string>() == "1 - 3");
+ return true;
+ }
+
+ // 6.20
+ TEST TagHandles()
+ {
+ PARSE(doc, ex6_20);
+ YAML_ASSERT(doc.Tag() == "tag:example.com,2000:app/foo");
+ YAML_ASSERT(doc.to<std::string>() == "bar");
+ return true;
+ }
+
+ // 6.21
+ TEST LocalTagPrefix()
+ {
+ PARSE(doc, ex6_21);
+ YAML_ASSERT(doc.Tag() == "!my-light");
+ YAML_ASSERT(doc.to<std::string>() == "fluorescent");
+
+ PARSE_NEXT(doc);
+ YAML_ASSERT(doc.Tag() == "!my-light");
+ YAML_ASSERT(doc.to<std::string>() == "green");
+ return true;
+ }
+
+ // 6.22
+ TEST GlobalTagPrefix()
+ {
+ PARSE(doc, ex6_22);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc[0].Tag() == "tag:example.com,2000:app/foo");
+ YAML_ASSERT(doc[0].to<std::string>() == "bar");
+ return true;
+ }
+
+ // 6.23
+ TEST NodeProperties()
+ {
+ PARSE(doc, ex6_23);
+ YAML_ASSERT(doc.size() == 2);
+ for(YAML::Iterator it=doc.begin();it!=doc.end();++it) {
+ if(it.first().to<std::string>() == "foo") {
+ YAML_ASSERT(it.first().Tag() == "tag:yaml.org,2002:str");
+ YAML_ASSERT(it.second().Tag() == "tag:yaml.org,2002:str");
+ YAML_ASSERT(it.second().to<std::string>() == "bar");
+ } else if(it.first().to<std::string>() == "baz") {
+ YAML_ASSERT(it.second().to<std::string>() == "foo");
+ } else
+ return " unknown key";
+ }
+
+ return true;
+ }
+
+ // 6.24
+ TEST VerbatimTags()
+ {
+ PARSE(doc, ex6_24);
+ YAML_ASSERT(doc.size() == 1);
+ for(YAML::Iterator it=doc.begin();it!=doc.end();++it) {
+ YAML_ASSERT(it.first().Tag() == "tag:yaml.org,2002:str");
+ YAML_ASSERT(it.first().to<std::string>() == "foo");
+ YAML_ASSERT(it.second().Tag() == "!bar");
+ YAML_ASSERT(it.second().to<std::string>() == "baz");
+ }
+ return true;
+ }
+
+ // 6.25
+ TEST InvalidVerbatimTags()
+ {
+ PARSE(doc, ex6_25);
+ return " not implemented yet"; // TODO: check tags (but we probably will say these are valid, I think)
+ }
+
+ // 6.26
+ TEST TagShorthands()
+ {
+ PARSE(doc, ex6_26);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc[0].Tag() == "!local");
+ YAML_ASSERT(doc[0].to<std::string>() == "foo");
+ YAML_ASSERT(doc[1].Tag() == "tag:yaml.org,2002:str");
+ YAML_ASSERT(doc[1].to<std::string>() == "bar");
+ YAML_ASSERT(doc[2].Tag() == "tag:example.com,2000:app/tag%21");
+ YAML_ASSERT(doc[2].to<std::string>() == "baz");
+ return true;
+ }
+
+ // 6.27
+ TEST InvalidTagShorthands()
+ {
+ bool threw = false;
+ try {
+ PARSE(doc, ex6_27a);
+ } catch(const YAML::ParserException& e) {
+ threw = true;
+ if(e.msg != YAML::ErrorMsg::TAG_WITH_NO_SUFFIX)
+ throw;
+ }
+
+ if(!threw)
+ return " No exception was thrown for a tag with no suffix";
+
+ PARSE(doc, ex6_27b); // TODO: should we reject this one (since !h! is not declared)?
+ return " not implemented yet";
+ }
+
+ // 6.28
+ TEST NonSpecificTags()
+ {
+ PARSE(doc, ex6_28);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc[0].to<std::string>() == "12"); // TODO: check tags. How?
+ YAML_ASSERT(doc[1].to<int>() == 12);
+ YAML_ASSERT(doc[2].to<std::string>() == "12");
+ return true;
+ }
+
+ // 6.29
+ TEST NodeAnchors()
+ {
+ PARSE(doc, ex6_29);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["First occurrence"].to<std::string>() == "Value");
+ YAML_ASSERT(doc["Second occurrence"].to<std::string>() == "Value");
+ return true;
+ }
+
+ // 7.1
+ TEST AliasNodes()
+ {
+ PARSE(doc, ex7_1);
+ YAML_ASSERT(doc.size() == 4);
+ YAML_ASSERT(doc["First occurrence"].to<std::string>() == "Foo");
+ YAML_ASSERT(doc["Second occurrence"].to<std::string>() == "Foo");
+ YAML_ASSERT(doc["Override anchor"].to<std::string>() == "Bar");
+ YAML_ASSERT(doc["Reuse anchor"].to<std::string>() == "Bar");
+ return true;
+ }
+
+ // 7.2
+ TEST EmptyNodes()
+ {
+ PARSE(doc, ex7_2);
+ YAML_ASSERT(doc.size() == 2);
+ for(YAML::Iterator it=doc.begin();it!=doc.end();++it) {
+ if(it.first().to<std::string>() == "foo") {
+ YAML_ASSERT(it.second().Tag() == "tag:yaml.org,2002:str");
+ YAML_ASSERT(it.second().to<std::string>() == "");
+ } else if(it.first().to<std::string>() == "") {
+ YAML_ASSERT(it.first().Tag() == "tag:yaml.org,2002:str");
+ YAML_ASSERT(it.second().to<std::string>() == "bar");
+ } else
+ return " unexpected key";
+ }
+ return true;
+ }
+
+ // 7.3
+ TEST CompletelyEmptyNodes()
+ {
+ PARSE(doc, ex7_3);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(IsNull(doc["foo"]));
+ YAML_ASSERT(doc[YAML::Null].to<std::string>() == "bar");
+ return true;
+ }
+
+ // 7.4
+ TEST DoubleQuotedImplicitKeys()
+ {
+ PARSE(doc, ex7_4);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc["implicit block key"].size() == 1);
+ YAML_ASSERT(doc["implicit block key"][0].size() == 1);
+ YAML_ASSERT(doc["implicit block key"][0]["implicit flow key"].to<std::string>() == "value");
+ return true;
+ }
+
+ // 7.5
+ TEST DoubleQuotedLineBreaks()
+ {
+ PARSE(doc, ex7_5);
+ YAML_ASSERT(doc.to<std::string>() == "folded to a space,\nto a line feed, or \t \tnon-content");
+ return true;
+ }
+
+ // 7.6
+ TEST DoubleQuotedLines()
+ {
+ PARSE(doc, ex7_6);
+ YAML_ASSERT(doc.to<std::string>() == " 1st non-empty\n2nd non-empty 3rd non-empty ");
+ return true;
+ }
+
+ // 7.7
+ TEST SingleQuotedCharacters()
+ {
+ PARSE(doc, ex7_7);
+ YAML_ASSERT(doc.to<std::string>() == "here's to \"quotes\"");
+ return true;
+ }
+
+ // 7.8
+ TEST SingleQuotedImplicitKeys()
+ {
+ PARSE(doc, ex7_8);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc["implicit block key"].size() == 1);
+ YAML_ASSERT(doc["implicit block key"][0].size() == 1);
+ YAML_ASSERT(doc["implicit block key"][0]["implicit flow key"].to<std::string>() == "value");
+ return true;
+ }
+
+ // 7.9
+ TEST SingleQuotedLines()
+ {
+ PARSE(doc, ex7_9);
+ YAML_ASSERT(doc.to<std::string>() == " 1st non-empty\n2nd non-empty 3rd non-empty ");
+ return true;
+ }
+
+ // 7.10
+ TEST PlainCharacters()
+ {
+ PARSE(doc, ex7_10);
+ YAML_ASSERT(doc.size() == 6);
+ YAML_ASSERT(doc[0].to<std::string>() == "::vector");
+ YAML_ASSERT(doc[1].to<std::string>() == ": - ()");
+ YAML_ASSERT(doc[2].to<std::string>() == "Up, up, and away!");
+ YAML_ASSERT(doc[3].to<int>() == -123);
+ YAML_ASSERT(doc[4].to<std::string>() == "http://example.com/foo#bar");
+ YAML_ASSERT(doc[5].size() == 5);
+ YAML_ASSERT(doc[5][0].to<std::string>() == "::vector");
+ YAML_ASSERT(doc[5][1].to<std::string>() == ": - ()");
+ YAML_ASSERT(doc[5][2].to<std::string>() == "Up, up, and away!");
+ YAML_ASSERT(doc[5][3].to<int>() == -123);
+ YAML_ASSERT(doc[5][4].to<std::string>() == "http://example.com/foo#bar");
+ return true;
+ }
+
+ // 7.11
+ TEST PlainImplicitKeys()
+ {
+ PARSE(doc, ex7_11);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc["implicit block key"].size() == 1);
+ YAML_ASSERT(doc["implicit block key"][0].size() == 1);
+ YAML_ASSERT(doc["implicit block key"][0]["implicit flow key"].to<std::string>() == "value");
+ return true;
+ }
+
+ // 7.12
+ TEST PlainLines()
+ {
+ PARSE(doc, ex7_12);
+ YAML_ASSERT(doc.to<std::string>() == "1st non-empty\n2nd non-empty 3rd non-empty");
+ return true;
+ }
+
+ // 7.13
+ TEST FlowSequence()
+ {
+ PARSE(doc, ex7_13);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc[0].size() == 2);
+ YAML_ASSERT(doc[0][0].to<std::string>() == "one");
+ YAML_ASSERT(doc[0][1].to<std::string>() == "two");
+ YAML_ASSERT(doc[1].size() == 2);
+ YAML_ASSERT(doc[1][0].to<std::string>() == "three");
+ YAML_ASSERT(doc[1][1].to<std::string>() == "four");
+ return true;
+ }
+
+ // 7.14
+ TEST FlowSequenceEntries()
+ {
+ PARSE(doc, ex7_14);
+ YAML_ASSERT(doc.size() == 5);
+ YAML_ASSERT(doc[0].to<std::string>() == "double quoted");
+ YAML_ASSERT(doc[1].to<std::string>() == "single quoted");
+ YAML_ASSERT(doc[2].to<std::string>() == "plain text");
+ YAML_ASSERT(doc[3].size() == 1);
+ YAML_ASSERT(doc[3][0].to<std::string>() == "nested");
+ YAML_ASSERT(doc[4].size() == 1);
+ YAML_ASSERT(doc[4]["single"].to<std::string>() == "pair");
+ return true;
+ }
+
+ // 7.15
+ TEST FlowMappings()
+ {
+ PARSE(doc, ex7_15);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc[0].size() == 2);
+ YAML_ASSERT(doc[0]["one"].to<std::string>() == "two");
+ YAML_ASSERT(doc[0]["three"].to<std::string>() == "four");
+ YAML_ASSERT(doc[1].size() == 2);
+ YAML_ASSERT(doc[1]["five"].to<std::string>() == "six");
+ YAML_ASSERT(doc[1]["seven"].to<std::string>() == "eight");
+ return true;
+ }
+
+ // 7.16
+ TEST FlowMappingEntries()
+ {
+ PARSE(doc, ex7_16);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["explicit"].to<std::string>() == "entry");
+ YAML_ASSERT(doc["implicit"].to<std::string>() == "entry");
+ YAML_ASSERT(IsNull(doc[YAML::Null]));
+ return true;
+ }
+
+ // 7.17
+ TEST FlowMappingSeparateValues()
+ {
+ PARSE(doc, ex7_17);
+ YAML_ASSERT(doc.size() == 4);
+ YAML_ASSERT(doc["unquoted"].to<std::string>() == "separate");
+ YAML_ASSERT(IsNull(doc["http://foo.com"]));
+ YAML_ASSERT(IsNull(doc["omitted value"]));
+ YAML_ASSERT(doc[YAML::Null].to<std::string>() == "omitted key");
+ return true;
+ }
+
+ // 7.18
+ TEST FlowMappingAdjacentValues()
+ {
+ PARSE(doc, ex7_18);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["adjacent"].to<std::string>() == "value");
+ YAML_ASSERT(doc["readable"].to<std::string>() == "value");
+ YAML_ASSERT(IsNull(doc["empty"]));
+ return true;
+ }
+
+ // 7.19
+ TEST SinglePairFlowMappings()
+ {
+ PARSE(doc, ex7_19);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc[0].size() == 1);
+ YAML_ASSERT(doc[0]["foo"].to<std::string>() == "bar");
+ return true;
+ }
+
+ // 7.20
+ TEST SinglePairExplicitEntry()
+ {
+ PARSE(doc, ex7_20);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc[0].size() == 1);
+ YAML_ASSERT(doc[0]["foo bar"].to<std::string>() == "baz");
+ return true;
+ }
+
+ // 7.21
+ TEST SinglePairImplicitEntries()
+ {
+ PARSE(doc, ex7_21);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc[0].size() == 1);
+ YAML_ASSERT(doc[0][0].size() == 1);
+ YAML_ASSERT(doc[0][0]["YAML"].to<std::string>() == "separate");
+ YAML_ASSERT(doc[1].size() == 1);
+ YAML_ASSERT(doc[1][0].size() == 1);
+ YAML_ASSERT(doc[1][0][YAML::Null].to<std::string>() == "empty key entry");
+ YAML_ASSERT(doc[2].size() == 1);
+ YAML_ASSERT(doc[2][0].size() == 1);
+ StringMap key;
+ key._["JSON"] = "like";
+ YAML_ASSERT(doc[2][0][key].to<std::string>() == "adjacent");
+ return true;
+ }
+
+ // 7.22
+ TEST InvalidImplicitKeys()
+ {
+ try {
+ PARSE(doc, ex7_22);
+ } catch(const YAML::Exception& e) {
+ if(e.msg == YAML::ErrorMsg::END_OF_SEQ_FLOW)
+ return true;
+
+ throw;
+ }
+ return " no exception thrown";
+ }
+
+ // 7.23
+ TEST FlowContent()
+ {
+ PARSE(doc, ex7_23);
+ YAML_ASSERT(doc.size() == 5);
+ YAML_ASSERT(doc[0].size() == 2);
+ YAML_ASSERT(doc[0][0].to<std::string>() == "a");
+ YAML_ASSERT(doc[0][1].to<std::string>() == "b");
+ YAML_ASSERT(doc[1].size() == 1);
+ YAML_ASSERT(doc[1]["a"].to<std::string>() == "b");
+ YAML_ASSERT(doc[2].to<std::string>() == "a");
+ YAML_ASSERT(doc[3].to<char>() == 'b');
+ YAML_ASSERT(doc[4].to<std::string>() == "c");
+ return true;
+ }
+
+ // 7.24
+ TEST FlowNodes()
+ {
+ PARSE(doc, ex7_24);
+ YAML_ASSERT(doc.size() == 5);
+ YAML_ASSERT(doc[0].Tag() == "tag:yaml.org,2002:str");
+ YAML_ASSERT(doc[0].to<std::string>() == "a");
+ YAML_ASSERT(doc[1].to<char>() == 'b');
+ YAML_ASSERT(doc[2].to<std::string>() == "c");
+ YAML_ASSERT(doc[3].to<std::string>() == "c");
+ YAML_ASSERT(doc[4].Tag() == "tag:yaml.org,2002:str");
+ YAML_ASSERT(doc[4].to<std::string>() == "");
+ return true;
+ }
+
+ // 8.1
+ TEST BlockScalarHeader()
+ {
+ PARSE(doc, ex8_1);
+ YAML_ASSERT(doc.size() == 4);
+ YAML_ASSERT(doc[0].to<std::string>() == "literal\n");
+ YAML_ASSERT(doc[1].to<std::string>() == " folded\n");
+ YAML_ASSERT(doc[2].to<std::string>() == "keep\n\n");
+ YAML_ASSERT(doc[3].to<std::string>() == " strip");
+ return true;
+ }
+
+ // 8.2
+ TEST BlockIndentationHeader()
+ {
+ PARSE(doc, ex8_2);
+ YAML_ASSERT(doc.size() == 4);
+ YAML_ASSERT(doc[0].to<std::string>() == "detected\n");
+ YAML_ASSERT(doc[1].to<std::string>() == "\n\n# detected\n");
+ YAML_ASSERT(doc[2].to<std::string>() == " explicit\n");
+ YAML_ASSERT(doc[3].to<std::string>() == "\t\ndetected\n");
+ return true;
+ }
+
+ // 8.3
+ TEST InvalidBlockScalarIndentationIndicators()
+ {
+ {
+ bool threw = false;
+ try {
+ PARSE(doc, ex8_3a);
+ } catch(const YAML::Exception& e) {
+ if(e.msg != YAML::ErrorMsg::END_OF_SEQ)
+ throw;
+
+ threw = true;
+ }
+
+ if(!threw)
+ return " no exception thrown for less indented auto-detecting indentation for a literal block scalar";
+ }
+
+ {
+ bool threw = false;
+ try {
+ PARSE(doc, ex8_3b);
+ } catch(const YAML::Exception& e) {
+ if(e.msg != YAML::ErrorMsg::END_OF_SEQ)
+ throw;
+
+ threw = true;
+ }
+
+ if(!threw)
+ return " no exception thrown for less indented auto-detecting indentation for a folded block scalar";
+ }
+
+ {
+ bool threw = false;
+ try {
+ PARSE(doc, ex8_3c);
+ } catch(const YAML::Exception& e) {
+ if(e.msg != YAML::ErrorMsg::END_OF_SEQ)
+ throw;
+
+ threw = true;
+ }
+
+ if(!threw)
+ return " no exception thrown for less indented explicit indentation for a literal block scalar";
+ }
+
+ return true;
+ }
+
+ // 8.4
+ TEST ChompingFinalLineBreak()
+ {
+ PARSE(doc, ex8_4);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["strip"].to<std::string>() == "text");
+ YAML_ASSERT(doc["clip"].to<std::string>() == "text\n");
+ YAML_ASSERT(doc["keep"].to<std::string>() == "text\n");
+ return true;
+ }
+
+ // 8.5
+ TEST ChompingTrailingLines()
+ {
+ PARSE(doc, ex8_5);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["strip"].to<std::string>() == "# text");
+ YAML_ASSERT(doc["clip"].to<std::string>() == "# text\n");
+ YAML_ASSERT(doc["keep"].to<std::string>() == "# text\n"); // Note: I believe this is a bug in the YAML spec - it should be "# text\n\n"
+ return true;
+ }
+
+ // 8.6
+ TEST EmptyScalarChomping()
+ {
+ PARSE(doc, ex8_6);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["strip"].to<std::string>() == "");
+ YAML_ASSERT(doc["clip"].to<std::string>() == "");
+ YAML_ASSERT(doc["keep"].to<std::string>() == "\n");
+ return true;
+ }
+
+ // 8.7
+ TEST LiteralScalar()
+ {
+ PARSE(doc, ex8_7);
+ YAML_ASSERT(doc.to<std::string>() == "literal\n\ttext\n");
+ return true;
+ }
+
+ // 8.8
+ TEST LiteralContent()
+ {
+ PARSE(doc, ex8_8);
+ YAML_ASSERT(doc.to<std::string>() == "\n\nliteral\n \n\ntext\n");
+ return true;
+ }
+
+ // 8.9
+ TEST FoldedScalar()
+ {
+ PARSE(doc, ex8_9);
+ YAML_ASSERT(doc.to<std::string>() == "folded text\n");
+ return true;
+ }
+
+ // 8.10
+ TEST FoldedLines()
+ {
+ PARSE(doc, ex8_10);
+ YAML_ASSERT(doc.to<std::string>() == "\nfolded line\nnext line\n * bullet\n\n * list\n * lines\n\nlast line\n");
+ return true;
+ }
+
+ // 8.11
+ TEST MoreIndentedLines()
+ {
+ return true; // same as 8.10
+ }
+
+ // 8.12
+ TEST EmptySeparationLines()
+ {
+ return true; // same as 8.10
+ }
+
+ // 8.13
+ TEST FinalEmptyLines()
+ {
+ return true; // same as 8.10
+ }
+
+ // 8.14
+ TEST BlockSequence()
+ {
+ PARSE(doc, ex8_14);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc["block sequence"].size() == 2);
+ YAML_ASSERT(doc["block sequence"][0].to<std::string>() == "one");
+ YAML_ASSERT(doc["block sequence"][1].size() == 1);
+ YAML_ASSERT(doc["block sequence"][1]["two"].to<std::string>() == "three");
+ return true;
+ }
+
+ // 8.15
+ TEST BlockSequenceEntryTypes()
+ {
+ PARSE(doc, ex8_15);
+ YAML_ASSERT(doc.size() == 4);
+ YAML_ASSERT(YAML::IsNull(doc[0]));
+ YAML_ASSERT(doc[1].to<std::string>() == "block node\n");
+ YAML_ASSERT(doc[2].size() == 2);
+ YAML_ASSERT(doc[2][0].to<std::string>() == "one");
+ YAML_ASSERT(doc[2][1].to<std::string>() == "two");
+ YAML_ASSERT(doc[3].size() == 1);
+ YAML_ASSERT(doc[3]["one"].to<std::string>() == "two");
+ return true;
+ }
+
+ // 8.16
+ TEST BlockMappings()
+ {
+ PARSE(doc, ex8_16);
+ YAML_ASSERT(doc.size() == 1);
+ YAML_ASSERT(doc["block mapping"].size() == 1);
+ YAML_ASSERT(doc["block mapping"]["key"].to<std::string>() == "value");
+ return true;
+ }
+
+ // 8.17
+ TEST ExplicitBlockMappingEntries()
+ {
+ PARSE(doc, ex8_17);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(IsNull(doc["explicit key"]));
+ YAML_ASSERT(doc["block key\n"].size() == 2);
+ YAML_ASSERT(doc["block key\n"][0].to<std::string>() == "one");
+ YAML_ASSERT(doc["block key\n"][1].to<std::string>() == "two");
+ return true;
+ }
+
+ // 8.18
+ TEST ImplicitBlockMappingEntries()
+ {
+ PARSE(doc, ex8_18);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc["plain key"].to<std::string>() == "in-line value");
+ YAML_ASSERT(IsNull(doc[YAML::Null]));
+ YAML_ASSERT(doc["quoted key"].size() == 1);
+ YAML_ASSERT(doc["quoted key"][0].to<std::string>() == "entry");
+ return true;
+ }
+
+ // 8.19
+ TEST CompactBlockMappings()
+ {
+ PARSE(doc, ex8_19);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc[0].size() == 1);
+ YAML_ASSERT(doc[0]["sun"].to<std::string>() == "yellow");
+ YAML_ASSERT(doc[1].size() == 1);
+ std::map<std::string, std::string> key;
+ key["earth"] = "blue";
+ YAML_ASSERT(doc[1][key].size() == 1);
+ YAML_ASSERT(doc[1][key]["moon"].to<std::string>() == "white");
+ return true;
+ }
+
+ // 8.20
+ TEST BlockNodeTypes()
+ {
+ PARSE(doc, ex8_20);
+ YAML_ASSERT(doc.size() == 3);
+ YAML_ASSERT(doc[0].to<std::string>() == "flow in block");
+ YAML_ASSERT(doc[1].to<std::string>() == "Block scalar\n");
+ YAML_ASSERT(doc[2].size() == 1);
+ YAML_ASSERT(doc[2]["foo"].to<std::string>() == "bar");
+ return true;
+ }
+
+ // 8.21
+ TEST BlockScalarNodes()
+ {
+ PARSE(doc, ex8_21);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["literal"].to<std::string>() == "value"); // Note: I believe this is a bug in the YAML spec - it should be "value\n"
+ YAML_ASSERT(doc["folded"].to<std::string>() == "value");
+ YAML_ASSERT(doc["folded"].Tag() == "!foo");
+ return true;
+ }
+
+ // 8.22
+ TEST BlockCollectionNodes()
+ {
+ PARSE(doc, ex8_22);
+ YAML_ASSERT(doc.size() == 2);
+ YAML_ASSERT(doc["sequence"].size() == 2);
+ YAML_ASSERT(doc["sequence"][0].to<std::string>() == "entry");
+ YAML_ASSERT(doc["sequence"][1].size() == 1);
+ YAML_ASSERT(doc["sequence"][1][0].to<std::string>() == "nested");
+ YAML_ASSERT(doc["mapping"].size() == 1);
+ YAML_ASSERT(doc["mapping"]["foo"].to<std::string>() == "bar");
+ return true;
+ }
+ }
+}
diff --git a/yaml-cpp/test/parsertests.h b/yaml-cpp/test/parsertests.h
new file mode 100755
index 00000000..f3de1b8c
--- /dev/null
+++ b/yaml-cpp/test/parsertests.h
@@ -0,0 +1,13 @@
+#ifndef PARSERTESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define PARSERTESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+namespace Test {
+ bool RunParserTests();
+}
+
+#endif // PARSERTESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
diff --git a/yaml-cpp/test/specexamples.h b/yaml-cpp/test/specexamples.h
new file mode 100755
index 00000000..4688bdcf
--- /dev/null
+++ b/yaml-cpp/test/specexamples.h
@@ -0,0 +1,850 @@
+namespace Test {
+ namespace Spec {
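+ // The exN_M constants reproduce the numbered examples from the YAML spec as
+ // C string literals; quotes, tabs and backslashes are escaped, so the
+ // literals differ cosmetically from the spec text but parse identically.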
+ const char *ex2_1 =
+ "- Mark McGwire\n"
+ "- Sammy Sosa\n"
+ "- Ken Griffey";
+
+ const char *ex2_2 =
+ "hr: 65 # Home runs\n"
+ "avg: 0.278 # Batting average\n"
+ "rbi: 147 # Runs Batted In";
+
+ const char *ex2_3 =
+ "american:\n"
+ "- Boston Red Sox\n"
+ "- Detroit Tigers\n"
+ "- New York Yankees\n"
+ "national:\n"
+ "- New York Mets\n"
+ "- Chicago Cubs\n"
+ "- Atlanta Braves";
+
+ const char *ex2_4 =
+ "-\n"
+ " name: Mark McGwire\n"
+ " hr: 65\n"
+ " avg: 0.278\n"
+ "-\n"
+ " name: Sammy Sosa\n"
+ " hr: 63\n"
+ " avg: 0.288";
+
+ const char *ex2_5 =
+ "- [name , hr, avg ]\n"
+ "- [Mark McGwire, 65, 0.278]\n"
+ "- [Sammy Sosa , 63, 0.288]";
+
+ const char *ex2_6 =
+ "Mark McGwire: {hr: 65, avg: 0.278}\n"
+ "Sammy Sosa: {\n"
+ " hr: 63,\n"
+ " avg: 0.288\n"
+ " }";
+
+ const char *ex2_7 =
+ "# Ranking of 1998 home runs\n"
+ "---\n"
+ "- Mark McGwire\n"
+ "- Sammy Sosa\n"
+ "- Ken Griffey\n"
+ "\n"
+ "# Team ranking\n"
+ "---\n"
+ "- Chicago Cubs\n"
+ "- St Louis Cardinals";
+
+ const char *ex2_8 =
+ "---\n"
+ "time: 20:03:20\n"
+ "player: Sammy Sosa\n"
+ "action: strike (miss)\n"
+ "...\n"
+ "---\n"
+ "time: 20:03:47\n"
+ "player: Sammy Sosa\n"
+ "action: grand slam\n"
+ "...";
+
+ const char *ex2_9 =
+ "---\n"
+ "hr: # 1998 hr ranking\n"
+ " - Mark McGwire\n"
+ " - Sammy Sosa\n"
+ "rbi:\n"
+ " # 1998 rbi ranking\n"
+ " - Sammy Sosa\n"
+ " - Ken Griffey";
+
+ const char *ex2_10 =
+ "---\n"
+ "hr:\n"
+ " - Mark McGwire\n"
+ " # Following node labeled SS\n"
+ " - &SS Sammy Sosa\n"
+ "rbi:\n"
+ " - *SS # Subsequent occurrence\n"
+ " - Ken Griffey";
+
+ const char *ex2_11 =
+ "? - Detroit Tigers\n"
+ " - Chicago cubs\n"
+ ":\n"
+ " - 2001-07-23\n"
+ "\n"
+ "? [ New York Yankees,\n"
+ " Atlanta Braves ]\n"
+ ": [ 2001-07-02, 2001-08-12,\n"
+ " 2001-08-14 ]";
+
+ const char *ex2_12 =
+ "---\n"
+ "# Products purchased\n"
+ "- item : Super Hoop\n"
+ " quantity: 1\n"
+ "- item : Basketball\n"
+ " quantity: 4\n"
+ "- item : Big Shoes\n"
+ " quantity: 1";
+
+ const char *ex2_13 =
+ "# ASCII Art\n"
+ "--- |\n"
+ " \\//||\\/||\n"
+ " // || ||__";
+
+ const char *ex2_14 =
+ "--- >\n"
+ " Mark McGwire's\n"
+ " year was crippled\n"
+ " by a knee injury.";
+
+ const char *ex2_15 =
+ ">\n"
+ " Sammy Sosa completed another\n"
+ " fine season with great stats.\n"
+ " \n"
+ " 63 Home Runs\n"
+ " 0.288 Batting Average\n"
+ " \n"
+ " What a year!";
+
+ const char *ex2_16 =
+ "name: Mark McGwire\n"
+ "accomplishment: >\n"
+ " Mark set a major league\n"
+ " home run record in 1998.\n"
+ "stats: |\n"
+ " 65 Home Runs\n"
+ " 0.278 Batting Average\n";
+
+ const char *ex2_17 =
+ "unicode: \"Sosa did fine.\\u263A\"\n"
+ "control: \"\\b1998\\t1999\\t2000\\n\"\n"
+ "hex esc: \"\\x0d\\x0a is \\r\\n\"\n"
+ "\n"
+ "single: '\"Howdy!\" he cried.'\n"
+ "quoted: ' # Not a ''comment''.'\n"
+ "tie-fighter: '|\\-*-/|'";
+
+ const char *ex2_18 =
+ "plain:\n"
+ " This unquoted scalar\n"
+ " spans many lines.\n"
+ "\n"
+ "quoted: \"So does this\n"
+ " quoted scalar.\\n\"";
+
+ // TODO: 2.19 - 2.22 schema tags
+
+ const char *ex2_23 =
+ "---\n"
+ "not-date: !!str 2002-04-28\n"
+ "\n"
+ "picture: !!binary |\n"
+ " R0lGODlhDAAMAIQAAP//9/X\n"
+ " 17unp5WZmZgAAAOfn515eXv\n"
+ " Pz7Y6OjuDg4J+fn5OTk6enp\n"
+ " 56enmleECcgggoBADs=\n"
+ "\n"
+ "application specific tag: !something |\n"
+ " The semantics of the tag\n"
+ " above may be different for\n"
+ " different documents.";
+
+ const char *ex2_24 =
+ "%TAG ! tag:clarkevans.com,2002:\n"
+ "--- !shape\n"
+ " # Use the ! handle for presenting\n"
+ " # tag:clarkevans.com,2002:circle\n"
+ "- !circle\n"
+ " center: &ORIGIN {x: 73, y: 129}\n"
+ " radius: 7\n"
+ "- !line\n"
+ " start: *ORIGIN\n"
+ " finish: { x: 89, y: 102 }\n"
+ "- !label\n"
+ " start: *ORIGIN\n"
+ " color: 0xFFEEBB\n"
+ " text: Pretty vector drawing.";
+
+ const char *ex2_25 =
+ "# Sets are represented as a\n"
+ "# Mapping where each key is\n"
+ "# associated with a null value\n"
+ "--- !!set\n"
+ "? Mark McGwire\n"
+ "? Sammy Sosa\n"
+ "? Ken Griffey";
+
+ const char *ex2_26 =
+ "# Ordered maps are represented as\n"
+ "# A sequence of mappings, with\n"
+ "# each mapping having one key\n"
+ "--- !!omap\n"
+ "- Mark McGwire: 65\n"
+ "- Sammy Sosa: 63\n"
+ "- Ken Griffey: 58";
+
+ const char *ex2_27 =
+ "--- !<tag:clarkevans.com,2002:invoice>\n"
+ "invoice: 34843\n"
+ "date : 2001-01-23\n"
+ "bill-to: &id001\n"
+ " given : Chris\n"
+ " family : Dumars\n"
+ " address:\n"
+ " lines: |\n"
+ " 458 Walkman Dr.\n"
+ " Suite #292\n"
+ " city : Royal Oak\n"
+ " state : MI\n"
+ " postal : 48046\n"
+ "ship-to: *id001\n"
+ "product:\n"
+ " - sku : BL394D\n"
+ " quantity : 4\n"
+ " description : Basketball\n"
+ " price : 450.00\n"
+ " - sku : BL4438H\n"
+ " quantity : 1\n"
+ " description : Super Hoop\n"
+ " price : 2392.00\n"
+ "tax : 251.42\n"
+ "total: 4443.52\n"
+ "comments:\n"
+ " Late afternoon is best.\n"
+ " Backup contact is Nancy\n"
+ " Billsmer @ 338-4338.";
+
+ const char *ex2_28 =
+ "---\n"
+ "Time: 2001-11-23 15:01:42 -5\n"
+ "User: ed\n"
+ "Warning:\n"
+ " This is an error message\n"
+ " for the log file\n"
+ "---\n"
+ "Time: 2001-11-23 15:02:31 -5\n"
+ "User: ed\n"
+ "Warning:\n"
+ " A slightly different error\n"
+ " message.\n"
+ "---\n"
+ "Date: 2001-11-23 15:03:17 -5\n"
+ "User: ed\n"
+ "Fatal:\n"
+ " Unknown variable \"bar\"\n"
+ "Stack:\n"
+ " - file: TopClass.py\n"
+ " line: 23\n"
+ " code: |\n"
+ " x = MoreObject(\"345\\n\")\n"
+ " - file: MoreClass.py\n"
+ " line: 58\n"
+ " code: |-\n"
+ " foo = bar";
+
+ // TODO: 5.1 - 5.2 BOM
+
+ const char *ex5_3 =
+ "sequence:\n"
+ "- one\n"
+ "- two\n"
+ "mapping:\n"
+ " ? sky\n"
+ " : blue\n"
+ " sea : green";
+
+ const char *ex5_4 =
+ "sequence: [ one, two, ]\n"
+ "mapping: { sky: blue, sea: green }";
+
+ const char *ex5_5 =
+ "# Comment only.";
+
+ const char *ex5_6 =
+ "anchored: !local &anchor value\n"
+ "alias: *anchor";
+
+ const char *ex5_7 =
+ "literal: |\n"
+ " some\n"
+ " text\n"
+ "folded: >\n"
+ " some\n"
+ " text\n";
+
+ const char *ex5_8 =
+ "single: 'text'\n"
+ "double: \"text\"";
+
+ // TODO: 5.9 directive
+ // TODO: 5.10 reserved indicator
+
+ const char *ex5_11 =
+ "|\n"
+ " Line break (no glyph)\n"
+ " Line break (glyphed)\n";
+
+ const char *ex5_12 =
+ "# Tabs and spaces\n"
+ "quoted: \"Quoted\t\"\n"
+ "block: |\n"
+ " void main() {\n"
+ " \tprintf(\"Hello, world!\\n\");\n"
+ " }";
+
+ const char *ex5_13 =
+ "\"Fun with \\\\\n"
+ "\\\" \\a \\b \\e \\f \\\n"
+ "\\n \\r \\t \\v \\0 \\\n"
+ "\\ \\_ \\N \\L \\P \\\n"
+ "\\x41 \\u0041 \\U00000041\"";
+
+ const char *ex5_14 =
+ "Bad escapes:\n"
+ " \"\\c\n"
+ " \\xq-\"";
+
+ const char *ex6_1 =
+ " # Leading comment line spaces are\n"
+ " # neither content nor indentation.\n"
+ " \n"
+ "Not indented:\n"
+ " By one space: |\n"
+ " By four\n"
+ " spaces\n"
+ " Flow style: [ # Leading spaces\n"
+ " By two, # in flow style\n"
+ " Also by two, # are neither\n"
+ " \tStill by two # content nor\n"
+ " ] # indentation.";
+
+ const char *ex6_2 =
+ "? a\n"
+ ": -\tb\n"
+ " - -\tc\n"
+ " - d";
+
+ const char *ex6_3 =
+ "- foo:\t bar\n"
+ "- - baz\n"
+ " -\tbaz";
+
+ const char *ex6_4 =
+ "plain: text\n"
+ " lines\n"
+ "quoted: \"text\n"
+ " \tlines\"\n"
+ "block: |\n"
+ " text\n"
+ " \tlines\n";
+
+ const char *ex6_5 =
+ "Folding:\n"
+ " \"Empty line\n"
+ " \t\n"
+ " as a line feed\"\n"
+ "Chomping: |\n"
+ " Clipped empty lines\n"
+ " ";
+
+ const char *ex6_6 =
+ ">-\n"
+ " trimmed\n"
+ " \n"
+ " \n"
+ "\n"
+ " as\n"
+ " space";
+
+ const char *ex6_7 =
+ ">\n"
+ " foo \n"
+ " \n"
+ " \t bar\n"
+ "\n"
+ " baz\n";
+
+ const char *ex6_8 =
+ "\"\n"
+ " foo \n"
+ " \n"
+ " \t bar\n"
+ "\n"
+ " baz\n"
+ "\"";
+
+ const char *ex6_9 =
+ "key: # Comment\n"
+ " value";
+
+ const char *ex6_10 =
+ " # Comment\n"
+ " \n"
+ "\n";
+
+ const char *ex6_11 =
+ "key: # Comment\n"
+ " # lines\n"
+ " value\n"
+ "\n";
+
+ const char *ex6_12 =
+ "{ first: Sammy, last: Sosa }:\n"
+ "# Statistics:\n"
+ " hr: # Home runs\n"
+ " 65\n"
+ " avg: # Average\n"
+ " 0.278";
+
+ const char *ex6_13 =
+ "%FOO bar baz # Should be ignored\n"
+ " # with a warning.\n"
+ "--- \"foo\"";
+
+ const char *ex6_14 =
+ "%YAML 1.3 # Attempt parsing\n"
+ " # with a warning\n"
+ "---\n"
+ "\"foo\"";
+
+ const char *ex6_15 =
+ "%YAML 1.2\n"
+ "%YAML 1.1\n"
+ "foo";
+
+ const char *ex6_16 =
+ "%TAG !yaml! tag:yaml.org,2002:\n"
+ "---\n"
+ "!yaml!str \"foo\"";
+
+ const char *ex6_17 =
+ "%TAG ! !foo\n"
+ "%TAG ! !foo\n"
+ "bar";
+
+ const char *ex6_18 =
+ "# Private\n"
+ "!foo \"bar\"\n"
+ "...\n"
+ "# Global\n"
+ "%TAG ! tag:example.com,2000:app/\n"
+ "---\n"
+ "!foo \"bar\"";
+
+ const char *ex6_19 =
+ "%TAG !! tag:example.com,2000:app/\n"
+ "---\n"
+ "!!int 1 - 3 # Interval, not integer";
+
+ const char *ex6_20 =
+ "%TAG !e! tag:example.com,2000:app/\n"
+ "---\n"
+ "!e!foo \"bar\"";
+
+ const char *ex6_21 =
+ "%TAG !m! !my-\n"
+ "--- # Bulb here\n"
+ "!m!light fluorescent\n"
+ "...\n"
+ "%TAG !m! !my-\n"
+ "--- # Color here\n"
+ "!m!light green";
+
+ const char *ex6_22 =
+ "%TAG !e! tag:example.com,2000:app/\n"
+ "---\n"
+ "- !e!foo \"bar\"";
+
+ const char *ex6_23 =
+ "!!str &a1 \"foo\":\n"
+ " !!str bar\n"
+ "&a2 baz : *a1";
+
+ const char *ex6_24 =
+ "!<tag:yaml.org,2002:str> foo :\n"
+ " !<!bar> baz";
+
+ const char *ex6_25 =
+ "- !<!> foo\n"
+ "- !<$:?> bar\n";
+
+ const char *ex6_26 =
+ "%TAG !e! tag:example.com,2000:app/\n"
+ "---\n"
+ "- !local foo\n"
+ "- !!str bar\n"
+ "- !e!tag%21 baz\n";
+
+ const char *ex6_27a =
+ "%TAG !e! tag:example,2000:app/\n"
+ "---\n"
+ "- !e! foo";
+
+ const char *ex6_27b =
+ "%TAG !e! tag:example,2000:app/\n"
+ "---\n"
+ "- !h!bar baz";
+
+ const char *ex6_28 =
+ "# Assuming conventional resolution:\n"
+ "- \"12\"\n"
+ "- 12\n"
+ "- ! 12";
+
+ const char *ex6_29 =
+ "First occurrence: &anchor Value\n"
+ "Second occurrence: *anchor";
+
+ const char *ex7_1 =
+ "First occurrence: &anchor Foo\n"
+ "Second occurrence: *anchor\n"
+ "Override anchor: &anchor Bar\n"
+ "Reuse anchor: *anchor";
+
+ const char *ex7_2 =
+ "{\n"
+ " foo : !!str,\n"
+ " !!str : bar,\n"
+ "}";
+
+ const char *ex7_3 =
+ "{\n"
+ " ? foo :,\n"
+ " : bar,\n"
+ "}\n";
+
+ const char *ex7_4 =
+ "\"implicit block key\" : [\n"
+ " \"implicit flow key\" : value,\n"
+ " ]";
+
+ const char *ex7_5 =
+ "\"folded \n"
+ "to a space,\t\n"
+ " \n"
+ "to a line feed, or \t\\\n"
+ " \\ \tnon-content\"";
+
+ const char *ex7_6 =
+ "\" 1st non-empty\n"
+ "\n"
+ " 2nd non-empty \n"
+ "\t3rd non-empty \"";
+
+ const char *ex7_7 =
+ " 'here''s to \"quotes\"'";
+
+ const char *ex7_8 =
+ "'implicit block key' : [\n"
+ " 'implicit flow key' : value,\n"
+ " ]";
+
+ const char *ex7_9 =
+ "' 1st non-empty\n"
+ "\n"
+ " 2nd non-empty \n"
+ "\t3rd non-empty '";
+
+ const char *ex7_10 =
+ "# Outside flow collection:\n"
+ "- ::vector\n"
+ "- \": - ()\"\n"
+ "- Up, up, and away!\n"
+ "- -123\n"
+ "- http://example.com/foo#bar\n"
+ "# Inside flow collection:\n"
+ "- [ ::vector,\n"
+ " \": - ()\",\n"
+ " \"Up, up, and away!\",\n"
+ " -123,\n"
+ " http://example.com/foo#bar ]";
+
+ const char *ex7_11 =
+ "implicit block key : [\n"
+ " implicit flow key : value,\n"
+ " ]";
+
+ const char *ex7_12 =
+ "1st non-empty\n"
+ "\n"
+ " 2nd non-empty \n"
+ "\t3rd non-empty";
+
+ const char *ex7_13 =
+ "- [ one, two, ]\n"
+ "- [three ,four]";
+
+ const char *ex7_14 =
+ "[\n"
+ "\"double\n"
+ " quoted\", 'single\n"
+ " quoted',\n"
+ "plain\n"
+ " text, [ nested ],\n"
+ "single: pair,\n"
+ "]";
+
+ const char *ex7_15 =
+ "- { one : two , three: four , }\n"
+ "- {five: six,seven : eight}";
+
+ const char *ex7_16 =
+ "{\n"
+ "? explicit: entry,\n"
+ "implicit: entry,\n"
+ "?\n"
+ "}";
+
+ const char *ex7_17 =
+ "{\n"
+ "unquoted : \"separate\",\n"
+ "http://foo.com,\n"
+ "omitted value:,\n"
+ ": omitted key,\n"
+ "}";
+
+ const char *ex7_18 =
+ "{\n"
+ "\"adjacent\":value,\n"
+ "\"readable\":value,\n"
+ "\"empty\":\n"
+ "}";
+
+ const char *ex7_19 =
+ "[\n"
+ "foo: bar\n"
+ "]";
+
+ const char *ex7_20 =
+ "[\n"
+ "? foo\n"
+ " bar : baz\n"
+ "]";
+
+ const char *ex7_21 =
+ "- [ YAML : separate ]\n"
+ "- [ : empty key entry ]\n"
+ "- [ {JSON: like}:adjacent ]";
+
+ const char *ex7_22 =
+ "[ foo\n"
+ " bar: invalid,"; // Note: we don't check (on purpose) the >1K chars for an implicit key
+
+ const char *ex7_23 =
+ "- [ a, b ]\n"
+ "- { a: b }\n"
+ "- \"a\"\n"
+ "- 'b'\n"
+ "- c";
+
+ const char *ex7_24 =
+ "- !!str \"a\"\n"
+ "- 'b'\n"
+ "- &anchor \"c\"\n"
+ "- *anchor\n"
+ "- !!str";
+
+ const char *ex8_1 =
+ "- | # Empty header\n"
+ " literal\n"
+ "- >1 # Indentation indicator\n"
+ " folded\n"
+ "- |+ # Chomping indicator\n"
+ " keep\n"
+ "\n"
+ "- >1- # Both indicators\n"
+ " strip\n";
+
+ const char *ex8_2 =
+ "- |\n"
+ " detected\n"
+ "- >\n"
+ " \n"
+ " \n"
+ " # detected\n"
+ "- |1\n"
+ " explicit\n"
+ "- >\n"
+ " \t\n"
+ " detected\n";
+
+ const char *ex8_3a =
+ "- |\n"
+ " \n"
+ " text";
+
+ const char *ex8_3b =
+ "- >\n"
+ " text\n"
+ " text";
+
+ const char *ex8_3c =
+ "- |2\n"
+ " text";
+
+ const char *ex8_4 =
+ "strip: |-\n"
+ " text\n"
+ "clip: |\n"
+ " text\n"
+ "keep: |+\n"
+ " text\n";
+
+ const char *ex8_5 =
+ " # Strip\n"
+ " # Comments:\n"
+ "strip: |-\n"
+ " # text\n"
+ " \n"
+ " # Clip\n"
+ " # comments:\n"
+ "\n"
+ "clip: |\n"
+ " # text\n"
+ " \n"
+ " # Keep\n"
+ " # comments:\n"
+ "\n"
+ "keep: |+\n"
+ " # text\n"
+ "\n"
+ " # Trail\n"
+ " # Comments\n";
+
+ const char *ex8_6 =
+ "strip: >-\n"
+ "\n"
+ "clip: >\n"
+ "\n"
+ "keep: |+\n"
+ "\n";
+
+ const char *ex8_7 =
+ "|\n"
+ " literal\n"
+ " \ttext\n"
+ "\n";
+
+ const char *ex8_8 =
+ "|\n"
+ " \n"
+ " \n"
+ " literal\n"
+ " \n"
+ " \n"
+ " text\n"
+ "\n"
+ " # Comment\n";
+
+ const char *ex8_9 =
+ ">\n"
+ " folded\n"
+ " text\n"
+ "\n";
+
+ const char *ex8_10 =
+ ">\n"
+ "\n"
+ " folded\n"
+ " line\n"
+ "\n"
+ " next\n"
+ " line\n"
+ " * bullet\n"
+ "\n"
+ " * list\n"
+ " * lines\n"
+ "\n"
+ " last\n"
+ " line\n"
+ "\n"
+ "# Comment\n";
+
+ const char *ex8_11 = ex8_10;
+ const char *ex8_12 = ex8_10;
+ const char *ex8_13 = ex8_10;
+
+ const char *ex8_14 =
+ "block sequence:\n"
+ " - one\n"
+ " - two : three\n";
+
+ const char *ex8_15 =
+ "- # Empty\n"
+ "- |\n"
+ " block node\n"
+ "- - one # Compact\n"
+ " - two # sequence\n"
+ "- one: two # Compact mapping\n";
+
+ const char *ex8_16 =
+ "block mapping:\n"
+ " key: value\n";
+
+ const char *ex8_17 =
+ "? explicit key # Empty value\n"
+ "? |\n"
+ " block key\n"
+ ": - one # Explicit compact\n"
+ " - two # block value\n";
+
+ const char *ex8_18 =
+ "plain key: in-line value\n"
+ ": # Both empty\n"
+ "\"quoted key\":\n"
+ "- entry\n";
+
+ const char *ex8_19 =
+ "- sun: yellow\n"
+ "- ? earth: blue\n"
+ " : moon: white\n";
+
+ const char *ex8_20 =
+ "-\n"
+ " \"flow in block\"\n"
+ "- >\n"
+ " Block scalar\n"
+ "- !!map # Block collection\n"
+ " foo : bar\n";
+
+ const char *ex8_21 =
+ "literal: |2\n"
+ " value\n"
+ "folded:\n"
+ " !foo\n"
+ " >1\n"
+ " value\n";
+
+ const char *ex8_22 =
+ "sequence: !!seq\n"
+ "- entry\n"
+ "- !!seq\n"
+ " - nested\n"
+ "mapping: !!map\n"
+ " foo: bar\n";
+ }
+}
+
diff --git a/yaml-cpp/test/spectests.cpp b/yaml-cpp/test/spectests.cpp
new file mode 100755
index 00000000..bffc5062
--- /dev/null
+++ b/yaml-cpp/test/spectests.cpp
@@ -0,0 +1,149 @@
+#include "spectests.h"
+#include "yaml-cpp/yaml.h"
+#include <iostream>
+
+namespace Test
+{
+ namespace {
+ void RunSpecTest(TEST (*test)(), const std::string& index, const std::string& name, int& passed, int& total) {
+ TEST ret;
+ try {
+ ret = test();
+ } catch(const YAML::Exception& e) {
+ ret.ok = false;
+ ret.error = std::string(" Exception caught: ") + e.what();
+ }
+
+ if(!ret.ok) {
+ std::cout << "Spec test " << index << " failed: " << name << "\n";
+ std::cout << ret.error << "\n";
+ }
+
+ if(ret.ok)
+ passed++;
+ total++;
+ }
+ }
+
+ bool RunSpecTests()
+ {
+ int passed = 0;
+ int total = 0;
+ RunSpecTest(&Spec::SeqScalars, "2.1", "Sequence of Scalars", passed, total);
+ RunSpecTest(&Spec::MappingScalarsToScalars, "2.2", "Mapping Scalars to Scalars", passed, total);
+ RunSpecTest(&Spec::MappingScalarsToSequences, "2.3", "Mapping Scalars to Sequences", passed, total);
+ RunSpecTest(&Spec::SequenceOfMappings, "2.4", "Sequence of Mappings", passed, total);
+ RunSpecTest(&Spec::SequenceOfSequences, "2.5", "Sequence of Sequences", passed, total);
+ RunSpecTest(&Spec::MappingOfMappings, "2.6", "Mapping of Mappings", passed, total);
+ RunSpecTest(&Spec::TwoDocumentsInAStream, "2.7", "Two Documents in a Stream", passed, total);
+ RunSpecTest(&Spec::PlayByPlayFeed, "2.8", "Play by Play Feed from a Game", passed, total);
+ RunSpecTest(&Spec::SingleDocumentWithTwoComments, "2.9", "Single Document with Two Comments", passed, total);
+ RunSpecTest(&Spec::SimpleAnchor, "2.10", "Node for \"Sammy Sosa\" appears twice in this document", passed, total);
+ RunSpecTest(&Spec::MappingBetweenSequences, "2.11", "Mapping between Sequences", passed, total);
+ RunSpecTest(&Spec::CompactNestedMapping, "2.12", "Compact Nested Mapping", passed, total);
+ RunSpecTest(&Spec::InLiteralsNewlinesArePreserved, "2.13", "In literals, newlines are preserved", passed, total);
+ RunSpecTest(&Spec::InFoldedScalarsNewlinesBecomeSpaces, "2.14", "In folded scalars, newlines become spaces", passed, total);
+ RunSpecTest(&Spec::FoldedNewlinesArePreservedForMoreIndentedAndBlankLines, "2.15", "Folded newlines are preserved for \"more indented\" and blank lines", passed, total);
+ RunSpecTest(&Spec::IndentationDeterminesScope, "2.16", "Indentation determines scope", passed, total);
+ RunSpecTest(&Spec::QuotedScalars, "2.17", "Quoted scalars", passed, total);
+ RunSpecTest(&Spec::MultiLineFlowScalars, "2.18", "Multi-line flow scalars", passed, total);
+
+ RunSpecTest(&Spec::VariousExplicitTags, "2.23", "Various Explicit Tags", passed, total);
+ RunSpecTest(&Spec::GlobalTags, "2.24", "Global Tags", passed, total);
+ RunSpecTest(&Spec::UnorderedSets, "2.25", "Unordered Sets", passed, total);
+ RunSpecTest(&Spec::OrderedMappings, "2.26", "Ordered Mappings", passed, total);
+ RunSpecTest(&Spec::Invoice, "2.27", "Invoice", passed, total);
+ RunSpecTest(&Spec::LogFile, "2.28", "Log File", passed, total);
+
+ RunSpecTest(&Spec::BlockStructureIndicators, "5.3", "Block Structure Indicators", passed, total);
+ RunSpecTest(&Spec::FlowStructureIndicators, "5.4", "Flow Structure Indicators", passed, total);
+ RunSpecTest(&Spec::NodePropertyIndicators, "5.6", "Node Property Indicators", passed, total);
+ RunSpecTest(&Spec::BlockScalarIndicators, "5.7", "Block Scalar Indicators", passed, total);
+ RunSpecTest(&Spec::QuotedScalarIndicators, "5.8", "Quoted Scalar Indicators", passed, total);
+ RunSpecTest(&Spec::LineBreakCharacters, "5.11", "Line Break Characters", passed, total);
+ RunSpecTest(&Spec::TabsAndSpaces, "5.12", "Tabs and Spaces", passed, total);
+ RunSpecTest(&Spec::EscapedCharacters, "5.13", "Escaped Characters", passed, total);
+ RunSpecTest(&Spec::InvalidEscapedCharacters, "5.14", "Invalid Escaped Characters", passed, total);
+
+ RunSpecTest(&Spec::IndentationSpaces, "6.1", "Indentation Spaces", passed, total);
+ RunSpecTest(&Spec::IndentationIndicators, "6.2", "Indentation Indicators", passed, total);
+ RunSpecTest(&Spec::SeparationSpaces, "6.3", "Separation Spaces", passed, total);
+ RunSpecTest(&Spec::LinePrefixes, "6.4", "Line Prefixes", passed, total);
+ RunSpecTest(&Spec::EmptyLines, "6.5", "Empty Lines", passed, total);
+ RunSpecTest(&Spec::LineFolding, "6.6", "Line Folding", passed, total);
+ RunSpecTest(&Spec::BlockFolding, "6.7", "Block Folding", passed, total);
+ RunSpecTest(&Spec::FlowFolding, "6.8", "Flow Folding", passed, total);
+ RunSpecTest(&Spec::SeparatedComment, "6.9", "Separated Comment", passed, total);
+ RunSpecTest(&Spec::CommentLines, "6.10", "Comment Lines", passed, total);
+ RunSpecTest(&Spec::MultiLineComments, "6.11", "Multi-Line Comments", passed, total);
+ RunSpecTest(&Spec::SeparationSpacesII, "6.12", "Separation Spaces", passed, total);
+ RunSpecTest(&Spec::ReservedDirectives, "6.13", "Reserved Directives", passed, total);
+ RunSpecTest(&Spec::YAMLDirective, "6.14", "YAML Directive", passed, total);
+ RunSpecTest(&Spec::InvalidRepeatedYAMLDirective, "6.15", "Invalid Repeated YAML Directive", passed, total);
+ RunSpecTest(&Spec::TagDirective, "6.16", "Tag Directive", passed, total);
+ RunSpecTest(&Spec::InvalidRepeatedTagDirective, "6.17", "Invalid Repeated Tag Directive", passed, total);
+ RunSpecTest(&Spec::PrimaryTagHandle, "6.18", "Primary Tag Handle", passed, total);
+		RunSpecTest(&Spec::SecondaryTagHandle, "6.19", "Secondary Tag Handle", passed, total);
+		RunSpecTest(&Spec::TagHandles, "6.20", "Tag Handles", passed, total);
+		RunSpecTest(&Spec::LocalTagPrefix, "6.21", "Local Tag Prefix", passed, total);
+		RunSpecTest(&Spec::GlobalTagPrefix, "6.22", "Global Tag Prefix", passed, total);
+		RunSpecTest(&Spec::NodeProperties, "6.23", "Node Properties", passed, total);
+ RunSpecTest(&Spec::VerbatimTags, "6.24", "Verbatim Tags", passed, total);
+ RunSpecTest(&Spec::InvalidVerbatimTags, "6.25", "Invalid Verbatim Tags", passed, total);
+ RunSpecTest(&Spec::TagShorthands, "6.26", "Tag Shorthands", passed, total);
+ RunSpecTest(&Spec::InvalidTagShorthands, "6.27", "Invalid Tag Shorthands", passed, total);
+ RunSpecTest(&Spec::NonSpecificTags, "6.28", "Non Specific Tags", passed, total);
+ RunSpecTest(&Spec::NodeAnchors, "6.29", "Node Anchors", passed, total);
+
+ RunSpecTest(&Spec::AliasNodes, "7.1", "Alias Nodes", passed, total);
+ RunSpecTest(&Spec::EmptyNodes, "7.2", "Empty Nodes", passed, total);
+ RunSpecTest(&Spec::CompletelyEmptyNodes, "7.3", "Completely Empty Nodes", passed, total);
+ RunSpecTest(&Spec::DoubleQuotedImplicitKeys, "7.4", "Double Quoted Implicit Keys", passed, total);
+ RunSpecTest(&Spec::DoubleQuotedLineBreaks, "7.5", "Double Quoted Line Breaks", passed, total);
+ RunSpecTest(&Spec::DoubleQuotedLines, "7.6", "Double Quoted Lines", passed, total);
+ RunSpecTest(&Spec::SingleQuotedCharacters, "7.7", "Single Quoted Characters", passed, total);
+ RunSpecTest(&Spec::SingleQuotedImplicitKeys, "7.8", "Single Quoted Implicit Keys", passed, total);
+ RunSpecTest(&Spec::SingleQuotedLines, "7.9", "Single Quoted Lines", passed, total);
+ RunSpecTest(&Spec::PlainCharacters, "7.10", "Plain Characters", passed, total);
+ RunSpecTest(&Spec::PlainImplicitKeys, "7.11", "Plain Implicit Keys", passed, total);
+ RunSpecTest(&Spec::PlainLines, "7.12", "Plain Lines", passed, total);
+ RunSpecTest(&Spec::FlowSequence, "7.13", "Flow Sequence", passed, total);
+ RunSpecTest(&Spec::FlowSequenceEntries, "7.14", "Flow Sequence Entries", passed, total);
+ RunSpecTest(&Spec::FlowMappings, "7.15", "Flow Mappings", passed, total);
+ RunSpecTest(&Spec::FlowMappingEntries, "7.16", "Flow Mapping Entries", passed, total);
+ RunSpecTest(&Spec::FlowMappingSeparateValues, "7.17", "Flow Mapping Separate Values", passed, total);
+ RunSpecTest(&Spec::FlowMappingAdjacentValues, "7.18", "Flow Mapping Adjacent Values", passed, total);
+ RunSpecTest(&Spec::SinglePairFlowMappings, "7.19", "Single Pair Flow Mappings", passed, total);
+ RunSpecTest(&Spec::SinglePairExplicitEntry, "7.20", "Single Pair Explicit Entry", passed, total);
+ RunSpecTest(&Spec::SinglePairImplicitEntries, "7.21", "Single Pair Implicit Entries", passed, total);
+ RunSpecTest(&Spec::InvalidImplicitKeys, "7.22", "Invalid Implicit Keys", passed, total);
+ RunSpecTest(&Spec::FlowContent, "7.23", "Flow Content", passed, total);
+		RunSpecTest(&Spec::FlowNodes, "7.24", "Flow Nodes", passed, total);
+
+ RunSpecTest(&Spec::BlockScalarHeader, "8.1", "Block Scalar Header", passed, total);
+ RunSpecTest(&Spec::BlockIndentationHeader, "8.2", "Block Indentation Header", passed, total);
+ RunSpecTest(&Spec::InvalidBlockScalarIndentationIndicators, "8.3", "Invalid Block Scalar Indentation Indicators", passed, total);
+ RunSpecTest(&Spec::ChompingFinalLineBreak, "8.4", "Chomping Final Line Break", passed, total);
+ RunSpecTest(&Spec::ChompingTrailingLines, "8.5", "Chomping Trailing Lines", passed, total);
+ RunSpecTest(&Spec::EmptyScalarChomping, "8.6", "Empty Scalar Chomping", passed, total);
+ RunSpecTest(&Spec::LiteralScalar, "8.7", "Literal Scalar", passed, total);
+ RunSpecTest(&Spec::LiteralContent, "8.8", "Literal Content", passed, total);
+ RunSpecTest(&Spec::FoldedScalar, "8.9", "Folded Scalar", passed, total);
+ RunSpecTest(&Spec::FoldedLines, "8.10", "Folded Lines", passed, total);
+ RunSpecTest(&Spec::MoreIndentedLines, "8.11", "More Indented Lines", passed, total);
+ RunSpecTest(&Spec::EmptySeparationLines, "8.12", "Empty Separation Lines", passed, total);
+ RunSpecTest(&Spec::FinalEmptyLines, "8.13", "Final Empty Lines", passed, total);
+ RunSpecTest(&Spec::BlockSequence, "8.14", "Block Sequence", passed, total);
+ RunSpecTest(&Spec::BlockSequenceEntryTypes, "8.15", "Block Sequence Entry Types", passed, total);
+ RunSpecTest(&Spec::BlockMappings, "8.16", "Block Mappings", passed, total);
+ RunSpecTest(&Spec::ExplicitBlockMappingEntries, "8.17", "Explicit Block Mapping Entries", passed, total);
+ RunSpecTest(&Spec::ImplicitBlockMappingEntries, "8.18", "Implicit Block Mapping Entries", passed, total);
+ RunSpecTest(&Spec::CompactBlockMappings, "8.19", "Compact Block Mappings", passed, total);
+ RunSpecTest(&Spec::BlockNodeTypes, "8.20", "Block Node Types", passed, total);
+ RunSpecTest(&Spec::BlockScalarNodes, "8.21", "Block Scalar Nodes", passed, total);
+ RunSpecTest(&Spec::BlockCollectionNodes, "8.22", "Block Collection Nodes", passed, total);
+
+ std::cout << "Spec tests: " << passed << "/" << total << " passed\n";
+ return passed == total;
+ }
+}
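
The driver above only registers tests and reports results; the bodies of the Spec:: functions live in the spec-test sources added alongside it. For orientation, here is a minimal sketch of what one such test could look like, written against the old-style Parser/GetNextDocument and operator>> API that util/parse.cpp below relies on. The function name and checks are illustrative, not the actual implementation:

    #include "spectests.h"
    #include "yaml-cpp/yaml.h"
    #include <sstream>
    #include <string>

    namespace Test {
        namespace Spec {
            // Hypothetical example only; the shipped test for spec example 7.1 is Spec::AliasNodes.
            TEST AliasNodesSketch() {
                std::stringstream stream("First occurrence: &anchor Foo\n"
                                         "Second occurrence: *anchor");
                YAML::Parser parser(stream);
                YAML::Node doc;
                if(!parser.GetNextDocument(doc))
                    return "  no document was parsed";

                std::string first, second;
                doc["First occurrence"] >> first;    // a missing key throws a YAML::Exception, which RunSpecTest catches
                doc["Second occurrence"] >> second;
                if(first != "Foo" || second != "Foo")
                    return "  alias did not resolve to the anchored value";
                return true;
            }
        }
    }

Such a function would be registered exactly like the entries above, e.g. RunSpecTest(&Spec::AliasNodesSketch, "7.1", "Alias Nodes", passed, total); the TEST constructors let a test return true on success or a plain error string on failure.
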
diff --git a/yaml-cpp/test/spectests.h b/yaml-cpp/test/spectests.h
new file mode 100755
index 00000000..5246df58
--- /dev/null
+++ b/yaml-cpp/test/spectests.h
@@ -0,0 +1,360 @@
+#ifndef SPECTESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define SPECTESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include <string>
+
+namespace Test {
+ struct TEST {
+ TEST(): ok(false) {}
+ TEST(bool ok_): ok(ok_) {}
+ TEST(const char *error_): ok(false), error(error_) {}
+
+ bool ok;
+ std::string error;
+ };
+
+ namespace Spec {
+ // 2.1
+ TEST SeqScalars();
+
+ // 2.2
+ TEST MappingScalarsToScalars();
+
+ // 2.3
+ TEST MappingScalarsToSequences();
+
+ // 2.4
+ TEST SequenceOfMappings();
+
+ // 2.5
+ TEST SequenceOfSequences();
+
+ // 2.6
+ TEST MappingOfMappings();
+
+ // 2.7
+ TEST TwoDocumentsInAStream();
+
+ // 2.8
+ TEST PlayByPlayFeed();
+
+ // 2.9
+ TEST SingleDocumentWithTwoComments();
+
+ // 2.10
+ TEST SimpleAnchor();
+
+ // 2.11
+ TEST MappingBetweenSequences();
+
+ // 2.12
+ TEST CompactNestedMapping();
+
+ // 2.13
+ TEST InLiteralsNewlinesArePreserved();
+
+ // 2.14
+ TEST InFoldedScalarsNewlinesBecomeSpaces();
+
+ // 2.15
+ TEST FoldedNewlinesArePreservedForMoreIndentedAndBlankLines();
+
+ // 2.16
+ TEST IndentationDeterminesScope();
+
+ // 2.17
+ TEST QuotedScalars();
+
+ // 2.18
+ TEST MultiLineFlowScalars();
+
+ // TODO: 2.19 - 2.22 schema tags
+
+ // 2.23
+ TEST VariousExplicitTags();
+
+ // 2.24
+ TEST GlobalTags();
+
+ // 2.25
+ TEST UnorderedSets();
+
+ // 2.26
+ TEST OrderedMappings();
+
+ // 2.27
+ TEST Invoice();
+
+ // 2.28
+ TEST LogFile();
+
+ // TODO: 5.1 - 5.2 BOM
+
+ // 5.3
+ TEST BlockStructureIndicators();
+
+ // 5.4
+ TEST FlowStructureIndicators();
+
+ // 5.5
+ TEST CommentIndicator();
+
+ // 5.6
+ TEST NodePropertyIndicators();
+
+ // 5.7
+ TEST BlockScalarIndicators();
+
+ // 5.8
+ TEST QuotedScalarIndicators();
+
+ // TODO: 5.9 directive
+ // TODO: 5.10 reserved indicator
+
+ // 5.11
+ TEST LineBreakCharacters();
+
+ // 5.12
+ TEST TabsAndSpaces();
+
+ // 5.13
+ TEST EscapedCharacters();
+
+ // 5.14
+ TEST InvalidEscapedCharacters();
+
+ // 6.1
+ TEST IndentationSpaces();
+
+ // 6.2
+ TEST IndentationIndicators();
+
+ // 6.3
+ TEST SeparationSpaces();
+
+ // 6.4
+ TEST LinePrefixes();
+
+ // 6.5
+ TEST EmptyLines();
+
+ // 6.6
+ TEST LineFolding();
+
+ // 6.7
+ TEST BlockFolding();
+
+ // 6.8
+ TEST FlowFolding();
+
+ // 6.9
+ TEST SeparatedComment();
+
+ // 6.10
+ TEST CommentLines();
+
+ // 6.11
+ TEST MultiLineComments();
+
+ // 6.12
+ TEST SeparationSpacesII();
+
+ // 6.13
+ TEST ReservedDirectives();
+
+ // 6.14
+ TEST YAMLDirective();
+
+ // 6.15
+ TEST InvalidRepeatedYAMLDirective();
+
+ // 6.16
+ TEST TagDirective();
+
+ // 6.17
+ TEST InvalidRepeatedTagDirective();
+
+ // 6.18
+ TEST PrimaryTagHandle();
+
+ // 6.19
+ TEST SecondaryTagHandle();
+
+ // 6.20
+ TEST TagHandles();
+
+ // 6.21
+ TEST LocalTagPrefix();
+
+ // 6.22
+ TEST GlobalTagPrefix();
+
+ // 6.23
+ TEST NodeProperties();
+
+ // 6.24
+ TEST VerbatimTags();
+
+ // 6.25
+ TEST InvalidVerbatimTags();
+
+ // 6.26
+ TEST TagShorthands();
+
+ // 6.27
+ TEST InvalidTagShorthands();
+
+ // 6.28
+ TEST NonSpecificTags();
+
+ // 6.29
+ TEST NodeAnchors();
+
+ // 7.1
+ TEST AliasNodes();
+
+ // 7.2
+ TEST EmptyNodes();
+
+ // 7.3
+ TEST CompletelyEmptyNodes();
+
+ // 7.4
+ TEST DoubleQuotedImplicitKeys();
+
+ // 7.5
+ TEST DoubleQuotedLineBreaks();
+
+ // 7.6
+ TEST DoubleQuotedLines();
+
+ // 7.7
+ TEST SingleQuotedCharacters();
+
+ // 7.8
+ TEST SingleQuotedImplicitKeys();
+
+ // 7.9
+ TEST SingleQuotedLines();
+
+ // 7.10
+ TEST PlainCharacters();
+
+ // 7.11
+ TEST PlainImplicitKeys();
+
+ // 7.12
+ TEST PlainLines();
+
+ // 7.13
+ TEST FlowSequence();
+
+ // 7.14
+ TEST FlowSequenceEntries();
+
+ // 7.15
+ TEST FlowMappings();
+
+ // 7.16
+ TEST FlowMappingEntries();
+
+ // 7.17
+ TEST FlowMappingSeparateValues();
+
+ // 7.18
+ TEST FlowMappingAdjacentValues();
+
+ // 7.19
+ TEST SinglePairFlowMappings();
+
+ // 7.20
+ TEST SinglePairExplicitEntry();
+
+ // 7.21
+ TEST SinglePairImplicitEntries();
+
+ // 7.22
+ TEST InvalidImplicitKeys();
+
+ // 7.23
+ TEST FlowContent();
+
+ // 7.24
+ TEST FlowNodes();
+
+ // 8.1
+ TEST BlockScalarHeader();
+
+ // 8.2
+ TEST BlockIndentationHeader();
+
+ // 8.3
+ TEST InvalidBlockScalarIndentationIndicators();
+
+ // 8.4
+ TEST ChompingFinalLineBreak();
+
+ // 8.5
+ TEST ChompingTrailingLines();
+
+ // 8.6
+ TEST EmptyScalarChomping();
+
+ // 8.7
+ TEST LiteralScalar();
+
+ // 8.8
+ TEST LiteralContent();
+
+ // 8.9
+ TEST FoldedScalar();
+
+ // 8.10
+ TEST FoldedLines();
+
+ // 8.11
+ TEST MoreIndentedLines();
+
+ // 8.12
+ TEST EmptySeparationLines();
+
+ // 8.13
+ TEST FinalEmptyLines();
+
+ // 8.14
+ TEST BlockSequence();
+
+ // 8.15
+ TEST BlockSequenceEntryTypes();
+
+ // 8.16
+ TEST BlockMappings();
+
+ // 8.17
+ TEST ExplicitBlockMappingEntries();
+
+ // 8.18
+ TEST ImplicitBlockMappingEntries();
+
+ // 8.19
+ TEST CompactBlockMappings();
+
+ // 8.20
+ TEST BlockNodeTypes();
+
+ // 8.21
+ TEST BlockScalarNodes();
+
+ // 8.22
+ TEST BlockCollectionNodes();
+ }
+
+ bool RunSpecTests();
+}
+
+#endif // SPECTESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
diff --git a/yaml-cpp/test/tests.cpp b/yaml-cpp/test/tests.cpp
new file mode 100755
index 00000000..2dff6eeb
--- /dev/null
+++ b/yaml-cpp/test/tests.cpp
@@ -0,0 +1,30 @@
+#include "tests.h"
+#include "emittertests.h"
+#include "nodetests.h"
+#include "parsertests.h"
+#include "spectests.h"
+#include "yaml-cpp/yaml.h"
+#include <fstream>
+#include <sstream>
+#include <vector>
+#include <iostream>
+
+namespace Test
+{
+ void RunAll()
+ {
+ bool passed = true;
+ if(!RunParserTests())
+ passed = false;
+
+ if(!RunEmitterTests())
+ passed = false;
+
+ if(!RunSpecTests())
+ passed = false;
+
+ if(passed)
+ std::cout << "All tests passed!\n";
+ }
+}
+
diff --git a/yaml-cpp/test/tests.h b/yaml-cpp/test/tests.h
new file mode 100755
index 00000000..757dbc53
--- /dev/null
+++ b/yaml-cpp/test/tests.h
@@ -0,0 +1,53 @@
+#ifndef TESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+#define TESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
+
+#if defined(_MSC_VER) || (defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || (__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
+#pragma once
+#endif
+
+#include <string>
+
+namespace Test {
+ void RunAll();
+
+ namespace Parser {
+ // scalar tests
+ void SimpleScalar(std::string& inputScalar, std::string& desiredOutput);
+ void MultiLineScalar(std::string& inputScalar, std::string& desiredOutput);
+ void LiteralScalar(std::string& inputScalar, std::string& desiredOutput);
+ void FoldedScalar(std::string& inputScalar, std::string& desiredOutput);
+ void ChompedFoldedScalar(std::string& inputScalar, std::string& desiredOutput);
+ void ChompedLiteralScalar(std::string& inputScalar, std::string& desiredOutput);
+ void FoldedScalarWithIndent(std::string& inputScalar, std::string& desiredOutput);
+ void ColonScalar(std::string& inputScalar, std::string& desiredOutput);
+ void QuotedScalar(std::string& inputScalar, std::string& desiredOutput);
+ void CommaScalar(std::string& inputScalar, std::string& desiredOutput);
+ void DashScalar(std::string& inputScalar, std::string& desiredOutput);
+ void URLScalar(std::string& inputScalar, std::string& desiredOutput);
+
+ // misc tests
+ bool SimpleSeq();
+ bool SimpleMap();
+ bool FlowSeq();
+ bool FlowMap();
+ bool FlowMapWithOmittedKey();
+ bool FlowMapWithOmittedValue();
+ bool FlowMapWithSoloEntry();
+ bool FlowMapEndingWithSoloEntry();
+ bool QuotedSimpleKeys();
+ bool CompressedMapAndSeq();
+ bool NullBlockSeqEntry();
+ bool NullBlockMapKey();
+ bool NullBlockMapValue();
+ bool SimpleAlias();
+ bool AliasWithNull();
+ bool AnchorInSimpleKey();
+ bool AliasAsSimpleKey();
+ bool ExplicitDoc();
+ bool MultipleDocs();
+ bool ExplicitEndDoc();
+ bool MultipleDocsWithSomeExplicitIndicators();
+ }
+}
+
+#endif // TESTS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
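
tests.h exposes a single RunAll() entry point, so a test binary needs only a trivial driver around it. A hypothetical sketch (the commit's actual main for the test target is defined elsewhere in this change):

    #include "tests.h"

    int main()
    {
        Test::RunAll();
        return 0;
    }

Since RunAll() prints a summary but returns void, a driver that wants a non-zero exit status on failure would need the pass/fail result (which the individual Run*Tests() functions do return) to be propagated out of RunAll().
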
diff --git a/yaml-cpp/util/CMakeLists.txt b/yaml-cpp/util/CMakeLists.txt
new file mode 100755
index 00000000..22339f02
--- /dev/null
+++ b/yaml-cpp/util/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_executable(parse parse.cpp)
+target_link_libraries(parse yaml-cpp)
diff --git a/yaml-cpp/util/api.cpp b/yaml-cpp/util/api.cpp
new file mode 100755
index 00000000..e5180a8a
--- /dev/null
+++ b/yaml-cpp/util/api.cpp
@@ -0,0 +1,129 @@
+// a sketch of what the new API might look like
+
+#include "yaml-cpp/yaml.h"
+#include <iostream>
+
+int main()
+{
+ {
+ // test.yaml
+ // - foo
+ // - primes: [2, 3, 5, 7, 11]
+ // odds: [1, 3, 5, 7, 9, 11]
+ // - [x, y]
+
+ // move-like semantics
+ YAML::Value root = YAML::Parse("test.yaml");
+
+ std::cout << root[0].as<std::string>(); // "foo"
+ std::cout << str(root[0]); // "foo", shorthand?
+ std::cout << root[1]["primes"][3].as<int>(); // "7"
+ std::cout << root[1]["odds"][6].as<int>(); // throws?
+
+ root[2].push_back(5);
+ root[3] = "Hello, World";
+ root[0].reset();
+ root[0]["key"] = "value";
+
+ std::cout << root;
+ // # not sure about formatting
+ // - {key: value}
+ // - primes: [2, 3, 5, 7, 11]
+ // odds: [1, 3, 5, 7, 9, 11]
+ // - [x, y, 5]
+ // - Hello, World
+ }
+
+ {
+ // for all copy-like commands, think of python's "name/value" semantics
+ YAML::Value root = "Hello"; // Hello
+ root = YAML::Sequence(); // []
+ root[0] = 0; // [0]
+ root[2] = "two"; // [0, ~, two] # forces root[1] to be initialized to null
+
+ YAML::Value other = root; // both point to the same thing
+ other[0] = 5; // now root[0] is 0 also
+ other.push_back(root); // &1 [5, ~, two, *1]
+ other[3][0] = 0; // &1 [0, ~, two, *1] # since it's a true alias
+ other.push_back(Copy(root)); // &1 [0, ~, two, *1, &2 [0, ~, two, *2]]
+ other[4][0] = 5; // &1 [0, ~, two, *1, &2 [5, ~, two, *2]] # they're really different
+ }
+
+ {
+ YAML::Value node; // ~
+ node[0] = 1; // [1] # auto-construct a sequence
+ node["key"] = 5; // {0: 1, key: 5} # auto-turn it into a map
+ node.push_back(10); // error, can't turn a map into a sequence
+ node.erase("key"); // {0: 1} # still a map, even if we remove the key that caused the problem
+ node = "Hello"; // Hello # assignment overwrites everything, so it's now just a plain scalar
+ }
+
+ {
+ YAML::Value map; // ~
+ map[3] = 1; // {3: 1} # auto-constructs a map, *not* a sequence
+
+ YAML::Value seq; // ~
+ seq = YAML::Sequence(); // []
+ seq[3] = 1; // [~, ~, ~, 1]
+ }
+
+ {
+ YAML::Value node; // ~
+ node[0] = node; // &1 [*1] # fun stuff
+ }
+
+ {
+ YAML::Value node;
+ YAML::Value subnode = node["key"]; // 'subnode' is not instantiated ('node' is still null)
+ subnode = "value"; // {key: value} # now it is
+ YAML::Value subnode2 = node["key2"];
+ node["key3"] = subnode2; // subnode2 is still not instantiated, but node["key3"] is "pseudo" aliased to it
+ subnode2 = "monkey"; // {key: value, key2: &1 monkey, key3: *1} # bam! it instantiates both
+ }
+
+ {
+ YAML::Value seq = YAML::Sequence();
+ seq[0] = "zero"; // [zero]
+ seq[1] = seq[0]; // [&1 zero, *1]
+ seq[0] = seq[1]; // [&1 zero, *1] # no-op (they both alias the same thing, so setting them equal is nothing)
+ Is(seq[0], seq[1]); // true
+ seq[1] = "one"; // [&1 one, *1]
+ UnAlias(seq[1]); // [one, one]
+ Is(seq[0], seq[1]); // false
+ }
+
+ {
+ YAML::Value root;
+ root.push_back("zero");
+ root.push_back("one");
+ root.push_back("two");
+ YAML::Value two = root[2];
+ root = "scalar"; // 'two' is still "two", even though 'root' is "scalar" (the sequence effectively no longer exists)
+
+ // Note: in all likelihood, the memory for nodes "zero" and "one" is still allocated. How can it go away? Weak pointers?
+ }
+
+ {
+ YAML::Value root; // ~
+ root[0] = root; // &1 [*1]
+ root[0] = 5; // [5]
+ }
+
+ {
+ YAML::Value root;
+ YAML::Value key;
+ key["key"] = "value";
+ root[key] = key; // &1 {key: value}: *1
+ }
+
+ {
+ YAML::Value root;
+ root[0] = "hi";
+ root[1][0] = "bye";
+ root[1][1] = root; // &1 [hi, [bye, *1]] # root
+ YAML::Value sub = root[1]; // &1 [bye, [hi, *1]] # sub
+ root = "gone"; // [bye, gone] # sub
+ }
+
+ return 0;
+}
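
api.cpp is explicitly a design sketch for a future mutable-value API. For contrast, reading the same test.yaml from its first block with the API this tree actually builds (Parser/GetNextDocument plus operator>> extraction, as used in util/parse.cpp) looks roughly like this; a sketch assuming the old-style extraction operators:

    #include "yaml-cpp/yaml.h"
    #include <fstream>
    #include <iostream>
    #include <string>

    int main()
    {
        std::ifstream fin("test.yaml");
        YAML::Parser parser(fin);
        YAML::Node doc;
        if(parser.GetNextDocument(doc)) {
            std::string first;
            doc[0] >> first;                  // "foo"
            int prime;
            doc[1]["primes"][3] >> prime;     // 7
            std::cout << first << " " << prime << "\n";
        }
        return 0;
    }

The difference the sketch is driving at: these nodes are effectively read-only views owned by the parser, whereas the proposed YAML::Value would be a mutable handle with Python-like name/value (alias) semantics.
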
diff --git a/yaml-cpp/util/parse.cpp b/yaml-cpp/util/parse.cpp
new file mode 100755
index 00000000..d02a76a7
--- /dev/null
+++ b/yaml-cpp/util/parse.cpp
@@ -0,0 +1,65 @@
+#include "yaml-cpp/yaml.h"
+#include "yaml-cpp/eventhandler.h"
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+struct Params {
+ bool hasFile;
+ std::string fileName;
+};
+
+Params ParseArgs(int argc, char **argv) {
+	Params p;
+	std::vector<std::string> args(argv + 1, argv + argc);
+	p.hasFile = !args.empty();             // treat the first argument, if any, as the input file
+	p.fileName = p.hasFile ? args[0] : "";
+	return p;
+}
+
+class NullEventHandler: public YAML::EventHandler
+{
+public:
+ virtual void OnDocumentStart(const YAML::Mark&) {}
+ virtual void OnDocumentEnd() {}
+
+ virtual void OnNull(const YAML::Mark&, YAML::anchor_t) {}
+ virtual void OnAlias(const YAML::Mark&, YAML::anchor_t) {}
+ virtual void OnScalar(const YAML::Mark&, const std::string&, YAML::anchor_t, const std::string&) {}
+
+ virtual void OnSequenceStart(const YAML::Mark&, const std::string&, YAML::anchor_t) {}
+ virtual void OnSequenceEnd() {}
+
+ virtual void OnMapStart(const YAML::Mark&, const std::string&, YAML::anchor_t) {}
+ virtual void OnMapEnd() {}
+};
+
+void parse(std::istream& input)
+{
+ try {
+ YAML::Parser parser(input);
+ YAML::Node doc;
+ while(parser.GetNextDocument(doc)) {
+ YAML::Emitter emitter;
+ emitter << doc;
+ std::cout << emitter.c_str() << "\n";
+ }
+ } catch(const YAML::Exception& e) {
+ std::cerr << e.what() << "\n";
+ }
+}
+
+int main(int argc, char **argv)
+{
+	Params p = ParseArgs(argc, argv);
+
+	if(p.hasFile) {
+		std::ifstream fin;
+		fin.open(p.fileName.c_str());
+		parse(fin);
+	} else {
+		parse(std::cin);
+	}
+
+	return 0;
+}
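
Because parse() takes any std::istream, the same Parser/Emitter round-trip loop can be exercised without touching the filesystem. A minimal self-contained variant (illustrative):

    #include "yaml-cpp/yaml.h"
    #include <iostream>
    #include <sstream>

    int main()
    {
        std::stringstream input("- foo\n- bar: baz\n");
        YAML::Parser parser(input);
        YAML::Node doc;
        while(parser.GetNextDocument(doc)) {
            YAML::Emitter emitter;
            emitter << doc;                   // re-emit the parsed document
            std::cout << emitter.c_str() << "\n";
        }
        return 0;
    }

This is exactly the per-document loop parse() runs, minus the exception handling.
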
diff --git a/yaml-cpp/yaml-cpp.pc.cmake b/yaml-cpp/yaml-cpp.pc.cmake
new file mode 100755
index 00000000..04d343f6
--- /dev/null
+++ b/yaml-cpp/yaml-cpp.pc.cmake
@@ -0,0 +1,11 @@
+prefix=@CMAKE_INSTALL_PREFIX@
+exec_prefix=@CMAKE_INSTALL_PREFIX@
+libdir=${prefix}/@LIB_INSTALL_DIR@
+includedir=${prefix}/@INCLUDE_INSTALL_ROOT_DIR@
+
+Name: Yaml-cpp
+Description: A YAML parser and emitter for C++
+Version: @YAML_CPP_VERSION@
+Requires:
+Libs: -L${libdir} -lyaml-cpp
+Cflags: -I${includedir}